# File: backend/app/api/v1/endpoints/chat.py (updated)
# Description: API routes for the chat feature (using ChatService)

from fastapi import APIRouter, Depends, HTTPException

from app.models.pydantic_models import ChatRequest, ChatResponse
# Import the ChatService class and its shared instance
from app.services.chat_service import ChatService, chat_service_instance

router = APIRouter()


# --- (Optional) Resolve ChatService via FastAPI dependency injection ---
# This is more idiomatic FastAPI and makes the service easy to mock or swap in tests.
# async def get_chat_service() -> ChatService:
#     return chat_service_instance


@router.post("/", response_model=ChatResponse)
async def handle_chat_message(
    request: ChatRequest,
    # chat_service: ChatService = Depends(get_chat_service),  # dependency-injected variant
):
    """
    Handle a chat message sent by the user and return the AI reply produced via LangChain.
    """
    user_message = request.message
    # session_id = request.session_id  # if ChatRequest carries a session_id
    print(f"Received user message: {user_message}")

    try:
        # --- Call ChatService to get the AI reply ---
        # Using the module-level instance (the simple approach)
        ai_reply = await chat_service_instance.get_ai_reply(user_message)
        # Or, with the dependency-injected instance:
        # ai_reply = await chat_service.get_ai_reply(user_message, session_id)

        print(f"Sending AI reply: {ai_reply}")
        return ChatResponse(reply=ai_reply)

    except Exception as e:
        # If ChatService raises an exception, catch it and return an HTTP 500 error
        print(f"Error while handling chat message: {e}")
        raise HTTPException(status_code=500, detail=str(e))
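

# --- Supporting pieces (sketch, not from the original files) ---
# The endpoint above imports ChatRequest/ChatResponse and chat_service_instance
# from elsewhere in the project. The minimal shapes below are assumptions that
# match only how chat.py uses them (a `message` field in, a `reply` field out,
# and an async `get_ai_reply` method); the real project may define richer
# versions (e.g. with a session_id or a LangChain chain wired in).

# File: backend/app/models/pydantic_models.py (sketch)
from pydantic import BaseModel


class ChatRequest(BaseModel):
    message: str
    # session_id: str | None = None  # optional, if conversations are tracked


class ChatResponse(BaseModel):
    reply: str


# File: backend/app/services/chat_service.py (sketch)
class ChatService:
    async def get_ai_reply(self, user_message: str) -> str:
        # Placeholder: the real implementation would invoke a LangChain chain here.
        return f"Echo: {user_message}"


chat_service_instance = ChatService()


# Example request, assuming this router is mounted under /api/v1/chat
# (the exact prefix depends on how the API router is included in main.py):
#   curl -X POST http://localhost:8000/api/v1/chat/ \
#        -H "Content-Type: application/json" \
#        -d '{"message": "Hello"}'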