# File: backend/app/models/pydantic_models.py (Update Read models, add Message models)
# Description: Pydantic models defining the API data structures
from pydantic import BaseModel, Field
from typing import Dict, Optional, List
import uuid
from datetime import datetime  # Use datetime directly


# --- Assistant Models ---
class AssistantBase(BaseModel):
    """Base assistant model with the shared fields."""
    name: str = Field(..., min_length=1, max_length=50, description="Assistant name")
    description: Optional[str] = Field(None, max_length=200, description="Assistant description")
    avatar: Optional[str] = Field(None, max_length=5, description="Avatar emoji or character")
    system_prompt: str = Field(..., min_length=1, max_length=4000, description="System prompt")
    model: str = Field(..., description="LLM model to use")
    temperature: float = Field(0.7, ge=0.0, le=1.0, description="Temperature parameter (0.0-1.0)")
    # top_p, max_tokens, etc. could be added here


class AssistantCreate(AssistantBase):
    """Model used when creating an assistant (no ID required)."""
    pass


class AssistantUpdate(BaseModel):
    """Model used when updating an assistant (all fields optional)."""
    name: Optional[str] = Field(None, min_length=1, max_length=50)
    description: Optional[str] = Field(None, max_length=200)
    avatar: Optional[str] = Field(None, max_length=5)
    system_prompt: Optional[str] = Field(None, min_length=1, max_length=4000)
    model: Optional[str] = None
    temperature: Optional[float] = Field(None, ge=0.0, le=1.0)


class AssistantRead(AssistantBase):
    """Model returned when reading an assistant (includes ID)."""
    id: str = Field(..., description="Unique assistant ID")
    created_at: datetime  # Add timestamps
    updated_at: Optional[datetime] = None

    class Config:
        from_attributes = True  # Pydantic v2 name for v1's orm_mode
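
# Illustrative usage (a sketch, not called anywhere in this module).
# `orm_assistant` stands in for whatever DB object the CRUD layer returns;
# the field values and model name below are made up:
#
#   draft = AssistantCreate(
#       name="Translator",
#       system_prompt="You are a helpful translator.",
#       model="gpt-4o-mini",
#       temperature=0.3,
#   )
#   stored = AssistantRead.model_validate(orm_assistant)  # relies on from_attributes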


# --- Chat Models (updated) ---
class ChatRequest(BaseModel):
    """Chat request model (adds session_id and assistant_id)."""
    message: str
    session_id: str = Field(..., description="Current session ID (may be 'temp-new-chat')")
    assistant_id: str = Field(..., description="ID of the assistant in use")


class ChatResponse(BaseModel):
    """Chat response model."""
    reply: str
    session_id: Optional[str] = None  # (optional) new session ID if one was created
    session_title: Optional[str] = None  # (optional) new session title if one was created
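
# Illustrative request/response bodies for the chat endpoint (shape only;
# the endpoint itself lives in the route layer, not in this file):
#
#   request:  {"message": "Hello", "session_id": "temp-new-chat", "assistant_id": "<assistant id>"}
#   response: {"reply": "Hi!", "session_id": "<new session id>", "session_title": "<generated title>"}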


# --- Session Models ---
class SessionCreateRequest(BaseModel):
    """Session creation request model."""
    assistant_id: str
    first_message: str  # the user's first message, used to generate the title


class SessionCreateResponse(BaseModel):
    """Session creation response model."""
    id: str
    title: str
    assistant_id: str
    created_at: str  # returned as an ISO-format timestamp string


class SessionRead(BaseModel):
    """Session read model."""
    id: str
    title: str
    assistant_id: str
    created_at: datetime  # Use datetime
    updated_at: Optional[datetime] = None

    class Config:
        from_attributes = True


# --- Message Models (New) ---
class MessageBase(BaseModel):
    sender: str  # 'user' or 'ai'
    text: str


class MessageRead(MessageBase):
    id: str
    session_id: str
    order: int
    created_at: datetime

    class Config:
        from_attributes = True


# --- Workflow Node/Edge Models (for API request/response) ---
# Mirrors the React Flow structure loosely
class NodeData(BaseModel):
    # Define common fields or use Dict[str, Any]
    label: Optional[str] = None
    text: Optional[str] = None  # For ChatInput
    displayText: Optional[str] = None  # For ChatOutput
    model: Optional[str] = None  # For LLMNode
    temperature: Optional[float] = None  # For LLMNode
    systemPrompt: Optional[str] = None  # For LLMNode
    # Add other potential data fields from your nodes
    # Use extra = 'allow' for flexibility if needed:
    # class Config:
    #     extra = 'allow'


class NodeModel(BaseModel):
    id: str
    type: str  # e.g., 'chatInputNode', 'llmNode'
    position: Dict[str, float]  # { x: number, y: number }
    data: NodeData  # Use the specific data model


class EdgeModel(BaseModel):
    id: str
    source: str
    target: str
    sourceHandle: Optional[str] = None
    targetHandle: Optional[str] = None


# --- Workflow Execution Models ---
class WorkflowRunRequest(BaseModel):
    nodes: List[NodeModel]
    edges: List[EdgeModel]


class WorkflowRunResponse(BaseModel):
    success: bool
    message: Optional[str] = None
    output: Optional[str] = None  # The final output text
    output_node_id: Optional[str] = None  # ID of the node that produced the output
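

# Illustrative smoke test (a sketch, not used by the application): builds the
# minimal two-node payload React Flow might send for a chat-input -> LLM graph.
# Node ids, coordinates, and the model name are made up for demonstration.
if __name__ == "__main__":
    demo_request = WorkflowRunRequest(
        nodes=[
            NodeModel(
                id="in-1",
                type="chatInputNode",
                position={"x": 0.0, "y": 0.0},
                data=NodeData(text="Hello"),
            ),
            NodeModel(
                id="llm-1",
                type="llmNode",
                position={"x": 240.0, "y": 0.0},
                data=NodeData(model="gpt-4o-mini", temperature=0.7),  # hypothetical model name
            ),
        ],
        edges=[EdgeModel(id="e1", source="in-1", target="llm-1")],
    )
    print(demo_request.model_dump_json(indent=2))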