# File: backend/app/flow_components/llm_node.py (New)
# Description: Backend component for LLMNode

import asyncio

from typing import ClassVar, Dict, Any, Optional, List

from sqlalchemy.ext.asyncio import AsyncSession
from langchain_core.messages import HumanMessage, SystemMessage, AIMessage, BaseMessage

from .base import BaseComponent, InputField, OutputField, register_component
from app.services.chat_service import ChatService  # Assuming ChatService can be used or adapted
from app.core.config import OPENAI_API_KEY

# Instantiate or get a ChatService instance.
# This should use proper dependency injection in a real app.
chat_service_instance = ChatService(default_api_key=OPENAI_API_KEY)
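
# A fuller app might provide ChatService through the web framework's dependency
# injection instead of a module-level singleton. A minimal sketch, assuming a
# FastAPI backend; the provider name `get_chat_service` is hypothetical:
#
#     from fastapi import Depends
#
#     def get_chat_service() -> ChatService:
#         return ChatService(default_api_key=OPENAI_API_KEY)
#
#     # Route handlers would then declare:
#     #     chat_service: ChatService = Depends(get_chat_service)
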
@register_component
class LLMNodeComponent(BaseComponent):
    name: ClassVar[str] = "llmNode"
    display_name: ClassVar[str] = "LLM Call"
    description: ClassVar[str] = "Generate text using a large language model."
    icon: ClassVar[str] = "BrainCircuit"

    inputs: ClassVar[List[InputField]] = [
        InputField(name="input-text", display_name="Input", field_type="message", required=True, is_handle=True, info="The connected input text or message."),
        InputField(name="systemPrompt", display_name="System Prompt", field_type="str", required=True, is_handle=False, info="Defines the assistant's role and behavior."),
        InputField(name="model", display_name="Model Name", field_type="str", required=True, is_handle=False, info="The LLM model to use."),  # Add options if needed
        InputField(name="temperature", display_name="Temperature", field_type="float", required=True, is_handle=False, value=0.7, range_spec={'min': 0, 'max': 1, 'step': 0.1}),
        InputField(name="apiKey", display_name="API Key", field_type="secret", required=False, is_handle=False, info="(Insecure) Overrides the default API key."),
        # Add other parameters such as max_tokens as InputFields.
    ]
    outputs: ClassVar[List[OutputField]] = [
        OutputField(name="output-message", display_name="Message", field_type="message", info="The message generated by the LLM.")
    ]

    async def run(self, inputs: Dict[str, Any], db: Optional[AsyncSession] = None) -> Dict[str, Any]:
        prompt_input = inputs.get("input-text")
        system_prompt = inputs.get("systemPrompt")
        model = inputs.get("model")
        temperature = inputs.get("temperature")
        # api_key = inputs.get("apiKey")  # Handle the API key securely if used

        if not prompt_input or not system_prompt or not model or temperature is None:
            raise ValueError("LLMNode configuration or input is incomplete.")

        print(f"LLMNode ({self.node_data.get('id', 'N/A')}): running model '{model}' (temp: {temperature})")
        print(f"  System prompt: {system_prompt[:50]}...")
        print(f"  Input prompt: {str(prompt_input)[:50]}...")

        # --- Adapt ChatService or make a direct LangChain call ---
        # This simplified call assumes a method that takes direct inputs;
        # in practice you might build a small LangChain chain here.
        try:
            # Construct messages for a more robust call.
            messages: List[BaseMessage] = []
            if system_prompt:
                messages.append(SystemMessage(content=system_prompt))
            # Assume input-text provides the user message content.
            messages.append(HumanMessage(content=str(prompt_input)))  # Ensure it is a string

            # Simplified call -- needs adapting to the actual ChatService structure.
            # ChatService might expose a method such as:
            #   async def invoke_llm(self, messages: List[BaseMessage], model_name: str, temperature: float, ...) -> str
            # result = await chat_service_instance.invoke_llm(messages, model, temperature)

            # --- Temporary simulation ---
            await asyncio.sleep(1)
            result = f"AI reply (simulated): processed '{str(prompt_input)[:20]}...'"
            # --- End simulation ---

            print(f"LLMNode output: {result[:50]}...")
            return {"output-message": result}

        except Exception as e:
            print(f"LLMNode execution failed: {e}")
            raise  # Re-raise the exception for the executor to handle
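
# Usage sketch: how a flow executor might invoke this component. The constructor
# signature and node_data shape are assumptions for illustration, not confirmed
# by this file:
#
#     component = LLMNodeComponent(node_data={"id": "node-1"})
#     result = await component.run({
#         "input-text": "Hello!",
#         "systemPrompt": "You are a helpful assistant.",
#         "model": "gpt-4o-mini",
#         "temperature": 0.7,
#     })
#     result["output-message"]  # -> "AI reply (simulated): processed 'Hello!...'"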
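
# A minimal sketch of the invoke_llm helper referenced above, written here as a
# standalone function for illustration. It assumes the langchain-openai package
# is installed; in the real app this logic would live on ChatService, and the
# name _invoke_llm_sketch is hypothetical:
async def _invoke_llm_sketch(
    messages: List[BaseMessage],
    model_name: str,
    temperature: float,
    api_key: Optional[str] = None,
) -> str:
    from langchain_openai import ChatOpenAI  # assumed dependency

    llm = ChatOpenAI(
        model=model_name,
        temperature=temperature,
        api_key=api_key or OPENAI_API_KEY,
    )
    response = await llm.ainvoke(messages)  # returns an AIMessage
    return str(response.content)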