diff --git a/src/renderer/src/pages/home/Messages/MessageContent.tsx b/src/renderer/src/pages/home/Messages/MessageContent.tsx
index ba342600..f399fdfd 100644
--- a/src/renderer/src/pages/home/Messages/MessageContent.tsx
+++ b/src/renderer/src/pages/home/Messages/MessageContent.tsx
@@ -17,6 +17,7 @@ import MessageAttachments from './MessageAttachments'
 import MessageError from './MessageError'
 import MessageSearchResults from './MessageSearchResults'
 import MessageThought from './MessageThought'
+import MessageTools from './MessageTools'
 
 interface Props {
   message: Message
@@ -100,6 +101,7 @@ const MessageContent: React.FC<Props> = ({ message: _message, model }) => {
       <Flex gap="8px" wrap style={{ marginBottom: 10 }}>
         {message.mentions?.map((model) => <MentionTag key={getModelUniqId(model)}>{'@' + model.name}</MentionTag>)}
       </Flex>
       <MessageThought message={message} />
+      <MessageTools message={message} />
       <Markdown message={message} />
       {message.translatedContent && (
diff --git a/src/renderer/src/pages/home/Messages/MessageTools.tsx b/src/renderer/src/pages/home/Messages/MessageTools.tsx
new file mode 100644
index 00000000..21dcce81
--- /dev/null
+++ b/src/renderer/src/pages/home/Messages/MessageTools.tsx
@@ -0,0 +1,291 @@
+import { CheckOutlined, ExpandOutlined, LoadingOutlined } from '@ant-design/icons'
+import { useSettings } from '@renderer/hooks/useSettings'
+import { MCPToolResponse, Message } from '@renderer/types'
+import { Collapse, message as antdMessage, Modal, Tooltip } from 'antd'
+import { FC, useMemo, useState } from 'react'
+import { useTranslation } from 'react-i18next'
+import styled from 'styled-components'
+
+interface Props {
+  message: Message
+}
+
+const MessageTools: FC<Props> = ({ message }) => {
+  const [activeKeys, setActiveKeys] = useState<string[]>([])
+  const [copiedMap, setCopiedMap] = useState<Record<string, boolean>>({})
+  const [expandedResponse, setExpandedResponse] = useState<{ content: string; title: string } | null>(null)
+  const { t } = useTranslation()
+  const { messageFont, fontSize } = useSettings()
+  const fontFamily = useMemo(() => {
+    return messageFont === 'serif'
+      ? 'serif'
+      : '-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Oxygen, Ubuntu, Cantarell, "Open Sans","Helvetica Neue", sans-serif'
+  }, [messageFont])
+
+  const toolResponses = message.metadata?.mcpTools || []
+
+  if (!toolResponses.length && !message.reasoning_content) {
+    return null
+  }
+
+  const copyContent = (content: string, toolId: string) => {
+    navigator.clipboard.writeText(content)
+    antdMessage.success({ content: t('message.copied'), key: 'copy-message' })
+    setCopiedMap((prev) => ({ ...prev, [toolId]: true }))
+    setTimeout(() => setCopiedMap((prev) => ({ ...prev, [toolId]: false })), 2000)
+  }
+
+  const handleCollapseChange = (keys: string | string[]) => {
+    setActiveKeys(Array.isArray(keys) ? keys : [keys])
+  }
+
+  // Format tool responses for collapse items
+  const getCollapseItems = () => {
+    const items: { key: string; label: JSX.Element; children: React.ReactNode }[] = []
+
+    // Add tool responses
+    toolResponses.forEach((toolResponse: MCPToolResponse) => {
+      const { tool, status } = toolResponse
+      const toolId = tool.id
+      const isInvoking = status === 'invoking'
+      const isDone = status === 'done'
+      const response = {
+        params: tool.inputSchema,
+        response: toolResponse.response
+      }
+
+      items.push({
+        key: toolId,
+        label: (
+          <MessageTitleLabel>
+            <TitleContent>
+              <ToolName>{tool.name}</ToolName>
+              <StatusIndicator $isInvoking={isInvoking}>
+                {isInvoking ? t('tools.invoking') : t('tools.completed')}
+                {isInvoking && <LoadingOutlined spin />}
+                {isDone && <CheckOutlined />}
+              </StatusIndicator>
+            </TitleContent>
+            <ActionButtonsContainer>
+              {isDone && response && (
+                <>
+                  <Tooltip title={t('common.expand')}>
+                    <ActionButton
+                      onClick={(e) => {
+                        e.stopPropagation()
+                        setExpandedResponse({
+                          content: JSON.stringify(response, null, 2),
+                          title: tool.name
+                        })
+                      }}
+                      aria-label={t('common.expand')}>
+                      <ExpandOutlined />
+                    </ActionButton>
+                  </Tooltip>
+                  <Tooltip title={t('common.copy')}>
+                    <ActionButton
+                      onClick={(e) => {
+                        e.stopPropagation()
+                        copyContent(JSON.stringify(response, null, 2), toolId)
+                      }}
+                      aria-label={t('common.copy')}>
+                      {!copiedMap[toolId] && <i className="iconfont icon-copy" />}
+                      {copiedMap[toolId] && <CheckOutlined style={{ color: 'var(--color-primary)' }} />}
+                    </ActionButton>
+                  </Tooltip>
+                </>
+              )}
+            </ActionButtonsContainer>
+          </MessageTitleLabel>
+        ),
+        children: isDone && response && (
+          <ToolResponseContainer style={{ fontFamily, fontSize }}>
+            <pre>{JSON.stringify(response, null, 2)}</pre>
+          </ToolResponseContainer>
+        )
+      })
+    })
+
+    return items
+  }
+
+  return (
+    <>
+      <CollapseContainer
+        activeKey={activeKeys}
+        onChange={handleCollapseChange}
+        items={getCollapseItems()}
+        expandIcon={({ isActive }) => (
+          <CollapsibleIcon className="iconfont icon-right" style={{ transform: isActive ? 'rotate(90deg)' : 'none' }} />
+        )}
+      />
+      <Modal
+        title={expandedResponse?.title}
+        open={!!expandedResponse}
+        onCancel={() => setExpandedResponse(null)}
+        footer={null}
+        width="80%"
+        bodyStyle={{ maxHeight: '80vh', overflow: 'auto' }}>
+        {expandedResponse && (
+          <ExpandedResponseContainer style={{ fontFamily, fontSize }}>
+            <ActionButton
+              className="copy-expanded-button"
+              onClick={() => {
+                if (expandedResponse) {
+                  navigator.clipboard.writeText(expandedResponse.content)
+                  antdMessage.success({ content: t('message.copied'), key: 'copy-expanded' })
+                }
+              }}
+              aria-label={t('common.copy')}>
+              <i className="iconfont icon-copy" />
+            </ActionButton>
+            <pre>{expandedResponse.content}</pre>
+          </ExpandedResponseContainer>
+        )}
+      </Modal>
+    </>
+  )
+}
+
+const CollapseContainer = styled(Collapse)`
+  margin-bottom: 15px;
+  border-radius: 8px;
+  overflow: hidden;
+  box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
+
+  .ant-collapse-header {
+    background-color: var(--color-bg-2);
+    transition: background-color 0.2s;
+
+    &:hover {
+      background-color: var(--color-bg-3);
+    }
+  }
+
+  .ant-collapse-content-box {
+    padding: 0 !important;
+  }
+`
+
+const MessageTitleLabel = styled.div`
+  display: flex;
+  flex-direction: row;
+  align-items: center;
+  justify-content: space-between;
+  width: 100%;
+  min-height: 26px;
+  gap: 10px;
+  padding: 0;
+`
+
+const TitleContent = styled.div`
+  display: flex;
+  flex-direction: row;
+  align-items: center;
+  gap: 8px;
+`
+
+const ToolName = styled.span`
+  color: var(--color-text);
+  font-weight: 500;
+  font-size: 13px;
+`
+
+const StatusIndicator = styled.span<{ $isInvoking: boolean }>`
+  color: ${(props) => (props.$isInvoking ? 'var(--color-primary)' : 'var(--color-success, #52c41a)')};
+  font-size: 11px;
+  display: flex;
+  align-items: center;
+  opacity: 0.85;
+  border-left: 1px solid var(--color-border);
+  padding-left: 8px;
+`
+
+const ActionButtonsContainer = styled.div`
+  display: flex;
+  gap: 8px;
+  margin-left: auto;
+`
+
+const ActionButton = styled.button`
+  background: none;
+  border: none;
+  color: var(--color-text-2);
+  cursor: pointer;
+  padding: 4px 8px;
+  display: flex;
+  align-items: center;
+  justify-content: center;
+  opacity: 0.7;
+  transition: all 0.2s;
+  border-radius: 4px;
+
+  &:hover {
+    opacity: 1;
+    color: var(--color-text);
+    background-color: var(--color-bg-1);
+  }
+
+  &:focus-visible {
+    outline: 2px solid var(--color-primary);
+    outline-offset: 2px;
+    opacity: 1;
+  }
+
+  .iconfont {
+    font-size: 14px;
+  }
+`
+
+const CollapsibleIcon = styled.i`
+  color: var(--color-text-2);
+  font-size: 12px;
+  transition: transform 0.2s;
+`
+
+const ToolResponseContainer = styled.div`
+  background: var(--color-bg-1);
+  border-radius: 0 0 4px 4px;
+  padding: 12px 16px;
+  overflow: auto;
+  max-height: 300px;
+  border-top: 1px solid var(--color-border);
+  position: relative;
+
+  pre {
+    margin: 0;
+    white-space: pre-wrap;
+    word-break: break-word;
+    color: var(--color-text);
+  }
+`
+
+const ExpandedResponseContainer = styled.div`
+  background: var(--color-bg-1);
+  border-radius: 8px;
+  padding: 16px;
+  position: relative;
+
+  .copy-expanded-button {
+    position: absolute;
+    top: 10px;
+    right: 10px;
+    background-color: var(--color-bg-2);
+    border-radius: 4px;
+    z-index: 1;
+  }
+
+  pre {
+    margin: 0;
+    white-space: pre-wrap;
+    word-break: break-word;
+    color: var(--color-text);
+  }
+`
+
+export default MessageTools
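A note between the UI and provider halves of this patch: `MessageTools` renders one collapsible row per entry in `message.metadata.mcpTools`. A minimal sketch of the data it expects, assuming a hypothetical `fetch_page` tool (the `as MCPTool` cast elides fields the component never reads):

```ts
import type { MCPTool, MCPToolResponse } from '@renderer/types'

// Hypothetical tool call, as the providers below would record it.
const tool = { id: 'fetch_page-1', name: 'fetch_page', inputSchema: {} } as MCPTool

// While the call runs, the row shows a spinner next to t('tools.invoking'):
const invoking: MCPToolResponse = { tool, status: 'invoking' }

// Once callMCPTool resolves, the same entry flips to 'done' and the
// params/response pair becomes expandable and copyable as JSON:
const done: MCPToolResponse = { ...invoking, status: 'done', response: { content: [] } }
```
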
diff --git a/src/renderer/src/providers/AnthropicProvider.ts b/src/renderer/src/providers/AnthropicProvider.ts
index 13f91c45..0f9aa1fe 100644
--- a/src/renderer/src/providers/AnthropicProvider.ts
+++ b/src/renderer/src/providers/AnthropicProvider.ts
@@ -12,14 +12,14 @@ import i18n from '@renderer/i18n'
 import { getAssistantSettings, getDefaultModel, getTopNamingModel } from '@renderer/services/AssistantService'
 import { EVENT_NAMES } from '@renderer/services/EventService'
 import { filterContextMessages, filterUserRoleStartMessages } from '@renderer/services/MessagesService'
-import { Assistant, FileTypes, Message, Model, Provider, Suggestion } from '@renderer/types'
+import { Assistant, FileTypes, MCPToolResponse, Message, Model, Provider, Suggestion } from '@renderer/types'
 import { removeSpecialCharacters } from '@renderer/utils'
 import { first, flatten, sum, takeRight } from 'lodash'
 import OpenAI from 'openai'
 
 import { CompletionsParams } from '.'
 import BaseProvider from './BaseProvider'
-import { anthropicToolUseToMcpTool, callMCPTool, mcpToolsToAnthropicTools } from './mcpToolUtils'
+import { anthropicToolUseToMcpTool, callMCPTool, mcpToolsToAnthropicTools, upsertMCPToolResponse } from './mcpToolUtils'
 
 type ReasoningEffort = 'high' | 'medium' | 'low'
 
@@ -193,7 +193,7 @@ export default class AnthropicProvider extends BaseProvider {
     const { abortController, cleanup } = this.createAbortController(lastUserMessage?.id)
     const { signal } = abortController
-
+    const toolResponses: MCPToolResponse[] = []
     const processStream = async (body: MessageCreateParamsNonStreaming) => {
       new Promise((resolve, reject) => {
         const toolCalls: ToolUseBlock[] = []
@@ -256,12 +256,30 @@
           for (const toolCall of toolCalls) {
             const mcpTool = anthropicToolUseToMcpTool(mcpTools, toolCall)
             if (mcpTool) {
+              upsertMCPToolResponse(
+                toolResponses,
+                {
+                  tool: mcpTool,
+                  status: 'invoking'
+                },
+                onChunk
+              )
+
               const resp = await callMCPTool(mcpTool)
               toolCallResults.push({
                 type: 'tool_result',
                 tool_use_id: toolCall.id,
                 content: resp.content
               })
+              upsertMCPToolResponse(
+                toolResponses,
+                {
+                  tool: mcpTool,
+                  status: 'done',
+                  response: resp
+                },
+                onChunk
+              )
             }
           }
@@ -297,7 +315,8 @@
               time_completion_millsec,
               time_first_token_millsec,
               time_thinking_millsec
-            }
+            },
+            mcpToolResponse: toolResponses
           })
           resolve()
         })
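The Anthropic hunks above establish the reporting pattern that `GeminiProvider` and `OpenAIProvider` repeat below: upsert an `'invoking'` entry before awaiting the tool, then upsert `'done'` with the result so the same UI row updates in place. A provider-agnostic sketch under that assumption (`runTool` is illustrative, not a helper in this PR):

```ts
import type { MCPTool, MCPToolResponse } from '@renderer/types'
import { callMCPTool, upsertMCPToolResponse } from './mcpToolUtils'

// Condensed version of the two-phase reporting each provider performs
// inside its tool-call loop. `onChunk` mirrors the ChunkCallbackData callback.
async function runTool(
  toolResponses: MCPToolResponse[],
  mcpTool: MCPTool,
  onChunk: (data: { text?: string; mcpToolResponse?: MCPToolResponse[] }) => void
) {
  // Phase 1: surface the call in the UI before awaiting it.
  upsertMCPToolResponse(toolResponses, { tool: mcpTool, status: 'invoking' }, onChunk)
  const resp = await callMCPTool(mcpTool)
  // Phase 2: flip the same entry to 'done' and attach the result.
  upsertMCPToolResponse(toolResponses, { tool: mcpTool, status: 'done', response: resp }, onChunk)
  return resp
}
```
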
diff --git a/src/renderer/src/providers/GeminiProvider.ts b/src/renderer/src/providers/GeminiProvider.ts
index 84dddc3d..c6693b05 100644
--- a/src/renderer/src/providers/GeminiProvider.ts
+++ b/src/renderer/src/providers/GeminiProvider.ts
@@ -19,7 +19,7 @@ import i18n from '@renderer/i18n'
 import { getAssistantSettings, getDefaultModel, getTopNamingModel } from '@renderer/services/AssistantService'
 import { EVENT_NAMES } from '@renderer/services/EventService'
 import { filterContextMessages, filterUserRoleStartMessages } from '@renderer/services/MessagesService'
-import { Assistant, FileType, FileTypes, Message, Model, Provider, Suggestion } from '@renderer/types'
+import { Assistant, FileType, FileTypes, MCPToolResponse, Message, Model, Provider, Suggestion } from '@renderer/types'
 import { removeSpecialCharacters } from '@renderer/utils'
 import axios from 'axios'
 import { isEmpty, takeRight } from 'lodash'
@@ -27,7 +27,7 @@ import OpenAI from 'openai'
 
 import { CompletionsParams } from '.'
 import BaseProvider from './BaseProvider'
-import { callMCPTool, geminiFunctionCallToMcpTool, mcpToolsToGeminiTools } from './mcpToolUtils'
+import { callMCPTool, geminiFunctionCallToMcpTool, mcpToolsToGeminiTools, upsertMCPToolResponse } from './mcpToolUtils'
 
 export default class GeminiProvider extends BaseProvider {
   private sdk: GoogleGenerativeAI
@@ -163,6 +163,7 @@ export default class GeminiProvider extends BaseProvider {
     }
 
     const tools = mcpToolsToGeminiTools(mcpTools)
+    const toolResponses: MCPToolResponse[] = []
     if (assistant.enableWebSearch && isWebSearchModel(model)) {
       tools.push({
         // @ts-ignore googleSearch is not a valid tool for Gemini
@@ -235,6 +236,14 @@
           fcallParts.push({ functionCall: call } as FunctionCallPart)
           const mcpTool = geminiFunctionCallToMcpTool(mcpTools, call)
           if (mcpTool) {
+            upsertMCPToolResponse(
+              toolResponses,
+              {
+                tool: mcpTool,
+                status: 'invoking'
+              },
+              onChunk
+            )
             const toolCallResponse = await callMCPTool(mcpTool)
             fcRespParts.push({
               functionResponse: {
@@ -242,6 +251,15 @@
                 response: toolCallResponse
               }
             })
+            upsertMCPToolResponse(
+              toolResponses,
+              {
+                tool: mcpTool,
+                status: 'done',
+                response: toolCallResponse
+              },
+              onChunk
+            )
           }
         }
         if (fcRespParts) {
@@ -268,7 +286,8 @@
             time_completion_millsec,
             time_first_token_millsec
           },
-          search: chunk.candidates?.[0]?.groundingMetadata
+          search: chunk.candidates?.[0]?.groundingMetadata,
+          mcpToolResponse: toolResponses
         })
       }
     }
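Across the three providers, the awaited `callMCPTool` result is the same object; only the packaging each model API expects differs. A side-by-side sketch with `declare` stand-ins for values from the surrounding loops (Gemini's `name` field is an assumption here, since the hunk boundary above hides that line):

```ts
// Stand-ins for values available inside each provider's tool-call loop.
declare const toolUseId: string // Anthropic tool_use block id / OpenAI tool_call id
declare const functionName: string // Gemini function call name
declare const resp: { content: unknown } // result of `await callMCPTool(mcpTool)`

// Anthropic: a tool_result content block referencing the tool_use id.
const anthropicResult = { type: 'tool_result', tool_use_id: toolUseId, content: resp.content }

// Gemini: a functionResponse part wrapping the whole response object.
const geminiResult = { functionResponse: { name: functionName, response: resp } }

// OpenAI: a role:'tool' message tied to the originating tool_call id.
const openaiResult = { role: 'tool', content: resp.content, tool_call_id: toolUseId }
```
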
diff --git a/src/renderer/src/providers/OpenAIProvider.ts b/src/renderer/src/providers/OpenAIProvider.ts
index c57ff5a2..7716bfaf 100644
--- a/src/renderer/src/providers/OpenAIProvider.ts
+++ b/src/renderer/src/providers/OpenAIProvider.ts
@@ -11,7 +11,16 @@ import i18n from '@renderer/i18n'
 import { getAssistantSettings, getDefaultModel, getTopNamingModel } from '@renderer/services/AssistantService'
 import { EVENT_NAMES } from '@renderer/services/EventService'
 import { filterContextMessages, filterUserRoleStartMessages } from '@renderer/services/MessagesService'
-import { Assistant, FileTypes, GenerateImageParams, Message, Model, Provider, Suggestion } from '@renderer/types'
+import {
+  Assistant,
+  FileTypes,
+  GenerateImageParams,
+  MCPToolResponse,
+  Message,
+  Model,
+  Provider,
+  Suggestion
+} from '@renderer/types'
 import { removeSpecialCharacters } from '@renderer/utils'
 import { takeRight } from 'lodash'
 import OpenAI, { AzureOpenAI } from 'openai'
@@ -26,7 +35,7 @@ import {
 
 import { CompletionsParams } from '.'
 import BaseProvider from './BaseProvider'
-import { callMCPTool, mcpToolsToOpenAITools, openAIToolsToMcpTool } from './mcpToolUtils'
+import { callMCPTool, mcpToolsToOpenAITools, openAIToolsToMcpTool, upsertMCPToolResponse } from './mcpToolUtils'
 
 type ReasoningEffort = 'high' | 'medium' | 'low'
 
@@ -295,6 +304,8 @@ export default class OpenAIProvider extends BaseProvider {
       Boolean
     ) as ChatCompletionMessageParam[]
 
+    const toolResponses: MCPToolResponse[] = []
+
     const processStream = async (stream: any) => {
       if (!isSupportStreamOutput()) {
         const time_completion_millsec = new Date().getTime() - start_time_millsec
@@ -367,6 +378,14 @@
             continue
           }
 
+          upsertMCPToolResponse(
+            toolResponses,
+            {
+              tool: mcpTool,
+              status: 'invoking'
+            },
+            onChunk
+          )
           const toolCallResponse = await callMCPTool(mcpTool)
           console.log(toolCallResponse)
           reqMessages.push({
@@ -374,6 +393,15 @@
             role: 'tool',
             content: toolCallResponse.content,
             tool_call_id: toolCall.id
           } as ChatCompletionToolMessageParam)
+          upsertMCPToolResponse(
+            toolResponses,
+            {
+              tool: mcpTool,
+              status: 'done',
+              response: toolCallResponse
+            },
+            onChunk
+          )
         }
 
         const newStream = await this.sdk.chat.completions
@@ -411,7 +439,8 @@
             time_first_token_millsec,
             time_thinking_millsec
           },
-          citations
+          citations,
+          mcpToolResponse: toolResponses
         })
       }
     }
diff --git a/src/renderer/src/providers/index.d.ts b/src/renderer/src/providers/index.d.ts
index 6bba2af7..40b9f680 100644
--- a/src/renderer/src/providers/index.d.ts
+++ b/src/renderer/src/providers/index.d.ts
@@ -1,5 +1,5 @@
 import type { GroundingMetadata } from '@google/generative-ai'
-import type { Assistant, Message, Metrics } from '@renderer/types'
+import type { Assistant, MCPToolResponse, Message, Metrics } from '@renderer/types'
 
 interface ChunkCallbackData {
   text?: string
@@ -8,12 +8,13 @@ interface ChunkCallbackData {
   metrics?: Metrics
   search?: GroundingMetadata
   citations?: string[]
+  mcpToolResponse?: MCPToolResponse[]
 }
 
 interface CompletionsParams {
   messages: Message[]
   assistant: Assistant
-  onChunk: ({ text, reasoning_content, usage, metrics, search, citations }: ChunkCallbackData) => void
+  onChunk: ({ text, reasoning_content, usage, metrics, search, citations, mcpToolResponse }: ChunkCallbackData) => void
   onFilterMessages: (messages: Message[]) => void
   mcpTools?: MCPTool[]
 }
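With `mcpToolResponse` threaded through `ChunkCallbackData`, any `onChunk` consumer can observe tool progress alongside streamed text. A minimal illustrative consumer (the import path is an assumption for the sketch; in the repo the type lives in `providers/index.d.ts`):

```ts
import type { ChunkCallbackData } from '@renderer/providers'

// Illustrative consumer only: reacts to streaming text and tool progress alike.
const onChunk = ({ text, mcpToolResponse }: ChunkCallbackData): void => {
  if (text) {
    console.log('text chunk:', text)
  }
  for (const tr of mcpToolResponse ?? []) {
    console.log(`tool ${tr.tool.name}: ${tr.status}`)
  }
}
```
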
diff --git a/src/renderer/src/providers/mcpToolUtils.ts b/src/renderer/src/providers/mcpToolUtils.ts
index a27d7d71..c3731557 100644
--- a/src/renderer/src/providers/mcpToolUtils.ts
+++ b/src/renderer/src/providers/mcpToolUtils.ts
@@ -1,8 +1,10 @@
 import { Tool, ToolUnion, ToolUseBlock } from '@anthropic-ai/sdk/resources'
 import { FunctionCall, FunctionDeclaration, SchemaType, Tool as geminiToool } from '@google/generative-ai'
-import { MCPTool } from '@renderer/types'
+import { MCPTool, MCPToolResponse } from '@renderer/types'
 import { ChatCompletionMessageToolCall, ChatCompletionTool } from 'openai/resources'
+import { ChunkCallbackData } from '.'
+
 const supportedAttributes = [
   'type',
   'nullable',
@@ -122,3 +124,25 @@ export function geminiFunctionCallToMcpTool(
   tool.inputSchema = fcall.args
   return tool
 }
+
+export function upsertMCPToolResponse(
+  results: MCPToolResponse[],
+  resp: MCPToolResponse,
+  onChunk: ({ mcpToolResponse }: ChunkCallbackData) => void
+) {
+  try {
+    for (const ret of results) {
+      if (ret.tool.id === resp.tool.id) {
+        ret.response = resp.response
+        ret.status = resp.status
+        return
+      }
+    }
+    results.push(resp)
+  } finally {
+    onChunk({
+      text: '',
+      mcpToolResponse: results
+    })
+  }
+}
diff --git a/src/renderer/src/services/ApiService.ts b/src/renderer/src/services/ApiService.ts
index 55028bfe..1c024a9e 100644
--- a/src/renderer/src/services/ApiService.ts
+++ b/src/renderer/src/services/ApiService.ts
@@ -83,7 +83,7 @@ export async function fetchChatCompletion({
     messages: filterUsefulMessages(messages),
     assistant,
     onFilterMessages: (messages) => (_messages = messages),
-    onChunk: ({ text, reasoning_content, usage, metrics, search, citations }) => {
+    onChunk: ({ text, reasoning_content, usage, metrics, search, citations, mcpToolResponse }) => {
      message.content = message.content + text || ''
      message.usage = usage
      message.metrics = metrics
@@ -96,6 +96,10 @@ export async function fetchChatCompletion({
         message.metadata = { ...message.metadata, groundingMetadata: search }
       }
 
+      if (mcpToolResponse) {
+        message.metadata = { ...message.metadata, mcpTools: mcpToolResponse }
+      }
+
       // Handle citations from Perplexity API
       if (isFirstChunk && citations) {
         message.metadata = {
diff --git a/src/renderer/src/types/index.ts b/src/renderer/src/types/index.ts
index d2319d8a..451b421a 100644
--- a/src/renderer/src/types/index.ts
+++ b/src/renderer/src/types/index.ts
@@ -74,6 +74,8 @@ export type Message = {
     citations?: string[] // Web search
     webSearch?: WebSearchResponse
+    // MCP Tools
+    mcpTools?: MCPToolResponse[]
   }
 }
@@ -354,3 +356,9 @@ export interface MCPTool {
 export interface MCPConfig {
   servers: MCPServer[]
 }
+
+export interface MCPToolResponse {
+  tool: MCPTool
+  status: string
+  response?: any
+}
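Two behaviors of `upsertMCPToolResponse` are worth noting: it matches on `tool.id` and mutates the matching entry in place, so the `'invoking'` row the UI already rendered flips to `'done'` rather than duplicating, and the `try/finally` guarantees `onChunk` fires whether the entry was updated or appended. A short walk-through under those semantics (the `demo_tool` values are hypothetical):

```ts
import type { MCPTool, MCPToolResponse } from '@renderer/types'
import { upsertMCPToolResponse } from './mcpToolUtils'

const tool = { id: 'demo-1', name: 'demo_tool', inputSchema: {} } as MCPTool // hypothetical tool
const toolResponses: MCPToolResponse[] = []
const emitted: number[] = []
const onChunk = ({ mcpToolResponse }: { text?: string; mcpToolResponse?: MCPToolResponse[] }) => {
  emitted.push(mcpToolResponse?.length ?? 0)
}

upsertMCPToolResponse(toolResponses, { tool, status: 'invoking' }, onChunk)
// toolResponses: one entry with status 'invoking'; emitted: [1]

upsertMCPToolResponse(toolResponses, { tool, status: 'done', response: { ok: true } }, onChunk)
// still one entry: matched on tool.id and updated in place; emitted: [1, 1]
```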