🔧 feat: add mcp tool response visualization and handling

- Introduce `MessageTools` component for displaying tool responses
- Add handling and state management for tool invocation statuses
- Implement tool response collapsing, expanding and copying functionality
- Update multiple providers (Anthropic, Gemini, OpenAI) to handle tool responses
- Add `upsertMCPToolResponse` utility for managing tool response states
- Extend types and interfaces to support new tool response metadata
- Integrate tool response handling into chat completion process
- Add necessary styling for tool response UI components
This commit is contained in:
Vaayne 2025-03-06 23:44:44 +08:00 committed by 亢奋猫
parent 371d38a9ee
commit f29eeeac9e
9 changed files with 411 additions and 14 deletions

View File

@ -17,6 +17,7 @@ import MessageAttachments from './MessageAttachments'
import MessageError from './MessageError'
import MessageSearchResults from './MessageSearchResults'
import MessageThought from './MessageThought'
import MessageTools from './MessageTools'
interface Props {
message: Message
@ -100,6 +101,7 @@ const MessageContent: React.FC<Props> = ({ message: _message, model }) => {
{message.mentions?.map((model) => <MentionTag key={getModelUniqId(model)}>{'@' + model.name}</MentionTag>)}
</Flex>
<MessageThought message={message} />
<MessageTools message={message} />
<Markdown message={{ ...message, content: processedContent }} />
{message.translatedContent && (
<Fragment>

View File

@ -0,0 +1,291 @@
import { CheckOutlined, ExpandOutlined, LoadingOutlined } from '@ant-design/icons'
import { useSettings } from '@renderer/hooks/useSettings'
import { MCPToolResponse, Message } from '@renderer/types'
import { Collapse, message as antdMessage, Modal, Tooltip } from 'antd'
import { FC, useMemo, useState } from 'react'
import { useTranslation } from 'react-i18next'
import styled from 'styled-components'
interface Props {
  // Message whose metadata.mcpTools (MCP tool responses) should be rendered.
  message: Message
}
/**
 * Renders the MCP tool calls attached to a message as a collapsible list.
 *
 * Each panel shows the tool name and an invoking/done status indicator; once a
 * call completes, the panel body shows the call params and raw response as
 * pretty-printed JSON, with expand-to-modal and copy-to-clipboard actions.
 * Returns null when the message carries no tool responses.
 */
const MessageTools: FC<Props> = ({ message }) => {
  // Keys of the currently expanded collapse panels.
  const [activeKeys, setActiveKeys] = useState<string[]>([])
  // Per-tool flag used to flash a check icon briefly after a copy action.
  const [copiedMap, setCopiedMap] = useState<Record<string, boolean>>({})
  // When set, the modal displays this response at full size.
  const [expandedResponse, setExpandedResponse] = useState<{ content: string; title: string } | null>(null)
  const { t } = useTranslation()
  const { messageFont, fontSize } = useSettings()
  const fontFamily = useMemo(() => {
    return messageFont === 'serif'
      ? 'serif'
      : '-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Oxygen, Ubuntu, Cantarell, "Open Sans","Helvetica Neue", sans-serif'
  }, [messageFont])

  const toolResponses = message.metadata?.mcpTools || []

  // Fix: bail out purely on missing tool responses. The original condition also
  // required !message.reasoning_content (copied from MessageThought), which made
  // this component render an empty, styled collapse box for messages that had
  // reasoning content but no tool calls. All hooks above run unconditionally,
  // so this early return is safe.
  if (!toolResponses.length) {
    return null
  }

  // Copy `content` to the clipboard and flash the per-tool copied indicator.
  const copyContent = (content: string, toolId: string) => {
    navigator.clipboard.writeText(content)
    antdMessage.success({ content: t('message.copied'), key: 'copy-message' })
    setCopiedMap((prev) => ({ ...prev, [toolId]: true }))
    setTimeout(() => setCopiedMap((prev) => ({ ...prev, [toolId]: false })), 2000)
  }

  // antd may hand back a single key or an array depending on collapse mode.
  const handleCollapseChange = (keys: string | string[]) => {
    setActiveKeys(Array.isArray(keys) ? keys : [keys])
  }

  // Build one collapse item per tool response.
  const getCollapseItems = () => {
    const items: { key: string; label: JSX.Element; children: React.ReactNode }[] = []

    toolResponses.forEach((toolResponse: MCPToolResponse) => {
      const { tool, status } = toolResponse
      const toolId = tool.id
      const isInvoking = status === 'invoking'
      const isDone = status === 'done'
      // NOTE(review): providers overwrite tool.inputSchema with the actual call
      // arguments before invoking, so it reads here as the call "params" —
      // confirm that is intended rather than the tool's JSON schema.
      const response = {
        params: tool.inputSchema,
        response: toolResponse.response
      }
      items.push({
        key: toolId,
        label: (
          <MessageTitleLabel>
            <TitleContent>
              <ToolName>{tool.name}</ToolName>
              <StatusIndicator $isInvoking={isInvoking}>
                {isInvoking ? t('tools.invoking') : t('tools.completed')}
                {isInvoking && <LoadingOutlined spin style={{ marginLeft: 6 }} />}
                {isDone && <CheckOutlined style={{ marginLeft: 6 }} />}
              </StatusIndicator>
            </TitleContent>
            <ActionButtonsContainer>
              {isDone && response && (
                <>
                  <Tooltip title={t('common.expand')} mouseEnterDelay={0.5}>
                    <ActionButton
                      className="message-action-button"
                      onClick={(e) => {
                        // Keep the click from toggling the collapse panel.
                        e.stopPropagation()
                        setExpandedResponse({
                          content: JSON.stringify(response, null, 2),
                          title: tool.name
                        })
                      }}
                      aria-label={t('common.expand')}>
                      <ExpandOutlined />
                    </ActionButton>
                  </Tooltip>
                  <Tooltip title={t('common.copy')} mouseEnterDelay={0.5}>
                    <ActionButton
                      className="message-action-button"
                      onClick={(e) => {
                        e.stopPropagation()
                        copyContent(JSON.stringify(response, null, 2), toolId)
                      }}
                      aria-label={t('common.copy')}>
                      {!copiedMap[toolId] && <i className="iconfont icon-copy"></i>}
                      {copiedMap[toolId] && <CheckOutlined style={{ color: 'var(--color-primary)' }} />}
                    </ActionButton>
                  </Tooltip>
                </>
              )}
            </ActionButtonsContainer>
          </MessageTitleLabel>
        ),
        children: isDone && response && (
          <ToolResponseContainer style={{ fontFamily, fontSize }}>
            <pre>{JSON.stringify(response, null, 2)}</pre>
          </ToolResponseContainer>
        )
      })
    })

    return items
  }

  return (
    <>
      <CollapseContainer
        activeKey={activeKeys}
        size="small"
        onChange={handleCollapseChange}
        className="message-tools-container"
        items={getCollapseItems()}
        expandIcon={({ isActive }) => (
          <CollapsibleIcon className={`iconfont ${isActive ? 'icon-chevron-down' : 'icon-chevron-right'}`} />
        )}
      />
      <Modal
        title={expandedResponse?.title}
        open={!!expandedResponse}
        onCancel={() => setExpandedResponse(null)}
        footer={null}
        width="80%"
        bodyStyle={{ maxHeight: '80vh', overflow: 'auto' }}>
        {expandedResponse && (
          <ExpandedResponseContainer style={{ fontFamily, fontSize }}>
            <ActionButton
              className="copy-expanded-button"
              onClick={() => {
                if (expandedResponse) {
                  navigator.clipboard.writeText(expandedResponse.content)
                  antdMessage.success({ content: t('message.copied'), key: 'copy-expanded' })
                }
              }}
              aria-label={t('common.copy')}>
              <i className="iconfont icon-copy"></i>
            </ActionButton>
            <pre>{expandedResponse.content}</pre>
          </ExpandedResponseContainer>
        )}
      </Modal>
    </>
  )
}
// Outer wrapper for the tool list; restyles the antd Collapse panels.
const CollapseContainer = styled(Collapse)`
  margin-bottom: 15px;
  border-radius: 8px;
  overflow: hidden;
  box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
  .ant-collapse-header {
    background-color: var(--color-bg-2);
    transition: background-color 0.2s;
    &:hover {
      background-color: var(--color-bg-3);
    }
  }
  .ant-collapse-content-box {
    padding: 0 !important;
  }
`

// Panel header row: tool title on the left, action buttons pushed to the right.
const MessageTitleLabel = styled.div`
  display: flex;
  flex-direction: row;
  align-items: center;
  justify-content: space-between;
  width: 100%;
  min-height: 26px;
  gap: 10px;
  padding: 0;
`

// Groups the tool name and the status badge within the panel header.
const TitleContent = styled.div`
  display: flex;
  flex-direction: row;
  align-items: center;
  gap: 8px;
`

// Name of the invoked MCP tool.
const ToolName = styled.span`
  color: var(--color-text);
  font-weight: 500;
  font-size: 13px;
`

// Invoking/completed badge; color driven by the $isInvoking transient prop.
const StatusIndicator = styled.span<{ $isInvoking: boolean }>`
  color: ${(props) => (props.$isInvoking ? 'var(--color-primary)' : 'var(--color-success, #52c41a)')};
  font-size: 11px;
  display: flex;
  align-items: center;
  opacity: 0.85;
  border-left: 1px solid var(--color-border);
  padding-left: 8px;
`

// Right-aligned container for the expand/copy buttons.
const ActionButtonsContainer = styled.div`
  display: flex;
  gap: 8px;
  margin-left: auto;
`

// Borderless icon button used for the expand and copy actions.
const ActionButton = styled.button`
  background: none;
  border: none;
  color: var(--color-text-2);
  cursor: pointer;
  padding: 4px 8px;
  display: flex;
  align-items: center;
  justify-content: center;
  opacity: 0.7;
  transition: all 0.2s;
  border-radius: 4px;
  &:hover {
    opacity: 1;
    color: var(--color-text);
    background-color: var(--color-bg-1);
  }
  &:focus-visible {
    outline: 2px solid var(--color-primary);
    outline-offset: 2px;
    opacity: 1;
  }
  .iconfont {
    font-size: 14px;
  }
`

// Chevron in the collapse header; direction switched via icon class swap.
const CollapsibleIcon = styled.i`
  color: var(--color-text-2);
  font-size: 12px;
  transition: transform 0.2s;
`

// Scrollable panel body showing the pretty-printed tool response.
const ToolResponseContainer = styled.div`
  background: var(--color-bg-1);
  border-radius: 0 0 4px 4px;
  padding: 12px 16px;
  overflow: auto;
  max-height: 300px;
  border-top: 1px solid var(--color-border);
  position: relative;
  pre {
    margin: 0;
    white-space: pre-wrap;
    word-break: break-word;
    color: var(--color-text);
  }
`

// Modal body for the expanded response, with a floating copy button.
const ExpandedResponseContainer = styled.div`
  background: var(--color-bg-1);
  border-radius: 8px;
  padding: 16px;
  position: relative;
  .copy-expanded-button {
    position: absolute;
    top: 10px;
    right: 10px;
    background-color: var(--color-bg-2);
    border-radius: 4px;
    z-index: 1;
  }
  pre {
    margin: 0;
    white-space: pre-wrap;
    word-break: break-word;
    color: var(--color-text);
  }
`

export default MessageTools

View File

@ -12,14 +12,14 @@ import i18n from '@renderer/i18n'
import { getAssistantSettings, getDefaultModel, getTopNamingModel } from '@renderer/services/AssistantService'
import { EVENT_NAMES } from '@renderer/services/EventService'
import { filterContextMessages, filterUserRoleStartMessages } from '@renderer/services/MessagesService'
import { Assistant, FileTypes, Message, Model, Provider, Suggestion } from '@renderer/types'
import { Assistant, FileTypes, MCPToolResponse, Message, Model, Provider, Suggestion } from '@renderer/types'
import { removeSpecialCharacters } from '@renderer/utils'
import { first, flatten, sum, takeRight } from 'lodash'
import OpenAI from 'openai'
import { CompletionsParams } from '.'
import BaseProvider from './BaseProvider'
import { anthropicToolUseToMcpTool, callMCPTool, mcpToolsToAnthropicTools } from './mcpToolUtils'
import { anthropicToolUseToMcpTool, callMCPTool, mcpToolsToAnthropicTools, upsertMCPToolResponse } from './mcpToolUtils'
type ReasoningEffort = 'high' | 'medium' | 'low'
@ -193,7 +193,7 @@ export default class AnthropicProvider extends BaseProvider {
const { abortController, cleanup } = this.createAbortController(lastUserMessage?.id)
const { signal } = abortController
const toolResponses: MCPToolResponse[] = []
const processStream = async (body: MessageCreateParamsNonStreaming) => {
new Promise<void>((resolve, reject) => {
const toolCalls: ToolUseBlock[] = []
@ -256,12 +256,30 @@ export default class AnthropicProvider extends BaseProvider {
for (const toolCall of toolCalls) {
const mcpTool = anthropicToolUseToMcpTool(mcpTools, toolCall)
if (mcpTool) {
upsertMCPToolResponse(
toolResponses,
{
tool: mcpTool,
status: 'invoking'
},
onChunk
)
const resp = await callMCPTool(mcpTool)
toolCallResults.push({
type: 'tool_result',
tool_use_id: toolCall.id,
content: resp.content
})
upsertMCPToolResponse(
toolResponses,
{
tool: mcpTool,
status: 'done',
response: resp
},
onChunk
)
}
}
@ -297,7 +315,8 @@ export default class AnthropicProvider extends BaseProvider {
time_completion_millsec,
time_first_token_millsec,
time_thinking_millsec
}
},
mcpToolResponse: toolResponses
})
resolve()
})

View File

@ -19,7 +19,7 @@ import i18n from '@renderer/i18n'
import { getAssistantSettings, getDefaultModel, getTopNamingModel } from '@renderer/services/AssistantService'
import { EVENT_NAMES } from '@renderer/services/EventService'
import { filterContextMessages, filterUserRoleStartMessages } from '@renderer/services/MessagesService'
import { Assistant, FileType, FileTypes, Message, Model, Provider, Suggestion } from '@renderer/types'
import { Assistant, FileType, FileTypes, MCPToolResponse, Message, Model, Provider, Suggestion } from '@renderer/types'
import { removeSpecialCharacters } from '@renderer/utils'
import axios from 'axios'
import { isEmpty, takeRight } from 'lodash'
@ -27,7 +27,7 @@ import OpenAI from 'openai'
import { CompletionsParams } from '.'
import BaseProvider from './BaseProvider'
import { callMCPTool, geminiFunctionCallToMcpTool, mcpToolsToGeminiTools } from './mcpToolUtils'
import { callMCPTool, geminiFunctionCallToMcpTool, mcpToolsToGeminiTools, upsertMCPToolResponse } from './mcpToolUtils'
export default class GeminiProvider extends BaseProvider {
private sdk: GoogleGenerativeAI
@ -163,6 +163,7 @@ export default class GeminiProvider extends BaseProvider {
}
const tools = mcpToolsToGeminiTools(mcpTools)
const toolResponses: MCPToolResponse[] = []
if (assistant.enableWebSearch && isWebSearchModel(model)) {
tools.push({
// @ts-ignore googleSearch is not a valid tool for Gemini
@ -235,6 +236,14 @@ export default class GeminiProvider extends BaseProvider {
fcallParts.push({ functionCall: call } as FunctionCallPart)
const mcpTool = geminiFunctionCallToMcpTool(mcpTools, call)
if (mcpTool) {
upsertMCPToolResponse(
toolResponses,
{
tool: mcpTool,
status: 'invoking'
},
onChunk
)
const toolCallResponse = await callMCPTool(mcpTool)
fcRespParts.push({
functionResponse: {
@ -242,6 +251,15 @@ export default class GeminiProvider extends BaseProvider {
response: toolCallResponse
}
})
upsertMCPToolResponse(
toolResponses,
{
tool: mcpTool,
status: 'done',
response: toolCallResponse
},
onChunk
)
}
}
if (fcRespParts) {
@ -268,7 +286,8 @@ export default class GeminiProvider extends BaseProvider {
time_completion_millsec,
time_first_token_millsec
},
search: chunk.candidates?.[0]?.groundingMetadata
search: chunk.candidates?.[0]?.groundingMetadata,
mcpToolResponse: toolResponses
})
}
}

View File

@ -11,7 +11,16 @@ import i18n from '@renderer/i18n'
import { getAssistantSettings, getDefaultModel, getTopNamingModel } from '@renderer/services/AssistantService'
import { EVENT_NAMES } from '@renderer/services/EventService'
import { filterContextMessages, filterUserRoleStartMessages } from '@renderer/services/MessagesService'
import { Assistant, FileTypes, GenerateImageParams, Message, Model, Provider, Suggestion } from '@renderer/types'
import {
Assistant,
FileTypes,
GenerateImageParams,
MCPToolResponse,
Message,
Model,
Provider,
Suggestion
} from '@renderer/types'
import { removeSpecialCharacters } from '@renderer/utils'
import { takeRight } from 'lodash'
import OpenAI, { AzureOpenAI } from 'openai'
@ -26,7 +35,7 @@ import {
import { CompletionsParams } from '.'
import BaseProvider from './BaseProvider'
import { callMCPTool, mcpToolsToOpenAITools, openAIToolsToMcpTool } from './mcpToolUtils'
import { callMCPTool, mcpToolsToOpenAITools, openAIToolsToMcpTool, upsertMCPToolResponse } from './mcpToolUtils'
type ReasoningEffort = 'high' | 'medium' | 'low'
@ -295,6 +304,8 @@ export default class OpenAIProvider extends BaseProvider {
Boolean
) as ChatCompletionMessageParam[]
const toolResponses: MCPToolResponse[] = []
const processStream = async (stream: any) => {
if (!isSupportStreamOutput()) {
const time_completion_millsec = new Date().getTime() - start_time_millsec
@ -367,6 +378,14 @@ export default class OpenAIProvider extends BaseProvider {
continue
}
upsertMCPToolResponse(
toolResponses,
{
tool: mcpTool,
status: 'invoking'
},
onChunk
)
const toolCallResponse = await callMCPTool(mcpTool)
console.log(toolCallResponse)
reqMessages.push({
@ -374,6 +393,15 @@ export default class OpenAIProvider extends BaseProvider {
content: toolCallResponse.content,
tool_call_id: toolCall.id
} as ChatCompletionToolMessageParam)
upsertMCPToolResponse(
toolResponses,
{
tool: mcpTool,
status: 'done',
response: toolCallResponse
},
onChunk
)
}
const newStream = await this.sdk.chat.completions
@ -411,7 +439,8 @@ export default class OpenAIProvider extends BaseProvider {
time_first_token_millsec,
time_thinking_millsec
},
citations
citations,
mcpToolResponse: toolResponses
})
}
}

View File

@ -1,5 +1,5 @@
import type { GroundingMetadata } from '@google/generative-ai'
import type { Assistant, Message, Metrics } from '@renderer/types'
import type { Assistant, MCPToolResponse, Message, Metrics } from '@renderer/types'
interface ChunkCallbackData {
text?: string
@ -8,12 +8,13 @@ interface ChunkCallbackData {
metrics?: Metrics
search?: GroundingMetadata
citations?: string[]
mcpToolResponse?: MCPToolResponse[]
}
interface CompletionsParams {
  // Conversation history sent to the provider.
  messages: Message[]
  assistant: Assistant
  // Streaming callback: invoked per chunk with text/reasoning deltas, usage,
  // metrics, search/citation data, and the current MCP tool response list.
  onChunk: ({ text, reasoning_content, usage, metrics, search, citations, mcpToolResponse }: ChunkCallbackData) => void
  // Receives the messages that remain after context filtering.
  onFilterMessages: (messages: Message[]) => void
  // Optional MCP tools the model may invoke during this completion.
  mcpTools?: MCPTool[]
}

View File

@ -1,8 +1,10 @@
import { Tool, ToolUnion, ToolUseBlock } from '@anthropic-ai/sdk/resources'
import { FunctionCall, FunctionDeclaration, SchemaType, Tool as geminiToool } from '@google/generative-ai'
import { MCPTool } from '@renderer/types'
import { MCPTool, MCPToolResponse } from '@renderer/types'
import { ChatCompletionMessageToolCall, ChatCompletionTool } from 'openai/resources'
import { ChunkCallbackData } from '.'
const supportedAttributes = [
'type',
'nullable',
@ -122,3 +124,25 @@ export function geminiFunctionCallToMcpTool(
tool.inputSchema = fcall.args
return tool
}
/**
 * Inserts or updates a tool response in `results`, keyed by tool id, and then
 * notifies the renderer through `onChunk` — on every call, including the
 * update path (hence the try/finally).
 *
 * @param results - Accumulated tool responses for the current completion; mutated in place.
 * @param resp - Latest status/response for one tool invocation.
 * @param onChunk - Streaming callback used to push the updated list to the UI.
 */
export function upsertMCPToolResponse(
  results: MCPToolResponse[],
  resp: MCPToolResponse,
  onChunk: ({ mcpToolResponse }: ChunkCallbackData) => void
) {
  try {
    const existing = results.find((ret) => ret.tool.id === resp.tool.id)
    if (existing) {
      existing.response = resp.response
      existing.status = resp.status
    } else {
      results.push(resp)
    }
  } finally {
    // Emit a shallow copy so consumers that compare by reference (e.g. React
    // state/metadata updates) observe a fresh array on every change instead of
    // one shared, silently mutated instance.
    onChunk({
      text: '',
      mcpToolResponse: [...results]
    })
  }
}

View File

@ -83,7 +83,7 @@ export async function fetchChatCompletion({
messages: filterUsefulMessages(messages),
assistant,
onFilterMessages: (messages) => (_messages = messages),
onChunk: ({ text, reasoning_content, usage, metrics, search, citations }) => {
onChunk: ({ text, reasoning_content, usage, metrics, search, citations, mcpToolResponse }) => {
message.content = message.content + text || ''
message.usage = usage
message.metrics = metrics
@ -96,6 +96,10 @@ export async function fetchChatCompletion({
message.metadata = { ...message.metadata, groundingMetadata: search }
}
if (mcpToolResponse) {
message.metadata = { ...message.metadata, mcpTools: mcpToolResponse }
}
// Handle citations from Perplexity API
if (isFirstChunk && citations) {
message.metadata = {

View File

@ -74,6 +74,8 @@ export type Message = {
citations?: string[]
// Web search
webSearch?: WebSearchResponse
// MCP Tools
mcpTools?: MCPToolResponse[]
}
}
@ -354,3 +356,9 @@ export interface MCPTool {
// Top-level MCP configuration: the set of configured MCP servers.
export interface MCPConfig {
  servers: MCPServer[]
}
/**
 * Lifecycle record for a single MCP tool invocation during a chat completion.
 */
export interface MCPToolResponse {
  /** The tool that was invoked. */
  tool: MCPTool
  /**
   * 'invoking' while the call is in flight, 'done' once a response arrived.
   * Narrowed from plain string: these are the only values producers
   * (upsertMCPToolResponse callers) and consumers (MessageTools) use.
   */
  status: 'invoking' | 'done'
  /** Raw result returned by the MCP server; shape depends on the tool. */
  response?: any
}