refactor(Providers): Optimize tool handling and message filtering

- Move MCP tool utilities to a dedicated utils folder
- Update import paths for MCP tool functions across providers
- Add isEmpty check for tools in Anthropic provider
- Enhance message filtering in OpenAI provider with filterEmptyMessages
- Simplify tool and message preparation logic
- Show the message-list loader only while additional history is loading

Author: kangfenmao
Date: 2025-03-11 13:53:06 +08:00
Commit: 6bcc21c578 (parent: 48d824fe6f)
4 changed files with 31 additions and 24 deletions

File 1 of 4: Messages component

@@ -212,7 +212,7 @@ const Messages: React.FC<MessagesProps> = ({ assistant, topic, setActiveTopic })
         inverse={true}
         scrollableTarget="messages">
         <ScrollContainer>
-          <LoaderContainer $loading={loading || isLoadingMore}>
+          <LoaderContainer $loading={isLoadingMore}>
             <BeatLoader size={8} color="var(--color-text-2)" />
           </LoaderContainer>
           {Object.entries(getGroupedMessages(displayMessages)).map(([key, groupMessages]) => (
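
Dropping `loading` from the condition ties the inline loader to `isLoadingMore` alone, so the spinner only appears while older messages are being fetched. As background, the `$` prefix on `$loading` is styled-components' transient-prop convention: the styled component consumes the prop without forwarding it to the DOM node. A hypothetical `LoaderContainer` in that style (the project's actual styling is not shown in this diff):

```ts
import styled from 'styled-components'

// Hypothetical styling for illustration; only the `$loading` transient
// prop is taken from the diff above.
const LoaderContainer = styled.div<{ $loading: boolean }>`
  display: flex;
  justify-content: center;
  padding: 10px;
  visibility: ${(props) => (props.$loading ? 'visible' : 'hidden')};
`
```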

File 2 of 4: AnthropicProvider

@@ -18,18 +18,18 @@ import {
 } from '@renderer/services/MessagesService'
 import { Assistant, FileTypes, MCPToolResponse, Message, Model, Provider, Suggestion } from '@renderer/types'
 import { removeSpecialCharactersForTopicName } from '@renderer/utils'
-import { first, flatten, sum, takeRight } from 'lodash'
-import OpenAI from 'openai'
-import { CompletionsParams } from '.'
-import BaseProvider from './BaseProvider'
 import {
   anthropicToolUseToMcpTool,
   callMCPTool,
   filterMCPTools,
   mcpToolsToAnthropicTools,
   upsertMCPToolResponse
-} from './mcpToolUtils'
+} from '@renderer/utils/mcp-tools'
+import { first, flatten, isEmpty, sum, takeRight } from 'lodash'
+import OpenAI from 'openai'
+import { CompletionsParams } from '.'
+import BaseProvider from './BaseProvider'
 
 type ReasoningEffort = 'high' | 'medium' | 'low'
@@ -159,7 +159,7 @@ export default class AnthropicProvider extends BaseProvider {
     const body: MessageCreateParamsNonStreaming = {
       model: model.id,
       messages: userMessages,
-      tools: tools,
+      tools: isEmpty(tools) ? undefined : tools,
       max_tokens: maxTokens || DEFAULT_MAX_TOKENS,
       temperature: this.getTemperature(assistant, model),
       top_p: this.getTopP(assistant, model),
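
The only functional change in this file is the `tools` guard above. A standalone sketch of the pattern, using an invented `RequestBody` shape: because `JSON.stringify` drops `undefined` properties, the request omits the `tools` key entirely instead of sending `tools: []`, which an API may treat differently from "no tools".

```ts
import { isEmpty } from 'lodash'

// Invented, simplified request shape for illustration only.
interface RequestBody {
  model: string
  max_tokens: number
  tools?: { name: string; input_schema: object }[]
}

function buildBody(model: string, tools: RequestBody['tools'] = []): RequestBody {
  return {
    model,
    max_tokens: 4096,
    // isEmpty([]) is true, so an empty list becomes `undefined` and the
    // key disappears from the serialized payload.
    tools: isEmpty(tools) ? undefined : tools
  }
}

console.log(JSON.stringify(buildBody('claude-3-5-sonnet')))
// -> {"model":"claude-3-5-sonnet","max_tokens":4096}
```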

File 3 of 4: GeminiProvider

@@ -21,19 +21,19 @@ import { EVENT_NAMES } from '@renderer/services/EventService'
 import { filterContextMessages, filterUserRoleStartMessages } from '@renderer/services/MessagesService'
 import { Assistant, FileType, FileTypes, MCPToolResponse, Message, Model, Provider, Suggestion } from '@renderer/types'
 import { removeSpecialCharactersForTopicName } from '@renderer/utils'
-import axios from 'axios'
-import { isEmpty, takeRight } from 'lodash'
-import OpenAI from 'openai'
-import { CompletionsParams } from '.'
-import BaseProvider from './BaseProvider'
 import {
   callMCPTool,
   filterMCPTools,
   geminiFunctionCallToMcpTool,
   mcpToolsToGeminiTools,
   upsertMCPToolResponse
-} from './mcpToolUtils'
+} from '@renderer/utils/mcp-tools'
+import axios from 'axios'
+import { isEmpty, takeRight } from 'lodash'
+import OpenAI from 'openai'
+import { CompletionsParams } from '.'
+import BaseProvider from './BaseProvider'
 
 export default class GeminiProvider extends BaseProvider {
   private sdk: GoogleGenerativeAI

File 4 of 4: OpenAIProvider

@@ -10,7 +10,11 @@ import { getStoreSetting } from '@renderer/hooks/useSettings'
 import i18n from '@renderer/i18n'
 import { getAssistantSettings, getDefaultModel, getTopNamingModel } from '@renderer/services/AssistantService'
 import { EVENT_NAMES } from '@renderer/services/EventService'
-import { filterContextMessages, filterUserRoleStartMessages } from '@renderer/services/MessagesService'
+import {
+  filterContextMessages,
+  filterEmptyMessages,
+  filterUserRoleStartMessages
+} from '@renderer/services/MessagesService'
 import {
   Assistant,
   FileTypes,
@@ -22,6 +26,13 @@ import {
   Suggestion
 } from '@renderer/types'
 import { removeSpecialCharactersForTopicName } from '@renderer/utils'
+import {
+  callMCPTool,
+  filterMCPTools,
+  mcpToolsToOpenAITools,
+  openAIToolsToMcpTool,
+  upsertMCPToolResponse
+} from '@renderer/utils/mcp-tools'
 import { takeRight } from 'lodash'
 import OpenAI, { AzureOpenAI } from 'openai'
 import {
@@ -35,13 +46,6 @@ import {
 import { CompletionsParams } from '.'
 import BaseProvider from './BaseProvider'
-import {
-  callMCPTool,
-  filterMCPTools,
-  mcpToolsToOpenAITools,
-  openAIToolsToMcpTool,
-  upsertMCPToolResponse
-} from './mcpToolUtils'
 
 type ReasoningEffort = 'high' | 'medium' | 'low'
@@ -248,7 +252,10 @@ export default class OpenAIProvider extends BaseProvider {
     const userMessages: ChatCompletionMessageParam[] = []
-    const _messages = filterUserRoleStartMessages(filterContextMessages(takeRight(messages, contextCount + 1)))
+    const _messages = filterUserRoleStartMessages(
+      filterContextMessages(filterEmptyMessages(takeRight(messages, contextCount + 1)))
+    )
 
     onFilterMessages(_messages)
 
     for (const message of _messages) {
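
`filterEmptyMessages` itself is not part of this diff. A plausible implementation, assuming a message counts as empty when it carries neither text content nor attached files; the `content` and `files` field names are guesses about the `Message` type, not confirmed by this commit:

```ts
import type { Message } from '@renderer/types'

// Plausible sketch; the real helper lives in
// @renderer/services/MessagesService and may check more fields.
export function filterEmptyMessages(messages: Message[]): Message[] {
  return messages.filter((message) => {
    const hasText = typeof message.content === 'string' && message.content.trim().length > 0
    const hasFiles = Array.isArray(message.files) && message.files.length > 0
    return hasText || hasFiles
  })
}
```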