feat(MCP): Support GLM-4-alltools (#3304)
- Added Gemma 3 as a vision model.
- Improved the function-calling model check logic.
- Introduced a new method to clean tool call arguments, ensuring proper formatting and extraction of parameters.
- Adjusted tool call handling in OpenAIProvider to accommodate the new GLM-4-alltools model check and argument processing.
parent e89e27b0d7
commit 25f354c651
@@ -157,7 +157,8 @@ const visionAllowedModels = [
   'chatgpt-4o(?:-[\\w-]+)?',
   'o1(?:-[\\w-]+)?',
   'deepseek-vl(?:[\\w-]+)?',
-  'kimi-latest'
+  'kimi-latest',
+  'gemma-3(?:-[\\w-]+)'
 ]
 
 const visionExcludedModels = ['gpt-4-\\d+-preview', 'gpt-4-turbo-preview', 'gpt-4-32k', 'gpt-4-\\d+']
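The entries above are regex fragments rather than literal model IDs; the constant that joins them into one pattern is outside this hunk. A minimal sketch of how the new 'gemma-3' entry behaves once combined — the VISION_REGEX name and the \b(?:…)\b wrapper are assumptions, mirroring FUNCTION_CALLING_REGEX below:

// Sketch only – the real name of the joined constant is not shown in this diff.
const visionAllowedModels = ['kimi-latest', 'gemma-3(?:-[\\w-]+)']
const VISION_REGEX = new RegExp(`\\b(?:${visionAllowedModels.join('|')})\\b`, 'i')

VISION_REGEX.test('gemma-3-27b-it') // true – suffixed variants match
VISION_REGEX.test('gemma-3')        // false – the pattern requires a '-suffix'
VISION_REGEX.test('kimi-latest')    // true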
@@ -178,14 +179,34 @@ export const EMBEDDING_REGEX = /(?:^text-|embed|bge-|e5-|LLM2Vec|retrieval|uae-|
 export const NOT_SUPPORTED_REGEX = /(?:^tts|rerank|whisper|speech)/i
 
 // Tool calling models
-export const FUNCTION_CALLING_MODELS = ['gpt-4o', 'gpt-4o-mini', 'gpt-4', 'gpt-4.5', 'claude', 'qwen']
-export const FUNCTION_CALLING_REGEX = new RegExp(`\\b(?:${FUNCTION_CALLING_MODELS.join('|')})\\b`, 'i')
+export const FUNCTION_CALLING_MODELS = [
+  'gpt-4o',
+  'gpt-4o-mini',
+  'gpt-4',
+  'gpt-4.5',
+  'claude',
+  'qwen',
+  'glm-4(?:-[\\w-]+)?',
+  'learnlm(?:-[\\w-]+)?',
+  'gemini(?:-[\\w-]+)?' // Gemini embedding models are excluded earlier
+]
+
+const FUNCTION_CALLING_EXCLUDED_MODELS = ['aqa(?:-[\\w-]+)?', 'imagen(?:-[\\w-]+)?']
+
+export const FUNCTION_CALLING_REGEX = new RegExp(
+  `\\b(?!(?:${FUNCTION_CALLING_EXCLUDED_MODELS.join('|')})\\b)(?:${FUNCTION_CALLING_MODELS.join('|')})\\b`,
+  'i'
+)
 
 export function isFunctionCallingModel(model: Model): boolean {
   if (model.type?.includes('function_calling')) {
     return true
   }
 
-  if (['gemini', 'deepseek', 'anthropic'].includes(model.provider) && !EMBEDDING_REGEX.test(model.id)) {
+  if (isEmbeddingModel(model)) {
+    return false
+  }
+
+  if (['deepseek', 'anthropic'].includes(model.provider)) {
     return true
   }
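The rebuilt FUNCTION_CALLING_REGEX adds a negative lookahead that rejects IDs from the excluded families (aqa, imagen) before the inclusion list is tried. A self-contained check of that behaviour, copying only the constants shown above:

const FUNCTION_CALLING_MODELS = [
  'gpt-4o', 'gpt-4o-mini', 'gpt-4', 'gpt-4.5', 'claude', 'qwen',
  'glm-4(?:-[\\w-]+)?', 'learnlm(?:-[\\w-]+)?', 'gemini(?:-[\\w-]+)?'
]
const FUNCTION_CALLING_EXCLUDED_MODELS = ['aqa(?:-[\\w-]+)?', 'imagen(?:-[\\w-]+)?']
const FUNCTION_CALLING_REGEX = new RegExp(
  `\\b(?!(?:${FUNCTION_CALLING_EXCLUDED_MODELS.join('|')})\\b)(?:${FUNCTION_CALLING_MODELS.join('|')})\\b`,
  'i'
)

FUNCTION_CALLING_REGEX.test('glm-4-alltools') // true – covered by the new glm-4 entry
FUNCTION_CALLING_REGEX.test('gemini-1.5-pro') // true
FUNCTION_CALLING_REGEX.test('imagen-3')       // false – image models stay out
FUNCTION_CALLING_REGEX.test('aqa')            // false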
@@ -284,6 +284,55 @@ export default class OpenAIProvider extends BaseProvider {
     return model.id.startsWith('o1') || model.id.startsWith('o3')
   }
 
+  /**
+   * Check if the model is a Glm-4-alltools
+   * @param model - The model
+   * @returns True if the model is a Glm-4-alltools, false otherwise
+   */
+  private isZhipuTool(model: Model) {
+    return model.id.includes('glm-4-alltools')
+  }
+
+  /**
+   * Clean the tool call arguments
+   * @param toolCall - The tool call
+   * @returns The cleaned tool call
+   */
+  private cleanToolCallArgs(toolCall: ChatCompletionMessageToolCall): ChatCompletionMessageToolCall {
+    if (toolCall.function.arguments) {
+      let args = toolCall.function.arguments
+      const codeBlockRegex = /```(?:\w*\n)?([\s\S]*?)```/
+      const match = args.match(codeBlockRegex)
+      if (match) {
+        // Extract content from code block
+        let extractedArgs = match[1].trim()
+        // Clean function call format like tool_call(name1=value1,name2=value2)
+        const functionCallRegex = /^\s*\w+\s*\(([\s\S]*?)\)\s*$/
+        const functionMatch = extractedArgs.match(functionCallRegex)
+        if (functionMatch) {
+          // Try to convert parameters to JSON format
+          const params = functionMatch[1].split(',').filter(Boolean)
+          const paramsObj = {}
+          params.forEach((param) => {
+            const [name, value] = param.split('=').map((p) => p.trim())
+            if (name && value !== undefined) {
+              paramsObj[name] = value
+            }
+          })
+          extractedArgs = JSON.stringify(paramsObj)
+        }
+        toolCall.function.arguments = extractedArgs
+      }
+      args = toolCall.function.arguments
+      const firstBraceIndex = args.indexOf('{')
+      const lastBraceIndex = args.lastIndexOf('}')
+      if (firstBraceIndex !== -1 && lastBraceIndex !== -1 && firstBraceIndex < lastBraceIndex) {
+        toolCall.function.arguments = args.substring(firstBraceIndex, lastBraceIndex + 1)
+      }
+    }
+    return toolCall
+  }
+
   /**
    * Generate completions for the assistant
    * @param messages - The messages
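For illustration, here is roughly what cleanToolCallArgs does to the kind of output GLM-4-alltools tends to emit: a fenced code block wrapping a Python-style call instead of a JSON object. The sample input is made up, not taken from the diff:

// Hypothetical raw tool call as the model might return it.
const toolCall = {
  id: 'call_0',
  type: 'function' as const,
  function: {
    name: 'get_weather',
    arguments: '```python\ntool_call(city=Beijing, unit=celsius)\n```'
  }
}

// Tracing the method above:
// 1. the ``` fence is stripped,
// 2. 'tool_call(city=Beijing, unit=celsius)' is rewritten as key/value JSON,
// 3. the string is trimmed to the outermost {...} pair.
// Result: toolCall.function.arguments === '{"city":"Beijing","unit":"celsius"}'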
@@ -443,11 +492,19 @@ export default class OpenAIProvider extends BaseProvider {
       }
 
       if (finishReason === 'tool_calls') {
-        const toolCalls = Object.values(final_tool_calls)
-        reqMessages.push({
-          role: 'assistant',
-          tool_calls: toolCalls
-        } as ChatCompletionAssistantMessageParam)
+        const toolCalls = Object.values(final_tool_calls).map(this.cleanToolCallArgs)
+        console.log('start invoke tools', toolCalls)
+        if (this.isZhipuTool(model)) {
+          reqMessages.push({
+            role: 'assistant',
+            content: `argments=${JSON.stringify(toolCalls[0].function.arguments)}`
+          })
+        } else {
+          reqMessages.push({
+            role: 'assistant',
+            tool_calls: toolCalls
+          } as ChatCompletionAssistantMessageParam)
+        }
 
         for (const toolCall of toolCalls) {
           const mcpTool = openAIToolsToMcpTool(mcpTools, toolCall)
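A rough sketch of the two assistant messages this branch can produce, using a hypothetical cleaned tool call (shape only, not taken from the diff). Note that toolCalls[0].function.arguments is already a string, so JSON.stringify re-quotes and escapes it in the GLM-4-alltools case:

const toolCalls = [
  {
    id: 'call_0',
    type: 'function' as const,
    function: { name: 'get_weather', arguments: '{"city":"Beijing"}' }
  }
]

// GLM-4-alltools branch: arguments echoed back as plain assistant text
// (keeping the literal 'argments=' prefix used in the diff).
const zhipuMessage = {
  role: 'assistant' as const,
  content: `argments=${JSON.stringify(toolCalls[0].function.arguments)}`
}
// zhipuMessage.content === 'argments="{\\"city\\":\\"Beijing\\"}"'

// Every other model: standard OpenAI-style assistant message with tool_calls.
const defaultMessage = { role: 'assistant' as const, tool_calls: toolCalls }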