diff --git a/src/renderer/src/providers/OpenAIProvider.ts b/src/renderer/src/providers/OpenAIProvider.ts
index 434d6f2e..dfa7d555 100644
--- a/src/renderer/src/providers/OpenAIProvider.ts
+++ b/src/renderer/src/providers/OpenAIProvider.ts
@@ -207,8 +207,33 @@ export default class OpenAIProvider extends BaseProvider {
     }
 
     let hasReasoningContent = false
-    const isReasoningJustDone = (delta: OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta) =>
-      hasReasoningContent ? !!delta?.content : delta?.content === ''
+    let lastChunk = ''
+    const isReasoningJustDone = (
+      delta: OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta & { reasoning_content?: string }
+    ) => {
+      if (!delta?.content) return false
+
+      // Check whether the previous chunk combined with the current one forms the ###Response marker
+      const combinedChunks = lastChunk + delta.content
+      lastChunk = delta.content
+
+      // Detect the end of the thinking phase
+      if (combinedChunks.includes('###Response') || delta.content === '') {
+        return true
+      }
+
+      // reasoning_content present means the model is still thinking
+      if (delta?.reasoning_content) {
+        hasReasoningContent = true
+      }
+
+      // If reasoning_content was seen before and plain content arrives now, thinking is done
+      if (hasReasoningContent && delta.content) {
+        return true
+      }
+
+      return false
+    }
 
     let time_first_token_millsec = 0
     let time_first_content_millsec = 0
diff --git a/src/renderer/src/utils/formats.ts b/src/renderer/src/utils/formats.ts
index be0e37ba..d0ea81b5 100644
--- a/src/renderer/src/utils/formats.ts
+++ b/src/renderer/src/utils/formats.ts
@@ -81,28 +81,60 @@ export function withGeminiGrounding(message: Message) {
   return content
 }
 
+interface ThoughtProcessor {
+  canProcess: (content: string) => boolean
+  process: (content: string) => { reasoning: string; content: string }
+}
+
+const glmZeroPreviewProcessor: ThoughtProcessor = {
+  canProcess: (content: string) => content.includes('###Thinking'),
+  process: (content: string) => {
+    const parts = content.split('###')
+    const thinkingMatch = parts.find((part) => part.trim().startsWith('Thinking'))
+    const responseMatch = parts.find((part) => part.trim().startsWith('Response'))
+
+    return {
+      reasoning: thinkingMatch ? thinkingMatch.replace('Thinking', '').trim() : '',
+      content: responseMatch ? responseMatch.replace('Response', '').trim() : ''
+    }
+  }
+}
+
+const thinkTagProcessor: ThoughtProcessor = {
+  canProcess: (content: string) => content.startsWith('<think>'),
+  process: (content: string) => {
+    const thinkPattern = /^<think>(.*?)<\/think>/s
+    const matches = content.match(thinkPattern)
+
+    // Handle a properly closed <think> tag
+    if (matches) {
+      return {
+        reasoning: matches[1].trim(),
+        content: content.replace(thinkPattern, '').trim()
+      }
+    }
+
+    // Handle an unclosed <think> tag
+    return {
+      reasoning: content.slice(7).trim(), // skip the '<think>' tag
+      content: ''
+    }
+  }
+}
+
 export function withMessageThought(message: Message) {
   if (message.role !== 'assistant') {
     return message
   }
 
   const content = message.content.trim()
-  const thinkPattern = /^<think>(.*?)<\/think>/s
-  const matches = content.match(thinkPattern)
+  const processors: ThoughtProcessor[] = [glmZeroPreviewProcessor, thinkTagProcessor]
 
-  if (!matches) {
-    // Handle the case of an unclosed <think> tag
-    if (content.startsWith('<think>')) {
-      message.reasoning_content = content.slice(7) // '<think>'.length === 7
-      message.content = ''
-    }
-    return message
-  }
-
-  const reasoning_content = matches[1].trim()
-  if (reasoning_content) {
-    message.reasoning_content = reasoning_content
-    message.content = content.replace(thinkPattern, '').trim()
+  const processor = processors.find((p) => p.canProcess(content))
+  if (processor) {
+    const { reasoning, content: processedContent } = processor.process(content)
+    message.reasoning_content = reasoning
+    message.content = processedContent
   }
 
   return message
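
For context, a minimal sketch (not part of the patch) of how the refactored `withMessageThought` is expected to behave for both supported formats. The sample messages are hypothetical and reduced to the fields the function reads, and the `@renderer/utils/formats` import path is assumed from the project's alias setup.

```ts
// Illustration only: assumed import alias and reduced Message shapes.
import { withMessageThought } from '@renderer/utils/formats'

// GLM-Zero-Preview style output with ###Thinking / ###Response markers
const glmMessage = {
  role: 'assistant',
  content: '###Thinking\nThe user asks for 2 + 2.\n###Response\n4'
} as any

// <think>-tag style output (closed tag)
const thinkMessage = {
  role: 'assistant',
  content: '<think>The user asks for 2 + 2.</think>4'
} as any

console.log(withMessageThought(glmMessage).reasoning_content) // 'The user asks for 2 + 2.'
console.log(withMessageThought(glmMessage).content) // '4'
console.log(withMessageThought(thinkMessage).reasoning_content) // 'The user asks for 2 + 2.'
console.log(withMessageThought(thinkMessage).content) // '4'
```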