From cec5eb39891c17a590499dbf0e38b05259cacab5 Mon Sep 17 00:00:00 2001
From: kangfenmao
Date: Fri, 7 Feb 2025 12:38:12 +0800
Subject: [PATCH] fix: remove QwenLM provider #1122

close #1122
close #886
---
 src/renderer/src/config/models.ts             |  50 ------
 src/renderer/src/config/providers.ts          |  14 --
 src/renderer/src/providers/ProviderFactory.ts |   3 -
 src/renderer/src/providers/QwenLMProvider.ts  | 160 ------------------
 src/renderer/src/store/index.ts               |   2 +-
 src/renderer/src/store/llm.ts                 |  10 --
 src/renderer/src/store/migrate.ts             |   4 +
 7 files changed, 5 insertions(+), 238 deletions(-)
 delete mode 100644 src/renderer/src/providers/QwenLMProvider.ts

diff --git a/src/renderer/src/config/models.ts b/src/renderer/src/config/models.ts
index 6708fd83..23fb99e3 100644
--- a/src/renderer/src/config/models.ts
+++ b/src/renderer/src/config/models.ts
@@ -274,56 +274,6 @@ export function getModelLogo(modelId: string) {
 }
 
 export const SYSTEM_MODELS: Record<string, Model[]> = {
-  qwenlm: [
-    {
-      id: 'qwen-plus-latest',
-      provider: 'qwenlm',
-      name: 'Qwen2.5-Plus',
-      group: 'Qwen 2.5'
-    },
-    {
-      id: 'qvq-72b-preview',
-      provider: 'qwenlm',
-      name: 'QVQ-72B-Preview',
-      group: 'QVQ'
-    },
-    {
-      id: 'qwq-32b-preview',
-      provider: 'qwenlm',
-      name: 'QwQ-32B-Preview',
-      group: 'QVQ'
-    },
-    {
-      id: 'qwen2.5-coder-32b-instruct',
-      provider: 'qwenlm',
-      name: 'Qwen2.5-Coder-32B-Instruct',
-      group: 'Qwen 2.5'
-    },
-    {
-      id: 'qwen-vl-max-latest',
-      provider: 'qwenlm',
-      name: 'Qwen2-VL-Max',
-      group: 'Qwen 2'
-    },
-    {
-      id: 'qwen-turbo-latest',
-      provider: 'qwenlm',
-      name: 'Qwen2.5-Turbo',
-      group: 'Qwen 2.5'
-    },
-    {
-      id: 'qwen2.5-72b-instruct',
-      provider: 'qwenlm',
-      name: 'Qwen2.5-72B-Instruct',
-      group: 'Qwen 2.5'
-    },
-    {
-      id: 'qwen2.5-32b-instruct',
-      provider: 'qwenlm',
-      name: 'Qwen2.5-32B-Instruct',
-      group: 'Qwen 2.5'
-    }
-  ],
   aihubmix: [
     {
       id: 'gpt-4o',
diff --git a/src/renderer/src/config/providers.ts b/src/renderer/src/config/providers.ts
index 37719919..03455bf6 100644
--- a/src/renderer/src/config/providers.ts
+++ b/src/renderer/src/config/providers.ts
@@ -23,7 +23,6 @@ import OcoolAiProviderLogo from '@renderer/assets/images/providers/ocoolai.png'
 import OllamaProviderLogo from '@renderer/assets/images/providers/ollama.png'
 import OpenAiProviderLogo from '@renderer/assets/images/providers/openai.png'
 import OpenRouterProviderLogo from '@renderer/assets/images/providers/openrouter.png'
-import QwenLMProviderLogo from '@renderer/assets/images/providers/qwenlm.png'
 import SiliconFlowProviderLogo from '@renderer/assets/images/providers/silicon.png'
 import StepProviderLogo from '@renderer/assets/images/providers/step.png'
 import TogetherProviderLogo from '@renderer/assets/images/providers/together.png'
@@ -92,8 +91,6 @@ export function getProviderLogo(providerId: string) {
       return MistralProviderLogo
     case 'jina':
       return JinaProviderLogo
-    case 'qwenlm':
-      return QwenLMProviderLogo
     default:
       return undefined
   }
@@ -421,16 +418,5 @@ export const PROVIDER_CONFIG = {
       docs: 'https://learn.microsoft.com/en-us/azure/ai-services/openai/',
       models: 'https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models'
     }
-  },
-  qwenlm: {
-    api: {
-      url: 'https://chat.qwenlm.ai/api/'
-    },
-    websites: {
-      official: 'https://chat.qwenlm.ai',
-      apiKey: 'https://chat.qwenlm.ai',
-      docs: 'https://chat.qwenlm.ai',
-      models: 'https://chat.qwenlm.ai'
-    }
   }
 }
diff --git a/src/renderer/src/providers/ProviderFactory.ts b/src/renderer/src/providers/ProviderFactory.ts
index ade11c0d..c730020a 100644
--- a/src/renderer/src/providers/ProviderFactory.ts
+++ b/src/renderer/src/providers/ProviderFactory.ts
@@ -4,7 +4,6 @@ import AnthropicProvider from './AnthropicProvider'
 import BaseProvider from './BaseProvider'
 import GeminiProvider from './GeminiProvider'
 import OpenAIProvider from './OpenAIProvider'
-import QwenLMProvider from './QwenLMProvider'
 
 export default class ProviderFactory {
   static create(provider: Provider): BaseProvider {
@@ -13,8 +12,6 @@ export default class ProviderFactory {
         return new AnthropicProvider(provider)
       case 'gemini':
         return new GeminiProvider(provider)
-      case 'qwenlm':
-        return new QwenLMProvider(provider)
       default:
         return new OpenAIProvider(provider)
     }
diff --git a/src/renderer/src/providers/QwenLMProvider.ts b/src/renderer/src/providers/QwenLMProvider.ts
deleted file mode 100644
index a313416e..00000000
--- a/src/renderer/src/providers/QwenLMProvider.ts
+++ /dev/null
@@ -1,160 +0,0 @@
-import { getOpenAIWebSearchParams, isVisionModel } from '@renderer/config/models'
-import { getAssistantSettings, getDefaultModel } from '@renderer/services/AssistantService'
-import { EVENT_NAMES } from '@renderer/services/EventService'
-import { filterContextMessages } from '@renderer/services/MessagesService'
-import { FileTypes, Message, Model, Provider } from '@renderer/types'
-import { takeRight } from 'lodash'
-import OpenAI from 'openai'
-import { ChatCompletionContentPart, ChatCompletionMessageParam } from 'openai/resources'
-
-import { CompletionsParams } from '.'
-import OpenAIProvider from './OpenAIProvider'
-
-class QwenLMProvider extends OpenAIProvider {
-  constructor(provider: Provider) {
-    super(provider)
-  }
-
-  private async getMessageParams(
-    message: Message,
-    model: Model
-  ): Promise<ChatCompletionMessageParam> {
-    const isVision = isVisionModel(model)
-    const content = await this.getMessageContent(message)
-
-    if (!message.files) {
-      return {
-        role: message.role,
-        content
-      }
-    }
-
-    const parts: ChatCompletionContentPart[] = [
-      {
-        type: 'text',
-        text: content
-      }
-    ]
-
-    const qwenlm_image_url: { type: string; image: string }[] = []
-
-    for (const file of message.files || []) {
-      if (file.type === FileTypes.IMAGE && isVision) {
-        const image = await window.api.file.binaryFile(file.id + file.ext)
-
-        const imageId = await this.uploadImageToQwenLM(image.data, file.origin_name, image.mime)
-        qwenlm_image_url.push({
-          type: 'image',
-          image: imageId
-        })
-      }
-      if ([FileTypes.TEXT, FileTypes.DOCUMENT].includes(file.type)) {
-        const fileContent = await (await window.api.file.read(file.id + file.ext)).trim()
-        parts.push({
-          type: 'text',
-          text: file.origin_name + '\n' + fileContent
-        })
-      }
-    }
-
-    return {
-      role: message.role,
-      content: [...parts, ...qwenlm_image_url]
-    } as ChatCompletionMessageParam
-  }
-
-  private async uploadImageToQwenLM(image_file: Buffer, file_name: string, mime: string): Promise<string> {
-    try {
-      // Create the FormData payload
-      const formData = new FormData()
-      formData.append('file', new Blob([image_file], { type: mime }), file_name)
-
-      // Send the upload request
-      const response = await fetch(`${this.provider.apiHost}v1/files/`, {
-        method: 'POST',
-        headers: {
-          Authorization: `Bearer ${this.apiKey}`
-        },
-        body: formData
-      })
-
-      if (!response.ok) {
-        throw new Error('Failed to upload image to QwenLM')
-      }
-
-      const data = await response.json()
-      return data.id
-    } catch (error) {
-      console.error('Error uploading image to QwenLM:', error)
-      throw error
-    }
-  }
-
-  async completions({ messages, assistant, onChunk, onFilterMessages }: CompletionsParams): Promise<void> {
-    const defaultModel = getDefaultModel()
-    const model = assistant.model || defaultModel
-    const { contextCount, maxTokens } = getAssistantSettings(assistant)
-
-    const systemMessage = assistant.prompt ? { role: 'system', content: assistant.prompt } : undefined
-    const userMessages: ChatCompletionMessageParam[] = []
-
-    const _messages = filterContextMessages(takeRight(messages, contextCount + 1))
-    onFilterMessages(_messages)
-
-    if (_messages[0]?.role !== 'user') {
-      userMessages.push({ role: 'user', content: '' })
-    }
-
-    for (const message of _messages) {
-      userMessages.push(await this.getMessageParams(message, model))
-    }
-
-    let time_first_token_millsec = 0
-    const start_time_millsec = new Date().getTime()
-
-    // @ts-ignore key is not typed
-    const stream = await this.sdk.chat.completions.create({
-      model: model.id,
-      messages: [systemMessage, ...userMessages].filter(Boolean) as ChatCompletionMessageParam[],
-      temperature: assistant?.settings?.temperature,
-      top_p: assistant?.settings?.topP,
-      max_tokens: maxTokens,
-      stream: true,
-      ...getOpenAIWebSearchParams(assistant, model),
-      ...this.getCustomParameters(assistant)
-    })
-
-    let accumulatedText = ''
-
-    for await (const chunk of stream) {
-      if (window.keyv.get(EVENT_NAMES.CHAT_COMPLETION_PAUSED)) {
-        break
-      }
-      if (time_first_token_millsec == 0) {
-        time_first_token_millsec = new Date().getTime() - start_time_millsec
-      }
-
-      // Get the full content of the current chunk
-      const currentContent = chunk.choices[0]?.delta?.content || ''
-
-      // If the content differs from what has accumulated, emit only the incremental part
-      if (currentContent !== accumulatedText) {
-        const deltaText = currentContent.slice(accumulatedText.length)
-        accumulatedText = currentContent // update the accumulated text
-
-        const time_completion_millsec = new Date().getTime() - start_time_millsec
-        onChunk({
-          text: deltaText,
-          usage: chunk.usage,
-          metrics: {
-            completion_tokens: chunk.usage?.completion_tokens,
-            time_completion_millsec,
-            time_first_token_millsec
-          }
-        })
-      }
-    }
-  }
-}
-
-export default QwenLMProvider
diff --git a/src/renderer/src/store/index.ts b/src/renderer/src/store/index.ts
index 35891cc0..8a72bb19 100644
--- a/src/renderer/src/store/index.ts
+++ b/src/renderer/src/store/index.ts
@@ -30,7 +30,7 @@ const persistedReducer = persistReducer(
   {
     key: 'cherry-studio',
     storage,
-    version: 63,
+    version: 64,
     blacklist: ['runtime'],
     migrate
   },
diff --git a/src/renderer/src/store/llm.ts b/src/renderer/src/store/llm.ts
index b912ad86..cf8fe936 100644
--- a/src/renderer/src/store/llm.ts
+++ b/src/renderer/src/store/llm.ts
@@ -323,16 +323,6 @@ const initialState: LlmState = {
       models: SYSTEM_MODELS.jina,
       isSystem: true,
       enabled: false
-    },
-    {
-      id: 'qwenlm',
-      name: 'QwenLM',
-      type: 'openai',
-      apiKey: '',
-      apiHost: 'https://chat.qwenlm.ai/api/',
-      models: SYSTEM_MODELS.qwenlm,
-      isSystem: true,
-      enabled: false
     }
   ],
   settings: {
diff --git a/src/renderer/src/store/migrate.ts b/src/renderer/src/store/migrate.ts
index 4b72e409..3d2b38ff 100644
--- a/src/renderer/src/store/migrate.ts
+++ b/src/renderer/src/store/migrate.ts
@@ -907,6 +907,10 @@ const migrateConfig = {
       }
     }
     return state
+  },
+  '64': (state: RootState) => {
+    state.llm.providers = state.llm.providers.filter((provider) => provider.id !== 'qwenlm')
+    return state
   }
 }
 
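
Two notes for reviewers, with illustrative sketches in TypeScript. Neither block is part of the patch; any name or wiring that does not appear above is an assumption.

First, the store migration. Bumping `version` from 63 to 64 in the `persistReducer` config is what causes the new `'64'` entry in `migrateConfig` to run, so users who still carry the removed QwenLM provider in persisted state have it stripped on the next launch. The patch does not show how `migrate` itself is constructed in this repo; the sketch below assumes the common redux-persist `createMigrate` wiring.

import { createMigrate } from 'redux-persist'
import type { MigrationManifest } from 'redux-persist'

// Illustrative shape only: the patch shows that state.llm.providers is an
// array of provider objects carrying at least an `id` field.
type LlmProvider = { id: string }

const migrations: MigrationManifest = {
  // Mirrors the '64' migration added in migrate.ts: drop the persisted
  // qwenlm entry so rehydrated state matches the new initialState.
  '64': (state: any) => {
    state.llm.providers = state.llm.providers.filter((provider: LlmProvider) => provider.id !== 'qwenlm')
    return state
  }
}

// redux-persist compares the version persisted on disk against the configured
// `version: 64` and replays every migration keyed above the stored version
// before rehydrating; without the version bump, the '64' entry would never run.
export const migrate = createMigrate(migrations, { debug: false })

Second, the most non-obvious code being deleted: QwenLM's streaming endpoint appears to have returned the full accumulated content in each chunk rather than an OpenAI-style delta, which is what the `accumulatedText` bookkeeping in `completions` compensated for by slicing off only the new suffix before calling `onChunk`. A minimal standalone version of that normalization (the helper name is hypothetical):

// Converts a stream of cumulative snapshots into incremental deltas.
async function* toDeltas(chunks: AsyncIterable<string>): AsyncGenerator<string> {
  let accumulated = ''
  for await (const current of chunks) {
    if (current !== accumulated) {
      // Each snapshot repeats everything so far; emit only the new suffix.
      yield current.slice(accumulated.length)
      accumulated = current
    }
  }
}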