diff --git a/src/main/ipc.ts b/src/main/ipc.ts index 75b78978..a2089751 100644 --- a/src/main/ipc.ts +++ b/src/main/ipc.ts @@ -117,6 +117,7 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) { ipcMain.handle('file:base64Image', fileManager.base64Image) ipcMain.handle('file:download', fileManager.downloadFile) ipcMain.handle('file:copy', fileManager.copyFile) + ipcMain.handle('file:binaryFile', fileManager.binaryFile) // minapp ipcMain.handle('minapp', (_, args) => { diff --git a/src/main/services/FileStorage.ts b/src/main/services/FileStorage.ts index 2ef4af4c..f0c74a63 100644 --- a/src/main/services/FileStorage.ts +++ b/src/main/services/FileStorage.ts @@ -263,6 +263,13 @@ class FileStorage { } } + public binaryFile = async (_: Electron.IpcMainInvokeEvent, id: string): Promise<{ data: Buffer; mime: string }> => { + const filePath = path.join(this.storageDir, id) + const data = await fs.promises.readFile(filePath) + const mime = `image/${path.extname(filePath).slice(1)}` + return { data, mime } + } + public clear = async (): Promise<void> => { await fs.promises.rmdir(this.storageDir, { recursive: true }) await this.initStorageDir() diff --git a/src/preload/index.d.ts b/src/preload/index.d.ts index 26d31362..274402a9 100644 --- a/src/preload/index.d.ts +++ b/src/preload/index.d.ts @@ -53,6 +53,7 @@ declare global { base64Image: (fileId: string) => Promise<{ mime: string; base64: string; data: string }> download: (url: string) => Promise copy: (fileId: string, destPath: string) => Promise<void> + binaryFile: (fileId: string) => Promise<{ data: Buffer; mime: string }> } export: { toWord: (markdown: string, fileName: string) => Promise<void> diff --git a/src/preload/index.ts b/src/preload/index.ts index 8d81918c..25fa8c1f 100644 --- a/src/preload/index.ts +++ b/src/preload/index.ts @@ -43,7 +43,8 @@ const api = { saveImage: (name: string, data: string) => ipcRenderer.invoke('file:saveImage', name, data), base64Image: (fileId: string) => 
ipcRenderer.invoke('file:base64Image', fileId), download: (url: string) => ipcRenderer.invoke('file:download', url), - copy: (fileId: string, destPath: string) => ipcRenderer.invoke('file:copy', fileId, destPath) + copy: (fileId: string, destPath: string) => ipcRenderer.invoke('file:copy', fileId, destPath), + binaryFile: (fileId: string) => ipcRenderer.invoke('file:binaryFile', fileId) }, export: { toWord: (markdown: string, fileName: string) => ipcRenderer.invoke('export:word', markdown, fileName) diff --git a/src/renderer/src/assets/images/providers/qwenlm.png b/src/renderer/src/assets/images/providers/qwenlm.png new file mode 100644 index 00000000..d207a289 Binary files /dev/null and b/src/renderer/src/assets/images/providers/qwenlm.png differ diff --git a/src/renderer/src/config/models.ts b/src/renderer/src/config/models.ts index f3edbe4b..2fb572e3 100644 --- a/src/renderer/src/config/models.ts +++ b/src/renderer/src/config/models.ts @@ -264,6 +264,56 @@ export function getModelLogo(modelId: string) { } export const SYSTEM_MODELS: Record<string, Model[]> = { + qwenlm: [ + { + id: 'qwen-plus-latest', + provider: 'qwenlm', + name: 'Qwen2.5-Plus', + group: 'Qwen' + }, + { + id: 'qvq-72b-preview', + provider: 'qwenlm', + name: 'QVQ-72B-Preview', + group: 'Qwen' + }, + { + id: 'qwq-32b-preview', + provider: 'qwenlm', + name: 'QwQ-32B-Preview', + group: 'Qwen' + }, + { + id: 'qwen2.5-coder-32b-instruct', + provider: 'qwenlm', + name: 'Qwen2.5-Coder-32B-Instruct', + group: 'Qwen' + }, + { + id: 'qwen-vl-max-latest', + provider: 'qwenlm', + name: 'Qwen2-VL-Max', + group: 'Qwen' + }, + { + id: 'qwen-turbo-latest', + provider: 'qwenlm', + name: 'Qwen2.5-Turbo', + group: 'Qwen' + }, + { + id: 'qwen2.5-72b-instruct', + provider: 'qwenlm', + name: 'Qwen2.5-72B-Instruct', + group: 'Qwen' + }, + { + id: 'qwen2.5-32b-instruct', + provider: 'qwenlm', + name: 'Qwen2.5-32B-Instruct', + group: 'Qwen' + } + ], aihubmix: [ { id: 'gpt-4o', diff --git a/src/renderer/src/config/providers.ts 
b/src/renderer/src/config/providers.ts index d9060ab8..460f136c 100644 --- a/src/renderer/src/config/providers.ts +++ b/src/renderer/src/config/providers.ts @@ -28,6 +28,7 @@ import StepProviderLogo from '@renderer/assets/images/providers/step.png' import TogetherProviderLogo from '@renderer/assets/images/providers/together.png' import ZeroOneProviderLogo from '@renderer/assets/images/providers/zero-one.png' import ZhipuProviderLogo from '@renderer/assets/images/providers/zhipu.png' +import QwenLMProviderLogo from '@renderer/assets/images/providers/qwenlm.png' export function getProviderLogo(providerId: string) { switch (providerId) { @@ -91,6 +92,8 @@ export function getProviderLogo(providerId: string) { return MistralProviderLogo case 'jina': return JinaProviderLogo + case 'qwenlm': + return QwenLMProviderLogo default: return undefined } @@ -418,5 +421,16 @@ export const PROVIDER_CONFIG = { docs: 'https://learn.microsoft.com/en-us/azure/ai-services/openai/', models: 'https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models' } + }, + qwenlm: { + api: { + url: 'https://chat.qwenlm.ai/api/' + }, + websites: { + official: 'https://chat.qwenlm.ai', + apiKey: 'https://chat.qwenlm.ai', + docs: 'https://chat.qwenlm.ai', + models: 'https://chat.qwenlm.ai' + } } } diff --git a/src/renderer/src/i18n/locales/en-us.json b/src/renderer/src/i18n/locales/en-us.json index f8ce637e..8b7e31d8 100644 --- a/src/renderer/src/i18n/locales/en-us.json +++ b/src/renderer/src/i18n/locales/en-us.json @@ -326,7 +326,8 @@ "together": "Together", "yi": "Yi", "zhinao": "360AI", - "zhipu": "ZHIPU AI" + "zhipu": "ZHIPU AI", + "qwenlm": "QwenLM" }, "settings": { "about": "About & Feedback", diff --git a/src/renderer/src/i18n/locales/ja-jp.json b/src/renderer/src/i18n/locales/ja-jp.json index 4870b265..d74ef218 100644 --- a/src/renderer/src/i18n/locales/ja-jp.json +++ b/src/renderer/src/i18n/locales/ja-jp.json @@ -324,7 +324,8 @@ "together": "Together", "yi": "零一万物", "zhinao": 
"360智脳", - "zhipu": "智譜AI" + "zhipu": "智譜AI", + "qwenlm": "QwenLM" }, "settings": { "about": "について", diff --git a/src/renderer/src/i18n/locales/ru-ru.json b/src/renderer/src/i18n/locales/ru-ru.json index 2c39e3f7..1b271787 100644 --- a/src/renderer/src/i18n/locales/ru-ru.json +++ b/src/renderer/src/i18n/locales/ru-ru.json @@ -326,7 +326,8 @@ "together": "Together", "yi": "Yi", "zhinao": "360AI", - "zhipu": "ZHIPU AI" + "zhipu": "ZHIPU AI", + "qwenlm": "QwenLM" }, "settings": { "about": "О программе и обратная связь", diff --git a/src/renderer/src/i18n/locales/zh-cn.json b/src/renderer/src/i18n/locales/zh-cn.json index af56a32e..8b3e3fc4 100644 --- a/src/renderer/src/i18n/locales/zh-cn.json +++ b/src/renderer/src/i18n/locales/zh-cn.json @@ -327,7 +327,8 @@ "together": "Together", "yi": "零一万物", "zhinao": "360智脑", - "zhipu": "智谱AI" + "zhipu": "智谱AI", + "qwenlm": "QwenLM" }, "settings": { "about": "关于我们", diff --git a/src/renderer/src/i18n/locales/zh-tw.json b/src/renderer/src/i18n/locales/zh-tw.json index f18260f2..a3eb21ea 100644 --- a/src/renderer/src/i18n/locales/zh-tw.json +++ b/src/renderer/src/i18n/locales/zh-tw.json @@ -326,7 +326,8 @@ "together": "Together", "yi": "零一萬物", "zhinao": "360智腦", - "zhipu": "智譜AI" + "zhipu": "智譜AI", + "qwenlm": "QwenLM" }, "settings": { "about": "關於與回饋", diff --git a/src/renderer/src/providers/OpenAIProvider.ts b/src/renderer/src/providers/OpenAIProvider.ts index 48763df0..a424e766 100644 --- a/src/renderer/src/providers/OpenAIProvider.ts +++ b/src/renderer/src/providers/OpenAIProvider.ts @@ -46,6 +46,33 @@ export default class OpenAIProvider extends BaseProvider { return providers.includes(this.provider.id) } + private async uploadImageToQwenLM(image_file: Buffer, file_name: string, mime: string): Promise<string> { + try { + // 创建 FormData + const formData = new FormData() + formData.append('file', new Blob([image_file], { type: mime }), file_name) + + // 发送上传请求 + const response = await fetch(`${this.provider.apiHost}v1/files/`, { + 
method: 'POST', + headers: { + Authorization: `Bearer ${this.apiKey}` + }, + body: formData + }) + + if (!response.ok) { + throw new Error('Failed to upload image to QwenLM') + } + + const data = await response.json() + return data.id + } catch (error) { + console.error('Error uploading image to QwenLM:', error) + throw error + } + } + private async getMessageParam( message: Message, model: Model @@ -94,20 +121,48 @@ export default class OpenAIProvider extends BaseProvider { } ] - for (const file of message.files || []) { - if (file.type === FileTypes.IMAGE && isVision) { - const image = await window.api.file.base64Image(file.id + file.ext) - parts.push({ - type: 'image_url', - image_url: { url: image.data } - }) + //QwenLM上传图片 + if (this.provider.id === 'qwenlm') { + let qwenlm_image_url: { type: string; image: string }[] = [] + + for (const file of message.files || []) { + if (file.type === FileTypes.IMAGE && isVision) { + const image = await window.api.file.binaryFile(file.id + file.ext) + + const imageId = await this.uploadImageToQwenLM(image.data, file.origin_name, image.mime) + qwenlm_image_url.push({ + type: 'image', + image: imageId + }) + } + if ([FileTypes.TEXT, FileTypes.DOCUMENT].includes(file.type)) { + const fileContent = await (await window.api.file.read(file.id + file.ext)).trim() + parts.push({ + type: 'text', + text: file.origin_name + '\n' + fileContent + }) + } } - if ([FileTypes.TEXT, FileTypes.DOCUMENT].includes(file.type)) { - const fileContent = await (await window.api.file.read(file.id + file.ext)).trim() - parts.push({ - type: 'text', - text: file.origin_name + '\n' + fileContent - }) + return { + role: message.role, + content: [...parts, ...qwenlm_image_url] + } as ChatCompletionMessageParam + } else { + for (const file of message.files || []) { + if (file.type === FileTypes.IMAGE && isVision) { + const image = await window.api.file.base64Image(file.id + file.ext) + parts.push({ + type: 'image_url', + image_url: { url: image.data } + }) + 
} + if ([FileTypes.TEXT, FileTypes.DOCUMENT].includes(file.type)) { + const fileContent = await (await window.api.file.read(file.id + file.ext)).trim() + parts.push({ + type: 'text', + text: file.origin_name + '\n' + fileContent + }) + } } } @@ -172,6 +227,40 @@ export default class OpenAIProvider extends BaseProvider { }) } + // 处理QwenLM的流式输出 + if (this.provider.id === 'qwenlm') { + let accumulatedText = '' + for await (const chunk of stream) { + if (window.keyv.get(EVENT_NAMES.CHAT_COMPLETION_PAUSED)) { + break + } + if (time_first_token_millsec == 0) { + time_first_token_millsec = new Date().getTime() - start_time_millsec + } + + // 获取当前块的完整内容 + const currentContent = chunk.choices[0]?.delta?.content || '' + + // 如果内容与累积的内容不同,则只发送增量部分 + if (currentContent !== accumulatedText) { + const deltaText = currentContent.slice(accumulatedText.length) + accumulatedText = currentContent // 更新累积的文本 + + const time_completion_millsec = new Date().getTime() - start_time_millsec + onChunk({ + text: deltaText, + usage: chunk.usage, + metrics: { + completion_tokens: chunk.usage?.completion_tokens, + time_completion_millsec, + time_first_token_millsec + } + }) + } + } + return + } + for await (const chunk of stream) { if (window.keyv.get(EVENT_NAMES.CHAT_COMPLETION_PAUSED)) { break diff --git a/src/renderer/src/store/index.ts b/src/renderer/src/store/index.ts index 6a21f698..a7b10db9 100644 --- a/src/renderer/src/store/index.ts +++ b/src/renderer/src/store/index.ts @@ -30,7 +30,7 @@ const persistedReducer = persistReducer( { key: 'cherry-studio', storage, - version: 55, + version: 56, blacklist: ['runtime'], migrate }, diff --git a/src/renderer/src/store/llm.ts b/src/renderer/src/store/llm.ts index 384cd36c..906bdda2 100644 --- a/src/renderer/src/store/llm.ts +++ b/src/renderer/src/store/llm.ts @@ -323,6 +323,16 @@ const initialState: LlmState = { models: SYSTEM_MODELS.jina, isSystem: true, enabled: false + }, + { + id: 'qwenlm', + name: 'QwenLM', + type: 'openai', + apiKey: '', + 
apiHost: 'https://chat.qwenlm.ai/api/', + models: SYSTEM_MODELS.qwenlm, + isSystem: true, + enabled: false } ], settings: { diff --git a/src/renderer/src/store/migrate.ts b/src/renderer/src/store/migrate.ts index c0e47a4d..e57f2d4e 100644 --- a/src/renderer/src/store/migrate.ts +++ b/src/renderer/src/store/migrate.ts @@ -799,6 +799,19 @@ const migrateConfig = { } } return state + }, + '56': (state: RootState) => { + state.llm.providers.push({ + id: 'qwenlm', + name: 'QwenLM', + type: 'openai', + apiKey: '', + apiHost: 'https://chat.qwenlm.ai/api/', + models: SYSTEM_MODELS.qwenlm, + isSystem: true, + enabled: false + }) + return state } }