feat: add support for qwenlm and image upload (#726)

* feat: add support for qwenlm and image upload
* fix: qwenlm return
* feat: add provider config
commit bc454d4dec
parent d388aeecfb

@@ -117,6 +117,7 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
   ipcMain.handle('file:base64Image', fileManager.base64Image)
   ipcMain.handle('file:download', fileManager.downloadFile)
   ipcMain.handle('file:copy', fileManager.copyFile)
+  ipcMain.handle('file:binaryFile', fileManager.binaryFile)

   // minapp
   ipcMain.handle('minapp', (_, args) => {

@@ -263,6 +263,13 @@ class FileStorage {
     }
   }

+  public binaryFile = async (_: Electron.IpcMainInvokeEvent, id: string): Promise<{ data: Buffer; mime: string }> => {
+    const filePath = path.join(this.storageDir, id)
+    const data = await fs.promises.readFile(filePath)
+    const mime = `image/${path.extname(filePath).slice(1)}`
+    return { data, mime }
+  }
+
   public clear = async (): Promise<void> => {
     await fs.promises.rmdir(this.storageDir, { recursive: true })
     await this.initStorageDir()
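
With the handler above registered on the 'file:binaryFile' channel, the renderer can fetch a stored file's raw bytes. A minimal usage sketch, assuming the window.api.file.binaryFile bridge declared later in this diff and a hypothetical fileId; a Buffer sent over IPC generally reaches the renderer as a plain byte array, which Blob accepts:

// Renderer-side sketch (hypothetical helper, not part of this commit)
async function storedImageToObjectUrl(fileId: string): Promise<string> {
  const { data, mime } = await window.api.file.binaryFile(fileId)
  const blob = new Blob([data], { type: mime }) // data arrives as raw bytes over IPC
  return URL.createObjectURL(blob) // usable as an <img> src
}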

src/preload/index.d.ts (vendored): 1 change

@@ -53,6 +53,7 @@ declare global {
       base64Image: (fileId: string) => Promise<{ mime: string; base64: string; data: string }>
       download: (url: string) => Promise<FileType | null>
       copy: (fileId: string, destPath: string) => Promise<void>
+      binaryFile: (fileId: string) => Promise<{ data: Buffer; mime: string }>
     }
     export: {
       toWord: (markdown: string, fileName: string) => Promise<void>

@@ -43,7 +43,8 @@ const api = {
     saveImage: (name: string, data: string) => ipcRenderer.invoke('file:saveImage', name, data),
     base64Image: (fileId: string) => ipcRenderer.invoke('file:base64Image', fileId),
     download: (url: string) => ipcRenderer.invoke('file:download', url),
-    copy: (fileId: string, destPath: string) => ipcRenderer.invoke('file:copy', fileId, destPath)
+    copy: (fileId: string, destPath: string) => ipcRenderer.invoke('file:copy', fileId, destPath),
+    binaryFile: (fileId: string) => ipcRenderer.invoke('file:binaryFile', fileId)
   },
   export: {
     toWord: (markdown: string, fileName: string) => ipcRenderer.invoke('export:word', markdown, fileName)
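
The hunk above extends the api object in the preload script; in Electron this object is normally exposed to the renderer through contextBridge. A minimal sketch of that wiring, shown only for orientation since the exposeInMainWorld call itself is outside this diff:

import { contextBridge, ipcRenderer } from 'electron'

// Orientation-only sketch: expose the file bridge (including the new
// binaryFile call) on window.api in the renderer.
contextBridge.exposeInMainWorld('api', {
  file: {
    binaryFile: (fileId: string) => ipcRenderer.invoke('file:binaryFile', fileId)
  }
})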

BIN src/renderer/src/assets/images/providers/qwenlm.png (new binary file, 81 KiB, not shown)

@@ -264,6 +264,56 @@ export function getModelLogo(modelId: string) {
 }

 export const SYSTEM_MODELS: Record<string, Model[]> = {
+  qwenlm: [
+    {
+      id: 'qwen-plus-latest',
+      provider: 'qwenlm',
+      name: 'Qwen2.5-Plus',
+      group: 'Qwen'
+    },
+    {
+      id: 'qvq-72b-preview',
+      provider: 'qwenlm',
+      name: 'QVQ-72B-Preview',
+      group: 'Qwen'
+    },
+    {
+      id: 'qwq-32b-preview',
+      provider: 'qwenlm',
+      name: 'QwQ-32B-Preview',
+      group: 'Qwen'
+    },
+    {
+      id: 'qwen2.5-coder-32b-instruct',
+      provider: 'qwenlm',
+      name: 'Qwen2.5-Coder-32B-Instruct',
+      group: 'Qwen'
+    },
+    {
+      id: 'qwen-vl-max-latest',
+      provider: 'qwenlm',
+      name: 'Qwen2-VL-Max',
+      group: 'Qwen'
+    },
+    {
+      id: 'qwen-turbo-latest',
+      provider: 'qwenlm',
+      name: 'Qwen2.5-Turbo',
+      group: 'Qwen'
+    },
+    {
+      id: 'qwen2.5-72b-instruct',
+      provider: 'qwenlm',
+      name: 'Qwen2.5-72B-Instruct',
+      group: 'Qwen'
+    },
+    {
+      id: 'qwen2.5-32b-instruct',
+      provider: 'qwenlm',
+      name: 'Qwen2.5-32B-Instruct',
+      group: 'Qwen'
+    }
+  ],
   aihubmix: [
     {
       id: 'gpt-4o',
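
All eight QwenLM entries share group: 'Qwen', which is the field model pickers typically bucket on. An illustrative helper (not part of this commit) showing that grouping:

// Illustration only: bucket a provider's models by their group field,
// e.g. groupModels(SYSTEM_MODELS.qwenlm) yields { Qwen: [/* 8 models */] }.
function groupModels(models: Model[]): Record<string, Model[]> {
  return models.reduce<Record<string, Model[]>>((acc, model) => {
    const bucket = acc[model.group] ?? (acc[model.group] = [])
    bucket.push(model)
    return acc
  }, {})
}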

@@ -28,6 +28,7 @@ import StepProviderLogo from '@renderer/assets/images/providers/step.png'
 import TogetherProviderLogo from '@renderer/assets/images/providers/together.png'
 import ZeroOneProviderLogo from '@renderer/assets/images/providers/zero-one.png'
 import ZhipuProviderLogo from '@renderer/assets/images/providers/zhipu.png'
+import QwenLMProviderLogo from '@renderer/assets/images/providers/qwenlm.png'

 export function getProviderLogo(providerId: string) {
   switch (providerId) {

@@ -91,6 +92,8 @@ export function getProviderLogo(providerId: string) {
       return MistralProviderLogo
     case 'jina':
       return JinaProviderLogo
+    case 'qwenlm':
+      return QwenLMProviderLogo
     default:
       return undefined
   }

@@ -418,5 +421,16 @@ export const PROVIDER_CONFIG = {
       docs: 'https://learn.microsoft.com/en-us/azure/ai-services/openai/',
       models: 'https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models'
     }
+  },
+  qwenlm: {
+    api: {
+      url: 'https://chat.qwenlm.ai/api/'
+    },
+    websites: {
+      official: 'https://chat.qwenlm.ai',
+      apiKey: 'https://chat.qwenlm.ai',
+      docs: 'https://chat.qwenlm.ai',
+      models: 'https://chat.qwenlm.ai'
+    }
   }
 }
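
Note that api.url (and the matching apiHost default later in the diff) ends with a trailing slash; the provider code builds endpoint URLs by plain string concatenation, so that slash is load-bearing. A small illustration:

// Illustration: the upload path later in this diff is built by concatenation,
// so 'https://chat.qwenlm.ai/api/' + 'v1/files/' must join cleanly.
const apiHost = 'https://chat.qwenlm.ai/api/'
const uploadUrl = `${apiHost}v1/files/` // => https://chat.qwenlm.ai/api/v1/files/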

@@ -326,7 +326,8 @@
     "together": "Together",
     "yi": "Yi",
     "zhinao": "360AI",
-    "zhipu": "ZHIPU AI"
+    "zhipu": "ZHIPU AI",
+    "qwenlm": "QwenLM"
   },
   "settings": {
     "about": "About & Feedback",

@@ -324,7 +324,8 @@
     "together": "Together",
     "yi": "零一万物",
     "zhinao": "360智脳",
-    "zhipu": "智譜AI"
+    "zhipu": "智譜AI",
+    "qwenlm": "QwenLM"
   },
   "settings": {
     "about": "について",

@@ -326,7 +326,8 @@
     "together": "Together",
     "yi": "Yi",
     "zhinao": "360AI",
-    "zhipu": "ZHIPU AI"
+    "zhipu": "ZHIPU AI",
+    "qwenlm": "QwenLM"
   },
   "settings": {
     "about": "О программе и обратная связь",

@@ -327,7 +327,8 @@
     "together": "Together",
     "yi": "零一万物",
     "zhinao": "360智脑",
-    "zhipu": "智谱AI"
+    "zhipu": "智谱AI",
+    "qwenlm": "QwenLM"
   },
   "settings": {
     "about": "关于我们",

@@ -326,7 +326,8 @@
     "together": "Together",
     "yi": "零一萬物",
     "zhinao": "360智腦",
-    "zhipu": "智譜AI"
+    "zhipu": "智譜AI",
+    "qwenlm": "QwenLM"
   },
   "settings": {
     "about": "關於與回饋",

@@ -46,6 +46,33 @@ export default class OpenAIProvider extends BaseProvider {
     return providers.includes(this.provider.id)
   }

+  private async uploadImageToQwenLM(image_file: Buffer, file_name: string, mime: string): Promise<string> {
+    try {
+      // Create FormData
+      const formData = new FormData()
+      formData.append('file', new Blob([image_file], { type: mime }), file_name)
+
+      // Send the upload request
+      const response = await fetch(`${this.provider.apiHost}v1/files/`, {
+        method: 'POST',
+        headers: {
+          Authorization: `Bearer ${this.apiKey}`
+        },
+        body: formData
+      })
+
+      if (!response.ok) {
+        throw new Error('Failed to upload image to QwenLM')
+      }
+
+      const data = await response.json()
+      return data.id
+    } catch (error) {
+      console.error('Error uploading image to QwenLM:', error)
+      throw error
+    }
+  }
+
   private async getMessageParam(
     message: Message,
     model: Model
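
uploadImageToQwenLM posts the image as multipart form data and assumes the files endpoint answers with JSON carrying an id field. A standalone sketch of the same flow with that assumption spelled out; the endpoint path and response shape are mirrored from the diff, not from documented API guarantees:

// Standalone sketch mirroring the helper above; the response shape is an assumption.
interface QwenLMFileResponse {
  id: string
}

async function uploadImage(
  apiHost: string,
  apiKey: string,
  bytes: Uint8Array,
  fileName: string,
  mime: string
): Promise<string> {
  const form = new FormData()
  form.append('file', new Blob([bytes], { type: mime }), fileName)

  const response = await fetch(`${apiHost}v1/files/`, {
    method: 'POST',
    headers: { Authorization: `Bearer ${apiKey}` },
    body: form
  })
  if (!response.ok) {
    throw new Error(`QwenLM upload failed with status ${response.status}`)
  }
  return ((await response.json()) as QwenLMFileResponse).id
}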

@@ -94,20 +121,48 @@ export default class OpenAIProvider extends BaseProvider {
       }
     ]

-    for (const file of message.files || []) {
-      if (file.type === FileTypes.IMAGE && isVision) {
-        const image = await window.api.file.base64Image(file.id + file.ext)
-        parts.push({
-          type: 'image_url',
-          image_url: { url: image.data }
-        })
-      }
-      if ([FileTypes.TEXT, FileTypes.DOCUMENT].includes(file.type)) {
-        const fileContent = await (await window.api.file.read(file.id + file.ext)).trim()
-        parts.push({
-          type: 'text',
-          text: file.origin_name + '\n' + fileContent
-        })
+    // Upload images to QwenLM
+    if (this.provider.id === 'qwenlm') {
+      let qwenlm_image_url: { type: string; image: string }[] = []
+
+      for (const file of message.files || []) {
+        if (file.type === FileTypes.IMAGE && isVision) {
+          const image = await window.api.file.binaryFile(file.id + file.ext)
+
+          const imageId = await this.uploadImageToQwenLM(image.data, file.origin_name, image.mime)
+          qwenlm_image_url.push({
+            type: 'image',
+            image: imageId
+          })
+        }
+        if ([FileTypes.TEXT, FileTypes.DOCUMENT].includes(file.type)) {
+          const fileContent = await (await window.api.file.read(file.id + file.ext)).trim()
+          parts.push({
+            type: 'text',
+            text: file.origin_name + '\n' + fileContent
+          })
+        }
+      }
+      return {
+        role: message.role,
+        content: [...parts, ...qwenlm_image_url]
+      } as ChatCompletionMessageParam
+    } else {
+      for (const file of message.files || []) {
+        if (file.type === FileTypes.IMAGE && isVision) {
+          const image = await window.api.file.base64Image(file.id + file.ext)
+          parts.push({
+            type: 'image_url',
+            image_url: { url: image.data }
+          })
+        }
+        if ([FileTypes.TEXT, FileTypes.DOCUMENT].includes(file.type)) {
+          const fileContent = await (await window.api.file.read(file.id + file.ext)).trim()
+          parts.push({
+            type: 'text',
+            text: file.origin_name + '\n' + fileContent
+          })
+        }
       }
     }

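
The net effect of the branch above: for the qwenlm provider, image attachments are referenced by the id returned from uploadImageToQwenLM using { type: 'image', image: <id> } parts, while other providers keep OpenAI-style image_url data URLs. A hedged sketch of what an assembled QwenLM message might look like (the id value is hypothetical):

// Hypothetical shape of a qwenlm message produced by the branch above
const qwenlmMessage = {
  role: 'user',
  content: [
    { type: 'text', text: 'Describe this screenshot' },
    { type: 'image', image: 'f0a1b2c3' } // id returned by the upload helper (made-up value)
  ]
}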

@@ -172,6 +227,40 @@ export default class OpenAIProvider extends BaseProvider {
       })
     }

+    // Handle QwenLM streaming output
+    if (this.provider.id === 'qwenlm') {
+      let accumulatedText = ''
+      for await (const chunk of stream) {
+        if (window.keyv.get(EVENT_NAMES.CHAT_COMPLETION_PAUSED)) {
+          break
+        }
+        if (time_first_token_millsec == 0) {
+          time_first_token_millsec = new Date().getTime() - start_time_millsec
+        }
+
+        // Get the full content of the current chunk
+        const currentContent = chunk.choices[0]?.delta?.content || ''
+
+        // If the content differs from what has accumulated, send only the incremental part
+        if (currentContent !== accumulatedText) {
+          const deltaText = currentContent.slice(accumulatedText.length)
+          accumulatedText = currentContent // update the accumulated text
+
+          const time_completion_millsec = new Date().getTime() - start_time_millsec
+          onChunk({
+            text: deltaText,
+            usage: chunk.usage,
+            metrics: {
+              completion_tokens: chunk.usage?.completion_tokens,
+              time_completion_millsec,
+              time_first_token_millsec
+            }
+          })
+        }
+      }
+      return
+    }
+
     for await (const chunk of stream) {
       if (window.keyv.get(EVENT_NAMES.CHAT_COMPLETION_PAUSED)) {
         break
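
The accumulatedText bookkeeping exists because, as handled here, each QwenLM chunk can carry the full text generated so far rather than only the new tokens; forwarding currentContent directly would duplicate output, so only the unseen suffix is emitted. A tiny standalone illustration of that cumulative-to-delta step:

// Illustration: convert cumulative chunk contents into incremental deltas.
function extractDelta(accumulated: string, currentContent: string): { delta: string; accumulated: string } {
  if (currentContent === accumulated) {
    return { delta: '', accumulated }
  }
  return { delta: currentContent.slice(accumulated.length), accumulated: currentContent }
}

// extractDelta('', 'Hel')      => { delta: 'Hel', accumulated: 'Hel' }
// extractDelta('Hel', 'Hello') => { delta: 'lo', accumulated: 'Hello' }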

@@ -30,7 +30,7 @@ const persistedReducer = persistReducer(
   {
     key: 'cherry-studio',
     storage,
-    version: 55,
+    version: 56,
     blacklist: ['runtime'],
     migrate
   },

@@ -323,6 +323,16 @@ const initialState: LlmState = {
       models: SYSTEM_MODELS.jina,
       isSystem: true,
       enabled: false
+    },
+    {
+      id: 'qwenlm',
+      name: 'QwenLM',
+      type: 'openai',
+      apiKey: '',
+      apiHost: 'https://chat.qwenlm.ai/api/',
+      models: SYSTEM_MODELS.qwenlm,
+      isSystem: true,
+      enabled: false
     }
   ],
   settings: {

@@ -799,6 +799,19 @@ const migrateConfig = {
       }
     }
     return state
+  },
+  '56': (state: RootState) => {
+    state.llm.providers.push({
+      id: 'qwenlm',
+      name: 'QwenLM',
+      type: 'openai',
+      apiKey: '',
+      apiHost: 'https://chat.qwenlm.ai/api/',
+      models: SYSTEM_MODELS.qwenlm,
+      isSystem: true,
+      enabled: false
+    })
+    return state
   }
 }

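
With the persist version bumped from 55 to 56 earlier in the diff, redux-persist runs this '56' migration once when an older store is rehydrated, appending the QwenLM provider. A defensive variant (illustration only, not in the commit) that stays idempotent if the migration were ever replayed:

// Illustration only; assumes the same RootState and SYSTEM_MODELS imports as the surrounding file.
const migration56 = (state: RootState): RootState => {
  if (!state.llm.providers.some((provider) => provider.id === 'qwenlm')) {
    state.llm.providers.push({
      id: 'qwenlm',
      name: 'QwenLM',
      type: 'openai',
      apiKey: '',
      apiHost: 'https://chat.qwenlm.ai/api/',
      models: SYSTEM_MODELS.qwenlm,
      isSystem: true,
      enabled: false
    })
  }
  return state
}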