refactor: add qwenlm provider

kangfenmao 2025-01-19 15:35:31 +08:00
parent fd7132cd3a
commit 67b63ee07a
8 changed files with 202 additions and 132 deletions

View File

@@ -54,6 +54,7 @@ const PopupContainer: React.FC<Props> = ({ app, resolve }) => {
    const newPinned = isPinned ? pinned.filter((item) => item.id !== app.id) : [...pinned, app]
    updatePinnedMinapps(newPinned)
  }

  const Title = () => {
    return (
      <TitleContainer style={{ justifyContent: 'space-between' }}>
@@ -63,7 +64,7 @@ const PopupContainer: React.FC<Props> = ({ app, resolve }) => {
            <ReloadOutlined />
          </Button>
          <Button onClick={onTogglePin} className={isPinned ? 'pinned' : ''}>
-           <PushpinOutlined />
+           <PushpinOutlined style={{ fontSize: 16 }} />
          </Button>
          {canOpenExternalLink && (
            <Button onClick={onOpenLink}>

View File

@@ -1,38 +1,38 @@
-import BaiduAiAppLogo from '@renderer/assets/images/apps/baidu-ai.png'
+import BaiduAiAppLogo from '@renderer/assets/images/apps/baidu-ai.png?url'
-import BaicuanAppLogo from '@renderer/assets/images/apps/baixiaoying.webp'
+import BaicuanAppLogo from '@renderer/assets/images/apps/baixiaoying.webp?url'
-import BoltAppLogo from '@renderer/assets/images/apps/bolt.svg'
+import BoltAppLogo from '@renderer/assets/images/apps/bolt.svg?url'
-import DevvAppLogo from '@renderer/assets/images/apps/devv.png'
+import DevvAppLogo from '@renderer/assets/images/apps/devv.png?url'
-import DoubaoAppLogo from '@renderer/assets/images/apps/doubao.png'
+import DoubaoAppLogo from '@renderer/assets/images/apps/doubao.png?url'
-import DuckDuckGoAppLogo from '@renderer/assets/images/apps/duckduckgo.webp'
+import DuckDuckGoAppLogo from '@renderer/assets/images/apps/duckduckgo.webp?url'
-import FeloAppLogo from '@renderer/assets/images/apps/felo.png'
+import FeloAppLogo from '@renderer/assets/images/apps/felo.png?url'
-import GeminiAppLogo from '@renderer/assets/images/apps/gemini.png'
+import GeminiAppLogo from '@renderer/assets/images/apps/gemini.png?url'
-import GensparkLogo from '@renderer/assets/images/apps/genspark.jpg'
+import GensparkLogo from '@renderer/assets/images/apps/genspark.jpg?url'
-import GithubCopilotLogo from '@renderer/assets/images/apps/github-copilot.webp'
+import GithubCopilotLogo from '@renderer/assets/images/apps/github-copilot.webp?url'
-import GrokAppLogo from '@renderer/assets/images/apps/grok.png'
+import GrokAppLogo from '@renderer/assets/images/apps/grok.png?url'
-import HikaLogo from '@renderer/assets/images/apps/hika.webp'
+import HikaLogo from '@renderer/assets/images/apps/hika.webp?url'
-import HuggingChatLogo from '@renderer/assets/images/apps/huggingchat.svg'
+import HuggingChatLogo from '@renderer/assets/images/apps/huggingchat.svg?url'
-import KimiAppLogo from '@renderer/assets/images/apps/kimi.jpg'
+import KimiAppLogo from '@renderer/assets/images/apps/kimi.jpg?url'
-import MetasoAppLogo from '@renderer/assets/images/apps/metaso.webp'
+import MetasoAppLogo from '@renderer/assets/images/apps/metaso.webp?url'
-import NamiAiSearchLogo from '@renderer/assets/images/apps/nm.webp'
+import NamiAiSearchLogo from '@renderer/assets/images/apps/nm.webp?url'
-import PerplexityAppLogo from '@renderer/assets/images/apps/perplexity.webp'
+import PerplexityAppLogo from '@renderer/assets/images/apps/perplexity.webp?url'
-import PoeAppLogo from '@renderer/assets/images/apps/poe.webp'
+import PoeAppLogo from '@renderer/assets/images/apps/poe.webp?url'
-import ZhipuProviderLogo from '@renderer/assets/images/apps/qingyan.png'
+import ZhipuProviderLogo from '@renderer/assets/images/apps/qingyan.png?url'
-import QwenlmAppLogo from '@renderer/assets/images/apps/qwenlm.webp'
+import QwenlmAppLogo from '@renderer/assets/images/apps/qwenlm.webp?url'
-import SensetimeAppLogo from '@renderer/assets/images/apps/sensetime.png'
+import SensetimeAppLogo from '@renderer/assets/images/apps/sensetime.png?url'
-import SparkDeskAppLogo from '@renderer/assets/images/apps/sparkdesk.png'
+import SparkDeskAppLogo from '@renderer/assets/images/apps/sparkdesk.png?url'
-import ThinkAnyLogo from '@renderer/assets/images/apps/thinkany.webp'
+import ThinkAnyLogo from '@renderer/assets/images/apps/thinkany.webp?url'
-import TiangongAiLogo from '@renderer/assets/images/apps/tiangong.png'
+import TiangongAiLogo from '@renderer/assets/images/apps/tiangong.png?url'
-import WanZhiAppLogo from '@renderer/assets/images/apps/wanzhi.jpg'
+import WanZhiAppLogo from '@renderer/assets/images/apps/wanzhi.jpg?url'
-import TencentYuanbaoAppLogo from '@renderer/assets/images/apps/yuanbao.png'
+import TencentYuanbaoAppLogo from '@renderer/assets/images/apps/yuanbao.png?url'
-import YuewenAppLogo from '@renderer/assets/images/apps/yuewen.png'
+import YuewenAppLogo from '@renderer/assets/images/apps/yuewen.png?url'
-import ZhihuAppLogo from '@renderer/assets/images/apps/zhihu.png'
+import ZhihuAppLogo from '@renderer/assets/images/apps/zhihu.png?url'
-import ClaudeAppLogo from '@renderer/assets/images/models/claude.png'
+import ClaudeAppLogo from '@renderer/assets/images/models/claude.png?url'
-import HailuoModelLogo from '@renderer/assets/images/models/hailuo.png'
+import HailuoModelLogo from '@renderer/assets/images/models/hailuo.png?url'
-import QwenModelLogo from '@renderer/assets/images/models/qwen.png'
+import QwenModelLogo from '@renderer/assets/images/models/qwen.png?url'
-import DeepSeekProviderLogo from '@renderer/assets/images/providers/deepseek.png'
+import DeepSeekProviderLogo from '@renderer/assets/images/providers/deepseek.png?url'
-import GroqProviderLogo from '@renderer/assets/images/providers/groq.png'
+import GroqProviderLogo from '@renderer/assets/images/providers/groq.png?url'
-import OpenAiProviderLogo from '@renderer/assets/images/providers/openai.png'
+import OpenAiProviderLogo from '@renderer/assets/images/providers/openai.png?url'
-import SiliconFlowProviderLogo from '@renderer/assets/images/providers/silicon.png'
+import SiliconFlowProviderLogo from '@renderer/assets/images/providers/silicon.png?url'
import MinApp from '@renderer/components/MinApp'
import { MinAppType } from '@renderer/types'
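
An aside on what the `?url` suffix changes, assuming the renderer is bundled with Vite (which this import convention implies): a plain asset import can be inlined as a base64 data URI below the asset-size threshold or intercepted by plugins (notably for `.svg`), whereas `?url` always resolves to the emitted file's URL string. A minimal sketch, not code from this commit:

// Hedged sketch of Vite asset-import semantics.
// With `?url`, the import is always a URL string pointing at the emitted asset.
import qwenLogoUrl from '@renderer/assets/images/apps/qwenlm.webp?url'

const img = new Image()
img.src = qwenLogoUrl // e.g. '/assets/qwenlm-4f2a1c.webp' after build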

View File

@@ -30,6 +30,7 @@ const App: FC<Props> = ({ app, onClick, size = 60 }) => {
      key: 'togglePin',
      label: isPinned ? t('minapp.sidebar.remove.title') : t('minapp.sidebar.add.title'),
      onClick: () => {
+       console.debug('togglePin', app)
        const newPinned = isPinned ? pinned.filter((item) => item.id !== app.id) : [...(pinned || []), app]
        updatePinnedMinapps(newPinned)
      }

View File

@@ -15,8 +15,6 @@ const AppsPage: FC = () => {
  const [search, setSearch] = useState('')
  const { minapps } = useMinapps()
- console.debug('minapps', minapps)
-
  const filteredApps = search
    ? minapps.filter(
        (app) => app.name.toLowerCase().includes(search.toLowerCase()) || app.url.includes(search.toLowerCase())

View File

@@ -46,33 +46,6 @@ export default class OpenAIProvider extends BaseProvider {
    return providers.includes(this.provider.id)
  }

- private async uploadImageToQwenLM(image_file: Buffer, file_name: string, mime: string): Promise<string> {
-   try {
-     // Create the FormData payload
-     const formData = new FormData()
-     formData.append('file', new Blob([image_file], { type: mime }), file_name)
-
-     // Send the upload request
-     const response = await fetch(`${this.provider.apiHost}v1/files/`, {
-       method: 'POST',
-       headers: {
-         Authorization: `Bearer ${this.apiKey}`
-       },
-       body: formData
-     })
-
-     if (!response.ok) {
-       throw new Error('Failed to upload image to QwenLM')
-     }
-
-     const data = await response.json()
-     return data.id
-   } catch (error) {
-     console.error('Error uploading image to QwenLM:', error)
-     throw error
-   }
- }

  private async getMessageParam(
    message: Message,
    model: Model
@@ -121,34 +94,6 @@ export default class OpenAIProvider extends BaseProvider {
      }
    ]

-   // Upload images for QwenLM
-   if (this.provider.id === 'qwenlm') {
-     const qwenlm_image_url: { type: string; image: string }[] = []
-     for (const file of message.files || []) {
-       if (file.type === FileTypes.IMAGE && isVision) {
-         const image = await window.api.file.binaryFile(file.id + file.ext)
-         const imageId = await this.uploadImageToQwenLM(image.data, file.origin_name, image.mime)
-         qwenlm_image_url.push({
-           type: 'image',
-           image: imageId
-         })
-       }
-       if ([FileTypes.TEXT, FileTypes.DOCUMENT].includes(file.type)) {
-         const fileContent = await (await window.api.file.read(file.id + file.ext)).trim()
-         parts.push({
-           type: 'text',
-           text: file.origin_name + '\n' + fileContent
-         })
-       }
-     }
-     return {
-       role: message.role,
-       content: [...parts, ...qwenlm_image_url]
-     } as ChatCompletionMessageParam
-   }

    for (const file of message.files || []) {
      if (file.type === FileTypes.IMAGE && isVision) {
        const image = await window.api.file.base64Image(file.id + file.ext)
@@ -183,10 +128,6 @@ export default class OpenAIProvider extends BaseProvider {
    const _messages = filterContextMessages(takeRight(messages, contextCount + 1))
    onFilterMessages(_messages)

-   if (this.provider.id === 'qwenlm' && _messages[0]?.role !== 'user') {
-     userMessages.push({ role: 'user', content: '' })
-   }

    for (const message of _messages) {
      userMessages.push(await this.getMessageParam(message, model))
    }
@@ -231,40 +172,6 @@ export default class OpenAIProvider extends BaseProvider {
      })
    }

-   // Handle QwenLM's streaming output
-   if (this.provider.id === 'qwenlm') {
-     let accumulatedText = ''
-     for await (const chunk of stream) {
-       if (window.keyv.get(EVENT_NAMES.CHAT_COMPLETION_PAUSED)) {
-         break
-       }
-       if (time_first_token_millsec == 0) {
-         time_first_token_millsec = new Date().getTime() - start_time_millsec
-       }
-
-       // Get the full content of the current chunk
-       const currentContent = chunk.choices[0]?.delta?.content || ''
-
-       // If it differs from what has accumulated, send only the delta
-       if (currentContent !== accumulatedText) {
-         const deltaText = currentContent.slice(accumulatedText.length)
-         accumulatedText = currentContent // update the accumulated text
-         const time_completion_millsec = new Date().getTime() - start_time_millsec
-         onChunk({
-           text: deltaText,
-           usage: chunk.usage,
-           metrics: {
-             completion_tokens: chunk.usage?.completion_tokens,
-             time_completion_millsec,
-             time_first_token_millsec
-           }
-         })
-       }
-     }
-     return
-   }

    for await (const chunk of stream) {
      if (window.keyv.get(EVENT_NAMES.CHAT_COMPLETION_PAUSED)) {
        break
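
For contrast with the QwenLM-specific loop removed above: the generic path that remains here can forward each chunk directly, because OpenAI-style stream chunks carry only newly generated text in `delta.content`. A simplified, self-contained sketch with assumed shapes, not the full method:

// Hedged sketch: forwarding an already-incremental OpenAI-style stream.
type Chunk = { choices: { delta?: { content?: string } }[] }

async function forwardDeltas(stream: AsyncIterable<Chunk>, onChunk: (c: { text: string }) => void) {
  for await (const chunk of stream) {
    const delta = chunk.choices[0]?.delta?.content || ''
    if (delta) onChunk({ text: delta }) // each chunk is already only new text
  }
}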

View File

@@ -4,6 +4,7 @@ import AnthropicProvider from './AnthropicProvider'
import BaseProvider from './BaseProvider'
import GeminiProvider from './GeminiProvider'
import OpenAIProvider from './OpenAIProvider'
+import QwenLMProvider from './QwenLMProvider'

export default class ProviderFactory {
  static create(provider: Provider): BaseProvider {
@@ -12,6 +13,8 @@ export default class ProviderFactory {
        return new AnthropicProvider(provider)
      case 'gemini':
        return new GeminiProvider(provider)
+     case 'qwenlm':
+       return new QwenLMProvider(provider)
      default:
        return new OpenAIProvider(provider)
    }
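
A hypothetical call site for the new branch. The field values and import path below are illustrative; since the hunk does not show whether the switch keys on `provider.id` or `provider.type`, both are set to 'qwenlm' here:

// Hedged usage sketch; import path and Provider fields are assumptions.
import ProviderFactory from '@renderer/providers/ProviderFactory'

const provider = ProviderFactory.create({
  id: 'qwenlm',
  type: 'qwenlm',
  name: 'QwenLM',
  apiHost: 'https://example.com/api/', // placeholder host
  apiKey: '<key>',
  models: []
} as unknown as Provider)
// provider is now a QwenLMProvider rather than the default OpenAIProvider.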

View File

@@ -0,0 +1,160 @@
+import { getOpenAIWebSearchParams, isVisionModel } from '@renderer/config/models'
+import { getAssistantSettings, getDefaultModel } from '@renderer/services/AssistantService'
+import { EVENT_NAMES } from '@renderer/services/EventService'
+import { filterContextMessages } from '@renderer/services/MessagesService'
+import { FileTypes, Message, Model, Provider } from '@renderer/types'
+import { takeRight } from 'lodash'
+import OpenAI from 'openai'
+import { ChatCompletionContentPart, ChatCompletionMessageParam } from 'openai/resources'
+
+import { CompletionsParams } from '.'
+import OpenAIProvider from './OpenAIProvider'
+
+class QwenLMProvider extends OpenAIProvider {
+  constructor(provider: Provider) {
+    super(provider)
+  }
+
+  private async getMessageParams(
+    message: Message,
+    model: Model
+  ): Promise<OpenAI.Chat.Completions.ChatCompletionMessageParam> {
+    const isVision = isVisionModel(model)
+    const content = await this.getMessageContent(message)
+
+    if (!message.files) {
+      return {
+        role: message.role,
+        content
+      }
+    }
+
+    const parts: ChatCompletionContentPart[] = [
+      {
+        type: 'text',
+        text: content
+      }
+    ]
+
+    const qwenlm_image_url: { type: string; image: string }[] = []
+    for (const file of message.files || []) {
+      if (file.type === FileTypes.IMAGE && isVision) {
+        const image = await window.api.file.binaryFile(file.id + file.ext)
+        const imageId = await this.uploadImageToQwenLM(image.data, file.origin_name, image.mime)
+        qwenlm_image_url.push({
+          type: 'image',
+          image: imageId
+        })
+      }
+      if ([FileTypes.TEXT, FileTypes.DOCUMENT].includes(file.type)) {
+        const fileContent = await (await window.api.file.read(file.id + file.ext)).trim()
+        parts.push({
+          type: 'text',
+          text: file.origin_name + '\n' + fileContent
+        })
+      }
+    }
+
+    return {
+      role: message.role,
+      content: [...parts, ...qwenlm_image_url]
+    } as ChatCompletionMessageParam
+  }
+
+  private async uploadImageToQwenLM(image_file: Buffer, file_name: string, mime: string): Promise<string> {
+    try {
+      // Create the FormData payload
+      const formData = new FormData()
+      formData.append('file', new Blob([image_file], { type: mime }), file_name)
+
+      // Send the upload request
+      const response = await fetch(`${this.provider.apiHost}v1/files/`, {
+        method: 'POST',
+        headers: {
+          Authorization: `Bearer ${this.apiKey}`
+        },
+        body: formData
+      })
+
+      if (!response.ok) {
+        throw new Error('Failed to upload image to QwenLM')
+      }
+
+      const data = await response.json()
+      return data.id
+    } catch (error) {
+      console.error('Error uploading image to QwenLM:', error)
+      throw error
+    }
+  }
+
+  async completions({ messages, assistant, onChunk, onFilterMessages }: CompletionsParams): Promise<void> {
+    const defaultModel = getDefaultModel()
+    const model = assistant.model || defaultModel
+    const { contextCount, maxTokens } = getAssistantSettings(assistant)
+
+    const systemMessage = assistant.prompt ? { role: 'system', content: assistant.prompt } : undefined
+    const userMessages: ChatCompletionMessageParam[] = []
+
+    const _messages = filterContextMessages(takeRight(messages, contextCount + 1))
+    onFilterMessages(_messages)
+
+    if (_messages[0]?.role !== 'user') {
+      userMessages.push({ role: 'user', content: '' })
+    }
+
+    for (const message of _messages) {
+      userMessages.push(await this.getMessageParams(message, model))
+    }
+
+    let time_first_token_millsec = 0
+    const start_time_millsec = new Date().getTime()
+
+    // @ts-ignore key is not typed
+    const stream = await this.sdk.chat.completions.create({
+      model: model.id,
+      messages: [systemMessage, ...userMessages].filter(Boolean) as ChatCompletionMessageParam[],
+      temperature: assistant?.settings?.temperature,
+      top_p: assistant?.settings?.topP,
+      max_tokens: maxTokens,
+      stream: true,
+      ...(assistant.enableWebSearch ? getOpenAIWebSearchParams(model) : {}),
+      ...this.getCustomParameters(assistant)
+    })
+
+    let accumulatedText = ''
+    for await (const chunk of stream) {
+      if (window.keyv.get(EVENT_NAMES.CHAT_COMPLETION_PAUSED)) {
+        break
+      }
+      if (time_first_token_millsec == 0) {
+        time_first_token_millsec = new Date().getTime() - start_time_millsec
+      }
+
+      // Get the full content of the current chunk
+      const currentContent = chunk.choices[0]?.delta?.content || ''
+
+      // If it differs from what has accumulated, send only the delta
+      if (currentContent !== accumulatedText) {
+        const deltaText = currentContent.slice(accumulatedText.length)
+        accumulatedText = currentContent // update the accumulated text
+        const time_completion_millsec = new Date().getTime() - start_time_millsec
+        onChunk({
+          text: deltaText,
+          usage: chunk.usage,
+          metrics: {
+            completion_tokens: chunk.usage?.completion_tokens,
+            time_completion_millsec,
+            time_first_token_millsec
+          }
+        })
+      }
+    }
+  }
+}
+
+export default QwenLMProvider
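
The streaming loop above exists because QwenLM's endpoint appears to return the full accumulated text in each chunk rather than OpenAI-style deltas, so the provider diffs consecutive snapshots and emits only the new suffix. The same idea in isolation (function name and stream shape are illustrative, not part of the diff):

// Hedged sketch: turn a stream of cumulative snapshots into incremental deltas.
async function* toDeltas(snapshots: AsyncIterable<string>): AsyncGenerator<string> {
  let accumulated = ''
  for await (const text of snapshots) {
    if (text !== accumulated) {
      yield text.slice(accumulated.length) // only the unseen suffix
      accumulated = text
    }
  }
}

Note the assumption this shares with the provider code: each snapshot must extend the previous one. If the service ever rewrote earlier text, `slice(accumulated.length)` would yield an empty or misleading delta.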

View File

@@ -101,7 +101,7 @@ export type Provider = {
  isSystem?: boolean
}

-export type ProviderType = 'openai' | 'anthropic' | 'gemini'
+export type ProviderType = 'openai' | 'anthropic' | 'gemini' | 'qwenlm'

export type ModelType = 'text' | 'vision' | 'embedding'