feat: add gemini provider

kangfenmao 2024-08-13 16:51:52 +08:00
parent b0c479190c
commit e1c7a25b87
13 changed files with 333 additions and 36 deletions

View File

@@ -37,6 +37,7 @@
     "@electron-toolkit/eslint-config-prettier": "^2.0.0",
     "@electron-toolkit/eslint-config-ts": "^1.0.1",
     "@electron-toolkit/tsconfig": "^1.0.1",
+    "@google/generative-ai": "^0.16.0",
     "@hello-pangea/dnd": "^16.6.0",
     "@kangfenmao/keyv-storage": "^0.1.0",
     "@reduxjs/toolkit": "^2.2.5",
@@ -47,6 +48,7 @@
     "@vitejs/plugin-react": "^4.2.1",
     "ahooks": "^3.8.0",
     "antd": "^5.18.3",
+    "axios": "^1.7.3",
     "browser-image-compression": "^2.0.2",
     "dayjs": "^1.11.11",
     "dotenv-cli": "^7.4.2",

(binary image added, 6.1 KiB; content not shown)

(binary image added, 19 KiB; content not shown)

View File

@@ -0,0 +1,67 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 27.0.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg version="1.1" id="Standard_product_icon__x28_1:1_x29_"
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" width="192px" height="192px"
viewBox="0 0 192 192" enable-background="new 0 0 192 192" xml:space="preserve">
<symbol id="material_x5F_product_x5F_standard_x5F_icon_x5F_keylines_00000077318920148093339210000006245950728745084294_" viewBox="-96 -96 192 192">
<g opacity="0.4">
<defs>
<path id="SVGID_1_" opacity="0.4" d="M-96,96V-96H96V96H-96z"/>
</defs>
<clipPath id="SVGID_00000071517564283228984050000017848131202901217410_">
<use xlink:href="#SVGID_1_" overflow="visible"/>
</clipPath>
<g clip-path="url(#SVGID_00000071517564283228984050000017848131202901217410_)">
<g>
<path d="M95.75,95.75v-191.5h-191.5v191.5H95.75 M96,96H-96V-96H96V96L96,96z"/>
</g>
<circle fill="none" stroke="#000000" stroke-width="0.25" stroke-miterlimit="10" cx="0" cy="0" r="64"/>
</g>
<circle clip-path="url(#SVGID_00000071517564283228984050000017848131202901217410_)" fill="none" stroke="#000000" stroke-width="0.25" stroke-miterlimit="10" cx="0" cy="0" r="88"/>
<path clip-path="url(#SVGID_00000071517564283228984050000017848131202901217410_)" fill="none" stroke="#000000" stroke-width="0.25" stroke-miterlimit="10" d="
M64,76H-64c-6.6,0-12-5.4-12-12V-64c0-6.6,5.4-12,12-12H64c6.6,0,12,5.4,12,12V64C76,70.6,70.6,76,64,76z"/>
<path clip-path="url(#SVGID_00000071517564283228984050000017848131202901217410_)" fill="none" stroke="#000000" stroke-width="0.25" stroke-miterlimit="10" d="
M52,88H-52c-6.6,0-12-5.4-12-12V-76c0-6.6,5.4-12,12-12H52c6.6,0,12,5.4,12,12V76C64,82.6,58.6,88,52,88z"/>
<path clip-path="url(#SVGID_00000071517564283228984050000017848131202901217410_)" fill="none" stroke="#000000" stroke-width="0.25" stroke-miterlimit="10" d="
M76,64H-76c-6.6,0-12-5.4-12-12V-52c0-6.6,5.4-12,12-12H76c6.6,0,12,5.4,12,12V52C88,58.6,82.6,64,76,64z"/>
</g>
</symbol>
<rect id="bounding_box_1_" display="none" fill="none" width="192" height="192"/>
<g id="art_layer">
<g>
<path fill="#F9AB00" d="M96,181.92L96,181.92c6.63,0,12-5.37,12-12v-104H84v104C84,176.55,89.37,181.92,96,181.92z"/>
<g>
<path fill="#5BB974" d="M143.81,103.87C130.87,90.94,111.54,88.32,96,96l51.37,51.37c2.12,2.12,5.77,1.28,6.67-1.57
C158.56,131.49,155.15,115.22,143.81,103.87z"/>
</g>
<g>
<path fill="#129EAF" d="M48.19,103.87C61.13,90.94,80.46,88.32,96,96l-51.37,51.37c-2.12,2.12-5.77,1.28-6.67-1.57
C33.44,131.49,36.85,115.22,48.19,103.87z"/>
</g>
<g>
<path fill="#AF5CF7" d="M140,64c-20.44,0-37.79,13.4-44,32h81.24c3.33,0,5.55-3.52,4.04-6.49C173.56,74.36,157.98,64,140,64z"/>
</g>
<g>
<path fill="#FF8BCB" d="M104.49,42.26C90.03,56.72,87.24,78.45,96,96l57.45-57.45c2.36-2.36,1.44-6.42-1.73-7.45
C135.54,25.85,117.2,29.55,104.49,42.26z"/>
</g>
<g>
<path fill="#FA7B17" d="M87.51,42.26C101.97,56.72,104.76,78.45,96,96L38.55,38.55c-2.36-2.36-1.44-6.42,1.73-7.45
C56.46,25.85,74.8,29.55,87.51,42.26z"/>
</g>
<g>
<g>
<path fill="#4285F4" d="M52,64c20.44,0,37.79,13.4,44,32H14.76c-3.33,0-5.55-3.52-4.04-6.49C18.44,74.36,34.02,64,52,64z"/>
</g>
</g>
</g>
</g>
<g id="keylines" display="none">
<use xlink:href="#material_x5F_product_x5F_standard_x5F_icon_x5F_keylines_00000077318920148093339210000006245950728745084294_" width="192" height="192" id="material_x5F_product_x5F_standard_x5F_icon_x5F_keylines" x="-96" y="-96" transform="matrix(1 0 0 -1 96 96)" display="inline" overflow="visible"/>
</g>
</svg>

(SVG file above, 3.6 KiB)

(binary image added, 19 KiB; content not shown)

View File

@@ -33,6 +33,22 @@ export const SYSTEM_MODELS: Record<string, SystemModel[]> = {
       enabled: true
     }
   ],
+  gemini: [
+    {
+      id: 'gemini-1.5-flash',
+      provider: 'gemini',
+      name: 'Gemini 1.5 Flash',
+      group: 'Gemini 1.5',
+      enabled: true
+    },
+    {
+      id: 'gemini-1.5-pro-exp-0801',
+      provider: 'gemini',
+      name: 'Gemini 1.5 Pro Experimental 0801',
+      group: 'Gemini 1.5',
+      enabled: true
+    }
+  ],
   silicon: [
     {
       id: 'Qwen/Qwen2-7B-Instruct',

View File

@@ -3,10 +3,13 @@ import ChatGLMModelLogo from '@renderer/assets/images/models/chatglm.jpeg'
 import ChatGPTModelLogo from '@renderer/assets/images/models/chatgpt.jpeg'
 import ClaudeModelLogo from '@renderer/assets/images/models/claude.png'
 import DeepSeekModelLogo from '@renderer/assets/images/models/deepseek.png'
+import EmbeddingModelLogo from '@renderer/assets/images/models/embedding.png'
+import GeminiModelLogo from '@renderer/assets/images/models/gemini.png'
 import GemmaModelLogo from '@renderer/assets/images/models/gemma.jpeg'
 import LlamaModelLogo from '@renderer/assets/images/models/llama.jpeg'
 import MicrosoftModelLogo from '@renderer/assets/images/models/microsoft.png'
 import MixtralModelLogo from '@renderer/assets/images/models/mixtral.jpeg'
+import PalmModelLogo from '@renderer/assets/images/models/palm.svg'
 import QwenModelLogo from '@renderer/assets/images/models/qwen.png'
 import YiModelLogo from '@renderer/assets/images/models/yi.svg'
 import AiHubMixProviderLogo from '@renderer/assets/images/providers/aihubmix.jpg'
@@ -14,6 +17,7 @@ import AnthropicProviderLogo from '@renderer/assets/images/providers/anthropic.jpeg'
 import BaichuanProviderLogo from '@renderer/assets/images/providers/baichuan.png'
 import DashScopeProviderLogo from '@renderer/assets/images/providers/dashscope.png'
 import DeepSeekProviderLogo from '@renderer/assets/images/providers/deepseek.png'
+import GeminiProviderLogo from '@renderer/assets/images/providers/gemini.png'
 import GroqProviderLogo from '@renderer/assets/images/providers/groq.png'
 import MoonshotProviderLogo from '@renderer/assets/images/providers/moonshot.jpeg'
 import MoonshotModelLogo from '@renderer/assets/images/providers/moonshot.jpeg'
@@ -52,6 +56,8 @@ export function getProviderLogo(providerId: string) {
       return AnthropicProviderLogo
     case 'aihubmix':
       return AiHubMixProviderLogo
+    case 'gemini':
+      return GeminiProviderLogo
     default:
       return undefined
   }
@@ -75,7 +81,11 @@ export function getModelLogo(modelId: string) {
     moonshot: MoonshotModelLogo,
     phi: MicrosoftModelLogo,
     baichuan: BaichuanModelLogo,
-    claude: ClaudeModelLogo
+    claude: ClaudeModelLogo,
+    gemini: GeminiModelLogo,
+    embedding: EmbeddingModelLogo,
+    bison: PalmModelLogo,
+    palm: PalmModelLogo
   }

   for (const key in logoMap) {
@@ -242,5 +252,17 @@ export const PROVIDER_CONFIG = {
       docs: 'https://doc.aihubmix.com/',
       models: 'https://aihubmix.com/models'
     }
-  }
+  },
+  gemini: {
+    api: {
+      url: 'https://generativelanguage.googleapis.com',
+      editable: false
+    },
+    websites: {
+      official: 'https://gemini.google.com/',
+      apiKey: 'https://aistudio.google.com/app/apikey',
+      docs: 'https://ai.google.dev/gemini-api/docs',
+      models: 'https://ai.google.dev/gemini-api/docs/models/gemini'
+    }
   }
 }

View File

@@ -106,6 +106,7 @@
     },
     provider: {
       openai: 'OpenAI',
+      gemini: 'Gemini',
       deepseek: 'DeepSeek',
       moonshot: 'Moonshot',
       silicon: 'SiliconFlow',
@@ -323,6 +324,7 @@
     },
     provider: {
       openai: 'OpenAI',
+      gemini: 'Gemini',
       deepseek: '深度求索',
       moonshot: '月之暗面',
       silicon: '硅基流动',

View File

@ -1,10 +1,12 @@
import Anthropic from '@anthropic-ai/sdk' import Anthropic from '@anthropic-ai/sdk'
import { MessageCreateParamsNonStreaming, MessageParam } from '@anthropic-ai/sdk/resources' import { MessageCreateParamsNonStreaming, MessageParam } from '@anthropic-ai/sdk/resources'
import { GoogleGenerativeAI } from '@google/generative-ai'
import { DEFAULT_MAX_TOKENS } from '@renderer/config/constant' import { DEFAULT_MAX_TOKENS } from '@renderer/config/constant'
import { getOllamaKeepAliveTime } from '@renderer/hooks/useOllama' import { getOllamaKeepAliveTime } from '@renderer/hooks/useOllama'
import { Assistant, Message, Provider, Suggestion } from '@renderer/types' import { Assistant, Message, Provider, Suggestion } from '@renderer/types'
import { removeQuotes } from '@renderer/utils' import { removeQuotes } from '@renderer/utils'
import { sum, takeRight } from 'lodash' import axios from 'axios'
import { isEmpty, sum, takeRight } from 'lodash'
import OpenAI from 'openai' import OpenAI from 'openai'
import { ChatCompletionCreateParamsNonStreaming, ChatCompletionMessageParam } from 'openai/resources' import { ChatCompletionCreateParamsNonStreaming, ChatCompletionMessageParam } from 'openai/resources'
@ -15,6 +17,7 @@ export default class ProviderSDK {
provider: Provider provider: Provider
openaiSdk: OpenAI openaiSdk: OpenAI
anthropicSdk: Anthropic anthropicSdk: Anthropic
geminiSdk: GoogleGenerativeAI
constructor(provider: Provider) { constructor(provider: Provider) {
this.provider = provider this.provider = provider
@ -22,12 +25,17 @@ export default class ProviderSDK {
const baseURL = host.endsWith('/') ? host : `${provider.apiHost}/v1/` const baseURL = host.endsWith('/') ? host : `${provider.apiHost}/v1/`
this.anthropicSdk = new Anthropic({ apiKey: provider.apiKey, baseURL }) this.anthropicSdk = new Anthropic({ apiKey: provider.apiKey, baseURL })
this.openaiSdk = new OpenAI({ dangerouslyAllowBrowser: true, apiKey: provider.apiKey, baseURL }) this.openaiSdk = new OpenAI({ dangerouslyAllowBrowser: true, apiKey: provider.apiKey, baseURL })
this.geminiSdk = new GoogleGenerativeAI(provider.apiKey)
} }
private get isAnthropic() { private get isAnthropic() {
return this.provider.id === 'anthropic' return this.provider.id === 'anthropic'
} }
private get isGemini() {
return this.provider.id === 'gemini'
}
private get keepAliveTime() { private get keepAliveTime() {
return this.provider.id === 'ollama' ? getOllamaKeepAliveTime() : undefined return this.provider.id === 'ollama' ? getOllamaKeepAliveTime() : undefined
} }
@@ -42,7 +50,6 @@
     const { contextCount, maxTokens } = getAssistantSettings(assistant)
     const systemMessage = assistant.prompt ? { role: 'system', content: assistant.prompt } : undefined
     const userMessages = takeRight(messages, contextCount + 1).map((message) => ({
       role: message.role,
       content: message.content
@@ -66,25 +73,64 @@
           }
         })
       )
-    } else {
-      // @ts-ignore key is not typed
-      const stream = await this.openaiSdk.chat.completions.create({
-        model: model.id,
-        messages: [systemMessage, ...userMessages].filter(Boolean) as ChatCompletionMessageParam[],
-        stream: true,
-        temperature: assistant?.settings?.temperature,
-        max_tokens: maxTokens,
-        keep_alive: this.keepAliveTime
-      })
-      for await (const chunk of stream) {
-        if (window.keyv.get(EVENT_NAMES.CHAT_COMPLETION_PAUSED)) break
-        onChunk({ text: chunk.choices[0]?.delta?.content || '', usage: chunk.usage })
-      }
+      return
     }
+
+    if (this.isGemini) {
+      const geminiModel = this.geminiSdk.getGenerativeModel({
+        model: model.id,
+        systemInstruction: assistant.prompt,
+        generationConfig: {
+          maxOutputTokens: maxTokens,
+          temperature: assistant?.settings?.temperature
+        }
+      })
+
+      const userLastMessage = userMessages.pop()
+
+      const chat = geminiModel.startChat({
+        history: userMessages.map((message) => ({
+          role: message.role === 'user' ? 'user' : 'model',
+          parts: [{ text: message.content }]
+        }))
+      })
+
+      const userMessagesStream = await chat.sendMessageStream(userLastMessage?.content!)
+
+      for await (const chunk of userMessagesStream.stream) {
+        if (window.keyv.get(EVENT_NAMES.CHAT_COMPLETION_PAUSED)) break
+        onChunk({
+          text: chunk.text(),
+          usage: {
+            prompt_tokens: chunk.usageMetadata?.promptTokenCount || 0,
+            completion_tokens: chunk.usageMetadata?.candidatesTokenCount || 0,
+            total_tokens: chunk.usageMetadata?.totalTokenCount || 0
+          }
+        })
+      }
+
+      return
+    }
+
+    // @ts-ignore key is not typed
+    const stream = await this.openaiSdk.chat.completions.create({
+      model: model.id,
+      messages: [systemMessage, ...userMessages].filter(Boolean) as ChatCompletionMessageParam[],
+      stream: true,
+      temperature: assistant?.settings?.temperature,
+      max_tokens: maxTokens,
+      keep_alive: this.keepAliveTime
+    })
+
+    for await (const chunk of stream) {
+      if (window.keyv.get(EVENT_NAMES.CHAT_COMPLETION_PAUSED)) break
+      onChunk({ text: chunk.choices[0]?.delta?.content || '', usage: chunk.usage })
    }
   }

   public async translate(message: Message, assistant: Assistant) {
     const defaultModel = getDefaultModel()
+    const { maxTokens } = getAssistantSettings(assistant)
     const model = assistant.model || defaultModel
     const messages = [
       { role: 'system', content: assistant.prompt },
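For reference, the streaming path added in the hunk above follows the @google/generative-ai chat API. A minimal standalone sketch of that flow; the API key, model id, and sample history here are illustrative placeholders, not values from this commit:

import { GoogleGenerativeAI } from '@google/generative-ai'

async function streamReply(apiKey: string, prompt: string) {
  const genAI = new GoogleGenerativeAI(apiKey)
  const model = genAI.getGenerativeModel({
    model: 'gemini-1.5-flash',
    generationConfig: { temperature: 0.7, maxOutputTokens: 1024 }
  })
  // startChat() carries the prior turns; sendMessageStream() sends the newest one.
  const chat = model.startChat({
    history: [
      { role: 'user', parts: [{ text: 'Hello' }] },
      { role: 'model', parts: [{ text: 'Hi, how can I help?' }] }
    ]
  })
  const result = await chat.sendMessageStream(prompt)
  for await (const chunk of result.stream) {
    process.stdout.write(chunk.text()) // each chunk carries a text delta
  }
}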
@@ -99,17 +145,34 @@
         temperature: assistant?.settings?.temperature,
         stream: false
       })
       return response.content[0].type === 'text' ? response.content[0].text : ''
-    } else {
-      // @ts-ignore key is not typed
-      const response = await this.openaiSdk.chat.completions.create({
-        model: model.id,
-        messages: messages as ChatCompletionMessageParam[],
-        stream: false,
-        keep_alive: this.keepAliveTime
-      })
-      return response.choices[0].message?.content || ''
     }
+
+    if (this.isGemini) {
+      const geminiModel = this.geminiSdk.getGenerativeModel({
+        model: model.id,
+        systemInstruction: assistant.prompt,
+        generationConfig: {
+          maxOutputTokens: maxTokens,
+          temperature: assistant?.settings?.temperature
+        }
+      })
+
+      const { response } = await geminiModel.generateContent(message.content)
+
+      return response.text()
+    }
+
+    // @ts-ignore key is not typed
+    const response = await this.openaiSdk.chat.completions.create({
+      model: model.id,
+      messages: messages as ChatCompletionMessageParam[],
+      stream: false,
+      keep_alive: this.keepAliveTime
+    })
+
+    return response.choices[0].message?.content || ''
   }

   public async summaries(messages: Message[], assistant: Assistant): Promise<string | null> {
@@ -134,18 +197,41 @@
       })
       return message.content[0].type === 'text' ? message.content[0].text : null
-    } else {
-      // @ts-ignore key is not typed
-      const response = await this.openaiSdk.chat.completions.create({
-        model: model.id,
-        messages: [systemMessage, ...userMessages] as ChatCompletionMessageParam[],
-        stream: false,
-        max_tokens: 50,
-        keep_alive: this.keepAliveTime
-      })
-      return removeQuotes(response.choices[0].message?.content || '')
     }
+
+    if (this.isGemini) {
+      const geminiModel = this.geminiSdk.getGenerativeModel({
+        model: model.id,
+        systemInstruction: systemMessage.content,
+        generationConfig: {
+          temperature: assistant?.settings?.temperature
+        }
+      })
+
+      const lastUserMessage = userMessages.pop()
+
+      const chat = await geminiModel.startChat({
+        history: userMessages.map((message) => ({
+          role: message.role === 'user' ? 'user' : 'model',
+          parts: [{ text: message.content }]
+        }))
+      })
+
+      const { response } = await chat.sendMessage(lastUserMessage?.content!)
+
+      return response.text()
+    }
+
+    // @ts-ignore key is not typed
+    const response = await this.openaiSdk.chat.completions.create({
+      model: model.id,
+      messages: [systemMessage, ...userMessages] as ChatCompletionMessageParam[],
+      stream: false,
+      max_tokens: 50,
+      keep_alive: this.keepAliveTime
+    })
+
+    return removeQuotes(response.choices[0].message?.content || '')
   }

   public async suggestions(messages: Message[], assistant: Assistant): Promise<Suggestion[]> {
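The translate and summaries hunks above lean on the same two conversions: OpenAI-style roles collapse to Gemini's 'user'/'model', and the system prompt moves into systemInstruction because Gemini history has no system role (the API also expects the first history turn to come from 'user'). A sketch of that mapping under those assumptions; the Msg type and helper names are illustrative, not from the commit:

import { Content, GoogleGenerativeAI } from '@google/generative-ai'

type Msg = { role: 'user' | 'assistant' | 'system'; content: string }

// Gemini history only accepts 'user' and 'model' roles.
function toGeminiHistory(messages: Msg[]): Content[] {
  return messages
    .filter((m) => m.role !== 'system')
    .map((m) => ({
      role: m.role === 'user' ? 'user' : 'model',
      parts: [{ text: m.content }]
    }))
}

async function complete(apiKey: string, system: string, messages: Msg[]) {
  const model = new GoogleGenerativeAI(apiKey).getGenerativeModel({
    model: 'gemini-1.5-flash',
    systemInstruction: system // stands in for the OpenAI 'system' message
  })
  const last = messages[messages.length - 1]
  const chat = model.startChat({ history: toGeminiHistory(messages.slice(0, -1)) })
  const { response } = await chat.sendMessage(last?.content ?? '')
  return response.text()
}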
@@ -172,6 +258,7 @@
   public async check(): Promise<{ valid: boolean; error: Error | null }> {
     const model = this.provider.models[0]
+
     const body = {
       model: model.id,
       messages: [{ role: 'user', content: 'hi' }],
@ -182,13 +269,32 @@ export default class ProviderSDK {
try { try {
if (this.isAnthropic) { if (this.isAnthropic) {
const message = await this.anthropicSdk.messages.create(body as MessageCreateParamsNonStreaming) const message = await this.anthropicSdk.messages.create(body as MessageCreateParamsNonStreaming)
return { valid: message.content.length > 0, error: null } return {
} else { valid: message.content.length > 0,
const response = await this.openaiSdk.chat.completions.create(body as ChatCompletionCreateParamsNonStreaming) error: null
return { valid: Boolean(response?.choices[0].message), error: null } }
}
if (this.isGemini) {
const geminiModel = this.geminiSdk.getGenerativeModel({ model: body.model })
const result = await geminiModel.generateContent(body.messages[0].content)
return {
valid: !isEmpty(result.response.text()),
error: null
}
}
const response = await this.openaiSdk.chat.completions.create(body as ChatCompletionCreateParamsNonStreaming)
return {
valid: Boolean(response?.choices[0].message),
error: null
} }
} catch (error: any) { } catch (error: any) {
return { valid: false, error } return {
valid: false,
error
}
} }
} }
@@ -198,6 +304,22 @@
         return []
       }

+      if (this.isGemini) {
+        const api = this.provider.apiHost + '/v1beta/models'
+        const { data } = await axios.get(api, { params: { key: this.provider.apiKey } })
+        return data.models.map(
+          (m: any) =>
+            ({
+              id: m.name.replace('models/', ''),
+              name: m.displayName,
+              description: m.description,
+              object: 'model',
+              created: Date.now(),
+              owned_by: 'gemini'
+            }) as OpenAI.Models.Model
+        )
+      }
+
       const response = await this.openaiSdk.models.list()
       return response.data
     } catch (error) {
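The model list goes through the plain REST endpoint instead of the SDK, presumably because @google/generative-ai 0.16 exposes no list-models call; that is what the new axios dependency is for. The request boils down to this standalone sketch (a real API key is assumed; the response field names follow the public v1beta API):

import axios from 'axios'

async function listGeminiModelIds(apiKey: string): Promise<string[]> {
  const { data } = await axios.get('https://generativelanguage.googleapis.com/v1beta/models', {
    params: { key: apiKey }
  })
  // Entries look like { name: 'models/gemini-1.5-flash', displayName, description, ... }
  return data.models.map((m: { name: string }) => m.name.replace('models/', ''))
}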

View File

@@ -22,7 +22,7 @@ const persistedReducer = persistReducer(
   {
     key: 'cherry-studio',
     storage,
-    version: 20,
+    version: 21,
     blacklist: ['runtime'],
     migrate
   },

View File

@@ -31,6 +31,15 @@ const initialState: LlmState = {
     isSystem: true,
     enabled: true
   },
+  {
+    id: 'gemini',
+    name: 'Gemini',
+    apiKey: '',
+    apiHost: 'https://generativelanguage.googleapis.com',
+    models: SYSTEM_MODELS.gemini.filter((m) => m.enabled),
+    isSystem: true,
+    enabled: false
+  },
   {
     id: 'silicon',
     name: 'Silicon',

View File

@@ -296,6 +296,26 @@ const migrateConfig = {
         fontSize: 14
       }
     }
-  }
+  },
+  '21': (state: RootState) => {
+    return {
+      ...state,
+      llm: {
+        ...state.llm,
+        providers: [
+          ...state.llm.providers,
+          {
+            id: 'gemini',
+            name: 'Gemini',
+            apiKey: '',
+            apiHost: 'https://generativelanguage.googleapis.com',
+            models: SYSTEM_MODELS.gemini.filter((m) => m.enabled),
+            isSystem: true,
+            enabled: false
+          }
+        ]
+      }
+    }
+  }
 }
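The version bump to 21 in store/index.ts and this '21' entry work together: on rehydrate, redux-persist compares the persisted version against the configured one and runs every pending migration in ascending order, so existing installs gain the disabled Gemini provider exactly once. A minimal sketch of that wiring; the reducer is a stub for illustration, not the app's actual store setup:

import { combineReducers } from '@reduxjs/toolkit'
import { createMigrate, persistReducer } from 'redux-persist'
import storage from 'redux-persist/lib/storage'

const migrations = {
  // Runs once for users whose persisted state is still at version 20 or below.
  21: (state: any) => ({
    ...state,
    llm: { ...state.llm, providers: [...state.llm.providers, { id: 'gemini', enabled: false }] }
  })
}

const rootReducer = combineReducers({
  llm: (state: any = { providers: [] }) => state // stub slice for illustration
})

export const persistedReducer = persistReducer(
  { key: 'cherry-studio', storage, version: 21, migrate: createMigrate(migrations as any) },
  rootReducer
)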

View File

@@ -961,6 +961,13 @@ __metadata:
   languageName: node
   linkType: hard

+"@google/generative-ai@npm:^0.16.0":
+  version: 0.16.0
+  resolution: "@google/generative-ai@npm:0.16.0"
+  checksum: 10c0/5d561a41cb7be60fc9b49965b66359e15df907bf6679009de7917beff138ba69d4a0772ab2a9d6f0e543d658d72bd19b83e6abdb87a6cdfa402a8764b08eed4c
+  languageName: node
+  linkType: hard
+
 "@hello-pangea/dnd@npm:^16.6.0":
   version: 16.6.0
   resolution: "@hello-pangea/dnd@npm:16.6.0"
@@ -3099,6 +3106,17 @@
   languageName: node
   linkType: hard

+"axios@npm:^1.7.3":
+  version: 1.7.3
+  resolution: "axios@npm:1.7.3"
+  dependencies:
+    follow-redirects: "npm:^1.15.6"
+    form-data: "npm:^4.0.0"
+    proxy-from-env: "npm:^1.1.0"
+  checksum: 10c0/a18cbe559203efa05fb1fec2d1898e23bf6329bd2575784ee32aa11b5bbe1d54b9f472c49a261294125519cf62aa4fe5ef6e647bb7482eafc15bffe15ab314ce
+  languageName: node
+  linkType: hard
+
 "bail@npm:^2.0.0":
   version: 2.0.2
   resolution: "bail@npm:2.0.2"
@@ -3446,6 +3464,7 @@ __metadata:
     "@electron-toolkit/preload": "npm:^3.0.0"
     "@electron-toolkit/tsconfig": "npm:^1.0.1"
     "@electron-toolkit/utils": "npm:^3.0.0"
+    "@google/generative-ai": "npm:^0.16.0"
     "@hello-pangea/dnd": "npm:^16.6.0"
     "@kangfenmao/keyv-storage": "npm:^0.1.0"
     "@reduxjs/toolkit": "npm:^2.2.5"
@@ -3457,6 +3476,7 @@
     "@vitejs/plugin-react": "npm:^4.2.1"
     ahooks: "npm:^3.8.0"
     antd: "npm:^5.18.3"
+    axios: "npm:^1.7.3"
     browser-image-compression: "npm:^2.0.2"
     dayjs: "npm:^1.11.11"
     dotenv-cli: "npm:^7.4.2"
@@ -5037,6 +5057,16 @@
   languageName: node
   linkType: hard

+"follow-redirects@npm:^1.15.6":
+  version: 1.15.6
+  resolution: "follow-redirects@npm:1.15.6"
+  peerDependenciesMeta:
+    debug:
+      optional: true
+  checksum: 10c0/9ff767f0d7be6aa6870c82ac79cf0368cd73e01bbc00e9eb1c2a16fbb198ec105e3c9b6628bb98e9f3ac66fe29a957b9645bcb9a490bb7aa0d35f908b6b85071
+  languageName: node
+  linkType: hard
+
 "for-each@npm:^0.3.3":
   version: 0.3.3
   resolution: "for-each@npm:0.3.3"
@@ -8251,6 +8281,13 @@
   languageName: node
   linkType: hard

+"proxy-from-env@npm:^1.1.0":
+  version: 1.1.0
+  resolution: "proxy-from-env@npm:1.1.0"
+  checksum: 10c0/fe7dd8b1bdbbbea18d1459107729c3e4a2243ca870d26d34c2c1bcd3e4425b7bcc5112362df2d93cc7fb9746f6142b5e272fd1cc5c86ddf8580175186f6ad42b
+  languageName: node
+  linkType: hard
+
 "pump@npm:^3.0.0":
   version: 3.0.0
   resolution: "pump@npm:3.0.0"