feat: change default provider

This commit is contained in:
kangfenmao 2024-09-03 20:03:09 +08:00
parent 350f13e97c
commit 5cb67e00a6
3 changed files with 62 additions and 62 deletions

View File

@@ -5,6 +5,38 @@ const EMBEDDING_REGEX = /embedding/i
export const SYSTEM_MODELS: Record<string, Model[]> = {
ollama: [],
silicon: [
{
id: 'Qwen/Qwen2-7B-Instruct',
provider: 'silicon',
name: 'Qwen2-7B-Instruct',
group: 'Qwen2'
},
{
id: 'Qwen/Qwen2-72B-Instruct',
provider: 'silicon',
name: 'Qwen2-72B-Instruct',
group: 'Qwen2'
},
{
id: 'THUDM/glm-4-9b-chat',
provider: 'silicon',
name: 'GLM-4-9B-Chat',
group: 'GLM'
},
{
id: 'deepseek-ai/DeepSeek-V2-Chat',
provider: 'silicon',
name: 'DeepSeek-V2-Chat',
group: 'DeepSeek'
},
{
id: 'deepseek-ai/DeepSeek-Coder-V2-Instruct',
provider: 'silicon',
name: 'DeepSeek-Coder-V2-Instruct',
group: 'DeepSeek'
}
],
openai: [
{
id: 'gpt-4o',
@@ -71,38 +103,6 @@ export const SYSTEM_MODELS: Record<string, Model[]> = {
group: 'Claude 3'
}
],
silicon: [
{
id: 'Qwen/Qwen2-7B-Instruct',
provider: 'silicon',
name: 'Qwen2-7B-Instruct',
group: 'Qwen2'
},
{
id: 'Qwen/Qwen2-72B-Instruct',
provider: 'silicon',
name: 'Qwen2-72B-Instruct',
group: 'Qwen2'
},
{
id: 'THUDM/glm-4-9b-chat',
provider: 'silicon',
name: 'GLM-4-9B-Chat',
group: 'GLM'
},
{
id: 'deepseek-ai/DeepSeek-V2-Chat',
provider: 'silicon',
name: 'DeepSeek-V2-Chat',
group: 'DeepSeek'
},
{
id: 'deepseek-ai/DeepSeek-Coder-V2-Instruct',
provider: 'silicon',
name: 'DeepSeek-Coder-V2-Instruct',
group: 'DeepSeek'
}
],
deepseek: [
{
id: 'deepseek-chat',

View File

@@ -110,10 +110,10 @@ const MessageItem: FC<Props> = ({ message, index, showMenu, onDeleteMessage }) =
if (message.status === 'error') {
return (
<Alert
message={t('error.chat.response')}
message={<div style={{ fontSize: 14 }}>{t('error.chat.response')}</div>}
description={<Markdown message={message} />}
type="error"
style={{ marginBottom: 15 }}
style={{ marginBottom: 15, padding: 10, fontSize: 12 }}
/>
)
}

View File

@@ -19,25 +19,25 @@ export interface LlmState {
}
const initialState: LlmState = {
defaultModel: SYSTEM_MODELS.openai[0],
topicNamingModel: SYSTEM_MODELS.openai[0],
translateModel: SYSTEM_MODELS.openai[0],
defaultModel: SYSTEM_MODELS.silicon[0],
topicNamingModel: SYSTEM_MODELS.silicon[0],
translateModel: SYSTEM_MODELS.silicon[0],
providers: [
{
id: 'openai',
name: 'OpenAI',
id: 'silicon',
name: 'Silicon',
apiKey: '',
apiHost: 'https://api.openai.com',
models: SYSTEM_MODELS.openai,
apiHost: 'https://api.siliconflow.cn',
models: SYSTEM_MODELS.silicon,
isSystem: true,
enabled: true
},
{
id: 'gemini',
name: 'Gemini',
id: 'ollama',
name: 'Ollama',
apiKey: '',
apiHost: 'https://generativelanguage.googleapis.com',
models: SYSTEM_MODELS.gemini,
apiHost: 'http://localhost:11434/v1/',
models: SYSTEM_MODELS.ollama,
isSystem: true,
enabled: false
},
@@ -51,20 +51,20 @@ const initialState: LlmState = {
enabled: false
},
{
id: 'ollama',
name: 'Ollama',
id: 'openai',
name: 'OpenAI',
apiKey: '',
apiHost: 'http://localhost:11434/v1/',
models: SYSTEM_MODELS.ollama,
apiHost: 'https://api.openai.com',
models: SYSTEM_MODELS.openai,
isSystem: true,
enabled: false
},
{
id: 'silicon',
name: 'Silicon',
id: 'gemini',
name: 'Gemini',
apiKey: '',
apiHost: 'https://api.siliconflow.cn',
models: SYSTEM_MODELS.silicon,
apiHost: 'https://generativelanguage.googleapis.com',
models: SYSTEM_MODELS.gemini,
isSystem: true,
enabled: false
},
@@ -149,15 +149,6 @@ const initialState: LlmState = {
isSystem: true,
enabled: false
},
{
id: 'aihubmix',
name: 'AiHubMix',
apiKey: '',
apiHost: 'https://aihubmix.com',
models: SYSTEM_MODELS.aihubmix,
isSystem: true,
enabled: false
},
{
id: 'graphrag-kylin-mountain',
name: 'GraphRAG',
@@ -184,6 +175,15 @@ const initialState: LlmState = {
models: SYSTEM_MODELS.groq,
isSystem: true,
enabled: false
},
{
id: 'aihubmix',
name: 'AiHubMix',
apiKey: '',
apiHost: 'https://aihubmix.com',
models: SYSTEM_MODELS.aihubmix,
isSystem: true,
enabled: false
}
],
settings: {