fix: fetch chat completion using default provider
parent f077cf290d
commit 72c89d2f30
@renderer/services/assistant

@@ -1,6 +1,6 @@
-import { Assistant, Provider } from '@renderer/types'
-import { getDefaultTopic } from './topic'
+import { Assistant, Model, Provider, Topic } from '@renderer/types'
 import store from '@renderer/store'
+import { uuid } from '@renderer/utils'
 
 export function getDefaultAssistant(): Assistant {
   return {
@@ -12,12 +12,26 @@ export function getDefaultAssistant(): Assistant {
   }
 }
 
+export function getDefaultTopic(): Topic {
+  return {
+    id: uuid(),
+    name: 'Default Topic',
+    messages: []
+  }
+}
+
 export function getAssistantProvider(assistant: Assistant) {
   const providers = store.getState().llm.providers
-  return providers.find((p) => p.id === assistant.id) || getDefaultProvider()
+  const provider = providers.find((p) => p.id === assistant.model?.provider)
+  return provider || getDefaultProvider()
+}
+
+export function getProviderByModel(model: Model) {
+  const providers = store.getState().llm.providers
+  return providers.find((p) => p.id === model.provider) as Provider
 }
 
 export function getDefaultProvider() {
-  const provider = store.getState().llm.providers.find((p) => p.isSystem)
-  return provider as Provider
+  const defaultModel = store.getState().llm.defaultModel
+  return getProviderByModel(defaultModel)
 }
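
Context for the assistant service change: the old getAssistantProvider compared provider ids against assistant.id, so the lookup would normally miss and fall through to the first isSystem provider. It now keys off assistant.model?.provider and, failing that, off the provider of the store's defaultModel via the new getProviderByModel helper. A minimal usage sketch of the intended behaviour, assuming only what this diff shows (Assistant.model is optional, Model carries a provider id, and a provider with id 'groq' exists in the initial llm state); everything else is illustrative:

import { Model } from '@renderer/types'
import { getAssistantProvider, getDefaultAssistant, getDefaultProvider } from '@renderer/services/assistant'

// No model attached: the provider lookup misses and falls back to
// getDefaultProvider(), i.e. the provider of llm.defaultModel.
const bare = { ...getDefaultAssistant(), model: undefined }
console.log(getAssistantProvider(bare).id === getDefaultProvider().id) // true

// Model attached: that model's provider wins over the default.
const withModel = { ...getDefaultAssistant(), model: { provider: 'groq' } as Model }
console.log(getAssistantProvider(withModel).id) // 'groq'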
@renderer/services/topic (file deleted)

@@ -1,10 +0,0 @@
-import { Topic } from '@renderer/types'
-import { uuid } from '@renderer/utils'
-
-export function getDefaultTopic(): Topic {
-  return {
-    id: uuid(),
-    name: 'Default Topic',
-    messages: []
-  }
-}
llm slice (@renderer/store)

@@ -1,7 +1,6 @@
 import { createSlice, PayloadAction } from '@reduxjs/toolkit'
-import { getDefaultAssistant } from '@renderer/services/assistant'
+import { getDefaultAssistant, getDefaultTopic } from '@renderer/services/assistant'
 import LocalStorage from '@renderer/services/storage'
-import { getDefaultTopic } from '@renderer/services/topic'
 import { Assistant, Model, Topic } from '@renderer/types'
 import { uniqBy } from 'lodash'
 
@@ -9,6 +9,7 @@ export interface LlmState {
 }
 
 const initialState: LlmState = {
+  defaultModel: SYSTEM_MODELS.openai[0],
   providers: [
     {
       id: 'openai',
@@ -42,8 +43,7 @@ const initialState: LlmState = {
       isSystem: true,
       models: SYSTEM_MODELS.groq.filter((m) => m.defaultEnabled)
     }
-  ],
-  defaultModel: SYSTEM_MODELS.openai[0]
+  ]
 }
 
 const settingsSlice = createSlice({
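
On the store side, defaultModel is now seeded up front in initialState (the move from below the providers array is otherwise purely positional), which is what lets the rewritten getDefaultProvider() resolve before the user has configured anything. A rough sketch of the resulting chain; that SYSTEM_MODELS.openai[0].provider is 'openai' is an assumption taken from the seed value, not something this diff shows:

import store from '@renderer/store'
import { getDefaultProvider, getProviderByModel } from '@renderer/services/assistant'

// Present from the first render, because it is part of initialState.
const { defaultModel } = store.getState().llm

// getDefaultProvider() is now just getProviderByModel(defaultModel),
// so these two calls resolve to the same provider.
console.log(getProviderByModel(defaultModel).id) // 'openai' for the seeded SYSTEM_MODELS.openai[0]
console.log(getDefaultProvider().id)             // 'openai'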