feat: Add LM Studio support (#1572)
Co-authored-by: hehua2008 <hegan2010@gmail.com> Co-authored-by: 亢奋猫 <kangfenmao@qq.com>
This commit is contained in:
parent
80dedc149a
commit
fd4334f331
@ -30,7 +30,7 @@ Cherry Studio is a desktop client that supports multiple LLM providers, avai
|
||||
|
||||
- ☁️ Major LLM Cloud Services: OpenAI, Gemini, Anthropic, and more
|
||||
- 🔗 AI Web Service Integration: Claude, Perplexity, Poe, and others
|
||||
- 💻 Local Model Support with Ollama
|
||||
- 💻 Local Model Support with Ollama, LM Studio
|
||||
|
||||
2. **AI Assistants & Conversations**:
|
||||
|
||||
|
||||
@ -31,7 +31,7 @@ Cherry Studioは、複数のLLMプロバイダーをサポートするデスク
|
||||
|
||||
- ☁️ 主要な LLM クラウドサービス対応:OpenAI、Gemini、Anthropic など
|
||||
- 🔗 AI Web サービス統合:Claude、Perplexity、Poe など
|
||||
- 💻 Ollama によるローカルモデル実行対応
|
||||
- 💻 Ollama、LM Studio によるローカルモデル実行対応
|
||||
|
||||
2. **AI アシスタントと対話**:
|
||||
|
||||
|
||||
@ -31,7 +31,7 @@ Cherry Studio 是一款支持多个大语言模型(LLM)服务商的桌面客
|
||||
|
||||
- ☁️ 支持主流 LLM 云服务:OpenAI、Gemini、Anthropic、硅基流动等
|
||||
- 🔗 集成流行 AI Web 服务:Claude、Perplexity、Poe、腾讯元宝、知乎直答等
|
||||
- 💻 支持 Ollama 本地模型部署
|
||||
- 💻 支持 Ollama、LM Studio 本地模型部署
|
||||
|
||||
2. **智能助手与对话**:
|
||||
|
||||
|
||||
BIN
src/renderer/src/assets/images/providers/lmstudio.png
Normal file
BIN
src/renderer/src/assets/images/providers/lmstudio.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 13 KiB |
@ -323,6 +323,7 @@ export const SYSTEM_MODELS: Record<string, Model[]> = {
|
||||
}
|
||||
],
|
||||
ollama: [],
|
||||
lmstudio: [],
|
||||
silicon: [
|
||||
{
|
||||
id: 'deepseek-ai/DeepSeek-R1',
|
||||
|
||||
@ -24,6 +24,7 @@ import MoonshotProviderLogo from '@renderer/assets/images/providers/moonshot.png
|
||||
import NvidiaProviderLogo from '@renderer/assets/images/providers/nvidia.png'
|
||||
import OcoolAiProviderLogo from '@renderer/assets/images/providers/ocoolai.png'
|
||||
import OllamaProviderLogo from '@renderer/assets/images/providers/ollama.png'
|
||||
import LMStudioProviderLogo from '@renderer/assets/images/providers/lmstudio.png'
|
||||
import OpenAiProviderLogo from '@renderer/assets/images/providers/openai.png'
|
||||
import OpenRouterProviderLogo from '@renderer/assets/images/providers/openrouter.png'
|
||||
import PerplexityProviderLogo from '@renderer/assets/images/providers/perplexity.png'
|
||||
@ -52,6 +53,8 @@ export function getProviderLogo(providerId: string) {
|
||||
return ZhipuProviderLogo
|
||||
case 'ollama':
|
||||
return OllamaProviderLogo
|
||||
case 'lmstudio':
|
||||
return LMStudioProviderLogo
|
||||
case 'moonshot':
|
||||
return MoonshotProviderLogo
|
||||
case 'openrouter':
|
||||
@ -373,6 +376,16 @@ export const PROVIDER_CONFIG = {
|
||||
models: 'https://ollama.com/library'
|
||||
}
|
||||
},
|
||||
lmstudio: {
|
||||
api: {
|
||||
url: 'http://localhost:1234'
|
||||
},
|
||||
websites: {
|
||||
official: 'https://lmstudio.ai/',
|
||||
docs: 'https://lmstudio.ai/docs',
|
||||
models: 'https://lmstudio.ai/models'
|
||||
}
|
||||
},
|
||||
anthropic: {
|
||||
api: {
|
||||
url: 'https://api.anthropic.com/'
|
||||
|
||||
18
src/renderer/src/hooks/useLMStudio.ts
Normal file
18
src/renderer/src/hooks/useLMStudio.ts
Normal file
@ -0,0 +1,18 @@
|
||||
import store, { useAppSelector } from '@renderer/store'
|
||||
import { setLMStudioKeepAliveTime } from '@renderer/store/llm'
|
||||
import { useDispatch } from 'react-redux'
|
||||
|
||||
export function useLMStudioSettings() {
|
||||
const settings = useAppSelector((state) => state.llm.settings.lmstudio)
|
||||
const dispatch = useDispatch()
|
||||
|
||||
return { ...settings, setKeepAliveTime: (time: number) => dispatch(setLMStudioKeepAliveTime(time)) }
|
||||
}
|
||||
|
||||
export function getLMStudioSettings() {
|
||||
return store.getState().llm.settings.lmstudio
|
||||
}
|
||||
|
||||
export function getLMStudioKeepAliveTime() {
|
||||
return store.getState().llm.settings.lmstudio.keepAliveTime + 'm'
|
||||
}
|
||||
@ -439,6 +439,12 @@
|
||||
"keep_alive_time.title": "Keep Alive Time",
|
||||
"title": "Ollama"
|
||||
},
|
||||
"lmstudio": {
|
||||
"keep_alive_time.description": "The time in minutes to keep the connection alive, default is 5 minutes.",
|
||||
"keep_alive_time.placeholder": "Minutes",
|
||||
"keep_alive_time.title": "Keep Alive Time",
|
||||
"title": "LM Studio"
|
||||
},
|
||||
"paintings": {
|
||||
"button.delete.image": "Delete Image",
|
||||
"button.delete.image.confirm": "Are you sure you want to delete this image?",
|
||||
@ -493,6 +499,7 @@
|
||||
"nvidia": "Nvidia",
|
||||
"ocoolai": "ocoolAI",
|
||||
"ollama": "Ollama",
|
||||
"lmstudio": "LM Studio",
|
||||
"openai": "OpenAI",
|
||||
"openrouter": "OpenRouter",
|
||||
"ppio": "PPIO",
|
||||
|
||||
@ -439,6 +439,12 @@
|
||||
"keep_alive_time.title": "保持時間",
|
||||
"title": "Ollama"
|
||||
},
|
||||
"lmstudio": {
|
||||
"keep_alive_time.description": "モデルがメモリに保持される時間(デフォルト:5分)",
|
||||
"keep_alive_time.placeholder": "分",
|
||||
"keep_alive_time.title": "保持時間",
|
||||
"title": "LM Studio"
|
||||
},
|
||||
"paintings": {
|
||||
"button.delete.image": "画像を削除",
|
||||
"button.delete.image.confirm": "この画像を削除してもよろしいですか?",
|
||||
@ -493,6 +499,7 @@
|
||||
"nvidia": "NVIDIA",
|
||||
"ocoolai": "ocoolAI",
|
||||
"ollama": "Ollama",
|
||||
"lmstudio": "LM Studio",
|
||||
"openai": "OpenAI",
|
||||
"openrouter": "OpenRouter",
|
||||
"qwenlm": "QwenLM",
|
||||
|
||||
@ -439,6 +439,12 @@
|
||||
"keep_alive_time.title": "Время жизни модели",
|
||||
"title": "Ollama"
|
||||
},
|
||||
"lmstudio": {
|
||||
"keep_alive_time.description": "Время в минутах, в течение которого модель остается активной, по умолчанию 5 минут.",
|
||||
"keep_alive_time.placeholder": "Минуты",
|
||||
"keep_alive_time.title": "Время жизни модели",
|
||||
"title": "LM Studio"
|
||||
},
|
||||
"paintings": {
|
||||
"button.delete.image": "Удалить изображение",
|
||||
"button.delete.image.confirm": "Вы уверены, что хотите удалить это изображение?",
|
||||
@ -493,6 +499,7 @@
|
||||
"nvidia": "Nvidia",
|
||||
"ocoolai": "ocoolAI",
|
||||
"ollama": "Ollama",
|
||||
"lmstudio": "LM Studio",
|
||||
"openai": "OpenAI",
|
||||
"openrouter": "OpenRouter",
|
||||
"qwenlm": "QwenLM",
|
||||
|
||||
@ -439,6 +439,12 @@
|
||||
"keep_alive_time.title": "保持活跃时间",
|
||||
"title": "Ollama"
|
||||
},
|
||||
"lmstudio": {
|
||||
"keep_alive_time.description": "对话后模型在内存中保持的时间(默认:5分钟)",
|
||||
"keep_alive_time.placeholder": "分钟",
|
||||
"keep_alive_time.title": "保持活跃时间",
|
||||
"title": "LM Studio"
|
||||
},
|
||||
"paintings": {
|
||||
"button.delete.image": "删除图片",
|
||||
"button.delete.image.confirm": "确定要删除此图片吗?",
|
||||
@ -493,6 +499,7 @@
|
||||
"nvidia": "英伟达",
|
||||
"ocoolai": "ocoolAI",
|
||||
"ollama": "Ollama",
|
||||
"lmstudio": "LM Studio",
|
||||
"openai": "OpenAI",
|
||||
"openrouter": "OpenRouter",
|
||||
"ppio": "PPIO 派欧云",
|
||||
|
||||
@ -439,6 +439,12 @@
|
||||
"keep_alive_time.title": "保持活躍時間",
|
||||
"title": "Ollama"
|
||||
},
|
||||
"lmstudio": {
|
||||
"keep_alive_time.description": "對話後模型在記憶體中保持的時間(預設為 5 分鐘)。",
|
||||
"keep_alive_time.placeholder": "分鐘",
|
||||
"keep_alive_time.title": "保持活躍時間",
|
||||
"title": "LM Studio"
|
||||
},
|
||||
"paintings": {
|
||||
"infini": "無問芯穹",
|
||||
"perplexity": "Perplexity",
|
||||
@ -493,6 +499,7 @@
|
||||
"nvidia": "輝達",
|
||||
"ocoolai": "ocoolAI",
|
||||
"ollama": "Ollama",
|
||||
"lmstudio": "LM Studio",
|
||||
"openai": "OpenAI",
|
||||
"openrouter": "OpenRouter",
|
||||
"ppio": "PPIO 派歐雲",
|
||||
|
||||
@ -0,0 +1,34 @@
|
||||
import { useLMStudioSettings } from '@renderer/hooks/useLMStudio'
|
||||
import { InputNumber } from 'antd'
|
||||
import { FC, useState } from 'react'
|
||||
import { useTranslation } from 'react-i18next'
|
||||
import styled from 'styled-components'
|
||||
|
||||
import { SettingHelpText, SettingHelpTextRow, SettingSubtitle } from '..'
|
||||
|
||||
const LMStudioSettings: FC = () => {
|
||||
const { keepAliveTime, setKeepAliveTime } = useLMStudioSettings()
|
||||
const [keepAliveMinutes, setKeepAliveMinutes] = useState(keepAliveTime)
|
||||
const { t } = useTranslation()
|
||||
|
||||
return (
|
||||
<Container>
|
||||
<SettingSubtitle style={{ marginBottom: 5 }}>{t('lmstudio.keep_alive_time.title')}</SettingSubtitle>
|
||||
<InputNumber
|
||||
style={{ width: '100%' }}
|
||||
value={keepAliveMinutes}
|
||||
onChange={(e) => setKeepAliveMinutes(Number(e))}
|
||||
onBlur={() => setKeepAliveTime(keepAliveMinutes)}
|
||||
suffix={t('lmstudio.keep_alive_time.placeholder')}
|
||||
step={5}
|
||||
/>
|
||||
<SettingHelpTextRow>
|
||||
<SettingHelpText>{t('lmstudio.keep_alive_time.description')}</SettingHelpText>
|
||||
</SettingHelpTextRow>
|
||||
</Container>
|
||||
)
|
||||
}
|
||||
|
||||
const Container = styled.div``
|
||||
|
||||
export default LMStudioSettings
|
||||
@ -43,6 +43,7 @@ import ApiCheckPopup from './ApiCheckPopup'
|
||||
import EditModelsPopup from './EditModelsPopup'
|
||||
import GraphRAGSettings from './GraphRAGSettings'
|
||||
import OllamSettings from './OllamaSettings'
|
||||
import LMStudioSettings from './LMStudioSettings'
|
||||
import SelectProviderModelPopup from './SelectProviderModelPopup'
|
||||
|
||||
interface Props {
|
||||
@ -319,6 +320,7 @@ const ProviderSetting: FC<Props> = ({ provider: _provider }) => {
|
||||
</>
|
||||
)}
|
||||
{provider.id === 'ollama' && <OllamSettings />}
|
||||
{provider.id === 'lmstudio' && <LMStudioSettings />}
|
||||
{provider.id === 'graphrag-kylin-mountain' && provider.models.length > 0 && (
|
||||
<GraphRAGSettings provider={provider} />
|
||||
)}
|
||||
|
||||
@ -1,5 +1,6 @@
|
||||
import { REFERENCE_PROMPT } from '@renderer/config/prompts'
|
||||
import { getOllamaKeepAliveTime } from '@renderer/hooks/useOllama'
|
||||
import { getLMStudioKeepAliveTime } from '@renderer/hooks/useLMStudio'
|
||||
import { getKnowledgeReferences } from '@renderer/services/KnowledgeService'
|
||||
import store from '@renderer/store'
|
||||
import { Assistant, GenerateImageParams, Message, Model, Provider, Suggestion } from '@renderer/types'
|
||||
@ -63,7 +64,7 @@ export default abstract class BaseProvider {
|
||||
}
|
||||
|
||||
public get keepAliveTime() {
  // Only local providers (Ollama, LM Studio) expose a keep-alive setting;
  // cloud providers get undefined. The scraped diff left both the old and
  // new return statements in place, which made the LM Studio branch
  // unreachable — keep only the combined behavior.
  if (this.provider.id === 'ollama') {
    return getOllamaKeepAliveTime()
  }
  if (this.provider.id === 'lmstudio') {
    return getLMStudioKeepAliveTime()
  }
  return undefined
}
|
||||
|
||||
public async fakeCompletions({ onChunk }: CompletionsParams) {
|
||||
|
||||
@ -214,7 +214,7 @@ export async function checkApi(provider: Provider, model: Model) {
|
||||
const key = 'api-check'
|
||||
const style = { marginTop: '3vh' }
|
||||
|
||||
if (provider.id !== 'ollama') {
|
||||
if (provider.id !== 'ollama' && provider.id !== 'lmstudio') {
|
||||
if (!provider.apiKey) {
|
||||
window.message.error({ content: i18n.t('message.error.enter.api.key'), key, style })
|
||||
return {
|
||||
@ -252,7 +252,7 @@ export async function checkApi(provider: Provider, model: Model) {
|
||||
|
||||
function hasApiKey(provider: Provider) {
|
||||
if (!provider) return false
|
||||
if (provider.id === 'ollama') return true
|
||||
if (provider.id === 'ollama' || provider.id === 'lmstudio') return true
|
||||
return !isEmpty(provider.apiKey)
|
||||
}
|
||||
|
||||
|
||||
@ -8,6 +8,9 @@ type LlmSettings = {
|
||||
ollama: {
|
||||
keepAliveTime: number
|
||||
}
|
||||
lmstudio: {
|
||||
keepAliveTime: number
|
||||
}
|
||||
}
|
||||
|
||||
export interface LlmState {
|
||||
@ -83,6 +86,16 @@ const initialState: LlmState = {
|
||||
isSystem: true,
|
||||
enabled: false
|
||||
},
|
||||
{
|
||||
id: 'lmstudio',
|
||||
name: 'LM Studio',
|
||||
type: 'openai',
|
||||
apiKey: '',
|
||||
apiHost: 'http://localhost:1234',
|
||||
models: SYSTEM_MODELS.lmstudio,
|
||||
isSystem: true,
|
||||
enabled: false
|
||||
},
|
||||
{
|
||||
id: 'anthropic',
|
||||
name: 'Anthropic',
|
||||
@ -378,6 +391,9 @@ const initialState: LlmState = {
|
||||
settings: {
|
||||
ollama: {
|
||||
keepAliveTime: 0
|
||||
},
|
||||
lmstudio: {
|
||||
keepAliveTime: 0
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -398,11 +414,24 @@ const getIntegratedInitialState = () => {
|
||||
models: [model],
|
||||
isSystem: true,
|
||||
enabled: true
|
||||
},
|
||||
{
|
||||
id: 'lmstudio',
|
||||
name: 'LM Studio',
|
||||
type: 'openai',
|
||||
apiKey: '',
|
||||
apiHost: 'http://localhost:1234',
|
||||
models: [model],
|
||||
isSystem: true,
|
||||
enabled: true
|
||||
}
|
||||
],
|
||||
settings: {
|
||||
ollama: {
|
||||
keepAliveTime: 3600
|
||||
},
|
||||
lmstudio: {
|
||||
keepAliveTime: 3600
|
||||
}
|
||||
}
|
||||
} as LlmState
|
||||
@ -457,6 +486,9 @@ const settingsSlice = createSlice({
|
||||
},
|
||||
setOllamaKeepAliveTime: (state, action: PayloadAction<number>) => {
|
||||
state.settings.ollama.keepAliveTime = action.payload
|
||||
},
|
||||
setLMStudioKeepAliveTime: (state, action: PayloadAction<number>) => {
|
||||
state.settings.lmstudio.keepAliveTime = action.payload
|
||||
}
|
||||
}
|
||||
})
|
||||
@ -471,7 +503,8 @@ export const {
|
||||
setDefaultModel,
|
||||
setTopicNamingModel,
|
||||
setTranslateModel,
|
||||
setOllamaKeepAliveTime
|
||||
setOllamaKeepAliveTime,
|
||||
setLMStudioKeepAliveTime
|
||||
} = settingsSlice.actions
|
||||
|
||||
export default settingsSlice.reducer
|
||||
|
||||
@ -970,6 +970,16 @@ const migrateConfig = {
|
||||
}
|
||||
|
||||
state.llm.providers.push(
|
||||
{
|
||||
id: 'lmstudio',
|
||||
name: 'LM Studio',
|
||||
type: 'openai',
|
||||
apiKey: '',
|
||||
apiHost: 'http://localhost:1234',
|
||||
models: SYSTEM_MODELS.lmstudio,
|
||||
isSystem: true,
|
||||
enabled: false
|
||||
},
|
||||
{
|
||||
id: 'perplexity',
|
||||
name: 'Perplexity',
|
||||
@ -1001,6 +1011,11 @@ const migrateConfig = {
|
||||
enabled: false
|
||||
}
|
||||
)
|
||||
|
||||
state.llm.settings.lmstudio = {
|
||||
keepAliveTime: 5
|
||||
}
|
||||
|
||||
return state
|
||||
}
|
||||
}
|
||||
|
||||
@ -8330,6 +8330,7 @@ __metadata:
|
||||
"@langchain/google-vertexai": "*"
|
||||
"@langchain/google-vertexai-web": "*"
|
||||
"@langchain/groq": "*"
|
||||
"@langchain/lmstudio": "*"
|
||||
"@langchain/mistralai": "*"
|
||||
"@langchain/ollama": "*"
|
||||
axios: "*"
|
||||
@ -8356,6 +8357,8 @@ __metadata:
|
||||
optional: true
|
||||
"@langchain/groq":
|
||||
optional: true
|
||||
"@langchain/lmstudio":
|
||||
optional: true
|
||||
"@langchain/mistralai":
|
||||
optional: true
|
||||
"@langchain/ollama":
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user