feat: Add LM Studio support (#1572)

Co-authored-by: hehua2008 <hegan2010@gmail.com>
Co-authored-by: 亢奋猫 <kangfenmao@qq.com>
This commit is contained in:
hehua2008 2025-02-14 10:49:57 +08:00 committed by GitHub
parent 80dedc149a
commit fd4334f331
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
19 changed files with 162 additions and 7 deletions

View File

@ -30,7 +30,7 @@ Cherry Studio is a desktop client that supports multiple LLM providers, avai
- ☁️ Major LLM Cloud Services: OpenAI, Gemini, Anthropic, and more - ☁️ Major LLM Cloud Services: OpenAI, Gemini, Anthropic, and more
- 🔗 AI Web Service Integration: Claude, Perplexity, Poe, and others - 🔗 AI Web Service Integration: Claude, Perplexity, Poe, and others
- 💻 Local Model Support with Ollama - 💻 Local Model Support with Ollama, LM Studio
2. **AI Assistants & Conversations**: 2. **AI Assistants & Conversations**:

View File

@ -31,7 +31,7 @@ Cherry Studioは、複数のLLMプロバイダーをサポートするデスク
- ☁️ 主要な LLM クラウドサービス対応OpenAI、Gemini、Anthropic など - ☁️ 主要な LLM クラウドサービス対応OpenAI、Gemini、Anthropic など
- 🔗 AI Web サービス統合Claude、Perplexity、Poe など - 🔗 AI Web サービス統合Claude、Perplexity、Poe など
- 💻 Ollama によるローカルモデル実行対応 - 💻 Ollama、LM Studio によるローカルモデル実行対応
2. **AI アシスタントと対話** 2. **AI アシスタントと対話**

View File

@ -31,7 +31,7 @@ Cherry Studio 是一款支持多个大语言模型LLM服务商的桌面客
- ☁️ 支持主流 LLM 云服务OpenAI、Gemini、Anthropic、硅基流动等 - ☁️ 支持主流 LLM 云服务OpenAI、Gemini、Anthropic、硅基流动等
- 🔗 集成流行 AI Web 服务Claude、Perplexity、Poe、腾讯元宝、知乎直答等 - 🔗 集成流行 AI Web 服务Claude、Perplexity、Poe、腾讯元宝、知乎直答等
- 💻 支持 Ollama 本地模型部署 - 💻 支持 Ollama、LM Studio 本地模型部署
2. **智能助手与对话** 2. **智能助手与对话**

Binary file not shown.

After

Width:  |  Height:  |  Size: 13 KiB

View File

@ -323,6 +323,7 @@ export const SYSTEM_MODELS: Record<string, Model[]> = {
} }
], ],
ollama: [], ollama: [],
lmstudio: [],
silicon: [ silicon: [
{ {
id: 'deepseek-ai/DeepSeek-R1', id: 'deepseek-ai/DeepSeek-R1',

View File

@ -24,6 +24,7 @@ import MoonshotProviderLogo from '@renderer/assets/images/providers/moonshot.png
import NvidiaProviderLogo from '@renderer/assets/images/providers/nvidia.png' import NvidiaProviderLogo from '@renderer/assets/images/providers/nvidia.png'
import OcoolAiProviderLogo from '@renderer/assets/images/providers/ocoolai.png' import OcoolAiProviderLogo from '@renderer/assets/images/providers/ocoolai.png'
import OllamaProviderLogo from '@renderer/assets/images/providers/ollama.png' import OllamaProviderLogo from '@renderer/assets/images/providers/ollama.png'
import LMStudioProviderLogo from '@renderer/assets/images/providers/lmstudio.png'
import OpenAiProviderLogo from '@renderer/assets/images/providers/openai.png' import OpenAiProviderLogo from '@renderer/assets/images/providers/openai.png'
import OpenRouterProviderLogo from '@renderer/assets/images/providers/openrouter.png' import OpenRouterProviderLogo from '@renderer/assets/images/providers/openrouter.png'
import PerplexityProviderLogo from '@renderer/assets/images/providers/perplexity.png' import PerplexityProviderLogo from '@renderer/assets/images/providers/perplexity.png'
@ -52,6 +53,8 @@ export function getProviderLogo(providerId: string) {
return ZhipuProviderLogo return ZhipuProviderLogo
case 'ollama': case 'ollama':
return OllamaProviderLogo return OllamaProviderLogo
case 'lmstudio':
return LMStudioProviderLogo
case 'moonshot': case 'moonshot':
return MoonshotProviderLogo return MoonshotProviderLogo
case 'openrouter': case 'openrouter':
@ -373,6 +376,16 @@ export const PROVIDER_CONFIG = {
models: 'https://ollama.com/library' models: 'https://ollama.com/library'
} }
}, },
lmstudio: {
api: {
url: 'http://localhost:1234'
},
websites: {
official: 'https://lmstudio.ai/',
docs: 'https://lmstudio.ai/docs',
models: 'https://lmstudio.ai/models'
}
},
anthropic: { anthropic: {
api: { api: {
url: 'https://api.anthropic.com/' url: 'https://api.anthropic.com/'

View File

@ -0,0 +1,18 @@
import store, { useAppSelector } from '@renderer/store'
import { setLMStudioKeepAliveTime } from '@renderer/store/llm'
import { useDispatch } from 'react-redux'
export function useLMStudioSettings() {
const settings = useAppSelector((state) => state.llm.settings.lmstudio)
const dispatch = useDispatch()
return { ...settings, setKeepAliveTime: (time: number) => dispatch(setLMStudioKeepAliveTime(time)) }
}
/** Non-hook accessor for the LM Studio settings, for use outside React components. */
export function getLMStudioSettings() {
  const { llm } = store.getState()
  return llm.settings.lmstudio
}
/** Keep-alive duration formatted as a minutes string, e.g. `5m`. */
export function getLMStudioKeepAliveTime() {
  const { keepAliveTime } = store.getState().llm.settings.lmstudio
  return `${keepAliveTime}m`
}

View File

@ -439,6 +439,12 @@
"keep_alive_time.title": "Keep Alive Time", "keep_alive_time.title": "Keep Alive Time",
"title": "Ollama" "title": "Ollama"
}, },
"lmstudio": {
"keep_alive_time.description": "The time in minutes to keep the connection alive, default is 5 minutes.",
"keep_alive_time.placeholder": "Minutes",
"keep_alive_time.title": "Keep Alive Time",
"title": "LM Studio"
},
"paintings": { "paintings": {
"button.delete.image": "Delete Image", "button.delete.image": "Delete Image",
"button.delete.image.confirm": "Are you sure you want to delete this image?", "button.delete.image.confirm": "Are you sure you want to delete this image?",
@ -493,6 +499,7 @@
"nvidia": "Nvidia", "nvidia": "Nvidia",
"ocoolai": "ocoolAI", "ocoolai": "ocoolAI",
"ollama": "Ollama", "ollama": "Ollama",
"lmstudio": "LM Studio",
"openai": "OpenAI", "openai": "OpenAI",
"openrouter": "OpenRouter", "openrouter": "OpenRouter",
"ppio": "PPIO", "ppio": "PPIO",

View File

@ -439,6 +439,12 @@
"keep_alive_time.title": "保持時間", "keep_alive_time.title": "保持時間",
"title": "Ollama" "title": "Ollama"
}, },
"lmstudio": {
"keep_alive_time.description": "モデルがメモリに保持される時間デフォルト5分",
"keep_alive_time.placeholder": "分",
"keep_alive_time.title": "保持時間",
"title": "LM Studio"
},
"paintings": { "paintings": {
"button.delete.image": "画像を削除", "button.delete.image": "画像を削除",
"button.delete.image.confirm": "この画像を削除してもよろしいですか?", "button.delete.image.confirm": "この画像を削除してもよろしいですか?",
@ -493,6 +499,7 @@
"nvidia": "NVIDIA", "nvidia": "NVIDIA",
"ocoolai": "ocoolAI", "ocoolai": "ocoolAI",
"ollama": "Ollama", "ollama": "Ollama",
"lmstudio": "LM Studio",
"openai": "OpenAI", "openai": "OpenAI",
"openrouter": "OpenRouter", "openrouter": "OpenRouter",
"qwenlm": "QwenLM", "qwenlm": "QwenLM",

View File

@ -439,6 +439,12 @@
"keep_alive_time.title": "Время жизни модели", "keep_alive_time.title": "Время жизни модели",
"title": "Ollama" "title": "Ollama"
}, },
"lmstudio": {
"keep_alive_time.description": "Время в минутах, в течение которого модель остается активной, по умолчанию 5 минут.",
"keep_alive_time.placeholder": "Минуты",
"keep_alive_time.title": "Время жизни модели",
"title": "LM Studio"
},
"paintings": { "paintings": {
"button.delete.image": "Удалить изображение", "button.delete.image": "Удалить изображение",
"button.delete.image.confirm": "Вы уверены, что хотите удалить это изображение?", "button.delete.image.confirm": "Вы уверены, что хотите удалить это изображение?",
@ -493,6 +499,7 @@
"nvidia": "Nvidia", "nvidia": "Nvidia",
"ocoolai": "ocoolAI", "ocoolai": "ocoolAI",
"ollama": "Ollama", "ollama": "Ollama",
"lmstudio": "LM Studio",
"openai": "OpenAI", "openai": "OpenAI",
"openrouter": "OpenRouter", "openrouter": "OpenRouter",
"qwenlm": "QwenLM", "qwenlm": "QwenLM",

View File

@ -439,6 +439,12 @@
"keep_alive_time.title": "保持活跃时间", "keep_alive_time.title": "保持活跃时间",
"title": "Ollama" "title": "Ollama"
}, },
"lmstudio": {
"keep_alive_time.description": "对话后模型在内存中保持的时间默认5分钟",
"keep_alive_time.placeholder": "分钟",
"keep_alive_time.title": "保持活跃时间",
"title": "LM Studio"
},
"paintings": { "paintings": {
"button.delete.image": "删除图片", "button.delete.image": "删除图片",
"button.delete.image.confirm": "确定要删除此图片吗?", "button.delete.image.confirm": "确定要删除此图片吗?",
@ -493,6 +499,7 @@
"nvidia": "英伟达", "nvidia": "英伟达",
"ocoolai": "ocoolAI", "ocoolai": "ocoolAI",
"ollama": "Ollama", "ollama": "Ollama",
"lmstudio": "LM Studio",
"openai": "OpenAI", "openai": "OpenAI",
"openrouter": "OpenRouter", "openrouter": "OpenRouter",
"ppio": "PPIO 派欧云", "ppio": "PPIO 派欧云",

View File

@ -439,6 +439,12 @@
"keep_alive_time.title": "保持活躍時間", "keep_alive_time.title": "保持活躍時間",
"title": "Ollama" "title": "Ollama"
}, },
"lmstudio": {
"keep_alive_time.description": "對話後模型在記憶體中保持的時間(預設為 5 分鐘)。",
"keep_alive_time.placeholder": "分鐘",
"keep_alive_time.title": "保持活躍時間",
"title": "LM Studio"
},
"paintings": { "paintings": {
"infini": "無問芯穹", "infini": "無問芯穹",
"perplexity": "Perplexity", "perplexity": "Perplexity",
@ -493,6 +499,7 @@
"nvidia": "輝達", "nvidia": "輝達",
"ocoolai": "ocoolAI", "ocoolai": "ocoolAI",
"ollama": "Ollama", "ollama": "Ollama",
"lmstudio": "LM Studio",
"openai": "OpenAI", "openai": "OpenAI",
"openrouter": "OpenRouter", "openrouter": "OpenRouter",
"ppio": "PPIO 派歐雲", "ppio": "PPIO 派歐雲",

View File

@ -0,0 +1,34 @@
import { useLMStudioSettings } from '@renderer/hooks/useLMStudio'
import { InputNumber } from 'antd'
import { FC, useState } from 'react'
import { useTranslation } from 'react-i18next'
import styled from 'styled-components'
import { SettingHelpText, SettingHelpTextRow, SettingSubtitle } from '..'
/**
 * Settings panel for the LM Studio provider.
 * Shows a single numeric input for the keep-alive time; the value is held
 * in local state while typing and committed to the store on blur, so we do
 * not dispatch on every keystroke.
 */
const LMStudioSettings: FC = () => {
  const { t } = useTranslation()
  const { keepAliveTime, setKeepAliveTime } = useLMStudioSettings()
  // Local draft of the input value, seeded from the persisted setting.
  const [minutes, setMinutes] = useState(keepAliveTime)

  const commit = () => setKeepAliveTime(minutes)

  return (
    <Container>
      <SettingSubtitle style={{ marginBottom: 5 }}>{t('lmstudio.keep_alive_time.title')}</SettingSubtitle>
      <InputNumber
        style={{ width: '100%' }}
        value={minutes}
        step={5}
        suffix={t('lmstudio.keep_alive_time.placeholder')}
        onChange={(value) => setMinutes(Number(value))}
        onBlur={commit}
      />
      <SettingHelpTextRow>
        <SettingHelpText>{t('lmstudio.keep_alive_time.description')}</SettingHelpText>
      </SettingHelpTextRow>
    </Container>
  )
}

const Container = styled.div``

export default LMStudioSettings

View File

@ -43,6 +43,7 @@ import ApiCheckPopup from './ApiCheckPopup'
import EditModelsPopup from './EditModelsPopup' import EditModelsPopup from './EditModelsPopup'
import GraphRAGSettings from './GraphRAGSettings' import GraphRAGSettings from './GraphRAGSettings'
import OllamSettings from './OllamaSettings' import OllamSettings from './OllamaSettings'
import LMStudioSettings from './LMStudioSettings'
import SelectProviderModelPopup from './SelectProviderModelPopup' import SelectProviderModelPopup from './SelectProviderModelPopup'
interface Props { interface Props {
@ -319,6 +320,7 @@ const ProviderSetting: FC<Props> = ({ provider: _provider }) => {
</> </>
)} )}
{provider.id === 'ollama' && <OllamSettings />} {provider.id === 'ollama' && <OllamSettings />}
{provider.id === 'lmstudio' && <LMStudioSettings />}
{provider.id === 'graphrag-kylin-mountain' && provider.models.length > 0 && ( {provider.id === 'graphrag-kylin-mountain' && provider.models.length > 0 && (
<GraphRAGSettings provider={provider} /> <GraphRAGSettings provider={provider} />
)} )}

View File

@ -1,5 +1,6 @@
import { REFERENCE_PROMPT } from '@renderer/config/prompts' import { REFERENCE_PROMPT } from '@renderer/config/prompts'
import { getOllamaKeepAliveTime } from '@renderer/hooks/useOllama' import { getOllamaKeepAliveTime } from '@renderer/hooks/useOllama'
import { getLMStudioKeepAliveTime } from '@renderer/hooks/useLMStudio'
import { getKnowledgeReferences } from '@renderer/services/KnowledgeService' import { getKnowledgeReferences } from '@renderer/services/KnowledgeService'
import store from '@renderer/store' import store from '@renderer/store'
import { Assistant, GenerateImageParams, Message, Model, Provider, Suggestion } from '@renderer/types' import { Assistant, GenerateImageParams, Message, Model, Provider, Suggestion } from '@renderer/types'
@ -63,7 +64,7 @@ export default abstract class BaseProvider {
} }
public get keepAliveTime() { public get keepAliveTime() {
return this.provider.id === 'ollama' ? getOllamaKeepAliveTime() : undefined return this.provider.id === 'ollama' ? getOllamaKeepAliveTime() : this.provider.id === 'lmstudio' ? getLMStudioKeepAliveTime() : undefined
} }
public async fakeCompletions({ onChunk }: CompletionsParams) { public async fakeCompletions({ onChunk }: CompletionsParams) {

View File

@ -214,7 +214,7 @@ export async function checkApi(provider: Provider, model: Model) {
const key = 'api-check' const key = 'api-check'
const style = { marginTop: '3vh' } const style = { marginTop: '3vh' }
if (provider.id !== 'ollama') { if (provider.id !== 'ollama' && provider.id !== 'lmstudio') {
if (!provider.apiKey) { if (!provider.apiKey) {
window.message.error({ content: i18n.t('message.error.enter.api.key'), key, style }) window.message.error({ content: i18n.t('message.error.enter.api.key'), key, style })
return { return {
@ -252,7 +252,7 @@ export async function checkApi(provider: Provider, model: Model) {
function hasApiKey(provider: Provider) { function hasApiKey(provider: Provider) {
if (!provider) return false if (!provider) return false
if (provider.id === 'ollama') return true if (provider.id === 'ollama' || provider.id === 'lmstudio') return true
return !isEmpty(provider.apiKey) return !isEmpty(provider.apiKey)
} }

View File

@ -8,6 +8,9 @@ type LlmSettings = {
ollama: { ollama: {
keepAliveTime: number keepAliveTime: number
} }
lmstudio: {
keepAliveTime: number
}
} }
export interface LlmState { export interface LlmState {
@ -83,6 +86,16 @@ const initialState: LlmState = {
isSystem: true, isSystem: true,
enabled: false enabled: false
}, },
{
id: 'lmstudio',
name: 'LM Studio',
type: 'openai',
apiKey: '',
apiHost: 'http://localhost:1234',
models: SYSTEM_MODELS.lmstudio,
isSystem: true,
enabled: false
},
{ {
id: 'anthropic', id: 'anthropic',
name: 'Anthropic', name: 'Anthropic',
@ -378,6 +391,9 @@ const initialState: LlmState = {
settings: { settings: {
ollama: { ollama: {
keepAliveTime: 0 keepAliveTime: 0
},
lmstudio: {
keepAliveTime: 0
} }
} }
} }
@ -398,11 +414,24 @@ const getIntegratedInitialState = () => {
models: [model], models: [model],
isSystem: true, isSystem: true,
enabled: true enabled: true
},
{
id: 'lmstudio',
name: 'LM Studio',
type: 'openai',
apiKey: '',
apiHost: 'http://localhost:1234',
models: [model],
isSystem: true,
enabled: true
} }
], ],
settings: { settings: {
ollama: { ollama: {
keepAliveTime: 3600 keepAliveTime: 3600
},
lmstudio: {
keepAliveTime: 3600
} }
} }
} as LlmState } as LlmState
@ -457,6 +486,9 @@ const settingsSlice = createSlice({
}, },
setOllamaKeepAliveTime: (state, action: PayloadAction<number>) => { setOllamaKeepAliveTime: (state, action: PayloadAction<number>) => {
state.settings.ollama.keepAliveTime = action.payload state.settings.ollama.keepAliveTime = action.payload
},
setLMStudioKeepAliveTime: (state, action: PayloadAction<number>) => {
state.settings.lmstudio.keepAliveTime = action.payload
} }
} }
}) })
@ -471,7 +503,8 @@ export const {
setDefaultModel, setDefaultModel,
setTopicNamingModel, setTopicNamingModel,
setTranslateModel, setTranslateModel,
setOllamaKeepAliveTime setOllamaKeepAliveTime,
setLMStudioKeepAliveTime
} = settingsSlice.actions } = settingsSlice.actions
export default settingsSlice.reducer export default settingsSlice.reducer

View File

@ -970,6 +970,16 @@ const migrateConfig = {
} }
state.llm.providers.push( state.llm.providers.push(
{
id: 'lmstudio',
name: 'LM Studio',
type: 'openai',
apiKey: '',
apiHost: 'http://localhost:1234',
models: SYSTEM_MODELS.lmstudio,
isSystem: true,
enabled: false
},
{ {
id: 'perplexity', id: 'perplexity',
name: 'Perplexity', name: 'Perplexity',
@ -1001,6 +1011,11 @@ const migrateConfig = {
enabled: false enabled: false
} }
) )
state.llm.settings.lmstudio = {
keepAliveTime: 5
}
return state return state
} }
} }

View File

@ -8330,6 +8330,7 @@ __metadata:
"@langchain/google-vertexai": "*" "@langchain/google-vertexai": "*"
"@langchain/google-vertexai-web": "*" "@langchain/google-vertexai-web": "*"
"@langchain/groq": "*" "@langchain/groq": "*"
"@langchain/lmstudio": "*"
"@langchain/mistralai": "*" "@langchain/mistralai": "*"
"@langchain/ollama": "*" "@langchain/ollama": "*"
axios: "*" axios: "*"
@ -8356,6 +8357,8 @@ __metadata:
optional: true optional: true
"@langchain/groq": "@langchain/groq":
optional: true optional: true
"@langchain/lmstudio":
optional: true
"@langchain/mistralai": "@langchain/mistralai":
optional: true optional: true
"@langchain/ollama": "@langchain/ollama":