chore(version): 0.5.0

kangfenmao 2024-08-07 21:06:35 +08:00
parent beb40f5baf
commit f7ef895ce6
9 changed files with 33 additions and 36 deletions

View File

@@ -1,6 +1,6 @@
 {
   "name": "cherry-studio",
-  "version": "0.4.9",
+  "version": "0.5.0",
   "description": "A powerful AI assistant for producer.",
   "main": "./out/main/index.js",
   "author": "kangfenmao@qq.com",

View File

@@ -26,7 +26,7 @@ function createWindow() {
     width: mainWindowState.width,
     height: mainWindowState.height,
     minWidth: 1080,
-    minHeight: 500,
+    minHeight: 600,
     show: true,
     autoHideMenuBar: true,
     transparent: process.platform === 'darwin',
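
The minimum-height bump only constrains manual resizing; the restored window size still comes from electron-window-state. A minimal sketch of how these options typically fit together (only the option names above come from this diff; the windowStateKeeper setup is an assumption):

import { BrowserWindow } from 'electron'
import windowStateKeeper from 'electron-window-state'

function createWindow() {
  // Assumed setup: electron-window-state persists the last window geometry.
  const mainWindowState = windowStateKeeper({ defaultWidth: 1080, defaultHeight: 600 })
  const mainWindow = new BrowserWindow({
    width: mainWindowState.width, // restored size, may exceed the minimums
    height: mainWindowState.height,
    minWidth: 1080,
    minHeight: 600, // raised from 500 in this commit
    autoHideMenuBar: true
  })
  mainWindowState.manage(mainWindow) // keep persisting future resizes and moves
}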

View File

@@ -33,7 +33,6 @@ const SettingsTab: FC<Props> = (props) => {
     debounce(
       (settings: Partial<AssistantSettings>) => {
         updateAssistantSettings({
-          ...assistant.settings,
           temperature: settings.temperature ?? temperature,
           contextCount: settings.contextCount ?? contextCount,
           enableMaxTokens: settings.enableMaxTokens ?? enableMaxTokens,
@@ -41,12 +40,9 @@ const SettingsTab: FC<Props> = (props) => {
         })
       },
       1000,
-      {
-        leading: false,
-        trailing: true
-      }
+      { leading: true, trailing: false }
     ),
-    []
+    [temperature, contextCount, enableMaxTokens, maxTokens]
   )
   const onTemperatureChange = (value) => {
@@ -255,7 +251,7 @@ const InputNumberic = styled(InputNumber)`
 const Label = styled.p`
   margin: 0;
   font-size: 12px;
-  font-weight: bold;
+  font-weight: 600;
   margin-right: 8px;
 `
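
The dependency-array change above fixes a stale-closure bug: with an empty array, the debounced callback captured the first render's temperature, contextCount, enableMaxTokens, and maxTokens, so the `?? temperature` fallbacks always wrote stale values. A minimal sketch of the pattern, assuming the debounce is wrapped in useCallback (the wrapper is not visible in this hunk, and the single temperature field stands in for the component's real state):

import { useCallback, useState } from 'react'
import { debounce } from 'lodash'

function useDebouncedSettings(updateAssistantSettings: (s: { temperature: number }) => void) {
  const [temperature, setTemperature] = useState(1.0)
  const updateSettings = useCallback(
    debounce(
      (settings: { temperature?: number }) => {
        // Falls back to the current state only if the closure is fresh.
        updateAssistantSettings({ temperature: settings.temperature ?? temperature })
      },
      1000,
      { leading: true, trailing: false } // apply immediately, drop the trailing call
    ),
    [temperature] // with [] this closure would keep the initial value forever
  )
  return { updateSettings, setTemperature }
}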

View File

@@ -38,7 +38,7 @@ const AssistantSettings: FC = () => {
       1000,
       { leading: false, trailing: true }
     ),
-    []
+    [temperature, contextCount, enableMaxTokens, maxTokens]
   )
   const onTemperatureChange = (value) => {

View File

@@ -3,12 +3,12 @@ import { MessageCreateParamsNonStreaming, MessageParam } from '@anthropic-ai/sdk
 import { DEFAULT_MAX_TOKENS } from '@renderer/config/constant'
 import { getOllamaKeepAliveTime } from '@renderer/hooks/useOllama'
 import { Assistant, Message, Provider, Suggestion } from '@renderer/types'
-import { getAssistantSettings, removeQuotes } from '@renderer/utils'
+import { removeQuotes } from '@renderer/utils'
 import { sum, takeRight } from 'lodash'
 import OpenAI from 'openai'
 import { ChatCompletionCreateParamsNonStreaming, ChatCompletionMessageParam } from 'openai/resources'
-import { getAssistantMaxTokens, getDefaultModel, getTopNamingModel } from './assistant'
+import { getAssistantSettings, getDefaultModel, getTopNamingModel } from './assistant'
 import { EVENT_NAMES } from './event'

 export default class ProviderSDK {
@@ -39,7 +39,7 @@
   ) {
     const defaultModel = getDefaultModel()
     const model = assistant.model || defaultModel
-    const { contextCount } = getAssistantSettings(assistant)
+    const { contextCount, maxTokens } = getAssistantSettings(assistant)
     const systemMessage = assistant.prompt ? { role: 'system', content: assistant.prompt } : undefined
@@ -53,7 +53,7 @@
       .stream({
         model: model.id,
         messages: [systemMessage, ...userMessages].filter(Boolean) as MessageParam[],
-        max_tokens: getAssistantMaxTokens(assistant) || DEFAULT_MAX_TOKENS,
+        max_tokens: maxTokens || DEFAULT_MAX_TOKENS,
         temperature: assistant?.settings?.temperature
       })
       .on('text', (text) => onChunk({ text: text || '' }))
@@ -73,7 +73,7 @@
         messages: [systemMessage, ...userMessages].filter(Boolean) as ChatCompletionMessageParam[],
         stream: true,
         temperature: assistant?.settings?.temperature,
-        max_tokens: getAssistantMaxTokens(assistant),
+        max_tokens: maxTokens,
         keep_alive: this.keepAliveTime
       })
       for await (const chunk of stream) {
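
With getAssistantSettings now returning maxTokens directly, both provider branches read one destructured value instead of calling getAssistantMaxTokens. Roughly how the two request payloads treat the undefined case (an illustrative fragment using only names from this diff):

const { contextCount, maxTokens } = getAssistantSettings(assistant)

// Anthropic's Messages API requires max_tokens, so undefined falls back:
const anthropicParams = { max_tokens: maxTokens || DEFAULT_MAX_TOKENS }

// The OpenAI-compatible call passes undefined through; JSON serialization
// then simply omits the field from the request body:
const openaiParams = { max_tokens: maxTokens }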

View File

@@ -1,9 +1,9 @@
-import { DEFAULT_MAX_TOKENS } from '@renderer/config/constant'
+import { DEFAULT_CONEXTCOUNT, DEFAULT_MAX_TOKENS, DEFAULT_TEMPERATURE } from '@renderer/config/constant'
 import i18n from '@renderer/i18n'
 import store from '@renderer/store'
 import { updateAgent } from '@renderer/store/agents'
 import { updateAssistant } from '@renderer/store/assistants'
-import { Agent, Assistant, Model, Provider, Topic } from '@renderer/types'
+import { Agent, Assistant, AssistantSettings, Model, Provider, Topic } from '@renderer/types'
 import { getLeadingEmoji, removeLeadingEmoji, uuid } from '@renderer/utils'

 export function getDefaultAssistant(): Assistant {
@@ -57,7 +57,9 @@ export function getProviderByModelId(modelId?: string) {
   return providers.find((p) => p.models.find((m) => m.id === _modelId)) as Provider
 }

-export function getAssistantMaxTokens(assistant: Assistant) {
+export const getAssistantSettings = (assistant: Assistant): AssistantSettings => {
+  const contextCount = assistant?.settings?.contextCount ?? DEFAULT_CONEXTCOUNT
+  const getAssistantMaxTokens = () => {
   if (assistant.settings?.enableMaxTokens) {
     const maxTokens = assistant.settings.maxTokens
     if (typeof maxTokens === 'number') {
@@ -65,10 +67,17 @@ export function getAssistantMaxTokens(assistant: Assistant) {
       }
       return DEFAULT_MAX_TOKENS
     }
     return undefined
+  }
+  return {
+    contextCount: contextCount === 20 ? 100000 : contextCount,
+    temperature: assistant?.settings?.temperature ?? DEFAULT_TEMPERATURE,
+    enableMaxTokens: assistant?.settings?.enableMaxTokens ?? false,
+    maxTokens: getAssistantMaxTokens()
+  }
 }

 export function covertAgentToAssistant(agent: Agent): Assistant {
   return {
     ...getDefaultAssistant(),
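
A worked example of what the consolidated helper returns (the assistant object here is hypothetical; the DEFAULT_* constants live in @renderer/config/constant and their values are not shown in this diff):

const assistant = getDefaultAssistant()
assistant.settings = { contextCount: 20, temperature: 0.5, enableMaxTokens: false, maxTokens: undefined }

const settings = getAssistantSettings(assistant)
// settings.contextCount === 100000: 20 is treated as the "unlimited" sentinel
// settings.temperature === 0.5: taken from the assistant as-is
// settings.enableMaxTokens === false
// settings.maxTokens === undefined: no cap while enableMaxTokens is false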

View File

@@ -1,8 +1,9 @@
 import { Assistant, Message } from '@renderer/types'
-import { getAssistantSettings } from '@renderer/utils'
 import { GPTTokens } from 'gpt-tokens'
 import { takeRight } from 'lodash'
+import { getAssistantSettings } from './assistant'

 export const filterAtMessages = (messages: Message[]) => {
   return messages.filter((message) => message.type !== '@')
 }

View File

@@ -14,7 +14,7 @@ export type Assistant = {
 export type AssistantSettings = {
   contextCount: number
   temperature: number
-  maxTokens: number
+  maxTokens: number | undefined
   enableMaxTokens: boolean
 }
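
Widening maxTokens to number | undefined makes the "no cap" state part of the type, which is what lets ProviderSDK pass the value straight through. Under strictNullChecks, consumers must then choose a fallback explicitly (a hypothetical helper showing the pattern the providers use inline, not part of this commit):

function resolveMaxTokens(settings: AssistantSettings, fallback: number): number {
  return settings.maxTokens ?? fallback // undefined means "use the provider default"
}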

View File

@@ -1,5 +1,4 @@
-import { DEFAULT_CONEXTCOUNT, DEFAULT_TEMPERATURE } from '@renderer/config/constant'
-import { Assistant, AssistantSettings, Model } from '@renderer/types'
+import { Model } from '@renderer/types'
 import imageCompression from 'browser-image-compression'
 import { v4 as uuidv4 } from 'uuid'
@@ -177,14 +176,6 @@ export function getFirstCharacter(str) {
   }
 }

-export const getAssistantSettings = (assistant: Assistant): AssistantSettings => {
-  const contextCount = assistant?.settings?.contextCount ?? DEFAULT_CONEXTCOUNT
-  return {
-    contextCount: contextCount === 20 ? 100000 : contextCount,
-    temperature: assistant?.settings?.temperature ?? DEFAULT_TEMPERATURE
-  }
-}
-
 /**
  * is valid proxy url
  * @param url proxy url