diff --git a/.yarn/patches/openai-npm-4.76.2-8ff1374617.patch b/.yarn/patches/openai-npm-4.76.2-8ff1374617.patch
deleted file mode 100644
index aba7f37a..00000000
--- a/.yarn/patches/openai-npm-4.76.2-8ff1374617.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-diff --git a/core.js b/core.js
-index 30c91e66bf595a66c09eb3dbcbda7d58154865f5..b511ff24ea1891904c60174c6ed26ecdd4d5ac51 100644
---- a/core.js
-+++ b/core.js
-@@ -156,7 +156,7 @@ class APIClient {
-       Accept: 'application/json',
-       'Content-Type': 'application/json',
-       'User-Agent': this.getUserAgent(),
--      ...getPlatformHeaders(),
-+      // ...getPlatformHeaders(),
-       ...this.authHeaders(opts),
-     };
-   }
-diff --git a/core.mjs b/core.mjs
-index ac267bcfcff44b1f7c9bea5513bba94726a31795..dd5bd9f29609d3f0eea4bd5b225f302893df14ad 100644
---- a/core.mjs
-+++ b/core.mjs
-@@ -149,7 +149,7 @@ export class APIClient {
-       Accept: 'application/json',
-       'Content-Type': 'application/json',
-       'User-Agent': this.getUserAgent(),
--      ...getPlatformHeaders(),
-+      // ...getPlatformHeaders(),
-       ...this.authHeaders(opts),
-     };
-   }
diff --git a/.yarn/patches/openai-npm-4.77.3-59c6d42e7a.patch b/.yarn/patches/openai-npm-4.77.3-59c6d42e7a.patch
new file mode 100644
index 00000000..fbb26d7c
--- /dev/null
+++ b/.yarn/patches/openai-npm-4.77.3-59c6d42e7a.patch
@@ -0,0 +1,39 @@
+diff --git a/core.js b/core.js
+index e75a18281ce8f051990c5a50bc1076afdddf91a3..e62f796791a155f23d054e74a429516c14d6e11b 100644
+--- a/core.js
++++ b/core.js
+@@ -156,7 +156,7 @@ class APIClient {
+       Accept: 'application/json',
+       'Content-Type': 'application/json',
+       'User-Agent': this.getUserAgent(),
+-      ...getPlatformHeaders(),
++      // ...getPlatformHeaders(),
+       ...this.authHeaders(opts),
+     };
+   }
+diff --git a/core.mjs b/core.mjs
+index fcef58eb502664c41a77483a00db8adaf29b2817..18c5d6ed4be86b3640931277bdc27700006764d7 100644
+--- a/core.mjs
++++ b/core.mjs
+@@ -149,7 +149,7 @@ export class APIClient {
+       Accept: 'application/json',
+       'Content-Type': 'application/json',
+       'User-Agent': this.getUserAgent(),
+-      ...getPlatformHeaders(),
++      // ...getPlatformHeaders(),
+       ...this.authHeaders(opts),
+     };
+   }
+diff --git a/error.mjs b/error.mjs
+index 7d19f5578040afa004bc887aab1725e8703d2bac..59ec725b6142299a62798ac4bdedb63ba7d9932c 100644
+--- a/error.mjs
++++ b/error.mjs
+@@ -36,7 +36,7 @@ export class APIError extends OpenAIError {
+     if (!status || !headers) {
+       return new APIConnectionError({ message, cause: castToError(errorResponse) });
+     }
+-    const error = errorResponse?.['error'];
++    const error = errorResponse?.['error'] || errorResponse;
+     if (status === 400) {
+       return new BadRequestError(status, error, message, headers);
+     }
diff --git a/package.json b/package.json
index dc17c286..192b5023 100644
--- a/package.json
+++ b/package.json
@@ -121,7 +121,7 @@
     "i18next": "^23.11.5",
     "lodash": "^4.17.21",
     "mime": "^4.0.4",
-    "openai": "patch:openai@npm%3A4.76.2#~/.yarn/patches/openai-npm-4.76.2-8ff1374617.patch",
+    "openai": "patch:openai@npm%3A4.77.3#~/.yarn/patches/openai-npm-4.77.3-59c6d42e7a.patch",
     "prettier": "^3.2.4",
     "react": "^18.2.0",
     "react-dom": "^18.2.0",
@@ -157,7 +157,8 @@
   "resolutions": {
     "pdf-parse@npm:1.1.1": "patch:pdf-parse@npm%3A1.1.1#~/.yarn/patches/pdf-parse-npm-1.1.1-04a6109b2a.patch",
     "@langchain/openai@npm:^0.3.16": "patch:@langchain/openai@npm%3A0.3.16#~/.yarn/patches/@langchain-openai-npm-0.3.16-e525b59526.patch",
-    "@langchain/openai@npm:>=0.1.0 <0.4.0": "patch:@langchain/openai@npm%3A0.3.16#~/.yarn/patches/@langchain-openai-npm-0.3.16-e525b59526.patch"
+    "@langchain/openai@npm:>=0.1.0 <0.4.0": "patch:@langchain/openai@npm%3A0.3.16#~/.yarn/patches/@langchain-openai-npm-0.3.16-e525b59526.patch",
<0.4.0": "patch:@langchain/openai@npm%3A0.3.16#~/.yarn/patches/@langchain-openai-npm-0.3.16-e525b59526.patch", + "openai@npm:^4.77.0": "patch:openai@npm%3A4.77.3#~/.yarn/patches/openai-npm-4.77.3-59c6d42e7a.patch" }, "packageManager": "yarn@4.5.0" } diff --git a/src/renderer/src/i18n/locales/en-us.json b/src/renderer/src/i18n/locales/en-us.json index c4f3febc..840e497e 100644 --- a/src/renderer/src/i18n/locales/en-us.json +++ b/src/renderer/src/i18n/locales/en-us.json @@ -109,7 +109,7 @@ "settings.context_count.tip": "The number of previous messages to keep in the context.", "settings.max": "Max", "settings.max_tokens": "Enable max tokens limit", - "settings.max_tokens.tip": "The maximum number of tokens the model can generate. Normal chat suggests 500-800. Short text generation suggests 800-2000. Code generation suggests 2000-3600. Long text generation suggests above 4000.", + "settings.max_tokens.tip": "The maximum number of tokens the model can generate. Need to consider the context limit of the model, otherwise an error will be reported", "settings.reset": "Reset", "settings.set_as_default": "Apply to default assistant", "settings.show_line_numbers": "Show line numbers in code", @@ -117,6 +117,8 @@ "settings.temperature.tip": "Higher values make the model more creative and unpredictable, while lower values make it more deterministic and precise.", "settings.top_p": "Top-P", "settings.top_p.tip": "Default value is 1, the smaller the value, the less variety in the answers, the easier to understand, the larger the value, the larger the range of the AI's vocabulary, the more diverse", + "settings.max_tokens.confirm": "Enable max tokens limit", + "settings.max_tokens.confirm_content": "Enable max tokens limit, affects the length of the result. Need to consider the context limit of the model, otherwise an error will be reported", "suggestions.title": "Suggested Questions", "thinking": "Thinking", "topics.auto_rename": "Auto Rename", @@ -289,7 +291,7 @@ "status_processing": "Processing", "title": "Knowledge Base", "url_added": "URL added", - "url_placeholder": "Enter URL", + "url_placeholder": "Enter URL, multiple URLs separated by Enter", "urls": "URLs" }, "languages": { @@ -349,7 +351,7 @@ "message.code_style": "Code style", "message.delete.content": "Are you sure you want to delete this message?", "message.delete.title": "Delete Message", - "message.multi_model_style": "Multi-model answer style", + "message.multi_model_style": "Group style", "message.multi_model_style.fold": "Fold", "message.multi_model_style.horizontal": "Horizontal", "message.multi_model_style.vertical": "Vertical", @@ -605,7 +607,7 @@ "messages.input.show_estimated_tokens": "Show estimated tokens", "messages.input.title": "Input Settings", "messages.markdown_rendering_input_message": "Markdown render input message", - "messages.math_engine": "Math render engine", + "messages.math_engine": "Math engine", "messages.metrics": "{{time_first_token_millsec}}ms to first token | {{token_speed}} tok/sec", "messages.model.title": "Model Settings", "messages.title": "Message Settings", diff --git a/src/renderer/src/i18n/locales/ja-jp.json b/src/renderer/src/i18n/locales/ja-jp.json index b3928003..1588e599 100644 --- a/src/renderer/src/i18n/locales/ja-jp.json +++ b/src/renderer/src/i18n/locales/ja-jp.json @@ -109,7 +109,7 @@ "settings.context_count.tip": "コンテキストに保持する以前のメッセージの数", "settings.max": "最大", "settings.max_tokens": "最大トークン制限を有効にする", - "settings.max_tokens.tip": 
"モデルが生成できる最大トークン数。通常のチャットでは500-800、短いテキスト生成では800-2000、コード生成では2000-3600、長いテキスト生成では4000以上を推奨", + "settings.max_tokens.tip": "モデルが生成できる最大トークン数。モデルのコンテキスト制限に基づいて設定する必要があります。そうしないとエラーが発生します", "settings.reset": "リセット", "settings.set_as_default": "デフォルトのアシスタントに適用", "settings.show_line_numbers": "コードに行番号を表示", @@ -117,6 +117,8 @@ "settings.temperature.tip": "低い値はモデルをより創造的で予測不可能にし、高い値はより決定論的で正確にします", "settings.top_p": "Top-P", "settings.top_p.tip": "デフォルト値は1で、値が小さいほど回答の多様性が減り、理解しやすくなります。値が大きいほど、AIの語彙範囲が広がり、多様性が増します", + "settings.max_tokens.confirm": "最大トークン制限を有効にする", + "settings.max_tokens.confirm_content": "最大トークン制限を有効にすると、モデルが生成できる最大トークン数が制限されます。これにより、返される結果の長さに影響が出る可能性があります。モデルのコンテキスト制限に基づいて設定する必要があります。そうしないとエラーが発生します", "suggestions.title": "提案された質問", "thinking": "思考中...", "topics.auto_rename": "自動リネーム", @@ -289,7 +291,7 @@ "status_processing": "処理中", "title": "ナレッジベース", "url_added": "URLが追加されました", - "url_placeholder": "URLを入力", + "url_placeholder": "URLを入力, 複数のURLはEnterで区切る", "urls": "URL" }, "languages": { diff --git a/src/renderer/src/i18n/locales/ru-ru.json b/src/renderer/src/i18n/locales/ru-ru.json index 1ba7c1c7..47d1019b 100644 --- a/src/renderer/src/i18n/locales/ru-ru.json +++ b/src/renderer/src/i18n/locales/ru-ru.json @@ -109,7 +109,7 @@ "settings.context_count.tip": "Количество предыдущих сообщений, которые нужно сохранить в контексте.", "settings.max": "Максимум", "settings.max_tokens": "Включить лимит максимальных токенов", - "settings.max_tokens.tip": "Максимальное количество токенов, которые может сгенерировать модель. Обычный чат предполагает 500-800. Генерация короткого текста предполагает 800-2000. Генерация кода предполагает 2000-3600. Генерация длинного текста предполагает выше 4000.", + "settings.max_tokens.tip": "Максимальное количество токенов, которые может сгенерировать модель. Нужно учитывать контекст модели, иначе будет ошибка", "settings.reset": "Сбросить", "settings.set_as_default": "Применить к ассистенту по умолчанию", "settings.show_line_numbers": "Показать номера строк в коде", @@ -117,6 +117,8 @@ "settings.temperature.tip": "Меньшие значения делают модель более креативной и непредсказуемой, в то время как большие значения делают её более детерминированной и точной.", "settings.top_p": "Top-P", "settings.top_p.tip": "Значение по умолчанию 1, чем меньше значение, тем меньше вариативности в ответах, тем проще понять, чем больше значение, тем больше вариативности в ответах, тем больше разнообразие", + "settings.max_tokens.confirm": "Включить лимит максимальных токенов", + "settings.max_tokens.confirm_content": "Включить лимит максимальных токенов, влияет на длину результата. 
     "suggestions.title": "Предложенные вопросы",
     "thinking": "Мыслим",
     "topics.auto_rename": "Автопереименование",
@@ -289,7 +291,7 @@
     "status_processing": "Обработка",
     "title": "База знаний",
     "url_added": "URL добавлен",
-    "url_placeholder": "Введите URL",
+    "url_placeholder": "Введите URL, несколько URL через Enter",
     "urls": "URL-адреса"
   },
   "languages": {
diff --git a/src/renderer/src/i18n/locales/zh-cn.json b/src/renderer/src/i18n/locales/zh-cn.json
index 5297518d..de61f322 100644
--- a/src/renderer/src/i18n/locales/zh-cn.json
+++ b/src/renderer/src/i18n/locales/zh-cn.json
@@ -109,7 +109,7 @@
     "settings.context_count.tip": "要保留在上下文中的消息数量,数值越大,上下文越长,消耗的 token 越多。普通聊天建议 5-10",
     "settings.max": "不限",
     "settings.max_tokens": "开启消息长度限制",
-    "settings.max_tokens.tip": "单次交互所用的最大 Token 数, 会影响返回结果的长度。普通聊天建议 500-800;短文生成建议 800-2000;代码生成建议 2000-3600;长文生成建议切换模型到 4000 左右",
+    "settings.max_tokens.tip": "单次交互所用的最大 Token 数, 会影响返回结果的长度。要根据模型上下文限制来设置,否则会报错",
     "settings.reset": "重置",
     "settings.set_as_default": "应用到默认助手",
     "settings.show_line_numbers": "代码显示行号",
@@ -117,6 +117,8 @@
     "settings.temperature.tip": "模型生成文本的随机程度。值越大,回复内容越赋有多样性、创造性、随机性;设为 0 根据事实回答。日常聊天建议设置为 0.7",
     "settings.top_p": "Top-P",
     "settings.top_p.tip": "默认值为 1,值越小,AI 生成的内容越单调,也越容易理解;值越大,AI 回复的词汇围越大,越多样化",
+    "settings.max_tokens.confirm": "开启消息长度限制",
+    "settings.max_tokens.confirm_content": "开启消息长度限制后,单次交互所用的最大 Token 数, 会影响返回结果的长度。要根据模型上下文限制来设置,否则会报错",
     "suggestions.title": "建议的问题",
     "thinking": "思考中",
     "topics.auto_rename": "生成话题名",
@@ -289,7 +291,7 @@
     "status_processing": "处理中",
     "title": "知识库",
     "url_added": "网址已添加",
-    "url_placeholder": "请输入网址",
+    "url_placeholder": "请输入网址, 多个网址用回车分隔",
     "urls": "网址"
   },
   "languages": {
diff --git a/src/renderer/src/i18n/locales/zh-tw.json b/src/renderer/src/i18n/locales/zh-tw.json
index d5881414..94608835 100644
--- a/src/renderer/src/i18n/locales/zh-tw.json
+++ b/src/renderer/src/i18n/locales/zh-tw.json
@@ -109,7 +109,7 @@
     "settings.context_count.tip": "在上下文中保留的前幾則訊息。",
     "settings.max": "最大",
     "settings.max_tokens": "啟用最大 Token 限制",
-    "settings.max_tokens.tip": "模型可以生成的最大 Token 數。普通聊天建議 500-800。短文生成建議 800-2000。代碼生成建議 2000-3600。長文生成建議超過 4000。",
+    "settings.max_tokens.tip": "模型可以生成的最大 Token 數。要根據模型上下文限制來設置,否則會報錯",
     "settings.reset": "重置",
     "settings.set_as_default": "設為預設助手",
     "settings.show_line_numbers": "代码顯示行號",
@@ -117,6 +117,8 @@
     "settings.temperature.tip": "模型產生文字的隨機程度。數值越高,回應內容越具多樣性、創意性及隨機性;設定為 0 則會依據事實回答。一般聊天建議設定為 0.7",
     "settings.top_p": "Top-P",
     "settings.top_p.tip": "模型生成文本的隨機程度。值越小,AI 生成的內容越單調,也越容易理解;值越大,AI 回覆的詞彙範圍越大,越多樣化",
+    "settings.max_tokens.confirm": "啟用消息長度限制",
+    "settings.max_tokens.confirm_content": "啟用消息長度限制後,單次交互所用的最大 Token 數, 會影響返回結果的長度。要根據模型上下文限制來設置,否則會報錯",
     "suggestions.title": "建議的問題",
     "thinking": "思考中",
     "topics.auto_rename": "自動重新命名",
@@ -289,7 +291,7 @@
     "status_processing": "處理中",
     "title": "知識庫",
     "url_added": "網址已添加",
-    "url_placeholder": "請輸入網址",
+    "url_placeholder": "請輸入網址, 多個網址用回車分隔",
     "urls": "網址"
   },
   "languages": {
diff --git a/src/renderer/src/pages/home/Tabs/SettingsTab.tsx b/src/renderer/src/pages/home/Tabs/SettingsTab.tsx
index 146799b4..2df3ff66 100644
--- a/src/renderer/src/pages/home/Tabs/SettingsTab.tsx
+++ b/src/renderer/src/pages/home/Tabs/SettingsTab.tsx
@@ -30,6 +30,7 @@ import {
   setShowMessageDivider
 } from '@renderer/store/settings'
 import { Assistant, AssistantSettings, ThemeMode, TranslateLanguageVarious } from '@renderer/types'
+import { modalConfirm } from '@renderer/utils'
 import { Col, InputNumber, Row, Select, Slider, Switch, Tooltip } from 'antd'
 import { FC, useEffect, useState } from 'react'
 import { useTranslation } from 'react-i18next'
@@ -177,7 +178,7 @@ const SettingsTab: FC = (props) => {
           />
-
+
@@ -187,25 +188,39 @@ const SettingsTab: FC = (props) => {
            {
+            onChange={async (enabled) => {
+              if (enabled) {
+                const confirmed = await modalConfirm({
+                  title: t('chat.settings.max_tokens.confirm'),
+                  content: t('chat.settings.max_tokens.confirm_content'),
+                  okButtonProps: {
+                    danger: true
+                  }
+                })
+                if (!confirmed) return
+              }
              setEnableMaxTokens(enabled)
              onUpdateAssistantSettings({ enableMaxTokens: enabled })
            }}
          />
-
-
-
-
-
+          {enableMaxTokens && (
+
+
+                value && setMaxTokens(value)}
+                onBlur={() => onMaxTokensChange(maxTokens)}
+                style={{ width: '100%' }}
+              />
+
+
+          )}
          {t('settings.messages.title')}
diff --git a/src/renderer/src/pages/knowledge/KnowledgeContent.tsx b/src/renderer/src/pages/knowledge/KnowledgeContent.tsx
index 0c1f7146..78b121ad 100644
--- a/src/renderer/src/pages/knowledge/KnowledgeContent.tsx
+++ b/src/renderer/src/pages/knowledge/KnowledgeContent.tsx
@@ -105,26 +105,32 @@ const KnowledgeContent: FC = ({ selectedBase }) => {
       return
     }

-    const url = await PromptPopup.show({
+    const urlInput = await PromptPopup.show({
       title: t('knowledge.add_url'),
       message: '',
       inputPlaceholder: t('knowledge.url_placeholder'),
       inputProps: {
-        maxLength: 1000,
-        rows: 1
+        rows: 10,
+        onPressEnter: () => {}
       }
     })

-    if (url) {
-      try {
-        new URL(url)
-        if (urlItems.find((item) => item.content === url)) {
-          message.success(t('knowledge.url_added'))
-          return
+    if (urlInput) {
+      // Split input by newlines and filter out empty lines
+      const urls = urlInput.split('\n').filter((url) => url.trim())
+
+      for (const url of urls) {
+        try {
+          new URL(url.trim())
+          if (!urlItems.find((item) => item.content === url.trim())) {
+            addUrl(url.trim())
+          } else {
+            message.success(t('knowledge.url_added'))
+          }
+        } catch (e) {
+          // Skip invalid URLs silently
+          continue
         }
-        addUrl(url)
-      } catch (e) {
-        console.error('Invalid URL:', url)
       }
     }
   }
diff --git a/src/renderer/src/pages/settings/AssistantSettings/AssistantModelSettings.tsx b/src/renderer/src/pages/settings/AssistantSettings/AssistantModelSettings.tsx
index 0dcde973..a50f4aad 100644
--- a/src/renderer/src/pages/settings/AssistantSettings/AssistantModelSettings.tsx
+++ b/src/renderer/src/pages/settings/AssistantSettings/AssistantModelSettings.tsx
@@ -5,6 +5,7 @@ import SelectModelPopup from '@renderer/components/Popups/SelectModelPopup'
 import { DEFAULT_CONTEXTCOUNT, DEFAULT_TEMPERATURE } from '@renderer/config/constant'
 import { SettingRow } from '@renderer/pages/settings'
 import { Assistant, AssistantSettingCustomParameters, AssistantSettings } from '@renderer/types'
+import { modalConfirm } from '@renderer/utils'
 import { Button, Col, Divider, Input, InputNumber, Radio, Row, Select, Slider, Switch, Tooltip } from 'antd'
 import { isNull } from 'lodash'
 import { FC, useEffect, useRef, useState } from 'react'
@@ -329,34 +330,30 @@ const AssistantModelSettings: FC = ({ assistant, updateAssistant, updateA
            {
+            onChange={async (enabled) => {
+              if (enabled) {
+                const confirmed = await modalConfirm({
+                  title: t('chat.settings.max_tokens.confirm'),
+                  content: t('chat.settings.max_tokens.confirm_content'),
+                  okButtonProps: {
+                    danger: true
+                  }
+                })
+                if (!confirmed) return
+              }
+              setEnableMaxTokens(enabled)
              updateAssistantSettings({ enableMaxTokens: enabled })
            }}
          />
          {enableMaxTokens && (
-
-
-
-
-
+
+
      provider.id !== 'graphrag-kylin-mountain')
-      return state
-    },
-    '67': (state: RootState) => {
+      if (state.minapps) {
        const aistudio = DEFAULT_MIN_APPS.find((app) => app.id === 'aistudio')
        if (aistudio) {
          state.minapps.enabled.push(aistudio)
        }
      }
+      return state
    }
  }
diff --git a/src/renderer/src/utils/index.ts b/src/renderer/src/utils/index.ts
index 8719b8d5..94f8402c 100644
--- a/src/renderer/src/utils/index.ts
+++ b/src/renderer/src/utils/index.ts
@@ -1,4 +1,5 @@
 import { FileType, Model } from '@renderer/types'
+import { ModalFuncProps } from 'antd/es/modal/interface'
 import imageCompression from 'browser-image-compression'
 import html2canvas from 'html2canvas'
 // @ts-ignore next-line
@@ -397,4 +398,15 @@ export function isMiniWindow() {
   return window.location.hash === '#/mini'
 }
 
+export function modalConfirm(params: ModalFuncProps) {
+  return new Promise((resolve) => {
+    window.modal.confirm({
+      centered: true,
+      ...params,
+      onOk: () => resolve(true),
+      onCancel: () => resolve(false)
+    })
+  })
+}
+
 export { classNames }
diff --git a/yarn.lock b/yarn.lock
index e5f54689..21f845a7 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -3050,7 +3050,7 @@ __metadata:
     markdown-it: "npm:^14.1.0"
     mime: "npm:^4.0.4"
     officeparser: "npm:^4.1.1"
-    openai: "patch:openai@npm%3A4.76.2#~/.yarn/patches/openai-npm-4.76.2-8ff1374617.patch"
+    openai: "patch:openai@npm%3A4.77.3#~/.yarn/patches/openai-npm-4.77.3-59c6d42e7a.patch"
     prettier: "npm:^3.2.4"
     react: "npm:^18.2.0"
     react-dom: "npm:^18.2.0"
@@ -10188,29 +10188,7 @@ __metadata:
   languageName: node
   linkType: hard
 
-"openai@npm:4.76.2":
-  version: 4.76.2
-  resolution: "openai@npm:4.76.2"
-  dependencies:
-    "@types/node": "npm:^18.11.18"
-    "@types/node-fetch": "npm:^2.6.4"
-    abort-controller: "npm:^3.0.0"
-    agentkeepalive: "npm:^4.2.1"
-    form-data-encoder: "npm:1.7.2"
-    formdata-node: "npm:^4.3.2"
-    node-fetch: "npm:^2.6.7"
-  peerDependencies:
-    zod: ^3.23.8
-  peerDependenciesMeta:
-    zod:
-      optional: true
-  bin:
-    openai: bin/cli
-  checksum: 10c0/d78af0c2dd64ad24a2ce92deb92656b742b8120012e554fd562095f237ce06d84764217e040b437f955c6f26b7675f9321862a8937c8537f28ed6ead4b674559
-  languageName: node
-  linkType: hard
-
-"openai@npm:^4.77.0":
+"openai@npm:4.77.3":
   version: 4.77.3
   resolution: "openai@npm:4.77.3"
   dependencies:
@@ -10232,9 +10210,9 @@ __metadata:
   languageName: node
   linkType: hard
 
-"openai@patch:openai@npm%3A4.76.2#~/.yarn/patches/openai-npm-4.76.2-8ff1374617.patch":
-  version: 4.76.2
-  resolution: "openai@patch:openai@npm%3A4.76.2#~/.yarn/patches/openai-npm-4.76.2-8ff1374617.patch::version=4.76.2&hash=849914"
+"openai@patch:openai@npm%3A4.77.3#~/.yarn/patches/openai-npm-4.77.3-59c6d42e7a.patch":
+  version: 4.77.3
+  resolution: "openai@patch:openai@npm%3A4.77.3#~/.yarn/patches/openai-npm-4.77.3-59c6d42e7a.patch::version=4.77.3&hash=c5d42a"
   dependencies:
     "@types/node": "npm:^18.11.18"
     "@types/node-fetch": "npm:^2.6.4"
     abort-controller: "npm:^3.0.0"
     agentkeepalive: "npm:^4.2.1"
     form-data-encoder: "npm:1.7.2"
     formdata-node: "npm:^4.3.2"
     node-fetch: "npm:^2.6.7"
   peerDependencies:
     zod: ^3.23.8
   peerDependenciesMeta:
     zod:
       optional: true
   bin:
     openai: bin/cli
-  checksum: 10c0/09b66166fd3661b9b418addf0f3a7bc75434ee9163d4b3f89bb41e70cd9a366943c2e324ef5a05cf375831b27aee6516b96779b78f1e3d605ae605865384c1c2
+  checksum: 10c0/c3449d3d9945675d7debc4e3a68f58093400985e5275b29e4eb5610300ad3fa4589e527fda526ce770f9a945d7a1d03ffb33e34a3566f996a6947125aa761b1e
   languageName: node
   linkType: hard