feat: add streaming output options #93
commit 8f68aca24c
parent 93710c1e78
@@ -41,7 +41,7 @@
   --color-hover: rgba(40, 40, 40, 1);
   --color-active: rgba(55, 55, 55, 1);

-  --navbar-background-mac: rgba(30, 30, 30, 0.4);
+  --navbar-background-mac: rgba(30, 30, 30, 0.6);
   --navbar-background: rgba(30, 30, 30);

   --navbar-height: 40px;
@@ -92,7 +92,7 @@ body[theme-mode='light'] {
   --color-hover: var(--color-white-mute);
   --color-active: var(--color-white-soft);

-  --navbar-background-mac: rgba(255, 255, 255, 0.4);
+  --navbar-background-mac: rgba(255, 255, 255, 0.6);
   --navbar-background: rgba(255, 255, 255);
 }
@@ -116,6 +116,9 @@ const resources = {
       abbr: 'Assistant',
       search: 'Search assistants...'
     },
+    model: {
+      stream_output: 'Stream Output'
+    },
     files: {
       title: 'Files',
       file: 'File',
@@ -398,6 +401,9 @@ const resources = {
      abbr: '助手',
      search: '搜索助手'
    },
+    model: {
+      stream_output: '流式输出'
+    },
    files: {
      title: '文件',
      file: '文件',
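Note: the new `model.stream_output` key is resolved through react-i18next, exactly as the settings tab below does with `t('model.stream_output')`. A minimal sketch of consuming the key (the wrapper component here is hypothetical; only the hook and key path come from this diff):

```tsx
import { FC } from 'react'
import { useTranslation } from 'react-i18next'

// Hypothetical label component: renders 'Stream Output' in the English locale
// and '流式输出' in the Chinese locale, depending on the active language.
const StreamOutputLabel: FC = () => {
  const { t } = useTranslation()
  return <label>{t('model.stream_output')}</label>
}

export default StreamOutputLabel
```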
@@ -27,10 +27,11 @@ interface Props {
   message: Message
   index?: number
   total?: number
+  lastMessage?: boolean
   onDeleteMessage?: (message: Message) => void
 }

-const MessageItem: FC<Props> = ({ message, index, onDeleteMessage }) => {
+const MessageItem: FC<Props> = ({ message, index, lastMessage, onDeleteMessage }) => {
   const avatar = useAvatar()
   const { t } = useTranslation()
   const { assistant, setModel } = useAssistant(message.assistantId)
@@ -38,7 +39,7 @@ const MessageItem: FC<Props> = ({ message, index, onDeleteMessage }) => {
   const { userName, showMessageDivider, messageFont, fontSize } = useSettings()
   const { theme } = useTheme()

-  const isLastMessage = index === 0
+  const isLastMessage = lastMessage || index === 0
   const isAssistantMessage = message.role === 'assistant'

   const getUserName = useCallback(() => {
@@ -106,6 +107,7 @@ const MessageItem: FC<Props> = ({ message, index, onDeleteMessage }) => {
       </MessageHeader>
       <MessageContentContainer style={{ fontFamily, fontSize }}>
         <MessageContent message={message} />
+        {!lastMessage && (
         <MessageFooter style={{ border: messageBorder, flexDirection: isLastMessage ? 'row-reverse' : undefined }}>
           <MessgeTokens message={message} />
           <MessageMenubar
@@ -118,6 +120,7 @@ const MessageItem: FC<Props> = ({ message, index, onDeleteMessage }) => {
             onDeleteMessage={onDeleteMessage}
           />
         </MessageFooter>
+        )}
       </MessageContentContainer>
     </MessageContainer>
   )
@@ -189,7 +189,7 @@ const Messages: FC<Props> = ({ assistant, topic, setActiveTopic }) => {
   return (
     <Container id="messages" key={assistant.id} ref={containerRef}>
       <Suggestions assistant={assistant} messages={messages} lastMessage={lastMessage} />
-      {lastMessage && <MessageItem key={lastMessage.id} message={lastMessage} />}
+      {lastMessage && <MessageItem key={lastMessage.id} message={lastMessage} lastMessage />}
      {reverse([...messages]).map((message, index) => (
        <MessageItem key={message.id} message={message} index={index} onDeleteMessage={onDeleteMessage} />
      ))}
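Note: together, the two component hunks implement one pattern: the reply that is still being generated is rendered once, above the reversed history, with `lastMessage` set, and its footer (token count, menubar) stays hidden until generation finishes. A stripped-down sketch of that pattern with hypothetical placeholder components standing in for the real styled ones:

```tsx
import { FC } from 'react'

type Msg = { id: string; content: string }

// Footer is suppressed while the message is still streaming in.
const Item: FC<{ message: Msg; lastMessage?: boolean }> = ({ message, lastMessage }) => (
  <div>
    <div>{message.content}</div>
    {!lastMessage && <footer>tokens / copy / delete</footer>}
  </div>
)

// Parent renders the in-flight message first, then the finished history.
const List: FC<{ pending?: Msg; history: Msg[] }> = ({ pending, history }) => (
  <div>
    {pending && <Item message={pending} lastMessage />}
    {history.map((m) => (
      <Item key={m.id} message={m} />
    ))}
  </div>
)

export { Item, List }
```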
@@ -29,6 +29,7 @@ const SettingsTab: FC<Props> = (props) => {
   const [contextCount, setConextCount] = useState(assistant?.settings?.contextCount ?? DEFAULT_CONEXTCOUNT)
   const [enableMaxTokens, setEnableMaxTokens] = useState(assistant?.settings?.enableMaxTokens ?? false)
   const [maxTokens, setMaxTokens] = useState(assistant?.settings?.maxTokens ?? 0)
+  const [streamOutput, setStreamOutput] = useState(assistant?.settings?.streamOutput ?? true)
   const [fontSizeValue, setFontSizeValue] = useState(fontSize)
   const { t } = useTranslation()
@@ -48,7 +49,8 @@ const SettingsTab: FC<Props> = (props) => {
       temperature: settings.temperature ?? temperature,
       contextCount: settings.contextCount ?? contextCount,
       enableMaxTokens: settings.enableMaxTokens ?? enableMaxTokens,
-      maxTokens: settings.maxTokens ?? maxTokens
+      maxTokens: settings.maxTokens ?? maxTokens,
+      streamOutput: settings.streamOutput ?? streamOutput
     })
   }
@@ -80,7 +82,8 @@ const SettingsTab: FC<Props> = (props) => {
         temperature: DEFAULT_TEMPERATURE,
         contextCount: DEFAULT_CONEXTCOUNT,
         enableMaxTokens: false,
-        maxTokens: DEFAULT_MAX_TOKENS
+        maxTokens: DEFAULT_MAX_TOKENS,
+        streamOutput: true
       }
     })
   }
@@ -90,6 +93,7 @@ const SettingsTab: FC<Props> = (props) => {
     setConextCount(assistant?.settings?.contextCount ?? DEFAULT_CONEXTCOUNT)
     setEnableMaxTokens(assistant?.settings?.enableMaxTokens ?? false)
     setMaxTokens(assistant?.settings?.maxTokens ?? DEFAULT_MAX_TOKENS)
+    setStreamOutput(assistant?.settings?.streamOutput ?? true)
   }, [assistant])

   return (
@@ -137,6 +141,18 @@ const SettingsTab: FC<Props> = (props) => {
           />
         </Col>
       </Row>
+      <SettingRow>
+        <SettingRowTitleSmall>{t('model.stream_output')}</SettingRowTitleSmall>
+        <Switch
+          size="small"
+          checked={streamOutput}
+          onChange={(checked) => {
+            setStreamOutput(checked)
+            onUpdateAssistantSettings({ streamOutput: checked })
+          }}
+        />
+      </SettingRow>
+      <SettingDivider />
       <Row align="middle" justify="space-between">
         <HStack alignItems="center">
           <Label>{t('chat.settings.max_tokens')}</Label>
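Note: the Switch writes the choice back through `onUpdateAssistantSettings`, so the preference is persisted with the assistant rather than held only in component state. How that callback merges a partial patch is not shown in this diff; a plausible sketch, with the types trimmed down locally and the `save` callback assumed:

```ts
// Minimal local shapes; the real types live in the app's type definitions.
type AssistantSettings = {
  temperature: number
  maxTokens: number | undefined
  enableMaxTokens: boolean
  streamOutput: boolean
}
type Assistant = { id: string; settings?: AssistantSettings }

// Assumed merge helper: shallow-merges a partial settings patch into the assistant
// and hands the result to whatever persistence layer the app uses.
const applySettingsPatch = (
  assistant: Assistant,
  patch: Partial<AssistantSettings>,
  save: (a: Assistant) => void
) => {
  save({ ...assistant, settings: { ...(assistant.settings as AssistantSettings), ...patch } })
}

// e.g. applySettingsPatch(assistant, { streamOutput: false }, save)
```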
@@ -55,7 +55,7 @@ export default class AnthropicProvider extends BaseProvider {
   public async completions({ messages, assistant, onChunk, onFilterMessages }: CompletionsParams) {
     const defaultModel = getDefaultModel()
     const model = assistant.model || defaultModel
-    const { contextCount, maxTokens } = getAssistantSettings(assistant)
+    const { contextCount, maxTokens, streamOutput } = getAssistantSettings(assistant)

     const userMessagesParams: MessageParam[] = []
     const _messages = filterMessages(filterContextMessages(takeRight(messages, contextCount + 2)))
@@ -72,6 +72,21 @@ export default class AnthropicProvider extends BaseProvider {
       userMessages.shift()
     }

+    if (!streamOutput) {
+      const message = await this.sdk.messages.create({
+        model: model.id,
+        messages: userMessages,
+        max_tokens: maxTokens || DEFAULT_MAX_TOKENS,
+        temperature: assistant?.settings?.temperature,
+        system: assistant.prompt,
+        stream: false
+      })
+      return onChunk({
+        text: message.content[0].type === 'text' ? message.content[0].text : '',
+        usage: message.usage
+      })
+    }
+
     return new Promise<void>((resolve, reject) => {
       const stream = this.sdk.messages
         .stream({
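Note: with streaming disabled, the provider makes one blocking `messages.create` call and forwards the whole reply to `onChunk` in a single shot, so the rest of the UI pipeline needs no separate code path. A self-contained sketch of the two modes with the Anthropic Node SDK (model id, prompt, and token limit are placeholders, not values from this repo):

```ts
import Anthropic from '@anthropic-ai/sdk'

const client = new Anthropic() // reads ANTHROPIC_API_KEY from the environment

async function complete(prompt: string, stream: boolean, onChunk: (text: string) => void) {
  if (!stream) {
    // Single request/response: the full answer arrives at once.
    const message = await client.messages.create({
      model: 'claude-3-5-sonnet-20240620', // placeholder model id
      max_tokens: 1024,
      messages: [{ role: 'user', content: prompt }]
    })
    onChunk(message.content[0].type === 'text' ? message.content[0].text : '')
    return
  }

  // Streaming: text deltas are delivered incrementally via events.
  await client.messages
    .stream({
      model: 'claude-3-5-sonnet-20240620', // placeholder model id
      max_tokens: 1024,
      messages: [{ role: 'user', content: prompt }]
    })
    .on('text', (text) => onChunk(text))
    .finalMessage()
}
```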
@@ -57,7 +57,7 @@ export default class GeminiProvider extends BaseProvider {
   public async completions({ messages, assistant, onChunk, onFilterMessages }: CompletionsParams) {
     const defaultModel = getDefaultModel()
     const model = assistant.model || defaultModel
-    const { contextCount, maxTokens } = getAssistantSettings(assistant)
+    const { contextCount, maxTokens, streamOutput } = getAssistantSettings(assistant)

     const userMessages = filterMessages(filterContextMessages(takeRight(messages, contextCount + 1)))
     onFilterMessages(userMessages)
@@ -78,19 +78,32 @@ export default class GeminiProvider extends BaseProvider {
         temperature: assistant?.settings?.temperature
       },
       safetySettings: [
-        { category: HarmCategory.HARM_CATEGORY_HATE_SPEECH, threshold: HarmBlockThreshold.BLOCK_ONLY_HIGH },
+        { category: HarmCategory.HARM_CATEGORY_HATE_SPEECH, threshold: HarmBlockThreshold.BLOCK_NONE },
         {
           category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
-          threshold: HarmBlockThreshold.BLOCK_ONLY_HIGH
+          threshold: HarmBlockThreshold.BLOCK_NONE
         },
-        { category: HarmCategory.HARM_CATEGORY_HARASSMENT, threshold: HarmBlockThreshold.BLOCK_ONLY_HIGH },
-        { category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold: HarmBlockThreshold.BLOCK_ONLY_HIGH }
+        { category: HarmCategory.HARM_CATEGORY_HARASSMENT, threshold: HarmBlockThreshold.BLOCK_NONE },
+        { category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold: HarmBlockThreshold.BLOCK_NONE }
       ]
     })

     const chat = geminiModel.startChat({ history })

     const messageContents = await this.getMessageContents(userLastMessage!)

+    if (!streamOutput) {
+      const { response } = await chat.sendMessage(messageContents.parts)
+      onChunk({
+        text: response.candidates?.[0].content.parts[0].text,
+        usage: {
+          prompt_tokens: response.usageMetadata?.promptTokenCount || 0,
+          completion_tokens: response.usageMetadata?.candidatesTokenCount || 0,
+          total_tokens: response.usageMetadata?.totalTokenCount || 0
+        }
+      })
+      return
+    }
+
     const userMessagesStream = await chat.sendMessageStream(messageContents.parts)

     for await (const chunk of userMessagesStream.stream) {
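Note: the non-streaming Gemini path also has to translate the SDK's `usageMetadata` into the OpenAI-style `usage` object the rest of the app expects. A minimal sketch of just that mapping, with the response type trimmed down locally to the fields the hunk actually reads:

```ts
type Usage = { prompt_tokens: number; completion_tokens: number; total_tokens: number }

// Only the part of a Gemini response this mapping needs.
type GeminiResponse = {
  usageMetadata?: { promptTokenCount?: number; candidatesTokenCount?: number; totalTokenCount?: number }
}

// Maps Gemini's usageMetadata onto the OpenAI-style usage fields used elsewhere in the app.
function toUsage(response: GeminiResponse): Usage {
  return {
    prompt_tokens: response.usageMetadata?.promptTokenCount || 0,
    completion_tokens: response.usageMetadata?.candidatesTokenCount || 0,
    total_tokens: response.usageMetadata?.totalTokenCount || 0
  }
}
```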
@@ -28,7 +28,7 @@ export default class OpenAIProvider extends BaseProvider {
   }

   private isSupportStreamOutput(modelId: string): boolean {
-    if (this.provider.id === 'openai' && modelId.includes('o1-')) {
+    if (modelId.includes('o1-')) {
       return false
     }
     return true
@@ -112,7 +112,7 @@ export default class OpenAIProvider extends BaseProvider {
   async completions({ messages, assistant, onChunk, onFilterMessages }: CompletionsParams): Promise<void> {
     const defaultModel = getDefaultModel()
     const model = assistant.model || defaultModel
-    const { contextCount, maxTokens } = getAssistantSettings(assistant)
+    const { contextCount, maxTokens, streamOutput } = getAssistantSettings(assistant)

     const systemMessage = assistant.prompt ? { role: 'system', content: assistant.prompt } : undefined
     const userMessages: ChatCompletionMessageParam[] = []
@@ -124,7 +124,7 @@ export default class OpenAIProvider extends BaseProvider {
       userMessages.push(await this.getMessageParam(message, model))
     }

-    const isSupportStreamOutput = this.isSupportStreamOutput(model.id)
+    const isSupportStreamOutput = streamOutput && this.isSupportStreamOutput(model.id)

     // @ts-ignore key is not typed
     const stream = await this.sdk.chat.completions.create({
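Note: for OpenAI-compatible providers the user preference is combined with model capability (`o1-*` models do not support streaming), and the same `create` call presumably takes `stream: isSupportStreamOutput` and handles both response shapes downstream of this hunk, which the diff does not show. With the openai Node SDK the two paths typically look like this sketch (model ids and prompt are placeholders, not values from this repo):

```ts
import OpenAI from 'openai'

const client = new OpenAI() // reads OPENAI_API_KEY from the environment

async function complete(prompt: string, stream: boolean, onChunk: (text: string) => void) {
  if (stream) {
    // Streaming: an async iterable of chunks, each carrying a small delta.
    const events = await client.chat.completions.create({
      model: 'gpt-4o-mini', // placeholder model id
      messages: [{ role: 'user', content: prompt }],
      stream: true
    })
    for await (const chunk of events) {
      onChunk(chunk.choices[0]?.delta?.content || '')
    }
    return
  }

  // Non-streaming (e.g. o1-* models): one response carrying the whole message and usage.
  const response = await client.chat.completions.create({
    model: 'o1-mini', // placeholder model id
    messages: [{ role: 'user', content: prompt }]
  })
  onChunk(response.choices[0]?.message?.content || '')
}
```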
@@ -80,7 +80,8 @@ export const getAssistantSettings = (assistant: Assistant): AssistantSettings => {
     contextCount: contextCount === 20 ? 100000 : contextCount,
     temperature: assistant?.settings?.temperature ?? DEFAULT_TEMPERATURE,
     enableMaxTokens: assistant?.settings?.enableMaxTokens ?? false,
-    maxTokens: getAssistantMaxTokens()
+    maxTokens: getAssistantMaxTokens(),
+    streamOutput: assistant?.settings?.streamOutput ?? true
   }
 }
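Note: the `?? true` default matters here: assistants saved before this change have no `streamOutput` field and should stream by default, while an explicit `false` chosen by the user must survive. Nullish coalescing gives exactly that, where `||` would not:

```ts
// ?? only falls back on null/undefined, so an explicit false is preserved.
const a = undefined ?? true // true  (legacy assistant, no value stored)
const b = false ?? true     // false (user turned streaming off)
const c = false || true     // true  (|| would silently re-enable streaming)
```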
@@ -16,6 +16,7 @@ export type AssistantSettings = {
   temperature: number
   maxTokens: number | undefined
   enableMaxTokens: boolean
+  streamOutput: boolean
 }

 export type Message = {