feat: Improved chat UI with context handling and filtering #43
- Updated default context count (DEFAULT_CONEXTCOUNT) from 5 to 6.
- Updated string translations for the English and Chinese locales.
- Added an onNewContext handler to the Inputbar component and displayed the current context count next to the estimated token count.
- Added a "new context" divider rendered for messages of type 'clear'.
- Emitted the context count together with the estimated token count on the ESTIMATED_TOKEN_COUNT event.
- Added filterContextMessages and getContextCount helpers and updated filterMessages to skip 'clear' messages.
- Filtered user messages with filterContextMessages and filterMessages in the Anthropic, Gemini, and OpenAI providers.
- Passed messages directly to AI.completions (providers now do the filtering) and filtered messages before AI.suggestions.
- Added the NEW_CONTEXT event name.
- Added 'clear' as a valid value for the Message type field.
This commit is contained in:
parent b31f518fca
commit 591bb45a4e
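The bullets above describe one feature end to end: the user can start a fresh context without deleting earlier messages. Below is a minimal sketch of the consumer side, assuming the helpers behave as defined in the messages service later in this diff; the function and its name are illustrative glue, not code from the commit.

```ts
import { filterContextMessages, filterMessages, getContextCount } from '@renderer/services/messages'
import type { Assistant, Message } from '@renderer/types'

// When the Inputbar toolbar button fires EVENT_NAMES.NEW_CONTEXT, the Messages list
// appends a message of type 'clear' (rendered as a dashed divider). Everything at or
// before that marker is then ignored when building provider requests and when
// counting the remaining context.
function usableContext(assistant: Assistant, messages: Message[]) {
  const visible = filterContextMessages(messages) // messages after the last 'clear'
  const request = filterMessages(visible) // also drops 'clear'/'@' markers and empty content
  const count = getContextCount(assistant, messages) // what the Inputbar badge displays

  return { request, count }
}
```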
@@ -1,5 +1,5 @@
 export const DEFAULT_TEMPERATURE = 0.7
-export const DEFAULT_CONEXTCOUNT = 5
+export const DEFAULT_CONEXTCOUNT = 6
 export const DEFAULT_MAX_TOKENS = 4096
 export const FONT_FAMILY =
   "Ubuntu, -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Cantarell, 'Open Sans', 'Helvetica Neue', sans-serif"
@@ -71,11 +71,12 @@ const resources = {
       'topics.list': 'Topic List',
       'input.new_topic': 'New Topic',
       'input.topics': ' Topics ',
-      'input.clear': 'Clear',
+      'input.clear': 'Clear Messages',
+      'input.new.context': 'Clear Context',
       'input.expand': 'Expand',
       'input.collapse': 'Collapse',
       'input.clear.title': 'Clear all messages?',
-      'input.clear.content': 'Are you sure to clear all messages?',
+      'input.clear.content': 'Do you want to clear all messages of the current topic?',
       'input.placeholder': 'Type your message here...',
       'input.send': 'Send',
       'input.pause': 'Pause',
@@ -95,7 +96,8 @@ const resources = {
       'settings.set_as_default': 'Apply to default assistant',
       'settings.max': 'Max',
       'suggestions.title': 'Suggested Questions',
-      'add.assistant.title': 'Add Assistant'
+      'add.assistant.title': 'Add Assistant',
+      'message.new.context': 'New Context'
     },
     agents: {
       title: 'Agents',
@@ -320,11 +322,12 @@ const resources = {
       'topics.list': '话题列表',
       'input.new_topic': '新话题',
       'input.topics': ' 话题 ',
-      'input.clear': '清除',
+      'input.clear': '清除会话消息',
+      'input.new.context': '清除上下文',
       'input.expand': '展开',
       'input.collapse': '收起',
-      'input.clear.title': '清除所有消息?',
-      'input.clear.content': '确定要清除所有消息吗?',
+      'input.clear.title': '清除消息?',
+      'input.clear.content': '确定要清除当前会话所有消息吗?',
       'input.placeholder': '在这里输入消息...',
       'input.send': '发送',
       'input.pause': '暂停',
@@ -345,7 +348,8 @@ const resources = {
       'settings.set_as_default': '应用到默认助手',
       'settings.max': '不限',
       'suggestions.title': '建议的问题',
-      'add.assistant.title': '添加智能体'
+      'add.assistant.title': '添加智能体',
+      'message.new.context': '清除上下文'
     },
     agents: {
       title: '智能体',
@@ -8,7 +8,6 @@ import {
   PauseCircleOutlined,
   QuestionCircleOutlined
 } from '@ant-design/icons'
-import { DEFAULT_CONEXTCOUNT } from '@renderer/config/constant'
 import { useAssistant } from '@renderer/hooks/useAssistant'
 import { useSettings } from '@renderer/hooks/useSettings'
 import { useShowTopics } from '@renderer/hooks/useStore'
@@ -44,6 +43,7 @@ const Inputbar: FC<Props> = ({ assistant, setActiveTopic }) => {
   const { sendMessageShortcut, showInputEstimatedTokens, fontSize } = useSettings()
   const [expended, setExpend] = useState(false)
   const [estimateTokenCount, setEstimateTokenCount] = useState(0)
+  const [contextCount, setContextCount] = useState(0)
   const generating = useAppSelector((state) => state.runtime.generating)
   const textareaRef = useRef<TextAreaRef>(null)
   const [files, setFiles] = useState<File[]>([])
@@ -130,6 +130,14 @@ const Inputbar: FC<Props> = ({ assistant, setActiveTopic }) => {
     store.dispatch(setGenerating(false))
   }
 
+  const onNewContext = () => {
+    if (generating) {
+      onPause()
+      return
+    }
+    EventEmitter.emit(EVENT_NAMES.NEW_CONTEXT)
+  }
+
   const resizeTextArea = () => {
     const textArea = textareaRef.current?.resizableTextArea?.textArea
     if (textArea) {
@@ -178,7 +186,10 @@ const Inputbar: FC<Props> = ({ assistant, setActiveTopic }) => {
         setText(message.content)
         textareaRef.current?.focus()
       }),
-      EventEmitter.on(EVENT_NAMES.ESTIMATED_TOKEN_COUNT, _setEstimateTokenCount)
+      EventEmitter.on(EVENT_NAMES.ESTIMATED_TOKEN_COUNT, ({ tokensCount, contextCount }) => {
+        _setEstimateTokenCount(tokensCount)
+        setContextCount(contextCount)
+      })
     ]
     return () => unsubscribes.forEach((unsub) => unsub())
   }, [])
@@ -212,6 +223,11 @@ const Inputbar: FC<Props> = ({ assistant, setActiveTopic }) => {
             <FormOutlined />
           </ToolbarButton>
         </Tooltip>
+        <Tooltip placement="top" title={t('chat.input.new.context')} arrow>
+          <ToolbarButton type="text" onClick={onNewContext}>
+            <i className="iconfont icon-grid-row-2copy" />
+          </ToolbarButton>
+        </Tooltip>
         <Tooltip placement="top" title={t('chat.input.clear')} arrow>
           <Popconfirm
             title={t('chat.input.clear.content')}
@@ -247,7 +263,7 @@ const Inputbar: FC<Props> = ({ assistant, setActiveTopic }) => {
         <TextCount>
           <Tooltip title={t('chat.input.context_count.tip') + ' | ' + t('chat.input.estimated_tokens.tip')}>
             <StyledTag>
-              {assistant?.settings?.contextCount ?? DEFAULT_CONEXTCOUNT}
+              {contextCount}
              <Divider type="vertical" style={{ marginTop: 2, marginLeft: 5, marginRight: 5 }} />↑{inputTokenCount}
              <span style={{ margin: '0 2px' }}>/</span>
              {estimateTokenCount}
@@ -19,7 +19,7 @@ import { useRuntime } from '@renderer/hooks/useStore'
 import { EVENT_NAMES, EventEmitter } from '@renderer/services/event'
 import { Message, Model } from '@renderer/types'
 import { firstLetter, removeLeadingEmoji } from '@renderer/utils'
-import { Alert, Avatar, Dropdown, Popconfirm, Tooltip } from 'antd'
+import { Alert, Avatar, Divider, Dropdown, Popconfirm, Tooltip } from 'antd'
 import dayjs from 'dayjs'
 import { upperFirst } from 'lodash'
 import { FC, memo, useCallback, useMemo, useState } from 'react'
@@ -130,6 +130,14 @@ const MessageItem: FC<Props> = ({ message, index, showMenu, onDeleteMessage }) =
 
   const showMiniApp = () => model?.provider && startMinAppById(model?.provider)
 
+  if (message.type === 'clear') {
+    return (
+      <Divider dashed style={{ padding: '0 20px' }}>
+        {t('chat.message.new.context')}
+      </Divider>
+    )
+  }
+
   return (
     <MessageContainer key={message.id} className="message">
       <MessageHeader>
@@ -3,7 +3,7 @@ import { useProviderByAssistant } from '@renderer/hooks/useProvider'
 import { getTopic } from '@renderer/hooks/useTopic'
 import { fetchChatCompletion, fetchMessagesSummary } from '@renderer/services/api'
 import { EVENT_NAMES, EventEmitter } from '@renderer/services/event'
-import { estimateHistoryTokenCount, filterMessages } from '@renderer/services/messages'
+import { estimateHistoryTokenCount, filterMessages, getContextCount } from '@renderer/services/messages'
 import LocalStorage from '@renderer/services/storage'
 import { Assistant, Message, Model, Topic } from '@renderer/types'
 import { getBriefInfo, runAsyncFunction, uuid } from '@renderer/utils'
@@ -89,6 +89,28 @@ const Messages: FC<Props> = ({ assistant, topic, setActiveTopic }) => {
        setMessages([])
        updateTopic({ ...topic, messages: [] })
        LocalStorage.clearTopicMessages(topic.id)
      }),
+      EventEmitter.on(EVENT_NAMES.NEW_CONTEXT, () => {
+        const lastMessage = last(messages)
+
+        if (lastMessage && lastMessage.type === 'clear') {
+          return
+        }
+
+        if (messages.length === 0) {
+          return
+        }
+
+        onSendMessage({
+          id: uuid(),
+          assistantId: assistant.id,
+          role: 'user',
+          content: '',
+          topicId: topic.id,
+          createdAt: new Date().toISOString(),
+          status: 'success',
+          type: 'clear'
+        } as Message)
+      })
     ]
     return () => unsubscribes.forEach((unsub) => unsub())
@@ -106,7 +128,10 @@ const Messages: FC<Props> = ({ assistant, topic, setActiveTopic }) => {
   }, [messages])
 
   useEffect(() => {
-    EventEmitter.emit(EVENT_NAMES.ESTIMATED_TOKEN_COUNT, estimateHistoryTokenCount(assistant, messages))
+    EventEmitter.emit(EVENT_NAMES.ESTIMATED_TOKEN_COUNT, {
+      tokensCount: estimateHistoryTokenCount(assistant, messages),
+      contextCount: getContextCount(assistant, messages)
+    })
   }, [assistant, messages])
 
   return (
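For cross-reference with the Inputbar subscriber above: the ESTIMATED_TOKEN_COUNT event now carries an object rather than a bare number. The named interface below is purely illustrative; the commit passes this shape inline without declaring a type for it.

```ts
// Illustrative only: the commit does not declare this type anywhere.
interface EstimatedTokenCountPayload {
  tokensCount: number // estimateHistoryTokenCount(assistant, messages)
  contextCount: number // getContextCount(assistant, messages)
}

// Messages emits the payload, Inputbar destructures it:
//   EventEmitter.emit(EVENT_NAMES.ESTIMATED_TOKEN_COUNT, payload)
//   EventEmitter.on(EVENT_NAMES.ESTIMATED_TOKEN_COUNT, ({ tokensCount, contextCount }) => { ... })
```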
@@ -3,6 +3,7 @@ import { MessageCreateParamsNonStreaming, MessageParam } from '@anthropic-ai/sdk
 import { DEFAULT_MAX_TOKENS } from '@renderer/config/constant'
 import { getAssistantSettings, getDefaultModel, getTopNamingModel } from '@renderer/services/assistant'
 import { EVENT_NAMES } from '@renderer/services/event'
+import { filterContextMessages, filterMessages } from '@renderer/services/messages'
 import { Assistant, Message, Provider, Suggestion } from '@renderer/types'
 import { first, sum, takeRight } from 'lodash'
 import OpenAI from 'openai'
@@ -26,7 +27,7 @@ export default class AnthropicProvider extends BaseProvider {
     const model = assistant.model || defaultModel
     const { contextCount, maxTokens } = getAssistantSettings(assistant)
 
-    const userMessages = takeRight(messages, contextCount + 2).map((message) => {
+    const userMessages = filterMessages(filterContextMessages(takeRight(messages, contextCount + 2))).map((message) => {
       return {
         role: message.role,
         content: message.content
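All three providers now build the request history with the same composition; reading it inside-out may help when reviewing. The helper below is hypothetical glue written only for illustration, not code from the commit, and it assumes the messages-service helpers defined later in this diff (Anthropic passes contextCount + 2 as the window size, Gemini and OpenAI pass contextCount + 1).

```ts
import { takeRight } from 'lodash'

import { filterContextMessages, filterMessages } from '@renderer/services/messages'
import type { Message } from '@renderer/types'

// 1. takeRight: keep only the most recent window of messages.
// 2. filterContextMessages: drop everything at or before the latest 'clear' marker.
// 3. filterMessages: drop 'clear'/'@' markers and empty messages before mapping to API params.
function buildRequestHistory(messages: Message[], windowSize: number) {
  return filterMessages(filterContextMessages(takeRight(messages, windowSize))).map((message) => ({
    role: message.role,
    content: message.content
  }))
}
```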
@@ -1,6 +1,7 @@
 import { GoogleGenerativeAI } from '@google/generative-ai'
 import { getAssistantSettings, getDefaultModel, getTopNamingModel } from '@renderer/services/assistant'
 import { EVENT_NAMES } from '@renderer/services/event'
+import { filterContextMessages, filterMessages } from '@renderer/services/messages'
 import { Assistant, Message, Provider, Suggestion } from '@renderer/types'
 import axios from 'axios'
 import { isEmpty, takeRight } from 'lodash'
@@ -25,7 +26,7 @@ export default class GeminiProvider extends BaseProvider {
     const model = assistant.model || defaultModel
     const { contextCount, maxTokens } = getAssistantSettings(assistant)
 
-    const userMessages = takeRight(messages, contextCount + 1).map((message) => {
+    const userMessages = filterMessages(filterContextMessages(takeRight(messages, contextCount + 1))).map((message) => {
       return {
         role: message.role,
         content: message.content
@@ -1,6 +1,7 @@
 import { isLocalAi } from '@renderer/config/env'
 import { getAssistantSettings, getDefaultModel, getTopNamingModel } from '@renderer/services/assistant'
 import { EVENT_NAMES } from '@renderer/services/event'
+import { filterContextMessages, filterMessages } from '@renderer/services/messages'
 import { Assistant, Message, Provider, Suggestion } from '@renderer/types'
 import { fileToBase64, removeQuotes } from '@renderer/utils'
 import { first, takeRight } from 'lodash'
@@ -60,7 +61,7 @@ export default class OpenAIProvider extends BaseProvider {
 
     const userMessages: ChatCompletionMessageParam[] = []
 
-    for (const message of takeRight(messages, contextCount + 1)) {
+    for (const message of filterMessages(filterContextMessages(takeRight(messages, contextCount + 1)))) {
       userMessages.push({
         role: message.role,
         content: await this.getMessageContent(message)
@@ -61,7 +61,7 @@ export async function fetchChatCompletion({
   }, 1000)
 
   try {
-    await AI.completions(filterMessages(messages), assistant, ({ text, usage }) => {
+    await AI.completions(messages, assistant, ({ text, usage }) => {
       message.content = message.content + text || ''
       message.usage = usage
       onResponse({ ...message, status: 'pending' })
@@ -153,7 +153,7 @@ export async function fetchSuggestions({
   }
 
   try {
-    return await AI.suggestions(messages, assistant)
+    return await AI.suggestions(filterMessages(messages), assistant)
   } catch (error: any) {
     return []
   }
@@ -14,5 +14,6 @@ export const EVENT_NAMES = {
   ESTIMATED_TOKEN_COUNT: 'ESTIMATED_TOKEN_COUNT',
   SHOW_CHAT_SETTINGS: 'SHOW_CHAT_SETTINGS',
   SHOW_TOPIC_SIDEBAR: 'SHOW_TOPIC_SIDEBAR',
-  SWITCH_TOPIC_SIDEBAR: 'SWITCH_TOPIC_SIDEBAR'
+  SWITCH_TOPIC_SIDEBAR: 'SWITCH_TOPIC_SIDEBAR',
+  NEW_CONTEXT: 'NEW_CONTEXT'
 }
@@ -1,3 +1,4 @@
+import { DEFAULT_CONEXTCOUNT } from '@renderer/config/constant'
 import { Assistant, Message } from '@renderer/types'
 import { GPTTokens } from 'gpt-tokens'
 import { isEmpty, takeRight } from 'lodash'
@@ -5,7 +6,30 @@ import { isEmpty, takeRight } from 'lodash'
 import { getAssistantSettings } from './assistant'
 
 export const filterMessages = (messages: Message[]) => {
-  return messages.filter((message) => message.type !== '@').filter((message) => !isEmpty(message.content.trim()))
+  return messages
+    .filter((message) => !['@', 'clear'].includes(message.type!))
+    .filter((message) => !isEmpty(message.content.trim()))
 }
 
+export function filterContextMessages(messages: Message[]): Message[] {
+  const clearIndex = messages.findLastIndex((message) => message.type === 'clear')
+
+  if (clearIndex === -1) {
+    return messages
+  }
+
+  return messages.slice(clearIndex + 1)
+}
+
+export function getContextCount(assistant: Assistant, messages: Message[]) {
+  const contextCount = assistant?.settings?.contextCount ?? DEFAULT_CONEXTCOUNT
+  const clearIndex = takeRight(messages, contextCount).findLastIndex((message) => message.type === 'clear')
+
+  if (clearIndex === -1) {
+    return contextCount
+  }
+
+  return contextCount - (clearIndex + 1)
+}
+
 export function estimateInputTokenCount(text: string) {
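A quick worked example may help when reviewing the two new helpers above. The sample history is invented for illustration; only the content and type fields matter here, and the casts keep the sketch compact.

```ts
import { filterContextMessages, getContextCount } from '@renderer/services/messages'
import type { Assistant, Message } from '@renderer/types'

// Invented sample history: two normal messages, a 'clear' marker, then one more message.
const history = [
  { content: 'm1', type: 'text' },
  { content: 'm2', type: 'text' },
  { content: '', type: 'clear' },
  { content: 'm3', type: 'text' }
] as Message[]

// Everything at or before the last 'clear' marker is dropped.
filterContextMessages(history) // => [{ content: 'm3', ... }]

// With contextCount = 6 (the new default), takeRight(history, 6) keeps all 4 messages,
// the marker sits at index 2, so getContextCount returns 6 - (2 + 1) = 3.
const assistant = { settings: { contextCount: 6 } } as Assistant
getContextCount(assistant, history) // => 3
```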
@@ -24,7 +48,7 @@ export function estimateHistoryTokenCount(assistant: Assistant, msgs: Message[])
     model: 'gpt-4o',
     messages: [
       { role: 'system', content: assistant.prompt },
-      ...filterMessages(takeRight(msgs, contextCount)).map((message) => ({
+      ...filterMessages(filterContextMessages(takeRight(msgs, contextCount))).map((message) => ({
         role: message.role,
         content: message.content
       }))
@@ -30,7 +30,7 @@ export type Message = {
   files?: File[]
   images?: string[]
   usage?: OpenAI.Completions.CompletionUsage
-  type?: 'text' | '@'
+  type?: 'text' | '@' | 'clear'
 }
 
 export type Topic = {