feat: Enhance image upload and model-specific message handling
- Add vision model check before image upload in Inputbar
- Implement flexible message start forcing for specific models
- Improve provider-level message routing logic
This commit is contained in:
parent
516315ac45
commit
ec0be1ff27
@ -366,7 +366,7 @@ const Inputbar: FC<Props> = ({ assistant: _assistant, setActiveTopic }) => {
|
||||
event.preventDefault()
|
||||
|
||||
if (file.path === '') {
|
||||
if (file.type.startsWith('image/')) {
|
||||
if (file.type.startsWith('image/') && isVisionModel(model)) {
|
||||
const tempFilePath = await window.api.file.create(file.name)
|
||||
const arrayBuffer = await file.arrayBuffer()
|
||||
const uint8Array = new Uint8Array(arrayBuffer)
|
||||
|
||||
@ -213,6 +213,18 @@ export default class OpenAIProvider extends BaseProvider {
|
||||
return model.id.startsWith('o1')
|
||||
}
|
||||
|
||||
/**
 * Decides whether the outgoing message list must begin with a user-role
 * message for the given model.
 *
 * @param model - The model the completion request targets.
 * @returns `true` when the first message must come from the user.
 */
private isForceUserMessageStart(model: Model) {
  // deepseek-reasoner rejects conversations that do not open with a user turn.
  // NOTE(review): the 'xirang' provider appears to share this constraint — confirm.
  return model.id === 'deepseek-reasoner' || model.provider === 'xirang'
}
|
||||
|
||||
async completions({ messages, assistant, onChunk, onFilterMessages }: CompletionsParams): Promise<void> {
|
||||
const defaultModel = getDefaultModel()
|
||||
const model = assistant.model || defaultModel
|
||||
@ -232,7 +244,7 @@ export default class OpenAIProvider extends BaseProvider {
|
||||
const _messages = filterContextMessages(takeRight(messages, contextCount + 1))
|
||||
onFilterMessages(_messages)
|
||||
|
||||
if (model.id === 'deepseek-reasoner') {
|
||||
if (this.isForceUserMessageStart(model)) {
|
||||
if (_messages[0]?.role !== 'user') {
|
||||
userMessages.push({ role: 'user', content: '' })
|
||||
}
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user