feat: improved model safety settings for GeminiProvider class
parent 42908e8834
commit b4de6292c3
@@ -7,6 +7,7 @@ import {
   InlineDataPart,
   Part,
   RequestOptions,
+  SafetySetting,
   TextPart
 } from '@google/generative-ai'
 import { isWebSearchModel } from '@renderer/config/models'
@@ -112,15 +113,39 @@ export default class GeminiProvider extends BaseProvider {
     }
   }
 
-  private getModelSafetySetting(modelId: string): HarmBlockThreshold {
-    return modelId.includes('gemini-exp-') ? HarmBlockThreshold.BLOCK_NONE : ('OFF' as HarmBlockThreshold)
+  private getSafetySettings(modelId: string): SafetySetting[] {
+    const safetyThreshold = modelId.includes('gemini-exp-')
+      ? HarmBlockThreshold.BLOCK_NONE
+      : ('OFF' as HarmBlockThreshold)
+
+    return [
+      {
+        category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
+        threshold: safetyThreshold
+      },
+      {
+        category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
+        threshold: safetyThreshold
+      },
+      {
+        category: HarmCategory.HARM_CATEGORY_HARASSMENT,
+        threshold: safetyThreshold
+      },
+      {
+        category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
+        threshold: safetyThreshold
+      },
+      {
+        category: 'HARM_CATEGORY_CIVIC_INTEGRITY' as HarmCategory,
+        threshold: safetyThreshold
+      }
+    ]
   }
 
   public async completions({ messages, assistant, onChunk, onFilterMessages }: CompletionsParams) {
     const defaultModel = getDefaultModel()
     const model = assistant.model || defaultModel
     const { contextCount, maxTokens, streamOutput } = getAssistantSettings(assistant)
-    const safetyThreshold = this.getModelSafetySetting(model.id)
 
     const userMessages = filterContextMessages(takeRight(messages, contextCount + 2))
     onFilterMessages(userMessages)
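Note (not part of the diff): the ('OFF' as HarmBlockThreshold) cast suggests the installed SDK's enum stops at BLOCK_NONE while the Gemini API also accepts an OFF value, and that the gemini-exp- models are special-cased presumably because they reject OFF. That reading is an inference from the cast, not something the commit states. A minimal standalone sketch of the same selection logic, under that assumption and with a hypothetical helper name:

import { HarmBlockThreshold } from '@google/generative-ai'

// Standalone equivalent of the threshold selection above (pickThreshold
// is a hypothetical name). Experimental models get the enum's BLOCK_NONE;
// everything else gets the string 'OFF', cast because the enum version in
// use has no such member even though the API accepts it.
function pickThreshold(modelId: string): HarmBlockThreshold {
  return modelId.includes('gemini-exp-')
    ? HarmBlockThreshold.BLOCK_NONE
    : ('OFF' as HarmBlockThreshold)
}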
@@ -143,34 +168,13 @@ export default class GeminiProvider extends BaseProvider {
         systemInstruction: assistant.prompt,
         // @ts-ignore googleSearch is not a valid tool for Gemini
         tools: assistant.enableWebSearch && isWebSearchModel(model) ? [{ googleSearch: {} }] : undefined,
+        safetySettings: this.getSafetySettings(model.id),
         generationConfig: {
           maxOutputTokens: maxTokens,
           temperature: assistant?.settings?.temperature,
           topP: assistant?.settings?.topP,
           ...this.getCustomParameters(assistant)
-        },
-        safetySettings: [
-          {
-            category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
-            threshold: safetyThreshold
-          },
-          {
-            category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
-            threshold: safetyThreshold
-          },
-          {
-            category: HarmCategory.HARM_CATEGORY_HARASSMENT,
-            threshold: safetyThreshold
-          },
-          {
-            category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
-            threshold: safetyThreshold
-          },
-          {
-            category: 'HARM_CATEGORY_CIVIC_INTEGRITY' as HarmCategory,
-            threshold: safetyThreshold
-          }
-        ]
+        }
       },
       this.requestOptions
     )
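For context (not part of the commit), a self-contained sketch of how the new safetySettings value flows into the SDK: getGenerativeModel in @google/generative-ai takes safetySettings alongside generationConfig in its first argument and request options in its second. The API key, model id, timeout, and single-category array below are placeholders standing in for the provider's real values:

import {
  GoogleGenerativeAI,
  HarmBlockThreshold,
  HarmCategory,
  SafetySetting
} from '@google/generative-ai'

const sdk = new GoogleGenerativeAI('YOUR_API_KEY') // placeholder key

// One category shown; getSafetySettings builds the same shape for all five.
const safetySettings: SafetySetting[] = [
  {
    category: HarmCategory.HARM_CATEGORY_HARASSMENT,
    threshold: HarmBlockThreshold.BLOCK_NONE
  }
]

const geminiModel = sdk.getGenerativeModel(
  {
    model: 'gemini-exp-1206', // placeholder id
    safetySettings,
    generationConfig: { maxOutputTokens: 1024 }
  },
  { timeout: 60_000 } // plays the role of this.requestOptions in the provider
)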