support gpt-4-turbo model

bobo.yang 2023-11-14 17:29:54 +08:00
parent 407f5d46dd
commit 077aeec080
3 changed files with 46 additions and 0 deletions


@@ -234,6 +234,43 @@
"order": 2,
"markdownDescription": "properties for gpt-4 model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature)"
},
"devchat.Model.gpt-4-turbo": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"default": "devchat",
"enum": [
"devchat",
"openai"
],
"description": "[required*] Specify which provider host this llm model"
},
"temperature": {
"type": "number",
"default": 0.3,
"description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
},
"max_tokens": {
"type": "number",
"default": 1000,
"description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
},
"presence_penalty": {
"type": "number",
"default": 0,
"description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."
},
"frequency_penalty": {
"type": "number",
"default": 0,
"description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
}
},
"additionalProperties": false,
"order": 2,
"markdownDescription": "properties for gpt-4-turbo model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature)"
},
"devchat.Model.claude-2": { "devchat.Model.claude-2": {
"type": "object", "type": "object",
"properties": { "properties": {
@@ -399,6 +436,7 @@
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-4",
"gpt-4-1106-preview",
"claude-2", "claude-2",
"xinghuo-2", "xinghuo-2",
"chatglm_pro", "chatglm_pro",


@@ -110,6 +110,7 @@ async function configUpdateTo_0924() {
"Model.gpt-3-5",
"Model.gpt-3-5-16k",
"Model.gpt-4",
"Model.gpt-4-turbo",
"Model.claude-2", "Model.claude-2",
"Model.xinghuo-2", "Model.xinghuo-2",
"Model.chatglm_pro", "Model.chatglm_pro",


@@ -76,6 +76,10 @@ export class ApiKeyManager {
if (openaiModel3) {
modelList.push(openaiModel3.model);
}
const openaiModel4 = await modelProperties('Model.gpt-4-turbo', "gpt-4-1106-preview");
if (openaiModel4) {
modelList.push(openaiModel4.model);
}
const claudeModel = await modelProperties('Model.claude-2', "claude-2");
if (claudeModel) {
modelList.push(claudeModel.model);
@@ -170,6 +174,9 @@ export class ApiKeyManager {
if (llmModelT === "gpt-4") {
return await modelProperties('Model.gpt-4', "gpt-4");
}
if (llmModelT === "gpt-4-1106-preview") {
return await modelProperties('Model.gpt-4-turbo', "gpt-4-1106-preview");
}
if (llmModelT === "claude-2") { if (llmModelT === "claude-2") {
return await modelProperties('Model.claude-2', "claude-2"); return await modelProperties('Model.claude-2', "claude-2");
} }
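
Both ApiKeyManager hunks wire "gpt-4-1106-preview" to the 'Model.gpt-4-turbo' section through modelProperties. As a design note, the growing if-chain is a model-id-to-settings-section dispatch; a table-driven sketch of the same mapping (the gpt-3.5 section keys are inferred from the migration list above, and modelProperties is declared here only so the sketch compiles, with a return shape assumed from the modelList.push calls):

```typescript
// Assumed signature, based on how the diff calls it and reads .model.
declare function modelProperties(
  sectionKey: string,
  modelId: string
): Promise<{ model: string } | undefined>;

// Sketch only: the if-chain above, expressed as a lookup table.
const MODEL_SECTIONS: Record<string, string> = {
  "gpt-3.5-turbo": "Model.gpt-3-5",          // inferred mapping
  "gpt-3.5-turbo-16k": "Model.gpt-3-5-16k",  // inferred mapping
  "gpt-4": "Model.gpt-4",
  "gpt-4-1106-preview": "Model.gpt-4-turbo", // added by this commit
  "claude-2": "Model.claude-2",
};

async function propertiesFor(llmModelT: string) {
  const section = MODEL_SECTIONS[llmModelT];
  return section ? await modelProperties(section, llmModelT) : undefined;
}
```

A table like this keeps future model additions to a single line, though the commit itself follows the existing if-chain style.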