diff --git a/package.json b/package.json
index cb7b42d..e5f1da5 100644
--- a/package.json
+++ b/package.json
@@ -156,6 +156,47 @@
         "order": 0,
         "markdownDescription": "Specify the properties for gpt-3.5-turbo model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature)"
       },
+      "devchat.Model.gpt-3-5-1106": {
+        "type": "object",
+        "properties": {
+          "provider": {
+            "type": "string",
+            "default": "devchat",
+            "enum": [
+              "devchat",
+              "openai"
+            ],
+            "description": "[required*] Specify which provider hosts this llm model"
+          },
+          "temperature": {
+            "type": "number",
+            "default": 0.3,
+            "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
+          },
+          "max_tokens": {
+            "type": "number",
+            "default": 1000,
+            "description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
+          },
+          "presence_penalty": {
+            "type": "number",
+            "default": 0,
+            "description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."
+          },
+          "frequency_penalty": {
+            "type": "number",
+            "default": 0,
+            "description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
+          }
+        },
+        "required": [
+          "provider",
+          "key"
+        ],
+        "additionalProperties": false,
+        "order": 0,
+        "markdownDescription": "Specify the properties for gpt-3.5-turbo-1106 model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature)"
+      },
       "devchat.Model.gpt-3-5-16k": {
         "type": "object",
         "properties": {
@@ -434,6 +475,7 @@
         "default": "gpt-3.5-turbo",
         "enum": [
           "gpt-3.5-turbo",
+          "gpt-3.5-turbo-1106",
           "gpt-3.5-turbo-16k",
           "gpt-4",
           "gpt-4-1106-preview",
diff --git a/src/extension.ts b/src/extension.ts
index a73b414..2bcef1e 100644
--- a/src/extension.ts
+++ b/src/extension.ts
@@ -64,6 +64,26 @@ async function isProviderHasSetted() {
 }
 
+async function configUpdateTo_1115() {
+	const support_models = [
+		"Model.gpt-3-5-1106",
+		"Model.gpt-4-turbo",
+	];
+
+	for (const model of support_models) {
+		const modelConfig1: any = UiUtilWrapper.getConfiguration("devchat", model);
+		if (Object.keys(modelConfig1).length === 0) {
+			let modelConfigNew = {};
+			modelConfigNew = {"provider": "devchat"};
+			if (model.startsWith("Model.gpt-")) {
+				modelConfigNew = {"provider": "openai"};
+			}
+
+			await vscode.workspace.getConfiguration("devchat").update(model, modelConfigNew, vscode.ConfigurationTarget.Global);
+		}
+	}
+}
+
 async function configUpdateTo_0924() {
 	if (await isProviderHasSetted()) {
 		return ;
@@ -108,6 +128,7 @@ async function configUpdateTo_0924() {
 	const support_models = [
 		"Model.gpt-3-5",
+		"Model.gpt-3-5-1106",
 		"Model.gpt-3-5-16k",
 		"Model.gpt-4",
 		"Model.gpt-4-turbo",
@@ -196,6 +217,7 @@ async function activate(context: vscode.ExtensionContext) {
 	await configUpdateTo_0924();
 	await configUpdate0912To_0924();
+	await configUpdateTo_1115();
 
 	regLanguageContext();
diff --git a/src/util/apiKey.ts b/src/util/apiKey.ts
index dfc52fb..9874abf 100644
--- a/src/util/apiKey.ts
+++ b/src/util/apiKey.ts
@@ -68,6 +68,10 @@ export class ApiKeyManager {
 		if (openaiModel) {
 			modelList.push(openaiModel.model);
 		}
+		const openaiModel1 = await modelProperties('Model.gpt-3-5-1106', "gpt-3.5-turbo-1106");
+		if (openaiModel1) {
+			modelList.push(openaiModel1.model);
+		}
 		const openaiModel2 = await modelProperties('Model.gpt-3-5-16k', "gpt-3.5-turbo-16k");
 		if (openaiModel2) {
 			modelList.push(openaiModel2.model);
@@ -168,6 +172,9 @@ export class ApiKeyManager {
 		if (llmModelT === "gpt-3.5-turbo") {
 			return await modelProperties('Model.gpt-3-5', "gpt-3.5-turbo");
 		}
+		if (llmModelT === "gpt-3.5-turbo-1106") {
+			return await modelProperties('Model.gpt-3-5-1106', "gpt-3.5-turbo-1106");
+		}
 		if (llmModelT === "gpt-3.5-turbo-16k") {
 			return await modelProperties('Model.gpt-3-5-16k', "gpt-3.5-turbo-16k");
 		}
diff --git a/src/views/components/InputMessage/index.tsx b/src/views/components/InputMessage/index.tsx
index d0ffe83..655af57 100644
--- a/src/views/components/InputMessage/index.tsx
+++ b/src/views/components/InputMessage/index.tsx
@@ -176,8 +176,10 @@ const InputMessage = observer((props: any) => {
 	const getModelShowName = (modelName:string)=>{
 		const nameMap = {
 			"gpt-3.5-turbo": "GPT-3.5",
+			"gpt-3.5-turbo-1106": "GPT-3.5-1106",
 			"gpt-3.5-turbo-16k": "GPT-3.5-16K",
 			"gpt-4": "GPT-4",
+			"gpt-4-1106-preview": "GPT-4-turbo",
 			"claude-2": "CLAUDE-2"
 		};
 		if (modelName in nameMap){
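
Note on the migration above: configUpdateTo_1115 only seeds a provider for models whose configuration object is still empty, so existing user choices are never overwritten, and since both ids in support_models start with "Model.gpt-", the branch always resolves to {"provider": "openai"} here. A minimal standalone sketch of that behavior, assuming only the stock VS Code configuration API (the helper name seedModelDefaults is illustrative and not part of this PR; UiUtilWrapper.getConfiguration is replaced with a direct workspace call):

import * as vscode from 'vscode';

async function seedModelDefaults(): Promise<void> {
	const config = vscode.workspace.getConfiguration('devchat');
	for (const model of ['Model.gpt-3-5-1106', 'Model.gpt-4-turbo']) {
		// An empty object means the user has never configured this model.
		const current = config.get<Record<string, unknown>>(model, {});
		if (Object.keys(current).length === 0) {
			// Both ids start with "Model.gpt-", so the provider is OpenAI;
			// only untouched settings are seeded, never overwritten.
			await config.update(model, { provider: 'openai' }, vscode.ConfigurationTarget.Global);
		}
	}
}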