From 655a9e52f8b2d1044dee862c5c251c49af79e49e Mon Sep 17 00:00:00 2001 From: "bobo.yang" Date: Sun, 10 Mar 2024 16:32:16 +0800 Subject: [PATCH] feat: Integrate Claude3 models into vscode extension - Updated package.json to include support for claude-3 models - Removed references to claude-2 models in extension and ApiKeyManager - Added API functionality and configured default settings for new models --- package.json | 38 ++++++++++++-- src/extension.ts | 36 ++++++++++--- src/util/apiKey.ts | 113 ++++++++++++++++++++++++----------------- src/util/commonUtil.ts | 27 +++++++--- 4 files changed, 148 insertions(+), 66 deletions(-) diff --git a/package.json b/package.json index 5dab062..33a6200 100644 --- a/package.json +++ b/package.json @@ -243,7 +243,7 @@ "order": 6, "markdownDescription": "properties for gpt-4-turbo model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature)" }, - "devchat.Model.claude-2": { + "devchat.Model.claude-3-sonnet": { "type": "object", "properties": { "provider": { @@ -272,7 +272,38 @@ }, "additionalProperties": false, "order": 7, - "markdownDescription": "properties for claude-2 model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) " + "markdownDescription": "properties for claude-3-Sonnet model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) " + }, + "devchat.Model.claude-3-opus": { + "type": "object", + "properties": { + "provider": { + "type": "string", + "default": "devchat", + "enum": [ + "devchat" + ], + "description": "[required*] which provider host this llm model" + }, + "temperature": { + "type": "number", + "default": 0.3, + "description": "[optional*] What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic." + }, + "max_tokens": { + "type": "number", + "default": 1000, + "description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens." + }, + "max_input_tokens": { + "type": "number", + "default": 32000, + "description": "[optional*] Maximum text length for input to AI." + } + }, + "additionalProperties": false, + "order": 7, + "markdownDescription": "properties for claude-3-Opus model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) " }, "devchat.Model.xinghuo-2": { "type": "object", @@ -500,7 +531,8 @@ "gpt-3.5-turbo", "gpt-4", "gpt-4-turbo-preview", - "claude-2.1", + "claude-3-sonnet", + "claude-3-opus", "xinghuo-3.5", "GLM-4", "ERNIE-Bot-4.0", diff --git a/src/extension.ts b/src/extension.ts index a8ffbfa..043f272 100644 --- a/src/extension.ts +++ b/src/extension.ts @@ -161,7 +161,6 @@ async function configUpdateTo0924() { "Model.gpt-3-5-16k", "Model.gpt-4", "Model.gpt-4-turbo", - "Model.claude-2", "Model.xinghuo-2", "Model.chatglm_pro", "Model.ERNIE-Bot", @@ -182,12 +181,6 @@ async function configUpdateTo0924() { .update(model, modelConfigNew, vscode.ConfigurationTarget.Global); } } - - if (!defaultModel) { - await vscode.workspace - .getConfiguration("devchat") - .update("defaultModel", "claude-2.1", vscode.ConfigurationTarget.Global); - } } async function configUpdate0912To0924() { @@ -375,9 +368,12 @@ async function configSetModelDefaultParams() { "Model.gpt-4-turbo": { max_input_tokens: 32000, }, - "Model.claude-2": { + "Model.claude-3-opus": { max_input_tokens: 32000, }, + "Model.claude-3-sonnet": { + max_input_tokens: 32000, + }, "Model.xinghuo-2": { max_input_tokens: 6000, }, @@ 
-417,6 +413,29 @@ async function configSetModelDefaultParams() { } } +async function updateClaudePrivider() { + const claudeModels = [ + "Model.claude-3-opus", + "Model.claude-3-sonnet", + ]; + + for (const model of claudeModels) { + const modelConfig: any = UiUtilWrapper.getConfiguration("devchat", model); + if (modelConfig && Object.keys(modelConfig).length === 0) { + const modelProperties: any = { + "provider": "devchat" + }; + try { + await vscode.workspace + .getConfiguration("devchat") + .update(model, modelProperties, vscode.ConfigurationTarget.Global); + } catch (error) { + logger.channel()?.error(`update ${model} error: ${error}`); + } + } + } +} + async function activate(context: vscode.ExtensionContext) { ExtensionContextHolder.context = context; @@ -430,6 +449,7 @@ async function activate(context: vscode.ExtensionContext) { await updateInvalidSettings(); await updateInvalidDefaultModel(); await configUpdateto240205(); + await updateClaudePrivider(); await configSetModelDefaultParams(); regLanguageContext(); diff --git a/src/util/apiKey.ts b/src/util/apiKey.ts index 4fbf391..887d17e 100644 --- a/src/util/apiKey.ts +++ b/src/util/apiKey.ts @@ -10,14 +10,6 @@ export class ApiKeyManager { }; return providerNameMap[provider]; } - static async getApiKey(llmType: string = "OpenAI"): Promise { - const llmModelT = await this.llmModel(); - if (!llmModelT) { - return undefined; - } - - return llmModelT.api_key; - } static async getValidModels(): Promise { const modelProperties = async (modelPropertyName: string, modelName: string) => { @@ -64,9 +56,13 @@ export class ApiKeyManager { if (openaiModel4) { modelList.push(openaiModel4.model); } - const claudeModel = await modelProperties('Model.claude-2', "claude-2.1"); - if (claudeModel) { - modelList.push(claudeModel.model); + const claude3sonnetModel = await modelProperties('Model.claude-3-sonnet', "claude-3-sonnet"); + if (claude3sonnetModel) { + modelList.push(claude3sonnetModel.model); + } + const 
claude3opusModel = await modelProperties('Model.claude-3-opus', "claude-3-opus"); + if (claude3opusModel) { + modelList.push(claude3opusModel.model); } const xinghuoModel = await modelProperties('Model.xinghuo-2', "xinghuo-3.5"); if (xinghuoModel) { @@ -101,17 +97,18 @@ export class ApiKeyManager { } static async llmModel() { - let llmModelT = UiUtilWrapper.getConfiguration('devchat', 'defaultModel'); - if (!llmModelT) { + // inner function to update default model + const updateDefaultModelWithValidModels = async () => { const validModels = await this.getValidModels(); if (validModels.length > 0) { await UiUtilWrapper.updateConfiguration('devchat', 'defaultModel', validModels[0]); - llmModelT = validModels[0]; + return validModels[0]; } else { return undefined; } - } + }; + // inner function to get model properties const modelProperties = async (modelPropertyName: string, modelName: string) => { const modelConfig = UiUtilWrapper.getConfiguration("devchat", modelPropertyName); if (!modelConfig) { @@ -158,41 +155,61 @@ export class ApiKeyManager { return modelProperties; }; - if (llmModelT === "gpt-3.5-turbo") { - return await modelProperties('Model.gpt-3-5', "gpt-3.5-turbo"); + // inner function visit all models + const getModelPropertiesByName = async (modelName: string) => { + if (modelName === "gpt-3.5-turbo") { + return await modelProperties('Model.gpt-3-5', "gpt-3.5-turbo"); + } + if (modelName === "gpt-4") { + return await modelProperties('Model.gpt-4', "gpt-4"); + } + if (modelName === "gpt-4-turbo-preview") { + return await modelProperties('Model.gpt-4-turbo', "gpt-4-turbo-preview"); + } + if (modelName === "claude-3-sonnet") { + return await modelProperties('Model.claude-3-sonnet', "claude-3-sonnet"); + } + if (modelName === "claude-3-opus") { + return await modelProperties('Model.claude-3-opus', "claude-3-opus"); + } + if (modelName === "xinghuo-3.5") { + return await modelProperties('Model.xinghuo-2', "xinghuo-3.5"); + } + if (modelName === "GLM-4") { + 
return await modelProperties('Model.chatglm_pro', "GLM-4"); + } + if (modelName === "ERNIE-Bot-4.0") { + return await modelProperties('Model.ERNIE-Bot', "ERNIE-Bot-4.0"); + } + if (modelName === "togetherai/codellama/CodeLlama-70b-Instruct-hf") { + return await modelProperties('Model.CodeLlama-70b', "togetherai/codellama/CodeLlama-70b-Instruct-hf"); + } + if (modelName === "togetherai/mistralai/Mixtral-8x7B-Instruct-v0.1") { + return await modelProperties('Model.Mixtral-8x7B', "togetherai/mistralai/Mixtral-8x7B-Instruct-v0.1"); + } + if (modelName === "minimax/abab6-chat") { + return await modelProperties('Model.Minimax-abab6', "minimax/abab6-chat"); + } + if (modelName === "llama-2-70b-chat") { + return await modelProperties('Model.llama-2-70b-chat', "llama-2-70b-chat"); + } + return undefined; + }; + + let llmModelT: string | undefined = UiUtilWrapper.getConfiguration('devchat', 'defaultModel'); + if (llmModelT) { + const defaultModel = await getModelPropertiesByName(llmModelT); + if (defaultModel) { + return defaultModel; + } } - if (llmModelT === "gpt-4") { - return await modelProperties('Model.gpt-4', "gpt-4"); + + // reset default model + llmModelT = await updateDefaultModelWithValidModels(); + if (!llmModelT) { + return undefined; } - if (llmModelT === "gpt-4-turbo-preview") { - return await modelProperties('Model.gpt-4-turbo', "gpt-4-turbo-preview"); - } - if (llmModelT === "claude-2.1") { - return await modelProperties('Model.claude-2', "claude-2.1"); - } - if (llmModelT === "xinghuo-3.5") { - return await modelProperties('Model.xinghuo-2', "xinghuo-3.5"); - } - if (llmModelT === "GLM-4") { - return await modelProperties('Model.chatglm_pro', "GLM-4"); - } - if (llmModelT === "ERNIE-Bot-4.0") { - return await modelProperties('Model.ERNIE-Bot', "ERNIE-Bot-4.0"); - } - if (llmModelT === "togetherai/codellama/CodeLlama-70b-Instruct-hf") { - return await modelProperties('Model.CodeLlama-70b', "togetherai/codellama/CodeLlama-70b-Instruct-hf"); - } - if 
(llmModelT === "togetherai/mistralai/Mixtral-8x7B-Instruct-v0.1") { - return await modelProperties('Model.Mixtral-8x7B', "togetherai/mistralai/Mixtral-8x7B-Instruct-v0.1"); - } - if (llmModelT === "minimax/abab6-chat") { - return await modelProperties('Model.Minimax-abab6', "minimax/abab6-chat"); - } - if (llmModelT === "llama-2-70b-chat") { - return await modelProperties('Model.llama-2-70b-chat', "llama-2-70b-chat"); - } - - return undefined; + return getModelPropertiesByName(llmModelT); } static getKeyType(apiKey: string): string | undefined { diff --git a/src/util/commonUtil.ts b/src/util/commonUtil.ts index 9b71586..bf23bd8 100644 --- a/src/util/commonUtil.ts +++ b/src/util/commonUtil.ts @@ -3,7 +3,7 @@ import * as fs from 'fs'; import * as os from 'os'; import * as path from 'path'; import * as yaml from 'yaml'; -import * as vscode from 'vscode'; +import * as util from 'util'; import * as childProcess from 'child_process'; import { parseArgsStringToArgv } from 'string-argv'; @@ -20,7 +20,8 @@ export async function saveModelSettings(): Promise { "Model.gpt-3-5": "gpt-3.5-turbo", "Model.gpt-4": "gpt-4", "Model.gpt-4-turbo": "gpt-4-turbo-preview", - "Model.claude-2": "claude-2.1", + "Model.claude-3-sonnet": "claude-3-sonnet", + "Model.claude-3-opus": "claude-3-opus", "Model.xinghuo-2": "xinghuo-3.5", "Model.chatglm_pro": "GLM-4", "Model.ERNIE-Bot": "ERNIE-Bot-4.0", @@ -69,11 +70,10 @@ async function createOpenAiKeyEnv() { envs['OPENAI_API_KEY'] = llmModelData.api_key; } - const openAiApiBase = llmModelData.api_base; - if (openAiApiBase) { - envs['OPENAI_API_BASE'] = openAiApiBase; - envs['OPENAI_BASE_URL'] = openAiApiBase; - } + if (llmModelData && llmModelData.api_base) { + envs['OPENAI_API_BASE'] = llmModelData.api_base; + envs['OPENAI_BASE_URL'] = llmModelData.api_base; + } return envs; } @@ -331,4 +331,17 @@ export function gitLsTree(withAbsolutePath: boolean = false): string[] { } else { return lines; } +} + +export async function getFileContent(fileName: 
string): Promise<string | undefined> { + const readFile = util.promisify(fs.readFile); + try { + // Read file content from fileName + const fileContent = await readFile(fileName, 'utf-8'); + // Return the whole text in the file with name fileName + return fileContent; + } catch (error) { + logger.channel()!.error(`Error reading the file ${fileName}:`, error); + return undefined; + } } \ No newline at end of file