feat: Integrate Claude 3 models into VS Code extension

- Updated package.json to include support for claude-3 models
- Removed references to claude-2 models in extension and ApiKeyManager
- Added API functionality and configured default settings for the new models
parent ed04737f6c
commit 655a9e52f8

package.json
@@ -243,7 +243,7 @@
        "order": 6,
        "markdownDescription": "properties for gpt-4-turbo model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature)"
      },
-      "devchat.Model.claude-2": {
+      "devchat.Model.claude-3-sonnet": {
        "type": "object",
        "properties": {
          "provider": {
@@ -272,7 +272,38 @@
        },
        "additionalProperties": false,
        "order": 7,
-        "markdownDescription": "properties for claude-2 model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
+        "markdownDescription": "properties for claude-3-Sonnet model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
+      },
+      "devchat.Model.claude-3-opus": {
+        "type": "object",
+        "properties": {
+          "provider": {
+            "type": "string",
+            "default": "devchat",
+            "enum": [
+              "devchat"
+            ],
+            "description": "[required*] which provider host this llm model"
+          },
+          "temperature": {
+            "type": "number",
+            "default": 0.3,
+            "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
+          },
+          "max_tokens": {
+            "type": "number",
+            "default": 1000,
+            "description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
+          },
+          "max_input_tokens": {
+            "type": "number",
+            "default": 32000,
+            "description": "[optional*] Maximum text length for input to AI."
+          }
+        },
+        "additionalProperties": false,
+        "order": 7,
+        "markdownDescription": "properties for claude-3-Opus model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
      },
      "devchat.Model.xinghuo-2": {
        "type": "object",
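The claude-3-opus block added above mirrors the per-model schema used for the other providers. As a hedged illustration (the function name and call site are ours, not part of this commit), writing those same defaults into user settings from extension code would look like:

```typescript
import * as vscode from 'vscode';

// Hypothetical sketch: persist the defaults declared by the schema above
// for devchat.Model.claude-3-opus into the user's global settings.
async function seedClaude3OpusDefaults(): Promise<void> {
  await vscode.workspace.getConfiguration('devchat').update(
    'Model.claude-3-opus',
    { provider: 'devchat', temperature: 0.3, max_tokens: 1000, max_input_tokens: 32000 },
    vscode.ConfigurationTarget.Global,
  );
}
```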
@@ -500,7 +531,8 @@
          "gpt-3.5-turbo",
          "gpt-4",
          "gpt-4-turbo-preview",
-          "claude-2.1",
+          "claude-3-sonnet",
+          "claude-3-opus",
          "xinghuo-3.5",
          "GLM-4",
          "ERNIE-Bot-4.0",
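With the enum extended, either Claude 3 model can be chosen as the default. A minimal sketch, assuming this enum backs the devchat.defaultModel setting (the ApiKeyManager changes below suggest it does):

```typescript
import * as vscode from 'vscode';

// Minimal sketch, assuming "defaultModel" is the setting this enum belongs to.
async function selectClaude3Sonnet(): Promise<void> {
  await vscode.workspace
    .getConfiguration('devchat')
    .update('defaultModel', 'claude-3-sonnet', vscode.ConfigurationTarget.Global);
}
```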
@@ -161,7 +161,6 @@ async function configUpdateTo0924() {
    "Model.gpt-3-5-16k",
    "Model.gpt-4",
    "Model.gpt-4-turbo",
-    "Model.claude-2",
    "Model.xinghuo-2",
    "Model.chatglm_pro",
    "Model.ERNIE-Bot",
@@ -182,12 +181,6 @@ async function configUpdateTo0924() {
        .update(model, modelConfigNew, vscode.ConfigurationTarget.Global);
    }
  }
-
-  if (!defaultModel) {
-    await vscode.workspace
-      .getConfiguration("devchat")
-      .update("defaultModel", "claude-2.1", vscode.ConfigurationTarget.Global);
-  }
 }

 async function configUpdate0912To0924() {
@@ -375,9 +368,12 @@ async function configSetModelDefaultParams() {
    "Model.gpt-4-turbo": {
      max_input_tokens: 32000,
    },
-    "Model.claude-2": {
+    "Model.claude-3-opus": {
      max_input_tokens: 32000,
    },
+    "Model.claude-3-sonnet": {
+      max_input_tokens: 32000,
+    },
    "Model.xinghuo-2": {
      max_input_tokens: 6000,
    },
@@ -417,6 +413,29 @@ async function configSetModelDefaultParams() {
  }
 }

+async function updateClaudePrivider() {
+  const claudeModels = [
+    "Model.claude-3-opus",
+    "Model.claude-3-sonnet",
+  ];
+
+  for (const model of claudeModels) {
+    const modelConfig: any = UiUtilWrapper.getConfiguration("devchat", model);
+    if (modelConfig && Object.keys(modelConfig).length === 0) {
+      const modelProperties: any = {
+        "provider": "devchat"
+      };
+      try {
+        await vscode.workspace
+          .getConfiguration("devchat")
+          .update(model, modelProperties, vscode.ConfigurationTarget.Global);
+      } catch (error) {
+        logger.channel()?.error(`update ${model} error: ${error}`);
+      }
+    }
+  }
+}
+
 async function activate(context: vscode.ExtensionContext) {
  ExtensionContextHolder.context = context;
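The new updateClaudePrivider migration seeds a provider only when a model's config object exists but is empty, so any user-set values survive. A self-contained sketch of that rule (the helper name is ours, for illustration):

```typescript
// Returns true only for an existing-but-empty config object, the one case
// where the migration above writes { provider: "devchat" }.
function needsProviderSeed(modelConfig: unknown): boolean {
  return (
    typeof modelConfig === 'object' &&
    modelConfig !== null &&
    Object.keys(modelConfig).length === 0
  );
}

console.log(needsProviderSeed({}));                     // true: seed the default provider
console.log(needsProviderSeed({ provider: 'openai' })); // false: keep the user's choice
console.log(needsProviderSeed(undefined));              // false: setting not resolvable at all
```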
@@ -430,6 +449,7 @@ async function activate(context: vscode.ExtensionContext) {
  await updateInvalidSettings();
  await updateInvalidDefaultModel();
  await configUpdateto240205();
+  await updateClaudePrivider();
  await configSetModelDefaultParams();

  regLanguageContext();
@@ -10,14 +10,6 @@ export class ApiKeyManager {
    };
    return providerNameMap[provider];
  }
-  static async getApiKey(llmType: string = "OpenAI"): Promise<string | undefined> {
-    const llmModelT = await this.llmModel();
-    if (!llmModelT) {
-      return undefined;
-    }
-
-    return llmModelT.api_key;
-  }

  static async getValidModels(): Promise<string[]> {
    const modelProperties = async (modelPropertyName: string, modelName: string) => {
@@ -64,9 +56,13 @@ export class ApiKeyManager {
    if (openaiModel4) {
      modelList.push(openaiModel4.model);
    }
-    const claudeModel = await modelProperties('Model.claude-2', "claude-2.1");
-    if (claudeModel) {
-      modelList.push(claudeModel.model);
+    const claude3sonnetModel = await modelProperties('Model.claude-3-sonnet', "claude-3-sonnet");
+    if (claude3sonnetModel) {
+      modelList.push(claude3sonnetModel.model);
+    }
+    const claude3opusModel = await modelProperties('Model.claude-3-opus', "claude-3-opus");
+    if (claude3opusModel) {
+      modelList.push(claude3opusModel.model);
    }
    const xinghuoModel = await modelProperties('Model.xinghuo-2', "xinghuo-3.5");
    if (xinghuoModel) {
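The two Claude lookups follow the same look-up-then-push pattern as the rest of getValidModels. A hedged refactor sketch, not part of this commit, showing how such blocks could be table-driven (lookup stands in for the inner modelProperties helper):

```typescript
type ModelInfo = { model: string } | undefined;

// Collect display names of configured models from (settings key, model name)
// pairs, skipping models whose configuration does not resolve.
async function collectModels(
  lookup: (propertyName: string, modelName: string) => Promise<ModelInfo>,
): Promise<string[]> {
  const entries: Array<[string, string]> = [
    ['Model.claude-3-sonnet', 'claude-3-sonnet'],
    ['Model.claude-3-opus', 'claude-3-opus'],
  ];
  const models: string[] = [];
  for (const [propertyName, modelName] of entries) {
    const info = await lookup(propertyName, modelName);
    if (info) {
      models.push(info.model);
    }
  }
  return models;
}
```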
@@ -101,17 +97,18 @@ export class ApiKeyManager {
  }

  static async llmModel() {
-    let llmModelT = UiUtilWrapper.getConfiguration('devchat', 'defaultModel');
-    if (!llmModelT) {
+    // inner function to update default model
+    const updateDefaultModelWithValidModels = async () => {
      const validModels = await this.getValidModels();
      if (validModels.length > 0) {
        await UiUtilWrapper.updateConfiguration('devchat', 'defaultModel', validModels[0]);
-        llmModelT = validModels[0];
+        return validModels[0];
      } else {
        return undefined;
      }
-    }
+    };

+    // inner function to get model properties
    const modelProperties = async (modelPropertyName: string, modelName: string) => {
      const modelConfig = UiUtilWrapper.getConfiguration("devchat", modelPropertyName);
      if (!modelConfig) {
@@ -158,41 +155,61 @@ export class ApiKeyManager {
      return modelProperties;
    };

-    if (llmModelT === "gpt-3.5-turbo") {
-      return await modelProperties('Model.gpt-3-5', "gpt-3.5-turbo");
-    }
-    if (llmModelT === "gpt-4") {
-      return await modelProperties('Model.gpt-4', "gpt-4");
-    }
-    if (llmModelT === "gpt-4-turbo-preview") {
-      return await modelProperties('Model.gpt-4-turbo', "gpt-4-turbo-preview");
-    }
-    if (llmModelT === "claude-2.1") {
-      return await modelProperties('Model.claude-2', "claude-2.1");
-    }
-    if (llmModelT === "xinghuo-3.5") {
-      return await modelProperties('Model.xinghuo-2', "xinghuo-3.5");
-    }
-    if (llmModelT === "GLM-4") {
-      return await modelProperties('Model.chatglm_pro', "GLM-4");
-    }
-    if (llmModelT === "ERNIE-Bot-4.0") {
-      return await modelProperties('Model.ERNIE-Bot', "ERNIE-Bot-4.0");
-    }
-    if (llmModelT === "togetherai/codellama/CodeLlama-70b-Instruct-hf") {
-      return await modelProperties('Model.CodeLlama-70b', "togetherai/codellama/CodeLlama-70b-Instruct-hf");
-    }
-    if (llmModelT === "togetherai/mistralai/Mixtral-8x7B-Instruct-v0.1") {
-      return await modelProperties('Model.Mixtral-8x7B', "togetherai/mistralai/Mixtral-8x7B-Instruct-v0.1");
-    }
-    if (llmModelT === "minimax/abab6-chat") {
-      return await modelProperties('Model.Minimax-abab6', "minimax/abab6-chat");
-    }
-    if (llmModelT === "llama-2-70b-chat") {
-      return await modelProperties('Model.llama-2-70b-chat', "llama-2-70b-chat");
+    // inner function visit all models
+    const getModelPropertiesByName = async (modelName: string) => {
+      if (modelName === "gpt-3.5-turbo") {
+        return await modelProperties('Model.gpt-3-5', "gpt-3.5-turbo");
+      }
+      if (modelName === "gpt-4") {
+        return await modelProperties('Model.gpt-4', "gpt-4");
+      }
+      if (modelName === "gpt-4-turbo-preview") {
+        return await modelProperties('Model.gpt-4-turbo', "gpt-4-turbo-preview");
+      }
+      if (modelName === "claude-3-sonnet") {
+        return await modelProperties('Model.claude-3-sonnet', "claude-3-sonnet");
+      }
+      if (modelName === "claude-3-opus") {
+        return await modelProperties('Model.claude-3-opus', "claude-3-opus");
+      }
+      if (modelName === "xinghuo-3.5") {
+        return await modelProperties('Model.xinghuo-2', "xinghuo-3.5");
+      }
+      if (modelName === "GLM-4") {
+        return await modelProperties('Model.chatglm_pro', "GLM-4");
+      }
+      if (modelName === "ERNIE-Bot-4.0") {
+        return await modelProperties('Model.ERNIE-Bot', "ERNIE-Bot-4.0");
+      }
+      if (modelName === "togetherai/codellama/CodeLlama-70b-Instruct-hf") {
+        return await modelProperties('Model.CodeLlama-70b', "togetherai/codellama/CodeLlama-70b-Instruct-hf");
+      }
+      if (modelName === "togetherai/mistralai/Mixtral-8x7B-Instruct-v0.1") {
+        return await modelProperties('Model.Mixtral-8x7B', "togetherai/mistralai/Mixtral-8x7B-Instruct-v0.1");
+      }
+      if (modelName === "minimax/abab6-chat") {
+        return await modelProperties('Model.Minimax-abab6', "minimax/abab6-chat");
+      }
+      if (modelName === "llama-2-70b-chat") {
+        return await modelProperties('Model.llama-2-70b-chat', "llama-2-70b-chat");
+      }
+      return undefined;
+    };
+
+    let llmModelT: string | undefined = UiUtilWrapper.getConfiguration('devchat', 'defaultModel');
+    if (llmModelT) {
+      const defaultModel = await getModelPropertiesByName(llmModelT);
+      if (defaultModel) {
+        return defaultModel;
+      }
    }

-    return undefined;
+    // reset default model
+    llmModelT = await updateDefaultModelWithValidModels();
+    if (!llmModelT) {
+      return undefined;
+    }
+    return getModelPropertiesByName(llmModelT);
  }

  static getKeyType(apiKey: string): string | undefined {
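The reworked llmModel() now resolves in two steps: try the configured default, and only if that yields nothing, pick the first valid model and persist it. A self-contained sketch of that control flow (names are ours; the real code uses getModelPropertiesByName and updateDefaultModelWithValidModels):

```typescript
// Resolve model properties: configured default first, then first-valid fallback.
async function resolveModel<T>(
  configuredDefault: string | undefined,
  getByName: (name: string) => Promise<T | undefined>,
  pickFirstValid: () => Promise<string | undefined>,
): Promise<T | undefined> {
  if (configuredDefault) {
    const props = await getByName(configuredDefault);
    if (props) {
      return props; // the stored default still resolves; use it
    }
  }
  // Stored default missing or unresolvable: fall back (the real code also
  // persists the new default inside updateDefaultModelWithValidModels).
  const fallback = await pickFirstValid();
  return fallback ? getByName(fallback) : undefined;
}
```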
@@ -3,7 +3,7 @@ import * as fs from 'fs';
 import * as os from 'os';
 import * as path from 'path';
 import * as yaml from 'yaml';
-import * as vscode from 'vscode';
+import * as util from 'util';
 import * as childProcess from 'child_process';

 import { parseArgsStringToArgv } from 'string-argv';
@@ -20,7 +20,8 @@ export async function saveModelSettings(): Promise<void> {
    "Model.gpt-3-5": "gpt-3.5-turbo",
    "Model.gpt-4": "gpt-4",
    "Model.gpt-4-turbo": "gpt-4-turbo-preview",
-    "Model.claude-2": "claude-2.1",
+    "Model.claude-3-sonnet": "claude-3-sonnet",
+    "Model.claude-3-opus": "claude-3-opus",
    "Model.xinghuo-2": "xinghuo-3.5",
    "Model.chatglm_pro": "GLM-4",
    "Model.ERNIE-Bot": "ERNIE-Bot-4.0",
@@ -69,11 +70,10 @@ async function createOpenAiKeyEnv() {
    envs['OPENAI_API_KEY'] = llmModelData.api_key;
  }

-  const openAiApiBase = llmModelData.api_base;
-  if (openAiApiBase) {
-    envs['OPENAI_API_BASE'] = openAiApiBase;
-    envs['OPENAI_BASE_URL'] = openAiApiBase;
-  }
+  if (llmModelData && llmModelData.api_base) {
+    envs['OPENAI_API_BASE'] = llmModelData.api_base;
+    envs['OPENAI_BASE_URL'] = llmModelData.api_base;
+  }

  return envs;
 }
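The rewritten guard sets the base-URL variables only when api_base is actually present on the resolved model data. A small illustration of the resulting behavior (the model data here is made up):

```typescript
// With api_base unset, no base-URL variables are written and the client's
// built-in default endpoint applies.
const llmModelData: { api_key?: string; api_base?: string } = { api_key: 'sk-example' };
const envs: { [key: string]: string } = {};

if (llmModelData && llmModelData.api_base) {
  envs['OPENAI_API_BASE'] = llmModelData.api_base;
  envs['OPENAI_BASE_URL'] = llmModelData.api_base;
}

console.log(envs); // {} — nothing is set for a model without a custom api_base
```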
@@ -332,3 +332,16 @@ export function gitLsTree(withAbsolutePath: boolean = false): string[] {
    return lines;
  }
 }
+
+export async function getFileContent(fileName: string): Promise<string | undefined> {
+  const readFile = util.promisify(fs.readFile);
+  try {
+    // Read file content from fileName
+    const fileContent = await readFile(fileName, 'utf-8');
+    // Return the whole text in the file with name fileName
+    return fileContent;
+  } catch (error) {
+    logger.channel()!.error(`Error reading the file ${fileName}:`, error);
+    return undefined;
+  }
+}
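A hypothetical call site for the new getFileContent helper; the import path and function name of the caller are illustrative, not taken from this commit:

```typescript
import { getFileContent } from './util';

// Read a file and log it; a failed read returns undefined and has already
// been logged by the helper itself.
async function printFile(fileName: string): Promise<void> {
  const content = await getFileContent(fileName);
  if (content !== undefined) {
    console.log(content);
  }
}
```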