feat: Add support for new LLM models in VSCode plugin
- Add Model.CodeLlama-70b config settings support
- Implement Model.Mixtral-8x7B and Model.Minimax-abab6 support
- Modify UI and API integration to facilitate new model selections
parent abd999a17f
commit 0f36d8a855
package.json (70 changed lines)
@@ -322,7 +322,7 @@
        "order": 10,
        "markdownDescription": "properties for ERNIE-Bot model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
      },
-     "devchat.Model.CodeLlama-34b-Instruct": {
+     "devchat.Model.CodeLlama-70b": {
        "type": "object",
        "properties": {
          "provider": {
@@ -351,7 +351,38 @@
        },
        "additionalProperties": false,
        "order": 11,
-       "markdownDescription": "properties for CodeLlama-34b-Instruct. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
+       "markdownDescription": "properties for togetherai/codellama/CodeLlama-70b-Instruct-hf. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
      },
+     "devchat.Model.Mixtral-8x7B": {
+       "type": "object",
+       "properties": {
+         "provider": {
+           "type": "string",
+           "default": "devchat",
+           "enum": [
+             "devchat"
+           ],
+           "description": "[required*] which provider host this llm model"
+         },
+         "api_secret": {
+           "type": "string",
+           "default": "",
+           "description": "[required*] Specify secret key for selected provider."
+         },
+         "temperature": {
+           "type": "number",
+           "default": 0.5,
+           "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
+         },
+         "max_tokens": {
+           "type": "number",
+           "default": 4000,
+           "description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
+         }
+       },
+       "additionalProperties": false,
+       "order": 11,
+       "markdownDescription": "properties for togetherai/mistralai/Mixtral-8x7B-Instruct-v0.1. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
+     },
      "devchat.Model.llama-2-70b-chat": {
        "type": "object",
@@ -384,6 +415,37 @@
        "order": 12,
        "markdownDescription": "properties for llama-2-70b-chat. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
      },
+     "devchat.Model.Minimax-abab6": {
+       "type": "object",
+       "properties": {
+         "provider": {
+           "type": "string",
+           "default": "devchat",
+           "enum": [
+             "devchat"
+           ],
+           "description": "[required*] which provider host this llm model"
+         },
+         "api_secret": {
+           "type": "string",
+           "default": "",
+           "description": "[required*] Specify secret key for selected provider."
+         },
+         "temperature": {
+           "type": "number",
+           "default": 0.5,
+           "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
+         },
+         "max_tokens": {
+           "type": "number",
+           "default": 4000,
+           "description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
+         }
+       },
+       "additionalProperties": false,
+       "order": 12,
+       "markdownDescription": "properties for minimax/abab6-chat. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
+     },
      "devchat.defaultModel": {
        "oneOf": [
          {
@@ -397,7 +459,9 @@
          "xinghuo-3.5",
          "GLM-4",
          "ERNIE-Bot-4.0",
-         "CodeLlama-34b-Instruct",
+         "togetherai/codellama/CodeLlama-70b-Instruct-hf",
+         "togetherai/mistralai/Mixtral-8x7B-Instruct-v0.1",
+         "minimax/abab6-chat",
          "llama-2-70b-chat"
        ]
      },
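(For orientation: the schema above only declares the settings; at runtime the extension reads them through the standard VS Code configuration API. A minimal sketch, using the section and key names declared above; readModelConfig is a hypothetical helper, not part of this commit.)

import * as vscode from 'vscode';

// Hypothetical helper: read one of the model sections declared above,
// e.g. "devchat.Model.Mixtral-8x7B". Returns {} if the user never set it.
function readModelConfig(model: string): Record<string, unknown> {
    return vscode.workspace.getConfiguration('devchat').get<Record<string, unknown>>(model) ?? {};
}

const mixtral = readModelConfig('Model.Mixtral-8x7B');
if (Object.keys(mixtral).length > 0) {
    console.log('Mixtral provider:', mixtral['provider']);
}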
@@ -207,6 +207,28 @@ async function configUpdate0912To0924() {
 	}
 }
 
+async function configUpdateto240205() {
+	// rename Model.CodeLlama-34b-Instruct to Model.CodeLlama-70b
+	// add new Model.Mixtral-8x7B
+	// add new Model.Minimax-abab6
+	const supportModels = [
+		"Model.CodeLlama-70b",
+		"Model.Mixtral-8x7B",
+		"Model.Minimax-abab6"
+	];
+
+	for (const model of supportModels) {
+		const modelConfig1: any = UiUtilWrapper.getConfiguration("devchat", model);
+		if (Object.keys(modelConfig1).length === 0) {
+			let modelConfigNew = {};
+			modelConfigNew = {"provider": "devchat"};
+			await vscode.workspace.getConfiguration("devchat").update(model, modelConfigNew, vscode.ConfigurationTarget.Global);
+		}
+	}
+}
+
+
 async function setLangDefaultValue() {
 	const lang = vscode.env.language;
 	if (!UiUtilWrapper.getConfiguration("DevChat", "Language")) {
@@ -267,6 +289,7 @@ async function activate(context: vscode.ExtensionContext) {
 	await setLangDefaultValue();
 	await updateInvalidSettings();
 	await updateInvalidDefaultModel();
+	await configUpdateto240205();
 
 	regLanguageContext();
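(Design note: configUpdateto240205 only seeds a model section when it is completely empty, so running it on every activation is safe and never overwrites user-tuned values. A standalone sketch of that guard, with a plain Map standing in for the real configuration store; seedIfEmpty is illustrative, not from the commit.)

type ConfigStore = Map<string, Record<string, unknown>>;

// Illustrative guard: write the default only when the section is untouched,
// mirroring the Object.keys(...).length === 0 check in configUpdateto240205.
function seedIfEmpty(store: ConfigStore, model: string): void {
    const existing = store.get(model) ?? {};
    if (Object.keys(existing).length === 0) {
        store.set(model, { provider: 'devchat' });
    }
}

const store: ConfigStore = new Map([
    ['Model.Mixtral-8x7B', { provider: 'devchat', temperature: 0.2 }],
]);
seedIfEmpty(store, 'Model.Mixtral-8x7B');  // no-op: the user's values survive
seedIfEmpty(store, 'Model.Minimax-abab6'); // seeds { provider: 'devchat' }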
@@ -80,10 +80,18 @@ export class ApiKeyManager {
 		if (erniebotModel) {
 			modelList.push(erniebotModel.model);
 		}
-		const llamaCode2Model = await modelProperties('Model.CodeLlama-34b-Instruct', "CodeLlama-34b-Instruct");
+		const llamaCode2Model = await modelProperties('Model.CodeLlama-70b', "togetherai/codellama/CodeLlama-70b-Instruct-hf");
 		if (llamaCode2Model) {
 			modelList.push(llamaCode2Model.model);
 		}
+		const mixtralCode2Model = await modelProperties('Model.Mixtral-8x7B', "togetherai/mistralai/Mixtral-8x7B-Instruct-v0.1");
+		if (mixtralCode2Model) {
+			modelList.push(mixtralCode2Model.model);
+		}
+		const minimaxCode2Model = await modelProperties('Model.Minimax-abab6', "minimax/abab6-chat");
+		if (minimaxCode2Model) {
+			modelList.push(minimaxCode2Model.model);
+		}
 		const llama70BModel = await modelProperties('Model.llama-2-70b-chat', "llama-2-70b-chat");
 		if (llama70BModel) {
 			modelList.push(llama70BModel.model);
@@ -171,8 +179,14 @@ export class ApiKeyManager {
 		if (llmModelT === "ERNIE-Bot-4.0") {
 			return await modelProperties('Model.ERNIE-Bot', "ERNIE-Bot-4.0");
 		}
-		if (llmModelT === "CodeLlama-34b-Instruct") {
-			return await modelProperties('Model.CodeLlama-34b-Instruct', "CodeLlama-34b-Instruct");
+		if (llmModelT === "togetherai/codellama/CodeLlama-70b-Instruct-hf") {
+			return await modelProperties('Model.CodeLlama-70b', "togetherai/codellama/CodeLlama-70b-Instruct-hf");
 		}
+		if (llmModelT === "togetherai/mistralai/Mixtral-8x7B-Instruct-v0.1") {
+			return await modelProperties('Model.Mixtral-8x7B', "togetherai/mistralai/Mixtral-8x7B-Instruct-v0.1");
+		}
+		if (llmModelT === "minimax/abab6-chat") {
+			return await modelProperties('Model.Minimax-abab6', "minimax/abab6-chat");
+		}
 		if (llmModelT === "llama-2-70b-chat") {
 			return await modelProperties('Model.llama-2-70b-chat', "llama-2-70b-chat");
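(The calls above imply a contract for modelProperties(settingsKey, modelName): return undefined when the settings section is empty, so unconfigured models never reach modelList, and otherwise return the user's properties with the public model ID attached. A hedged sketch of that contract; the real implementation in ApiKeyManager may differ.)

interface ModelProperties {
    model: string;
    [key: string]: unknown;
}

// Sketch only: "read" is injected so the example is self-contained;
// the real code reads the devchat configuration section instead.
async function modelPropertiesSketch(
    settingsKey: string,   // e.g. 'Model.Mixtral-8x7B'
    modelName: string,     // e.g. 'togetherai/mistralai/Mixtral-8x7B-Instruct-v0.1'
    read: (key: string) => Record<string, unknown> | undefined
): Promise<ModelProperties | undefined> {
    const config = read(settingsKey) ?? {};
    if (Object.keys(config).length === 0) {
        return undefined; // callers skip unconfigured models
    }
    return { ...config, model: modelName }; // callers push .model into modelList
}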
@@ -24,10 +24,11 @@ export async function saveModelSettings(): Promise<void> {
 		"Model.xinghuo-2": "xinghuo-3.5",
 		"Model.chatglm_pro": "GLM-4",
 		"Model.ERNIE-Bot": "ERNIE-Bot-4.0",
-		"Model.CodeLlama-34b-Instruct": "CodeLlama-34b-Instruct",
+		"Model.CodeLlama-70b": "togetherai/codellama/CodeLlama-70b-Instruct-hf",
+		"Model.Mixtral-8x7B": "togetherai/mistralai/Mixtral-8x7B-Instruct-v0.1",
+		"Model.Minimax-abab6": "minimax/abab6-chat",
 		"Model.llama-2-70b-chat": "llama-2-70b-chat"
 	};
 
 	// is enable stream
 	const openaiStream = UiUtilWrapper.getConfiguration('DevChat', 'OpenAI.stream');
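(The map above and the if-chain in ApiKeyManager encode the same section-to-model-ID association in two places. One way to keep them in sync would be a single table plus an inverted index; a sketch, not part of this commit.)

// Single source of truth: settings section -> public model ID.
const modelIdBySection: Record<string, string> = {
    'Model.CodeLlama-70b': 'togetherai/codellama/CodeLlama-70b-Instruct-hf',
    'Model.Mixtral-8x7B': 'togetherai/mistralai/Mixtral-8x7B-Instruct-v0.1',
    'Model.Minimax-abab6': 'minimax/abab6-chat',
};

// Inverted index: public model ID -> settings section, replacing the
// per-model if statements in ApiKeyManager with one lookup.
const sectionByModelId: Record<string, string> = Object.fromEntries(
    Object.entries(modelIdBySection).map(([section, id]) => [id, section])
);

console.log(sectionByModelId['minimax/abab6-chat']); // 'Model.Minimax-abab6'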