support code llama model

This commit is contained in:
parent f0abddb55c
commit b4e3f24fdb

package.json — 88 changed lines
package.json
@@ -2,7 +2,7 @@
   "name": "devchat",
   "displayName": "DevChat",
   "description": "Write prompts, not code",
-  "version": "0.1.22",
+  "version": "0.1.33",
   "icon": "assets/devchat.png",
   "publisher": "merico",
   "engines": {
@@ -325,58 +325,6 @@
           "order": 3,
           "markdownDescription": "properties for ERNIE-Bot model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
         },
-        "devchat.Model.llama-2-13b-chat": {
-          "type": "object",
-          "properties": {
-            "provider": {
-              "type": "string",
-              "default": "devchat",
-              "enum": [
-                "devchat"
-              ],
-              "description": "[required*] which provider host this llm model"
-            },
-            "api_secret": {
-              "type": "string",
-              "default": "",
-              "description": "[required*] Specify secret key for selected provider."
-            },
-            "temperature": {
-              "type": "number",
-              "default": 0.5,
-              "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
-            }
-          },
-          "additionalProperties": false,
-          "order": 3,
-          "markdownDescription": "properties for llama-2-13b-chat model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
-        },
-        "devchat.Model.starchat-alpha": {
-          "type": "object",
-          "properties": {
-            "provider": {
-              "type": "string",
-              "default": "devchat",
-              "enum": [
-                "devchat"
-              ],
-              "description": "[required*] which provider host this llm model"
-            },
-            "api_secret": {
-              "type": "string",
-              "default": "",
-              "description": "[required*] Specify secret key for selected provider."
-            },
-            "temperature": {
-              "type": "number",
-              "default": 0.5,
-              "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
-            }
-          },
-          "additionalProperties": false,
-          "order": 3,
-          "markdownDescription": "properties for starchat-alpha model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
-        },
         "devchat.Model.CodeLlama-34b-Instruct": {
           "type": "object",
           "properties": {
@@ -408,6 +356,37 @@
           "order": 3,
           "markdownDescription": "properties for CodeLlama-34b-Instruct. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
         },
+        "devchat.Model.llama-2-70b-chat": {
+          "type": "object",
+          "properties": {
+            "provider": {
+              "type": "string",
+              "default": "devchat",
+              "enum": [
+                "devchat"
+              ],
+              "description": "[required*] which provider host this llm model"
+            },
+            "api_secret": {
+              "type": "string",
+              "default": "",
+              "description": "[required*] Specify secret key for selected provider."
+            },
+            "temperature": {
+              "type": "number",
+              "default": 0.5,
+              "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
+            },
+            "max_tokens": {
+              "type": "number",
+              "default": 4000,
+              "description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
+            }
+          },
+          "additionalProperties": false,
+          "order": 3,
+          "markdownDescription": "properties for llama-2-70b-chat. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
+        },
         "devchat.defaultModel": {
           "oneOf": [
             {
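
For orientation, the new devchat.Model.llama-2-70b-chat object accepts four keys. Below is a minimal TypeScript view of the schema added above; the interface is illustrative only, since the commit itself declares nothing beyond the JSON schema in package.json.

// Illustrative only: a TypeScript rendering of the JSON schema above.
interface Llama270bChatSettings {
    provider?: "devchat";   // [required*] the enum allows only "devchat"
    api_secret?: string;    // [required*] secret key for the selected provider
    temperature?: number;   // [optional*] sampling temperature in [0, 2], default 0.5
    max_tokens?: number;    // [optional*] cap on generated tokens, default 4000
}
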
@@ -421,9 +400,8 @@
                 "xinghuo-2",
                 "chatglm_pro",
                 "ERNIE-Bot",
-                "llama-2-13b-chat",
                 "CodeLlama-34b-Instruct",
-                "starchat-alpha"
+                "llama-2-70b-chat"
               ]
             },
             {
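
With this enum change, "llama-2-70b-chat" becomes a legal value for devchat.defaultModel, while "llama-2-13b-chat" and "starchat-alpha" no longer validate. A minimal sketch of reading the setting at runtime with VS Code's standard configuration API; this call site is illustrative and not part of the commit.

import * as vscode from "vscode";

// Illustrative only: read the enum-backed setting edited above.
const defaultModel = vscode.workspace
    .getConfiguration("devchat")
    .get<string>("defaultModel");
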
@@ -99,9 +99,8 @@ async function configUpdateTo_0924() {
         "Model.xinghuo-2",
         "Model.chatglm_pro",
         "Model.ERNIE-Bot",
-        "Model.llama-2-13b-chat",
         "Model.CodeLlama-34b-Instruct",
-        "Model.starchat-alpha"
+        "Model.llama-2-70b-chat"
     ];

     for (const model of support_models) {
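
The visible hunk stops at the loop header, but the naming convention is clear from package.json: each entry maps to a "devchat.<entry>" settings key, e.g. "Model.llama-2-70b-chat" maps to "devchat.Model.llama-2-70b-chat". A hypothetical sketch of how the loop might touch each key follows; the real body is not shown in this diff, and the list below contains only the entries visible in the hunk (the full array may begin earlier).

import * as vscode from "vscode";

const support_models = [
    "Model.xinghuo-2",
    "Model.chatglm_pro",
    "Model.ERNIE-Bot",
    "Model.CodeLlama-34b-Instruct",
    "Model.llama-2-70b-chat"
];

// Hypothetical: the actual body of this loop is outside the visible hunk.
for (const model of support_models) {
    const props = vscode.workspace.getConfiguration("devchat").get(model);
    // ... migrate or validate `props` here as configUpdateTo_0924 requires
}
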
@@ -105,9 +105,13 @@ export class ApiKeyManager {
         if (erniebotModel) {
             modelList.push(erniebotModel.model);
         }
-        const llama2Model = await modelProperties('Model.llama-2-13b-chat', "llama-2-13b-chat");
-        if (llama2Model) {
-            modelList.push(llama2Model.model);
+        const llamaCode2Model = await modelProperties('Model.CodeLlama-34b-Instruct', "CodeLlama-34b-Instruct");
+        if (llamaCode2Model) {
+            modelList.push(llamaCode2Model.model);
         }
+        const llama70BModel = await modelProperties('Model.llama-2-70b-chat', "llama-2-70b-chat");
+        if (llama70BModel) {
+            modelList.push(llama70BModel.model);
+        }

         const customModelConfig: any = UiUtilWrapper.getConfiguration('devchat', 'customModel');
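
modelProperties itself is outside this diff, but its contract can be inferred from the call sites: it takes a settings-key suffix and a model name, and its result is falsy when the user left the model unconfigured. A plausible sketch under those assumptions; the body below is inferred, not the commit's actual code.

import * as vscode from "vscode";

// Inferred from call sites only: return the user's config for a model,
// tagged with the model name, or undefined when the config was left blank.
async function modelProperties(
    keySuffix: string,   // e.g. "Model.llama-2-70b-chat"
    modelName: string    // e.g. "llama-2-70b-chat"
): Promise<{ model: string; [key: string]: any } | undefined> {
    const props = vscode.workspace.getConfiguration("devchat").get<any>(keySuffix);
    if (!props || Object.keys(props).length === 0) {
        return undefined;   // "Leave it blank if you won't use this llm model"
    }
    return { ...props, model: modelName };
}
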
@@ -223,16 +227,13 @@ export class ApiKeyManager {
         if (llmModelT === "ERNIE-Bot") {
             return await modelProperties('Model.ERNIE-Bot', "ERNIE-Bot");
         }
-        if (llmModelT === "llama-2-13b-chat") {
-            return await modelProperties('Model.llama-2-13b-chat', "llama-2-13b-chat");
-        }
         if (llmModelT === "CodeLlama-34b-Instruct") {
             return await modelProperties('Model.CodeLlama-34b-Instruct', "CodeLlama-34b-Instruct");
         }
-        if (llmModelT === "starchat-alpha") {
-            return await modelProperties('Model.starchat-alpha', "starchat-alpha");
+        if (llmModelT === "llama-2-70b-chat") {
+            return await modelProperties('Model.llama-2-70b-chat', "llama-2-70b-chat");
         }

         const customModelConfig: any = UiUtilWrapper.getConfiguration('devchat', 'customModel');
         if (!customModelConfig) {
             return undefined;
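
Net effect of this hunk: a request for the retired llama-2-13b-chat or starchat-alpha models no longer matches any built-in branch and falls through to the customModel lookup below, which returns undefined when no custom model is configured either.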