Support Code Llama and StarCoder models
parent 727d534458
commit d26a446547

package.json · 61 changed lines
@@ -351,6 +351,63 @@
       "order": 3,
       "markdownDescription": "properties for llama-2-13b-chat model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
     },
+    "devchat.Model.starchat-alpha": {
+      "type": "object",
+      "properties": {
+        "provider": {
+          "type": "string",
+          "default": "devchat",
+          "enum": [
+            "devchat"
+          ],
+          "description": "[required*] which provider host this llm model"
+        },
+        "api_secret": {
+          "type": "string",
+          "default": "",
+          "description": "[required*] Specify secret key for selected provider."
+        },
+        "temperature": {
+          "type": "number",
+          "default": 0.5,
+          "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
+        }
+      },
+      "additionalProperties": false,
+      "order": 3,
+      "markdownDescription": "properties for starchat-alpha model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
+    },
+    "devchat.Model.CodeLlama-34b-Instruct": {
+      "type": "object",
+      "properties": {
+        "provider": {
+          "type": "string",
+          "default": "devchat",
+          "enum": [
+            "devchat"
+          ],
+          "description": "[required*] which provider host this llm model"
+        },
+        "api_secret": {
+          "type": "string",
+          "default": "",
+          "description": "[required*] Specify secret key for selected provider."
+        },
+        "temperature": {
+          "type": "number",
+          "default": 0.5,
+          "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
+        },
+        "max_tokens": {
+          "type": "number",
+          "default": 4000,
+          "description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
+        }
+      },
+      "additionalProperties": false,
+      "order": 3,
+      "markdownDescription": "properties for CodeLlama-34b-Instruct. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
+    },
     "devchat.defaultModel": {
       "oneOf": [
         {
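For orientation: the two new schema blocks above mean a user can fill in the model settings through the standard VS Code configuration API, equivalent to editing settings.json by hand. A minimal sketch for CodeLlama-34b-Instruct (the same shape applies to starchat-alpha, minus max_tokens); the api_secret value is a placeholder, not a real key:

import * as vscode from 'vscode';

// Sketch: populate the new model's settings programmatically.
async function configureCodeLlama(): Promise<void> {
  const config = vscode.workspace.getConfiguration('devchat');
  await config.update(
    'Model.CodeLlama-34b-Instruct',
    {
      provider: 'devchat',      // the schema's enum allows only "devchat"
      api_secret: '<your-key>', // placeholder, not a real secret
      temperature: 0.2,         // 0-2; lower values give more deterministic output
      max_tokens: 4000          // cap on tokens generated per completion
    },
    vscode.ConfigurationTarget.Global
  );
}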
@@ -364,7 +421,9 @@
           "xinghuo-2",
           "chatglm_pro",
           "ERNIE-Bot",
-          "llama-2-13b-chat"
+          "llama-2-13b-chat",
+          "CodeLlama-34b-Instruct",
+          "starchat-alpha"
         ]
       },
       {
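Once a model is configured, the enum extension above lets either new model be chosen as the extension's default. A sketch, again via the configuration API:

import * as vscode from 'vscode';

// Sketch: switch devchat.defaultModel to one of the newly added entries.
async function useStarChatByDefault(): Promise<void> {
  await vscode.workspace
    .getConfiguration('devchat')
    .update('defaultModel', 'starchat-alpha', vscode.ConfigurationTarget.Global);
}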
@@ -99,7 +99,9 @@ async function configUpdateTo_0924() {
     "Model.xinghuo-2",
     "Model.chatglm_pro",
     "Model.ERNIE-Bot",
-    "Model.llama-2-13b-chat"
+    "Model.llama-2-13b-chat",
+    "Model.CodeLlama-34b-Instruct",
+    "Model.starchat-alpha"
   ];
 
   for (const model of support_models) {
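The diff only shows the support_models list inside configUpdateTo_0924; the loop body is not part of this commit. A plausible sketch of such a migration pass, assuming it carries each model's saved settings forward; both helpers here are stand-ins defined for illustration, not APIs from this codebase:

type ModelSettings = Record<string, unknown> | undefined;

function readSavedSetting(key: string): ModelSettings {
  // stand-in: would read the previously persisted value for `key`
  return undefined;
}

function writeSetting(key: string, value: ModelSettings): void {
  // stand-in: would persist `value` under `key`
}

const supportModels = [
  "Model.llama-2-13b-chat",
  "Model.CodeLlama-34b-Instruct",
  "Model.starchat-alpha"
];

for (const model of supportModels) {
  const saved = readSavedSetting(model);
  if (saved !== undefined) {
    writeSetting(model, saved); // copy forward only when a value exists
  }
}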
@@ -226,6 +226,12 @@ export class ApiKeyManager {
     if (llmModelT === "llama-2-13b-chat") {
       return await modelProperties('Model.llama-2-13b-chat', "llama-2-13b-chat");
     }
+    if (llmModelT === "CodeLlama-34b-Instruct") {
+      return await modelProperties('Model.CodeLlama-34b-Instruct', "CodeLlama-34b-Instruct");
+    }
+    if (llmModelT === "starchat-alpha") {
+      return await modelProperties('Model.starchat-alpha', "starchat-alpha");
+    }
 
     const customModelConfig: any = UiUtilWrapper.getConfiguration('devchat', 'customModel');
     if (!customModelConfig) {
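The modelProperties helper itself is not shown in this diff. A rough sketch of what it presumably does, based on how it is called above: load the per-model settings section from the extension configuration and tag it with the model name. The return shape and the emptiness check are assumptions:

import * as vscode from 'vscode';

// Sketch only: the real implementation is not part of this commit.
async function modelProperties(
  propertyName: string, // e.g. 'Model.CodeLlama-34b-Instruct'
  modelName: string     // e.g. 'CodeLlama-34b-Instruct'
): Promise<Record<string, unknown> | undefined> {
  const properties = vscode.workspace
    .getConfiguration('devchat')
    .get<Record<string, unknown>>(propertyName);
  if (!properties || !properties['provider']) {
    return undefined;   // treat an unset/blank section as "not configured"
  }
  return { ...properties, model: modelName };
}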