set claude-2 as default model
parent 704df23d1b
commit 3d1229784f
package.json (123 changed lines)
@@ -262,129 +262,6 @@
         "order": 3,
         "markdownDescription": "properties for claude-2 model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
       },
-      "devchat.customModel": {
-        "type": "array",
-        "items": {
-          "type": "object",
-          "properties": {
-            "model": {
-              "oneOf": [
-                {
-                  "type": "string",
-                  "enum": [
-                    "openai/gpt-4",
-                    "openai/gpt-4-0613",
-                    "openai/gpt-4-0314",
-                    "openai/gpt-4-32k",
-                    "openai/gpt-4-32k-0314",
-                    "openai/gpt-4-32k-0613",
-                    "openai/gpt-3.5-turbo",
-                    "openai/gpt-3.5-turbo-0301",
-                    "openai/gpt-3.5-turbo-0613",
-                    "openai/gpt-3.5-turbo-16k",
-                    "openai/gpt-3.5-turbo-16k-0613",
-                    "openai/text-davinci-003",
-                    "openai/curie-001",
-                    "openai/babbage-001",
-                    "openai/ada-001",
-                    "openai/babbage-002",
-                    "openai/davinci-002",
-                    "cohere/command-nightly",
-                    "cohere/command",
-                    "cohere/command-light",
-                    "cohere/command-medium-beta",
-                    "cohere/command-xlarge-beta",
-                    "anthropic/claude-2",
-                    "anthropic/claude-instant-1",
-                    "anthropic/claude-instant-1.2",
-                    "replicate/replicate/",
-                    "replicate/replicate/llama-2-70b-chat:58d078176e02c219e11eb4da5a02a7830a283b14cf8f94537af893ccff5ee781",
-                    "replicate/a16z-infra/llama-2-13b-chat:2a7f981751ec7fdf87b5b91ad4db53683a98082e9ff7bfd12c8cd5ea85980a52",
-                    "replicate/joehoover/instructblip-vicuna13b:c4c54e3c8c97cd50c2d2fec9be3b6065563ccf7d43787fb99f84151b867178fe",
-                    "replicate/replicate/dolly-v2-12b:ef0e1aefc61f8e096ebe4db6b2bacc297daf2ef6899f0f7e001ec445893500e5",
-                    "replicate/a16z-infra/llama-2-7b-chat:7b0bfc9aff140d5b75bacbed23e91fd3c34b01a1e958d32132de6e0a19796e2c",
-                    "replicate/replicate/vicuna-13b:6282abe6a492de4145d7bb601023762212f9ddbbe78278bd6771c8b3b2f2a13b",
-                    "replicate/daanelson/flan-t5-large:ce962b3f6792a57074a601d3979db5839697add2e4e02696b3ced4c022d4767f",
-                    "replicate/replit/replit-code-v1-3b:b84f4c074b807211cd75e3e8b1589b6399052125b4c27106e43d47189e8415ad",
-                    "huggingface/meta-llama/Llama-2-7b-hf",
-                    "huggingface/meta-llama/Llama-2-7b-chat-hf",
-                    "huggingface/meta-llama/Llama-2-13b-hf",
-                    "huggingface/meta-llama/Llama-2-13b-chat-hf",
-                    "huggingface/meta-llama/Llama-2-70b-hf",
-                    "huggingface/meta-llama/Llama-2-70b-chat-hf",
-                    "huggingface/meta-llama/Llama-2-7b",
-                    "huggingface/meta-llama/Llama-2-7b-chat",
-                    "huggingface/meta-llama/Llama-2-13b",
-                    "huggingface/meta-llama/Llama-2-13b-chat",
-                    "huggingface/meta-llama/Llama-2-70b",
-                    "huggingface/meta-llama/Llama-2-70b-chat",
-                    "together_ai/togethercomputer/llama-2-70b-chat",
-                    "together_ai/togethercomputer/Llama-2-7B-32K-Instruct",
-                    "together_ai/togethercomputer/llama-2-7b",
-                    "baseten/qvv0xeq",
-                    "baseten/q841o8w",
-                    "baseten/31dxrj3",
-                    "openrouter/google/palm-2-codechat-bison",
-                    "openrouter/google/palm-2-chat-bison",
-                    "openrouter/openai/gpt-3.5-turbo",
-                    "openrouter/openai/gpt-3.5-turbo-16k",
-                    "openrouter/openai/gpt-4-32k",
-                    "openrouter/anthropic/claude-2",
-                    "openrouter/anthropic/claude-instant-v1",
-                    "openrouter/meta-llama/llama-2-13b-chat",
-                    "openrouter/meta-llama/llama-2-70b-chat",
-                    "vertex_ai/chat-bison",
-                    "vertex_ai/chat-bison@001",
-                    "vertex_ai/text-bison",
-                    "vertex_ai/text-bison@001",
-                    "ai21/j2-ultra",
-                    "ai21/j2-mid",
-                    "ai21/j2-light"
-                  ],
-                  "description": "Specify llm model name."
-                },
-                {
-                  "type": "string",
-                  "description": "[required*] Specify llm model name."
-                }
-              ]
-            },
-            "api_key": {
-              "type": "string",
-              "default": "",
-              "description": "[required*] Specify access key for selected provider."
-            },
-            "api_base": {
-              "type": "string",
-              "default": "",
-              "description": "[optional*] Specify the api base for selected provider. Leave it blank if you want to use default api base."
-            },
-            "temperature": {
-              "type": "number",
-              "default": 0.3,
-              "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
-            },
-            "max_tokens": {
-              "type": "number",
-              "default": 1000,
-              "description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
-            },
-            "presence_penalty": {
-              "type": "number",
-              "default": 0,
-              "description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."
-            },
-            "frequency_penalty": {
-              "type": "number",
-              "default": 0,
-              "description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
-            }
-          },
-          "additionalProperties": false
-        },
-        "order": 6,
-        "markdownDescription": "Specify the custom llm model for DevChat."
-      },
       "devchat.defaultModel": {
         "oneOf": [
           {
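For reference, a user entry conforming to the removed devchat.customModel schema would have looked roughly like the following in settings.json. The model name and key are illustrative placeholders; temperature, max_tokens, and the penalty values are the schema defaults shown above.

    "devchat.customModel": [
        {
            "model": "anthropic/claude-2",
            "api_key": "<your provider access key>",
            "api_base": "",
            "temperature": 0.3,
            "max_tokens": 1000,
            "presence_penalty": 0,
            "frequency_penalty": 0
        }
    ]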
@@ -35,9 +35,7 @@ import { FT } from './util/feature_flags/feature_toggles';
 
 async function configUpdateTo_0912() {
 	const defaultModel: any = UiUtilWrapper.getConfiguration("devchat", "defaultModel");
-	if (!defaultModel) {
-		vscode.workspace.getConfiguration("devchat").update("defaultModel", "gpt-3.5-turbo", vscode.ConfigurationTarget.Global);
-	}
+
 
 	let devchatKey = UiUtilWrapper.getConfiguration('DevChat', 'Access_Key_DevChat');
 	let openaiKey = UiUtilWrapper.getConfiguration('DevChat', 'Api_Key_OpenAI');
@@ -79,6 +77,10 @@ async function configUpdateTo_0912() {
 		modelConfigNew["provider"] = "openai";
 	}
 
+	if (!defaultModel) {
+		vscode.workspace.getConfiguration("devchat").update("defaultModel", "gpt-3.5-turbo", vscode.ConfigurationTarget.Global);
+	}
+
 	try {
 		vscode.workspace.getConfiguration("devchat").update("Model.gpt-3-5", modelConfigNew, vscode.ConfigurationTarget.Global);
 		vscode.workspace.getConfiguration("devchat").update("Model.gpt-3-5-16k", modelConfigNew, vscode.ConfigurationTarget.Global);
@@ -87,6 +89,25 @@ async function configUpdateTo_0912() {
 			return;
 		}
 	}
 
+
+	const modelConfig4: any = UiUtilWrapper.getConfiguration("devchat", "Model.claude-2");
+	if (Object.keys(modelConfig4).length === 0) {
+		modelConfigNew = {};
+		if (devchatKey) {
+			modelConfigNew["api_key"] = devchatKey;
+		} else if (openaiKey) {
+			modelConfigNew["api_key"] = openaiKey;
+		}
+
+		if (modelConfigNew["api_key"].startsWith("DC.")) {
+			if (!defaultModel) {
+				vscode.workspace.getConfiguration("devchat").update("defaultModel", "claude-2", vscode.ConfigurationTarget.Global);
+			}
+
+			modelConfigNew["provider"] = "anthropic";
+			vscode.workspace.getConfiguration("devchat").update("Model.claude-2", modelConfigNew, vscode.ConfigurationTarget.Global);
+		}
+	}
 }
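Net effect of the new block: when the saved access key is a DevChat key (it starts with "DC.") and no default model has been chosen yet, the migration makes claude-2 the default and writes an anthropic provider entry under Model.claude-2. (Note the startsWith guard assumes devchatKey or openaiKey was set; if neither is, api_key is undefined and the call would throw.) Assuming a DC. key, the resulting user settings would look roughly like this, with the key value as a placeholder:

    "devchat.defaultModel": "claude-2",
    "devchat.Model.claude-2": {
        "api_key": "DC.<your key>",
        "provider": "anthropic"
    }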