remove unused configuration
parent 5e995ea4ee
commit 16046ec517
package-lock.json (generated, 35 changes)
@@ -21,6 +21,7 @@
         "@tiptap/starter-kit": "^2.0.3",
         "axios": "^1.3.6",
         "dotenv": "^16.0.3",
+        "js-yaml": "^4.1.0",
         "mobx": "^6.10.0",
         "mobx-react": "^9.0.0",
         "mobx-state-tree": "^5.1.8",
@@ -34,7 +35,8 @@
         "rehype-raw": "^6.1.1",
         "shell-escape": "^0.2.0",
         "string-argv": "^0.3.2",
-        "uuid": "^9.0.0"
+        "uuid": "^9.0.0",
+        "yaml": "^2.3.2"
       },
       "devDependencies": {
         "@babel/core": "^7.21.8",
@@ -5882,6 +5884,14 @@
         "node": ">=10"
       }
     },
+    "node_modules/cosmiconfig/node_modules/yaml": {
+      "version": "1.10.2",
+      "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz",
+      "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==",
+      "engines": {
+        "node": ">= 6"
+      }
+    },
     "node_modules/create-require": {
       "version": "1.1.1",
       "resolved": "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz",
@@ -9030,7 +9040,6 @@
       "version": "4.1.0",
       "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
      "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
-      "dev": true,
       "dependencies": {
         "argparse": "^2.0.1"
       },
@@ -14025,11 +14034,11 @@
       "dev": true
     },
     "node_modules/yaml": {
-      "version": "1.10.2",
-      "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz",
-      "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==",
+      "version": "2.3.2",
+      "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.3.2.tgz",
+      "integrity": "sha512-N/lyzTPaJasoDmfV7YTrYCI0G/3ivm/9wdG0aHuheKowWQwGTsK0Eoiw6utmzAnI6pkJa0DUVygvp3spqqEKXg==",
       "engines": {
-        "node": ">= 6"
+        "node": ">= 14"
       }
     },
     "node_modules/yargs": {
@@ -18429,6 +18438,13 @@
         "parse-json": "^5.0.0",
         "path-type": "^4.0.0",
         "yaml": "^1.10.0"
       },
+      "dependencies": {
+        "yaml": {
+          "version": "1.10.2",
+          "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz",
+          "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg=="
+        }
+      }
     },
     "create-require": {
@@ -20759,7 +20775,6 @@
       "version": "4.1.0",
       "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
       "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
-      "dev": true,
       "requires": {
         "argparse": "^2.0.1"
       }
@@ -24350,9 +24365,9 @@
       "dev": true
     },
     "yaml": {
-      "version": "1.10.2",
-      "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz",
-      "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg=="
+      "version": "2.3.2",
+      "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.3.2.tgz",
+      "integrity": "sha512-N/lyzTPaJasoDmfV7YTrYCI0G/3ivm/9wdG0aHuheKowWQwGTsK0Eoiw6utmzAnI6pkJa0DUVygvp3spqqEKXg=="
     },
     "yargs": {
       "version": "16.2.0",
package.json (270 changes)
@@ -73,19 +73,21 @@
     "devchat.Model.gpt-3-5": {
       "type": "object",
       "properties": {
-        "provider": {
-          "type": "string",
-          "default": "openai",
-          "enum": ["openai"],
+        "provider": {
+          "type": "string",
+          "default": "openai",
+          "enum": [
+            "openai"
+          ],
           "description": "Specify which provider host this llm model"
         },
-        "api_key": {
-          "type": "string",
+        "api_key": {
+          "type": "string",
           "default": "",
-          "description": "Specify access key for selected provider."
+          "description": "Specify access key for selected provider."
         },
-        "api_base": {
-          "type": "string",
+        "api_base": {
+          "type": "string",
           "default": "",
           "description": "Specify the api base for selected provider. Leave it blank if you want to use default api base."
         },
@@ -110,7 +112,10 @@
           "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
         }
       },
-      "required": ["provider", "key"],
+      "required": [
+        "provider",
+        "key"
+      ],
       "additionalProperties": false,
       "order": 0,
       "markdownDescription": "Specify the properties for gpt-3.5-turbo model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature)"
@@ -118,19 +123,21 @@
     "devchat.Model.gpt-3-5-16k": {
       "type": "object",
       "properties": {
-        "provider": {
-          "type": "string",
-          "default": "openai",
-          "enum": ["openai"],
+        "provider": {
+          "type": "string",
+          "default": "openai",
+          "enum": [
+            "openai"
+          ],
           "description": "Specify which provider host this llm model"
         },
-        "api_key": {
-          "type": "string",
+        "api_key": {
+          "type": "string",
           "default": "",
-          "description": "Specify access key for selected provider."
+          "description": "Specify access key for selected provider."
         },
-        "api_base": {
-          "type": "string",
+        "api_base": {
+          "type": "string",
           "default": "",
           "description": "Specify the api base for selected provider. Leave it blank if you want to use default api base."
         },
@@ -155,7 +162,10 @@
           "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
         }
       },
-      "required": ["provider", "key"],
+      "required": [
+        "provider",
+        "key"
+      ],
       "additionalProperties": false,
       "order": 1,
       "markdownDescription": "Specify properties for gpt-3.5-turbo-16k model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
@@ -163,19 +173,21 @@
     "devchat.Model.gpt-4": {
       "type": "object",
       "properties": {
-        "provider": {
-          "type": "string",
-          "default": "openai",
-          "enum": ["openai"],
+        "provider": {
+          "type": "string",
+          "default": "openai",
+          "enum": [
+            "openai"
+          ],
           "description": "Specify which provider host this llm model"
         },
-        "api_key": {
-          "type": "string",
+        "api_key": {
+          "type": "string",
           "default": "",
-          "description": "Specify access key for selected provider."
+          "description": "Specify access key for selected provider."
         },
-        "api_base": {
-          "type": "string",
+        "api_base": {
+          "type": "string",
           "default": "",
           "description": "Specify the api base for selected provider. Leave it blank if you want to use default api base."
         },
@@ -207,19 +219,21 @@
     "devchat.Model.claude-2": {
       "type": "object",
       "properties": {
-        "provider": {
-          "type": "string",
-          "default": "anthropic",
-          "enum": ["anthropic"],
+        "provider": {
+          "type": "string",
+          "default": "anthropic",
+          "enum": [
+            "anthropic"
+          ],
           "description": "Specify which provider host this llm model"
         },
-        "api_key": {
-          "type": "string",
+        "api_key": {
+          "type": "string",
           "default": "",
-          "description": "Specify access key for selected provider."
+          "description": "Specify access key for selected provider."
         },
-        "api_base": {
-          "type": "string",
+        "api_base": {
+          "type": "string",
           "default": "",
           "description": "Specify the api base for selected provider. Leave it blank if you want to use default api base."
         },
@@ -248,7 +262,6 @@
       "order": 3,
       "markdownDescription": "properties for claude-2 model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
     },
-
     "devchat.customModel": {
       "type": "array",
       "items": {
@@ -258,21 +271,91 @@
           "oneOf": [
             {
               "type": "string",
-              "enum": ["openai/gpt-4", "openai/gpt-4-0613", "openai/gpt-4-0314", "openai/gpt-4-32k", "openai/gpt-4-32k-0314", "openai/gpt-4-32k-0613", "openai/gpt-3.5-turbo", "openai/gpt-3.5-turbo-0301", "openai/gpt-3.5-turbo-0613", "openai/gpt-3.5-turbo-16k", "openai/gpt-3.5-turbo-16k-0613", "openai/text-davinci-003", "openai/curie-001", "openai/babbage-001", "openai/ada-001", "openai/babbage-002", "openai/davinci-002", "cohere/command-nightly", "cohere/command", "cohere/command-light", "cohere/command-medium-beta", "cohere/command-xlarge-beta", "anthropic/claude-2", "anthropic/claude-instant-1", "anthropic/claude-instant-1.2", "replicate/replicate/", "replicate/replicate/llama-2-70b-chat:58d078176e02c219e11eb4da5a02a7830a283b14cf8f94537af893ccff5ee781", "replicate/a16z-infra/llama-2-13b-chat:2a7f981751ec7fdf87b5b91ad4db53683a98082e9ff7bfd12c8cd5ea85980a52", "replicate/joehoover/instructblip-vicuna13b:c4c54e3c8c97cd50c2d2fec9be3b6065563ccf7d43787fb99f84151b867178fe", "replicate/replicate/dolly-v2-12b:ef0e1aefc61f8e096ebe4db6b2bacc297daf2ef6899f0f7e001ec445893500e5", "replicate/a16z-infra/llama-2-7b-chat:7b0bfc9aff140d5b75bacbed23e91fd3c34b01a1e958d32132de6e0a19796e2c", "replicate/replicate/vicuna-13b:6282abe6a492de4145d7bb601023762212f9ddbbe78278bd6771c8b3b2f2a13b", "replicate/daanelson/flan-t5-large:ce962b3f6792a57074a601d3979db5839697add2e4e02696b3ced4c022d4767f", "replicate/replit/replit-code-v1-3b:b84f4c074b807211cd75e3e8b1589b6399052125b4c27106e43d47189e8415ad", "huggingface/meta-llama/Llama-2-7b-hf", "huggingface/meta-llama/Llama-2-7b-chat-hf", "huggingface/meta-llama/Llama-2-13b-hf", "huggingface/meta-llama/Llama-2-13b-chat-hf", "huggingface/meta-llama/Llama-2-70b-hf", "huggingface/meta-llama/Llama-2-70b-chat-hf", "huggingface/meta-llama/Llama-2-7b", "huggingface/meta-llama/Llama-2-7b-chat", "huggingface/meta-llama/Llama-2-13b", "huggingface/meta-llama/Llama-2-13b-chat", "huggingface/meta-llama/Llama-2-70b", "huggingface/meta-llama/Llama-2-70b-chat", "together_ai/togethercomputer/llama-2-70b-chat", "together_ai/togethercomputer/Llama-2-7B-32K-Instruct", "together_ai/togethercomputer/llama-2-7b", "baseten/qvv0xeq", "baseten/q841o8w", "baseten/31dxrj3", "openrouter/google/palm-2-codechat-bison", "openrouter/google/palm-2-chat-bison", "openrouter/openai/gpt-3.5-turbo", "openrouter/openai/gpt-3.5-turbo-16k", "openrouter/openai/gpt-4-32k", "openrouter/anthropic/claude-2", "openrouter/anthropic/claude-instant-v1", "openrouter/meta-llama/llama-2-13b-chat", "openrouter/meta-llama/llama-2-70b-chat", "vertex_ai/chat-bison", "vertex_ai/chat-bison@001", "vertex_ai/text-bison", "vertex_ai/text-bison@001", "ai21/j2-ultra", "ai21/j2-mid", "ai21/j2-light"],
+              "enum": [
+                "openai/gpt-4",
+                "openai/gpt-4-0613",
+                "openai/gpt-4-0314",
+                "openai/gpt-4-32k",
+                "openai/gpt-4-32k-0314",
+                "openai/gpt-4-32k-0613",
+                "openai/gpt-3.5-turbo",
+                "openai/gpt-3.5-turbo-0301",
+                "openai/gpt-3.5-turbo-0613",
+                "openai/gpt-3.5-turbo-16k",
+                "openai/gpt-3.5-turbo-16k-0613",
+                "openai/text-davinci-003",
+                "openai/curie-001",
+                "openai/babbage-001",
+                "openai/ada-001",
+                "openai/babbage-002",
+                "openai/davinci-002",
+                "cohere/command-nightly",
+                "cohere/command",
+                "cohere/command-light",
+                "cohere/command-medium-beta",
+                "cohere/command-xlarge-beta",
+                "anthropic/claude-2",
+                "anthropic/claude-instant-1",
+                "anthropic/claude-instant-1.2",
+                "replicate/replicate/",
+                "replicate/replicate/llama-2-70b-chat:58d078176e02c219e11eb4da5a02a7830a283b14cf8f94537af893ccff5ee781",
+                "replicate/a16z-infra/llama-2-13b-chat:2a7f981751ec7fdf87b5b91ad4db53683a98082e9ff7bfd12c8cd5ea85980a52",
+                "replicate/joehoover/instructblip-vicuna13b:c4c54e3c8c97cd50c2d2fec9be3b6065563ccf7d43787fb99f84151b867178fe",
+                "replicate/replicate/dolly-v2-12b:ef0e1aefc61f8e096ebe4db6b2bacc297daf2ef6899f0f7e001ec445893500e5",
+                "replicate/a16z-infra/llama-2-7b-chat:7b0bfc9aff140d5b75bacbed23e91fd3c34b01a1e958d32132de6e0a19796e2c",
+                "replicate/replicate/vicuna-13b:6282abe6a492de4145d7bb601023762212f9ddbbe78278bd6771c8b3b2f2a13b",
+                "replicate/daanelson/flan-t5-large:ce962b3f6792a57074a601d3979db5839697add2e4e02696b3ced4c022d4767f",
+                "replicate/replit/replit-code-v1-3b:b84f4c074b807211cd75e3e8b1589b6399052125b4c27106e43d47189e8415ad",
+                "huggingface/meta-llama/Llama-2-7b-hf",
+                "huggingface/meta-llama/Llama-2-7b-chat-hf",
+                "huggingface/meta-llama/Llama-2-13b-hf",
+                "huggingface/meta-llama/Llama-2-13b-chat-hf",
+                "huggingface/meta-llama/Llama-2-70b-hf",
+                "huggingface/meta-llama/Llama-2-70b-chat-hf",
+                "huggingface/meta-llama/Llama-2-7b",
+                "huggingface/meta-llama/Llama-2-7b-chat",
+                "huggingface/meta-llama/Llama-2-13b",
+                "huggingface/meta-llama/Llama-2-13b-chat",
+                "huggingface/meta-llama/Llama-2-70b",
+                "huggingface/meta-llama/Llama-2-70b-chat",
+                "together_ai/togethercomputer/llama-2-70b-chat",
+                "together_ai/togethercomputer/Llama-2-7B-32K-Instruct",
+                "together_ai/togethercomputer/llama-2-7b",
+                "baseten/qvv0xeq",
+                "baseten/q841o8w",
+                "baseten/31dxrj3",
+                "openrouter/google/palm-2-codechat-bison",
+                "openrouter/google/palm-2-chat-bison",
+                "openrouter/openai/gpt-3.5-turbo",
+                "openrouter/openai/gpt-3.5-turbo-16k",
+                "openrouter/openai/gpt-4-32k",
+                "openrouter/anthropic/claude-2",
+                "openrouter/anthropic/claude-instant-v1",
+                "openrouter/meta-llama/llama-2-13b-chat",
+                "openrouter/meta-llama/llama-2-70b-chat",
+                "vertex_ai/chat-bison",
+                "vertex_ai/chat-bison@001",
+                "vertex_ai/text-bison",
+                "vertex_ai/text-bison@001",
+                "ai21/j2-ultra",
+                "ai21/j2-mid",
+                "ai21/j2-light"
+              ],
               "description": "Specify llm model name."
-            }, {
+            },
+            {
               "type": "string",
               "description": "Specify llm model name."
             }
           ]
         },
-        "api_key": {
-          "type": "string",
+        "api_key": {
+          "type": "string",
           "default": "",
-          "description": "Specify access key for selected provider."
+          "description": "Specify access key for selected provider."
         },
-        "api_base": {
-          "type": "string",
+        "api_base": {
+          "type": "string",
           "default": "",
           "description": "Specify the api base for selected provider. Leave it blank if you want to use default api base."
         },
@@ -302,7 +385,6 @@
       "order": 6,
       "markdownDescription": "Specify the custom llm model for DevChat."
     },
-
     "devchat.defaultModel": {
       "oneOf": [
         {
@@ -314,7 +396,8 @@
             "gpt-4",
             "claude-2"
           ]
-        }, {
+        },
+        {
           "type": "string",
           "default": "gpt-3.5-turbo"
         }
@@ -322,54 +405,18 @@
       "order": 7,
       "markdownDescription": "Specify the default llm model for DevChat. [Price of each model](https://devchat.ai/pricing)"
     },
-
-    "DevChat.activeModelKey": {
-      "type": "string",
-      "default": "",
-      "description": "api key for llm model"
-    },
-
-    "DevChat.llmModel": {
-      "type": "string",
-      "default": "OpenAI",
-      "enum": [
-        "OpenAI"
-      ],
-      "description": "Select which LLM to use."
-    },
-    "DevChat.maxLogCount": {
-      "type": "number",
-      "default": 20,
-      "description": "Limit the number of prompts in the chat view."
-    },
-    "DevChat.OpenAI.temperature": {
-      "type": "number",
-      "default": 0,
-      "description": "The sampling temperature to use, between 0 and 2. Lower values like 0.2 will make it more focused and deterministic.",
-      "when": "DevChat.llmModel == 'OpenAI'"
-    },
     "DevChat.OpenAI.stream": {
       "type": "boolean",
       "default": true,
-      "description": "Whether to stream a response.",
-      "when": "DevChat.llmModel == 'OpenAI'"
-    },
-    "DevChat.OpenAI.tokensPerPrompt": {
-      "type": "number",
-      "default": 6000,
-      "description": "The max number of tokens of a prompt.",
-      "when": "DevChat.llmModel == 'OpenAI'"
-    },
-    "DevChat.API_ENDPOINT": {
-      "type": "string",
-      "default": "",
-      "description": "API endpoint URL",
-      "when": "DevChat.llmModel == 'OpenAI'"
+      "order": 10,
+      "description": "Whether to stream a response."
+    },
+    "DevChat.EnableFunctionCalling": {
+      "type": "boolean",
+      "default": false,
+      "order": 11,
+      "description": "Enable/Disable function calling for GPT."
     },
     "DevChat.DevChatPath": {
       "type": "string",
@@ -382,8 +429,27 @@
           ]
         }
       },
+      "order": 12,
       "description": "Where is the devchat binary located?"
     },
+    "DevChat.betaInvitationCode": {
+      "type": "string",
+      "default": "",
+      "order": 13,
+      "description": "The invitation code for beta testing."
+    },
+    "DevChat.maxLogCount": {
+      "type": "number",
+      "default": 20,
+      "order": 14,
+      "description": "Limit the number of prompts in the chat view."
+    },
+    "DevChat.askcode.supportedFileTypes": {
+      "type": "string",
+      "default": ".+\\.js$, .+\\.ts$, .+\\.jsx$, .+\\.tsx$, .+\\.java$, .+\\.py$, .+\\.go$, .+\\.rb$, .+\\.php$, .+\\.cpp$, .+\\.c$, .+\\.cs$, .+\\.swift$, .+\\.rs$, .+\\.sh$, .+\\.bash$, .+\\.zsh$, .+\\.m$, .+\\.mm$, .+\\.h$, .+\\.hpp$, .+\\.hh$, .+\\.html$, .+\\.htm$, .+\\.xhtml$, .+\\.xml$, .+\\.css$, .+\\.scss$, .+\\.sass$, .+\\.less$, .+\\.json$, .+\\.yaml$, .+\\.yml$, .+\\.toml$, .+\\.ini$, .+\\.md$, .+\\.markdown$, .+\\.txt$, .+\\.csv$, .+\\.sql$, .+\\.sqlite$, .+\\.db$, .+\\.hql$, .+\\.psql$, .+\\.pgsql$, .+\\.plpgsql$",
+      "order": 15,
+      "description": "Comma-separated list of regular expressions for supported file types for analysis."
+    },
     "DevChat.PythonPath": {
       "type": "string",
       "default": "",
@@ -397,26 +463,10 @@
       },
       "description": "Which Python interpreter to use with DevChat?"
     },
-    "DevChat.EnableFunctionCalling": {
-      "type": "boolean",
-      "default": false,
-      "description": "Enable/Disable function calling for GPT.",
-      "when": "DevChat.llmModel == 'OpenAI'"
-    },
     "DevChat.PythonVirtualEnv": {
      "type": "string",
      "default": "",
      "description": "Path to the Python virtual environment for AskCode."
    },
-    "DevChat.askcode.supportedFileTypes": {
-      "type": "string",
-      "default": ".+\\.js$, .+\\.ts$, .+\\.jsx$, .+\\.tsx$, .+\\.java$, .+\\.py$, .+\\.go$, .+\\.rb$, .+\\.php$, .+\\.cpp$, .+\\.c$, .+\\.cs$, .+\\.swift$, .+\\.rs$, .+\\.sh$, .+\\.bash$, .+\\.zsh$, .+\\.m$, .+\\.mm$, .+\\.h$, .+\\.hpp$, .+\\.hh$, .+\\.html$, .+\\.htm$, .+\\.xhtml$, .+\\.xml$, .+\\.css$, .+\\.scss$, .+\\.sass$, .+\\.less$, .+\\.json$, .+\\.yaml$, .+\\.yml$, .+\\.toml$, .+\\.ini$, .+\\.md$, .+\\.markdown$, .+\\.txt$, .+\\.csv$, .+\\.sql$, .+\\.sqlite$, .+\\.db$, .+\\.hql$, .+\\.psql$, .+\\.pgsql$, .+\\.plpgsql$",
-      "description": "Comma-separated list of regular expressions for supported file types for analysis."
-    },
-    "DevChat.betaInvitationCode": {
-      "type": "string",
-      "default": "",
-      "description": "The invitation code for beta testing."
-    }
   }
 },
@@ -791,6 +841,7 @@
     "@tiptap/starter-kit": "^2.0.3",
     "axios": "^1.3.6",
     "dotenv": "^16.0.3",
+    "js-yaml": "^4.1.0",
     "mobx": "^6.10.0",
     "mobx-react": "^9.0.0",
     "mobx-state-tree": "^5.1.8",
@@ -804,6 +855,7 @@
     "rehype-raw": "^6.1.1",
     "shell-escape": "^0.2.0",
     "string-argv": "^0.3.2",
-    "uuid": "^9.0.0"
+    "uuid": "^9.0.0",
+    "yaml": "^2.3.2"
   }
 }
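For orientation only (not part of the commit): under the expanded schema above, one built-in model entry in a user's settings.json would be shaped roughly like the sketch below. The property names (provider, api_key, api_base) come from the schema in the diff; the api_key value is a placeholder.

    "devchat.Model.gpt-3-5": {
        "provider": "openai",
        "api_key": "sk-...",
        "api_base": ""
    }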
@@ -1,14 +1,10 @@
 import * as path from 'path';

-import * as vscode from 'vscode'
+import * as vscode from 'vscode';

 import { ChatContext } from './contextManager';
 import { createTempSubdirectory, git_ls_tree, runCommandStringAndWriteOutput } from '../util/commonUtil';
-
 import { logger } from '../util/logger';
 import { handleCodeSelected } from './contextCodeSelected';
-import DevChat, { ChatOptions } from '../toolwrapper/devchat';
-import { number } from 'mobx-state-tree/dist/internal';

 async function getCurrentSelectText(activeEditor: vscode.TextEditor): Promise<string> {
@@ -183,7 +179,7 @@ async function getSymbolDefine(symbolList: string[], activeEditor: vscode.TextEd
 			for (const child of symbol.children) {
 				visitFun(child);
 			}
-		}
+		};
 		for (const symbol of symbolsT) {
 			visitFun(symbol);
 		}
@@ -313,7 +313,16 @@ async function installAskCode(supportedFileTypes, progressBar: any, callback: Fu
 async function indexCode(pythonVirtualEnv, supportedFileTypes, progressBar: any) {
 	let envs = {};

-	let openaiApiKey = await ApiKeyManager.getApiKey();
+	const llmModelData = await ApiKeyManager.llmModel();
+	if (!llmModelData) {
+		logger.channel()?.error('No valid llm model is selected!');
+		logger.channel()?.show();
+
+		progressBar.endWithError("No valid llm model is selected!");
+		return;
+	}
+
+	let openaiApiKey = llmModelData.api_key;
 	if (!openaiApiKey) {
 		logger.channel()?.error('The OpenAI key is invalid!');
 		logger.channel()?.show();
@@ -323,7 +332,7 @@ async function indexCode(pythonVirtualEnv, supportedFileTypes, progressBar: any)
 	}
 	envs['OPENAI_API_KEY'] = openaiApiKey;

-	const openAiApiBase = ApiKeyManager.getEndPoint(openaiApiKey);
+	const openAiApiBase = llmModelData.api_base;
 	if (openAiApiBase) {
 		envs['OPENAI_API_BASE'] = openAiApiBase;
 	}
@@ -440,17 +449,26 @@ export function registerAskCodeSummaryIndexStartCommand(context: vscode.Extensio
 async function indexCodeSummary(pythonVirtualEnv, supportedFileTypes, progressBar: any) {
 	let envs = {};

-	let openaiApiKey = await ApiKeyManager.getApiKey();
+	const llmModelData = await ApiKeyManager.llmModel();
+	if (!llmModelData) {
+		logger.channel()?.error('No valid llm model is selected!');
+		logger.channel()?.show();
+
+		progressBar.endWithError("No valid llm model is selected!");
+		return;
+	}
+
+	let openaiApiKey = llmModelData.api_key;
 	if (!openaiApiKey) {
 		logger.channel()?.error('The OpenAI key is invalid!');
 		logger.channel()?.show();

-		progressBar.endWithError("The OpenAI key is invalid!");
+		progressBar.endWithError("The OpenAI key is invalid!");
 		return;
 	}
 	envs['OPENAI_API_KEY'] = openaiApiKey;

-	const openAiApiBase = ApiKeyManager.getEndPoint(openaiApiKey);
+	const openAiApiBase = llmModelData.api_base;
 	if (openAiApiBase) {
 		envs['OPENAI_API_BASE'] = openAiApiBase;
 	}
@@ -57,7 +57,14 @@ export async function askCode(message: any, panel: vscode.WebviewPanel|vscode.We
 	let envs = {};

-	let openaiApiKey = await ApiKeyManager.getApiKey();
+	const llmModelData = await ApiKeyManager.llmModel();
+	if (!llmModelData) {
+		logger.channel()?.error('No valid llm model is selected!');
+		logger.channel()?.show();
+		return;
+	}
+
+	let openaiApiKey = llmModelData.api_key;
 	if (!openaiApiKey) {
 		logger.channel()?.error('The OpenAI key is invalid!');
 		logger.channel()?.show();
@@ -65,7 +72,7 @@ export async function askCode(message: any, panel: vscode.WebviewPanel|vscode.We
 	}
 	envs['OPENAI_API_KEY'] = openaiApiKey;

-	const openAiApiBase = ApiKeyManager.getEndPoint(openaiApiKey);
+	const openAiApiBase = llmModelData.api_base;
 	if (openAiApiBase) {
 		envs['OPENAI_API_BASE'] = openAiApiBase;
 	}
@@ -12,22 +12,20 @@ regInMessage({command: 'getUserAccessKey'});
 regOutMessage({command: 'getUserAccessKey', accessKey: "DC.xxx", keyType: "DevChat", endPoint: "https://xxx"});
 export async function getUserAccessKey(message: any, panel: vscode.WebviewPanel|vscode.WebviewView): Promise<void> {
 	const workspaceDir = UiUtilWrapper.workspaceFoldersFirstPath();
-	let openaiApiKey = await ApiKeyManager.getApiKey();
-	if (!openaiApiKey) {
+	const llmModelData = await ApiKeyManager.llmModel();
+	if (!llmModelData || llmModelData.api_key) {
 		MessageHandler.sendMessage(panel, {"command": "getUserAccessKey", "accessKey": "", "keyType": "", "endPoint": ""});
 		return;
 	}

-	let keyType = ApiKeyManager.getKeyType(openaiApiKey!);
-	if (keyType === "DC") {
+	let keyType: string = "others";
+	if (llmModelData.api_key?.startsWith("DC.")) {
 		keyType = "DevChat";
 	} else if (keyType === "sk") {
 		keyType = "OpenAI";
 	}

-	let openAiApiBase = ApiKeyManager.getEndPoint(openaiApiKey);
+	let openAiApiBase = llmModelData.api_base;
 	if (!openAiApiBase) {
 		openAiApiBase = "";
 	}
-	MessageHandler.sendMessage(panel, {"command": "getUserAccessKey", "accessKey": openaiApiKey, "keyType": keyType, "endPoint": openAiApiBase});
+	MessageHandler.sendMessage(panel, {"command": "getUserAccessKey", "accessKey": llmModelData.api_key, "keyType": keyType, "endPoint": openAiApiBase});
 }
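For reference, the reply shape declared by regOutMessage above, which the rewritten handler now fills from llmModelData — the concrete values here are the illustrative ones from the registration itself:

    // { command: "getUserAccessKey", accessKey: "DC.xxx", keyType: "DevChat", endPoint: "https://xxx" }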
@@ -9,7 +9,7 @@ import ExtensionContextHolder from '../util/extensionContext';
 import { UiUtilWrapper } from '../util/uiUtil';
 import { ApiKeyManager } from '../util/apiKey';
-import { exitCode } from 'process';
+import * as yaml from 'yaml';


 const envPath = path.join(__dirname, '..', '.env');
@@ -192,13 +192,6 @@ class DevChat {
 		};
 	}

-	apiEndpoint(apiKey: string | undefined): any {
-		const openAiApiBase = ApiKeyManager.getEndPoint(apiKey);
-
-		const openAiApiBaseObject = openAiApiBase ? { OPENAI_API_BASE: openAiApiBase } : {};
-		return openAiApiBaseObject;
-	}
-
 	async chat(content: string, options: ChatOptions = {}, onData: (data: ChatResponse) => void): Promise<ChatResponse> {
 		const llmModelData = await ApiKeyManager.llmModel();
 		if (!llmModelData) {
@@ -223,11 +216,8 @@ class DevChat {
 			logger.channel()?.show();
 		}

-		const openaiTemperature = UiUtilWrapper.getConfiguration('DevChat', 'OpenAI.temperature');
 		const openaiStream = UiUtilWrapper.getConfiguration('DevChat', 'OpenAI.stream');
-		const llmModel = UiUtilWrapper.getConfiguration('DevChat', 'llmModel');
-		const tokensPerPrompt = UiUtilWrapper.getConfiguration('DevChat', 'OpenAI.tokensPerPrompt');

+		const openAiApiBaseObject = llmModelData.api_base? { OPENAI_API_BASE: llmModelData.api_base } : {};
+		const activeLlmModelKey = llmModelData.api_key;

@@ -236,20 +226,31 @@ class DevChat {
 			devChat = 'devchat';
 		}

-		const devchatConfig = {
-			provider: llmModel,
-			"tokens-per-prompt": tokensPerPrompt,
-			OpenAI: {
-				temperature: openaiTemperature,
-				stream: openaiStream,
+		const reduceModelData = Object.keys(llmModelData)
+			.filter(key => key !== 'api_key' && key !== 'provider' && key !== 'model' && key !== 'api_base')
+			.reduce((obj, key) => {
+				obj[key] = llmModelData[key];
+				return obj;
+			}, {});
+		let devchatConfig = {
+			"id": llmModelData.model,
+			"provider": llmModelData.provider,
+			"parameters": {
+				"stream": openaiStream,
+				...reduceModelData
 			}
 		};
+
+		let devchatModels = {"models": [devchatConfig]};
+
 		// write to config file
-		const configPath = path.join(workspaceDir!, '.chat', 'config.json');
+		const os = process.platform;
+		const userHome = os === 'win32' ? fs.realpathSync(process.env.USERPROFILE || '') : process.env.HOME;
+
+		const configPath = path.join(userHome!, '.chat', 'config.yml');
 		// write devchatConfig to configPath
-		const configJson = JSON.stringify(devchatConfig, null, 2);
-		fs.writeFileSync(configPath, configJson);
+		const yamlString = yaml.stringify(devchatModels);
+		fs.writeFileSync(configPath, yamlString);

 		try {
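For orientation, a sketch of the ~/.chat/config.yml that chat() now writes for a single model. The structure follows devchatModels above (a "models" list of {id, provider, parameters}); the concrete values (model id, stream, temperature) are illustrative and depend on the user's settings, since any extra per-model properties are spread in via reduceModelData:

    models:
      - id: gpt-3.5-turbo
        provider: openai
        parameters:
          stream: true
          temperature: 0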
@@ -22,17 +22,17 @@ export class ApiKeyManager {
 		return providerNameMap[provider];
 	}
 	static async getApiKey(llmType: string = "OpenAI"): Promise<string | undefined> {
-		const llmModel = await this.llmModel();
-		if (!llmModel) {
+		const llmModelT = await this.llmModel();
+		if (!llmModelT) {
 			return undefined;
 		}

-		return llmModel.api_key;
+		return llmModelT.api_key;
 	}

 	static async llmModel() {
-		const llmModel = UiUtilWrapper.getConfiguration('devchat', 'defaultModel');
-		if (!llmModel) {
+		const llmModelT = UiUtilWrapper.getConfiguration('devchat', 'defaultModel');
+		if (!llmModelT) {
 			return undefined;
 		}
@@ -62,20 +62,24 @@ export class ApiKeyManager {
 			modelProperties["api_key"] = apiKey;
 		}

+		if (!modelConfig["api_base"] && modelProperties["api_key"]?.startsWith("DC.")) {
+			modelProperties["api_base"] = "https://api.devchat.ai/v1";
+		}
+
 		modelProperties['model'] = modelName;
 		return modelProperties;
 	};

-	if (llmModel === "gpt-3.5-turbo") {
+	if (llmModelT === "gpt-3.5-turbo") {
 		return await modelProperties('Model.gpt-3-5', "gpt-3.5-turbo");
 	}
-	if (llmModel === "gpt-3.5-turbo-16k") {
+	if (llmModelT === "gpt-3.5-turbo-16k") {
 		return await modelProperties('Model.gpt-3-5-16k', "gpt-3.5-turbo-16k");
 	}
-	if (llmModel === "gpt-4") {
+	if (llmModelT === "gpt-4") {
 		return await modelProperties('Model.gpt-4', "gpt-4");
 	}
-	if (llmModel === "claude-2") {
+	if (llmModelT === "claude-2") {
 		return await modelProperties('Model.claude-2', "claude-2");
 	}
@@ -89,7 +93,7 @@ export class ApiKeyManager {
 			if (!model.model) {
 				continue;
 			}
-			if (model.model === llmModel) {
+			if (model.model === llmModelT) {
 				let modelProperties: any = {};
 				for (const key of Object.keys(model || {})) {
 					const property = model![key];
@@ -111,6 +115,10 @@ export class ApiKeyManager {
 					modelProperties["api_key"] = apiKey;
 				}

+				if (!model["api_base"] && modelProperties["api_key"]?.startsWith("DC.")) {
+					modelProperties["api_base"] = "https://api.devchat.ai/v1";
+				}
+
 				modelProperties["provider"] = modelProvider;
 				modelProperties["model"] = modelName;
@@ -137,15 +145,4 @@ export class ApiKeyManager {
 	static async loadApiKeySecret(llmType: string = "Unknow"): Promise<string | undefined> {
 		return await UiUtilWrapper.secretStorageGet(`Access_KEY_${llmType}`);
 	}
-
-	static getEndPoint(apiKey: string | undefined): string | undefined {
-		let endPoint = UiUtilWrapper.getConfiguration('DevChat', 'API_ENDPOINT');
-		if (!endPoint) {
-			endPoint = process.env.OPENAI_API_BASE;
-		}
-		if (!endPoint && apiKey?.startsWith("DC.")) {
-			endPoint = "https://api.devchat.ai/v1";
-		}
-		return endPoint;
-	}
 }
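With getEndPoint removed, endpoint resolution moves into llmModel() itself (the two added "api_base" blocks above). A minimal caller-side sketch, assuming the record shape shown in this diff — this mirrors the createOpenAiKeyEnv change that follows, it is not a new API:

    const llmModelData = await ApiKeyManager.llmModel();
    // api_base is undefined unless configured, or defaulted by llmModel() for "DC." keys
    const apiBase = llmModelData?.api_base;
    const envs = apiBase ? { ...process.env, OPENAI_API_BASE: apiBase } : { ...process.env };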
@@ -12,12 +12,12 @@ import { ApiKeyManager } from './apiKey';

 async function createOpenAiKeyEnv() {
 	let envs = {...process.env};
-	let openaiApiKey = await ApiKeyManager.getApiKey();
-	if (openaiApiKey) {
-		envs['OPENAI_API_KEY'] = openaiApiKey;
-	}
+	const llmModelData = await ApiKeyManager.llmModel();
+	if (llmModelData && llmModelData.api_key) {
+		envs['OPENAI_API_KEY'] = llmModelData.api_key;
+	}

-	const openAiApiBase = ApiKeyManager.getEndPoint(openaiApiKey);
+	const openAiApiBase = llmModelData.api_base;
 	if (openAiApiBase) {
 		envs['OPENAI_API_BASE'] = openAiApiBase;
 	}
@@ -228,11 +228,11 @@ export function runCommandStringAndWriteOutputSync(command: string, outputFile:
 		return JSON.stringify(data);
 	};
 	fs.writeFileSync(outputFile, onOutputFile(command, output));
-	return { exitCode: 0, stdout: output, stderr: '' }
+	return { exitCode: 0, stdout: output, stderr: '' };
 } catch (error) {
 	logger.channel()?.error(`Error occurred: ${error}`);
 	logger.channel()?.show();
-	return { exitCode: 1, stdout: '', stderr: String(error) }
+	return { exitCode: 1, stdout: '', stderr: String(error) };
 }
@@ -256,6 +256,9 @@ const chatPanel = observer(() => {
 						</Chip>
 						<Chip size="xs" styles={{ label: chipStyle }} value="gpt-4">
 							GPT-4
 						</Chip>
+						<Chip size="xs" styles={{ label: chipStyle }} value="claude-2">
+							Claude-2
+						</Chip>
 					</Group>
 				</Chip.Group>
@@ -76,6 +76,7 @@ describe('sendMessageBase', () => {
 			text: 'Hello, world!'
 		};
 		const chatResponse: ChatResponse = {
+			"finish_reason": "",
 			response: 'Hello, user!',
 			isError: false,
 			user: 'user',
@@ -92,6 +93,7 @@ describe('sendMessageBase', () => {
 	it('should handle response text correctly when isError is false', async () => {
 		const partialDataText = 'Partial data';
 		const chatResponse: ChatResponse = {
+			"finish_reason": "",
 			response: 'Hello, user!',
 			isError: false,
 			user: 'user',
@@ -106,6 +108,7 @@ describe('sendMessageBase', () => {
 	it('should handle response text correctly when isError is true', async () => {
 		const partialDataText = 'Partial data';
 		const chatResponse: ChatResponse = {
+			"finish_reason": "",
 			response: 'Error occurred!',
 			isError: true,
 			user: 'user',
@@ -132,9 +135,7 @@ describe('sendMessageBase', () => {
 		getConfigurationStub.withArgs('DevChat', 'Access_Key_DevChat').returns(process.env.TEST_DEVCHAT_KEY);
 		getConfigurationStub.withArgs('DevChat', 'OpenAI.temperature').returns(0);
 		getConfigurationStub.withArgs('DevChat', 'OpenAI.stream').returns('true');
-		getConfigurationStub.withArgs('DevChat', 'llmModel').returns('OpenAI');
-		getConfigurationStub.withArgs('DevChat', 'OpenAI.tokensPerPrompt').returns(9000);
-

 		const result = await sendMessageBase(message, handlePartialData);
 		expect(result).to.be.an('object');
 		expect(result!.command).to.equal('receiveMessage');
@@ -158,9 +159,7 @@ describe('sendMessageBase', () => {
 		getConfigurationStub.withArgs('DevChat', 'Access_Key_DevChat').returns('sk-KvH7ZCtHmFDCBTqH0jUv');
 		getConfigurationStub.withArgs('DevChat', 'OpenAI.temperature').returns('0');
 		getConfigurationStub.withArgs('DevChat', 'OpenAI.stream').returns('true');
-		getConfigurationStub.withArgs('DevChat', 'llmModel').returns('OpenAI');
-		getConfigurationStub.withArgs('DevChat', 'OpenAI.tokensPerPrompt').returns('9000');
-

 		const result = await sendMessageBase(message, handlePartialData);
 		expect(result).to.be.an('object');
 		expect(result!.command).to.equal('receiveMessage');
@@ -186,9 +185,7 @@ describe('sendMessageBase', () => {
 		getConfigurationStub.withArgs('DevChat', 'Access_Key_DevChat').returns(process.env.TEST_DEVCHAT_KEY);
 		getConfigurationStub.withArgs('DevChat', 'OpenAI.temperature').returns(0);
 		getConfigurationStub.withArgs('DevChat', 'OpenAI.stream').returns('true');
-		getConfigurationStub.withArgs('DevChat', 'llmModel').returns('OpenAI');
-		getConfigurationStub.withArgs('DevChat', 'OpenAI.tokensPerPrompt').returns(9000);
-

 		// Start sendMessageBase in a separate Promise
 		const sendMessagePromise = sendMessageBase(message, handlePartialData);
@@ -39,30 +39,6 @@ describe('ApiKeyManager', () => {
 		});
 	});

-	describe('getEndPoint', () => {
-		it('should return the configuration endpoint', () => {
-			sinon.stub(UiUtilWrapper, 'getConfiguration').returns('https://config-endpoint.com');
-
-			const endPoint = ApiKeyManager.getEndPoint('sk-key');
-			expect(endPoint).to.equal('https://config-endpoint.com');
-		});
-
-		it('should return the environment variable endpoint', () => {
-			sinon.stub(UiUtilWrapper, 'getConfiguration').returns(undefined);
-			process.env.OPENAI_API_BASE = 'https://env-endpoint.com';
-
-			const endPoint = ApiKeyManager.getEndPoint('sk-key');
-			expect(endPoint).to.equal('https://env-endpoint.com');
-		});
-
-		it('should return the default endpoint for DC keys', () => {
-			sinon.stub(UiUtilWrapper, 'getConfiguration').returns(undefined);
-
-			const endPoint = ApiKeyManager.getEndPoint('DC.key');
-			expect(endPoint).to.equal('https://api.devchat.ai/v1');
-		});
-	});
-
 	describe('getKeyType', () => {
 		it('should return "sk" for sk keys', () => {
 			const keyType = ApiKeyManager.getKeyType('sk-key');