diff --git a/.vscode/settings.json b/.vscode/settings.json index 5c5ac48..b0ea598 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -9,5 +9,5 @@ "dist": true // set this to false to include "dist" folder in search results }, // Turn off tsc task auto detection since we have the necessary tasks as npm scripts - "typescript.tsc.autoDetect": "off" + "typescript.tsc.autoDetect": "off" } \ No newline at end of file diff --git a/package-lock.json b/package-lock.json index 232910a..a0fe996 100644 --- a/package-lock.json +++ b/package-lock.json @@ -21,6 +21,7 @@ "@tiptap/starter-kit": "^2.0.3", "axios": "^1.3.6", "dotenv": "^16.0.3", + "js-yaml": "^4.1.0", "mobx": "^6.10.0", "mobx-react": "^9.0.0", "mobx-state-tree": "^5.1.8", @@ -34,7 +35,8 @@ "rehype-raw": "^6.1.1", "shell-escape": "^0.2.0", "string-argv": "^0.3.2", - "uuid": "^9.0.0" + "uuid": "^9.0.0", + "yaml": "^2.3.2" }, "devDependencies": { "@babel/core": "^7.21.8", @@ -5882,6 +5884,14 @@ "node": ">=10" } }, + "node_modules/cosmiconfig/node_modules/yaml": { + "version": "1.10.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", + "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", + "engines": { + "node": ">= 6" + } + }, "node_modules/create-require": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz", @@ -9030,7 +9040,6 @@ "version": "4.1.0", "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "dev": true, "dependencies": { "argparse": "^2.0.1" }, @@ -14025,11 +14034,11 @@ "dev": true }, "node_modules/yaml": { - "version": "1.10.2", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", - "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", + "version": "2.3.2", + 
"resolved": "https://registry.npmjs.org/yaml/-/yaml-2.3.2.tgz", + "integrity": "sha512-N/lyzTPaJasoDmfV7YTrYCI0G/3ivm/9wdG0aHuheKowWQwGTsK0Eoiw6utmzAnI6pkJa0DUVygvp3spqqEKXg==", "engines": { - "node": ">= 6" + "node": ">= 14" } }, "node_modules/yargs": { @@ -18429,6 +18438,13 @@ "parse-json": "^5.0.0", "path-type": "^4.0.0", "yaml": "^1.10.0" + }, + "dependencies": { + "yaml": { + "version": "1.10.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", + "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==" + } } }, "create-require": { @@ -20759,7 +20775,6 @@ "version": "4.1.0", "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "dev": true, "requires": { "argparse": "^2.0.1" } @@ -24350,9 +24365,9 @@ "dev": true }, "yaml": { - "version": "1.10.2", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", - "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==" + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.3.2.tgz", + "integrity": "sha512-N/lyzTPaJasoDmfV7YTrYCI0G/3ivm/9wdG0aHuheKowWQwGTsK0Eoiw6utmzAnI6pkJa0DUVygvp3spqqEKXg==" }, "yargs": { "version": "16.2.0", diff --git a/package.json b/package.json index 235ca51..9697c34 100644 --- a/package.json +++ b/package.json @@ -70,59 +70,353 @@ "configuration": { "title": "DevChat", "properties": { - "DevChat.llmModel": { - "type": "string", - "default": "OpenAI", - "enum": [ - "OpenAI" + "devchat.Model.gpt-3-5": { + "type": "object", + "properties": { + "provider": { + "type": "string", + "default": "openai", + "enum": [ + "openai" + ], + "description": "[required*] Specify which provider host this llm model" + }, + "api_key": { + "type": "string", + "default": "", + "description": "[required*] Specify access key for 
selected provider." + }, + "api_base": { + "type": "string", + "default": "", + "description": "[optional*] Specify the api base for selected provider. Leave it blank if you want to use default api base." + }, + "temperature": { + "type": "number", + "default": 0.3, + "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic." + }, + "max_tokens": { + "type": "number", + "default": 1000, + "description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens." + }, + "presence_penalty": { + "type": "number", + "default": 0, + "description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics." + }, + "frequency_penalty": { + "type": "number", + "default": 0, + "description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim." + } + }, + "required": [ + "provider", + "key" ], - "description": "Select which LLM to use." + "additionalProperties": false, + "order": 0, + "markdownDescription": "Specify the properties for gpt-3.5-turbo model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature)" }, - "DevChat.maxLogCount": { - "type": "number", - "default": 20, - "description": "Limit the number of prompts in the chat view." 
+ "devchat.Model.gpt-3-5-16k": { + "type": "object", + "properties": { + "provider": { + "type": "string", + "default": "openai", + "enum": [ + "openai" + ], + "description": "[required*] Specify which provider host this llm model" + }, + "api_key": { + "type": "string", + "default": "", + "description": "[required*] Specify access key for selected provider." + }, + "api_base": { + "type": "string", + "default": "", + "description": "[optional*] Specify the api base for selected provider. Leave it blank if you want to use default api base." + }, + "temperature": { + "type": "number", + "default": 0.3, + "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic." + }, + "max_tokens": { + "type": "number", + "default": 1000, + "description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens." + }, + "presence_penalty": { + "type": "number", + "default": 0, + "description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics." + }, + "frequency_penalty": { + "type": "number", + "default": 0, + "description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim." + } + }, + "required": [ + "provider", + "key" + ], + "additionalProperties": false, + "order": 1, + "markdownDescription": "Specify properties for gpt-3.5-turbo-16k model. Leave it blank if you won't use this llm model. 
[how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) " }, - "DevChat.OpenAI.model": { - "type": "string", - "default": "gpt-3.5-turbo", - "description": "Specify the model ID.", - "when": "DevChat.llmModel == 'OpenAI'" + "devchat.Model.gpt-4": { + "type": "object", + "properties": { + "provider": { + "type": "string", + "default": "openai", + "enum": [ + "openai" + ], + "description": "[required*] Specify which provider host this llm model" + }, + "api_key": { + "type": "string", + "default": "", + "description": "[required*] Specify access key for selected provider." + }, + "api_base": { + "type": "string", + "default": "", + "description": "[optional*] Specify the api base for selected provider. Leave it blank if you want to use default api base." + }, + "temperature": { + "type": "number", + "default": 0.3, + "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic." + }, + "max_tokens": { + "type": "number", + "default": 1000, + "description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens." + }, + "presence_penalty": { + "type": "number", + "default": 0, + "description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics." + }, + "frequency_penalty": { + "type": "number", + "default": 0, + "description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim." 
+ } + }, + "additionalProperties": false, + "order": 2, + "markdownDescription": "properties for gpt-4 model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature)" }, - "DevChat.OpenAI.temperature": { - "type": "number", - "default": 0, - "description": "The sampling temperature to use, between 0 and 2. Lower values like 0.2 will make it more focused and deterministic.", - "when": "DevChat.llmModel == 'OpenAI'" + "devchat.Model.claude-2": { + "type": "object", + "properties": { + "provider": { + "type": "string", + "default": "anthropic", + "enum": [ + "anthropic" + ], + "description": "[required*] which provider host this llm model" + }, + "api_key": { + "type": "string", + "default": "", + "description": "[required*] Specify access key for selected provider." + }, + "api_base": { + "type": "string", + "default": "", + "description": "[optional*] Specify the api base for selected provider. Leave it blank if you want to use default api base." + }, + "temperature": { + "type": "number", + "default": 0.3, + "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic." + }, + "max_tokens": { + "type": "number", + "default": 1000, + "description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens." + }, + "presence_penalty": { + "type": "number", + "default": 0, + "description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics." 
+ }, + "frequency_penalty": { + "type": "number", + "default": 0, + "description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim." + } + }, + "additionalProperties": false, + "order": 3, + "markdownDescription": "properties for claude-2 model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) " }, + "devchat.customModel": { + "type": "array", + "items": { + "type": "object", + "properties": { + "model": { + "oneOf": [ + { + "type": "string", + "enum": [ + "openai/gpt-4", + "openai/gpt-4-0613", + "openai/gpt-4-0314", + "openai/gpt-4-32k", + "openai/gpt-4-32k-0314", + "openai/gpt-4-32k-0613", + "openai/gpt-3.5-turbo", + "openai/gpt-3.5-turbo-0301", + "openai/gpt-3.5-turbo-0613", + "openai/gpt-3.5-turbo-16k", + "openai/gpt-3.5-turbo-16k-0613", + "openai/text-davinci-003", + "openai/curie-001", + "openai/babbage-001", + "openai/ada-001", + "openai/babbage-002", + "openai/davinci-002", + "cohere/command-nightly", + "cohere/command", + "cohere/command-light", + "cohere/command-medium-beta", + "cohere/command-xlarge-beta", + "anthropic/claude-2", + "anthropic/claude-instant-1", + "anthropic/claude-instant-1.2", + "replicate/replicate/", + "replicate/replicate/llama-2-70b-chat:58d078176e02c219e11eb4da5a02a7830a283b14cf8f94537af893ccff5ee781", + "replicate/a16z-infra/llama-2-13b-chat:2a7f981751ec7fdf87b5b91ad4db53683a98082e9ff7bfd12c8cd5ea85980a52", + "replicate/joehoover/instructblip-vicuna13b:c4c54e3c8c97cd50c2d2fec9be3b6065563ccf7d43787fb99f84151b867178fe", + "replicate/replicate/dolly-v2-12b:ef0e1aefc61f8e096ebe4db6b2bacc297daf2ef6899f0f7e001ec445893500e5", + "replicate/a16z-infra/llama-2-7b-chat:7b0bfc9aff140d5b75bacbed23e91fd3c34b01a1e958d32132de6e0a19796e2c", + 
"replicate/replicate/vicuna-13b:6282abe6a492de4145d7bb601023762212f9ddbbe78278bd6771c8b3b2f2a13b", + "replicate/daanelson/flan-t5-large:ce962b3f6792a57074a601d3979db5839697add2e4e02696b3ced4c022d4767f", + "replicate/replit/replit-code-v1-3b:b84f4c074b807211cd75e3e8b1589b6399052125b4c27106e43d47189e8415ad", + "huggingface/meta-llama/Llama-2-7b-hf", + "huggingface/meta-llama/Llama-2-7b-chat-hf", + "huggingface/meta-llama/Llama-2-13b-hf", + "huggingface/meta-llama/Llama-2-13b-chat-hf", + "huggingface/meta-llama/Llama-2-70b-hf", + "huggingface/meta-llama/Llama-2-70b-chat-hf", + "huggingface/meta-llama/Llama-2-7b", + "huggingface/meta-llama/Llama-2-7b-chat", + "huggingface/meta-llama/Llama-2-13b", + "huggingface/meta-llama/Llama-2-13b-chat", + "huggingface/meta-llama/Llama-2-70b", + "huggingface/meta-llama/Llama-2-70b-chat", + "together_ai/togethercomputer/llama-2-70b-chat", + "together_ai/togethercomputer/Llama-2-7B-32K-Instruct", + "together_ai/togethercomputer/llama-2-7b", + "baseten/qvv0xeq", + "baseten/q841o8w", + "baseten/31dxrj3", + "openrouter/google/palm-2-codechat-bison", + "openrouter/google/palm-2-chat-bison", + "openrouter/openai/gpt-3.5-turbo", + "openrouter/openai/gpt-3.5-turbo-16k", + "openrouter/openai/gpt-4-32k", + "openrouter/anthropic/claude-2", + "openrouter/anthropic/claude-instant-v1", + "openrouter/meta-llama/llama-2-13b-chat", + "openrouter/meta-llama/llama-2-70b-chat", + "vertex_ai/chat-bison", + "vertex_ai/chat-bison@001", + "vertex_ai/text-bison", + "vertex_ai/text-bison@001", + "ai21/j2-ultra", + "ai21/j2-mid", + "ai21/j2-light" + ], + "description": "Specify llm model name." + }, + { + "type": "string", + "description": "[required*] Specify llm model name." + } + ] + }, + "api_key": { + "type": "string", + "default": "", + "description": "[required*] Specify access key for selected provider." + }, + "api_base": { + "type": "string", + "default": "", + "description": "[optional*] Specify the api base for selected provider. 
Leave it blank if you want to use default api base." + }, + "temperature": { + "type": "number", + "default": 0.3, + "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic." + }, + "max_tokens": { + "type": "number", + "default": 1000, + "description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens." + }, + "presence_penalty": { + "type": "number", + "default": 0, + "description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics." + }, + "frequency_penalty": { + "type": "number", + "default": 0, + "description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim." + } + }, + "additionalProperties": false + }, + "order": 6, + "markdownDescription": "Specify the custom llm model for DevChat." + }, + "devchat.defaultModel": { + "oneOf": [ + { + "type": "string", + "default": "gpt-3.5-turbo", + "enum": [ + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-4", + "claude-2" + ] + }, + { + "type": "string", + "default": "gpt-3.5-turbo" + } + ], + "order": 7, + "markdownDescription": "Specify the default llm model for DevChat. [Price of each model](https://devchat.ai/pricing)" + }, + "DevChat.OpenAI.stream": { "type": "boolean", "default": true, - "description": "Whether to stream a response.", - "when": "DevChat.llmModel == 'OpenAI'" + "order": 10, + "description": "Whether to stream a response." 
}, - "DevChat.OpenAI.tokensPerPrompt": { - "type": "number", - "default": 6000, - "description": "The max number of tokens of a prompt.", - "when": "DevChat.llmModel == 'OpenAI'" - }, - "DevChat.Access_Key_DevChat": { - "type": "string", - "default": "", - "description": "DevChat's secret key for accessing multiple LLM models" - }, - "DevChat.Api_Key_OpenAI": { - "type": "string", - "default": "", - "description": "OpenAI's secret key for accessing LLM models. (Leave blank if using DevChat's key.)", - "when": "DevChat.llmModel == 'OpenAI'" - }, - "DevChat.API_ENDPOINT": { - "type": "string", - "default": "", - "description": "API endpoint URL", - "when": "DevChat.llmModel == 'OpenAI'" + "DevChat.EnableFunctionCalling": { + "type": "boolean", + "default": false, + "order": 11, + "description": "Enable function calling for GPT." }, "DevChat.DevChatPath": { "type": "string", @@ -135,8 +429,27 @@ ] } }, + "order": 12, "description": "Where is the devchat binary located?" }, + "DevChat.betaInvitationCode": { + "type": "string", + "default": "", + "order": 13, + "description": "The invitation code for beta testing." + }, + "DevChat.maxLogCount": { + "type": "number", + "default": 20, + "order": 14, + "description": "Limit the number of prompts in the chat view." + }, + "DevChat.askcode.supportedFileTypes": { + "type": "string", + "default": ".+\\.js$, .+\\.ts$, .+\\.jsx$, .+\\.tsx$, .+\\.java$, .+\\.py$, .+\\.go$, .+\\.rb$, .+\\.php$, .+\\.cpp$, .+\\.c$, .+\\.cs$, .+\\.swift$, .+\\.rs$, .+\\.sh$, .+\\.bash$, .+\\.zsh$, .+\\.m$, .+\\.mm$, .+\\.h$, .+\\.hpp$, .+\\.hh$, .+\\.html$, .+\\.htm$, .+\\.xhtml$, .+\\.xml$, .+\\.css$, .+\\.scss$, .+\\.sass$, .+\\.less$, .+\\.json$, .+\\.yaml$, .+\\.yml$, .+\\.toml$, .+\\.ini$, .+\\.md$, .+\\.markdown$, .+\\.txt$, .+\\.csv$, .+\\.sql$, .+\\.sqlite$, .+\\.db$, .+\\.hql$, .+\\.psql$, .+\\.pgsql$, .+\\.plpgsql$", + "order": 15, + "description": "Comma-separated list of regular expressions for supported file types for analysis." 
+ }, "DevChat.PythonPath": { "type": "string", "default": "", @@ -150,26 +463,10 @@ }, "description": "Which Python interpreter to use with DevChat?" }, - "DevChat.EnableFunctionCalling": { - "type": "boolean", - "default": false, - "description": "Enable/Disable function calling for GPT.", - "when": "DevChat.llmModel == 'OpenAI'" - }, "DevChat.PythonVirtualEnv": { "type": "string", "default": "", "description": "Path to the Python virtual environment for AskCode." - }, - "DevChat.askcode.supportedFileTypes": { - "type": "string", - "default": ".+\\.js$, .+\\.ts$, .+\\.jsx$, .+\\.tsx$, .+\\.java$, .+\\.py$, .+\\.go$, .+\\.rb$, .+\\.php$, .+\\.cpp$, .+\\.c$, .+\\.cs$, .+\\.swift$, .+\\.rs$, .+\\.sh$, .+\\.bash$, .+\\.zsh$, .+\\.m$, .+\\.mm$, .+\\.h$, .+\\.hpp$, .+\\.hh$, .+\\.html$, .+\\.htm$, .+\\.xhtml$, .+\\.xml$, .+\\.css$, .+\\.scss$, .+\\.sass$, .+\\.less$, .+\\.json$, .+\\.yaml$, .+\\.yml$, .+\\.toml$, .+\\.ini$, .+\\.md$, .+\\.markdown$, .+\\.txt$, .+\\.csv$, .+\\.sql$, .+\\.sqlite$, .+\\.db$, .+\\.hql$, .+\\.psql$, .+\\.pgsql$, .+\\.plpgsql$", - "description": "Comma-separated list of regular expressions for supported file types for analysis." - }, - "DevChat.betaInvitationCode": { - "type": "string", - "default": "", - "description": "The invitation code for beta testing." 
} } }, @@ -231,13 +528,73 @@ "title": "Create Entry" }, { - "command": "DevChat.Api_Key_OpenAI", - "title": "Input OpenAI Api Key", + "command": "DevChat.AccessKey.OpenAI", + "title": "Input Access Key for OpenAI", "category": "DevChat" }, { - "command": "DevChat.Access_Key_DevChat", - "title": "Input DevChat Access Key", + "command": "DevChat.AccessKey.Cohere", + "title": "Input Access Key for Cohere", + "category": "DevChat" + }, + { + "command": "DevChat.AccessKey.Anthropic", + "title": "Input Access Key for Anthropic", + "category": "DevChat" + }, + { + "command": "DevChat.AccessKey.Replicate", + "title": "Input Access Key for Replicate", + "category": "DevChat" + }, + { + "command": "DevChat.AccessKey.HuggingFace", + "title": "Input Access Key for HuggingFace", + "category": "DevChat" + }, + { + "command": "DevChat.AccessKey.TogetherAI", + "title": "Input Access Key for TogetherAI", + "category": "DevChat" + }, + { + "command": "DevChat.AccessKey.OpenRouter", + "title": "Input Access Key for OpenRouter", + "category": "DevChat" + }, + { + "command": "DevChat.AccessKey.VertexAI", + "title": "Input Access Key for VertexAI", + "category": "DevChat" + }, + { + "command": "DevChat.AccessKey.AI21", + "title": "Input Access Key for AI21", + "category": "DevChat" + }, + { + "command": "DevChat.AccessKey.BaseTen", + "title": "Input Access Key for BaseTen", + "category": "DevChat" + }, + { + "command": "DevChat.AccessKey.Azure", + "title": "Input Access Key for Azure", + "category": "DevChat" + }, + { + "command": "DevChat.AccessKey.SageMaker", + "title": "Input Access Key for SageMaker", + "category": "DevChat" + }, + { + "command": "DevChat.AccessKey.Bedrock", + "title": "Input Access Key for Bedrock", + "category": "DevChat" + }, + { + "command": "DevChat.AccessKey.DevChat", + "title": "Input Access Key for DevChat", "category": "DevChat" }, { @@ -287,6 +644,11 @@ "command": "DevChat.InstallCommands", "title": "Install slash commands", "category": "DevChat" + }, + { 
+ "command": "DevChat.UpdataChatModels", + "title": "Update Chat Models", + "category": "DevChat" } ], "menus": { @@ -484,6 +846,7 @@ "@tiptap/starter-kit": "^2.0.3", "axios": "^1.3.6", "dotenv": "^16.0.3", + "js-yaml": "^4.1.0", "mobx": "^6.10.0", "mobx-react": "^9.0.0", "mobx-state-tree": "^5.1.8", @@ -497,6 +860,7 @@ "rehype-raw": "^6.1.1", "shell-escape": "^0.2.0", "string-argv": "^0.3.2", - "uuid": "^9.0.0" + "uuid": "^9.0.0", + "yaml": "^2.3.2" } -} \ No newline at end of file +} diff --git a/src/context/contextRefDefs.ts b/src/context/contextRefDefs.ts index 7663410..1ae331f 100644 --- a/src/context/contextRefDefs.ts +++ b/src/context/contextRefDefs.ts @@ -1,14 +1,10 @@ -import * as path from 'path'; - -import * as vscode from 'vscode' +import * as vscode from 'vscode'; import { ChatContext } from './contextManager'; -import { createTempSubdirectory, git_ls_tree, runCommandStringAndWriteOutput } from '../util/commonUtil'; import { logger } from '../util/logger'; import { handleCodeSelected } from './contextCodeSelected'; import DevChat, { ChatOptions } from '../toolwrapper/devchat'; -import { number } from 'mobx-state-tree/dist/internal'; async function getCurrentSelectText(activeEditor: vscode.TextEditor): Promise { @@ -183,7 +179,7 @@ async function getSymbolDefine(symbolList: string[], activeEditor: vscode.TextEd for (const child of symbol.children) { visitFun(child); } - } + }; for (const symbol of symbolsT) { visitFun(symbol); } diff --git a/src/contributes/commands.ts b/src/contributes/commands.ts index 21b5cbe..bffa23c 100644 --- a/src/contributes/commands.ts +++ b/src/contributes/commands.ts @@ -21,7 +21,7 @@ import { FT } from '../util/feature_flags/feature_toggles'; import { getPackageVersion } from '../util/python_installer/pip_package_version'; import { exec } from 'child_process'; -import { sendCommandListByDevChatRun } from '../handler/regCommandList'; +import { sendCommandListByDevChatRun, updateChatModels } from '../handler/regCommandList'; 
import DevChat from "../toolwrapper/devchat"; let indexProcess: CommandRun | null = null; @@ -82,42 +82,39 @@ function registerAskForFileCommand(context: vscode.ExtensionContext) { context.subscriptions.push(vscode.commands.registerCommand('devchat.askForFile_chinese', callback)); } -export function registerOpenAiApiKeySettingCommand(context: vscode.ExtensionContext) { - const secretStorage: vscode.SecretStorage = context.secrets; +function regAccessKeyCommand(context: vscode.ExtensionContext, provider: string) { context.subscriptions.push( - vscode.commands.registerCommand('DevChat.Api_Key_OpenAI', async () => { + vscode.commands.registerCommand(`DevChat.AccessKey.${provider}`, async () => { const passwordInput: string = await vscode.window.showInputBox({ password: true, - title: "Input OpenAi Api Key", - placeHolder: "Set OpenAI Api Key.(Leave blank if clearing stored key.)" + title: `Input ${provider} Access Key`, + placeHolder: `Set ${provider} Access Key.(Leave blank if clearing stored key.)` }) ?? ''; if (passwordInput.trim() !== "" && !isValidApiKey(passwordInput)) { UiUtilWrapper.showErrorMessage("Your api key is invalid!"); return ; } - ApiKeyManager.writeApiKeySecret(passwordInput, "OpenAI"); + await ApiKeyManager.writeApiKeySecret(passwordInput, provider); }) ); } -export function registerDevChatApiKeySettingCommand(context: vscode.ExtensionContext) { - const secretStorage: vscode.SecretStorage = context.secrets; - context.subscriptions.push( - vscode.commands.registerCommand('DevChat.Access_Key_DevChat', async () => { - const passwordInput: string = await vscode.window.showInputBox({ - password: true, - title: "Input DevChat Access Key", - placeHolder: "Set DevChat Access Key.(Leave blank if clearing stored key.)" - }) ?? 
''; - - if (passwordInput.trim() !== "" && !isValidApiKey(passwordInput)) { - UiUtilWrapper.showErrorMessage("Your access key is invalid!"); - return ; - } - ApiKeyManager.writeApiKeySecret(passwordInput, "DevChat"); - }) - ); +export function registerAccessKeySettingCommand(context: vscode.ExtensionContext) { + regAccessKeyCommand(context, "OpenAI"); + regAccessKeyCommand(context, "Cohere"); + regAccessKeyCommand(context, "Anthropic"); + regAccessKeyCommand(context, "Replicate"); + regAccessKeyCommand(context, "HuggingFace"); + regAccessKeyCommand(context, "TogetherAI"); + regAccessKeyCommand(context, "OpenRouter"); + regAccessKeyCommand(context, "VertexAI"); + regAccessKeyCommand(context, "AI21"); + regAccessKeyCommand(context, "BaseTen"); + regAccessKeyCommand(context, "Azure"); + regAccessKeyCommand(context, "SageMaker"); + regAccessKeyCommand(context, "Bedrock"); + regAccessKeyCommand(context, "DevChat"); } export function registerStatusBarItemClickCommand(context: vscode.ExtensionContext) { @@ -142,7 +139,7 @@ const topicDeleteCallback = async (item: TopicTreeItem) => { TopicManager.getInstance().deleteTopic(item.id); } }; -; + export function regTopicDeleteCommand(context: vscode.ExtensionContext) { context.subscriptions.push( @@ -316,7 +313,16 @@ async function installAskCode(supportedFileTypes, progressBar: any, callback: Fu async function indexCode(pythonVirtualEnv, supportedFileTypes, progressBar: any) { let envs = {}; - let openaiApiKey = await ApiKeyManager.getApiKey(); + const llmModelData = await ApiKeyManager.llmModel(); + if (!llmModelData) { + logger.channel()?.error('No valid llm model is selected!'); + logger.channel()?.show(); + + progressBar.endWithError("No valid llm model is selected!"); + return; + } + + let openaiApiKey = llmModelData.api_key; if (!openaiApiKey) { logger.channel()?.error('The OpenAI key is invalid!'); logger.channel()?.show(); @@ -326,7 +332,7 @@ async function indexCode(pythonVirtualEnv, supportedFileTypes, progressBar: 
any) } envs['OPENAI_API_KEY'] = openaiApiKey; - const openAiApiBase = ApiKeyManager.getEndPoint(openaiApiKey); + const openAiApiBase = llmModelData.api_base; if (openAiApiBase) { envs['OPENAI_API_BASE'] = openAiApiBase; } @@ -443,17 +449,26 @@ export function registerAskCodeSummaryIndexStartCommand(context: vscode.Extensio async function indexCodeSummary(pythonVirtualEnv, supportedFileTypes, progressBar: any) { let envs = {}; - let openaiApiKey = await ApiKeyManager.getApiKey(); + const llmModelData = await ApiKeyManager.llmModel(); + if (!llmModelData) { + logger.channel()?.error('No valid llm model is selected!'); + logger.channel()?.show(); + + progressBar.endWithError("No valid llm model is selected!"); + return; + } + + let openaiApiKey = llmModelData.api_key; if (!openaiApiKey) { logger.channel()?.error('The OpenAI key is invalid!'); logger.channel()?.show(); - progressBar.endWithError("The OpenAI key is invalid!"); + progressBar.endWithError("The OpenAI key is invalid!"); return; } envs['OPENAI_API_KEY'] = openaiApiKey; - const openAiApiBase = ApiKeyManager.getEndPoint(openaiApiKey); + const openAiApiBase = llmModelData.api_base; if (openAiApiBase) { envs['OPENAI_API_BASE'] = openAiApiBase; } @@ -524,6 +539,14 @@ export function registerInstallCommandsCommand(context: vscode.ExtensionContext) context.subscriptions.push(disposable); } +export function registerUpdateChatModelsCommand(context: vscode.ExtensionContext) { + let disposable = vscode.commands.registerCommand('DevChat.UpdataChatModels', async () => { + updateChatModels(); + }); + + context.subscriptions.push(disposable); +} + export async function addSummaryContextFun(fsPath: string ) { if (!FT("ask-code-summary")) { UiUtilWrapper.showErrorMessage("This command is a beta version command and has not been released yet."); diff --git a/src/contributes/commandsBase.ts b/src/contributes/commandsBase.ts index 8ee6554..2484cac 100644 --- a/src/contributes/commandsBase.ts +++ 
b/src/contributes/commandsBase.ts @@ -14,7 +14,7 @@ export function checkDevChatDependency(showError: boolean = true): boolean { try { // Check if DevChat is installed - const expectVersion = 'DevChat 0.2.3'; + const expectVersion = 'DevChat 0.2.4'; const devchatVersion = runCommand(`"${devChat}" --version`).toString().trim(); if (devchatVersion < expectVersion) { logger.channel()?.info(`devchat version: ${devchatVersion}, but expect version: ${expectVersion}`); diff --git a/src/extension.ts b/src/extension.ts index c531841..cfef187 100644 --- a/src/extension.ts +++ b/src/extension.ts @@ -5,8 +5,7 @@ import { registerAddContextCommand, registerAskForCodeCommand, registerAskForFileCommand, - registerOpenAiApiKeySettingCommand, - registerDevChatApiKeySettingCommand, + registerAccessKeySettingCommand, regTopicDeleteCommand, regAddTopicCommand, regDeleteSelectTopicCommand, @@ -21,6 +20,7 @@ import { registerAskCodeSummaryIndexStopCommand, registerAddSummaryContextCommand, registerInstallCommandsCommand, + registerUpdateChatModelsCommand, } from './contributes/commands'; import { regLanguageContext } from './contributes/context'; import { regDevChatView, regTopicView } from './contributes/views'; @@ -33,6 +33,47 @@ import { UiUtilWrapper } from './util/uiUtil'; import { UiUtilVscode } from './util/uiUtil_vscode'; import { FT } from './util/feature_flags/feature_toggles'; +async function configUpdateTo_0912() { + const devchatKey = UiUtilWrapper.getConfiguration('DevChat', 'Access_Key_DevChat'); + const openaiKey = UiUtilWrapper.getConfiguration('DevChat', 'Api_Key_OpenAI'); + const endpointKey = UiUtilWrapper.getConfiguration('DevChat', 'API_ENDPOINT'); + + let modelConfigNew = {}; + if (openaiKey) { + modelConfigNew["api_key"] = openaiKey; + modelConfigNew["provider"] = "openai"; + } else if (devchatKey) { + modelConfigNew["api_key"] = devchatKey; + modelConfigNew["provider"] = "openai"; + } + + if (endpointKey) { + modelConfigNew["api_base"] = endpointKey; + } + + 
const modelConfig1: any = UiUtilWrapper.getConfiguration("devchat", "Model.gpt-3-5"); + const modelConfig2: any = UiUtilWrapper.getConfiguration("devchat", "Model.gpt-3-5-16k"); + const modelConfig3: any = UiUtilWrapper.getConfiguration("devchat", "Model.gpt-4"); + //if (!modelConfig1 && !modelConfig2 && !modelConfig3 && Object.keys(modelConfigNew).length > 0) { + if (Object.keys(modelConfig1).length === 0 && + Object.keys(modelConfig2).length === 0 && + Object.keys(modelConfig3).length === 0) { + // config default gpt models + if (Object.keys(modelConfigNew).length === 0) { + modelConfigNew["api_key"] = "DC."; + modelConfigNew["provider"] = "openai"; + } + + try { + vscode.workspace.getConfiguration("devchat").update("Model.gpt-3-5", modelConfigNew, vscode.ConfigurationTarget.Global); + vscode.workspace.getConfiguration("devchat").update("Model.gpt-3-5-16k", modelConfigNew, vscode.ConfigurationTarget.Global); + vscode.workspace.getConfiguration("devchat").update("Model.gpt-4", modelConfigNew, vscode.ConfigurationTarget.Global); + } catch(error) { + return; + } + } +} + function activate(context: vscode.ExtensionContext) { ExtensionContextHolder.context = context; @@ -40,13 +81,14 @@ function activate(context: vscode.ExtensionContext) { logger.init(LoggerChannelVscode.getInstance()); UiUtilWrapper.init(new UiUtilVscode()); + configUpdateTo_0912(); + regLanguageContext(); regDevChatView(context); regTopicView(context); - registerOpenAiApiKeySettingCommand(context); - registerDevChatApiKeySettingCommand(context); + registerAccessKeySettingCommand(context); registerOpenChatPanelCommand(context); registerAddContextCommand(context); registerAskForCodeCommand(context); @@ -54,6 +96,7 @@ function activate(context: vscode.ExtensionContext) { registerStatusBarItemClickCommand(context); registerInstallCommandsCommand(context); + registerUpdateChatModelsCommand(context); createStatusBarItem(context); if (FT("ask-code")) { diff --git a/src/handler/historyMessagesBase.ts 
b/src/handler/historyMessagesBase.ts index 1f7af4a..a9834fb 100644 --- a/src/handler/historyMessagesBase.ts +++ b/src/handler/historyMessagesBase.ts @@ -178,15 +178,7 @@ export async function historyMessagesBase(): Promise { - if (!isValidApiKey(apiKey)) { - return { command: 'receiveMessage', text: 'Your API key is invalid. We support OpenAI and DevChat keys. Please reset the key.', hash: '', user: 'system', date: '', isError: false }; - } - - isApiSet = true; - ApiKeyManager.writeApiKeySecret(apiKey); - - const welcomeMessageText = welcomeMessage().response; return { - command: 'receiveMessage', text: `Your OPENAI_API_KEY is set. Enjoy DevChat!\n${welcomeMessageText}`, hash: '', user: 'system', date: '', isError: false + command: 'receiveMessage', text: `You need config access key for specified llmodel in setting view.`, hash: '', user: 'system', date: '', isError: false }; } \ No newline at end of file diff --git a/src/handler/loadHandlers.ts b/src/handler/loadHandlers.ts index 4f40126..b754ad5 100644 --- a/src/handler/loadHandlers.ts +++ b/src/handler/loadHandlers.ts @@ -19,6 +19,7 @@ import { doCommand } from './doCommand'; import { getSetting, updateSetting } from './updateConfig'; import { featureToggle, featureToggles } from './featureToggle'; import { getUserAccessKey } from './userAccessKey'; +import { regModelList } from './regValidModelList'; // According to the context menu selected by the user, add the corresponding context file @@ -92,3 +93,5 @@ messageHandler.registerHandler('featureToggle', featureToggle); messageHandler.registerHandler('featureToggles', featureToggles); messageHandler.registerHandler('getUserAccessKey', getUserAccessKey); + +messageHandler.registerHandler('regModelList', regModelList); diff --git a/src/handler/regCommandList.ts b/src/handler/regCommandList.ts index e59f8a9..43190a4 100644 --- a/src/handler/regCommandList.ts +++ b/src/handler/regCommandList.ts @@ -2,6 +2,7 @@ import * as vscode from 'vscode'; import CommandManager 
from '../command/commandManager'; import { MessageHandler } from './messageHandler'; import { regInMessage, regOutMessage } from '../util/reg_messages'; +import { getValidModels } from './regValidModelList'; regInMessage({command: 'regCommandList'}); @@ -48,3 +49,8 @@ export async function sendCommandListByDevChatRun() { } } +export async function updateChatModels() { + const modelList = await getValidModels(); + MessageHandler.sendMessage(existPannel!, { command: 'regModelList', result: modelList }); +} + diff --git a/src/handler/regValidModelList.ts b/src/handler/regValidModelList.ts new file mode 100644 index 0000000..61f949c --- /dev/null +++ b/src/handler/regValidModelList.ts @@ -0,0 +1,98 @@ +import * as vscode from 'vscode'; +import ChatContextManager from '../context/contextManager'; +import { MessageHandler } from './messageHandler'; +import { regInMessage, regOutMessage } from '../util/reg_messages'; +import { ApiKeyManager } from '../util/apiKey'; +import { UiUtilWrapper } from '../util/uiUtil'; + + +export async function getValidModels(): Promise { + const modelProperties = async (modelPropertyName: string, modelName: string) => { + const modelConfig = UiUtilWrapper.getConfiguration("devchat", modelPropertyName); + if (!modelConfig) { + return undefined; + } + + let modelProperties: any = {}; + for (const key of Object.keys(modelConfig || {})) { + const property = modelConfig![key]; + modelProperties[key] = property; + } + if (!modelConfig["provider"]) { + return undefined; + } + if (!modelConfig["api_key"]) { + const providerName = ApiKeyManager.toProviderKey(modelConfig["provider"]); + if (!providerName) { + return undefined; + } + const apiKey = await ApiKeyManager.loadApiKeySecret(providerName); + if (!apiKey) { + return undefined; + } + modelProperties["api_key"] = apiKey; + } + + modelProperties['model'] = modelName; + return modelProperties; + }; + + let modelList : string[] = []; + const openaiModel = await modelProperties('Model.gpt-3-5', 
"gpt-3.5-turbo"); + if (openaiModel) { + modelList.push(openaiModel.model); + } + const openaiModel2 = await modelProperties('Model.gpt-3-5-16k', "gpt-3.5-turbo-16k"); + if (openaiModel2) { + modelList.push(openaiModel2.model); + } + const openaiModel3 = await modelProperties('Model.gpt-4', "gpt-4"); + if (openaiModel3) { + modelList.push(openaiModel3.model); + } + const claudeModel = await modelProperties('Model.claude-2', "claude-2"); + if (claudeModel) { + modelList.push(claudeModel.model); + } + + const customModelConfig: any = UiUtilWrapper.getConfiguration('devchat', 'customModel'); + if (!customModelConfig) { + return modelList; + } + + const customModels = customModelConfig as Array; + for (const model of customModels) { + if (!model.model) { + continue; + } + + const modelProvider = model["model"].split('/')[0]; + const modelName = model["model"].split('/').slice(1).join('/'); + + if (!model["api_key"]) { + const providerName = ApiKeyManager.toProviderKey(modelProvider); + if (!providerName) { + continue; + } + const apiKey = await ApiKeyManager.loadApiKeySecret(providerName); + if (!apiKey) { + continue; + } + } + + modelList.push(model["model"]); + } + + return modelList; +} + +regInMessage({command: 'regModelList'}); +regOutMessage({command: 'regModelList', result: [{name: ''}]}); +export async function regModelList(message: any, panel: vscode.WebviewPanel|vscode.WebviewView): Promise { + const modelList = await getValidModels(); + + MessageHandler.sendMessage(panel, { command: 'regModelList', result: modelList }); + return; +} + + diff --git a/src/handler/sendMessage.ts b/src/handler/sendMessage.ts index 35522d5..4cd1ad8 100644 --- a/src/handler/sendMessage.ts +++ b/src/handler/sendMessage.ts @@ -57,7 +57,14 @@ export async function askCode(message: any, panel: vscode.WebviewPanel|vscode.We let envs = {}; - let openaiApiKey = await ApiKeyManager.getApiKey(); + const llmModelData = await ApiKeyManager.llmModel(); + if (!llmModelData) { + 
logger.channel()?.error('No valid llm model is selected!'); + logger.channel()?.show(); + return; + } + + let openaiApiKey = llmModelData.api_key; if (!openaiApiKey) { logger.channel()?.error('The OpenAI key is invalid!'); logger.channel()?.show(); @@ -65,7 +72,7 @@ export async function askCode(message: any, panel: vscode.WebviewPanel|vscode.We } envs['OPENAI_API_KEY'] = openaiApiKey; - const openAiApiBase = ApiKeyManager.getEndPoint(openaiApiKey); + const openAiApiBase = llmModelData.api_base; if (openAiApiBase) { envs['OPENAI_API_BASE'] = openAiApiBase; } diff --git a/src/handler/userAccessKey.ts b/src/handler/userAccessKey.ts index d89b588..57a6020 100644 --- a/src/handler/userAccessKey.ts +++ b/src/handler/userAccessKey.ts @@ -12,22 +12,20 @@ regInMessage({command: 'getUserAccessKey'}); regOutMessage({command: 'getUserAccessKey', accessKey: "DC.xxx", keyType: "DevChat", endPoint: "https://xxx"}); export async function getUserAccessKey(message: any, panel: vscode.WebviewPanel|vscode.WebviewView): Promise { const workspaceDir = UiUtilWrapper.workspaceFoldersFirstPath(); - let openaiApiKey = await ApiKeyManager.getApiKey(); - if (!openaiApiKey) { + const llmModelData = await ApiKeyManager.llmModel(); + if (!llmModelData || !llmModelData.api_key) { MessageHandler.sendMessage(panel, {"command": "getUserAccessKey", "accessKey": "", "keyType": "", "endPoint": ""}); return; } - let keyType = ApiKeyManager.getKeyType(openaiApiKey!); - if (keyType === "DC") { + let keyType: string = "others"; + if (llmModelData.api_key?.startsWith("DC.")) { keyType = "DevChat"; - } else if (keyType === "sk") { - keyType = "OpenAI"; } - let openAiApiBase = ApiKeyManager.getEndPoint(openaiApiKey); + let openAiApiBase = llmModelData.api_base; if (!openAiApiBase) { openAiApiBase = ""; } - MessageHandler.sendMessage(panel, {"command": "getUserAccessKey", "accessKey": openaiApiKey, "keyType": keyType, "endPoint": openAiApiBase}); + MessageHandler.sendMessage(panel, {"command": 
"getUserAccessKey", "accessKey": llmModelData.api_key, "keyType": keyType, "endPoint": openAiApiBase}); } \ No newline at end of file diff --git a/src/panel/statusBarView.ts b/src/panel/statusBarView.ts index 6780a88..3937ee4 100644 --- a/src/panel/statusBarView.ts +++ b/src/panel/statusBarView.ts @@ -42,7 +42,7 @@ export function createStatusBarItem(context: vscode.ExtensionContext): vscode.St if (apiKeyStatus !== 'has valid access key') { statusBarItem.text = `$(warning)DevChat`; statusBarItem.tooltip = `${apiKeyStatus}`; - statusBarItem.command = 'DevChat.Access_Key_DevChat'; + statusBarItem.command = 'DevChat.AccessKey.DevChat'; progressBar.update(`Checking devchat dependency environment: ${apiKeyStatus}.`, 0); return; } diff --git a/src/toolwrapper/devchat.ts b/src/toolwrapper/devchat.ts index 94e1229..d2196a6 100644 --- a/src/toolwrapper/devchat.ts +++ b/src/toolwrapper/devchat.ts @@ -9,7 +9,7 @@ import ExtensionContextHolder from '../util/extensionContext'; import { UiUtilWrapper } from '../util/uiUtil'; import { ApiKeyManager } from '../util/apiKey'; import { exitCode } from 'process'; - +import * as yaml from 'yaml'; const envPath = path.join(__dirname, '..', '.env'); @@ -120,9 +120,9 @@ class DevChat { args.push("-p", options.parent); } - const llmModel = UiUtilWrapper.getConfiguration('DevChat', 'OpenAI.model'); - if (llmModel) { - args.push("-m", llmModel); + const llmModelData = await ApiKeyManager.llmModel(); + if (llmModelData && llmModelData.model) { + args.push("-m", llmModelData.model); } return args; @@ -192,14 +192,19 @@ class DevChat { }; } - apiEndpoint(apiKey: string | undefined): any { - const openAiApiBase = ApiKeyManager.getEndPoint(apiKey); - - const openAiApiBaseObject = openAiApiBase ? 
{ OPENAI_API_BASE: openAiApiBase } : {}; - return openAiApiBaseObject; - } - async chat(content: string, options: ChatOptions = {}, onData: (data: ChatResponse) => void): Promise { + const llmModelData = await ApiKeyManager.llmModel(); + if (!llmModelData) { + return { + "prompt-hash": "", + user: "", + date: "", + response: `Error: no valid llm model is selected!`, + finish_reason: "", + isError: true, + }; + } + const args = await this.buildArgs(options); args.push("--"); args.push(content); @@ -211,36 +216,41 @@ class DevChat { logger.channel()?.show(); } - - // 如果配置了devchat的TOKEN,那么就需要使用默认的代理 - let openAiApiBaseObject = this.apiEndpoint(openaiApiKey); - - const openaiModel = UiUtilWrapper.getConfiguration('DevChat', 'OpenAI.model'); - const openaiTemperature = UiUtilWrapper.getConfiguration('DevChat', 'OpenAI.temperature'); const openaiStream = UiUtilWrapper.getConfiguration('DevChat', 'OpenAI.stream'); - const llmModel = UiUtilWrapper.getConfiguration('DevChat', 'llmModel'); - const tokensPerPrompt = UiUtilWrapper.getConfiguration('DevChat', 'OpenAI.tokensPerPrompt'); + + const openAiApiBaseObject = llmModelData.api_base? 
{ OPENAI_API_BASE: llmModelData.api_base } : {}; + const activeLlmModelKey = llmModelData.api_key; let devChat: string | undefined = UiUtilWrapper.getConfiguration('DevChat', 'DevChatPath'); if (!devChat) { devChat = 'devchat'; } - const devchatConfig = { - model: openaiModel, - provider: llmModel, - "tokens-per-prompt": tokensPerPrompt, - OpenAI: { - temperature: openaiTemperature, - stream: openaiStream, - } + const reduceModelData = Object.keys(llmModelData) + .filter(key => key !== 'api_key' && key !== 'provider' && key !== 'model' && key !== 'api_base') + .reduce((obj, key) => { + obj[key] = llmModelData[key]; + return obj; + }, {}); + let devchatConfig = {}; + devchatConfig[llmModelData.model] = { + "provider": llmModelData.provider, + "stream": openaiStream, + ...reduceModelData }; + + let devchatModels = { + "default_model": llmModelData.model, + "models": devchatConfig}; // write to config file - const configPath = path.join(workspaceDir!, '.chat', 'config.json'); + const os = process.platform; + const userHome = os === 'win32' ? 
fs.realpathSync(process.env.USERPROFILE || '') : process.env.HOME; + + const configPath = path.join(userHome!, '.chat', 'config.yml'); // write devchatConfig to configPath - const configJson = JSON.stringify(devchatConfig, null, 2); - fs.writeFileSync(configPath, configJson); + const yamlString = yaml.stringify(devchatModels); + fs.writeFileSync(configPath, yamlString); try { @@ -257,7 +267,7 @@ class DevChat { env: { PYTHONUTF8:1, ...process.env, - OPENAI_API_KEY: openaiApiKey, + OPENAI_API_KEY: activeLlmModelKey, ...openAiApiBaseObject }, }; diff --git a/src/util/apiKey.ts b/src/util/apiKey.ts index 4a95592..e7fc685 100644 --- a/src/util/apiKey.ts +++ b/src/util/apiKey.ts @@ -3,30 +3,130 @@ import { UiUtilWrapper } from './uiUtil'; export class ApiKeyManager { + static toProviderKey(provider: string) : string | undefined { + let providerNameMap = { + "openai": "OpenAI", + "cohere": "Cohere", + "anthropic": "Anthropic", + "replicate": "Replicate", + "huggingface": "HuggingFace", + "together_ai": "TogetherAI", + "openrouter": "OpenRouter", + "vertex_ai": "VertexAI", + "ai21": "AI21", + "baseten": "Baseten", + "azure": "Azure", + "sagemaker": "SageMaker", + "bedrock": "Bedrock" + }; + return providerNameMap[provider]; + } static async getApiKey(llmType: string = "OpenAI"): Promise { - let apiKey: string|undefined = undefined; - - if (llmType === "OpenAI") { - apiKey = await UiUtilWrapper.secretStorageGet("openai_OPENAI_API_KEY"); - } - if (!apiKey) { - apiKey = await UiUtilWrapper.secretStorageGet("devchat_OPENAI_API_KEY"); + const llmModelT = await this.llmModel(); + if (!llmModelT) { + return undefined; } - - if (!apiKey) { - if (llmType === "OpenAI") { - apiKey = UiUtilWrapper.getConfiguration('DevChat', 'Api_Key_OpenAI'); + + return llmModelT.api_key; + } + + static async llmModel() { + const llmModelT = UiUtilWrapper.getConfiguration('devchat', 'defaultModel'); + if (!llmModelT) { + return undefined; + } + + const modelProperties = async (modelPropertyName: 
string, modelName: string) => { + const modelConfig = UiUtilWrapper.getConfiguration("devchat", modelPropertyName); + if (!modelConfig) { + return undefined; } - if (!apiKey) { - apiKey = UiUtilWrapper.getConfiguration('DevChat', 'Access_Key_DevChat'); + + let modelProperties: any = {}; + for (const key of Object.keys(modelConfig || {})) { + const property = modelConfig![key]; + modelProperties[key] = property; + } + if (!modelConfig["provider"]) { + return undefined; + } + if (!modelConfig["api_key"]) { + const providerName = this.toProviderKey(modelConfig["provider"]); + if (!providerName) { + return undefined; + } + const apiKey = await this.loadApiKeySecret(providerName); + if (!apiKey) { + return undefined; + } + modelProperties["api_key"] = apiKey; + } + + if (!modelConfig["api_base"] && modelProperties["api_key"]?.startsWith("DC.")) { + modelProperties["api_base"] = "https://api.devchat.ai/v1"; + } + + modelProperties['model'] = modelName; + return modelProperties; + }; + + if (llmModelT === "gpt-3.5-turbo") { + return await modelProperties('Model.gpt-3-5', "gpt-3.5-turbo"); + } + if (llmModelT === "gpt-3.5-turbo-16k") { + return await modelProperties('Model.gpt-3-5-16k', "gpt-3.5-turbo-16k"); + } + if (llmModelT === "gpt-4") { + return await modelProperties('Model.gpt-4', "gpt-4"); + } + if (llmModelT === "claude-2") { + return await modelProperties('Model.claude-2', "claude-2"); + } + + const customModelConfig: any = UiUtilWrapper.getConfiguration('devchat', 'customModel'); + if (!customModelConfig) { + return undefined; + } + + const customModels = customModelConfig as Array; + for (const model of customModels) { + if (!model.model) { + continue; + } + if (model.model === llmModelT) { + let modelProperties: any = {}; + for (const key of Object.keys(model || {})) { + const property = model![key]; + modelProperties[key] = property; + } + + const modelProvider = model["model"].split('/')[0]; + const modelName = model["model"].split('/').slice(1).join('/'); + 
+ if (!model["api_key"]) { + const providerName = this.toProviderKey(modelProvider); + if (!providerName) { + return undefined; + } + const apiKey = await this.loadApiKeySecret(providerName); + if (!apiKey) { + return undefined; + } + modelProperties["api_key"] = apiKey; + } + + if (!model["api_base"] && modelProperties["api_key"]?.startsWith("DC.")) { + modelProperties["api_base"] = "https://api.devchat.ai/v1"; + } + + modelProperties["provider"] = modelProvider; + modelProperties["model"] = modelName; + + return modelProperties; } } - if (!apiKey) { - if (llmType === "OpenAI") { - apiKey = process.env.OPENAI_API_KEY; - } - } - return apiKey; + + return undefined; } static getKeyType(apiKey: string): string | undefined { @@ -40,27 +140,9 @@ export class ApiKeyManager { } static async writeApiKeySecret(apiKey: string, llmType: string = "Unknow"): Promise { - if (apiKey.startsWith("sk-")) { - await UiUtilWrapper.storeSecret("openai_OPENAI_API_KEY", apiKey); - } else if (apiKey.startsWith("DC.")) { - await UiUtilWrapper.storeSecret("devchat_OPENAI_API_KEY", apiKey); - } else { - if (llmType === "OpenAI") { - await UiUtilWrapper.storeSecret("openai_OPENAI_API_KEY", apiKey); - } else if (llmType === "DevChat") { - await UiUtilWrapper.storeSecret("devchat_OPENAI_API_KEY", apiKey); - } - } + await UiUtilWrapper.storeSecret(`Access_KEY_${llmType}`, apiKey); } - - static getEndPoint(apiKey: string | undefined): string | undefined { - let endPoint = UiUtilWrapper.getConfiguration('DevChat', 'API_ENDPOINT'); - if (!endPoint) { - endPoint = process.env.OPENAI_API_BASE; - } - if (!endPoint && apiKey?.startsWith("DC.")) { - endPoint = "https://api.devchat.ai/v1"; - } - return endPoint; + static async loadApiKeySecret(llmType: string = "Unknow"): Promise { + return await UiUtilWrapper.secretStorageGet(`Access_KEY_${llmType}`); } } \ No newline at end of file diff --git a/src/util/commonUtil.ts b/src/util/commonUtil.ts index 39b2c43..bd95ac7 100644 --- a/src/util/commonUtil.ts 
+++ b/src/util/commonUtil.ts @@ -12,12 +12,12 @@ import { ApiKeyManager } from './apiKey'; async function createOpenAiKeyEnv() { let envs = {...process.env}; - let openaiApiKey = await ApiKeyManager.getApiKey(); - if (openaiApiKey) { - envs['OPENAI_API_KEY'] = openaiApiKey; - } + const llmModelData = await ApiKeyManager.llmModel(); + if (llmModelData && llmModelData.api_key) { + envs['OPENAI_API_KEY'] = llmModelData.api_key; + } - const openAiApiBase = ApiKeyManager.getEndPoint(openaiApiKey); + const openAiApiBase = llmModelData?.api_base; if (openAiApiBase) { envs['OPENAI_API_BASE'] = openAiApiBase; } @@ -228,11 +228,11 @@ export function runCommandStringAndWriteOutputSync(command: string, outputFile: return JSON.stringify(data); }; fs.writeFileSync(outputFile, onOutputFile(command, output)); - return { exitCode: 0, stdout: output, stderr: '' } + return { exitCode: 0, stdout: output, stderr: '' }; } catch (error) { logger.channel()?.error(`Error occurred: ${error}`); logger.channel()?.show(); - return { exitCode: 1, stdout: '', stderr: String(error) } + return { exitCode: 1, stdout: '', stderr: String(error) }; } } diff --git a/src/views/components/CurrentMessage/index.tsx b/src/views/components/CurrentMessage/index.tsx index 67c440d..fbe0991 100644 --- a/src/views/components/CurrentMessage/index.tsx +++ b/src/views/components/CurrentMessage/index.tsx @@ -1,7 +1,7 @@ import React, { useEffect } from "react"; import { keyframes } from "@emotion/react"; -import { Container, Text } from "@mantine/core"; +import { Box, Container, Text } from "@mantine/core"; import MessageBody from "@/views/components/MessageBody"; import { observer } from "mobx-react-lite"; import { useMst } from "@/views/stores/RootStore"; @@ -43,7 +43,7 @@ const getBlocks = (message) => { blocks.push(unmatchedText); return blocks; -} +}; const CurrentMessage = observer((props: any) => { const { width } = props; @@ -78,10 +78,9 @@ const CurrentMessage = observer((props: any) => { }, [hasDone]); 
return generating - ? { }}> - + : <>; }); diff --git a/src/views/components/Header/index.tsx b/src/views/components/Header/index.tsx index f992f00..c54925a 100644 --- a/src/views/components/Header/index.tsx +++ b/src/views/components/Header/index.tsx @@ -1,12 +1,19 @@ import React from "react"; -import { Header, Avatar, Flex, Text, ActionIcon } from "@mantine/core"; +import { Header, Avatar, Flex, Text, ActionIcon, createStyles } from "@mantine/core"; import BalanceTip from "@/views/components/BalanceTip"; import { IconSettings } from "@tabler/icons-react"; // @ts-ignore import SvgAvatarDevChat from "../MessageAvatar/avatar_devchat.svg"; import messageUtil from "@/util/MessageUtil"; +const useStyles = createStyles((theme) => ({ + logoName:{ + color: 'var(--vscode-foreground)' + } +})); + export default function Head() { + const {classes} = useStyles(); const openSetting = () => { messageUtil.sendMessage({ command: "doCommand", @@ -18,8 +25,7 @@ export default function Head() { height={40} style={{ backgroundColor: "var(--vscode-sideBar-background)", - // borderBottom: "1px solid var(--vscode-disabledForeground)", - boxShadow: "0 0px 3px var(--vscode-widget-shadow)", + borderBottom: '1px solid #ced4da', }} > @@ -32,7 +38,7 @@ export default function Head() { }} > - DevChat + DevChat
diff --git a/src/views/components/InputMessage/index.tsx b/src/views/components/InputMessage/index.tsx index f7c624f..f8e22f7 100644 --- a/src/views/components/InputMessage/index.tsx +++ b/src/views/components/InputMessage/index.tsx @@ -1,8 +1,8 @@ -import { useMantineTheme, Flex, Stack, Accordion, Box, ActionIcon, ScrollArea, Center, Popover, Textarea, Text, Divider, Indicator, HoverCard, Drawer } from "@mantine/core"; -import { useDisclosure, useListState, useResizeObserver, useTimeout } from "@mantine/hooks"; -import { IconGitBranch, IconBook, IconX, IconSquareRoundedPlus, IconSend, IconPaperclip, IconChevronDown } from "@tabler/icons-react"; +import { useMantineTheme, Flex, Stack, ActionIcon, ScrollArea, Popover, Textarea, Text, Indicator, Drawer, Group, Button, Menu,createStyles } from "@mantine/core"; +import { useDisclosure, useResizeObserver } from "@mantine/hooks"; +import { IconGitBranch, IconSend, IconPaperclip, IconChevronDown, IconTextPlus, IconRobot } from "@tabler/icons-react"; import React, { useState, useEffect } from "react"; -import { IconGitBranchChecked, IconShellCommand, IconMouseRightClick } from "@/views/components/ChatIcons"; +import { IconGitBranchChecked, IconShellCommand } from "@/views/components/ChatIcons"; import messageUtil from '@/util/MessageUtil'; import InputContexts from './InputContexts'; import { observer } from "mobx-react-lite"; @@ -10,10 +10,32 @@ import { useMst } from "@/views/stores/RootStore"; import { ChatContext } from "@/views/stores/InputStore"; import { Message } from "@/views/stores/ChatStore"; +const useStyles = createStyles((theme) => ({ + actionIcon:{ + color: 'var(--vscode-dropdown-foreground)', + borderColor:'var(--vscode-dropdown-border)', + backgroundColor: 'var(--vscode-dropdown-background)', + '&:hover':{ + color: 'var(--vscode-dropdown-foreground)', + borderColor:'var(--vscode-dropdown-border)', + backgroundColor: 'var(--vscode-dropdown-background)' + }, + '&[data-disabled]': { + borderColor: 
"transparent", + backgroundColor: "#e9ecef", + color: "#adb5bd", + cursor: "not-allowed", + backgroundImage: "none", + pointerEvents: "none", + } + } + })); + const InputMessage = observer((props: any) => { + const {classes} = useStyles(); const { chatPanelWidth } = props; const { input, chat } = useMst(); - const { contexts, menuOpend, menuType, currentMenuIndex, contextMenus, commandMenus } = input; + const { contexts, menuOpend, menuType, currentMenuIndex, contextMenus, commandMenus,modelMenus } = input; const { generating } = chat; const [drawerOpened, { open: openDrawer, close: closeDrawer }] = useDisclosure(false); @@ -22,12 +44,6 @@ const InputMessage = observer((props: any) => { const [commandMenusNode, setCommandMenusNode] = useState(null); const [inputRef, inputRect] = useResizeObserver(); - const handlePlusClick = (event: React.MouseEvent) => { - input.openMenu('contexts'); - inputRef.current.focus(); - event.stopPropagation(); - }; - const handleInputChange = (event: React.ChangeEvent) => { const value = event.target.value; // if value start with '/' command show menu @@ -115,86 +131,21 @@ const InputMessage = observer((props: any) => { const contextMenuIcon = (name: string) => { if (name === 'git diff --cached') { - return (); + return ; } if (name === 'git diff HEAD') { - return (); + return ; } - return (); + return ; }; - const contextMenusNode = [...contextMenus] - .sort((a, b) => { - if (a.name === '') { - return 1; // Placing '' at the end - } else if (b.name === '') { - return -1; // Placing '' at the front - } else { - return (a.name || "").localeCompare(b.name || ""); // Sorting alphabetically for other cases - } - }) - .map(({ pattern, description, name }, index) => { - return ( - { - handleContextClick(name); - input.closeMenu(); - }}> - {contextMenuIcon(name)} - - - {name} - - - {description} - - - ); - }); - useEffect(() => { input.fetchContextMenus().then(); input.fetchCommandMenus().then(); - messageUtil.registerHandler('regCommandList', 
(message: { result: object[]}) => { - input.updateCommands(message.result); - }); + input.fetchModelMenus().then(); + messageUtil.registerHandler('regCommandList', (message: { result: object[]}) => { + input.updateCommands(message.result); + }); messageUtil.registerHandler('appendContext', (message: { command: string; context: string }) => { // context is a temp file path const match = /\|([^]+?)\]/.exec(message.context); @@ -231,6 +182,22 @@ const InputMessage = observer((props: any) => { inputRef.current.focus(); }, []); + const getModelShowName = (modelName:string)=>{ + const nameMap = { + "gpt-3.5-turbo": "GPT-3.5", + "gpt-3.5-turbo-16k": "GPT-3.5-16K", + "gpt-4": "GPT-4", + "claude-2": "CLAUDE-2" + }; + if (modelName in nameMap){ + return nameMap[modelName]; + } else if(modelName.lastIndexOf('/') > -1){ + return modelName.substring(modelName.lastIndexOf('/')+1).toLocaleUpperCase(); + } else { + return modelName.toUpperCase(); + } + }; + useEffect(() => { let filtered; if (input.value) { @@ -294,8 +261,136 @@ const InputMessage = observer((props: any) => { } }, [contexts.length]); + const changeModel = (value) =>{ + chat.changeChatModel(value); + messageUtil.sendMessage({ + command: "updateSetting", + key1: "devchat", + key2: "defaultModel", + value: value, + }); + }; + + const menuStyles = { + arrow:{ + borderColor: 'var(--vscode-menu-border)', + }, + dropdown:{ + borderColor: 'var(--vscode-menu-border)', + backgroundColor: 'var(--vscode-menu-background)' + }, + itemLabel:{ + color: 'var(--vscode-menu-foreground)' + }, + item: { + padding: 5, + backgroundColor: 'var(--vscode-menu-background)', + '&:hover,&[data-hovered=true]': { + color: 'var(--vscode-commandCenter-activeForeground)', + borderColor: 'var(--vscode-commandCenter-border)', + backgroundColor: 'var(--vscode-commandCenter-activeBackground)' + } + } + }; + + const buttonStyles = { + root: { + color: 'var(--vscode-dropdown-foreground)', + borderColor:'var(--vscode-dropdown-border)', + backgroundColor: 
'var(--vscode-dropdown-background)', + '&:hover':{ + color: 'var(--vscode-dropdown-foreground)', + borderColor:'var(--vscode-dropdown-border)', + backgroundColor: 'var(--vscode-dropdown-background)' + } + } + }; + return ( - <> + + + + + + + + + + {[...contextMenus] + .sort((a, b) => { + if (a.name === '') { + return 1; // Placing '' at the end + } else if (b.name === '') { + return -1; // Placing '' at the front + } else { + return (a.name || "").localeCompare(b.name || ""); // Sorting alphabetically for other cases + } + }) + .map(({ pattern, description, name }, index) => { + return ( + { + handleContextClick(name); + }} + > + {name} + + {description} + + ); + })} + + + + + + + + {modelMenus.map((modelName) => { + return changeModel(modelName)}> + {getModelShowName(modelName)} + ; + })} + + + {contexts && contexts.length > 0 && { } { input.closeMenu(); inputRef.current.focus(); }} - onClose={() => input.closeMenu()} - onOpen={() => menuType !== '' ? input.openMenu(menuType) : input.closeMenu()} - returnFocus={true}> + >