Merge pull request #301 from devchat-ai/improve_input_ux

Improve input UX
boob.yang 2023-09-13 21:05:23 +08:00 committed by GitHub
commit f6b34dc4f9
26 changed files with 1324 additions and 690 deletions

View File

@ -9,5 +9,5 @@
"dist": true // set this to false to include "dist" folder in search results
},
// Turn off tsc task auto detection since we have the necessary tasks as npm scripts
"typescript.tsc.autoDetect": "off"
"typescript.tsc.autoDetect": "off"
}

package-lock.json (generated, 35 lines changed)
View File

@ -21,6 +21,7 @@
"@tiptap/starter-kit": "^2.0.3",
"axios": "^1.3.6",
"dotenv": "^16.0.3",
"js-yaml": "^4.1.0",
"mobx": "^6.10.0",
"mobx-react": "^9.0.0",
"mobx-state-tree": "^5.1.8",
@ -34,7 +35,8 @@
"rehype-raw": "^6.1.1",
"shell-escape": "^0.2.0",
"string-argv": "^0.3.2",
"uuid": "^9.0.0"
"uuid": "^9.0.0",
"yaml": "^2.3.2"
},
"devDependencies": {
"@babel/core": "^7.21.8",
@ -5882,6 +5884,14 @@
"node": ">=10"
}
},
"node_modules/cosmiconfig/node_modules/yaml": {
"version": "1.10.2",
"resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz",
"integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==",
"engines": {
"node": ">= 6"
}
},
"node_modules/create-require": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz",
@ -9030,7 +9040,6 @@
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
"integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
"dev": true,
"dependencies": {
"argparse": "^2.0.1"
},
@ -14025,11 +14034,11 @@
"dev": true
},
"node_modules/yaml": {
"version": "1.10.2",
"resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz",
"integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==",
"version": "2.3.2",
"resolved": "https://registry.npmjs.org/yaml/-/yaml-2.3.2.tgz",
"integrity": "sha512-N/lyzTPaJasoDmfV7YTrYCI0G/3ivm/9wdG0aHuheKowWQwGTsK0Eoiw6utmzAnI6pkJa0DUVygvp3spqqEKXg==",
"engines": {
"node": ">= 6"
"node": ">= 14"
}
},
"node_modules/yargs": {
@ -18429,6 +18438,13 @@
"parse-json": "^5.0.0",
"path-type": "^4.0.0",
"yaml": "^1.10.0"
},
"dependencies": {
"yaml": {
"version": "1.10.2",
"resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz",
"integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg=="
}
}
},
"create-require": {
@ -20759,7 +20775,6 @@
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
"integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
"dev": true,
"requires": {
"argparse": "^2.0.1"
}
@ -24350,9 +24365,9 @@
"dev": true
},
"yaml": {
"version": "1.10.2",
"resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz",
"integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg=="
"version": "2.3.2",
"resolved": "https://registry.npmjs.org/yaml/-/yaml-2.3.2.tgz",
"integrity": "sha512-N/lyzTPaJasoDmfV7YTrYCI0G/3ivm/9wdG0aHuheKowWQwGTsK0Eoiw6utmzAnI6pkJa0DUVygvp3spqqEKXg=="
},
"yargs": {
"version": "16.2.0",

View File

@ -70,59 +70,353 @@
"configuration": {
"title": "DevChat",
"properties": {
"DevChat.llmModel": {
"type": "string",
"default": "OpenAI",
"enum": [
"OpenAI"
"devchat.Model.gpt-3-5": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"default": "openai",
"enum": [
"openai"
],
"description": "[required*] Specify which provider host this llm model"
},
"api_key": {
"type": "string",
"default": "",
"description": "[required*] Specify access key for selected provider."
},
"api_base": {
"type": "string",
"default": "",
"description": "[optional*] Specify the api base for selected provider. Leave it blank if you want to use default api base."
},
"temperature": {
"type": "number",
"default": 0.3,
"description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
},
"max_tokens": {
"type": "number",
"default": 1000,
"description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
},
"presence_penalty": {
"type": "number",
"default": 0,
"description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."
},
"frequency_penalty": {
"type": "number",
"default": 0,
"description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
}
},
"required": [
"provider",
"key"
],
"description": "Select which LLM to use."
"additionalProperties": false,
"order": 0,
"markdownDescription": "Specify the properties for gpt-3.5-turbo model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature)"
},
"DevChat.maxLogCount": {
"type": "number",
"default": 20,
"description": "Limit the number of prompts in the chat view."
"devchat.Model.gpt-3-5-16k": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"default": "openai",
"enum": [
"openai"
],
"description": "[required*] Specify which provider host this llm model"
},
"api_key": {
"type": "string",
"default": "",
"description": "[required*] Specify access key for selected provider."
},
"api_base": {
"type": "string",
"default": "",
"description": "[optional*] Specify the api base for selected provider. Leave it blank if you want to use default api base."
},
"temperature": {
"type": "number",
"default": 0.3,
"description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
},
"max_tokens": {
"type": "number",
"default": 1000,
"description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
},
"presence_penalty": {
"type": "number",
"default": 0,
"description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."
},
"frequency_penalty": {
"type": "number",
"default": 0,
"description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
}
},
"required": [
"provider",
"key"
],
"additionalProperties": false,
"order": 1,
"markdownDescription": "Specify properties for gpt-3.5-turbo-16k model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
},
"DevChat.OpenAI.model": {
"type": "string",
"default": "gpt-3.5-turbo",
"description": "Specify the model ID.",
"when": "DevChat.llmModel == 'OpenAI'"
"devchat.Model.gpt-4": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"default": "openai",
"enum": [
"openai"
],
"description": "[required*] Specify which provider host this llm model"
},
"api_key": {
"type": "string",
"default": "",
"description": "[required*] Specify access key for selected provider."
},
"api_base": {
"type": "string",
"default": "",
"description": "[optional*] Specify the api base for selected provider. Leave it blank if you want to use default api base."
},
"temperature": {
"type": "number",
"default": 0.3,
"description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
},
"max_tokens": {
"type": "number",
"default": 1000,
"description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
},
"presence_penalty": {
"type": "number",
"default": 0,
"description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."
},
"frequency_penalty": {
"type": "number",
"default": 0,
"description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
}
},
"additionalProperties": false,
"order": 2,
"markdownDescription": "properties for gpt-4 model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature)"
},
"DevChat.OpenAI.temperature": {
"type": "number",
"default": 0,
"description": "The sampling temperature to use, between 0 and 2. Lower values like 0.2 will make it more focused and deterministic.",
"when": "DevChat.llmModel == 'OpenAI'"
"devchat.Model.claude-2": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"default": "anthropic",
"enum": [
"anthropic"
],
"description": "[required*] which provider host this llm model"
},
"api_key": {
"type": "string",
"default": "",
"description": "[required*] Specify access key for selected provider."
},
"api_base": {
"type": "string",
"default": "",
"description": "[optional*] Specify the api base for selected provider. Leave it blank if you want to use default api base."
},
"temperature": {
"type": "number",
"default": 0.3,
"description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
},
"max_tokens": {
"type": "number",
"default": 1000,
"description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
},
"presence_penalty": {
"type": "number",
"default": 0,
"description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."
},
"frequency_penalty": {
"type": "number",
"default": 0,
"description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
}
},
"additionalProperties": false,
"order": 3,
"markdownDescription": "properties for claude-2 model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
},
"devchat.customModel": {
"type": "array",
"items": {
"type": "object",
"properties": {
"model": {
"oneOf": [
{
"type": "string",
"enum": [
"openai/gpt-4",
"openai/gpt-4-0613",
"openai/gpt-4-0314",
"openai/gpt-4-32k",
"openai/gpt-4-32k-0314",
"openai/gpt-4-32k-0613",
"openai/gpt-3.5-turbo",
"openai/gpt-3.5-turbo-0301",
"openai/gpt-3.5-turbo-0613",
"openai/gpt-3.5-turbo-16k",
"openai/gpt-3.5-turbo-16k-0613",
"openai/text-davinci-003",
"openai/curie-001",
"openai/babbage-001",
"openai/ada-001",
"openai/babbage-002",
"openai/davinci-002",
"cohere/command-nightly",
"cohere/command",
"cohere/command-light",
"cohere/command-medium-beta",
"cohere/command-xlarge-beta",
"anthropic/claude-2",
"anthropic/claude-instant-1",
"anthropic/claude-instant-1.2",
"replicate/replicate/",
"replicate/replicate/llama-2-70b-chat:58d078176e02c219e11eb4da5a02a7830a283b14cf8f94537af893ccff5ee781",
"replicate/a16z-infra/llama-2-13b-chat:2a7f981751ec7fdf87b5b91ad4db53683a98082e9ff7bfd12c8cd5ea85980a52",
"replicate/joehoover/instructblip-vicuna13b:c4c54e3c8c97cd50c2d2fec9be3b6065563ccf7d43787fb99f84151b867178fe",
"replicate/replicate/dolly-v2-12b:ef0e1aefc61f8e096ebe4db6b2bacc297daf2ef6899f0f7e001ec445893500e5",
"replicate/a16z-infra/llama-2-7b-chat:7b0bfc9aff140d5b75bacbed23e91fd3c34b01a1e958d32132de6e0a19796e2c",
"replicate/replicate/vicuna-13b:6282abe6a492de4145d7bb601023762212f9ddbbe78278bd6771c8b3b2f2a13b",
"replicate/daanelson/flan-t5-large:ce962b3f6792a57074a601d3979db5839697add2e4e02696b3ced4c022d4767f",
"replicate/replit/replit-code-v1-3b:b84f4c074b807211cd75e3e8b1589b6399052125b4c27106e43d47189e8415ad",
"huggingface/meta-llama/Llama-2-7b-hf",
"huggingface/meta-llama/Llama-2-7b-chat-hf",
"huggingface/meta-llama/Llama-2-13b-hf",
"huggingface/meta-llama/Llama-2-13b-chat-hf",
"huggingface/meta-llama/Llama-2-70b-hf",
"huggingface/meta-llama/Llama-2-70b-chat-hf",
"huggingface/meta-llama/Llama-2-7b",
"huggingface/meta-llama/Llama-2-7b-chat",
"huggingface/meta-llama/Llama-2-13b",
"huggingface/meta-llama/Llama-2-13b-chat",
"huggingface/meta-llama/Llama-2-70b",
"huggingface/meta-llama/Llama-2-70b-chat",
"together_ai/togethercomputer/llama-2-70b-chat",
"together_ai/togethercomputer/Llama-2-7B-32K-Instruct",
"together_ai/togethercomputer/llama-2-7b",
"baseten/qvv0xeq",
"baseten/q841o8w",
"baseten/31dxrj3",
"openrouter/google/palm-2-codechat-bison",
"openrouter/google/palm-2-chat-bison",
"openrouter/openai/gpt-3.5-turbo",
"openrouter/openai/gpt-3.5-turbo-16k",
"openrouter/openai/gpt-4-32k",
"openrouter/anthropic/claude-2",
"openrouter/anthropic/claude-instant-v1",
"openrouter/meta-llama/llama-2-13b-chat",
"openrouter/meta-llama/llama-2-70b-chat",
"vertex_ai/chat-bison",
"vertex_ai/chat-bison@001",
"vertex_ai/text-bison",
"vertex_ai/text-bison@001",
"ai21/j2-ultra",
"ai21/j2-mid",
"ai21/j2-light"
],
"description": "Specify llm model name."
},
{
"type": "string",
"description": "[required*] Specify llm model name."
}
]
},
"api_key": {
"type": "string",
"default": "",
"description": "[required*] Specify access key for selected provider."
},
"api_base": {
"type": "string",
"default": "",
"description": "[optional*] Specify the api base for selected provider. Leave it blank if you want to use default api base."
},
"temperature": {
"type": "number",
"default": 0.3,
"description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
},
"max_tokens": {
"type": "number",
"default": 1000,
"description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
},
"presence_penalty": {
"type": "number",
"default": 0,
"description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."
},
"frequency_penalty": {
"type": "number",
"default": 0,
"description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
}
},
"additionalProperties": false
},
"order": 6,
"markdownDescription": "Specify the custom llm model for DevChat."
},
"devchat.defaultModel": {
"oneOf": [
{
"type": "string",
"default": "gpt-3.5-turbo",
"enum": [
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-4",
"claude-2"
]
},
{
"type": "string",
"default": "gpt-3.5-turbo"
}
],
"order": 7,
"markdownDescription": "Specify the default llm model for DevChat. [Price of each model](https://devchat.ai/pricing)"
},
"DevChat.OpenAI.stream": {
"type": "boolean",
"default": true,
"description": "Whether to stream a response.",
"when": "DevChat.llmModel == 'OpenAI'"
"order": 10,
"description": "Whether to stream a response."
},
"DevChat.OpenAI.tokensPerPrompt": {
"type": "number",
"default": 6000,
"description": "The max number of tokens of a prompt.",
"when": "DevChat.llmModel == 'OpenAI'"
},
"DevChat.Access_Key_DevChat": {
"type": "string",
"default": "",
"description": "DevChat's secret key for accessing multiple LLM models"
},
"DevChat.Api_Key_OpenAI": {
"type": "string",
"default": "",
"description": "OpenAI's secret key for accessing LLM models. (Leave blank if using DevChat's key.)",
"when": "DevChat.llmModel == 'OpenAI'"
},
"DevChat.API_ENDPOINT": {
"type": "string",
"default": "",
"description": "API endpoint URL",
"when": "DevChat.llmModel == 'OpenAI'"
"DevChat.EnableFunctionCalling": {
"type": "boolean",
"default": false,
"order": 11,
"description": "Enable function calling for GPT."
},
"DevChat.DevChatPath": {
"type": "string",
@ -135,8 +429,27 @@
]
}
},
"order": 12,
"description": "Where is the devchat binary located?"
},
"DevChat.betaInvitationCode": {
"type": "string",
"default": "",
"order": 13,
"description": "The invitation code for beta testing."
},
"DevChat.maxLogCount": {
"type": "number",
"default": 20,
"order": 14,
"description": "Limit the number of prompts in the chat view."
},
"DevChat.askcode.supportedFileTypes": {
"type": "string",
"default": ".+\\.js$, .+\\.ts$, .+\\.jsx$, .+\\.tsx$, .+\\.java$, .+\\.py$, .+\\.go$, .+\\.rb$, .+\\.php$, .+\\.cpp$, .+\\.c$, .+\\.cs$, .+\\.swift$, .+\\.rs$, .+\\.sh$, .+\\.bash$, .+\\.zsh$, .+\\.m$, .+\\.mm$, .+\\.h$, .+\\.hpp$, .+\\.hh$, .+\\.html$, .+\\.htm$, .+\\.xhtml$, .+\\.xml$, .+\\.css$, .+\\.scss$, .+\\.sass$, .+\\.less$, .+\\.json$, .+\\.yaml$, .+\\.yml$, .+\\.toml$, .+\\.ini$, .+\\.md$, .+\\.markdown$, .+\\.txt$, .+\\.csv$, .+\\.sql$, .+\\.sqlite$, .+\\.db$, .+\\.hql$, .+\\.psql$, .+\\.pgsql$, .+\\.plpgsql$",
"order": 15,
"description": "Comma-separated list of regular expressions for supported file types for analysis."
},
"DevChat.PythonPath": {
"type": "string",
"default": "",
@ -150,26 +463,10 @@
},
"description": "Which Python interpreter to use with DevChat?"
},
"DevChat.EnableFunctionCalling": {
"type": "boolean",
"default": false,
"description": "Enable/Disable function calling for GPT.",
"when": "DevChat.llmModel == 'OpenAI'"
},
"DevChat.PythonVirtualEnv": {
"type": "string",
"default": "",
"description": "Path to the Python virtual environment for AskCode."
},
"DevChat.askcode.supportedFileTypes": {
"type": "string",
"default": ".+\\.js$, .+\\.ts$, .+\\.jsx$, .+\\.tsx$, .+\\.java$, .+\\.py$, .+\\.go$, .+\\.rb$, .+\\.php$, .+\\.cpp$, .+\\.c$, .+\\.cs$, .+\\.swift$, .+\\.rs$, .+\\.sh$, .+\\.bash$, .+\\.zsh$, .+\\.m$, .+\\.mm$, .+\\.h$, .+\\.hpp$, .+\\.hh$, .+\\.html$, .+\\.htm$, .+\\.xhtml$, .+\\.xml$, .+\\.css$, .+\\.scss$, .+\\.sass$, .+\\.less$, .+\\.json$, .+\\.yaml$, .+\\.yml$, .+\\.toml$, .+\\.ini$, .+\\.md$, .+\\.markdown$, .+\\.txt$, .+\\.csv$, .+\\.sql$, .+\\.sqlite$, .+\\.db$, .+\\.hql$, .+\\.psql$, .+\\.pgsql$, .+\\.plpgsql$",
"description": "Comma-separated list of regular expressions for supported file types for analysis."
},
"DevChat.betaInvitationCode": {
"type": "string",
"default": "",
"description": "The invitation code for beta testing."
}
}
},
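These contributed entries are ordinary VS Code settings, so the extension can read them through the standard configuration API. A minimal sketch of reading the settings above (illustrative only; the diff itself goes through the UiUtilWrapper helper):

import * as vscode from 'vscode';

// Read the default model id, e.g. "gpt-4" or a custom "provider/model" string.
const defaultModel = vscode.workspace.getConfiguration('devchat').get<string>('defaultModel');
// Read one of the per-model property objects, e.g. devchat.Model.gpt-4.
const gpt4Config = vscode.workspace.getConfiguration('devchat').get<Record<string, unknown>>('Model.gpt-4');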
@ -231,13 +528,73 @@
"title": "Create Entry"
},
{
"command": "DevChat.Api_Key_OpenAI",
"title": "Input OpenAI Api Key",
"command": "DevChat.AccessKey.OpenAI",
"title": "Input Access Key for OpenAI",
"category": "DevChat"
},
{
"command": "DevChat.Access_Key_DevChat",
"title": "Input DevChat Access Key",
"command": "DevChat.AccessKey.Cohere",
"title": "Input Access Key for Cohere",
"category": "DevChat"
},
{
"command": "DevChat.AccessKey.Anthropic",
"title": "Input Access Key for Anthropic",
"category": "DevChat"
},
{
"command": "DevChat.AccessKey.Replicate",
"title": "Input Access Key for Replicate",
"category": "DevChat"
},
{
"command": "DevChat.AccessKey.HuggingFace",
"title": "Input Access Key for HuggingFace",
"category": "DevChat"
},
{
"command": "DevChat.AccessKey.TogetherAI",
"title": "Input Access Key for TogetherAI",
"category": "DevChat"
},
{
"command": "DevChat.AccessKey.OpenRouter",
"title": "Input Access Key for OpenRouter",
"category": "DevChat"
},
{
"command": "DevChat.AccessKey.VertexAI",
"title": "Input Access Key for VertexAI",
"category": "DevChat"
},
{
"command": "DevChat.AccessKey.AI21",
"title": "Input Access Key for AI21",
"category": "DevChat"
},
{
"command": "DevChat.AccessKey.BaseTen",
"title": "Input Access Key for BaseTen",
"category": "DevChat"
},
{
"command": "DevChat.AccessKey.Azure",
"title": "Input Access Key for Azure",
"category": "DevChat"
},
{
"command": "DevChat.AccessKey.SageMaker",
"title": "Input Access Key for SageMaker",
"category": "DevChat"
},
{
"command": "DevChat.AccessKey.Bedrock",
"title": "Input Access Key for Bedrock",
"category": "DevChat"
},
{
"command": "DevChat.AccessKey.DevChat",
"title": "Input Access Key for DevChat",
"category": "DevChat"
},
{
@ -287,6 +644,11 @@
"command": "DevChat.InstallCommands",
"title": "Install slash commands",
"category": "DevChat"
},
{
"command": "DevChat.UpdataChatModels",
"title": "Update Chat Models",
"category": "DevChat"
}
],
"menus": {
@ -484,6 +846,7 @@
"@tiptap/starter-kit": "^2.0.3",
"axios": "^1.3.6",
"dotenv": "^16.0.3",
"js-yaml": "^4.1.0",
"mobx": "^6.10.0",
"mobx-react": "^9.0.0",
"mobx-state-tree": "^5.1.8",
@ -497,6 +860,7 @@
"rehype-raw": "^6.1.1",
"shell-escape": "^0.2.0",
"string-argv": "^0.3.2",
"uuid": "^9.0.0"
"uuid": "^9.0.0",
"yaml": "^2.3.2"
}
}
}
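DevChat.askcode.supportedFileTypes above is a comma-separated list of regular expressions, so a consumer has to split and compile it before matching file paths. A minimal sketch of the assumed semantics (the actual matching happens in the AskCode indexer, outside this diff):

// Abbreviated default value; the real setting lists many more patterns.
const raw = ".+\\.ts$, .+\\.py$";
const patterns = raw.split(',').map(p => new RegExp(p.trim()));
const isSupported = (filePath: string) => patterns.some(re => re.test(filePath));
console.log(isSupported('src/extension.ts')); // true
console.log(isSupported('logo.png'));         // false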

View File

@ -1,14 +1,10 @@
import * as path from 'path';
import * as vscode from 'vscode'
import * as vscode from 'vscode';
import { ChatContext } from './contextManager';
import { createTempSubdirectory, git_ls_tree, runCommandStringAndWriteOutput } from '../util/commonUtil';
import { logger } from '../util/logger';
import { handleCodeSelected } from './contextCodeSelected';
import DevChat, { ChatOptions } from '../toolwrapper/devchat';
import { number } from 'mobx-state-tree/dist/internal';
async function getCurrentSelectText(activeEditor: vscode.TextEditor): Promise<string> {
@ -183,7 +179,7 @@ async function getSymbolDefine(symbolList: string[], activeEditor: vscode.TextEd
for (const child of symbol.children) {
visitFun(child);
}
}
};
for (const symbol of symbolsT) {
visitFun(symbol);
}

View File

@ -21,7 +21,7 @@ import { FT } from '../util/feature_flags/feature_toggles';
import { getPackageVersion } from '../util/python_installer/pip_package_version';
import { exec } from 'child_process';
import { sendCommandListByDevChatRun } from '../handler/regCommandList';
import { sendCommandListByDevChatRun, updateChatModels } from '../handler/regCommandList';
import DevChat from "../toolwrapper/devchat";
let indexProcess: CommandRun | null = null;
@ -82,42 +82,39 @@ function registerAskForFileCommand(context: vscode.ExtensionContext) {
context.subscriptions.push(vscode.commands.registerCommand('devchat.askForFile_chinese', callback));
}
export function registerOpenAiApiKeySettingCommand(context: vscode.ExtensionContext) {
const secretStorage: vscode.SecretStorage = context.secrets;
function regAccessKeyCommand(context: vscode.ExtensionContext, provider: string) {
context.subscriptions.push(
vscode.commands.registerCommand('DevChat.Api_Key_OpenAI', async () => {
vscode.commands.registerCommand(`DevChat.AccessKey.${provider}`, async () => {
const passwordInput: string = await vscode.window.showInputBox({
password: true,
title: "Input OpenAi Api Key",
placeHolder: "Set OpenAI Api Key.(Leave blank if clearing stored key.)"
title: `Input ${provider} Access Key`,
placeHolder: `Set ${provider} Access Key. (Leave blank to clear the stored key.)`
}) ?? '';
if (passwordInput.trim() !== "" && !isValidApiKey(passwordInput)) {
UiUtilWrapper.showErrorMessage("Your api key is invalid!");
return ;
}
ApiKeyManager.writeApiKeySecret(passwordInput, "OpenAI");
await ApiKeyManager.writeApiKeySecret(passwordInput, provider);
})
);
}
export function registerDevChatApiKeySettingCommand(context: vscode.ExtensionContext) {
const secretStorage: vscode.SecretStorage = context.secrets;
context.subscriptions.push(
vscode.commands.registerCommand('DevChat.Access_Key_DevChat', async () => {
const passwordInput: string = await vscode.window.showInputBox({
password: true,
title: "Input DevChat Access Key",
placeHolder: "Set DevChat Access Key.(Leave blank if clearing stored key.)"
}) ?? '';
if (passwordInput.trim() !== "" && !isValidApiKey(passwordInput)) {
UiUtilWrapper.showErrorMessage("Your access key is invalid!");
return ;
}
ApiKeyManager.writeApiKeySecret(passwordInput, "DevChat");
})
);
export function registerAccessKeySettingCommand(context: vscode.ExtensionContext) {
regAccessKeyCommand(context, "OpenAI");
regAccessKeyCommand(context, "Cohere");
regAccessKeyCommand(context, "Anthropic");
regAccessKeyCommand(context, "Replicate");
regAccessKeyCommand(context, "HuggingFace");
regAccessKeyCommand(context, "TogetherAI");
regAccessKeyCommand(context, "OpenRouter");
regAccessKeyCommand(context, "VertexAI");
regAccessKeyCommand(context, "AI21");
regAccessKeyCommand(context, "BaseTen");
regAccessKeyCommand(context, "Azure");
regAccessKeyCommand(context, "SageMaker");
regAccessKeyCommand(context, "Bedrock");
regAccessKeyCommand(context, "DevChat");
}
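Each provider now gets its own DevChat.AccessKey.<Provider> command (contributed in package.json above), so a key prompt can also be triggered programmatically. A hedged usage sketch:

// Open the password input for a single provider, e.g. Anthropic.
await vscode.commands.executeCommand('DevChat.AccessKey.Anthropic');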
export function registerStatusBarItemClickCommand(context: vscode.ExtensionContext) {
@ -142,7 +139,7 @@ const topicDeleteCallback = async (item: TopicTreeItem) => {
TopicManager.getInstance().deleteTopic(item.id);
}
};
;
export function regTopicDeleteCommand(context: vscode.ExtensionContext) {
context.subscriptions.push(
@ -316,7 +313,16 @@ async function installAskCode(supportedFileTypes, progressBar: any, callback: Fu
async function indexCode(pythonVirtualEnv, supportedFileTypes, progressBar: any) {
let envs = {};
let openaiApiKey = await ApiKeyManager.getApiKey();
const llmModelData = await ApiKeyManager.llmModel();
if (!llmModelData) {
logger.channel()?.error('No valid LLM model is selected!');
logger.channel()?.show();
progressBar.endWithError("No valid LLM model is selected!");
return;
}
let openaiApiKey = llmModelData.api_key;
if (!openaiApiKey) {
logger.channel()?.error('The OpenAI key is invalid!');
logger.channel()?.show();
@ -326,7 +332,7 @@ async function indexCode(pythonVirtualEnv, supportedFileTypes, progressBar: any)
}
envs['OPENAI_API_KEY'] = openaiApiKey;
const openAiApiBase = ApiKeyManager.getEndPoint(openaiApiKey);
const openAiApiBase = llmModelData.api_base;
if (openAiApiBase) {
envs['OPENAI_API_BASE'] = openAiApiBase;
}
@ -443,17 +449,26 @@ export function registerAskCodeSummaryIndexStartCommand(context: vscode.Extensio
async function indexCodeSummary(pythonVirtualEnv, supportedFileTypes, progressBar: any) {
let envs = {};
let openaiApiKey = await ApiKeyManager.getApiKey();
const llmModelData = await ApiKeyManager.llmModel();
if (!llmModelData) {
logger.channel()?.error('No valid LLM model is selected!');
logger.channel()?.show();
progressBar.endWithError("No valid LLM model is selected!");
return;
}
let openaiApiKey = llmModelData.api_key;
if (!openaiApiKey) {
logger.channel()?.error('The OpenAI key is invalid!');
logger.channel()?.show();
progressBar.endWithError("The OpenAI key is invalid!");
progressBar.endWithError("The OpenAI key is invalid!");
return;
}
envs['OPENAI_API_KEY'] = openaiApiKey;
const openAiApiBase = ApiKeyManager.getEndPoint(openaiApiKey);
const openAiApiBase = llmModelData.api_base;
if (openAiApiBase) {
envs['OPENAI_API_BASE'] = openAiApiBase;
}
@ -524,6 +539,14 @@ export function registerInstallCommandsCommand(context: vscode.ExtensionContext)
context.subscriptions.push(disposable);
}
export function registerUpdateChatModelsCommand(context: vscode.ExtensionContext) {
let disposable = vscode.commands.registerCommand('DevChat.UpdataChatModels', async () => {
updateChatModels();
});
context.subscriptions.push(disposable);
}
export async function addSummaryContextFun(fsPath: string ) {
if (!FT("ask-code-summary")) {
UiUtilWrapper.showErrorMessage("This command is a beta version command and has not been released yet.");

View File

@ -14,7 +14,7 @@ export function checkDevChatDependency(showError: boolean = true): boolean {
try {
// Check if DevChat is installed
const expectVersion = 'DevChat 0.2.3';
const expectVersion = 'DevChat 0.2.4';
const devchatVersion = runCommand(`"${devChat}" --version`).toString().trim();
if (devchatVersion < expectVersion) {
logger.channel()?.info(`devchat version: ${devchatVersion}, but expect version: ${expectVersion}`);

View File

@ -5,8 +5,7 @@ import {
registerAddContextCommand,
registerAskForCodeCommand,
registerAskForFileCommand,
registerOpenAiApiKeySettingCommand,
registerDevChatApiKeySettingCommand,
registerAccessKeySettingCommand,
regTopicDeleteCommand,
regAddTopicCommand,
regDeleteSelectTopicCommand,
@ -21,6 +20,7 @@ import {
registerAskCodeSummaryIndexStopCommand,
registerAddSummaryContextCommand,
registerInstallCommandsCommand,
registerUpdateChatModelsCommand,
} from './contributes/commands';
import { regLanguageContext } from './contributes/context';
import { regDevChatView, regTopicView } from './contributes/views';
@ -33,6 +33,47 @@ import { UiUtilWrapper } from './util/uiUtil';
import { UiUtilVscode } from './util/uiUtil_vscode';
import { FT } from './util/feature_flags/feature_toggles';
async function configUpdateTo_0912() {
const devchatKey = UiUtilWrapper.getConfiguration('DevChat', 'Access_Key_DevChat');
const openaiKey = UiUtilWrapper.getConfiguration('DevChat', 'Api_Key_OpenAI');
const endpointKey = UiUtilWrapper.getConfiguration('DevChat', 'API_ENDPOINT');
let modelConfigNew = {};
if (openaiKey) {
modelConfigNew["api_key"] = openaiKey;
modelConfigNew["provider"] = "openai";
} else if (devchatKey) {
modelConfigNew["api_key"] = devchatKey;
modelConfigNew["provider"] = "openai";
}
if (endpointKey) {
modelConfigNew["api_base"] = endpointKey;
}
const modelConfig1: any = UiUtilWrapper.getConfiguration("devchat", "Model.gpt-3-5");
const modelConfig2: any = UiUtilWrapper.getConfiguration("devchat", "Model.gpt-3-5-16k");
const modelConfig3: any = UiUtilWrapper.getConfiguration("devchat", "Model.gpt-4");
//if (!modelConfig1 && !modelConfig2 && !modelConfig3 && Object.keys(modelConfigNew).length > 0) {
if (Object.keys(modelConfig1).length === 0 &&
Object.keys(modelConfig2).length === 0 &&
Object.keys(modelConfig3).length === 0) {
// config default gpt models
if (Object.keys(modelConfigNew).length === 0) {
modelConfigNew["api_key"] = "DC.<your devchat key>";
modelConfigNew["provider"] = "openai";
}
try {
vscode.workspace.getConfiguration("devchat").update("Model.gpt-3-5", modelConfigNew, vscode.ConfigurationTarget.Global);
vscode.workspace.getConfiguration("devchat").update("Model.gpt-3-5-16k", modelConfigNew, vscode.ConfigurationTarget.Global);
vscode.workspace.getConfiguration("devchat").update("Model.gpt-4", modelConfigNew, vscode.ConfigurationTarget.Global);
} catch(error) {
return;
}
}
}
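For reference, the migration above collapses the three legacy settings (Api_Key_OpenAI, Access_Key_DevChat, API_ENDPOINT) into the new per-model objects. A sketch of the shape it writes, with placeholder values:

// Placeholders only — the real values are read from the old DevChat.* settings.
const migrated = {
    provider: "openai",
    api_key: "<old Api_Key_OpenAI or Access_Key_DevChat>",
    api_base: "<old API_ENDPOINT, if set>"
};
await vscode.workspace.getConfiguration("devchat")
    .update("Model.gpt-3-5", migrated, vscode.ConfigurationTarget.Global);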
function activate(context: vscode.ExtensionContext) {
ExtensionContextHolder.context = context;
@ -40,13 +81,14 @@ function activate(context: vscode.ExtensionContext) {
logger.init(LoggerChannelVscode.getInstance());
UiUtilWrapper.init(new UiUtilVscode());
configUpdateTo_0912();
regLanguageContext();
regDevChatView(context);
regTopicView(context);
registerOpenAiApiKeySettingCommand(context);
registerDevChatApiKeySettingCommand(context);
registerAccessKeySettingCommand(context);
registerOpenChatPanelCommand(context);
registerAddContextCommand(context);
registerAskForCodeCommand(context);
@ -54,6 +96,7 @@ function activate(context: vscode.ExtensionContext) {
registerStatusBarItemClickCommand(context);
registerInstallCommandsCommand(context);
registerUpdateChatModelsCommand(context);
createStatusBarItem(context);
if (FT("ask-code")) {

View File

@ -178,15 +178,7 @@ export async function historyMessagesBase(): Promise<LoadHistoryMessages | undef
}
export async function onApiKeyBase(apiKey: string): Promise<{ command: string, text: string, hash: string, user: string, date: string, isError: boolean }> {
if (!isValidApiKey(apiKey)) {
return { command: 'receiveMessage', text: 'Your API key is invalid. We support OpenAI and DevChat keys. Please reset the key.', hash: '', user: 'system', date: '', isError: false };
}
isApiSet = true;
ApiKeyManager.writeApiKeySecret(apiKey);
const welcomeMessageText = welcomeMessage().response;
return {
command: 'receiveMessage', text: `Your OPENAI_API_KEY is set. Enjoy DevChat!\n${welcomeMessageText}`, hash: '', user: 'system', date: '', isError: false
command: 'receiveMessage', text: `You need to configure an access key for the selected LLM model in the Settings view.`, hash: '', user: 'system', date: '', isError: false
};
}

View File

@ -19,6 +19,7 @@ import { doCommand } from './doCommand';
import { getSetting, updateSetting } from './updateConfig';
import { featureToggle, featureToggles } from './featureToggle';
import { getUserAccessKey } from './userAccessKey';
import { regModelList } from './regValidModelList';
// According to the context menu selected by the user, add the corresponding context file
@ -92,3 +93,5 @@ messageHandler.registerHandler('featureToggle', featureToggle);
messageHandler.registerHandler('featureToggles', featureToggles);
messageHandler.registerHandler('getUserAccessKey', getUserAccessKey);
messageHandler.registerHandler('regModelList', regModelList);

View File

@ -2,6 +2,7 @@ import * as vscode from 'vscode';
import CommandManager from '../command/commandManager';
import { MessageHandler } from './messageHandler';
import { regInMessage, regOutMessage } from '../util/reg_messages';
import { getValidModels } from './regValidModelList';
regInMessage({command: 'regCommandList'});
@ -48,3 +49,8 @@ export async function sendCommandListByDevChatRun() {
}
}
export async function updateChatModels() {
const modelList = await getValidModels();
MessageHandler.sendMessage(existPannel!, { command: 'regModelList', result: modelList });
}

View File

@ -0,0 +1,98 @@
import * as vscode from 'vscode';
import ChatContextManager from '../context/contextManager';
import { MessageHandler } from './messageHandler';
import { regInMessage, regOutMessage } from '../util/reg_messages';
import { ApiKeyManager } from '../util/apiKey';
import { UiUtilWrapper } from '../util/uiUtil';
export async function getValidModels(): Promise<string[]> {
const modelProperties = async (modelPropertyName: string, modelName: string) => {
const modelConfig = UiUtilWrapper.getConfiguration("devchat", modelPropertyName);
if (!modelConfig) {
return undefined;
}
let modelProperties: any = {};
for (const key of Object.keys(modelConfig || {})) {
const property = modelConfig![key];
modelProperties[key] = property;
}
if (!modelConfig["provider"]) {
return undefined;
}
if (!modelConfig["api_key"]) {
const providerName = ApiKeyManager.toProviderKey(modelConfig["provider"]);
if (!providerName) {
return undefined;
}
const apiKey = await ApiKeyManager.loadApiKeySecret(providerName);
if (!apiKey) {
return undefined;
}
modelProperties["api_key"] = apiKey;
}
modelProperties['model'] = modelName;
return modelProperties;
};
let modelList : string[] = [];
const openaiModel = await modelProperties('Model.gpt-3-5', "gpt-3.5-turbo");
if (openaiModel) {
modelList.push(openaiModel.model);
}
const openaiModel2 = await modelProperties('Model.gpt-3-5-16k', "gpt-3.5-turbo-16k");
if (openaiModel2) {
modelList.push(openaiModel2.model);
}
const openaiModel3 = await modelProperties('Model.gpt-4', "gpt-4");
if (openaiModel3) {
modelList.push(openaiModel3.model);
}
const claudeModel = await modelProperties('Model.claude-2', "claude-2");
if (claudeModel) {
modelList.push(claudeModel.model);
}
const customModelConfig: any = UiUtilWrapper.getConfiguration('devchat', 'customModel');
if (!customModelConfig) {
return modelList;
}
const customModels = customModelConfig as Array<any>;
for (const model of customModels) {
if (!model.model) {
continue;
}
const modelProvider = model["model"].split('/')[0];
const modelName = model["model"].split('/').slice(1).join('/');
if (!model["api_key"]) {
const providerName = ApiKeyManager.toProviderKey(modelProvider);
if (!providerName) {
continue;
}
const apiKey = await ApiKeyManager.loadApiKeySecret(providerName);
if (!apiKey) {
continue;
}
}
modelList.push(model["model"]);
}
return modelList;
}
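A model only makes it into the returned list when a provider is configured and an API key can be resolved, so the list is effectively the set of models the user can actually call. Assumed usage:

const models = await getValidModels();
// e.g. ["gpt-3.5-turbo", "gpt-4", "replicate/llama-2-70b-chat"], depending on configured keys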
regInMessage({command: 'regModelList'});
regOutMessage({command: 'regModelList', result: [{name: ''}]});
export async function regModelList(message: any, panel: vscode.WebviewPanel|vscode.WebviewView): Promise<void> {
const modelList = await getValidModels();
MessageHandler.sendMessage(panel, { command: 'regModelList', result: modelList });
return;
}

View File

@ -57,7 +57,14 @@ export async function askCode(message: any, panel: vscode.WebviewPanel|vscode.We
let envs = {};
let openaiApiKey = await ApiKeyManager.getApiKey();
const llmModelData = await ApiKeyManager.llmModel();
if (!llmModelData) {
logger.channel()?.error('No valid LLM model is selected!');
logger.channel()?.show();
return;
}
let openaiApiKey = llmModelData.api_key;
if (!openaiApiKey) {
logger.channel()?.error('The OpenAI key is invalid!');
logger.channel()?.show();
@ -65,7 +72,7 @@ export async function askCode(message: any, panel: vscode.WebviewPanel|vscode.We
}
envs['OPENAI_API_KEY'] = openaiApiKey;
const openAiApiBase = ApiKeyManager.getEndPoint(openaiApiKey);
const openAiApiBase = llmModelData.api_base;
if (openAiApiBase) {
envs['OPENAI_API_BASE'] = openAiApiBase;
}

View File

@ -12,22 +12,20 @@ regInMessage({command: 'getUserAccessKey'});
regOutMessage({command: 'getUserAccessKey', accessKey: "DC.xxx", keyType: "DevChat", endPoint: "https://xxx"});
export async function getUserAccessKey(message: any, panel: vscode.WebviewPanel|vscode.WebviewView): Promise<void> {
const workspaceDir = UiUtilWrapper.workspaceFoldersFirstPath();
let openaiApiKey = await ApiKeyManager.getApiKey();
if (!openaiApiKey) {
const llmModelData = await ApiKeyManager.llmModel();
if (!llmModelData || !llmModelData.api_key) {
MessageHandler.sendMessage(panel, {"command": "getUserAccessKey", "accessKey": "", "keyType": "", "endPoint": ""});
return;
}
let keyType = ApiKeyManager.getKeyType(openaiApiKey!);
if (keyType === "DC") {
let keyType: string = "others";
if (llmModelData.api_key?.startsWith("DC.")) {
keyType = "DevChat";
} else if (keyType === "sk") {
keyType = "OpenAI";
}
let openAiApiBase = ApiKeyManager.getEndPoint(openaiApiKey);
let openAiApiBase = llmModelData.api_base;
if (!openAiApiBase) {
openAiApiBase = "";
}
MessageHandler.sendMessage(panel, {"command": "getUserAccessKey", "accessKey": openaiApiKey, "keyType": keyType, "endPoint": openAiApiBase});
MessageHandler.sendMessage(panel, {"command": "getUserAccessKey", "accessKey": llmModelData.api_key, "keyType": keyType, "endPoint": openAiApiBase});
}

View File

@ -42,7 +42,7 @@ export function createStatusBarItem(context: vscode.ExtensionContext): vscode.St
if (apiKeyStatus !== 'has valid access key') {
statusBarItem.text = `$(warning)DevChat`;
statusBarItem.tooltip = `${apiKeyStatus}`;
statusBarItem.command = 'DevChat.Access_Key_DevChat';
statusBarItem.command = 'DevChat.AccessKey.DevChat';
progressBar.update(`Checking devchat dependency environment: ${apiKeyStatus}.`, 0);
return;
}

View File

@ -9,7 +9,7 @@ import ExtensionContextHolder from '../util/extensionContext';
import { UiUtilWrapper } from '../util/uiUtil';
import { ApiKeyManager } from '../util/apiKey';
import { exitCode } from 'process';
import * as yaml from 'yaml';
const envPath = path.join(__dirname, '..', '.env');
@ -120,9 +120,9 @@ class DevChat {
args.push("-p", options.parent);
}
const llmModel = UiUtilWrapper.getConfiguration('DevChat', 'OpenAI.model');
if (llmModel) {
args.push("-m", llmModel);
const llmModelData = await ApiKeyManager.llmModel();
if (llmModelData && llmModelData.model) {
args.push("-m", llmModelData.model);
}
return args;
@ -192,14 +192,19 @@ class DevChat {
};
}
apiEndpoint(apiKey: string | undefined): any {
const openAiApiBase = ApiKeyManager.getEndPoint(apiKey);
const openAiApiBaseObject = openAiApiBase ? { OPENAI_API_BASE: openAiApiBase } : {};
return openAiApiBaseObject;
}
async chat(content: string, options: ChatOptions = {}, onData: (data: ChatResponse) => void): Promise<ChatResponse> {
const llmModelData = await ApiKeyManager.llmModel();
if (!llmModelData) {
return {
"prompt-hash": "",
user: "",
date: "",
response: `Error: no valid LLM model is selected!`,
finish_reason: "",
isError: true,
};
}
const args = await this.buildArgs(options);
args.push("--");
args.push(content);
@ -211,36 +216,41 @@ class DevChat {
logger.channel()?.show();
}
// If a DevChat TOKEN is configured, the default API proxy must be used
let openAiApiBaseObject = this.apiEndpoint(openaiApiKey);
const openaiModel = UiUtilWrapper.getConfiguration('DevChat', 'OpenAI.model');
const openaiTemperature = UiUtilWrapper.getConfiguration('DevChat', 'OpenAI.temperature');
const openaiStream = UiUtilWrapper.getConfiguration('DevChat', 'OpenAI.stream');
const llmModel = UiUtilWrapper.getConfiguration('DevChat', 'llmModel');
const tokensPerPrompt = UiUtilWrapper.getConfiguration('DevChat', 'OpenAI.tokensPerPrompt');
const openAiApiBaseObject = llmModelData.api_base? { OPENAI_API_BASE: llmModelData.api_base } : {};
const activeLlmModelKey = llmModelData.api_key;
let devChat: string | undefined = UiUtilWrapper.getConfiguration('DevChat', 'DevChatPath');
if (!devChat) {
devChat = 'devchat';
}
const devchatConfig = {
model: openaiModel,
provider: llmModel,
"tokens-per-prompt": tokensPerPrompt,
OpenAI: {
temperature: openaiTemperature,
stream: openaiStream,
}
const reduceModelData = Object.keys(llmModelData)
.filter(key => key !== 'api_key' && key !== 'provider' && key !== 'model' && key !== 'api_base')
.reduce((obj, key) => {
obj[key] = llmModelData[key];
return obj;
}, {});
let devchatConfig = {};
devchatConfig[llmModelData.model] = {
"provider": llmModelData.provider,
"stream": openaiStream,
...reduceModelData
};
let devchatModels = {
"default_model": llmModelData.model,
"models": devchatConfig};
// write to config file
const configPath = path.join(workspaceDir!, '.chat', 'config.json');
const os = process.platform;
const userHome = os === 'win32' ? fs.realpathSync(process.env.USERPROFILE || '') : process.env.HOME;
const configPath = path.join(userHome!, '.chat', 'config.yml');
// write the model configuration to configPath as YAML
const configJson = JSON.stringify(devchatConfig, null, 2);
fs.writeFileSync(configPath, configJson);
const yamlString = yaml.stringify(devchatModels);
fs.writeFileSync(configPath, yamlString);
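// Illustrative only: for a hypothetical llmModelData of
//   { model: "gpt-4", provider: "openai", temperature: 0.3 }
// with stream enabled, the generated ~/.chat/config.yml would look like:
//
//   default_model: gpt-4
//   models:
//     gpt-4:
//       provider: openai
//       stream: true
//       temperature: 0.3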
try {
@ -257,7 +267,7 @@ class DevChat {
env: {
PYTHONUTF8:1,
...process.env,
OPENAI_API_KEY: openaiApiKey,
OPENAI_API_KEY: activeLlmModelKey,
...openAiApiBaseObject
},
};

View File

@ -3,30 +3,130 @@
import { UiUtilWrapper } from './uiUtil';
export class ApiKeyManager {
static toProviderKey(provider: string) : string | undefined {
let providerNameMap = {
"openai": "OpenAI",
"cohere": "Cohere",
"anthropic": "Anthropic",
"replicate": "Replicate",
"huggingface": "HuggingFace",
"together_ai": "TogetherAI",
"openrouter": "OpenRouter",
"vertex_ai": "VertexAI",
"ai21": "AI21",
"baseten": "Baseten",
"azure": "Azure",
"sagemaker": "SageMaker",
"bedrock": "Bedrock"
};
return providerNameMap[provider];
}
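// Assumed behavior of the mapping above (illustrative):
//   ApiKeyManager.toProviderKey('openai')      -> "OpenAI"
//   ApiKeyManager.toProviderKey('together_ai') -> "TogetherAI"
//   ApiKeyManager.toProviderKey('unknown')     -> undefined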
static async getApiKey(llmType: string = "OpenAI"): Promise<string | undefined> {
let apiKey: string|undefined = undefined;
if (llmType === "OpenAI") {
apiKey = await UiUtilWrapper.secretStorageGet("openai_OPENAI_API_KEY");
}
if (!apiKey) {
apiKey = await UiUtilWrapper.secretStorageGet("devchat_OPENAI_API_KEY");
const llmModelT = await this.llmModel();
if (!llmModelT) {
return undefined;
}
if (!apiKey) {
if (llmType === "OpenAI") {
apiKey = UiUtilWrapper.getConfiguration('DevChat', 'Api_Key_OpenAI');
return llmModelT.api_key;
}
static async llmModel() {
const llmModelT = UiUtilWrapper.getConfiguration('devchat', 'defaultModel');
if (!llmModelT) {
return undefined;
}
const modelProperties = async (modelPropertyName: string, modelName: string) => {
const modelConfig = UiUtilWrapper.getConfiguration("devchat", modelPropertyName);
if (!modelConfig) {
return undefined;
}
if (!apiKey) {
apiKey = UiUtilWrapper.getConfiguration('DevChat', 'Access_Key_DevChat');
let modelProperties: any = {};
for (const key of Object.keys(modelConfig || {})) {
const property = modelConfig![key];
modelProperties[key] = property;
}
if (!modelConfig["provider"]) {
return undefined;
}
if (!modelConfig["api_key"]) {
const providerName = this.toProviderKey(modelConfig["provider"]);
if (!providerName) {
return undefined;
}
const apiKey = await this.loadApiKeySecret(providerName);
if (!apiKey) {
return undefined;
}
modelProperties["api_key"] = apiKey;
}
if (!modelConfig["api_base"] && modelProperties["api_key"]?.startsWith("DC.")) {
modelProperties["api_base"] = "https://api.devchat.ai/v1";
}
modelProperties['model'] = modelName;
return modelProperties;
};
if (llmModelT === "gpt-3.5-turbo") {
return await modelProperties('Model.gpt-3-5', "gpt-3.5-turbo");
}
if (llmModelT === "gpt-3.5-turbo-16k") {
return await modelProperties('Model.gpt-3-5-16k', "gpt-3.5-turbo-16k");
}
if (llmModelT === "gpt-4") {
return await modelProperties('Model.gpt-4', "gpt-4");
}
if (llmModelT === "claude-2") {
return await modelProperties('Model.claude-2', "claude-2");
}
const customModelConfig: any = UiUtilWrapper.getConfiguration('devchat', 'customModel');
if (!customModelConfig) {
return undefined;
}
const customModels = customModelConfig as Array<any>;
for (const model of customModels) {
if (!model.model) {
continue;
}
if (model.model === llmModelT) {
let modelProperties: any = {};
for (const key of Object.keys(model || {})) {
const property = model![key];
modelProperties[key] = property;
}
const modelProvider = model["model"].split('/')[0];
const modelName = model["model"].split('/').slice(1).join('/');
if (!model["api_key"]) {
const providerName = this.toProviderKey(modelProvider);
if (!providerName) {
return undefined;
}
const apiKey = await this.loadApiKeySecret(providerName);
if (!apiKey) {
return undefined;
}
modelProperties["api_key"] = apiKey;
}
if (!model["api_base"] && modelProperties["api_key"]?.startsWith("DC.")) {
modelProperties["api_base"] = "https://api.devchat.ai/v1";
}
modelProperties["provider"] = modelProvider;
modelProperties["model"] = modelName;
return modelProperties;
}
}
if (!apiKey) {
if (llmType === "OpenAI") {
apiKey = process.env.OPENAI_API_KEY;
}
}
return apiKey;
return undefined;
}
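// Callers in this diff consume llmModel() as one resolved record, e.g. (values hypothetical):
//   { model: 'claude-2', provider: 'anthropic', api_key: 'sk-hypothetical', temperature: 0.3 }
// or undefined when no usable model/key combination is configured.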
static getKeyType(apiKey: string): string | undefined {
@ -40,27 +140,9 @@ export class ApiKeyManager {
}
static async writeApiKeySecret(apiKey: string, llmType: string = "Unknown"): Promise<void> {
if (apiKey.startsWith("sk-")) {
await UiUtilWrapper.storeSecret("openai_OPENAI_API_KEY", apiKey);
} else if (apiKey.startsWith("DC.")) {
await UiUtilWrapper.storeSecret("devchat_OPENAI_API_KEY", apiKey);
} else {
if (llmType === "OpenAI") {
await UiUtilWrapper.storeSecret("openai_OPENAI_API_KEY", apiKey);
} else if (llmType === "DevChat") {
await UiUtilWrapper.storeSecret("devchat_OPENAI_API_KEY", apiKey);
}
}
await UiUtilWrapper.storeSecret(`Access_KEY_${llmType}`, apiKey);
}
static getEndPoint(apiKey: string | undefined): string | undefined {
let endPoint = UiUtilWrapper.getConfiguration('DevChat', 'API_ENDPOINT');
if (!endPoint) {
endPoint = process.env.OPENAI_API_BASE;
}
if (!endPoint && apiKey?.startsWith("DC.")) {
endPoint = "https://api.devchat.ai/v1";
}
return endPoint;
static async loadApiKeySecret(llmType: string = "Unknown"): Promise<string | undefined> {
return await UiUtilWrapper.secretStorageGet(`Access_KEY_${llmType}`);
}
}
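Secrets are now stored under a single "Access_KEY_<provider>" naming scheme, which makes the write/load pair symmetric. A hedged round-trip sketch (key value hypothetical):

await ApiKeyManager.writeApiKeySecret("sk-hypothetical-key", "OpenAI"); // stored as Access_KEY_OpenAI
const key = await ApiKeyManager.loadApiKeySecret("OpenAI");             // -> "sk-hypothetical-key"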

View File

@ -12,12 +12,12 @@ import { ApiKeyManager } from './apiKey';
async function createOpenAiKeyEnv() {
let envs = {...process.env};
let openaiApiKey = await ApiKeyManager.getApiKey();
if (openaiApiKey) {
envs['OPENAI_API_KEY'] = openaiApiKey;
}
const llmModelData = await ApiKeyManager.llmModel();
if (llmModelData && llmModelData.api_key) {
envs['OPENAI_API_KEY'] = llmModelData.api_key;
}
const openAiApiBase = ApiKeyManager.getEndPoint(openaiApiKey);
const openAiApiBase = llmModelData.api_base;
if (openAiApiBase) {
envs['OPENAI_API_BASE'] = openAiApiBase;
}
@ -228,11 +228,11 @@ export function runCommandStringAndWriteOutputSync(command: string, outputFile:
return JSON.stringify(data);
};
fs.writeFileSync(outputFile, onOutputFile(command, output));
return { exitCode: 0, stdout: output, stderr: '' }
return { exitCode: 0, stdout: output, stderr: '' };
} catch (error) {
logger.channel()?.error(`Error occurred: ${error}`);
logger.channel()?.show();
return { exitCode: 1, stdout: '', stderr: String(error) }
return { exitCode: 1, stdout: '', stderr: String(error) };
}
}

View File

@ -1,7 +1,7 @@
import React, { useEffect } from "react";
import { keyframes } from "@emotion/react";
import { Container, Text } from "@mantine/core";
import { Box, Container, Text } from "@mantine/core";
import MessageBody from "@/views/components/MessageBody";
import { observer } from "mobx-react-lite";
import { useMst } from "@/views/stores/RootStore";
@ -43,7 +43,7 @@ const getBlocks = (message) => {
blocks.push(unmatchedText);
return blocks;
}
};
const CurrentMessage = observer((props: any) => {
const { width } = props;
@ -78,10 +78,9 @@ const CurrentMessage = observer((props: any) => {
}, [hasDone]);
return generating
? <Container
? <Box
sx={{
margin: 0,
padding: 0,
marginBottom: 50,
width: width,
pre: {
whiteSpace: 'break-spaces'
@ -89,7 +88,7 @@ const CurrentMessage = observer((props: any) => {
}}>
<MessageBody messageText={renderBlocks.join('\n\n')} messageType="bot" />
<MessageBlink />
</Container>
</Box>
: <></>;
});

View File

@ -1,12 +1,19 @@
import React from "react";
import { Header, Avatar, Flex, Text, ActionIcon } from "@mantine/core";
import { Header, Avatar, Flex, Text, ActionIcon, createStyles } from "@mantine/core";
import BalanceTip from "@/views/components/BalanceTip";
import { IconSettings } from "@tabler/icons-react";
// @ts-ignore
import SvgAvatarDevChat from "../MessageAvatar/avatar_devchat.svg";
import messageUtil from "@/util/MessageUtil";
const useStyles = createStyles((theme) => ({
logoName:{
color: 'var(--vscode-foreground)'
}
}));
export default function Head() {
const {classes} = useStyles();
const openSetting = () => {
messageUtil.sendMessage({
command: "doCommand",
@ -18,8 +25,7 @@ export default function Head() {
height={40}
style={{
backgroundColor: "var(--vscode-sideBar-background)",
// borderBottom: "1px solid var(--vscode-disabledForeground)",
boxShadow: "0 0px 3px var(--vscode-widget-shadow)",
borderBottom: '1px solid #ced4da',
}}
>
<Flex justify="space-between" align="center" sx={{ padding: "0 10px" }}>
@ -32,7 +38,7 @@ export default function Head() {
}}
>
<Avatar color="indigo" size={25} radius="xl" src={SvgAvatarDevChat} />
<Text weight="bold">DevChat</Text>
<Text weight="bold" className={classes.logoName}>DevChat</Text>
</Flex>
<Flex align="center" gap="xs" sx={{paddingRight:10}}>
<div>

View File

@ -1,8 +1,8 @@
import { useMantineTheme, Flex, Stack, Accordion, Box, ActionIcon, ScrollArea, Center, Popover, Textarea, Text, Divider, Indicator, HoverCard, Drawer } from "@mantine/core";
import { useDisclosure, useListState, useResizeObserver, useTimeout } from "@mantine/hooks";
import { IconGitBranch, IconBook, IconX, IconSquareRoundedPlus, IconSend, IconPaperclip, IconChevronDown } from "@tabler/icons-react";
import { useMantineTheme, Flex, Stack, ActionIcon, ScrollArea, Popover, Textarea, Text, Indicator, Drawer, Group, Button, Menu,createStyles } from "@mantine/core";
import { useDisclosure, useResizeObserver } from "@mantine/hooks";
import { IconGitBranch, IconSend, IconPaperclip, IconChevronDown, IconTextPlus, IconRobot } from "@tabler/icons-react";
import React, { useState, useEffect } from "react";
import { IconGitBranchChecked, IconShellCommand, IconMouseRightClick } from "@/views/components/ChatIcons";
import { IconGitBranchChecked, IconShellCommand } from "@/views/components/ChatIcons";
import messageUtil from '@/util/MessageUtil';
import InputContexts from './InputContexts';
import { observer } from "mobx-react-lite";
@ -10,10 +10,32 @@ import { useMst } from "@/views/stores/RootStore";
import { ChatContext } from "@/views/stores/InputStore";
import { Message } from "@/views/stores/ChatStore";
const useStyles = createStyles((theme) => ({
actionIcon:{
color: 'var(--vscode-dropdown-foreground)',
borderColor:'var(--vscode-dropdown-border)',
backgroundColor: 'var(--vscode-dropdown-background)',
'&:hover':{
color: 'var(--vscode-dropdown-foreground)',
borderColor:'var(--vscode-dropdown-border)',
backgroundColor: 'var(--vscode-dropdown-background)'
},
'&[data-disabled]': {
borderColor: "transparent",
backgroundColor: "#e9ecef",
color: "#adb5bd",
cursor: "not-allowed",
backgroundImage: "none",
pointerEvents: "none",
}
}
}));
const InputMessage = observer((props: any) => {
const {classes} = useStyles();
const { chatPanelWidth } = props;
const { input, chat } = useMst();
const { contexts, menuOpend, menuType, currentMenuIndex, contextMenus, commandMenus } = input;
const { contexts, menuOpend, menuType, currentMenuIndex, contextMenus, commandMenus, modelMenus } = input;
const { generating } = chat;
const [drawerOpened, { open: openDrawer, close: closeDrawer }] = useDisclosure(false);
@ -22,12 +44,6 @@ const InputMessage = observer((props: any) => {
const [commandMenusNode, setCommandMenusNode] = useState<any>(null);
const [inputRef, inputRect] = useResizeObserver();
const handlePlusClick = (event: React.MouseEvent<HTMLButtonElement>) => {
input.openMenu('contexts');
inputRef.current.focus();
event.stopPropagation();
};
const handleInputChange = (event: React.ChangeEvent<HTMLTextAreaElement>) => {
const value = event.target.value;
// if value start with '/' command show menu
@ -115,86 +131,21 @@ const InputMessage = observer((props: any) => {
const contextMenuIcon = (name: string) => {
if (name === 'git diff --cached') {
return (<IconGitBranchChecked size={16}
color='var(--vscode-menu-foreground)'
style={{
marginTop: 8,
marginLeft: 12,
}} />);
return <IconGitBranchChecked size={14} color='var(--vscode-menu-foreground)'/>;
}
if (name === 'git diff HEAD') {
return (<IconGitBranch size={16}
color='var(--vscode-menu-foreground)'
style={{
marginTop: 8,
marginLeft: 12,
}} />);
return <IconGitBranch size={14} color='var(--vscode-menu-foreground)'/>;
}
return (<IconShellCommand size={16}
color='var(--vscode-menu-foreground)'
style={{
marginTop: 8,
marginLeft: 12,
}} />);
return <IconShellCommand size={14} color='var(--vscode-menu-foreground)'/>;
};
const contextMenusNode = [...contextMenus]
.sort((a, b) => {
if (a.name === '<custom command>') {
return 1; // Placing '<custom command>' at the end
} else if (b.name === '<custom command>') {
return -1; // Placing '<custom command>' at the front
} else {
return (a.name || "").localeCompare(b.name || ""); // Sorting alphabetically for other cases
}
})
.map(({ pattern, description, name }, index) => {
return (
<Flex
key={`contexts-menus-${index}`}
mih={40}
gap="md"
justify="flex-start"
align="flex-start"
direction="row"
wrap="wrap"
sx={{
padding: '5px 0',
'&:hover': {
cursor: 'pointer',
color: 'var(--vscode-commandCenter-activeForeground)',
backgroundColor: 'var(--vscode-commandCenter-activeBackground)'
}
}}
onClick={() => {
handleContextClick(name);
input.closeMenu();
}}>
{contextMenuIcon(name)}
<Stack spacing={0} w="calc(100% - 60px)">
<Text sx={{
fontSize: 'sm',
fontWeight: 'bolder',
color: 'var(--vscode-menu-foreground)'
}}>
{name}
</Text>
<Text sx={{
fontSize: 'sm',
color: theme.colors.gray[6],
}}>
{description}
</Text>
</Stack>
</Flex>);
});
useEffect(() => {
input.fetchContextMenus().then();
input.fetchCommandMenus().then();
messageUtil.registerHandler('regCommandList', (message: { result: object[]}) => {
input.updateCommands(message.result);
});
input.fetchModelMenus().then();
messageUtil.registerHandler('regCommandList', (message: { result: object[]}) => {
input.updateCommands(message.result);
});
messageUtil.registerHandler('appendContext', (message: { command: string; context: string }) => {
// context is a temp file path
const match = /\|([^]+?)\]/.exec(message.context);
@ -231,6 +182,22 @@ const InputMessage = observer((props: any) => {
inputRef.current.focus();
}, []);
const getModelShowName = (modelName: string) => {
const nameMap = {
"gpt-3.5-turbo": "GPT-3.5",
"gpt-3.5-turbo-16k": "GPT-3.5-16K",
"gpt-4": "GPT-4",
"claude-2": "CLAUDE-2"
};
if (modelName in nameMap) {
return nameMap[modelName];
} else if (modelName.lastIndexOf('/') > -1) {
return modelName.substring(modelName.lastIndexOf('/') + 1).toLocaleUpperCase();
} else {
return modelName.toUpperCase();
}
};
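// For example: getModelShowName("gpt-3.5-turbo-16k") returns "GPT-3.5-16K" via the map,
// getModelShowName("openai/gpt-4") strips the vendor prefix and returns "GPT-4",
// and an unknown name such as "my-model" falls through to "MY-MODEL".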
useEffect(() => {
let filtered;
if (input.value) {
@@ -294,8 +261,136 @@ const InputMessage = observer((props: any) => {
}
}, [contexts.length]);
const changeModel = (value: string) => {
chat.changeChatModel(value);
messageUtil.sendMessage({
command: "updateSetting",
key1: "devchat",
key2: "defaultModel",
value: value,
});
};
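// changeModel both switches the in-memory model (chat.chatModel) and persists the
// choice to the extension's "devchat.defaultModel" setting, so getSettings() on the
// next panel load reads the same value back through the "getSetting" handler.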
const menuStyles = {
arrow: {
borderColor: 'var(--vscode-menu-border)',
},
dropdown: {
borderColor: 'var(--vscode-menu-border)',
backgroundColor: 'var(--vscode-menu-background)'
},
itemLabel: {
color: 'var(--vscode-menu-foreground)'
},
item: {
padding: 5,
backgroundColor: 'var(--vscode-menu-background)',
'&:hover,&[data-hovered=true]': {
color: 'var(--vscode-commandCenter-activeForeground)',
borderColor: 'var(--vscode-commandCenter-border)',
backgroundColor: 'var(--vscode-commandCenter-activeBackground)'
}
}
};
const buttonStyles = {
root: {
color: 'var(--vscode-dropdown-foreground)',
borderColor: 'var(--vscode-dropdown-border)',
backgroundColor: 'var(--vscode-dropdown-background)',
'&:hover': {
color: 'var(--vscode-dropdown-foreground)',
borderColor: 'var(--vscode-dropdown-border)',
backgroundColor: 'var(--vscode-dropdown-background)'
}
}
};
return (
<>
<Stack
spacing={0}
sx={{
padding:'0 5px'
}}
>
<Group
spacing={5}
sx={{
marginTop: 5
}}
>
<Menu
width={chatPanelWidth-10}
position='bottom-start'
shadow="sm"
withArrow
styles={menuStyles}
>
<Menu.Target>
<ActionIcon
radius="xl"
variant="default"
disabled={generating}
className={classes.actionIcon}
>
<IconTextPlus size="1rem" />
</ActionIcon>
</Menu.Target>
<Menu.Dropdown>
{[...contextMenus]
.sort((a, b) => {
if (a.name === '<custom command>') {
return 1; // Placing '<custom command>' at the end
} else if (b.name === '<custom command>') {
return -1; // Also placing '<custom command>' at the end
} else {
return (a.name || "").localeCompare(b.name || ""); // Sorting alphabetically for other cases
}
})
.map(({ pattern, description, name }, index) => {
return (
<Menu.Item
key={`contexts-menus-${index}`}
icon={contextMenuIcon(name)}
onClick={() => {
handleContextClick(name);
}}
>
{name}
<Text sx={{fontSize: '9pt',color: theme.colors.gray[6],}}>
{description}
</Text>
</Menu.Item>);
})}
</Menu.Dropdown>
</Menu>
<Menu
position="bottom-start"
withArrow
shadow="md"
styles={menuStyles}
>
<Menu.Target>
<Button
disabled={generating}
variant="default"
size="xs"
radius="xl"
leftIcon={<IconRobot size="1rem" />}
styles={buttonStyles}
>
{getModelShowName(chat.chatModel)}
</Button>
</Menu.Target>
<Menu.Dropdown>
{modelMenus.map((modelName) => {
return <Menu.Item key={modelName} onClick={() => changeModel(modelName)}>
{getModelShowName(modelName)}
</Menu.Item>;
})}
</Menu.Dropdown>
</Menu>
</Group>
{contexts && contexts.length > 0 &&
<Drawer
opened={drawerOpened}
@@ -318,19 +413,15 @@ const InputMessage = observer((props: any) => {
</Drawer >
}
<Popover
id='commandMenu'
position='top-start'
closeOnClickOutside={true}
shadow="sm"
width={chatPanelWidth}
width={chatPanelWidth-10}
opened={menuOpend}
onChange={() => {
input.closeMenu();
inputRef.current.focus();
}}
onClose={() => input.closeMenu()}
onOpen={() => menuType !== '' ? input.openMenu(menuType) : input.closeMenu()}
returnFocus={true}>
>
<Popover.Target>
<Textarea
id='chat-textarea'
@@ -344,11 +435,14 @@ const InputMessage = observer((props: any) => {
maxRows={10}
radius="md"
size="xs"
sx={{ pointerEvents: 'all' }}
placeholder="Send a message."
sx={{
pointerEvents: 'all' ,
marginTop: 5,
marginBottom: 5
}}
placeholder="Ask DevChat a question or type / for workflow"
styles={{
icon: { alignItems: 'center', paddingLeft: '5px' },
rightSection: { alignItems: 'center', paddingRight: '5px', marginRight: (contexts.length > 0 ? '18px' : '0') },
rightSection: { alignItems: 'flex-end', marginBottom:'6px', marginRight: (contexts.length > 0 ? '24px' : '10px') },
input: {
fontSize: 'var(--vscode-editor-font-size)',
backgroundColor: 'var(--vscode-input-background)',
@@ -359,54 +453,19 @@ const InputMessage = observer((props: any) => {
}
}
}}
icon={
<ActionIcon
size='sm'
disabled={generating}
onClick={handlePlusClick}
sx={{
pointerEvents: 'all',
'&:hover': {
backgroundColor: 'var(--vscode-toolbar-activeBackground)'
},
'&[data-disabled]': {
borderColor: 'var(--vscode-input-border)',
backgroundColor: 'var(--vscode-toolbar-activeBackground)'
}
}}
>
<IconSquareRoundedPlus size="1rem" />
</ActionIcon>
}
rightSection={
<Flex>
<ActionIcon
size='sm'
disabled={generating}
onClick={handleSendClick}
sx={{
pointerEvents: 'all',
'&:hover': {
backgroundColor: 'var(--vscode-toolbar-activeBackground)'
},
'&[data-disabled]': {
borderColor: 'var(--vscode-input-border)',
backgroundColor: 'var(--vscode-toolbar-activeBackground)'
}
}}>
<IconSend size="1rem" />
</ActionIcon>
<>
{contexts.length > 0 &&
<Indicator label={contexts.length} size={12}>
<ActionIcon
size='sm'
size='md'
radius="md"
variant="default"
disabled={generating}
onClick={openDrawer}
className={classes.actionIcon}
sx={{
pointerEvents: 'all',
'&:hover': {
backgroundColor: 'var(--vscode-toolbar-activeBackground)'
},
'&[data-disabled]': {
borderColor: 'var(--vscode-input-border)',
backgroundColor: 'var(--vscode-toolbar-activeBackground)'
@@ -414,64 +473,47 @@ const InputMessage = observer((props: any) => {
}}>
<IconPaperclip size="1rem" />
</ActionIcon>
</Indicator>}
</Flex>
</Indicator>
}
<ActionIcon
size='md'
radius="md"
variant="default"
disabled={generating}
onClick={handleSendClick}
className={classes.actionIcon}
sx={{
marginLeft: '10px',
pointerEvents: 'all',
backgroundColor:'#ED6A45',
border:'0',
color:'#FFFFFF',
'&:hover': {
backgroundColor:'#ED6A45',
color:'#FFFFFF',
opacity:0.7
}
}}>
<IconSend size="1rem" />
</ActionIcon>
</>
}
/>
</Popover.Target>
{
menuType === 'contexts'
? (<Popover.Dropdown
sx={{
padding: 0,
color: 'var(--vscode-menu-foreground)',
borderColor: 'var(--vscode-menu-border)',
backgroundColor: 'var(--vscode-menu-background)'
}}>
<Flex
gap="3px"
justify="flex-start"
align="center"
direction="row"
wrap="wrap"
sx={{ overflow: 'hidden' }}>
<IconMouseRightClick
size={14}
color={'var(--vscode-menu-foreground)'}
style={{ marginLeft: '12px' }} />
<Text
c="dimmed"
ta="left"
fz='sm'
m='12px 5px'
truncate='end'
w={chatPanelWidth - 60}>
Tip: Select code or a file & right-click
</Text>
</Flex>
<Divider />
<Text sx={{ padding: '5px 5px 5px 10px' }}>DevChat Contexts</Text>
<ScrollArea.Autosize mah={240} type="always">
{contextMenusNode}
</ScrollArea.Autosize>
</Popover.Dropdown>)
: menuType === 'commands' && commandMenusNode.length > 0
? <Popover.Dropdown
sx={{
padding: 0,
color: 'var(--vscode-menu-foreground)',
borderColor: 'var(--vscode-menu-border)',
backgroundColor: 'var(--vscode-menu-background)'
}}>
<Text sx={{ padding: '5px 5px 5px 10px' }}>DevChat Workflows</Text>
<ScrollArea.Autosize mah={240} type="always">
{commandMenusNode}
</ScrollArea.Autosize>
</Popover.Dropdown>
: <></>
}
<Popover.Dropdown
sx={{
padding: 0,
color: 'var(--vscode-menu-foreground)',
borderColor: 'var(--vscode-menu-border)',
backgroundColor: 'var(--vscode-menu-background)'
}}>
<Text sx={{ padding: '5px 5px 5px 10px' }}>DevChat Workflows</Text>
<ScrollArea.Autosize mah={240} type="always">
{commandMenusNode}
</ScrollArea.Autosize>
</Popover.Dropdown>
</Popover >
</>);
</Stack>);
});
export default InputMessage;

View File

@@ -7,13 +7,13 @@ import { observer } from "mobx-react-lite";
import { useMst } from "@/views/stores/RootStore";
import { Message } from "@/views/stores/ChatStore";
import MessageContext from "@/views/components/MessageContext";
import CurrentMessage from "@/views/components/CurrentMessage";
const MessageList = observer((props: any) => {
const { chat } = useMst();
const { chatPanelWidth } = props;
return (<>
return (<Stack spacing={0} sx={{margin:'0 10px 10px 10px'}}>
{chat.messages.map((item, index: number) => {
const { message: messageText, type: messageType, hash: messageHash, contexts } = item;
// setMessage(messageText);
@@ -22,8 +22,7 @@ const MessageList = observer((props: any) => {
key={`message-${index}`}
sx={{
padding: 0,
margin: 0,
width: chatPanelWidth
margin: 0
}}>
<MessageAvatar
key={`message-header-${index}`}
@@ -47,7 +46,8 @@ const MessageList = observer((props: any) => {
{index !== chat.messages.length - 1 && <Divider my={3} key={`message-divider-${index}`} />}
</Stack >;
})}
</>);
<CurrentMessage />
</Stack>);
});
export default MessageList;

View File

@@ -1,24 +1,16 @@
import * as React from "react";
import { useEffect, useRef } from "react";
import {
ActionIcon,
Alert,
Anchor,
Box,
Button,
Center,
Chip,
Container,
Flex,
Group,
Radio,
Stack,
px,
ActionIcon,
Alert,
Box,
Button,
Center,
Stack,
} from "@mantine/core";
import { ScrollArea } from "@mantine/core";
import { useResizeObserver, useTimeout, useViewportSize } from "@mantine/hooks";
import { useElementSize, useResizeObserver, useTimeout, useViewportSize } from "@mantine/hooks";
import messageUtil from "@/util/MessageUtil";
import CurrentMessage from "@/views/components/CurrentMessage";
import StopButton from "@/views/components/StopButton";
import RegenerationButton from "@/views/components/RegenerationButton";
import { observer } from "mobx-react-lite";
@@ -28,240 +20,208 @@ import { Message } from "@/views/stores/ChatStore";
import InputMessage from "@/views/components/InputMessage";
import MessageList from "@/views/components/MessageList";
import {
IconCircleArrowDownFilled,
IconExternalLink,
IconCircleArrowDownFilled,
IconExternalLink,
} from "@tabler/icons-react";
const chatPanel = observer(() => {
const { input, chat } = useMst();
const { input, chat } = useMst();
const [chatContainerRef, chatContainerRect] = useResizeObserver();
const scrollViewport = useRef<HTMLDivElement>(null);
const { height, width } = useViewportSize();
const [chatContainerRef, chatContainerRect] = useResizeObserver();
const scrollViewport = useRef<HTMLDivElement>(null);
const { height, width } = useViewportSize();
const { ref: inputAreaRef, height: inputAreaHeight } = useElementSize();
const chatPanelWidth = chatContainerRect.width;
const scrollToBottom = () =>
scrollViewport?.current?.scrollTo({
top: scrollViewport.current.scrollHeight,
behavior: "smooth",
});
const chatPanelWidth = chatContainerRect.width;
const getSettings = () => {
messageUtil.sendMessage({
command: "getSetting",
key1: "DevChat",
key2: "OpenAI.model",
});
};
const getFeatureToggles = () => {
messageUtil.sendMessage({
command: "featureToggles",
});
};
const timer = useTimeout(() => {
if (chat.isBottom) {
scrollToBottom();
}
}, 1000);
const chipStyle = {
color: "var(--vscode-checkbox-foreground)",
fontSize: "var(--vscode-editor-font-size)",
backgroundColor: "var(--vscode-checkbox-background)",
borderColor: "var(--vscode-checkbox-border)",
"&[data-checked]": {
borderColor: "var(--vscode-checkbox-selectBorder)",
},
};
const onScrollPositionChange = ({ x, y }) => {
const sh = scrollViewport.current?.scrollHeight || 0;
const vh = scrollViewport.current?.clientHeight || 0;
const gap = sh - vh - y;
const isBottom = sh < vh ? true : gap < 100;
const isTop = y === 0;
// console.log(`sh:${sh},vh:${vh},x:${x},y:${y},gap:${gap}`);
if (isBottom) {
chat.onMessagesBottom();
} else if (isTop) {
chat.onMessagesTop();
if (!chat.isLastPage) {
//TODO: Data loading flickers and has poor performance, so I temporarily disabled the loading logic.
// dispatch(fetchHistoryMessages({ pageIndex: pageIndex + 1 }));
}
} else {
chat.onMessagesMiddle();
}
};
useEffect(() => {
getSettings();
getFeatureToggles();
chat.fetchHistoryMessages({ pageIndex: 0 }).then();
messageUtil.registerHandler(
"receiveMessagePartial",
(message: { text: string }) => {
chat.startResponsing(message.text);
timer.start();
}
);
messageUtil.registerHandler(
"receiveMessage",
(message: { text: string; isError: boolean; hash }) => {
chat.stopGenerating(true, message.hash, message.text);
if (message.isError) {
chat.happendError(message.text);
}
}
);
messageUtil.registerHandler(
"systemMessage",
(message: { text: string }) => {
const messageItem = Message.create({
type: "system",
message: message.text,
const scrollToBottom = () =>
scrollViewport?.current?.scrollTo({
top: scrollViewport.current.scrollHeight,
behavior: "smooth",
});
chat.newMessage(messageItem);
// start generating
chat.startSystemMessage();
// Clear the input field
input.setValue("");
input.clearContexts();
}
);
messageUtil.registerHandler("getSetting", (message: { value: string }) => {
chat.changeChatModel(message.value);
});
messageUtil.registerHandler(
"featureToggles",
(message: { features: object }) => {
// chat.changeChatModel(message.value);
chat.updateFeatures(message.features);
}
);
timer.start();
return () => {
timer.clear();
const getSettings = () => {
messageUtil.sendMessage({
command: "getSetting",
key1: "devchat",
key2: "defaultModel",
});
};
}, []);
useEffect(() => {
scrollToBottom();
}, [chat.scrollBottom]);
const getFeatureToggles = () => {
messageUtil.sendMessage({
command: "featureToggles",
});
};
return (
<Box
ref={chatContainerRef}
miw={300}
sx={{
height: "100%",
margin: 0,
padding: "10px 10px 5px 10px",
background: "var(--vscode-sideBar-background)",
color: "var(--vscode-editor-foreground)",
}}
>
{!chat.isBottom && (
<ActionIcon
onClick={() => {
const timer = useTimeout(() => {
if (chat.isBottom) {
scrollToBottom();
}}
title="Bottom"
variant="transparent"
sx={{ position: "absolute", bottom: 80, right: 20, zIndex: 1 }}
}
}, 1000);
const onScrollPositionChange = ({ x, y }) => {
const sh = scrollViewport.current?.scrollHeight || 0;
const vh = scrollViewport.current?.clientHeight || 0;
const gap = sh - vh - y;
const isBottom = sh < vh ? true : gap < 100;
const isTop = y === 0;
// console.log(`sh:${sh},vh:${vh},x:${x},y:${y},gap:${gap}`);
if (isBottom) {
chat.onMessagesBottom();
} else if (isTop) {
chat.onMessagesTop();
if (!chat.isLastPage) {
//TODO: Data loading flickers and has poor performance, so I temporarily disabled the loading logic.
// dispatch(fetchHistoryMessages({ pageIndex: pageIndex + 1 }));
}
} else {
chat.onMessagesMiddle();
}
};
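// The viewport counts as "at bottom" within a 100px tolerance: e.g. with
// scrollHeight=1000, clientHeight=400 and y=520 the gap is 1000-400-520=80,
// so chat.onMessagesBottom() fires and the auto-scroll timer keeps following output.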
useEffect(() => {
getSettings();
getFeatureToggles();
chat.fetchHistoryMessages({ pageIndex: 0 }).then();
messageUtil.registerHandler(
"receiveMessagePartial",
(message: { text: string }) => {
chat.startResponsing(message.text);
timer.start();
}
);
messageUtil.registerHandler(
"receiveMessage",
(message: { text: string; isError: boolean; hash }) => {
chat.stopGenerating(true, message.hash, message.text);
if (message.isError) {
chat.happendError(message.text);
}
}
);
messageUtil.registerHandler(
"systemMessage",
(message: { text: string }) => {
const messageItem = Message.create({
type: "system",
message: message.text,
});
chat.newMessage(messageItem);
// start generating
chat.startSystemMessage();
// Clear the input field
input.setValue("");
input.clearContexts();
}
);
messageUtil.registerHandler("getSetting", (message: { value: string }) => {
chat.changeChatModel(message.value);
});
messageUtil.registerHandler(
"featureToggles",
(message: { features: object }) => {
// chat.changeChatModel(message.value);
chat.updateFeatures(message.features);
}
);
timer.start();
return () => {
timer.clear();
};
}, []);
useEffect(() => {
scrollToBottom();
}, [chat.scrollBottom]);
return (
<Stack
ref={chatContainerRef}
miw={300}
spacing={0}
sx={{
height:'100%',
background: "var(--vscode-sideBar-background)",
color: "var(--vscode-editor-foreground)",
}}
>
<IconCircleArrowDownFilled size="1.125rem" />
</ActionIcon>
)}
<ScrollArea
sx={{
height: chat.generating ? height - px("9rem") : height - px("7rem"),
padding: 0,
margin: 0,
}}
onScrollPositionChange={onScrollPositionChange}
viewportRef={scrollViewport}
>
<MessageList chatPanelWidth={chatPanelWidth} />
<CurrentMessage />
{chat.errorMessage && (
<Box mb={20}>
<Alert
styles={{
message: { fontSize: "var(--vscode-editor-font-size)" },
}}
w={chatContainerRect.width}
color="gray"
variant="filled"
<ScrollArea
sx={{
height: height - inputAreaHeight - 40,
margin: 0
}}
onScrollPositionChange={onScrollPositionChange}
viewportRef={scrollViewport}
>
{chat.errorMessage}
</Alert>
{chat.errorMessage.search("Insufficient balance") > -1 && (
<Button
size="xs"
component="a"
href={chat.rechargeSite}
mt={5}
variant="outline"
leftIcon={<IconExternalLink size="0.9rem" />}
>
<MessageList chatPanelWidth={chatPanelWidth} />
{chat.errorMessage && (
<Box sx={{
margin:'0 10px 40px 10px'
}}>
<Alert
styles={{
message: { fontSize: "var(--vscode-editor-font-size)" },
}}
color="gray"
variant="filled"
>
{chat.errorMessage}
</Alert>
{chat.errorMessage.search("Insufficient balance") > -1 && (
<Button
size="xs"
component="a"
href={chat.rechargeSite}
mt={5}
variant="outline"
leftIcon={<IconExternalLink size="0.9rem" />}
>
Open the official website to recharge.
</Button>
)}
</Box>
)}
</ScrollArea>
<Stack
spacing={0}
sx={{ position: "absolute", bottom: 10, width: chatPanelWidth }}
>
{chat.generating && (
<Center mb={5}>
<StopButton />
</Center>
)}
{chat.errorMessage && (
<Center mb={5}>
<RegenerationButton />
</Center>
)}
<InputMessage chatPanelWidth={chatPanelWidth} />
<Chip.Group
multiple={false}
value={chat.chatModel}
onChange={(value) => {
chat.changeChatModel(value);
messageUtil.sendMessage({
command: "updateSetting",
key1: "DevChat",
key2: "OpenAI.model",
value: value,
});
}}
>
<Group position="left" spacing={5} mt={5}>
<Chip size="xs" styles={{ label: chipStyle }} value="gpt-3.5-turbo">
GPT-3.5
</Chip>
<Chip
size="xs"
styles={{ label: chipStyle }}
value="gpt-3.5-turbo-16k"
</Button>
)}
</Box>
)}
{!chat.isBottom && (
<ActionIcon
onClick={() => {
scrollToBottom();
}}
title="Bottom"
variant="transparent"
sx={{ position: "absolute", bottom: 5, right: 16, zIndex: 1 }}
>
<IconCircleArrowDownFilled size="1.125rem" />
</ActionIcon>
)}
{chat.generating && (
<Center sx={{ position: "absolute", bottom: 5, zIndex: 1,width:'100%' }}>
<StopButton />
</Center>
)}
{chat.errorMessage && (
<Center sx={{ position: "absolute", bottom: 5, zIndex: 1,width:'100%' }}>
<RegenerationButton />
</Center>
)}
</ScrollArea>
<Box
ref={inputAreaRef}
sx={{
position:"absolute",
bottom:0,
width:chatPanelWidth,
background: "var(--vscode-sideBar-background)",
boxShadow: "0 0 10px 0 var(--vscode-widget-shadow)",
borderTop:'1px solid #ced4da',
}}
>
GPT-3.5-16K
</Chip>
<Chip size="xs" styles={{ label: chipStyle }} value="gpt-4">
GPT-4
</Chip>
</Group>
</Chip.Group>
</Stack>
</Box>
);
<InputMessage chatPanelWidth={chatPanelWidth} />
</Box>
</Stack>
);
});
export default chatPanel;
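The panel layout above replaces the old fixed-height strip (height minus 7rem/9rem) with a measured one: the pinned input Box is observed with useElementSize and its height is subtracted from the ScrollArea. A minimal standalone sketch of that pattern, with a hypothetical component name and placeholder content standing in for MessageList and InputMessage:
import * as React from "react";
import { Box, ScrollArea, Stack } from "@mantine/core";
import { useElementSize, useViewportSize } from "@mantine/hooks";
// Hypothetical component; the 40 matches the fixed allowance used in the panel above.
const PinnedInputLayout = () => {
  const { height } = useViewportSize();
  const { ref: inputAreaRef, height: inputAreaHeight } = useElementSize();
  return (
    <Stack spacing={0} sx={{ height: "100%" }}>
      {/* The scroll region shrinks as the measured input area grows. */}
      <ScrollArea sx={{ height: height - inputAreaHeight - 40 }}>
        {/* message list renders here */}
      </ScrollArea>
      {/* Measuring the pinned Box keeps messages from hiding behind it. */}
      <Box ref={inputAreaRef} sx={{ position: "absolute", bottom: 0, width: "100%" }}>
        {/* input renders here */}
      </Box>
    </Stack>
  );
};
export default PinnedInputLayout;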

View File

@@ -75,7 +75,7 @@ export const ChatStore = types.model('Chat', {
isBottom: true,
isTop: false,
scrollBottom: 0,
chatModel: 'gpt-4',
chatModel: 'GPT-3.5',
rechargeSite: 'https://devchat.ai/pricing/',
features: types.optional(types.frozen(), {})
})

View File

@@ -20,6 +20,18 @@ const regContextMenus = async () => {
}
});
};
const regModelMenus = async () => {
return new Promise<string[]>((resolve, reject) => {
try {
messageUtil.sendMessage({ command: 'regModelList' });
messageUtil.registerHandler('regModelList', (message: { result: string[] }) => {
resolve(message.result);
});
} catch (e) {
reject(e);
}
});
};
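// One-shot request/response over the webview bridge: send 'regModelList' to the
// extension host and resolve with the model names the 'regModelList' handler
// receives back; regContextMenus above follows the same pattern.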
export const ChatContext = types.model({
file: types.maybe(types.string),
@@ -44,6 +56,7 @@ export const InputStore = types
currentMenuIndex: 0,
commandMenus: types.array(MenuItem),
contextMenus: types.array(MenuItem),
modelMenus: types.array(types.string)
}).
actions(self => ({
setValue(value: string) {
@@ -88,7 +101,14 @@ export const InputStore = types
console.error("Failed to fetch context menus", error);
}
}),
fetchModelMenus: flow(function* () {
try {
const models = yield regModelMenus();
self.modelMenus.push(...models);
} catch (error) {
console.error("Failed to fetch context menus", error);
}
}),
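// As an MST flow, the generator's `yield regModelMenus()` suspends until the
// promise settles, and the resolved names are appended to self.modelMenus
// inside the action, keeping the mutation MobX-safe.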
fetchCommandMenus: flow(function* () {
const regCommandMenus = async () => {
return new Promise<Item[]>((resolve, reject) => {
@@ -105,7 +125,7 @@ export const InputStore = types
} catch (error) {
console.error("Failed to fetch command menus", error);
}
})
}),
}));
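With regModelMenus and fetchModelMenus in place, populating and using the model list from a component takes two calls; a hypothetical sketch, assuming the component is wired through useMst() as above:
// Hypothetical usage, e.g. inside a useEffect of a component using useMst():
const { input, chat } = useMst();
input.fetchModelMenus().then(() => {
  console.log([...input.modelMenus]);        // e.g. ["gpt-4", "claude-2", ...]
  chat.changeChatModel(input.modelMenus[0]); // mirrors what a model Menu.Item click does
});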

View File

@@ -76,6 +76,7 @@ describe('sendMessageBase', () => {
text: 'Hello, world!'
};
const chatResponse: ChatResponse = {
"finish_reason": "",
response: 'Hello, user!',
isError: false,
user: 'user',
@@ -92,6 +93,7 @@ describe('sendMessageBase', () => {
it('should handle response text correctly when isError is false', async () => {
const partialDataText = 'Partial data';
const chatResponse: ChatResponse = {
"finish_reason": "",
response: 'Hello, user!',
isError: false,
user: 'user',
@@ -106,6 +108,7 @@ describe('sendMessageBase', () => {
it('should handle response text correctly when isError is true', async () => {
const partialDataText = 'Partial data';
const chatResponse: ChatResponse = {
"finish_reason": "",
response: 'Error occurred!',
isError: true,
user: 'user',
@@ -130,12 +133,9 @@ describe('sendMessageBase', () => {
workspaceFoldersFirstPathStub.returns('./');
getConfigurationStub.withArgs('DevChat', 'Access_Key_DevChat').returns(process.env.TEST_DEVCHAT_KEY);
getConfigurationStub.withArgs('DevChat', 'OpenAI.model').returns('gpt-4');
getConfigurationStub.withArgs('DevChat', 'OpenAI.temperature').returns(0);
getConfigurationStub.withArgs('DevChat', 'OpenAI.stream').returns('true');
getConfigurationStub.withArgs('DevChat', 'llmModel').returns('OpenAI');
getConfigurationStub.withArgs('DevChat', 'OpenAI.tokensPerPrompt').returns(9000);
const result = await sendMessageBase(message, handlePartialData);
expect(result).to.be.an('object');
expect(result!.command).to.equal('receiveMessage');
@@ -157,12 +157,9 @@ describe('sendMessageBase', () => {
workspaceFoldersFirstPathStub.returns('./');
getConfigurationStub.withArgs('DevChat', 'Access_Key_DevChat').returns('sk-KvH7ZCtHmFDCBTqH0jUv');
getConfigurationStub.withArgs('DevChat', 'OpenAI.model').returns('gpt-4');
getConfigurationStub.withArgs('DevChat', 'OpenAI.temperature').returns('0');
getConfigurationStub.withArgs('DevChat', 'OpenAI.stream').returns('true');
getConfigurationStub.withArgs('DevChat', 'llmModel').returns('OpenAI');
getConfigurationStub.withArgs('DevChat', 'OpenAI.tokensPerPrompt').returns('9000');
const result = await sendMessageBase(message, handlePartialData);
expect(result).to.be.an('object');
expect(result!.command).to.equal('receiveMessage');
@@ -186,12 +183,9 @@ describe('sendMessageBase', () => {
workspaceFoldersFirstPathStub.returns('./');
getConfigurationStub.withArgs('DevChat', 'Access_Key_DevChat').returns(process.env.TEST_DEVCHAT_KEY);
getConfigurationStub.withArgs('DevChat', 'OpenAI.model').returns('gpt-4');
getConfigurationStub.withArgs('DevChat', 'OpenAI.temperature').returns(0);
getConfigurationStub.withArgs('DevChat', 'OpenAI.stream').returns('true');
getConfigurationStub.withArgs('DevChat', 'llmModel').returns('OpenAI');
getConfigurationStub.withArgs('DevChat', 'OpenAI.tokensPerPrompt').returns(9000);
// Start sendMessageBase in a separate Promise
const sendMessagePromise = sendMessageBase(message, handlePartialData);

View File

@@ -39,30 +39,6 @@ describe('ApiKeyManager', () => {
});
});
describe('getEndPoint', () => {
it('should return the configuration endpoint', () => {
sinon.stub(UiUtilWrapper, 'getConfiguration').returns('https://config-endpoint.com');
const endPoint = ApiKeyManager.getEndPoint('sk-key');
expect(endPoint).to.equal('https://config-endpoint.com');
});
it('should return the environment variable endpoint', () => {
sinon.stub(UiUtilWrapper, 'getConfiguration').returns(undefined);
process.env.OPENAI_API_BASE = 'https://env-endpoint.com';
const endPoint = ApiKeyManager.getEndPoint('sk-key');
expect(endPoint).to.equal('https://env-endpoint.com');
});
it('should return the default endpoint for DC keys', () => {
sinon.stub(UiUtilWrapper, 'getConfiguration').returns(undefined);
const endPoint = ApiKeyManager.getEndPoint('DC.key');
expect(endPoint).to.equal('https://api.devchat.ai/v1');
});
});
describe('getKeyType', () => {
it('should return "sk" for sk keys', () => {
const keyType = ApiKeyManager.getKeyType('sk-key');