{
  "name": "devchat",
  "displayName": "DevChat",
  "description": "Write prompts, not code",
  "version": "0.1.65",
  "icon": "assets/devchat.png",
  "publisher": "merico",
  "engines": {
    "vscode": "^1.75.0"
  },
  "repository": {
    "type": "git",
    "url": "https://github.com/devchat-ai/devchat-vscode.git"
  },
  "categories": [
    "Programming Languages",
    "Snippets",
    "Machine Learning",
    "Education"
  ],
  "keywords": [
    "ai",
    "anthropic",
    "assistant",
    "autocomplete",
    "bot",
    "chat",
    "chatbot",
    "codegen",
    "c#",
    "c++",
    "codex",
    "co-pilot",
    "devchat",
    "documentation",
    "go",
    "golang",
    "intellisense",
    "java",
    "javascript",
    "keybindings",
    "kotlin",
    "llm",
    "model",
    "openai",
    "php",
    "pilot",
    "python",
    "refactor",
    "ruby",
    "sourcegraph",
    "snippets",
    "test",
    "typescript"
  ],
  "activationEvents": [
    "*"
  ],
  "main": "./dist/extension.js",
  "files": [
    "dist/*",
    "bin/*",
    "assets/*",
    "tools/*",
    "workflows/*",
    "workflowsCommands/*",
    "LICENSE",
    "README.md"
  ],
  "contributes": {
    "configuration": {
      "title": "DevChat",
      "properties": {
        "devchat.Provider.devchat": {
          "type": "object",
          "properties": {
            "access_key": {
              "type": "string",
              "default": "",
              "description": "[required*] Specify the access key for the selected provider."
            },
            "api_base": {
              "type": "string",
              "default": "",
              "description": "[optional*] Specify the API base URL for the selected provider. Leave it blank to use the default API base."
            }
          },
          "required": [
            "access_key"
          ],
          "additionalProperties": false,
          "order": 0,
          "markdownDescription": "Specify the properties for the devchat provider."
        },
        "devchat.Provider.openai": {
          "type": "object",
          "properties": {
            "access_key": {
              "type": "string",
              "default": "",
              "description": "[required*] Specify the access key for the selected provider."
            },
            "api_base": {
              "type": "string",
              "default": "",
              "description": "[optional*] Specify the API base URL for the selected provider. Leave it blank to use the default API base."
            }
          },
          "required": [
            "access_key"
          ],
          "additionalProperties": false,
          "order": 1,
          "markdownDescription": "Specify the properties for the openai provider."
        },
        "devchat.Model.gpt-3-5": {
          "type": "object",
          "properties": {
            "provider": {
              "type": "string",
              "default": "devchat",
              "enum": [
                "devchat",
                "openai"
              ],
              "description": "[required*] Specify which provider hosts this LLM model."
            },
            "temperature": {
              "type": "number",
              "default": 0.3,
              "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
            },
            "max_tokens": {
              "type": "number",
              "default": 1000,
              "description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length."
            },
            "max_input_tokens": {
              "type": "number",
              "default": 13000,
              "description": "[optional*] Maximum text length for input to the AI."
            },
            "presence_penalty": {
              "type": "number",
              "default": 0,
              "description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."
            },
            "frequency_penalty": {
              "type": "number",
              "default": 0,
              "description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
            }
          },
          "required": [
            "provider"
          ],
          "additionalProperties": false,
          "order": 2,
          "markdownDescription": "Specify the properties for the gpt-3.5-turbo model. Leave it blank if you won't use this LLM model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature)"
        },
        "devchat.Model.gpt-4": {
          "type": "object",
          "properties": {
            "provider": {
              "type": "string",
              "default": "devchat",
              "enum": [
                "devchat",
                "openai"
              ],
              "description": "[required*] Specify which provider hosts this LLM model."
            },
            "temperature": {
              "type": "number",
              "default": 0.3,
              "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
            },
            "max_tokens": {
              "type": "number",
              "default": 1000,
              "description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length."
            },
            "max_input_tokens": {
              "type": "number",
              "default": 6000,
              "description": "[optional*] Maximum text length for input to the AI."
            },
            "presence_penalty": {
              "type": "number",
              "default": 0,
              "description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."
            },
            "frequency_penalty": {
              "type": "number",
              "default": 0,
              "description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
            }
          },
          "additionalProperties": false,
          "order": 5,
          "markdownDescription": "Specify the properties for the gpt-4 model. Leave it blank if you won't use this LLM model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature)"
        },
        "devchat.Model.gpt-4-turbo": {
          "type": "object",
          "properties": {
            "provider": {
              "type": "string",
              "default": "devchat",
              "enum": [
                "devchat",
                "openai"
              ],
              "description": "[required*] Specify which provider hosts this LLM model."
            },
            "temperature": {
              "type": "number",
              "default": 0.3,
              "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
            },
            "max_tokens": {
              "type": "number",
              "default": 1000,
              "description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length."
            },
            "max_input_tokens": {
              "type": "number",
              "default": 32000,
              "description": "[optional*] Maximum text length for input to the AI."
            },
            "presence_penalty": {
              "type": "number",
              "default": 0,
              "description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."
            },
            "frequency_penalty": {
              "type": "number",
              "default": 0,
              "description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
            }
          },
          "additionalProperties": false,
          "order": 6,
          "markdownDescription": "Specify the properties for the gpt-4-turbo model. Leave it blank if you won't use this LLM model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature)"
        },
        "devchat.Model.claude-3-sonnet": {
          "type": "object",
          "properties": {
            "provider": {
              "type": "string",
              "default": "devchat",
              "enum": [
                "devchat"
              ],
              "description": "[required*] Specify which provider hosts this LLM model."
            },
            "temperature": {
              "type": "number",
              "default": 0.3,
              "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
            },
            "max_tokens": {
              "type": "number",
              "default": 1000,
              "description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length."
            },
            "max_input_tokens": {
              "type": "number",
              "default": 32000,
              "description": "[optional*] Maximum text length for input to the AI."
            }
          },
          "additionalProperties": false,
          "order": 7,
          "markdownDescription": "Specify the properties for the claude-3-sonnet model. Leave it blank if you won't use this LLM model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature)"
        },
        "devchat.Model.claude-3-opus": {
          "type": "object",
          "properties": {
            "provider": {
              "type": "string",
              "default": "devchat",
              "enum": [
                "devchat"
              ],
              "description": "[required*] Specify which provider hosts this LLM model."
            },
            "temperature": {
              "type": "number",
              "default": 0.3,
              "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
            },
            "max_tokens": {
              "type": "number",
              "default": 1000,
              "description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length."
            },
            "max_input_tokens": {
              "type": "number",
              "default": 32000,
              "description": "[optional*] Maximum text length for input to the AI."
            }
          },
          "additionalProperties": false,
          "order": 7,
          "markdownDescription": "Specify the properties for the claude-3-opus model. Leave it blank if you won't use this LLM model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature)"
        },
        "devchat.Model.xinghuo-2": {
          "type": "object",
          "properties": {
            "provider": {
              "type": "string",
              "default": "devchat",
              "enum": [
                "devchat"
              ],
              "description": "[required*] Specify which provider hosts this LLM model."
            },
            "temperature": {
              "type": "number",
              "default": 0.5,
              "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
            },
            "max_tokens": {
              "type": "number",
              "default": 2048,
              "description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length."
            },
            "max_input_tokens": {
              "type": "number",
              "default": 6000,
              "description": "[optional*] Maximum text length for input to the AI."
            }
          },
          "additionalProperties": false,
          "order": 8,
          "markdownDescription": "Specify the properties for the xinghuo-2 model. Leave it blank if you won't use this LLM model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature)"
        },
        "devchat.Model.chatglm_pro": {
          "type": "object",
          "properties": {
            "provider": {
              "type": "string",
              "default": "devchat",
              "enum": [
                "devchat"
              ],
              "description": "[required*] Specify which provider hosts this LLM model."
            },
            "temperature": {
              "type": "number",
              "default": 0.5,
              "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
            },
            "max_tokens": {
              "type": "number",
              "default": 4000,
              "description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length."
            },
            "max_input_tokens": {
              "type": "number",
              "default": 8000,
              "description": "[optional*] Maximum text length for input to the AI."
            }
          },
          "additionalProperties": false,
          "order": 9,
          "markdownDescription": "Specify the properties for the chatglm_pro model. Leave it blank if you won't use this LLM model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature)"
        },
        "devchat.Model.ERNIE-Bot": {
          "type": "object",
          "properties": {
            "provider": {
              "type": "string",
              "default": "devchat",
              "enum": [
                "devchat"
              ],
              "description": "[required*] Specify which provider hosts this LLM model."
            },
            "temperature": {
              "type": "number",
              "default": 0.5,
              "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
            },
            "max_tokens": {
              "type": "number",
              "default": 4000,
              "description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length."
            },
            "max_input_tokens": {
              "type": "number",
              "default": 8000,
              "description": "[optional*] Maximum text length for input to the AI."
            }
          },
          "additionalProperties": false,
          "order": 10,
          "markdownDescription": "Specify the properties for the ERNIE-Bot model. Leave it blank if you won't use this LLM model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature)"
        },
        "devchat.Model.CodeLlama-70b": {
          "type": "object",
          "properties": {
            "provider": {
              "type": "string",
              "default": "devchat",
              "enum": [
                "devchat"
              ],
              "description": "[required*] Specify which provider hosts this LLM model."
            },
            "temperature": {
              "type": "number",
              "default": 0.5,
              "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
            },
            "max_tokens": {
              "type": "number",
              "default": 2000,
              "description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length."
            },
            "max_input_tokens": {
              "type": "number",
              "default": 4000,
              "description": "[optional*] Maximum text length for input to the AI."
            }
          },
          "additionalProperties": false,
          "order": 11,
          "markdownDescription": "Specify the properties for the togetherai/codellama/CodeLlama-70b-Instruct-hf model. Leave it blank if you won't use this LLM model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature)"
        },
        "devchat.Model.Mixtral-8x7B": {
          "type": "object",
          "properties": {
            "provider": {
              "type": "string",
              "default": "devchat",
              "enum": [
                "devchat"
              ],
              "description": "[required*] Specify which provider hosts this LLM model."
            },
            "temperature": {
              "type": "number",
              "default": 0.5,
              "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
            },
            "max_tokens": {
              "type": "number",
              "default": 2000,
              "description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length."
            },
            "max_input_tokens": {
              "type": "number",
              "default": 4000,
              "description": "[optional*] Maximum text length for input to the AI."
            }
          },
          "additionalProperties": false,
          "order": 11,
          "markdownDescription": "Specify the properties for the togetherai/mistralai/Mixtral-8x7B-Instruct-v0.1 model. Leave it blank if you won't use this LLM model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature)"
        },
        "devchat.Model.llama-2-70b-chat": {
          "type": "object",
          "properties": {
            "provider": {
              "type": "string",
              "default": "devchat",
              "enum": [
                "devchat"
              ],
              "description": "[required*] Specify which provider hosts this LLM model."
            },
            "temperature": {
              "type": "number",
              "default": 0.5,
              "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
            },
            "max_tokens": {
              "type": "number",
              "default": 2000,
              "description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length."
            },
            "max_input_tokens": {
              "type": "number",
              "default": 4000,
              "description": "[optional*] Maximum text length for input to the AI."
            }
          },
          "additionalProperties": false,
          "order": 12,
          "markdownDescription": "Specify the properties for the llama-2-70b-chat model. Leave it blank if you won't use this LLM model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature)"
        },
        "devchat.Model.Minimax-abab6": {
          "type": "object",
          "properties": {
            "provider": {
              "type": "string",
              "default": "devchat",
              "enum": [
                "devchat"
              ],
              "description": "[required*] Specify which provider hosts this LLM model."
            },
            "temperature": {
              "type": "number",
              "default": 0.5,
              "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
            },
            "max_tokens": {
              "type": "number",
              "default": 2000,
              "description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length."
            },
            "max_input_tokens": {
              "type": "number",
              "default": 4000,
              "description": "[optional*] Maximum text length for input to the AI."
            }
          },
          "additionalProperties": false,
          "order": 12,
          "markdownDescription": "Specify the properties for the minimax/abab6-chat model. Leave it blank if you won't use this LLM model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature)"
        },
        "devchat.defaultModel": {
          "oneOf": [
            {
              "type": "string",
              "default": "gpt-3.5-turbo",
              "enum": [
                "gpt-3.5-turbo",
                "gpt-4",
                "gpt-4-turbo-preview",
                "claude-3-sonnet",
                "claude-3-opus",
                "xinghuo-3.5",
                "GLM-4",
                "ERNIE-Bot-4.0",
                "togetherai/codellama/CodeLlama-70b-Instruct-hf",
                "togetherai/mistralai/Mixtral-8x7B-Instruct-v0.1",
                "minimax/abab6-chat",
                "llama-2-70b-chat"
              ]
            },
            {
              "type": "string",
              "default": "gpt-3.5-turbo"
            }
          ],
          "order": 13,
          "markdownDescription": "Specify the default LLM model for DevChat. [Price of each model](https://web.devchat.ai/pricing)"
        },
        "DevChat.OpenAI.stream": {
          "type": "boolean",
          "default": true,
          "order": 14,
          "description": "Whether to stream a response."
        },
        "DevChat.EnableFunctionCalling": {
          "type": "boolean",
          "default": false,
          "order": 15,
          "description": "Enable function calling for GPT."
        },
        "DevChat.betaInvitationCode": {
          "type": "string",
          "default": "",
          "order": 17,
          "description": "The invitation code for beta testing."
        },
        "DevChat.maxLogCount": {
          "type": "number",
          "default": 20,
          "order": 18,
          "description": "Limit the number of prompts in the chat view."
        },
        "DevChat.PythonForChat": {
          "type": "string",
          "default": "",
          "input": {
            "type": "file",
            "filter": {
              "All files": [
                "python*"
              ]
            }
          },
          "description": "Which Python interpreter to use with DevChat.",
          "order": 19
        },
        "DevChat.PythonForCommands": {
          "type": "string",
          "default": "",
          "description": "Path to the Python virtual environment for AskCode.",
          "order": 20
        },
        "DevChat.Language": {
          "type": "string",
          "default": "",
          "enum": [
            "en",
            "zh"
          ],
          "enumDescriptions": [
            "English",
            "Simplified Chinese"
          ],
          "description": "The language used for the DevChat interface.",
          "order": 21
        }
      }
    },
    "viewsContainers": {
      "activitybar": [
        {
          "id": "devchat-sidebar",
          "title": "DevChat",
          "icon": "assets/devchat_icon.svg"
        }
      ]
    },
    "views": {
      "devchat-sidebar": [
        {
          "type": "webview",
          "id": "devchat-view",
          "name": "DevChat"
        }
      ]
    },
    "commands": [
      {
        "command": "devchat.applyDiffResult",
        "title": "Apply Diff",
        "icon": "assets/devchat_apply.svg"
      },
      {
        "command": "devchat.createEntry",
        "title": "Create Entry"
      },
      {
        "command": "DevChat.AccessKey.OpenAI",
        "title": "Input OpenAI API Key",
        "category": "DevChat"
      },
      {
        "command": "DevChat.AccessKey.DevChat",
        "title": "Input DevChat Access Key",
        "category": "DevChat"
      },
      {
        "command": "DevChat.PythonPath",
        "title": "Set Python Path",
        "category": "DevChat"
      },
      {
        "command": "devchat.openChatPanel",
        "title": "DevChat"
      },
      {
        "command": "devchat.addContext",
        "title": "DevChat: Add to DevChat"
      },
      {
        "command": "devchat.askForCode",
        "title": "DevChat: Add to DevChat"
      },
      {
        "command": "devchat.askForFile",
        "title": "DevChat: Add to DevChat"
      },
      {
        "command": "devchat.addConext_chinese",
        "title": "DevChat: 添加到DevChat"
      },
      {
        "command": "devchat.askForCode_chinese",
        "title": "DevChat: 添加到DevChat"
      },
      {
        "command": "devchat.askForFile_chinese",
        "title": "DevChat: 添加到DevChat"
      },
      {
        "command": "DevChat.InstallCommands",
        "title": "Install Slash Commands",
        "category": "DevChat"
      },
      {
        "command": "DevChat.UpdataChatModels",
        "title": "Update Chat Models",
        "category": "DevChat"
      },
      {
        "command": "DevChat.InstallCommandPython",
        "title": "Install Python for Commands",
        "category": "DevChat"
      },
      {
        "command": "DevChat.Chat",
        "title": "Chat with DevChat",
        "category": "DevChat"
      },
      {
        "command": "devchat.explain",
        "title": "DevChat: Generate Explanation"
      },
      {
        "command": "devchat.explain_chinese",
        "title": "DevChat: 代码解释"
      },
      {
        "command": "devchat.comments",
        "title": "DevChat: Generate Comments"
      },
      {
        "command": "devchat.comments_chinese",
        "title": "DevChat: 生成注释"
      },
      {
        "command": "devchat.fix",
        "title": "DevChat: Fix This"
      },
      {
        "command": "devchat.fix_chinese",
        "title": "DevChat: 修复此"
      }
    ],
    "keybindings": [
      {
        "command": "devchat.openChatPanel",
        "key": "ctrl+shift+/",
        "mac": "cmd+shift+/"
      }
    ],
    "menus": {
      "editor/title": [
        {
          "command": "devchat.applyDiffResult",
          "group": "navigation",
          "when": "editorTextFocus && isInDiffEditor"
        }
      ],
      "commandPalette": [
        {
          "command": "devchat.applyDiffResult",
          "when": "false"
        },
        {
          "command": "devchat.addContext",
          "when": "false"
        },
        {
          "command": "devchat.askForCode",
          "when": "false"
        },
        {
          "command": "devchat.askForFile",
          "when": "false"
        },
        {
          "command": "devchat.addConext_chinese",
          "when": "false"
        },
        {
          "command": "devchat.askForCode_chinese",
          "when": "false"
        },
        {
          "command": "devchat.askForFile_chinese",
          "when": "false"
        },
        {
          "command": "DevChat.Chat",
          "when": "false"
        }
      ],
      "explorer/context": [
        {
          "when": "isChineseLocale && resourceLangId != 'git'",
          "command": "devchat.addConext_chinese",
          "group": "navigation"
        },
        {
          "when": "!isChineseLocale && resourceLangId != 'git'",
          "command": "devchat.addContext",
          "group": "navigation"
        }
      ],
      "editor/context": [
        {
          "command": "devchat.askForCode_chinese",
          "when": "isChineseLocale && editorTextFocus && editorHasSelection",
          "group": "navigation"
        },
        {
          "command": "devchat.askForCode",
          "when": "!isChineseLocale && editorTextFocus && editorHasSelection",
          "group": "navigation"
        },
        {
          "command": "devchat.askForFile",
          "when": "!isChineseLocale && editorTextFocus && !editorHasSelection",
          "group": "navigation"
        },
        {
          "command": "devchat.askForFile_chinese",
          "when": "isChineseLocale && editorTextFocus && !editorHasSelection",
          "group": "navigation"
        },
        {
          "command": "devchat.explain",
          "when": "!isChineseLocale && editorTextFocus && editorHasSelection",
          "group": "navigation"
        },
        {
          "command": "devchat.explain_chinese",
          "when": "isChineseLocale && editorTextFocus && editorHasSelection",
          "group": "navigation"
        },
        {
          "command": "devchat.comments",
          "when": "!isChineseLocale && editorTextFocus && editorHasSelection",
          "group": "navigation"
        },
        {
          "command": "devchat.comments_chinese",
          "when": "isChineseLocale && editorTextFocus && editorHasSelection",
          "group": "navigation"
        },
        {
          "command": "devchat.fix",
          "when": "!isChineseLocale && editorTextFocus && editorHasSelection",
          "group": "navigation"
        },
        {
          "command": "devchat.fix_chinese",
          "when": "isChineseLocale && editorTextFocus && editorHasSelection",
          "group": "navigation"
        }
      ]
    }
  },
  "scripts": {
    "build:gui": "cd ./gui && yarn && yarn vscode",
    "vscode:uninstall": "node ./dist/uninstall",
    "vscode:prepublish": "npm run package",
    "compile": "webpack",
    "watch": "webpack --watch",
    "package": "webpack --mode production --devtool hidden-source-map",
    "compile-tests": "tsc -p . --outDir out",
    "watch-tests": "tsc -p . -w --outDir out",
    "pretest": "npm run compile-tests && npm run compile && npm run lint",
    "lint": "eslint src --ext ts",
    "test": "mocha",
    "build": "webpack --config webpack.config.js && cd ./gui && yarn && yarn vscode",
    "dev": "webpack serve --config webpack.config.js --open",
    "idea": "webpack --config webpack.idea.config.js && mv dist/main.js dist/main.html ../devchat-intellij/src/main/resources/static && echo '🎆done'"
  },
  "devDependencies": {
    "@babel/core": "^7.21.8",
    "@babel/preset-env": "^7.23.6",
    "@babel/preset-react": "^7.23.3",
    "@babel/preset-typescript": "^7.21.5",
    "@types/chai": "^4.3.5",
    "@types/glob": "^8.1.0",
    "@types/mocha": "^10.0.1",
    "@types/mock-fs": "^4.13.1",
    "@types/ncp": "^2.0.5",
    "@types/node": "16.x",
    "@types/proxyquire": "^1.3.28",
    "@types/shell-escape": "^0.2.1",
    "@types/sinon": "^10.0.15",
    "@types/uuid": "^9.0.1",
    "@types/vscode": "^1.75.0",
    "@typescript-eslint/eslint-plugin": "^5.56.0",
    "@typescript-eslint/parser": "^5.56.0",
    "@vscode/test-electron": "^2.3.0",
    "babel-loader": "^9.1.2",
    "chai": "^4.3.7",
    "copy-webpack-plugin": "^11.0.0",
    "dotenv": "^16.0.3",
    "eslint": "^8.36.0",
    "file-loader": "^6.2.0",
    "glob": "^8.1.0",
    "html-webpack-plugin": "^5.5.1",
    "jest": "^29.5.0",
    "json-loader": "^0.5.7",
    "mocha": "^10.2.0",
    "mock-fs": "^5.2.0",
    "proxyquire": "^2.1.3",
    "sinon": "^15.1.0",
    "ts-jest": "^29.1.0",
    "ts-loader": "^9.4.2",
    "ts-node": "^10.9.1",
    "typescript": "^4.9.5",
    "vscode-test": "^1.6.1",
    "webpack": "^5.76.3",
    "webpack-cli": "^5.0.1",
    "webpack-dev-server": "^4.13.3"
  },
  "dependencies": {
    "@iarna/toml": "^2.2.5",
    "@tiptap/extension-link": "^2.0.3",
    "@tiptap/pm": "^2.0.0",
    "@tiptap/starter-kit": "^2.0.3",
    "axios": "^1.3.6",
    "clean-webpack-plugin": "^4.0.0",
    "dayjs": "^1.11.10",
    "dotenv": "^16.0.3",
    "js-yaml": "^4.1.0",
    "mdast": "^3.0.0",
    "mobx": "^6.12.0",
    "ncp": "^2.0.0",
    "node-fetch": "^3.3.1",
    "nonce": "^1.0.4",
    "quote": "^0.4.0",
    "rehype-raw": "^6.1.1",
    "shell-escape": "^0.2.0",
    "string-argv": "^0.3.2",
    "tree-kill": "^1.2.2",
    "unified": "^11.0.3",
    "unist-util-visit": "^5.0.0",
    "uuid": "^9.0.0",
    "yaml": "^2.3.2"
  }
}