Merge pull request #565 from devchat-ai/clear_vscode_config

Refactor messageHandler.ts, workflowContextHandler.ts, historyMessagesBase.ts, and handlerRegister.ts
boob.yang 2024-06-18 12:56:25 +08:00 committed by GitHub
commit 549528bd7e
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
18 changed files with 38 additions and 1047 deletions

View File

@ -68,548 +68,6 @@
"README.md"
],
"contributes": {
"configuration": {
"title": "DevChat",
"properties": {
"devchat.Provider.devchat": {
"type": "object",
"properties": {
"access_key": {
"type": "string",
"default": "",
"description": "[required*] Specify access key for selected provider."
},
"api_base": {
"type": "string",
"default": "",
"description": "[optional*] Specify the api base for selected provider. Leave it blank if you want to use default api base."
}
},
"required": [
"access_key"
],
"additionalProperties": false,
"order": 0,
"markdownDescription": "Specify the properties for devchat provider."
},
"devchat.Provider.openai": {
"type": "object",
"properties": {
"access_key": {
"type": "string",
"default": "",
"description": "[required*] Specify access key for selected provider."
},
"api_base": {
"type": "string",
"default": "",
"description": "[optional*] Specify the api base for selected provider. Leave it blank if you want to use default api base."
}
},
"required": [
"access_key"
],
"additionalProperties": false,
"order": 1,
"markdownDescription": "Specify the properties for openai provider."
},
"devchat.Model.gpt-3-5": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"default": "devchat",
"enum": [
"devchat",
"openai"
],
"description": "[required*] Specify which provider host this llm model"
},
"temperature": {
"type": "number",
"default": 0.3,
"description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
},
"max_tokens": {
"type": "number",
"default": 1000,
"description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
},
"max_input_tokens": {
"type": "number",
"default": 13000,
"description": "[optional*] Maximum text length for input to AI."
},
"presence_penalty": {
"type": "number",
"default": 0,
"description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."
},
"frequency_penalty": {
"type": "number",
"default": 0,
"description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
}
},
"required": [
"provider",
"key"
],
"additionalProperties": false,
"order": 2,
"markdownDescription": "Specify the properties for gpt-3.5-turbo model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature)"
},
"devchat.Model.gpt-4": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"default": "devchat",
"enum": [
"devchat",
"openai"
],
"description": "[required*] Specify which provider host this llm model"
},
"temperature": {
"type": "number",
"default": 0.3,
"description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
},
"max_tokens": {
"type": "number",
"default": 1000,
"description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
},
"max_input_tokens": {
"type": "number",
"default": 6000,
"description": "[optional*] Maximum text length for input to AI."
},
"presence_penalty": {
"type": "number",
"default": 0,
"description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."
},
"frequency_penalty": {
"type": "number",
"default": 0,
"description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
}
},
"additionalProperties": false,
"order": 5,
"markdownDescription": "properties for gpt-4 model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature)"
},
"devchat.Model.gpt-4-turbo": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"default": "devchat",
"enum": [
"devchat",
"openai"
],
"description": "[required*] Specify which provider host this llm model"
},
"temperature": {
"type": "number",
"default": 0.3,
"description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
},
"max_tokens": {
"type": "number",
"default": 1000,
"description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
},
"max_input_tokens": {
"type": "number",
"default": 32000,
"description": "[optional*] Maximum text length for input to AI."
},
"presence_penalty": {
"type": "number",
"default": 0,
"description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."
},
"frequency_penalty": {
"type": "number",
"default": 0,
"description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
}
},
"additionalProperties": false,
"order": 6,
"markdownDescription": "properties for gpt-4-turbo model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature)"
},
"devchat.Model.claude-3-sonnet": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"default": "devchat",
"enum": [
"devchat"
],
"description": "[required*] which provider host this llm model"
},
"temperature": {
"type": "number",
"default": 0.3,
"description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
},
"max_tokens": {
"type": "number",
"default": 1000,
"description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
},
"max_input_tokens": {
"type": "number",
"default": 32000,
"description": "[optional*] Maximum text length for input to AI."
}
},
"additionalProperties": false,
"order": 7,
"markdownDescription": "properties for claude-3-Sonnet model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
},
"devchat.Model.claude-3-opus": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"default": "devchat",
"enum": [
"devchat"
],
"description": "[required*] which provider host this llm model"
},
"temperature": {
"type": "number",
"default": 0.3,
"description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
},
"max_tokens": {
"type": "number",
"default": 1000,
"description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
},
"max_input_tokens": {
"type": "number",
"default": 32000,
"description": "[optional*] Maximum text length for input to AI."
}
},
"additionalProperties": false,
"order": 7,
"markdownDescription": "properties for claude-3-Opus model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
},
"devchat.Model.xinghuo-2": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"default": "devchat",
"enum": [
"devchat"
],
"description": "[required*] which provider host this llm model"
},
"temperature": {
"type": "number",
"default": 0.5,
"description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
},
"max_tokens": {
"type": "number",
"default": 2048,
"description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
},
"max_input_tokens": {
"type": "number",
"default": 6000,
"description": "[optional*] Maximum text length for input to AI."
}
},
"additionalProperties": false,
"order": 8,
"markdownDescription": "properties for xinghuo-2 model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
},
"devchat.Model.chatglm_pro": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"default": "devchat",
"enum": [
"devchat"
],
"description": "[required*] which provider host this llm model"
},
"temperature": {
"type": "number",
"default": 0.5,
"description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
},
"max_tokens": {
"type": "number",
"default": 4000,
"description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
},
"max_input_tokens": {
"type": "number",
"default": 8000,
"description": "[optional*] Maximum text length for input to AI."
}
},
"additionalProperties": false,
"order": 9,
"markdownDescription": "properties for chatglm_pro model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
},
"devchat.Model.ERNIE-Bot": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"default": "devchat",
"enum": [
"devchat"
],
"description": "[required*] which provider host this llm model"
},
"temperature": {
"type": "number",
"default": 0.5,
"description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
},
"max_tokens": {
"type": "number",
"default": 4000,
"description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
},
"max_input_tokens": {
"type": "number",
"default": 8000,
"description": "[optional*] Maximum text length for input to AI."
}
},
"additionalProperties": false,
"order": 10,
"markdownDescription": "properties for ERNIE-Bot model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
},
"devchat.Model.CodeLlama-70b": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"default": "devchat",
"enum": [
"devchat"
],
"description": "[required*] which provider host this llm model"
},
"temperature": {
"type": "number",
"default": 0.5,
"description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
},
"max_tokens": {
"type": "number",
"default": 2000,
"description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
},
"max_input_tokens": {
"type": "number",
"default": 4000,
"description": "[optional*] Maximum text length for input to AI."
}
},
"additionalProperties": false,
"order": 11,
"markdownDescription": "properties for togetherai/codellama/CodeLlama-70b-Instruct-hf. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
},
"devchat.Model.Mixtral-8x7B": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"default": "devchat",
"enum": [
"devchat"
],
"description": "[required*] which provider host this llm model"
},
"temperature": {
"type": "number",
"default": 0.5,
"description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
},
"max_tokens": {
"type": "number",
"default": 2000,
"description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
},
"max_input_tokens": {
"type": "number",
"default": 4000,
"description": "[optional*] Maximum text length for input to AI."
}
},
"additionalProperties": false,
"order": 11,
"markdownDescription": "properties for togetherai/mistralai/Mixtral-8x7B-Instruct-v0.1. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
},
"devchat.Model.llama-2-70b-chat": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"default": "devchat",
"enum": [
"devchat"
],
"description": "[required*] which provider host this llm model"
},
"temperature": {
"type": "number",
"default": 0.5,
"description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
},
"max_tokens": {
"type": "number",
"default": 2000,
"description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
},
"max_input_tokens": {
"type": "number",
"default": 4000,
"description": "[optional*] Maximum text length for input to AI."
}
},
"additionalProperties": false,
"order": 12,
"markdownDescription": "properties for llama-2-70b-chat. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
},
"devchat.Model.Minimax-abab6": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"default": "devchat",
"enum": [
"devchat"
],
"description": "[required*] which provider host this llm model"
},
"temperature": {
"type": "number",
"default": 0.5,
"description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
},
"max_tokens": {
"type": "number",
"default": 2000,
"description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
},
"max_input_tokens": {
"type": "number",
"default": 4000,
"description": "[optional*] Maximum text length for input to AI."
}
},
"additionalProperties": false,
"order": 12,
"markdownDescription": "properties for minimax/abab6-chat. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
},
"devchat.defaultModel": {
"oneOf": [
{
"type": "string",
"default": "gpt-3.5-turbo",
"enum": [
"gpt-3.5-turbo",
"gpt-4",
"gpt-4-turbo-preview",
"claude-3-sonnet",
"claude-3-opus",
"xinghuo-3.5",
"GLM-4",
"ERNIE-Bot-4.0",
"togetherai/codellama/CodeLlama-70b-Instruct-hf",
"togetherai/mistralai/Mixtral-8x7B-Instruct-v0.1",
"minimax/abab6-chat",
"llama-2-70b-chat"
]
},
{
"type": "string",
"default": "gpt-3.5-turbo"
}
],
"order": 13,
"markdownDescription": "Specify the default llm model for DevChat. [Price of each model](https://web.devchat.ai/pricing)"
},
"DevChat.OpenAI.stream": {
"type": "boolean",
"default": true,
"order": 14,
"description": "Whether to stream a response."
},
"DevChat.EnableFunctionCalling": {
"type": "boolean",
"default": false,
"order": 15,
"description": "Enable function calling for GPT."
},
"DevChat.betaInvitationCode": {
"type": "string",
"default": "",
"order": 17,
"description": "The invitation code for beta testing."
},
"DevChat.maxLogCount": {
"type": "number",
"default": 20,
"order": 18,
"description": "Limit the number of prompts in the chat view."
},
"DevChat.PythonForChat": {
"type": "string",
"default": "",
"input": {
"type": "file",
"filter": {
"All files": [
"python*"
]
}
},
"description": "Which Python interpreter to use with DevChat?",
"order": 19
},
"DevChat.PythonForCommands": {
"type": "string",
"default": "",
"description": "Path to the Python virtual environment for AskCode.",
"order": 20
},
"DevChat.Language": {
"type": "string",
"default": "",
"enum": [
"en",
"zh"
],
"enumDescriptions": [
"English",
"Simplified Chinese"
],
"description": "The language used for DevChat interface.",
"order": 21
}
}
},
"viewsContainers": {
"activitybar": [
{
@ -634,20 +92,6 @@
"title": "Apply Diff",
"icon": "assets/devchat_apply.svg"
},
{
"command": "devchat.createEntry",
"title": "Create Entry"
},
{
"command": "DevChat.AccessKey.OpenAI",
"title": "Input OpenAI API Key",
"category": "DevChat"
},
{
"command": "DevChat.AccessKey.DevChat",
"title": "Input DevChat Access Key",
"category": "DevChat"
},
{
"command": "DevChat.PythonPath",
"title": "Set Python Path",
@ -686,11 +130,6 @@
"title": "Install slash commands",
"category": "DevChat"
},
{
"command": "DevChat.UpdataChatModels",
"title": "Update Chat Models",
"category": "DevChat"
},
{
"command": "DevChat.Chat",
"title": "Chat with DevChat",
@ -720,11 +159,6 @@
"command": "devchat.fix_chinese",
"title": "Devchat:修复此"
},
{
"command": "DevChat.codecomplete",
"title": "Codecomplete",
"category": "DevChat"
},
{
"command": "DevChat.codecomplete_callback",
"title": "Codecomplete Callback",
@ -741,11 +175,6 @@
"command": "devchat.openChatPanel",
"key": "ctrl+shift+/",
"mac": "cmd+shift+/"
},
{
"command": "DevChat.codecomplete",
"key": "ctrl+shift+'",
"mac": "cmd+shift+'"
}
],
"menus": {
@ -792,6 +221,38 @@
{
"command": "DevChat.quickFix",
"when": "false"
},
{
"command": "devchat.explain",
"when": "false"
},
{
"command": "devchat.explain_chinese",
"when": "false"
},
{
"command": "devchat.comments",
"when": "false"
},
{
"command": "devchat.comments_chinese",
"when": "false"
},
{
"command": "devchat.fix",
"when": "false"
},
{
"command": "devchat.fix_chinese",
"when": "false"
},
{
"command": "DevChat.codecomplete_callback",
"when": "false"
},
{
"command": "DevChat.InstallCommands",
"when": "false"
}
],
"explorer/context": [

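For orientation: the removed contributes.configuration block above was the old way users tuned providers and models through VS Code settings; elsewhere in this diff the extension writes such values through DevChatConfig instead. A minimal sketch of how one of the deleted settings blocks would have been read inside the extension (the helper name is illustrative, not from this repository):

import * as vscode from "vscode";

// Sketch only: read one of the now-removed "devchat.*" settings objects.
// "Model.gpt-4" mirrors the deleted schema key "devchat.Model.gpt-4".
function readLegacyModelSetting(modelKey: string): Record<string, unknown> | undefined {
    const cfg = vscode.workspace.getConfiguration("devchat");
    return cfg.get<Record<string, unknown>>(modelKey); // e.g. { provider, temperature, max_tokens, ... }
}

const gpt4Settings = readLegacyModelSetting("Model.gpt-4"); // hypothetical usage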
View File

@ -1,33 +0,0 @@
import * as path from 'path';
import { ChatContext } from './contextManager';
import { createTempSubdirectory, runCommandStringAndWriteOutput } from '../util/commonUtil';
import { logger } from '../util/logger';
import { UiUtilWrapper } from '../util/uiUtil';
export const customCommandContext: ChatContext = {
name: '<Custom Local Command>',
description: 'Click this and enter your desired command to run. The return will be added to the context.',
handler: async () => {
// popup a dialog to ask for the command line to run
const customCommand = await UiUtilWrapper.showInputBox({
prompt: 'Input your custom command',
placeHolder: 'for example: ls -l'
});
// Check whether the user entered a command
if (customCommand) {
const tempDir = await createTempSubdirectory('devchat/context');
const diffFile = path.join(tempDir, 'custom.txt');
logger.channel()?.trace(`Your custom command is: ${customCommand}`);
const result = await runCommandStringAndWriteOutput(customCommand, diffFile);
logger.channel()?.trace(` exit code:`, result.exitCode);
logger.channel()?.trace(` stdout:`, result.stdout);
logger.channel()?.trace(` stderr:`, result.stderr);
return [`[context|${diffFile}]`];
}
return [];
},
};

View File

@ -1,23 +0,0 @@
import * as path from 'path';
import { ChatContext } from './contextManager';
import { createTempSubdirectory, runCommandStringAndWriteOutput } from '../util/commonUtil';
import { logger } from '../util/logger';
export const gitDiffContext: ChatContext = {
name: 'git diff HEAD',
description: 'all changes since the last commit',
handler: async () => {
const tempDir = await createTempSubdirectory('devchat/context');
const diffFile = path.join(tempDir, 'diff_all.txt');
logger.channel()?.info(`git diff HEAD:`);
const result = await runCommandStringAndWriteOutput('git diff HEAD', diffFile);
logger.channel()?.info(` exit code:`, result.exitCode);
logger.channel()?.debug(` stdout:`, result.stdout);
logger.channel()?.debug(` stderr:`, result.stderr);
return [`[context|${diffFile}]`];
},
};

View File

@ -1,23 +0,0 @@
import * as path from 'path';
import { ChatContext } from './contextManager';
import { createTempSubdirectory, runCommandStringAndWriteOutput } from '../util/commonUtil';
import { logger } from '../util/logger';
export const gitDiffCachedContext: ChatContext = {
name: 'git diff --cached',
description: 'the staged changes since the last commit',
handler: async () => {
const tempDir = await createTempSubdirectory('devchat/context');
const diffFile = path.join(tempDir, 'diff_cached.txt');
logger.channel()?.info(`git diff --cached:`);
const result = await runCommandStringAndWriteOutput('git diff --cached', diffFile);
logger.channel()?.info(` exit code:`, result.exitCode);
logger.channel()?.debug(` stdout:`, result.stdout);
logger.channel()?.debug(` stderr:`, result.stderr);
return [`[context|${diffFile}]`];
},
};

View File

@ -2,7 +2,6 @@ import * as path from 'path';
import { logger } from '../util/logger';
import { createTempSubdirectory } from '../util/commonUtil';
import CustomContexts from './customContext';
import { UiUtilWrapper } from '../util/uiUtil';
@ -10,8 +9,8 @@ export interface ChatContext {
name: string;
description: string;
handler: () => Promise<string[]>;
}
}
export class ChatContextManager {
private static instance: ChatContextManager;
private contexts: ChatContext[] = [];
@ -33,35 +32,6 @@ export class ChatContextManager {
}
}
public async loadCustomContexts(workflowsDir: string): Promise<void> {
const customContexts = CustomContexts.getInstance();
customContexts.parseContexts(workflowsDir);
for (const customContext of customContexts.getContexts()) {
this.registerContext({
name: customContext.name,
description: customContext.description,
handler: async () => {
const tempDir = await createTempSubdirectory('devchat/context');
const outputFile = path.join(tempDir, 'context.txt');
logger.channel()?.info(`running: ${customContext.command.join(' ')}`);
const commandResult = await customContexts.handleCommand(customContext.name, outputFile);
logger.channel()?.info(` exit code:`, commandResult!.exitCode);
logger.channel()?.debug(` stdout:`, commandResult!.stdout);
logger.channel()?.debug(` stderr:`, commandResult!.stderr);
if (commandResult!.stderr) {
UiUtilWrapper.showErrorMessage(commandResult!.stderr);
}
return [`[context|${outputFile}]`];
},
});
}
}
getContextList(): ChatContext[] {
return this.contexts;
}
@ -75,5 +45,4 @@ export class ChatContextManager {
return [];
}
}
}

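After this change ChatContextManager keeps only the contexts registered in code; the custom-context loading path is gone. A rough sketch of the remaining registration API, based on the ChatContext interface and registerContext calls shown above (the example context itself is hypothetical):

import { ChatContextManager, ChatContext } from './contextManager';

// Hypothetical context, used only to illustrate the registration pattern.
const branchContext: ChatContext = {
    name: 'git branch',
    description: 'name of the current git branch',
    handler: async () => {
        // A real handler writes command output to a temp file and returns its context tag.
        return ['[context|/tmp/devchat/context/branch.txt]'];
    },
};

ChatContextManager.getInstance().registerContext(branchContext);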
View File

@ -1,109 +0,0 @@
import fs from 'fs';
import path from 'path';
import { logger } from '../util/logger';
import { runCommandStringArrayAndWriteOutput, runCommandStringAndWriteOutputSync, CommandResult } from '../util/commonUtil';
import { UiUtilWrapper } from '../util/uiUtil';
export interface CustomContext {
name: string;
description: string;
command: string[];
edit: boolean | undefined;
path: string;
}
class CustomContexts {
private static instance: CustomContexts | null = null;
private contexts: CustomContext[] = [];
private constructor() {
}
public static getInstance(): CustomContexts {
if (!CustomContexts.instance) {
CustomContexts.instance = new CustomContexts();
}
return CustomContexts.instance;
}
public parseContexts(workflowsDir: string): void {
this.contexts = [];
try {
const extensionDirs = fs.readdirSync(workflowsDir, { withFileTypes: true })
.filter(dirent => dirent.isDirectory())
.map(dirent => dirent.name);
for (const extensionDir of extensionDirs) {
const contextDirPath = path.join(workflowsDir, extensionDir, 'context');
if (fs.existsSync(contextDirPath)) {
const contextDirs = fs.readdirSync(contextDirPath, { withFileTypes: true })
.filter(dirent => dirent.isDirectory())
.map(dirent => dirent.name);
for (const contextDir of contextDirs) {
const settingsPath = path.join(contextDirPath, contextDir, '_setting_.json');
if (fs.existsSync(settingsPath)) {
const settings = JSON.parse(fs.readFileSync(settingsPath, 'utf-8'));
const context: CustomContext = {
name: settings.name,
description: settings.description,
command: settings.command,
edit: settings.edit,
path: path.join(contextDirPath, contextDir)
};
this.contexts.push(context);
}
}
}
}
} catch (error) {
logger.channel()?.error(`Failed to parse contexts due to error: ${error}`);
logger.channel()?.show();
}
}
public getContexts(): CustomContext[] {
return this.contexts;
}
public getContext(contextName: string): CustomContext | null {
const foundContext = this.contexts.find(context => context.name === contextName);
return foundContext ? foundContext : null;
}
public async handleCommand(contextName: string, outputFile: string): Promise<CommandResult | null> {
const context = this.getContext(contextName);
if (!context) {
logger.channel()?.error(`Context "${contextName}" not found`);
logger.channel()?.show();
return null;
}
const contextDir = context.path;
const commandArray = context.command.slice(); // Create a copy of the command array
commandArray.forEach((arg, index) => {
commandArray[index] = arg.replace('${CurDir}', contextDir);
});
if (commandArray.length === 1) {
if (context.edit === true) {
// prompt input box for user to edit the commandArray[0]
const newCommand: string | undefined = await UiUtilWrapper.showInputBox({
placeHolder: 'Edit the command',
value: commandArray[0]
});
if (!newCommand) {
return { exitCode: 1, stdout: '', stderr: 'Command is empty' };
}
return runCommandStringAndWriteOutputSync(newCommand!, outputFile);
}
return runCommandStringAndWriteOutputSync(commandArray[0], outputFile);
}
return await runCommandStringArrayAndWriteOutput(commandArray, outputFile);
}
}
export default CustomContexts;

View File

@ -1,16 +0,0 @@
import { ChatContextManager } from './contextManager';
import { gitDiffCachedContext } from './contextGitDiffCached';
import { gitDiffContext } from './contextGitDiff';
import { customCommandContext } from './contextCustomCommand';
// import { refDefsContext } from './contextRefDefs';
// import { defRefsContext } from './contextDefRefs';
const chatContextManager = ChatContextManager.getInstance();
// Register the contexts
chatContextManager.registerContext(gitDiffCachedContext);
chatContextManager.registerContext(gitDiffContext);
// chatContextManager.registerContext(refDefsContext);
// chatContextManager.registerContext(defRefsContext);
chatContextManager.registerContext(customCommandContext);

View File

@ -1,31 +1,24 @@
import * as vscode from "vscode";
import * as fs from "fs";
import * as os from "os";
import * as path from "path";
import * as util from "util";
import { sendFileSelectMessage, sendCodeSelectMessage } from "./util";
import { ExtensionContextHolder } from "../util/extensionContext";
import { FilePairManager } from "../util/diffFilePairs";
import { ApiKeyManager } from "../util/apiKey";
import { UiUtilWrapper } from "../util/uiUtil";
import { isValidApiKey } from "../handler/historyMessagesBase";
import { logger } from "../util/logger";
import { sendCommandListByDevChatRun } from '../handler/workflowCommandHandler';
import DevChat from "../toolwrapper/devchat";
import { createEnvByConda, createEnvByMamba } from '../util/python_installer/app_install';
import { installRequirements } from '../util/python_installer/package_install';
import { chatWithDevChat } from '../handler/chatHandler';
import { focusDevChatInput } from '../handler/focusHandler';
import { DevChatConfig } from '../util/config';
import { MessageHandler } from "../handler/messageHandler";
const readdir = util.promisify(fs.readdir);
const stat = util.promisify(fs.stat);
const mkdir = util.promisify(fs.mkdir);
const copyFile = util.promisify(fs.copyFile);
// Used to copy workflow commands to the user directory.
async function copyDirectory(src: string, dest: string): Promise<void> {
await mkdir(dest, { recursive: true });
const entries = await readdir(src, { withFileTypes: true });
@ -284,7 +277,8 @@ export function registerHandleUri(context: vscode.ExtensionContext) {
// Parse the URI and perform the corresponding action
if (uri.path.includes("accesskey")) {
const accessKey = uri.path.split("/")[2];
DevChatConfig.getInstance().set("provides.devchat.api_key", accessKey);
DevChatConfig.getInstance().set("provides.devchat.api_base", "https://api.devchat.ai/v1");
ensureChatPanel(context);
await new Promise((resolve, reject) => {
setTimeout(() => {

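The handleUri branch above stores an access key delivered through a URI callback by taking the third path segment after splitting on '/'. A small sketch of that parsing, inferred from the split index (the example key is a placeholder):

// For a callback path like "/accesskey/DC.xxxxxxxx",
// split("/") yields ["", "accesskey", "DC.xxxxxxxx"], so index 2 is the key.
function extractAccessKey(uriPath: string): string | undefined {
    if (!uriPath.includes("accesskey")) {
        return undefined;
    }
    return uriPath.split("/")[2];
}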
View File

@ -1,24 +0,0 @@
/*
check whether some feature is enabled
*/
import * as vscode from 'vscode';
import { regInMessage, regOutMessage } from '../util/reg_messages';
import { logger } from '../util/logger';
import { FT, FTs } from '../util/feature_flags/feature_toggles';
import { MessageHandler } from './messageHandler';
regInMessage({command: 'featureToggle', feature: 'feature name'});
regOutMessage({command: 'featureToggle', feature: 'feature name', enabled: true});
export async function featureToggle(message: any, panel: vscode.WebviewPanel|vscode.WebviewView): Promise<void> {
const enabled = FT(message.feature);
MessageHandler.sendMessage(panel, {command: 'featureToggle', feature: message.feature, enabled: enabled});
}
regInMessage({command: 'featureToggles'});
// eslint-disable-next-line @typescript-eslint/naming-convention
regOutMessage({command: 'featureToggles', features: {'feature name': true}});
export async function getFeatureToggles(message: any, panel: vscode.WebviewPanel|vscode.WebviewView): Promise<void> {
const featureTaggles = FTs();
MessageHandler.sendMessage(panel, {command: 'featureToggles', features: featureTaggles});
}

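The deleted featureToggleHandler answered 'featureToggle' and 'featureToggles' requests coming from the webview. For context, a hedged sketch of the webview side of that round trip; the standard webview messaging API is used, but the wiring below is illustrative rather than copied from this repository:

// Declared here so the sketch is self-contained; VS Code injects this function into webviews.
declare function acquireVsCodeApi(): { postMessage(message: unknown): void };

const vscodeApi = acquireVsCodeApi();
vscodeApi.postMessage({ command: 'featureToggle', feature: 'ask-code' });

window.addEventListener('message', (event: MessageEvent) => {
    const msg = event.data;
    if (msg.command === 'featureToggle') {
        console.log(`${msg.feature} enabled: ${msg.enabled}`);
    }
});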
View File

@ -4,7 +4,6 @@ import { replaceCodeBlockToFile } from './codeBlockHandler';
import { doCommit } from './commitHandler';
import { getHistoryMessages } from './historyMessagesHandler';
import { getWorkflowCommandList } from './workflowCommandHandler';
import { getWorkflowContextList } from './workflowContextHandler';
import { sendMessage, stopDevChat, regeneration, deleteChatMessage, userInput } from './sendMessage';
import { applyCodeWithDiff } from './diffHandler';
import { addConext } from './contextHandler';
@ -12,11 +11,9 @@ import { getContextDetail } from './contextHandler';
import {createAndOpenFile} from './codeBlockHandler';
import { listAllMessages } from './listMessages';
import { doVscodeCommand } from './vscodeCommandHandler';
import { featureToggle, getFeatureToggles } from './featureToggleHandler';
import { readFile, writeFile, getIDEServicePort, getCurrentFileInfo } from './fileHandler';
import { getTopics, deleteTopic } from './topicHandler';
import { readConfig, writeConfig, readServerConfigBase, writeServerConfigBase } from './configHandler';
import { getSetting, getUserAccessKey, getValidLlmModelList, updateSetting } from './removehandler';
// According to the context menu selected by the user, add the corresponding context file
@ -40,9 +37,6 @@ messageHandler.registerHandler('historyMessages', getHistoryMessages);
// Register the command list
// Response: { command: 'regCommandList', result: <command list> }
messageHandler.registerHandler('regCommandList', getWorkflowCommandList);
// Register the context list
// Response: { command: 'regContextList', result: <context list> }
messageHandler.registerHandler('regContextList', getWorkflowContextList);
// Send a message, send the message entered by the user to AI
// Response:
// { command: 'receiveMessagePartial', text: <response message text>, user: <user>, date: <date> }
@ -72,9 +66,6 @@ messageHandler.registerHandler('deleteChatMessage', deleteChatMessage);
// Response: none
messageHandler.registerHandler('doCommand', doVscodeCommand);
messageHandler.registerHandler('featureToggle', featureToggle);
messageHandler.registerHandler('featureToggles', getFeatureToggles);
messageHandler.registerHandler('userInput', userInput);
messageHandler.registerHandler('readFile', readFile);
@ -91,9 +82,3 @@ messageHandler.registerHandler('getIDEServicePort', getIDEServicePort);
messageHandler.registerHandler('readServerConfigBase', readServerConfigBase);
messageHandler.registerHandler('writeServerConfigBase', writeServerConfigBase);
messageHandler.registerHandler('regModelList', getValidLlmModelList);
messageHandler.registerHandler('updateSetting', updateSetting);
messageHandler.registerHandler('getSetting', getSetting);
messageHandler.registerHandler('getUserAccessKey', getUserAccessKey);

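With regContextList, the feature-toggle handlers, and the removehandler stubs unregistered above, the registration pattern itself is unchanged. A minimal sketch of adding a handler, assuming the same handler signature as the imports above (the 'ping' command and its body are hypothetical):

// Assumed signature, matching the handlers registered above:
// (message: any, panel: vscode.WebviewPanel | vscode.WebviewView) => Promise<void>
messageHandler.registerHandler('ping', async (message, panel) => {
    MessageHandler.sendMessage(panel, { command: 'pong', echo: message.text });
});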
View File

@ -46,27 +46,6 @@ OPENAI_API_KEY is missing from your environment or settings. Kindly input your O
} as LogEntry;
}
export function isValidApiKey(apiKey: string, llmType: string = "None") {
let apiKeyStrim = apiKey.trim();
const apiKeyType = ApiKeyManager.getKeyType(apiKeyStrim);
if (apiKeyType === undefined) {
return false;
}
if (llmType === "OpenAI") {
if (apiKeyType === "sk") {
return true;
}
return false;
}
if (llmType === "DevChat") {
if (apiKeyType === "DC") {
return true;
}
return false;
}
return true;
}
export async function loadTopicHistoryLogs(topicId: string | undefined): Promise<Array<LogEntry> | undefined> {
if (!topicId) {
return undefined;

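isValidApiKey and its key-type gating are removed above. Illustrative calls against the deleted helper; the "sk" and "DC" prefixes are an inference from how ApiKeyManager.getKeyType is used in this code, not verified against that class:

isValidApiKey("sk-xxxxxxxx", "OpenAI");   // assumed true: an "sk"-type key satisfies the OpenAI branch
isValidApiKey("DC.xxxxxxxx", "DevChat");  // assumed true: a "DC"-type key satisfies the DevChat branch
isValidApiKey("sk-xxxxxxxx", "DevChat");  // false: key type does not match the requested backend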
View File

@ -2,7 +2,6 @@
import * as vscode from 'vscode';
import '../context/loadContexts';
import { logger } from '../util/logger';

View File

@ -1,49 +0,0 @@
import * as vscode from 'vscode';
import { MessageHandler } from './messageHandler';
import { regInMessage, regOutMessage } from '../util/reg_messages';
import { ApiKeyManager } from '../util/apiKey';
import { UiUtilWrapper } from '../util/uiUtil';
regInMessage({command: 'regModelList'});
regOutMessage({command: 'regModelList', result: [{name: ''}]});
export async function getValidLlmModelList(message: any, panel: vscode.WebviewPanel|vscode.WebviewView): Promise<void> {
const modelList = ["model1", "model2", "model3"];
MessageHandler.sendMessage(panel, { command: 'regModelList', result: modelList });
return;
}
regInMessage({command: 'updateSetting', key1: "DevChat", key2: "OpenAI", value:"xxxx"});
export async function updateSetting(message: any, panel: vscode.WebviewPanel|vscode.WebviewView): Promise<void> {
return ;
}
regInMessage({command: 'getSetting', key1: "DevChat", key2: "OpenAI"});
regOutMessage({command: 'getSetting', key1: "DevChat", key2: "OpenAI", value: "GPT-4"});
export async function getSetting(message: any, panel: vscode.WebviewPanel|vscode.WebviewView): Promise<void> {
if (message.key2 === "Language") {
MessageHandler.sendMessage(panel, {"command": "getSetting", "key1": message.key1, "key2": message.key2, "value": "en"});
return;
}
MessageHandler.sendMessage(panel, {"command": "getSetting", "key1": message.key1, "key2": message.key2, "value": "model2"});
}
regInMessage({command: 'getUserAccessKey'});
regOutMessage({command: 'getUserAccessKey', accessKey: "DC.xxx", keyType: "DevChat", endPoint: "https://xxx"});
export async function getUserAccessKey(message: any, panel: vscode.WebviewPanel|vscode.WebviewView): Promise<void> {
MessageHandler.sendMessage(panel,
{
"command": "getUserAccessKey",
"accessKey": "",
"keyType": "",
"endPoint": ""
}
);
return;
}

View File

@ -1,14 +0,0 @@
import * as vscode from 'vscode';
import { ChatContextManager } from '../context/contextManager';
import { MessageHandler } from './messageHandler';
import { regInMessage, regOutMessage } from '../util/reg_messages';
regInMessage({command: 'regContextList'});
regOutMessage({command: 'regContextList', result: [{name: '', description: ''}]});
export async function getWorkflowContextList(message: any, panel: vscode.WebviewPanel|vscode.WebviewView): Promise<void> {
const contextList = ChatContextManager.getInstance().getContextList();
MessageHandler.sendMessage(panel, { command: 'regContextList', result: contextList });
return;
}

View File

@ -1,31 +0,0 @@
import * as vscode from 'vscode';
import * as fs from 'fs';
import * as path from 'path';
import { DevChatConfig } from '../config';
const featureTogglesJson = `
{
"ask-code-summary": false,
"ask-code": true,
"ask-code-dfs": false
}`;
const featureToggles = JSON.parse(featureTogglesJson);
// eslint-disable-next-line @typescript-eslint/naming-convention
export function FT(feature: string): boolean {
const betaInvitationCode = DevChatConfig.getInstance().get('beta_invitation_code');
const expectedInvitationCode = 'WELCOMEADDTODEVCHAT';
return betaInvitationCode === expectedInvitationCode || featureToggles[feature] === true;
}
// eslint-disable-next-line @typescript-eslint/naming-convention
export function FTs(): any {
// visited features
let newFeatureToggles = {};
for (const feature in featureToggles) {
newFeatureToggles[feature] = FT(feature);
}
return newFeatureToggles;
}

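The deleted feature_toggles module enabled beta behavior either via the beta invitation code or via the static JSON above. A short usage sketch grounded in that code; the guarded command is hypothetical and the import path assumes the module's location under util/feature_flags:

import { FT, FTs } from './util/feature_flags/feature_toggles';

if (FT('ask-code')) {
    // 'ask-code' is on by default in featureTogglesJson, and everything is on
    // for holders of the invitation code; register the beta-only command here.
}

const flags = FTs(); // e.g. { "ask-code-summary": false, "ask-code": true, "ask-code-dfs": false }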
View File

@ -1,18 +0,0 @@
Summary:
In VS Code extension development, we discussed how to control the activation of beta (internal-testing) features. The main approaches are:
1. **A dedicated configuration item**: add a specific field to the extension's configuration, e.g. `enableBetaFeatures`, and decide whether to enable beta features based on its value.
2. **A dedicated command**: add a specific command to the extension, e.g. `activateBetaFeatures`, and enable beta features only after the user has run this command.
3. **A license check**: add license validation to the extension and enable beta features only after the user has entered a valid license.
4. **A version marker**: add a marker to the extension's version number, e.g. `1.0.0-beta`, and enable beta features only when the version number contains this marker.
To avoid having to change code once the beta ends, we recommend managing beta features with feature toggles. Create a feature-toggle configuration file, e.g. `feature-toggles.json`, that defines which features are on and which are off; the extension code reads this file and decides whether to enable each feature based on its toggle value.
For commands defined in `package.json`, this can be implemented by dynamically registering or unregistering commands in the extension's activation function (the `activate` function). First declare all potentially needed commands, including the beta ones, in `package.json`; then, in `activate`, decide based on the feature toggles whether to register the beta commands (see the sketch after this summary).
This concludes our discussion of controlling beta-feature activation in VS Code extension development.

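A minimal sketch of the pattern the summary describes, assuming the command is declared in package.json and a toggle helper like the FT function above; the command id is hypothetical:

import * as vscode from 'vscode';
import { FT } from './util/feature_flags/feature_toggles';

export function activate(context: vscode.ExtensionContext) {
    // The beta command stays declared in package.json either way; it is only
    // wired up when its toggle (or the invitation code) enables it.
    if (FT('ask-code')) {
        context.subscriptions.push(
            vscode.commands.registerCommand('devchat.askCodeBeta', () => {
                vscode.window.showInformationMessage('Beta feature enabled');
            })
        );
    }
}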
View File

@ -1,38 +0,0 @@
import { expect } from 'chai';
import CustomContexts from '../../src/context/customContext';
import fs from 'fs';
import path from 'path';
describe('CustomContexts', () => {
const workflowsDir = path.join(__dirname, 'test-workflows');
beforeEach(() => {
// Create a test workflows directory with a sample _setting_.json file
if (!fs.existsSync(workflowsDir)) {
fs.mkdirSync(workflowsDir);
}
const extensionDir = path.join(workflowsDir, 'extension1', 'context', 'context1');
fs.mkdirSync(extensionDir, { recursive: true });
fs.writeFileSync(path.join(extensionDir, '_setting_.json'), JSON.stringify({
name: 'test-context',
description: 'Test context',
command: ['echo', 'Hello, World!']
}));
});
afterEach(() => {
// Clean up the test workflows directory
fs.rmSync(workflowsDir, { recursive: true });
});
it('should parse custom contexts', () => {
const customContexts = CustomContexts.getInstance();
customContexts.parseContexts(workflowsDir);
const contexts = customContexts.getContexts();
expect(contexts).to.have.lengthOf(1);
expect(contexts[0].name).to.equal('test-context');
expect(contexts[0].description).to.equal('Test context');
});
});

View File

@ -1,18 +0,0 @@
import { expect } from 'chai';
// import { describe, it } from 'mocha';
import '../../src/context/loadContexts';
import { ChatContextManager } from '../../src/context/contextManager';
import { gitDiffCachedContext } from '../../src/context/contextGitDiffCached';
import { gitDiffContext } from '../../src/context/contextGitDiff';
import { customCommandContext } from '../../src/context/contextCustomCommand';
describe('loadContexts', () => {
it('should register all contexts', () => {
const chatContextManager = ChatContextManager.getInstance();
const contextList = chatContextManager.getContextList();
expect(contextList).to.include(gitDiffCachedContext);
expect(contextList).to.include(gitDiffContext);
expect(contextList).to.include(customCommandContext);
});
});