Support multiple LLM models
parent 80f0ccf02d
commit 0ff9870cf6

.vscode/settings.json (vendored), 2 lines changed
@@ -9,5 +9,5 @@
"dist": true // set this to false to include "dist" folder in search results
},
// Turn off tsc task auto detection since we have the necessary tasks as npm scripts
"typescript.tsc.autoDetect": "off"
"typescript.tsc.autoDetect": "off"
}
package.json, 270 lines changed
@@ -70,6 +70,270 @@
"configuration": {
"title": "DevChat",
"properties": {
"devchat.Model.gpt-3-5": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"provider": {
|
||||
"type": "string",
|
||||
"default": "openai",
|
||||
"enum": ["openai"],
|
||||
"description": "Specify which provider host this llm model"
|
||||
},
|
||||
"api_key": {
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"description": "Specify access key for selected provider."
|
||||
},
|
||||
"api_base": {
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"description": "Specify the api base for selected provider. Leave it blank if you want to use default api base."
|
||||
},
|
||||
"temperature": {
|
||||
"type": "number",
|
||||
"default": 0.3,
|
||||
"description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
|
||||
},
|
||||
"max_tokens": {
|
||||
"type": "number",
|
||||
"default": 1000,
|
||||
"description": "The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
|
||||
},
|
||||
"presence_penalty": {
|
||||
"type": "number",
|
||||
"default": 0,
|
||||
"description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."
|
||||
},
|
||||
"frequency_penalty": {
|
||||
"type": "number",
|
||||
"default": 0,
|
||||
"description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
|
||||
}
|
||||
},
|
||||
"required": ["provider", "key"],
|
||||
"additionalProperties": false,
|
||||
"order": 0,
|
||||
"markdownDescription": "Specify the properties for gpt-3.5-turbo model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature)"
|
||||
},
|
||||
"devchat.Model.gpt-3-5-16k": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"provider": {
|
||||
"type": "string",
|
||||
"default": "openai",
|
||||
"enum": ["openai"],
|
||||
"description": "Specify which provider host this llm model"
|
||||
},
|
||||
"api_key": {
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"description": "Specify access key for selected provider."
|
||||
},
|
||||
"api_base": {
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"description": "Specify the api base for selected provider. Leave it blank if you want to use default api base."
|
||||
},
|
||||
"temperature": {
|
||||
"type": "number",
|
||||
"default": 0.3,
|
||||
"description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
|
||||
},
|
||||
"max_tokens": {
|
||||
"type": "number",
|
||||
"default": 1000,
|
||||
"description": "The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
|
||||
},
|
||||
"presence_penalty": {
|
||||
"type": "number",
|
||||
"default": 0,
|
||||
"description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."
|
||||
},
|
||||
"frequency_penalty": {
|
||||
"type": "number",
|
||||
"default": 0,
|
||||
"description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
|
||||
}
|
||||
},
|
||||
"required": ["provider", "key"],
|
||||
"additionalProperties": false,
|
||||
"order": 1,
|
||||
"markdownDescription": "Specify properties for gpt-3.5-turbo-16k model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
|
||||
},
|
||||
"devchat.Model.gpt-4": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"provider": {
|
||||
"type": "string",
|
||||
"default": "openai",
|
||||
"enum": ["openai"],
|
||||
"description": "Specify which provider host this llm model"
|
||||
},
|
||||
"api_key": {
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"description": "Specify access key for selected provider."
|
||||
},
|
||||
"api_base": {
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"description": "Specify the api base for selected provider. Leave it blank if you want to use default api base."
|
||||
},
|
||||
"temperature": {
|
||||
"type": "number",
|
||||
"default": 0.3,
|
||||
"description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
|
||||
},
|
||||
"max_tokens": {
|
||||
"type": "number",
|
||||
"default": 1000,
|
||||
"description": "The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
|
||||
},
|
||||
"presence_penalty": {
|
||||
"type": "number",
|
||||
"default": 0,
|
||||
"description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."
|
||||
},
|
||||
"frequency_penalty": {
|
||||
"type": "number",
|
||||
"default": 0,
|
||||
"description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"order": 2,
|
||||
"markdownDescription": "properties for gpt-4 model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature)"
|
||||
},
|
||||
"devchat.Model.claude-2": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"provider": {
|
||||
"type": "string",
|
||||
"default": "anthropic",
|
||||
"enum": ["anthropic"],
|
||||
"description": "Specify which provider host this llm model"
|
||||
},
|
||||
"api_key": {
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"description": "Specify access key for selected provider."
|
||||
},
|
||||
"api_base": {
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"description": "Specify the api base for selected provider. Leave it blank if you want to use default api base."
|
||||
},
|
||||
"temperature": {
|
||||
"type": "number",
|
||||
"default": 0.3,
|
||||
"description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
|
||||
},
|
||||
"max_tokens": {
|
||||
"type": "number",
|
||||
"default": 1000,
|
||||
"description": "The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
|
||||
},
|
||||
"presence_penalty": {
|
||||
"type": "number",
|
||||
"default": 0,
|
||||
"description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."
|
||||
},
|
||||
"frequency_penalty": {
|
||||
"type": "number",
|
||||
"default": 0,
|
||||
"description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"order": 3,
|
||||
"markdownDescription": "properties for claude-2 model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
|
||||
},
|
||||
|
||||
"devchat.customModel": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"model": {
|
||||
"oneOf": [
|
||||
{
|
||||
"type": "string",
|
||||
"enum": ["openai/gpt-4", "openai/gpt-4-0613", "openai/gpt-4-0314", "openai/gpt-4-32k", "openai/gpt-4-32k-0314", "openai/gpt-4-32k-0613", "openai/gpt-3.5-turbo", "openai/gpt-3.5-turbo-0301", "openai/gpt-3.5-turbo-0613", "openai/gpt-3.5-turbo-16k", "openai/gpt-3.5-turbo-16k-0613", "openai/text-davinci-003", "openai/curie-001", "openai/babbage-001", "openai/ada-001", "openai/babbage-002", "openai/davinci-002", "cohere/command-nightly", "cohere/command", "cohere/command-light", "cohere/command-medium-beta", "cohere/command-xlarge-beta", "anthropic/claude-2", "anthropic/claude-instant-1", "anthropic/claude-instant-1.2", "replicate/replicate/", "replicate/replicate/llama-2-70b-chat:58d078176e02c219e11eb4da5a02a7830a283b14cf8f94537af893ccff5ee781", "replicate/a16z-infra/llama-2-13b-chat:2a7f981751ec7fdf87b5b91ad4db53683a98082e9ff7bfd12c8cd5ea85980a52", "replicate/joehoover/instructblip-vicuna13b:c4c54e3c8c97cd50c2d2fec9be3b6065563ccf7d43787fb99f84151b867178fe", "replicate/replicate/dolly-v2-12b:ef0e1aefc61f8e096ebe4db6b2bacc297daf2ef6899f0f7e001ec445893500e5", "replicate/a16z-infra/llama-2-7b-chat:7b0bfc9aff140d5b75bacbed23e91fd3c34b01a1e958d32132de6e0a19796e2c", "replicate/replicate/vicuna-13b:6282abe6a492de4145d7bb601023762212f9ddbbe78278bd6771c8b3b2f2a13b", "replicate/daanelson/flan-t5-large:ce962b3f6792a57074a601d3979db5839697add2e4e02696b3ced4c022d4767f", "replicate/replit/replit-code-v1-3b:b84f4c074b807211cd75e3e8b1589b6399052125b4c27106e43d47189e8415ad", "huggingface/meta-llama/Llama-2-7b-hf", "huggingface/meta-llama/Llama-2-7b-chat-hf", "huggingface/meta-llama/Llama-2-13b-hf", "huggingface/meta-llama/Llama-2-13b-chat-hf", "huggingface/meta-llama/Llama-2-70b-hf", "huggingface/meta-llama/Llama-2-70b-chat-hf", "huggingface/meta-llama/Llama-2-7b", "huggingface/meta-llama/Llama-2-7b-chat", "huggingface/meta-llama/Llama-2-13b", "huggingface/meta-llama/Llama-2-13b-chat", "huggingface/meta-llama/Llama-2-70b", "huggingface/meta-llama/Llama-2-70b-chat", "together_ai/togethercomputer/llama-2-70b-chat", "together_ai/togethercomputer/Llama-2-7B-32K-Instruct", "together_ai/togethercomputer/llama-2-7b", "baseten/qvv0xeq", "baseten/q841o8w", "baseten/31dxrj3", "openrouter/google/palm-2-codechat-bison", "openrouter/google/palm-2-chat-bison", "openrouter/openai/gpt-3.5-turbo", "openrouter/openai/gpt-3.5-turbo-16k", "openrouter/openai/gpt-4-32k", "openrouter/anthropic/claude-2", "openrouter/anthropic/claude-instant-v1", "openrouter/meta-llama/llama-2-13b-chat", "openrouter/meta-llama/llama-2-70b-chat", "vertex_ai/chat-bison", "vertex_ai/chat-bison@001", "vertex_ai/text-bison", "vertex_ai/text-bison@001", "ai21/j2-ultra", "ai21/j2-mid", "ai21/j2-light"],
"description": "Specify the LLM model name (in \"provider/model\" form)."
}, {
"type": "string",
"description": "Specify the LLM model name (in \"provider/model\" form)."
}
]
},
"api_key": {
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"description": "Specify access key for selected provider."
|
||||
},
|
||||
"api_base": {
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"description": "Specify the api base for selected provider. Leave it blank if you want to use default api base."
|
||||
},
|
||||
"temperature": {
|
||||
"type": "number",
|
||||
"default": 0.3,
|
||||
"description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
|
||||
},
|
||||
"max_tokens": {
|
||||
"type": "number",
|
||||
"default": 1000,
|
||||
"description": "The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
|
||||
},
|
||||
"presence_penalty": {
|
||||
"type": "number",
|
||||
"default": 0,
|
||||
"description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."
|
||||
},
|
||||
"frequency_penalty": {
|
||||
"type": "number",
|
||||
"default": 0,
|
||||
"description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
},
|
||||
"order": 6,
|
||||
"markdownDescription": "Specify the custom llm model for DevChat."
|
||||
},
|
||||
|
||||
"devchat.defaultModel": {
|
||||
"oneOf": [
|
||||
{
|
||||
"type": "string",
|
||||
"default": "gpt-3.5-turbo",
|
||||
"enum": [
|
||||
"gpt-3.5-turbo",
|
||||
"gpt-3.5-turbo-16k",
|
||||
"gpt-4",
|
||||
"claude-2"
|
||||
]
|
||||
}, {
|
||||
"type": "string",
|
||||
"default": "gpt-3.5-turbo"
|
||||
}
|
||||
],
|
||||
"order": 7,
|
||||
"markdownDescription": "Specify the default llm model for DevChat. [Price of each model](https://devchat.ai/pricing)"
|
||||
},
|
||||
|
||||
|
||||
"DevChat.activeModelKey": {
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"description": "api key for llm model"
|
||||
},
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
"DevChat.llmModel": {
|
||||
"type": "string",
|
||||
"default": "OpenAI",
|
||||
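For reference, here is a minimal sketch of how a user's settings.json could combine these new properties, written as a TypeScript object literal for illustration only. The chosen model names and key values are placeholders, not defaults shipped with the extension.

// Hypothetical user settings, expressed as a TS object literal mirroring settings.json.
// "devchat.defaultModel" selects which Model.* (or customModel) entry is actually used.
const exampleUserSettings = {
  "devchat.defaultModel": "gpt-4",
  "devchat.Model.gpt-4": {
    provider: "openai",   // must match the "enum" of allowed providers above
    api_key: "sk-...",    // placeholder; never commit a real key
    temperature: 0.3,
    max_tokens: 1000
  },
  "devchat.customModel": [
    {
      model: "anthropic/claude-instant-1", // customModel entries use the "provider/model" form
      api_key: "sk-ant-..."                // placeholder
    }
  ]
};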
@@ -83,12 +347,6 @@
"default": 20,
"description": "Limit the number of prompts in the chat view."
},
"DevChat.OpenAI.model": {
"type": "string",
"default": "gpt-3.5-turbo",
"description": "Specify the model ID.",
"when": "DevChat.llmModel == 'OpenAI'"
},
"DevChat.OpenAI.temperature": {
"type": "number",
"default": 0,
@@ -120,9 +120,9 @@ class DevChat {
args.push("-p", options.parent);
}

const llmModel = UiUtilWrapper.getConfiguration('DevChat', 'OpenAI.model');
if (llmModel) {
args.push("-m", llmModel);
const llmModelData = ApiKeyManager.llmModel();
if (llmModelData && llmModelData.model) {
args.push("-m", llmModelData.model);
}

return args;
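To make the new argument building concrete, a small standalone sketch follows. It assumes ApiKeyManager.llmModel() resolves the gpt-4 settings from the earlier example; the llmModelData shape and values shown are illustrative.

// Hypothetical object in the shape returned by ApiKeyManager.llmModel().
const llmModelData = { model: "gpt-4", provider: "openai", api_key: "sk-..." };

// Minimal re-creation of the "-m" handling in the hunk above.
const args: string[] = [];
if (llmModelData && llmModelData.model) {
  args.push("-m", llmModelData.model);
}
console.log(args); // ["-m", "gpt-4"]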
@@ -200,6 +200,18 @@ class DevChat {
}

async chat(content: string, options: ChatOptions = {}, onData: (data: ChatResponse) => void): Promise<ChatResponse> {
const llmModelData = ApiKeyManager.llmModel();
if (!llmModelData) {
return {
"prompt-hash": "",
user: "",
date: "",
response: `Error: no valid llm model is selected!`,
finish_reason: "",
isError: true,
};
}

const args = await this.buildArgs(options);
args.push("--");
args.push(content);
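A caller-side sketch (hypothetical, not part of this commit) of how that early-return error object could be handled; `devChat` is assumed to be an instance of the DevChat class patched above.

async function sendPrompt(devChat: DevChat, text: string): Promise<void> {
  const response = await devChat.chat(text, {}, () => { /* streamed chunks ignored in this sketch */ });
  if (response.isError) {
    // e.g. "Error: no valid llm model is selected!" when no Model.* or customModel entry resolves
    console.error(response.response);
    return;
  }
  console.log(response.response);
}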
@@ -211,23 +223,20 @@ class DevChat {
logger.channel()?.show();
}


// If a devchat TOKEN is configured, the default proxy needs to be used
let openAiApiBaseObject = this.apiEndpoint(openaiApiKey);

const openaiModel = UiUtilWrapper.getConfiguration('DevChat', 'OpenAI.model');
const openaiTemperature = UiUtilWrapper.getConfiguration('DevChat', 'OpenAI.temperature');
const openaiStream = UiUtilWrapper.getConfiguration('DevChat', 'OpenAI.stream');
const llmModel = UiUtilWrapper.getConfiguration('DevChat', 'llmModel');
const tokensPerPrompt = UiUtilWrapper.getConfiguration('DevChat', 'OpenAI.tokensPerPrompt');

const openAiApiBaseObject = llmModelData.api_base? { OPENAI_API_BASE: llmModelData.api_base } : {};
const activeLlmModelKey = llmModelData.api_key;

let devChat: string | undefined = UiUtilWrapper.getConfiguration('DevChat', 'DevChatPath');
if (!devChat) {
devChat = 'devchat';
}

const devchatConfig = {
model: openaiModel,
provider: llmModel,
"tokens-per-prompt": tokensPerPrompt,
OpenAI: {
@@ -257,7 +266,7 @@ class DevChat {
env: {
PYTHONUTF8:1,
...process.env,
OPENAI_API_KEY: openaiApiKey,
OPENAI_API_KEY: activeLlmModelKey,
...openAiApiBaseObject
},
};
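To show how the child-process environment is now assembled from the selected model rather than a global OpenAI key, here is a standalone sketch. Variable names mirror the hunk above; the llmModelData values are placeholders.

// Hypothetical llmModelData, as returned by ApiKeyManager.llmModel().
const llmModelData = {
  model: "gpt-4",
  provider: "openai",
  api_key: "sk-...",                       // placeholder
  api_base: "https://api.openai.com/v1"    // optional; omit to keep the provider default
};

// OPENAI_API_BASE is only injected when the user configured a custom api_base.
const openAiApiBaseObject = llmModelData.api_base ? { OPENAI_API_BASE: llmModelData.api_base } : {};

const env = {
  PYTHONUTF8: 1,
  ...process.env,
  OPENAI_API_KEY: llmModelData.api_key, // the selected model's key replaces the old global key
  ...openAiApiBaseObject,
};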
@@ -4,29 +4,85 @@ import { UiUtilWrapper } from './uiUtil';

export class ApiKeyManager {
static async getApiKey(llmType: string = "OpenAI"): Promise<string | undefined> {
let apiKey: string|undefined = undefined;

if (llmType === "OpenAI") {
apiKey = await UiUtilWrapper.secretStorageGet("openai_OPENAI_API_KEY");
}
if (!apiKey) {
apiKey = await UiUtilWrapper.secretStorageGet("devchat_OPENAI_API_KEY");
const llmModel = this.llmModel();
if (!llmModel) {
return undefined;
}

if (!apiKey) {
if (llmType === "OpenAI") {
apiKey = UiUtilWrapper.getConfiguration('DevChat', 'Api_Key_OpenAI');

return llmModel.api_key;
}

static llmModel() {
const llmModel = UiUtilWrapper.getConfiguration('devchat', 'defaultModel');
if (!llmModel) {
return undefined;
}

const modelProperties = (modelPropertyName: string, modelName: string) => {
const modelConfig = UiUtilWrapper.getConfiguration("devchat", modelPropertyName);
if (!modelConfig) {
return undefined;
}
if (!apiKey) {
apiKey = UiUtilWrapper.getConfiguration('DevChat', 'Access_Key_DevChat');

let modelProperties: any = {};
for (const key of Object.keys(modelConfig || {})) {
const property = modelConfig![key];
modelProperties[key] = property;
}

if (!modelConfig["provider"] || !modelConfig["api_key"]) {
return undefined;
}
modelProperties['model'] = modelName;

return modelProperties;
};

if (llmModel === "gpt-3.5-turbo") {
return modelProperties('Model.gpt-3-5', "gpt-3.5-turbo");
}
if (llmModel === "gpt-3.5-turbo-16k") {
return modelProperties('Model.gpt-3-5-16k', "gpt-3.5-turbo-16k");
}
if (llmModel === "gpt-4") {
return modelProperties('Model.gpt-4', "gpt-4");
}
if (llmModel === "claude-2") {
return modelProperties('Model.claude-2', "claude-2");
}

const customModelConfig: any = UiUtilWrapper.getConfiguration('devchat', 'customModel');
if (!customModelConfig) {
return undefined;
}

const customModels = customModelConfig as Array<any>;
for (const model of customModels) {
if (!model.model) {
continue;
}
if (model.model === llmModel) {
let modelProperties: any = {};
for (const key of Object.keys(model || {})) {
const property = model![key];
modelProperties[key] = property;
}

if (!model["api_key"]) {
return undefined;
}

const modelProvider = model["model"].split('/')[0];
const modelName = model["model"].split('/').slice(1).join('/');

modelProperties["provider"] = modelProvider;
modelProperties["model"] = modelName;

return modelProperties;
}
}
if (!apiKey) {
if (llmType === "OpenAI") {
apiKey = process.env.OPENAI_API_KEY;
}
}
return apiKey;

return undefined;
}

static getKeyType(apiKey: string): string | undefined {
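A usage sketch of the new resolution logic. It assumes settings like the example shown earlier; the returned value is a plain object whose keys are copied from the matching Model.* or customModel entry, with "model" (and, for custom models, "provider") filled in by the code above.

// Resolve the active model from the "devchat.defaultModel" setting.
const llmModelData = ApiKeyManager.llmModel();
if (llmModelData) {
  // Built-in entry: roughly { provider: "openai", api_key: "sk-...", model: "gpt-4", ... }.
  // customModel entry: provider and model are split from the "provider/model" string.
  console.log(llmModelData.model, llmModelData.provider);
} else {
  console.warn("No valid LLM model is configured.");
}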
@@ -50,8 +50,8 @@ const chatPanel = observer(() => {
const getSettings = () => {
messageUtil.sendMessage({
command: "getSetting",
key1: "DevChat",
key2: "OpenAI.model",
key1: "devchat",
key2: "defaultModel",
});
};

@@ -237,8 +237,8 @@ const chatPanel = observer(() => {
chat.changeChatModel(value);
messageUtil.sendMessage({
command: "updateSetting",
key1: "DevChat",
key2: "OpenAI.model",
key1: "devchat",
key2: "defaultModel",
value: value,
});
}}
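The webview now reads and writes the devchat.defaultModel setting instead of DevChat.OpenAI.model. A sketch of both payloads, assuming the existing messageUtil helper used above:

// Ask the extension host for the current default model.
messageUtil.sendMessage({
  command: "getSetting",
  key1: "devchat",
  key2: "defaultModel",
});

// Persist a newly selected model, e.g. after the user picks "claude-2" in the model dropdown.
messageUtil.sendMessage({
  command: "updateSetting",
  key1: "devchat",
  key2: "defaultModel",
  value: "claude-2",
});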
@@ -130,7 +130,6 @@ describe('sendMessageBase', () => {
workspaceFoldersFirstPathStub.returns('./');

getConfigurationStub.withArgs('DevChat', 'Access_Key_DevChat').returns(process.env.TEST_DEVCHAT_KEY);
getConfigurationStub.withArgs('DevChat', 'OpenAI.model').returns('gpt-4');
getConfigurationStub.withArgs('DevChat', 'OpenAI.temperature').returns(0);
getConfigurationStub.withArgs('DevChat', 'OpenAI.stream').returns('true');
getConfigurationStub.withArgs('DevChat', 'llmModel').returns('OpenAI');
@@ -157,7 +156,6 @@ describe('sendMessageBase', () => {
workspaceFoldersFirstPathStub.returns('./');

getConfigurationStub.withArgs('DevChat', 'Access_Key_DevChat').returns('sk-KvH7ZCtHmFDCBTqH0jUv');
getConfigurationStub.withArgs('DevChat', 'OpenAI.model').returns('gpt-4');
getConfigurationStub.withArgs('DevChat', 'OpenAI.temperature').returns('0');
getConfigurationStub.withArgs('DevChat', 'OpenAI.stream').returns('true');
getConfigurationStub.withArgs('DevChat', 'llmModel').returns('OpenAI');
@@ -186,7 +184,6 @@ describe('sendMessageBase', () => {
workspaceFoldersFirstPathStub.returns('./');

getConfigurationStub.withArgs('DevChat', 'Access_Key_DevChat').returns(process.env.TEST_DEVCHAT_KEY);
getConfigurationStub.withArgs('DevChat', 'OpenAI.model').returns('gpt-4');
getConfigurationStub.withArgs('DevChat', 'OpenAI.temperature').returns(0);
getConfigurationStub.withArgs('DevChat', 'OpenAI.stream').returns('true');
getConfigurationStub.withArgs('DevChat', 'llmModel').returns('OpenAI');