commit 56c4e9aca0
package.json (223 changed lines)
@@ -70,18 +70,10 @@
     "configuration": {
       "title": "DevChat",
       "properties": {
-        "devchat.Model.gpt-3-5": {
+        "devchat.Provider.devchat": {
           "type": "object",
           "properties": {
-            "provider": {
-              "type": "string",
-              "default": "openai",
-              "enum": [
-                "openai"
-              ],
-              "description": "[required*] Specify which provider host this llm model"
-            },
-            "api_key": {
+            "access_key": {
               "type": "string",
               "default": "",
               "description": "[required*] Specify access key for selected provider."
@@ -90,6 +82,68 @@
               "type": "string",
               "default": "",
               "description": "[optional*] Specify the api base for selected provider. Leave it blank if you want to use default api base."
+            }
           },
+          "required": [
+            "access_key"
+          ],
+          "additionalProperties": false,
+          "order": -1,
+          "markdownDescription": "Specify the properties for devchat provider."
+        },
+        "devchat.Provider.openai": {
+          "type": "object",
+          "properties": {
+            "access_key": {
+              "type": "string",
+              "default": "",
+              "description": "[required*] Specify access key for selected provider."
+            },
+            "api_base": {
+              "type": "string",
+              "default": "",
+              "description": "[optional*] Specify the api base for selected provider. Leave it blank if you want to use default api base."
+            }
+          },
+          "required": [
+            "access_key"
+          ],
+          "additionalProperties": false,
+          "order": -1,
+          "markdownDescription": "Specify the properties for openai provider."
+        },
+        "devchat.Provider.anthropic": {
+          "type": "object",
+          "properties": {
+            "access_key": {
+              "type": "string",
+              "default": "",
+              "description": "[required*] Specify access key for selected provider."
+            },
+            "api_base": {
+              "type": "string",
+              "default": "",
+              "description": "[optional*] Specify the api base for selected provider. Leave it blank if you want to use default api base."
+            }
+          },
+          "required": [
+            "access_key"
+          ],
+          "additionalProperties": false,
+          "order": -1,
+          "markdownDescription": "Specify the properties for anthropic provider."
+        },
+        "devchat.Model.gpt-3-5": {
+          "type": "object",
+          "properties": {
+            "provider": {
+              "type": "string",
+              "default": "devchat",
+              "enum": [
+                "devchat",
+                "openai"
+              ],
+              "description": "[required*] Specify which provider host this llm model"
+            },
            "temperature": {
              "type": "number",
@@ -125,22 +179,13 @@
           "properties": {
             "provider": {
               "type": "string",
-              "default": "openai",
+              "default": "devchat",
               "enum": [
+                "devchat",
                 "openai"
               ],
               "description": "[required*] Specify which provider host this llm model"
             },
-            "api_key": {
-              "type": "string",
-              "default": "",
-              "description": "[required*] Specify access key for selected provider."
-            },
-            "api_base": {
-              "type": "string",
-              "default": "",
-              "description": "[optional*] Specify the api base for selected provider. Leave it blank if you want to use default api base."
-            },
             "temperature": {
               "type": "number",
               "default": 0.3,
@@ -175,22 +220,13 @@
           "properties": {
             "provider": {
               "type": "string",
-              "default": "openai",
+              "default": "devchat",
               "enum": [
+                "devchat",
                 "openai"
               ],
               "description": "[required*] Specify which provider host this llm model"
             },
-            "api_key": {
-              "type": "string",
-              "default": "",
-              "description": "[required*] Specify access key for selected provider."
-            },
-            "api_base": {
-              "type": "string",
-              "default": "",
-              "description": "[optional*] Specify the api base for selected provider. Leave it blank if you want to use default api base."
-            },
             "temperature": {
               "type": "number",
               "default": 0.3,
@@ -221,22 +257,13 @@
           "properties": {
             "provider": {
               "type": "string",
-              "default": "anthropic",
+              "default": "devchat",
               "enum": [
+                "devchat",
                 "anthropic"
               ],
               "description": "[required*] which provider host this llm model"
             },
-            "api_key": {
-              "type": "string",
-              "default": "",
-              "description": "[required*] Specify access key for selected provider."
-            },
-            "api_base": {
-              "type": "string",
-              "default": "",
-              "description": "[optional*] Specify the api base for selected provider. Leave it blank if you want to use default api base."
-            },
             "temperature": {
               "type": "number",
               "default": 0.3,
@@ -246,22 +273,106 @@
               "type": "number",
               "default": 1000,
               "description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
-            },
-            "presence_penalty": {
-              "type": "number",
-              "default": 0,
-              "description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."
-            },
-            "frequency_penalty": {
-              "type": "number",
-              "default": 0,
-              "description": "[optional*] Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
             }
           },
           "additionalProperties": false,
           "order": 3,
           "markdownDescription": "properties for claude-2 model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
         },
+        "devchat.Model.xinghuo-2": {
+          "type": "object",
+          "properties": {
+            "provider": {
+              "type": "string",
+              "default": "devchat",
+              "enum": [
+                "devchat"
+              ],
+              "description": "[required*] which provider host this llm model"
+            },
+            "temperature": {
+              "type": "number",
+              "default": 0.5,
+              "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
+            },
+            "max_tokens": {
+              "type": "number",
+              "default": 4000,
+              "description": "[optional*] The maximum number of tokens to generate in the chat completion.\nThe total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens."
+            }
+          },
+          "additionalProperties": false,
+          "order": 3,
+          "markdownDescription": "properties for xinghuo-2 model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
+        },
+        "devchat.Model.chatglm_pro": {
+          "type": "object",
+          "properties": {
+            "provider": {
+              "type": "string",
+              "default": "devchat",
+              "enum": [
+                "devchat"
+              ],
+              "description": "[required*] which provider host this llm model"
+            },
+            "temperature": {
+              "type": "number",
+              "default": 0.5,
+              "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
+            }
+          },
+          "additionalProperties": false,
+          "order": 3,
+          "markdownDescription": "properties for chatglm_pro model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
+        },
+        "devchat.Model.ERNIE-Bot": {
+          "type": "object",
+          "properties": {
+            "provider": {
+              "type": "string",
+              "default": "devchat",
+              "enum": [
+                "devchat"
+              ],
+              "description": "[required*] which provider host this llm model"
+            },
+            "temperature": {
+              "type": "number",
+              "default": 0.5,
+              "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
+            }
+          },
+          "additionalProperties": false,
+          "order": 3,
+          "markdownDescription": "properties for ERNIE-Bot model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
+        },
+        "devchat.Model.llama-2-13b-chat": {
+          "type": "object",
+          "properties": {
+            "provider": {
+              "type": "string",
+              "default": "devchat",
+              "enum": [
+                "devchat"
+              ],
+              "description": "[required*] which provider host this llm model"
+            },
+            "api_secret": {
+              "type": "string",
+              "default": "",
+              "description": "[required*] Specify secret key for selected provider."
+            },
+            "temperature": {
+              "type": "number",
+              "default": 0.5,
+              "description": "[optional*] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic."
+            }
+          },
+          "additionalProperties": false,
+          "order": 3,
+          "markdownDescription": "properties for llama-2-13b-chat model. Leave it blank if you won't use this llm model. [how to set?](https://platform.openai.com/docs/api-reference/chat/create#temperature) "
+        },
         "devchat.defaultModel": {
           "oneOf": [
             {
@@ -271,7 +382,11 @@
               "gpt-3.5-turbo",
               "gpt-3.5-turbo-16k",
               "gpt-4",
-              "claude-2"
+              "claude-2",
+              "xinghuo-2",
+              "chatglm_pro",
+              "ERNIE-Bot",
+              "llama-2-13b-chat"
             ]
           },
           {
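The net effect of the package.json changes above: credentials move out of the per-model devchat.Model.* blocks into dedicated devchat.Provider.* objects, and each model now only names its hosting provider. A minimal sketch of what a migrated configuration looks like, expressed through the same VS Code API the extension uses (the key values are placeholders, not real credentials):

import * as vscode from 'vscode';

// Illustrative sketch, not part of the commit: write new-style settings.
async function exampleNewStyleSettings() {
	const devchat = vscode.workspace.getConfiguration("devchat");
	// Credentials now live once per provider...
	await devchat.update("Provider.openai",
		{ access_key: "sk-placeholder", api_base: "" },
		vscode.ConfigurationTarget.Global);
	// ...and each model only points at a provider.
	await devchat.update("Model.gpt-3-5",
		{ provider: "openai", temperature: 0.3 },
		vscode.ConfigurationTarget.Global);
}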
@@ -14,7 +14,7 @@ export function checkDevChatDependency(showError: boolean = true): boolean {

 	try {
 		// Check if DevChat is installed
-		const expectVersion = 'DevChat 0.2.6';
+		const expectVersion = 'DevChat 0.2.7';
 		const devchatVersion = runCommand(`"${devChat}" --version`).toString().trim();
 		if (devchatVersion < expectVersion) {
 			logger.channel()?.info(`devchat version: ${devchatVersion}, but expect version: ${expectVersion}`);
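One caveat with the version gate above: `<` on strings is a lexicographic comparison, so a future 'DevChat 0.2.10' would compare as less than 'DevChat 0.2.7' and be reported as outdated. A sketch of a numeric comparison that avoids that edge case (hypothetical helper, not part of this commit):

// Hypothetical helper: compare the dotted version part numerically.
function isVersionAtLeast(actual: string, expected: string): boolean {
	const nums = (v: string) => (v.match(/\d+(\.\d+)*/)?.[0] ?? "0").split(".").map(Number);
	const a = nums(actual), e = nums(expected);
	for (let i = 0; i < Math.max(a.length, e.length); i++) {
		const d = (a[i] ?? 0) - (e[i] ?? 0);
		if (d !== 0) { return d > 0; }
	}
	return true;
}
// isVersionAtLeast("DevChat 0.2.10", "DevChat 0.2.7") === true, whereas the
// string comparison "DevChat 0.2.10" < "DevChat 0.2.7" is also true.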
src/extension.ts (156 changed lines)
@@ -33,9 +33,27 @@ import { UiUtilWrapper } from './util/uiUtil';
 import { UiUtilVscode } from './util/uiUtil_vscode';
 import { FT } from './util/feature_flags/feature_toggles';

-async function configUpdateTo_0912() {
+async function isProviderHasSetted() {
+	const providerProperty = "Provider.devchat";
+	const providerConfig = UiUtilWrapper.getConfiguration("devchat", providerProperty);
+	if (providerConfig && providerConfig["access_key"]) {
+		return true;
+	}
+
+	const providerPropertyOpenAI = "Provider.openai";
+	const providerConfigOpenAI = UiUtilWrapper.getConfiguration("devchat", providerPropertyOpenAI);
+	if (providerConfigOpenAI && providerConfigOpenAI["access_key"]) {
+		return true;
+	}
+
+	return false;
+}
+
+async function configUpdateTo_0924() {
+	if (await isProviderHasSetted()) {
+		return;
+	}
 	const defaultModel: any = UiUtilWrapper.getConfiguration("devchat", "defaultModel");
-

 	let devchatKey = UiUtilWrapper.getConfiguration('DevChat', 'Access_Key_DevChat');
 	let openaiKey = UiUtilWrapper.getConfiguration('DevChat', 'Api_Key_OpenAI');
@@ -52,71 +70,111 @@ async function configUpdateTo_0912() {
 	}

 	let modelConfigNew = {};
+	let providerConfigNew = {};
 	if (openaiKey) {
-		modelConfigNew["api_key"] = openaiKey;
-		modelConfigNew["provider"] = "openai";
-	} else if (devchatKey) {
-		modelConfigNew["api_key"] = devchatKey;
-		modelConfigNew["provider"] = "openai";
+		providerConfigNew["access_key"] = openaiKey;
+		if (endpointKey) {
+			providerConfigNew["api_base"] = endpointKey;
+		}
+
+		await vscode.workspace.getConfiguration("devchat").update("Provider.openai", providerConfigNew, vscode.ConfigurationTarget.Global);
 	}

-	if (endpointKey) {
-		modelConfigNew["api_base"] = endpointKey;
+	if (devchatKey) {
+		providerConfigNew["access_key"] = devchatKey;
+		if (endpointKey) {
+			providerConfigNew["api_base"] = endpointKey;
+		}
+
+		await vscode.workspace.getConfiguration("devchat").update("Provider.devchat", providerConfigNew, vscode.ConfigurationTarget.Global);
 	}

-	const modelConfig1: any = UiUtilWrapper.getConfiguration("devchat", "Model.gpt-3-5");
-	const modelConfig2: any = UiUtilWrapper.getConfiguration("devchat", "Model.gpt-3-5-16k");
-	const modelConfig3: any = UiUtilWrapper.getConfiguration("devchat", "Model.gpt-4");
-	//if (!modelConfig1 && !modelConfig2 && !modelConfig3 && Object.keys(modelConfigNew).length > 0) {
-	if (Object.keys(modelConfig1).length === 0 &&
-		Object.keys(modelConfig2).length === 0 &&
-		Object.keys(modelConfig3).length === 0) {
-		// config default gpt models
-		if (Object.keys(modelConfigNew).length === 0) {
-			modelConfigNew["provider"] = "openai";
-		}
+	const support_models = [
+		"Model.gpt-3-5",
+		"Model.gpt-3-5-16k",
+		"Model.gpt-4",
+		"Model.claude-2",
+		"Model.xinghuo-2",
+		"Model.chatglm_pro",
+		"Model.ERNIE-Bot",
+		"Model.llama-2-13b-chat"
+	];

 	if (!defaultModel) {
 		vscode.workspace.getConfiguration("devchat").update("defaultModel", "gpt-3.5-turbo", vscode.ConfigurationTarget.Global);
 	}

-	try {
-		vscode.workspace.getConfiguration("devchat").update("Model.gpt-3-5", modelConfigNew, vscode.ConfigurationTarget.Global);
-		vscode.workspace.getConfiguration("devchat").update("Model.gpt-3-5-16k", modelConfigNew, vscode.ConfigurationTarget.Global);
-		vscode.workspace.getConfiguration("devchat").update("Model.gpt-4", modelConfigNew, vscode.ConfigurationTarget.Global);
-	} catch(error) {
-		return;
-	}
-	}

-	const modelConfig4: any = UiUtilWrapper.getConfiguration("devchat", "Model.claude-2");
-	if (Object.keys(modelConfig4).length === 0) {
-		modelConfigNew = {};
-		if (devchatKey) {
-			modelConfigNew["api_key"] = devchatKey;
-		} else if (openaiKey) {
-			modelConfigNew["api_key"] = openaiKey;
-		}
-
-		if (modelConfigNew["api_key"].startsWith("DC.")) {
-			if (!defaultModel) {
-				vscode.workspace.getConfiguration("devchat").update("defaultModel", "claude-2", vscode.ConfigurationTarget.Global);
+	for (const model of support_models) {
+		const modelConfig1: any = UiUtilWrapper.getConfiguration("devchat", model);
+		if (Object.keys(modelConfig1).length === 0) {
+			modelConfigNew = {"provider": "devchat"};
+			if (openaiKey && model.startsWith("Model.gpt-")) {
+				modelConfigNew = {"provider": "openai"};
 			}

-			modelConfigNew["provider"] = "anthropic";
-			vscode.workspace.getConfiguration("devchat").update("Model.claude-2", modelConfigNew, vscode.ConfigurationTarget.Global);
+			await vscode.workspace.getConfiguration("devchat").update(model, modelConfigNew, vscode.ConfigurationTarget.Global);
 		}
 	}

+	if (!defaultModel) {
+		await vscode.workspace.getConfiguration("devchat").update("defaultModel", "claude-2", vscode.ConfigurationTarget.Global);
+	}
 }
+
+
+async function configUpdate0912To_0924() {
+	if (await isProviderHasSetted()) {
+		return;
+	}
+
+	const old_models = [
+		"Model.gpt-3-5",
+		"Model.gpt-3-5-16k",
+		"Model.gpt-4",
+		"Model.claude-2"
+	];
+
+	for (const model of old_models) {
+		const modelConfig: any = UiUtilWrapper.getConfiguration("devchat", model);
+		if (Object.keys(modelConfig).length !== 0) {
+			let modelProperties: any = {};
+			for (const key of Object.keys(modelConfig || {})) {
+				const property = modelConfig![key];
+				modelProperties[key] = property;
+			}
+
+			if (modelConfig["api_key"]) {
+				let providerConfigNew = {};
+				providerConfigNew["access_key"] = modelConfig["api_key"];
+				if (modelConfig["api_base"]) {
+					providerConfigNew["api_base"] = modelConfig["api_base"];
+				}
+
+				if (modelConfig["api_key"].startsWith("DC.")) {
+					modelProperties["provider"] = "devchat";
+					await vscode.workspace.getConfiguration("devchat").update("Provider.devchat", providerConfigNew, vscode.ConfigurationTarget.Global);
+				} else {
+					modelProperties["provider"] = "openai";
+					await vscode.workspace.getConfiguration("devchat").update("Provider.openai", providerConfigNew, vscode.ConfigurationTarget.Global);
+				}
+
+				delete modelProperties["api_key"];
+				delete modelProperties["api_base"];
+				await vscode.workspace.getConfiguration("devchat").update(model, modelProperties, vscode.ConfigurationTarget.Global);
+			} else {
+				delete modelProperties["api_base"];
+				modelProperties["provider"] = "devchat";
+				await vscode.workspace.getConfiguration("devchat").update(model, modelProperties, vscode.ConfigurationTarget.Global);
+			}
+		}
+	}
+}
+

-function activate(context: vscode.ExtensionContext) {
+async function activate(context: vscode.ExtensionContext) {
 	ExtensionContextHolder.context = context;

 	logger.init(LoggerChannelVscode.getInstance());
 	UiUtilWrapper.init(new UiUtilVscode());

-	configUpdateTo_0912();
+	await configUpdateTo_0924();
+	await configUpdate0912To_0924();

 	regLanguageContext();
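Both migration paths above bail out early through isProviderHasSetted(), so re-running activation is effectively idempotent once any provider access_key exists. A condensed sketch of the transformation configUpdate0912To_0924 performs on one legacy model entry (the values are assumed examples, not real keys):

// Sketch only: the shape change for a legacy "devchat.Model.gpt-3-5" entry.
const oldModel: any = { provider: "openai", api_key: "DC.placeholder", temperature: 0.3 };

// Commit's rule: keys starting with "DC." belong to the devchat provider,
// everything else to openai; api_key/api_base move onto the Provider.* object.
const isDevChatKey = oldModel.api_key.startsWith("DC.");
const providerConfig = { access_key: oldModel.api_key };
const newModel: any = { ...oldModel, provider: isDevChatKey ? "devchat" : "openai" };
delete newModel.api_key;
delete newModel.api_base;
// Result: Provider.devchat = { access_key: "DC.placeholder" }
//         Model.gpt-3-5    = { provider: "devchat", temperature: 0.3 }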
@@ -21,7 +21,18 @@ export async function getValidModels(): Promise<string[]> {
 		if (!modelConfig["provider"]) {
 			return undefined;
 		}
-		if (!modelConfig["api_key"]) {
+		const providerProperty = "Provider." + modelConfig["provider"];
+		const providerConfig = UiUtilWrapper.getConfiguration("devchat", providerProperty);
+		if (providerConfig) {
+			if (providerConfig["access_key"]) {
+				modelProperties["api_key"] = providerConfig["access_key"];
+			}
+			if (providerConfig["api_base"]) {
+				modelProperties["api_base"] = providerConfig["api_base"];
+			}
+		}
+
+		if (!modelProperties["api_key"]) {
 			const providerName = ApiKeyManager.toProviderKey(modelConfig["provider"]);
 			if (!providerName) {
 				return undefined;
@@ -57,7 +68,23 @@ export async function getValidModels(): Promise<string[]> {
 	if (claudeModel) {
 		modelList.push(claudeModel.model);
 	}
+
+	const xinghuoModel = await modelProperties('Model.xinghuo-2', "xinghuo-2");
+	if (xinghuoModel) {
+		modelList.push(xinghuoModel.model);
+	}
+	const glmModel = await modelProperties('Model.chatglm_pro', "chatglm_pro");
+	if (glmModel) {
+		modelList.push(glmModel.model);
+	}
+	const erniebotModel = await modelProperties('Model.ERNIE-Bot', "ERNIE-Bot");
+	if (erniebotModel) {
+		modelList.push(erniebotModel.model);
+	}
+	const llama2Model = await modelProperties('Model.llama-2-13b-chat', "llama-2-13b-chat");
+	if (llama2Model) {
+		modelList.push(llama2Model.model);
+	}

 	const customModelConfig: any = UiUtilWrapper.getConfiguration('devchat', 'customModel');
 	if (!customModelConfig) {
 		return modelList;
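getValidModels now resolves credentials in the same order as ApiKeyManager below: an api_key stored on the model entry itself wins, then the access_key of the Provider.* object the model points at, then the legacy per-provider key lookup. A reduced sketch of that precedence (the function shape is illustrative; only the setting names come from this diff):

// Reduced sketch of the lookup order introduced in this commit.
function resolveApiKey(modelConfig: any, getConfig: (key: string) => any): string | undefined {
	if (modelConfig["api_key"]) {
		return modelConfig["api_key"];       // 1. key on the model entry (old layout)
	}
	const providerConfig = getConfig("Provider." + modelConfig["provider"]);
	if (providerConfig?.["access_key"]) {
		return providerConfig["access_key"]; // 2. key on the provider object (new layout)
	}
	return undefined;                        // 3. fall through to the legacy key store (elided)
}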
@@ -39,7 +39,7 @@ export class ApiKeyManager {
 	const modelProperties = async (modelPropertyName: string, modelName: string) => {
 		const modelConfig = UiUtilWrapper.getConfiguration("devchat", modelPropertyName);
 		if (!modelConfig) {
-		return undefined;
+			return undefined;
 		}

 		let modelProperties: any = {};
@@ -50,7 +50,18 @@ export class ApiKeyManager {
 		if (!modelConfig["provider"]) {
 			return undefined;
 		}
-		if (!modelConfig["api_key"]) {
+		const providerProperty = "Provider." + modelConfig["provider"];
+		const providerConfig = UiUtilWrapper.getConfiguration("devchat", providerProperty);
+		if (providerConfig) {
+			if (providerConfig["access_key"]) {
+				modelProperties["api_key"] = providerConfig["access_key"];
+			}
+			if (providerConfig["api_base"]) {
+				modelProperties["api_base"] = providerConfig["api_base"];
+			}
+		}
+
+		if (!modelProperties["api_key"]) {
 			const providerName = this.toProviderKey(modelConfig["provider"]);
 			if (!providerName) {
 				return undefined;
@@ -66,7 +77,7 @@ export class ApiKeyManager {
 			modelProperties["api_key"] = apiKey;
 		}

-		if (!modelConfig["api_base"] && modelProperties["api_key"]?.startsWith("DC.")) {
+		if (!modelProperties["api_base"] && modelProperties["api_key"]?.startsWith("DC.")) {
 			modelProperties["api_base"] = "https://api.devchat.ai/v1";
 		}

@@ -86,6 +97,18 @@ export class ApiKeyManager {
 		if (llmModelT === "claude-2") {
 			return await modelProperties('Model.claude-2', "claude-2");
 		}
+		if (llmModelT === "xinghuo-2") {
+			return await modelProperties('Model.xinghuo-2', "xinghuo-2");
+		}
+		if (llmModelT === "chatglm_pro") {
+			return await modelProperties('Model.chatglm_pro', "chatglm_pro");
+		}
+		if (llmModelT === "ERNIE-Bot") {
+			return await modelProperties('Model.ERNIE-Bot', "ERNIE-Bot");
+		}
+		if (llmModelT === "llama-2-13b-chat") {
+			return await modelProperties('Model.llama-2-13b-chat', "llama-2-13b-chat");
+		}

 		const customModelConfig: any = UiUtilWrapper.getConfiguration('devchat', 'customModel');
 		if (!customModelConfig) {
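The new model names are wired into ApiKeyManager through a plain if-chain over llmModelT. For comparison, a table-driven equivalent (a sketch of an alternative, not the commit's code; the setting-name pairs are taken from the chain above):

type PropsFn = (settingKey: string, modelName: string) => Promise<any>;

const MODEL_SETTINGS: Record<string, string> = {
	"claude-2": "Model.claude-2",
	"xinghuo-2": "Model.xinghuo-2",
	"chatglm_pro": "Model.chatglm_pro",
	"ERNIE-Bot": "Model.ERNIE-Bot",
	"llama-2-13b-chat": "Model.llama-2-13b-chat"
};

async function dispatchModel(llmModelT: string, modelProperties: PropsFn) {
	const settingKey = MODEL_SETTINGS[llmModelT];
	return settingKey ? await modelProperties(settingKey, llmModelT) : undefined;
}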