From b17883c0c5e6ad8f209d490aeebd51689d0e22a8 Mon Sep 17 00:00:00 2001 From: "bobo.yang" Date: Sun, 28 Apr 2024 23:21:57 +0800 Subject: [PATCH] feat: Optimize devchat code completion config - Refactor devchat API configuration for clarity and consistency - Ensure code completion falls back to devchat when configured - Streamline the process for activating devchat server code completion --- src/contributes/codecomplete/llm.ts | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/contributes/codecomplete/llm.ts b/src/contributes/codecomplete/llm.ts index b65a79c..49dc4ed 100644 --- a/src/contributes/codecomplete/llm.ts +++ b/src/contributes/codecomplete/llm.ts @@ -18,14 +18,10 @@ export interface CodeCompletionChunk { export async function* streamComplete(prompt: string): AsyncGenerator { const nvidiaKey = DevChatConfig.getInstance().get("complete_key"); const ollamaApiBase = DevChatConfig.getInstance().get("complete_ollama_api_base"); - const devchatApiBase = DevChatConfig.getInstance().get("complete_devchat_api_base"); + const devchatToken = DevChatConfig.getInstance().get("providers.devchat.api_key"); + const devchatEndpoint = DevChatConfig.getInstance().get("providers.devchat.api_base"); - if (devchatApiBase) { - for await (const chunk of devchatComplete(prompt)) { - yield chunk; - } - } - else if (ollamaApiBase) { + if (ollamaApiBase) { for await (const chunk of ollamaDeepseekComplete(prompt)) { yield chunk; } @@ -33,6 +29,10 @@ export async function* streamComplete(prompt: string): AsyncGenerator { - const devchatApiBase = DevChatConfig.getInstance().get("complete_devchat_api_base"); - const completionApiBase = devchatApiBase + "/completions"; + const devchatEndpoint = DevChatConfig.getInstance().get("providers.devchat.api_base"); + const completionApiBase = devchatEndpoint + "/completions"; let model = DevChatConfig.getInstance().get("complete_model"); if (!model) {