feat: Optimize devchat code completion config

- Refactor devchat API configuration for clarity and consistency
- Ensure code completion falls back to devchat when configured
- Streamline the process for activating devchat server code completion
bobo.yang 2024-04-28 23:21:57 +08:00
parent f95d0c6c5b
commit b17883c0c5
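
In short, the commit retires the single complete_devchat_api_base setting in favor of the provider-scoped keys providers.devchat.api_key and providers.devchat.api_base. A minimal before/after sketch of the configuration reads, using the same DevChatConfig accessor that appears in the diff below:

    // Before this commit: only an endpoint key, no token.
    const devchatApiBase = DevChatConfig.getInstance().get("complete_devchat_api_base");

    // After this commit: provider-scoped endpoint and API key; devchat completion
    // is attempted only when both values are present.
    const devchatToken = DevChatConfig.getInstance().get("providers.devchat.api_key");
    const devchatEndpoint = DevChatConfig.getInstance().get("providers.devchat.api_base");
    const devchatConfigured = Boolean(devchatToken && devchatEndpoint);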


@@ -18,14 +18,10 @@ export interface CodeCompletionChunk {
 export async function* streamComplete(prompt: string): AsyncGenerator<CodeCompletionChunk> {
     const nvidiaKey = DevChatConfig.getInstance().get("complete_key");
     const ollamaApiBase = DevChatConfig.getInstance().get("complete_ollama_api_base");
-    const devchatApiBase = DevChatConfig.getInstance().get("complete_devchat_api_base");
+    const devchatToken = DevChatConfig.getInstance().get("providers.devchat.api_key");
+    const devchatEndpoint = DevChatConfig.getInstance().get("providers.devchat.api_base");
 
-    if (devchatApiBase) {
-        for await (const chunk of devchatComplete(prompt)) {
-            yield chunk;
-        }
-    }
-    else if (ollamaApiBase) {
+    if (ollamaApiBase) {
         for await (const chunk of ollamaDeepseekComplete(prompt)) {
             yield chunk;
         }
@@ -33,6 +29,10 @@ export async function* streamComplete(prompt: string): AsyncGenerator<CodeComple
         for await (const chunk of nvidiaStarcoderComplete(prompt)) {
             yield chunk;
         }
+    } else if (devchatToken && devchatEndpoint) {
+        for await (const chunk of devchatComplete(prompt)) {
+            yield chunk;
+        }
     }
 }
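
Taken together, the two hunks above demote devchat from the first-choice provider to the final fallback: ollama is tried first, then the NVIDIA starcoder endpoint, and devchat only when both its API key and endpoint are configured. Assembled from the hunks (the nvidiaKey guard on the middle branch is not visible in the diff and is assumed), the resulting streamComplete reads roughly as:

    export async function* streamComplete(prompt: string): AsyncGenerator<CodeCompletionChunk> {
        const nvidiaKey = DevChatConfig.getInstance().get("complete_key");
        const ollamaApiBase = DevChatConfig.getInstance().get("complete_ollama_api_base");
        const devchatToken = DevChatConfig.getInstance().get("providers.devchat.api_key");
        const devchatEndpoint = DevChatConfig.getInstance().get("providers.devchat.api_base");

        // Provider order after this change: ollama, then nvidia, then devchat.
        if (ollamaApiBase) {
            for await (const chunk of ollamaDeepseekComplete(prompt)) {
                yield chunk;
            }
        } else if (nvidiaKey) {
            for await (const chunk of nvidiaStarcoderComplete(prompt)) {
                yield chunk;
            }
        } else if (devchatToken && devchatEndpoint) {
            for await (const chunk of devchatComplete(prompt)) {
                yield chunk;
            }
        }
    }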
@@ -172,8 +172,8 @@ export async function * ollamaDeepseekComplete(prompt: string) : AsyncGenerator<
 export async function * devchatComplete(prompt: string) : AsyncGenerator<CodeCompletionChunk> {
-    const devchatApiBase = DevChatConfig.getInstance().get("complete_devchat_api_base");
-    const completionApiBase = devchatApiBase + "/completions";
+    const devchatEndpoint = DevChatConfig.getInstance().get("providers.devchat.api_base");
+    const completionApiBase = devchatEndpoint + "/completions";
     let model = DevChatConfig.getInstance().get("complete_model");
     if (!model) {