Use gpt4-turbo for unit_tests as default

kagami 2024-03-15 15:21:51 +08:00
parent ccc1d97c90
commit 6a1fdc829e
3 changed files with 4 additions and 4 deletions

@@ -16,7 +16,7 @@ from openai_util import create_chat_completion_content
 from tools.directory_viewer import ListViewer
 from tools.tiktoken_util import get_encoding
-MODEL = USER_LLM_MODEL if USE_USER_MODEL else "gpt-3.5-turbo"
+MODEL = USER_LLM_MODEL if USE_USER_MODEL else "gpt-4-turbo-preview"
 ENCODING = (
     get_encoding(DEFAULT_ENCODING) # Use default encoding as an approximation
     if USE_USER_MODEL

@@ -8,7 +8,7 @@ from llm_conf import (
 )
 from openai_util import create_chat_completion_content
-MODEL = USER_LLM_MODEL if USE_USER_MODEL else "gpt-3.5-turbo"
+MODEL = USER_LLM_MODEL if USE_USER_MODEL else "gpt-4-turbo-preview"
 # ruff: noqa: E501

@@ -16,13 +16,13 @@ from openai_util import create_chat_completion_content
 from prompts import PROPOSE_TEST_PROMPT
 from tools.tiktoken_util import get_encoding
-MODEL = USER_LLM_MODEL if USE_USER_MODEL else "gpt-3.5-turbo"
+MODEL = USER_LLM_MODEL if USE_USER_MODEL else "gpt-4-turbo-preview"
 ENCODING = (
     get_encoding(DEFAULT_ENCODING) # Use default encoding as an approximation
     if USE_USER_MODEL
     else get_encoding("cl100k_base")
 )
-TOKEN_BUDGET = int(CONTEXT_SIZE.get(MODEL, DEFAULT_CONTEXT_SIZE) * 0.9)
+TOKEN_BUDGET = int(CONTEXT_SIZE.get(MODEL, DEFAULT_CONTEXT_SIZE) * 0.95)
 def _mk_user_msg(
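
All three hunks follow the same configuration pattern: use the user-configured model when one is set, otherwise fall back to a built-in default (now gpt-4-turbo-preview instead of gpt-3.5-turbo), and derive the prompt token budget from that model's context window. Below is a minimal sketch of that pattern; the context-window numbers and the USER_LLM_MODEL value are illustrative assumptions, not the actual values defined in llm_conf.

# Sketch of the shared pattern; the context-window sizes below are assumptions,
# not the values actually defined in llm_conf.
USE_USER_MODEL = False             # True when the user has configured their own model
USER_LLM_MODEL = "my-local-model"  # hypothetical user-configured model name
DEFAULT_CONTEXT_SIZE = 4000        # fallback window for models not in CONTEXT_SIZE
CONTEXT_SIZE = {
    "gpt-3.5-turbo": 16385,
    "gpt-4-turbo-preview": 128000,
}

# After this commit the built-in default is gpt-4-turbo-preview.
MODEL = USER_LLM_MODEL if USE_USER_MODEL else "gpt-4-turbo-preview"

# Use 95% of the model's context window as the prompt budget
# (the last hunk raises this from 90%).
TOKEN_BUDGET = int(CONTEXT_SIZE.get(MODEL, DEFAULT_CONTEXT_SIZE) * 0.95)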