Handle token budget when recommending test context

kagami 2024-03-13 11:28:49 +08:00
parent 3b1b24e1b2
commit c44891a39f


@@ -19,7 +19,7 @@ ENCODING = (
     if USE_USER_MODEL
     else get_encoding("cl100k_base")
 )
-# TODO: handle token budget
 TOKEN_BUDGET = int(CONTEXT_SIZE.get(MODEL, DEFAULT_CONTEXT_SIZE) * 0.9)
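The budget above reserves roughly 10% of the model's context window as headroom. A minimal, self-contained sketch of the same rule follows; the context-size entries, the model name, and the fits_budget helper are illustrative stand-ins rather than values from this repository, while get_encoding and encode(..., disallowed_special=()) are the tiktoken calls the module already uses.

```python
# Sketch: derive a token budget as 90% of the model's context window and
# count prompt tokens with the same tiktoken encoding used above.
from tiktoken import get_encoding

CONTEXT_SIZE = {"gpt-3.5-turbo": 16385, "gpt-4": 8192}  # hypothetical table
DEFAULT_CONTEXT_SIZE = 4096                              # hypothetical fallback
MODEL = "gpt-3.5-turbo"

ENCODING = get_encoding("cl100k_base")
TOKEN_BUDGET = int(CONTEXT_SIZE.get(MODEL, DEFAULT_CONTEXT_SIZE) * 0.9)


def fits_budget(text: str) -> bool:
    """Return True if the text stays within the token budget."""
    return len(ENCODING.encode(text, disallowed_special=())) <= TOKEN_BUDGET
```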
@@ -58,11 +58,13 @@ JSON Format Example:
 """
-def get_recommended_symbols(
-    func_to_test: FuncToTest, known_context: Optional[List] = None
-) -> List[str]:
-    known_context = known_context or []
-    context_content = "\n\n".join([str(c) for c in known_context])
+def _mk_user_msg(func_to_test: FuncToTest, contexts: List) -> str:
+    """
+    Create a user message to be sent to the model within the token budget.
+    """
+    msg = None
+    while msg is None:
+        context_content = "\n\n".join([str(c) for c in contexts])
         msg = recommend_symbol_context_prompt.format(
             function_content=func_to_test.func_content,
@@ -71,16 +73,34 @@ def get_recommended_symbols(
             file_path=func_to_test.file_path,
         )
+        token_count = len(ENCODING.encode(msg, disallowed_special=()))
+        if contexts and token_count > TOKEN_BUDGET:
+            # Remove the last context and try again
+            contexts.pop()
+            msg = None
+    return msg
+
+
+def get_recommended_symbols(
+    func_to_test: FuncToTest, known_context: Optional[List] = None
+) -> List[str]:
+    known_context = known_context or []
+    msg = _mk_user_msg(func_to_test, known_context)
     json_res = {}
     if USE_USER_MODEL:
         # Use the wrapped api parameters
-        json_res = chat_completion_no_stream_return_json(
+        json_res = (
+            chat_completion_no_stream_return_json(
                 messages=[{"role": "user", "content": msg}],
                 llm_config={
                     "model": MODEL,
                     "temperature": 0.1,
                 },
-        ) or {}
+            )
+            or {}
+        )
     else:
         response = create_chat_completion_content(
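To see how the new budget handling behaves, the sketch below mimics the while-loop in _mk_user_msg with a toy prompt template and a deliberately small budget; the function name, template, and sample contexts are hypothetical and only demonstrate that trailing contexts are dropped until the rendered message fits.

```python
# Sketch: trim contexts from the end until the prompt fits the budget,
# mirroring the loop introduced in _mk_user_msg above.
from typing import List

from tiktoken import get_encoding

ENCODING = get_encoding("cl100k_base")
TOKEN_BUDGET = 50  # tiny budget so the trimming is visible in a demo
PROMPT_TEMPLATE = "Function:\n{function}\n\nContext:\n{context}"  # toy template


def mk_user_msg_demo(function: str, contexts: List[str]) -> str:
    """Drop trailing contexts until the rendered prompt fits TOKEN_BUDGET."""
    msg = None
    while msg is None:
        msg = PROMPT_TEMPLATE.format(
            function=function,
            context="\n\n".join(contexts),
        )
        token_count = len(ENCODING.encode(msg, disallowed_special=()))
        if contexts and token_count > TOKEN_BUDGET:
            contexts.pop()  # discard the last context and try again
            msg = None      # force another pass with fewer contexts
    return msg


if __name__ == "__main__":
    ctxs = [f"def helper_{i}(): ..." for i in range(10)]
    prompt = mk_user_msg_demo("def target(): ...", ctxs)
    print(len(ctxs), "contexts kept")  # fewer than 10 once the budget bites
```

As in the commit, the context list is trimmed in place with pop(), so a caller that still needs the full known_context afterwards would pass in a copy.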