Improve the usage of the wrapped api

kagami 2024-03-13 10:58:07 +08:00
parent 48441e2514
commit 4b756f7908
5 changed files with 41 additions and 31 deletions


@@ -97,12 +97,15 @@ class RelevantFileFinder(DirectoryStructureBase):
         json_res = {}
         if USE_USER_MODEL:
             # Use the wrapped api parameters
-            json_res = chat_completion_no_stream_return_json(
-                messages=[{"role": "user", "content": user_msg}],
-                llm_config={
-                    "model": MODEL,
-                    "temperature": 0.1,
-                },
+            json_res = (
+                chat_completion_no_stream_return_json(
+                    messages=[{"role": "user", "content": user_msg}],
+                    llm_config={
+                        "model": MODEL,
+                        "temperature": 0.1,
+                    },
+                )
+                or {}
             )
         else:
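
Note: the guard added in this hunk assumes chat_completion_no_stream_return_json can return None (or another falsy value) when the completion fails or its output cannot be parsed as JSON; the trailing or {} keeps json_res a dict either way, so later lookups do not blow up on None. A minimal, self-contained sketch of the same pattern, with a stub standing in for the real wrapper and a hypothetical "files" key used only for illustration:

from typing import Optional

def chat_completion_no_stream_return_json(messages, llm_config) -> Optional[dict]:
    # Stub for the wrapped API; assume it returns parsed JSON on success and None on failure.
    return None  # simulate a failed or unparsable completion

json_res = (
    chat_completion_no_stream_return_json(
        messages=[{"role": "user", "content": "Which files are relevant?"}],
        llm_config={"model": "gpt-3.5-turbo", "temperature": 0.1},
    )
    or {}
)
print(json_res.get("files", []))  # prints [] instead of raising AttributeError on None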


@@ -80,7 +80,7 @@ def get_recommended_symbols(
                 "model": MODEL,
                 "temperature": 0.1,
             },
-        )
+        ) or {}
     else:
         response = create_chat_completion_content(


@@ -52,9 +52,7 @@ def rerank_files(
         files_str = ""
         for file in items:
-            assert isinstance(
-                file, str
-            ), "items must be a list of str when item_type is 'file'"
+            assert isinstance(file, str), "items must be a list of str when item_type is 'file'"
             files_str += f"- {file}\n"
     user_msg = rerank_file_prompt.format(
@@ -63,20 +61,23 @@ def rerank_files(
         accumulated_knowledge=knowledge,
     )
-    result = None
+    result = {}
     if USE_USER_MODEL:
         # Use the wrapped api parameters
-        result = chat_completion_no_stream_return_json(
-            messages=[
-                {
-                    "role": "user",
-                    "content": user_msg,
-                },
-            ],
-            llm_config={
-                "model": MODEL,
-                "temperature": 0.1,
-            },
+        result = (
+            chat_completion_no_stream_return_json(
+                messages=[
+                    {
+                        "role": "user",
+                        "content": user_msg,
+                    },
+                ],
+                llm_config={
+                    "model": MODEL,
+                    "temperature": 0.1,
+                },
+            )
+            or {}
         )
     else:
@@ -94,6 +95,6 @@ def rerank_files(
         )
         result = json.loads(response)
-    reranked = [(i["item"], i["relevance"]) for i in result["result"]]
+    reranked = [(i["item"], i["relevance"]) for i in result.get("result", [])]
     return reranked
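
Taken together, the rerank_files hunks make a failed completion degrade to an empty reranking instead of raising: result starts as {}, the wrapped call falls back to {} via or {}, and result.get("result", []) tolerates the missing key. A small sketch of that effect in plain Python (rerank_from is just a local helper name for the sketch):

def rerank_from(result: dict):
    return [(i["item"], i["relevance"]) for i in result.get("result", [])]

print(rerank_from({}))  # [] -> a failed call no longer raises KeyError
print(rerank_from({"result": [{"item": "a.py", "relevance": 9}]}))  # [('a.py', 9)]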


@@ -110,12 +110,15 @@ def propose_test(
     json_res = {}
     if USE_USER_MODEL:
         # Use the wrapped api parameters
-        json_res = chat_completion_no_stream_return_json(
-            messages=[{"role": "user", "content": user_msg}],
-            llm_config={
-                "model": MODEL,
-                "temperature": 0.1,
-            },
+        json_res = (
+            chat_completion_no_stream_return_json(
+                messages=[{"role": "user", "content": user_msg}],
+                llm_config={
+                    "model": MODEL,
+                    "temperature": 0.1,
+                },
+            )
+            or {}
         )
     else:


@@ -139,9 +139,9 @@ def write_and_print_tests(
         res = chat_completion_stream(
             messages=[{"role": "user", "content": user_msg}],
             llm_config={"model": MODEL, "temperature": 0.1},
-            stream_out=True,
         )
-        # print(res)
+        if res:
+            print(res.get("content", ""))
     else:
         # Use the openai api parameters
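
This hunk reads as if chat_completion_stream, called without stream_out=True, hands back the accumulated completion rather than printing while it streams, and may return None or an empty dict on failure; hence the if res: check and res.get("content", ""). A rough sketch under that assumption, with a stub in place of the real wrapper:

from typing import Optional

def chat_completion_stream(messages, llm_config) -> Optional[dict]:
    # Stub for the wrapped streaming call; assumed to return the whole message
    # as a dict (or None on failure) when stream_out is not requested.
    return {"content": "def test_add():\n    assert add(1, 2) == 3\n"}

res = chat_completion_stream(
    messages=[{"role": "user", "content": "Write unit tests for add()."}],
    llm_config={"model": "gpt-3.5-turbo", "temperature": 0.1},
)
if res:
    print(res.get("content", ""))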
@@ -153,4 +153,7 @@ def write_and_print_tests(
         for chunk in chunks:
             if chunk.choices[0].finish_reason == "stop":
                 break
-            print(chunk.choices[0].delta.content, flush=True, end="")
+            content = chunk.choices[0].delta.content
+            if content is not None:
+                print(content, flush=True, end="")
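
In the plain OpenAI streaming branch, chunk.choices[0].delta.content is None on chunks that carry no text (for example a leading role-only delta or the final chunk), and print(None, end="") would emit the literal word None into the output. The added guard skips those chunks; a minimal reproduction that fakes the chunk shape with SimpleNamespace instead of real API objects:

from types import SimpleNamespace

def fake_chunk(content, finish_reason=None):
    # Imitates just enough of an OpenAI streaming chunk for this sketch.
    choice = SimpleNamespace(delta=SimpleNamespace(content=content), finish_reason=finish_reason)
    return SimpleNamespace(choices=[choice])

chunks = [fake_chunk(None), fake_chunk("hello "), fake_chunk("world"), fake_chunk(None, "stop")]

for chunk in chunks:
    if chunk.choices[0].finish_reason == "stop":
        break
    content = chunk.choices[0].delta.content
    if content is not None:  # skip empty deltas instead of printing "None"
        print(content, flush=True, end="")
print()  # end the streamed line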