From 4b756f7908c63f2027557f2bc8713ec5e07ba92c Mon Sep 17 00:00:00 2001
From: kagami
Date: Wed, 13 Mar 2024 10:58:07 +0800
Subject: [PATCH] Improve the usage of the wrapped api

---
 .../relevant_file_finder.py                  | 15 +++++----
 .../assistants/recommend_test_context.py     |  2 +-
 unit_tests/assistants/rerank_files.py        | 31 ++++++++++---------
 unit_tests/propose_test.py                   | 15 +++++----
 unit_tests/write_tests.py                    |  9 ++++--
 5 files changed, 41 insertions(+), 31 deletions(-)

diff --git a/unit_tests/assistants/directory_structure/relevant_file_finder.py b/unit_tests/assistants/directory_structure/relevant_file_finder.py
index dbdb772..b6e712c 100644
--- a/unit_tests/assistants/directory_structure/relevant_file_finder.py
+++ b/unit_tests/assistants/directory_structure/relevant_file_finder.py
@@ -97,12 +97,15 @@ class RelevantFileFinder(DirectoryStructureBase):
         json_res = {}
         if USE_USER_MODEL:
             # Use the wrapped api parameters
-            json_res = chat_completion_no_stream_return_json(
-                messages=[{"role": "user", "content": user_msg}],
-                llm_config={
-                    "model": MODEL,
-                    "temperature": 0.1,
-                },
+            json_res = (
+                chat_completion_no_stream_return_json(
+                    messages=[{"role": "user", "content": user_msg}],
+                    llm_config={
+                        "model": MODEL,
+                        "temperature": 0.1,
+                    },
+                )
+                or {}
             )
 
         else:
diff --git a/unit_tests/assistants/recommend_test_context.py b/unit_tests/assistants/recommend_test_context.py
index d75f12c..e173958 100644
--- a/unit_tests/assistants/recommend_test_context.py
+++ b/unit_tests/assistants/recommend_test_context.py
@@ -80,7 +80,7 @@ def get_recommended_symbols(
                 "model": MODEL,
                 "temperature": 0.1,
             },
-        )
+        ) or {}
 
     else:
         response = create_chat_completion_content(
diff --git a/unit_tests/assistants/rerank_files.py b/unit_tests/assistants/rerank_files.py
index 5a6b285..523a274 100644
--- a/unit_tests/assistants/rerank_files.py
+++ b/unit_tests/assistants/rerank_files.py
@@ -52,9 +52,7 @@ def rerank_files(
 
     files_str = ""
     for file in items:
-        assert isinstance(
-            file, str
-        ), "items must be a list of str when item_type is 'file'"
+        assert isinstance(file, str), "items must be a list of str when item_type is 'file'"
         files_str += f"- {file}\n"
 
     user_msg = rerank_file_prompt.format(
@@ -63,20 +61,23 @@ def rerank_files(
         accumulated_knowledge=knowledge,
     )
 
-    result = None
+    result = {}
     if USE_USER_MODEL:
         # Use the wrapped api parameters
-        result = chat_completion_no_stream_return_json(
-            messages=[
-                {
-                    "role": "user",
-                    "content": user_msg,
+        result = (
+            chat_completion_no_stream_return_json(
+                messages=[
+                    {
+                        "role": "user",
+                        "content": user_msg,
+                    },
+                ],
+                llm_config={
+                    "model": MODEL,
+                    "temperature": 0.1,
                 },
-            ],
-            llm_config={
-                "model": MODEL,
-                "temperature": 0.1,
-            },
+            )
+            or {}
         )
 
     else:
@@ -94,6 +95,6 @@ def rerank_files(
         )
         result = json.loads(response)
 
-    reranked = [(i["item"], i["relevance"]) for i in result["result"]]
+    reranked = [(i["item"], i["relevance"]) for i in result.get("result", [])]
 
     return reranked
diff --git a/unit_tests/propose_test.py b/unit_tests/propose_test.py
index c6c1213..8bc4add 100644
--- a/unit_tests/propose_test.py
+++ b/unit_tests/propose_test.py
@@ -110,12 +110,15 @@ def propose_test(
     json_res = {}
     if USE_USER_MODEL:
         # Use the wrapped api parameters
-        json_res = chat_completion_no_stream_return_json(
-            messages=[{"role": "user", "content": user_msg}],
-            llm_config={
-                "model": MODEL,
-                "temperature": 0.1,
-            },
+        json_res = (
+            chat_completion_no_stream_return_json(
+                messages=[{"role": "user", "content": user_msg}],
+                llm_config={
+                    "model": MODEL,
+                    "temperature": 0.1,
+                },
+            )
+            or {}
         )
 
     else:
diff --git a/unit_tests/write_tests.py b/unit_tests/write_tests.py
index 8aa5549..183c720 100644
--- a/unit_tests/write_tests.py
+++ b/unit_tests/write_tests.py
@@ -139,9 +139,9 @@ def write_and_print_tests(
         res = chat_completion_stream(
             messages=[{"role": "user", "content": user_msg}],
             llm_config={"model": MODEL, "temperature": 0.1},
-            stream_out=True,
         )
-        # print(res)
+        if res:
+            print(res.get("content", ""))
 
     else:
         # Use the openai api parameters
@@ -153,4 +153,7 @@ def write_and_print_tests(
         for chunk in chunks:
            if chunk.choices[0].finish_reason == "stop":
                 break
-            print(chunk.choices[0].delta.content, flush=True, end="")
+
+            content = chunk.choices[0].delta.content
+            if content is not None:
+                print(content, flush=True, end="")
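
The change is the same at every call site: the wrapped calls (chat_completion_no_stream_return_json, chat_completion_stream) are treated as able to return None, and a streaming chunk's delta.content as able to be None, so each caller now falls back to an empty dict and reads fields with .get() instead of risking a TypeError or KeyError. Below is a minimal, self-contained sketch of that guard pattern; the stub wrapper body, the rerank helper, and the "gpt-4" model string are illustrative assumptions, not the project's real implementation:

    from typing import Dict, List, Optional


    def chat_completion_no_stream_return_json(
        messages: List[Dict[str, str]], llm_config: Dict
    ) -> Optional[Dict]:
        # Stand-in for the wrapped api: parsed JSON on success, None on failure.
        return None  # simulate a failed or malformed response


    def rerank(user_msg: str) -> list:
        result = (
            chat_completion_no_stream_return_json(
                messages=[{"role": "user", "content": user_msg}],
                llm_config={"model": "gpt-4", "temperature": 0.1},
            )
            or {}  # guarantee result is a dict even when the call returns None
        )
        # .get() with a default keeps the comprehension safe when the
        # expected "result" key is absent from the returned JSON.
        return [(i["item"], i["relevance"]) for i in result.get("result", [])]


    if __name__ == "__main__":
        print(rerank("rank these files"))  # prints [] instead of raising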