Improve the principles for writing tests

This commit is contained in:
kagami 2024-03-18 09:30:11 +08:00
parent bee6f21535
commit 1896c2b933
4 changed files with 22 additions and 14 deletions

View File

@ -15,7 +15,7 @@ from tools.symbol_util import (
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from libs.ide_services import IDEService, Location, Range, SymbolNode
from libs.ide_services import IDEService, Location, Position, Range, SymbolNode
@dataclass

View File

@ -11,6 +11,8 @@ from cache import LocalCache
from chatmark import Checkbox, Form, Step, TextEditor # noqa: E402
from find_context import (
Context,
Position,
Range,
find_symbol_context_by_static_analysis,
find_symbol_context_of_llm_recommendation,
)
@ -76,6 +78,10 @@ class UnitTestsWorkflow:
Context(
file_path=self.func_to_test.file_path,
content=self.func_to_test.container_content,
range=Range(
start=Position(line=self.func_to_test.container_start_line, character=0),
end=Position(line=self.func_to_test.container_end_line, character=0),
),
)
)
known_context_for_llm += list(

View File

@ -50,13 +50,8 @@ The reference should provide a clear example of best practices in testing functi
WRITE_TESTS_PROMPT = """
You're an advanced AI test case generator.
Given a target function, some reference test code, and a list of specific test case descriptions, write the test cases in code.
Each test case should be self-contained and executable.
Use the content of the reference test cases as a model, ensuring you use the same test framework and mock library,
and apply comparable mocking strategies and best practices.
{additional_requirements}
Given a target function, some relevant source code, some reference test code, and a list of specific test case descriptions, write the test cases in code.
Each test case should be self-contained and as close to executable as possible.
The target function is {function_name}, located in the file {file_path}.
Here's the relevant source code of the function:
@ -69,13 +64,18 @@ Here's the list of test case descriptions:
{test_cases_str}
Answer in the following format in {chat_language}:
Answer in {chat_language} with the following requirements:
Test Case 1. <original test case 1 description>
Basic requirements
<test case 1 code>
1. Put all the test code in a single code block.
2. Use the content of the reference test cases as a model if possible. Ensure you use the same test framework and mock library, and apply comparable mocking strategies and best practices.
3. Describe the test cases in comments in the same order as the list above.
4. Include libraries at the top of the code block if needed.
5. If two or more test cases share the same setup, you should reuse the setup code.
6. If two or more test cases share the same test logic, you should reuse the test logic.
7. Use TODO comments or FIXME comments to indicate any missing parts of the code or any parts that need to be improved.
Test Case 2. <original test case 2 description>
{additional_requirements}
<test case 2 code>
"""

View File

@ -35,7 +35,9 @@ def _mk_write_tests_msg(
symbol_contexts: Optional[List[Context]] = None,
user_requirements: str = "",
) -> Optional[str]:
additional_requirements = user_requirements
additional_requirements = ""
if user_requirements:
additional_requirements = f"Additional requirements\n\n{user_requirements}\n\n"
test_cases_str = ""
for i, test_case in enumerate(test_cases, 1):