Mirror of https://github.com/micropython/micropython.git, synced 2025-07-21 04:51:12 +02:00
tests/run-tests.py: Factor out helper function to create test report.
This commit factors existing code in `run-tests.py` into a new helper function `create_test_report()`. That function prints out a summary of the test run (eg number of tests passed, number failed, number skipped) and creates the corresponding `_results.json` file.

This is done so `create_test_report()` can be reused by the other test runners.

The `test_count` counter is now gone, and instead the number of passed plus number of failed tests is used as an equivalent count.

For consistency this commit makes a minor change to the printed output of `run-tests.py`: instead of printing a shorthand name for tests that failed or skipped, it now prints the full name. Eg what was previously printed as `attrtuple2` is now printed as `basics/attrtuple2.py`. This makes the output a little longer (when there are failed/skipped tests) but helps to disambiguate the test name, eg which directory it's in.

Signed-off-by: Damien George <damien@micropython.org>
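As a reading aid before the diff (not part of the commit): a minimal, hypothetical sketch of the per-test result tuples the refactored code passes around, and of how the overall test count is now derived once `test_count` is gone. The test names and results below are made up for illustration.

    # Hypothetical illustration only: the (test_file, result, reason) tuples used
    # after the refactor, and the replacement for the removed test_count counter.
    test_results = [
        ("basics/attrtuple2.py", "fail", ""),       # full test name is now printed/stored
        ("basics/async_await.py", "pass", ""),
        ("micropython/heapalloc.py", "skip", "too large"),
    ]

    passed_tests = [r for r in test_results if r[1] == "pass"]
    failed_tests = [r for r in test_results if r[1] == "fail"]

    # Number of passed plus number of failed tests is the equivalent count:
    num_tests_performed = len(passed_tests) + len(failed_tests)
    print(num_tests_performed)  # prints 2 (skipped tests are not counted)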
@@ -616,7 +616,6 @@ class PyboardNodeRunner:
 
 
 def run_tests(pyb, tests, args, result_dir, num_threads=1):
-    test_count = ThreadSafeCounter()
     testcase_count = ThreadSafeCounter()
     test_results = ThreadSafeCounter([])
 
@@ -903,7 +902,7 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
 
         if skip_it:
             print("skip ", test_file)
-            test_results.append((test_name, test_file, "skip", ""))
+            test_results.append((test_file, "skip", ""))
             return
 
         # Run the test on the MicroPython target.
@@ -918,11 +917,11 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
                 # start-up code (eg boot.py) when preparing to run the next test.
                 pyb.read_until(1, b"raw REPL; CTRL-B to exit\r\n")
             print("skip ", test_file)
-            test_results.append((test_name, test_file, "skip", ""))
+            test_results.append((test_file, "skip", ""))
             return
         elif output_mupy == b"SKIP-TOO-LARGE\n":
             print("lrge ", test_file)
-            test_results.append((test_name, test_file, "skip", "too large"))
+            test_results.append((test_file, "skip", "too large"))
             return
 
         # Look at the output of the test to see if unittest was used.
@@ -1005,7 +1004,7 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
         # Print test summary, update counters, and save .exp/.out files if needed.
         if test_passed:
             print("pass ", test_file, extra_info)
-            test_results.append((test_name, test_file, "pass", ""))
+            test_results.append((test_file, "pass", ""))
             rm_f(filename_expected)
             rm_f(filename_mupy)
         else:
@@ -1017,9 +1016,7 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
             rm_f(filename_expected)  # in case left over from previous failed run
             with open(filename_mupy, "wb") as f:
                 f.write(output_mupy)
-            test_results.append((test_name, test_file, "fail", ""))
-
-        test_count.increment()
+            test_results.append((test_file, "fail", ""))
 
         # Print a note if this looks like it might have been a misfired unittest
         if not uses_unittest and not test_passed:
@@ -1046,19 +1043,27 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
             print(line)
         sys.exit(1)
 
-    test_results = test_results.value
-    passed_tests = list(r for r in test_results if r[2] == "pass")
-    skipped_tests = list(r for r in test_results if r[2] == "skip" and r[3] != "too large")
-    skipped_tests_too_large = list(
-        r for r in test_results if r[2] == "skip" and r[3] == "too large"
-    )
-    failed_tests = list(r for r in test_results if r[2] == "fail")
+    # Return test results.
+    return test_results.value, testcase_count.value
 
-    print(
-        "{} tests performed ({} individual testcases)".format(
-            test_count.value, testcase_count.value
-        )
+
+# Print a summary of the results and save them to a JSON file.
+# Returns True if everything succeeded, False otherwise.
+def create_test_report(args, test_results, testcase_count=None):
+    passed_tests = list(r for r in test_results if r[1] == "pass")
+    skipped_tests = list(r for r in test_results if r[1] == "skip" and r[2] != "too large")
+    skipped_tests_too_large = list(
+        r for r in test_results if r[1] == "skip" and r[2] == "too large"
     )
+    failed_tests = list(r for r in test_results if r[1] == "fail")
+
+    num_tests_performed = len(passed_tests) + len(failed_tests)
+
+    testcase_count_info = ""
+    if testcase_count is not None:
+        testcase_count_info = " ({} individual testcases)".format(testcase_count)
+    print("{} tests performed{}".format(num_tests_performed, testcase_count_info))
+
     print("{} tests passed".format(len(passed_tests)))
 
     if len(skipped_tests) > 0:
@@ -1088,15 +1093,15 @@ def run_tests(pyb, tests, args, result_dir, num_threads=1):
             return obj.pattern
         return obj
 
-    with open(os.path.join(result_dir, RESULTS_FILE), "w") as f:
+    with open(os.path.join(args.result_dir, RESULTS_FILE), "w") as f:
         json.dump(
             {
                 # The arguments passed on the command-line.
                 "args": vars(args),
                 # A list of all results of the form [(test, result, reason), ...].
-                "results": list(test[1:] for test in test_results),
+                "results": list(test for test in test_results),
                 # A list of failed tests. This is deprecated, use the "results" above instead.
-                "failed_tests": [test[1] for test in failed_tests],
+                "failed_tests": [test[0] for test in failed_tests],
             },
             f,
             default=to_json,
@@ -1350,7 +1355,8 @@ the last matching regex is used:
 
     try:
         os.makedirs(args.result_dir, exist_ok=True)
-        res = run_tests(pyb, tests, args, args.result_dir, args.jobs)
+        test_results, testcase_count = run_tests(pyb, tests, args, args.result_dir, args.jobs)
+        res = create_test_report(args, test_results, testcase_count)
     finally:
         if pyb:
             pyb.close()
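Since the stated goal is reuse by the other test runners, here is a hypothetical sketch of what calling `create_test_report()` from outside `run-tests.py` could look like. The importlib loading of the dashed filename, the Namespace-based `args`, and the example test names are assumptions for illustration, not part of this commit; the sketch also assumes it is run from the tests/ directory of a MicroPython checkout so that `run-tests.py`'s own module-level imports resolve.

    # Hypothetical sketch: reusing create_test_report() from another test runner.
    # importlib is used because "run-tests.py" cannot be imported with a plain
    # import statement due to the dash in its filename.
    import argparse
    import importlib.machinery
    import importlib.util
    import os

    loader = importlib.machinery.SourceFileLoader("run_tests", "./run-tests.py")
    spec = importlib.util.spec_from_loader(loader.name, loader)
    run_tests = importlib.util.module_from_spec(spec)
    loader.exec_module(run_tests)

    # Results in the (test_file, result, reason) shape shown in the diff above.
    test_results = [
        ("basics/attrtuple2.py", "pass", ""),
        ("basics/async_await.py", "skip", ""),
        ("micropython/viper_misc.py", "fail", ""),
    ]

    # Only result_dir is needed here: it is where _results.json gets written.
    args = argparse.Namespace(result_dir="results")
    os.makedirs(args.result_dir, exist_ok=True)

    all_passed = run_tests.create_test_report(args, test_results)
    print("everything succeeded:", all_passed)  # expected False, one test failed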