tests/run-perfbench.py: Create a _result.json at end of run.

Reuse the `create_test_report()` function from `run-tests.py` to generate a
`_result.json` file summarising the test run.

Signed-off-by: Damien George <damien@micropython.org>
Author: Damien George
Date: 2025-06-11 00:11:14 +10:00
Parent: 09b058559b
Commit: 63701c2e94


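Before the diff, a minimal sketch of how the generated report could be consumed, assuming the layout written by `create_test_report()` in `run-tests.py` (an `args` mapping plus a `results` list of `(test, outcome, reason)` entries). The JSON keys and the report filename below are assumptions for illustration, not something this commit defines:

    # Hypothetical consumer of the report written by create_test_report().
    # The filename and JSON keys are assumptions; check run-tests.py for the
    # authoritative layout before relying on them.
    import json
    import os


    def summarise_report(result_dir="results", filename="_result.json"):
        with open(os.path.join(result_dir, filename)) as f:
            report = json.load(f)
        counts = {}
        for _test, outcome, _reason in report["results"]:
            counts[outcome] = counts.get(outcome, 0) + 1
        return counts


    if __name__ == "__main__":
        # Prints something like {'pass': 25, 'skip': 3, 'fail': 1}.
        print(summarise_report())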
@@ -10,10 +10,12 @@ import sys
 import argparse
 from glob import glob
 
+run_tests_module = __import__("run-tests")
+
 sys.path.append("../tools")
 import pyboard
 
-prepare_script_for_target = __import__("run-tests").prepare_script_for_target
+prepare_script_for_target = run_tests_module.prepare_script_for_target
 
 # Paths for host executables
 if os.name == "nt":
@@ -90,9 +92,9 @@ def run_benchmark_on_target(target, script):
 
 
 def run_benchmarks(args, target, param_n, param_m, n_average, test_list):
+    test_results = []
     skip_complex = run_feature_test(target, "complex") != "complex"
     skip_native = run_feature_test(target, "native_check") != "native"
-    target_had_error = False
 
     for test_file in sorted(test_list):
         print(test_file + ": ", end="")
@@ -105,6 +107,7 @@ def run_benchmarks(args, target, param_n, param_m, n_average, test_list):
             and test_file.find("viper_") != -1
         )
         if skip:
+            test_results.append((test_file, "skip", ""))
            print("SKIP")
            continue
 
@@ -125,6 +128,7 @@ def run_benchmarks(args, target, param_n, param_m, n_average, test_list):
         if isinstance(target, pyboard.Pyboard) or args.via_mpy:
             crash, test_script_target = prepare_script_for_target(args, script_text=test_script)
             if crash:
+                test_results.append((test_file, "fail", "preparation"))
                 print("CRASH:", test_script_target)
                 continue
         else:
@@ -162,10 +166,13 @@ def run_benchmarks(args, target, param_n, param_m, n_average, test_list):
                 error = "FAIL truth"
 
         if error is not None:
-            if not error.startswith("SKIP"):
-                target_had_error = True
+            if error.startswith("SKIP"):
+                test_results.append((test_file, "skip", error))
+            else:
+                test_results.append((test_file, "fail", error))
             print(error)
         else:
+            test_results.append((test_file, "pass", ""))
             t_avg, t_sd = compute_stats(times)
             s_avg, s_sd = compute_stats(scores)
             print(
@@ -179,7 +186,7 @@ def run_benchmarks(args, target, param_n, param_m, n_average, test_list):
 
         sys.stdout.flush()
 
-    return target_had_error
+    return test_results
 
 
 def parse_output(filename):
@@ -264,6 +271,12 @@ def main():
     cmd_parser.add_argument("--heapsize", help="heapsize to use (use default if not specified)")
     cmd_parser.add_argument("--via-mpy", action="store_true", help="compile code to .mpy first")
     cmd_parser.add_argument("--mpy-cross-flags", default="", help="flags to pass to mpy-cross")
+    cmd_parser.add_argument(
+        "-r",
+        "--result-dir",
+        default=run_tests_module.base_path("results"),
+        help="directory for test results",
+    )
     cmd_parser.add_argument(
         "N", nargs=1, help="N parameter (approximate target CPU frequency in MHz)"
     )
@@ -307,13 +320,15 @@ def main():
     print("N={} M={} n_average={}".format(N, M, n_average))
 
-    target_had_error = run_benchmarks(args, target, N, M, n_average, tests)
+    os.makedirs(args.result_dir, exist_ok=True)
+    test_results = run_benchmarks(args, target, N, M, n_average, tests)
+    res = run_tests_module.create_test_report(args, test_results)
 
     if isinstance(target, pyboard.Pyboard):
         target.exit_raw_repl()
         target.close()
 
-    if target_had_error:
+    if not res:
         sys.exit(1)
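
With this change `run_benchmarks()` returns the per-test results rather than a single error flag, `main()` creates the result directory up front, and the exit status comes from the value returned by `create_test_report()` (falsy when any test failed). A typical host invocation would then be something like `./run-perfbench.py -r /tmp/perf-results 1000 1000`, after which the report should appear under `/tmp/perf-results`; the directory and the N/M values here are illustrative only.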