@@ -605,9 +605,7 @@ def run_script_on_remote_target(self, args, test_file, is_special):
 def run_tests(pyb, tests, args, result_dir, num_threads=1):
     test_count = ThreadSafeCounter()
     testcase_count = ThreadSafeCounter()
-    passed_tests = ThreadSafeCounter([])
-    failed_tests = ThreadSafeCounter([])
-    skipped_tests = ThreadSafeCounter([])
+    test_results = ThreadSafeCounter([])
 
     skip_tests = set()
     skip_native = False
@@ -896,7 +894,7 @@ def run_one_test(test_file):
 
         if skip_it:
            print("skip ", test_file)
-            skipped_tests.append((test_name, test_file))
+            test_results.append((test_name, test_file, "skip", ""))
            return
 
        # Run the test on the MicroPython target.
@@ -911,7 +909,7 @@ def run_one_test(test_file):
                # start-up code (eg boot.py) when preparing to run the next test.
                pyb.read_until(1, b"raw REPL; CTRL-B to exit\r\n")
            print("skip ", test_file)
-            skipped_tests.append((test_name, test_file))
+            test_results.append((test_name, test_file, "skip", ""))
            return
 
        # Look at the output of the test to see if unittest was used.
@@ -994,7 +992,7 @@ def run_one_test(test_file):
        # Print test summary, update counters, and save .exp/.out files if needed.
        if test_passed:
            print("pass ", test_file, extra_info)
-            passed_tests.append((test_name, test_file))
+            test_results.append((test_name, test_file, "pass", ""))
            rm_f(filename_expected)
            rm_f(filename_mupy)
        else:
@@ -1006,7 +1004,7 @@ def run_one_test(test_file):
            rm_f(filename_expected)  # in case left over from previous failed run
            with open(filename_mupy, "wb") as f:
                f.write(output_mupy)
-            failed_tests.append((test_name, test_file))
+            test_results.append((test_name, test_file, "fail", ""))
 
        test_count.increment()
 
@@ -1035,9 +1033,10 @@ def run_one_test(test_file):
                print(line)
            sys.exit(1)
 
-    passed_tests = sorted(passed_tests.value)
-    skipped_tests = sorted(skipped_tests.value)
-    failed_tests = sorted(failed_tests.value)
+    test_results = test_results.value
+    passed_tests = list(r for r in test_results if r[2] == "pass")
+    skipped_tests = list(r for r in test_results if r[2] == "skip")
+    failed_tests = list(r for r in test_results if r[2] == "fail")
 
    print(
        "{} tests performed ({} individual testcases)".format(
@@ -1072,12 +1071,8 @@ def to_json(obj):
                # The arguments passed on the command-line.
                "args": vars(args),
                # A list of all results of the form [(test, result, reason), ...].
-                "results": (
-                    list([test[1], "pass", ""] for test in passed_tests)
-                    + list([test[1], "skip", ""] for test in skipped_tests)
-                    + list([test[1], "fail", ""] for test in failed_tests)
-                ),
-                # A list of failed tests. This is deprecated, one should the results above.
+                "results": list(test[1:] for test in test_results),
+                # A list of failed tests. This is deprecated, use the "results" above instead.
                "failed_tests": [test[1] for test in failed_tests],
            },
            f,
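
Note: the diff relies on ThreadSafeCounter accepting an initial value and supporting both increment() (for the integer counters) and append() (for the list accumulator shared across worker threads). The class itself is defined elsewhere in run-tests.py and does not appear in this commit; the following is a minimal sketch of an implementation consistent with the usage above, not necessarily the exact upstream code.

import threading

class ThreadSafeCounter:
    # The start value determines the mode: an int for increment()-style
    # counting, a list for append()-style accumulation of result tuples.
    def __init__(self, start=0):
        self._value = start
        self._lock = threading.Lock()

    def increment(self):
        self.add(1)

    def add(self, to_add):
        # += works for both modes: int addition or list concatenation.
        with self._lock:
            self._value += to_add

    def append(self, arg):
        self.add([arg])

    @property
    def value(self):
        return self._value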
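
For illustration, here is the shape of the consolidated records and how the summary and JSON report consume them after this change. The test names and file paths below are made-up placeholders, not tests from the suite.

# Each record is (test_name, test_file, result, reason); this commit always
# stores "" for reason.
test_results = [
    ("basics_int", "basics/int.py", "pass", ""),
    ("extmod_select", "extmod/select.py", "skip", ""),
    ("float_math", "float/math.py", "fail", ""),
]

# The summary lists are now derived by filtering on the result field.
failed_tests = [r for r in test_results if r[2] == "fail"]
assert failed_tests == [("float_math", "float/math.py", "fail", "")]

# The JSON "results" entries drop test_name, giving (test_file, result, reason),
# which json.dump serializes as a three-element array per test.
results = [t[1:] for t in test_results]
assert results[0] == ("basics/int.py", "pass", "")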