Merge branch 'feature/skip_known_failure_cases_v3.3' into 'release/v3.3'

CI: Ignore Known Failure Cases Result (v3.3)

See merge request espressif/esp-idf!14652
Zim Kalinowski 2021-08-10 02:31:41 +00:00
commit 44e9dd3e29
6 changed files with 182 additions and 92 deletions


@@ -930,9 +930,11 @@ assign_test:
# clone test env configs
- ./tools/ci/retry_failed.sh git clone $TEST_ENV_CONFIG_REPO
- python $CHECKOUT_REF_SCRIPT ci-test-runner-configs ci-test-runner-configs
- cd $TEST_FW_PATH
# git clone the known failure cases repo, run test
- ./tools/ci/retry_failed.sh git clone $KNOWN_FAILURE_CASES_REPO known_failure_cases
# run test
- python Runner.py $TEST_CASE_PATH -c $CONFIG_FILE -e $ENV_FILE
- cd $TEST_FW_PATH
- python Runner.py $TEST_CASE_PATH -c $CONFIG_FILE -e $ENV_FILE --known_failure_cases_file $CI_PROJECT_DIR/known_failure_cases/known_failure_cases.txt
.unit_test_template: &unit_test_template
<<: *example_test_template
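For context, the file passed via --known_failure_cases_file is a plain-text list: one test case id per line, '#' starts a comment, and entries may use fnmatch-style wildcards (see Runner.is_known_issue below). The real contents of $KNOWN_FAILURE_CASES_REPO are not shown here; a hypothetical known_failure_cases.txt could look like:

# made-up case ids in the <target>.<config>.<name> form produced by format_case_id/format_case_name in this change
esp32.default.UT_001_05
esp32.psram.UT_012_*    # wildcard: every case in this group is a known failure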


@@ -21,13 +21,14 @@ Command line interface to run test cases from a given path.
Use ``python Runner.py test_case_path -c config_file -e env_config_file`` to run test cases.
"""
import argparse
import os
import sys
import argparse
import threading
from fnmatch import fnmatch
import TinyFW
from Utility import SearchCases, CaseConfig
from TinyFW import JunitReport, set_default_config
from Utility import CaseConfig, SearchCases, console_log
class Runner(threading.Thread):
@@ -37,28 +38,64 @@ class Runner(threading.Thread):
:param env_config_file: env config file
"""
def __init__(self, test_case, case_config, env_config_file=None):
def __init__(self, test_case, case_config, env_config_file=None, known_failure_cases_file=None):
super(Runner, self).__init__()
self.setDaemon(True)
if case_config:
test_suite_name = os.path.splitext(os.path.basename(case_config))[0]
else:
test_suite_name = "TestRunner"
TinyFW.set_default_config(env_config_file=env_config_file, test_suite_name=test_suite_name)
set_default_config(env_config_file=env_config_file, test_suite_name=test_suite_name)
test_methods = SearchCases.Search.search_test_cases(test_case)
self.test_cases = CaseConfig.Parser.apply_config(test_methods, case_config)
self.test_result = []
self.known_failure_cases = self._get_config_cases(known_failure_cases_file)
@staticmethod
def _get_config_cases(config_file):
res = set()
if not config_file or not os.path.isfile(config_file):
return res
for line in open(config_file).readlines():
if not line:
continue
if not line.strip():
continue
without_comments = line.split("#")[0].strip()
if without_comments:
res.add(without_comments)
return res
def run(self):
for case in self.test_cases:
result = case.run()
self.test_result.append(result)
case.run()
@staticmethod
def is_known_issue(tc_name, known_cases):
for case in known_cases:
if tc_name == case:
return True
if fnmatch(tc_name, case):
return True
return False
def get_test_result(self):
return self.test_result and all(self.test_result)
_res = True
console_log("Test Results:")
for tc in JunitReport.JUNIT_TEST_SUITE.test_cases:
if tc.failures:
if self.is_known_issue(tc.name, self.known_failure_cases):
console_log(" Known Failure: " + tc.name, color="orange")
else:
console_log(" Test Fail: " + tc.name, color="red")
_res = False
else:
console_log(" Test Succeed: " + tc.name, color="green")
return _res
if __name__ == '__main__':
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("test_case",
@@ -67,15 +104,17 @@ if __name__ == '__main__':
help="case filter/config file")
parser.add_argument("--env_config_file", "-e", default=None,
help="test env config file")
parser.add_argument("--known_failure_cases_file", default=None,
help="known failure cases file")
args = parser.parse_args()
runner = Runner(args.test_case, args.case_config, args.env_config_file)
runner = Runner(args.test_case, args.case_config, args.env_config_file, args.known_failure_cases_file)
runner.start()
while True:
try:
runner.join(1)
if not runner.isAlive():
if not runner.is_alive():
break
except KeyboardInterrupt:
print("exit by Ctrl-C")
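To make the matching rule above concrete, here is a minimal sketch (not part of the commit) of how is_known_issue treats exact names and fnmatch wildcards; the case ids are hypothetical. get_test_result() reports such cases as "Known Failure" in orange without flipping the overall result to False.

from fnmatch import fnmatch

known_cases = {"esp32.default.UT_001_05", "esp32.psram.UT_012_*"}  # hypothetical entries

def is_known_issue(tc_name, known_cases):
    # exact match or fnmatch-style wildcard match marks a failing case as "known"
    return any(tc_name == case or fnmatch(tc_name, case) for case in known_cases)

print(is_known_issue("esp32.psram.UT_012_03", known_cases))    # True  -> reported as Known Failure
print(is_known_issue("esp32.default.UT_099_01", known_cases))  # False -> still counts as a real failure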


@@ -13,17 +13,31 @@
# limitations under the License.
""" Interface for test cases. """
import functools
import os
import time
import traceback
import functools
import junit_xml
import Env
import DUT
import App
import DUT
import Env
import Utility
from Utility import format_case_id
class TestCaseFailed(AssertionError):
def __init__(self, *cases):
"""
Raise this exception if one or more test cases fail in a 'normal' way (i.e. the test runs but fails, with no unexpected exceptions).
This avoids dumping the Python stack trace, on the assumption that the junit error info and the full job log already have
enough information for a developer to debug.
The 'cases' argument holds the names of one or more failed test cases.
"""
message = 'Test case{} failed: {}'.format('s' if len(cases) > 1 else '', ', '.join(str(c) for c in cases))
super(TestCaseFailed, self).__init__(self, message)
class DefaultEnvConfig(object):
@@ -83,8 +97,8 @@ class JunitReport(object):
@classmethod
def output_report(cls, junit_file_path):
""" Output current test result to file. """
with open(os.path.join(junit_file_path, cls.JUNIT_FILE_NAME), "w") as f:
cls.JUNIT_TEST_SUITE.to_file(f, [cls.JUNIT_TEST_SUITE], prettyprint=False)
with open(os.path.join(junit_file_path, cls.JUNIT_FILE_NAME), 'w') as f:
junit_xml.to_xml_report_file(f, [cls.JUNIT_TEST_SUITE], prettyprint=False)
@classmethod
def get_current_test_case(cls):
@@ -168,21 +182,20 @@ def test_method(**kwargs):
env_inst = Env.Env(**env_config)
# prepare for xunit test results
junit_file_path = env_inst.app_cls.get_log_folder(env_config["test_suite_name"])
junit_test_case = JunitReport.create_test_case(case_info["name"])
junit_file_path = env_inst.app_cls.get_log_folder(env_config['test_suite_name'])
junit_test_case = JunitReport.create_test_case(format_case_id(case_info['ID'],
target=case_info['chip'].lower()))
result = False
try:
Utility.console_log("starting running test: " + test_func.__name__, color="green")
# execute test function
test_func(env_inst, extra_data)
# if finish without exception, test result is True
result = True
except TestCaseFailed as e:
junit_test_case.add_failure_info(str(e))
except Exception as e:
# handle all the exceptions here
traceback.print_exc()
# log failure
junit_test_case.add_failure_info(str(e) + ":\r\n" + traceback.format_exc())
Utility.handle_unexpected_exception(junit_test_case, e)
finally:
# do close all DUTs, if result is False then print DUT debug info
close_errors = env_inst.close(dut_debug=(not result))
@@ -194,7 +207,7 @@ def test_method(**kwargs):
# and raise exception in DUT close to fail test case if reset detected.
if close_errors:
for error in close_errors:
junit_test_case.add_failure_info('env close error: {}'.format(error))
junit_test_case.add_failure_info(str(error))
result = False
if not case_info["junit_report_by_case"]:
JunitReport.test_case_finish(junit_test_case)
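As a usage sketch (not from this commit): a test body can collect failing case names and raise the new exception, so test_method() records only the compact failure message instead of a full traceback. The result names below are made up.

from TinyFW import TestCaseFailed

results = {"UT_001_01": True, "UT_001_02": False, "UT_001_03": False}  # hypothetical outcomes
failed = [name for name, ok in results.items() if not ok]
if failed:
    # the constructor formats "Test cases failed: UT_001_02, UT_001_03"
    raise TestCaseFailed(*failed)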


@@ -154,7 +154,7 @@ class Parser(object):
configs = cls.DEFAULT_CONFIG.copy()
if config_file:
with open(config_file, "r") as f:
configs.update(yaml.load(f))
configs.update(yaml.load(f, Loader=yaml.FullLoader))
return configs
@classmethod
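For reference, PyYAML 5.1+ warns when yaml.load() is called without an explicit Loader; FullLoader parses standard YAML but refuses to construct arbitrary Python objects. A minimal sketch with a made-up config snippet:

import yaml

raw = "CaseConfig:\n  - extra_data: 1\n"           # hypothetical filter-config snippet
configs = yaml.load(raw, Loader=yaml.FullLoader)   # no YAMLLoadWarning, no arbitrary objects
print(configs)                                     # {'CaseConfig': [{'extra_data': 1}]}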


@@ -1,6 +1,7 @@
from __future__ import print_function
import sys
import sys
import traceback
_COLOR_CODES = {
"white": u'\033[0m',
@@ -46,3 +47,20 @@ def load_source(name, path):
# importlib.machinery doesn't exist in Python 2 so we will use imp (deprecated in Python 3)
import imp
return imp.load_source(name, path)
def handle_unexpected_exception(junit_test_case, exception):
"""
Helper to log & add junit result details for an unexpected exception encountered
when running a test case.
Should always be called from inside an except: block
"""
traceback.print_exc()
# AssertionError caused by an 'assert' statement has an empty string as its 'str' form
e_str = str(exception) if str(exception) else repr(exception)
junit_test_case.add_failure_info('Unexpected exception: {}\n{}'.format(e_str, traceback.format_exc()))
def format_case_id(case_name, target='esp32', config='default'):
return '{}.{}.{}'.format(target, config, case_name)
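A quick illustration of the new helper (case names are hypothetical); this is the id format shared by the junit reports and the known-failure list:

from Utility import format_case_id

print(format_case_id('UT_001_01'))                                     # esp32.default.UT_001_01
print(format_case_id('UT_001_01', target='esp32s2', config='psram'))   # esp32s2.psram.UT_001_01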


@@ -17,13 +17,12 @@
"""
Test script for unit test case.
"""
import re
import os
import sys
import time
import argparse
import os
import re
import sys
import threading
import time
try:
import TinyFW
@@ -47,9 +46,16 @@ import Env
from DUT import ExpectTimeout
from IDF.IDFApp import UT
from TinyFW import TestCaseFailed
from Utility import format_case_id, handle_unexpected_exception
UT_APP_BOOT_UP_DONE = "Press ENTER to see the list of tests."
STRIP_CONFIG_PATTERN = re.compile(r'(.+?)(_\d+)?$')
# matches e.g.: "rst:0xc (SW_CPU_RESET),boot:0x13 (SPI_FAST_FLASH_BOOT)"
RESET_PATTERN = re.compile(r"(ets [\w]{3}\s+[\d]{1,2} [\d]{4} [\d]{2}:[\d]{2}:[\d]{2}[^()]*\([\w].*?\))")
EXCEPTION_PATTERN = re.compile(r"(Guru Meditation Error: Core\s+\d panic'ed \([\w].*?\))")
ABORT_PATTERN = re.compile(r"(abort\(\) was called at PC 0x[a-fA-F\d]{8} on core \d)")
FINISH_PATTERN = re.compile(r"1 Tests (\d) Failures (\d) Ignored")
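A rough illustration of what two of the patterns above capture, using made-up log lines (the patterns are copied verbatim so the snippet stands alone):

import re

FINISH_PATTERN = re.compile(r"1 Tests (\d) Failures (\d) Ignored")
ABORT_PATTERN = re.compile(r"(abort\(\) was called at PC 0x[a-fA-F\d]{8} on core \d)")

print(FINISH_PATTERN.search("1 Tests 0 Failures 0 Ignored").groups())          # ('0', '0')
print(ABORT_PATTERN.search("abort() was called at PC 0x400d33a9 on core 0"))   # match object
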
@@ -67,11 +73,26 @@ DUT_STARTUP_CHECK_RETRY_COUNT = 5
TEST_HISTORY_CHECK_TIMEOUT = 1
class TestCaseFailed(AssertionError):
pass
def reset_reason_matches(reported_str, expected_str):
known_aliases = {
"_RESET": "_RST",
"POWERON_RESET": "POWERON",
"DEEPSLEEP_RESET": "DSLEEP",
}
if expected_str in reported_str:
return True
for token, alias in known_aliases.items():
if token in expected_str:
alt_expected_str = expected_str.replace(token, alias)
if alt_expected_str in reported_str:
return True
return False
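A worked example of the alias handling in reset_reason_matches just above (the reported string is made up): some ROM logs print the short form of a reset reason while the test case data keeps the long form, and the helper still matches them.

reported = "rst:0x1 (POWERON),boot:0x13 (SPI_FAST_FLASH_BOOT)"  # hypothetical log fragment
print(reset_reason_matches(reported, "POWERON_RESET"))  # True: "POWERON_RESET" -> alias "POWERON"
print(reset_reason_matches(reported, "SW_CPU_RESET"))   # False: no direct or alias match
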
def format_test_case_config(test_case_data):
def format_test_case_config(test_case_data, target='esp32'):
"""
convert the test case data to unified format.
We need the following info to run unit test cases:
@@ -91,6 +112,7 @@ def format_test_case_config(test_case_data):
If config is not specified for test case, then
:param test_case_data: string, list, or a dictionary list
:param target: target
:return: formatted data
"""
@@ -130,6 +152,9 @@ def format_test_case_config(test_case_data):
if "config" not in _case:
_case["config"] = "default"
if 'target' not in _case:
_case['target'] = target
return _case
if not isinstance(test_case_data, list):
@@ -156,6 +181,14 @@ def replace_app_bin(dut, name, new_app_bin):
break
def format_case_name(case):
# we could split cases of the same config into multiple binaries as we have limited ROM space
# so we should regard configs like `default` and `default_2` as the same config
match = STRIP_CONFIG_PATTERN.match(case['config'])
stripped_config_name = match.group(1)
return format_case_id(case['name'], target=case['target'], config=stripped_config_name)
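Illustration with a hypothetical case dict: a config that was split into a second binary, e.g. "default_2", collapses back to "default" before the junit case id is built.

case = {'name': 'UT_017_02', 'config': 'default_2', 'target': 'esp32'}  # hypothetical case
print(STRIP_CONFIG_PATTERN.match(case['config']).group(1))   # 'default'
print(format_case_name(case))                                # 'esp32.default.UT_017_02'
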
def reset_dut(dut):
dut.reset()
# esptool ``run`` cmd takes quite long time.
@@ -175,6 +208,14 @@ def reset_dut(dut):
raise AssertionError("Reset {} ({}) failed!".format(dut.name, dut.port))
def log_test_case(description, test_case, ut_config):
Utility.console_log("Running {} '{}' (config {})".format(description, test_case['name'], ut_config),
color='orange')
Utility.console_log('Tags: %s' % ', '.join('%s=%s' % (k, v) for (k, v) in test_case.items()
if k != 'name' and v is not None),
color='orange')
def run_one_normal_case(dut, one_case, junit_test_case):
reset_dut(dut)
@@ -286,24 +327,17 @@ def run_unit_test_cases(env, extra_data):
for one_case in case_config[ut_config]:
# create junit report test case
junit_test_case = TinyFW.JunitReport.create_test_case("[{}] {}".format(ut_config, one_case["name"]))
junit_test_case = TinyFW.JunitReport.create_test_case(format_case_name(one_case))
try:
run_one_normal_case(dut, one_case, junit_test_case)
except TestCaseFailed:
failed_cases.append(one_case["name"])
failed_cases.append(format_case_name(one_case))
except Exception as e:
junit_test_case.add_failure_info("Unexpected exception: " + str(e))
failed_cases.append(one_case["name"])
handle_unexpected_exception(junit_test_case, e)
failed_cases.append(format_case_name(one_case))
finally:
TinyFW.JunitReport.test_case_finish(junit_test_case)
# raise exception if any case fails
if failed_cases:
Utility.console_log("Failed Cases:", color="red")
for _case_name in failed_cases:
Utility.console_log("\t" + _case_name, color="red")
raise AssertionError("Unit Test Failed")
class Handler(threading.Thread):
@@ -477,26 +511,22 @@ def run_multiple_devices_cases(env, extra_data):
Utility.console_log("Running unit test for config: " + ut_config, "O")
for one_case in case_config[ut_config]:
result = False
junit_test_case = TinyFW.JunitReport.create_test_case("[{}] {}".format(ut_config, one_case["name"]))
junit_test_case = TinyFW.JunitReport.create_test_case(format_case_name(one_case))
try:
result = run_one_multiple_devices_case(duts, ut_config, env, one_case,
one_case.get('app_bin'), junit_test_case)
except TestCaseFailed:
pass # result is False, this is handled by the finally block
except Exception as e:
junit_test_case.add_failure_info("Unexpected exception: " + str(e))
handle_unexpected_exception(junit_test_case, e)
finally:
if result:
Utility.console_log("Success: " + one_case["name"], color="green")
Utility.console_log("Success: " + format_case_name(one_case), color="green")
else:
failed_cases.append(one_case["name"])
Utility.console_log("Failed: " + one_case["name"], color="red")
failed_cases.append(format_case_name(one_case))
Utility.console_log("Failed: " + format_case_name(one_case), color="red")
TinyFW.JunitReport.test_case_finish(junit_test_case)
if failed_cases:
Utility.console_log("Failed Cases:", color="red")
for _case_name in failed_cases:
Utility.console_log("\t" + _case_name, color="red")
raise AssertionError("Unit Test Failed")
def run_one_multiple_stage_case(dut, one_case, junit_test_case):
reset_dut(dut)
@@ -525,7 +555,7 @@ def run_one_multiple_stage_case(dut, one_case, junit_test_case):
result = False
if len(one_case["reset"]) == len(exception_reset_list):
for i, exception in enumerate(exception_reset_list):
if one_case["reset"][i] not in exception:
if not reset_reason_matches(exception, one_case["reset"][i]):
break
else:
result = True
@@ -546,9 +576,9 @@ def run_one_multiple_stage_case(dut, one_case, junit_test_case):
result = result and check_reset()
output = dut.stop_capture_raw_data()
if result:
Utility.console_log("Success: " + one_case["name"], color="green")
Utility.console_log("Success: " + format_case_name(one_case), color="green")
else:
Utility.console_log("Failed: " + one_case["name"], color="red")
Utility.console_log("Failed: " + format_case_name(one_case), color="red")
junit_test_case.add_failure_info(output)
raise TestCaseFailed()
stage_finish.append("break")
@@ -565,7 +595,7 @@ def run_one_multiple_stage_case(dut, one_case, junit_test_case):
# in this scenario reset should not happen
if int(data[1]):
# case ignored
Utility.console_log("Ignored: " + one_case["name"], color="orange")
Utility.console_log("Ignored: " + format_case_name(one_case), color="orange")
junit_test_case.add_skipped_info("ignored")
# only a pass in the last stage is regarded as a real pass
if last_stage():
@@ -604,7 +634,7 @@ def run_one_multiple_stage_case(dut, one_case, junit_test_case):
def run_multiple_stage_cases(env, extra_data):
"""
extra_data can be 2 types of value
1. as dict: Mandantory keys: "name" and "child case num", optional keys: "reset" and others
1. as dict: Mandatory keys: "name" and "child case num", optional keys: "reset" and others
3. as list of string or dict:
[case1, case2, case3, {"name": "restart from PRO CPU", "child case num": 2}, ...]
@@ -628,27 +658,19 @@ def run_multiple_stage_cases(env, extra_data):
dut.start_app()
for one_case in case_config[ut_config]:
junit_test_case = TinyFW.JunitReport.create_test_case("[{}] {}".format(ut_config, one_case["name"]))
junit_test_case = TinyFW.JunitReport.create_test_case(format_case_name(one_case))
try:
run_one_multiple_stage_case(dut, one_case, junit_test_case)
except TestCaseFailed:
failed_cases.append(one_case["name"])
failed_cases.append(format_case_name(one_case))
except Exception as e:
junit_test_case.add_failure_info("Unexpected exception: " + str(e))
failed_cases.append(one_case["name"])
handle_unexpected_exception(junit_test_case, e)
failed_cases.append(format_case_name(one_case))
finally:
TinyFW.JunitReport.test_case_finish(junit_test_case)
# raise exception if any case fails
if failed_cases:
Utility.console_log("Failed Cases:", color="red")
for _case_name in failed_cases:
Utility.console_log("\t" + _case_name, color="red")
raise AssertionError("Unit Test Failed")
def detect_update_unit_test_info(env, extra_data, app_bin):
case_config = format_test_case_config(extra_data)
for ut_config in case_config:
@@ -719,20 +741,16 @@ if __name__ == '__main__':
type=int,
default=1
)
parser.add_argument("--env_config_file", "-e",
help="test env config file",
default=None
)
parser.add_argument("--app_bin", "-b",
help="application binary file for flashing the chip",
default=None
)
parser.add_argument(
'test',
help='Comma separated list of <option>:<argument> where option can be "name" (default), "child case num", \
"config", "timeout".',
nargs='+'
)
parser.add_argument('--env_config_file', '-e',
help='test env config file',
default=None)
parser.add_argument('--app_bin', '-b',
help='application binary file for flashing the chip',
default=None)
parser.add_argument('test',
help='Comma separated list of <option>:<argument> where option can be "name" (default), '
'"child case num", "config", "timeout".',
nargs='+')
args = parser.parse_args()
list_of_dicts = []
for test in args.test:
@@ -742,7 +760,7 @@ if __name__ == '__main__':
if len(test_item) == 0:
continue
pair = test_item.split(r':')
if len(pair) == 1 or pair[0] is 'name':
if len(pair) == 1 or pair[0] == 'name':
test_dict['name'] = pair[0]
elif len(pair) == 2:
if pair[0] == 'timeout' or pair[0] == 'child case num':
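For reference, a hypothetical invocation of this script (the script name, paths and case id are assumptions): each positional argument is a comma-separated list of option:argument items as the help string above describes; a bare item with no colon is taken as the case name, while the remaining option:argument items fill in the other keys of the test dict.

python unit_test.py -e env_config.yml -b build/unit-test-app.bin "UT_001_01,config:psram,timeout:30"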