Merge branch 'feature/skip_known_failure_cases_v4.0' into 'release/v4.0'

CI: Ignore Known Failure Cases Result (v4.0)

See merge request espressif/esp-idf!14651
Zim Kalinowski 2021-08-10 02:29:53 +00:00
commit eeb4dd74c6
6 changed files with 167 additions and 91 deletions

View File

@ -42,9 +42,11 @@
# clone test env configs
- ./tools/ci/retry_failed.sh git clone $TEST_ENV_CONFIG_REPO
- python $CHECKOUT_REF_SCRIPT ci-test-runner-configs ci-test-runner-configs
- cd tools/ci/python_packages/tiny_test_fw/bin
# git clone the known failure cases repo, run test
- ./tools/ci/retry_failed.sh git clone $KNOWN_FAILURE_CASES_REPO known_failure_cases
# run test
- python Runner.py $TEST_CASE_PATH -c $CONFIG_FILE -e $ENV_FILE
- cd tools/ci/python_packages/tiny_test_fw/bin
- python Runner.py $TEST_CASE_PATH -c $CONFIG_FILE -e $ENV_FILE --known_failure_cases_file $CI_PROJECT_DIR/known_failure_cases/known_failure_cases.txt
.unit_test_template:
extends: .example_test_template
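
For reference, the file passed via --known_failure_cases_file is a plain-text list of test case names, one per line. Judging from the parsing added to Runner.py below, '#' starts a comment and fnmatch-style wildcards are accepted. A hypothetical example (the entries are invented for illustration):

    # temporarily ignored cases, one name or wildcard pattern per line
    esp32.default.test_wifi_station_example    # IDF-0000: flaky on a specific runner
    esp32.psram*.UT_001_*                      # wildcard covering a whole group of cases
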

View File

@ -13,19 +13,30 @@
# limitations under the License.
""" Interface for test cases. """
import os
import time
import traceback
import functools
import os
import socket
import time
from datetime import datetime
import junit_xml
from . import Env
from . import DUT
from . import App
from . import Utility
from . import DUT, App, Env, Utility
from .Utility import format_case_id
class TestCaseFailed(AssertionError):
def __init__(self, *cases):
"""
Raise this exception if one or more test cases fail in a 'normal' way (i.e. the test runs but fails, with no unexpected exceptions).
This avoids dumping the Python stack trace, on the assumption that the junit error info and the full job log already contain
enough information for a developer to debug.
The 'cases' argument gives the names of one or more failed test cases.
"""
message = 'Test case{} failed: {}'.format('s' if len(cases) > 1 else '', ', '.join(str(c) for c in cases))
super(TestCaseFailed, self).__init__(message)
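
A minimal usage sketch (with invented case names), assuming a test script collects failures and raises once at the end instead of letting an arbitrary exception escape:

    failed_cases = ['esp32.default.test_a', 'esp32.default.test_b']   # hypothetical names
    if failed_cases:
        raise TestCaseFailed(*failed_cases)
    # message: "Test cases failed: esp32.default.test_a, esp32.default.test_b"
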
class DefaultEnvConfig(object):
@ -87,8 +98,8 @@ class JunitReport(object):
@classmethod
def output_report(cls, junit_file_path):
""" Output current test result to file. """
with open(os.path.join(junit_file_path, cls.JUNIT_FILE_NAME), "w") as f:
cls.JUNIT_TEST_SUITE.to_file(f, [cls.JUNIT_TEST_SUITE], prettyprint=False)
with open(os.path.join(junit_file_path, cls.JUNIT_FILE_NAME), 'w') as f:
junit_xml.to_xml_report_file(f, [cls.JUNIT_TEST_SUITE], prettyprint=False)
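
The change above replaces the old TestSuite.to_file() call with the module-level junit_xml.to_xml_report_file(), the API offered by junit-xml 2.x. A standalone sketch of the new call (suite contents and output path are made up):

    import junit_xml

    suite = junit_xml.TestSuite('UnitTest', [junit_xml.TestCase('esp32.default.demo_case')])
    with open('XUNIT_RESULT.xml', 'w') as f:                  # hypothetical output file
        junit_xml.to_xml_report_file(f, [suite], prettyprint=False)
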
@classmethod
def get_current_test_case(cls):
@ -184,21 +195,20 @@ def test_method(**kwargs):
env_inst = Env.Env(**env_config)
# prepare for xunit test results
junit_file_path = env_inst.app_cls.get_log_folder(env_config["test_suite_name"])
junit_test_case = JunitReport.create_test_case(case_info["ID"])
junit_file_path = env_inst.app_cls.get_log_folder(env_config['test_suite_name'])
junit_test_case = JunitReport.create_test_case(format_case_id(case_info['ID'],
target=case_info['chip'].lower()))
result = False
try:
Utility.console_log("starting running test: " + test_func.__name__, color="green")
# execute test function
test_func(env_inst, extra_data)
# if finish without exception, test result is True
result = True
except TestCaseFailed as e:
junit_test_case.add_failure_info(str(e))
except Exception as e:
# handle all the exceptions here
traceback.print_exc()
# log failure
junit_test_case.add_failure_info(str(e) + ":\r\n" + traceback.format_exc())
Utility.handle_unexpected_exception(junit_test_case, e)
finally:
# do close all DUTs, if result is False then print DUT debug info
close_errors = env_inst.close(dut_debug=(not result))
@ -210,7 +220,7 @@ def test_method(**kwargs):
# and raise exception in DUT close to fail test case if reset detected.
if close_errors:
for error in close_errors:
junit_test_case.add_failure_info('env close error: {}'.format(error))
junit_test_case.add_failure_info(str(error))
result = False
if not case_info["junit_report_by_case"]:
JunitReport.test_case_finish(junit_test_case)

View File

@ -1,6 +1,7 @@
from __future__ import print_function
import sys
import sys
import traceback
_COLOR_CODES = {
"white": u'\033[0m',
@ -58,3 +59,20 @@ def load_source(name, path):
ret = imp.load_source(name, path)
__LOADED_MODULES[name] = ret
return ret
def handle_unexpected_exception(junit_test_case, exception):
"""
Helper to log & add junit result details for an unexpected exception encountered
when running a test case.
Should always be called from inside an except: block
"""
traceback.print_exc()
# An AssertionError raised by a plain 'assert' statement has an empty string as its 'str' form
e_str = str(exception) if str(exception) else repr(exception)
junit_test_case.add_failure_info('Unexpected exception: {}\n{}'.format(e_str, traceback.format_exc()))
def format_case_id(case_name, target='esp32', config='default'):
return '{}.{}.{}'.format(target, config, case_name)
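
A rough sketch of how these two helpers fit together (the test body and case name are invented; the import paths match the ones used elsewhere in this commit):

    from tiny_test_fw.TinyFW import JunitReport
    from tiny_test_fw.Utility import format_case_id, handle_unexpected_exception

    def run_test():
        raise RuntimeError('boom')                            # hypothetical failing test body

    # the junit test case gets the name 'esp32s2.release.test_dummy'
    junit_test_case = JunitReport.create_test_case(format_case_id('test_dummy', target='esp32s2', config='release'))
    try:
        run_test()
    except Exception as e:
        handle_unexpected_exception(junit_test_case, e)       # prints the traceback and records the failure info
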

View File

@ -21,13 +21,14 @@ Command line interface to run test cases from a given path.
Use ``python Runner.py test_case_path -c config_file -e env_config_file`` to run test cases.
"""
import argparse
import os
import sys
import argparse
import threading
from fnmatch import fnmatch
from tiny_test_fw import TinyFW
from tiny_test_fw.Utility import SearchCases, CaseConfig
from tiny_test_fw.TinyFW import JunitReport, set_default_config
from tiny_test_fw.Utility import CaseConfig, SearchCases, console_log
class Runner(threading.Thread):
@ -37,29 +38,64 @@ class Runner(threading.Thread):
:param env_config_file: env config file
"""
def __init__(self, test_case, case_config, env_config_file=None):
def __init__(self, test_case, case_config, env_config_file=None, known_failure_cases_file=None):
super(Runner, self).__init__()
self.setDaemon(True)
if case_config:
test_suite_name = os.path.splitext(os.path.basename(case_config))[0]
else:
test_suite_name = "TestRunner"
TinyFW.set_default_config(env_config_file=env_config_file, test_suite_name=test_suite_name)
set_default_config(env_config_file=env_config_file, test_suite_name=test_suite_name)
test_methods = SearchCases.Search.search_test_cases(test_case)
self.test_cases = CaseConfig.Parser.apply_config(test_methods, case_config)
self.test_result = []
self.known_failure_cases = self._get_config_cases(known_failure_cases_file)
@staticmethod
def _get_config_cases(config_file):
res = set()
if not config_file or not os.path.isfile(config_file):
return res
for line in open(config_file).readlines():
without_comments = line.split("#")[0].strip()
if without_comments:
res.add(without_comments)
return res
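
Assuming the hypothetical known_failure_cases.txt sketched earlier, this parser would return a set such as:

    known = Runner._get_config_cases('known_failure_cases.txt')      # path as passed on the command line
    # comments and blank lines are dropped, leaving e.g.:
    # {'esp32.default.test_wifi_station_example', 'esp32.psram*.UT_001_*'}
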
def run(self):
for case in self.test_cases:
result = case.run()
self.test_result.append(result)
case.run()
@staticmethod
def is_known_issue(tc_name, known_cases):
for case in known_cases:
if tc_name == case:
return True
if fnmatch(tc_name, case):
return True
return False
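
A quick illustration of the matching rule (case names and patterns are hypothetical): an exact entry or an fnmatch-style wildcard marks a failing case as a known issue.

    known = {'esp32.default.test_wifi_station_example', 'esp32.psram*.UT_001_*'}
    Runner.is_known_issue('esp32.psram_2.UT_001_05', known)                  # True (wildcard match)
    Runner.is_known_issue('esp32.default.test_wifi_station_example', known)  # True (exact match)
    Runner.is_known_issue('esp32.default.test_ota', known)                   # False -> counted as a real failure
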
def get_test_result(self):
return self.test_result and all(self.test_result)
_res = True
console_log("Test Results:")
for tc in JunitReport.JUNIT_TEST_SUITE.test_cases:
if tc.failures:
if self.is_known_issue(tc.name, self.known_failure_cases):
console_log(" Known Failure: " + tc.name, color="orange")
else:
console_log(" Test Fail: " + tc.name, color="red")
_res = False
else:
console_log(" Test Succeed: " + tc.name, color="green")
return _res
if __name__ == '__main__':
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("test_case",
help="test case folder or file")
@ -67,15 +103,17 @@ if __name__ == '__main__':
help="case filter/config file")
parser.add_argument("--env_config_file", "-e", default=None,
help="test env config file")
parser.add_argument("--known_failure_cases_file", default=None,
help="known failure cases file")
args = parser.parse_args()
runner = Runner(args.test_case, args.case_config, args.env_config_file)
runner = Runner(args.test_case, args.case_config, args.env_config_file, args.known_failure_cases_file)
runner.start()
while True:
try:
runner.join(1)
if not runner.isAlive():
if not runner.is_alive():
break
except KeyboardInterrupt:
print("exit by Ctrl-C")

View File

@ -19,10 +19,6 @@ from .IDFApp import IDFApp, Example, UT # noqa: export all Apps for users
from .IDFDUT import IDFDUT # noqa: export DUTs for users
def format_case_id(chip, case_name):
return "{}.{}".format(chip, case_name)
def idf_example_test(app=Example, dut=IDFDUT, chip="ESP32", module="examples", execution_time=1,
level="example", erase_nvs=True, config_name=None, **kwargs):
"""
@ -50,7 +46,6 @@ def idf_example_test(app=Example, dut=IDFDUT, chip="ESP32", module="examples", e
def test(func):
test_func = original_method(func)
test_func.case_info["ID"] = format_case_id(chip, test_func.case_info["name"])
return test_func
return test
@ -82,7 +77,6 @@ def idf_unit_test(app=UT, dut=IDFDUT, chip="ESP32", module="unit-test", executio
def test(func):
test_func = original_method(func)
test_func.case_info["ID"] = format_case_id(chip, test_func.case_info["name"])
return test_func
return test

View File

@ -18,17 +18,23 @@
Test script for unit test case.
"""
import re
import time
import argparse
import re
import threading
import time
from tiny_test_fw import TinyFW, Utility, Env, DUT
import ttfw_idf
from tiny_test_fw import DUT, Env, TinyFW, Utility
from tiny_test_fw.TinyFW import TestCaseFailed
from tiny_test_fw.Utility import format_case_id, handle_unexpected_exception
UT_APP_BOOT_UP_DONE = "Press ENTER to see the list of tests."
STRIP_CONFIG_PATTERN = re.compile(r'(.+?)(_\d+)?$')
# matches e.g.: "rst:0xc (SW_CPU_RESET),boot:0x13 (SPI_FAST_FLASH_BOOT)"
RESET_PATTERN = re.compile(r"(ets [\w]{3}\s+[\d]{1,2} [\d]{4} [\d]{2}:[\d]{2}:[\d]{2}[^()]*\([\w].*?\))")
EXCEPTION_PATTERN = re.compile(r"(Guru Meditation Error: Core\s+\d panic'ed \([\w].*?\))")
ABORT_PATTERN = re.compile(r"(abort\(\) was called at PC 0x[a-fA-F\d]{8} on core \d)")
FINISH_PATTERN = re.compile(r"1 Tests (\d) Failures (\d) Ignored")
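
As a sanity check of FINISH_PATTERN against the kind of summary line Unity prints at the end of a case (the sample line below is constructed, not taken from a real log):

    import re

    FINISH_PATTERN = re.compile(r'1 Tests (\d) Failures (\d) Ignored')       # same pattern as above
    m = FINISH_PATTERN.search('1 Tests 0 Failures 0 Ignored: OK')
    assert m and m.group(1) == '0' and m.group(2) == '0'                     # zero failures, zero ignored -> passed
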
@ -48,11 +54,26 @@ DUT_STARTUP_CHECK_RETRY_COUNT = 5
TEST_HISTORY_CHECK_TIMEOUT = 2
class TestCaseFailed(AssertionError):
pass
def reset_reason_matches(reported_str, expected_str):
known_aliases = {
"_RESET": "_RST",
"POWERON_RESET": "POWERON",
"DEEPSLEEP_RESET": "DSLEEP",
}
if expected_str in reported_str:
return True
for token, alias in known_aliases.items():
if token in expected_str:
alt_expected_str = expected_str.replace(token, alias)
if alt_expected_str in reported_str:
return True
return False
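
Two hypothetical checks showing the intent of the alias table: a test case spec may use the long IDF reset-reason names while the chip's boot log prints the short forms.

    reset_reason_matches('rst:0x5 (DSLEEP),boot:0x13 (SPI_FAST_FLASH_BOOT)', 'DEEPSLEEP_RESET')   # True, via the DSLEEP alias
    reset_reason_matches('rst:0x1 (POWERON),boot:0x13 (SPI_FAST_FLASH_BOOT)', 'SW_CPU_RESET')     # False, no alias applies
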
def format_test_case_config(test_case_data):
def format_test_case_config(test_case_data, target='esp32'):
"""
Convert the test case data to a unified format.
We need the following info to run unit test cases:
@ -72,6 +93,7 @@ def format_test_case_config(test_case_data):
If config is not specified for test case, then
:param test_case_data: string, list, or a dictionary list
:param target: target chip name (defaults to 'esp32')
:return: formatted data
"""
@ -111,6 +133,9 @@ def format_test_case_config(test_case_data):
if "config" not in _case:
_case["config"] = "default"
if 'target' not in _case:
_case['target'] = target
return _case
if not isinstance(test_case_data, list):
@ -138,7 +163,11 @@ def replace_app_bin(dut, name, new_app_bin):
def format_case_name(case):
return "[{}] {}".format(case["config"], case["name"])
# cases of the same config may be split into multiple binaries because of the limited ROM space,
# so configs like `default` and `default_2` should be regarded as the same config
match = STRIP_CONFIG_PATTERN.match(case['config'])
stripped_config_name = match.group(1)
return format_case_id(case['name'], target=case['target'], config=stripped_config_name)
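
For example (a made-up case dict), a case built under a split config such as `default_2` is reported under the stripped config name, so its junit ID stays stable no matter which binary it ended up in:

    case = {'name': 'UT_mem_leak', 'config': 'default_2', 'target': 'esp32'}   # hypothetical case dict
    format_case_name(case)                                                     # -> 'esp32.default.UT_mem_leak'
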
def reset_dut(dut):
@ -165,6 +194,14 @@ def reset_dut(dut):
raise AssertionError("Reset {} ({}) failed!".format(dut.name, dut.port))
def log_test_case(description, test_case, ut_config):
Utility.console_log("Running {} '{}' (config {})".format(description, test_case['name'], ut_config),
color='orange')
Utility.console_log('Tags: %s' % ', '.join('%s=%s' % (k, v) for (k, v) in test_case.items()
if k != 'name' and v is not None),
color='orange')
def run_one_normal_case(dut, one_case, junit_test_case):
reset_dut(dut)
@ -277,14 +314,14 @@ def run_unit_test_cases(env, extra_data):
for one_case in case_config[ut_config]:
performance_items = []
# create junit report test case
junit_test_case = TinyFW.JunitReport.create_test_case("[{}] {}".format(ut_config, one_case["name"]))
junit_test_case = TinyFW.JunitReport.create_test_case(format_case_name(one_case))
try:
run_one_normal_case(dut, one_case, junit_test_case)
performance_items = dut.get_performance_items()
except TestCaseFailed:
failed_cases.append(format_case_name(one_case))
except Exception as e:
junit_test_case.add_failure_info("Unexpected exception: " + str(e))
handle_unexpected_exception(junit_test_case, e)
failed_cases.append(format_case_name(one_case))
finally:
TinyFW.JunitReport.update_performance(performance_items)
@ -292,13 +329,6 @@ def run_unit_test_cases(env, extra_data):
# close DUT when finish running all cases for one config
env.close_dut(dut.name)
# raise exception if any case fails
if failed_cases:
Utility.console_log("Failed Cases:", color="red")
for _case_name in failed_cases:
Utility.console_log("\t" + _case_name, color="red")
raise AssertionError("Unit Test Failed")
class Handler(threading.Thread):
@ -479,12 +509,14 @@ def run_multiple_devices_cases(env, extra_data):
Utility.console_log("Running unit test for config: " + ut_config, "O")
for one_case in case_config[ut_config]:
result = False
junit_test_case = TinyFW.JunitReport.create_test_case("[{}] {}".format(ut_config, one_case["name"]))
junit_test_case = TinyFW.JunitReport.create_test_case(format_case_name(one_case))
try:
result = run_one_multiple_devices_case(duts, ut_config, env, one_case,
one_case.get('app_bin'), junit_test_case)
except TestCaseFailed:
pass # result is False, this is handled by the finally block
except Exception as e:
junit_test_case.add_failure_info("Unexpected exception: " + str(e))
handle_unexpected_exception(junit_test_case, e)
finally:
if result:
Utility.console_log("Success: " + format_case_name(one_case), color="green")
@ -497,12 +529,6 @@ def run_multiple_devices_cases(env, extra_data):
env.close_dut(dut)
duts = {}
if failed_cases:
Utility.console_log("Failed Cases:", color="red")
for _case_name in failed_cases:
Utility.console_log("\t" + _case_name, color="red")
raise AssertionError("Unit Test Failed")
def run_one_multiple_stage_case(dut, one_case, junit_test_case):
reset_dut(dut)
@ -531,7 +557,7 @@ def run_one_multiple_stage_case(dut, one_case, junit_test_case):
result = False
if len(one_case["reset"]) == len(exception_reset_list):
for i, exception in enumerate(exception_reset_list):
if one_case["reset"][i] not in exception:
if not reset_reason_matches(exception, one_case["reset"][i]):
break
else:
result = True
@ -610,7 +636,7 @@ def run_one_multiple_stage_case(dut, one_case, junit_test_case):
def run_multiple_stage_cases(env, extra_data):
"""
extra_data can be 2 types of value
1. as dict: Mandantory keys: "name" and "child case num", optional keys: "reset" and others
1. as dict: Mandatory keys: "name" and "child case num", optional keys: "reset" and others
3. as list of string or dict:
[case1, case2, case3, {"name": "restart from PRO CPU", "child case num": 2}, ...]
@ -635,14 +661,14 @@ def run_multiple_stage_cases(env, extra_data):
for one_case in case_config[ut_config]:
performance_items = []
junit_test_case = TinyFW.JunitReport.create_test_case("[{}] {}".format(ut_config, one_case["name"]))
junit_test_case = TinyFW.JunitReport.create_test_case(format_case_name(one_case))
try:
run_one_multiple_stage_case(dut, one_case, junit_test_case)
performance_items = dut.get_performance_items()
except TestCaseFailed:
failed_cases.append(format_case_name(one_case))
except Exception as e:
junit_test_case.add_failure_info("Unexpected exception: " + str(e))
handle_unexpected_exception(junit_test_case, e)
failed_cases.append(format_case_name(one_case))
finally:
TinyFW.JunitReport.update_performance(performance_items)
@ -650,16 +676,8 @@ def run_multiple_stage_cases(env, extra_data):
# close DUT when finish running all cases for one config
env.close_dut(dut.name)
# raise exception if any case fails
if failed_cases:
Utility.console_log("Failed Cases:", color="red")
for _case_name in failed_cases:
Utility.console_log("\t" + _case_name, color="red")
raise AssertionError("Unit Test Failed")
def detect_update_unit_test_info(env, extra_data, app_bin):
case_config = format_test_case_config(extra_data)
for ut_config in case_config:
@ -730,20 +748,16 @@ if __name__ == '__main__':
type=int,
default=1
)
parser.add_argument("--env_config_file", "-e",
help="test env config file",
default=None
)
parser.add_argument("--app_bin", "-b",
help="application binary file for flashing the chip",
default=None
)
parser.add_argument(
'test',
help='Comma separated list of <option>:<argument> where option can be "name" (default), "child case num", \
"config", "timeout".',
nargs='+'
)
parser.add_argument('--env_config_file', '-e',
help='test env config file',
default=None)
parser.add_argument('--app_bin', '-b',
help='application binary file for flashing the chip',
default=None)
parser.add_argument('test',
help='Comma separated list of <option>:<argument> where option can be "name" (default), '
'"child case num", "config", "timeout".',
nargs='+')
args = parser.parse_args()
list_of_dicts = []
for test in args.test:
@ -753,7 +767,7 @@ if __name__ == '__main__':
if len(test_item) == 0:
continue
pair = test_item.split(r':')
if len(pair) == 1 or pair[0] is 'name':
if len(pair) == 1 or pair[0] == 'name':
test_dict['name'] = pair[0]
elif len(pair) == 2:
if pair[0] == 'timeout' or pair[0] == 'child case num':