Merge branch 'feature/skip_known_failure_cases_v4.2' into 'release/v4.2'

CI: Ignore Known Failure Cases Result (v4.2)

See merge request espressif/esp-idf!14649
Zim Kalinowski 2021-08-10 02:23:31 +00:00
commit 059a3fa1be
6 changed files with 150 additions and 99 deletions

View File

@@ -43,9 +43,11 @@
# clone test env configs
- ./tools/ci/retry_failed.sh git clone $TEST_ENV_CONFIG_REPO
- python $CHECKOUT_REF_SCRIPT ci-test-runner-configs ci-test-runner-configs
- cd tools/ci/python_packages/tiny_test_fw/bin
# git clone the known failure cases repo, run test
- ./tools/ci/retry_failed.sh git clone $KNOWN_FAILURE_CASES_REPO known_failure_cases
# run test
- python Runner.py $TEST_CASE_PATH -c $CONFIG_FILE -e $ENV_FILE
- cd tools/ci/python_packages/tiny_test_fw/bin
- python Runner.py $TEST_CASE_PATH -c $CONFIG_FILE -e $ENV_FILE --known_failure_cases_file $CI_PROJECT_DIR/known_failure_cases/known_failure_cases.txt
.example_debug_template:
stage: target_test
@@ -82,9 +84,11 @@
# clone test env configs
- ./tools/ci/retry_failed.sh git clone $TEST_ENV_CONFIG_REPO
- python $CHECKOUT_REF_SCRIPT ci-test-runner-configs ci-test-runner-configs
- cd tools/ci/python_packages/tiny_test_fw/bin
# git clone the known failure cases repo, run test
- ./tools/ci/retry_failed.sh git clone $KNOWN_FAILURE_CASES_REPO known_failure_cases
# run test
- python Runner.py $TEST_CASE_PATH -c $CONFIG_FILE -e $ENV_FILE
- cd tools/ci/python_packages/tiny_test_fw/bin
- python Runner.py $TEST_CASE_PATH -c $CONFIG_FILE -e $ENV_FILE --known_failure_cases_file $CI_PROJECT_DIR/known_failure_cases/known_failure_cases.txt
.test_app_template:
extends: .example_test_template
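
For reference, the known failure cases file passed to Runner.py via --known_failure_cases_file is a plain-text list; judging from the parser added to Runner.py further down, blank lines are skipped, anything after '#' is a comment, and each remaining entry is either an exact JUnit case name or an fnmatch-style pattern. A hypothetical example (these case names are illustrative only, not taken from the real repository):

    # known_failure_cases.txt (hypothetical contents)
    esp32.default.UT_001             # exact case name
    esp32.psram.*                    # fnmatch pattern: every case built with the psram config
    *.default.test_wifi_reconnect    # the same case across all targets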

View File

@@ -13,19 +13,30 @@
# limitations under the License.
""" Interface for test cases. """
import os
import time
import traceback
import functools
import os
import socket
import time
from datetime import datetime
import junit_xml
from . import Env
from . import DUT
from . import App
from . import Utility
from . import DUT, App, Env, Utility
from .Utility import format_case_id
class TestCaseFailed(AssertionError):
def __init__(self, *cases):
"""
Raise this exception if one or more test cases fail in a 'normal' way (i.e. the test runs but fails, with no unexpected exceptions).
This will avoid dumping the Python stack trace, because the assumption is that the JUnit error info and the full job log
already have enough information for a developer to debug.
The 'cases' argument is the names of one or more test cases.
"""
message = 'Test case{} failed: {}'.format('s' if len(cases) > 1 else '', ', '.join(str(c) for c in cases))
super(TestCaseFailed, self).__init__(self, message)
class DefaultEnvConfig(object):
@@ -87,8 +98,8 @@ class JunitReport(object):
@classmethod
def output_report(cls, junit_file_path):
""" Output current test result to file. """
with open(os.path.join(junit_file_path, cls.JUNIT_FILE_NAME), "w") as f:
cls.JUNIT_TEST_SUITE.to_file(f, [cls.JUNIT_TEST_SUITE], prettyprint=False)
with open(os.path.join(junit_file_path, cls.JUNIT_FILE_NAME), 'w') as f:
junit_xml.to_xml_report_file(f, [cls.JUNIT_TEST_SUITE], prettyprint=False)
@classmethod
def get_current_test_case(cls):
@@ -184,21 +195,20 @@ def test_method(**kwargs):
env_inst = Env.Env(**env_config)
# prepare for xunit test results
junit_file_path = env_inst.app_cls.get_log_folder(env_config["test_suite_name"])
junit_test_case = JunitReport.create_test_case(case_info["ID"])
junit_file_path = env_inst.app_cls.get_log_folder(env_config['test_suite_name'])
junit_test_case = JunitReport.create_test_case(format_case_id(case_info['ID'],
target=case_info['chip'].lower()))
result = False
try:
Utility.console_log("starting running test: " + test_func.__name__, color="green")
# execute test function
test_func(env_inst, extra_data)
# if finish without exception, test result is True
result = True
except TestCaseFailed as e:
junit_test_case.add_failure_info(str(e))
except Exception as e:
# handle all the exceptions here
traceback.print_exc()
# log failure
junit_test_case.add_failure_info(str(e) + ":\r\n" + traceback.format_exc())
Utility.handle_unexpected_exception(junit_test_case, e)
finally:
# do close all DUTs, if result is False then print DUT debug info
close_errors = env_inst.close(dut_debug=(not result))
@@ -210,7 +220,7 @@ def test_method(**kwargs):
# and raise exception in DUT close to fail test case if reset detected.
if close_errors:
for error in close_errors:
junit_test_case.add_failure_info("env close error: ".format(error))
junit_test_case.add_failure_info(str(error))
result = False
if not case_info["junit_report_by_case"]:
JunitReport.test_case_finish(junit_test_case)
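
A rough sketch of how the new TestCaseFailed exception and the shared format_case_id helper fit together (the case names and the report() wrapper below are made up for illustration):

    from tiny_test_fw.TinyFW import TestCaseFailed
    from tiny_test_fw.Utility import format_case_id

    def report(failed_names):
        # format_case_id('UT_001', target='esp32', config='psram') -> 'esp32.psram.UT_001'
        failed_ids = [format_case_id(n, target='esp32', config='psram') for n in failed_names]
        if failed_ids:
            # message: "Test cases failed: esp32.psram.UT_001, ..."; because TestCaseFailed
            # derives from AssertionError, test_method() records it via add_failure_info()
            # instead of dumping a full Python traceback into the job log
            raise TestCaseFailed(*failed_ids)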

View File

@@ -1,7 +1,8 @@
from __future__ import print_function
import os.path
import sys
import traceback
_COLOR_CODES = {
"white": u'\033[0m',
@@ -73,3 +74,20 @@ def load_source(path):
sys.path.remove(dir)
__LOADED_MODULES[path] = ret
return ret
def handle_unexpected_exception(junit_test_case, exception):
"""
Helper to log & add junit result details for an unexpected exception encountered
when running a test case.
Should always be called from inside an except: block
"""
traceback.print_exc()
# AssertionError caused by an 'assert' statement has an empty string as its 'str' form
e_str = str(exception) if str(exception) else repr(exception)
junit_test_case.add_failure_info('Unexpected exception: {}\n{}'.format(e_str, traceback.format_exc()))
def format_case_id(case_name, target='esp32', config='default'):
return '{}.{}.{}'.format(target, config, case_name)
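
A minimal usage sketch for the new helper, assuming it is called from a test wrapper similar to TinyFW.test_method (run_the_case and the case name are placeholders invented for this example):

    from tiny_test_fw import TinyFW, Utility

    def run_the_case():
        # placeholder for the actual test body (any callable that may raise)
        assert 2 + 2 == 4

    junit_test_case = TinyFW.JunitReport.create_test_case('esp32.default.example_case')
    try:
        run_the_case()
    except Exception as e:
        # prints the traceback and attaches 'Unexpected exception: ...' to the JUnit entry;
        # must run inside an except block because it relies on traceback.print_exc()/format_exc()
        Utility.handle_unexpected_exception(junit_test_case, e)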

View File

@@ -21,13 +21,14 @@ Command line interface to run test cases from a given path.
Use ``python Runner.py test_case_path -c config_file -e env_config_file`` to run test cases.
"""
import argparse
import os
import sys
import argparse
import threading
from fnmatch import fnmatch
from tiny_test_fw import TinyFW
from tiny_test_fw.Utility import SearchCases, CaseConfig
from tiny_test_fw.TinyFW import JunitReport, set_default_config
from tiny_test_fw.Utility import CaseConfig, SearchCases, console_log
class Runner(threading.Thread):
@@ -37,28 +38,64 @@ class Runner(threading.Thread):
:param env_config_file: env config file
"""
def __init__(self, test_case, case_config, env_config_file=None):
def __init__(self, test_case, case_config, env_config_file=None, known_failure_cases_file=None):
super(Runner, self).__init__()
self.setDaemon(True)
if case_config:
test_suite_name = os.path.splitext(os.path.basename(case_config))[0]
else:
test_suite_name = "TestRunner"
TinyFW.set_default_config(env_config_file=env_config_file, test_suite_name=test_suite_name)
set_default_config(env_config_file=env_config_file, test_suite_name=test_suite_name)
test_methods = SearchCases.Search.search_test_cases(test_case)
self.test_cases = CaseConfig.Parser.apply_config(test_methods, case_config)
self.test_result = []
self.known_failure_cases = self._get_config_cases(known_failure_cases_file)
@staticmethod
def _get_config_cases(config_file):
res = set()
if not config_file or not os.path.isfile(config_file):
return res
for line in open(config_file).readlines():
if not line:
continue
if not line.strip():
continue
without_comments = line.split("#")[0].strip()
if without_comments:
res.add(without_comments)
return res
def run(self):
for case in self.test_cases:
result = case.run()
self.test_result.append(result)
case.run()
@staticmethod
def is_known_issue(tc_name, known_cases):
for case in known_cases:
if tc_name == case:
return True
if fnmatch(tc_name, case):
return True
return False
def get_test_result(self):
return self.test_result and all(self.test_result)
_res = True
console_log("Test Results:")
for tc in JunitReport.JUNIT_TEST_SUITE.test_cases:
if tc.failures:
if self.is_known_issue(tc.name, self.known_failure_cases):
console_log(" Known Failure: " + tc.name, color="orange")
else:
console_log(" Test Fail: " + tc.name, color="red")
_res = False
else:
console_log(" Test Succeed: " + tc.name, color="green")
return _res
if __name__ == '__main__':
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("test_case",
@@ -67,15 +104,17 @@ if __name__ == '__main__':
help="case filter/config file")
parser.add_argument("--env_config_file", "-e", default=None,
help="test env config file")
parser.add_argument("--known_failure_cases_file", default=None,
help="known failure cases file")
args = parser.parse_args()
runner = Runner(args.test_case, args.case_config, args.env_config_file)
runner = Runner(args.test_case, args.case_config, args.env_config_file, args.known_failure_cases_file)
runner.start()
while True:
try:
runner.join(1)
if not runner.isAlive():
if not runner.is_alive():
break
except KeyboardInterrupt:
print("exit by Ctrl-C")

View File

@@ -20,10 +20,6 @@ from .IDFDUT import IDFDUT, ESP32DUT, ESP32S2DUT, ESP8266DUT, ESP32QEMUDUT # no
from .DebugUtils import OCDProcess, GDBProcess, TelnetProcess, CustomProcess # noqa: export DebugUtils for users
def format_case_id(chip, case_name):
return "{}.{}".format(chip, case_name)
def idf_example_test(app=Example, dut=IDFDUT, chip="ESP32", module="examples", execution_time=1,
level="example", erase_nvs=True, config_name=None, **kwargs):
"""
@@ -51,7 +47,6 @@ def idf_example_test(app=Example, dut=IDFDUT, chip="ESP32", module="examples", e
def test(func):
test_func = original_method(func)
test_func.case_info["ID"] = format_case_id(chip, test_func.case_info["name"])
return test_func
return test
@@ -83,7 +78,6 @@ def idf_unit_test(app=UT, dut=IDFDUT, chip="ESP32", module="unit-test", executio
def test(func):
test_func = original_method(func)
test_func.case_info["ID"] = format_case_id(chip, test_func.case_info["name"])
return test_func
return test
@@ -118,7 +112,6 @@ def idf_custom_test(app=TestApp, dut=IDFDUT, chip="ESP32", module="misc", execut
def test(func):
test_func = original_method(func)
test_func.case_info["ID"] = format_case_id(chip, test_func.case_info["name"])
return test_func
return test
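
The practical effect of dropping the local format_case_id helper here is a change in JUnit case naming, roughly as follows (test_startup is a hypothetical case name; the shared helper comes from tiny_test_fw.Utility):

    from tiny_test_fw.Utility import format_case_id

    # the removed ttfw_idf helper produced IDs like:      'ESP32.test_startup'
    # the shared helper also encodes the sdkconfig name:  'esp32.default.test_startup'
    print(format_case_id('test_startup', target='esp32'))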

View File

@@ -18,17 +18,20 @@
Test script for unit test case.
"""
import re
import time
import argparse
import re
import threading
import time
from tiny_test_fw import TinyFW, Utility, Env, DUT
import ttfw_idf
from tiny_test_fw import DUT, Env, TinyFW, Utility
from tiny_test_fw.TinyFW import TestCaseFailed
from tiny_test_fw.Utility import format_case_id, handle_unexpected_exception
UT_APP_BOOT_UP_DONE = "Press ENTER to see the list of tests."
STRIP_CONFIG_PATTERN = re.compile(r'(.+?)(_\d+)?$')
# matches e.g.: "rst:0xc (SW_CPU_RESET),boot:0x13 (SPI_FAST_FLASH_BOOT)"
RESET_PATTERN = re.compile(r"(rst:0x[0-9a-fA-F]*\s\([\w].*?\),boot:0x[0-9a-fA-F]*\s\([\w].*?\))")
@@ -70,11 +73,7 @@ def reset_reason_matches(reported_str, expected_str):
return False
class TestCaseFailed(AssertionError):
pass
def format_test_case_config(test_case_data):
def format_test_case_config(test_case_data, target='esp32'):
"""
convert the test case data to unified format.
We need the following info to run unit test cases:
@@ -94,6 +93,7 @@ def format_test_case_config(test_case_data):
If config is not specified for test case, then
:param test_case_data: string, list, or a dictionary list
:param target: target
:return: formatted data
"""
@@ -133,6 +133,9 @@ def format_test_case_config(test_case_data):
if "config" not in _case:
_case["config"] = "default"
if 'target' not in _case:
_case['target'] = target
return _case
if not isinstance(test_case_data, list):
@@ -160,7 +163,11 @@ def replace_app_bin(dut, name, new_app_bin):
def format_case_name(case):
return "[{}] {}".format(case["config"], case["name"])
# we could split cases of same config into multiple binaries as we have limited rom space
# we should regard those configs like `default` and `default_2` as the same config
match = STRIP_CONFIG_PATTERN.match(case['config'])
stripped_config_name = match.group(1)
return format_case_id(case['name'], target=case['target'], config=stripped_config_name)
def reset_dut(dut):
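
As an illustration of the config-stripping comment in format_case_name above (the config and case names are invented):

    import re

    STRIP_CONFIG_PATTERN = re.compile(r'(.+?)(_\d+)?$')

    # 'psram_2' is just a second binary built for the 'psram' config, so it is reported
    # under the same stripped config name:
    print(STRIP_CONFIG_PATTERN.match('psram_2').group(1))   # 'psram'
    print(STRIP_CONFIG_PATTERN.match('default').group(1))   # 'default'
    # resulting JUnit name: format_case_id('UT_001', target='esp32', config='psram')
    #                       -> 'esp32.psram.UT_001'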
@@ -188,8 +195,11 @@ def reset_dut(dut):
def log_test_case(description, test_case, ut_config):
Utility.console_log("Running {} '{}' (config {})".format(description, test_case["name"], ut_config), color="orange")
Utility.console_log("Tags: %s" % ", ".join("%s=%s" % (k,v) for (k,v) in test_case.items() if k != "name" and v is not None), color="orange")
Utility.console_log("Running {} '{}' (config {})".format(description, test_case['name'], ut_config),
color='orange')
Utility.console_log('Tags: %s' % ', '.join('%s=%s' % (k, v) for (k, v) in test_case.items()
if k != 'name' and v is not None),
color='orange')
def run_one_normal_case(dut, one_case, junit_test_case):
@@ -287,7 +297,7 @@ def run_unit_test_cases(env, extra_data):
:return: None
"""
case_config = format_test_case_config(extra_data)
case_config = format_test_case_config(extra_data, env.default_dut_cls.TARGET)
# we don't want to stop on a failed case (unless there are special scenarios we can't handle)
# this flag is used to log if any of the cases failed during execution
@@ -306,14 +316,14 @@
log_test_case("test case", one_case, ut_config)
performance_items = []
# create junit report test case
junit_test_case = TinyFW.JunitReport.create_test_case("[{}] {}".format(ut_config, one_case["name"]))
junit_test_case = TinyFW.JunitReport.create_test_case(format_case_name(one_case))
try:
run_one_normal_case(dut, one_case, junit_test_case)
performance_items = dut.get_performance_items()
except TestCaseFailed:
failed_cases.append(format_case_name(one_case))
except Exception as e:
junit_test_case.add_failure_info("Unexpected exception: " + str(e))
handle_unexpected_exception(junit_test_case, e)
failed_cases.append(format_case_name(one_case))
finally:
TinyFW.JunitReport.update_performance(performance_items)
@@ -321,13 +331,6 @@
# close DUT when finish running all cases for one config
env.close_dut(dut.name)
# raise exception if any case fails
if failed_cases:
Utility.console_log("Failed Cases:", color="red")
for _case_name in failed_cases:
Utility.console_log("\t" + _case_name, color="red")
raise AssertionError("Unit Test Failed")
class Handler(threading.Thread):
@@ -502,19 +505,21 @@ def run_multiple_devices_cases(env, extra_data):
"""
failed_cases = []
case_config = format_test_case_config(extra_data)
case_config = format_test_case_config(extra_data, env.default_dut_cls.TARGET)
duts = {}
for ut_config in case_config:
Utility.console_log("Running unit test for config: " + ut_config, "O")
for one_case in case_config[ut_config]:
log_test_case("multi-device test", one_case, ut_config, )
result = False
junit_test_case = TinyFW.JunitReport.create_test_case("[{}] {}".format(ut_config, one_case["name"]))
junit_test_case = TinyFW.JunitReport.create_test_case(format_case_name(one_case))
try:
result = run_one_multiple_devices_case(duts, ut_config, env, one_case,
one_case.get('app_bin'), junit_test_case)
except TestCaseFailed:
pass # result is False, this is handled by the finally block
except Exception as e:
junit_test_case.add_failure_info("Unexpected exception: " + str(e))
handle_unexpected_exception(junit_test_case, e)
finally:
if result:
Utility.console_log("Success: " + format_case_name(one_case), color="green")
@@ -527,12 +532,6 @@
env.close_dut(dut)
duts = {}
if failed_cases:
Utility.console_log("Failed Cases:", color="red")
for _case_name in failed_cases:
Utility.console_log("\t" + _case_name, color="red")
raise AssertionError("Unit Test Failed")
def run_one_multiple_stage_case(dut, one_case, junit_test_case):
reset_dut(dut)
@@ -641,7 +640,7 @@ def run_one_multiple_stage_case(dut, one_case, junit_test_case):
def run_multiple_stage_cases(env, extra_data):
"""
extra_data can be 2 types of value
1. as dict: Mandantory keys: "name" and "child case num", optional keys: "reset" and others
1. as dict: Mandatory keys: "name" and "child case num", optional keys: "reset" and others
3. as list of string or dict:
[case1, case2, case3, {"name": "restart from PRO CPU", "child case num": 2}, ...]
@@ -650,7 +649,7 @@ def run_multiple_stage_cases(env, extra_data):
:return: None
"""
case_config = format_test_case_config(extra_data)
case_config = format_test_case_config(extra_data, env.default_dut_cls.TARGET)
# we don't want to stop on a failed case (unless there are special scenarios we can't handle)
# this flag is used to log if any of the cases failed during execution
@@ -667,14 +666,14 @@
for one_case in case_config[ut_config]:
log_test_case("multi-stage test", one_case, ut_config)
performance_items = []
junit_test_case = TinyFW.JunitReport.create_test_case("[{}] {}".format(ut_config, one_case["name"]))
junit_test_case = TinyFW.JunitReport.create_test_case(format_case_name(one_case))
try:
run_one_multiple_stage_case(dut, one_case, junit_test_case)
performance_items = dut.get_performance_items()
except TestCaseFailed:
failed_cases.append(format_case_name(one_case))
except Exception as e:
junit_test_case.add_failure_info("Unexpected exception: " + str(e))
handle_unexpected_exception(junit_test_case, e)
failed_cases.append(format_case_name(one_case))
finally:
TinyFW.JunitReport.update_performance(performance_items)
@@ -682,17 +681,9 @@
# close DUT when finish running all cases for one config
env.close_dut(dut.name)
# raise exception if any case fails
if failed_cases:
Utility.console_log("Failed Cases:", color="red")
for _case_name in failed_cases:
Utility.console_log("\t" + _case_name, color="red")
raise AssertionError("Unit Test Failed")
def detect_update_unit_test_info(env, extra_data, app_bin):
case_config = format_test_case_config(extra_data)
case_config = format_test_case_config(extra_data, env.default_dut_cls.TARGET)
for ut_config in case_config:
dut = env.get_dut("unit-test-app", app_path=UT_APP_PATH, app_config_name=ut_config)
@@ -762,20 +753,16 @@ if __name__ == '__main__':
type=int,
default=1
)
parser.add_argument("--env_config_file", "-e",
help="test env config file",
default=None
)
parser.add_argument("--app_bin", "-b",
help="application binary file for flashing the chip",
default=None
)
parser.add_argument(
'test',
help='Comma separated list of <option>:<argument> where option can be "name" (default), "child case num", \
"config", "timeout".',
nargs='+'
)
parser.add_argument('--env_config_file', '-e',
help='test env config file',
default=None)
parser.add_argument('--app_bin', '-b',
help='application binary file for flashing the chip',
default=None)
parser.add_argument('test',
help='Comma separated list of <option>:<argument> where option can be "name" (default), '
'"child case num", "config", "timeout".',
nargs='+')
args = parser.parse_args()
list_of_dicts = []
for test in args.test: