Merge branch 'feature/skip_known_failure_cases_v4.0' into 'release/v4.0'

CI: Ignore Known Failure Cases Result (v4.0)

See merge request espressif/esp-idf!14651

Commit eeb4dd74c6
@@ -42,9 +42,11 @@
     # clone test env configs
     - ./tools/ci/retry_failed.sh git clone $TEST_ENV_CONFIG_REPO
     - python $CHECKOUT_REF_SCRIPT ci-test-runner-configs ci-test-runner-configs
-    - cd tools/ci/python_packages/tiny_test_fw/bin
+    # git clone the known failure cases repo, run test
+    - ./tools/ci/retry_failed.sh git clone $KNOWN_FAILURE_CASES_REPO known_failure_cases
     # run test
-    - python Runner.py $TEST_CASE_PATH -c $CONFIG_FILE -e $ENV_FILE
+    - cd tools/ci/python_packages/tiny_test_fw/bin
+    - python Runner.py $TEST_CASE_PATH -c $CONFIG_FILE -e $ENV_FILE --known_failure_cases_file $CI_PROJECT_DIR/known_failure_cases/known_failure_cases.txt

 .unit_test_template:
   extends: .example_test_template

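The CI job above now clones a separate repository ($KNOWN_FAILURE_CASES_REPO) and hands its known_failure_cases.txt to Runner.py. Judging by the Runner changes later in this diff, that file is a plain-text list of case identifiers: blank lines and "#" comments are dropped, and each remaining entry is either an exact case name or an fnmatch-style pattern. A minimal sketch of how such a file could look and be pre-processed; the entries are hypothetical, the real list is not part of this diff:

# Hypothetical contents for known_failure_cases.txt (the real file lives in the
# repository cloned from $KNOWN_FAILURE_CASES_REPO and is not shown here).
SAMPLE_FILE = """
# temporarily failing on ESP32 runners
esp32.default.UT_FREERTOS_TASK_DELETE   # tracked elsewhere (hypothetical entry)
esp32.psram.*                           # wildcard: every case built with the psram config
"""


def strip_entries(text):
    # Mimics Runner._get_config_cases below: drop blank lines and '#' comments.
    entries = set()
    for line in text.splitlines():
        without_comments = line.split("#")[0].strip()
        if without_comments:
            entries.add(without_comments)
    return entries


print(strip_entries(SAMPLE_FILE))
# two entries survive: 'esp32.default.UT_FREERTOS_TASK_DELETE' and 'esp32.psram.*'
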
@@ -13,19 +13,30 @@
 # limitations under the License.

 """ Interface for test cases. """
-import os
-import time
-import traceback
 import functools
+import os
 import socket
+import time
 from datetime import datetime

 import junit_xml

-from . import Env
-from . import DUT
-from . import App
-from . import Utility
+from . import DUT, App, Env, Utility
+from .Utility import format_case_id
+
+
+class TestCaseFailed(AssertionError):
+    def __init__(self, *cases):
+        """
+        Raise this exception if one or more test cases fail in a 'normal' way (ie the test runs but fails, no unexpected exceptions)
+
+        This will avoid dumping the Python stack trace, because the assumption is the junit error info and full job log already has
+        enough information for a developer to debug.
+
+        'cases' argument is the names of one or more test cases
+        """
+        message = 'Test case{} failed: {}'.format('s' if len(cases) > 1 else '', ', '.join(str(c) for c in cases))
+        super(TestCaseFailed, self).__init__(self, message)


 class DefaultEnvConfig(object):

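A short sketch (not part of the diff) of how the new exception type in the tiny_test_fw TinyFW module is meant to be used: a runner raises it for ordinary test failures and catches it separately from unexpected exceptions, so only the junit info is recorded and no Python stack trace is dumped. The super() call is simplified here so that str(e) is just the message, and the case names are made up:

class TestCaseFailed(AssertionError):
    # Same shape as the class added above; super() simplified to pass only the message.
    def __init__(self, *cases):
        message = 'Test case{} failed: {}'.format('s' if len(cases) > 1 else '',
                                                  ', '.join(str(c) for c in cases))
        super(TestCaseFailed, self).__init__(message)


def run_suite(failed_cases):
    # Stand-in for a test runner: raise once at the end if anything failed.
    if failed_cases:
        raise TestCaseFailed(*failed_cases)


try:
    run_suite(['UT_apptrace', 'UT_spi_flash_read'])   # hypothetical case names
except TestCaseFailed as e:
    print(e)            # Test cases failed: UT_apptrace, UT_spi_flash_read
except Exception:       # unexpected errors would still surface with a traceback
    raise
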
@@ -87,8 +98,8 @@ class JunitReport(object):
     @classmethod
     def output_report(cls, junit_file_path):
         """ Output current test result to file. """
-        with open(os.path.join(junit_file_path, cls.JUNIT_FILE_NAME), "w") as f:
-            cls.JUNIT_TEST_SUITE.to_file(f, [cls.JUNIT_TEST_SUITE], prettyprint=False)
+        with open(os.path.join(junit_file_path, cls.JUNIT_FILE_NAME), 'w') as f:
+            junit_xml.to_xml_report_file(f, [cls.JUNIT_TEST_SUITE], prettyprint=False)

     @classmethod
     def get_current_test_case(cls):

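The hunk above switches report writing from the instance helper to the module-level writer of the junit-xml package. A minimal sketch, assuming a junit-xml version that provides to_xml_report_file (1.9 or later); the suite, case and file names are made up:

import junit_xml

tc = junit_xml.TestCase('esp32.default.UT_example')       # hypothetical case id
tc.add_failure_info('assertion failed in test body')      # mark the case as failed
suite = junit_xml.TestSuite('unit_test_suite', [tc])

with open('report.xml', 'w') as f:
    # style used by the new code; the older equivalent was suite.to_file(f, [suite], ...)
    junit_xml.to_xml_report_file(f, [suite], prettyprint=False)
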
@@ -184,21 +195,20 @@ def test_method(**kwargs):
         env_inst = Env.Env(**env_config)

         # prepare for xunit test results
-        junit_file_path = env_inst.app_cls.get_log_folder(env_config["test_suite_name"])
-        junit_test_case = JunitReport.create_test_case(case_info["ID"])
+        junit_file_path = env_inst.app_cls.get_log_folder(env_config['test_suite_name'])
+        junit_test_case = JunitReport.create_test_case(format_case_id(case_info['ID'],
+                                                                      target=case_info['chip'].lower()))
         result = False

         try:
             Utility.console_log("starting running test: " + test_func.__name__, color="green")
             # execute test function
             test_func(env_inst, extra_data)
             # if finish without exception, test result is True
             result = True
+        except TestCaseFailed as e:
+            junit_test_case.add_failure_info(str(e))
         except Exception as e:
-            # handle all the exceptions here
-            traceback.print_exc()
-            # log failure
-            junit_test_case.add_failure_info(str(e) + ":\r\n" + traceback.format_exc())
+            Utility.handle_unexpected_exception(junit_test_case, e)
         finally:
             # do close all DUTs, if result is False then print DUT debug info
             close_errors = env_inst.close(dut_debug=(not result))

@@ -210,7 +220,7 @@ def test_method(**kwargs):
             # and raise exception in DUT close to fail test case if reset detected.
             if close_errors:
                 for error in close_errors:
-                    junit_test_case.add_failure_info('env close error: {}'.format(error))
+                    junit_test_case.add_failure_info(str(error))
                 result = False
             if not case_info["junit_report_by_case"]:
                 JunitReport.test_case_finish(junit_test_case)

@@ -1,6 +1,7 @@
 from __future__ import print_function
-import sys

+import sys
+import traceback

 _COLOR_CODES = {
     "white": u'\033[0m',

@@ -58,3 +59,20 @@ def load_source(name, path):
     ret = imp.load_source(name, path)
     __LOADED_MODULES[name] = ret
     return ret
+
+
+def handle_unexpected_exception(junit_test_case, exception):
+    """
+    Helper to log & add junit result details for an unexpected exception encountered
+    when running a test case.
+
+    Should always be called from inside an except: block
+    """
+    traceback.print_exc()
+    # AssertionError caused by an 'assert' statement has an empty string as its 'str' form
+    e_str = str(exception) if str(exception) else repr(exception)
+    junit_test_case.add_failure_info('Unexpected exception: {}\n{}'.format(e_str, traceback.format_exc()))
+
+
+def format_case_id(case_name, target='esp32', config='default'):
+    return '{}.{}.{}'.format(target, config, case_name)

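Two details of the tiny_test_fw Utility helpers above are worth illustrating: format_case_id builds the <target>.<config>.<name> identifier that the rest of this diff switches to, and the e_str fallback exists because a bare assert raises an AssertionError whose str() is empty. A standalone sketch with a hypothetical case name:

def format_case_id(case_name, target='esp32', config='default'):
    return '{}.{}.{}'.format(target, config, case_name)


print(format_case_id('UT_nvs_flash'))                              # esp32.default.UT_nvs_flash
print(format_case_id('UT_nvs_flash', target='esp32s2',
                     config='psram'))                              # esp32s2.psram.UT_nvs_flash

# Why handle_unexpected_exception falls back to repr():
try:
    assert False              # a bare assert carries no message
except AssertionError as exception:
    e_str = str(exception) if str(exception) else repr(exception)
    print(e_str)              # AssertionError() -- repr() keeps the type visible
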
@@ -21,13 +21,14 @@ Command line interface to run test cases from a given path.
 Use ``python Runner.py test_case_path -c config_file -e env_config_file`` to run test cases.

 """
+import argparse
 import os
 import sys
-import argparse
 import threading
+from fnmatch import fnmatch

-from tiny_test_fw import TinyFW
-from tiny_test_fw.Utility import SearchCases, CaseConfig
+from tiny_test_fw.TinyFW import JunitReport, set_default_config
+from tiny_test_fw.Utility import CaseConfig, SearchCases, console_log


 class Runner(threading.Thread):

@@ -37,29 +38,64 @@ class Runner(threading.Thread):
     :param env_config_file: env config file
     """

-    def __init__(self, test_case, case_config, env_config_file=None):
+    def __init__(self, test_case, case_config, env_config_file=None, known_failure_cases_file=None):
         super(Runner, self).__init__()
         self.setDaemon(True)
         if case_config:
             test_suite_name = os.path.splitext(os.path.basename(case_config))[0]
         else:
             test_suite_name = "TestRunner"
-        TinyFW.set_default_config(env_config_file=env_config_file, test_suite_name=test_suite_name)
+        set_default_config(env_config_file=env_config_file, test_suite_name=test_suite_name)
         test_methods = SearchCases.Search.search_test_cases(test_case)
         self.test_cases = CaseConfig.Parser.apply_config(test_methods, case_config)
-        self.test_result = []
+        self.known_failure_cases = self._get_config_cases(known_failure_cases_file)
+
+    @staticmethod
+    def _get_config_cases(config_file):
+        res = set()
+        if not config_file or not os.path.isfile(config_file):
+            return res
+
+        for line in open(config_file).readlines():
+            if not line:
+                continue
+            if not line.strip():
+                continue
+            without_comments = line.split("#")[0].strip()
+            if without_comments:
+                res.add(without_comments)
+        return res

     def run(self):
         for case in self.test_cases:
-            result = case.run()
-            self.test_result.append(result)
+            case.run()
+
+    @staticmethod
+    def is_known_issue(tc_name, known_cases):
+        for case in known_cases:
+            if tc_name == case:
+                return True
+            if fnmatch(tc_name, case):
+                return True
+        return False

     def get_test_result(self):
-        return self.test_result and all(self.test_result)
+        _res = True
+        console_log("Test Results:")
+        for tc in JunitReport.JUNIT_TEST_SUITE.test_cases:
+            if tc.failures:
+                if self.is_known_issue(tc.name, self.known_failure_cases):
+                    console_log(" Known Failure: " + tc.name, color="orange")
+                else:
+                    console_log(" Test Fail: " + tc.name, color="red")
+                    _res = False
+            else:
+                console_log(" Test Succeed: " + tc.name, color="green")
+
+        return _res


-if __name__ == '__main__':
+if __name__ == "__main__":

     parser = argparse.ArgumentParser()
     parser.add_argument("test_case",
                         help="test case folder or file")

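The known-failure handling in Runner.py above boils down to: run every case, then walk the junit suite and downgrade failures whose formatted name matches an entry from the file, either exactly or as an fnmatch pattern; only unmatched failures make get_test_result return False. A self-contained sketch of that matching step, with hypothetical case names and patterns:

from fnmatch import fnmatch


def is_known_issue(tc_name, known_cases):
    # condenses the two checks in Runner.is_known_issue above (exact match, then fnmatch)
    for case in known_cases:
        if tc_name == case or fnmatch(tc_name, case):
            return True
    return False


known = {'esp32.default.UT_FREERTOS_TASK_DELETE', 'esp32.psram.*'}      # hypothetical entries

print(is_known_issue('esp32.default.UT_FREERTOS_TASK_DELETE', known))   # True (exact match)
print(is_known_issue('esp32.psram.UT_spiram_malloc', known))            # True (wildcard)
print(is_known_issue('esp32.default.UT_nvs_flash', known))              # False -> job still fails
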
@@ -67,15 +103,17 @@ if __name__ == '__main__':
                         help="case filter/config file")
     parser.add_argument("--env_config_file", "-e", default=None,
                         help="test env config file")
+    parser.add_argument("--known_failure_cases_file", default=None,
+                        help="known failure cases file")
     args = parser.parse_args()

-    runner = Runner(args.test_case, args.case_config, args.env_config_file)
+    runner = Runner(args.test_case, args.case_config, args.env_config_file, args.known_failure_cases_file)
     runner.start()

     while True:
         try:
             runner.join(1)
-            if not runner.isAlive():
+            if not runner.is_alive():
                 break
         except KeyboardInterrupt:
             print("exit by Ctrl-C")

@@ -19,10 +19,6 @@ from .IDFApp import IDFApp, Example, UT  # noqa: export all Apps for users
 from .IDFDUT import IDFDUT  # noqa: export DUTs for users


-def format_case_id(chip, case_name):
-    return "{}.{}".format(chip, case_name)
-
-
 def idf_example_test(app=Example, dut=IDFDUT, chip="ESP32", module="examples", execution_time=1,
                      level="example", erase_nvs=True, config_name=None, **kwargs):
     """

@@ -50,7 +46,6 @@ def idf_example_test(app=Example, dut=IDFDUT, chip="ESP32", module="examples", e

     def test(func):
         test_func = original_method(func)
-        test_func.case_info["ID"] = format_case_id(chip, test_func.case_info["name"])
         return test_func

     return test

@@ -82,7 +77,6 @@ def idf_unit_test(app=UT, dut=IDFDUT, chip="ESP32", module="unit-test", executio

     def test(func):
         test_func = original_method(func)
-        test_func.case_info["ID"] = format_case_id(chip, test_func.case_info["name"])
         return test_func

     return test

@@ -18,17 +18,23 @@
 Test script for unit test case.
 """

-import re
-import time
 import argparse
+import re
 import threading
+import time

-from tiny_test_fw import TinyFW, Utility, Env, DUT
 import ttfw_idf
+from tiny_test_fw import DUT, Env, TinyFW, Utility
+from tiny_test_fw.TinyFW import TestCaseFailed
+from tiny_test_fw.Utility import format_case_id, handle_unexpected_exception

 UT_APP_BOOT_UP_DONE = "Press ENTER to see the list of tests."

+STRIP_CONFIG_PATTERN = re.compile(r'(.+?)(_\d+)?$')
+
+# matches e.g.: "rst:0xc (SW_CPU_RESET),boot:0x13 (SPI_FAST_FLASH_BOOT)"
 RESET_PATTERN = re.compile(r"(ets [\w]{3}\s+[\d]{1,2} [\d]{4} [\d]{2}:[\d]{2}:[\d]{2}[^()]*\([\w].*?\))")
 EXCEPTION_PATTERN = re.compile(r"(Guru Meditation Error: Core\s+\d panic'ed \([\w].*?\))")
 ABORT_PATTERN = re.compile(r"(abort\(\) was called at PC 0x[a-fA-F\d]{8} on core \d)")
 FINISH_PATTERN = re.compile(r"1 Tests (\d) Failures (\d) Ignored")

@@ -48,11 +54,26 @@ DUT_STARTUP_CHECK_RETRY_COUNT = 5
 TEST_HISTORY_CHECK_TIMEOUT = 2


-class TestCaseFailed(AssertionError):
-    pass
+def reset_reason_matches(reported_str, expected_str):
+    known_aliases = {
+        "_RESET": "_RST",
+        "POWERON_RESET": "POWERON",
+        "DEEPSLEEP_RESET": "DSLEEP",
+    }
+
+    if expected_str in reported_str:
+        return True
+
+    for token, alias in known_aliases.items():
+        if token in expected_str:
+            alt_expected_str = expected_str.replace(token, alias)
+            if alt_expected_str in reported_str:
+                return True
+
+    return False


-def format_test_case_config(test_case_data):
+def format_test_case_config(test_case_data, target='esp32'):
     """
     convert the test case data to unified format.
     We need to following info to run unit test cases:

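The new reset_reason_matches helper exists because different chips and ROM versions print slightly different reset-reason strings; the alias table lets an expected POWERON_RESET, for instance, also match a boot log that only says POWERON. A standalone sketch with the same alias table; the sample log lines are illustrative, not captured device output:

def reset_reason_matches(reported_str, expected_str):
    # same alias table as the helper added above
    known_aliases = {
        "_RESET": "_RST",
        "POWERON_RESET": "POWERON",
        "DEEPSLEEP_RESET": "DSLEEP",
    }
    if expected_str in reported_str:
        return True
    for token, alias in known_aliases.items():
        if token in expected_str and expected_str.replace(token, alias) in reported_str:
            return True
    return False


print(reset_reason_matches("rst:0xc (SW_CPU_RESET),boot:0x13 (SPI_FAST_FLASH_BOOT)",
                           "SW_CPU_RESET"))        # True, direct substring
print(reset_reason_matches("rst:0x1 (POWERON),boot:0x8 (SPI_FAST_FLASH_BOOT)",
                           "POWERON_RESET"))       # True, via the POWERON alias
print(reset_reason_matches("rst:0x1 (POWERON)", "DEEPSLEEP_RESET"))   # False
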
@@ -72,6 +93,7 @@ def format_test_case_config(test_case_data):
     If config is not specified for test case, then

     :param test_case_data: string, list, or a dictionary list
+    :param target: target
     :return: formatted data
     """

@@ -111,6 +133,9 @@ def format_test_case_config(test_case_data):
         if "config" not in _case:
             _case["config"] = "default"

+        if 'target' not in _case:
+            _case['target'] = target
+
         return _case

     if not isinstance(test_case_data, list):

@@ -138,7 +163,11 @@ def replace_app_bin(dut, name, new_app_bin):


 def format_case_name(case):
-    return "[{}] {}".format(case["config"], case["name"])
+    # we could split cases of same config into multiple binaries as we have limited rom space
+    # we should regard those configs like `default` and `default_2` as the same config
+    match = STRIP_CONFIG_PATTERN.match(case['config'])
+    stripped_config_name = match.group(1)
+    return format_case_id(case['name'], target=case['target'], config=stripped_config_name)


 def reset_dut(dut):

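Because one sdkconfig may be split across several test binaries (default, default_2, default_3, ...), the junit name now strips the numeric suffix before building the case id, so the known-failure list only needs one entry per config. A quick sketch of what the regex and the resulting ids look like; the case name is made up:

import re

STRIP_CONFIG_PATTERN = re.compile(r'(.+?)(_\d+)?$')


def format_case_id(case_name, target='esp32', config='default'):
    return '{}.{}.{}'.format(target, config, case_name)


for config in ('default', 'default_2', 'psram', 'psram_3'):
    stripped = STRIP_CONFIG_PATTERN.match(config).group(1)
    print(config, '->', format_case_id('UT_timer_group', target='esp32', config=stripped))
# default -> esp32.default.UT_timer_group
# default_2 -> esp32.default.UT_timer_group
# psram -> esp32.psram.UT_timer_group
# psram_3 -> esp32.psram.UT_timer_group
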
@@ -165,6 +194,14 @@ def reset_dut(dut):
     raise AssertionError("Reset {} ({}) failed!".format(dut.name, dut.port))


+def log_test_case(description, test_case, ut_config):
+    Utility.console_log("Running {} '{}' (config {})".format(description, test_case['name'], ut_config),
+                        color='orange')
+    Utility.console_log('Tags: %s' % ', '.join('%s=%s' % (k, v) for (k, v) in test_case.items()
+                                               if k != 'name' and v is not None),
+                        color='orange')
+
+
 def run_one_normal_case(dut, one_case, junit_test_case):

     reset_dut(dut)

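log_test_case simply prints a human-readable banner before each case. A sketch of the output it would produce for a hypothetical case dictionary, with Utility.console_log replaced by plain print (the real helper also colours the lines orange):

def console_log(msg, color=None):
    print(msg)   # stand-in for tiny_test_fw.Utility.console_log


def log_test_case(description, test_case, ut_config):
    console_log("Running {} '{}' (config {})".format(description, test_case['name'], ut_config),
                color='orange')
    console_log('Tags: %s' % ', '.join('%s=%s' % (k, v) for (k, v) in test_case.items()
                                       if k != 'name' and v is not None),
                color='orange')


one_case = {'name': 'UT_spi_flash_write', 'config': 'psram', 'timeout': 120, 'reset': None}
log_test_case('unit test', one_case, 'psram')
# Running unit test 'UT_spi_flash_write' (config psram)
# Tags: config=psram, timeout=120      (None-valued tags and the name are filtered out)
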
@@ -277,14 +314,14 @@ def run_unit_test_cases(env, extra_data):
         for one_case in case_config[ut_config]:
             performance_items = []
             # create junit report test case
-            junit_test_case = TinyFW.JunitReport.create_test_case("[{}] {}".format(ut_config, one_case["name"]))
+            junit_test_case = TinyFW.JunitReport.create_test_case(format_case_name(one_case))
             try:
                 run_one_normal_case(dut, one_case, junit_test_case)
                 performance_items = dut.get_performance_items()
             except TestCaseFailed:
                 failed_cases.append(format_case_name(one_case))
             except Exception as e:
-                junit_test_case.add_failure_info("Unexpected exception: " + str(e))
+                handle_unexpected_exception(junit_test_case, e)
                 failed_cases.append(format_case_name(one_case))
             finally:
                 TinyFW.JunitReport.update_performance(performance_items)

@@ -292,13 +329,6 @@ def run_unit_test_cases(env, extra_data):
         # close DUT when finish running all cases for one config
         env.close_dut(dut.name)

-    # raise exception if any case fails
-    if failed_cases:
-        Utility.console_log("Failed Cases:", color="red")
-        for _case_name in failed_cases:
-            Utility.console_log("\t" + _case_name, color="red")
-        raise AssertionError("Unit Test Failed")
-

 class Handler(threading.Thread):

@@ -479,12 +509,14 @@ def run_multiple_devices_cases(env, extra_data):
         Utility.console_log("Running unit test for config: " + ut_config, "O")
         for one_case in case_config[ut_config]:
             result = False
-            junit_test_case = TinyFW.JunitReport.create_test_case("[{}] {}".format(ut_config, one_case["name"]))
+            junit_test_case = TinyFW.JunitReport.create_test_case(format_case_name(one_case))
             try:
                 result = run_one_multiple_devices_case(duts, ut_config, env, one_case,
                                                        one_case.get('app_bin'), junit_test_case)
+            except TestCaseFailed:
+                pass  # result is False, this is handled by the finally block
             except Exception as e:
-                junit_test_case.add_failure_info("Unexpected exception: " + str(e))
+                handle_unexpected_exception(junit_test_case, e)
             finally:
                 if result:
                     Utility.console_log("Success: " + format_case_name(one_case), color="green")

@@ -497,12 +529,6 @@ def run_multiple_devices_cases(env, extra_data):
             env.close_dut(dut)
         duts = {}

-    if failed_cases:
-        Utility.console_log("Failed Cases:", color="red")
-        for _case_name in failed_cases:
-            Utility.console_log("\t" + _case_name, color="red")
-        raise AssertionError("Unit Test Failed")
-

 def run_one_multiple_stage_case(dut, one_case, junit_test_case):
     reset_dut(dut)

@@ -531,7 +557,7 @@ def run_one_multiple_stage_case(dut, one_case, junit_test_case):
                 result = False
                 if len(one_case["reset"]) == len(exception_reset_list):
                     for i, exception in enumerate(exception_reset_list):
-                        if one_case["reset"][i] not in exception:
+                        if not reset_reason_matches(exception, one_case["reset"][i]):
                             break
                     else:
                         result = True

@@ -610,7 +636,7 @@ def run_one_multiple_stage_case(dut, one_case, junit_test_case):
 def run_multiple_stage_cases(env, extra_data):
     """
     extra_data can be 2 types of value
-    1. as dict: Mandantory keys: "name" and "child case num", optional keys: "reset" and others
+    1. as dict: Mandatory keys: "name" and "child case num", optional keys: "reset" and others
     3. as list of string or dict:
        [case1, case2, case3, {"name": "restart from PRO CPU", "child case num": 2}, ...]

@@ -635,14 +661,14 @@ def run_multiple_stage_cases(env, extra_data):

         for one_case in case_config[ut_config]:
             performance_items = []
-            junit_test_case = TinyFW.JunitReport.create_test_case("[{}] {}".format(ut_config, one_case["name"]))
+            junit_test_case = TinyFW.JunitReport.create_test_case(format_case_name(one_case))
             try:
                 run_one_multiple_stage_case(dut, one_case, junit_test_case)
                 performance_items = dut.get_performance_items()
             except TestCaseFailed:
                 failed_cases.append(format_case_name(one_case))
             except Exception as e:
-                junit_test_case.add_failure_info("Unexpected exception: " + str(e))
+                handle_unexpected_exception(junit_test_case, e)
                 failed_cases.append(format_case_name(one_case))
             finally:
                 TinyFW.JunitReport.update_performance(performance_items)

@@ -650,16 +676,8 @@ def run_multiple_stage_cases(env, extra_data):
         # close DUT when finish running all cases for one config
         env.close_dut(dut.name)

-    # raise exception if any case fails
-    if failed_cases:
-        Utility.console_log("Failed Cases:", color="red")
-        for _case_name in failed_cases:
-            Utility.console_log("\t" + _case_name, color="red")
-        raise AssertionError("Unit Test Failed")
-

 def detect_update_unit_test_info(env, extra_data, app_bin):

     case_config = format_test_case_config(extra_data)

     for ut_config in case_config:

@@ -730,20 +748,16 @@ if __name__ == '__main__':
         type=int,
         default=1
     )
-    parser.add_argument("--env_config_file", "-e",
-                        help="test env config file",
-                        default=None
-    )
-    parser.add_argument("--app_bin", "-b",
-                        help="application binary file for flashing the chip",
-                        default=None
-    )
-    parser.add_argument(
-        'test',
-        help='Comma separated list of <option>:<argument> where option can be "name" (default), "child case num", \
-        "config", "timeout".',
-        nargs='+'
-    )
+    parser.add_argument('--env_config_file', '-e',
+                        help='test env config file',
+                        default=None)
+    parser.add_argument('--app_bin', '-b',
+                        help='application binary file for flashing the chip',
+                        default=None)
+    parser.add_argument('test',
+                        help='Comma separated list of <option>:<argument> where option can be "name" (default), '
+                             '"child case num", "config", "timeout".',
+                        nargs='+')
     args = parser.parse_args()
     list_of_dicts = []
     for test in args.test:

@@ -753,7 +767,7 @@ if __name__ == '__main__':
         if len(test_item) == 0:
             continue
         pair = test_item.split(r':')
-        if len(pair) == 1 or pair[0] is 'name':
+        if len(pair) == 1 or pair[0] == 'name':
             test_dict['name'] = pair[0]
         elif len(pair) == 2:
             if pair[0] == 'timeout' or pair[0] == 'child case num':