Add component_ut to the assign-test and target-test stages.

Add a template Python test file to get test cases.
Fu Hanxi 2020-08-25 16:42:36 +08:00
parent e35328afd9
commit edc7cc9c00
19 changed files with 638 additions and 157 deletions

View File

@ -64,22 +64,39 @@ variables:
.fetch_submodules: &fetch_submodules |
python $SUBMODULE_FETCH_TOOL -s $SUBMODULES_TO_FETCH
.add_ssh_keys: &add_ssh_keys |
mkdir -p ~/.ssh
chmod 700 ~/.ssh
echo -n $GITLAB_KEY > ~/.ssh/id_rsa_base64
base64 --decode --ignore-garbage ~/.ssh/id_rsa_base64 > ~/.ssh/id_rsa
chmod 600 ~/.ssh/id_rsa
echo -e "Host gitlab.espressif.cn\n\tStrictHostKeyChecking no\n" >> ~/.ssh/config
before_script:
- source tools/ci/setup_python.sh
# apply bot filter in before script
- *apply_bot_filter
# add gitlab ssh key
- mkdir -p ~/.ssh
- chmod 700 ~/.ssh
- echo -n $GITLAB_KEY > ~/.ssh/id_rsa_base64
- base64 --decode --ignore-garbage ~/.ssh/id_rsa_base64 > ~/.ssh/id_rsa
- chmod 600 ~/.ssh/id_rsa
- echo -e "Host gitlab.espressif.cn\n\tStrictHostKeyChecking no\n" >> ~/.ssh/config
- *add_ssh_keys
# Set some options and environment for CI
- source tools/ci/configure_ci_environment.sh
- *setup_tools_unless_target_test
- *fetch_submodules
# used for component-based unit test apps
.before_script_for_component_ut:
variables:
COMPONENT_UT_EXCLUDE_LIST_FP: ${CI_PROJECT_DIR}/tools/ci/component_ut_excludes.txt
before_script:
- source tools/ci/setup_python.sh
- *apply_bot_filter
- *add_ssh_keys
- source tools/ci/configure_ci_environment.sh
- *setup_tools_unless_target_test
- *fetch_submodules
- export COMPONENT_UT_DIRS=`find components/ -name test_apps -type d`
- export COMPONENT_UT_EXCLUDES=`[ -r $COMPONENT_UT_EXCLUDE_LIST_FP ] && cat $COMPONENT_UT_EXCLUDE_LIST_FP | xargs`
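These two exports drive everything that follows: COMPONENT_UT_DIRS lists every test_apps folder under components/, and COMPONENT_UT_EXCLUDES holds the entries of the exclude file when it exists. A minimal Python sketch of the equivalent discovery logic (the helper name is made up for illustration; the file locations reuse the CI variables above):

import os

def collect_component_ut_dirs(components_root='components',
                              exclude_list_fp='tools/ci/component_ut_excludes.txt'):
    # equivalent of: find components/ -name test_apps -type d
    ut_dirs = [os.path.join(root, name)
               for root, dirs, _files in os.walk(components_root)
               for name in dirs if name == 'test_apps']
    # equivalent of: [ -r $COMPONENT_UT_EXCLUDE_LIST_FP ] && cat ... | xargs
    excludes = []
    if os.access(exclude_list_fp, os.R_OK):
        with open(exclude_list_fp) as f:
            excludes = f.read().split()
    return ut_dirs, excludes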
# used for check scripts which we want to run unconditionally
.before_script_lesser_nofilter:
variables:

View File

@ -0,0 +1,15 @@
from __future__ import print_function

import ttfw_idf


@ttfw_idf.idf_component_unit_test(env_tag='COMPONENT_UT_GENERIC')
def test_component_ut_esp_netif(env, extra_data):
    dut = env.get_dut('esp_netif', 'components/esp_netif/test_app')
    dut.start_app()
    stdout = dut.expect('Tests finished', full_stdout=True)
    ttfw_idf.ComponentUTResult.parse_result(stdout)


if __name__ == '__main__':
    test_component_ut_esp_netif()

View File

@ -1 +0,0 @@
components/esp_netif/test_app

View File

@ -1,4 +1,5 @@
assign_test:
extends: .before_script_for_component_ut
tags:
- assign_test
image: $CI_DOCKER_REGISTRY/ubuntu-test-env$BOT_DOCKER_IMAGE_TAG
@ -11,8 +12,9 @@ assign_test:
- build_esp_idf_tests_cmake_esp32s2
variables:
SUBMODULES_TO_FETCH: "components/esptool_py/esptool"
EXAMPLE_CONFIG_OUTPUT_PATH: "$CI_PROJECT_DIR/examples/test_configs"
TEST_APP_CONFIG_OUTPUT_PATH: "$CI_PROJECT_DIR/tools/test_apps/test_configs"
EXAMPLE_CONFIG_OUTPUT_PATH: "${CI_PROJECT_DIR}/examples/test_configs"
TEST_APP_CONFIG_OUTPUT_PATH: "${CI_PROJECT_DIR}/tools/test_apps/test_configs"
COMPONENT_UT_CONFIG_OUTPUT_PATH: "${CI_PROJECT_DIR}/component_ut/test_configs"
UNIT_TEST_CASE_FILE: "${CI_PROJECT_DIR}/components/idf_test/unit_test"
# auto_test_script is compatible with Python 3 only
PYTHON_VER: 3
@ -22,8 +24,10 @@ assign_test:
- components/idf_test/*/TC.sqlite
- $EXAMPLE_CONFIG_OUTPUT_PATH
- $TEST_APP_CONFIG_OUTPUT_PATH
- $COMPONENT_UT_CONFIG_OUTPUT_PATH
- build_examples/artifact_index.json
- build_test_apps/artifact_index.json
- build_component_ut/artifact_index.json
- tools/unit-test-app/builds/artifact_index.json
expire_in: 1 week
only:
@ -36,16 +40,18 @@ assign_test:
- $BOT_LABEL_CUSTOM_TEST
script:
# assign example tests
- python tools/ci/python_packages/ttfw_idf/IDFAssignTest.py example_test $IDF_PATH/examples $CI_TARGET_TEST_CONFIG_FILE $EXAMPLE_CONFIG_OUTPUT_PATH
- python tools/ci/python_packages/ttfw_idf/IDFAssignTest.py example_test $IDF_PATH/examples -c $CI_TARGET_TEST_CONFIG_FILE -o $EXAMPLE_CONFIG_OUTPUT_PATH
# assign test apps
- python tools/ci/python_packages/ttfw_idf/IDFAssignTest.py custom_test $IDF_PATH/tools/test_apps $CI_TARGET_TEST_CONFIG_FILE $TEST_APP_CONFIG_OUTPUT_PATH
- python tools/ci/python_packages/ttfw_idf/IDFAssignTest.py custom_test $IDF_PATH/tools/test_apps -c $CI_TARGET_TEST_CONFIG_FILE -o $TEST_APP_CONFIG_OUTPUT_PATH
# assign component ut
- python tools/ci/python_packages/ttfw_idf/IDFAssignTest.py component_ut $COMPONENT_UT_DIRS -c $CI_TARGET_TEST_CONFIG_FILE -o $COMPONENT_UT_CONFIG_OUTPUT_PATH
# assign unit test cases
- python tools/ci/python_packages/ttfw_idf/IDFAssignTest.py unit_test $UNIT_TEST_CASE_FILE $CI_TARGET_TEST_CONFIG_FILE $IDF_PATH/components/idf_test/unit_test/CIConfigs
- python tools/ci/python_packages/ttfw_idf/IDFAssignTest.py unit_test $UNIT_TEST_CASE_FILE -c $CI_TARGET_TEST_CONFIG_FILE -o $IDF_PATH/components/idf_test/unit_test/CIConfigs
# clone test script to assign tests
- ./tools/ci/retry_failed.sh git clone $TEST_SCRIPT_REPOSITORY
- python $CHECKOUT_REF_SCRIPT auto_test_script auto_test_script
- cd auto_test_script
# assgin integration test cases
# assign integration test cases
- python CIAssignTestCases.py -t $IDF_PATH/components/idf_test/integration_test -c $CI_TARGET_TEST_CONFIG_FILE -b $IDF_PATH/SSC/ssc_bin
update_test_cases:

View File

@ -139,7 +139,9 @@ check_public_headers:
TEST_CONFIG_FILE: ${CI_PROJECT_DIR}/tools/ci/config/target-test.yml
scan_tests:
extends: .scan_build_tests
extends:
- .before_script_for_component_ut
- .scan_build_tests
only:
variables:
- $BOT_TRIGGER_WITH_LABEL == null
@ -158,15 +160,13 @@ scan_tests:
EXAMPLE_TEST_OUTPUT_DIR: ${CI_PROJECT_DIR}/examples/test_configs
TEST_APPS_TEST_DIR: ${CI_PROJECT_DIR}/tools/test_apps
TEST_APPS_OUTPUT_DIR: ${CI_PROJECT_DIR}/tools/test_apps/test_configs
COMPONENT_UT_DIR_TXT: ${CI_PROJECT_DIR}/tools/ci/component_ut_dirs.txt
COMPONENT_UT_OUTPUT_DIR: ${CI_PROJECT_DIR}/component_ut/test_configs
PYTHON_VER: 3
script:
- *export_component_ut_dirs
- python $CI_SCAN_TESTS_PY example_test $EXAMPLE_TEST_DIR -b make --exclude examples/build_system/idf_as_lib -c $TEST_CONFIG_FILE -o $EXAMPLE_TEST_OUTPUT_DIR
- python $CI_SCAN_TESTS_PY example_test $EXAMPLE_TEST_DIR -b cmake --exclude examples/build_system/idf_as_lib -c $TEST_CONFIG_FILE -o $EXAMPLE_TEST_OUTPUT_DIR
- python $CI_SCAN_TESTS_PY test_apps $TEST_APPS_TEST_DIR -c $TEST_CONFIG_FILE -o $TEST_APPS_OUTPUT_DIR
# template python test file not generated yet. preserve binary files for component UT
- python $CI_SCAN_TESTS_PY component_ut $COMPONENT_UT_DIRS -c $TEST_CONFIG_FILE -o $COMPONENT_UT_OUTPUT_DIR --preserve
- python $CI_SCAN_TESTS_PY component_ut $COMPONENT_UT_DIRS --exclude $COMPONENT_UT_EXCLUDES -c $TEST_CONFIG_FILE -o $COMPONENT_UT_OUTPUT_DIR
check_readme_links:
extends: .check_job_template

View File

@ -84,16 +84,7 @@
.test_app_template:
extends: .example_test_template
stage: target_test
dependencies:
- assign_test
only:
refs:
- master
- /^release\/v/
- /^v\d+\.\d+(\.\d+)?($|-)/
- triggers
- schedules
variables:
- $BOT_TRIGGER_WITH_LABEL == null
- $BOT_LABEL_CUSTOM_TEST
@ -104,6 +95,28 @@
LOG_PATH: "$CI_PROJECT_DIR/TEST_LOGS"
ENV_FILE: "$CI_PROJECT_DIR/ci-test-runner-configs/$CI_RUNNER_DESCRIPTION/EnvConfig.yml"
.component_ut_template:
extends:
- .before_script_for_component_ut
- .example_test_template
only:
variables:
- $BOT_TRIGGER_WITH_LABEL == null
- $BOT_LABEL_UNIT_TEST
variables:
CONFIG_FILE_PATH: "${CI_PROJECT_DIR}/component_ut/test_configs"
PYTHON_VER: 3
script:
- *define_config_file_name
# first test if config file exists, if not exist, exit 0
- test -e $CONFIG_FILE || exit 0
# clone test env configs
- ./tools/ci/retry_failed.sh git clone $TEST_ENV_CONFIG_REPOSITORY
- python $CHECKOUT_REF_SCRIPT ci-test-runner-configs ci-test-runner-configs
- cd tools/ci/python_packages/tiny_test_fw/bin
# run test
- python Runner.py $COMPONENT_UT_DIRS -c $CONFIG_FILE -e $ENV_FILE
.unit_test_template:
extends: .example_test_template
stage: target_test
@ -396,6 +409,12 @@ test_app_test_003:
- ESP32
- Example_PPP
component_ut_test_001:
extends: .component_ut_template
tags:
- ESP32
- COMPONENT_UT_GENERIC
UT_001:
extends: .unit_test_template
parallel: 39

View File

@ -571,7 +571,7 @@ class BaseDUT(object):
return self.__getattribute__(method)
@_expect_lock
def expect(self, pattern, timeout=DEFAULT_EXPECT_TIMEOUT):
def expect(self, pattern, timeout=DEFAULT_EXPECT_TIMEOUT, full_stdout=False):
"""
expect(pattern, timeout=DEFAULT_EXPECT_TIMEOUT)
expect received data on DUT match the pattern. will raise exception when expect timeout.
@ -581,9 +581,11 @@ class BaseDUT(object):
:param pattern: string or compiled RegEx(string pattern)
:param timeout: timeout for expect
:param full_stdout: return full stdout until meet expect string/pattern or just matched string
:return: string if pattern is string; matched groups if pattern is RegEx
"""
method = self._get_expect_method(pattern)
stdout = ''
# non-blocking get data for first time
data = self.data_cache.get_data(0)
@ -598,12 +600,13 @@ class BaseDUT(object):
break
# wait for new data from cache
data = self.data_cache.get_data(time_remaining)
stdout = data
if ret is None:
pattern = _pattern_to_string(pattern)
self._save_expect_failure(pattern, data, start_time)
raise ExpectTimeout(self.name + ": " + pattern)
return ret
return stdout if full_stdout else ret
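The new full_stdout flag is what lets a component unit test hand the whole captured log to the result parser instead of just the matched string. A minimal usage sketch, mirroring the esp_netif test added in this commit (it runs inside a function decorated with @ttfw_idf.idf_component_unit_test, so env and ttfw_idf are available):

dut = env.get_dut('esp_netif', 'components/esp_netif/test_app')
dut.start_app()
# default: expect() returns only the matched string (or RegEx groups)
# full_stdout=True: returns the output received until the expected string appears
log = dut.expect('Tests finished', full_stdout=True)
ttfw_idf.ComponentUTResult.parse_result(log)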
def _expect_multi(self, expect_all, expect_item_list, timeout):
"""

View File

@ -145,7 +145,7 @@ class AssignTest(object):
"""
Auto assign tests to CI jobs.
:param test_case_path: path of test case file(s)
:param test_case_paths: path of test case file(s)
:param ci_config_file: path of ``.gitlab-ci.yml``
"""
# subclass need to rewrite CI test job pattern, to filter all test jobs
@ -157,8 +157,8 @@ class AssignTest(object):
"supported_in_ci": True,
}
def __init__(self, test_case_path, ci_config_file, case_group=Group):
self.test_case_path = test_case_path
def __init__(self, test_case_paths, ci_config_file, case_group=Group):
self.test_case_paths = test_case_paths
self.test_case_file_pattern = None
self.test_cases = []
self.jobs = self._parse_gitlab_ci_config(ci_config_file)
@ -197,7 +197,7 @@ class AssignTest(object):
_case_filter = self.DEFAULT_FILTER.copy()
if case_filter:
_case_filter.update(case_filter)
test_methods = SearchCases.Search.search_test_cases(self.test_case_path, self.test_case_file_pattern)
test_methods = SearchCases.Search.search_test_cases(self.test_case_paths, self.test_case_file_pattern)
return CaseConfig.filter_test_cases(test_methods, _case_filter)
def _group_cases(self):

View File

@ -120,15 +120,19 @@ class Search(object):
return replicated_cases
@classmethod
def search_test_cases(cls, test_case, test_case_file_pattern=None):
def search_test_cases(cls, test_case_paths, test_case_file_pattern=None):
"""
search all test cases from a folder or file, and then do case replicate.
:param test_case: test case file(s) path
:param test_case_paths: test case file(s) paths
:param test_case_file_pattern: unix filename pattern
:return: a list of replicated test methods
"""
test_case_files = cls._search_test_case_files(test_case, test_case_file_pattern or cls.TEST_CASE_FILE_PATTERN)
if not isinstance(test_case_paths, list):
test_case_paths = [test_case_paths]
test_case_files = []
for path in test_case_paths:
test_case_files.extend(cls._search_test_case_files(path, test_case_file_pattern or cls.TEST_CASE_FILE_PATTERN))
test_cases = []
for test_case_file in test_case_files:
test_cases += cls._search_cases_from_file(test_case_file)
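With this change a single path string still works, but a list of app folders is now accepted and flattened into one list of test case files. A hedged usage sketch (only the esp_netif app exists in this commit; the second path is hypothetical):

from tiny_test_fw.Utility import SearchCases

# a single string is wrapped into a one-element list internally
cases = SearchCases.Search.search_test_cases('components/esp_netif/test_app')

# several app folders can now be scanned in one call
cases = SearchCases.Search.search_test_cases([
    'components/esp_netif/test_app',
    'components/some_component/test_apps',  # hypothetical second folder
])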

View File

@ -32,12 +32,12 @@ from tiny_test_fw.Utility import SearchCases, CaseConfig
class Runner(threading.Thread):
"""
:param test_case: test case file or folder
:param test_case_paths: test case file or folder
:param case_config: case config file, allow to filter test cases and pass data to test case
:param env_config_file: env config file
"""
def __init__(self, test_case, case_config, env_config_file=None):
def __init__(self, test_case_paths, case_config, env_config_file=None):
super(Runner, self).__init__()
self.setDaemon(True)
if case_config:
@ -45,7 +45,7 @@ class Runner(threading.Thread):
else:
test_suite_name = "TestRunner"
TinyFW.set_default_config(env_config_file=env_config_file, test_suite_name=test_suite_name)
test_methods = SearchCases.Search.search_test_cases(test_case)
test_methods = SearchCases.Search.search_test_cases(test_case_paths)
self.test_cases = CaseConfig.Parser.apply_config(test_methods, case_config)
self.test_result = []
@ -59,23 +59,23 @@ class Runner(threading.Thread):
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("test_case",
help="test case folder or file")
parser.add_argument("test_cases", nargs='+',
help="test case folders or files")
parser.add_argument("--case_config", "-c", default=None,
help="case filter/config file")
parser.add_argument("--env_config_file", "-e", default=None,
help="test env config file")
args = parser.parse_args()
runner = Runner(args.test_case, args.case_config, args.env_config_file)
test_cases = [os.path.join(os.getenv('IDF_PATH'), path) if not os.path.isabs(path) else path for path in args.test_cases]
runner = Runner(test_cases, args.case_config, args.env_config_file)
runner.start()
while True:
try:
runner.join(1)
if not runner.isAlive():
if not runner.is_alive():
break
except KeyboardInterrupt:
print("exit by Ctrl-C")

View File

@ -62,8 +62,7 @@ def main():
parser.add_argument('test_type',
choices=TEST_LABELS.keys(),
help='Scan test type')
parser.add_argument('-p', '--paths', nargs='+',
required=True,
parser.add_argument('paths', nargs='+',
help='One or more app paths')
parser.add_argument('-b', '--build-system',
choices=BUILD_SYSTEMS.keys(),
@ -74,8 +73,7 @@ def main():
parser.add_argument('-o', '--output-path',
required=True,
help="output path of the scan result")
parser.add_argument("--exclude",
action="append",
parser.add_argument("--exclude", nargs="*",
help='Ignore specified directory. Can be used multiple times.')
parser.add_argument('--preserve', action="store_true",
help='add this flag to preserve artifacts for all apps')

View File

@ -22,7 +22,7 @@ import sys
from abc import abstractmethod
from tiny_test_fw import App
from .IDFAssignTest import ExampleGroup, TestAppsGroup, UnitTestGroup, IDFCaseGroup
from .IDFAssignTest import ExampleGroup, TestAppsGroup, UnitTestGroup, IDFCaseGroup, ComponentUTGroup
try:
import gitlab_api
@ -202,9 +202,9 @@ class IDFApp(App.BaseApp):
def __str__(self):
parts = ['app<{}>'.format(self.app_path)]
if self.config_name:
parts.extend('config<{}>'.format(self.config_name))
parts.append('config<{}>'.format(self.config_name))
if self.target:
parts.extend('target<{}>'.format(self.target))
parts.append('target<{}>'.format(self.target))
return ' '.join(parts)
@classmethod
@ -447,6 +447,11 @@ class TestApp(Example):
super(TestApp, self).__init__(app_path, config_name, target, case_group, artifacts_cls)
class ComponentUTApp(TestApp):
def __init__(self, app_path, config_name='default', target='esp32', case_group=ComponentUTGroup, artifacts_cls=Artifacts):
super(ComponentUTApp, self).__init__(app_path, config_name, target, case_group, artifacts_cls)
class LoadableElfTestApp(TestApp):
def __init__(self, app_path, app_files, config_name='default', target='esp32', case_group=TestAppsGroup, artifacts_cls=Artifacts):
# add arg `app_files` for loadable elf test_app.

View File

@ -19,7 +19,7 @@ from tiny_test_fw.Utility import CIAssignTest
from idf_py_actions.constants import SUPPORTED_TARGETS
IDF_PATH_FROM_ENV = os.getenv("IDF_PATH")
IDF_PATH_FROM_ENV = os.getenv('IDF_PATH')
class IDFCaseGroup(CIAssignTest.Group):
@ -30,9 +30,9 @@ class IDFCaseGroup(CIAssignTest.Group):
def get_artifact_index_file(cls):
assert cls.LOCAL_BUILD_DIR
if IDF_PATH_FROM_ENV:
artifact_index_file = os.path.join(IDF_PATH_FROM_ENV, cls.LOCAL_BUILD_DIR, "artifact_index.json")
artifact_index_file = os.path.join(IDF_PATH_FROM_ENV, cls.LOCAL_BUILD_DIR, 'artifact_index.json')
else:
artifact_index_file = "artifact_index.json"
artifact_index_file = 'artifact_index.json'
return artifact_index_file
@ -41,25 +41,25 @@ class IDFAssignTest(CIAssignTest.AssignTest):
super(IDFAssignTest, self).__init__(test_case_path, ci_config_file, case_group)
def format_build_log_path(self, parallel_num):
return "{}/list_job_{}.json".format(self.case_group.LOCAL_BUILD_DIR, parallel_num)
return '{}/list_job_{}.json'.format(self.case_group.LOCAL_BUILD_DIR, parallel_num)
def create_artifact_index_file(self, project_id=None, pipeline_id=None):
if project_id is None:
project_id = os.getenv("CI_PROJECT_ID")
project_id = os.getenv('CI_PROJECT_ID')
if pipeline_id is None:
pipeline_id = os.getenv("CI_PIPELINE_ID")
pipeline_id = os.getenv('CI_PIPELINE_ID')
gitlab_inst = gitlab_api.Gitlab(project_id)
artifact_index_list = []
for build_job_name in self.case_group.BUILD_JOB_NAMES:
job_info_list = gitlab_inst.find_job_id(build_job_name, pipeline_id=pipeline_id)
for job_info in job_info_list:
parallel_num = job_info["parallel_num"] or 1 # Could be None if "parallel_num" not defined for the job
raw_data = gitlab_inst.download_artifact(job_info["id"],
parallel_num = job_info['parallel_num'] or 1 # Could be None if "parallel_num" not defined for the job
raw_data = gitlab_inst.download_artifact(job_info['id'],
[self.format_build_log_path(parallel_num)])[0]
build_info_list = [json.loads(line) for line in raw_data.decode().splitlines()]
for build_info in build_info_list:
build_info["ci_job_id"] = job_info["id"]
build_info['ci_job_id'] = job_info['id']
artifact_index_list.append(build_info)
artifact_index_file = self.case_group.get_artifact_index_file()
try:
@ -68,42 +68,47 @@ class IDFAssignTest(CIAssignTest.AssignTest):
if e.errno != errno.EEXIST:
raise e
with open(artifact_index_file, "w") as f:
with open(artifact_index_file, 'w') as f:
json.dump(artifact_index_list, f)
class ExampleGroup(IDFCaseGroup):
SORT_KEYS = CI_JOB_MATCH_KEYS = ["env_tag", "target"]
SORT_KEYS = CI_JOB_MATCH_KEYS = ['env_tag', 'target']
LOCAL_BUILD_DIR = "build_examples"
BUILD_JOB_NAMES = ["build_examples_cmake_{}".format(target) for target in SUPPORTED_TARGETS]
LOCAL_BUILD_DIR = 'build_examples'
BUILD_JOB_NAMES = ['build_examples_cmake_{}'.format(target) for target in SUPPORTED_TARGETS]
class TestAppsGroup(ExampleGroup):
LOCAL_BUILD_DIR = "build_test_apps"
BUILD_JOB_NAMES = ["build_test_apps_{}".format(target) for target in SUPPORTED_TARGETS]
LOCAL_BUILD_DIR = 'build_test_apps'
BUILD_JOB_NAMES = ['build_test_apps_{}'.format(target) for target in SUPPORTED_TARGETS]
class ComponentUTGroup(TestAppsGroup):
LOCAL_BUILD_DIR = 'build_component_ut'
BUILD_JOB_NAMES = ['build_component_ut_{}'.format(target) for target in SUPPORTED_TARGETS]
class UnitTestGroup(IDFCaseGroup):
SORT_KEYS = ["test environment", "tags", "chip_target"]
CI_JOB_MATCH_KEYS = ["test environment"]
SORT_KEYS = ['test environment', 'tags', 'chip_target']
CI_JOB_MATCH_KEYS = ['test environment']
LOCAL_BUILD_DIR = "tools/unit-test-app/builds"
BUILD_JOB_NAMES = ["build_esp_idf_tests_cmake_{}".format(target) for target in SUPPORTED_TARGETS]
LOCAL_BUILD_DIR = 'tools/unit-test-app/builds'
BUILD_JOB_NAMES = ['build_esp_idf_tests_cmake_{}'.format(target) for target in SUPPORTED_TARGETS]
MAX_CASE = 50
ATTR_CONVERT_TABLE = {
"execution_time": "execution time"
'execution_time': 'execution time'
}
DUT_CLS_NAME = {
"esp32": "ESP32DUT",
"esp32s2": "ESP32S2DUT",
"esp8266": "ESP8266DUT",
'esp32': 'ESP32DUT',
'esp32s2': 'ESP32S2DUT',
'esp8266': 'ESP8266DUT',
}
def __init__(self, case):
super(UnitTestGroup, self).__init__(case)
for tag in self._get_case_attr(case, "tags"):
for tag in self._get_case_attr(case, 'tags'):
self.ci_job_match_keys.add(tag)
@staticmethod
@ -118,7 +123,7 @@ class UnitTestGroup(IDFCaseGroup):
if self.accept_new_case():
for key in self.filters:
if self._get_case_attr(case, key) != self.filters[key]:
if key == "tags":
if key == 'tags':
if set(self._get_case_attr(case, key)).issubset(set(self.filters[key])):
continue
break
@ -135,18 +140,18 @@ class UnitTestGroup(IDFCaseGroup):
case_data = []
for case in test_cases:
one_case_data = {
"config": self._get_case_attr(case, "config"),
"name": self._get_case_attr(case, "summary"),
"reset": self._get_case_attr(case, "reset"),
"timeout": self._get_case_attr(case, "timeout"),
'config': self._get_case_attr(case, 'config'),
'name': self._get_case_attr(case, 'summary'),
'reset': self._get_case_attr(case, 'reset'),
'timeout': self._get_case_attr(case, 'timeout'),
}
if test_function in ["run_multiple_devices_cases", "run_multiple_stage_cases"]:
if test_function in ['run_multiple_devices_cases', 'run_multiple_stage_cases']:
try:
one_case_data["child case num"] = self._get_case_attr(case, "child case num")
one_case_data['child case num'] = self._get_case_attr(case, 'child case num')
except KeyError as e:
print("multiple devices/stages cases must contains at least two test functions")
print("case name: {}".format(one_case_data["name"]))
print('multiple devices/stages cases must contains at least two test functions')
print('case name: {}'.format(one_case_data['name']))
raise e
case_data.append(one_case_data)
@ -159,18 +164,18 @@ class UnitTestGroup(IDFCaseGroup):
:return: dict of list of cases for each test functions
"""
case_by_test_function = {
"run_multiple_devices_cases": [],
"run_multiple_stage_cases": [],
"run_unit_test_cases": [],
'run_multiple_devices_cases': [],
'run_multiple_stage_cases': [],
'run_unit_test_cases': [],
}
for case in self.case_list:
if case["multi_device"] == "Yes":
case_by_test_function["run_multiple_devices_cases"].append(case)
elif case["multi_stage"] == "Yes":
case_by_test_function["run_multiple_stage_cases"].append(case)
if case['multi_device'] == 'Yes':
case_by_test_function['run_multiple_devices_cases'].append(case)
elif case['multi_stage'] == 'Yes':
case_by_test_function['run_multiple_stage_cases'].append(case)
else:
case_by_test_function["run_unit_test_cases"].append(case)
case_by_test_function['run_unit_test_cases'].append(case)
return case_by_test_function
def output(self):
@ -180,12 +185,12 @@ class UnitTestGroup(IDFCaseGroup):
:return: {"Filter": case filter, "CaseConfig": list of case configs for cases in this group}
"""
target = self._get_case_attr(self.case_list[0], "chip_target")
target = self._get_case_attr(self.case_list[0], 'chip_target')
if target:
overwrite = {
"dut": {
"package": "ttfw_idf",
"class": self.DUT_CLS_NAME[target],
'dut': {
'package': 'ttfw_idf',
'class': self.DUT_CLS_NAME[target],
}
}
else:
@ -195,11 +200,11 @@ class UnitTestGroup(IDFCaseGroup):
output_data = {
# we don't need filter for test function, as UT uses a few test functions for all cases
"CaseConfig": [
'CaseConfig': [
{
"name": test_function,
"extra_data": self._create_extra_data(test_cases, test_function),
"overwrite": overwrite,
'name': test_function,
'extra_data': self._create_extra_data(test_cases, test_function),
'overwrite': overwrite,
} for test_function, test_cases in case_by_test_function.items() if test_cases
],
}
@ -258,13 +263,14 @@ class UnitTestAssignTest(IDFAssignTest):
return test_cases
test_cases = []
if os.path.isdir(self.test_case_path):
for yml_file in find_by_suffix('.yml', self.test_case_path):
test_cases.extend(get_test_cases_from_yml(yml_file))
elif os.path.isfile(self.test_case_path):
test_cases.extend(get_test_cases_from_yml(self.test_case_path))
else:
print("Test case path is invalid. Should only happen when use @bot to skip unit test.")
for path in self.test_case_paths:
if os.path.isdir(path):
for yml_file in find_by_suffix('.yml', path):
test_cases.extend(get_test_cases_from_yml(yml_file))
elif os.path.isfile(path) and path.endswith('.yml'):
test_cases.extend(get_test_cases_from_yml(path))
else:
print('Test case path is invalid. Should only happen when use @bot to skip unit test.')
# filter keys are lower case. Do map lower case keys with original keys.
try:
@ -291,27 +297,30 @@ class UnitTestAssignTest(IDFAssignTest):
# sort cases with configs and test functions
# in later stage cases with similar attributes are more likely to be assigned to the same job
# it will reduce the count of flash DUT operations
test_cases.sort(key=lambda x: x["config"] + x["multi_stage"] + x["multi_device"])
test_cases.sort(key=lambda x: x['config'] + x['multi_stage'] + x['multi_device'])
return test_cases
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("case_group", choices=["example_test", "custom_test", "unit_test"])
parser.add_argument("test_case", help="test case folder or file")
parser.add_argument("ci_config_file", help="gitlab ci config file")
parser.add_argument("output_path", help="output path of config files")
parser.add_argument("--pipeline_id", "-p", type=int, default=None, help="pipeline_id")
parser.add_argument("--test-case-file-pattern", help="file name pattern used to find Python test case files")
parser.add_argument('case_group', choices=['example_test', 'custom_test', 'unit_test', 'component_ut'])
parser.add_argument('test_case_paths', nargs='+', help='test case folder or file')
parser.add_argument('-c', '--config', help='gitlab ci config file')
parser.add_argument('-o', '--output', help='output path of config files')
parser.add_argument('--pipeline_id', '-p', type=int, default=None, help='pipeline_id')
parser.add_argument('--test-case-file-pattern', help='file name pattern used to find Python test case files')
args = parser.parse_args()
args_list = [args.test_case, args.ci_config_file]
test_case_paths = [os.path.join(IDF_PATH_FROM_ENV, path) if not os.path.isabs(path) else path for path in args.test_case_paths]
args_list = [test_case_paths, args.config]
if args.case_group == 'example_test':
assigner = ExampleAssignTest(*args_list)
elif args.case_group == 'custom_test':
assigner = TestAppsAssignTest(*args_list)
elif args.case_group == 'unit_test':
assigner = UnitTestAssignTest(*args_list)
elif args.case_group == 'component_ut':
assigner = ComponentUTAssignTest(*args_list)
else:
raise SystemExit(1) # which is impossible
@ -319,5 +328,5 @@ if __name__ == '__main__':
assigner.CI_TEST_JOB_PATTERN = re.compile(r'{}'.format(args.test_case_file_pattern))
assigner.assign_cases()
assigner.output_configs(args.output_path)
assigner.output_configs(args.output)
assigner.create_artifact_index_file()
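The new ComponentUTGroup simply points the existing artifact machinery at the component_ut build jobs. A rough illustration of the resolved values (assumes an ESP-IDF checkout with these Python packages on PYTHONPATH; the exact job list depends on SUPPORTED_TARGETS):

from ttfw_idf.IDFAssignTest import ComponentUTGroup

print(ComponentUTGroup.LOCAL_BUILD_DIR)    # 'build_component_ut'
print(ComponentUTGroup.BUILD_JOB_NAMES)    # e.g. ['build_component_ut_esp32', 'build_component_ut_esp32s2']
# with IDF_PATH set, artifacts are indexed from
# $IDF_PATH/build_component_ut/artifact_index.json
print(ComponentUTGroup.get_artifact_index_file())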

View File

@ -17,10 +17,13 @@ import logging
import os
import re
import junit_xml
from tiny_test_fw import TinyFW, Utility
from .IDFApp import IDFApp, Example, LoadableElfTestApp, UT, TestApp # noqa: export all Apps for users
from .IDFDUT import IDFDUT, ESP32DUT, ESP32S2DUT, ESP8266DUT, ESP32QEMUDUT # noqa: export DUTs for users
from .DebugUtils import OCDBackend, GDBBackend, CustomProcess # noqa: export DebugUtils for users
from .IDFApp import IDFApp, Example, LoadableElfTestApp, UT, TestApp, ComponentUTApp # noqa: export all Apps for users
from .IDFDUT import IDFDUT, ESP32DUT, ESP32S2DUT, ESP8266DUT, ESP32QEMUDUT # noqa: export DUTs for users
from .unity_test_parser import TestResults, TestFormat
# pass TARGET_DUT_CLS_DICT to Env.py to avoid circular dependency issue.
TARGET_DUT_CLS_DICT = {
@ -108,6 +111,22 @@ def ci_target_check(func):
return wrapper
def test_func_generator(func, app, target, ci_target, module, execution_time, level, erase_nvs, drop_kwargs_dut=False, **kwargs):
test_target = local_test_check(target)
dut = get_dut_class(test_target, erase_nvs)
if drop_kwargs_dut and 'dut' in kwargs: # panic_test() will inject dut, resolve conflicts here
dut = kwargs['dut']
del kwargs['dut']
original_method = TinyFW.test_method(
app=app, dut=dut, target=upper_list_or_str(target), ci_target=upper_list_or_str(ci_target),
module=module, execution_time=execution_time, level=level, erase_nvs=erase_nvs,
dut_dict=TARGET_DUT_CLS_DICT, **kwargs
)
test_func = original_method(func)
test_func.case_info["ID"] = format_case_id(target, test_func.case_info["name"])
return test_func
@ci_target_check
def idf_example_test(app=Example, target="ESP32", ci_target=None, module="examples", execution_time=1,
level="example", erase_nvs=True, config_name=None, **kwargs):
@ -125,19 +144,8 @@ def idf_example_test(app=Example, target="ESP32", ci_target=None, module="exampl
:param kwargs: other keyword args
:return: test method
"""
def test(func):
test_target = local_test_check(target)
dut = get_dut_class(test_target, erase_nvs)
original_method = TinyFW.test_method(
app=app, dut=dut, target=upper_list_or_str(target), ci_target=upper_list_or_str(ci_target),
module=module, execution_time=execution_time, level=level, erase_nvs=erase_nvs,
dut_dict=TARGET_DUT_CLS_DICT, **kwargs
)
test_func = original_method(func)
test_func.case_info["ID"] = format_case_id(target, test_func.case_info["name"])
return test_func
return test_func_generator(func, app, target, ci_target, module, execution_time, level, erase_nvs, **kwargs)
return test
@ -157,25 +165,36 @@ def idf_unit_test(app=UT, target="ESP32", ci_target=None, module="unit-test", ex
:param kwargs: other keyword args
:return: test method
"""
def test(func):
test_target = local_test_check(target)
dut = get_dut_class(test_target, erase_nvs)
original_method = TinyFW.test_method(
app=app, dut=dut, target=upper_list_or_str(target), ci_target=upper_list_or_str(ci_target),
module=module, execution_time=execution_time, level=level, erase_nvs=erase_nvs,
dut_dict=TARGET_DUT_CLS_DICT, **kwargs
)
test_func = original_method(func)
test_func.case_info["ID"] = format_case_id(target, test_func.case_info["name"])
return test_func
return test_func_generator(func, app, target, ci_target, module, execution_time, level, erase_nvs, **kwargs)
return test
@ci_target_check
def idf_custom_test(app=TestApp, target="ESP32", ci_target=None, module="misc", execution_time=1,
level="integration", erase_nvs=True, config_name=None, group="test-apps", **kwargs):
level="integration", erase_nvs=True, config_name=None, **kwargs):
"""
decorator for idf custom tests (with default values for some keyword args).
:param app: test application class
:param target: target supported, string or list
:param ci_target: target auto run in CI, if None than all target will be tested, None, string or list
:param module: module, string
:param execution_time: execution time in minutes, int
:param level: test level, could be used to filter test cases, string
:param erase_nvs: if need to erase_nvs in DUT.start_app()
:param config_name: if specified, name of the app configuration
:param kwargs: other keyword args
:return: test method
"""
def test(func):
return test_func_generator(func, app, target, ci_target, module, execution_time, level, erase_nvs, drop_kwargs_dut=True, **kwargs)
return test
@ci_target_check
def idf_component_unit_test(app=ComponentUTApp, target="ESP32", ci_target=None, module="misc", execution_time=1,
level="integration", erase_nvs=True, config_name=None, **kwargs):
"""
decorator for idf custom tests (with default values for some keyword args).
@ -187,29 +206,41 @@ def idf_custom_test(app=TestApp, target="ESP32", ci_target=None, module="misc",
:param level: test level, could be used to filter test cases, string
:param erase_nvs: if need to erase_nvs in DUT.start_app()
:param config_name: if specified, name of the app configuration
:param group: identifier to group custom tests (unused for now, defaults to "test-apps")
:param kwargs: other keyword args
:return: test method
"""
def test(func):
test_target = local_test_check(target)
dut = get_dut_class(test_target, erase_nvs)
if 'dut' in kwargs: # panic_test() will inject dut, resolve conflicts here
dut = kwargs['dut']
del kwargs['dut']
original_method = TinyFW.test_method(
app=app, dut=dut, target=upper_list_or_str(target), ci_target=upper_list_or_str(ci_target),
module=module, execution_time=execution_time, level=level, erase_nvs=erase_nvs,
dut_dict=TARGET_DUT_CLS_DICT, **kwargs
)
test_func = original_method(func)
test_func.case_info["ID"] = format_case_id(target, test_func.case_info["name"])
return test_func
return test_func_generator(func, app, target, ci_target, module, execution_time, level, erase_nvs, **kwargs)
return test
class ComponentUTResult:
"""
Function Class, parse component unit test results
"""
@staticmethod
def parse_result(stdout):
try:
results = TestResults(stdout, TestFormat.UNITY_FIXTURE_VERBOSE)
except (ValueError, TypeError) as e:
raise ValueError('Error occurs when parsing the component unit test stdout to JUnit report: ' + str(e))
group_name = results.tests()[0].group()
with open(os.path.join(os.getenv('LOG_PATH', ''), '{}_XUNIT_RESULT.xml'.format(group_name)), 'w') as fw:
junit_xml.to_xml_report_file(fw, [results.to_junit()])
if results.num_failed():
# raise exception if any case fails
err_msg = 'Failed Cases:\n'
for test_case in results.test_iter():
if test_case.result() == 'FAIL':
err_msg += '\t{}: {}'.format(test_case.name(), test_case.message())
raise AssertionError(err_msg)
def log_performance(item, value):
"""
do print performance with pre-defined format to console

View File

@ -0,0 +1,375 @@
"""
Modified version of https://github.com/ETCLabs/unity-test-parser/blob/develop/unity_test_parser.py,
since ``enum.auto()`` is only available in Python 3.6 and higher.
unity_test_parser.py
Parse the output of the Unity Test Framework for C. Parsed results are held in the TestResults
object format, which can then be converted to various XML formats.
"""
import enum
import re
import junit_xml
_NORMAL_TEST_REGEX = re.compile(r"(?P<file>.+):(?P<line>\d+):(?P<test_name>[^\s:]+):(?P<result>PASS|FAIL|IGNORE)(?:: (?P<message>.+))?")
_UNITY_FIXTURE_VERBOSE_PREFIX_REGEX = re.compile(r"(?P<prefix>TEST\((?P<test_group>[^\s,]+), (?P<test_name>[^\s\)]+)\))(?P<remainder>.+)?$")
_UNITY_FIXTURE_REMAINDER_REGEX = re.compile(r"^(?P<file>.+):(?P<line>\d+)::(?P<result>PASS|FAIL|IGNORE)(?:: (?P<message>.+))?")
_TEST_SUMMARY_BLOCK_REGEX = re.compile(
r"^(?P<num_tests>\d+) Tests (?P<num_failures>\d+) Failures (?P<num_ignored>\d+) Ignored\s*\r?\n(?P<overall_result>OK|FAIL)(?:ED)?", re.MULTILINE
)
_TEST_RESULT_ENUM = ["PASS", "FAIL", "IGNORE"]
class TestFormat(enum.Enum):
"""Represents the flavor of Unity used to produce a given output."""
UNITY_BASIC = 0
# UNITY_FIXTURE = enum.auto()
UNITY_FIXTURE_VERBOSE = 1
globals().update(TestFormat.__members__)
class TestStats:
"""Statistics about a test collection"""
def __init__(self):
self.total = 0
self.passed = 0
self.failed = 0
self.ignored = 0
def __eq__(self, other):
if isinstance(other, self.__class__):
return (self.total == other.total
and self.passed == other.passed
and self.failed == other.failed
and self.ignored == other.ignored)
return False
class TestResult:
"""
Class representing the result of a single test.
Contains the test name, its result (either PASS, FAIL or IGNORE), the file and line number if
the test result was not PASS, and an optional message.
"""
def __init__(
self,
test_name,
result,
group="default",
file="",
line=0,
message="",
full_line="",
):
if result not in _TEST_RESULT_ENUM:
raise ValueError("result must be one of {}.".format(_TEST_RESULT_ENUM))
self._test_name = test_name
self._result = result
self._group = group
self._message = message
self._full_line = full_line
if result != "PASS":
self._file = file
self._line = line
else:
self._file = ""
self._line = 0
def file(self):
"""The file name - returns empty string if the result is PASS."""
return self._file
def line(self):
"""The line number - returns 0 if the result is PASS."""
return self._line
def name(self):
"""The test name."""
return self._test_name
def result(self):
"""The test result, one of PASS, FAIL or IGNORED."""
return self._result
def group(self):
"""
The test group, if applicable.
For basic Unity output, this will always be "default".
"""
return self._group
def message(self):
"""The accompanying message - returns empty string if the result is PASS."""
return self._message
def full_line(self):
"""The original, full line of unit test output that this object was created from."""
return self._full_line
class TestResults:
"""
Class representing Unity test results.
After being initialized with raw test output, it parses the output and represents it as a list
of TestResult objects which can be inspected or converted to other types of output, e.g. JUnit
XML.
"""
def __init__(self, test_output, test_format=TestFormat.UNITY_BASIC):
"""
Create a new TestResults object from Unity test output.
Keyword arguments:
test_output -- The full test console output, must contain the overall result and summary
block at the bottom.
Optional arguments:
test_format -- TestFormat enum representing the flavor of Unity used to create the output.
Exceptions:
ValueError, if the test output is not formatted properly.
"""
self._tests = []
self._test_stats = self._find_summary_block(test_output)
if test_format is TestFormat.UNITY_BASIC:
self._parse_unity_basic(test_output)
elif test_format is TestFormat.UNITY_FIXTURE_VERBOSE:
self._parse_unity_fixture_verbose(test_output)
else:
raise ValueError(
"test_format must be one of UNITY_BASIC or UNITY_FIXTURE_VERBOSE."
)
def num_tests(self):
"""The total number of tests parsed."""
return self._test_stats.total
def num_passed(self):
"""The number of tests with result PASS."""
return self._test_stats.passed
def num_failed(self):
"""The number of tests with result FAIL."""
return self._test_stats.failed
def num_ignored(self):
"""The number of tests with result IGNORE."""
return self._test_stats.ignored
def test_iter(self):
"""Get an iterator for iterating over individual tests.
Returns an iterator over TestResult objects.
Example:
for test in unity_results.test_iter():
print(test.name())
"""
return iter(self._tests)
def tests(self):
"""Get a list of all the tests (TestResult objects)."""
return self._tests
def to_junit(
self, suite_name="all_tests",
):
"""
Convert the tests to JUnit XML.
Returns a junit_xml.TestSuite containing all of the test cases. One test suite will be
generated with the name given in suite_name. Unity Fixture test groups are mapped to the
classname attribute of test cases; for basic Unity output there will be one class named
"default".
Optional arguments:
suite_name -- The name to use for the "name" and "package" attributes of the testsuite element.
Sample output:
<testsuite disabled="0" errors="0" failures="1" name="[suite_name]" package="[suite_name]" skipped="0" tests="8" time="0">
<testcase classname="test_group_1" name="group_1_test" />
<testcase classname="test_group_2" name="group_2_test" />
</testsuite>
"""
test_case_list = []
for test in self._tests:
if test.result() == "PASS":
test_case_list.append(
junit_xml.TestCase(name=test.name(), classname=test.group())
)
else:
junit_tc = junit_xml.TestCase(
name=test.name(),
classname=test.group(),
file=test.file(),
line=test.line(),
)
if test.result() == "FAIL":
junit_tc.add_failure_info(
message=test.message(), output=test.full_line()
)
elif test.result() == "IGNORE":
junit_tc.add_skipped_info(
message=test.message(), output=test.full_line()
)
test_case_list.append(junit_tc)
return junit_xml.TestSuite(
name=suite_name, package=suite_name, test_cases=test_case_list
)
def _find_summary_block(self, unity_output):
"""
Find and parse the test summary block.
Unity prints a test summary block at the end of a test run of the form:
-----------------------
X Tests Y Failures Z Ignored
[PASS|FAIL]
Returns the contents of the test summary block as a TestStats object.
"""
match = _TEST_SUMMARY_BLOCK_REGEX.search(unity_output)
if not match:
raise ValueError("A Unity test summary block was not found.")
try:
stats = TestStats()
stats.total = int(match.group("num_tests"))
stats.failed = int(match.group("num_failures"))
stats.ignored = int(match.group("num_ignored"))
stats.passed = stats.total - stats.failed - stats.ignored
return stats
except ValueError:
raise ValueError("The Unity test summary block was not valid.")
def _parse_unity_basic(self, unity_output):
"""
Parse basic unity output.
This is of the form file:line:test_name:result[:optional_message]
"""
found_test_stats = TestStats()
for test in _NORMAL_TEST_REGEX.finditer(unity_output):
try:
new_test = TestResult(
test.group("test_name"),
test.group("result"),
file=test.group("file"),
line=int(test.group("line")),
message=test.group("message")
if test.group("message") is not None
else "",
full_line=test.group(0),
)
except ValueError:
continue
self._add_new_test(new_test, found_test_stats)
if len(self._tests) == 0:
raise ValueError("No tests were found.")
if found_test_stats != self._test_stats:
raise ValueError("Test output does not match summary block.")
def _parse_unity_fixture_verbose(self, unity_output):
"""
Parse the output of the unity_fixture add-in invoked with the -v flag.
This is a more complex operation than basic unity output, because the output for a single
test can span multiple lines. There is a prefix of the form "TEST(test_group, test_name)"
that always exists on the first line for a given test. Immediately following that can be a
pass or fail message, or some number of diagnostic messages followed by a pass or fail
message.
"""
found_test_stats = TestStats()
line_iter = iter(unity_output.splitlines())
try:
line = next(line_iter)
while True:
prefix_match = _UNITY_FIXTURE_VERBOSE_PREFIX_REGEX.search(line)
line = next(line_iter)
if prefix_match:
# Handle the remaining portion of a test case line after the unity_fixture
# prefix.
remainder = prefix_match.group("remainder")
if remainder:
self._parse_unity_fixture_remainder(
prefix_match, remainder, found_test_stats
)
# Handle any subsequent lines with more information on the same test case.
while not _UNITY_FIXTURE_VERBOSE_PREFIX_REGEX.search(line):
self._parse_unity_fixture_remainder(
prefix_match, line, found_test_stats
)
line = next(line_iter)
except StopIteration:
pass
if len(self._tests) == 0:
raise ValueError("No tests were found.")
if found_test_stats != self._test_stats:
raise ValueError("Test output does not match summary block.")
def _parse_unity_fixture_remainder(self, prefix_match, remainder, test_stats):
"""
Parse the remainder of a Unity Fixture test case.
Can be on the same line as the prefix or on subsequent lines.
"""
new_test = None
if remainder == " PASS":
new_test = TestResult(
prefix_match.group("test_name"),
"PASS",
group=prefix_match.group("test_group"),
full_line=prefix_match.group(0),
)
else:
remainder_match = _UNITY_FIXTURE_REMAINDER_REGEX.match(remainder)
if remainder_match:
new_test = TestResult(
prefix_match.group("test_name"),
remainder_match.group("result"),
group=prefix_match.group("test_group"),
file=remainder_match.group("file"),
line=int(remainder_match.group("line")),
message=remainder_match.group("message")
if remainder_match.group("message") is not None
else "",
full_line=prefix_match.group("prefix") + remainder_match.group(0),
)
if new_test is not None:
self._add_new_test(new_test, test_stats)
def _add_new_test(self, new_test, test_stats):
"""Add a new test and increment the proper members of test_stats."""
test_stats.total += 1
if new_test.result() == "PASS":
test_stats.passed += 1
elif new_test.result() == "FAIL":
test_stats.failed += 1
else:
test_stats.ignored += 1
self._tests.append(new_test)
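To illustrate what the parser consumes, here is a self-contained sketch that feeds TestResults a fabricated unity_fixture verbose log (the test names are made up; the module path follows the import used in ttfw_idf above):

from ttfw_idf.unity_test_parser import TestResults, TestFormat

SAMPLE_OUTPUT = """TEST(esp_netif, init_deinit) PASS
TEST(esp_netif, get_handle_from_if_key) PASS

-----------------------
2 Tests 0 Failures 0 Ignored
OK
"""

results = TestResults(SAMPLE_OUTPUT, TestFormat.UNITY_FIXTURE_VERBOSE)
assert results.num_tests() == 2 and results.num_failed() == 0
for test in results.test_iter():
    print(test.group(), test.name(), test.result())  # e.g. esp_netif init_deinit PASS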