Merge branch 'feature/ci_optimize_assign_test' into 'master'

CI: optimize assign test

See merge request espressif/esp-idf!6785
He Yin Ling 2019-11-24 19:22:41 +08:00
commit d839f39ed8
6 changed files with 191 additions and 166 deletions


@@ -176,7 +176,7 @@ example_test_002:
- cd $TEST_FW_PATH
# run test
- python Runner.py $TEST_CASE_PATH -c $CONFIG_FILE -e $ENV_FILE
.example_test_003:
extends: .example_test_template
@@ -245,17 +245,16 @@ example_test_010:
UT_001:
extends: .unit_test_template
parallel: 50
parallel: 28
tags:
- ESP32_IDF
- UT_T1_1
# Max. allowed value of 'parallel' is 50.
# See UT_030 below if you want to add more unit test jobs.
UT_002:
extends: .unit_test_template
parallel: 30
parallel: 9
tags:
- ESP32_IDF
- UT_T1_1
@@ -263,14 +262,12 @@ UT_002:
UT_003:
extends: .unit_test_template
parallel: 3
tags:
- ESP32_IDF
- UT_T1_SDMODE
UT_004:
extends: .unit_test_template
parallel: 3
tags:
- ESP32_IDF
- UT_T1_SPIMODE
@@ -289,13 +286,6 @@ UT_006:
- UT_T1_SPIMODE
- psram
UT_007:
extends: .unit_test_template
parallel: 4
tags:
- ESP32_IDF
- UT_T1_GPIO
UT_008:
extends: .unit_test_template
tags:
@@ -303,13 +293,6 @@ UT_008:
- UT_T1_GPIO
- psram
UT_009:
extends: .unit_test_template
parallel: 4
tags:
- ESP32_IDF
- UT_T1_PCNT
UT_010:
extends: .unit_test_template
tags:
@@ -317,13 +300,6 @@ UT_010:
- UT_T1_PCNT
- psram
UT_011:
extends: .unit_test_template
parallel: 4
tags:
- ESP32_IDF
- UT_T1_LEDC
UT_012:
extends: .unit_test_template
tags:
@@ -331,13 +307,6 @@ UT_012:
- UT_T1_LEDC
- psram
UT_013:
extends: .unit_test_template
parallel: 4
tags:
- ESP32_IDF
- UT_T2_RS485
UT_014:
extends: .unit_test_template
tags:
@@ -347,7 +316,6 @@ UT_014:
UT_015:
extends: .unit_test_template
parallel: 4
tags:
- ESP32_IDF
- UT_T1_RMT
@@ -361,26 +329,18 @@ UT_016:
UT_017:
extends: .unit_test_template
parallel: 3
tags:
- ESP32_IDF
- EMMC
UT_018:
extends: .unit_test_template
parallel: 5
parallel: 2
tags:
- ESP32_IDF
- UT_T1_1
- 8Mpsram
UT_019:
extends: .unit_test_template
parallel: 4
tags:
- ESP32_IDF
- Example_SPI_Multi_device
UT_020:
extends: .unit_test_template
tags:
@@ -388,13 +348,6 @@ UT_020:
- Example_SPI_Multi_device
- psram
UT_021:
extends: .unit_test_template
parallel: 4
tags:
- ESP32_IDF
- UT_T2_I2C
UT_022:
extends: .unit_test_template
tags:
@@ -404,7 +357,6 @@ UT_022:
UT_023:
extends: .unit_test_template
parallel: 4
tags:
- ESP32_IDF
- UT_T1_MCPWM
@@ -416,13 +368,6 @@ UT_024:
- UT_T1_MCPWM
- psram
UT_025:
extends: .unit_test_template
parallel: 4
tags:
- ESP32_IDF
- UT_T1_I2S
UT_026:
extends: .unit_test_template
tags:
@@ -430,13 +375,6 @@ UT_026:
- UT_T1_I2S
- psram
UT_027:
extends: .unit_test_template
parallel: 3
tags:
- ESP32_IDF
- UT_T2_1
UT_028:
extends: .unit_test_template
tags:
@@ -444,34 +382,12 @@ UT_028:
- UT_T2_1
- psram
UT_029:
extends: .unit_test_template
tags:
- ESP32_IDF
- UT_T2_1
- 8Mpsram
# Gitlab parallel max value is 50. We need to create another UT job if parallel is larger than 50.
UT_030:
extends: .unit_test_template
parallel: 10
tags:
- ESP32_IDF
- UT_T1_1
UT_031:
extends: .unit_test_template
tags:
- ESP32_IDF
- UT_T1_FlashEncryption
UT_032:
extends: .unit_test_template
parallel: 4
tags:
- ESP32_IDF
- UT_T2_Ethernet
UT_033:
extends: .unit_test_template
tags:
@@ -481,21 +397,19 @@ UT_033:
UT_034:
extends: .unit_test_template
parallel: 4
tags:
- ESP32_IDF
- UT_T1_ESP_FLASH
UT_035:
extends: .unit_test_template
parallel: 35
parallel: 16
tags:
- ESP32S2BETA_IDF
- UT_T1_1
UT_036:
extends: .unit_test_template
parallel: 2
tags:
- ESP32_IDF
- UT_T1_PSRAMV0
@@ -503,18 +417,10 @@ UT_036:
UT_037:
extends: .unit_test_template
parallel: 4
tags:
- ESP32S2BETA_IDF
- UT_T1_LEDC
UT_040:
extends: .unit_test_template
parallel: 3
tags:
- ESP32_IDF
- UT_T1_no32kXTAL
UT_041:
extends: .unit_test_template
tags:
@@ -522,13 +428,6 @@ UT_041:
- UT_T1_no32kXTAL
- psram
UT_042:
extends: .unit_test_template
parallel: 3
tags:
- ESP32_IDF
- UT_T1_32kXTAL
UT_043:
extends: .unit_test_template
tags:
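The reduced ``parallel`` values above follow from the assigner changes later in this merge request: a group now holds up to 50 cases instead of 30, and cases that need different test functions can share a group, so each tag combination needs fewer runners. A rough, purely illustrative sketch of that relationship (the case count is hypothetical, and it assumes each parallel runner executes one generated group config):

import math

# illustrative only: raising MAX_CASE from 30 to 50 shrinks the number of groups
# a given pool of cases can produce, and fewer groups means a smaller
# ``parallel`` value for the jobs that run them
def min_groups(case_count, max_case):
    return math.ceil(case_count / max_case)

hypothetical_cases = 1000
print(min_groups(hypothetical_cases, 30))   # 34 groups with the old limit
print(min_groups(hypothetical_cases, 50))   # 20 groups with the new limit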


@@ -24,12 +24,17 @@ except ImportError:
class Group(CIAssignTest.Group):
SORT_KEYS = ["config", "test environment", "multi_device", "multi_stage", "tags", "chip_target"]
MAX_CASE = 30
SORT_KEYS = ["test environment", "tags", "chip_target"]
MAX_CASE = 50
ATTR_CONVERT_TABLE = {
"execution_time": "execution time"
}
CI_JOB_MATCH_KEYS = ["test environment"]
DUT_CLS_NAME = {
"esp32": "ESP32DUT",
"esp32s2beta": "ESP32S2DUT",
"esp8266": "ESP8266DUT",
}
def __init__(self, case):
super(Group, self).__init__(case)
@@ -42,13 +47,28 @@ class Group(CIAssignTest.Group):
attr = Group.ATTR_CONVERT_TABLE[attr]
return case[attr]
def _create_extra_data(self, test_function):
def add_extra_case(self, case):
""" If current group contains all tags required by case, then add succeed """
added = False
if self.accept_new_case():
for key in self.filters:
if self._get_case_attr(case, key) != self.filters[key]:
if key == "tags":
if self._get_case_attr(case, key).issubset(self.filters[key]):
continue
break
else:
self.case_list.append(case)
added = True
return added
def _create_extra_data(self, test_cases, test_function):
"""
For unit test case, we need to copy some attributes of test cases into config file.
So unit test function knows how to run the case.
"""
case_data = []
for case in self.case_list:
for case in test_cases:
one_case_data = {
"config": self._get_case_attr(case, "config"),
"name": self._get_case_attr(case, "summary"),
@@ -67,19 +87,26 @@ class Group(CIAssignTest.Group):
case_data.append(one_case_data)
return case_data
def _map_test_function(self):
def _divide_case_by_test_function(self):
"""
determine which test function to use according to current test case
divide cases of current test group by test function they need to use
:return: test function name to use
:return: dict mapping each test function to its list of cases
"""
if self.filters["multi_device"] == "Yes":
test_function = "run_multiple_devices_cases"
elif self.filters["multi_stage"] == "Yes":
test_function = "run_multiple_stage_cases"
else:
test_function = "run_unit_test_cases"
return test_function
case_by_test_function = {
"run_multiple_devices_cases": [],
"run_multiple_stage_cases": [],
"run_unit_test_cases": [],
}
for case in self.case_list:
if case["multi_device"] == "Yes":
case_by_test_function["run_multiple_devices_cases"].append(case)
elif case["multi_stage"] == "Yes":
case_by_test_function["run_multiple_stage_cases"].append(case)
else:
case_by_test_function["run_unit_test_cases"].append(case)
return case_by_test_function
def output(self):
"""
@@ -87,35 +114,30 @@ class Group(CIAssignTest.Group):
:return: {"Filter": case filter, "CaseConfig": list of case configs for cases in this group}
"""
test_function = self._map_test_function()
target = self._get_case_attr(self.case_list[0], "chip_target")
if target:
overwrite = {
"dut": {
"path": "IDF/IDFDUT.py",
"class": self.DUT_CLS_NAME[target],
}
}
else:
overwrite = dict()
case_by_test_function = self._divide_case_by_test_function()
output_data = {
# we don't need filter for test function, as UT uses a few test functions for all cases
"CaseConfig": [
{
"name": test_function,
"extra_data": self._create_extra_data(test_function),
}
"extra_data": self._create_extra_data(test_cases, test_function),
"overwrite": overwrite,
} for test_function, test_cases in case_by_test_function.iteritems() if test_cases
],
}
target = self._get_case_attr(self.case_list[0], "chip_target")
if target is not None:
target_dut = {
"esp32": "ESP32DUT",
"esp32s2beta": "ESP32S2DUT",
"esp8266": "ESP8266DUT",
}[target]
output_data.update({
"Filter": {
"overwrite": {
"dut": {
"path": "IDF/IDFDUT.py",
"class": target_dut,
}
}
}
})
return output_data
@@ -135,6 +157,8 @@ class UnitTestAssignTest(CIAssignTest.AssignTest):
with open(test_case_path, "r") as f:
raw_data = yaml.load(f, Loader=Loader)
test_cases = raw_data["test cases"]
for case in test_cases:
case["tags"] = set(case["tags"])
except IOError:
print("Test case path is invalid. Should only happen when use @bot to skip unit test.")
test_cases = []
@@ -160,6 +184,10 @@ class UnitTestAssignTest(CIAssignTest.AssignTest):
# case don't have this key, regard as filter success
filtered_cases.append(case)
test_cases = filtered_cases
# sort cases by config and test function
# so that in the later assignment stage, cases with similar attributes are more likely to be assigned to the same job
# this reduces the number of DUT flash operations
test_cases.sort(key=lambda x: x["config"] + x["multi_stage"] + x["multi_device"])
return test_cases
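Taken together, the Group changes above let an already assigned group accept extra cases whose tag requirements are a subset of the group's own tags, split each group's cases by the test function they need, and add a per-target DUT class overwrite. A minimal, self-contained sketch of the subset check and of the new sort key; the case dicts are invented for illustration and this is not the real module:

group_tags = {"UT_T1_1", "psram"}   # tags the group's filter already guarantees

def accepts_as_extra(case_tags):
    # mirrors the issubset check in Group.add_extra_case: a case may join an
    # already assigned group if that group provides every tag the case needs
    return set(case_tags).issubset(group_tags)

print(accepts_as_extra({"UT_T1_1"}))            # True
print(accepts_as_extra({"UT_T1_1", "psram"}))   # True
print(accepts_as_extra({"UT_T2_RS485"}))        # False

# the same sort key as above: cases sharing config and test function become
# neighbours, so they tend to land in the same group and the DUT is flashed less often
cases = [
    {"config": "psram",   "multi_stage": "No",  "multi_device": "No"},
    {"config": "default", "multi_stage": "Yes", "multi_device": "No"},
    {"config": "default", "multi_stage": "No",  "multi_device": "No"},
]
cases.sort(key=lambda x: x["config"] + x["multi_stage"] + x["multi_device"])
print([c["config"] for c in cases])             # ['default', 'default', 'psram']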


@@ -105,6 +105,20 @@ class Group(object):
added = True
return added
def add_extra_case(self, case):
"""
By default (``add_case`` method), a case is only added when all of its filter values match those of the group.
But sometimes we also want to accept cases which are not a best fit.
For example, a group that can run cases requiring (A, B) can also accept cases which only require (A, ) or (B, ).
When assignment by best fit fails, this method is used to try to place the remaining failed cases.
Subclasses that want this retry behavior need to overwrite this method.
The logic used to handle such scenarios can differ between subclasses.
:return: True if accepted else False
"""
pass
def output(self):
"""
output data for job configs
@@ -193,6 +207,26 @@ class AssignTest(object):
groups.append(self.case_group(case))
return groups
def _assign_failed_cases(self, assigned_groups, failed_groups):
""" try to assign failed cases to already assigned test groups """
still_failed_groups = []
failed_cases = []
for group in failed_groups:
failed_cases.extend(group.case_list)
for case in failed_cases:
# first try to assign to already assigned groups
for group in assigned_groups:
if group.add_extra_case(case):
break
else:
# if failed, group the failed cases
for group in still_failed_groups:
if group.add_case(case):
break
else:
still_failed_groups.append(self.case_group(case))
return still_failed_groups
@staticmethod
def _apply_bot_filter():
"""
@@ -218,6 +252,21 @@ class AssignTest(object):
test_count = int(test_count)
self.test_cases *= test_count
@staticmethod
def _count_groups_by_keys(test_groups):
"""
Count the number of test groups by job match keys.
This information is important for updating the CI config file.
"""
group_count = dict()
for group in test_groups:
key = ",".join(group.ci_job_match_keys)
try:
group_count[key] += 1
except KeyError:
group_count[key] = 1
return group_count
def assign_cases(self):
"""
separate test cases to groups and assign test cases to CI jobs.
@@ -226,21 +275,46 @@ class AssignTest(object):
:return: None
"""
failed_to_assign = []
assigned_groups = []
case_filter = self._apply_bot_filter()
self.test_cases = self._search_cases(self.test_case_path, case_filter)
self._apply_bot_test_count()
test_groups = self._group_cases()
for group in test_groups:
for job in self.jobs:
if job.match_group(group):
job.assign_group(group)
assigned_groups.append(group)
break
else:
failed_to_assign.append(group)
if failed_to_assign:
console_log("Too many test cases vs jobs to run. Please add the following jobs to tools/ci/config/target-test.yml with specific tags:", "R")
for group in failed_to_assign:
console_log("* Add job with: " + ",".join(group.ci_job_match_keys), "R")
failed_to_assign = self._assign_failed_cases(assigned_groups, failed_to_assign)
# print debug info
# total requirement of current pipeline
required_group_count = self._count_groups_by_keys(test_groups)
console_log("Required job count by tags:")
for tags in required_group_count:
console_log("\t{}: {}".format(tags, required_group_count[tags]))
# number of unused jobs
not_used_jobs = [job for job in self.jobs if "case group" not in job]
if not_used_jobs:
console_log("{} jobs not used. Please check if you define too much jobs".format(len(not_used_jobs)), "O")
for job in not_used_jobs:
console_log("\t{}".format(job["name"]), "O")
# failures
if failed_to_assign:
console_log("Too many test cases vs jobs to run. "
"Please increase parallel count in tools/ci/config/target-test.yml "
"for jobs with specific tags:", "R")
failed_group_count = self._count_groups_by_keys(failed_to_assign)
for tags in failed_group_count:
console_log("\t{}: {}".format(tags, failed_group_count[tags]), "R")
raise RuntimeError("Failed to assign test case to CI jobs")
def output_configs(self, output_path):
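The new second pass in assign_cases leans on Python's for/else pattern twice: failed cases are first offered to the groups that already got a job, and whatever nobody accepts is regrouped and reported. A stripped-down sketch of that retry loop, with plain dicts standing in for the real Group and job objects (the acceptance rule here is a stand-in, not the actual add_extra_case logic):

def assign_failed(assigned_groups, failed_cases, accepts):
    # mirrors _assign_failed_cases: offer each failed case to every already
    # assigned group; whatever nobody accepts stays failed
    still_failed = []
    for case in failed_cases:
        for group in assigned_groups:
            if accepts(group, case):
                group["cases"].append(case)
                break
        else:                       # the inner loop found no group for this case
            still_failed.append(case)
    return still_failed

groups = [{"tags": {"UT_T1_1", "psram"}, "cases": []}]
leftover = assign_failed(groups,
                         [{"tags": {"psram"}}, {"tags": {"EMMC"}}],
                         lambda g, c: c["tags"].issubset(g["tags"]))
print(len(groups[0]["cases"]), len(leftover))   # 1 1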


@@ -159,7 +159,7 @@ class Parser(object):
configs = cls.DEFAULT_CONFIG.copy()
if config_file:
with open(config_file, "r") as f:
configs.update(yaml.load(f), Loader=Loader)
configs.update(yaml.load(f, Loader=Loader))
return configs
@classmethod
@@ -190,9 +190,9 @@ class Parser(object):
test_case_list = []
for _config in configs["CaseConfig"]:
_filter = configs["Filter"].copy()
_overwrite = cls.handle_overwrite_args(_config.pop("overwrite", dict()))
_extra_data = _config.pop("extra_data", None)
_filter.update(_config)
_overwrite = cls.handle_overwrite_args(_filter.pop("overwrite", dict()))
_extra_data = _filter.pop("extra_data", None)
for test_method in test_methods:
if _filter_one_case(test_method, _filter):
test_case_list.append(TestCase.TestCase(test_method, _extra_data, **_overwrite))
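The second Parser change above is subtle: ``overwrite`` and ``extra_data`` are now popped from the merged filter rather than from the raw CaseConfig entry, so either the Filter section or a CaseConfig entry may supply them (the CaseConfig value wins) and neither leaks into the attributes used to filter test methods. A small sketch with an invented config layout:

filter_section = {"env_tag": "UT_T1_1",
                  "overwrite": {"dut": {"path": "IDF/IDFDUT.py", "class": "ESP32DUT"}}}
case_entry = {"name": "run_unit_test_cases",
              "extra_data": [{"name": "some unit test", "config": "default"}]}

_filter = filter_section.copy()
_filter.update(case_entry)                       # CaseConfig values override Filter values
_overwrite = _filter.pop("overwrite", dict())    # consumed here, never used as a filter
_extra_data = _filter.pop("extra_data", None)
print(sorted(_filter))                           # ['env_tag', 'name']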


@@ -38,11 +38,23 @@ def console_log(data, color="white", end="\n"):
sys.stdout.flush()
__LOADED_MODULES = dict()
# we should only load each module once.
# if we load the same module twice,
# python will regard the objects loaded the first time and the second time as having different types.
# this leads to strange errors like `isinstance(object, type_of_this_object)` returning False
def load_source(name, path):
try:
from importlib.machinery import SourceFileLoader
return SourceFileLoader(name, path).load_module()
except ImportError:
# importlib.machinery doesn't exist in Python 2 so we will use imp (deprecated in Python 3)
import imp
return imp.load_source(name, path)
return __LOADED_MODULES[name]
except KeyError:
try:
from importlib.machinery import SourceFileLoader
ret = SourceFileLoader(name, path).load_module()
except ImportError:
# importlib.machinery doesn't exist in Python 2 so we will use imp (deprecated in Python 3)
import imp
ret = imp.load_source(name, path)
__LOADED_MODULES[name] = ret
return ret
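The module cache added above guards against the double-load problem described in the comments: loading the same source file twice produces two distinct module objects, and therefore two distinct copies of every class defined in it. A minimal reproduction of that symptom, using a hypothetical module path rather than anything from this repository:

import importlib.util

def load_fresh(name, path):
    # loads the file without any caching, which is the behaviour the
    # __LOADED_MODULES dict now prevents
    spec = importlib.util.spec_from_file_location(name, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module

# hypothetical file that defines `class SomeCase: pass`
# m1 = load_fresh("case_config", "/tmp/case_config.py")
# m2 = load_fresh("case_config", "/tmp/case_config.py")
# isinstance(m1.SomeCase(), m2.SomeCase) -> False, although both copies come from the same file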


@@ -158,6 +158,10 @@ def replace_app_bin(dut, name, new_app_bin):
break
def format_case_name(case):
return "[{}] {}".format(case["config"], case["name"])
def reset_dut(dut):
dut.reset()
# esptool ``run`` cmd takes quite long time.
@@ -203,9 +207,9 @@ def run_one_normal_case(dut, one_case, junit_test_case):
test_finish.append(True)
output = dut.stop_capture_raw_data()
if result:
Utility.console_log("Success: " + one_case["name"], color="green")
Utility.console_log("Success: " + format_case_name(one_case), color="green")
else:
Utility.console_log("Failed: " + one_case["name"], color="red")
Utility.console_log("Failed: " + format_case_name(one_case), color="red")
junit_test_case.add_failure_info(output)
raise TestCaseFailed()
@@ -222,7 +226,7 @@ def run_one_normal_case(dut, one_case, junit_test_case):
assert not exception_reset_list
if int(data[1]):
# case ignored
Utility.console_log("Ignored: " + one_case["name"], color="orange")
Utility.console_log("Ignored: " + format_case_name(one_case), color="orange")
junit_test_case.add_skipped_info("ignored")
one_case_finish(not int(data[0]))
@@ -299,13 +303,15 @@ def run_unit_test_cases(env, extra_data):
run_one_normal_case(dut, one_case, junit_test_case)
performance_items = dut.get_performance_items()
except TestCaseFailed:
failed_cases.append(one_case["name"])
failed_cases.append(format_case_name(one_case))
except Exception as e:
junit_test_case.add_failure_info("Unexpected exception: " + str(e))
failed_cases.append(one_case["name"])
failed_cases.append(format_case_name(one_case))
finally:
TinyFW.JunitReport.update_performance(performance_items)
TinyFW.JunitReport.test_case_finish(junit_test_case)
# close DUT when finish running all cases for one config
env.close_dut(dut.name)
# raise exception if any case fails
if failed_cases:
@@ -502,11 +508,15 @@ def run_multiple_devices_cases(env, extra_data):
junit_test_case.add_failure_info("Unexpected exception: " + str(e))
finally:
if result:
Utility.console_log("Success: " + one_case["name"], color="green")
Utility.console_log("Success: " + format_case_name(one_case), color="green")
else:
failed_cases.append(one_case["name"])
Utility.console_log("Failed: " + one_case["name"], color="red")
failed_cases.append(format_case_name(one_case))
Utility.console_log("Failed: " + format_case_name(one_case), color="red")
TinyFW.JunitReport.test_case_finish(junit_test_case)
# close all DUTs when finish running all cases for one config
for dut in duts:
env.close_dut(dut)
duts = {}
if failed_cases:
Utility.console_log("Failed Cases:", color="red")
@@ -563,9 +573,9 @@ def run_one_multiple_stage_case(dut, one_case, junit_test_case):
result = result and check_reset()
output = dut.stop_capture_raw_data()
if result:
Utility.console_log("Success: " + one_case["name"], color="green")
Utility.console_log("Success: " + format_case_name(one_case), color="green")
else:
Utility.console_log("Failed: " + one_case["name"], color="red")
Utility.console_log("Failed: " + format_case_name(one_case), color="red")
junit_test_case.add_failure_info(output)
raise TestCaseFailed()
stage_finish.append("break")
@@ -582,7 +592,7 @@ def run_one_multiple_stage_case(dut, one_case, junit_test_case):
# in this scenario reset should not happen
if int(data[1]):
# case ignored
Utility.console_log("Ignored: " + one_case["name"], color="orange")
Utility.console_log("Ignored: " + format_case_name(one_case), color="orange")
junit_test_case.add_skipped_info("ignored")
# only passed in last stage will be regarded as real pass
if last_stage():
@@ -651,13 +661,15 @@ def run_multiple_stage_cases(env, extra_data):
run_one_multiple_stage_case(dut, one_case, junit_test_case)
performance_items = dut.get_performance_items()
except TestCaseFailed:
failed_cases.append(one_case["name"])
failed_cases.append(format_case_name(one_case))
except Exception as e:
junit_test_case.add_failure_info("Unexpected exception: " + str(e))
failed_cases.append(one_case["name"])
failed_cases.append(format_case_name(one_case))
finally:
TinyFW.JunitReport.update_performance(performance_items)
TinyFW.JunitReport.test_case_finish(junit_test_case)
# close DUT when finish running all cases for one config
env.close_dut(dut.name)
# raise exception if any case fails
if failed_cases:
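For reference, the failure entries built with format_case_name look like this; the function is the one added above, while the case dicts are invented for the example:

def format_case_name(case):
    return "[{}] {}".format(case["config"], case["name"])

failed_cases = [{"config": "default", "name": "task delete"},
                {"config": "psram",   "name": "task delete"}]
print([format_case_name(c) for c in failed_cases])
# ['[default] task delete', '[psram] task delete'] -- same case name, two configs, no ambiguity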