mirror of
https://github.com/espressif/esp-idf.git
synced 2024-10-05 20:47:46 -04:00
CI: revise unit test parser
This commit is contained in:
parent
bda63abb40
commit
678a7b53f7
@ -105,9 +105,8 @@ build_esp_idf_tests:
|
||||
|
||||
script:
|
||||
- cd tools/unit-test-app
|
||||
- git checkout ${CI_BUILD_REF_NAME} || echo "Using default branch..."
|
||||
- make TESTS_ALL=1
|
||||
- python UnitTestParser.py
|
||||
- python tools/UnitTestParser.py
|
||||
|
||||
build_examples:
|
||||
<<: *build_template
|
||||
@ -380,7 +379,7 @@ check_doc_links:
|
||||
LOG_PATH: "$CI_PROJECT_DIR/$CI_BUILD_REF"
|
||||
APP_NAME: "ut"
|
||||
TEST_CASE_FILE_PATH: "$CI_PROJECT_DIR/components/idf_test/unit_test"
|
||||
MODULE_UPDATE_FILE: "$CI_PROJECT_DIR/tools/unit-test-app/ModuleDefinition.yml"
|
||||
MODULE_UPDATE_FILE: "$CI_PROJECT_DIR/tools/unit-test-app/tools/ModuleDefinition.yml"
|
||||
|
||||
dependencies:
|
||||
- build_esp_idf_tests
|
||||
|
@ -1,145 +0,0 @@
|
||||
# This file is used to process section data generated by `objdump -s`
|
||||
import re
|
||||
|
||||
|
||||
class SectionTable(object):
    """Parse the output of `objdump -s` into sections and read back
    unsigned integers and NUL-terminated strings by absolute address."""

    class Section(object):
        """One section: name, start address and raw data (a str of byte values)."""

        SECTION_START_PATTERN = re.compile("Contents of section (.+?):")
        DATA_PATTERN = re.compile("([0-9a-f]{4,8})")

        def __init__(self, name, start_address, data):
            self.name = name
            self.start_address = start_address
            self.data = data

        def __contains__(self, item):
            """item is a dict with "region" and "address"; region "any" matches every section."""
            if (item["region"] == self.name or item["region"] == "any") \
                    and (self.start_address <= item["address"] < (self.start_address + len(self.data))):
                return True
            else:
                return False

        def __getitem__(self, item):
            """Index or slice by absolute address (translated to an offset into data)."""
            if isinstance(item, int):
                return self.data[item - self.start_address]
            elif isinstance(item, slice):
                start = item.start if item.start is None else item.start - self.start_address
                stop = item.stop if item.stop is None else item.stop - self.start_address
                return self.data[start:stop]
            return self.data[item]

        def __str__(self):
            return "%s [%08x - %08x]" % (self.name, self.start_address, self.start_address + len(self.data))

        __repr__ = __str__

        @classmethod
        def parse_raw_data(cls, raw_data):
            """Consume one section from `objdump -s` output lines.

            :param raw_data: remaining output lines
            :return: (Section or None, un-processed lines or None)
            """
            name = ""
            data = ""
            start_address = 0
            # first find the "Contents of section <name>:" header line
            for i, line in enumerate(raw_data):
                if line.find("Contents of section ") != -1:  # cheap substring test before the regex
                    match = cls.SECTION_START_PATTERN.search(line)
                    if match is not None:
                        name = match.group(1)
                        raw_data = raw_data[i + 1:]
                        break
            else:
                # no header found: keep one dummy line so the code below stays uniform
                raw_data = [""]

            def process_data_line(line_to_process):
                # hex dump and the trailing ASCII dump are separated by a double space
                hex_part = line_to_process.split("  ")[0]
                data_list = cls.DATA_PATTERN.findall(hex_part)
                try:
                    _address = int(data_list[0], base=16)
                except IndexError:
                    _address = -1  # sentinel: not a data line

                def hex_to_str(hex_data):
                    if len(hex_data) % 2 == 1:
                        hex_data = "0" + hex_data  # pad to whole bytes
                    _length = len(hex_data)
                    return "".join([chr(int(hex_data[_i:_i + 2], base=16))
                                    for _i in range(0, _length, 2)])

                return _address, "".join([hex_to_str(x) for x in data_list[1:]])

            # handle first line: it fixes the section start address
            address, _data = process_data_line(raw_data[0])
            if address != -1:
                start_address = address
                data += _data
                raw_data = raw_data[1:]
                for i, line in enumerate(raw_data):
                    address, _data = process_data_line(line)
                    if address == -1:
                        # a non-data line starts the next section
                        raw_data = raw_data[i:]
                        break
                    else:
                        data += _data
                else:
                    # all lines consumed
                    raw_data = []
            return cls(name, start_address, data) if start_address != -1 else None,\
                None if len(raw_data) == 0 else raw_data

    def __init__(self, file_name):
        # text mode: objdump output is text and the parsing above is str-based
        # (binary mode would yield bytes and break the str comparisons on Python 3)
        with open(file_name, "r") as f:
            raw_data = f.readlines()
        self.table = []
        while raw_data:
            section, raw_data = self.Section.parse_raw_data(raw_data)
            self.table.append(section)

    def get_unsigned_int(self, region, address, size=4, endian="LE"):
        """Read `size` bytes at `address` in `region` ("any" matches by address
        only) as an unsigned int; return None when no section matches."""
        if address % 4 != 0 or size % 4 != 0:
            # print() call form is Python 2/3 compatible (was a py2 print statement)
            print("warning: try to access without 4 bytes aligned")
        key = {"address": address, "region": region}
        for section in self.table:
            if key in section:
                tmp = section[address:address + size]
                value = 0
                for i in range(size):
                    if endian == "LE":
                        value += ord(tmp[i]) << (i * 8)
                    elif endian == "BE":
                        value += ord(tmp[i]) << ((size - i - 1) * 8)
                    else:
                        print("only support LE or BE for parameter endian")
                        assert False
                break
        else:
            value = None
        return value

    def get_string(self, region, address):
        """Read a NUL-terminated string starting at `address`; None if not found."""
        value = None
        key = {"address": address, "region": region}
        for section in self.table:
            if key in section:
                value = section[address:]
                for i, c in enumerate(value):
                    if c == '\0':
                        value = value[:i]
                        break
                break
        return value
|
||||
|
||||
|
||||
def main():
    """Placeholder entry point; this module is meant to be imported."""
    pass


if __name__ == '__main__':
    main()
|
@ -1,161 +0,0 @@
|
||||
import yaml
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import shutil
|
||||
import CreateSectionTable
|
||||
|
||||
|
||||
MODULE_MAP = yaml.load(open("ModuleDefinition.yml", "r"))
|
||||
|
||||
TEST_CASE_PATTERN = {
|
||||
"initial condition": "UTINIT1",
|
||||
"SDK": "ESP32_IDF",
|
||||
"level": "Unit",
|
||||
"execution time": 0,
|
||||
"Test App": "UT",
|
||||
"auto test": "Yes",
|
||||
"category": "Function",
|
||||
"test point 1": "basic function",
|
||||
"version": "v1 (2016-12-06)",
|
||||
"test environment": "UT_T1_1",
|
||||
"expected result": "1. set succeed"
|
||||
}
|
||||
|
||||
CONFIG_FILE_PATTERN = {
|
||||
"Config": {"execute count": 1, "execute order": "in order"},
|
||||
"DUT": [],
|
||||
"Filter": [{"Add": {"ID": []}}]
|
||||
}
|
||||
|
||||
test_cases = list()
|
||||
test_ids = {}
|
||||
test_ids_by_job = {}
|
||||
unit_jobs = {}
|
||||
|
||||
os.chdir(os.path.join("..", ".."))
|
||||
IDF_PATH = os.getcwd()
|
||||
|
||||
|
||||
class Parser(object):
    """Parse unit test cases from build output and generate test-bench
    config files.  Operates on the module-level globals (MODULE_MAP,
    test_cases, test_ids, test_ids_by_job, unit_jobs, IDF_PATH)."""

    @classmethod
    def parse_test_addresses(cls):
        """Resolve test case name/description addresses from the symbol dump
        ("tests") via the section table dump ("tmp"), then dump all cases."""
        table = CreateSectionTable.SectionTable(os.path.join(IDF_PATH, "tools", "unit-test-app", "build", "tmp"))
        file_index = 1
        test_index = 1
        # renamed handle from `file` to avoid shadowing the builtin
        with open(os.path.join(IDF_PATH, "tools", "unit-test-app", "build", "tests"), "r") as f:
            for line in f:
                line = line.split()
                test = int(line[0], 16)
                section = line[3]
                name_addr = table.get_unsigned_int(section, test, 4)
                desc_addr = table.get_unsigned_int(section, test + 4, 4)
                name = table.get_string("any", name_addr)
                desc = table.get_string("any", desc_addr)
                Parser.parse_test_cases(file_index, test_index, "%s, %s" % (name, desc))
                file_index += 1
                test_index += 1
        os.chdir(os.path.join("..", ".."))
        Parser.dump_test_cases(test_cases)

    @classmethod
    def parse_test_cases(cls, file_index, test_index, tags):
        """Parse one case's tag string, register its ID per test env and
        append the full case dict to the module-level `test_cases`."""
        tags = re.split(r"[\[\]\"]", tags)
        ci_ready = "Yes"
        test_env = "UT_T1_1"
        for tag in tags:
            if tag == "ignore":
                ci_ready = "No"
            if re.match("test_env=", tag):
                test_env = tag[9:]
        module_name = tags[1]
        if module_name not in MODULE_MAP:  # unknown modules fall back to "misc"
            module_name = "misc"
        # renamed from `id` to avoid shadowing the builtin
        case_id = "UT_%s_%s_%03d%02d" % (MODULE_MAP[module_name]['module abbr'],
                                         MODULE_MAP[module_name]['sub module abbr'],
                                         file_index, test_index)
        test_case = dict(TEST_CASE_PATTERN)
        test_case.update({"module": MODULE_MAP[module_name]['module'],
                          "CI ready": ci_ready,
                          "cmd set": ["IDFUnitTest/UnitTest", [tags[0][:-2]]],
                          "ID": case_id,
                          "test point 2": module_name,
                          "steps": tags[0][:-2],
                          "comment": tags[0][:-2],
                          "test environment": test_env,
                          "sub module": MODULE_MAP[module_name]['sub module'],
                          "summary": tags[0][:-2]})
        if test_case["CI ready"] == "Yes":
            # dict.has_key() was removed in Python 3; membership test instead
            if test_env in test_ids:
                test_ids[test_env].append(case_id)
            else:
                test_ids.update({test_env: [case_id]})
        test_cases.append(test_case)

    @classmethod
    def dump_test_cases(cls, test_cases):
        """Dump all parsed test cases to TestCaseAll.yml."""
        os.chdir(os.path.join(IDF_PATH, "components", "idf_test", "unit_test"))
        with open("TestCaseAll.yml", "wb+") as f:
            yaml.dump({"test cases": test_cases}, f, allow_unicode=True, default_flow_style=False)

    @classmethod
    def dump_ci_config(cls):
        """Write one CI config YAML per unit test job."""
        Parser.split_test_cases()
        os.chdir(os.path.join(IDF_PATH, "components", "idf_test", "unit_test"))
        if not os.path.exists("CIConfigs"):
            os.makedirs("CIConfigs")
        os.chdir("CIConfigs")
        for unit_job in unit_jobs:
            job = dict(CONFIG_FILE_PATTERN)
            job.update({"DUT": ["UT1"]})
            job.update({"Filter": [{"Add": {"ID": test_ids_by_job[unit_job]}}]})
            with open(unit_job + ".yml", "wb+") as f:
                yaml.dump(job, f, allow_unicode=True, default_flow_style=False)

    @classmethod
    def split_test_cases(cls):
        """Distribute each test env's IDs evenly across the jobs tagged with it."""
        for job in unit_jobs:
            test_ids_by_job.update({job: list()})
        for test_env in test_ids:
            available_jobs = list()
            for job in unit_jobs:
                if test_env in unit_jobs[job]:
                    available_jobs.append(job)
            for idx, job in enumerate(available_jobs):
                # floor division keeps slice bounds integral (Python 3 compatible)
                test_ids_by_job[job] += (test_ids[test_env][idx * len(test_ids[test_env]) // len(available_jobs):
                                                            (idx + 1) * len(test_ids[test_env]) // len(available_jobs)])

    @classmethod
    def parse_gitlab_ci(cls):
        """Collect UT_* job names and their tag lists from .gitlab-ci.yml."""
        os.chdir(IDF_PATH)
        with open(".gitlab-ci.yml", "rb") as f:
            # NOTE(review): yaml.load without a Loader is unsafe on untrusted
            # input; this file ships with IDF, but consider yaml.safe_load
            gitlab_ci = yaml.load(f)
        for job_name in gitlab_ci.keys():
            if re.match("UT_", job_name):
                unit_jobs.update({job_name: gitlab_ci[job_name]["tags"]})

    @classmethod
    def copy_module_def_file(cls):
        """Copy the module definition file next to the generated test data."""
        src = os.path.join(IDF_PATH, "tools", "unit-test-app", "ModuleDefinition.yml")
        dst = os.path.join(IDF_PATH, "components", "idf_test", "unit_test")
        shutil.copy(src, dst)
|
||||
|
||||
|
||||
def main():
    """Dump symbol and section info from the unit test ELF, then run the
    full parse/generate pipeline."""
    os.chdir(os.path.join(IDF_PATH, "tools", "unit-test-app", "build"))
    os.system('xtensa-esp32-elf-objdump -t unit-test-app.elf | grep test_desc > tests')
    os.system('xtensa-esp32-elf-objdump -s unit-test-app.elf > tmp')
    # pipeline: resolve addresses, read CI jobs, emit configs, copy module defs
    for step in (Parser.parse_test_addresses,
                 Parser.parse_gitlab_ci,
                 Parser.dump_ci_config,
                 Parser.copy_module_def_file):
        step()


if __name__ == '__main__':
    main()
|
163
tools/unit-test-app/tools/CreateSectionTable.py
Normal file
163
tools/unit-test-app/tools/CreateSectionTable.py
Normal file
@ -0,0 +1,163 @@
|
||||
# This file is used to process section data generated by `objdump -s`
|
||||
import re
|
||||
|
||||
|
||||
class Section(object):
    """
    One Section of section table. contains info about section name, address and raw data
    """
    SECTION_START_PATTERN = re.compile("Contents of section (.+?):")
    DATA_PATTERN = re.compile("([0-9a-f]{4,8})")

    def __init__(self, name, start_address, data):
        self.name = name
        self.start_address = start_address
        self.data = data

    def __contains__(self, item):
        """ check if the section name and address match this section """
        name_matches = item["section"] == self.name or item["section"] == "any"
        address_matches = self.start_address <= item["address"] < self.start_address + len(self.data)
        return name_matches and address_matches

    def __getitem__(self, item):
        """
        process slice.
        convert absolute address to relative address in current section and return slice result
        """
        if isinstance(item, int):
            return self.data[item - self.start_address]
        if isinstance(item, slice):
            lo = None if item.start is None else item.start - self.start_address
            hi = None if item.stop is None else item.stop - self.start_address
            return self.data[lo:hi]
        return self.data[item]

    def __str__(self):
        return "%s [%08x - %08x]" % (self.name, self.start_address, self.start_address + len(self.data))

    __repr__ = __str__

    @classmethod
    def parse_raw_data(cls, raw_data):
        """
        process raw data generated by `objdump -s`, create section and return un-processed lines

        :param raw_data: lines of raw data generated by `objdump -s`
        :return: one section, un-processed lines
        """
        name = ""
        data = ""
        start_address = 0

        # locate the "Contents of section <name>:" header line
        for index, current_line in enumerate(raw_data):
            if "Contents of section " in current_line:  # cheap substring check before the regex
                found = cls.SECTION_START_PATTERN.search(current_line)
                if found is not None:
                    name = found.group(1)
                    raw_data = raw_data[index + 1:]
                    break
        else:
            # no header: insert one dummy data line so the code below stays uniform
            raw_data = [""]

        def process_data_line(raw_line):
            # the hex dump and the trailing ASCII dump are separated by a double space
            hex_part = raw_line.split("  ")[0]
            words = cls.DATA_PATTERN.findall(hex_part)
            try:
                line_address = int(words[0], base=16)
            except IndexError:
                line_address = -1  # sentinel: not a data line

            def hex_to_str(hex_data):
                if len(hex_data) % 2 == 1:
                    hex_data = "0" + hex_data  # pad to whole bytes
                return "".join([chr(int(hex_data[k:k + 2], base=16))
                                for k in range(0, len(hex_data), 2)])

            return line_address, "".join([hex_to_str(word) for word in words[1:]])

        # the first data line fixes the section start address
        address, chunk = process_data_line(raw_data[0])
        if address != -1:
            start_address = address
            data += chunk
            raw_data = raw_data[1:]
            for index, current_line in enumerate(raw_data):
                address, chunk = process_data_line(current_line)
                if address == -1:
                    # a non-data line starts the next section
                    raw_data = raw_data[index:]
                    break
                else:
                    data += chunk
            else:
                # every remaining line was consumed
                raw_data = []

        section = cls(name, start_address, data) if start_address != -1 else None
        unprocessed_data = None if len(raw_data) == 0 else raw_data
        return section, unprocessed_data
|
||||
|
||||
|
||||
class SectionTable(object):
    """ elf section table """

    def __init__(self, file_name):
        # open in text mode: the dump is text and Section's parsing compares
        # against str literals ("rb" would yield bytes and raise TypeError on
        # Python 3)
        with open(file_name, "r") as f:
            raw_data = f.readlines()
        self.table = []
        while raw_data:
            section, raw_data = Section.parse_raw_data(raw_data)
            self.table.append(section)

    def get_unsigned_int(self, section, address, size=4, endian="LE"):
        """
        get unsigned int from section table

        :param section: section name; use "any" will only match with address
        :param address: start address
        :param size: size in bytes
        :param endian: LE or BE
        :return: int or None
        """
        if address % 4 != 0 or size % 4 != 0:
            print("warning: try to access without 4 bytes aligned")
        key = {"address": address, "section": section}
        # loop variable renamed so it no longer shadows the `section` parameter
        for sect in self.table:
            if key in sect:
                tmp = sect[address:address + size]
                value = 0
                for i in range(size):
                    if endian == "LE":
                        value += ord(tmp[i]) << (i * 8)
                    elif endian == "BE":
                        value += ord(tmp[i]) << ((size - i - 1) * 8)
                    else:
                        print("only support LE or BE for parameter endian")
                        assert False
                break
        else:
            value = None
        return value

    def get_string(self, section, address):
        """
        get string ('\\0' terminated) from section table

        :param section: section name; use "any" will only match with address
        :param address: start address
        :return: string or None
        """
        value = None
        key = {"address": address, "section": section}
        for sect in self.table:
            if key in sect:
                value = sect[address:]
                for i, c in enumerate(value):
                    if c == '\0':
                        value = value[:i]
                        break
                break
        return value
|
8
tools/unit-test-app/tools/TagDefinition.yml
Normal file
8
tools/unit-test-app/tools/TagDefinition.yml
Normal file
@ -0,0 +1,8 @@
|
||||
ignore:
|
||||
# if the type exists but no value is assigned
|
||||
default: "Yes"
|
||||
# if the type does not exist in the tag list
|
||||
omitted: "No"
|
||||
test_env:
|
||||
default: "UT_T1_1"
|
||||
omitted: "UT_T1_1"
|
262
tools/unit-test-app/tools/UnitTestParser.py
Normal file
262
tools/unit-test-app/tools/UnitTestParser.py
Normal file
@ -0,0 +1,262 @@
|
||||
import yaml
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import subprocess
|
||||
|
||||
from copy import deepcopy
|
||||
import CreateSectionTable
|
||||
|
||||
|
||||
TEST_CASE_PATTERN = {
|
||||
"initial condition": "UTINIT1",
|
||||
"SDK": "ESP32_IDF",
|
||||
"level": "Unit",
|
||||
"execution time": 0,
|
||||
"Test App": "UT",
|
||||
"auto test": "Yes",
|
||||
"category": "Function",
|
||||
"test point 1": "basic function",
|
||||
"version": "v1 (2016-12-06)",
|
||||
"test environment": "UT_T1_1",
|
||||
"expected result": "1. set succeed"
|
||||
}
|
||||
|
||||
CONFIG_FILE_PATTERN = {
|
||||
"Config": {"execute count": 1, "execute order": "in order"},
|
||||
"DUT": [],
|
||||
"Filter": [{"Add": {"ID": []}}]
|
||||
}
|
||||
|
||||
|
||||
class Parser(object):
    """ parse unit test cases from build files and create files for test bench """

    # a tag is "type", optionally followed by "=value"
    TAG_PATTERN = re.compile("([^=]+)(=)?(.+)?")
    # raw string avoids invalid escape sequences in the regex literal
    DESCRIPTION_PATTERN = re.compile(r"\[([^]\[]+)\]")

    def __init__(self, idf_path=os.getenv("IDF_PATH")):
        self.test_env_tags = {}     # test env name -> list of case IDs
        self.unit_jobs = {}         # CI job name -> list of runner tags
        self.file_name_cache = {}   # source file name -> cases seen so far
        self.idf_path = idf_path
        # NOTE(review): yaml.load without a Loader is unsafe on untrusted
        # input; these files ship with IDF, but consider yaml.safe_load
        self.tag_def = yaml.load(open(os.path.join(idf_path, "tools", "unit-test-app", "tools",
                                                   "TagDefinition.yml"), "r"))
        self.module_map = yaml.load(open(os.path.join(idf_path, "tools", "unit-test-app", "tools",
                                                      "ModuleDefinition.yml"), "r"))

    def parse_test_cases_from_elf(self, elf_file):
        """
        parse test cases from elf and save test cases to unit test folder

        :param elf_file: elf file path
        """
        subprocess.check_output('xtensa-esp32-elf-objdump -t {} | grep \ test_desc > case_address.tmp'.format(elf_file),
                                shell=True)
        subprocess.check_output('xtensa-esp32-elf-objdump -s {} > section_table.tmp'.format(elf_file), shell=True)

        table = CreateSectionTable.SectionTable("section_table.tmp")
        test_cases = []
        with open("case_address.tmp", "r") as f:
            for line in f:
                # process symbol table like: "3ffb4310 l O .dram0.data 00000018 test_desc_33$5010"
                line = line.split()
                test_addr = int(line[0], 16)
                section = line[3]

                name_addr = table.get_unsigned_int(section, test_addr, 4)
                desc_addr = table.get_unsigned_int(section, test_addr + 4, 4)
                file_name_addr = table.get_unsigned_int(section, test_addr + 12, 4)
                name = table.get_string("any", name_addr)
                desc = table.get_string("any", desc_addr)
                file_name = table.get_string("any", file_name_addr)

                tc = self.parse_one_test_case(name, desc, file_name)
                if tc["CI ready"] == "Yes":
                    # group case IDs by test environment
                    if tc["test environment"] in self.test_env_tags:
                        self.test_env_tags[tc["test environment"]].append(tc["ID"])
                    else:
                        # BUG FIX: store the ID (as the append branch does),
                        # not the whole case dict
                        self.test_env_tags.update({tc["test environment"]: [tc["ID"]]})
                test_cases.append(tc)

        os.remove("section_table.tmp")
        os.remove("case_address.tmp")

        self.dump_test_cases(test_cases)

    def parse_case_properities(self, tags_raw):
        """
        parse test case tags (properities) with the following rules:
        * first tag is always group of test cases, it's mandatory
        * the rest tags should be [type=value].
        * if the type have default value, then [type] equal to [type=default_value].
        * if the type don't exist, then equal to [type=omitted_value]
        default_value and omitted_value are defined in TagDefinition.yml

        :param tags_raw: raw tag string
        :return: tag dict
        """
        tags = self.DESCRIPTION_PATTERN.findall(tags_raw)
        assert len(tags) > 0
        # start from the omitted value of every defined tag type
        p = dict([(k, self.tag_def[k]["omitted"]) for k in self.tag_def])
        p["module"] = tags[0]

        if p["module"] not in self.module_map:
            p["module"] = "misc"

        # parsing rest tags, [type=value], =value is optional
        for tag in tags[1:]:
            match = self.TAG_PATTERN.search(tag)
            assert match is not None
            tag_type = match.group(1)
            tag_value = match.group(3)
            if match.group(2) == "=" and tag_value is None:
                # [tag_type=] means tag_value is empty string
                tag_value = ""
            if tag_type in p:
                if tag_value is None:
                    p[tag_type] = self.tag_def[tag_type]["default"]
                else:
                    p[tag_type] = tag_value
            # tag types not defined in TagDefinition.yml are ignored
        return p

    def parse_one_test_case(self, name, description, file_name):
        """
        parse one test case

        :param name: test case name (summary)
        :param description: test case description (tag string)
        :param file_name: the file defines this test case
        :return: parsed test case
        """
        prop = self.parse_case_properities(description)

        # per-file counter makes IDs unique within one source file
        if file_name in self.file_name_cache:
            self.file_name_cache[file_name] += 1
        else:
            self.file_name_cache[file_name] = 1

        tc_id = "UT_%s_%s_%03d%02d" % (self.module_map[prop["module"]]['module abbr'],
                                       self.module_map[prop["module"]]['sub module abbr'],
                                       hash(file_name) % 1000,
                                       self.file_name_cache[file_name])
        test_case = deepcopy(TEST_CASE_PATTERN)
        test_case.update({"module": self.module_map[prop["module"]]['module'],
                          "CI ready": "No" if prop["ignore"] == "Yes" else "Yes",
                          "cmd set": ["IDFUnitTest/UnitTest", [name]],
                          "ID": tc_id,
                          "test point 2": prop["module"],
                          "steps": name,
                          "test environment": prop["test_env"],
                          "sub module": self.module_map[prop["module"]]['sub module'],
                          "summary": name})
        return test_case

    def dump_test_cases(self, test_cases):
        """
        dump parsed test cases to YAML file for test bench input

        :param test_cases: parsed test cases
        """
        # text mode: yaml.dump emits str (binary mode fails on Python 3)
        with open(os.path.join(self.idf_path, "components", "idf_test", "unit_test", "TestCaseAll.yml"), "w+") as f:
            yaml.dump({"test cases": test_cases}, f, allow_unicode=True, default_flow_style=False)

    def dump_ci_config(self):
        """ assign test cases and dump to config file to test bench """
        test_cases_by_jobs = self.assign_test_cases()

        ci_config_folder = os.path.join(self.idf_path, "components", "idf_test", "unit_test", "CIConfigs")

        if not os.path.exists(ci_config_folder):
            # BUG FIX: previously created ci_config_folder/CIConfigs (doubly nested)
            os.makedirs(ci_config_folder)

        for unit_job in self.unit_jobs:
            job = deepcopy(CONFIG_FILE_PATTERN)
            job.update({"DUT": ["UT1"]})
            job.update({"Filter": [{"Add": {"ID": test_cases_by_jobs[unit_job]}}]})

            with open(os.path.join(ci_config_folder, unit_job + ".yml"), "w+") as f:
                yaml.dump(job, f, allow_unicode=True, default_flow_style=False)

    def assign_test_cases(self):
        """ assign test cases to jobs: split each env's cases evenly across
        the jobs whose tags include that env """
        test_cases_by_jobs = {}

        for job in self.unit_jobs:
            test_cases_by_jobs.update({job: list()})
        for test_env in self.test_env_tags:
            available_jobs = list()
            for job in self.unit_jobs:
                if test_env in self.unit_jobs[job]:
                    available_jobs.append(job)
            case_count = len(self.test_env_tags[test_env])
            for idx, job in enumerate(available_jobs):
                # floor division keeps slice bounds integral (Python 3 compatible)
                test_cases_by_jobs[job] += (self.test_env_tags[test_env]
                                            [idx * case_count // len(available_jobs):
                                             (idx + 1) * case_count // len(available_jobs)])
        return test_cases_by_jobs

    def parse_gitlab_ci(self):
        """ parse gitlab ci config file to get pre-defined unit test jobs """
        with open(os.path.join(self.idf_path, ".gitlab-ci.yml"), "r") as f:
            # NOTE(review): consider yaml.safe_load for untrusted input
            gitlab_ci = yaml.load(f)
        for job_name in gitlab_ci.keys():
            if re.match("UT_", job_name):
                self.unit_jobs.update({job_name: gitlab_ci[job_name]["tags"]})

    def copy_module_def_file(self):
        """ copy module def file to artifact path """
        src = os.path.join(self.idf_path, "tools", "unit-test-app", "tools", "ModuleDefinition.yml")
        dst = os.path.join(self.idf_path, "components", "idf_test", "unit_test")
        shutil.copy(src, dst)
|
||||
|
||||
|
||||
def test_parser():
    """Self-test for tag parsing (requires IDF_PATH and the tag/module YAML files)."""
    parser = Parser()
    # parsing module only, module present in module list
    prop = parser.parse_case_properities("[esp32]")
    assert prop["module"] == "esp32"
    # module not in module list falls back to "misc"
    prop = parser.parse_case_properities("[not_in_list]")
    assert prop["module"] == "misc"
    # a default tag, a tag with assigned value; unknown tag types are dropped
    prop = parser.parse_case_properities("[esp32][ignore][test_env=ABCD][not_support1][not_support2=ABCD]")
    # BUG FIX: previously checked the misspelled "not_supported2", which was
    # vacuously true regardless of parser behavior
    assert prop["ignore"] == "Yes" and prop["test_env"] == "ABCD" \
        and "not_support1" not in prop and "not_support2" not in prop
    # omitted values
    prop = parser.parse_case_properities("[esp32]")
    assert prop["ignore"] == "No" and prop["test_env"] == "UT_T1_1"
    # incorrect format must raise AssertionError
    # BUG FIX: `assert False` inside the try block was swallowed by its own
    # `except AssertionError`; use a flag so a missing raise actually fails
    raised = False
    try:
        parser.parse_case_properities("abcd")
    except AssertionError:
        raised = True
    assert raised, "input without [tags] must raise AssertionError"
    # junk outside [] is skipped; [type=] assigns empty string
    prop = parser.parse_case_properities("[esp32]abdc aaaa [ignore=]")
    assert prop["module"] == "esp32" and prop["ignore"] == ""
    # mis-paired [] are tolerated
    prop = parser.parse_case_properities("[esp32][[ignore=b]][]][test_env=AAA]]")
    assert prop["module"] == "esp32" and prop["ignore"] == "b" and prop["test_env"] == "AAA"
|
||||
|
||||
|
||||
def main():
    """Run the parser self-test, then generate all test bench files from
    the built unit test app."""
    test_parser()

    idf_root = os.getenv("IDF_PATH")
    elf_file = os.path.join(idf_root, "tools", "unit-test-app", "build", "unit-test-app.elf")

    case_parser = Parser(idf_root)
    case_parser.parse_test_cases_from_elf(elf_file)
    case_parser.parse_gitlab_ci()
    case_parser.dump_ci_config()
    case_parser.copy_module_def_file()


if __name__ == '__main__':
    main()
|
Loading…
Reference in New Issue
Block a user