Merge branch 'ci/improve_pytest_process' into 'master'

Ci/improve pytest process

See merge request espressif/esp-idf!21358
Fu Hanxi 2022-12-08 13:04:16 +08:00
commit c81ff4571e
11 changed files with 281 additions and 223 deletions

View File

@ -35,9 +35,11 @@ from pytest_embedded.utils import find_by_suffix
from pytest_embedded_idf.dut import IdfDut
try:
from idf_ci_utils import to_list
from idf_unity_tester import CaseTester
except ImportError:
sys.path.append(os.path.join(os.path.dirname(__file__), 'tools', 'ci'))
from idf_ci_utils import to_list
from idf_unity_tester import CaseTester
try:
@ -46,22 +48,86 @@ except ImportError:
sys.path.append(os.path.join(os.path.dirname(__file__), 'tools', 'ci', 'python_packages'))
import common_test_methods # noqa: F401
SUPPORTED_TARGETS = ['esp32', 'esp32s2', 'esp32c3', 'esp32s3', 'esp32c2', 'esp32c6']
PREVIEW_TARGETS = ['esp32h4'] # this PREVIEW_TARGETS excludes 'linux' target
DEFAULT_SDKCONFIG = 'default'
TARGET_MARKERS = {
'esp32': 'support esp32 target',
'esp32s2': 'support esp32s2 target',
'esp32s3': 'support esp32s3 target',
'esp32c3': 'support esp32c3 target',
'esp32c2': 'support esp32c2 target',
'esp32c6': 'support esp32c6 target',
'esp32h4': 'support esp32h4 target',
'linux': 'support linux target',
}
SPECIAL_MARKERS = {
'supported_targets': "support all officially announced supported targets ('esp32', 'esp32s2', 'esp32c3', 'esp32s3', 'esp32c2', 'esp32c6')",
'preview_targets': "support all preview targets ('esp32h4')",
'all_targets': 'support all targets, including supported ones and preview ones',
'temp_skip_ci': 'temp skip tests for specified targets only in ci',
'temp_skip': 'temp skip tests for specified targets both in ci and locally',
'nightly_run': 'tests should be executed as part of the nightly trigger pipeline',
'host_test': 'tests which should not be built at the build stage, and instead built in host_test stage',
'qemu': 'build and test using qemu-system-xtensa, not real target',
}
ENV_MARKERS = {
# single-dut markers
'generic': 'tests should be run on generic runners',
'flash_suspend': 'support flash suspend feature',
'ip101': 'connected via wired 10/100M ethernet',
'lan8720': 'connected via LAN8720 ethernet transceiver',
'quad_psram': 'runners with quad psram',
'octal_psram': 'runners with octal psram',
'usb_host': 'usb host runners',
'usb_host_flash_disk': 'usb host runners with USB flash disk attached',
'usb_device': 'usb device runners',
'ethernet_ota': 'ethernet OTA runners',
'flash_encryption': 'Flash Encryption runners',
'flash_encryption_f4r8': 'Flash Encryption runners with 4-line flash and 8-line psram',
'flash_encryption_f8r8': 'Flash Encryption runners with 8-line flash and 8-line psram',
'flash_mutli': 'Multiple flash chips tests',
'psram': 'Chip has 4-line psram',
'ir_transceiver': 'runners with a pair of IR transmitter and receiver',
'twai_transceiver': 'runners with a TWAI PHY transceiver',
'flash_encryption_wifi_high_traffic': 'Flash Encryption runners with wifi high traffic support',
'ethernet': 'ethernet runner',
'ethernet_flash_8m': 'ethernet runner with 8mb flash',
'ethernet_router': 'both the runner and dut connect to the same router through ethernet NIC',
'wifi_ap': 'a wifi AP in the environment',
'wifi_router': 'both the runner and dut connect to the same wifi router',
'wifi_high_traffic': 'wifi high traffic runners',
'wifi_wlan': 'wifi runner with a wireless NIC',
'xtal_26mhz': 'runner with 26MHz xtal on board',
'xtal_40mhz': 'runner with 40MHz xtal on board',
'external_flash': 'external flash memory connected via VSPI (FSPI)',
'sdcard_sdmode': 'sdcard running in SD mode',
'sdcard_spimode': 'sdcard running in SPI mode',
'MSPI_F8R8': 'runner with Octal Flash and Octal PSRAM',
'MSPI_F4R8': 'runner with Quad Flash and Octal PSRAM',
'MSPI_F4R4': 'runner with Quad Flash and Quad PSRAM',
'test_jtag_arm': 'runner where the chip is accessible through JTAG as well',
'adc': 'ADC related tests should run on adc runners',
'xtal32k': 'Runner with external 32k crystal connected',
'no32kXtal': 'Runner with no external 32k crystal connected',
'multi_dut_modbus_rs485': 'a pair of runners connected by RS485 bus',
'psramv0': 'Runner with PSRAM version 0',
# multi-dut markers
'ieee802154': 'ieee802154 related tests should run on ieee802154 runners.',
'i154_multi_dut': 'tests should be used for i154, such as openthread.',
'wifi_two_dut': 'tests should be run on runners which has two wifi duts connected.',
'generic_multi_device': 'generic multiple devices whose corresponding gpio pins are connected to each other.',
'twai_network': 'multiple runners form a TWAI network.',
'sdio_master_slave': 'Test sdio multi board.',
}
##################
# Help Functions #
##################
def is_target_marker(marker: str) -> bool:
if marker.startswith('esp32') or marker.startswith('esp8') or marker == 'linux':
return True
return False
def format_case_id(target: Optional[str], config: Optional[str], case: str) -> str:
return f'{target}.{config}.{case}'
@ -70,24 +136,68 @@ def item_marker_names(item: Item) -> List[str]:
return [marker.name for marker in item.iter_markers()]
def get_target_marker(markexpr: str) -> str:
def item_target_marker_names(item: Item) -> List[str]:
res = set()
for marker in item.iter_markers():
if marker.name in TARGET_MARKERS:
res.add(marker.name)
return sorted(res)
def item_env_marker_names(item: Item) -> List[str]:
res = set()
for marker in item.iter_markers():
if marker.name in ENV_MARKERS:
res.add(marker.name)
return sorted(res)
def item_skip_targets(item: Item) -> List[str]:
def _get_temp_markers_disabled_targets(marker_name: str) -> List[str]:
temp_marker = item.get_closest_marker(marker_name)
if not temp_marker:
return []
# temp markers should always use keyword arguments `targets` and `reason`
if not temp_marker.kwargs.get('targets') or not temp_marker.kwargs.get('reason'):
raise ValueError(
f'`{marker_name}` should always use keyword arguments `targets` and `reason`. '
f'For example: '
f'`@pytest.mark.{marker_name}(targets=["esp32"], reason="IDF-xxxx, will fix it ASAP")`'
)
return to_list(temp_marker.kwargs['targets']) # type: ignore
temp_skip_ci_targets = _get_temp_markers_disabled_targets('temp_skip_ci')
temp_skip_targets = _get_temp_markers_disabled_targets('temp_skip')
# in CI we skip the union of `temp_skip` and `temp_skip_ci`
if os.getenv('CI_JOB_ID'):
skip_targets = list(set(temp_skip_ci_targets).union(set(temp_skip_targets)))
else: # we use `temp_skip` locally
skip_targets = temp_skip_targets
return skip_targets
def get_target_marker_from_expr(markexpr: str) -> str:
candidates = set()
# we use `-m "esp32 and generic"` in our CI to filter the test cases
# this doesn't cover all use cases, but fit what we do in CI.
for marker in markexpr.split('and'):
marker = marker.strip()
if is_target_marker(marker):
if marker in TARGET_MARKERS:
candidates.add(marker)
if len(candidates) > 1:
raise ValueError(
f'Specified more than one target markers: {candidates}. Please specify no more than one.'
)
raise ValueError(f'Specified more than one target markers: {candidates}. Please specify no more than one.')
elif len(candidates) == 1:
return candidates.pop()
else:
raise ValueError(
'Please specify one target marker via "--target [TARGET]" or via "-m [TARGET]"'
)
raise ValueError('Please specify one target marker via "--target [TARGET]" or via "-m [TARGET]"')
############
@ -181,9 +291,7 @@ def build_dir(app_path: str, target: Optional[str], config: Optional[str]) -> st
logging.info(f'find valid binary path: {binary_path}')
return check_dir
logging.warning(
'checking binary path: %s... missing... try another place', binary_path
)
logging.warning('checking binary path: %s... missing... try another place', binary_path)
recommend_place = check_dirs[0]
raise ValueError(
@ -193,9 +301,7 @@ def build_dir(app_path: str, target: Optional[str], config: Optional[str]) -> st
@pytest.fixture(autouse=True)
@multi_dut_fixture
def junit_properties(
test_case_name: str, record_xml_attribute: Callable[[str, object], None]
) -> None:
def junit_properties(test_case_name: str, record_xml_attribute: Callable[[str, object], None]) -> None:
"""
This fixture is autoused and will modify the junit report test case name to <target>.<config>.<case_name>
"""
@ -211,9 +317,7 @@ def pytest_addoption(parser: pytest.Parser) -> None:
'--sdkconfig',
help='sdkconfig postfix, like sdkconfig.ci.<config>. (Default: None, which would build all found apps)',
)
base_group.addoption(
'--known-failure-cases-file', help='known failure cases file path'
)
base_group.addoption('--known-failure-cases-file', help='known failure cases file path')
_idf_pytest_embedded_key = pytest.StashKey['IdfPytestEmbedded']
@ -230,7 +334,7 @@ def pytest_configure(config: Config) -> None:
break
if not target: # also could specify through markexpr via "-m"
target = get_target_marker(config.getoption('markexpr') or '')
target = get_target_marker_from_expr(config.getoption('markexpr') or '')
config.stash[_idf_pytest_embedded_key] = IdfPytestEmbedded(
target=target,
@ -239,6 +343,9 @@ def pytest_configure(config: Config) -> None:
)
config.pluginmanager.register(config.stash[_idf_pytest_embedded_key])
for name, description in {**TARGET_MARKERS, **ENV_MARKERS, **SPECIAL_MARKERS}.items():
config.addinivalue_line('markers', f'{name}: {description}')
def pytest_unconfigure(config: Config) -> None:
_pytest_embedded = config.stash.get(_idf_pytest_embedded_key, None)
@ -250,28 +357,20 @@ def pytest_unconfigure(config: Config) -> None:
class IdfPytestEmbedded:
def __init__(
self,
target: Optional[str] = None,
target: str,
sdkconfig: Optional[str] = None,
known_failure_cases_file: Optional[str] = None,
):
# CLI options to filter the test cases
self.target = target
self.target = target.lower()
self.sdkconfig = sdkconfig
self.known_failure_patterns = self._parse_known_failure_cases_file(
known_failure_cases_file
)
self.known_failure_patterns = self._parse_known_failure_cases_file(known_failure_cases_file)
self._failed_cases: List[
Tuple[str, bool, bool]
] = [] # (test_case_name, is_known_failure_cases, is_xfail)
self._failed_cases: List[Tuple[str, bool, bool]] = [] # (test_case_name, is_known_failure_cases, is_xfail)
@property
def failed_cases(self) -> List[str]:
return [
case
for case, is_known, is_xfail in self._failed_cases
if not is_known and not is_xfail
]
return [case for case, is_known, is_xfail in self._failed_cases if not is_known and not is_xfail]
@property
def known_failure_cases(self) -> List[str]:
@ -303,15 +402,15 @@ class IdfPytestEmbedded:
@pytest.hookimpl(tryfirst=True)
def pytest_sessionstart(self, session: Session) -> None:
if self.target:
self.target = self.target.lower()
session.config.option.target = self.target
# same behavior for vanilla pytest-embedded '--target'
session.config.option.target = self.target
@pytest.hookimpl(tryfirst=True)
def pytest_collection_modifyitems(self, items: List[Function]) -> None:
# sort by file path and callspec.config
# implement like this since this is a limitation of pytest, couldn't get fixture values while collecting
# https://github.com/pytest-dev/pytest/discussions/9689
# after sort the test apps, the test may use the app cache to reduce the flash times.
def _get_param_config(_item: Function) -> str:
if hasattr(_item, 'callspec'):
return _item.callspec.params.get('config', DEFAULT_SDKCONFIG) # type: ignore
@ -326,43 +425,23 @@ class IdfPytestEmbedded:
# add markers for special markers
for item in items:
skip_ci_marker = item.get_closest_marker('temp_skip_ci')
skip_ci_targets: List[str] = []
if skip_ci_marker:
# `temp_skip_ci` should always use keyword arguments `targets` and `reason`
if not skip_ci_marker.kwargs.get('targets') or not skip_ci_marker.kwargs.get('reason'):
raise ValueError(
f'`temp_skip_ci` should always use keyword arguments `targets` and `reason`. '
f'For example: '
f'`@pytest.mark.temp_skip_ci(targets=["esp32"], reason="IDF-xxxx, will fix it ASAP")`'
)
skip_ci_targets = skip_ci_marker.kwargs['targets']
if 'supported_targets' in item.keywords:
for _target in SUPPORTED_TARGETS:
if _target not in skip_ci_targets:
item.add_marker(_target)
item.add_marker(_target)
if 'preview_targets' in item.keywords:
for _target in PREVIEW_TARGETS:
if _target not in skip_ci_targets:
item.add_marker(_target)
item.add_marker(_target)
if 'all_targets' in item.keywords:
for _target in [*SUPPORTED_TARGETS, *PREVIEW_TARGETS]:
if _target not in skip_ci_targets:
item.add_marker(_target)
item.add_marker(_target)
# `temp_skip_ci(targets=...)` can't work with specified single target
for skip_ci_target in skip_ci_targets:
if skip_ci_target in item.keywords:
raise ValueError(
'`skip_ci_targets` can only work with '
'`supported_targets`, `preview_targets`, `all_targets` markers'
)
# add 'xtal_40mhz' tag as a default tag for esp32c2 target
for item in items:
if 'esp32c2' in item_marker_names(item) and 'xtal_26mhz' not in item_marker_names(item):
# add 'xtal_40mhz' tag as a default tag for esp32c2 target
# only add this marker for esp32c2 cases
if (
self.target == 'esp32c2'
and 'esp32c2' in item_marker_names(item)
and 'xtal_26mhz' not in item_marker_names(item)
):
item.add_marker('xtal_40mhz')
# filter all the test cases with "nightly_run" marker
@ -370,29 +449,22 @@ class IdfPytestEmbedded:
# Do not filter nightly_run cases
pass
elif os.getenv('NIGHTLY_RUN') == '1':
items[:] = [
item for item in items if 'nightly_run' in item_marker_names(item)
]
items[:] = [item for item in items if 'nightly_run' in item_marker_names(item)]
else:
items[:] = [
item for item in items if 'nightly_run' not in item_marker_names(item)
]
items[:] = [item for item in items if 'nightly_run' not in item_marker_names(item)]
# filter all the test cases with "--target"
if self.target:
items[:] = [
item for item in items if self.target in item_marker_names(item)
]
# filter all the test cases with target and skip_targets
items[:] = [
item
for item in items
if self.target in item_marker_names(item) and self.target not in item_skip_targets(item)
]
# filter all the test cases with cli option "config"
if self.sdkconfig:
items[:] = [
item for item in items if _get_param_config(item) == self.sdkconfig
]
items[:] = [item for item in items if _get_param_config(item) == self.sdkconfig]
def pytest_runtest_makereport(
self, item: Function, call: CallInfo[None]
) -> Optional[TestReport]:
def pytest_runtest_makereport(self, item: Function, call: CallInfo[None]) -> Optional[TestReport]:
report = TestReport.from_item_and_call(item, call)
if report.outcome == 'failed':
test_case_name = item.funcargs.get('test_case_name', '')
@ -429,13 +501,9 @@ class IdfPytestEmbedded:
xml = ET.parse(junit)
testcases = xml.findall('.//testcase')
for case in testcases:
case.attrib['name'] = format_case_id(
target, config, case.attrib['name']
)
case.attrib['name'] = format_case_id(target, config, case.attrib['name'])
if 'file' in case.attrib:
case.attrib['file'] = case.attrib['file'].replace(
'/IDF/', ''
) # our unity test framework
case.attrib['file'] = case.attrib['file'].replace('/IDF/', '') # our unity test framework
xml.write(junit)
def pytest_sessionfinish(self, session: Session, exitstatus: int) -> None:

View File

@ -213,7 +213,7 @@ This code example is taken from :idf_file:`pytest_console_basic.py <examples/sys
'history',
'nohistory',
], indirect=True)
def test_console_advanced(config: str, dut: Dut) -> None:
def test_console_advanced(config: str, dut: IdfDut) -> None:
if config == 'history':
dut.expect('Command history enabled')
elif config == 'nohistory':
@ -242,7 +242,7 @@ This code example is taken from :idf_file:`pytest_gptimer_example.py <examples/p
@pytest.mark.supported_targets
@pytest.mark.generic
def test_gptimer_example(dut: Dut) -> None:
def test_gptimer_example(dut: IdfDut) -> None:
...
Use Params to Specify the sdkconfig Files
@ -398,7 +398,7 @@ This code example is taken from :idf_file:`pytest_esp_eth.py <components/esp_eth
.. code:: python
@pytest.mark.flaky(reruns=3, reruns_delay=5)
def test_esp_eth_ip101(dut: Dut) -> None:
def test_esp_eth_ip101(dut: IdfDut) -> None:
...
This flaky marker means that if the test function fails, the test case will be rerun up to 3 times with a 5-second delay.
@ -425,7 +425,7 @@ This marker means that if the test would be a known failure one on esp32s2.
Mark Nightly Run Test Cases
"""""""""""""""""""""""""""
Some tests cases are only triggered in nightly run pipelines due to a lack of runners.
Some test cases are only triggered in nightly run pipelines due to a lack of runners.
.. code:: python
@ -433,6 +433,49 @@ Some tests cases are only triggered in nightly run pipelines due to a lack of ru
This marker means that the test case is only run when the env var ``NIGHTLY_RUN`` or ``INCLUDE_NIGHTLY_RUN`` is set.
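A minimal sketch of such a case is shown below; the target/env markers, test name, and expected output are placeholders for illustration, not taken from this commit:

.. code:: python

    @pytest.mark.esp32
    @pytest.mark.generic
    @pytest.mark.nightly_run
    def test_example_nightly(dut: IdfDut) -> None:
        # collected only when NIGHTLY_RUN or INCLUDE_NIGHTLY_RUN is set in the environment
        dut.expect_exact('Hello world!')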
Mark Temp Disabled in CI
""""""""""""""""""""""""
Some test cases which can pass locally may need to be temporarily disabled in CI due to a lack of runners.
.. code:: python
@pytest.mark.temp_skip_ci(targets=['esp32', 'esp32s2'], reason='lack of runners')
This marker means that the test case could still be run locally with ``pytest --target esp32``, but will not run in CI.
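Note that ``temp_skip_ci`` must be given the keyword arguments ``targets`` and ``reason``; otherwise ``conftest.py`` raises a ``ValueError`` during collection. A minimal sketch combining it with ordinary target markers follows; the test name and body are placeholders:

.. code:: python

    @pytest.mark.esp32
    @pytest.mark.esp32s2
    @pytest.mark.esp32s3
    @pytest.mark.generic
    @pytest.mark.temp_skip_ci(targets=['esp32', 'esp32s2'], reason='lack of runners')
    def test_example(dut: IdfDut) -> None:
        # still runs locally with `pytest --target esp32` or `pytest --target esp32s2`,
        # but these two targets are skipped in CI; esp32s3 runs everywhere
        ...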
Run Unity Test Cases
""""""""""""""""""""
For component-based unit test apps, a single line is enough to run all single-board test cases, including normal test cases and multi-stage test cases:
.. code:: python
def test_component_ut(dut: IdfDut):
dut.run_all_single_board_cases()
It would also skip all the test cases marked with ``[ignore]``.
If you need to run a group of test cases, you may run:
.. code:: python
def test_component_ut(dut: IdfDut):
dut.run_all_single_board_cases(group='psram')
It would run all the test cases whose module name is ``[psram]``.
You may also see some test scripts that still use the following statements. This approach is deprecated; please use the recommended one shown above.
.. code:: python
def test_component_ut(dut: IdfDut):
dut.expect_exact('Press ENTER to see the list of tests')
dut.write('*')
dut.expect_unity_test_output()
For further reading about our unit testing in ESP-IDF, please refer to :doc:`our unit testing guide <../api-guides/unit-tests>`.
Run the Tests in CI
-------------------
@ -499,8 +542,8 @@ For example, if you want to run all the esp32 tests under the ``$IDF_PATH/exampl
.. code:: shell
$ pip install pytest-embedded-serial-esp pytest-embedded-idf
$ cd $IDF_PATH
$ bash install.sh --enable-pytest
$ . ./export.sh
$ cd examples/system/console/basic
$ python $IDF_PATH/tools/ci/ci_build_apps.py . --target esp32 -vv --pytest-apps
@ -527,7 +570,7 @@ Add New Markers
We're using two types of custom markers: target markers, which indicate that the test case should support this target, and env markers, which indicate that the test case should be assigned to runners with these tags in CI.
You can add new markers by adding one line under the ``${IDF_PATH}/pytest.ini`` ``markers =`` section. The grammar should be: ``<marker_name>: <marker_description>``
You can add new markers by adding one line in ``${IDF_PATH}/conftest.py``. If it is a target marker, add it to ``TARGET_MARKERS``; if it is a marker that specifies a type of test environment, add it to ``ENV_MARKERS``. The format should be: ``<marker_name>: <marker_description>``.
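For example, a new env marker would be one more entry in the ``ENV_MARKERS`` dictionary; the marker name and description below are hypothetical, for illustration only:

.. code:: python

    # in ${IDF_PATH}/conftest.py
    ENV_MARKERS = {
        # ... existing markers ...
        'my_special_runner': 'runners with my special test hardware attached',  # hypothetical entry
    }

``pytest_configure()`` then registers every entry of ``TARGET_MARKERS``, ``ENV_MARKERS``, and ``SPECIAL_MARKERS`` via ``config.addinivalue_line('markers', ...)``, so no separate change to ``pytest.ini`` is needed.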
Generate JUnit Report
^^^^^^^^^^^^^^^^^^^^^

View File

@ -10,7 +10,7 @@ from pytest_embedded import Dut
@pytest.mark.esp32
@pytest.mark.esp32c3
def test_examples_nvs_rw_value(dut: Dut) -> None:
def test_examples_nvs_rw_value_cxx(dut: Dut) -> None:
for i, counter_state in zip_longest(range(4), ('The value is not initialized yet!',), fillvalue='Done'):
dut.expect('Opening Non-Volatile Storage \\(NVS\\) handle... Done', timeout=20)
dut.expect('Reading restart counter from NVS ... {}'.format(counter_state), timeout=20)

View File

@ -13,7 +13,7 @@ from pytest_embedded import Dut
@pytest.mark.esp32c3 # no runner available at the moment
@pytest.mark.esp32s2
@pytest.mark.sdcard_spimode
def test_examples_sd_card_sdmmc(dut: Dut) -> None:
def test_examples_sd_card_sdspi(dut: Dut) -> None:
dut.expect('example: Initializing SD card', timeout=20)
dut.expect('example: Using SPI peripheral', timeout=20)

View File

@ -12,7 +12,7 @@ from pytest_embedded import Dut
@pytest.mark.esp32s2
@pytest.mark.esp32s3
@pytest.mark.generic
def test_ulp_fsm(dut: Dut) -> None:
def test_example_ulp_fsm(dut: Dut) -> None:
dut.expect_exact('Not ULP wakeup')
dut.expect_exact('Entering deep sleep')

View File

@ -10,7 +10,7 @@ from pytest_embedded import Dut
@pytest.mark.esp32
@pytest.mark.esp32s3
@pytest.mark.generic
def test_ulp_fsm_adc(dut: Dut) -> None:
def test_example_ulp_fsm_adc(dut: Dut) -> None:
dut.expect_exact('Not ULP wakeup')
dut.expect_exact('Entering deep sleep')

View File

@ -11,6 +11,7 @@ addopts =
--strict-markers
--skip-check-coredump y
--logfile-extension ".txt"
--check-duplicates y
# ignore DeprecationWarning
filterwarnings =
@ -18,74 +19,6 @@ filterwarnings =
ignore::DeprecationWarning:google.protobuf.*:
ignore::_pytest.warning_types.PytestExperimentalApiWarning
markers =
# target markers
esp32: support esp32 target
esp32s2: support esp32s2 target
esp32s3: support esp32s3 target
esp32c3: support esp32c3 target
esp32c2: support esp32c2 target
esp32c6: support esp32c6 target
esp32h4: support esp32h4 target
supported_targets: support all supported targets ('esp32', 'esp32s2', 'esp32c3', 'esp32s3', 'esp32c2', 'esp32c6')
preview_targets: support all preview targets ('linux', 'esp32h4')
all_targets: support all targets, including supported ones and preview ones
temp_skip_ci: temp skip ci for specified targets, can only work with `supported_targets`, `preview_targets`, `all_targets`
# env markers
generic: tests should be run on generic runners
nightly_run: tests should be executed as part of the nightly trigger pipeline
flash_suspend: support flash suspend feature
ip101: connected via wired 10/100M ethernet
lan8720: connected via LAN8720 ethernet transceiver
quad_psram: runners with quad psram
octal_psram: runners with octal psram
usb_host: usb host runners
usb_host_flash_disk: usb host runners with USB flash disk attached
usb_device: usb device runners
ethernet_ota: ethernet OTA runners
flash_encryption: Flash Encryption runners
flash_encryption_f4r8: Flash Encryption runners with 4-line flash and 8-line psram
flash_encryption_f8r8: Flash Encryption runners with 8-line flash and 8-line psram
flash_mutli: Multiple flash chips tests
psram: Chip has 4-line psram
ir_transceiver: runners with a pair of IR transmitter and receiver
twai_transceiver: runners with a TWAI PHY transceiver
flash_encryption_wifi_high_traffic: Flash Encryption runners with wifi high traffic support
ethernet: ethernet runner
ethernet_flash_8m: ethernet runner with 8mb flash
ethernet_router: both the runner and dut connect to the same router through ethernet NIC
wifi_ap: a wifi AP in the environment
wifi_router: both the runner and dut connect to the same wifi router
wifi_high_traffic: wifi high traffic runners
wifi_wlan: wifi runner with a wireless NIC
xtal_26mhz: runner with 26MHz xtal on board
xtal_40mhz: runner with 40MHz xtal on board
external_flash: external flash memory connected via VSPI (FSPI)
sdcard_sdmode: sdcard running in SD mode
sdcard_spimode: sdcard running in SPI mode
MSPI_F8R8: runner with Octal Flash and Octal PSRAM
MSPI_F4R8: runner with Quad Flash and Octal PSRAM
MSPI_F4R4: runner with Quad Flash and Quad PSRAM
test_jtag_arm: runner where the chip is accessible through JTAG as well
adc: ADC related tests should run on adc runners
xtal32k: Runner with external 32k crystal connected
no32kXtal: Runner with no external 32k crystal connected
multi_dut_modbus_rs485: a pair of runners connected by RS485 bus
psramv0: Runner with PSRAM version 0
# multi-dut markers
ieee802154: ieee802154 related tests should run on ieee802154 runners.
i154_multi_dut: tests should be used for i154, such as openthread.
wifi_two_dut: tests should be run on runners which has two wifi duts connected.
generic_multi_device: generic multiple devices whose corresponding gpio pins are connected to each other.
twai_network: multiple runners form a TWAI network.
sdio_master_slave: Test sdio multi board.
# host_test markers
host_test: tests which shouldn not be built at the build stage, and instead built in host_test stage.
qemu: build and test using qemu-system-xtensa, not real target.
# log related
log_cli = True
log_cli_level = INFO

View File

@ -412,6 +412,9 @@ def sort_yaml(files: List[str]) -> None:
if __name__ == '__main__':
if 'CI_JOB_ID' not in os.environ:
os.environ['CI_JOB_ID'] = 'fake' # this is a CI script
parser = argparse.ArgumentParser(description='ESP-IDF apps build/test checker')
action = parser.add_subparsers(dest='action')

View File

@ -23,10 +23,11 @@ def get_pytest_apps(
target: str,
config_rules_str: List[str],
marker_expr: str,
filter_expr: str,
preserve_all: bool = False,
extra_default_build_targets: Optional[List[str]] = None,
) -> List[App]:
pytest_cases = get_pytest_cases(paths, target, marker_expr)
pytest_cases = get_pytest_cases(paths, target, marker_expr, filter_expr)
_paths: Set[str] = set()
test_related_app_configs = defaultdict(set)
@ -126,6 +127,7 @@ def main(args: argparse.Namespace) -> None:
args.target,
args.config,
args.marker_expr,
args.filter_expr,
args.preserve_all,
extra_default_build_targets,
)
@ -181,8 +183,8 @@ if __name__ == '__main__':
parser.add_argument(
'-t',
'--target',
required=True,
help='Build apps for given target. could pass "all" to get apps for all targets',
default='all',
help='Build apps for given target',
)
parser.add_argument(
'--config',
@ -263,6 +265,12 @@ if __name__ == '__main__':
help='only build tests matching given mark expression. For example: -m "host_test and generic". Works only'
'for pytest',
)
parser.add_argument(
'-k',
'--filter-expr',
help='only build tests matching given filter expression. For example: -k "test_hello_world". Works only'
'for pytest',
)
parser.add_argument(
'--default-build-test-rules',
default=os.path.join(IDF_PATH, '.gitlab', 'ci', 'default-build-test-rules.yml'),

View File

@ -12,6 +12,7 @@ import subprocess
import sys
from contextlib import redirect_stdout
from dataclasses import dataclass
from pathlib import Path
from typing import TYPE_CHECKING, Any, List, Optional, Set, Union
try:
@ -24,12 +25,10 @@ except ImportError:
if TYPE_CHECKING:
from _pytest.python import Function
IDF_PATH = os.path.abspath(
os.getenv('IDF_PATH', os.path.join(os.path.dirname(__file__), '..', '..'))
)
IDF_PATH = os.path.abspath(os.getenv('IDF_PATH', os.path.join(os.path.dirname(__file__), '..', '..')))
def get_submodule_dirs(full_path: bool = False) -> List:
def get_submodule_dirs(full_path: bool = False) -> List[str]:
"""
To avoid issue could be introduced by multi-os or additional dependency,
we use python and git to get this output
@ -64,13 +63,9 @@ def get_submodule_dirs(full_path: bool = False) -> List:
return dirs
def _check_git_filemode(full_path): # type: (str) -> bool
def _check_git_filemode(full_path: str) -> bool:
try:
stdout = (
subprocess.check_output(['git', 'ls-files', '--stage', full_path])
.strip()
.decode('utf-8')
)
stdout = subprocess.check_output(['git', 'ls-files', '--stage', full_path]).strip().decode('utf-8')
except subprocess.CalledProcessError:
return True
@ -150,6 +145,7 @@ class PytestCase:
path: str
name: str
apps: Set[PytestApp]
nightly_run: bool
def __hash__(self) -> int:
@ -185,14 +181,8 @@ class PytestCollectPlugin:
self.get_param(item, 'app_path', os.path.dirname(case_path)),
)
)
configs = to_list(
parse_multi_dut_args(
count, self.get_param(item, 'config', 'default')
)
)
targets = to_list(
parse_multi_dut_args(count, self.get_param(item, 'target', target))
)
configs = to_list(parse_multi_dut_args(count, self.get_param(item, 'config', 'default')))
targets = to_list(parse_multi_dut_args(count, self.get_param(item, 'target', target)))
else:
app_paths = [os.path.dirname(case_path)]
configs = ['default']
@ -212,8 +202,28 @@ class PytestCollectPlugin:
)
def get_pytest_files(paths: List[str]) -> List[str]:
# this is a workaround to solve pytest collector super slow issue
# benchmark with
# - time pytest -m esp32 --collect-only
# user=15.57s system=1.35s cpu=95% total=17.741
# - time { find -name 'pytest_*.py'; } | xargs pytest -m esp32 --collect-only
# user=0.11s system=0.63s cpu=36% total=2.044
# user=1.76s system=0.22s cpu=43% total=4.539
# use glob.glob would also save a bunch of time
pytest_scripts: Set[str] = set()
for p in paths:
path = Path(p)
pytest_scripts.update(str(_p) for _p in path.glob('**/pytest_*.py'))
return list(pytest_scripts)
def get_pytest_cases(
paths: Union[str, List[str]], target: str = 'all', marker_expr: Optional[str] = None
paths: Union[str, List[str]],
target: str = 'all',
marker_expr: Optional[str] = None,
filter_expr: Optional[str] = None,
) -> List[PytestCase]:
import pytest
from _pytest.config import ExitCode
@ -239,28 +249,25 @@ def get_pytest_cases(
os.environ['INCLUDE_NIGHTLY_RUN'] = '1'
cases = []
for t in targets:
collector = PytestCollectPlugin(t)
if marker_expr:
_marker_expr = f'{t} and ({marker_expr})'
else:
_marker_expr = t # target is also a marker
for target in targets:
collector = PytestCollectPlugin(target)
for path in to_list(paths):
with io.StringIO() as buf:
with redirect_stdout(buf):
cmd = ['--collect-only', path, '-q', '-m', _marker_expr]
res = pytest.main(cmd, plugins=[collector])
if res.value != ExitCode.OK:
if res.value == ExitCode.NO_TESTS_COLLECTED:
print(
f'WARNING: no pytest app found for target {t} under path {path}'
)
else:
print(buf.getvalue())
raise RuntimeError(
f'pytest collection failed at {path} with command \"{" ".join(cmd)}\"'
)
with io.StringIO() as buf:
with redirect_stdout(buf):
cmd = ['--collect-only', *get_pytest_files(paths), '--target', target, '-q']
if marker_expr:
cmd.extend(['-m', marker_expr])
if filter_expr:
cmd.extend(['-k', filter_expr])
res = pytest.main(cmd, plugins=[collector])
if res.value != ExitCode.OK:
if res.value == ExitCode.NO_TESTS_COLLECTED:
print(f'WARNING: no pytest app found for target {target} under paths {", ".join(paths)}')
else:
print(buf.getvalue())
raise RuntimeError(
f'pytest collection failed at {", ".join(paths)} with command \"{" ".join(cmd)}\"'
)
cases.extend(collector.cases)
@ -296,9 +303,7 @@ def get_ttfw_cases(paths: Union[str, List[str]]) -> List[Any]:
cases = []
for path in to_list(paths):
assign = IDFAssignTest(
path, os.path.join(IDF_PATH, '.gitlab', 'ci', 'target-test.yml')
)
assign = IDFAssignTest(path, os.path.join(IDF_PATH, '.gitlab', 'ci', 'target-test.yml'))
with contextlib.redirect_stdout(None): # swallow stdout
try:
cases += assign.search_cases()
@ -308,9 +313,7 @@ def get_ttfw_cases(paths: Union[str, List[str]]) -> List[Any]:
return cases
def get_ttfw_app_paths(
paths: Union[str, List[str]], target: Optional[str] = None
) -> Set[str]:
def get_ttfw_app_paths(paths: Union[str, List[str]], target: Optional[str] = None) -> Set[str]:
"""
Get the app paths from ttfw_idf under the given paths
"""

View File

@ -8,5 +8,5 @@ from pytest_embedded import Dut
@pytest.mark.esp32
@pytest.mark.esp32s2
@pytest.mark.generic
def test_sys_memprot(dut: Dut) -> None:
def test_sys_longjmp(dut: Dut) -> None:
dut.expect_exact('Test successful')