ci: remove ttfw related files

Fu Hanxi 2023-10-04 16:41:03 +02:00
parent ca63d0f943
commit 4dbf8c8948
No known key found for this signature in database
GPG Key ID: 19399699CF3C4B16
87 changed files with 44 additions and 7586 deletions

View File

@ -63,13 +63,11 @@
- `macos_test`
- `nvs_coverage`
- `submodule`
- `unit_test[_esp32/esp32s2/...]`
- `weekend_test`
- `windows`
There are two general labels (not recommended, since these two labels will trigger a lot of jobs):
- `target_test`: includes all targets for `example_test`, `custom_test`, `component_ut`, `unit_test`, `integration_test`
- `target_test`: includes all targets for `example_test`, `custom_test`, `component_ut`, `integration_test`
- `all_test`: includes all test labels
### How to trigger a `detached` pipeline without pushing new commits?
@ -197,7 +195,7 @@ if a name has multiple phrases, use `-` to concatenate them.
- `target_test`
a combination of `example_test`, `custom_test`, `unit_test`, `component_ut`, `integration_test` and all targets
a combination of `example_test`, `custom_test`, `component_ut`, `integration_test` and all targets
#### `rules` Template Naming Rules

View File

@ -124,12 +124,7 @@ variables:
# install latest python packages
# target test jobs
if [[ "${CI_JOB_STAGE}" == "target_test" ]]; then
# ttfw jobs
if ! echo "${CI_JOB_NAME}" | egrep ".*pytest.*"; then
run_cmd bash install.sh --enable-ci --enable-ttfw
else
run_cmd bash install.sh --enable-ci --enable-pytest
fi
run_cmd bash install.sh --enable-ci --enable-pytest
elif [[ "${CI_JOB_STAGE}" == "build_doc" ]]; then
run_cmd bash install.sh --enable-ci --enable-docs
elif [[ "${CI_JOB_STAGE}" == "build" ]]; then

View File

@ -115,20 +115,6 @@ build:integration_test:
# -------------
# Special Cases
# -------------
"test:component_ut-{0}":
matrix:
- *all_targets
labels:
- component_ut
- "component_ut_{0}"
- target_test
patterns:
- component_ut
- "build-component_ut-{0}"
included_in:
- build:component_ut
- "build:component_ut-{0}"
- build:target_test
# To reduce the specific runners' usage.
# Do not create these jobs by default patterns on development branches

View File

@ -27,8 +27,6 @@
- "tools/ci/python_packages/gitlab_api.py"
- "tools/ci/python_packages/idf_http_server_test/**/*"
- "tools/ci/python_packages/idf_iperf_test_util/**/*"
- "tools/ci/python_packages/tiny_test_fw/**/*"
- "tools/ci/python_packages/ttfw_idf/**/*"
- "tools/ci/python_packages/common_test_methods.py"
- "tools/esp_prov/**/*"
@ -71,22 +69,11 @@
.patterns-custom_test: &patterns-custom_test
- "tools/ci/python_packages/gitlab_api.py"
- "tools/ci/python_packages/tiny_test_fw/**/*"
- "tools/ci/python_packages/ttfw_idf/**/*"
- "tools/ci/python_packages/common_test_methods.py"
- "tools/test_apps/**/*"
- "tools/ldgen/**/*"
.patterns-unit_test: &patterns-unit_test
- "tools/ci/python_packages/gitlab_api.py"
- "tools/ci/python_packages/tiny_test_fw/**/*"
- "tools/ci/python_packages/ttfw_idf/**/*"
- "tools/unit-test-app/**/*"
- "components/**/*"
.patterns-component_ut: &patterns-component_ut
- "tools/ci/python_packages/gitlab_api.py"
- "tools/ci/python_packages/common_test_methods.py"
@ -293,18 +280,6 @@
- "components/driver/include/driver/sdmmc*.h"
- "components/sdmmc/**/*"
# for jobs: UT_xx_SDSPI related
.patterns-unit_test-sdio: &patterns-unit_test-sdio
- "components/hal/sdio*.c"
- "components/hal/include/hal/sdio*.h"
- "components/driver/sdspi*.c"
- "components/driver/sdio*.c"
- "components/driver/sdmmc*.c"
- "components/driver/include/driver/sdspi*.h"
- "components/driver/include/driver/sdio*.h"
- "components/driver/include/driver/sdmmc*.h"
- "components/sdmmc/**/*"
# for jobs: component_ut_pytest_flash_multi
.patterns-component_ut-flash_multi: &patterns-component_ut-flash_multi
- "components/spi_flash/**/*"

View File

@ -1542,9 +1542,15 @@ pytest_test_apps_esp32c3_wifi_two_dut:
JOB_FULL_NAME="${JOB_NAME_PREFIX}_${CI_NODE_INDEX}"
CONFIG_FILE="${CONFIG_FILE_PATH}/${JOB_FULL_NAME}.yml"
.target_test_job_template:
.integration_test_template:
extends:
- .target_test_template
- .rules:test:integration_test
- .before_script:minimal
image: ${CI_INTEGRATION_TEST_ENV_IMAGE}
cache: []
needs: # the assign already needs all the build jobs
- assign_integration_test
artifacts:
when: always
paths:
@ -1556,33 +1562,7 @@ pytest_test_apps_esp32c3_wifi_two_dut:
junit: $LOG_PATH/*/XUNIT_RESULT.xml
expire_in: 1 week
variables:
TEST_FW_PATH: "$CI_PROJECT_DIR/tools/tiny-test-fw"
LOG_PATH: "$CI_PROJECT_DIR/TEST_LOGS"
ENV_FILE: "$CI_PROJECT_DIR/ci-test-runner-configs/$CI_RUNNER_DESCRIPTION/EnvConfig.yml"
script:
- *define_config_file_name
# first test if config file exists, if not exist, exit 0
- |
{ [[ -e $CONFIG_FILE ]]; } || { echo 'No config file found. Consider decreasing the parallel count of this job in ".gitlab/ci/target-test.yml"'; exit 0; }
# clone test env configs
- retry_failed git clone $TEST_ENV_CONFIG_REPO
- python $CHECKOUT_REF_SCRIPT ci-test-runner-configs ci-test-runner-configs
# git clone the known failure cases repo, run test
- retry_failed git clone $KNOWN_FAILURE_CASES_REPO known_failure_cases
# run test
- cd tools/ci/python_packages/tiny_test_fw/bin
- run_cmd python Runner.py $TEST_CASE_PATH -c $CONFIG_FILE -e $ENV_FILE --known_failure_cases_file $CI_PROJECT_DIR/known_failure_cases/known_failure_cases.txt
.integration_test_template:
extends:
- .target_test_job_template
- .rules:test:integration_test
- .before_script:minimal
image: ${CI_INTEGRATION_TEST_ENV_IMAGE}
cache: []
needs: # the assign already needs all the build jobs
- assign_integration_test
variables:
LOCAL_ENV_CONFIG_PATH: "$CI_PROJECT_DIR/ci-test-runner-configs/$CI_RUNNER_DESCRIPTION/ESP32_IDF"
LOG_PATH: "${CI_PROJECT_DIR}/TEST_LOGS"
TEST_CASE_FILE_PATH: "$CI_PROJECT_DIR/auto_test_script/TestCaseFiles"

View File

@ -1,11 +0,0 @@
[LOGGING]
FileMask := LOG_ALL | DEBUG | TTCN_DEBUG
ConsoleMask := LOG_ALL | DEBUG | TTCN_DEBUG
LogSourceInfo := Yes
[MODULE_PARAMETERS]
libtest.m_ip_dst := "10.0.0.1"
libtest.m_ip_src := "10.0.0.2"
[EXECUTE]
esp32_netsuite.control

View File

@ -1,11 +0,0 @@
module esp32_netsuite {
import from tcp_suite all;
control {
execute(tc_tcp_002());
execute(tc_tcp_003());
execute(tc_tcp_004());
execute(tc_tcp_005());
}
}

View File

@ -1,151 +0,0 @@
import os
import re
import socket
import subprocess
import time
from shutil import copyfile
from threading import Event, Thread
import ttfw_idf
from tiny_test_fw import DUT, Utility
stop_sock_listener = Event()
stop_io_listener = Event()
sock = None
client_address = None
manual_test = False
def io_listener(dut1):
global sock
global client_address
data = b''
while not stop_io_listener.is_set():
try:
data = dut1.expect(re.compile(r'PacketOut:\[([a-fA-F0-9]+)\]'), timeout=5)
except DUT.ExpectTimeout:
continue
if data != () and data[0] != b'':
packet_data = data[0]
print('Packet_data>{}<'.format(packet_data))
response = bytearray.fromhex(packet_data.decode())
print('Sending to socket:')
packet = ' '.join(format(x, '02x') for x in bytearray(response))
print('Packet>{}<'.format(packet))
if client_address is not None:
sock.sendto(response, ('127.0.0.1', 7777))
def sock_listener(dut1):
global sock
global client_address
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(5)
server_address = '0.0.0.0'
server_port = 7771
server = (server_address, server_port)
sock.bind(server)
try:
while not stop_sock_listener.is_set():
try:
payload, client_address = sock.recvfrom(1024)
packet = ' '.join(format(x, '02x') for x in bytearray(payload))
print('Received from address {}, data {}'.format(client_address, packet))
dut1.write(str.encode(packet))
except socket.timeout:
pass
finally:
sock.close()
sock = None
@ttfw_idf.idf_example_test(env_tag='Example_WIFI')
def lwip_test_suite(env, extra_data):
global stop_io_listener
global stop_sock_listener
"""
steps: |
1. Rebuilds test suite with esp32_netsuite.ttcn
2. Starts listeners on stdout and socket
3. Execute ttcn3 test suite
4. Collect result from ttcn3
"""
dut1 = env.get_dut('net_suite', 'examples/system/network_tests', dut_class=ttfw_idf.ESP32DUT)
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, 'net_suite.bin')
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('net_suite', '{}KB'.format(bin_size // 1024))
ttfw_idf.check_performance('net_suite', bin_size // 1024, dut1.TARGET)
dut1.start_app()
thread1 = Thread(target=sock_listener, args=(dut1, ))
thread2 = Thread(target=io_listener, args=(dut1, ))
if not manual_test:
# Variables referring to esp32 ttcn test suite
TTCN_SRC = 'esp32_netsuite.ttcn'
TTCN_CFG = 'esp32_netsuite.cfg'
# System Paths
netsuite_path = os.getenv('NETSUITE_PATH')
netsuite_src_path = os.path.join(netsuite_path, 'src')
test_dir = os.path.dirname(os.path.realpath(__file__))
# Building the suite
print('Rebuilding the test suite')
print('-------------------------')
# copy esp32 specific files to ttcn net-suite dir
copyfile(os.path.join(test_dir, TTCN_SRC), os.path.join(netsuite_src_path, TTCN_SRC))
copyfile(os.path.join(test_dir, TTCN_CFG), os.path.join(netsuite_src_path, TTCN_CFG))
proc = subprocess.Popen(['bash', '-c', 'cd ' + netsuite_src_path + ' && source make.sh'],
cwd=netsuite_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = proc.stdout.read()
print('Note: we expect the first build step to fail (titan/net_suite build system is not suitable for multijob make)')
print(output)
proc = subprocess.Popen(['bash', '-c', 'cd ' + netsuite_src_path + ' && make'],
cwd=netsuite_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print('Note: This time all dependencies shall be generated -- multijob make shall pass')
output = proc.stdout.read()
print(output)
# Executing the test suite
thread1.start()
thread2.start()
time.sleep(2)
print('Executing the test suite')
print('------------------------')
proc = subprocess.Popen(['ttcn3_start', os.path.join(netsuite_src_path,'test_suite'), os.path.join(netsuite_src_path, TTCN_CFG)],
stdout=subprocess.PIPE)
output = proc.stdout.read().decode()  # decode bytes so the re.search calls below operate on str
print(output)
print('Collecting results')
print('------------------')
verdict_stats = re.search('(Verdict statistics:.*)', output)
if verdict_stats:
verdict_stats = verdict_stats.group(1)
else:
verdict_stats = ''
verdict = re.search('Overall verdict: pass', output)
if verdict:
print('Test passed!')
Utility.console_log(verdict_stats, 'green')
else:
Utility.console_log(verdict_stats, 'red')
raise ValueError('Test failed with: {}'.format(verdict_stats))
else:
try:
# Executing the test suite
thread1.start()
thread2.start()
time.sleep(2)
while True:
time.sleep(0.5)
except KeyboardInterrupt:
pass
print('Executing done, waiting for tests to finish')
print('-------------------------------------------')
stop_io_listener.set()
stop_sock_listener.set()
thread1.join()
thread2.join()
if __name__ == '__main__':
print('Manual execution, please build and start ttcn in a separate console')
manual_test = True
lwip_test_suite()

View File

@ -1,2 +0,0 @@
CaseConfig:
- name: lwip_test_suite

View File

@ -40,18 +40,6 @@ Building an example is the same as building any other project:
- `idf.py build` to build the example.
- Follow the printed instructions to flash, or run `idf.py -p PORT flash`.
## Running Test Python Script (ttfw)
Some of the examples have `..._test.py` scripts that are used to test that the example works as expected. These scripts run automatically in the internal test queue. They are not intended to be run by ESP-IDF users but sometimes you may want to run them locally.
Install Python dependencies and export the Python path where the IDF CI Python modules are found with the following commands:
```bash
bash install.sh --enable-ttfw
source export.sh
export PYTHONPATH=$IDF_PATH/tools/ci/python_packages:$IDF_PATH/tools:$PYTHONPATH
```
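For orientation, here is a minimal sketch of what such a `..._test.py` script looks like, following the ttfw pattern visible elsewhere in this diff; the app path, env tag, and expected output are hypothetical placeholders, not a real test in this repository:

```python
# Hypothetical ttfw test script sketch; app path, env tag and the expected
# string are placeholders, not a real test in this repository.
import ttfw_idf


@ttfw_idf.idf_example_test(env_tag='Example_GENERIC')
def test_examples_hello_world(env, extra_data):
    dut = env.get_dut('hello_world', 'examples/get-started/hello_world')
    dut.start_app()                          # flash and run the app on the DUT
    dut.expect('Hello world!', timeout=30)   # raises ExpectTimeout on failure


if __name__ == '__main__':
    test_examples_hello_world()
```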
## Running Test Python Script (pytest)
Some of the examples have `pytest_....py` scripts that use `pytest` as the test framework. For detailed information, please refer to the "Run the Tests Locally" section under the [ESP-IDF tests in Pytest documentation](../docs/en/contribute/esp-idf-tests-with-pytest.rst).

View File

@ -21,7 +21,7 @@ Maximum data of 512 bytes can be transferred over L2CAP when MTU is set to 512 a
Note :
* To install the dependency packages needed, please refer to the top level [README file](../../../../README.md#running-test-python-script-ttfw).
* To install the dependency packages needed, please refer to the top level [README file](../../../../README.md#running-test-python-script-pytest).
* Currently this Python utility is only supported on Linux (BLE communication is via BlueZ + DBus).
## How to Use Example

View File

@ -21,7 +21,7 @@ Maximum data of 512 bytes can be transferred over L2CAP when MTU is set to 512 a
Note :
* To install the dependency packages needed, please refer to the top level [README file](../../../../README.md#running-test-python-script-ttfw).
* To install the dependency packages needed, please refer to the top level [README file](../../../../README.md#running-test-python-script-pytest).
* Currently this Python utility is only supported on Linux (BLE communication is via BlueZ + DBus).
## How to Use Example

View File

@ -76,7 +76,7 @@ It performs three GATT operations against the specified peer:
Note :
* To install the dependency packages needed, please refer to the top level [README file](../../../../README.md#running-test-python-script-ttfw).
* To install the dependency packages needed, please refer to the top level [README file](../../../../README.md#running-test-python-script-pytest).
* Currently this Python utility is only supported on Linux (BLE communication is via BlueZ + DBus).
## How to use example

View File

@ -71,7 +71,7 @@ It takes input from user and performs notify GATT operations against the specifi
Note :
* To install the dependency packages needed, please refer to the top level [README file](../../../../README.md#running-test-python-script-ttfw).
* To install the dependency packages needed, please refer to the top level [README file](../../../../README.md#running-test-python-script-pytest).
* Currently this Python utility is only supported on Linux (BLE communication is via BlueZ + DBus).
## How to use example

View File

@ -33,7 +33,7 @@ To test this demo, use any BLE GATT server app that advertises support for the A
Note :
* To install the dependency packages needed, please refer to the top level [README file](../../../README.md#running-test-python-script-ttfw).
* To install the dependency packages needed, please refer to the top level [README file](../../../README.md#running-test-python-script-pytest).
* Currently this Python utility is only supported on Linux (BLE communication is via BlueZ + DBus).
## How to Use Example

View File

@ -15,7 +15,7 @@ To test this demo, any BLE scanner app can be used.
Note :
* To install the dependency packages needed, please refer to the top level [README file](../../../README.md#running-test-python-script-ttfw).
* To install the dependency packages needed, please refer to the top level [README file](../../../README.md#running-test-python-script-pytest).
* Currently this Python utility is only supported on Linux (BLE communication is via BlueZ + DBus).
## How to Use Example

View File

@ -19,7 +19,7 @@ To test this demo, any BLE scanner app can be used.
Note :
* To install the dependency packages needed, please refer to the top level [README file](../../../README.md#running-test-python-script-ttfw).
* To install the dependency packages needed, please refer to the top level [README file](../../../README.md#running-test-python-script-pytest).
* Currently this Python utility is only supported on Linux (BLE communication is via BlueZ + DBus).
## How to Use Example

View File

@ -108,7 +108,7 @@ def test_example_captive_portal(dut: Dut) -> None:
raise RuntimeError('SoftAP connected to another host! {} != {}'.format(ip, got_ip))
except pexpect.exceptions.TIMEOUT:
# print what is happening on DUT side
logging.info('in exception tiny_test_fw.DUT.ExpectTimeout')
logging.info('in exception pexpect.exceptions.TIMEOUT')
logging.info(dut.read())
raise
print('Connected to DUT SoftAP')

View File

@ -45,7 +45,7 @@ For iOS, a provisioning application along with source code is available on GitHu
#### Platform : Linux / Windows / macOS
To install the dependency packages needed, please refer to the top level [README file](../../README.md#running-test-python-script-ttfw).
To install the dependency packages needed, please refer to the top level [README file](../../README.md#running-test-python-script-pytest).
`esp_prov` supports BLE and SoftAP transport for Linux, macOS and Windows platforms. For BLE, however, if dependencies are not met, the script falls back to console mode and requires another application through which the communication can take place. The `esp_prov` console will guide you through the provisioning process: locating the correct BLE GATT services and characteristics, the values to write, and the read values to input.

View File

@ -13,7 +13,7 @@ from pathlib import Path
from typing import Dict, List, Optional, Tuple
import yaml
from idf_ci_utils import IDF_PATH, get_ttfw_cases
from idf_ci_utils import IDF_PATH
YES = u'\u2713'
NO = u'\u2717'
@ -228,35 +228,15 @@ def check_test_scripts(
def check_enable_test(
_app: App,
_pytest_app_dir_targets_dict: Dict[str, Dict[str, str]],
_ttfw_app_dir_targets_dict: Dict[str, Dict[str, str]],
) -> bool:
if _app.app_dir in _pytest_app_dir_targets_dict:
test_script_path = _pytest_app_dir_targets_dict[_app.app_dir]['script_path']
actual_verified_targets = sorted(
set(_pytest_app_dir_targets_dict[_app.app_dir]['targets'])
)
elif _app.app_dir in _ttfw_app_dir_targets_dict:
test_script_path = _ttfw_app_dir_targets_dict[_app.app_dir]['script_path']
actual_verified_targets = sorted(
set(_ttfw_app_dir_targets_dict[_app.app_dir]['targets'])
)
else:
return True # no test case
if (
_app.app_dir in _pytest_app_dir_targets_dict
and _app.app_dir in _ttfw_app_dir_targets_dict
):
print(
f'''
Both pytest and ttfw test cases are found for {_app.app_dir},
please remove one of them.
pytest script: {_pytest_app_dir_targets_dict[_app.app_dir]['script_path']}
ttfw script: {_ttfw_app_dir_targets_dict[_app.app_dir]['script_path']}
'''
)
return False
actual_extra_tested_targets = set(actual_verified_targets) - set(
_app.verified_targets
)
@ -297,9 +277,6 @@ def check_test_scripts(
If you want to enable test targets in the pytest test scripts, please add `@pytest.mark.MISSING_TARGET`
marker above the test case function.
If you want to enable test targets in the ttfw test scripts, please add/extend the keyword `targets` in
the ttfw decorator, e.g. `@ttfw_idf.idf_example_test(..., target=['esp32', 'MISSING_TARGET'])`
If you want to disable the test targets in the manifest file, please modify your manifest file with
the following code snippet:
@ -334,10 +311,8 @@ def check_test_scripts(
exit_code = 0
pytest_cases = get_pytest_cases(paths)
ttfw_cases = get_ttfw_cases(paths)
pytest_app_dir_targets_dict = {}
ttfw_app_dir_targets_dict = {}
for case in pytest_cases:
for pytest_app in case.apps:
app_dir = os.path.relpath(pytest_app.path, IDF_PATH)
@ -351,18 +326,6 @@ def check_test_scripts(
pytest_app.target
)
for case in ttfw_cases:
app_dir = case.case_info['app_dir']
if app_dir not in ttfw_app_dir_targets_dict:
ttfw_app_dir_targets_dict[app_dir] = {
'script_path': case.case_info['script_path'],
'targets': [case.case_info['target'].lower()],
}
else:
ttfw_app_dir_targets_dict[app_dir]['targets'].append(
case.case_info['target'].lower()
)
checked_app_dirs = set()
for app in apps:
if app.app_dir not in checked_app_dirs:
@ -371,7 +334,7 @@ def check_test_scripts(
continue
success = check_enable_test(
app, pytest_app_dir_targets_dict, ttfw_app_dir_targets_dict
app, pytest_app_dir_targets_dict
)
if not success:
print(f'check_enable_test failed for app: {app}')

View File

@ -558,7 +558,6 @@ components/lwip/apps/ping/ping.c
components/lwip/include/apps/dhcpserver/dhcpserver_options.h
components/lwip/include/apps/esp_ping.h
components/lwip/include/apps/ping/ping.h
components/lwip/weekend_test/net_suite_test.py
components/mbedtls/esp_crt_bundle/test_gen_crt_bundle/test_gen_crt_bundle.py
components/mbedtls/port/aes/block/esp_aes.c
components/mbedtls/port/aes/dma/esp_aes.c
@ -1147,8 +1146,6 @@ examples/mesh/ip_internal_network/main/mqtt_app.c
examples/mesh/manual_networking/main/include/mesh_light.h
examples/mesh/manual_networking/main/mesh_light.c
examples/mesh/manual_networking/main/mesh_main.c
examples/network/network_tests/main/stdinout.c
examples/network/network_tests/main/stdinout.h
examples/network/simple_sniffer/main/cmd_sniffer.c
examples/network/simple_sniffer/main/cmd_sniffer.h
examples/network/simple_sniffer/main/simple_sniffer_example_main.c
@ -1325,7 +1322,3 @@ tools/test_apps/system/memprot/main/esp32s2/test_memprot_main.c
tools/test_apps/system/no_embedded_paths/check_for_file_paths.py
tools/test_apps/system/no_embedded_paths/main/test_no_embedded_paths_main.c
tools/test_apps/system/startup/main/test_startup_main.c
tools/unit-test-app/idf_ext.py
tools/unit-test-app/main/app_main.c
tools/unit-test-app/tools/CreateSectionTable.py
tools/unit-test-app/tools/UnitTestParser.py

View File

@ -16,7 +16,7 @@ from pathlib import Path
import yaml
from idf_build_apps import LOGGER, App, build_apps, find_apps, setup_logging
from idf_build_apps.constants import SUPPORTED_TARGETS
from idf_ci_utils import IDF_PATH, get_ttfw_app_paths
from idf_ci_utils import IDF_PATH
CI_ENV_VARS = {
'EXTRA_CFLAGS': '-Werror -Werror=deprecated-declarations -Werror=unused-variable '
@ -100,8 +100,6 @@ def get_cmake_apps(
from idf_pytest.constants import PytestApp
from idf_pytest.script import get_pytest_cases
ttfw_app_dirs = get_ttfw_app_paths(paths, target)
apps = find_apps(
paths,
recursive=True,
@ -123,7 +121,7 @@ def get_cmake_apps(
apps_for_build = []
pytest_cases_apps = [app for case in get_pytest_cases(paths, target) for app in case.apps]
for app in apps:
if preserve_all or app.app_dir in ttfw_app_dirs: # relpath
if preserve_all: # relpath
app.preserve = True
if PytestApp(os.path.realpath(app.app_dir), app.target, app.config_name) in pytest_cases_apps:

View File

@ -41,3 +41,4 @@ tools/templates/sample_component/main.c
tools/ci/cleanup_ignore_lists.py
tools/ci/idf_pytest/**/*
tools/ci/artifacts_handler.py
tools/unit-test-app/**/*

View File

@ -112,5 +112,3 @@ tools/test_idf_py/test_hints.py
tools/test_idf_py/test_idf_py.py
tools/test_idf_tools/test_idf_tools.py
tools/test_mkdfu/test_mkdfu.py
tools/unit-test-app/tools/get_available_configs.sh
tools/unit-test-app/unit_test.py

View File

@ -1,15 +1,14 @@
# internal use only for CI
# some CI related util functions
#
# SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
#
import contextlib
# internal use only for CI
# some CI related util functions
import logging
import os
import subprocess
import sys
from typing import Any, List, Optional, Set, Union
from typing import Any, List
IDF_PATH = os.path.abspath(os.getenv('IDF_PATH', os.path.join(os.path.dirname(__file__), '..', '..')))
@ -111,61 +110,3 @@ def to_list(s: Any) -> List[Any]:
return s
return [s]
##################
# TTFW Utilities #
##################
def get_ttfw_cases(paths: Union[str, List[str]]) -> List[Any]:
"""
Get the test cases from ttfw_idf under the given paths
:param paths: list of paths to search
"""
try:
from ttfw_idf.IDFAssignTest import IDFAssignTest
except ImportError:
sys.path.append(os.path.join(IDF_PATH, 'tools', 'ci', 'python_packages'))
from ttfw_idf.IDFAssignTest import IDFAssignTest
# mock CI_JOB_ID if not exists
if not os.environ.get('CI_JOB_ID'):
os.environ['CI_JOB_ID'] = '1'
cases = []
for path in to_list(paths):
assign = IDFAssignTest(path, os.path.join(IDF_PATH, '.gitlab', 'ci', 'target-test.yml'))
with contextlib.redirect_stdout(None): # swallow stdout
try:
cases += assign.search_cases()
except ImportError as e:
logging.error(str(e))
return cases
def get_ttfw_app_paths(paths: Union[str, List[str]], target: Optional[str] = None) -> Set[str]:
"""
Get the app paths from ttfw_idf under the given paths
"""
from idf_build_apps import CMakeApp
cases = get_ttfw_cases(paths)
res: Set[str] = set()
for case in cases:
if not target or target == case.case_info['target'].lower():
# ttfw has no good way to detect the app path for master-slave tests
# the apps real location may be the sub folder of the test script path
# check if the current folder is an app, if it's not, add all its subfolders if they are apps
# only one level down
_app_dir = case.case_info['app_dir']
if CMakeApp.is_app(_app_dir):
res.add(_app_dir)
else:
for child in os.listdir(_app_dir):
sub_path = os.path.join(_app_dir, child)
if os.path.isdir(sub_path) and CMakeApp.is_app(sub_path):
res.add(sub_path)
return res

View File

@ -2,7 +2,6 @@ components/app_update/otatool.py
components/efuse/efuse_table_gen.py
components/efuse/test_efuse_host/efuse_tests.py
components/esp_local_ctrl/python/esp_local_ctrl_pb2.py
components/lwip/weekend_test/net_suite_test.py
components/mbedtls/esp_crt_bundle/gen_crt_bundle.py
components/mbedtls/esp_crt_bundle/test_gen_crt_bundle/test_gen_crt_bundle.py
components/nvs_flash/nvs_partition_generator/nvs_partition_gen.py
@ -21,7 +20,6 @@ components/wifi_provisioning/python/wifi_scan_pb2.py
components/xtensa/trax/traceparse.py
examples/protocols/esp_local_ctrl/scripts/esp_local_ctrl.py
examples/protocols/esp_local_ctrl/scripts/proto_lc.py
examples/protocols/http_server/advanced_tests/scripts/test.py
examples/storage/parttool/parttool_example.py
examples/system/ota/otatool/get_running_partition.py
examples/system/ota/otatool/otatool_example.py
@ -35,24 +33,6 @@ tools/ci/python_packages/idf_iperf_test_util/Attenuator.py
tools/ci/python_packages/idf_iperf_test_util/LineChart.py
tools/ci/python_packages/idf_iperf_test_util/PowerControl.py
tools/ci/python_packages/idf_iperf_test_util/TestReport.py
tools/ci/python_packages/tiny_test_fw/App.py
tools/ci/python_packages/tiny_test_fw/DUT.py
tools/ci/python_packages/tiny_test_fw/Env.py
tools/ci/python_packages/tiny_test_fw/EnvConfig.py
tools/ci/python_packages/tiny_test_fw/TinyFW.py
tools/ci/python_packages/tiny_test_fw/Utility/CIAssignTest.py
tools/ci/python_packages/tiny_test_fw/Utility/CaseConfig.py
tools/ci/python_packages/tiny_test_fw/Utility/GitlabCIJob.py
tools/ci/python_packages/tiny_test_fw/Utility/SearchCases.py
tools/ci/python_packages/tiny_test_fw/Utility/TestCase.py
tools/ci/python_packages/tiny_test_fw/Utility/__init__.py
tools/ci/python_packages/tiny_test_fw/bin/Runner.py
tools/ci/python_packages/tiny_test_fw/bin/example.py
tools/ci/python_packages/tiny_test_fw/docs/conf.py
tools/ci/python_packages/ttfw_idf/IDFAssignTest.py
tools/ci/python_packages/ttfw_idf/IDFDUT.py
tools/ci/python_packages/ttfw_idf/__init__.py
tools/ci/python_packages/ttfw_idf/unity_test_parser.py
tools/ci/python_packages/wifi_tools.py
tools/ci/test_autocomplete.py
tools/esp_app_trace/espytrace/apptrace.py
@ -93,7 +73,3 @@ tools/test_idf_py/test_idf_extensions/test_ext/test_extension.py
tools/test_idf_py/test_idf_py.py
tools/test_idf_tools/test_idf_tools.py
tools/test_mkdfu/test_mkdfu.py
tools/unit-test-app/idf_ext.py
tools/unit-test-app/tools/CreateSectionTable.py
tools/unit-test-app/tools/UnitTestParser.py
tools/unit-test-app/unit_test.py

View File

@ -1,87 +0,0 @@
# SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
"""
class for handling Test Apps. Currently it provides the following features:
1. get SDK path
2. get SDK tools
3. parse application info from its path. for example:
* provide download info
* provide partition table info
Test Apps should inherit from the BaseApp class and overwrite the methods.
"""
import os
import sys
import time
# timestamp used for calculate log folder name
LOG_FOLDER_TIMESTAMP = time.time()
class BaseApp(object):
"""
Base Class for App.
Defines the mandatory methods that App need to implement.
Also implements some common methods.
:param app_path: the path for app.
:param config_name: app configuration to be tested
:param target: build target
"""
def __init__(self, app_path, config_name=None, target=None):
pass
@classmethod
def get_sdk_path(cls):
"""
get sdk path.
subclass must overwrite this method.
:return: abs sdk path
"""
pass
@classmethod
def get_tools(cls):
"""
get SDK related tools for applications
subclass must overwrite this method.
:return: tuple, abs path of each tool
"""
pass
@classmethod
def get_log_folder(cls, test_suite_name):
"""
By default log folder is ``${SDK_PATH}/TEST_LOGS/${test_suite_name}_${timestamp}``.
The log folder name is consistent once the test starts running, ensuring all logs will be put into the same folder.
:param test_suite_name: the test suite name, by default it's the base file name for main module
:return: the log folder path
"""
if not test_suite_name:
test_suite_name = os.path.splitext(os.path.basename(sys.modules['__main__'].__file__))[0]
sdk_path = cls.get_sdk_path()
log_folder = os.path.join(sdk_path, 'TEST_LOGS',
test_suite_name +
time.strftime('_%m%d_%H_%M_%S', time.localtime(LOG_FOLDER_TIMESTAMP)))
if not os.path.exists(log_folder):
os.makedirs(log_folder)
return log_folder
def process_app_info(self):
"""
parse built app info for DUTTool
subclass must overwrite this method.
:return: required info for specific DUTTool
"""
pass

View File

@ -1,790 +0,0 @@
# SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
"""
DUT provides 3 major groups of features:
* DUT port feature, provide basic open/close/read/write features
* DUT tools, provide extra methods to control the device, like download and start app
* DUT expect method, provide features for users to check DUT outputs
The current design of DUT has 3 classes for one DUT: BaseDUT, DUTPort, DUTTool.
* BaseDUT class:
* defines methods DUT port and DUT tool need to overwrite
* provides the expect methods and some other methods based on DUTPort
* DUTPort class:
* inherits from the BaseDUT class
* implements the port features by overwriting port methods defined in BaseDUT
* DUTTool class:
* inherits from one of the DUTPort classes
* implements the tool features by overwriting tool methods defined in BaseDUT
* could add some new methods provided by the tool
This module implements the BaseDUT class and one of the port classes, SerialDUT.
Users should implement their own DUTTool classes.
If they use a different port, they need to implement their own DUTPort class as well.
"""
from __future__ import print_function
import copy
import functools
import re
import sys
import threading
import time
# python2 and python3 queue package name is different
try:
import Queue as _queue
except ImportError:
import queue as _queue # type: ignore
try:
from typing import Callable, List
except ImportError:
# Only used for type annotations
pass
import serial
from serial.tools import list_ports
from . import Utility
class ExpectTimeout(ValueError):
""" timeout for expect method """
pass
class UnsupportedExpectItem(ValueError):
""" expect item not supported by the expect method """
pass
def _expect_lock(func):
@functools.wraps(func)
def handler(self, *args, **kwargs):
with self.expect_lock:
ret = func(self, *args, **kwargs)
return ret
return handler
def _decode_data(data):
""" for python3, if the data is bytes, then decode it to string """
if isinstance(data, bytes):
# convert bytes to string. This is a bit of a hack, we know that we want to log this
# later so encode to the stdout encoding with backslash escapes for anything non-encodable
try:
return data.decode(sys.stdout.encoding, 'backslashreplace')
except UnicodeDecodeError: # Python <3.5 doesn't support backslashreplace
return data.decode(sys.stdout.encoding, 'replace')
return data
def _pattern_to_string(pattern):
try:
ret = 'RegEx: ' + pattern.pattern
except AttributeError:
ret = pattern
return ret
class _DataCache(_queue.Queue):
"""
Data cache based on Queue. Allows users to process the data cache as bytes instead of Queue items.
"""
def __init__(self, maxsize=0):
_queue.Queue.__init__(self, maxsize=maxsize)
self.data_cache = str()
def _move_from_queue_to_cache(self):
"""
move all of the available data in the queue to cache
:return: True if moved any item from queue to data cache, else False
"""
ret = False
while True:
try:
self.data_cache += _decode_data(self.get(0))
ret = True
except _queue.Empty:
break
return ret
def get_data(self, timeout=0.0):
"""
get a copy of data from cache.
:param timeout: timeout for waiting new queue item
:return: copy of data cache
"""
# make sure timeout is non-negative
if timeout < 0:
timeout = 0
ret = self._move_from_queue_to_cache()
if not ret:
# we only wait for new data if we can't provide a new data_cache
try:
data = self.get(timeout=timeout)
self.data_cache += _decode_data(data)
except _queue.Empty:
# don't do anything when there's no update for the cache
pass
return copy.deepcopy(self.data_cache)
def flush(self, index=0xFFFFFFFF):
"""
flush data from cache.
:param index: if < 0 then don't do flush, otherwise flush data before index
:return: None
"""
# first add data in queue to cache
self.get_data()
if index > 0:
self.data_cache = self.data_cache[index:]
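As an illustration of the semantics described in the docstrings above (this snippet is not part of the original file):

```python
# Illustrative only: bytes put by the receive thread accumulate into a
# string cache; flush(index) discards everything before the index.
cache = _DataCache()
cache.put(b'Hello ')
cache.put(b'world\n')
print(cache.get_data(timeout=0))   # -> 'Hello world\n'
cache.flush(6)                     # drop the first 6 characters
print(cache.get_data(timeout=0))   # -> 'world\n'
```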
class _LogThread(threading.Thread, _queue.Queue):
"""
We found that some SD cards on Raspberry Pi could have very bad performance.
It could take seconds to save a small amount of data.
If the DUT receives data and saves it as a log, then it stops receiving data until the log is saved.
This could lead to expect timeouts.
As a workaround to this issue, the ``BaseDUT`` class will create a thread to save logs.
Then data will be passed to ``expect`` as soon as it is received.
"""
def __init__(self):
threading.Thread.__init__(self, name='LogThread')
_queue.Queue.__init__(self, maxsize=0)
self.setDaemon(True)
self.flush_lock = threading.Lock()
def save_log(self, filename, data):
"""
:param filename: log file name
:param data: log data. Must be ``bytes``.
"""
self.put({'filename': filename, 'data': data})
def flush_data(self):
with self.flush_lock:
data_cache = dict()
while True:
# move all data from queue to data cache
try:
log = self.get_nowait()
try:
data_cache[log['filename']] += log['data']
except KeyError:
data_cache[log['filename']] = log['data']
except _queue.Empty:
break
# flush data
for filename in data_cache:
with open(filename, 'ab+') as f:
f.write(data_cache[filename])
def run(self):
while True:
time.sleep(1)
self.flush_data()
class RecvThread(threading.Thread):
CHECK_FUNCTIONS = [] # type: List[Callable]
""" DUT subclass can define a few check functions to process received data. """
def __init__(self, read, dut):
super(RecvThread, self).__init__()
self.exit_event = threading.Event()
self.setDaemon(True)
self.read = read
self.dut = dut
self.data_cache = dut.data_cache
self.recorded_data = dut.recorded_data
self.record_data_lock = dut.record_data_lock
self._line_cache = str()
def _line_completion(self, data):
"""
Usually check functions require one complete line to check against.
This method will do line completion for the first line, and strip incomplete last line.
"""
ret = self._line_cache
decoded_data = _decode_data(data)
# cache incomplete line to later process
lines = decoded_data.splitlines(True)
last_line = lines[-1]
if last_line[-1] != '\n':
if len(lines) == 1:
# only one line and the line is not finished, then append this to cache
self._line_cache += lines[-1]
ret = str()
else:
# more than one line and not finished, replace line cache
self._line_cache = lines[-1]
ret += ''.join(lines[:-1])
else:
# line finishes, flush cache
self._line_cache = str()
ret += decoded_data
return ret
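The caching behaviour is easier to see in a simplified, standalone mirror of the same logic (illustrative, not the original implementation):

```python
# Simplified stand-in for RecvThread._line_completion: an unfinished
# trailing line is cached and prepended to the next chunk.
_cache = ''

def complete_lines(data: str) -> str:
    global _cache
    lines = (_cache + data).splitlines(True)
    _cache = ''
    if lines and not lines[-1].endswith('\n'):
        _cache = lines.pop()   # keep the unfinished tail for the next call
    return ''.join(lines)

assert complete_lines('PacketOut:[aa') == ''              # tail cached
assert complete_lines('bb]\n') == 'PacketOut:[aabb]\n'    # line completed
assert complete_lines('done\npart') == 'done\n'           # 'part' cached
```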
def run(self):
while not self.exit_event.isSet():
raw_data = self.read(1000)
if raw_data:
# we need to do line completion before calling check functions,
# and check functions need to be called before the data is cached,
# otherwise check functions could be called after the case has finished
comp_data = self._line_completion(raw_data)
for check_function in self.CHECK_FUNCTIONS:
check_function(self, comp_data)
with self.record_data_lock:
self.data_cache.put(raw_data)
for capture_id in self.recorded_data:
self.recorded_data[capture_id].put(raw_data)
def exit(self):
self.exit_event.set()
self.join()
class BaseDUT(object):
"""
:param name: application defined name for port
:param port: comport name, used to create DUT port
:param log_file: log file name
:param app: test app instance
:param kwargs: extra args for DUT to create ports
"""
DEFAULT_EXPECT_TIMEOUT = 10
MAX_EXPECT_FAILURES_TO_SAVED = 10
RECV_THREAD_CLS = RecvThread
TARGET = None
""" DUT subclass can specify RECV_THREAD_CLS to do add some extra stuff when receive data.
For example, DUT can implement exception detect & analysis logic in receive thread subclass. """
LOG_THREAD = _LogThread()
LOG_THREAD.start()
def __init__(self, name, port, log_file, app, **kwargs):
self.expect_lock = threading.Lock()
self.name = name
self.port = port
self.log_file = log_file
self.app = app
self.data_cache = _DataCache()
# the main processing of recorded data is done in the receive thread
# but the receive thread could be closed during the DUT lifetime (by tool methods)
# so we keep recorded data in BaseDUT, as their life cycles are the same
self.recorded_data = dict()
self.record_data_lock = threading.RLock()
self.receive_thread = None
self.expect_failures = []
self._port_open()
self.start_receive()
def __str__(self):
return 'DUT({}: {})'.format(self.name, str(self.port))
def _save_expect_failure(self, pattern, data, start_time):
"""
Save expect failure. If the test fails, then it will print the expect failures.
In some cases, users will handle expect exceptions themselves.
The expect failures could be false alarms, and a test case might generate a lot of such failures.
Therefore, we don't print the failure immediately and limit the max size of failure list.
"""
self.expect_failures.insert(0, {'pattern': pattern, 'data': data,
'start': start_time, 'end': time.time()})
self.expect_failures = self.expect_failures[:self.MAX_EXPECT_FAILURES_TO_SAVED]
def _save_dut_log(self, data):
"""
Save DUT log into file using another thread.
This is a workaround for some devices that take a long time for file system operations.
See descriptions in ``_LogThread`` for details.
"""
self.LOG_THREAD.save_log(self.log_file, data)
# define for methods need to be overwritten by Port
@classmethod
def list_available_ports(cls):
"""
list all available ports.
subclass (port) must overwrite this method.
:return: list of available comports
"""
pass
def _port_open(self):
"""
open the port.
subclass (port) must overwrite this method.
:return: None
"""
pass
def _port_read(self, size=1):
"""
read from port. This method should not block for a long time, otherwise the receive thread can not exit.
subclass (port) must overwrite this method.
:param size: max size to read.
:return: read data.
"""
pass
def _port_write(self, data):
"""
write to port.
subclass (port) must overwrite this method.
:param data: data to write
:return: None
"""
pass
def _port_close(self):
"""
close port.
subclass (port) must overwrite this method.
:return: None
"""
pass
# methods that need to be overwritten by Tool
@classmethod
def confirm_dut(cls, port, **kwargs):
"""
confirm if it's a DUT; usually used when auto-detecting the DUT via Env config.
subclass (tool) must overwrite this method.
:param port: comport
:return: tuple of result (bool), and target (str)
"""
pass
def start_app(self):
"""
usually after we get a DUT, we need to do some extra work to let the App start.
For example, we need to reset->download->reset to let IDF application start on DUT.
subclass (tool) must overwrite this method.
:return: None
"""
pass
# methods that features raw port methods
def start_receive(self):
"""
Start thread to receive data.
:return: None
"""
self.receive_thread = self.RECV_THREAD_CLS(self._port_read, self)
self.receive_thread.start()
def stop_receive(self):
"""
stop the receiving thread for the port
:return: None
"""
if self.receive_thread:
self.receive_thread.exit()
self.LOG_THREAD.flush_data()
self.receive_thread = None
def close(self):
"""
permanently close the port
"""
self.stop_receive()
self._port_close()
@staticmethod
def u_to_bytearray(data):
"""
if data is not bytearray then it tries to convert it
:param data: data which needs to be checked and maybe transformed
"""
if isinstance(data, type(u'')):
try:
data = data.encode('utf-8')
except UnicodeEncodeError as e:
print(u'Cannot encode {} of type {}'.format(data, type(data)))
raise e
return data
def write(self, data, eol='\r\n', flush=True):
"""
:param data: data
:param eol: end of line pattern.
:param flush: whether to flush the received data cache before writing data.
usually we need to flush data before write,
to make sure we only process outputs generated by this write.
:return: None
"""
# do flush before write
if flush:
self.data_cache.flush()
# do write if data is provided
if data is not None:
self._port_write(self.u_to_bytearray(data) + self.u_to_bytearray(eol) if eol else self.u_to_bytearray(data))
@_expect_lock
def read(self, size=0xFFFFFFFF):
"""
read(size=0xFFFFFFFF)
read raw data. NOT suggested to use this method.
Only use it if expect method doesn't meet your requirement.
:param size: read size. default read all data
:return: read data
"""
data = self.data_cache.get_data(0)[:size]
self.data_cache.flush(size)
return data
def start_capture_raw_data(self, capture_id='default'):
"""
Sometimes applications want to get DUT raw data and use the ``expect`` method at the same time.
Capture methods provide a way to get raw data without affecting the ``expect`` or ``read`` methods.
If you call ``start_capture_raw_data`` with the same capture ID again, it will restart the capture on this ID.
:param capture_id: ID of capture. You can use different IDs to do different captures at the same time.
"""
with self.record_data_lock:
try:
# if capture starts on an existing ID, flush the old data and restart the capture
self.recorded_data[capture_id].flush()
except KeyError:
# otherwise, create new data cache
self.recorded_data[capture_id] = _DataCache()
def stop_capture_raw_data(self, capture_id='default'):
"""
Stop capture and get raw data.
This method should be used after ``start_capture_raw_data`` on the same capture ID.
:param capture_id: ID of capture.
:return: captured raw data between start capture and stop capture.
"""
with self.record_data_lock:
try:
ret = self.recorded_data[capture_id].get_data()
self.recorded_data.pop(capture_id)
except KeyError as e:
e.message = 'capture_id does not exist. ' \
'You should call start_capture_raw_data with same ID ' \
'before calling stop_capture_raw_data'
raise e
return ret
# expect related methods
@staticmethod
def _expect_str(data, pattern):
"""
protected method. check if string is matched in data cache.
:param data: data to process
:param pattern: string
:return: pattern if match succeed otherwise None
"""
index = data.find(pattern)
if index != -1:
ret = pattern
index += len(pattern)
else:
ret = None
return ret, index
@staticmethod
def _expect_re(data, pattern):
"""
protected method. check if re pattern is matched in data cache
:param data: data to process
:param pattern: compiled RegEx pattern
:return: match groups if match succeed otherwise None
"""
ret = None
if isinstance(pattern.pattern, bytes):
pattern = re.compile(_decode_data(pattern.pattern))
match = pattern.search(data)
if match:
ret = tuple(x for x in match.groups())
index = match.end()
else:
index = -1
return ret, index
EXPECT_METHOD = [
[type(re.compile('')), '_expect_re'],
[type(b''), '_expect_str'], # Python 2 & 3 hook to work without 'from builtins import str' from future
[type(u''), '_expect_str'],
]
def _get_expect_method(self, pattern):
"""
protected method. get expect method according to pattern type.
:param pattern: expect pattern, string or compiled RegEx
:return: ``_expect_str`` or ``_expect_re``
"""
for expect_method in self.EXPECT_METHOD:
if isinstance(pattern, expect_method[0]):
method = expect_method[1]
break
else:
raise UnsupportedExpectItem()
return self.__getattribute__(method)
@_expect_lock
def expect(self, pattern, timeout=DEFAULT_EXPECT_TIMEOUT, full_stdout=False):
"""
expect(pattern, timeout=DEFAULT_EXPECT_TIMEOUT)
expect received data on DUT to match the pattern. Will raise an exception on expect timeout.
:raise ExpectTimeout: failed to find the pattern before timeout
:raise UnsupportedExpectItem: pattern is not string or compiled RegEx
:param pattern: string or compiled RegEx(string pattern)
:param timeout: timeout for expect
:param full_stdout: return full stdout until meet expect string/pattern or just matched string
:return: string if pattern is string; matched groups if pattern is RegEx
"""
method = self._get_expect_method(pattern)
stdout = ''
# non-blocking get data for first time
data = self.data_cache.get_data(0)
start_time = time.time()
while True:
ret, index = method(data, pattern)
if ret is not None:
stdout = data[:index]
self.data_cache.flush(index)
break
time_remaining = start_time + timeout - time.time()
if time_remaining < 0:
break
# wait for new data from cache
data = self.data_cache.get_data(time_remaining)
if ret is None:
pattern = _pattern_to_string(pattern)
self._save_expect_failure(pattern, data, start_time)
raise ExpectTimeout(self.name + ': ' + pattern)
return stdout if full_stdout else ret
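A usage sketch for ``expect`` (illustrative; ``dut`` is assumed to be an instance of a BaseDUT subclass):

```python
import re

# string pattern: returns the matched string; compiled regex: returns groups
try:
    dut.expect('wifi connected', timeout=30)
    ip, = dut.expect(re.compile(r'got ip: (\d+\.\d+\.\d+\.\d+)'), timeout=30)
except ExpectTimeout:
    dut.print_debug_info()   # print the saved expect failures
    raise
```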
def _expect_multi(self, expect_all, expect_item_list, timeout):
"""
protected method. internal logical for expect multi.
:param expect_all: True or False, expect all items in the list or any in the list
:param expect_item_list: expect item list
:param timeout: timeout
:return: None
"""
def process_expected_item(item_raw):
# convert item raw data to standard dict
item = {
'pattern': item_raw[0] if isinstance(item_raw, tuple) else item_raw,
'method': self._get_expect_method(item_raw[0] if isinstance(item_raw, tuple)
else item_raw),
'callback': item_raw[1] if isinstance(item_raw, tuple) else None,
'index': -1,
'ret': None,
}
return item
expect_items = [process_expected_item(x) for x in expect_item_list]
# non-blocking get data for first time
data = self.data_cache.get_data(0)
start_time = time.time()
matched_expect_items = list()
while True:
for expect_item in expect_items:
if expect_item not in matched_expect_items:
# exclude those already matched
expect_item['ret'], expect_item['index'] = \
expect_item['method'](data, expect_item['pattern'])
if expect_item['ret'] is not None:
# match succeed for one item
matched_expect_items.append(expect_item)
# if expect all, then all items need to be matched,
# else only one item needs to match
if expect_all:
match_succeed = len(matched_expect_items) == len(expect_items)
else:
match_succeed = bool(matched_expect_items)
time_remaining = start_time + timeout - time.time()
if time_remaining < 0 or match_succeed:
break
else:
data = self.data_cache.get_data(time_remaining)
if match_succeed:
# sort matched items according to order of appearance in the input data,
# so that the callbacks are invoked in correct order
matched_expect_items = sorted(matched_expect_items, key=lambda it: it['index'])
# invoke callbacks and flush matched data cache
slice_index = -1
for expect_item in matched_expect_items:
# trigger callback
if expect_item['callback']:
expect_item['callback'](expect_item['ret'])
slice_index = max(slice_index, expect_item['index'])
# flush already matched data
self.data_cache.flush(slice_index)
else:
pattern = str([_pattern_to_string(x['pattern']) for x in expect_items])
self._save_expect_failure(pattern, data, start_time)
raise ExpectTimeout(self.name + ': ' + pattern)
@_expect_lock
def expect_any(self, *expect_items, **timeout):
"""
expect_any(*expect_items, timeout=DEFAULT_TIMEOUT)
expect any of the patterns.
will call the callback (if provided) if a pattern match succeeds, and then return.
will pass the match result to the callback.
:raise ExpectTimeout: failed to match any one of the expect items before timeout
:raise UnsupportedExpectItem: pattern in expect_item is not string or compiled RegEx
:arg expect_items: one or more expect items.
string, compiled RegEx pattern or (string or RegEx(string pattern), callback)
:keyword timeout: timeout for expect
:return: None
"""
# to be compatible with python2
# in python3 we can write f(self, *expect_items, timeout=DEFAULT_TIMEOUT)
if 'timeout' not in timeout:
timeout['timeout'] = self.DEFAULT_EXPECT_TIMEOUT
return self._expect_multi(False, expect_items, **timeout)
@_expect_lock
def expect_all(self, *expect_items, **timeout):
"""
expect_all(*expect_items, timeout=DEFAULT_TIMEOUT)
expect all of the patterns.
will call the callbacks (if provided) if all pattern matches succeed, and then return.
will pass the match results to the callbacks.
:raise ExpectTimeout: failed to match all of the expect items before timeout
:raise UnsupportedExpectItem: pattern in expect_item is not string or compiled RegEx
:arg expect_items: one or more expect items.
string, compiled RegEx pattern or (string or RegEx(string pattern), callback)
:keyword timeout: timeout for expect
:return: None
"""
# to be compatible with python2
# in python3 we can write f(self, *expect_items, timeout=DEFAULT_TIMEOUT)
if 'timeout' not in timeout:
timeout['timeout'] = self.DEFAULT_EXPECT_TIMEOUT
return self._expect_multi(True, expect_items, **timeout)
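And a sketch of ``expect_any`` with a ``(pattern, callback)`` tuple, as described in the docstrings above (``dut`` again assumed to be a live DUT instance):

```python
import re

def on_boot(result):
    print('boot matched:', result)

# returns once any one item matches; the callback receives the match result
dut.expect_any(
    ('BOOT OK', on_boot),           # (pattern, callback) tuple
    re.compile(r'error: (\w+)'),    # bare compiled pattern, no callback
    timeout=15,
)
```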
@staticmethod
def _format_ts(ts):
return '{}:{}'.format(time.strftime('%m-%d %H:%M:%S', time.localtime(ts)), str(ts % 1)[2:5])
def print_debug_info(self):
"""
Print debug info of current DUT. Currently we will print debug info for expect failures.
"""
Utility.console_log('DUT debug info for DUT: {}:'.format(self.name), color='orange')
for failure in self.expect_failures:
Utility.console_log(u'\t[pattern]: {}\r\n\t[data]: {}\r\n\t[time]: {} - {}\r\n'
.format(failure['pattern'], failure['data'],
self._format_ts(failure['start']), self._format_ts(failure['end'])),
color='orange')
class SerialDUT(BaseDUT):
""" serial with logging received data feature """
DEFAULT_UART_CONFIG = {
'baudrate': 115200,
'bytesize': serial.EIGHTBITS,
'parity': serial.PARITY_NONE,
'stopbits': serial.STOPBITS_ONE,
'timeout': 0.05,
'xonxoff': False,
'rtscts': False,
}
def __init__(self, name, port, log_file, app, **kwargs):
self.port_inst = None
self.serial_configs = self.DEFAULT_UART_CONFIG.copy()
for uart_config_name in self.serial_configs.keys():
if uart_config_name in kwargs:
self.serial_configs[uart_config_name] = kwargs[uart_config_name]
super(SerialDUT, self).__init__(name, port, log_file, app, **kwargs)
def _format_data(self, data):
"""
format data for logging. do decode and add timestamp.
:param data: raw data from read
:return: formatted data (str)
"""
timestamp = '[{}]'.format(self._format_ts(time.time()))
formatted_data = timestamp.encode() + b'\r\n' + data + b'\r\n'
return formatted_data
def _port_open(self):
self.port_inst = serial.serial_for_url(self.port, **self.serial_configs)
def _port_close(self):
self.port_inst.close()
def _port_read(self, size=1):
data = self.port_inst.read(size)
if data:
self._save_dut_log(self._format_data(data))
return data
def _port_write(self, data):
if isinstance(data, str):
data = data.encode()
self.port_inst.write(data)
@classmethod
def list_available_ports(cls):
return [x.device for x in list_ports.comports()]

View File

@ -1,193 +0,0 @@
# SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
""" Test Env, manages DUT, App and EnvConfig, interface for test cases to access these components """
import functools
import os
import threading
import traceback
import netifaces
from . import EnvConfig
def _synced(func):
@functools.wraps(func)
def decorator(self, *args, **kwargs):
with self.lock:
ret = func(self, *args, **kwargs)
return ret
decorator.__doc__ = func.__doc__
return decorator
class Env(object):
"""
test env, manages DUTs and env configs.
:keyword app: class for default application
:keyword dut: class for default DUT
:keyword env_tag: test env tag, used to select configs from env config file
:keyword env_config_file: test env config file path
:keyword test_name: test suite name, used when generate log folder name
"""
CURRENT_LOG_FOLDER = ''
def __init__(self,
app=None,
dut=None,
env_tag=None,
env_config_file=None,
test_suite_name=None,
**kwargs):
self.app_cls = app
self.default_dut_cls = dut
self.config = EnvConfig.Config(env_config_file, env_tag)
self.log_path = self.app_cls.get_log_folder(test_suite_name)
if not os.path.exists(self.log_path):
os.makedirs(self.log_path)
Env.CURRENT_LOG_FOLDER = self.log_path
self.allocated_duts = dict()
self.lock = threading.RLock()
@_synced
def get_dut(self, dut_name, app_path, dut_class=None, app_class=None, app_config_name=None, **dut_init_args):
"""
get_dut(dut_name, app_path, dut_class=None, app_class=None)
:param dut_name: user defined name for DUT
:param app_path: application path, app instance will use this path to process application info
:param dut_class: dut class, if not specified will use default dut class of env
:param app_class: app class, if not specified will use default app of env
:param app_config_name: app build config
:keyword dut_init_args: extra kwargs used when creating DUT instance
:return: dut instance
"""
if dut_name in self.allocated_duts:
dut = self.allocated_duts[dut_name]['dut']
else:
if dut_class is None:
dut_class = self.default_dut_cls
if app_class is None:
app_class = self.app_cls
app_target = dut_class.TARGET
detected_target = None
try:
port = self.config.get_variable(dut_name)
if not app_target:
result, detected_target = dut_class.confirm_dut(port)
except ValueError:
# try to auto detect ports
allocated_ports = [self.allocated_duts[x]['port'] for x in self.allocated_duts]
available_ports = dut_class.list_available_ports()
for port in available_ports:
if port not in allocated_ports:
result, detected_target = dut_class.confirm_dut(port)
if result:
break
else:
port = None
if not app_target:
app_target = detected_target
if not app_target:
raise ValueError("DUT class doesn't specify the target, and autodetection failed")
app_inst = app_class(app_path, app_config_name, app_target)
if port:
try:
dut_config = self.get_variable(dut_name + '_port_config')
except ValueError:
dut_config = dict()
dut_config.update(dut_init_args)
dut = dut_class(dut_name, port,
os.path.join(self.log_path, dut_name + '.txt'),
app_inst,
**dut_config)
self.allocated_duts[dut_name] = {'port': port, 'dut': dut}
else:
raise ValueError('Failed to get DUT')
return dut
@_synced
def close_dut(self, dut_name):
"""
close_dut(dut_name)
close one DUT by name if the DUT name is valid (the name used by ``get_dut``); otherwise do nothing.
:param dut_name: user defined name for DUT
:return: None
"""
try:
dut = self.allocated_duts.pop(dut_name)['dut']
dut.close()
except KeyError:
pass
@_synced
def get_variable(self, variable_name):
"""
get_variable(variable_name)
get variable from config file. If that fails, then try to auto-detect it.
:param variable_name: name of the variable
:return: value of variable if successfully found. otherwise None.
"""
return self.config.get_variable(variable_name)
PROTO_MAP = {
'ipv4': netifaces.AF_INET,
'ipv6': netifaces.AF_INET6,
'mac': netifaces.AF_LINK,
}
@_synced
def get_pc_nic_info(self, nic_name='pc_nic', proto='ipv4'):
"""
get_pc_nic_info(nic_name="pc_nic")
try to get info of a specified NIC and protocol.
:param nic_name: pc nic name. Allows passing either a variable name or a NIC name value.
:param proto: "ipv4", "ipv6" or "mac"
:return: a dict of nic info if successfully found. otherwise None.
nic info keys could be different for different protocols.
key "addr" is available for both mac, ipv4 and ipv6 pic info.
"""
interfaces = netifaces.interfaces()
if nic_name in interfaces:
# the name is in the interface list, we regard it as NIC name
if_addr = netifaces.ifaddresses(nic_name)
else:
# it's not in interface name list, we assume it's variable name
_nic_name = self.get_variable(nic_name)
if_addr = netifaces.ifaddresses(_nic_name)
return if_addr[self.PROTO_MAP[proto]][0]
@_synced
def close(self, dut_debug=False):
"""
close()
close all DUTs of the Env.
:param dut_debug: if dut_debug is True, then print all DUT expect failures before closing it
:return: exceptions raised while closing DUTs
"""
dut_close_errors = []
for dut_name in self.allocated_duts:
dut = self.allocated_duts[dut_name]['dut']
try:
if dut_debug:
dut.print_debug_info()
dut.close()
except Exception as e:
traceback.print_exc()
dut_close_errors.append(e)
self.allocated_duts = dict()
return dut_close_errors
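For context, an illustrative end-to-end use of ``Env`` (normally the ``ttfw_idf`` decorators construct this; ``ESP32DUT`` appears elsewhere in this diff, while the ``IDFApp`` class name and the app path are assumptions):

```python
# Hypothetical direct use of Env; ttfw_idf normally supplies app/dut classes.
import ttfw_idf
from tiny_test_fw import Env

env = Env.Env(app=ttfw_idf.IDFApp,        # assumed app class name
              dut=ttfw_idf.ESP32DUT,
              env_tag='Example_WIFI',
              env_config_file='EnvConfig.yml',
              test_suite_name='wifi_tests')
dut = env.get_dut('dut1', 'examples/get-started/hello_world')
dut.start_app()
env.close(dut_debug=True)   # prints expect failures for each DUT, then closes
```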

View File

@ -1,63 +0,0 @@
# SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
"""
The test env could change when we run tests from different computers.
The test env config provides the ``get_variable`` method to allow users to get test-environment-related variables.
It will first try to get the variable from the config file.
If that fails, it will try to auto-detect it (not supported yet).
The config file format is YAML. It's a set of key-value pairs. The following is an example of a config file::
Example_WIFI:
ap_ssid: "myssid"
ap_password: "mypassword"
Example_ShieldBox:
attenuator_port: "/dev/ttyUSB2"
ap_ssid: "myssid"
ap_password: "mypassword"
It first defines the env tag for each environment, then adds its key-value pairs.
This prevents test cases from getting configs of other envs when one file contains configs for multiple envs.
"""
import yaml
from yaml import Loader as Loader
class Config(object):
""" Test Env Config """
def __init__(self, config_file, env_tag):
self.configs = self.load_config_file(config_file, env_tag)
@staticmethod
def load_config_file(config_file, env_name):
"""
load configs from config file.
:param config_file: config file path
:param env_name: env tag name
:return: configs for the test env
"""
try:
with open(config_file) as f:
configs = yaml.load(f, Loader=Loader)[env_name]
except (OSError, TypeError, IOError, KeyError):
configs = dict()
return configs
def get_variable(self, variable_name):
"""
First try to get the variable from the config file. If not found, try to auto detect it.
:param variable_name: name of variable
:return: value of the variable; raises ValueError if it cannot be found.
"""
try:
value = self.configs[variable_name]
except KeyError:
# TODO: to support auto get variable here
value = None
if value is None:
raise ValueError('Failed to get variable')
return value
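# A short sketch of how Config resolves variables, assuming an env config
# file shaped like the module docstring example (the template file name used
# elsewhere in the framework):
if __name__ == '__main__':
    config = Config('EnvConfigTemplate.yml', env_tag='Example_WIFI')
    print(config.get_variable('ap_ssid'))  # -> myssid
    # get_variable raises ValueError for keys missing from the selected env tag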

View File

@ -1,6 +0,0 @@
.external_ap: &external_ap
ap_ssid: "myssid"
ap_password: "mypassword"
Examples_WIFI:
<<: *external_ap
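# Note the '*' alias above: a YAML 1.1 loader expands the '<<' merge key,
# so Examples_WIFI resolves to the following mapping (a sketch of the
# loaded result, shown here for illustration):
#
#   Examples_WIFI:
#     ap_ssid: "myssid"
#     ap_password: "mypassword"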

View File

@ -1,231 +0,0 @@
# SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
""" Interface for test cases. """
import functools
import os
import socket
import time
from datetime import datetime
import junit_xml
from . import DUT, App, Env, Utility
from .Utility import format_case_id
class TestCaseFailed(AssertionError):
def __init__(self, *cases):
"""
Raise this exception if one or more test cases fail in a 'normal' way (i.e. the test runs but fails; no unexpected exceptions)
This avoids dumping the Python stack trace, because the assumption is that the junit error info and the full job log already have
enough information for a developer to debug.
The 'cases' argument is the names of one or more test cases
"""
message = 'Test case{} failed: {}'.format('s' if len(cases) > 1 else '', ', '.join(str(c) for c in cases))
super(TestCaseFailed, self).__init__(message)
class DefaultEnvConfig(object):
"""
Default test configs. There are 3 places to set configs; priority is (high -> low):
1. overwrite set by caller of test method
2. values set by test_method decorator
3. default env config get from this class
"""
DEFAULT_CONFIG = {
'app': App.BaseApp,
'dut': DUT.BaseDUT,
'env_tag': 'default',
'env_config_file': None,
'test_suite_name': None,
}
@classmethod
def set_default_config(cls, **kwargs):
"""
:param kwargs: configs need to be updated
:return: None
"""
cls.DEFAULT_CONFIG.update(kwargs)
@classmethod
def get_default_config(cls):
"""
:return: current default config
"""
return cls.DEFAULT_CONFIG.copy()
set_default_config = DefaultEnvConfig.set_default_config
get_default_config = DefaultEnvConfig.get_default_config
MANDATORY_INFO = {
'execution_time': 1,
'env_tag': 'default',
'category': 'function',
'ignore': False,
}
class JunitReport(object):
# wrapper for junit test report
# TODO: JunitReport methods are not thread safe (although not likely to be used this way).
JUNIT_FILE_NAME = 'XUNIT_RESULT.xml'
JUNIT_DEFAULT_TEST_SUITE = 'test-suite'
JUNIT_TEST_SUITE = junit_xml.TestSuite(JUNIT_DEFAULT_TEST_SUITE,
hostname=socket.gethostname(),
timestamp=datetime.utcnow().isoformat())
JUNIT_CURRENT_TEST_CASE = None
_TEST_CASE_CREATED_TS = 0
@classmethod
def output_report(cls, junit_file_path):
""" Output current test result to file. """
with open(os.path.join(junit_file_path, cls.JUNIT_FILE_NAME), 'w') as f:
junit_xml.to_xml_report_file(f, [cls.JUNIT_TEST_SUITE], prettyprint=False)
@classmethod
def get_current_test_case(cls):
"""
By default, the test framework will handle the junit test report automatically.
However, some test cases might want to add extra info to the test report.
They can use this method to get the current test case created by the test framework.
:return: current junit test case instance created by ``JunitTestReport.create_test_case``
"""
return cls.JUNIT_CURRENT_TEST_CASE
@classmethod
def test_case_finish(cls, test_case):
"""
Append the test case to test suite so it can be output to file.
Execution time will be automatically updated (compared to ``create_test_case``).
"""
test_case.elapsed_sec = time.time() - cls._TEST_CASE_CREATED_TS
cls.JUNIT_TEST_SUITE.test_cases.append(test_case)
@classmethod
def create_test_case(cls, name):
"""
Extend ``junit_xml.TestCase`` with:
1. save the created test case so it can be retrieved by ``get_current_test_case``
2. log the creation timestamp, so ``elapsed_sec`` can be auto-updated in ``test_case_finish``.
:param name: test case name
:return: instance of ``junit_xml.TestCase``
"""
# set stdout to empty string, so we can always append string to stdout.
# It won't affect output logic. If stdout is empty, it won't be put to report.
test_case = junit_xml.TestCase(name, stdout='')
cls.JUNIT_CURRENT_TEST_CASE = test_case
cls._TEST_CASE_CREATED_TS = time.time()
return test_case
@classmethod
def update_performance(cls, performance_items):
"""
Update performance results to ``stdout`` of current test case.
:param performance_items: a list of performance items. each performance item is a key-value pair.
"""
assert cls.JUNIT_CURRENT_TEST_CASE
for item in performance_items:
cls.JUNIT_CURRENT_TEST_CASE.stdout += '[Performance][{}]: {}\n'.format(item[0], item[1])
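# A brief sketch of the JunitReport life cycle, as driven by test_method
# below (the case name and log directory here are illustrative):
def _example_junit_flow(log_dir='.'):
    case = JunitReport.create_test_case('esp32.default.my_case')
    JunitReport.update_performance([('connect_time_ms', 42)])
    JunitReport.test_case_finish(case)  # fills in elapsed_sec and appends to the suite
    JunitReport.output_report(log_dir)  # writes XUNIT_RESULT.xml under log_dir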
def test_method(**kwargs):
"""
Decorator for test case functions.
The following keyword arguments are pre-defined.
Any other keyword arguments will be regarded as filters for the test case,
accessible via the ``case_info`` attribute of the test method.
:keyword app: class for test app. see :doc:`App <App>` for details
:keyword dut: class for current dut. see :doc:`DUT <DUT>` for details
:keyword env_tag: name for test environment, used to select configs from config file
:keyword env_config_file: test env config file. Usually not set when defining a case
:keyword test_suite_name: test suite name, used for generating the log folder name and adding the xunit format test result.
Usually not set when defining a case
:keyword junit_report_by_case: by default the test fw will handle junit report generation.
In some cases, one test function might test many test cases.
If this flag is set, the test case can update the junit report on its own.
"""
def test(test_func):
case_info = MANDATORY_INFO.copy()
case_info['name'] = case_info['ID'] = test_func.__name__
case_info['junit_report_by_case'] = False
case_info.update(kwargs)
@functools.wraps(test_func)
def handle_test(extra_data=None, **overwrite):
"""
create env, run test and record test results
:param extra_data: extra data that runner or main passed to test case
:param overwrite: args that runner or main want to overwrite
:return: None
"""
# create env instance
env_config = DefaultEnvConfig.get_default_config()
for key in kwargs:
if key in env_config:
env_config[key] = kwargs[key]
env_config.update(overwrite)
env_inst = Env.Env(**env_config)
# prepare for xunit test results
junit_file_path = env_inst.app_cls.get_log_folder(env_config['test_suite_name'])
junit_test_case = JunitReport.create_test_case(format_case_id(case_info['ID'],
target=env_inst.default_dut_cls.TARGET))
result = False
unexpected_error = False
try:
Utility.console_log('starting running test: ' + test_func.__name__, color='green')
# execute test function
test_func(env_inst, extra_data)
# if finish without exception, test result is True
result = True
except TestCaseFailed as e:
junit_test_case.add_failure_info(str(e))
except Exception as e:
Utility.handle_unexpected_exception(junit_test_case, e)
unexpected_error = True
finally:
# do close all DUTs, if result is False then print DUT debug info
close_errors = env_inst.close(dut_debug=(not result))
# We have a hook in DUT close, allowing the DUT to raise an error to fail the test case.
# For example, we don't allow DUT exceptions (resets) during test execution.
# We don't want to implement exception detection in the test function logic,
# as we would need to add it to every test case.
# Instead we can implement it in the DUT receive thread,
# and raise an exception in DUT close to fail the test case if a reset is detected.
if close_errors:
for error in close_errors:
junit_test_case.add_failure_info('env close error: {}'.format(error))
result = False
if not case_info['junit_report_by_case'] or unexpected_error:
JunitReport.test_case_finish(junit_test_case)
# end case and output result
JunitReport.output_report(junit_file_path)
if result:
Utility.console_log('Test Succeed: ' + test_func.__name__, color='green')
else:
Utility.console_log(('Test Fail: ' + test_func.__name__), color='red')
return result
handle_test.case_info = case_info
handle_test.test_method = True
return handle_test
return test
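# A minimal decorated case (a sketch; the env_tag value, app path, and
# expected output are illustrative). Invoking my_case() builds the Env,
# runs the body, records the junit result, and returns True/False:
@test_method(env_tag='Example_WIFI', execution_time=1)
def my_case(env, extra_data):
    dut = env.get_dut('dut1', 'examples/get-started/hello_world')
    dut.expect('Hello world!')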

View File

@ -1,332 +0,0 @@
# SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
"""
Common logic to assign test cases to CI jobs.
Some background knowledge about Gitlab CI and its usage flow in esp-idf:
* Gitlab CI jobs are static in ``.gitlab-ci.yml``. We can't dynamically create test jobs
* For test jobs running on DUTs, we use ``tags`` to select runners with different test environments
* We have an ``assign_test`` stage, which collects cases and then assigns them to the correct test jobs
* ``assign_test`` will fail if it fails to assign any case
* with ``assign_test``, we can:
* dynamically filter the test cases we want to test
* alert users if they forget to add CI jobs, and guide them on how to add test jobs
* the last step of ``assign_test`` is to output config files; test jobs will then run these cases
The basic logic to assign test cases is as follows:
1. search all the cases
2. filter cases (if a filter is specified by @bot)
3. put cases into different groups according to the rules of ``Group``
* try to put them in existing groups
* if that fails, create a new group and add the case
4. parse and filter the test jobs from the CI config file
5. try to assign all groups to jobs according to tags
6. output config files for the jobs
"""
import json
import os
import re
import yaml
try:
from yaml import CLoader
has_cloader = True
except ImportError:
has_cloader = False
from yaml import Loader
from . import CaseConfig, GitlabCIJob, SearchCases, console_log
class Group(object):
MAX_EXECUTION_TIME = 30
MAX_CASE = 15
SORT_KEYS = ['env_tag']
# Matching CI job rules could be different from the way we want to group test cases.
# For example, when assigning unit test cases, different test cases need to use different test functions.
# We need to put them into different groups.
# But these groups can be assigned to jobs with the same tags, as they use the same test environment.
CI_JOB_MATCH_KEYS = SORT_KEYS
def __init__(self, case):
self.execution_time = 0
self.case_list = [case]
self.filters = dict(zip(self.SORT_KEYS, [self._get_case_attr(case, x) for x in self.SORT_KEYS]))
# we use ci_job_match_keys to match CI job tags. It's a set of required tags.
self.ci_job_match_keys = self._get_match_keys(case)
@staticmethod
def _get_case_attr(case, attr):
# we might use different types for cases (dict or test_func)
# this method gets an attribute from a case
return case.case_info[attr]
def _get_match_keys(self, case):
keys = []
for attr in self.CI_JOB_MATCH_KEYS:
val = self._get_case_attr(case, attr)
if isinstance(val, list):
keys.extend(val)
else:
keys.append(val)
return set(keys)
def accept_new_case(self):
"""
check if it is allowed to add more cases to this group
:return: True or False
"""
max_time = (sum([self._get_case_attr(x, 'execution_time') for x in self.case_list])
< self.MAX_EXECUTION_TIME)
max_case = (len(self.case_list) < self.MAX_CASE)
return max_time and max_case
def add_case(self, case):
"""
add case to current group
:param case: test case
:return: True if the case was added, else False
"""
added = False
if self.accept_new_case():
for key in self.filters:
if self._get_case_attr(case, key) != self.filters[key]:
break
else:
self.case_list.append(case)
added = True
return added
def add_extra_case(self, case):
"""
By default (the ``add_case`` method), cases are only added when all their filter values equal the group's.
But in some cases, we also want to add cases which are not the best fit.
For example, a group that can run cases requiring (A, B) can also accept cases requiring (A,) or (B,).
When assignment by best fit fails, we will use this method to try to assign the failed cases.
If a subclass wants to retry, it needs to overwrite this method.
The logic for handling such scenarios could differ from case to case.
:return: True if accepted else False
"""
pass
def output(self):
"""
output data for job configs
:return: {"Filter": case filter, "CaseConfig": list of case configs for cases in this group}
"""
output_data = {
'Filter': self.filters,
'CaseConfig': [{'name': self._get_case_attr(x, 'name')} for x in self.case_list],
}
return output_data
class AssignTest(object):
"""
Auto assign tests to CI jobs.
:param test_case_paths: path of test case file(s)
:param ci_config_file: path of ``.gitlab-ci.yml``
"""
# subclasses need to rewrite the CI test job pattern to filter all test jobs
CI_TEST_JOB_PATTERN = re.compile(r'^test_.+')
# by default we only run functional tests in CI, as other tests could take a long time
DEFAULT_FILTER = {
'category': 'function',
'ignore': False,
'supported_in_ci': True,
}
def __init__(self, test_case_paths, ci_config_file, case_group=Group):
self.test_case_paths = test_case_paths
self.test_case_file_pattern = None
self.test_cases = []
self.jobs = self._parse_gitlab_ci_config(ci_config_file)
self.case_group = case_group
@staticmethod
def _handle_parallel_attribute(job_name, job):
jobs_out = []
try:
for i in range(job['parallel']):
jobs_out.append(GitlabCIJob.Job(job, job_name + '_{}'.format(i + 1)))
except KeyError:
# Gitlab doesn't allow setting parallel to 1.
# to keep test job names in the same format ($CI_JOB_NAME_$CI_NODE_INDEX),
# we append '_' to jobs that don't have the parallel attribute
jobs_out.append(GitlabCIJob.Job(job, job_name + '_'))
return jobs_out
def _parse_gitlab_ci_config(self, ci_config_file):
with open(ci_config_file, 'r') as f:
ci_config = yaml.load(f, Loader=CLoader if has_cloader else Loader)
job_list = list()
for job_name in ci_config:
if 'pytest' in job_name:
continue
if self.CI_TEST_JOB_PATTERN.search(job_name) is not None:
job_list.extend(self._handle_parallel_attribute(job_name, ci_config[job_name]))
job_list.sort(key=lambda x: x['name'])
return job_list
def search_cases(self, case_filter=None):
"""
:param case_filter: filter for test cases. The filter used is the default filter updated with the case_filter param.
:return: filtered test case list
"""
_case_filter = self.DEFAULT_FILTER.copy()
if case_filter:
_case_filter.update(case_filter)
test_methods = SearchCases.Search.search_test_cases(self.test_case_paths, self.test_case_file_pattern)
return CaseConfig.filter_test_cases(test_methods, _case_filter)
def _group_cases(self):
"""
separate all cases into groups according to group rules. Each group will be executed by one CI job.
:return: test case groups.
"""
groups = []
for case in self.test_cases:
for group in groups:
# add to current group
if group.add_case(case):
break
else:
# create new group
groups.append(self.case_group(case))
return groups
def _assign_failed_cases(self, assigned_groups, failed_groups):
""" try to assign failed cases to already assigned test groups """
still_failed_groups = []
failed_cases = []
for group in failed_groups:
failed_cases.extend(group.case_list)
for case in failed_cases:
# first try to assign to already assigned groups
for group in assigned_groups:
if group.add_extra_case(case):
break
else:
# if failed, group the failed cases
for group in still_failed_groups:
if group.add_case(case):
break
else:
still_failed_groups.append(self.case_group(case))
return still_failed_groups
@staticmethod
def _apply_bot_filter():
"""
we support customizing CI tests with the bot.
Here we process the bot filters and return the filter which ``search_cases`` accepts.
:return: filter for searching test cases
"""
res = dict()
for bot_filter in [os.getenv('BOT_CASE_FILTER'), os.getenv('BOT_TARGET_FILTER')]:
if bot_filter:
res.update(json.loads(bot_filter))
return res
def _apply_bot_test_count(self):
"""
The bot can also pass a test count.
If filtered cases need to be tested several times, we duplicate them here.
"""
test_count = os.getenv('BOT_TEST_COUNT')
if test_count:
test_count = int(test_count)
self.test_cases *= test_count
@staticmethod
def _count_groups_by_keys(test_groups):
"""
Count the number of test groups by job match keys.
This is important information for updating the CI config file.
"""
group_count = dict()
for group in test_groups:
key = ','.join(group.ci_job_match_keys)
try:
group_count[key] += 1
except KeyError:
group_count[key] = 1
return group_count
def assign_cases(self):
"""
separate test cases to groups and assign test cases to CI jobs.
:raise RuntimeError: if it failed to assign any case to a CI job.
:return: None
"""
failed_to_assign = []
assigned_groups = []
case_filter = self._apply_bot_filter()
self.test_cases = self.search_cases(case_filter)
self._apply_bot_test_count()
test_groups = self._group_cases()
for group in test_groups:
for job in self.jobs:
if job.match_group(group):
job.assign_group(group)
assigned_groups.append(group)
break
else:
failed_to_assign.append(group)
if failed_to_assign:
failed_to_assign = self._assign_failed_cases(assigned_groups, failed_to_assign)
# print debug info
# total requirement of current pipeline
required_group_count = self._count_groups_by_keys(test_groups)
console_log('Required job count by tags:')
for tags in required_group_count:
console_log('\t{}: {}'.format(tags, required_group_count[tags]))
# number of unused jobs
not_used_jobs = [job for job in self.jobs if 'case group' not in job]
if not_used_jobs:
console_log('{} jobs not used. Please check if you defined too many jobs'.format(len(not_used_jobs)), 'O')
for job in not_used_jobs:
console_log('\t{}'.format(job['name']), 'O')
# failures
if failed_to_assign:
console_log('Too many test cases vs jobs to run. '
'Please increase parallel count in .gitlab/ci/target-test.yml '
'for jobs with specific tags:', 'R')
failed_group_count = self._count_groups_by_keys(failed_to_assign)
for tags in failed_group_count:
console_log('\t{}: {}'.format(tags, failed_group_count[tags]), 'R')
raise RuntimeError('Failed to assign test case to CI jobs')
def output_configs(self, output_path):
"""
:param output_path: path to output config files for each CI job
:return: None
"""
if not os.path.exists(output_path):
os.makedirs(output_path)
for job in self.jobs:
job.output_config(output_path)
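# A typical driver flow (a sketch; the paths are illustrative):
if __name__ == '__main__':
    assigner = AssignTest('components/', '.gitlab-ci.yml')
    assigner.assign_cases()  # raises RuntimeError if any case can't be assigned
    assigner.output_configs('assign_test_output/')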

View File

@ -1,246 +0,0 @@
# Copyright 2015-2017 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processing case config files.
This is mainly designed for CI, where we need to auto-create and assign test jobs.
Template Config File::
TestConfig:
app:
package: ttfw_idf
class: Example
dut:
path:
class:
config_file: /somewhere/config_file_for_runner
test_name: CI_test_job_1
Filter:
chip: ESP32
env_tag: default
CaseConfig:
- name: test_examples_protocol_https_request
# optional
extra_data: some extra data passed to case with kwarg extra_data
overwrite: # overwrite test configs
app:
package: ttfw_idf
class: Example
- name: xxx
"""
import importlib
import yaml
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader as Loader
from . import TestCase
def _convert_to_lower_case_bytes(item):
"""
bot filters are always lower-case strings.
This function converts all strings to lower case.
Note: Unicode strings are converted to bytes.
"""
if isinstance(item, (tuple, list)):
output = [_convert_to_lower_case_bytes(v) for v in item]
elif isinstance(item, type(b'')):
output = item.lower()
elif isinstance(item, type(u'')):
output = item.encode().lower()
else:
output = item
return output
def _filter_one_case(test_method, case_filter):
""" Apply filter for one case (the filter logic is the same as described in ``filter_test_cases``) """
filter_result = True
# filter keys are lower case. Map the lower-case keys to the original keys.
key_mapping = {x.lower(): x for x in test_method.case_info.keys()}
for orig_key in case_filter:
key = key_mapping.get(orig_key, orig_key)  # tolerate keys present in the filter only
if key in test_method.case_info:
# the filter key is both in case and filter
# we need to check if they match
filter_item = _convert_to_lower_case_bytes(case_filter[orig_key])
accepted_item = _convert_to_lower_case_bytes(test_method.case_info[key])
if isinstance(filter_item, (tuple, list)) \
and isinstance(accepted_item, (tuple, list)):
# both list/tuple, check if they have common item
filter_result = True if set(filter_item) & set(accepted_item) else False
elif isinstance(filter_item, (tuple, list)):
# filter item list/tuple, check if case accepted value in filter item list/tuple
filter_result = True if accepted_item in filter_item else False
elif isinstance(accepted_item, (tuple, list)):
# accepted item list/tuple, check if case filter value is in accept item list/tuple
filter_result = True if filter_item in accepted_item else False
else:
if type(filter_item) != type(accepted_item):
# This will catch silent ignores of test cases when Unicode and bytes are compared
raise AssertionError(filter_item, '!=', accepted_item)
# both string/int, just do string compare
filter_result = (filter_item == accepted_item)
else:
# the key is in the filter only, which means the case supports all values for this filter key, so the match succeeds
pass
if not filter_result:
# match failed
break
return filter_result
def filter_test_cases(test_methods, case_filter):
"""
Filter test cases. Filter logic:
1. if the filter key is in both the case attributes and the filter:
* if both values are string/int, compare directly
* if one is list/tuple and the other is string/int, check if the string/int is in the list/tuple
* if both are list/tuple, check if they have a common item
2. if only the case attributes or only the filter has the key, the filter succeeds
3. string comparison is case insensitive
for example, the following are successful match scenarios
(the rule is symmetric; the result is the same if the user filter and case attribute values are exchanged):
* user case filter is ``chip: ["esp32", "esp32c"]``, case doesn't have ``chip`` attribute
* user case filter is ``chip: ["esp32", "esp32c"]``, case attribute is ``chip: "esp32"``
* user case filter is ``chip: "esp32"``, case attribute is ``chip: "esp32"``
:param test_methods: a list of test method functions
:param case_filter: case filter
:return: filtered test methods
"""
filtered_test_methods = []
for test_method in test_methods:
if _filter_one_case(test_method, case_filter):
filtered_test_methods.append(test_method)
return filtered_test_methods
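# A small illustration of the matching rules above (a sketch; the fake case
# object only needs a ``case_info`` dict for the filter to work):
def _demo_filter():
    from types import SimpleNamespace
    fake = SimpleNamespace(case_info={'chip': 'ESP32'})
    assert filter_test_cases([fake], {'chip': ['esp32', 'esp32c']}) == [fake]  # scalar-in-list match
    assert filter_test_cases([fake], {'chip': 'esp32s2'}) == []  # case-insensitive mismatch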
class Parser(object):
DEFAULT_CONFIG = {
'TestConfig': dict(),
'Filter': dict(),
'CaseConfig': [{'extra_data': None}],
}
@classmethod
def parse_config_file(cls, config_file):
"""
parse the config file and then update the default config with it.
:param config_file: config file path
:return: configs
"""
configs = cls.DEFAULT_CONFIG.copy()
if config_file:
with open(config_file, 'r') as f:
configs.update(yaml.load(f, Loader=Loader))
return configs
@classmethod
def handle_overwrite_args(cls, overwrite):
"""
handle overwrite configs: import the module from the package and then get the required class.
:param overwrite: overwrite args
:return: dict of (original key: class)
"""
output = dict()
for key in overwrite:
module = importlib.import_module(overwrite[key]['package'])
output[key] = module.__getattribute__(overwrite[key]['class'])
return output
@classmethod
def apply_config(cls, test_methods, config_file):
"""
apply configs to test methods
:param test_methods: a list of test method functions
:param config_file: case config file
:return: filtered cases
"""
configs = cls.parse_config_file(config_file)
test_case_list = []
for _config in configs['CaseConfig']:
_filter = configs['Filter'].copy()
_overwrite = cls.handle_overwrite_args(_config.pop('overwrite', dict()))
_extra_data = _config.pop('extra_data', None)
_filter.update(_config)
# Try to get target from yml
try:
_target = _filter['target']
except KeyError:
_target = None
else:
_overwrite.update({'target': _target})
for test_method in test_methods:
if _filter_one_case(test_method, _filter):
try:
dut_dict = test_method.case_info['dut_dict']
except (AttributeError, KeyError):
dut_dict = None
if dut_dict and _target:
dut = test_method.case_info.get('dut')
if _target.upper() in dut_dict:
if dut and dut in dut_dict.values(): # don't overwrite special cases
_overwrite.update({'dut': dut_dict[_target.upper()]})
else:
raise ValueError('target {} is not in the specified dut_dict'.format(_target))
test_case_list.append(TestCase.TestCase(test_method, _extra_data, **_overwrite))
return test_case_list
class Generator(object):
""" Case config file generator """
def __init__(self):
self.default_config = {
'TestConfig': dict(),
'Filter': dict(),
}
def set_default_configs(self, test_config, case_filter):
"""
:param test_config: "TestConfig" value
:param case_filter: "Filter" value
:return: None
"""
self.default_config = {'TestConfig': test_config, 'Filter': case_filter}
def generate_config(self, case_configs, output_file):
"""
:param case_configs: "CaseConfig" value
:param output_file: output file path
:return: None
"""
config = self.default_config.copy()
config.update({'CaseConfig': case_configs})
with open(output_file, 'w') as f:
yaml.dump(config, f)
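# Sketch of generating a case config file that Parser above can consume
# (the job name and case name are illustrative):
if __name__ == '__main__':
    gen = Generator()
    gen.set_default_configs(test_config=dict(), case_filter={'env_tag': 'default'})
    gen.generate_config([{'name': 'test_examples_protocol_https_request'}],
                        'CI_test_job_1.yml')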

View File

@ -1,65 +0,0 @@
# Copyright 2015-2017 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import yaml
class Job(dict):
"""
Gitlab CI job
:param job: job data loaded from .gitlab-ci.yml
:param job_name: job name
"""
def __init__(self, job, job_name):
super(Job, self).__init__(job)
self['name'] = job_name
self.tags = set(self['tags'])
def match_group(self, group):
"""
Match group by the tags of the job.
The group's ``ci_job_match_keys`` must equal the job's tags.
:param group: case group to match
:return: True or False
"""
match_result = False
if 'case group' not in self and group.ci_job_match_keys == self.tags:
# group not assigned and all tags match
match_result = True
return match_result
def assign_group(self, group):
"""
assign a case group to a test job.
:param group: the case group to assign
"""
self['case group'] = group
def output_config(self, file_path):
"""
output test config to the given path.
file name will be job_name.yml
:param file_path: output file path
:return: None
"""
file_name = os.path.join(file_path, self['name'] + '.yml')
if 'case group' in self:
with open(file_name, 'w') as f:
yaml.safe_dump(self['case group'].output(), f, encoding='utf-8', default_flow_style=False)
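# Sketch: a job matches a group when the job's tags equal the group's
# ci_job_match_keys (the fake group below is illustrative):
if __name__ == '__main__':
    from types import SimpleNamespace
    job = Job({'tags': ['ESP32', 'Example_WIFI']}, 'example_test_001_1')
    group = SimpleNamespace(ci_job_match_keys={'ESP32', 'Example_WIFI'},
                            output=lambda: {'CaseConfig': []})
    if job.match_group(group):
        job.assign_group(group)
        job.output_config('.')  # writes ./example_test_001_1.yml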

View File

@ -1,144 +0,0 @@
# SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
""" search test cases from a given file or path """
import copy
import fnmatch
import os
import types
from typing import List
from . import load_source
class Search:
"""
This class is used as a class singleton; all the member functions are ``classmethod``s.
"""
TEST_CASE_FILE_PATTERN = '*_test.py'
SUPPORT_REPLICATE_CASES_KEY = ['target']
# this attribute would be modified while running
missing_import_warnings: List[str] = []
@classmethod
def _search_cases_from_file(cls, file_name):
""" get test cases from test case .py file """
print('Try to get cases from: ' + file_name)
test_functions = []
try:
# search case no need to run the functions
# mock missing modules would help us get the test case function objects
mod = load_source(file_name, mock_missing=True)
for func in [mod.__getattribute__(x) for x in dir(mod)
if isinstance(mod.__getattribute__(x), types.FunctionType)]:
try:
# test method decorator will add test_method attribute to test function
if func.test_method:
test_functions.append(func)
except AttributeError:
continue
except ImportError as e:
warning_str = 'ImportError: \r\n\tFile:' + file_name + '\r\n\tError:' + str(e)
cls.missing_import_warnings.append(warning_str)
test_functions_out = []
for case in test_functions:
test_functions_out += cls.replicate_case(case)
for i, test_function in enumerate(test_functions_out):
print('\t{}. {} <{}>'.format(i + 1, test_function.case_info['name'], test_function.case_info['target']))
test_function.case_info['app_dir'] = os.path.dirname(file_name)
test_function.case_info['script_path'] = file_name
return test_functions_out
@classmethod
def _search_test_case_files(cls, test_case, file_pattern):
""" search all test case files recursively of a path """
if not os.path.exists(test_case):
raise OSError(f'test case path "{test_case}" does not exist')
if os.path.isdir(test_case):
test_case_files = []
for root, _, file_names in os.walk(test_case):
for filename in fnmatch.filter(file_names, file_pattern):
test_case_files.append(os.path.join(root, filename))
else:
test_case_files = [test_case]
return test_case_files
@classmethod
def replicate_case(cls, case):
"""
Replicate cases according to their filter values.
If a case specifies the filter chip=(ESP32, ESP32C),
this will create 2 cases, one for ESP32 and one for ESP32C.
Once the cases are replicated, it's easy to filter the ones we want to execute.
:param case: the original case
:return: a list of replicated cases
"""
replicate_config = []
for key in case.case_info:
if key == 'ci_target': # ci_target is used to filter target, should not be duplicated.
continue
if isinstance(case.case_info[key], (list, tuple)):
replicate_config.append(key)
def _replicate_for_key(cases, replicate_key, replicate_list):
def deepcopy_func(f, name=None):
fn = types.FunctionType(f.__code__, f.__globals__, name if name else f.__name__,
f.__defaults__, f.__closure__)
fn.__dict__.update(copy.deepcopy(f.__dict__))
return fn
case_out = []
for inner_case in cases:
for value in replicate_list:
new_case = deepcopy_func(inner_case)
new_case.case_info[replicate_key] = value
case_out.append(new_case)
return case_out
replicated_cases = [case]
while replicate_config:
key = replicate_config.pop()
if key in cls.SUPPORT_REPLICATE_CASES_KEY:
replicated_cases = _replicate_for_key(replicated_cases, key, case.case_info[key])
# mark the cases with targets not in ci_target
for case in replicated_cases:
ci_target = case.case_info['ci_target']
if not ci_target or case.case_info['target'] in ci_target:
case.case_info['supported_in_ci'] = True
else:
case.case_info['supported_in_ci'] = False
return replicated_cases
@classmethod
def search_test_cases(cls, test_case_paths, test_case_file_pattern=None):
"""
search all test cases from a folder or file, and then replicate the cases.
:param test_case_paths: test case file(s) paths
:param test_case_file_pattern: unix filename pattern
:return: a list of replicated test methods
"""
if not isinstance(test_case_paths, list):
test_case_paths = [test_case_paths]
test_case_files = []
for path in test_case_paths:
test_case_files.extend(
cls._search_test_case_files(path, test_case_file_pattern or cls.TEST_CASE_FILE_PATTERN))
test_cases = []
for test_case_file in test_case_files:
test_cases += cls._search_cases_from_file(test_case_file)
if cls.missing_import_warnings:
raise ImportError('\n\n'.join(cls.missing_import_warnings))
return test_cases
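# Typical entry point (a sketch; the path is illustrative). Missing
# third-party imports are mocked during the search, and the replicated
# per-target cases come back ready for CaseConfig filtering:
if __name__ == '__main__':
    cases = Search.search_test_cases('examples/', test_case_file_pattern='*_test.py')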

View File

@ -1,58 +0,0 @@
# Copyright 2015-2017 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader as Loader
class TestCase(object):
"""
Test Case Object, mainly used with the runner.
The runner can parse all test cases from a given path, and set data and configs for test cases in the prepare stage.
A TestCase instance records these data and provides a run method so the runner can execute the test case.
:param test_method: test function
:param extra_data: data passed to test function
:param overwrite_args: kwargs that overwrite original test case configs
"""
DEFAULT_CASE_DOC = dict()
def __init__(self, test_method, extra_data, **overwrite_args):
self.test_method = test_method
self.extra_data = extra_data
self.overwrite_args = overwrite_args
def run(self):
""" execute the test case """
return self.test_method(self.extra_data, **self.overwrite_args)
def document(self):
"""
generate the test case document.
parse the case doc with the yaml parser and update the original case attributes with it.
:return: case document, dict of case attributes and values
"""
doc_string = self.test_method.__doc__
try:
doc = yaml.load(doc_string, Loader=Loader)
except (AttributeError, OSError, UnicodeDecodeError):
doc = self.DEFAULT_CASE_DOC
doc.update(self.test_method.env_args)
doc.update(self.test_method.accepted_filter)
return doc
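# Sketch: the runner wraps each matched test function, then executes it later
# (the stand-in function below is hypothetical; a real one comes from the
# test_method decorator):
def _demo_testcase_run():
    def my_test_method(extra_data, **overwrite):  # stand-in for a decorated case
        return bool(overwrite.get('env_tag'))
    tc = TestCase(my_test_method, extra_data=None, env_tag='Example_WIFI')
    return tc.run()  # -> my_test_method(None, env_tag='Example_WIFI')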

View File

@ -1,134 +0,0 @@
# SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
from __future__ import print_function
import logging
import os.path
import sys
import time
import traceback
from unittest.mock import MagicMock
from .. import Env
_COLOR_CODES = {
'white': u'\033[0m',
'red': u'\033[31m',
'green': u'\033[32m',
'orange': u'\033[33m',
'blue': u'\033[34m',
'purple': u'\033[35m',
'W': u'\033[0m',
'R': u'\033[31m',
'G': u'\033[32m',
'O': u'\033[33m',
'B': u'\033[34m',
'P': u'\033[35m'
}
def _get_log_file_name():
if Env.Env.CURRENT_LOG_FOLDER:
file_name = os.path.join(Env.Env.CURRENT_LOG_FOLDER, 'console.txt')
else:
raise OSError('env log folder does not exist, will not save to log file')
return file_name
def format_timestamp():
ts = time.time()
return '{}:{}'.format(time.strftime('%m-%d %H:%M:%S', time.localtime(ts)), str(ts % 1)[2:5])
def console_log(data, color='white', end='\n'):
"""
log data to the console.
(if the console log is not flushed, Gitlab-CI won't update logs during job execution)
:param data: data content
:param color: color
"""
if color not in _COLOR_CODES:
color = 'white'
color_codes = _COLOR_CODES[color]
if isinstance(data, type(b'')):
data = data.decode('utf-8', 'replace')
print(color_codes + data, end=end)
if color not in ['white', 'W']:
# reset color to white for later logs
print(_COLOR_CODES['white'] + u'\r')
sys.stdout.flush()
log_data = '[{}] '.format(format_timestamp()) + data
try:
log_file = _get_log_file_name()
with open(log_file, 'a+') as f:
f.write(log_data + end)
except OSError:
pass
__LOADED_MODULES = dict() # type: ignore
# we should only load each module once.
# if we load a module twice,
# python will regard the objects loaded the first time and the second time as different objects.
# this leads to strange errors like `isinstance(obj, type_of_this_object)` returning False
def load_source(path, mock_missing=False):
"""
Dynamically load a python file. Note that this function SHOULD NOT be used to replace ``import``.
It should only be used when the package path is only available at runtime.
:param path: The path of python file
:param mock_missing: If True, will mock the module if the module is not found.
:return: Loaded object
"""
path = os.path.realpath(path)
# the load name needs to be unique, otherwise it will update the already loaded module
load_name = str(len(__LOADED_MODULES))
try:
return __LOADED_MODULES[path]
except KeyError:
folder = os.path.dirname(path)
sys.path.append(folder)
from importlib.machinery import SourceFileLoader
try:
ret = SourceFileLoader(load_name, path).load_module()
except ModuleNotFoundError as e:
if not mock_missing:
raise
# mock the missing third-party libs. Don't use this when running real tests
while True:
sys.modules[e.name] = MagicMock()
logging.warning('Mocking python module %s', e.name)
try:
ret = SourceFileLoader(load_name, path).load_module()
break
except ModuleNotFoundError as f:
e = f # another round
except SyntaxError:
# well, let's ignore it... none of our business
return None
finally:
sys.path.remove(folder)
__LOADED_MODULES[path] = ret
return ret
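# Sketch (the path is illustrative): load a case file without importing its
# missing third-party deps, then look up the functions it defines:
def _demo_load_source():
    mod = load_source('examples/some_example/example_test.py', mock_missing=True)
    return getattr(mod, 'test_examples_protocol_https_request', None)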
def handle_unexpected_exception(junit_test_case, exception):
"""
Helper to log & add junit result details for an unexpected exception encountered
when running a test case.
Should always be called from inside an except: block
"""
traceback.print_exc()
# AssertionError caused by an 'assert' statement has an empty string as its 'str' form
e_str = str(exception) if str(exception) else repr(exception)
junit_test_case.add_failure_info('Unexpected exception: {}\n{}'.format(e_str, traceback.format_exc()))
def format_case_id(case_name, target='esp32', config='default'):
return '{}.{}.{}'.format(target, config, case_name)

View File

@ -1,124 +0,0 @@
# Copyright 2015-2017 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Command line interface to run test cases from a given path.
* search and run test cases of a given path
* config file which supports filtering test cases and passing data to test cases
Use ``python Runner.py test_case_path -c config_file -e env_config_file`` to run test cases.
"""
import argparse
import os
import sys
import threading
from fnmatch import fnmatch
from tiny_test_fw.TinyFW import JunitReport, set_default_config
from tiny_test_fw.Utility import CaseConfig, SearchCases, console_log
class Runner(threading.Thread):
"""
:param test_case_paths: test case file or folder
:param case_config: case config file; allows filtering test cases and passing data to test cases
:param env_config_file: env config file
"""
def __init__(self, test_case_paths, case_config, env_config_file=None, known_failure_cases_file=None):
super(Runner, self).__init__()
self.daemon = True
if case_config:
test_suite_name = os.path.splitext(os.path.basename(case_config))[0]
else:
test_suite_name = 'TestRunner'
set_default_config(env_config_file=env_config_file, test_suite_name=test_suite_name)
test_methods = SearchCases.Search.search_test_cases(test_case_paths)
self.test_cases = CaseConfig.Parser.apply_config(test_methods, case_config)
self.known_failure_cases = self._get_config_cases(known_failure_cases_file)
@staticmethod
def _get_config_cases(config_file):
res = set()
if not config_file or not os.path.isfile(config_file):
return res
for line in open(config_file).readlines():
if not line.strip():
continue
without_comments = line.split('#')[0].strip()
if without_comments:
res.add(without_comments)
return res
def run(self):
for case in self.test_cases:
case.run()
@staticmethod
def is_known_issue(tc_name, known_cases):
for case in known_cases:
if tc_name == case:
return True
if fnmatch(tc_name, case):
return True
return False
def get_test_result(self):
_res = True
console_log('Test Results:')
for tc in JunitReport.JUNIT_TEST_SUITE.test_cases:
if tc.failures:
if self.is_known_issue(tc.name, self.known_failure_cases):
console_log(' Known Failure: ' + tc.name, color='orange')
else:
console_log(' Test Fail: ' + tc.name, color='red')
_res = False
else:
console_log(' Test Succeed: ' + tc.name, color='green')
return _res
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('test_cases', nargs='+',
help='test case folders or files')
parser.add_argument('--case_config', '-c', default=None,
help='case filter/config file')
parser.add_argument('--env_config_file', '-e', default=None,
help='test env config file')
parser.add_argument('--known_failure_cases_file', default=None,
help='known failure cases file')
args = parser.parse_args()
test_cases = [os.path.join(os.getenv('IDF_PATH'), path)
if not os.path.isabs(path) else path for path in args.test_cases]
runner = Runner(test_cases, args.case_config, args.env_config_file, args.known_failure_cases_file)
runner.start()
while True:
try:
runner.join(1)
if not runner.is_alive():
break
except KeyboardInterrupt:
print('exit by Ctrl-C')
break
if not runner.get_test_result():
sys.exit(1)
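# Sketch of the known-failure matching used above: entries from the config
# file may be exact case names or fnmatch patterns (values illustrative):
def _demo_known_issue():
    known = {'esp32.default.test_wifi_*'}
    return Runner.is_known_issue('esp32.default.test_wifi_scan', known)  # -> True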

View File

@ -1,43 +0,0 @@
# Copyright 2015-2017 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" example of writing test with TinyTestFW """
import re
import ttfw_idf
from tiny_test_fw import TinyFW
@ttfw_idf.idf_example_test(env_tag='Example_WIFI')
def test_examples_protocol_https_request(env, extra_data):
"""
steps: |
1. join AP
2. connect to www.howsmyssl.com:443
3. send http request
"""
dut1 = env.get_dut('https_request', 'examples/protocols/https_request', dut_class=ttfw_idf.ESP32DUT)
dut1.start_app()
dut1.expect(re.compile(r'Connecting to www.howsmyssl.com:443'), timeout=30)
dut1.expect('Performing the SSL/TLS handshake')
dut1.expect('Certificate verified.', timeout=15)
dut1.expect_all(re.compile(r'Cipher suite is TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256'),
'Reading HTTP response',
timeout=20)
dut1.expect(re.compile(r'Completed (\d) requests'))
if __name__ == '__main__':
TinyFW.set_default_config(env_config_file='EnvConfigTemplate.yml', dut=ttfw_idf.IDFDUT)
test_examples_protocol_https_request()

View File

@ -1,26 +0,0 @@
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXAPI = sphinx-apidoc
SPHINXAPISRC = ..
SPHINXBUILD = python -msphinx
SPHINXPROJ = TinyTestFW
SOURCEDIR = .
BUILDDIR = _build
# define the files to be excluded here
EXCLUDELIST = "$(SPHINXAPISRC)/example.py"
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXAPI) -o $(SOURCEDIR) $(SPHINXAPISRC) $(EXCLUDELIST)
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

View File

@ -1,158 +0,0 @@
# -*- coding: utf-8 -*-
#
# TinyTestFW documentation build configuration file, created by
# sphinx-quickstart on Thu Sep 21 20:19:12 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# import sphinx_rtd_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'plantweb.directive']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'TinyTestFW'
copyright = u'2017, Espressif'
author = u'Espressif'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'TinyTestFWdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'TinyTestFW.tex', u'TinyTestFW Documentation',
u'He Yinling', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tinytestfw', u'TinyTestFW Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'TinyTestFW', u'TinyTestFW Documentation',
author, 'TinyTestFW', 'One line description of project.',
'Miscellaneous'),
]

View File

@ -1,204 +0,0 @@
.. TinyTestFW documentation master file, created by
sphinx-quickstart on Thu Sep 21 20:19:12 2017.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to TinyTestFW's documentation!
======================================
We have a lot of tests which depend on interacting with the DUT via a communication port.
Usually we send a command to the port and then check the response to see if the test succeeded.
TinyTestFW is designed for such scenarios.
It supports ESP-IDF applications and can be adapted to other applications by writing new bundles.
Example
-------
Let's first check a simple example::
import re
import os
import sys
test_fw_path = os.getenv("TEST_FW_PATH")
if test_fw_path:
sys.path.insert(0, test_fw_path)
import TinyFW
from IDF import IDFApp, IDFDUT
@TinyFW.test_method(app=IDFApp.Example, dut=IDFDUT.IDFDUT, env_tag="Example_WIFI",
chip="ESP32", module="examples", execution_time=1)
def test_examples_protocol_https_request(env, extra_data):
"""
steps: |
1. join AP
2. connect to www.howsmyssl.com:443
3. send http request
"""
dut1 = env.get_dut("https_request", "examples/protocols/https_request")
dut1.start_app()
dut1.expect("Connecting to www.howsmyssl.com:443", timeout=30)
dut1.expect("Performing the SSL/TLS handshake")
dut1.expect("Certificate verified.", timeout=15)
dut1.expect_all(re.compile(r"Cipher suite is TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256"),
"Reading HTTP response",
timeout=20)
dut1.expect(re.compile(r"Completed (\d) requests"))
if __name__ == '__main__':
TinyFW.set_default_config(env_config_file="EnvConfigTemplate.yml")
test_examples_protocol_https_request()
SOP for adding test cases
-------------------------
1. Import test framework:
^^^^^^^^^^^^^^^^^^^^^^^^^
* We assume ``TEST_FW_PATH`` is pre-defined before running the tests
* Then we can import python packages and files from ``TEST_FW_PATH``
2. Define test case:
^^^^^^^^^^^^^^^^^^^^
1. Define test case ``test_xxx(env, extra_data)``
* env: instance of test env, see :doc:`Test Env <Env>` for details
* extra_data: extra data passed from test case caller
2. Add decorator for test case
* add decorator ``TinyFW.test_method`` to test method
* define default case configs and filters in decorator, see :doc:`TinyFW.test_method <TinyFW>`
3. Execute test cases:
^^^^^^^^^^^^^^^^^^^^^^
* define in ``main`` section and execute from this file
1. set preset configs (optional). If a config is not defined in the case decorator, the preset configs will be used.
2. call test case method:
* if you don't pass any arguments, it will use default values
* you can pass ``extra_data`` to the test case by adding ``extra_data=some_data`` as a kwarg of the test case method.
The default value for extra_data is None.
* you can overwrite test case configs by adding them as kwargs of the test case method.
They will overwrite preset configs and case default configs.
Examples::
test_examples_protocol_https_request(extra_data=["data1", "data2"], dut=SomeOtherDUT, env_tag="OtherEnv")
* or, use ``runner`` to execute. see :doc:`runner <Runner>` for details
Test FW features
----------------
1. Test Environment:
1. DUT: the DUT class provides methods to interact with the DUT
* read/write through the port
* expect methods which support expecting one or multiple strings or RegExes
* tool methods provided by the tool bundle, like ``start_app``, ``reset``
2. App:
* provide some specific features to the test application of DUT, for example:
* SDK path
* SDK tools
* application information like partition table, download configs
3. Environment Configs:
* supports getting env configs from a config file or auto-detecting them from the current PC
* provides a ``get_variable`` method to get variables
2. Allows customizing components (DUT, App) to support different devices
3. Integrates with CI:
* provides interfaces for Gitlab-CI
* provides ``search case`` and ``runner`` interfaces, able to integrate with other CI systems
Class Diagram
=============
.. uml::
class BaseDUT {
{field} app
{method} expect
{method} expect_any
{method} expect_all
{method} read
{method} write
{method} start_receive
{method} stop_receive
{method} close
}
class SerialDUT {
{method} _port_read
{method} _port_write
{method} _port_open
{method} _port_close
}
class IDFDUT {
{method} reset
{method} start_app
}
class BaseApp {
{method} get_sdk_path
{method} get_log_folder
}
class IDFApp {
{field} flash_files
{field} flash_settings
{field} partition_table
}
class Example {
{method} get_binary_path
}
class EnvConfig {
{method} get_variable
}
class Env {
{field} config
{field} allocated_duts
{field} app_cls
{method} get_dut
{method} close_dut
{method} get_variable
{method} get_pc_nic_info
{method} close
}
SerialDUT --|> BaseDUT
IDFDUT --|> SerialDUT
IDFApp --|> BaseApp
Example --|> IDFApp
Env *-- EnvConfig
Env *-- BaseDUT
Env o-- BaseApp
BaseDUT o-- BaseApp
.. toctree::
:maxdepth: 2
:caption: Contents:
modules
Dependencies
============
Support for both Python2 and Python3 (tested on python 2.7.13 and 3.6.2).
The following 3rd party libs are required:
* pyserial
* pyyaml
* junit_xml
* netifaces
* matplotlib (if using Utility.LineChart)
These libraries can be installed by running ``pip install --user -r requirements.txt`` in the tiny-test-fw directory.
To build the documentation, we need to install ``Sphinx``, ``plantweb`` and ``sphinx-rtd-theme`` (you may replace this with your own theme). ``plantweb`` requires internet access while building the documentation.
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

View File

@ -1,540 +0,0 @@
# SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
""" IDF Test Applications """
import hashlib
import json
import os
import re
import subprocess
import sys
from abc import abstractmethod
from tiny_test_fw import App
from .IDFAssignTest import ComponentUTGroup, ExampleGroup, IDFCaseGroup, TestAppsGroup, UnitTestGroup
try:
import gitlab_api
except ImportError:
gitlab_api = None
try:
from typing import Any, Dict, List, Optional, Tuple, Type # noqa: F401
except ImportError:
pass
def parse_encrypted_flag(args, offs, binary): # type: (Dict, str, str) -> Any
# Find partition entries (e.g. the entries with an offset and a file)
for _, entry in args.items():
# If the current entry is a partition, we have to check whether it is
# the one we are looking for or not
try:
if (entry['offset'], entry['file']) == (offs, binary):
return entry['encrypted'] == 'true'
except (TypeError, KeyError):
# TypeError occurs if the entry is a list, which is possible in JSON
# data structure.
# KeyError occurs if the entry doesn't have "encrypted" field.
continue
# The entry was not found, return None. The caller will have to check
# CONFIG_SECURE_FLASH_ENCRYPTION_MODE_DEVELOPMENT macro
return None
def parse_flash_settings(path, default_encryption=False): # type: (str, bool) -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]], Dict, Any]
file_name = os.path.basename(path)
# For compatibility reasons, this list contains all the files to be
# flashed
flash_files = []
# The following list only contains the files that need encryption
encrypt_files = []
if file_name == 'flasher_args.json':
# CMake version using build metadata file
with open(path, 'r') as f:
args = json.load(f)
for (offs, binary) in args['flash_files'].items():
if offs:
flash_files.append((offs, binary))
encrypted = parse_encrypted_flag(args, offs, binary)
# default_encryption should be taken into account if and only if
# encrypted flag is not provided in the JSON file.
if (encrypted is None and default_encryption) or encrypted:
encrypt_files.append((offs, binary))
flash_settings = args['flash_settings']
app_name = os.path.splitext(args['app']['file'])[0]
else:
# GNU Make version uses download.config arguments file
with open(path, 'r') as f:
args = f.readlines()[-1].split(' ')
flash_settings = {}
for idx in range(0, len(args), 2): # process arguments in pairs
if args[idx].startswith('--'):
# strip the -- from the command line argument
flash_settings[args[idx][2:]] = args[idx + 1]
else:
# offs, filename
flash_files.append((args[idx], args[idx + 1]))
# Parameter default_encryption tells us if the files need encryption
if default_encryption:
encrypt_files = flash_files
# we can only guess the app name from download.config.
for p in flash_files:
if not os.path.dirname(p[1]) and 'partition' not in p[1]:
# the app binary is usually in the same directory as download.config, and it is not the partition table
app_name = os.path.splitext(p[1])[0]
break
else:
app_name = None
return flash_files, encrypt_files, flash_settings, app_name
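# Minimal usage sketch (illustrative paths and values, not from the original
# file): for a CMake build the offsets come back as the raw strings from
# flasher_args.json; IDFApp._int_offs_abs_paths() converts them to integers later.
#
#     flash_files, encrypt_files, flash_settings, app_name = parse_flash_settings(
#         os.path.join('build', 'flasher_args.json'))
#     # flash_files    -> [('0x1000', 'bootloader/bootloader.bin'),
#     #                    ('0x8000', 'partition_table/partition-table.bin'),
#     #                    ('0x10000', 'hello_world.bin')]
#     # flash_settings -> {'flash_mode': 'dio', 'flash_size': 'detect', 'flash_freq': '40m'}
#     # app_name       -> 'hello_world'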
class Artifacts(object):
def __init__(self, dest_root_path, artifact_index_file, app_path, config_name, target):
# type: (str, str, str, str, str) -> None
assert gitlab_api
# at least one of app_path or config_name must not be None, otherwise we can't match the artifact
assert app_path or config_name
assert os.path.exists(artifact_index_file)
self.gitlab_inst = gitlab_api.Gitlab(os.getenv('CI_PROJECT_ID'))
self.dest_root_path = dest_root_path
with open(artifact_index_file, 'r') as f:
artifact_index = json.load(f)
self.artifact_info = self._find_artifact(artifact_index, app_path, config_name, target)
@staticmethod
def _find_artifact(artifact_index, app_path, config_name, target): # type: ignore
for artifact_info in artifact_index:
match_result = True
if app_path:
# We use endswith here to avoid issues like: searching for
# examples_protocols_mqtt_ws but matching examples_protocols_mqtt_wss instead
match_result = artifact_info['app_dir'].endswith(app_path)
if config_name:
match_result = match_result and config_name == artifact_info['config']
if target:
match_result = match_result and target == artifact_info['target']
if match_result:
ret = artifact_info
break
else:
ret = None
return ret
def _get_app_base_path(self): # type: () -> Any
if self.artifact_info:
return os.path.join(self.artifact_info['work_dir'], self.artifact_info['build_dir'])
else:
return None
def _get_flash_arg_file(self, base_path, job_id): # type: (str, str) -> str
if self.artifact_info['build_system'] == 'cmake':
flash_arg_file = os.path.join(base_path, 'flasher_args.json')
else:
flash_arg_file = os.path.join(base_path, 'download.config')
self.gitlab_inst.download_artifact(job_id, [flash_arg_file], self.dest_root_path)
return flash_arg_file
def _download_binary_files(self, base_path, job_id, flash_arg_file): # type: (str, str, str) -> None
# Let's ignore the second value returned (encrypt_files) as these
# files also appear in the first list
flash_files, _, _, app_name = parse_flash_settings(os.path.join(self.dest_root_path, flash_arg_file))
artifact_files = [os.path.join(base_path, p[1]) for p in flash_files]
artifact_files.append(os.path.join(base_path, app_name + '.elf'))
bootloader_path = os.path.join(base_path, 'bootloader', 'bootloader.bin')
if bootloader_path not in artifact_files:
artifact_files.append(bootloader_path)
self.gitlab_inst.download_artifact(job_id, artifact_files, self.dest_root_path)
def _download_sdkconfig_file(self, base_path, job_id): # type: (str, str) -> None
self.gitlab_inst.download_artifact(job_id, [os.path.join(base_path, 'sdkconfig')],
self.dest_root_path)
def download_artifacts(self): # type: () -> Any
if not self.artifact_info:
return None
base_path = self._get_app_base_path()
job_id = self.artifact_info['ci_job_id']
# 1. download flash args file
flash_arg_file = self._get_flash_arg_file(base_path, job_id)
# 2. download all binary files
self._download_binary_files(base_path, job_id, flash_arg_file)
# 3. download sdkconfig file
self._download_sdkconfig_file(base_path, job_id)
return base_path
def download_artifact_files(self, file_names): # type: (List[str]) -> Any
if self.artifact_info:
base_path = os.path.join(self.artifact_info['work_dir'], self.artifact_info['build_dir'])
job_id = self.artifact_info['ci_job_id']
# download all binary files
artifact_files = [os.path.join(base_path, fn) for fn in file_names]
self.gitlab_inst.download_artifact(job_id, artifact_files, self.dest_root_path)
# download sdkconfig file
self.gitlab_inst.download_artifact(job_id, [os.path.join(base_path, 'sdkconfig')],
self.dest_root_path)
else:
base_path = None
return base_path
class UnitTestArtifacts(Artifacts):
BUILDS_DIR_RE = re.compile(r'^builds/')
def _get_app_base_path(self): # type: () -> Any
if self.artifact_info:
output_dir = self.BUILDS_DIR_RE.sub('output/', self.artifact_info['build_dir'])
return os.path.join(self.artifact_info['app_dir'], output_dir)
else:
return None
def _download_sdkconfig_file(self, base_path, job_id): # type: (str, str) -> None
self.gitlab_inst.download_artifact(job_id, [os.path.join(base_path, 'sdkconfig')], self.dest_root_path)
class IDFApp(App.BaseApp):
"""
Implements common esp-idf application behavior.
IDF applications should inherit from this class and override the method get_binary_path.
"""
IDF_DOWNLOAD_CONFIG_FILE = 'download.config'
IDF_FLASH_ARGS_FILE = 'flasher_args.json'
def __init__(self, app_path, config_name=None, target=None, case_group=IDFCaseGroup, artifact_cls=Artifacts): # type: ignore
super(IDFApp, self).__init__(app_path)
self.app_path = app_path # type: (str)
self.config_name = config_name # type: (str)
self.target = target # type: (str)
self.idf_path = self.get_sdk_path() # type: (str)
self.case_group = case_group
self.artifact_cls = artifact_cls
self.binary_path = self.get_binary_path()
self.elf_file = self._get_elf_file_path()
self._elf_file_sha256 = None # type: (Optional[str])
assert os.path.exists(self.binary_path)
if self.IDF_DOWNLOAD_CONFIG_FILE not in os.listdir(self.binary_path):
if self.IDF_FLASH_ARGS_FILE not in os.listdir(self.binary_path):
msg = ('Neither {} nor {} exists. '
"Try to run 'make print_flash_cmd | tail -n 1 > {}/{}' "
"or 'idf.py build' "
'to resolve the issue.'
'').format(self.IDF_DOWNLOAD_CONFIG_FILE, self.IDF_FLASH_ARGS_FILE,
self.binary_path, self.IDF_DOWNLOAD_CONFIG_FILE)
raise AssertionError(msg)
# In order to keep backward compatibility, flash_files is unchanged.
# However, we now have a new attribute encrypt_files.
self.flash_files, self.encrypt_files, self.flash_settings = self._parse_flash_download_config()
self.partition_table = self._parse_partition_table()
def __str__(self): # type: () -> str
parts = ['app<{}>'.format(self.app_path)]
if self.config_name:
parts.append('config<{}>'.format(self.config_name))
if self.target:
parts.append('target<{}>'.format(self.target))
return ' '.join(parts)
@classmethod
def get_sdk_path(cls): # type: () -> str
idf_path = os.getenv('IDF_PATH')
assert idf_path
assert os.path.exists(idf_path)
return idf_path
def _get_sdkconfig_paths(self): # type: () -> List[str]
"""
returns list of possible paths where sdkconfig could be found
Note: can be overridden by a derived class to provide other locations or a different order
"""
return [os.path.join(self.binary_path, 'sdkconfig'), os.path.join(self.binary_path, '..', 'sdkconfig')]
def get_sdkconfig(self): # type: () -> Dict
"""
reads sdkconfig and returns a dictionary with all configured variables
:raise: AssertionError: if sdkconfig file does not exist in defined paths
"""
d = {}
sdkconfig_file = None
for i in self._get_sdkconfig_paths():
if os.path.exists(i):
sdkconfig_file = i
break
assert sdkconfig_file is not None
with open(sdkconfig_file) as f:
for line in f:
configs = line.split('=')
if len(configs) == 2:
d[configs[0]] = configs[1].rstrip()
return d
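# Illustrative sketch of the parsing above (values assumed): each KEY=VALUE
# line becomes one dict entry with the right-hand side kept verbatim, quotes
# included, while comment lines like '# CONFIG_FOO is not set' don't split
# into exactly two parts and are dropped:
#
#     CONFIG_IDF_TARGET="esp32"  ->  {'CONFIG_IDF_TARGET': '"esp32"'}
#     CONFIG_SECURE_BOOT=y       ->  {'CONFIG_SECURE_BOOT': 'y'}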
def get_sdkconfig_config_value(self, config_key): # type: (str) -> Any
sdkconfig_dict = self.get_sdkconfig()
value = None
if (config_key in sdkconfig_dict):
value = sdkconfig_dict[config_key]
return value
@abstractmethod
def _try_get_binary_from_local_fs(self): # type: () -> Optional[str]
pass
def get_binary_path(self): # type: () -> str
path = self._try_get_binary_from_local_fs()
if path:
return path
artifacts = self.artifact_cls(self.idf_path,
self.case_group.get_artifact_index_file(),
self.app_path, self.config_name, self.target)
if isinstance(self, LoadableElfTestApp):
assert self.app_files
path = artifacts.download_artifact_files(self.app_files)
else:
path = artifacts.download_artifacts()
if path:
return os.path.join(self.idf_path, path)
else:
raise OSError('Failed to get binary for {}'.format(self))
def _get_elf_file_path(self): # type: () -> str
ret = ''
file_names = os.listdir(self.binary_path)
for fn in file_names:
if os.path.splitext(fn)[1] == '.elf':
ret = os.path.join(self.binary_path, fn)
return ret
def _int_offs_abs_paths(self, files_list): # type: (List[Tuple[str, str]]) -> List[Tuple[int, str]]
return [(int(offs, 0),
os.path.join(self.binary_path, file_path.strip()))
for (offs, file_path) in files_list]
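# Example (illustrative): int(offs, 0) honours the base prefix, so both
# hexadecimal and decimal offset strings from the build metadata work:
#     ('0x10000', 'app.bin') -> (65536, os.path.join(self.binary_path, 'app.bin'))
#     ('4096', 'boot.bin')   -> (4096, os.path.join(self.binary_path, 'boot.bin'))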
def _parse_flash_download_config(self): # type: () -> Tuple[List[Tuple[int, str]], List[Tuple[int, str]], Dict]
"""
Parse the flash download config from build metadata files.
Returns (flash_files, encrypt_files, flash_settings); the constructor
assigns these to self.flash_files, self.encrypt_files and self.flash_settings.
(Called from the constructor)
"""
if self.IDF_FLASH_ARGS_FILE in os.listdir(self.binary_path):
# CMake version using build metadata file
path = os.path.join(self.binary_path, self.IDF_FLASH_ARGS_FILE)
else:
# GNU Make version uses download.config arguments file
path = os.path.join(self.binary_path, self.IDF_DOWNLOAD_CONFIG_FILE)
# If the JSON doesn't provide the encrypted flag for our files, derive
# a default encryption flag from the macro
# CONFIG_SECURE_FLASH_ENCRYPTION_MODE_DEVELOPMENT
sdkconfig_dict = self.get_sdkconfig()
default_encryption = 'CONFIG_SECURE_FLASH_ENCRYPTION_MODE_DEVELOPMENT' in sdkconfig_dict
flash_files, encrypt_files, flash_settings, _ = parse_flash_settings(path, default_encryption)
# Set the flash setting "encrypt" to True if and only if all the files
# to flash must be encrypted; otherwise it must be False.
# All files must be encrypted exactly when both file lists are the same
flash_settings['encrypt'] = sorted(flash_files) == sorted(encrypt_files)
return self._int_offs_abs_paths(flash_files), self._int_offs_abs_paths(encrypt_files), flash_settings
def _parse_partition_table(self): # type: ignore
"""
Parse partition table contents based on app binaries
Returns partition_table data
(Called from constructor)
"""
partition_tool = os.path.join(self.idf_path,
'components',
'partition_table',
'gen_esp32part.py')
assert os.path.exists(partition_tool)
errors = []
# self.flash_files is sorted based on offset in order to have a consistent result with different versions of
# Python
for (_, path) in sorted(self.flash_files, key=lambda elem: elem[0]):
if 'partition' in os.path.split(path)[1]:
partition_file = os.path.join(self.binary_path, path)
process = subprocess.Popen([sys.executable, partition_tool, partition_file],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(raw_data, raw_error) = process.communicate()
if isinstance(raw_error, bytes):
raw_error = raw_error.decode()
if 'Traceback' in raw_error:
# Some exception occurred. It is possible that we've tried the wrong binary file.
errors.append((path, raw_error))
continue
if isinstance(raw_data, bytes):
raw_data = raw_data.decode()
break
else:
traceback_msg = os.linesep.join(['{} {}:{}{}'.format(partition_tool,
p,
os.linesep,
msg) for p, msg in errors])
raise ValueError('No partition table found for IDF binary path: {}{}{}'.format(self.binary_path,
os.linesep,
traceback_msg))
partition_table = dict()
for line in raw_data.splitlines():
if line and line[0] != '#':  # skip empty and comment lines
try:
_name, _type, _subtype, _offset, _size, _flags = line.split(',')
if _size[-1] == 'K':
_size = int(_size[:-1]) * 1024
elif _size[-1] == 'M':
_size = int(_size[:-1]) * 1024 * 1024
else:
_size = int(_size)
_offset = int(_offset, 0)
except ValueError:
continue
partition_table[_name] = {
'type': _type,
'subtype': _subtype,
'offset': _offset,
'size': _size,
'flags': _flags
}
return partition_table
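# Illustrative sketch of a gen_esp32part.py CSV line and the entry parsed
# from it by the loop above (field values assumed):
#
#     nvs,data,nvs,0x9000,24K,
#
# becomes
#
#     partition_table['nvs'] == {'type': 'data', 'subtype': 'nvs',
#                                'offset': 0x9000, 'size': 24 * 1024, 'flags': ''}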
def get_elf_sha256(self): # type: () -> Optional[str]
if self._elf_file_sha256:
return self._elf_file_sha256
sha256 = hashlib.sha256()
with open(self.elf_file, 'rb') as f:
sha256.update(f.read())
self._elf_file_sha256 = sha256.hexdigest()
return self._elf_file_sha256
class Example(IDFApp):
def __init__(self, app_path, config_name='default', target='esp32', case_group=ExampleGroup, artifacts_cls=Artifacts):
# type: (str, str, str, Type[ExampleGroup], Type[Artifacts]) -> None
if not config_name:
config_name = 'default'
if not target:
target = 'esp32'
super(Example, self).__init__(app_path, config_name, target, case_group, artifacts_cls)
def _try_get_binary_from_local_fs(self): # type: () -> Optional[str]
# build folder of example path
path = os.path.join(self.idf_path, self.app_path, 'build')
if os.path.exists(path):
return path
# new style build dir, also used by CI build folders.
# Path format: $IDF_PATH/<app_dir>/build_<target>_<config>
path = os.path.join(self.idf_path, self.app_path, f'build_{self.target}_{self.config_name}')
if os.path.exists(path):
return path
return None
class UT(IDFApp):
def __init__(self, app_path, config_name='default', target='esp32', case_group=UnitTestGroup, artifacts_cls=UnitTestArtifacts):
# type: (str, str, str, Type[UnitTestGroup], Type[UnitTestArtifacts]) -> None
if not config_name:
config_name = 'default'
if not target:
target = 'esp32'
super(UT, self).__init__(app_path, config_name, target, case_group, artifacts_cls)
def _try_get_binary_from_local_fs(self): # type: () -> Optional[str]
path = os.path.join(self.idf_path, self.app_path, 'build')
if os.path.exists(path):
return path
# then try to get it from the build folder of unit-test-app
path = os.path.join(self.idf_path, 'tools', 'unit-test-app', 'build')
if os.path.exists(path):
# found, use bin in build path
return path
# ``build_unit_test.sh`` will copy binary to output folder
path = os.path.join(self.idf_path, 'tools', 'unit-test-app', 'output', self.target, self.config_name)
if os.path.exists(path):
return path
return None
class TestApp(Example):
def __init__(self, app_path, config_name='default', target='esp32', case_group=TestAppsGroup, artifacts_cls=Artifacts):
# type: (str, str, str, Type[TestAppsGroup], Type[Artifacts]) -> None
super(TestApp, self).__init__(app_path, config_name, target, case_group, artifacts_cls)
class ComponentUTApp(TestApp):
def __init__(self, app_path, config_name='default', target='esp32', case_group=ComponentUTGroup, artifacts_cls=Artifacts):
# type: (str, str, str, Type[ComponentUTGroup], Type[Artifacts]) -> None
super(ComponentUTApp, self).__init__(app_path, config_name, target, case_group, artifacts_cls)
class LoadableElfTestApp(TestApp):
def __init__(self, app_path, app_files, config_name='default', target='esp32', case_group=TestAppsGroup, artifacts_cls=Artifacts):
# type: (str, List[str], str, str, Type[TestAppsGroup], Type[Artifacts]) -> None
# add arg `app_files` for loadable ELF test apps.
# Such apps only build ELF files, so no flasher_args.json is generated and
# we can't get the app files from a config file; the test case should pass them to the application.
super(IDFApp, self).__init__(app_path)
self.app_path = app_path
self.app_files = app_files
self.config_name = config_name or 'default'
self.target = target or 'esp32'
self.idf_path = self.get_sdk_path()
self.case_group = case_group
self.artifact_cls = artifacts_cls
self.binary_path = self.get_binary_path()
self.elf_file = self._get_elf_file_path()
assert os.path.exists(self.binary_path)
class SSC(IDFApp):
def get_binary_path(self): # type: () -> str
# TODO: implement SSC get binary path
return self.app_path
class AT(IDFApp):
def get_binary_path(self): # type: () -> str
# TODO: implement AT get binary path
return self.app_path

View File

@ -1,354 +0,0 @@
# SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
"""
Command line tool to assign tests to CI test jobs.
"""
import argparse
import json
import os
import re
import sys
from copy import deepcopy
import yaml
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader as Loader # type: ignore
import gitlab_api
from tiny_test_fw.Utility import CIAssignTest
try:
from idf_py_actions.constants import PREVIEW_TARGETS, SUPPORTED_TARGETS
except ImportError:
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
from idf_py_actions.constants import PREVIEW_TARGETS, SUPPORTED_TARGETS
IDF_PATH_FROM_ENV = os.getenv('IDF_PATH', '')
class IDFCaseGroup(CIAssignTest.Group):
BUILD_JOB_NAMES = None
@classmethod
def get_artifact_index_file(cls):
if IDF_PATH_FROM_ENV:
artifact_index_file = os.path.join(IDF_PATH_FROM_ENV, 'artifact_index.json')
else:
artifact_index_file = 'artifact_index.json'
return artifact_index_file
class IDFAssignTest(CIAssignTest.AssignTest):
DEFAULT_FILTER = {
'category': 'function',
'ignore': False,
'supported_in_ci': True,
'nightly_run': False,
}
def __init__(self, test_case_path, ci_config_file, case_group=IDFCaseGroup):
super(IDFAssignTest, self).__init__(test_case_path, ci_config_file, case_group)
def format_build_log_path(self, parallel_num):
return 'list_job_{}.txt'.format(parallel_num)
def create_artifact_index_file(self, project_id=None, pipeline_id=None):
if project_id is None:
project_id = os.getenv('CI_PROJECT_ID')
if pipeline_id is None:
pipeline_id = os.getenv('CI_PIPELINE_ID')
gitlab_inst = gitlab_api.Gitlab(project_id)
artifact_index_list = []
for build_job_name in self.case_group.BUILD_JOB_NAMES:
job_info_list = gitlab_inst.find_job_id(build_job_name, pipeline_id=pipeline_id)
for job_info in job_info_list:
parallel_num = job_info['parallel_num'] or 1 # Could be None if "parallel_num" not defined for the job
raw_data = gitlab_inst.download_artifact(job_info['id'],
[self.format_build_log_path(parallel_num)])[0]
build_info_list = [json.loads(line) for line in raw_data.decode().splitlines()]
for build_info in build_info_list:
build_info['ci_job_id'] = job_info['id']
artifact_index_list.append(build_info)
artifact_index_file = self.case_group.get_artifact_index_file()
with open(artifact_index_file, 'w') as f:
json.dump(artifact_index_list, f)
def search_cases(self, case_filter=None):
_filter = deepcopy(case_filter) if case_filter else {}
if 'NIGHTLY_RUN' in os.environ or 'BOT_LABEL_NIGHTLY_RUN' in os.environ:
_filter.update({'nightly_run': True})
return super().search_cases(_filter)
class ExampleGroup(IDFCaseGroup):
SORT_KEYS = CI_JOB_MATCH_KEYS = ['env_tag', 'target']
EXAMPLE_TARGETS = SUPPORTED_TARGETS + PREVIEW_TARGETS
BUILD_JOB_NAMES = ['build_examples_cmake_{}'.format(target) for target in EXAMPLE_TARGETS] # type: ignore
class TestAppsGroup(ExampleGroup):
TEST_APP_TARGETS = SUPPORTED_TARGETS + PREVIEW_TARGETS
BUILD_JOB_NAMES = ['build_test_apps_{}'.format(target) for target in TEST_APP_TARGETS] # type: ignore
class ComponentUTGroup(TestAppsGroup):
UNIT_TEST_TARGETS = SUPPORTED_TARGETS + PREVIEW_TARGETS
BUILD_JOB_NAMES = ['build_component_ut_{}'.format(target) for target in UNIT_TEST_TARGETS] # type: ignore
class UnitTestGroup(IDFCaseGroup):
SORT_KEYS = ['test environment', 'tags', 'chip_target']
CI_JOB_MATCH_KEYS = ['test environment']
UNIT_TEST_TARGETS = SUPPORTED_TARGETS + PREVIEW_TARGETS
BUILD_JOB_NAMES = ['build_esp_idf_tests_cmake_{}'.format(target) for target in UNIT_TEST_TARGETS] # type: ignore
MAX_CASE = 50
ATTR_CONVERT_TABLE = {
'execution_time': 'execution time'
}
DUT_CLS_NAME = {
'esp32': 'ESP32DUT',
'esp32s2': 'ESP32S2DUT',
'esp32s3': 'ESP32S3DUT',
'esp32c2': 'ESP32C2DUT',
'esp32c3': 'ESP32C3DUT',
'esp32c6': 'ESP32C6DUT',
'esp32h2': 'ESP32H2DUT',
'esp8266': 'ESP8266DUT',
}
def __init__(self, case):
super(UnitTestGroup, self).__init__(case)
for tag in self._get_case_attr(case, 'tags'):
self.ci_job_match_keys.add(tag)
@staticmethod
def _get_case_attr(case, attr):
if attr in UnitTestGroup.ATTR_CONVERT_TABLE:
attr = UnitTestGroup.ATTR_CONVERT_TABLE[attr]
return case[attr]
def add_extra_case(self, case):
""" If the current group contains all tags required by the case, adding it succeeds """
added = False
if self.accept_new_case():
for key in self.filters:
if self._get_case_attr(case, key) != self.filters[key]:
if key == 'tags':
if set(self._get_case_attr(case, key)).issubset(set(self.filters[key])):
continue
break
else:
self.case_list.append(case)
added = True
return added
def _create_extra_data(self, test_cases, test_function):
"""
For unit test cases, we need to copy some attributes of the test cases into the config file,
so the unit test function knows how to run the case.
"""
case_data = []
for case in test_cases:
one_case_data = {
'config': self._get_case_attr(case, 'config'),
'name': self._get_case_attr(case, 'summary'),
'reset': self._get_case_attr(case, 'reset'),
'timeout': self._get_case_attr(case, 'timeout'),
}
if test_function in ['run_multiple_devices_cases', 'run_multiple_stage_cases']:
try:
one_case_data['child case num'] = self._get_case_attr(case, 'child case num')
except KeyError as e:
print('multiple devices/stages cases must contain at least two test functions')
print('case name: {}'.format(one_case_data['name']))
raise e
case_data.append(one_case_data)
return case_data
def _divide_case_by_test_function(self):
"""
divide the cases of the current test group by the test function they need to use
:return: dict of lists of cases for each test function
"""
case_by_test_function = {
'run_multiple_devices_cases': [],
'run_multiple_stage_cases': [],
'run_unit_test_cases': [],
}
for case in self.case_list:
if case['multi_device'] == 'Yes':
case_by_test_function['run_multiple_devices_cases'].append(case)
elif case['multi_stage'] == 'Yes':
case_by_test_function['run_multiple_stage_cases'].append(case)
else:
case_by_test_function['run_unit_test_cases'].append(case)
return case_by_test_function
def output(self):
"""
output data for job configs
:return: {"Filter": case filter, "CaseConfig": list of case configs for cases in this group}
"""
target = self._get_case_attr(self.case_list[0], 'chip_target')
if target:
overwrite = {
'dut': {
'package': 'ttfw_idf',
'class': self.DUT_CLS_NAME[target],
}
}
else:
overwrite = dict()
case_by_test_function = self._divide_case_by_test_function()
output_data = {
# we don't need a filter for the test function, as UT uses a few test functions for all cases
'CaseConfig': [
{
'name': test_function,
'extra_data': self._create_extra_data(test_cases, test_function),
'overwrite': overwrite,
} for test_function, test_cases in case_by_test_function.items() if test_cases
],
}
return output_data
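# Illustrative shape of the returned job config (a single assumed case on an
# ESP32 group; the field values are examples, not taken from a real run):
#
#     {'CaseConfig': [{'name': 'run_unit_test_cases',
#                      'extra_data': [{'config': 'default', 'name': 'adc read',
#                                      'reset': '', 'timeout': 30}],
#                      'overwrite': {'dut': {'package': 'ttfw_idf',
#                                            'class': 'ESP32DUT'}}}]}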
class ExampleAssignTest(IDFAssignTest):
CI_TEST_JOB_PATTERN = re.compile(r'^example_test_.+')
def __init__(self, test_case_path, ci_config_file):
super(ExampleAssignTest, self).__init__(test_case_path, ci_config_file, case_group=ExampleGroup)
class TestAppsAssignTest(IDFAssignTest):
CI_TEST_JOB_PATTERN = re.compile(r'^test_app_test_.+')
def __init__(self, test_case_path, ci_config_file):
super(TestAppsAssignTest, self).__init__(test_case_path, ci_config_file, case_group=TestAppsGroup)
class ComponentUTAssignTest(IDFAssignTest):
CI_TEST_JOB_PATTERN = re.compile(r'^component_ut_test_.+')
def __init__(self, test_case_path, ci_config_file):
super(ComponentUTAssignTest, self).__init__(test_case_path, ci_config_file, case_group=ComponentUTGroup)
class UnitTestAssignTest(IDFAssignTest):
CI_TEST_JOB_PATTERN = re.compile(r'^UT_.+')
def __init__(self, test_case_path, ci_config_file):
super(UnitTestAssignTest, self).__init__(test_case_path, ci_config_file, case_group=UnitTestGroup)
def search_cases(self, case_filter=None):
"""
For unit test cases, we don't search for test functions.
The unit test cases are stored in a YAML file created by the job build-idf-test.
"""
def find_by_suffix(suffix, path):
res = []
for root, _, files in os.walk(path):
for file in files:
if file.endswith(suffix):
res.append(os.path.join(root, file))
return res
def get_test_cases_from_yml(yml_file):
try:
with open(yml_file) as fr:
raw_data = yaml.load(fr, Loader=Loader)
test_cases = raw_data['test cases']
except (IOError, KeyError):
return []
else:
return test_cases
test_cases = []
for path in self.test_case_paths:
if os.path.isdir(path):
for yml_file in find_by_suffix('.yml', path):
test_cases.extend(get_test_cases_from_yml(yml_file))
elif os.path.isfile(path) and path.endswith('.yml'):
test_cases.extend(get_test_cases_from_yml(path))
else:
print('Test case path is invalid. Should only happen when using @bot to skip unit tests.')
# filter keys are lower case. Map the lower-case keys to the original keys.
try:
key_mapping = {x.lower(): x for x in test_cases[0].keys()}
except IndexError:
key_mapping = dict()
if case_filter:
for key in case_filter:
filtered_cases = []
for case in test_cases:
try:
mapped_key = key_mapping[key]
# bot converts string to lower case
if isinstance(case[mapped_key], str):
_value = case[mapped_key].lower()
else:
_value = case[mapped_key]
if _value in case_filter[key]:
filtered_cases.append(case)
except KeyError:
# the case doesn't have this key; regard it as passing the filter
filtered_cases.append(case)
test_cases = filtered_cases
# sort cases by config and test function;
# in later stages, cases with similar attributes are more likely to be assigned to the same job,
# which reduces the number of DUT flash operations
test_cases.sort(key=lambda x: x['config'] + x['multi_stage'] + x['multi_device'])
return test_cases
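# Illustrative sketch (assumed field values) of the YAML this method consumes;
# the field names follow the accesses above, and filter keys are matched
# case-insensitively:
#
#     test cases:
#     - config: default
#       chip_target: esp32
#       multi_device: 'No'
#       multi_stage: 'No'
#       tags: [UT_T1_1]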
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('case_group', choices=['example_test', 'custom_test', 'unit_test', 'component_ut'])
parser.add_argument('test_case_paths', nargs='+', help='test case folder or file')
parser.add_argument('-c', '--config', default=os.path.join(IDF_PATH_FROM_ENV, '.gitlab', 'ci', 'target-test.yml'),
help='gitlab ci config file')
parser.add_argument('-o', '--output', help='output path of config files')
parser.add_argument('--pipeline_id', '-p', type=int, default=None, help='pipeline_id')
parser.add_argument('--test-case-file-pattern', help='file name pattern used to find Python test case files')
args = parser.parse_args()
SUPPORTED_TARGETS.extend(PREVIEW_TARGETS)
test_case_paths = [os.path.join(IDF_PATH_FROM_ENV, path) if not os.path.isabs(path) else path for path in
args.test_case_paths] # type: ignore
args_list = [test_case_paths, args.config]
if args.case_group == 'example_test':
assigner = ExampleAssignTest(*args_list)
elif args.case_group == 'custom_test':
assigner = TestAppsAssignTest(*args_list)
elif args.case_group == 'unit_test':
assigner = UnitTestAssignTest(*args_list)
elif args.case_group == 'component_ut':
assigner = ComponentUTAssignTest(*args_list)
else:
raise SystemExit(1) # which is impossible
if args.test_case_file_pattern:
assigner.CI_TEST_JOB_PATTERN = re.compile(r'{}'.format(args.test_case_file_pattern))
assigner.assign_cases()
assigner.output_configs(args.output)
assigner.create_artifact_index_file()

View File

@ -1,917 +0,0 @@
# SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
""" DUT for IDF applications """
import collections
import functools
import io
import os
import os.path
import re
import subprocess
import sys
import tempfile
import time
import pexpect
import serial
# python2 and python3 queue package name is different
try:
import Queue as _queue
except ImportError:
import queue as _queue # type: ignore
from serial.tools import list_ports
from tiny_test_fw import DUT, Utility
try:
import esptool
except ImportError: # cheat and use IDF's copy of esptool if available
idf_path = os.getenv('IDF_PATH')
if not idf_path or not os.path.exists(idf_path):
raise
sys.path.insert(0, os.path.join(idf_path, 'components', 'esptool_py', 'esptool'))
import esptool
try:
# esptool>=4.0
detect_chip = esptool.cmds.detect_chip
FatalError = esptool.util.FatalError
targets = esptool.targets
except (AttributeError, ModuleNotFoundError):
# esptool<4.0
detect_chip = esptool.ESPLoader.detect_chip
FatalError = esptool.FatalError
targets = esptool
import espefuse
import espsecure
class IDFToolError(OSError):
pass
class IDFDUTException(RuntimeError):
pass
class IDFRecvThread(DUT.RecvThread):
PERFORMANCE_PATTERN = re.compile(r'\[Performance]\[(\w+)]: ([^\r\n]+)\r?\n')
EXCEPTION_PATTERNS = [
re.compile(r"(Guru Meditation Error: Core\s+\d panic'ed \([\w].*?\))"),
re.compile(r'(abort\(\) was called at PC 0x[a-fA-F\d]{8} on core \d)'),
re.compile(r'(rst 0x\d+ \((?:TG\dWDT_SYS_RESET|TG\dWDT_CPU_RESET)\))')  # alternation grouped; the second alternative was previously ungrouped and missing the timer-group digit
]
BACKTRACE_PATTERN = re.compile(r'Backtrace:((\s(0x[0-9a-f]{8}):0x[0-9a-f]{8})+)')
BACKTRACE_ADDRESS_PATTERN = re.compile(r'(0x[0-9a-f]{8}):0x[0-9a-f]{8}')
def __init__(self, read, dut):
super(IDFRecvThread, self).__init__(read, dut)
self.exceptions = _queue.Queue()
self.performance_items = _queue.Queue()
def collect_performance(self, comp_data):
matches = self.PERFORMANCE_PATTERN.findall(comp_data)
for match in matches:
Utility.console_log('[Performance][{}]: {}'.format(match[0], match[1]), color='orange')
self.performance_items.put((match[0], match[1]))
def detect_exception(self, comp_data):
for pattern in self.EXCEPTION_PATTERNS:
start = 0
while True:
match = pattern.search(comp_data, pos=start)
if match:
start = match.end()
self.exceptions.put(match.group(0))
Utility.console_log('[Exception]: {}'.format(match.group(0)), color='red')
else:
break
def detect_backtrace(self, comp_data):
start = 0
while True:
match = self.BACKTRACE_PATTERN.search(comp_data, pos=start)
if match:
start = match.end()
Utility.console_log('[Backtrace]:{}'.format(match.group(1)), color='red')
# translate backtrace
addresses = self.BACKTRACE_ADDRESS_PATTERN.findall(match.group(1))
translated_backtrace = ''
for addr in addresses:
ret = self.dut.lookup_pc_address(addr)
if ret:
translated_backtrace += ret + '\n'
if translated_backtrace:
Utility.console_log('Translated backtrace:\n' + translated_backtrace, color='yellow')
else:
Utility.console_log('Failed to translate backtrace', color='yellow')
else:
break
CHECK_FUNCTIONS = [collect_performance, detect_exception, detect_backtrace]
def _uses_esptool(func):
""" Suspend listener thread, connect with esptool,
call target function with esptool instance,
then resume listening for output
"""
@functools.wraps(func)
def handler(self, *args, **kwargs):
self.stop_receive()
settings = self.port_inst.get_settings()
try:
if not self.rom_inst:
if not self.secure_boot_en:
self.rom_inst = detect_chip(self.port_inst)
else:
self.rom_inst = self.get_rom()(self.port_inst)
self.rom_inst.connect('hard_reset')
if (self.secure_boot_en):
esp = self.rom_inst
esp.flash_spi_attach(0)
else:
esp = self.rom_inst.run_stub()
ret = func(self, esp, *args, **kwargs)
# do hard reset after use esptool
esp.hard_reset()
finally:
# always need to restore port settings
self.port_inst.apply_settings(settings)
self.start_receive()
return ret
return handler
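# Usage sketch (illustrative, not part of the original module): decorated
# methods receive the connected esptool instance right after `self`; the
# decorator pauses the receive thread, connects, runs the body, hard-resets
# the chip and restores the original serial port settings:
#
#     @_uses_esptool
#     def read_mac_while_attached(self, esp):
#         return esp.read_mac()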
class IDFDUT(DUT.SerialDUT):
""" IDF DUT, extends serial with esptool methods
(Becomes aware of IDFApp instance which holds app-specific data)
"""
# the /dev/ttyAMA0 port is listed on Raspberry Pi
# the /dev/tty.Bluetooth-Incoming-Port port is listed on Mac
INVALID_PORT_PATTERN = re.compile(r'AMA|Bluetooth')
# whether to erase the NVS partition in start_app
ERASE_NVS = True
RECV_THREAD_CLS = IDFRecvThread
def __init__(self, name, port, log_file, app, allow_dut_exception=False, **kwargs):
super(IDFDUT, self).__init__(name, port, log_file, app, **kwargs)
self.allow_dut_exception = allow_dut_exception
self.exceptions = _queue.Queue()
self.performance_items = _queue.Queue()
self.rom_inst = None
self.secure_boot_en = self.app.get_sdkconfig_config_value('CONFIG_SECURE_BOOT') and \
not self.app.get_sdkconfig_config_value('CONFIG_EFUSE_VIRTUAL')
@classmethod
def get_rom(cls):
raise NotImplementedError('This is an abstraction class, method not defined.')
@classmethod
def get_mac(cls, app, port):
"""
get MAC address via esptool
:param app: application instance (to get tool)
:param port: serial port as string
:return: MAC address or None
"""
esp = None
try:
esp = cls.get_rom()(port)
esp.connect()
return esp.read_mac()
except RuntimeError:
return None
finally:
if esp:
# do hard reset after use esptool
esp.hard_reset()
esp._port.close()
@classmethod
def confirm_dut(cls, port, **kwargs):
inst = None
try:
expected_rom_class = cls.get_rom()
except NotImplementedError:
expected_rom_class = None
try:
# TODO: check whether 8266 works with this logic
# Otherwise overwrite it in ESP8266DUT
inst = detect_chip(port)
if expected_rom_class and type(inst) != expected_rom_class:
raise RuntimeError('Target not expected')
return inst.read_mac() is not None, get_target_by_rom_class(type(inst))
except (FatalError, RuntimeError):
return False, None
finally:
if inst is not None:
inst._port.close()
def _try_flash(self, erase_nvs):
"""
Called by start_app()
:return: None
"""
flash_files = []
encrypt_files = []
try:
# Open the files here to prevent us from having to seek back to 0
# each time. Before opening them, we have to organize the lists the
# way esptool.write_flash needs:
# If encrypt is provided, flash_files contains all the files to
# flash.
# Else, flash_files contains the files to be flashed as plain text
# and encrypt_files contains the ones to flash encrypted.
flash_files = self.app.flash_files
encrypt_files = self.app.encrypt_files
encrypt = self.app.flash_settings.get('encrypt', False)
if encrypt:
flash_files = encrypt_files
encrypt_files = []
else:
flash_files = [entry
for entry in flash_files
if entry not in encrypt_files]
flash_files = [(offs, open(path, 'rb')) for (offs, path) in flash_files]
encrypt_files = [(offs, open(path, 'rb')) for (offs, path) in encrypt_files]
if erase_nvs:
address = self.app.partition_table['nvs']['offset']
size = self.app.partition_table['nvs']['size']
nvs_file = tempfile.TemporaryFile()
nvs_file.write(b'\xff' * size)
nvs_file.seek(0)
if not isinstance(address, int):
address = int(address, 0)
# We have to check whether this file needs to be added to
# flash_files list or encrypt_files.
# Get the CONFIG_SECURE_FLASH_ENCRYPTION_MODE_DEVELOPMENT macro
# value. If it is set to True, then NVS is always encrypted.
sdkconfig_dict = self.app.get_sdkconfig()
macro_encryption = 'CONFIG_SECURE_FLASH_ENCRYPTION_MODE_DEVELOPMENT' in sdkconfig_dict
# If the macro is not enabled (plain text flash) or all files
# must be encrypted, add NVS to flash_files.
if not macro_encryption or encrypt:
flash_files.append((address, nvs_file))
else:
encrypt_files.append((address, nvs_file))
self.write_flash_data(flash_files, encrypt_files, False, encrypt)
finally:
for (_, f) in flash_files:
f.close()
for (_, f) in encrypt_files:
f.close()
@_uses_esptool
def write_flash_data(self, esp, flash_files=None, encrypt_files=None, ignore_flash_encryption_efuse_setting=True, encrypt=False):
"""
Try flashing at a particular baud rate.
Structured this way so @_uses_esptool will reconnect each time
:return: None
"""
last_error = None
for baud_rate in [921600, 115200]:
try:
# fake flasher args object, this is a hack until
# esptool Python API is improved
class FlashArgs(object):
def __init__(self, attributes):
for key, value in attributes.items():
self.__setattr__(key, value)
# write_flash expects the parameter encrypt_files to be None and not
# an empty list, so perform the check here
flash_args = FlashArgs({
'flash_size': self.app.flash_settings['flash_size'],
'flash_mode': self.app.flash_settings['flash_mode'],
'flash_freq': self.app.flash_settings['flash_freq'],
'addr_filename': flash_files or None,
'encrypt_files': encrypt_files or None,
'no_stub': self.secure_boot_en,
'compress': not self.secure_boot_en,
'verify': False,
'encrypt': encrypt,
'ignore_flash_encryption_efuse_setting': ignore_flash_encryption_efuse_setting,
'erase_all': False,
'after': 'no_reset',
'force': False,
'chip': esp.CHIP_NAME.lower().replace('-', ''),
})
esp.change_baud(baud_rate)
esptool.detect_flash_size(esp, flash_args)
esptool.write_flash(esp, flash_args)
break
except RuntimeError as e:
last_error = e
else:
raise last_error
def image_info(self, path_to_file):
"""
get esptool image_info output for a binary
:param path_to_file: path to the binary file
:return: image_info output, including the SHA-256 appended to the app
"""
old_stdout = sys.stdout
new_stdout = io.StringIO()
sys.stdout = new_stdout
class Args(object):
def __init__(self, attributes):
for key, value in attributes.items():
self.__setattr__(key, value)
args = Args({
'chip': self.TARGET,
'filename': path_to_file,
})
esptool.image_info(args)
output = new_stdout.getvalue()
sys.stdout = old_stdout
return output
def start_app(self, erase_nvs=ERASE_NVS):
"""
download and start app.
:param erase_nvs: whether to erase the NVS partition during flashing
:return: None
"""
self._try_flash(erase_nvs)
def start_app_no_enc(self):
"""
download and start app without flash encryption:
all files, including those marked for encryption, are flashed as plain text
:return: None
"""
flash_files = self.app.flash_files + self.app.encrypt_files
self.write_flash(flash_files)
def write_flash(self, flash_files=None, encrypt_files=None, ignore_flash_encryption_efuse_setting=True, encrypt=False):
"""
Flash files
:return: None
"""
flash_offs_files = []
encrypt_offs_files = []
try:
if flash_files:
flash_offs_files = [(offs, open(path, 'rb')) for (offs, path) in flash_files]
if encrypt_files:
encrypt_offs_files = [(offs, open(path, 'rb')) for (offs, path) in encrypt_files]
self.write_flash_data(flash_offs_files, encrypt_offs_files, ignore_flash_encryption_efuse_setting, encrypt)
finally:
for (_, f) in flash_offs_files:
f.close()
for (_, f) in encrypt_offs_files:
f.close()
def bootloader_flash(self):
"""
download bootloader.
:return: None
"""
bootloader_path = os.path.join(self.app.binary_path, 'bootloader', 'bootloader.bin')
offs = int(self.app.get_sdkconfig()['CONFIG_BOOTLOADER_OFFSET_IN_FLASH'], 0)
flash_files = [(offs, bootloader_path)]
self.write_flash(flash_files)
@_uses_esptool
def reset(self, esp):
"""
hard reset DUT
:return: None
"""
# decorator `_uses_esptool` will do the reset
# so we don't need to do anything in this method
pass
@_uses_esptool
def erase_partition(self, esp, partition):
"""
:param partition: partition name to erase
:return: None
"""
address = self.app.partition_table[partition]['offset']
size = self.app.partition_table[partition]['size']
esp.erase_region(address, size)
@_uses_esptool
def erase_flash(self, esp):
"""
erase the flash completely
:return: None
"""
esp.erase_flash()
@_uses_esptool
def dump_flash(self, esp, output_file, **kwargs):
"""
dump flash
:param output_file: output file name; a relative path is resolved against the app log folder.
:keyword partition: partition name, dump the partition.
``partition`` is preferred over using ``address`` and ``size``.
:keyword address: dump from address (need to be used with size)
:keyword size: dump size (need to be used with address)
:return: None
"""
if not os.path.isabs(output_file):
# resolve the relative path against the app log folder
output_file = os.path.join(self.app.get_log_folder(), output_file)
if 'partition' in kwargs:
partition = self.app.partition_table[kwargs['partition']]
_address = partition['offset']
_size = partition['size']
elif 'address' in kwargs and 'size' in kwargs:
_address = kwargs['address']
_size = kwargs['size']
else:
raise IDFToolError("You must specify 'partition' or ('address' and 'size') to dump flash")
content = esp.read_flash(_address, _size)
with open(output_file, 'wb') as f:
f.write(content)
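# Usage sketch (illustrative file names): dump a named partition, or a raw
# region given by address and size:
#
#     dut.dump_flash('nvs.bin', partition='nvs')
#     dut.dump_flash('flash_head.bin', address=0x0, size=0x1000)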
@staticmethod
def _sort_usb_ports(ports):
"""
Move the usb ports to the very beginning
:param ports: list of ports
:return: list of ports with usb ports at beginning
"""
usb_ports = []
rest_ports = []
for port in ports:
if 'usb' in port.lower():
usb_ports.append(port)
else:
rest_ports.append(port)
return usb_ports + rest_ports
@classmethod
def list_available_ports(cls):
# It will return other kinds of ports as well, such as ttyS* ports.
# Give the usb ports higher priority
ports = cls._sort_usb_ports([x.device for x in list_ports.comports()])
espport = os.getenv('ESPPORT')
if not espport:
# It's a little hard to filter out invalid ports with `serial.tools.list_ports.grep()`:
# the check condition in `grep` is `if r.search(port) or r.search(desc) or r.search(hwid)`,
# which means all 3 conditions must fail for a port to be filtered out.
# So some of the filters would not be straightforward to users,
# and a negative regular expression (`^((?!aa|bb|cc).)*$`) is not easy to understand.
# Filtering out invalid ports ourselves is much simpler.
return [x for x in ports if not cls.INVALID_PORT_PATTERN.search(x)]
# On macOS with Python 3.6, espport is already a str
if isinstance(espport, type(u'')):
port_hint = espport
else:
port_hint = espport.decode('utf8')
# If $ESPPORT is a valid port, make it appear first in the list
if port_hint in ports:
ports.remove(port_hint)
return [port_hint] + ports
# On macOS, user may set ESPPORT to /dev/tty.xxx while
# pySerial lists only the corresponding /dev/cu.xxx port
if sys.platform == 'darwin' and 'tty.' in port_hint:
port_hint = port_hint.replace('tty.', 'cu.')
if port_hint in ports:
ports.remove(port_hint)
return [port_hint] + ports
return ports
def lookup_pc_address(self, pc_addr):
cmd = ['%saddr2line' % self.TOOLCHAIN_PREFIX,
'-pfiaC', '-e', self.app.elf_file, pc_addr]
ret = ''
try:
translation = subprocess.check_output(cmd)
ret = translation.decode()
except OSError:
pass
return ret
@staticmethod
def _queue_read_all(source_queue):
output = []
while True:
try:
output.append(source_queue.get(timeout=0))
except _queue.Empty:
break
return output
def _queue_copy(self, source_queue, dest_queue):
data = self._queue_read_all(source_queue)
for d in data:
dest_queue.put(d)
def _get_from_queue(self, queue_name):
self_queue = getattr(self, queue_name)
if self.receive_thread:
recv_thread_queue = getattr(self.receive_thread, queue_name)
self._queue_copy(recv_thread_queue, self_queue)
return self._queue_read_all(self_queue)
def stop_receive(self):
if self.receive_thread:
for name in ['performance_items', 'exceptions']:
source_queue = getattr(self.receive_thread, name)
dest_queue = getattr(self, name)
self._queue_copy(source_queue, dest_queue)
super(IDFDUT, self).stop_receive()
def get_exceptions(self):
""" Get exceptions detected by DUT receive thread. """
return self._get_from_queue('exceptions')
def get_performance_items(self):
"""
The DUT receive thread automatically collects performance results matching the pattern ``[Performance][name]: value\n``.
This method is used to get all performance results.
:return: a list of performance items.
"""
return self._get_from_queue('performance_items')
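# Illustrative example (assumed tag and value): an app printing the line
#     [Performance][dhrystone]: 412 DMIPS
# makes ('dhrystone', '412 DMIPS') show up in get_performance_items().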
def close(self):
super(IDFDUT, self).close()
if not self.allow_dut_exception and self.get_exceptions():
raise IDFDUTException('DUT exception detected on {}'.format(self))
class ESP32DUT(IDFDUT):
TARGET = 'esp32'
TOOLCHAIN_PREFIX = 'xtensa-esp32-elf-'
@classmethod
def get_rom(cls):
return targets.ESP32ROM
class ESP32S2DUT(IDFDUT):
TARGET = 'esp32s2'
TOOLCHAIN_PREFIX = 'xtensa-esp32s2-elf-'
@classmethod
def get_rom(cls):
return targets.ESP32S2ROM
class ESP32S3DUT(IDFDUT):
TARGET = 'esp32s3'
TOOLCHAIN_PREFIX = 'xtensa-esp32s3-elf-'
@classmethod
def get_rom(cls):
return targets.ESP32S3ROM
def erase_partition(self, esp, partition):
raise NotImplementedError()
class ESP32C2DUT(IDFDUT):
TARGET = 'esp32c2'
TOOLCHAIN_PREFIX = 'riscv32-esp-elf-'
@classmethod
def get_rom(cls):
return targets.ESP32C2ROM
class ESP32C3DUT(IDFDUT):
TARGET = 'esp32c3'
TOOLCHAIN_PREFIX = 'riscv32-esp-elf-'
@classmethod
def get_rom(cls):
return targets.ESP32C3ROM
class ESP32C6DUT(IDFDUT):
TARGET = 'esp32c6'
TOOLCHAIN_PREFIX = 'riscv32-esp-elf-'
@classmethod
def get_rom(cls):
return targets.ESP32C6ROM
class ESP32H2DUT(IDFDUT):
TARGET = 'esp32h2'
TOOLCHAIN_PREFIX = 'riscv32-esp-elf-'
@classmethod
def get_rom(cls):
return targets.ESP32H2ROM
class ESP8266DUT(IDFDUT):
TARGET = 'esp8266'
TOOLCHAIN_PREFIX = 'xtensa-lx106-elf-'
@classmethod
def get_rom(cls):
return targets.ESP8266ROM
def get_target_by_rom_class(cls):
for c in [ESP32DUT, ESP32S2DUT, ESP32S3DUT, ESP32C2DUT, ESP32C3DUT, ESP32C6DUT, ESP32H2DUT, ESP8266DUT, IDFQEMUDUT]:
if c.get_rom() == cls:
return c.TARGET
return None
class IDFQEMUDUT(IDFDUT):
TARGET = None
TOOLCHAIN_PREFIX = None
ERASE_NVS = True
DEFAULT_EXPECT_TIMEOUT = 30 # longer timeout, since app startup takes more time in QEMU (due to slow SHA emulation)
QEMU_SERIAL_PORT = 3334
def __init__(self, name, port, log_file, app, allow_dut_exception=False, **kwargs):
self.flash_image = tempfile.NamedTemporaryFile('rb+', suffix='.bin', prefix='qemu_flash_img')
self.app = app
self.flash_size = 4 * 1024 * 1024
self._write_flash_img()
args = [
'qemu-system-xtensa',
'-nographic',
'-machine', self.TARGET,
'-drive', 'file={},if=mtd,format=raw'.format(self.flash_image.name),
'-nic', 'user,model=open_eth',
'-serial', 'tcp::{},server,nowait'.format(self.QEMU_SERIAL_PORT),
'-S',
'-global driver=timer.esp32.timg,property=wdt_disable,value=true']
# TODO(IDF-1242): generate a temporary efuse binary, pass it to QEMU
if 'QEMU_BIOS_PATH' in os.environ:
args += ['-L', os.environ['QEMU_BIOS_PATH']]
self.qemu = pexpect.spawn(' '.join(args), timeout=self.DEFAULT_EXPECT_TIMEOUT)
self.qemu.expect_exact(b'(qemu)')
super(IDFQEMUDUT, self).__init__(name, port, log_file, app, allow_dut_exception=allow_dut_exception, **kwargs)
def _write_flash_img(self):
self.flash_image.seek(0)
self.flash_image.write(b'\x00' * self.flash_size)
for offs, path in self.app.flash_files:
with open(path, 'rb') as flash_file:
contents = flash_file.read()
self.flash_image.seek(offs)
self.flash_image.write(contents)
self.flash_image.flush()
@classmethod
def get_rom(cls):
return targets.ESP32ROM
@classmethod
def get_mac(cls, app, port):
# TODO(IDF-1242): get this from QEMU/efuse binary
return '11:22:33:44:55:66'
@classmethod
def confirm_dut(cls, port, **kwargs):
return True, cls.TARGET
def start_app(self, erase_nvs=ERASE_NVS):
# TODO: implement erase_nvs
# since the flash image is generated every time in the constructor, maybe this isn't needed...
self.qemu.sendline(b'cont\n')
self.qemu.expect_exact(b'(qemu)')
def reset(self):
self.qemu.sendline(b'system_reset\n')
self.qemu.expect_exact(b'(qemu)')
def erase_partition(self, partition):
raise NotImplementedError('method erase_partition not implemented')
def erase_flash(self):
raise NotImplementedError('method erase_flash not implemented')
def dump_flash(self, output_file, **kwargs):
raise NotImplementedError('method dump_flash not implemented')
@classmethod
def list_available_ports(cls):
return ['socket://localhost:{}'.format(cls.QEMU_SERIAL_PORT)]
def close(self):
super(IDFQEMUDUT, self).close()
self.qemu.sendline(b'q\n')
self.qemu.expect_exact(b'(qemu)')
for _ in range(self.DEFAULT_EXPECT_TIMEOUT):
if not self.qemu.isalive():
break
time.sleep(1)
else:
self.qemu.terminate(force=True)
class ESP32QEMUDUT(IDFQEMUDUT):
TARGET = 'esp32' # type: ignore
TOOLCHAIN_PREFIX = 'xtensa-esp32-elf-' # type: ignore
class IDFFPGADUT(IDFDUT):
TARGET = None # type: str
TOOLCHAIN_PREFIX = None # type: str
ERASE_NVS = True
FLASH_ENCRYPT_SCHEME = None # type: str
FLASH_ENCRYPT_CNT_KEY = None # type: str
FLASH_ENCRYPT_CNT_VAL = 0
FLASH_ENCRYPT_PURPOSE = None # type: str
SECURE_BOOT_EN_KEY = None # type: str
SECURE_BOOT_EN_VAL = 0
FLASH_SECTOR_SIZE = 4096
def __init__(self, name, port, log_file, app, allow_dut_exception=False, efuse_reset_port=None, **kwargs):
super(IDFFPGADUT, self).__init__(name, port, log_file, app, allow_dut_exception=allow_dut_exception, **kwargs)
self.esp = self.get_rom()(port)
self.efuses = None
self.efuse_operations = None
self.efuse_reset_port = efuse_reset_port
@classmethod
def get_rom(cls):
raise NotImplementedError('This is an abstraction class, method not defined.')
def erase_partition(self, esp, partition):
raise NotImplementedError()
def enable_efuses(self):
# We use an extra COM port to reset the efuses on the FPGA:
# connect the DTR pin of the COM port to the efuse reset pin on the daughter board
# and set the EFUSEPORT env variable to that extra COM port
if not self.efuse_reset_port:
raise RuntimeError('EFUSEPORT not specified')
# Stop any previous serial port operation
self.stop_receive()
if self.secure_boot_en:
self.esp.connect()
self.efuses, self.efuse_operations = espefuse.get_efuses(self.esp, False, False, True)
def burn_efuse(self, field, val):
if not self.efuse_operations:
self.enable_efuses()
BurnEfuseArgs = collections.namedtuple('burn_efuse_args', ['name_value_pairs'])
args = BurnEfuseArgs({field: val})
self.efuse_operations.burn_efuse(self.esp, self.efuses, args)
def burn_efuse_key(self, key, purpose, block):
if not self.efuse_operations:
self.enable_efuses()
BurnKeyArgs = collections.namedtuple('burn_key_args',
['keyfile', 'keypurpose', 'block',
'force_write_always', 'no_write_protect', 'no_read_protect'])
args = BurnKeyArgs([key],
[purpose],
[block],
False, False, False)
self.efuse_operations.burn_key(self.esp, self.efuses, args)
def burn_efuse_key_digest(self, key, purpose, block):
if not self.efuse_operations:
self.enable_efuses()
BurnDigestArgs = collections.namedtuple('burn_key_digest_args',
['keyfile', 'keypurpose', 'block',
'force_write_always', 'no_write_protect', 'no_read_protect'])
args = BurnDigestArgs([open(key, 'rb')],
[purpose],
[block],
False, False, True)
self.efuse_operations.burn_key_digest(self.esp, self.efuses, args)
def reset_efuses(self):
if not self.efuse_reset_port:
raise RuntimeError('EFUSEPORT not specified')
with serial.Serial(self.efuse_reset_port) as efuseport:
print('Resetting efuses')
efuseport.dtr = 0
self.port_inst.setRTS(1)
self.port_inst.setRTS(0)
time.sleep(1)
efuseport.dtr = 1
self.efuse_operations = None
self.efuses = None
def sign_data(self, data_file, key_files, version, append_signature=0):
SignDataArgs = collections.namedtuple('sign_data_args',
['datafile','keyfile','output', 'version', 'append_signatures'])
outfile = tempfile.NamedTemporaryFile()
args = SignDataArgs(data_file, key_files, outfile.name, str(version), append_signature)
espsecure.sign_data(args)
outfile.seek(0)
return outfile.read()
class ESP32C3FPGADUT(IDFFPGADUT):
TARGET = 'esp32c3'
TOOLCHAIN_PREFIX = 'riscv32-esp-elf-'
FLASH_ENCRYPT_SCHEME = 'AES-XTS'
FLASH_ENCRYPT_CNT_KEY = 'SPI_BOOT_CRYPT_CNT'
FLASH_ENCRYPT_CNT_VAL = 1
FLASH_ENCRYPT_PURPOSE = 'XTS_AES_128_KEY'
SECURE_BOOT_EN_KEY = 'SECURE_BOOT_EN'
SECURE_BOOT_EN_VAL = 1
@classmethod
def get_rom(cls):
return targets.ESP32C3ROM
def erase_partition(self, esp, partition):
raise NotImplementedError()
def flash_encrypt_burn_cnt(self):
self.burn_efuse(self.FLASH_ENCRYPT_CNT_KEY, self.FLASH_ENCRYPT_CNT_VAL)
def flash_encrypt_burn_key(self, key, block=0):
self.burn_efuse_key(key, self.FLASH_ENCRYPT_PURPOSE, 'BLOCK_KEY%d' % block)
def flash_encrypt_get_scheme(self):
return self.FLASH_ENCRYPT_SCHEME
def secure_boot_burn_en_bit(self):
self.burn_efuse(self.SECURE_BOOT_EN_KEY, self.SECURE_BOOT_EN_VAL)
def secure_boot_burn_digest(self, digest, key_index=0, block=0):
self.burn_efuse_key_digest(digest, 'SECURE_BOOT_DIGEST%d' % key_index, 'BLOCK_KEY%d' % block)
@classmethod
def confirm_dut(cls, port, **kwargs):
return True, cls.TARGET
class ESP32S3FPGADUT(IDFFPGADUT):
TARGET = 'esp32s3'
TOOLCHAIN_PREFIX = 'xtensa-esp32s3-elf-'
FLASH_ENCRYPT_SCHEME = 'AES-XTS'
FLASH_ENCRYPT_CNT_KEY = 'SPI_BOOT_CRYPT_CNT'
FLASH_ENCRYPT_CNT_VAL = 1
FLASH_ENCRYPT_PURPOSE = 'XTS_AES_128_KEY'
SECURE_BOOT_EN_KEY = 'SECURE_BOOT_EN'
SECURE_BOOT_EN_VAL = 1
@classmethod
def get_rom(cls):
return targets.ESP32S3ROM
def erase_partition(self, esp, partition):
raise NotImplementedError()
def flash_encrypt_burn_cnt(self):
self.burn_efuse(self.FLASH_ENCRYPT_CNT_KEY, self.FLASH_ENCRYPT_CNT_VAL)
def flash_encrypt_burn_key(self, key, block=0):
self.burn_efuse_key(key, self.FLASH_ENCRYPT_PURPOSE, 'BLOCK_KEY%d' % block)
def flash_encrypt_get_scheme(self):
return self.FLASH_ENCRYPT_SCHEME
def secure_boot_burn_en_bit(self):
self.burn_efuse(self.SECURE_BOOT_EN_KEY, self.SECURE_BOOT_EN_VAL)
def secure_boot_burn_digest(self, digest, key_index=0, block=0):
self.burn_efuse_key_digest(digest, 'SECURE_BOOT_DIGEST%d' % key_index, 'BLOCK_KEY%d' % block)
@classmethod
def confirm_dut(cls, port, **kwargs):
return True, cls.TARGET

View File

@ -1,312 +0,0 @@
# SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import functools
import json
import logging
import os
import re
from collections import defaultdict
from copy import deepcopy
import junit_xml
from tiny_test_fw import TinyFW, Utility
from .IDFApp import UT, ComponentUTApp, Example, IDFApp, LoadableElfTestApp, TestApp # noqa: export all Apps for users
from .IDFDUT import (ESP32C2DUT, ESP32C3DUT, ESP32C3FPGADUT, ESP32C6DUT, ESP32DUT, # noqa: export DUTs for users
ESP32H2DUT, ESP32QEMUDUT, ESP32S2DUT, ESP32S3DUT, ESP32S3FPGADUT, ESP8266DUT, IDFDUT)
from .unity_test_parser import TestFormat, TestResults
# pass TARGET_DUT_CLS_DICT to Env.py to avoid circular dependency issue.
TARGET_DUT_CLS_DICT = {
'ESP32': ESP32DUT,
'ESP32S2': ESP32S2DUT,
'ESP32S3': ESP32S3DUT,
'ESP32C2': ESP32C2DUT,
'ESP32C3': ESP32C3DUT,
'ESP32C3FPGA': ESP32C3FPGADUT,
'ESP32S3FPGA': ESP32S3FPGADUT,
'ESP32C6': ESP32C6DUT,
'ESP32H2': ESP32H2DUT,
}
try:
string_type = basestring # type: ignore
except NameError:
string_type = str
def upper_list_or_str(text):
"""
Return a list with the uppercase version of a string or of a list of strings.
Other data types are returned unchanged.
:param text: list or string; any other instance is returned immediately
:return: list of uppercase strings
"""
if isinstance(text, string_type):
return [text.upper()]
elif isinstance(text, list):
return [item.upper() for item in text]
else:
return text
def local_test_check(decorator_target):
# Try to read the IDF_TARGET value from sdkconfig.json; if not set, default to ESP32.
# For CI jobs this is only a placeholder: the real target and DUT are
# overwritten by the job config YAML file.
idf_target = 'ESP32' # default if sdkconfig not found or not readable
if os.getenv('CI_JOB_ID'): # Only auto-detect target when running locally
return idf_target
decorator_target = upper_list_or_str(decorator_target)
expected_json_path = os.path.join('build', 'config', 'sdkconfig.json')
if os.path.exists(expected_json_path):
sdkconfig = json.load(open(expected_json_path))
try:
idf_target = sdkconfig['IDF_TARGET'].upper()
except KeyError:
logging.debug('IDF_TARGET not in {}. IDF_TARGET set to esp32'.format(os.path.abspath(expected_json_path)))
else:
logging.debug('IDF_TARGET: {}'.format(idf_target))
else:
logging.debug('{} not found. IDF_TARGET set to esp32'.format(os.path.abspath(expected_json_path)))
if isinstance(decorator_target, list):
if idf_target not in decorator_target:
fpga_target = ''.join((idf_target, 'FPGA'))
if fpga_target not in decorator_target:
raise ValueError('IDF_TARGET set to {}, not in decorator target value'.format(idf_target))
else:
idf_target = fpga_target
else:
if idf_target != decorator_target:
raise ValueError('IDF_TARGET set to {}, not equal to decorator target value'.format(idf_target))
return idf_target
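# Illustrative sketch (assumed file content): with build/config/sdkconfig.json
# containing {"IDF_TARGET": "esp32s2"}, local_test_check(['esp32', 'esp32s2'])
# returns 'ESP32S2', while local_test_check('esp32') raises ValueError because
# the configured target is not in the decorator's target list.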
def get_dut_class(target, dut_class_dict, erase_nvs=None):
if target not in dut_class_dict:
raise Exception('target can only be {%s} (case insensitive)' % ', '.join(dut_class_dict.keys()))
dut = dut_class_dict[target.upper()]
if erase_nvs:
dut.ERASE_NVS = True  # any truthy value enables erasing NVS in start_app()
return dut
def ci_target_check(func):
@functools.wraps(func)
def wrapper(**kwargs):
target = upper_list_or_str(kwargs.get('target', []))
ci_target = upper_list_or_str(kwargs.get('ci_target', []))
if not set(ci_target).issubset(set(target)):
raise ValueError('ci_target must be a subset of target')
return func(**kwargs)
return wrapper
def test_func_generator(func, app, target, ci_target, module, execution_time, level, erase_nvs, nightly_run, **kwargs):
target = upper_list_or_str(target)
test_target = local_test_check(target)
if 'additional_duts' in kwargs:
dut_classes = deepcopy(TARGET_DUT_CLS_DICT)
dut_classes.update(kwargs['additional_duts'])
else:
dut_classes = TARGET_DUT_CLS_DICT
dut = get_dut_class(test_target, dut_classes, erase_nvs)
original_method = TinyFW.test_method(
app=app, dut=dut, target=target, ci_target=upper_list_or_str(ci_target),
module=module, execution_time=execution_time, level=level, erase_nvs=erase_nvs,
dut_dict=dut_classes, nightly_run=nightly_run, **kwargs
)
test_func = original_method(func)
return test_func
@ci_target_check
def idf_example_test(app=Example, target='ESP32', ci_target=None, module='examples', execution_time=1,
level='example', erase_nvs=True, config_name=None, nightly_run=False, **kwargs):
"""
decorator for testing idf examples (with default values for some keyword args).
:param app: test application class
:param target: target supported, string or list
:param ci_target: targets automatically run in CI; if None then all targets will be tested; None, string or list
:param module: module, string
:param execution_time: execution time in minutes, int
:param level: test level, could be used to filter test cases, string
:param erase_nvs: whether to erase the NVS partition in DUT.start_app()
:param config_name: if specified, name of the app configuration
:param kwargs: other keyword args
:return: test method
"""
def test(func):
return test_func_generator(func, app, target, ci_target, module, execution_time, level, erase_nvs, nightly_run,
**kwargs)
return test
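# A minimal usage sketch (hypothetical example; `env` and `extra_data` are the
# arguments TinyFW passes to every decorated test function):
#
#     @idf_example_test(target=['ESP32', 'ESP32S2'], ci_target=['ESP32'])
#     def test_examples_hello_world(env, extra_data):
#         dut = env.get_dut('hello_world', 'examples/get-started/hello_world')
#         dut.start_app()
#         dut.expect('Hello world!')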
@ci_target_check
def idf_unit_test(app=UT, target='ESP32', ci_target=None, module='unit-test', execution_time=1,
level='unit', erase_nvs=True, nightly_run=False, **kwargs):
"""
decorator for testing idf unit tests (with default values for some keyword args).
:param app: test application class
:param target: target supported, string or list
:param ci_target: targets automatically run in CI; if None, all supported targets will be tested. None, string or list
:param module: module, string
:param execution_time: execution time in minutes, int
:param level: test level, could be used to filter test cases, string
:param erase_nvs: whether to erase NVS in DUT.start_app()
:param kwargs: other keyword args
:return: test method
"""
def test(func):
return test_func_generator(func, app, target, ci_target, module, execution_time, level, erase_nvs, nightly_run,
**kwargs)
return test
@ci_target_check
def idf_custom_test(app=TestApp, target='ESP32', ci_target=None, module='misc', execution_time=1,
level='integration', erase_nvs=True, config_name=None, nightly_run=False, **kwargs):
"""
decorator for idf custom tests (with default values for some keyword args).
:param app: test application class
:param target: target supported, string or list
:param ci_target: targets automatically run in CI; if None, all supported targets will be tested. None, string or list
:param module: module, string
:param execution_time: execution time in minutes, int
:param level: test level, could be used to filter test cases, string
:param erase_nvs: whether to erase NVS in DUT.start_app()
:param config_name: if specified, name of the app configuration
:param kwargs: other keyword args
:return: test method
"""
def test(func):
return test_func_generator(func, app, target, ci_target, module, execution_time, level, erase_nvs, nightly_run,
**kwargs)
return test
@ci_target_check
def idf_component_unit_test(app=ComponentUTApp, target='ESP32', ci_target=None, module='misc', execution_time=1,
level='integration', erase_nvs=True, config_name=None, nightly_run=False, **kwargs):
"""
decorator for idf component unit tests (with default values for some keyword args).
:param app: test application class
:param target: target supported, string or list
:param ci_target: targets automatically run in CI; if None, all supported targets will be tested. None, string or list
:param module: module, string
:param execution_time: execution time in minutes, int
:param level: test level, could be used to filter test cases, string
:param erase_nvs: whether to erase NVS in DUT.start_app()
:param config_name: if specified, name of the app configuration
:param kwargs: other keyword args
:return: test method
"""
def test(func):
return test_func_generator(func, app, target, ci_target, module, execution_time, level, erase_nvs, nightly_run,
**kwargs)
return test
class ComponentUTResult:
"""
Function Class, parse component unit test results
"""
results_list = defaultdict(list) # type: dict[str, list[junit_xml.TestSuite]]
"""
For original Unity test cases using the "TEST" macro, please set "test_format" to "TestFormat.UNITY_FIXTURE_VERBOSE".
For IDF unity test cases using the "TEST_CASE" macro, please set "test_format" to "TestFormat.UNITY_BASIC".
"""
@staticmethod
def parse_result(stdout, test_format=TestFormat.UNITY_FIXTURE_VERBOSE):
try:
results = TestResults(stdout, test_format)
except (ValueError, TypeError) as e:
raise ValueError('Error occurred while parsing the component unit test stdout into a JUnit report: ' + str(e))
group_name = results.tests()[0].group()
ComponentUTResult.results_list[group_name].append(results.to_junit())
with open(os.path.join(os.getenv('LOG_PATH', ''), '{}_XUNIT_RESULT.xml'.format(group_name)), 'w') as fw:
junit_xml.to_xml_report_file(fw, ComponentUTResult.results_list[group_name])
if results.num_failed():
# raise exception if any case fails
err_msg = 'Failed Cases:\n'
for test_case in results.test_iter():
if test_case.result() == 'FAIL':
err_msg += '\t{}: {}\n'.format(test_case.name(), test_case.message())
raise AssertionError(err_msg)
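# A usage sketch, assuming `dut` is a ttfw DUT whose raw-data capture was
# enabled with dut.start_capture_raw_data():
#
#     ComponentUTResult.parse_result(dut.stop_capture_raw_data(),
#                                    test_format=TestFormat.UNITY_BASIC)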
def log_performance(item, value):
"""
Print a performance item to the console in a pre-defined format.
:param item: performance item name
:param value: performance value
"""
performance_msg = '[Performance][{}]: {}'.format(item, value)
Utility.console_log(performance_msg, 'orange')
# update to junit test report
current_junit_case = TinyFW.JunitReport.get_current_test_case()
current_junit_case.stdout += performance_msg + '\r\n'
def check_performance(item, value, target):
"""
check whether an IDF performance item meets the pass standard
:param item: performance item name
:param value: performance item value
:param target: target chip
:raise: AssertionError: if check fails
"""
def _find_perf_item(path):
with open(path, 'r') as f:
data = f.read()
match = re.search(r'#define\s+IDF_PERFORMANCE_(MIN|MAX)_{}\s+([\d.]+)'.format(item.upper()), data)
return match.group(1), float(match.group(2))
def _check_perf(op, standard_value):
if op == 'MAX':
ret = value <= standard_value
else:
ret = value >= standard_value
if not ret:
raise AssertionError("[Performance] {} value is {}, doesn't meet pass standard {}"
.format(item, value, standard_value))
path_prefix = os.path.join(IDFApp.get_sdk_path(), 'components', 'idf_test', 'include')
performance_files = (os.path.join(path_prefix, target, 'idf_performance_target.h'),
os.path.join(path_prefix, 'idf_performance.h'))
for performance_file in performance_files:
try:
op, standard = _find_perf_item(performance_file)
except (IOError, AttributeError):
# performance file doesn't exist or match is not found in it
continue
_check_perf(op, standard)
# if no exception was thrown then the performance is met and no need to continue
break
else:
raise AssertionError('Failed to get performance standard for {}'.format(item))
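# For reference, the threshold entries matched by _find_perf_item look like
# (hypothetical item name and value):
#
#     #define IDF_PERFORMANCE_MAX_EXAMPLE_ITEM    100
#
# where MAX requires value <= threshold and MIN requires value >= threshold.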

View File

@ -1,377 +0,0 @@
# SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
"""
Modified version of https://github.com/ETCLabs/unity-test-parser/blob/develop/unity_test_parser.py,
since only Python 3.6 or higher has ``enum.auto()``.
unity_test_parser.py
Parse the output of the Unity Test Framework for C. Parsed results are held in the TestResults
object format, which can then be converted to various XML formats.
"""
import enum
import re
import junit_xml
_NORMAL_TEST_REGEX = re.compile(r'(?P<file>.+):(?P<line>\d+):(?P<test_name>[^\s:]+):(?P<result>PASS|FAIL|IGNORE)(?:: (?P<message>.+))?')
_UNITY_FIXTURE_VERBOSE_PREFIX_REGEX = re.compile(r'(?P<prefix>TEST\((?P<test_group>[^\s,]+), (?P<test_name>[^\s\)]+)\))(?P<remainder>.+)?$')
_UNITY_FIXTURE_REMAINDER_REGEX = re.compile(r'^(?P<file>.+):(?P<line>\d+)::(?P<result>PASS|FAIL|IGNORE)(?:: (?P<message>.+))?')
_TEST_SUMMARY_BLOCK_REGEX = re.compile(
r'^(?P<num_tests>\d+) Tests (?P<num_failures>\d+) Failures (?P<num_ignored>\d+) Ignored\s*\r?\n(?P<overall_result>OK|FAIL)(?:ED)?', re.MULTILINE
)
_TEST_RESULT_ENUM = ['PASS', 'FAIL', 'IGNORE']
class TestFormat(enum.Enum):
"""Represents the flavor of Unity used to produce a given output."""
UNITY_BASIC = 0
# UNITY_FIXTURE = enum.auto()
UNITY_FIXTURE_VERBOSE = 1
globals().update(TestFormat.__members__)
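# Export the enum members (UNITY_BASIC, UNITY_FIXTURE_VERBOSE) as module-level names.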
class TestStats:
"""Statistics about a test collection"""
def __init__(self):
self.total = 0
self.passed = 0
self.failed = 0
self.ignored = 0
def __eq__(self, other):
if isinstance(other, self.__class__):
return (self.total == other.total
and self.passed == other.passed
and self.failed == other.failed
and self.ignored == other.ignored)
return False
class TestResult:
"""
Class representing the result of a single test.
Contains the test name, its result (either PASS, FAIL or IGNORE), the file and line number if
the test result was not PASS, and an optional message.
"""
def __init__(
self,
test_name,
result,
group='default',
file='',
line=0,
message='',
full_line='',
):
if result not in _TEST_RESULT_ENUM:
raise ValueError('result must be one of {}.'.format(_TEST_RESULT_ENUM))
self._test_name = test_name
self._result = result
self._group = group
self._message = message
self._full_line = full_line
if result != 'PASS':
self._file = file
self._line = line
else:
self._file = ''
self._line = 0
def file(self):
"""The file name - returns empty string if the result is PASS."""
return self._file
def line(self):
"""The line number - returns 0 if the result is PASS."""
return self._line
def name(self):
"""The test name."""
return self._test_name
def result(self):
"""The test result, one of PASS, FAIL or IGNORED."""
return self._result
def group(self):
"""
The test group, if applicable.
For basic Unity output, this will always be "default".
"""
return self._group
def message(self):
"""The accompanying message - returns empty string if the result is PASS."""
return self._message
def full_line(self):
"""The original, full line of unit test output that this object was created from."""
return self._full_line
class TestResults:
"""
Class representing Unity test results.
After being initialized with raw test output, it parses the output and represents it as a list
of TestResult objects which can be inspected or converted to other types of output, e.g. JUnit
XML.
"""
def __init__(self, test_output, test_format=TestFormat.UNITY_BASIC):
"""
Create a new TestResults object from Unity test output.
Keyword arguments:
test_output -- The full test console output, must contain the overall result and summary
block at the bottom.
Optional arguments:
test_format -- TestFormat enum representing the flavor of Unity used to create the output.
Exceptions:
ValueError, if the test output is not formatted properly.
"""
self._tests = []
self._test_stats = self._find_summary_block(test_output)
if test_format is TestFormat.UNITY_BASIC:
self._parse_unity_basic(test_output)
elif test_format is TestFormat.UNITY_FIXTURE_VERBOSE:
self._parse_unity_fixture_verbose(test_output)
else:
raise ValueError(
'test_format must be one of UNITY_BASIC or UNITY_FIXTURE_VERBOSE.'
)
def num_tests(self):
"""The total number of tests parsed."""
return self._test_stats.total
def num_passed(self):
"""The number of tests with result PASS."""
return self._test_stats.passed
def num_failed(self):
"""The number of tests with result FAIL."""
return self._test_stats.failed
def num_ignored(self):
"""The number of tests with result IGNORE."""
return self._test_stats.ignored
def test_iter(self):
"""Get an iterator for iterating over individual tests.
Returns an iterator over TestResult objects.
Example:
for test in unity_results.test_iter():
print(test.name())
"""
return iter(self._tests)
def tests(self):
"""Get a list of all the tests (TestResult objects)."""
return self._tests
def to_junit(
self, suite_name='all_tests',
):
"""
Convert the tests to JUnit XML.
Returns a junit_xml.TestSuite containing all of the test cases. One test suite will be
generated with the name given in suite_name. Unity Fixture test groups are mapped to the
classname attribute of test cases; for basic Unity output there will be one class named
"default".
Optional arguments:
suite_name -- The name to use for the "name" and "package" attributes of the testsuite element.
Sample output:
<testsuite disabled="0" errors="0" failures="1" name="[suite_name]" package="[suite_name]" skipped="0" tests="8" time="0">
<testcase classname="test_group_1" name="group_1_test" />
<testcase classname="test_group_2" name="group_2_test" />
</testsuite>
"""
test_case_list = []
for test in self._tests:
if test.result() == 'PASS':
test_case_list.append(
junit_xml.TestCase(name=test.name(), classname=test.group())
)
else:
junit_tc = junit_xml.TestCase(
name=test.name(),
classname=test.group(),
file=test.file(),
line=test.line(),
)
if test.result() == 'FAIL':
junit_tc.add_failure_info(
message=test.message(), output=test.full_line()
)
elif test.result() == 'IGNORE':
junit_tc.add_skipped_info(
message=test.message(), output=test.full_line()
)
test_case_list.append(junit_tc)
return junit_xml.TestSuite(
name=suite_name, package=suite_name, test_cases=test_case_list
)
def _find_summary_block(self, unity_output):
"""
Find and parse the test summary block.
Unity prints a test summary block at the end of a test run of the form:
-----------------------
X Tests Y Failures Z Ignored
[PASS|FAIL]
Returns the contents of the test summary block as a TestStats object.
"""
match = _TEST_SUMMARY_BLOCK_REGEX.search(unity_output)
if not match:
raise ValueError('A Unity test summary block was not found.')
try:
stats = TestStats()
stats.total = int(match.group('num_tests'))
stats.failed = int(match.group('num_failures'))
stats.ignored = int(match.group('num_ignored'))
stats.passed = stats.total - stats.failed - stats.ignored
return stats
except ValueError:
raise ValueError('The Unity test summary block was not valid.')
def _parse_unity_basic(self, unity_output):
"""
Parse basic unity output.
This is of the form file:line:test_name:result[:optional_message]
"""
found_test_stats = TestStats()
for test in _NORMAL_TEST_REGEX.finditer(unity_output):
try:
new_test = TestResult(
test.group('test_name'),
test.group('result'),
file=test.group('file'),
line=int(test.group('line')),
message=test.group('message')
if test.group('message') is not None
else '',
full_line=test.group(0),
)
except ValueError:
continue
self._add_new_test(new_test, found_test_stats)
if len(self._tests) == 0:
raise ValueError('No tests were found.')
if found_test_stats != self._test_stats:
raise ValueError('Test output does not match summary block.')
def _parse_unity_fixture_verbose(self, unity_output):
"""
Parse the output of the unity_fixture add-in invoked with the -v flag.
This is a more complex operation than basic unity output, because the output for a single
test can span multiple lines. There is a prefix of the form "TEST(test_group, test_name)"
that always exists on the first line for a given test. Immediately following that can be a
pass or fail message, or some number of diagnostic messages followed by a pass or fail
message.
"""
found_test_stats = TestStats()
line_iter = iter(unity_output.splitlines())
try:
line = next(line_iter)
while True:
prefix_match = _UNITY_FIXTURE_VERBOSE_PREFIX_REGEX.search(line)
line = next(line_iter)
if prefix_match:
# Handle the remaining portion of a test case line after the unity_fixture
# prefix.
remainder = prefix_match.group('remainder')
if remainder:
self._parse_unity_fixture_remainder(
prefix_match, remainder, found_test_stats
)
# Handle any subsequent lines with more information on the same test case.
while not _UNITY_FIXTURE_VERBOSE_PREFIX_REGEX.search(line):
self._parse_unity_fixture_remainder(
prefix_match, line, found_test_stats
)
line = next(line_iter)
except StopIteration:
pass
if len(self._tests) == 0:
raise ValueError('No tests were found.')
if found_test_stats != self._test_stats:
raise ValueError('Test output does not match summary block.')
def _parse_unity_fixture_remainder(self, prefix_match, remainder, test_stats):
"""
Parse the remainder of a Unity Fixture test case.
Can be on the same line as the prefix or on subsequent lines.
"""
new_test = None
if remainder == ' PASS':
new_test = TestResult(
prefix_match.group('test_name'),
'PASS',
group=prefix_match.group('test_group'),
full_line=prefix_match.group(0),
)
else:
remainder_match = _UNITY_FIXTURE_REMAINDER_REGEX.match(remainder)
if remainder_match:
new_test = TestResult(
prefix_match.group('test_name'),
remainder_match.group('result'),
group=prefix_match.group('test_group'),
file=remainder_match.group('file'),
line=int(remainder_match.group('line')),
message=remainder_match.group('message')
if remainder_match.group('message') is not None
else '',
full_line=prefix_match.group('prefix') + remainder_match.group(0),
)
if new_test is not None:
self._add_new_test(new_test, test_stats)
def _add_new_test(self, new_test, test_stats):
"""Add a new test and increment the proper members of test_stats."""
test_stats.total += 1
if new_test.result() == 'PASS':
test_stats.passed += 1
elif new_test.result() == 'FAIL':
test_stats.failed += 1
else:
test_stats.ignored += 1
self._tests.append(new_test)

View File

@ -111,7 +111,7 @@ This requires the following python libraries to run:
To install the dependency packages needed, please run the following command:
```shell
bash install.sh --enable-ttfw
bash install.sh --enable-pytest
```
**Note:** For troubleshooting errors with BLE transport, please refer to this [link](https://bleak.readthedocs.io/en/latest/troubleshooting.html).

View File

@ -72,8 +72,8 @@ def action_print_help(script_extension: str) -> None:
optional arguments:
targets-to-install 'all', a single target (e.g. 'esp32s2'), or a comma-separated list of targets (e.g. 'esp32,esp32c3,esp32h2')
--enable-* a specific feature to enable (e.g. '--enable-ttfw' will enable feature ttfw)
--disable-* a specific feature to disable (e.g. '--disable-ttfw' will disable feature ttfw)
--enable-* a specific feature to enable (e.g. '--enable-pytest' will enable feature pytest)
--disable-* a specific feature to disable (e.g. '--disable-pytest' will disable feature pytest)
supported features: {', '.join(features)}
{help_opts} show this help message and exit

View File

@ -19,12 +19,6 @@
"optional": true,
"requirement_path": "tools/requirements/requirements.pytest.txt"
},
{
"name": "ttfw",
"description": "Packages for CI with ttfw",
"optional": true,
"requirement_path": "tools/requirements/requirements.ttfw.txt"
},
{
"name": "ci",
"description": "Packages for ESP-IDF CI scripts",

View File

@ -19,6 +19,7 @@ netifaces
rangehttpserver
dbus-python; sys_platform == 'linux'
protobuf
bleak
paho-mqtt
paramiko
netmiko

View File

@ -1,24 +0,0 @@
# Python package requirements for CI in ESP-IDF.
# This feature can be enabled by running "install.{sh,bat,ps1,fish} --enable-ttfw"
# build
idf-build-apps
# ttfw
pyserial
pyyaml
junit_xml
netifaces
# ttfw-idf
pexpect
python-gitlab
pygdbmi
# ble
dbus-python; sys_platform == 'linux'
pygobject; sys_platform != 'win32'
# esp_prov
bleak
protobuf

View File

@ -29,40 +29,3 @@ cd $IDF_PATH/examples/system/network_tests
idf.py build
idf.py -p PORT flash
```
## Run test
Open two terminals (1) and (2)
1) In `$IDF_PATH/components/lwip/weekend_test`, start the test server, which passes packets from the TTCN3 test suite to the ESP32 board
```
python net_suite_test.py
```
2) Start the test suite in the TTCN3 environment in the `src` subdir of the cloned repository `net-test-suites.git`
```
ttcn3_start test_suite esp32_netsuite.cfg
```
## Internal connection
The purpose of this test is to execute the standard network suite against the ESP32 network stack.
The DUT (device under test; the network stack in this case) runs normally on the target, but uses a specific interface with a configured esp-netif for passing arbitrary data to
and from the network stack. The embedded code `net_suite.c` implements an application which serves stdin/stdout and propagates the data to/from this test interface.
The standard Intel net suite executed by the TTCN3 engine uses UDP ports for input/output of network packets. The Python script `net_suite.py` translates this communication
between those UDP ports and stdin/stdout; after propagating over USB/UART to the ESP32 board, the packets are processed by the network stack (on the target).
Actual test execution, progress, evaluation and test reporting are done using the standard net-test-suite scripts running on the PC.
```
PC
+---------------------------------------------------------+ ESP32 board
| | +----------------------------------------+
| TTCN3 engine | | +----------------------------------+ |
| | | | net_suite.c | |
| +-----------------+ +--------------+ | | | +------------------------+ |
| | net-test-suite |--7777/udp--| net_suite.py |--stdout---------| -----> | esp_netif / lwip | |
| | |--7771/udp--| |--stdin----------| <----- | | |
| +-----------------+ +--------------+ | | +---------+------------------------+ |
+---------------------------------------------------------+ +----------------------------------------+
```

View File

@ -1,16 +1,8 @@
// Copyright 2019 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
* SPDX-FileCopyrightText: 2019-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "esp_netif.h"
#include "esp_log.h"
#include "driver/uart.h"

View File

@ -1,16 +1,8 @@
// Copyright 2019 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
* SPDX-FileCopyrightText: 2019-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _NET_SUITE_STDINOUT_H
#define _NET_SUITE_STDINOUT_H

View File

@ -33,179 +33,4 @@ The unit test loader will prompt by showing a menu of available tests to run:
# Testing Unit Tests with CI
## CI Test Flow for Unit Test
Unit test uses 3 stages in CI: `build`, `assign_test`, `unit_test`.
### Build Stage:
`build_esp_idf_tests` job will build all UT configs and run the script `UnitTestParser.py` to parse test cases from the built elf files. The built binaries (`tools/unit-test-app/build_<target>_<config>`) and parsed cases (`components/idf_test/unit_test/TestCaseAll.yml`) will be saved as artifacts.
When a new test case is added, the build constructs a structure that saves the case data, and the parser extracts the test case from this structure. The description (defined in the test case: `TEST_CASE("name", "description")`) is used to extend the test case definition. The description is formatted as a list of tags (see the example below):
1. the first tag is always the group of the test case; it is mandatory
2. the remaining tags should be `[type=value]`. Tags can have a default value and an omitted value. For example, the `reset` tag's default value is "POWERON_RESET" and its omitted value is "" (do not reset):
* `[reset]` is equal to `[reset=POWERON_RESET]`
* if the `reset` tag doesn't exist, it equals `[reset=""]`
3. the `[leaks]` tag is used to disable the leak checking. A specific maximum memory leakage can be set as follows: `[leaks=500]`. This allows no more than 500 bytes of heap to be leaked. There is also a special function, ``test_utils_set_critical_leak_level()``, to set the critical leakage level directly in the test code instead of through a tag.
The leakage level is chosen with the following priority:
1. Setting by tag `[leaks=500]`.
2. Setting by ``test_utils_set_critical_leak_level()`` function.
3. Setting by default leakage in Kconfig ``CONFIG_UNITY_CRITICAL_LEAK_LEVEL_GENERAL``.
Tests marked as `[leaks]` or `[leaks=xxx]` reset the device after completion (or after each stage in multistage tests).
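For example, a (hypothetical) test case declaration combining these tags could look like:

```c
// group "nvs"; allow at most 300 bytes of leaked heap; expect one software CPU reset
TEST_CASE("nvs write survives reset", "[nvs][leaks=300][reset=SW_CPU_RESET]")
{
    // test body ...
}
```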
`TagDefinition.yml` defines how the description should be parsed. In `TagDefinition.yml`, we declare the tags we are interested in, along with their default and omitted values. The parser reads the properties of test cases according to this file and adds them as test case attributes.
We build the unit-test-app with different sdkconfigs. Some config items require a specific board to run. For example, if `CONFIG_SPIRAM` is enabled, the unit test app must run on a board that supports PSRAM. `ConfigDependency.yml` defines the mapping between sdkconfig items and tags; the tags are saved as case attributes and used to select jobs and runners. In the previous example, the `psram` tag is generated, and the case will only select jobs and runners that also carry the `psram` tag.
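For instance, entries in `ConfigDependency.yml` look like this:

```yaml
"psram": '{CONFIG_SPIRAM=y} and not {CONFIG_SPIRAM_BANKSWITCH_ENABLE=y} and {CONFIG_IDF_TARGET_ESP32=y}'
"8Mpsram": "CONFIG_SPIRAM_BANKSWITCH_ENABLE=y"
```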
### Assign Test Stage:
`assign_unit_test` job will try to assign all cases to the test jobs defined in `.gitlab-ci.yml`, according to test environment and tags. For each job, one config file with the same name as the test job will be generated in `components/idf_test/unit_test/CIConfigs/` (this folder is passed to test jobs as artifacts). These config files tell each test job which cases it needs to run, and pass some extra test case configs (such as whether the case resets) to the runner.
Please check the related document in tiny-test-fw for details.
### Unit Test Stage:
All jobs in the `unit_test` stage run according to the generated unit test configs. The unit test jobs then use the tiny-test-fw runner to run the test cases. The test logs are saved as artifacts.
Unit test jobs reset the DUT before running each case (because some cases do not clean up when they fail). This makes the test cases independent of each other during execution.
## Handle Unit Test CI Issues
### 1. Assign Test Failures
GitLab CI does not support creating jobs at runtime, so all jobs must be added to the CI config file manually. To run tests in parallel, we limit the number of cases running on each job. When new unit test cases are added, they can exceed the capacity that the current unit test jobs support. In this case, the assign test job raises an error reminding you to add jobs to `.gitlab-ci.yml`.
```
Too many test cases vs jobs to run. Please add the following jobs to .gitlab-ci.yml with specific tags:
* Add job with: UT_T1_1, ESP32_IDF, psram
* Add job with: UT_T1_1, ESP32_IDF
```
The above is an example of the error message from the assign test job. In this case, please add the following jobs to `.gitlab-ci.yml`:
```
UT_001_25:
<<: *unit_test_template
tags:
- ESP32_IDF
- UT_T1_1
UT_004_09:
<<: *unit_test_template
tags:
- ESP32_IDF
- UT_T1_1
- psram
```
The job naming rule is `UT` + `job type index` + `job index`. Each combination of tags is a different job type.
### 2. Debugging Failed Cases
First, check the logs; they are saved as unit test job artifacts and can be downloaded from the test job page.
If you want to reproduce locally, you need to:
1. Download the artifacts of `build_esp_idf_tests`. The built binary is in the `tools/unit-test-app/build_<target>_<config>` folder.
* A binary built in CI could be slightly different from a locally built binary at the same revision; some cases might only fail with the CI-built binary.
2. Check the following print in CI job to get the config name: `Running unit test for config: config_name`. Then flash the binary of this config to your board.
3. Run the failed case on your board (refer to the Running Unit Tests section).
* Some special UT cases (multi-stage cases, multi-device cases) require user interaction:
* You can refer to the [unit test document](https://docs.espressif.com/projects/esp-idf/en/latest/api-guides/unit-tests.html#running-unit-tests) to run the test manually.
* Or, you can use `tools/unit-test-app/unit_test.py` to run the test cases (see below)
# Testing and debugging on local machine
## Running unit tests on local machine by `unit_test.py`
First, install Python dependencies and export the Python path where the IDF CI Python modules are found:
```bash
bash install.sh --enable-ttfw
source export.sh
export PYTHONPATH=$IDF_PATH/tools/ci/python_packages:$PYTHONPATH
```
Change to the unit test app directory, configure the app as needed and build it in the default "build" directory. For example:
```bash
cd $IDF_PATH/tools/unit-test-app
idf.py ut-apply-config-psram
idf.py build -T vfs
```
(Instead of these steps, you can do whatever is needed to configure & build a unit test app with the tests and config that you need.)
If you want to build exactly the same binary files under the same location as they are in CI pipelines, you may run:
```bash
cd $IDF_PATH
python tools/ci/ci_build_apps.py tools/unit-test-app -v -t $IDF_TARGET --config "configs/*=" --copy-sdkconfig --preserve-all
```
This would build all configs. If you want to build only one config (taking `psram` as an example), you may use:
```bash
cd $IDF_PATH
python tools/ci/ci_build_apps.py tools/unit-test-app -v -t $IDF_TARGET --config "configs/psram=" --copy-sdkconfig --preserve-all
```
### Run a single test case by name
```bash
./unit_test.py "UART can do select()"
```
The unit_test.py script will flash the unit test binary from the (default) build directory, then run the test case.
### Run a single test case twice
```bash
./unit_test.py -r 2 "UART can do select()"
```
### Run multiple unit test cases
```bash
./unit_test.py "UART can do select()" "concurrent selects work"
```
### Run a multi-stage test (type of test and child case numbers are autodetected)
```bash
./unit_test.py "check a time after wakeup from deep sleep"
```
### Run a list of different unit tests (one simple and one multi-stage test)
```bash
./unit_test.py "concurrent selects work" "check a time after wakeup from deep sleep"
```
### Use custom environment config file
```bash
./unit_test.py -e /tmp/EnvConfigTemplate.yml "UART can do select()"
```
Note: No sample YAML file is currently available.
### Use a custom application binary
```bash
./unit_test.py -b /tmp/app.bin "UART can do select()"
```
Note: This option doesn't currently work without an EnvConfigTemplate also supplied; use the default unit-test-app binaries only.
### Add some options for unit tests
```bash
./unit_test.py "UART can do select()",timeout:10 "concurrent selects work",config:release,env_tag:UT_T2_1
```
Note: Setting the `config` and `env_tag` values doesn't significantly change anything but the console log output; the same binary is used.
For now we prefer component-based unit tests for CI testing. Running unit-test-app in CI is being deprecated.

View File

@ -1,8 +0,0 @@
# This config is split between targets since different component needs to be included
CONFIG_IDF_TARGET="esp32c2"
# spi_flash is tested in other configs
TEST_EXCLUDE_COMPONENTS=spi_flash
CONFIG_NEWLIB_TIME_SYSCALL_USE_NONE=n
CONFIG_NEWLIB_TIME_SYSCALL_USE_HRT=y
CONFIG_NEWLIB_TIME_SYSCALL_USE_RTC=n
CONFIG_NEWLIB_TIME_SYSCALL_USE_RTC_HRT=n

View File

@ -1,4 +0,0 @@
# This config is split between targets since different component needs to be excluded
CONFIG_IDF_TARGET="esp32c3"
# spi_flash is tested in other configs
TEST_EXCLUDE_COMPONENTS=spi_flash

View File

@ -1,4 +0,0 @@
# This config is split between targets since different component needs to be included
CONFIG_IDF_TARGET="esp32c6"
# spi_flash is tested in other configs
TEST_EXCLUDE_COMPONENTS=spi_flash

View File

@ -1,4 +0,0 @@
# This config is split between targets since different component needs to be excluded (esp32, esp32s2)
CONFIG_IDF_TARGET="esp32"
# spi_flash are tested in other configs
TEST_EXCLUDE_COMPONENTS=spi_flash test_utils

View File

@ -1,4 +0,0 @@
# This config is split between targets since different component needs to be included
CONFIG_IDF_TARGET="esp32h2"
# spi_flash is tested in other configs
TEST_EXCLUDE_COMPONENTS=spi_flash

View File

@ -1,5 +0,0 @@
# This config is split between targets since different component needs to be included
CONFIG_IDF_TARGET="esp32s2"
# spi_flash is tested in other configs
TEST_EXCLUDE_COMPONENTS=spi_flash
CONFIG_ESP32S2_RTCDATA_IN_FAST_MEM=y

View File

@ -1,4 +0,0 @@
# This config is split between targets since different component needs to be included
CONFIG_IDF_TARGET="esp32s3"
# spi_flash is tested in other configs
TEST_EXCLUDE_COMPONENTS=spi_flash test_utils

View File

@ -1,11 +0,0 @@
CONFIG_IDF_TARGET="esp32"
TEST_EXCLUDE_COMPONENTS=bt driver spi_flash test_utils esp-tls
CONFIG_SPIRAM=y
CONFIG_ESP_INT_WDT_TIMEOUT_MS=800
CONFIG_SPIRAM_OCCUPY_NO_HOST=y
CONFIG_ESP_WIFI_RX_IRAM_OPT=n
CONFIG_ESP_WIFI_IRAM_OPT=n
# Disable encrypted flash reads/writes to save IRAM in this build configuration
CONFIG_SPI_FLASH_ENABLE_ENCRYPTED_READ_WRITE=n
CONFIG_SPIRAM_ALLOW_NOINIT_SEG_EXTERNAL_MEMORY=y
CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY=y

View File

@ -1,7 +0,0 @@
CONFIG_IDF_TARGET="esp32"
TEST_COMPONENTS=spi_flash
CONFIG_SPIRAM=y
CONFIG_ESP_INT_WDT_TIMEOUT_MS=800
CONFIG_SPIRAM_OCCUPY_NO_HOST=y
CONFIG_ESP_WIFI_RX_IRAM_OPT=n
CONFIG_SPIRAM_ALLOW_STACK_EXTERNAL_MEMORY=y

View File

@ -1,8 +0,0 @@
CONFIG_IDF_TARGET="esp32"
TEST_COMPONENTS=spi_flash
CONFIG_SPIRAM=y
CONFIG_ESP_INT_WDT_TIMEOUT_MS=800
CONFIG_SPIRAM_OCCUPY_NO_HOST=y
CONFIG_ESP_WIFI_RX_IRAM_OPT=n
CONFIG_SPIRAM_ALLOW_STACK_EXTERNAL_MEMORY=y
CONFIG_SPIRAM_MALLOC_ALWAYSINTERNAL=0

View File

@ -1,5 +0,0 @@
CONFIG_IDF_TARGET="esp32"
TEST_EXCLUDE_COMPONENTS=test_utils
CONFIG_COMPILER_OPTIMIZATION_SIZE=y
CONFIG_BOOTLOADER_COMPILER_OPTIMIZATION_SIZE=y
CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_SILENT=y

View File

@ -1,9 +0,0 @@
CONFIG_IDF_TARGET="esp32c2"
TEST_EXCLUDE_COMPONENTS=test_utils
CONFIG_COMPILER_OPTIMIZATION_SIZE=y
CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_SILENT=y
CONFIG_BOOTLOADER_COMPILER_OPTIMIZATION_SIZE=y
CONFIG_NEWLIB_TIME_SYSCALL_USE_NONE=n
CONFIG_NEWLIB_TIME_SYSCALL_USE_HRT=y
CONFIG_NEWLIB_TIME_SYSCALL_USE_RTC=n
CONFIG_NEWLIB_TIME_SYSCALL_USE_RTC_HRT=n

View File

@ -1,5 +0,0 @@
CONFIG_IDF_TARGET="esp32c6"
TEST_EXCLUDE_COMPONENTS=test_utils
CONFIG_COMPILER_OPTIMIZATION_SIZE=y
CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_SILENT=y
CONFIG_BOOTLOADER_COMPILER_OPTIMIZATION_SIZE=y

View File

@ -1,5 +0,0 @@
CONFIG_IDF_TARGET="esp32h2"
TEST_EXCLUDE_COMPONENTS=test_utils
CONFIG_COMPILER_OPTIMIZATION_SIZE=y
CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_SILENT=y
CONFIG_BOOTLOADER_COMPILER_OPTIMIZATION_SIZE=y

View File

@ -1,6 +0,0 @@
# This config is split between targets since different component needs to be included (esp32, esp32s2)
CONFIG_IDF_TARGET="esp32s2"
TEST_EXCLUDE_COMPONENTS=test_utils
CONFIG_COMPILER_OPTIMIZATION_SIZE=y
CONFIG_BOOTLOADER_COMPILER_OPTIMIZATION_SIZE=y
CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_SILENT=y

View File

@ -1,5 +0,0 @@
CONFIG_IDF_TARGET="esp32s3"
TEST_EXCLUDE_COMPONENTS=test_utils
CONFIG_COMPILER_OPTIMIZATION_SIZE=y
CONFIG_BOOTLOADER_COMPILER_OPTIMIZATION_SIZE=y
CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_SILENT=y

View File

@ -1,5 +0,0 @@
CONFIG_IDF_TARGET="esp32"
TEST_EXCLUDE_COMPONENTS=test_utils
CONFIG_FREERTOS_UNICORE=y
CONFIG_ESP32_IRAM_AS_8BIT_ACCESSIBLE_MEMORY=y
CONFIG_ESP32_RTCDATA_IN_FAST_MEM=y

View File

@ -1,4 +0,0 @@
# This config is for all targets
# The test is isolated as it requires particular memory layout
TEST_COMPONENTS=test_utils
CONFIG_ESP_IPC_TASK_STACK_SIZE=3072

View File

@ -1,7 +0,0 @@
# This config is for esp32 only
CONFIG_IDF_TARGET="esp32"
# The test is isolated as it requires particular memory layout
TEST_COMPONENTS=test_utils
CONFIG_ESP_IPC_TASK_STACK_SIZE=3072
CONFIG_SPIRAM=y
CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_DISABLE=y

View File

@ -1,4 +0,0 @@
TEST_COMPONENTS=spi_flash mbedtls
CONFIG_SPIRAM=y
CONFIG_IDF_TARGET="esp32s2"
CONFIG_SPIRAM_OCCUPY_NO_HOST=y

View File

@ -1,4 +0,0 @@
TEST_EXCLUDE_COMPONENTS=libsodium bt app_update driver spi_flash
CONFIG_SPIRAM=y
CONFIG_IDF_TARGET="esp32s2"
CONFIG_SPIRAM_OCCUPY_NO_HOST=y

View File

@ -1,14 +0,0 @@
"psram": '{CONFIG_SPIRAM=y} and not {CONFIG_SPIRAM_BANKSWITCH_ENABLE=y} and {CONFIG_IDF_TARGET_ESP32=y}'
"8Mpsram": "CONFIG_SPIRAM_BANKSWITCH_ENABLE=y"
"ESP32_IDF": "CONFIG_IDF_TARGET_ESP32=y"
"ESP32S2_IDF": "CONFIG_IDF_TARGET_ESP32S2=y"
"ESP32S3_IDF": "CONFIG_IDF_TARGET_ESP32S3=y"
"ESP32C2_IDF": "CONFIG_IDF_TARGET_ESP32C2=y"
"ESP32C3_IDF": "CONFIG_IDF_TARGET_ESP32C3=y"
"ESP32C6_IDF": "CONFIG_IDF_TARGET_ESP32C6=y"
"ESP32H2_IDF": "CONFIG_IDF_TARGET_ESP32H2=y"
"ESP32P4_IDF": "CONFIG_IDF_TARGET_ESP32P4=y"
"quad_psram": '{CONFIG_SPIRAM_MODE_QUAD=y} and {CONFIG_IDF_TARGET_ESP32S3=y}'
"octal_psram": '{CONFIG_SPIRAM_MODE_OCT=y} and {CONFIG_IDF_TARGET_ESP32S3=y}'
"xtal_26mhz": '{CONFIG_XTAL_FREQ_26=y} and {CONFIG_IDF_TARGET_ESP32C2=y}'
"xtal_40mhz": '{CONFIG_XTAL_FREQ_40=y} and {CONFIG_IDF_TARGET_ESP32C2=y}'

View File

@ -1,135 +0,0 @@
freertos:
module: System
module abbr: SYS
sub module: OS
sub module abbr: OS
nvs:
module: System
module abbr: SYS
sub module: NVS
sub module abbr: NVS
partition:
module: System
module abbr: SYS
sub module: Misc
sub module abbr: MISC
ulp:
module: System
module abbr: SYS
sub module: Misc
sub module abbr: MISC
fp:
module: System
module abbr: SYS
sub module: Misc
sub module abbr: MISC
hw:
module: System
module abbr: SYS
sub module: Misc
sub module abbr: MISC
tjpgd:
module: System
module abbr: SYS
sub module: Misc
sub module abbr: MISC
miniz:
module: System
module abbr: SYS
sub module: Misc
sub module abbr: MISC
mmap:
module: System
module abbr: SYS
sub module: Misc
sub module abbr: MISC
bignum:
module: System
module abbr: SYS
sub module: Misc
sub module abbr: MISC
newlib:
module: System
module abbr: SYS
sub module: Std Lib
sub module abbr: STD
aes:
module: System
module abbr: SYS
sub module: Misc
sub module abbr: MISC
mbedtls:
module: System
module abbr: SYS
sub module: Misc
sub module abbr: MISC
spi_flash:
module: Driver
module abbr: DRV
sub module: SPI
sub module abbr: SPI
spi_flash_read:
module: Driver
module abbr: DRV
sub module: SPI
sub module abbr: SPI
spi_flash_write:
module: Driver
module abbr: DRV
sub module: SPI
sub module abbr: SPI
flash_encryption:
module: Driver
module abbr: DRV
sub module: SPI
sub module abbr: SPI
esp32:
module: System
module abbr: SYS
sub module: Misc
sub module abbr: MISC
deepsleep:
module: RTC
module abbr: RTC
sub module: Deep Sleep
sub module abbr: SLEEP
sd:
module: System
module abbr: SYS
sub module: Misc
sub module abbr: MISC
cxx:
module: System
module abbr: SYS
sub module: Misc
sub module abbr: MISC
fatfs:
module: System
module abbr: SYS
sub module: Misc
sub module abbr: MISC
delay:
module: System
module abbr: SYS
sub module: Misc
sub module abbr: MISC
spi:
module: System
module abbr: SYS
sub module: Misc
sub module abbr: MISC
vfs:
module: System
module abbr: SYS
sub module: Misc
sub module abbr: MISC
misc:
module: System
module abbr: SYS
sub module: Misc
sub module abbr: MISC
wifi_init:
module: System
module abbr: SYS
sub module: Misc
sub module abbr: MISC

View File

@ -1,20 +0,0 @@
ignore:
# if the type exists but no value is assigned
default: "Yes"
# if the type does not exist in the tag list
omitted: "No"
test_env:
default: "UT_T1_1"
omitted: "UT_T1_1"
reset:
default: "POWERON_RESET"
omitted: " "
multi_device:
default: "Yes"
omitted: "No"
multi_stage:
default: "Yes"
omitted: "No"
timeout:
default: 30
omitted: 30

View File

@ -1,367 +0,0 @@
from __future__ import print_function
import argparse
import os
import re
import shutil
import sys
from copy import deepcopy
import yaml
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader as Loader # type: ignore
try:
from ElfUnitTestParser import parse_elf_test_cases
except ImportError:
sys.path.append(os.path.expandvars(os.path.join('$IDF_PATH', 'tools', 'unit-test-app', 'tools')))
from ElfUnitTestParser import parse_elf_test_cases
TEST_CASE_PATTERN = {
'initial condition': 'UTINIT1',
'chip_target': 'esp32',
'level': 'Unit',
'execution time': 0,
'auto test': 'Yes',
'category': 'Function',
'test point 1': 'basic function',
'version': 'v1 (2016-12-06)',
'test environment': 'UT_T1_1',
'reset': '',
'expected result': '1. set succeed',
'cmd set': 'test_unit_test_case',
'Test App': 'UT',
}
class Parser(object):
""" parse unit test cases from build files and create files for test bench """
TAG_PATTERN = re.compile(r'([^=]+)(=)?(.+)?')
DESCRIPTION_PATTERN = re.compile(r'\[([^]\[]+)\]')
CONFIG_PATTERN = re.compile(r'{([^}]+)}')
TEST_GROUPS_PATTERN = re.compile(r'TEST_GROUPS=(.*)$')
# file path (relative to idf path)
TAG_DEF_FILE = os.path.join('tools', 'unit-test-app', 'tools', 'TagDefinition.yml')
MODULE_DEF_FILE = os.path.join('tools', 'unit-test-app', 'tools', 'ModuleDefinition.yml')
CONFIG_DEPENDENCY_FILE = os.path.join('tools', 'unit-test-app', 'tools', 'ConfigDependency.yml')
MODULE_ARTIFACT_FILE = os.path.join('components', 'idf_test', 'ModuleDefinition.yml')
TEST_CASE_FILE_DIR = os.path.join('components', 'idf_test', 'unit_test')
UT_CONFIG_FOLDER = os.path.join('tools', 'unit-test-app', 'configs')
ELF_FILE = 'unit-test-app.elf'
SDKCONFIG_FILE = 'sdkconfig'
STRIP_CONFIG_PATTERN = re.compile(r'(.+?)(_\d+)?$')
def __init__(self, binary_folder, node_index):
idf_path = os.getenv('IDF_PATH')
idf_target = os.getenv('IDF_TARGET')
self.test_env_tags = {}
self.unit_jobs = {}
self.file_name_cache = {}
self.idf_path = idf_path
self.idf_target = idf_target
self.node_index = node_index
self.ut_bin_folder = binary_folder
self.tag_def = yaml.load(open(os.path.join(idf_path, self.TAG_DEF_FILE), 'r'), Loader=Loader)
self.module_map = yaml.load(open(os.path.join(idf_path, self.MODULE_DEF_FILE), 'r'), Loader=Loader)
self.config_dependencies = yaml.load(open(os.path.join(idf_path, self.CONFIG_DEPENDENCY_FILE), 'r'),
Loader=Loader)
# used to check if duplicated test case names
self.test_case_names = set()
self.parsing_errors = []
def parse_test_cases_for_one_config(self, configs_folder, config_output_folder, config_name):
"""
parse test cases from the elf file and save the test cases that need to be executed to the unit test folder
:param configs_folder: folder where per-config sdkconfig fragments are located (i.e. tools/unit-test-app/configs)
:param config_output_folder: build folder of this config
:param config_name: built unit test config name
"""
tags = self.parse_tags(os.path.join(config_output_folder, self.SDKCONFIG_FILE))
print('Tags of config %s: %s' % (config_name, tags))
test_groups = self.get_test_groups(os.path.join(configs_folder, config_name))
elf_file = os.path.join(config_output_folder, self.ELF_FILE)
bin_test_cases = parse_elf_test_cases(elf_file, self.idf_target)
test_cases = []
for bin_tc in bin_test_cases:
# we could split cases of same config into multiple binaries as we have limited rom space
# we should regard those configs like `default` and `default_2` as the same config
match = self.STRIP_CONFIG_PATTERN.match(config_name)
stripped_config_name = match.group(1)
tc = self.parse_one_test_case(bin_tc['name'], bin_tc['desc'], config_name, stripped_config_name, tags)
# check for duplicated case names:
# we use the case name to select cases,
# and with duplicated IDs Unity could select an incorrect case to run.
# we therefore check all cases, no matter whether they are going to be executed by CI.
# the app name is also added here, since the same case is allowed for different apps
if (tc['summary'] + stripped_config_name) in self.test_case_names:
self.parsing_errors.append('{} ({}): duplicated test case ID: {}'.format(stripped_config_name, config_name, tc['summary']))
else:
self.test_case_names.add(tc['summary'] + stripped_config_name)
test_group_included = True
if test_groups is not None and tc['group'] not in test_groups:
test_group_included = False
if tc['CI ready'] == 'Yes' and test_group_included:
# update test env list and the cases of same env list
if tc['test environment'] in self.test_env_tags:
self.test_env_tags[tc['test environment']].append(tc['ID'])
else:
self.test_env_tags.update({tc['test environment']: [tc['ID']]})
if bin_tc['function_count'] > 1:
tc.update({'child case num': bin_tc['function_count']})
# only add cases need to be executed
test_cases.append(tc)
return test_cases
def parse_case_properties(self, tags_raw):
"""
parse test case tags (properties) with the following rules:
* the first tag is always the group of the test case; it is mandatory
* the remaining tags should be [type=value].
* if the type has a default value, then [type] equals [type=default_value].
* if the type doesn't exist, it equals [type=omitted_value]
default_value and omitted_value are defined in TagDefinition.yml
:param tags_raw: raw tag string
:return: tag dict
"""
tags = self.DESCRIPTION_PATTERN.findall(tags_raw)
assert len(tags) > 0
p = dict([(k, self.tag_def[k]['omitted']) for k in self.tag_def])
p['module'] = tags[0]
# Use the original value of the first tag as test group name
p['group'] = p['module']
if p['module'] not in self.module_map:
p['module'] = 'misc'
# parsing rest tags, [type=value], =value is optional
for tag in tags[1:]:
match = self.TAG_PATTERN.search(tag)
assert match is not None
tag_type = match.group(1)
tag_value = match.group(3)
if match.group(2) == '=' and tag_value is None:
# [tag_type=] means tag_value is empty string
tag_value = ''
if tag_type in p:
if tag_value is None:
p[tag_type] = self.tag_def[tag_type]['default']
else:
p[tag_type] = tag_value
else:
# ignore not defined tag type
pass
return p
@staticmethod
def parse_tags_internal(sdkconfig, config_dependencies, config_pattern):
required_tags = []
def compare_config(config):
return config in sdkconfig
def process_condition(condition):
matches = config_pattern.findall(condition)
if matches:
for config in matches:
compare_result = compare_config(config)
# replace all configs in condition with True or False according to compare result
condition = re.sub(config_pattern, str(compare_result), condition, count=1)
# Now the condition is a python condition, we can use eval to compute its value
ret = eval(condition)
else:
# didn't use complex condition. only defined one condition for the tag
ret = compare_config(condition)
return ret
for tag in config_dependencies:
if process_condition(config_dependencies[tag]):
required_tags.append(tag)
return required_tags
def parse_tags(self, sdkconfig_file):
"""
Some test configs may require different DUTs.
For example, if CONFIG_SPIRAM is enabled, we need a kit with PSRAM to run the test.
This method gets the tags for runners according to ConfigDependency.yml (which maps tags to sdkconfig options).
We support the following syntax::
# define the config which requires the tag
'tag_a': 'config_a="value_a"'
# define the condition for the tag
'tag_b': '{config A} and (not {config B} or (not {config C} and {config D}))'
:param sdkconfig_file: sdk config file of the unit test config
:return: required tags for runners
"""
with open(sdkconfig_file, 'r') as f:
configs_raw_data = f.read()
configs = configs_raw_data.splitlines(False)
return self.parse_tags_internal(configs, self.config_dependencies, self.CONFIG_PATTERN)
def get_test_groups(self, config_file):
"""
If the config file includes TEST_GROUPS variable, return its value as a list of strings.
:param config_file file under configs/ directory for given configuration
:return: list of test groups, or None if TEST_GROUPS wasn't set
"""
with open(config_file, 'r') as f:
for line in f:
match = self.TEST_GROUPS_PATTERN.match(line)
if match is not None:
return match.group(1).split(' ')
return None
def parse_one_test_case(self, name, description, config_name, stripped_config_name, tags):
"""
parse one test case
:param name: test case name (summary)
:param description: test case description (tag string)
:param config_name: built unit test app name
:param stripped_config_name: config name with the numeric suffix stripped, since such configs differ only in test components
:param tags: tags to select runners
:return: parsed test case
"""
prop = self.parse_case_properties(description)
test_case = deepcopy(TEST_CASE_PATTERN)
test_case.update({'config': config_name,
'module': self.module_map[prop['module']]['module'],
'group': prop['group'],
'CI ready': 'No' if prop['ignore'] == 'Yes' else 'Yes',
'ID': '[{}] {}'.format(stripped_config_name, name),
'test point 2': prop['module'],
'steps': name,
'test environment': prop['test_env'],
'reset': prop['reset'],
'sub module': self.module_map[prop['module']]['sub module'],
'summary': name,
'multi_device': prop['multi_device'],
'multi_stage': prop['multi_stage'],
'timeout': int(prop['timeout']),
'tags': tags,
'chip_target': self.idf_target})
return test_case
def dump_test_cases(self, test_cases):
"""
dump parsed test cases to YAML file for test bench input
:param test_cases: parsed test cases
"""
filename = os.path.join(self.idf_path, self.TEST_CASE_FILE_DIR,
'{}_{}.yml'.format(self.idf_target, self.node_index))
try:
os.mkdir(os.path.dirname(filename))
except OSError:
pass
with open(os.path.join(filename), 'w+') as f:
yaml.dump({'test cases': test_cases}, f, allow_unicode=True, default_flow_style=False)
def copy_module_def_file(self):
""" copy module def file to artifact path """
src = os.path.join(self.idf_path, self.MODULE_DEF_FILE)
dst = os.path.join(self.idf_path, self.MODULE_ARTIFACT_FILE)
shutil.copy(src, dst)
def parse_test_cases(self):
""" parse test cases from multiple built unit test apps """
test_cases = []
configs_folder = os.path.join(self.idf_path, self.UT_CONFIG_FOLDER)
config_output_prefix = f'build_{self.idf_target}_'
test_configs = []
for item in os.listdir(self.ut_bin_folder):
if os.path.isdir(os.path.join(self.ut_bin_folder, item)) and item.startswith(config_output_prefix):
test_configs.append(item.split(config_output_prefix)[1])
for config in test_configs:
config_output_folder = os.path.join(self.ut_bin_folder, f'{config_output_prefix}{config}')
if os.path.exists(config_output_folder):
test_cases.extend(self.parse_test_cases_for_one_config(configs_folder, config_output_folder, config))
test_cases.sort(key=lambda x: x['config'] + x['summary'])
self.dump_test_cases(test_cases)
def test_parser(binary_folder, node_index):
ut_parser = Parser(binary_folder, node_index)
# test parsing tags
# parsing module only and module in module list
prop = ut_parser.parse_case_properties('[esp32]')
assert prop['module'] == 'esp32'
# module not in module list
prop = ut_parser.parse_case_properties('[not_in_list]')
assert prop['module'] == 'misc'
# parsing a default tag, a tag with assigned value
prop = ut_parser.parse_case_properties('[esp32][ignore][test_env=ABCD][not_support1][not_support2=ABCD]')
assert prop['ignore'] == 'Yes' and prop['test_env'] == 'ABCD' \
and 'not_support1' not in prop and 'not_supported2' not in prop
# parsing omitted value
prop = ut_parser.parse_case_properties('[esp32]')
assert prop['ignore'] == 'No' and prop['test_env'] == 'UT_T1_1'
# parsing with incorrect format
try:
ut_parser.parse_case_properties('abcd')
assert False
except AssertionError:
pass
# skip invalid data parse, [type=] assigns empty string to type
prop = ut_parser.parse_case_properties('[esp32]abdc aaaa [ignore=]')
assert prop['module'] == 'esp32' and prop['ignore'] == ''
# skip mis-paired []
prop = ut_parser.parse_case_properties('[esp32][[ignore=b]][]][test_env=AAA]]')
assert prop['module'] == 'esp32' and prop['ignore'] == 'b' and prop['test_env'] == 'AAA'
config_dependency = {
'a': '123',
'b': '456',
'c': 'not {123}',
'd': '{123} and not {456}',
'e': '{123} and not {789}',
'f': '({123} and {456}) or ({123} and {789})'
}
sdkconfig = ['123', '789']
tags = ut_parser.parse_tags_internal(sdkconfig, config_dependency, ut_parser.CONFIG_PATTERN)
assert sorted(tags) == ['a', 'd', 'f'] # sorted is required for older Python3, e.g. 3.4.8
def main(binary_folder, node_index):
assert os.getenv('IDF_PATH'), 'IDF_PATH must be set to use this script'
assert os.getenv('IDF_TARGET'), 'IDF_TARGET must be set to use this script'
test_parser(binary_folder, node_index)
ut_parser = Parser(binary_folder, node_index)
ut_parser.parse_test_cases()
ut_parser.copy_module_def_file()
if len(ut_parser.parsing_errors) > 0:
for error in ut_parser.parsing_errors:
print(error)
exit(1)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('bin_dir', help='Binary Folder')
parser.add_argument('node_index', type=int, default=1,
help='Node index, should only be set in CI')
args = parser.parse_args()
main(args.bin_dir, args.node_index)

View File

@ -1,32 +0,0 @@
#!/usr/bin/env bash
cd ${IDF_PATH}/tools/unit-test-app
AVAL_CONFIGS=""
CONFIGS=$(ls configs)
if [ "$1" == "esp32" ]; then
#echo 'Searching for configs for target "'$1'"'
for FILE in $CONFIGS
do
grep 'CONFIG_IDF_TARGET' <configs/$FILE > /dev/null
if [ $? -ne 0 ]; then
# If CONFIG_IDF_TARGET not found, implies ESP32
AVAL_CONFIGS="$AVAL_CONFIGS $FILE"
fi
grep -E '^CONFIG_IDF_TARGET="?'$1'"?$' <configs/$FILE > /dev/null
if [ $? -eq 0 ]; then
AVAL_CONFIGS="$AVAL_CONFIGS $FILE"
fi
done
else
#echo 'Searching for configs for target "'$1'"'
for FILE in $CONFIGS
do
grep -E '^CONFIG_IDF_TARGET="?'$1'"?$' <configs/$FILE > /dev/null
if [ $? -eq 0 ]; then
AVAL_CONFIGS="$AVAL_CONFIGS $FILE"
fi
done
fi
echo $AVAL_CONFIGS

View File

@ -1,812 +0,0 @@
#!/usr/bin/env python
#
# SPDX-FileCopyrightText: 2018-2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
"""
Test script for unit test case.
"""
import argparse
import re
import threading
import time
import ttfw_idf
from tiny_test_fw import DUT, Env, TinyFW, Utility
from tiny_test_fw.TinyFW import TestCaseFailed
from tiny_test_fw.Utility import format_case_id, handle_unexpected_exception
UT_APP_BOOT_UP_DONE = 'Press ENTER to see the list of tests.'
STRIP_CONFIG_PATTERN = re.compile(r'(.+?)(_\d+)?$')
# matches e.g.: "rst:0xc (SW_CPU_RESET),boot:0x13 (SPI_FAST_FLASH_BOOT)"
RESET_PATTERN = re.compile(r'(rst:0x[0-9a-fA-F]*\s\([\w].*?\),boot:0x[0-9a-fA-F]*\s\([\w].*?\))')
EXCEPTION_PATTERN = re.compile(r"(Guru Meditation Error: Core\s+\d panic'ed \([\w].*?\))")
ABORT_PATTERN = re.compile(r'(abort\(\) was called at PC 0x[a-fA-F\d]{8} on core \d)')
ASSERT_PATTERN = re.compile(r'(assert failed: .*)')
FINISH_PATTERN = re.compile(r'1 Tests (\d) Failures (\d) Ignored')
END_LIST_STR = r'\r?\nEnter test for running'
TEST_PATTERN = re.compile(r'\((\d+)\)\s+"([^"]+)" ([^\r\n]+)\r?\n(' + END_LIST_STR + r')?')
TEST_SUBMENU_PATTERN = re.compile(r'\s+\((\d+)\)\s+"[^"]+"\r?\n(?=(?=\()|(' + END_LIST_STR + r'))')
UT_APP_PATH = 'tools/unit-test-app'
SIMPLE_TEST_ID = 0
MULTI_STAGE_ID = 1
MULTI_DEVICE_ID = 2
DEFAULT_TIMEOUT = 20
DUT_DELAY_AFTER_RESET = 2
DUT_STARTUP_CHECK_RETRY_COUNT = 5
TEST_HISTORY_CHECK_TIMEOUT = 2
def reset_reason_matches(reported_str, expected_str):
known_aliases = {
'_RESET': '_RST',
'POWERON_RESET': 'POWERON',
'DEEPSLEEP_RESET': 'DSLEEP',
'SW_CPU_RESET': 'SW_CPU',
}
if expected_str in reported_str:
return True
for token, alias in known_aliases.items():
if token in expected_str:
alt_expected_str = expected_str.replace(token, alias)
if alt_expected_str in reported_str:
return True
return False
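# e.g. reset_reason_matches('rst:0xc (SW_CPU_RESET),boot:0x13 (SPI_FAST_FLASH_BOOT)', 'SW_CPU_RESET') -> True
# and, via the alias table, reset_reason_matches('rst:0x1 (POWERON),boot:0x13 (SPI_FAST_FLASH_BOOT)', 'POWERON_RESET') -> True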
def format_test_case_config(test_case_data, target='esp32'):
"""
convert the test case data to a unified format.
We need the following info to run unit test cases:
1. unit test app config
2. test case name
3. test case reset info
the formatted case config is a dict, with ut app config as keys. The value is a list of test cases.
Each test case is a dict with "name" and "reset" as keys. For example::
case_config = {
"default": [{"name": "restart from PRO CPU", "reset": "SW_CPU_RESET"}, {...}],
"psram": [{"name": "restart from PRO CPU", "reset": "SW_CPU_RESET"}],
}
If config is not specified for a test case, "default" is used.
:param test_case_data: string, list, or a dictionary list
:param target: target
:return: formatted data
"""
case_config = dict()
def parse_case(one_case_data):
""" parse and format one case """
def process_reset_list(reset_list):
# strip spaces and remove whitespace-only items
_output = list()
for _r in reset_list:
_data = _r.strip(' ')
if _data:
_output.append(_data)
return _output
_case = dict()
if isinstance(one_case_data, str):
_temp = one_case_data.split(' [reset=')
_case['name'] = _temp[0]
try:
_case['reset'] = process_reset_list(_temp[1][0:-1].split(','))
except IndexError:
_case['reset'] = list()
elif isinstance(one_case_data, dict):
_case = one_case_data.copy()
assert 'name' in _case
if 'reset' not in _case:
_case['reset'] = list()
else:
if isinstance(_case['reset'], str):
_case['reset'] = process_reset_list(_case['reset'].split(','))
else:
raise TypeError('Not supported type during parsing unit test case')
if 'config' not in _case:
_case['config'] = 'default'
if 'target' not in _case:
_case['target'] = target
return _case
if not isinstance(test_case_data, list):
test_case_data = [test_case_data]
for case_data in test_case_data:
parsed_case = parse_case(case_data)
try:
case_config[parsed_case['config']].append(parsed_case)
except KeyError:
case_config[parsed_case['config']] = [parsed_case]
return case_config
def replace_app_bin(dut, name, new_app_bin):
if new_app_bin is None:
return
search_pattern = '/{}.bin'.format(name)
for i, config in enumerate(dut.download_config):
if config.endswith(search_pattern):
dut.download_config[i] = new_app_bin
Utility.console_log('The replaced application binary is {}'.format(new_app_bin), 'O')
break
def format_case_name(case):
# we could split cases of same config into multiple binaries as we have limited rom space
# we should regard those configs like `default` and `default_2` as the same config
match = STRIP_CONFIG_PATTERN.match(case['config'])
stripped_config_name = match.group(1)
return format_case_id(case['name'], target=case['target'], config=stripped_config_name)
def reset_dut(dut):
dut.reset()
# The esptool ``run`` cmd takes quite a long time.
# The serial port is closed before the reset finishes, so the DUT may already have booted up before the serial port is reopened.
# This could make checking the boot-up print fail.
# Instead, we send the `-` command and check the test history to verify that the DUT has booted up.
# We retry this step a few times,
# in case `dut.reset` returns during DUT boot-up (when the DUT can't process any command).
#
# During boot-up, the DUT might only receive part of the first `-` command.
# If it only receives `\n`, it will print all cases, which can take more than 5 seconds and make the reset check fail.
# To avoid this, we add a delay between the reset and the `-` command, and also enlarge the expect timeout.
time.sleep(DUT_DELAY_AFTER_RESET)
for _ in range(DUT_STARTUP_CHECK_RETRY_COUNT):
dut.write('-')
try:
dut.expect('0 Tests 0 Failures 0 Ignored', timeout=TEST_HISTORY_CHECK_TIMEOUT)
break
except DUT.ExpectTimeout:
pass
else:
raise AssertionError('Reset {} ({}) failed!'.format(dut.name, dut.port))
def log_test_case(description, test_case, ut_config):
Utility.console_log("Running {} '{}' (config {})".format(description, test_case['name'], ut_config),
color='orange')
Utility.console_log('Tags: %s' % ', '.join('%s=%s' % (k, v) for (k, v) in test_case.items()
if k != 'name' and v is not None),
color='orange')
def run_one_normal_case(dut, one_case, junit_test_case):
reset_dut(dut)
dut.start_capture_raw_data()
# run test case
dut.write("\"{}\"".format(one_case['name']))
dut.expect('Running ' + one_case['name'] + '...')
exception_reset_list = []
# we want to set this flag in callbacks (inner functions)
# use list here so we can use append to set this flag
test_finish = list()
# expect callbacks
def one_case_finish(result):
""" one test finished, let expect loop break and log result """
test_finish.append(True)
output = dut.stop_capture_raw_data()
if result:
Utility.console_log('Success: ' + format_case_name(one_case), color='green')
else:
Utility.console_log('Failed: ' + format_case_name(one_case), color='red')
junit_test_case.add_failure_info(output)
raise TestCaseFailed(format_case_name(one_case))
def handle_exception_reset(data):
"""
just append data to exception list.
exception list will be checked in ``handle_reset_finish``, once reset finished.
"""
exception_reset_list.append(data[0])
def handle_test_finish(data):
""" test finished without reset """
# in this scenario reset should not happen
assert not exception_reset_list
if int(data[1]):
# case ignored
Utility.console_log('Ignored: ' + format_case_name(one_case), color='orange')
junit_test_case.add_skipped_info('ignored')
one_case_finish(not int(data[0]))
def handle_reset_finish(data):
""" reset happened and reboot finished """
assert exception_reset_list # reboot but no exception/reset logged. should never happen
result = False
if len(one_case['reset']) == len(exception_reset_list):
for i, exception in enumerate(exception_reset_list):
if not reset_reason_matches(exception, one_case['reset'][i]):
break
else:
result = True
if not result:
            err_msg = 'Reset Check Failed: \r\n\tExpected: {}\r\n\tActual: {}'.format(one_case['reset'],
                                                                                      exception_reset_list)
Utility.console_log(err_msg, color='orange')
junit_test_case.add_failure_info(err_msg)
one_case_finish(result)
while not test_finish:
try:
timeout_value = one_case['timeout']
dut.expect_any((RESET_PATTERN, handle_exception_reset),
(EXCEPTION_PATTERN, handle_exception_reset),
(ABORT_PATTERN, handle_exception_reset),
(ASSERT_PATTERN, handle_exception_reset),
(FINISH_PATTERN, handle_test_finish),
(UT_APP_BOOT_UP_DONE, handle_reset_finish),
timeout=timeout_value)
except DUT.ExpectTimeout:
Utility.console_log('Timeout in expect (%s seconds)' % timeout_value, color='orange')
junit_test_case.add_failure_info('timeout')
one_case_finish(False)
break
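# For reference, a passing case that declares one expected reset produces
# console output along these lines (the patterns are defined earlier in this
# file; exact strings vary by target):
#
#   Running <case name>...           <- start marker expected above
#   rst:0xc (SW_CPU_RESET) ...       <- RESET_PATTERN, queued by handle_exception_reset
#   <unit-test app boot-up banner>   <- UT_APP_BOOT_UP_DONE, verified in handle_reset_finish
#
# while a case without reset info ends with the Unity summary line matched by
# FINISH_PATTERN, e.g. '1 Tests 0 Failures 0 Ignored'.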
@ttfw_idf.idf_unit_test(env_tag='UT_T1_1', junit_report_by_case=True)
def run_unit_test_cases(env, extra_data):
"""
extra_data can be three types of value
1. as string:
1. "case_name"
2. "case_name [reset=RESET_REASON]"
2. as dict:
1. with key like {"name": "Intr_alloc test, shared ints"}
2. with key like {"name": "restart from PRO CPU", "reset": "SW_CPU_RESET", "config": "psram"}
3. as list of string or dict:
[case1, case2, case3, {"name": "restart from PRO CPU", "reset": "SW_CPU_RESET"}, ...]
:param env: test env instance
:param extra_data: the case name or case list or case dictionary
:return: None
"""
case_config = format_test_case_config(extra_data, env.default_dut_cls.TARGET)
    # We don't want to stop on a failed case (except for some special scenarios we can't handle).
    # Failed cases are collected in this list and reported before the test function exits.
failed_cases = []
for ut_config in case_config:
Utility.console_log('Running unit test for config: ' + ut_config, 'O')
# Get the console baudrate from the sdkconfig
_app = ttfw_idf.UT(app_path=UT_APP_PATH, config_name=ut_config, target=env.default_dut_cls.TARGET)
baud = _app.get_sdkconfig_config_value('CONFIG_ESP_CONSOLE_UART_BAUDRATE')
if baud is None:
baud = 115200
            Utility.console_log("Can't find the console baudrate in sdkconfig, using 115200 as default")
else:
baud = int(baud, 10) if isinstance(baud, str) else baud
Utility.console_log('Console baudrate is {}'.format(baud))
# Get the DUT with specified baudrate
dut = env.get_dut('unit-test-app', app_path=UT_APP_PATH, app_config_name=ut_config,
allow_dut_exception=True, baudrate=baud)
if len(case_config[ut_config]) > 0:
replace_app_bin(dut, 'unit-test-app', case_config[ut_config][0].get('app_bin'))
dut.start_app()
Utility.console_log('Download finished, start running test cases', 'O')
for one_case in case_config[ut_config]:
log_test_case('test case', one_case, ut_config)
performance_items = []
# create junit report test case
junit_test_case = TinyFW.JunitReport.create_test_case(format_case_name(one_case))
try:
run_one_normal_case(dut, one_case, junit_test_case)
performance_items = dut.get_performance_items()
except TestCaseFailed:
failed_cases.append(format_case_name(one_case))
except Exception as e:
handle_unexpected_exception(junit_test_case, e)
failed_cases.append(format_case_name(one_case))
finally:
TinyFW.JunitReport.update_performance(performance_items)
TinyFW.JunitReport.test_case_finish(junit_test_case)
# close DUT when finish running all cases for one config
env.close_dut(dut.name)
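# Example invocations (hypothetical case names), matching the extra_data forms
# documented in the docstring above; the decorator supplies the test env:
#
#   run_unit_test_cases(extra_data='UART select test [reset=SW_CPU_RESET]')
#   run_unit_test_cases(extra_data=[{'name': 'restart from PRO CPU',
#                                    'reset': 'SW_CPU_RESET',
#                                    'config': 'psram'}])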
class Handler(threading.Thread):
WAIT_SIGNAL_PATTERN = re.compile(r'Waiting for signal: \[(.+)]!')
SEND_SIGNAL_PATTERN = re.compile(r'Send signal: \[([^]]+)](\[([^]]+)])?!')
FINISH_PATTERN = re.compile(r'1 Tests (\d) Failures (\d) Ignored')
def __init__(self, dut, sent_signal_list, lock, parent_case_name, child_case_index, timeout):
self.dut = dut
self.sent_signal_list = sent_signal_list
self.lock = lock
self.parent_case_name = parent_case_name
self.child_case_name = ''
self.child_case_index = child_case_index + 1
self.finish = False
self.result = False
self.output = ''
self.fail_name = None
self.timeout = timeout
        self.force_stop = threading.Event()  # set to force this handler to stop running
        reset_dut(self.dut)  # reset the board so the test starts from the beginning
threading.Thread.__init__(self, name='{} Handler'.format(dut))
def run(self):
self.dut.start_capture_raw_data()
def get_child_case_name(data):
self.child_case_name = data[0]
time.sleep(1)
self.dut.write(str(self.child_case_index))
def one_device_case_finish(result):
""" one test finished, let expect loop break and log result """
self.finish = True
self.result = result
self.output = '[{}]\n\n{}\n'.format(self.child_case_name,
self.dut.stop_capture_raw_data())
if not result:
self.fail_name = self.child_case_name
def device_wait_action(data):
start_time = time.time()
expected_signal = data[0].encode('utf-8')
            while True:
if time.time() > start_time + self.timeout:
Utility.console_log('Timeout in device for function: %s' % self.child_case_name, color='orange')
break
with self.lock:
for sent_signal in self.sent_signal_list:
if expected_signal == sent_signal['name']:
self.dut.write(sent_signal['parameter'])
self.sent_signal_list.remove(sent_signal)
break
else:
time.sleep(0.01)
continue
break
def device_send_action(data):
with self.lock:
self.sent_signal_list.append({
'name': data[0].encode('utf-8'),
'parameter': '' if data[2] is None else data[2].encode('utf-8')
# no parameter means we only write EOL to DUT
})
def handle_device_test_finish(data):
""" test finished without reset """
# in this scenario reset should not happen
if int(data[1]):
# case ignored
Utility.console_log('Ignored: ' + self.child_case_name, color='orange')
one_device_case_finish(not int(data[0]))
try:
time.sleep(1)
self.dut.write("\"{}\"".format(self.parent_case_name))
self.dut.expect('Running ' + self.parent_case_name + '...')
except DUT.ExpectTimeout:
Utility.console_log('No case detected!', color='orange')
        while not self.finish and not self.force_stop.is_set():
            try:
                self.dut.expect_any((re.compile(r'\(' + str(self.child_case_index) + r'\)\s"(\w+)"'),
                                     get_child_case_name),
(self.WAIT_SIGNAL_PATTERN, device_wait_action), # wait signal pattern
(self.SEND_SIGNAL_PATTERN, device_send_action), # send signal pattern
(self.FINISH_PATTERN, handle_device_test_finish), # test finish pattern
timeout=self.timeout)
except DUT.ExpectTimeout:
Utility.console_log('Timeout in expect (%s seconds)' % self.timeout, color='orange')
one_device_case_finish(False)
break
def stop(self):
self.force_stop.set()
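# For reference, the multi-device synchronization protocol the handlers above
# relay: a DUT prints 'Waiting for signal: [X]!' and blocks until another DUT
# prints 'Send signal: [X]!' (or 'Send signal: [X][param]!'); the sender's
# handler pushes X (and the optional parameter) onto the shared
# sent_signal_list, and the waiter's handler pops it and writes the parameter
# back to its DUT's console to unblock it.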
def get_case_info(one_case):
parent_case = one_case['name']
child_case_num = one_case['child case num']
return parent_case, child_case_num
def get_dut(duts, env, name, ut_config, app_bin=None):
if name in duts:
dut = duts[name]
else:
dut = env.get_dut(name, app_path=UT_APP_PATH, app_config_name=ut_config, allow_dut_exception=True)
duts[name] = dut
replace_app_bin(dut, 'unit-test-app', app_bin)
dut.start_app() # download bin to board
return dut
def run_one_multiple_devices_case(duts, ut_config, env, one_case, app_bin, junit_test_case):
lock = threading.RLock()
threads = []
send_signal_list = []
result = True
parent_case, case_num = get_case_info(one_case)
for i in range(case_num):
dut = get_dut(duts, env, 'dut%d' % i, ut_config, app_bin)
threads.append(Handler(dut, send_signal_list, lock,
parent_case, i, one_case['timeout']))
    for thread in threads:
        thread.daemon = True
        thread.start()
output = 'Multiple Device Failed\n'
for thread in threads:
thread.join()
result = result and thread.result
output += thread.output
        if not thread.result:
            for thd in threads:
                thd.stop()
if not result:
junit_test_case.add_failure_info(output)
# collect performances from DUTs
performance_items = []
for dut_name in duts:
performance_items.extend(duts[dut_name].get_performance_items())
TinyFW.JunitReport.update_performance(performance_items)
return result
@ttfw_idf.idf_unit_test(env_tag='UT_T2_1', junit_report_by_case=True)
def run_multiple_devices_cases(env, extra_data):
"""
extra_data can be two types of value
1. as dict:
e.g.
{"name": "gpio master/slave test example",
"child case num": 2,
"config": "release",
"env_tag": "UT_T2_1"}
2. as list dict:
e.g.
[{"name": "gpio master/slave test example1",
"child case num": 2,
"config": "release",
"env_tag": "UT_T2_1"},
{"name": "gpio master/slave test example2",
"child case num": 2,
"config": "release",
"env_tag": "UT_T2_1"}]
"""
failed_cases = []
case_config = format_test_case_config(extra_data, env.default_dut_cls.TARGET)
duts = {}
for ut_config in case_config:
Utility.console_log('Running unit test for config: ' + ut_config, 'O')
for one_case in case_config[ut_config]:
            log_test_case('multi-device test', one_case, ut_config)
result = False
junit_test_case = TinyFW.JunitReport.create_test_case(format_case_name(one_case))
try:
result = run_one_multiple_devices_case(duts, ut_config, env, one_case,
one_case.get('app_bin'), junit_test_case)
except TestCaseFailed:
pass # result is False, this is handled by the finally block
except Exception as e:
handle_unexpected_exception(junit_test_case, e)
finally:
if result:
Utility.console_log('Success: ' + format_case_name(one_case), color='green')
else:
failed_cases.append(format_case_name(one_case))
Utility.console_log('Failed: ' + format_case_name(one_case), color='red')
TinyFW.JunitReport.test_case_finish(junit_test_case)
# close all DUTs when finish running all cases for one config
for dut in duts:
env.close_dut(dut)
duts = {}
def run_one_multiple_stage_case(dut, one_case, junit_test_case):
reset_dut(dut)
dut.start_capture_raw_data()
exception_reset_list = []
for test_stage in range(one_case['child case num']):
# select multi stage test case name
dut.write("\"{}\"".format(one_case['name']))
dut.expect('Running ' + one_case['name'] + '...')
# select test function for current stage
dut.write(str(test_stage + 1))
# we want to set this flag in callbacks (inner functions)
# use list here so we can use append to set this flag
stage_finish = list()
def last_stage():
return test_stage == one_case['child case num'] - 1
def check_reset():
if one_case['reset']:
assert exception_reset_list # reboot but no exception/reset logged. should never happen
result = False
if len(one_case['reset']) == len(exception_reset_list):
for i, exception in enumerate(exception_reset_list):
if not reset_reason_matches(exception, one_case['reset'][i]):
break
else:
result = True
if not result:
                    err_msg = 'Reset Check Failed: \r\n\tExpected: {}\r\n\tActual: {}'.format(one_case['reset'],
                                                                                              exception_reset_list)
Utility.console_log(err_msg, color='orange')
junit_test_case.add_failure_info(err_msg)
            else:
                # we allow omitting reset info in multi-stage cases
                result = True
return result
# expect callbacks
def one_case_finish(result):
""" one test finished, let expect loop break and log result """
# handle test finish
result = result and check_reset()
output = dut.stop_capture_raw_data()
if result:
Utility.console_log('Success: ' + format_case_name(one_case), color='green')
else:
Utility.console_log('Failed: ' + format_case_name(one_case), color='red')
junit_test_case.add_failure_info(output)
raise TestCaseFailed(format_case_name(one_case))
stage_finish.append('break')
def handle_exception_reset(data):
"""
just append data to exception list.
exception list will be checked in ``handle_reset_finish``, once reset finished.
"""
exception_reset_list.append(data[0])
def handle_test_finish(data):
""" test finished without reset """
# in this scenario reset should not happen
if int(data[1]):
# case ignored
Utility.console_log('Ignored: ' + format_case_name(one_case), color='orange')
junit_test_case.add_skipped_info('ignored')
            # only a pass in the last stage is regarded as a real pass
if last_stage():
one_case_finish(not int(data[0]))
else:
                Utility.console_log('test finished before entering the last stage', color='orange')
one_case_finish(False)
def handle_next_stage(data):
""" reboot finished. we goto next stage """
if last_stage():
# already last stage, should never goto next stage
Utility.console_log("didn't finish at last stage", color='orange')
one_case_finish(False)
else:
stage_finish.append('continue')
while not stage_finish:
try:
timeout_value = one_case['timeout']
dut.expect_any((RESET_PATTERN, handle_exception_reset),
(EXCEPTION_PATTERN, handle_exception_reset),
(ABORT_PATTERN, handle_exception_reset),
(ASSERT_PATTERN, handle_exception_reset),
(FINISH_PATTERN, handle_test_finish),
(UT_APP_BOOT_UP_DONE, handle_next_stage),
timeout=timeout_value)
except DUT.ExpectTimeout:
Utility.console_log('Timeout in expect (%s seconds)' % timeout_value, color='orange')
one_case_finish(False)
break
if stage_finish[0] == 'break':
# test breaks on current stage
break
@ttfw_idf.idf_unit_test(env_tag='UT_T1_1', junit_report_by_case=True)
def run_multiple_stage_cases(env, extra_data):
"""
    extra_data can be two types of value
    1. as dict: Mandatory keys: "name" and "child case num", optional keys: "reset" and others
    2. as list of string or dict:
           [case1, case2, case3, {"name": "restart from PRO CPU", "child case num": 2}, ...]
:param env: test env instance
:param extra_data: the case name or case list or case dictionary
:return: None
"""
case_config = format_test_case_config(extra_data, env.default_dut_cls.TARGET)
    # We don't want to stop on a failed case (except for some special scenarios we can't handle).
    # Failed cases are collected in this list and reported before the test function exits.
failed_cases = []
for ut_config in case_config:
Utility.console_log('Running unit test for config: ' + ut_config, 'O')
dut = env.get_dut('unit-test-app', app_path=UT_APP_PATH, app_config_name=ut_config, allow_dut_exception=True)
if len(case_config[ut_config]) > 0:
replace_app_bin(dut, 'unit-test-app', case_config[ut_config][0].get('app_bin'))
dut.start_app()
for one_case in case_config[ut_config]:
log_test_case('multi-stage test', one_case, ut_config)
performance_items = []
junit_test_case = TinyFW.JunitReport.create_test_case(format_case_name(one_case))
try:
run_one_multiple_stage_case(dut, one_case, junit_test_case)
performance_items = dut.get_performance_items()
except TestCaseFailed:
failed_cases.append(format_case_name(one_case))
except Exception as e:
handle_unexpected_exception(junit_test_case, e)
failed_cases.append(format_case_name(one_case))
finally:
TinyFW.JunitReport.update_performance(performance_items)
TinyFW.JunitReport.test_case_finish(junit_test_case)
# close DUT when finish running all cases for one config
env.close_dut(dut.name)
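# Example invocation (hypothetical case name): a three-stage case whose first
# two stages each end in a software CPU reset:
#
#   run_multiple_stage_cases(extra_data={'name': 'restart from PRO CPU',
#                                        'child case num': 3,
#                                        'reset': 'SW_CPU_RESET,SW_CPU_RESET'})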
def detect_update_unit_test_info(env, extra_data, app_bin):
case_config = format_test_case_config(extra_data, env.default_dut_cls.TARGET)
for ut_config in case_config:
dut = env.get_dut('unit-test-app', app_path=UT_APP_PATH, app_config_name=ut_config)
replace_app_bin(dut, 'unit-test-app', app_bin)
dut.start_app()
reset_dut(dut)
# get the list of test cases
dut.write('')
dut.expect("Here's the test menu, pick your combo:", timeout=DEFAULT_TIMEOUT)
def find_update_dic(name, _t, _timeout, child_case_num=None):
for _case_data in extra_data:
if _case_data['name'] == name:
_case_data['type'] = _t
if 'timeout' not in _case_data:
_case_data['timeout'] = _timeout
if child_case_num:
_case_data['child case num'] = child_case_num
try:
while True:
data = dut.expect(TEST_PATTERN, timeout=DEFAULT_TIMEOUT)
test_case_name = data[1]
m = re.search(r'\[timeout=(\d+)\]', data[2])
if m:
timeout = int(m.group(1))
else:
timeout = 30
                m = re.search(r'\[multi_stage\]', data[2])
                if m:
                    test_case_type = MULTI_STAGE_ID
                else:
                    m = re.search(r'\[multi_device\]', data[2])
                    if m:
                        test_case_type = MULTI_DEVICE_ID
                    else:
                        # simple test cases have no submenu; record them and move on
                        test_case_type = SIMPLE_TEST_ID
                        find_update_dic(test_case_name, test_case_type, timeout)
                        if data[3] and re.search(END_LIST_STR, data[3]):
                            break
                        continue
                # for multi-stage and multi-device cases, find the last submenu item
                data = dut.expect(TEST_SUBMENU_PATTERN, timeout=DEFAULT_TIMEOUT)
                find_update_dic(test_case_name, test_case_type, timeout, child_case_num=int(data[0]))
                if data[1] and re.search(END_LIST_STR, data[1]):
                    break
# check if the unit test case names are correct, i.e. they could be found in the device
for _dic in extra_data:
if 'type' not in _dic:
raise ValueError("Unit test \"{}\" doesn't exist in the flashed device!".format(_dic.get('name')))
except DUT.ExpectTimeout:
Utility.console_log('Timeout during getting the test list', color='red')
finally:
dut.close()
        # the test menu is the same for all configs, so there is no need to check the remaining ones
        break
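# For reference, the menu entries parsed above look roughly like this (exact
# formatting depends on the unit-test app); TEST_PATTERN captures the case
# name and its tags, TEST_SUBMENU_PATTERN counts the numbered sub-entries of
# multi-stage/multi-device cases:
#
#   (1)     "UART select test" [uart]
#   (2)     "restart from PRO CPU" [restart][reset=SW_CPU_RESET]
#   (3)     "gpio master/slave test" [multi_device][timeout=60]
#           (1)     "gpio master"
#           (2)     "gpio slave"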
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--repeat', '-r',
help='Number of repetitions for the test(s). Default is 1.',
type=int,
default=1
)
parser.add_argument('--env_config_file', '-e',
help='test env config file',
default=None)
parser.add_argument('--app_bin', '-b',
help='application binary file for flashing the chip',
default=None)
parser.add_argument('test',
help='Comma separated list of <option>:<argument> where option can be "name" (default), '
'"child case num", "config", "timeout".',
nargs='+')
args = parser.parse_args()
list_of_dicts = []
for test in args.test:
test_args = test.split(r',')
test_dict = dict()
for test_item in test_args:
if len(test_item) == 0:
continue
            pair = test_item.split(r':', 1)
            if len(pair) == 1:
                # a bare item is treated as the test case name
                test_dict['name'] = pair[0]
            elif pair[0] == 'name':
                test_dict['name'] = pair[1]
            elif pair[0] == 'timeout' or pair[0] == 'child case num':
                test_dict[pair[0]] = int(pair[1])
            else:
                test_dict[pair[0]] = pair[1]
test_dict['app_bin'] = args.app_bin
list_of_dicts.append(test_dict)
TinyFW.set_default_config(env_config_file=args.env_config_file)
env_config = TinyFW.get_default_config()
env_config['app'] = ttfw_idf.UT
env_config['dut'] = ttfw_idf.IDFDUT
env_config['test_suite_name'] = 'unit_test_parsing'
test_env = Env.Env(**env_config)
detect_update_unit_test_info(test_env, extra_data=list_of_dicts, app_bin=args.app_bin)
for index in range(1, args.repeat + 1):
if args.repeat > 1:
Utility.console_log('Repetition {}'.format(index), color='green')
for dic in list_of_dicts:
t = dic.get('type', SIMPLE_TEST_ID)
if t == SIMPLE_TEST_ID:
run_unit_test_cases(extra_data=dic)
elif t == MULTI_STAGE_ID:
run_multiple_stage_cases(extra_data=dic)
elif t == MULTI_DEVICE_ID:
run_multiple_devices_cases(extra_data=dic)
else:
raise ValueError('Unknown type {} of {}'.format(t, dic.get('name')))
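# Example command line (hypothetical case names): run each test twice, taking
# the env config from env_config.yml:
#
#   python unit_test.py -e env_config.yml -r 2 \
#       'UART select test,timeout:60' \
#       'name:gpio master/slave test,child case num:2,config:release'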