Merge branch 'test/backport_ci_changes_to_v3.1' into 'release/v3.1'

test: backport ci changes (backport v3.1)

See merge request idf/esp-idf!2869
This commit is contained in:
He Yin Ling 2018-07-30 09:58:31 +08:00
commit a8b92d560b
18 changed files with 890 additions and 171 deletions

View File

@ -1,9 +1,9 @@
stages:
- build
- assign_test
- host_test
- unit_test
- test
- test_report
- integration_test
- deploy
variables:
@ -151,6 +151,8 @@ build_ssc_01:
build_ssc_02:
<<: *build_ssc_template
# If you want to add new build ssc jobs, please add it into dependencies of `assign_test` and `.test_template`
build_esp_idf_tests:
<<: *build_template
artifacts:
@ -158,7 +160,7 @@ build_esp_idf_tests:
- tools/unit-test-app/output
- components/idf_test/unit_test/TestCaseAll.yml
- components/idf_test/unit_test/CIConfigs/*.yml
expire_in: 6 mos
expire_in: 1 mos
script:
- cd tools/unit-test-app
- MAKEFLAGS= make help # make sure kconfig tools are built in single process
@ -218,6 +220,7 @@ build_examples_06:
build_examples_07:
<<: *build_examples_template
# If you want to add new build example jobs, please add it into dependencies of `.example_test_template`
build_docs:
stage: build
@ -250,22 +253,21 @@ build_docs:
- make html
- ../check_doc_warnings.sh
test_nvs_on_host:
stage: test
.host_test_template: &host_test_template
stage: host_test
image: $CI_DOCKER_REGISTRY/esp32-ci-env$BOT_DOCKER_IMAGE_TAG
tags:
- host_test
dependencies: []
test_nvs_on_host:
<<: *host_test_template
script:
- cd components/nvs_flash/test_nvs_host
- make test
test_nvs_coverage:
stage: test
image: $CI_DOCKER_REGISTRY/esp32-ci-env$BOT_DOCKER_IMAGE_TAG
tags:
- host_test
dependencies: []
<<: *host_test_template
artifacts:
paths:
- components/nvs_flash/test_nvs_host/coverage_report
@ -279,63 +281,46 @@ test_nvs_coverage:
- make coverage_report
test_partition_table_on_host:
stage: test
image: $CI_DOCKER_REGISTRY/esp32-ci-env$BOT_DOCKER_IMAGE_TAG
<<: *host_test_template
tags:
- build
dependencies: []
script:
- cd components/partition_table/test_gen_esp32part_host
- ./gen_esp32part_tests.py
test_wl_on_host:
stage: test
image: $CI_DOCKER_REGISTRY/esp32-ci-env$BOT_DOCKER_IMAGE_TAG
tags:
- host_test
<<: *host_test_template
artifacts:
paths:
- components/wear_levelling/test_wl_host/coverage_report.zip
dependencies: []
script:
- cd components/wear_levelling/test_wl_host
- make test
test_fatfs_on_host:
stage: test
image: $CI_DOCKER_REGISTRY/esp32-ci-env$BOT_DOCKER_IMAGE_TAG
<<: *host_test_template
tags:
- wl_host_test
dependencies: []
script:
- cd components/fatfs/test_fatfs_host/
- make test
test_spiffs_on_host:
stage: test
image: $CI_DOCKER_REGISTRY/esp32-ci-env$BOT_DOCKER_IMAGE_TAG
<<: *host_test_template
tags:
- wl_host_test
dependencies: []
script:
- cd components/spiffs/test_spiffs_host/
- make test
test_multi_heap_on_host:
stage: test
image: $CI_DOCKER_REGISTRY/esp32-ci-env$BOT_DOCKER_IMAGE_TAG
tags:
- host_test
<<: *host_test_template
script:
- cd components/heap/test_multi_heap_host
- ./test_all_configs.sh
test_build_system:
stage: test
image: $CI_DOCKER_REGISTRY/esp32-ci-env$BOT_DOCKER_IMAGE_TAG
tags:
- host_test
dependencies: []
<<: *host_test_template
script:
- ${IDF_PATH}/tools/ci/test_configure_ci_environment.sh
- rm -rf test_build_system
@ -343,67 +328,8 @@ test_build_system:
- cd test_build_system
- ${IDF_PATH}/tools/ci/test_build_system.sh
test_report:
stage: test_report
image: $CI_DOCKER_REGISTRY/esp32-ci-env$BOT_DOCKER_IMAGE_TAG
tags:
- report
only:
- master
- triggers
- schedules
- /^release\/v/
- /^v\d+\.\d+(\.\d+)?($|-)/
variables:
LOG_PATH: "$CI_PROJECT_DIR/$CI_COMMIT_SHA"
TEST_CASE_FILE_PATH: "$CI_PROJECT_DIR/components/idf_test"
REPORT_PATH: "$CI_PROJECT_DIR/CI_Test_Report"
MODULE_UPDATE_FILE: "$CI_PROJECT_DIR/tools/unit-test-app/tools/ModuleDefinition.yml"
#dependencies:
#We need all UT* and IT* artifacts except for only a few other
artifacts:
when: always
paths:
- $REPORT_PATH
- $LOG_PATH
expire_in: 12 mos
script:
# calc log path
- VER_NUM=`git rev-list HEAD | wc -l | awk '{print $1}'`
- SHA_ID=`echo $CI_COMMIT_SHA | cut -c 1-7`
- REVISION="${VER_NUM}_${SHA_ID}"
# replace / to _ in branch name
- ESCAPED_BRANCH_NAME=`echo $CI_COMMIT_REF_NAME | sed 's/\//___/g'`
# result path and artifacts path
- RESULT_PATH="$CI_PROJECT_NAME/$ESCAPED_BRANCH_NAME/$REVISION"
- ARTIFACTS_PATH="$GITLAB_HTTP_SERVER/idf/esp-idf/builds/$CI_JOB_ID/artifacts/browse/$CI_COMMIT_SHA"
# clone test bench
- git clone $GITLAB_SSH_SERVER/yinling/auto_test_script.git
- cd auto_test_script
- python $CHECKOUT_REF_SCRIPT auto_test_script
# generate report
- TEST_RESULT=Pass
- python CITestReport.py -l $LOG_PATH -t $TEST_CASE_FILE_PATH -p $REPORT_PATH -r $RESULT_PATH -a $ARTIFACTS_PATH -m $MODULE_UPDATE_FILE || TEST_RESULT=Fail
# commit to CI-test-result project
- git clone $GITLAB_SSH_SERVER/qa/CI-test-result.git
- rm -rf "CI-test-result/RawData/$RESULT_PATH"
- cp -R $CI_PROJECT_NAME CI-test-result/RawData
- cd CI-test-result
# config git user
- git config --global user.email "ci-test-result@espressif.com"
- git config --global user.name "ci-test-result"
# commit test result
- git add .
- git commit . -m "update test result for $CI_PROJECT_NAME/$CI_COMMIT_REF_NAME/$CI_COMMIT_SHA, pipeline ID $CI_PIPELINE_ID" || exit 0
- git push origin master
- test "${TEST_RESULT}" = "Pass" || exit 1
test_esp_err_to_name_on_host:
stage: test
image: $CI_DOCKER_REGISTRY/esp32-ci-env$BOT_DOCKER_IMAGE_TAG
tags:
- host_test
dependencies: []
<<: *host_test_template
script:
- cd tools/
- ./gen_esp_err_to_name.py
@ -440,7 +366,7 @@ push_master_to_github:
deploy_docs:
stage: assign_test
stage: host_test
image: $CI_DOCKER_REGISTRY/esp32-ci-env$BOT_DOCKER_IMAGE_TAG
tags:
- deploy
@ -475,7 +401,7 @@ deploy_docs:
- echo "[document preview][zh_CN] $CI_DOCKER_REGISTRY/docs/esp-idf/zh_CN/${GIT_VER}/index.html"
check_doc_links:
stage: test
stage: host_test
image: $CI_DOCKER_REGISTRY/esp32-ci-env$BOT_DOCKER_IMAGE_TAG
tags:
- check_doc_links
@ -577,7 +503,7 @@ assign_test:
- python CIAssignTestCases.py -t $IDF_PATH/components/idf_test/integration_test -c $IDF_PATH/.gitlab-ci.yml -b $IDF_PATH/SSC/ssc_bin
.example_test_template: &example_test_template
stage: test
stage: integration_test
when: on_success
only:
- master
@ -585,14 +511,21 @@ assign_test:
- /^v\d+\.\d+(\.\d+)?($|-)/
- triggers
- schedules
# gitlab ci do not support match job with RegEx or wildcard now in dependencies.
# we have a lot build example jobs and the binaries them exceed the limitation of artifacts.
# we can't artifact them in one job. For example test jobs, download all artifacts from previous stages.
dependencies:
- assign_test
- build_examples_00
- build_examples_01
- build_examples_02
- build_examples_03
- build_examples_04
- build_examples_05
- build_examples_06
- build_examples_07
artifacts:
when: always
paths:
- $LOG_PATH
expire_in: 6 mos
expire_in: 1 mos
variables:
TEST_FW_PATH: "$CI_PROJECT_DIR/tools/tiny-test-fw"
TEST_CASE_PATH: "$CI_PROJECT_DIR/examples"
@ -624,7 +557,7 @@ assign_test:
ENV_FILE: "$CI_PROJECT_DIR/ci-test-runner-configs/$CI_RUNNER_DESCRIPTION/EnvConfig.yml"
.test_template: &test_template
stage: test
stage: integration_test
when: on_success
only:
- master
@ -632,7 +565,6 @@ assign_test:
- /^v\d+\.\d+(\.\d+)?($|-)/
- triggers
- schedules
allow_failure: true
dependencies:
- assign_test
- build_ssc_00
@ -642,7 +574,7 @@ assign_test:
when: always
paths:
- $LOG_PATH
expire_in: 6 mos
expire_in: 1 mos
variables:
LOCAL_ENV_CONFIG_PATH: "$CI_PROJECT_DIR/ci-test-runner-configs/$CI_RUNNER_DESCRIPTION/ESP32_IDF"
LOG_PATH: "$CI_PROJECT_DIR/$CI_COMMIT_SHA"
@ -671,7 +603,7 @@ nvs_compatible_test:
paths:
- $LOG_PATH
- nvs_wifi.bin
expire_in: 6 mos
expire_in: 1 mos
tags:
- ESP32_IDF
- NVS_Compatible
@ -1250,6 +1182,12 @@ IT_010_01:
- ESP32_IDF
- SSC_T5_1
IT_011_01:
<<: *test_template
tags:
- ESP32_IDF
- SSC_T50_1
IT_501_01:
<<: *test_template
tags:

View File

@ -371,4 +371,4 @@ static void check_time_deepsleep(void)
TEST_ASSERT_MESSAGE(dt_ms > 0, "Time in deep sleep is negative");
}
TEST_CASE_MULTIPLE_STAGES("check a time after wakeup from deep sleep", "[deepsleep][reset=DEEPSLEEP_RESET]", trigger_deepsleep, check_time_deepsleep);
TEST_CASE_MULTIPLE_STAGES("check a time after wakeup from deep sleep", "[deepsleep][reset=DEEPSLEEP_RESET][timeout=60]", trigger_deepsleep, check_time_deepsleep);

View File

@ -7,6 +7,17 @@
- - SSC SSC1 ram
- - 'R SSC1 C +FREEHEAP:'
.MESH_INIT_COND: &MESH_INIT_COND
test script: InitCondBase
restore post cmd set:
- ''
- - SSC SSC[1-<node_num>] mesh -Q -o 1
- - R SSC[1-<node_num>] C MESH_NETWORK
- - SSC SSC[1-<node_num>] mesh -Q -o 3
- - R SSC[1-<node_num>] C +MESH_CONFIG:ALL
- - SSC SSC[1-<node_num>] ram
- - R SSC[1-<node_num>] A <heap_size>:(\d+)
initial condition:
- tag: APM1
<<: *SSC_INIT_COND
@ -2038,3 +2049,209 @@ initial condition:
- - R SSC[1-2] C +BLEADV:OK
- - SSC SSC1 ram
- - R SSC1 A <heap_size>:(\d+)
- tag: ENABLED_1
<<: *MESH_INIT_COND
initial condition detail: if mesh tree not exist, start one node first, then start others, after mesh network
established, root connect server
check cmd set:
- ''
- - SSC SSC[1-<node_num>] mesh -Q -o 2
- - R SSC[1-<node_num>] T <node_num>
- - MESHTREE
- - R PC_COM RE "MESHTREE:%%s%20nodes"%%(<node_num>)
- - SOC SOC1 LISTEN <test_tcp_port1> <pc_ip>
- - R SOC_COM L OK
- - SSC MNODE(0) mesh -S -o 0 -i <pc_ip> -t <test_tcp_port1>
- - ''
- - SOC SOC1 MACCEPT GSOC1
- - P MNODE(0) C +CONNECT,OK
- R SOC_COM L OK
- - SSC SSC[1-<node_num>] mesh -F -o 4 -a 1
- - P SSC[1-<node_num>] C +MESHFLAG:OK
- - SSC SSC[1-<node_num>] mesh -F -o 5 -a 1
- - P SSC[1-<node_num>] C +MESHFLAG:OK
restore cmd set:
- ''
- - SSC SSC[1-<node_num>] reboot
- - P SSC[1-<node_num>] C !!!ready!!!
- - SSC SSC[1-<node_num>] mesh -I
- - P SSC[1-<node_num>] C +MESH:INITED
- - SSC SSC[1-<node_num>] mesh -A -o 12 -t <duration_ms> -c <cnx_rssi> -l <select_rssi> -f <switch_rssi> -b <backoff_rssi>
- - P SSC[1-<node_num>] C +MESH_SET_PARENT_SWITCH:OK
- - SSC SSC[1-<node_num>] mesh -A -o 9 -t <auth_mode> -s <map_password>
- - P SSC[1-<node_num>] C +MESH_SET_AP_AUTH_MODE:OK C +MESH_SET_AP_AUTH_PWD:OK
- - SSC SSC[1-<node_num>] mesh -P -g <mesh_id> -s <ap_ssid> -p <ap_password> -n
<ap_channel> -m <max_connect> -y <max_layer>
- - P SSC[1-<node_num>] C +MESH:CONFIG,OK
- - SSC SSC1 mesh -T
- - P SSC1 C +MESH:START,OK
- - DELAY 10
- - ''
- - SSC SSC[2-<node_num>] mesh -T
- - P SSC[2-<node_num>] C +MESH:START,OK
- - DELAY <delay_time>
- - ''
- - SSC SSC[1-<node_num>] mesh -Q -o 2
- - R SSC[1-<node_num>] T <node_num>
- - MESHTREE
- - R PC_COM RE "MESHTREE:%%s%20nodes"%%(<node_num>)
- - SSC MNODE(0) mesh -Q -o 1 -t <delay_time>
- - R MNODE(0) C NETWORK_TIME:PASS
- - SOC SOC1 LISTEN <test_tcp_port1> <pc_ip>
- - R SOC_COM L OK
- - SSC MNODE(0) mesh -S -o 0 -i <pc_ip> -t <test_tcp_port1>
- - ''
- - SOC SOC1 MACCEPT GSOC1
- - P MNODE(0) C +CONNECT,OK
- R SOC_COM L OK
- - SSC SSC[1-<node_num>] mesh -F -o 4 -a 1
- - P SSC[1-<node_num>] C +MESHFLAG:OK
- - SSC SSC[1-<node_num>] mesh -F -o 5 -a 1
- - P SSC[1-<node_num>] C +MESHFLAG:OK
force restore cmd set:
- ''
- - SSC SSC[1-<node_num>] restore
- - P SSC[1-<node_num>] C !!!ready!!!
- - SSC SSC[1-<node_num>] mesh -I
- - P SSC[1-<node_num>] C +MESH:INITED
- - SSC SSC[1-<node_num>] mesh -A -o 12 -t <duration_ms> -c <cnx_rssi> -l <select_rssi> -f <switch_rssi> -b <backoff_rssi>
- - P SSC[1-<node_num>] C +MESH_SET_PARENT_SWITCH:OK
- - SSC SSC[1-<node_num>] mesh -A -o 9 -t <auth_mode> -s <map_password>
- - P SSC[1-<node_num>] C +MESH_SET_AP_AUTH_MODE:OK C +MESH_SET_AP_AUTH_PWD:OK
- - SSC SSC[1-<node_num>] mesh -P -g <mesh_id> -s <ap_ssid> -p <ap_password> -n
<ap_channel> -m <max_connect> -y <max_layer>
- - P SSC[1-<node_num>] C +MESH:CONFIG,OK
- - SSC SSC1 mesh -T
- - P SSC1 C +MESH:START,OK
- - DELAY 10
- - ''
- - SSC SSC[2-<node_num>] mesh -T
- - P SSC[2-<node_num>] C +MESH:START,OK
- - DELAY <delay_time>
- - ''
- - SSC SSC[1-<node_num>] mesh -Q -o 2
- - R SSC[1-<node_num>] T <node_num>
- - MESHTREE
- - R PC_COM RE "MESHTREE:%%s%20nodes"%%(<node_num>)
- - SSC MNODE(0) mesh -Q -o 1 -t <delay_time>
- - R MNODE(0) C NETWORK_TIME:PASS
- - SOC SOC1 LISTEN <test_tcp_port1> <pc_ip>
- - R SOC_COM L OK
- - SSC MNODE(0) mesh -S -o 0 -i <pc_ip> -t <test_tcp_port1>
- - ''
- - SOC SOC1 MACCEPT GSOC1
- - P MNODE(0) C +CONNECT,OK
- R SOC_COM L OK
- - SSC SSC[1-<node_num>] mesh -F -o 4 -a 1
- - P SSC[1-<node_num>] C +MESHFLAG:OK
- - SSC SSC[1-<node_num>] mesh -F -o 5 -a 1
- - P SSC[1-<node_num>] C +MESHFLAG:OK
- tag: ENABLED_2
<<: *MESH_INIT_COND
initial condition detail: if mesh tree not exist, start all nodes together
check cmd set:
- ''
- - SSC SSC[1-<node_num>] mesh -Q -o 2
- - R SSC[1-<node_num>] T <node_num>
- - MESHTREE
- - R PC_COM RE "MESHTREE:%%s%20nodes"%%(<node_num>)
restore cmd set:
- ''
- - SSC SSC[1-<node_num>] reboot
- - P SSC[1-<node_num>] C !!!ready!!!
- - SSC SSC[1-<node_num>] mesh -I
- - P SSC[1-<node_num>] C +MESH:INITED
- - SSC SSC[1-<node_num>] mesh -A -o 12 -t <duration_ms> -c <cnx_rssi> -l <select_rssi> -f <switch_rssi> -b <backoff_rssi>
- - P SSC[1-<node_num>] C +MESH_SET_PARENT_SWITCH:OK
- - SSC SSC[1-<node_num>] mesh -A -o 9 -t <auth_mode> -s <map_password>
- - P SSC[1-<node_num>] C +MESH_SET_AP_AUTH_MODE:OK C +MESH_SET_AP_AUTH_PWD:OK
- - SSC SSC[1-<node_num>] mesh -P -g <mesh_id> -s <ap_ssid> -p <ap_password> -n
<ap_channel> -m <max_connect> -y <max_layer>
- - P SSC[1-<node_num>] C +MESH:CONFIG,OK
- - SSC SSC[1-<node_num>] mesh -T
- - P SSC[1-<node_num>] C +MESH:START,OK
- - DELAY <delay_time>
- - ''
- - SSC SSC[1-<node_num>] mesh -Q -o 2
- - R SSC[1-<node_num>] T <node_num>
- - MESHTREE
- - R PC_COM RE "MESHTREE:%%s%20nodes"%%(<node_num>)
- - SSC MNODE(0) mesh -Q -o 1 -t <delay_time>
- - R MNODE(0) C NETWORK_TIME:PASS
force restore cmd set:
- ''
- - SSC SSC[1-<node_num>] restore
- - P SSC[1-<node_num>] C !!!ready!!!
- - SSC SSC[1-<node_num>] mesh -I
- - P SSC[1-<node_num>] C +MESH:INITED
- - SSC SSC[1-<node_num>] mesh -A -o 12 -t <duration_ms> -c <cnx_rssi> -l <select_rssi> -f <switch_rssi> -b <backoff_rssi>
- - P SSC[1-<node_num>] C +MESH_SET_PARENT_SWITCH:OK
- - SSC SSC[1-<node_num>] mesh -A -o 9 -t <auth_mode> -s <map_password>
- - P SSC[1-<node_num>] C +MESH_SET_AP_AUTH_MODE:OK C +MESH_SET_AP_AUTH_PWD:OK
- - SSC SSC[1-<node_num>] mesh -P -g <mesh_id> -s <ap_ssid> -p <ap_password> -n
<ap_channel> -m <max_connect> -y <max_layer>
- - P SSC[1-<node_num>] C +MESH:CONFIG,OK
- - SSC SSC[1-<node_num>] mesh -T
- - P SSC[1-<node_num>] C +MESH:START,OK
- - DELAY <delay_time>
- - ''
- - SSC SSC[1-<node_num>] mesh -Q -o 2
- - R SSC[1-<node_num>] T <node_num>
- - MESHTREE
- - R PC_COM RE "MESHTREE:%%s%20nodes"%%(<node_num>)
- - SSC MNODE(0) mesh -Q -o 1 -t <delay_time>
- - R MNODE(0) C NETWORK_TIME:PASS
- tag: ENABLED_3
<<: *MESH_INIT_COND
initial condition detail: all mesh nodes in softap+sta mode, mesh configed but not started
check cmd set:
- ''
restore cmd set:
- ''
- - SSC SSC[1-<node_num>] reboot
- - P SSC[1-<node_num>] C !!!ready!!!
- - SSC SSC[1-<node_num>] mesh -I
- - P SSC[1-<node_num>] C +MESH:INITED
- - SSC SSC[1-<node_num>] mesh -A -o 12 -t <duration_ms> -c <cnx_rssi> -l <select_rssi> -f <switch_rssi> -b <backoff_rssi>
- - P SSC[1-<node_num>] C +MESH_SET_PARENT_SWITCH:OK
- - SSC SSC[1-<node_num>] mesh -A -o 9 -t <auth_mode> -s <map_password>
- - P SSC[1-<node_num>] C +MESH_SET_AP_AUTH_MODE:OK C +MESH_SET_AP_AUTH_PWD:OK
- - SSC SSC[1-<node_num>] mesh -P -g <mesh_id> -s <ap_ssid> -p <ap_password> -n
<ap_channel> -m <max_connect> -y <max_layer>
- - P SSC[1-<node_num>] C +MESH:CONFIG,OK
force restore cmd set:
- ''
- - SSC SSC[1-<node_num>] restore
- - P SSC[1-<node_num>] C !!!ready!!!
- - SSC SSC[1-<node_num>] mesh -I
- - P SSC[1-<node_num>] C +MESH:INITED
- - SSC SSC[1-<node_num>] mesh -A -o 12 -t <duration_ms> -c <cnx_rssi> -l <select_rssi> -f <switch_rssi> -b <backoff_rssi>
- - P SSC[1-<node_num>] C +MESH_SET_PARENT_SWITCH:OK
- - SSC SSC[1-<node_num>] mesh -A -o 9 -t <auth_mode> -s <map_password>
- - P SSC[1-<node_num>] C +MESH_SET_AP_AUTH_MODE:OK C +MESH_SET_AP_AUTH_PWD:OK
- - SSC SSC[1-<node_num>] mesh -P -g <mesh_id> -s <ap_ssid> -p <ap_password> -n
<ap_channel> -m <max_connect> -y <max_layer>
- - P SSC[1-<node_num>] C +MESH:CONFIG,OK
- tag: DISABLED_1
<<: *MESH_INIT_COND
initial condition detail: all mesh node in softap+sta mode, disable all mesh node
check cmd set:
- ''
- - ASSERT
- - ''
restore cmd set:
- ''
- - SSC SSC[1-<node_num>] reboot
- - P SSC[1-<node_num>] C !!!ready!!!
- - SSC SSC[1-<node_num>] op -Q
- - P SSC[1-<node_num>] C +CURMODE:3
- - SSC SSC[1-<node_num>] sta -D
- - P SSC[1-<node_num>] C +QAP:OK
force restore cmd set:
- ''
- - SSC SSC[1-<node_num>] restore
- - P SSC[1-<node_num>] C !!!ready!!!
- - SSC SSC[1-<node_num>] op -S -o 3
- - P SSC[1-<node_num>] C +MODE:OK
- - SSC SSC[1-<node_num>] sta -D
- - P SSC[1-<node_num>] C +QAP:OK

View File

@ -0,0 +1,178 @@
test cases:
- CI ready: 'Yes'
ID: MESH_COMM_0101
SDK: ESP32_IDF
Test App: SSC_MESH
auto test: 'Yes'
category: Function
cmd set:
- ''
- - SSC MNODE(0) meshsend -S -d <MNODE(0,-1)_mac> -l 1000 -c 20 -b 20 -f 2
- - P MNODE(0) C +MESHSEND,OK
- P MNODE(0,-1) C +MESHRXPKT:OK
execution time: 0.0
expected result: |-
1. succeed
2. succeed
3. succeed
4. succeed
initial condition: ENABLED_2
level: Integration
module: Mesh
steps: |-
1. mesh init
2. mesh config
3. mesh start
4. root send unicast to leaf
sub module: Communication
summary: root send unicast to leaf
test environment: SSC_T50_1
test point 1: basic function
test point 2: unicast test
version: v1 (2017-7-20)
- CI ready: 'Yes'
ID: MESH_COMM_0106
SDK: ESP32_IDF
Test App: SSC_MESH
auto test: 'Yes'
category: Function
cmd set:
- ''
- - SSC MNODE(0,-1) meshsend -S -d <MNODE(0)_mac> -l 1000 -c 20 -b 20 -f 2
- - P MNODE(0,-1) C +MESHSEND,OK
- P MNODE(0) C +MESHRXPKT:OK
execution time: 0.0
expected result: |-
1. succeed
2. succeed
3. succeed
4. succeed
initial condition: ENABLED_1
level: Integration
module: Mesh
steps: |-
1. mesh init
2. mesh config
3. mesh start
4. leaf send unicast to root
sub module: Communication
summary: leaf send unicast to root
test environment: SSC_T50_1
test point 1: basic function
test point 2: unicast test
version: v1 (2017-7-20)
- CI ready: 'Yes'
ID: MESH_COMM_0116
SDK: ESP32_IDF
Test App: SSC
auto test: 'Yes'
category: Function
cmd set:
- ''
- - SSC MNODE(0,-1) meshsend -S -d <MNODE(0,1)_mac> -l 1000 -c 100 -f 2
- - P MNODE(0,-1) C +MESHTXPKT:OK C +MESHSEND,OK
- P MNODE(0,1) C +MESHRXPKT:OK
- - DELAY 5
- - ''
- - SSC MNODE(0,-1) meshsend -S -d <MNODE(0,1)_mac> -l 1000 -c 10 -f 2
- - P MNODE(0,-1) C +MESHTXPKT:OK C +MESHSEND,OK
- P MNODE(0,1) C +MESHRXPKT:OK
expected result: |-
1. succeed
2. succeed
3. succeed
4. succeed
initial condition: ENABLED_2
level: Integration
module: Mesh
steps: |-
1. mesh init
2. mesh config
3. mesh start
4. root send unicast to others
sub module: Communication
summary: fluid control test when node send packets upward
test environment: SSC_T50_1
test point 1: basic function
test point 2: unicast test
version: v1 (2017-7-20)
- CI ready: 'Yes'
ID: MESH_COMM_0210
SDK: ESP32_IDF
Test App: SSC_MESH
auto test: 'Yes'
category: Function
cmd set:
- ''
- - SSC MNODE(0) meshsend -S -d <MNODE(0,0)_mac> -l 1460 -c 20 -b 20 -f 4
- - P MNODE(0) C +MESHTXPKT:OK C +MESHSEND,OK
- P MNODE(0,0) C +MESHRXPKT:OK
execution time: 0.0
expected result: |-
1. succeed
2. succeed
3. succeed
4. succeed
initial condition: ENABLED_1
level: Integration
module: Mesh
steps: |-
1. mesh init
2. mesh config
3. mesh start
4. root send unicast
sub module: Communication
summary: root send unicast downward with flag=FROMDS
test environment: SSC_T50_1
test point 1: basic function
test point 2: meshsend parameter check
version: v1 (2017-7-20)
- CI ready: 'Yes'
ID: MESH_COMM_0902
SDK: ESP32_IDF
Test App: SSC_MESH
auto test: 'Yes'
category: Function
cmd set:
- ''
- - SSC MNODE(0,0,0) mesh -D -o 0 -g <MNODE(0)_mac>
- - P MNODE(0,0,0) C +MESH_ADDR_LIST_ADD
- - SSC MNODE(0,0,0) mesh -D -o 0 -g <MNODE(0,0)_mac>
- - P MNODE(0,0,0) C +MESH_ADDR_LIST_ADD
- - SSC MNODE(0,0,0) meshsend -P -p 0 -t 7 -l 12
- - P MNODE(0,0,0) C +MESHSEND:OPTION,1
- - SSC MNODE(0,0,0) meshsend -S -w 1 -d 01:00:5E:00:00:00 -c 10 -b 20 -f
2
- - P MNODE(0,0,0) C +MESHTXPKT:ERROR C +MESHSEND,OK
- P MNODE(0,0) NC +MESHRXPKT:OK
- - SSC MNODE(0,0,0) meshsend -S -w 1 -d 01:00:5E:00:00:00 -l 0 -c 10 -b 20 -f
2
- - P MNODE(0,0,0) C +MESHTXPKT:ERROR C +MESHSEND,OK
- P MNODE(0,0) NC +MESHRXPKT:OK
- - SSC MNODE(0,0,0) meshsend -S -w 1 -d 01:00:5E:00:00:00 -l 1000 -c 10 -b 20 -f
2
- - P MNODE(0,0,0) C +MESHTXPKT:OK C +MESHSEND,OK
- P MNODE(0) C +MESHRXPKT:OK
- - SSC MNODE(0,0,0) meshsend -S -w 1 -d 01:00:5E:00:00:00 -l 1460 -c 10 -b 20 -f
2
- - P MNODE(0,0,0) C +MESHTXPKT:ERROR C +MESHSEND,OK
execution time: 0.0
expected result: |-
1. succeed
2. succeed
3. succeed
4. succeed
initial condition: ENABLED_1
level: Integration
module: Mesh
steps: |-
1. mesh init
2. mesh config
3. mesh start
4. node send multicast with different length
sub module: Communication
summary: node send multicast with different length
test environment: SSC_T50_1
test point 1: basic function
test point 2: meshsend parameter check
version: v1 (2017-7-20)

View File

@ -0,0 +1,217 @@
test cases:
- CI ready: 'Yes'
ID: MESH_EST_0106
SDK: ESP32_IDF
Test App: SSC_MESH
auto test: 'Yes'
category: Function
cmd set:
- ''
- - SSC SSC[1-<node_num>] mesh -I
- - P SSC[1-<node_num>] C +MESH:INITED
- - SSC SSC[1-<node_num>] mesh -A -o 7 -t 25
- - P SSC[1-<node_num>] C +MESH_SET_TX_POWER:OK
- - SSC SSC[1-<node_num>] mesh -P -g <mesh_id> -s <ap_ssid> -p <ap_password> -n <ap_channel> -m <max_connect> -y <max_layer>
- - P SSC[1-<node_num>] C +MESH:CONFIG,OK
- - SSC SSC[1-<node_num>] mesh -T
- - P SSC[1-<node_num>] C +MESH:START
- - DELAY <delay_time>
- - ''
- - SSC SSC[1-<node_num>] mesh -Q -o 2
- - R SSC[1-<node_num>] T <node_num>
- - MESHTREE
- - R PC_COM RE "MESHTREE:%%s%20nodes"%%(<node_num>)
execution time: 0.0
expected result: |-
1. succeed
2. succeed
3. succeed
initial condition: DISABLED_1
level: Integration
module: Mesh
steps: |-
1. mesh init
2. mesh config
3. mesh start
sub module: Network Establish
summary: set node with different tx power, then establish network
test environment: SSC_T50_1
test point 1: basic function
test point 2: mesh config
version: v1 (2017-7-20)
- CI ready: 'Yes'
ID: MESH_EST_0313
SDK: ESP32_IDF
Test App: SSC_MESH
auto test: 'Yes'
category: Function
cmd set:
- ''
- - SSC SSC[1-<node_num>] mesh -I
- - P SSC[1-<node_num>] C +MESH:INITED
- - SSC SSC[1-<node_num>] mesh -P -g <mesh_id> -s <ap_ssid> -p <ap_password> -n
<ap_channel> -m <max_connect> -y <max_layer>
- - P SSC[1-<node_num>] C +MESH:CONFIG,OK
- - SSC SSC[1-2] mesh -T
- - P SSC[1-2] C +MESH:START
- - DELAY 30
- - ''
- - SSC SSC[1-2] mesh -Q -o 2
- - R SSC[1-2] T 2
- - MESHTREE
- - R PC_COM C MESHTREE:2%20nodes
- - SSC MNODE(0) reboot
- - P MNODE(0) C !!!ready!!!
- P MNODE(0,0) C MESH_EVENT_DISCONNECTED
- - DELAY 10
- - P MNODE(0,0) C MESH_EVENT_CONNECTED,1
execution time: 0.0
expected result: |-
1. succeed
2. succeed
3. succeed
4. succeed
initial condition: DISABLED_1
level: Integration
module: Mesh
steps: |-
1. switch to sta+softap mode
2. mesh init
3. mesh config
4. mesh start
sub module: Network Establish
summary: 2 nodes do mesh network establish,then reboot root
test environment: SSC_T50_1
test point 1: basic function
test point 2: mesh network establish
version: v1 (2017-7-20)
- CI ready: 'Yes'
ID: MESH_EST_0317
SDK: ESP32_IDF
Test App: SSC_MESH
auto test: 'Yes'
category: Function
cmd set:
- ''
- - SSC SSC[1-<node_num>] mesh -I
- - P SSC[1-<node_num>] C +MESH:INITED
- - SSC SSC[1-<node_num>] mesh -P -g <mesh_id> -s <ap_ssid> -p <ap_password> -n
<ap_channel> -m 1 -y 15
- - P SSC[1-<node_num>] C +MESH:CONFIG,OK
- - SSC SSC[1-15] mesh -T
- - P SSC[1-15] C +MESH:START
- - DELAY <delay_time>
- - ''
- - SSC SSC[1-15] mesh -Q -o 2
- - R SSC[1-15] T 15
- - MESHTREE
- - R PC_COM C MESHTREE:15%20nodes
execution time: 0.0
expected result: |-
1. succeed
2. succeed
3. succeed
4. succeed
5. succeed
initial condition: DISABLED_1
level: Integration
module: Mesh
steps: |-
1. switch to sta+softap mode
2. mesh init
3. mesh config: set mesh layer=15
4. start 15 nodes
5. check mesh tree
sub module: Network Establish
summary: set special mesh tree which layer=15
test environment: SSC_T50_1
test point 1: basic function
test point 2: mesh network establish
version: v1 (2017-7-20)
- CI ready: 'Yes'
ID: MESH_EST_0404
SDK: ESP32_IDF
Test App: SSC_MESH
auto test: 'Yes'
category: Function
cmd set:
- ''
- - SSC MNODE(0) reboot
- - P MNODE(0) C !!!ready!!!
- - DELAY 2
- - ''
- - SSC MNODE(0) mesh -I
- - P MNODE(0) C +MESH:INITED
- - SSC MNODE(0) mesh -A -o 9 -t <auth_mode> -s <map_password>
- - P MNODE(0) C +MESH_SET_AP_AUTH_MODE:OK C +MESH_SET_AP_AUTH_PWD:OK
- - SSC MNODE(0) mesh -P -g <mesh_id> -s <ap_ssid> -p <ap_password> -n
<ap_channel> -m <max_connect> -y <max_layer>
- - P MNODE(0) C +MESH:CONFIG,OK
- - SSC MNODE(0) mesh -T
- - P MNODE(0) C +MESH:START
- - DELAY <delay_time>
- - ''
- - SSC SSC[1-<node_num>] mesh -Q -o 2
- - R SSC[1-<node_num>] T <node_num>
- - MESHTREE
- - R PC_COM RE "MESHTREE:%%s%20nodes"%%(<node_num>)
execution time: 0.0
expected result: |-
1. succeed
2. succeed
3. succeed
4. succeed
5. succeed
initial condition: ENABLED_2
level: Integration
module: Mesh
steps: |-
1. mesh init
2. mesh config
3. mesh start
4. save value
5. all nodes reboot
sub module: Network Establish
summary: after network is establish, disable root,wait some time, start the root node again
test environment: SSC_T50_1
test point 1: basic function
test point 2: mesh network re-establish
version: v1 (2017-7-20)
- CI ready: 'Yes'
ID: MESH_EST_0405
SDK: ESP32_IDF
Test App: SSC_MESH
auto test: 'Yes'
category: Function
cmd set:
- ''
- - SSC MNODE(0) reboot
- - P MNODE(0) C !!!ready!!!
- - DELAY <delay_time>
- - ''
- - SSC SSC[1-<node_num>] mesh -Q -o 2
- - R SSC[1-<node_num>] T <node_num>
- - MESHTREE
- - R PC_COM RE "MESHTREE:%%s%20nodes"%%(<node_num-1>)
execution time: 0.0
expected result: |-
1. succeed
2. succeed
3. succeed
4. succeed
5. succeed
initial condition: ENABLED_2
level: Integration
module: Mesh
steps: |-
1. mesh init
2. mesh config
3. mesh start
4. save value
5. all nodes reboot
sub module: Network Establish
summary: after network is establish, disable root, check mesh network
test environment: SSC_T50_1
test point 1: basic function
test point 2: mesh network re-establish
version: v1 (2017-7-20)

View File

@ -238,3 +238,11 @@ test environment:
PC has 1 wired NIC connected to AP.
PC has 1 WiFi NIC.
6 SSC target connect with PC by UART.
- tag: SSC_T50_1
<<: *TEST_ENV
Special: Y
Target Count: 50
test environment detail: |-
PC has 1 wired NIC connected to AP.
PC has 1 WiFi NIC.
50 SSC target connect with PC by UART.

View File

@ -14,7 +14,7 @@ import TinyFW
import IDF
@IDF.idf_example_test(env_tag="Example_WIFI")
@IDF.idf_example_test(env_tag="Example_WIFI", ignore=True)
def test_examples_protocol_esp_http_client(env, extra_data):
"""
steps: |

View File

@ -14,7 +14,7 @@ import TinyFW
import IDF
@IDF.idf_example_test(env_tag="Example_WIFI")
@IDF.idf_example_test(env_tag="Example_WIFI", ignore=True)
def test_examples_protocol_https_request(env, extra_data):
"""
steps: |

View File

@ -109,16 +109,22 @@ class UnitTestAssignTest(CIAssignTest.AssignTest):
with open(test_case_path, "r") as f:
raw_data = yaml.load(f)
test_cases = raw_data["test cases"]
# filter keys are lower case. Do map lower case keys with original keys.
try:
key_mapping = {x.lower(): x for x in test_cases[0].keys()}
except IndexError:
key_mapping = dict()
if case_filter:
for key in case_filter:
filtered_cases = []
for case in test_cases:
try:
mapped_key = key_mapping[key]
# bot converts string to lower case
if isinstance(case[key], str):
_value = case[key].lower()
if isinstance(case[mapped_key], str):
_value = case[mapped_key].lower()
else:
_value = case[key]
_value = case[mapped_key]
if _value in case_filter[key]:
filtered_cases.append(case)
except KeyError:

View File

@ -117,7 +117,7 @@ class _DataCache(_queue.Queue):
break
return ret
def get_data(self, timeout=0):
def get_data(self, timeout=0.0):
"""
get a copy of data from cache.
@ -154,6 +154,52 @@ class _DataCache(_queue.Queue):
self.data_cache = self.data_cache[index:]
class _LogThread(threading.Thread, _queue.Queue):
"""
We found some SD card on Raspberry Pi could have very bad performance.
It could take seconds to save small amount of data.
If the DUT receives data and save it as log, then it stops receiving data until log is saved.
This could lead to expect timeout.
As an workaround to this issue, ``BaseDUT`` class will create a thread to save logs.
Then data will be passed to ``expect`` as soon as received.
"""
def __init__(self):
threading.Thread.__init__(self, name="LogThread")
_queue.Queue.__init__(self, maxsize=0)
self.setDaemon(True)
self.flush_lock = threading.Lock()
def save_log(self, filename, data):
"""
:param filename: log file name
:param data: log data. Must be ``bytes``.
"""
self.put({"filename": filename, "data": data})
def flush_data(self):
with self.flush_lock:
data_cache = dict()
while True:
# move all data from queue to data cache
try:
log = self.get_nowait()
try:
data_cache[log["filename"]] += log["data"]
except KeyError:
data_cache[log["filename"]] = log["data"]
except _queue.Empty:
break
# flush data
for filename in data_cache:
with open(filename, "ab+") as f:
f.write(data_cache[filename])
def run(self):
while True:
time.sleep(1)
self.flush_data()
class _RecvThread(threading.Thread):
PERFORMANCE_PATTERN = re.compile(r"\[Performance]\[(\w+)]: ([^\r\n]+)\r?\n")
@ -214,6 +260,10 @@ class BaseDUT(object):
"""
DEFAULT_EXPECT_TIMEOUT = 5
MAX_EXPECT_FAILURES_TO_SAVED = 10
LOG_THREAD = _LogThread()
LOG_THREAD.start()
def __init__(self, name, port, log_file, app, **kwargs):
@ -224,12 +274,33 @@ class BaseDUT(object):
self.app = app
self.data_cache = _DataCache()
self.receive_thread = None
self.expect_failures = []
# open and start during init
self.open()
def __str__(self):
return "DUT({}: {})".format(self.name, str(self.port))
def _save_expect_failure(self, pattern, data, start_time):
"""
Save expect failure. If the test fails, then it will print the expect failures.
In some cases, user will handle expect exceptions.
The expect failures could be false alarm, and test case might generate a lot of such failures.
Therefore, we don't print the failure immediately and limit the max size of failure list.
"""
self.expect_failures.insert(0, {"pattern": pattern, "data": data,
"start": start_time, "end": time.time()})
self.expect_failures = self.expect_failures[:self.MAX_EXPECT_FAILURES_TO_SAVED]
def _save_dut_log(self, data):
"""
Save DUT log into file using another thread.
This is a workaround for some devices takes long time for file system operations.
See descriptions in ``_LogThread`` for details.
"""
self.LOG_THREAD.save_log(self.log_file, data)
# define for methods need to be overwritten by Port
@classmethod
def list_available_ports(cls):
@ -329,6 +400,7 @@ class BaseDUT(object):
if self.receive_thread:
self.receive_thread.exit()
self._port_close()
self.LOG_THREAD.flush_data()
def write(self, data, eol="\r\n", flush=True):
"""
@ -437,14 +509,19 @@ class BaseDUT(object):
start_time = time.time()
while True:
ret, index = method(data, pattern)
if ret is not None or time.time() - start_time > timeout:
if ret is not None:
self.data_cache.flush(index)
break
time_remaining = start_time + timeout - time.time()
if time_remaining < 0:
break
# wait for new data from cache
data = self.data_cache.get_data(time.time() + timeout - start_time)
data = self.data_cache.get_data(time_remaining)
if ret is None:
raise ExpectTimeout(self.name + ": " + _pattern_to_string(pattern))
pattern = _pattern_to_string(pattern)
self._save_expect_failure(pattern, data, start_time)
raise ExpectTimeout(self.name + ": " + pattern)
return ret
def _expect_multi(self, expect_all, expect_item_list, timeout):
@ -492,10 +569,11 @@ class BaseDUT(object):
else:
match_succeed = True if matched_expect_items else False
if time.time() - start_time > timeout or match_succeed:
time_remaining = start_time + timeout - time.time()
if time_remaining < 0 or match_succeed:
break
else:
data = self.data_cache.get_data(time.time() + timeout - start_time)
data = self.data_cache.get_data(time_remaining)
if match_succeed:
# do callback and flush matched data cache
@ -508,7 +586,9 @@ class BaseDUT(object):
# flush already matched data
self.data_cache.flush(slice_index)
else:
raise ExpectTimeout(self.name + ": " + str([_pattern_to_string(x) for x in expect_items]))
pattern = str([_pattern_to_string(x["pattern"]) for x in expect_items])
self._save_expect_failure(pattern, data, start_time)
raise ExpectTimeout(self.name + ": " + pattern)
@_expect_lock
def expect_any(self, *expect_items, **timeout):
@ -554,6 +634,22 @@ class BaseDUT(object):
timeout["timeout"] = self.DEFAULT_EXPECT_TIMEOUT
return self._expect_multi(True, expect_items, **timeout)
@staticmethod
def _format_ts(ts):
return "{}:{}".format(time.strftime("%m-%d %H:%M:%S", time.localtime(ts)), str(ts % 1)[2:5])
def print_debug_info(self):
    """
    Print debug info for the current DUT.

    Currently this dumps the expect failures saved by
    ``_save_expect_failure`` (newest first) via ``Utility.console_log``.
    """
    Utility.console_log("DUT debug info for DUT: {}:".format(self.name), color="orange")
    template = u"\t[pattern]: {}\r\n\t[data]: {}\r\n\t[time]: {} - {}\r\n"
    for failure in self.expect_failures:
        message = template.format(failure["pattern"], failure["data"],
                                  self._format_ts(failure["start"]),
                                  self._format_ts(failure["end"]))
        Utility.console_log(message, color="orange")
class SerialDUT(BaseDUT):
""" serial with logging received data feature """
@ -574,17 +670,14 @@ class SerialDUT(BaseDUT):
self.serial_configs.update(kwargs)
super(SerialDUT, self).__init__(name, port, log_file, app, **kwargs)
@staticmethod
def _format_data(data):
def _format_data(self, data):
"""
format data for logging. do decode and add timestamp.
:param data: raw data from read
:return: formatted data (str)
"""
timestamp = time.time()
timestamp = "[{}:{}]".format(time.strftime("%m-%d %H:%M:%S", time.localtime(timestamp)),
str(timestamp % 1)[2:5])
timestamp = "[{}]".format(self._format_ts(time.time()))
formatted_data = timestamp.encode() + b"\r\n" + data + b"\r\n"
return formatted_data
@ -597,8 +690,7 @@ class SerialDUT(BaseDUT):
def _port_read(self, size=1):
data = self.port_inst.read(size)
if data:
with open(self.log_file, "ab+") as _log_file:
_log_file.write(self._format_data(data))
self._save_dut_log(self._format_data(data))
return data
def _port_write(self, data):

View File

@ -162,14 +162,17 @@ class Env(object):
return if_addr[self.PROTO_MAP[proto]][0]
@_synced
def close(self):
def close(self, dut_debug=False):
"""
close()
close all DUTs of the Env.
:param dut_debug: if dut_debug is True, then print all dut expect failures before close it
:return: None
"""
for dut_name in self.allocated_duts:
dut = self.allocated_duts[dut_name]["dut"]
if dut_debug:
dut.print_debug_info()
dut.close()
self.allocated_duts = dict()

View File

@ -17,6 +17,10 @@ import os
import re
import subprocess
import functools
import random
import tempfile
from serial.tools import list_ports
import DUT
@ -40,6 +44,8 @@ class IDFDUT(DUT.SerialDUT):
""" IDF DUT, extends serial with ESPTool methods """
CHIP_TYPE_PATTERN = re.compile(r"Detecting chip type[.:\s]+(.+)")
# if need to erase NVS partition in start app
ERASE_NVS = True
def __init__(self, name, port, log_file, app, **kwargs):
self.download_config, self.partition_table = app.process_app_info()
@ -68,24 +74,39 @@ class IDFDUT(DUT.SerialDUT):
return cls.get_chip(app, port) is not None
@_tool_method
def start_app(self):
def start_app(self, erase_nvs=ERASE_NVS):
"""
download and start app.
:param: erase_nvs: whether erase NVS partition during flash
:return: None
"""
if erase_nvs:
address = self.partition_table["nvs"]["offset"]
size = self.partition_table["nvs"]["size"]
nvs_file = tempfile.NamedTemporaryFile()
nvs_file.write(chr(0xFF) * size)
nvs_file.flush()
download_config = self.download_config + [address, nvs_file.name]
else:
download_config = self.download_config
retry_baud_rates = ["921600", "115200"]
error = IDFToolError()
for baud_rate in retry_baud_rates:
try:
subprocess.check_output(["python", self.app.esptool,
"--port", self.port, "--baud", baud_rate]
+ self.download_config)
break
except subprocess.CalledProcessError as error:
continue
else:
raise error
try:
for baud_rate in retry_baud_rates:
try:
subprocess.check_output(["python", self.app.esptool,
"--port", self.port, "--baud", baud_rate]
+ download_config)
break
except subprocess.CalledProcessError as error:
continue
else:
raise error
finally:
if erase_nvs:
nvs_file.close()
@_tool_method
def reset(self):
@ -96,6 +117,17 @@ class IDFDUT(DUT.SerialDUT):
"""
subprocess.check_output(["python", self.app.esptool, "--port", self.port, "run"])
@_tool_method
def erase_partition(self, partition):
    """
    Prepare an all-0xFF image file matching the size of a partition,
    used to erase that partition's contents.

    :param partition: partition name to erase (a key of ``self.partition_table``)
    :return: None
    """
    # NOTE(review): ``address`` is computed but not used inside this method
    # as shown — confirm whether a flash/write step belongs here.
    address = self.partition_table[partition]["offset"]
    size = self.partition_table[partition]["size"]
    with open(".erase_partition.tmp", "wb") as f:
        # The file is opened in binary mode, so bytes must be written;
        # ``chr(0xFF) * size`` is a str and raises TypeError on Python 3.
        # ``b"\xff" * size`` behaves identically on Python 2 as well.
        f.write(b"\xff" * size)
@_tool_method
def dump_flush(self, output_file, **kwargs):
"""

View File

@ -20,9 +20,8 @@ from IDF.IDFApp import IDFApp, Example, UT
from IDF.IDFDUT import IDFDUT
def idf_example_test(app=Example, dut=IDFDUT, chip="ESP32",
module="examples", execution_time=1,
**kwargs):
def idf_example_test(app=Example, dut=IDFDUT, chip="ESP32", module="examples", execution_time=1,
level="example", erase_nvs=True, **kwargs):
"""
decorator for testing idf examples (with default values for some keyword args).
@ -31,12 +30,44 @@ def idf_example_test(app=Example, dut=IDFDUT, chip="ESP32",
:param chip: chip supported, string or tuple
:param module: module, string
:param execution_time: execution time in minutes, int
:param level: test level, could be used to filter test cases, string
:param erase_nvs: if need to erase_nvs in DUT.start_app()
:param kwargs: other keyword args
:return: test method
"""
# not use partial function as define as function support auto generating document
try:
# try to config the default behavior of erase nvs
dut.ERASE_NVS = erase_nvs
except AttributeError:
pass
return TinyFW.test_method(app=app, dut=dut, chip=chip, module=module,
execution_time=execution_time, **kwargs)
execution_time=execution_time, level=level, **kwargs)
def idf_unit_test(app=UT, dut=IDFDUT, chip="ESP32", module="unit-test", execution_time=1,
                  level="unit", erase_nvs=True, **kwargs):
    """
    Decorator for IDF unit-test cases, pre-filling common keyword arguments.

    :param app: test application class
    :param dut: dut class
    :param chip: chip supported, string or tuple
    :param module: module, string
    :param execution_time: execution time in minutes, int
    :param level: test level, could be used to filter test cases, string
    :param erase_nvs: if need to erase_nvs in DUT.start_app()
    :param kwargs: other keyword args
    :return: test method
    """
    try:
        # configure the default NVS-erase behavior on the DUT class;
        # DUT classes without this attribute are simply left unchanged
        dut.ERASE_NVS = erase_nvs
    except AttributeError:
        pass
    case_kwargs = dict(kwargs)
    case_kwargs.update(app=app, dut=dut, chip=chip, module=module,
                       execution_time=execution_time, level=level)
    return TinyFW.test_method(**case_kwargs)
def log_performance(item, value):

View File

@ -107,6 +107,7 @@ MANDATORY_INFO = {
"execution_time": 1,
"env_tag": "default",
"category": "function",
"ignore": False,
}
@ -130,7 +131,7 @@ def test_method(**kwargs):
test_func_file_name = frame[1][1]
case_info = MANDATORY_INFO.copy()
case_info["name"] = test_func.__name__
case_info["name"] = case_info["ID"] = test_func.__name__
case_info.update(kwargs)
@functools.wraps(test_func)
@ -154,6 +155,7 @@ def test_method(**kwargs):
xunit_file = os.path.join(env_inst.app_cls.get_log_folder(env_config["test_suite_name"]),
XUNIT_FILE_NAME)
XUNIT_RECEIVER.begin_case(test_func.__name__, time.time(), test_func_file_name)
result = False
try:
Utility.console_log("starting running test: " + test_func.__name__, color="green")
# execute test function
@ -163,12 +165,11 @@ def test_method(**kwargs):
except Exception as e:
# handle all the exceptions here
traceback.print_exc()
result = False
# log failure
XUNIT_RECEIVER.failure(str(e), test_func_file_name)
finally:
# do close all DUTs
env_inst.close()
# do close all DUTs, if result is False then print DUT debug info
env_inst.close(dut_debug=(not result))
# end case and output result
XUNIT_RECEIVER.end_case(test_func.__name__, time.time())
with open(xunit_file, "ab+") as f:

View File

@ -125,6 +125,7 @@ class AssignTest(object):
# by default we only run function in CI, as other tests could take long time
DEFAULT_FILTER = {
"category": "function",
"ignore": False,
}
def __init__(self, test_case_path, ci_config_file, case_group=Group):
@ -188,6 +189,16 @@ class AssignTest(object):
bot_filter = dict()
return bot_filter
def _apply_bot_test_count(self):
"""
Bot could also pass test count.
If filtered cases need to be tested for several times, then we do duplicate them here.
"""
test_count = os.getenv("BOT_TEST_COUNT")
if test_count:
test_count = int(test_count)
self.test_cases *= test_count
def assign_cases(self):
"""
separate test cases to groups and assign test cases to CI jobs.
@ -198,6 +209,7 @@ class AssignTest(object):
failed_to_assign = []
case_filter = self._apply_bot_filter()
self.test_cases = self._search_cases(self.test_case_path, case_filter)
self._apply_bot_test_count()
test_groups = self._group_cases()
for group in test_groups:
for job in self.jobs:

View File

@ -68,11 +68,15 @@ def _convert_to_lower_case(item):
def _filter_one_case(test_method, case_filter):
""" Apply filter for one case (the filter logic is the same as described in ``filter_test_cases``) """
filter_result = True
for key in case_filter:
# filter keys are lower case. Do map lower case keys with original keys.
key_mapping = {x.lower(): x for x in test_method.case_info.keys()}
for orig_key in case_filter:
key = key_mapping[orig_key]
if key in test_method.case_info:
# the filter key is both in case and filter
# we need to check if they match
filter_item = _convert_to_lower_case(case_filter[key])
filter_item = _convert_to_lower_case(case_filter[orig_key])
accepted_item = _convert_to_lower_case(test_method.case_info[key])
if isinstance(filter_item, (tuple, list)) \

View File

@ -179,28 +179,11 @@ class Parser(object):
"""
prop = self.parse_case_properities(description)
idf_path = os.getenv("IDF_PATH")
# use relative file path to IDF_PATH, to make sure file path is consist
relative_file_path = os.path.relpath(file_name, idf_path)
file_name_hash = int(hashlib.sha256(relative_file_path).hexdigest(), base=16) % 1000
if file_name_hash in self.file_name_cache:
self.file_name_cache[file_name_hash] += 1
else:
self.file_name_cache[file_name_hash] = 1
tc_id = "UT_%s_%s_%03d%02d" % (self.module_map[prop["module"]]['module abbr'],
self.module_map[prop["module"]]['sub module abbr'],
file_name_hash,
self.file_name_cache[file_name_hash])
test_case = deepcopy(TEST_CASE_PATTERN)
test_case.update({"config": config_name,
"module": self.module_map[prop["module"]]['module'],
"CI ready": "No" if prop["ignore"] == "Yes" else "Yes",
"ID": tc_id,
"ID": name,
"test point 2": prop["module"],
"steps": name,
"test environment": prop["test_env"],

View File

@ -105,8 +105,7 @@ def format_test_case_config(test_case_data):
return case_config
@TinyFW.test_method(app=UT, dut=IDF.IDFDUT, chip="ESP32", module="unit_test",
execution_time=1, env_tag="UT_T1_1")
@IDF.idf_unit_test(env_tag="UT_T1_1")
def run_unit_test_cases(env, extra_data):
"""
extra_data can be three types of value
@ -339,8 +338,7 @@ def case_run(duts, ut_config, env, one_case, failed_cases):
Utility.console_log("Failed: " + one_case["name"], color="red")
@TinyFW.test_method(app=UT, dut=IDF.IDFDUT, chip="ESP32", module="master_slave_test_case", execution_time=1,
env_tag="UT_T2_1")
@IDF.idf_unit_test(env_tag="UT_T2_1")
def run_multiple_devices_cases(env, extra_data):
"""
extra_data can be two types of value
@ -377,8 +375,7 @@ def run_multiple_devices_cases(env, extra_data):
raise AssertionError("Unit Test Failed")
@TinyFW.test_method(app=UT, dut=IDF.IDFDUT, chip="ESP32", module="unit_test",
execution_time=1, env_tag="UT_T1_1")
@IDF.idf_unit_test(env_tag="UT_T1_1")
def run_multiple_stage_cases(env, extra_data):
"""
extra_data can be 2 types of value