Merge branch 'bugfix/freertos_incorrect_placement_of_unblocked_task_during_tick_increment' into 'master'

fix(freertos/idf): Refactor suspend-resume tests

Closes IDF-8364

See merge request espressif/esp-idf!26849
Sudeep Mohanty committed 35fc493dcc on 2023-11-30 15:53:31 +08:00
5 changed files with 208 additions and 200 deletions
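
Context for the diffs below: the pattern under test throughout this merge request is FreeRTOS scheduler suspension. A minimal sketch of that pattern follows (illustration only, not part of the commit; the helper name run_without_preemption is made up). On ESP-IDF FreeRTOS, vTaskSuspendAll() stops task switching only on the calling core, the tick interrupt keeps firing, and FreeRTOS calls that may block must not be made while the scheduler is suspended.

    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    /* Illustration only: run a short, non-blocking critical section without
     * being preempted by other tasks on this core. */
    void run_without_preemption(void (*work)(void))
    {
        vTaskSuspendAll();   /* stop task switching on the calling core */
        work();              /* must not block or yield while suspended */
        xTaskResumeAll();    /* tasks whose delays expired meanwhile become ready now */
    }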

View File

@@ -19,5 +19,5 @@ set(priv_include_dirs
# the final elf, the component can be registered as WHOLE_ARCHIVE
idf_component_register(SRC_DIRS ${src_dirs}
PRIV_INCLUDE_DIRS ${priv_include_dirs}
PRIV_REQUIRES test_utils esp_timer driver
PRIV_REQUIRES test_utils driver
WHOLE_ARCHIVE)

View File

@@ -17,12 +17,7 @@
#include "unity.h"
#include "test_utils.h"
#include "driver/gptimer.h"
#ifndef CONFIG_FREERTOS_UNICORE
#include "esp_ipc.h"
#endif
#include "esp_freertos_hooks.h"
#include "esp_rom_sys.h"
#include "esp_timer.h"
/* Counter task counts a target variable forever */
static void task_count(void *vp_counter)
@@ -210,197 +205,4 @@ TEST_CASE("Resume task from ISR (other core)", "[freertos]")
{
test_resume_task_from_isr(!UNITY_FREERTOS_CPU);
}
#if !CONFIG_FREERTOS_SMP
/*
Scheduler suspension behavior has changed in SMP FreeRTOS, thus these tests are disabled for SMP FreeRTOS.
See IDF-5201
*/
static volatile bool block;
static bool suspend_both_cpus;
static void IRAM_ATTR suspend_scheduler_while_block_set(void *arg)
{
vTaskSuspendAll();
while (block) { };
esp_rom_delay_us(1);
xTaskResumeAll();
}
static void IRAM_ATTR suspend_scheduler_on_both_cpus(void)
{
block = true;
if (suspend_both_cpus) {
TEST_ESP_OK(esp_ipc_call((xPortGetCoreID() == 0) ? 1 : 0, &suspend_scheduler_while_block_set, NULL));
}
vTaskSuspendAll();
}
static void IRAM_ATTR resume_scheduler_on_both_cpus(void)
{
block = false;
xTaskResumeAll();
}
static const int waiting_ms = 2000;
static const int delta_ms = 100;
static int duration_wait_task_ms;
static int duration_ctrl_task_ms;
static void waiting_task(void *pvParameters)
{
int cpu_id = xPortGetCoreID();
int64_t start_time = esp_timer_get_time();
printf("Start waiting_task cpu=%d\n", cpu_id);
vTaskDelay(waiting_ms / portTICK_PERIOD_MS);
duration_wait_task_ms = (esp_timer_get_time() - start_time) / 1000;
printf("Finish waiting_task cpu=%d, time=%d ms\n", cpu_id, duration_wait_task_ms);
vTaskDelete(NULL);
}
static void control_task(void *pvParameters)
{
int cpu_id = xPortGetCoreID();
esp_rom_delay_us(2000); // let the waiting_task start first
printf("Start control_task cpu=%d\n", cpu_id);
int64_t start_time = esp_timer_get_time();
suspend_scheduler_on_both_cpus();
esp_rom_delay_us(waiting_ms * 1000 + delta_ms * 1000);
resume_scheduler_on_both_cpus();
duration_ctrl_task_ms = (esp_timer_get_time() - start_time) / 1000;
printf("Finish control_task cpu=%d, time=%d ms\n", cpu_id, duration_ctrl_task_ms);
vTaskDelete(NULL);
}
static void test_scheduler_suspend1(int cpu)
{
/* This test covers the case where both CPUs are suspended and then resumed.
* A task whose delay expired while the schedulers were suspended must, in any case, be ready to run once they are resumed.
* (In an older implementation of xTaskIncrementTick(), the delay of waiting_task() continued counting down
* (excluding the time spent suspended) only after control_task() had finished.)
*/
duration_wait_task_ms = 0;
duration_ctrl_task_ms = 0;
printf("Test for CPU%d\n", cpu);
int other_cpu = (cpu == 0) ? 1 : 0;
xTaskCreatePinnedToCore(&waiting_task, "waiting_task", 8192, NULL, 5, NULL, other_cpu);
xTaskCreatePinnedToCore(&control_task, "control_task", 8192, NULL, 5, NULL, cpu);
vTaskDelay(waiting_ms * 2 / portTICK_PERIOD_MS);
TEST_ASSERT_INT_WITHIN(4, waiting_ms + delta_ms + 4, duration_ctrl_task_ms);
if (suspend_both_cpus == false && cpu == 1) {
// CPU0 continues to increment the tick count, and waiting_task does not depend on the suspended scheduler on CPU1
TEST_ASSERT_INT_WITHIN(2, waiting_ms, duration_wait_task_ms);
} else {
TEST_ASSERT_INT_WITHIN(4, waiting_ms + delta_ms + 4, duration_wait_task_ms);
}
printf("\n");
}
TEST_CASE("Test the waiting task not missed due to scheduler suspension on both CPUs", "[freertos]")
{
printf("Suspend both CPUs:\n");
suspend_both_cpus = true;
test_scheduler_suspend1(0);
test_scheduler_suspend1(1);
}
TEST_CASE("Test the waiting task not missed due to scheduler suspension on one CPU", "[freertos]")
{
printf("Suspend only one CPU:\n");
suspend_both_cpus = false;
test_scheduler_suspend1(0);
test_scheduler_suspend1(1);
}
static uint32_t tick_hook_ms[2];
static void IRAM_ATTR tick_hook(void)
{
tick_hook_ms[xPortGetCoreID()] += portTICK_PERIOD_MS;
}
static void test_scheduler_suspend2(int cpu)
{
esp_register_freertos_tick_hook_for_cpu(tick_hook, 0);
esp_register_freertos_tick_hook_for_cpu(tick_hook, 1);
memset(tick_hook_ms, 0, sizeof(tick_hook_ms));
printf("Test for CPU%d\n", cpu);
xTaskCreatePinnedToCore(&control_task, "control_task", 8192, NULL, 5, NULL, cpu);
vTaskDelay(waiting_ms * 2 / portTICK_PERIOD_MS);
esp_deregister_freertos_tick_hook(tick_hook);
printf("tick_hook_ms[cpu0] = %"PRIu32", tick_hook_ms[cpu1] = %"PRIu32"\n", tick_hook_ms[0], tick_hook_ms[1]);
TEST_ASSERT_INT_WITHIN(portTICK_PERIOD_MS * 2, waiting_ms * 2, tick_hook_ms[0]);
TEST_ASSERT_INT_WITHIN(portTICK_PERIOD_MS * 2, waiting_ms * 2, tick_hook_ms[1]);
printf("\n");
}
TEST_CASE("Test suspend-resume CPU. The number of tick_hook should be the same for both CPUs", "[freertos]")
{
printf("Suspend both CPUs:\n");
suspend_both_cpus = true;
test_scheduler_suspend2(0);
test_scheduler_suspend2(1);
printf("Suspend only one CPU:\n");
suspend_both_cpus = false;
test_scheduler_suspend2(0);
test_scheduler_suspend2(1);
}
static int duration_timer_ms;
static void timer_callback(TimerHandle_t arg)
{
duration_timer_ms += portTICK_PERIOD_MS;
}
static void test_scheduler_suspend3(int cpu)
{
duration_timer_ms = 0;
duration_ctrl_task_ms = 0;
printf("Test for CPU%d\n", cpu);
TimerHandle_t count_time = xTimerCreate("count_time", 1, pdTRUE, NULL, timer_callback);
xTimerStart( count_time, portMAX_DELAY);
xTaskCreatePinnedToCore(&control_task, "control_task", 8192, NULL, 5, NULL, cpu);
vTaskDelay(waiting_ms * 2 / portTICK_PERIOD_MS);
xTimerDelete(count_time, portMAX_DELAY);
printf("Finish duration_timer_ms=%d ms\n", duration_timer_ms);
TEST_ASSERT_INT_WITHIN(2, waiting_ms * 2, duration_timer_ms);
TEST_ASSERT_INT_WITHIN(5, waiting_ms + delta_ms, duration_ctrl_task_ms);
printf("\n");
}
/* Temporarily disabled due to failure with FreeRTOS v10.5.1 (IDF-8364) */
TEST_CASE("Test suspend-resume CPU works with xTimer", "[freertos][ignore]")
{
printf("Suspend both CPUs:\n");
suspend_both_cpus = true;
test_scheduler_suspend3(0);
test_scheduler_suspend3(1);
printf("Suspend only one CPU:\n");
suspend_both_cpus = false;
test_scheduler_suspend3(0);
test_scheduler_suspend3(1);
}
#endif // CONFIG_FREERTOS_UNICORE
#endif // !CONFIG_FREERTOS_SMP

View File

@@ -751,4 +751,122 @@ TEST_CASE("Test xTaskSuspendAll on all cores pends all tasks and xTaskResumeAll
}
#endif // !CONFIG_FREERTOS_UNICORE
/* ---------------------------------------------------------------------------------------------------------------------
Test vTaskSuspendAll pinned task scheduling
Purpose:
- Test that when we disable the scheduler on core X, core X does not schedule any unblocked tasks pinned to it until
scheduling is resumed.
- While the scheduler on core X is suspended, test that...
- A task pinned to core X is not scheduled even if its unblock time has been met
- The task is scheduled as soon as the scheduler on the core is resumed
Procedure:
Each core gets tested in the role of core X
- Create task A1 pinned to core X that will suspend scheduling on core X
- Create task A2 pinned to core X that will test unblocking on core X
- Put task A2 in blocked state with a finite delay
- Suspend the scheduler on core X from task A1
- Make sure that the delay time on task A2 expires
- Resume scheduler on core X from task A1
- Cleanup the tasks
Expected:
- When A1 disables scheduling, A2 should not be scheduled even after expiry of its delay time
- When A1 resumes scheduling, A2 should be scheduled
--------------------------------------------------------------------------------------------------------------------- */
#define TEST_BLOCKED_TASK_DELAY_MS 100
volatile static bool has_run = false;
SemaphoreHandle_t done_sem1;
void test_blocked_task(void *arg)
{
// Wait to be started
ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
// Mark has_run as false to begin the test
has_run = false;
// Go to blocked state
vTaskDelay( TEST_BLOCKED_TASK_DELAY_MS / portTICK_PERIOD_MS );
// Mark when this task runs
has_run = true;
// Cleanup
vTaskDelete(NULL);
}
void test_suspend_task(void *arg)
{
TaskHandle_t blkd_task = (TaskHandle_t)arg;
// Wait to be started
ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
// Start the task that will block
xTaskNotifyGive(blkd_task);
// Verify the state of the blocked task is eBlocked
TEST_ASSERT_EQUAL(eBlocked, eTaskGetState(blkd_task));
// Suspend the scheduler on this core
vTaskSuspendAll();
// Busy spin for a time which ensures that the blocked task's delay expires
esp_rom_delay_us(TEST_BLOCKED_TASK_DELAY_MS * 1000 * 2);
// Verify that the blocked task has not been scheduled
TEST_ASSERT_EQUAL(false, has_run);
// Resume the scheduler
xTaskResumeAll();
// Let the blocked task be scheduled
vTaskDelay(10);
// Verify that the state of the blocked task is not eBlocked
TEST_ASSERT_NOT_EQUAL(eBlocked, eTaskGetState(blkd_task));
// Verify that the blocked task has run after scheduler is resumed
TEST_ASSERT_EQUAL(true, has_run);
// Signal test completion
xSemaphoreGive(done_sem1);
// Cleanup
vTaskDelete(NULL);
}
TEST_CASE("Test vTaskSuspendAll pinned task scheduling", "[freertos]")
{
for (int x = 0; x < portNUM_PROCESSORS; x++) {
TaskHandle_t susp_task;
TaskHandle_t blkd_task;
done_sem1 = xSemaphoreCreateBinary();
TEST_ASSERT_NOT_EQUAL(NULL, done_sem1);
// Create pinned task on core x which will block
// Ensure that this has a higher priority than the suspension task so that it immediately runs when the scheduler resumes
TEST_ASSERT_EQUAL(pdTRUE, xTaskCreatePinnedToCore(test_blocked_task, "blkd", 4096, NULL, UNITY_FREERTOS_PRIORITY + 1, &blkd_task, x));
// Create pinned task on core x which will suspend its scheduler
TEST_ASSERT_EQUAL(pdTRUE, xTaskCreatePinnedToCore(test_suspend_task, "susp", 4096, (void *)blkd_task, UNITY_FREERTOS_PRIORITY, &susp_task, x));
// Start the scheduler suspension task
xTaskNotifyGive(susp_task);
// Wait for test completion
xSemaphoreTake(done_sem1, portMAX_DELAY);
// Cleanup
vSemaphoreDelete(done_sem1);
// Add a short delay to allow the idle task to free any remaining task memory
vTaskDelay(10);
}
}
#endif // !CONFIG_FREERTOS_SMP
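
The guarantee exercised by the test above can also be viewed from the delayed task's side. The sketch below is an illustration only (not part of the diff; delayed_task is a hypothetical task function): it measures how long a 100 ms vTaskDelay() actually takes when another task on the same core suspends the scheduler past the delay's expiry. With the corrected tick accounting, the task becomes ready as soon as xTaskResumeAll() runs, so the measured time tracks the suspension length rather than being extended by it.

    #include <stdio.h>
    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    /* Hypothetical task: measure the real duration of a 100 ms delay while another
     * task pinned to the same core suspends and resumes the scheduler around it. */
    static void delayed_task(void *arg)
    {
        TickType_t start = xTaskGetTickCount();
        vTaskDelay(pdMS_TO_TICKS(100));
        uint32_t elapsed_ms = (xTaskGetTickCount() - start) * portTICK_PERIOD_MS;
        /* Expected: roughly the suspension length (when the suspension outlives
         * the delay), not the suspension length plus another 100 ms */
        printf("unblocked after %u ms\n", (unsigned) elapsed_ms);
        vTaskDelete(NULL);
    }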

View File

@@ -3,5 +3,5 @@
# In order for the cases defined by `TEST_CASE` in "misc" to be linked into
# the final elf, the component can be registered as WHOLE_ARCHIVE
idf_component_register(SRC_DIRS "."
PRIV_REQUIRES unity test_utils
PRIV_REQUIRES unity test_utils esp_timer
WHOLE_ARCHIVE)

View File

@@ -15,6 +15,9 @@
#include "esp_memory_utils.h"
#include "unity.h"
#include "test_utils.h"
#include "esp_freertos_hooks.h"
#include <string.h>
#include <inttypes.h>
/*
Test ...Create...WithCaps() functions
@@ -163,3 +166,88 @@ TEST_CASE("IDF additions: Event group creation with memory caps", "[freertos]")
// Free the event group
vEventGroupDelete(evt_group_handle);
}
#if !CONFIG_FREERTOS_SMP
/*
Scheduler suspension behavior has changed in SMP FreeRTOS, thus these tests are disabled for SMP FreeRTOS.
See IDF-5201
*/
/* ---------------------------------------------------------------------------------------------------------------------
IDF additions: IDF tick hooks during scheduler suspension
Purpose:
- Test that the IDF tick hooks are called even with scheduler suspension
Procedure:
Each core gets tested in the role of core X
- Create suspend_task pinned to core X which will register a tick hook on core X and suspend the scheduler on core X
- Register tick hook on core X
- suspend_task suspends scheduling on core X for Y milliseconds and then resumes scheduling
- Delay suspend_task for Y milliseconds more after scheduler resumption
- De-register the tick hook
- Verify the tick hook callback count
Expected:
- The tick hook runs for a total of Y * 2 ms worth of ticks
--------------------------------------------------------------------------------------------------------------------- */
#define TEST_DELAY_MS 200
static uint32_t tick_hook_count[portNUM_PROCESSORS];
static void IRAM_ATTR tick_hook(void)
{
tick_hook_count[portGET_CORE_ID()] += portTICK_PERIOD_MS;
}
static void suspend_task(void *arg)
{
TaskHandle_t main_task_hdl = ( TaskHandle_t )arg;
/* Fetch the current core ID */
BaseType_t xCoreID = portGET_CORE_ID();
/* Register tick hook */
memset(tick_hook_count, 0, sizeof(tick_hook_count));
esp_register_freertos_tick_hook_for_cpu(tick_hook, xCoreID);
/* Suspend scheduler */
vTaskSuspendAll();
/* Suspend for TEST_DELAY_MS milliseconds */
esp_rom_delay_us(TEST_DELAY_MS * 1000);
/* Resume scheduler */
xTaskResumeAll();
/* Delay for a further TEST_DELAY_MS milliseconds after scheduler resumption */
vTaskDelay(pdMS_TO_TICKS(TEST_DELAY_MS));
/* De-register tick hook */
esp_deregister_freertos_tick_hook_for_cpu(tick_hook, xCoreID);
/* Verify that the tick hook callback count equals the scheduler suspension time + the delay time.
* We allow a variation of 2 ticks to account for delays encountered during test setup and teardown.
*/
printf("Core%d tick_hook_count = %"PRIu32"\n", xCoreID, tick_hook_count[xCoreID]);
TEST_ASSERT_INT_WITHIN(portTICK_PERIOD_MS * 2, TEST_DELAY_MS * 2, tick_hook_count[xCoreID]);
/* Signal main task of test completion */
xTaskNotifyGive(main_task_hdl);
/* Cleanup */
vTaskDelete(NULL);
}
TEST_CASE("IDF additions: IDF tick hooks during scheduler suspension", "[freertos]")
{
/* Run test for each core */
for (int x = 0; x < portNUM_PROCESSORS; x++) {
xTaskCreatePinnedToCore(&suspend_task, "suspend_task", 8192, (void *)xTaskGetCurrentTaskHandle(), UNITY_FREERTOS_PRIORITY, NULL, x);
/* Wait for test completion */
ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
}
}
#endif // !CONFIG_FREERTOS_SMP
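
Outside of a Unity test, the IDF tick-hook API exercised above is used the same way. The sketch below is an illustration only (it assumes a standard ESP-IDF application entry point, app_main): it registers a per-core tick hook which, as the test above verifies, keeps being called even while the scheduler on that core is suspended.

    #include <stdio.h>
    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"
    #include "esp_freertos_hooks.h"
    #include "esp_attr.h"
    #include "esp_err.h"

    static volatile uint32_t s_tick_count;

    /* Runs in the context of the tick interrupt on the core it is registered on */
    static void IRAM_ATTR my_tick_hook(void)
    {
        s_tick_count++;
    }

    void app_main(void)
    {
        /* Register the hook on the core app_main happens to run on */
        ESP_ERROR_CHECK(esp_register_freertos_tick_hook_for_cpu(my_tick_hook, xPortGetCoreID()));
        vTaskDelay(pdMS_TO_TICKS(1000));
        esp_deregister_freertos_tick_hook_for_cpu(my_tick_hook, xPortGetCoreID());
        printf("ticks observed: %u\n", (unsigned) s_tick_count);
    }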