Merge branch 'bugfix/freertos_xTaskIncrementTick' into 'master'

freertos: Fix xTaskIncrementTick() and xTaskResumeAll()

Closes IDF-4698 and IDF-4705

See merge request espressif/esp-idf!17204
Darian 2022-07-21 08:14:30 +08:00
commit 0570c5db1a
6 changed files with 800 additions and 399 deletions

View File

@@ -3314,9 +3314,25 @@ BaseType_t xTaskGetAffinity( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
* or
* + Time slicing is in use and there is a task of equal priority to the
* currently running task.
*
* Note: For SMP, this function must only be called by core 0. Other cores should
* call xTaskIncrementTickOtherCores() instead.
*/
BaseType_t xTaskIncrementTick( void ) PRIVILEGED_FUNCTION;
#ifdef ESP_PLATFORM
/*
* THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS ONLY
* INTENDED FOR USE WHEN IMPLEMENTING A PORT OF THE SCHEDULER AND IS
* AN INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER.
*
* Called from all cores other than core 0 when their tick interrupt
* occurs. This function checks whether the current core requires time
* slicing, and also calls the application tick hook.
*/
BaseType_t xTaskIncrementTickOtherCores( void ) PRIVILEGED_FUNCTION;
#endif // ESP_PLATFORM
/*
* THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN
* INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER.

View File

@@ -151,6 +151,8 @@ IRAM_ATTR void SysTickIsrHandler(void *arg)
#endif // CONFIG_FREERTOS_SYSTICK_USES_CCOUNT
extern void esp_vApplicationTickHook(void);
/**
* @brief Handler of SysTick
*
@@ -165,11 +167,27 @@ BaseType_t xPortSysTickHandler(void)
portbenchmarkIntLatency();
#endif //configBENCHMARK
traceISR_ENTER(SYSTICK_INTR_ID);
BaseType_t ret = xTaskIncrementTick();
if(ret != pdFALSE) {
// Call IDF Tick Hook
esp_vApplicationTickHook();
// Call FreeRTOS Increment tick function
BaseType_t xSwitchRequired;
#if CONFIG_FREERTOS_UNICORE
xSwitchRequired = xTaskIncrementTick();
#else
if (xPortGetCoreID() == 0) {
xSwitchRequired = xTaskIncrementTick();
} else {
xSwitchRequired = xTaskIncrementTickOtherCores();
}
#endif
// Check if yield is required
if (xSwitchRequired != pdFALSE) {
portYIELD_FROM_ISR();
} else {
traceISR_EXIT();
}
return ret;
return xSwitchRequired;
}

View File

@@ -58,7 +58,6 @@
#define taskEXIT_CRITICAL_ISR( ) portEXIT_CRITICAL_ISR( taskCRITICAL_MUX )
#undef _REENT_INIT_PTR
#define _REENT_INIT_PTR esp_reent_init
extern void esp_vApplicationTickHook(void);
extern void esp_vApplicationIdleHook(void);
#endif //ESP_PLATFORM
@@ -2424,10 +2423,28 @@ void vTaskSuspendAll( void )
* BaseType_t. Please read Richard Barry's reply in the following link to a
* post in the FreeRTOS support forum before reporting this as a bug! -
* https://goo.gl/wu4acr */
#ifdef ESP_PLATFORM
/* For SMP, although each core has its own uxSchedulerSuspended, we still
* need to disable interrupts or enter a critical section when accessing it. */
unsigned state;
state = portSET_INTERRUPT_MASK_FROM_ISR();
#endif
/* portSOFTWARE_BARRIER() is only implemented for emulated/simulated ports that
* do not otherwise exhibit real time behaviour. */
portSOFTWARE_BARRIER();
/* The scheduler is suspended if uxSchedulerSuspended is non-zero. An increment
* is used to allow calls to vTaskSuspendAll() to nest. */
++uxSchedulerSuspended[ xPortGetCoreID() ];
portCLEAR_INTERRUPT_MASK_FROM_ISR(state);
/* Enforces ordering for ports and optimised compilers that may otherwise place
* the above increment elsewhere. */
portMEMORY_BARRIER();
#ifdef ESP_PLATFORM
portCLEAR_INTERRUPT_MASK_FROM_ISR( state );
#endif
}
/*----------------------------------------------------------*/
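Since uxSchedulerSuspended is incremented rather than set, calls to vTaskSuspendAll() nest per core. A minimal usage sketch (an editor's illustration, not part of this commit; vExampleNestedSuspend() is a hypothetical caller):

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

void vExampleNestedSuspend( void )
{
    vTaskSuspendAll();           /* uxSchedulerSuspended[ core ]: 0 -> 1 */
    vTaskSuspendAll();           /* nested:                       1 -> 2 */
    /* ... work that must not be preempted by other tasks on this core ... */
    ( void ) xTaskResumeAll();   /* 2 -> 1: scheduler still suspended */
    ( void ) xTaskResumeAll();   /* 1 -> 0: scheduler resumes and may yield */
}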
@@ -2495,13 +2512,12 @@ void vTaskSuspendAll( void )
BaseType_t xTaskResumeAll( void )
{
TCB_t *pxTCB = NULL;
TCB_t * pxTCB = NULL;
BaseType_t xAlreadyYielded = pdFALSE;
TickType_t xTicksToNextUnblockTime;
/* If scheduler state is `taskSCHEDULER_RUNNING` then this function does not match a
* previous call to taskENTER_CRITICAL(). */
configASSERT( xTaskGetSchedulerState() != taskSCHEDULER_RUNNING );
/* If uxSchedulerSuspended is zero then this function does not match a
* previous call to vTaskSuspendAll(). */
configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] );
/* It is possible that an ISR caused a task to be removed from an event
* list while the scheduler was suspended. If this was the case then the
@@ -2510,26 +2526,32 @@ BaseType_t xTaskResumeAll( void )
* tasks from this list into their appropriate ready list. */
taskENTER_CRITICAL();
{
--uxSchedulerSuspended[xPortGetCoreID()];
#ifdef ESP_PLATFORM
/* Minor optimization. Core ID can't change while inside a critical section */
BaseType_t xCoreID = xPortGetCoreID();
#else
BaseType_t xCoreID = 0;
#endif
--uxSchedulerSuspended[ xCoreID ];
if( uxSchedulerSuspended[xPortGetCoreID()] == ( UBaseType_t ) pdFALSE )
if( uxSchedulerSuspended[ xCoreID ] == ( UBaseType_t ) pdFALSE )
{
if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U )
{
/* Move any readied tasks from the pending list into the
* appropriate ready list. */
while( listLIST_IS_EMPTY( &xPendingReadyList[xPortGetCoreID()] ) == pdFALSE )
while( listLIST_IS_EMPTY( &xPendingReadyList[ xCoreID ] ) == pdFALSE )
{
pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList[xPortGetCoreID()] ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList[ xCoreID ] ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
( void ) uxListRemove( &( pxTCB->xEventListItem ) );
( void ) uxListRemove( &( pxTCB->xStateListItem ) );
prvAddTaskToReadyList( pxTCB );
/* If the moved task has a priority higher than the current
* task then a yield must be performed. */
if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
if( pxTCB->uxPriority >= pxCurrentTCB[ xCoreID ]->uxPriority )
{
xYieldPending[xPortGetCoreID()] = pdTRUE;
xYieldPending[ xCoreID ] = pdTRUE;
}
else
{
@@ -2552,54 +2574,39 @@ BaseType_t xTaskResumeAll( void )
* they should be processed now. This ensures the tick count does
* not slip, and that any delayed tasks are resumed at the correct
* time. */
while( xPendedTicks > ( TickType_t ) 0 )
#ifdef ESP_PLATFORM
/* Core 0 is solely responsible for managing the tick count, thus it
* must be the only core to unwind the pended ticks. */
if ( xCoreID == 0 )
#endif
{
/* Calculate how far into the future the next task will
* leave the Blocked state because its timeout expired. If
* there are no tasks due to leave the blocked state between
* the time now and the time at which the tick count overflows
* then xNextTaskUnblockTime will be the tick overflow time.
* This means xNextTaskUnblockTime can never be less than
* xTickCount, and the following can therefore not
* underflow. */
configASSERT( xNextTaskUnblockTime >= xTickCount );
xTicksToNextUnblockTime = xNextTaskUnblockTime - xTickCount;
TickType_t xPendedCounts = xPendedTicks; /* Non-volatile copy. */
/* Don't want to move the tick count more than the number
of ticks that are pending, so cap if necessary. */
if( xTicksToNextUnblockTime > xPendedTicks )
if( xPendedCounts > ( TickType_t ) 0U )
{
xTicksToNextUnblockTime = xPendedTicks;
}
do
{
if( xTaskIncrementTick() != pdFALSE )
{
xYieldPending[ xCoreID ] = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
if( xTicksToNextUnblockTime == 0 )
{
/* xTicksToNextUnblockTime could be zero if the tick
* count is about to overflow and xTicksToNextUnblockTime
* holds the time at which the tick count will overflow
* (rather than the time at which the next task will
* unblock). Set to 1 otherwise xPendedTicks won't be
* decremented below. */
xTicksToNextUnblockTime = ( TickType_t ) 1;
}
else if( xTicksToNextUnblockTime > ( TickType_t ) 1)
{
/* Move the tick count one short of the next unblock
* time, then call xTaskIncrementTick() to move the tick
* count up to the next unblock time to unblock the task,
* if any. This will also swap the blocked task and
* overflow blocked task lists if necessary. */
xTickCount += ( xTicksToNextUnblockTime - ( TickType_t ) 1 );
}
xYieldPending[xPortGetCoreID()] |= xTaskIncrementTick();
--xPendedCounts;
} while( xPendedCounts > ( TickType_t ) 0U );
/* Adjust for the number of ticks just added to
xTickCount and go around the loop again if
xTicksToCatchUp is still greater than 0. */
xPendedTicks -= xTicksToNextUnblockTime;
xPendedTicks = 0;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
if( xYieldPending[xPortGetCoreID()] != pdFALSE )
if( xYieldPending[ xCoreID ] != pdFALSE )
{
#if ( configUSE_PREEMPTION != 0 )
{
@@ -2937,14 +2944,19 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
void vTaskStepTick( const TickType_t xTicksToJump )
{
#ifdef ESP_PLATFORM
/* For SMP, we require a critical section to access xTickCount */
taskENTER_CRITICAL();
#endif
/* Correct the tick count value after a period during which the tick
* was suppressed. Note this does *not* call the tick hook function for
* each stepped tick. */
taskENTER_CRITICAL();
configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );
xTickCount += xTicksToJump;
traceINCREASE_TICK_COUNT( xTicksToJump );
#ifdef ESP_PLATFORM
taskEXIT_CRITICAL();
#endif
}
#endif /* configUSE_TICKLESS_IDLE */
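For context, vTaskStepTick() is normally called from a tickless-idle port's sleep routine to credit the slept time in a single jump. A hedged sketch (vPortSuppressTicksAndSleep() follows the usual portSUPPRESS_TICKS_AND_SLEEP() convention; prvSleepUntilInterrupt() is a hypothetical helper):

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

static TickType_t prvSleepUntilInterrupt( TickType_t xExpectedIdleTime ); /* hypothetical */

void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime )
{
    /* Enter a low power state, then measure how many whole ticks elapsed. */
    TickType_t xSleptTicks = prvSleepUntilInterrupt( xExpectedIdleTime );

    /* Wind the tick count forward without running the tick hook for each
     * stepped tick; the configASSERT() above guarantees no unblock time
     * is skipped. */
    vTaskStepTick( xSleptTicks );
}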
@@ -2952,32 +2964,31 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
{
#ifdef ESP_PLATFORM
BaseType_t xYieldRequired = pdFALSE;
#else
BaseType_t xYieldOccurred;
#endif // ESP_PLATFORM
/* Must not be called with the scheduler suspended as the implementation
* relies on xPendedTicks being wound down to 0 in xTaskResumeAll(). */
#ifdef ESP_PLATFORM
configASSERT( xTaskGetSchedulerState() != taskSCHEDULER_SUSPENDED );
/* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occuring when
* the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */
#ifdef ESP_PLATFORM // IDF-3755
taskENTER_CRITICAL();
#else
vTaskSuspendAll();
configASSERT( uxSchedulerSuspended == 0 );
#endif // ESP_PLATFORM
/* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occurring when
* the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */
vTaskSuspendAll();
#ifdef ESP_PLATFORM
/* For SMP, we still require a critical section to access xPendedTicks even
* if the scheduler is disabled. */
taskENTER_CRITICAL();
xPendedTicks += xTicksToCatchUp;
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
return xYieldRequired;
#else
#else // ESP_PLATFORM
xPendedTicks += xTicksToCatchUp;
#endif // ESP_PLATFORM
xYieldOccurred = xTaskResumeAll();
return xYieldOccurred;
#endif // ESP_PLATFORM
}
/*----------------------------------------------------------*/
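xTaskCatchUpTicks() is the application-facing way to replay ticks that were missed while interrupts were masked, e.g. around a long flash operation. A hedged usage sketch (the 10 ms figure and vExampleCatchUp() are illustrative):

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

void vExampleCatchUp( void )
{
    /* ... a long operation just ran with the tick interrupt blocked for ~10 ms ... */
    if( xTaskCatchUpTicks( pdMS_TO_TICKS( 10 ) ) == pdTRUE )
    {
        /* Unwinding the pended ticks unblocked a higher priority task and a
         * context switch has already occurred. */
    }
}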
@@ -3075,33 +3086,34 @@ BaseType_t xTaskIncrementTick( void )
BaseType_t xTaskIncrementTick( void )
{
#ifdef ESP_PLATFORM
#if ( configNUM_CORES > 1 )
{
/* Only Core 0 should ever call this function. */
configASSERT( xPortGetCoreID() == 0 );
}
#endif /* ( configNUM_CORES > 1 ) */
#endif // ESP_PLATFORM
TCB_t * pxTCB;
TickType_t xItemValue;
BaseType_t xSwitchRequired = pdFALSE;
/* Only allow core 0 to increase the tick count in the case of xPortSysTickHandler processing. */
/* And allow core 0 and core 1 to unwind uxPendedTicks during xTaskResumeAll. */
if (xPortInIsrContext())
{
#if ( configUSE_TICK_HOOK == 1 )
vApplicationTickHook();
#endif /* configUSE_TICK_HOOK */
esp_vApplicationTickHook();
if (xPortGetCoreID() != 0 )
{
return pdTRUE;
}
}
/* Called by the portable layer each time a tick interrupt occurs.
* Increments the tick then checks to see if the new tick value will cause any
* tasks to be unblocked. */
traceTASK_INCREMENT_TICK( xTickCount );
if( uxSchedulerSuspended[xPortGetCoreID()] == ( UBaseType_t ) pdFALSE )
#ifdef ESP_PLATFORM
/* We need a critical section here as we are about to access kernel data
* structures:
* - Other cores could be accessing them simultaneously
* - Unlike other ports, we call xTaskIncrementTick() without first disabling
* nested interrupts; the critical section disables them instead. */
taskENTER_CRITICAL_ISR();
#endif // ESP_PLATFORM
if( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) pdFALSE )
{
taskENTER_CRITICAL_ISR();
/* Minor optimisation. The tick count cannot change in this
* block. */
const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1;
@@ -3154,7 +3166,7 @@ BaseType_t xTaskIncrementTick( void )
* state - so record the item value in
* xNextTaskUnblockTime. */
xNextTaskUnblockTime = xItemValue;
break; /*lint !e9011 Code structure here is deemed easier to understand with multiple breaks. */
}
else
{
@@ -3187,7 +3199,14 @@ BaseType_t xTaskIncrementTick( void )
* only be performed if the unblocked task has a
* priority that is equal to or higher than the
* currently executing task. */
if( pxTCB->uxPriority >= pxCurrentTCB[xPortGetCoreID()]->uxPriority )
#if defined(ESP_PLATFORM) && ( configNUM_CORES > 1 )
/* Since this function is only run on core 0, we
* only need to switch contexts if the unblocked task
* can run on core 0. */
if( ( pxTCB->xCoreID == 0 || pxTCB->xCoreID == tskNO_AFFINITY ) && (pxTCB->uxPriority >= pxCurrentTCB[ 0 ]->uxPriority) )
#else
if( pxTCB->uxPriority >= pxCurrentTCB[ 0 ]->uxPriority )
#endif
{
xSwitchRequired = pdTRUE;
}
@@ -3206,7 +3225,7 @@ BaseType_t xTaskIncrementTick( void )
* writer has not explicitly turned time slicing off. */
#if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
{
if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB[xPortGetCoreID()]->uxPriority ] ) ) > ( UBaseType_t ) 1 )
if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB[ 0 ]->uxPriority ] ) ) > ( UBaseType_t ) 1 )
{
xSwitchRequired = pdTRUE;
}
@@ -3216,28 +3235,152 @@ BaseType_t xTaskIncrementTick( void )
}
}
#endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
#ifdef ESP_PLATFORM
#if ( configUSE_TICK_HOOK == 1 )
TickType_t xPendedCounts = xPendedTicks; /* Non-volatile copy. */
#endif /* configUSE_TICK_HOOK */
/* Exit the critical section as we have finished accessing the kernel data structures. */
taskEXIT_CRITICAL_ISR();
#endif // ESP_PLATFORM
#if ( configUSE_TICK_HOOK == 1 )
{
/* Guard against the tick hook being called when the pended tick
* count is being unwound (when the scheduler is being unlocked). */
#ifdef ESP_PLATFORM
if( xPendedCounts == ( TickType_t ) 0 )
#else
if( xPendedTicks == ( TickType_t ) 0 )
#endif
{
vApplicationTickHook();
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configUSE_TICK_HOOK */
#if ( configUSE_PREEMPTION == 1 )
{
if( xYieldPending[ 0 ] != pdFALSE )
{
xSwitchRequired = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configUSE_PREEMPTION */
}
else
{
++xPendedTicks;
}
#ifdef ESP_PLATFORM
/* Exit the critical section as we have finished accessing the kernel data structures. */
taskEXIT_CRITICAL_ISR();
#endif // ESP_PLATFORM
#if ( configUSE_PREEMPTION == 1 )
{
if( xYieldPending[xPortGetCoreID()] != pdFALSE )
{
xSwitchRequired = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
/* The tick hook gets called at regular intervals, even if the
* scheduler is locked. */
#if ( configUSE_TICK_HOOK == 1 )
{
vApplicationTickHook();
}
#endif
}
#endif /* configUSE_PREEMPTION */
return xSwitchRequired;
}
#ifdef ESP_PLATFORM
#if ( configNUM_CORES > 1 )
BaseType_t xTaskIncrementTickOtherCores( void )
{
/* Minor optimization. This function can never switch cores
* mid-execution. */
BaseType_t xCoreID = xPortGetCoreID();
BaseType_t xSwitchRequired = pdFALSE;
/* This function should never be called by Core 0. */
configASSERT( xCoreID != 0 );
/* Called by the portable layer each time a tick interrupt occurs.
* Increments the tick then checks to see if the new tick value will cause any
* tasks to be unblocked. */
traceTASK_INCREMENT_TICK( xTickCount );
if( uxSchedulerSuspended[ xCoreID ] == ( UBaseType_t ) pdFALSE )
{
/* We need a critical section here as we are about to access kernel data
* structures:
* - Other cores could be accessing them simultaneously
* - Unlike other ports, we call this function without first disabling
* nested interrupts; the critical section disables them instead. */
taskENTER_CRITICAL_ISR();
/* A task being unblocked cannot cause an immediate context switch
* if preemption is turned off. */
#if ( configUSE_PREEMPTION == 1 )
{
/* Check if core 0 calling xTaskIncrementTick() has
* unblocked a task that can be run. */
if( uxTopReadyPriority > pxCurrentTCB[xCoreID]->uxPriority )
{
xSwitchRequired = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif
/* Tasks of equal priority to the currently running task will share
* processing time (time slice) if preemption is on, and the application
* writer has not explicitly turned time slicing off. */
#if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
{
if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB[ xCoreID ]->uxPriority ] ) ) > ( UBaseType_t ) 1 )
{
xSwitchRequired = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
/* Exit the critical section as we have finished accessing the kernel data structures. */
taskEXIT_CRITICAL_ISR();
#if ( configUSE_PREEMPTION == 1 )
{
if( xYieldPending[ xCoreID ] != pdFALSE )
{
xSwitchRequired = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* configUSE_PREEMPTION */
}
#if ( configUSE_TICK_HOOK == 1 )
{
vApplicationTickHook();
}
#endif
return xSwitchRequired;
}
#endif /* ( configNUM_CORES > 1 ) */
#endif // ESP_PLATFORM
/*-----------------------------------------------------------*/
#if ( configUSE_APPLICATION_TASK_TAG == 1 )

View File

@@ -1,288 +0,0 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/* Tests for FreeRTOS scheduler suspend & resume all tasks */
#include "sdkconfig.h"
#include <stdio.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "freertos/queue.h"
#include "unity.h"
#include "test_utils.h"
#include "esp_intr_alloc.h"
#include "driver/gptimer.h"
#include "esp_private/gptimer.h"
#include "sdkconfig.h"
#include "esp_rom_sys.h"
static SemaphoreHandle_t isr_semaphore;
static volatile unsigned isr_count;
/* Timer ISR increments an ISR counter, and gives a binary
semaphore to wake up the counter task */
static bool on_timer_alarm_cb(gptimer_handle_t timer, const gptimer_alarm_event_data_t *edata, void *user_ctx)
{
portBASE_TYPE higher_awoken = pdFALSE;
isr_count++;
xSemaphoreGiveFromISR(isr_semaphore, &higher_awoken);
return higher_awoken == pdTRUE;
}
typedef struct {
SemaphoreHandle_t trigger_sem;
volatile unsigned counter;
} counter_config_t;
static void counter_task_fn(void *vp_config)
{
counter_config_t *config = (counter_config_t *)vp_config;
printf("counter_task running...\n");
while (1) {
xSemaphoreTake(config->trigger_sem, portMAX_DELAY);
config->counter++;
}
}
/* This test verifies that an interrupt can wake up a task while the scheduler is disabled.
In the FreeRTOS implementation, this exercises the xPendingReadyList for that core.
*/
TEST_CASE("Scheduler disabled can handle a pending context switch on resume", "[freertos]")
{
isr_count = 0;
TaskHandle_t counter_task;
gptimer_handle_t gptimer = NULL;
intr_handle_t intr_handle = NULL;
isr_semaphore = xSemaphoreCreateBinary();
TEST_ASSERT_NOT_NULL(isr_semaphore);
counter_config_t count_config = {
.trigger_sem = isr_semaphore,
.counter = 0,
};
xTaskCreatePinnedToCore(counter_task_fn, "counter", 2048,
&count_config, UNITY_FREERTOS_PRIORITY + 1,
&counter_task, UNITY_FREERTOS_CPU);
gptimer_config_t timer_config = {
.clk_src = GPTIMER_CLK_SRC_DEFAULT,
.direction = GPTIMER_COUNT_UP,
.resolution_hz = 1000000, // 1MHz, 1 tick=1us
};
TEST_ESP_OK(gptimer_new_timer(&timer_config, &gptimer));
gptimer_alarm_config_t alarm_config = {
.reload_count = 0,
.alarm_count = 1000, // alarm period 1ms
.flags.auto_reload_on_alarm = true,
};
gptimer_event_callbacks_t cbs = {
.on_alarm = on_timer_alarm_cb,
};
TEST_ESP_OK(gptimer_register_event_callbacks(gptimer, &cbs, NULL));
TEST_ESP_OK(gptimer_enable(gptimer));
TEST_ESP_OK(gptimer_set_alarm_action(gptimer, &alarm_config));
TEST_ESP_OK(gptimer_start(gptimer));
TEST_ESP_OK(gptimer_get_intr_handle(gptimer, &intr_handle));
vTaskDelay(pdMS_TO_TICKS(20));
// Check some counts have been triggered via the ISR
TEST_ASSERT(count_config.counter > 10);
TEST_ASSERT(isr_count > 10);
for (int i = 0; i < 20; i++) {
#ifdef CONFIG_FREERTOS_SMP
//Note: Scheduler suspension behavior changed in FreeRTOS SMP
vTaskPreemptionDisable(NULL);
#else
vTaskSuspendAll();
#endif // CONFIG_FREERTOS_SMP
esp_intr_noniram_disable();
unsigned no_sched_task = count_config.counter;
// scheduler off on this CPU...
esp_rom_delay_us(20 * 1000);
TEST_ASSERT_EQUAL(count_config.counter, no_sched_task);
// disable timer interrupts
esp_intr_disable(intr_handle);
// When we resume the scheduler, we expect the counter task
// will preempt and count at least one more item
esp_intr_noniram_enable();
esp_intr_enable(intr_handle);
#ifdef CONFIG_FREERTOS_SMP
//Note: Scheduler suspension behavior changed in FreeRTOS SMP
vTaskPreemptionEnable(NULL);
#else
xTaskResumeAll();
#endif // CONFIG_FREERTOS_SMP
TEST_ASSERT_NOT_EQUAL(count_config.counter, no_sched_task);
}
TEST_ESP_OK(gptimer_stop(gptimer));
TEST_ESP_OK(gptimer_disable(gptimer));
TEST_ESP_OK(gptimer_del_timer(gptimer));
vTaskDelete(counter_task);
vSemaphoreDelete(isr_semaphore);
}
/* Multiple tasks on different cores can be added to the pending ready list
while the scheduler is suspended, and should be started once the scheduler
resumes.
*/
TEST_CASE("Scheduler disabled can wake multiple tasks on resume", "[freertos]")
{
#define TASKS_PER_PROC 4
TaskHandle_t tasks[portNUM_PROCESSORS][TASKS_PER_PROC] = { 0 };
counter_config_t counters[portNUM_PROCESSORS][TASKS_PER_PROC] = { 0 };
/* Start all the tasks, they will block on isr_semaphore */
for (int p = 0; p < portNUM_PROCESSORS; p++) {
for (int t = 0; t < TASKS_PER_PROC; t++) {
counters[p][t].trigger_sem = xSemaphoreCreateMutex();
TEST_ASSERT_NOT_NULL( counters[p][t].trigger_sem );
TEST_ASSERT( xSemaphoreTake(counters[p][t].trigger_sem, 0) );
xTaskCreatePinnedToCore(counter_task_fn, "counter", 2048,
&counters[p][t], UNITY_FREERTOS_PRIORITY + 1,
&tasks[p][t], p);
TEST_ASSERT_NOT_NULL( tasks[p][t] );
}
}
/* takes a while to initialize tasks on both cores, sometimes... */
vTaskDelay(TASKS_PER_PROC * portNUM_PROCESSORS * 3);
/* Check nothing is counting, each counter should be blocked on its trigger_sem */
for (int p = 0; p < portNUM_PROCESSORS; p++) {
for (int t = 0; t < TASKS_PER_PROC; t++) {
TEST_ASSERT_EQUAL(0, counters[p][t].counter);
}
}
/* Suspend scheduler on this CPU */
#ifdef CONFIG_FREERTOS_SMP
//Note: Scheduler suspension behavior changed in FreeRTOS SMP
vTaskPreemptionDisable(NULL);
#else
vTaskSuspendAll();
#endif // CONFIG_FREERTOS_SMP
/* Give all the semaphores once. This will wake tasks immediately on the other
CPU, but they are deferred here until the scheduler resumes.
*/
for (int p = 0; p < portNUM_PROCESSORS; p++) {
for (int t = 0; t < TASKS_PER_PROC; t++) {
xSemaphoreGive(counters[p][t].trigger_sem);
}
}
esp_rom_delay_us(200); /* Let the other CPU do some things */
for (int p = 0; p < portNUM_PROCESSORS; p++) {
for (int t = 0; t < TASKS_PER_PROC; t++) {
int expected = (p == UNITY_FREERTOS_CPU) ? 0 : 1; // Has run if it was on the other CPU
esp_rom_printf("Checking CPU %d task %d (expected %d actual %d)\n", p, t, expected, counters[p][t].counter);
TEST_ASSERT_EQUAL(expected, counters[p][t].counter);
}
}
/* Resume scheduler */
#ifdef CONFIG_FREERTOS_SMP
//Note: Scheduler suspension behavior changed in FreeRTOS SMP
vTaskPreemptionEnable(NULL);
#else
xTaskResumeAll();
#endif // CONFIG_FREERTOS_SMP
/* Now the tasks on both CPUs should have been woken once and counted once. */
for (int p = 0; p < portNUM_PROCESSORS; p++) {
for (int t = 0; t < TASKS_PER_PROC; t++) {
esp_rom_printf("Checking CPU %d task %d (expected 1 actual %d)\n", p, t, counters[p][t].counter);
TEST_ASSERT_EQUAL(1, counters[p][t].counter);
}
}
/* Clean up */
for (int p = 0; p < portNUM_PROCESSORS; p++) {
for (int t = 0; t < TASKS_PER_PROC; t++) {
vTaskDelete(tasks[p][t]);
vSemaphoreDelete(counters[p][t].trigger_sem);
}
}
}
#ifndef CONFIG_FREERTOS_UNICORE
static volatile bool sched_suspended;
static void suspend_scheduler_5ms_task_fn(void *ignore)
{
#ifdef CONFIG_FREERTOS_SMP
//Note: Scheduler suspension behavior changed in FreeRTOS SMP
vTaskPreemptionDisable(NULL);
#else
vTaskSuspendAll();
#endif // CONFIG_FREERTOS_SMP
sched_suspended = true;
for (int i = 0; i < 5; i++) {
esp_rom_delay_us(1000);
}
#ifdef CONFIG_FREERTOS_SMP
//Note: Scheduler suspension behavior changed in FreeRTOS SMP
vTaskPreemptionEnable(NULL);
#else
xTaskResumeAll();
#endif // CONFIG_FREERTOS_SMP
sched_suspended = false;
vTaskDelete(NULL);
}
/* If the scheduler is disabled on one CPU (A) with a task blocked on something, and a task
on B (where scheduler is running) wakes it, then the task on A should be woken on resume.
*/
TEST_CASE("Scheduler disabled on CPU B, tasks on A can wake", "[freertos]")
{
TaskHandle_t counter_task;
SemaphoreHandle_t wake_sem = xSemaphoreCreateMutex();
xSemaphoreTake(wake_sem, 0);
counter_config_t count_config = {
.trigger_sem = wake_sem,
.counter = 0,
};
xTaskCreatePinnedToCore(counter_task_fn, "counter", 2048,
&count_config, UNITY_FREERTOS_PRIORITY + 1,
&counter_task, !UNITY_FREERTOS_CPU);
xTaskCreatePinnedToCore(suspend_scheduler_5ms_task_fn, "suspender", 2048,
NULL, UNITY_FREERTOS_PRIORITY - 1,
NULL, !UNITY_FREERTOS_CPU);
/* counter task is now blocked on other CPU, waiting for wake_sem, and we expect
that this CPU's scheduler will be suspended for 5ms shortly... */
while (!sched_suspended) { }
xSemaphoreGive(wake_sem);
esp_rom_delay_us(1000);
// Bit of a race here if the other CPU resumes its scheduler, but 5ms is a long time...
TEST_ASSERT(sched_suspended);
TEST_ASSERT_EQUAL(0, count_config.counter); // the other task hasn't woken yet, because scheduler is off
TEST_ASSERT(sched_suspended);
/* wait for the rest of the 5ms... */
while (sched_suspended) { }
esp_rom_delay_us(100);
TEST_ASSERT_EQUAL(1, count_config.counter); // when scheduler resumes, counter task should immediately count
vTaskDelete(counter_task);
}
#endif

View File

@@ -0,0 +1,508 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "sdkconfig.h"
#include <stdbool.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "driver/gptimer.h"
#include "esp_rom_sys.h"
#include "unity.h"
#include "test_utils.h"
/*
Scheduler suspension behavior differs significantly in SMP FreeRTOS, thus none of these tests apply to SMP FreeRTOS
*/
#if !CONFIG_FREERTOS_SMP
/*
A GP timer is used to trigger an interrupt. Test cases register an interrupt callback that is called from the timer's
alarm callback. The functions below simplify the interrupt registration/trigger/deregistration process.
*/
static gptimer_handle_t gptimer = NULL;
static bool (*registered_intr_callback)(void *) = NULL;
static bool on_timer_alarm_cb(gptimer_handle_t timer, const gptimer_alarm_event_data_t *edata, void *user_ctx)
{
bool yield;
if (registered_intr_callback) {
yield = registered_intr_callback(user_ctx);
} else {
yield = false;
}
return yield;
}
static void register_intr_cb(bool (*callback)(void *), void *callback_arg)
{
gptimer_handle_t gptimer_temp;
// Initialize a GP timer used to trigger an interrupt
gptimer_config_t timer_config = {
.clk_src = GPTIMER_CLK_SRC_DEFAULT,
.direction = GPTIMER_COUNT_UP,
.resolution_hz = 1000000, // 1MHz, 1 tick=1us
};
TEST_ESP_OK(gptimer_new_timer(&timer_config, &gptimer_temp));
// Configure an alarm (of 1ms) and callback for the timer
gptimer_alarm_config_t alarm_config = {
.reload_count = 0,
.alarm_count = 1000, // alarm period 1ms
.flags.auto_reload_on_alarm = true,
};
gptimer_event_callbacks_t cbs = {
.on_alarm = on_timer_alarm_cb,
};
TEST_ESP_OK(gptimer_register_event_callbacks(gptimer_temp, &cbs, callback_arg));
TEST_ESP_OK(gptimer_enable(gptimer_temp));
TEST_ESP_OK(gptimer_set_alarm_action(gptimer_temp, &alarm_config));
gptimer = gptimer_temp;
registered_intr_callback = callback;
}
static void trigger_intr_cb(void)
{
// Interrupt should be triggered in 1ms
TEST_ESP_OK(gptimer_start(gptimer));
}
static void deregister_intr_cb(void)
{
gptimer_handle_t gptimer_temp = gptimer;
gptimer = NULL;
registered_intr_callback = NULL;
TEST_ESP_OK(gptimer_stop(gptimer_temp));
TEST_ESP_OK(gptimer_disable(gptimer_temp));
TEST_ESP_OK(gptimer_del_timer(gptimer_temp));
}
/* ---------------------------------------------------------------------------------------------------------------------
Test vTaskSuspendAll() and xTaskResumeAll() basic
Purpose:
- Test that vTaskSuspendAll() suspends the scheduler for the calling core
- Test that xTaskResumeAll() resumes scheduling for the calling core
Procedure:
- Call vTaskSuspendAll() to suspend the scheduler
- Call xTaskResumeAll() to resume the scheduler
Expected:
- xTaskGetSchedulerState() should return the correct state
--------------------------------------------------------------------------------------------------------------------- */
TEST_CASE("Test vTaskSuspendAll and xTaskResumeAll basic", "[freertos]")
{
// Check scheduler is running on the current core
TEST_ASSERT_EQUAL(taskSCHEDULER_RUNNING, xTaskGetSchedulerState());
vTaskSuspendAll();
TEST_ASSERT_EQUAL(taskSCHEDULER_SUSPENDED, xTaskGetSchedulerState());
xTaskResumeAll();
TEST_ASSERT_EQUAL(taskSCHEDULER_RUNNING, xTaskGetSchedulerState());
}
/* ---------------------------------------------------------------------------------------------------------------------
Test vTaskSuspendAll() and xTaskResumeAll() multicore
Only runs on !CONFIG_FREERTOS_UNICORE
Purpose:
- Test that vTaskSuspendAll() suspends scheduling only for the calling core
- Test that xTaskResumeAll() resumes scheduling only for the calling core
Procedure:
Each core gets tested in the role of core A
- Create a "taskA" pinned to one core (e.g., core A) that will disable the scheduler
- Create a "taskB" pinned to the other core (e.g., core B) that will not disable the scheduler
- taskA calls vTaskSuspendAll() to suspend the scheduler on core A
- taskA calls xTaskResumeAll() to resume the scheduler on core A
Expected:
- vTaskSuspendAll() should only disable the scheduler for the suspended core A
- xTaskResumeAll() should resume the scheduler for the suspended core A
- Scheduler on core B should remain enabled
--------------------------------------------------------------------------------------------------------------------- */
#if !CONFIG_FREERTOS_UNICORE
#define TEST_BASIC_BUSY_DELAY_US 10000
static volatile int taskA_sync;
static volatile int taskB_sync;
static void test_multicore_taskA(void *arg)
{
// Wait to be started
ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
// Check scheduler on core A is enabled
TEST_ASSERT_EQUAL(taskSCHEDULER_RUNNING, xTaskGetSchedulerState());
taskA_sync++;
while (taskB_sync != 1) {
; // Wait for task B to complete its check
}
// Suspend the scheduler on core A
vTaskSuspendAll();
// Check scheduler is suspended on core A
TEST_ASSERT_EQUAL(taskSCHEDULER_SUSPENDED, xTaskGetSchedulerState());
taskA_sync++;
while (taskB_sync != 2) {
; // Wait for task B to complete its check
}
// Busy spin for a while to simulate work done while scheduler is suspended
esp_rom_delay_us(TEST_BASIC_BUSY_DELAY_US);
// Check scheduler is still suspended on core A
TEST_ASSERT_EQUAL(taskSCHEDULER_SUSPENDED, xTaskGetSchedulerState());
taskA_sync++;
while (taskB_sync != 3) {
; // Wait for task B to complete its check
}
// Resume the scheduler on core A
xTaskResumeAll();
// Check that the scheduler has resumed on core A
TEST_ASSERT_EQUAL(taskSCHEDULER_RUNNING, xTaskGetSchedulerState());
taskA_sync++;
while (taskB_sync != 4) {
; // Wait for task B to complete its check
}
// Indicate done and wait to be deleted
SemaphoreHandle_t done_sem = (SemaphoreHandle_t)arg;
xSemaphoreGive(done_sem);
vTaskSuspend(NULL);
}
static void test_multicore_taskB(void *arg)
{
// Wait to be started
ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
for (int i = 1; i <= 4; i++) {
// Wait for suspended trigger from task A
while (taskA_sync != i) {
;
}
// Check that scheduler is still running on core B
TEST_ASSERT_EQUAL(taskSCHEDULER_RUNNING, xTaskGetSchedulerState());
taskB_sync++;
}
// Indicate done and wait to be deleted
SemaphoreHandle_t done_sem = (SemaphoreHandle_t)arg;
xSemaphoreGive(done_sem);
vTaskSuspend(NULL);
}
TEST_CASE("Test vTaskSuspendAll() and xTaskResumeAll() multicore", "[freertos]")
{
SemaphoreHandle_t done_sem = xSemaphoreCreateCounting(portNUM_PROCESSORS, 0);
TEST_ASSERT_NOT_EQUAL(NULL, done_sem);
for (int i = 0; i < portNUM_PROCESSORS; i++) {
// Create tasks on core A and core B
TaskHandle_t taskA_hdl;
TaskHandle_t taskB_hdl;
TEST_ASSERT_EQUAL(pdTRUE, xTaskCreatePinnedToCore(test_multicore_taskA, "taskA", 2048, (void *)done_sem, UNITY_FREERTOS_PRIORITY - 1, &taskA_hdl, i));
TEST_ASSERT_EQUAL(pdTRUE, xTaskCreatePinnedToCore(test_multicore_taskB, "taskB", 2048, (void *)done_sem, UNITY_FREERTOS_PRIORITY - 1, &taskB_hdl, !i));
// Start the tasks and wait for their completion
taskA_sync = 0;
taskB_sync = 0;
xTaskNotifyGive(taskA_hdl);
xTaskNotifyGive(taskB_hdl);
for (int j = 0; j < 2; j++) {
xSemaphoreTake(done_sem, portMAX_DELAY);
}
// Cleanup the tasks
vTaskDelete(taskA_hdl);
vTaskDelete(taskB_hdl);
}
vSemaphoreDelete(done_sem);
}
#endif // !CONFIG_FREERTOS_UNICORE
/* ---------------------------------------------------------------------------------------------------------------------
Test vTaskSuspendAll allows scheduling on other cores
Only runs on !CONFIG_FREERTOS_UNICORE
Purpose:
- Test that disabling a scheduler on one core (e.g., core B) does not disable scheduling on the other core (e.g., core A)
- While the scheduler on core B is disabled, test that...
- A task on Core A can be unblocked by another task also on core A
- A task on Core A can be unblocked by an interrupt on core A
Procedure:
Each core gets tested in the role of core A
- Create task B1 pinned to core B that will suspend scheduling on core B
- Create task A2 pinned to core A that will test unblocking on core A
- Create task A1 pinned to core A that will unblock task A2
- Register an interrupt on core A that will unblock task A2
- Have A2 block
- Have B1 disable scheduling on core B. A1 checks that scheduling is still enabled on core A
- Have A1 unblock A2
- Have the core A ISR unblock A2
- Cleanup the tasks
Expected:
When B1 disables scheduling on core B...
- Scheduling on core A should still be enabled
- A2 should be unblocked by A1 and run without issue
- A2 should be unblocked by core A ISR and run without issue
--------------------------------------------------------------------------------------------------------------------- */
#if !CONFIG_FREERTOS_UNICORE
static volatile int test_unblk_sync;
static SemaphoreHandle_t test_unblk_done_sem;
static bool test_unblk_coreA_isr(void *arg)
{
TaskHandle_t a2_task_hdl = (TaskHandle_t)arg;
BaseType_t task_woken = pdFALSE;
// Unblock task A2
vTaskNotifyGiveFromISR(a2_task_hdl, &task_woken);
return (task_woken == pdTRUE);
}
static void test_unblk_a2_task(void *arg)
{
volatile int *a2_task_run_count = (volatile int *)arg;
// Wait to be unblocked by A1
ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
(*a2_task_run_count)++;
// Wait to be unblocked by Core A ISR
ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
(*a2_task_run_count)++;
// Wait to be deleted
vTaskSuspend(NULL);
}
static void test_unblk_a1_task(void *arg)
{
volatile int a2_task_run_count = 0;
// Create task A2 to block on this core (i.e., core A)
TaskHandle_t a2_task_hdl;
TEST_ASSERT_EQUAL(pdTRUE, xTaskCreatePinnedToCore(test_unblk_a2_task, "A2", 8192, (void *)&a2_task_run_count, UNITY_FREERTOS_PRIORITY + 2, &a2_task_hdl, xPortGetCoreID()));
// Install an interrupt on core A
register_intr_cb(test_unblk_coreA_isr, (void *)a2_task_hdl);
// Wait to be started by the main task
ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
// Start B1
xTaskNotifyGive((TaskHandle_t)arg);
while (test_unblk_sync != 1) {
; // Wait for confirmation from B1 that scheduler has been suspended on Core B
}
// Verify that the scheduler is still enabled on core A
TEST_ASSERT_EQUAL(taskSCHEDULER_RUNNING, xTaskGetSchedulerState());
// Unblock A2, it should preempt immediately due to its higher priority
xTaskNotifyGive(a2_task_hdl);
// Verify that task A2 has run
TEST_ASSERT_EQUAL(1, a2_task_run_count);
// Trigger an ISR on this core A to unblock task A2. A2 should preempt immediately due to its higher priority
trigger_intr_cb();
esp_rom_delay_us(2000); // Short busy delay to ensure interrupt has triggered
// Verify that task A2 has run
TEST_ASSERT_EQUAL(2, a2_task_run_count);
// Trigger B1 to resume scheduling on core B
test_unblk_sync = 2;
while (test_unblk_sync != 3) {
; // Wait for confirmation from B1 that scheduler has been resumed
}
// Verify that the scheduler is still enabled on core A
TEST_ASSERT_EQUAL(taskSCHEDULER_RUNNING, xTaskGetSchedulerState());
// Cleanup A2 and interrupt
deregister_intr_cb();
vTaskDelete(a2_task_hdl);
// Indicate done and wait to be deleted
xSemaphoreGive(test_unblk_done_sem);
vTaskSuspend(NULL);
}
static void test_unblk_b1_task(void *arg)
{
// Wait to be started by A1
ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
// Check scheduler is running on core B
TEST_ASSERT_EQUAL(taskSCHEDULER_RUNNING, xTaskGetSchedulerState());
// Suspend the scheduler on core B
vTaskSuspendAll();
TEST_ASSERT_EQUAL(taskSCHEDULER_SUSPENDED, xTaskGetSchedulerState());
// Indicate to A1 that core B scheduler has been suspended
test_unblk_sync = 1;
while (test_unblk_sync != 2) {
; // Wait for trigger from A1
}
// Resume the scheduler on core B
xTaskResumeAll();
TEST_ASSERT_EQUAL(taskSCHEDULER_RUNNING, xTaskGetSchedulerState());
// Indicate to A1 that core B scheduler has been resumed
test_unblk_sync = 3;
// Indicate done and wait to be deleted
xSemaphoreGive(test_unblk_done_sem);
vTaskSuspend(NULL);
}
TEST_CASE("Test vTaskSuspendAll allows scheduling on other cores", "[freertos]")
{
test_unblk_done_sem = xSemaphoreCreateCounting(portNUM_PROCESSORS, 0);
TEST_ASSERT_NOT_EQUAL(NULL, test_unblk_done_sem);
for (int i = 0; i < portNUM_PROCESSORS; i++) {
test_unblk_sync = 0;
// Create the tasks
TaskHandle_t a1_task_hdl;
TaskHandle_t b1_task_hdl;
TEST_ASSERT_EQUAL(pdTRUE, xTaskCreatePinnedToCore(test_unblk_b1_task, "B1", 8192, NULL, UNITY_FREERTOS_PRIORITY + 1, &b1_task_hdl, !i));
TEST_ASSERT_EQUAL(pdTRUE, xTaskCreatePinnedToCore(test_unblk_a1_task, "A1", 8192, (void *)b1_task_hdl, UNITY_FREERTOS_PRIORITY + 1, &a1_task_hdl, i));
// Start A1 and wait for both tasks to complete
xTaskNotifyGive(a1_task_hdl);
for (int j = 0; j < 2; j++) {
xSemaphoreTake(test_unblk_done_sem, portMAX_DELAY);
}
// Cleanup tasks
vTaskDelete(a1_task_hdl);
vTaskDelete(b1_task_hdl);
}
vSemaphoreDelete(test_unblk_done_sem);
}
#endif // !CONFIG_FREERTOS_UNICORE
/* ---------------------------------------------------------------------------------------------------------------------
Test xTaskResumeAll() resumes pended tasks on the current core
Purpose:
- When the scheduler is suspended on a particular core, test that tasks unblocked by an ISR on that core are placed
on that core's pending ready list (regardless of the tasks' affinity).
- When the scheduler is resumed on a particular core, test that the tasks on that core's pending ready list are
scheduled.
Procedure:
Test for each core
- Create some blocking tasks on the same core
- Register an interrupt on the same core to unblock those tasks
- Suspend the scheduler on the core
- Trigger the interrupt to unblock those tasks
- Resume the scheduler
- Cleanup
Expected:
- When the ISR unblocks the blocked tasks, each task's state should be ready
- When the scheduler is resumed, the tasks should be scheduled and run without issue.
--------------------------------------------------------------------------------------------------------------------- */
#define TEST_PENDED_NUM_BLOCKED_TASKS 4
static bool test_pended_isr(void *arg)
{
TaskHandle_t *blkd_tsks = (TaskHandle_t *)arg;
BaseType_t task_woken = pdFALSE;
// Unblock the blocked tasks
for (int i = 0; i < TEST_PENDED_NUM_BLOCKED_TASKS; i++) {
vTaskNotifyGiveFromISR(blkd_tsks[i], &task_woken);
}
return (task_woken == pdTRUE);
}
static void test_pended_blkd_task(void *arg)
{
volatile bool *has_run = (bool *)arg;
// Wait to be unblocked
ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
// Indicate the task has been unblocked and has run
*has_run = true;
// Wait to be deleted
vTaskSuspend(NULL);
}
static void test_pended_running_task(void *arg)
{
TaskHandle_t main_task_hdl = (TaskHandle_t)arg;
TaskHandle_t blkd_tsks[TEST_PENDED_NUM_BLOCKED_TASKS];
volatile bool has_run[TEST_PENDED_NUM_BLOCKED_TASKS];
// Create blocked tasks pinned to each core
for (int i = 0; i < TEST_PENDED_NUM_BLOCKED_TASKS; i++) {
has_run[i] = false;
TEST_ASSERT_EQUAL(pdTRUE, xTaskCreatePinnedToCore(test_pended_blkd_task, "blkd", 4096, (void *)&has_run[i], UNITY_FREERTOS_PRIORITY + 2, &blkd_tsks[i], i % portNUM_PROCESSORS));
}
vTaskDelay(10);
// Install an interrupt on the current core
register_intr_cb(test_pended_isr, (void *)blkd_tsks);
// Check that all tasks are blocked and have not run yet
for (int i = 0; i < TEST_PENDED_NUM_BLOCKED_TASKS; i++) {
TEST_ASSERT_EQUAL(eBlocked, eTaskGetState(blkd_tsks[i])); // Should be eSuspended due to portMAX_DELAY
TEST_ASSERT_EQUAL(false, has_run[i]);
}
// Suspend the scheduler on the current core
vTaskSuspendAll();
// Trigger the interrupt to unblock the blocked tasks
trigger_intr_cb();
esp_rom_delay_us(2000); // Short busy delay to ensure interrupt has triggered
// Check that all tasks are unblocked (but they should not have run since the scheduler is suspended)
for (int i = 0; i < TEST_PENDED_NUM_BLOCKED_TASKS; i++) {
// Note: We use eBlocked instead of eReady due to a bug in eTaskGetState(). See (IDF-5543)
TEST_ASSERT_EQUAL(eBlocked, eTaskGetState(blkd_tsks[i]));
TEST_ASSERT_EQUAL(false, has_run[i]);
}
// Resume the scheduler on the current core to schedule the unblocked tasks
xTaskResumeAll();
esp_rom_delay_us(10000); // Busy delay to ensure each task has enough time to run
// Check that all tasks have run
for (int i = 0; i < TEST_PENDED_NUM_BLOCKED_TASKS; i++) {
TEST_ASSERT_EQUAL(true, has_run[i]);
}
// Clean up the interrupt and tasks
deregister_intr_cb();
for (int i = 0; i < TEST_PENDED_NUM_BLOCKED_TASKS; i++) {
vTaskDelete(blkd_tsks[i]);
}
// Notify completion and wait for deletion
xTaskNotifyGive(main_task_hdl);
vTaskSuspend(NULL);
}
TEST_CASE("Test xTaskResumeAll resumes pended tasks", "[freertos]")
{
// Run the test on each core
for (int i = 0; i < portNUM_PROCESSORS; i++) {
TaskHandle_t susp_tsk_hdl;
TEST_ASSERT_EQUAL(pdTRUE, xTaskCreatePinnedToCore(test_pended_running_task, "susp", 2048, (void *)xTaskGetCurrentTaskHandle(), UNITY_FREERTOS_PRIORITY + 1, &susp_tsk_hdl, i));
// Wait to be notified of test completion
ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
vTaskDelete(susp_tsk_hdl);
}
}
#endif // !CONFIG_FREERTOS_SMP

View File

@@ -175,7 +175,7 @@ The resulting schedule will have Task A running on CPU0 and Task C preempting Ta
Time Slicing
^^^^^^^^^^^^
The Vanilla FreeRTOS scheduler implements time slicing, meaning that if the current highest ready priority contains multiple ready tasks, the scheduler will switch between those tasks periodically in a round robin fashion.
However, in ESP-IDF FreeRTOS, it is not possible to implement perfect Round Robin time slicing because a particular task may not be able to run on a particular core for the following reasons:
@@ -267,7 +267,7 @@ Vanilla FreeRTOS requires that a periodic tick interrupt occurs. The tick interr
- Checking if time slicing is required (i.e., triggering a context switch)
- Executing the application tick hook
In ESP-IDF FreeRTOS, each core will receive a periodic interrupt and independently run the tick interrupt. The tick interrupts on each core are of the same period but can be out of phase. Furthermore, the tick interrupt responsibilities listed above are not run by all cores:
In ESP-IDF FreeRTOS, each core will receive a periodic interrupt and independently run the tick interrupt. The tick interrupts on each core are of the same period but can be out of phase. However, the tick responsibilities listed above are not run by all cores:
- CPU0 will execute all of the tick interrupt responsibilities listed above
- CPU1 will only check for time slicing and execute the application tick hook
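A simplified sketch of how the port's tick ISR (``xPortSysTickHandler()``, as changed in this commit) dispatches these responsibilities per core:

.. code-block:: c

    BaseType_t xSwitchRequired;
    #if CONFIG_FREERTOS_UNICORE
        xSwitchRequired = xTaskIncrementTick();
    #else
        if (xPortGetCoreID() == 0) {
            // CPU0: increment the tick count, unblock timed out tasks, check time slicing
            xSwitchRequired = xTaskIncrementTick();
        } else {
            // CPU1: only check time slicing and call the application tick hook
            xSwitchRequired = xTaskIncrementTickOtherCores();
        }
    #endif
    // Yield from the ISR if the tick requires a context switch
    if (xSwitchRequired != pdFALSE) {
        portYIELD_FROM_ISR();
    }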
@@ -296,13 +296,17 @@ Vanilla FreeRTOS allows the scheduler to be suspended/resumed by calling :cpp:fu
On scheduler resumption, :cpp:func:`xTaskResumeAll` will catch up all of the lost ticks and unblock any timed out tasks.
In ESP-IDF FreeRTOS, suspending the scheduler across multiple cores is not possible. Therefore when :cpp:func:`vTaskSuspendAll` is called:
In ESP-IDF FreeRTOS, suspending the scheduler across multiple cores is not possible. Therefore when :cpp:func:`vTaskSuspendAll` is called on a particular core (e.g., core A):
- Task switching is disabled only on the current core but interrupts for the current core are left enabled
- Calling any blocking/yielding function on the current core is forbidden. Time slicing is disabled on the current core.
- If suspending on CPU0, the tick count is frozen. The tick interrupt will still occur to execute the application tick hook.
- Task switching is disabled only on core A but interrupts for core A are left enabled
- Calling any blocking/yielding function on core A is forbidden. Time slicing is disabled on core A.
- If an interrupt on core A unblocks any tasks, those tasks will go into core A's own pending ready task list
- If core A is CPU0, the tick count is frozen and a pended tick count is incremented instead. However, the tick interrupt will still occur in order to execute the application tick hook.
When resuming the scheduler on CPU0, :cpp:func:`xTaskResumeAll` will catch up all of the lost ticks and unblock any timed out tasks.
When :cpp:func:`xTaskResumeAll` is called on a particular core (e.g., core A):
- Any tasks added to core A's pending ready task list will be resumed
- If core A is CPU0, the pended tick count is unwound to catch up the lost ticks.
.. warning::
Given that scheduler suspension on ESP-IDF FreeRTOS will only suspend scheduling on a particular core, scheduler suspension is **NOT** a valid method of ensuring mutual exclusion between tasks when accessing shared data. Users should use proper locking primitives such as mutexes or spinlocks if they require mutual exclusion.
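For example, a count shared between tasks on different cores could instead be protected with a spinlock-based critical section (a minimal sketch; ``s_lock``, ``s_shared_count``, and ``increment_shared_count()`` are illustrative names):

.. code-block:: c

    static portMUX_TYPE s_lock = portMUX_INITIALIZER_UNLOCKED;
    static int s_shared_count = 0;

    void increment_shared_count(void)
    {
        // Unlike scheduler suspension, a critical section also excludes tasks
        // and interrupts on the other core
        taskENTER_CRITICAL(&s_lock);
        s_shared_count++;
        taskEXIT_CRITICAL(&s_lock);
    }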