freertos: Fix SMP round robin scheduling

The previous SMP FreeRTOS round robin would skip over tasks when
time slicing. This commit implements a Best Effort Round Robin
where selected tasks are put to the back of the list, thus
making the time slicing fairer.

- Documentation has been updated accordingly.
- Tidy up vTaskSwitchContext() to more closely match v10.4.3
- Increased esp_ipc task stack size to avoid overflow

Closes https://github.com/espressif/esp-idf/issues/7256
Darian Leung 2021-10-14 01:31:05 +08:00
parent f6db71539e
commit 466c42c3c1
7 changed files with 376 additions and 193 deletions


@@ -5,7 +5,7 @@ menu "IPC (Inter-Processor Call)"
range 512 65536 if !APPTRACE_ENABLE
range 2048 65536 if APPTRACE_ENABLE
default 2048 if APPTRACE_ENABLE
default 1024
default 1536
help
Configure the IPC tasks stack size. One IPC task runs on each core
(in dual core mode), and allows for cross-core function calls.
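
As context for the stack size increase, here is a minimal sketch (not part of this diff) of a cross-core call; the callback runs on the target core's IPC task and consumes the IPC task stack configured above. `esp_ipc_call_blocking()` is the existing IPC API, while the function names in the sketch are hypothetical:

#include "freertos/FreeRTOS.h"
#include "esp_ipc.h"
#include "esp_rom_sys.h"

//Hypothetical callback: executed by the IPC task of the target core,
//so any locals here live on the IPC task's stack sized by this option
static void print_core_id(void *arg)
{
    (void)arg;
    esp_rom_printf("IPC callback running on core %d\n", xPortGetCoreID());
}

//Hypothetical caller: blocks until core 1's IPC task has run the callback
void example_cross_core_call(void)
{
    esp_ipc_call_blocking(1, print_core_id, NULL);
}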


@@ -143,6 +143,9 @@
/*-----------------------------------------------------------*/
#ifdef ESP_PLATFORM
#define taskSELECT_HIGHEST_PRIORITY_TASK() taskSelectHighestPriorityTaskSMP()
#else //ESP_PLATFORM
#define taskSELECT_HIGHEST_PRIORITY_TASK() \
{ \
UBaseType_t uxTopPriority = uxTopReadyPriority; \
@@ -159,6 +162,7 @@
listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB[xPortGetCoreID()], &( pxReadyTasksLists[ uxTopPriority ] ) ); \
uxTopReadyPriority = uxTopPriority; \
} /* taskSELECT_HIGHEST_PRIORITY_TASK */
#endif //ESP_PLATFORM
/*-----------------------------------------------------------*/
@@ -3358,12 +3362,102 @@ BaseType_t xTaskIncrementTick( void )
#endif /* configUSE_APPLICATION_TASK_TAG */
/*-----------------------------------------------------------*/
#ifdef ESP_PLATFORM
#if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )
static void taskSelectHighestPriorityTaskSMP( void )
{
/* This function is called from a critical section, so some optimizations are made */
BaseType_t uxCurPriority;
BaseType_t xTaskScheduled = pdFALSE;
BaseType_t xNewTopPrioritySet = pdFALSE;
BaseType_t xCoreID = xPortGetCoreID(); /* Optimization: Read once */
/* Search for tasks, starting from the highest ready priority. If nothing is
* found, we eventually default to the IDLE tasks at priority 0 */
for ( uxCurPriority = uxTopReadyPriority; uxCurPriority >= 0 && xTaskScheduled == pdFALSE; uxCurPriority-- )
{
/* Check if current priority has one or more ready tasks. Skip if none */
if( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxCurPriority ] ) ) )
{
continue;
}
/* Save a copy of highest priority that has a ready state task */
if( xNewTopPrioritySet == pdFALSE )
{
xNewTopPrioritySet = pdTRUE;
uxTopReadyPriority = uxCurPriority;
}
/* We now search this priority's ready task list for a runnable task.
* We always start searching from the head of the list, so we reset
* pxIndex to point to the tail so that we start walking the list from
* the first item */
pxReadyTasksLists[ uxCurPriority ].pxIndex = ( ListItem_t * ) &( pxReadyTasksLists[ uxCurPriority ].xListEnd );
/* Get the first item on the list */
TCB_t * pxTCBCur;
TCB_t * pxTCBFirst;
listGET_OWNER_OF_NEXT_ENTRY( pxTCBCur, &( pxReadyTasksLists[ uxCurPriority ] ) );
pxTCBFirst = pxTCBCur;
do
{
/* Check if the current task is currently being executed. However, if
* it's being executed by the current core, we can still schedule it.
* Todo: Each task can store an xTaskRunState, instead of needing to
* check each core */
UBaseType_t ux;
for( ux = 0; ux < ( UBaseType_t )configNUM_CORES; ux++ )
{
if ( ux == xCoreID )
{
continue;
}
else if ( pxCurrentTCB[ux] == pxTCBCur )
{
/* Current task is already being executed. Get the next task */
goto get_next_task;
}
}
/* Check if the current task has a compatible affinity */
if ( ( pxTCBCur->xCoreID != xCoreID ) && ( pxTCBCur->xCoreID != tskNO_AFFINITY ) )
{
goto get_next_task;
}
/* The current task is runnable. Schedule it */
pxCurrentTCB[ xCoreID ] = pxTCBCur;
xTaskScheduled = pdTRUE;
/* Move the current task's list item to the back of the list in order
* to implement best effort round robin. To do this, we need to reset
* the pxIndex to point to the tail again. */
pxReadyTasksLists[ uxCurPriority ].pxIndex = ( ListItem_t * ) &( pxReadyTasksLists[ uxCurPriority ].xListEnd );
uxListRemove( &( pxTCBCur->xStateListItem ) );
vListInsertEnd( &( pxReadyTasksLists[ uxCurPriority ] ), &( pxTCBCur->xStateListItem ) );
break;
get_next_task:
/* The current task cannot be scheduled. Get the next task in the list */
listGET_OWNER_OF_NEXT_ENTRY( pxTCBCur, &( pxReadyTasksLists[ uxCurPriority ] ) );
} while( pxTCBCur != pxTCBFirst ); /* Check to see if we've walked the entire list */
}
assert( xTaskScheduled == pdTRUE ); /* At this point, a task MUST have been scheduled */
}
#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
#endif //ESP_PLATFORM
void vTaskSwitchContext( void )
{
//Theoretically, this is only called from either the tick interrupt or the crosscore interrupt, so disabling
//interrupts shouldn't be necessary anymore. Still, for safety we'll leave it in for now.
int irqstate = portSET_INTERRUPT_MASK_FROM_ISR();
#ifdef ESP_PLATFORM
/* vTaskSwitchContext is called either from:
* - ISR dispatcher when return from an ISR (interrupts will already be disabled)
* - vTaskSuspend() which is not in a critical section
* Therefore, we enter a critical section ISR version to ensure safety */
taskENTER_CRITICAL_ISR();
#endif // ESP_PLATFORM
if( uxSchedulerSuspended[ xPortGetCoreID() ] != ( UBaseType_t ) pdFALSE )
{
/* The scheduler is currently suspended - do not allow a context
@@ -3373,7 +3467,9 @@ void vTaskSwitchContext( void )
else
{
xYieldPending[ xPortGetCoreID() ] = pdFALSE;
#ifdef ESP_PLATFORM
xSwitchingContext[ xPortGetCoreID() ] = pdTRUE;
#endif // ESP_PLATFORM
traceTASK_SWITCHED_OUT();
#if ( configGENERATE_RUN_TIME_STATS == 1 )
@@ -3391,7 +3487,6 @@ void vTaskSwitchContext( void )
* overflows. The guard against negative values is to protect
* against suspect run time stat counter implementations - which
* are provided by the application, not the kernel. */
taskENTER_CRITICAL_ISR();
if( ulTotalRunTime > ulTaskSwitchedInTime[ xPortGetCoreID() ] )
{
pxCurrentTCB[ xPortGetCoreID() ]->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime[ xPortGetCoreID() ] );
@@ -3400,134 +3495,59 @@
{
mtCOVERAGE_TEST_MARKER();
}
taskEXIT_CRITICAL_ISR();
ulTaskSwitchedInTime[ xPortGetCoreID() ] = ulTotalRunTime;
}
#endif /* configGENERATE_RUN_TIME_STATS */
/* Check for stack overflow, if configured. */
#ifdef ESP_PLATFORM
taskFIRST_CHECK_FOR_STACK_OVERFLOW();
taskSECOND_CHECK_FOR_STACK_OVERFLOW();
#else
taskCHECK_FOR_STACK_OVERFLOW();
/* Select a new task to run */
/*
We cannot do taskENTER_CRITICAL_ISR(); here because it saves the interrupt context to the task tcb, and we're
swapping that out here. Instead, we're going to do the work here ourselves. Because interrupts are already disabled, we only
need to acquire the mutex.
*/
vPortCPUAcquireMutex( &xTaskQueueMutex );
#if !configUSE_PORT_OPTIMISED_TASK_SELECTION
unsigned portBASE_TYPE foundNonExecutingWaiter = pdFALSE, ableToSchedule = pdFALSE, resetListHead;
unsigned portBASE_TYPE holdTop=pdFALSE;
tskTCB * pxTCB;
portBASE_TYPE uxDynamicTopReady = uxTopReadyPriority;
/*
* ToDo: This scheduler doesn't correctly implement the round-robin scheduling as done in the single-core
* FreeRTOS stack when multiple tasks have the same priority and are all ready; it just keeps grabbing the
* first one. ToDo: fix this.
* (Is this still true? if any, there's the issue with one core skipping over the processes for the other
* core, potentially not giving the skipped-over processes any time.)
*/
while ( ableToSchedule == pdFALSE && uxDynamicTopReady >= 0 )
{
resetListHead = pdFALSE;
// Nothing to do for empty lists
if (!listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxDynamicTopReady ] ) )) {
ableToSchedule = pdFALSE;
tskTCB * pxRefTCB;
/* Remember the current list item so that we
can detect if all items have been inspected.
Once this happens, we move on to a lower
priority list (assuming nothing is suitable
for scheduling). Note: This can return NULL if
the list index is at the listItem */
pxRefTCB = pxReadyTasksLists[ uxDynamicTopReady ].pxIndex->pvOwner;
if ((void*)pxReadyTasksLists[ uxDynamicTopReady ].pxIndex==(void*)&pxReadyTasksLists[ uxDynamicTopReady ].xListEnd) {
//pxIndex points to the list end marker. Skip that and just get the next item.
listGET_OWNER_OF_NEXT_ENTRY( pxRefTCB, &( pxReadyTasksLists[ uxDynamicTopReady ] ) );
}
do {
listGET_OWNER_OF_NEXT_ENTRY( pxTCB, &( pxReadyTasksLists[ uxDynamicTopReady ] ) );
/* Find out if the next task in the list is
already being executed by another core */
foundNonExecutingWaiter = pdTRUE;
portBASE_TYPE i = 0;
for ( i=0; i<configNUM_CORES; i++ ) {
if (i == xPortGetCoreID()) {
continue;
} else if (pxCurrentTCB[i] == pxTCB) {
holdTop=pdTRUE; //keep this as the top prio, for the other CPU
foundNonExecutingWaiter = pdFALSE;
break;
}
}
if (foundNonExecutingWaiter == pdTRUE) {
/* If the task is not being executed
by another core and its affinity is
compatible with the current one,
prepare it to be swapped in */
if (pxTCB->xCoreID == tskNO_AFFINITY) {
pxCurrentTCB[xPortGetCoreID()] = pxTCB;
ableToSchedule = pdTRUE;
} else if (pxTCB->xCoreID == xPortGetCoreID()) {
pxCurrentTCB[xPortGetCoreID()] = pxTCB;
ableToSchedule = pdTRUE;
} else {
ableToSchedule = pdFALSE;
holdTop=pdTRUE; //keep this as the top prio, for the other CPU
}
} else {
ableToSchedule = pdFALSE;
}
if (ableToSchedule == pdFALSE) {
resetListHead = pdTRUE;
} else if ((ableToSchedule == pdTRUE) && (resetListHead == pdTRUE)) {
tskTCB * pxResetTCB;
do {
listGET_OWNER_OF_NEXT_ENTRY( pxResetTCB, &( pxReadyTasksLists[ uxDynamicTopReady ] ) );
} while(pxResetTCB != pxRefTCB);
}
} while ((ableToSchedule == pdFALSE) && (pxTCB != pxRefTCB));
} else {
if (!holdTop) --uxTopReadyPriority;
--uxDynamicTopReady;
}
/* Before the currently running task is switched out, save its errno. */
#if ( configUSE_POSIX_ERRNO == 1 )
{
pxCurrentTCB->iTaskErrno = FreeRTOS_errno;
}
#endif /* configUSE_POSIX_ERRNO */
#else
//For Unicore targets we can keep the current FreeRTOS O(1)
//Scheduler. I hope to optimize better the scheduler for
//Multicore settings -- This will involve to create a per
//affinity ready task list which will impact hugely on
//tasks module
taskSELECT_HIGHEST_PRIORITY_TASK();
#endif
traceTASK_SWITCHED_IN();
xSwitchingContext[ xPortGetCoreID() ] = pdFALSE;
#ifdef ESP_PLATFORM
//Exit critical region manually as well: release the mux now, interrupts will be re-enabled when we
//exit the function.
vPortCPUReleaseMutex( &xTaskQueueMutex );
#endif // ESP_PLATFORM
/* Select a new task to run using either the generic C or port
* optimised asm code. */
taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
traceTASK_SWITCHED_IN();
#ifdef ESP_PLATFORM
xSwitchingContext[ xPortGetCoreID() ] = pdFALSE;
#if CONFIG_FREERTOS_WATCHPOINT_END_OF_STACK
vPortSetStackWatchpoint(pxCurrentTCB[xPortGetCoreID()]->pxStack);
#endif
#else
/* After the new task is switched in, update the global errno. */
#if ( configUSE_POSIX_ERRNO == 1 )
{
FreeRTOS_errno = pxCurrentTCB->iTaskErrno;
}
#endif
#if ( configUSE_NEWLIB_REENTRANT == 1 )
{
/* Switch Newlib's _impure_ptr variable to point to the _reent
* structure specific to this task.
* See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html
* for additional information. */
_impure_ptr = &( pxCurrentTCB->xNewLib_reent );
}
#endif /* configUSE_NEWLIB_REENTRANT */
#endif // ESP_PLATFORM
}
portCLEAR_INTERRUPT_MASK_FROM_ISR(irqstate);
#ifdef ESP_PLATFORM
/* Exit the critical section previously entered */
taskEXIT_CRITICAL_ISR();
#endif // ESP_PLATFORM
}
/*-----------------------------------------------------------*/

View File

@@ -0,0 +1,169 @@
/*
* SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdbool.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/queue.h"
#include "esp_rom_sys.h"
#include "hal/interrupt_controller_hal.h"
#include "unity.h"
#include "test_utils.h"
/*
Test Best Effort Round Robin Scheduling:
The following test case tests the "Best Effort Round Robin Scheduling" that fixes the skipping behavior found in older
versions of the ESP-IDF SMP FreeRTOS (see docs for more details about Best Effort Round Robin Scheduling).
This test...
- Only runs under dual core configuration
- Will disable the tick interrupts of both cores
The test flow is as follows:
1. Stop preemption on core 0 by raising the priority of the unity task
2. Stop preemption on core 1 by creating a blocker task
3. Disable tick interrupts on both cores
4. Create N spin tasks on each core, each with a sequential task_code
5. Unblock those spin tasks in a sequential order
6. Lower priority of unity task and stop the blocker task so that spin tasks are run
7. Each time a spin task is run (i.e., an iteration) it will send its task code to a queue
8. Spin tasks will clean themselves up
9. The queue should contain the task codes of the spin tasks in the order they were started in, thus showing that round
robin schedules the tasks in a sequential order.
*/
#if !defined(CONFIG_FREERTOS_UNICORE) && (defined(CONFIG_FREERTOS_CORETIMER_0) || defined(CONFIG_FREERTOS_CORETIMER_1))
#define SPIN_TASK_PRIO (CONFIG_UNITY_FREERTOS_PRIORITY + 1)
#define SPIN_TASK_NUM_ITER 3
#define TASK_STACK_SIZE 1024
#define NUM_PINNED_SPIN_TASK_PER_CORE 3
#if defined(CONFIG_FREERTOS_CORETIMER_0)
#define TICK_INTR_IDX 6
#else //defined(CONFIG_FREERTOS_CORETIMER_1)
#define TICK_INTR_IDX 15
#endif
static QueueHandle_t core0_run_order_queue;
static QueueHandle_t core1_run_order_queue;
static uint32_t total_iter_count[configNUM_CORES] = {0};
static void spin_task(void *arg)
{
uint32_t task_code = (uint32_t)arg;
QueueHandle_t run_order_queue = ((task_code >> 4) == 0) ? core0_run_order_queue : core1_run_order_queue;
//Wait to be started
ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
for (int i = 0; i < SPIN_TASK_NUM_ITER; i++) {
xQueueSend(run_order_queue, &task_code, 0);
//No need for critical sections as tick interrupt is disabled
total_iter_count[xPortGetCoreID()]++;
taskYIELD();
}
//Last iteration of the last spin task on this core. Reenable this core's tick interrupt
if (total_iter_count[xPortGetCoreID()] == (NUM_PINNED_SPIN_TASK_PER_CORE * SPIN_TASK_NUM_ITER)) {
interrupt_controller_hal_enable_interrupts(1 << TICK_INTR_IDX);
}
vTaskDelete(NULL);
}
static void blocker_task(void *arg)
{
volatile bool *exit_loop = (volatile bool *)arg;
//Disable the tick interrupt on core 1 for the duration of the test
taskDISABLE_INTERRUPTS();
interrupt_controller_hal_disable_interrupts(1 << TICK_INTR_IDX);
taskENABLE_INTERRUPTS();
while (!*exit_loop) {
;
}
//Wait to be resumed
vTaskSuspend(NULL);
//Reenable tick interrupt on core 1
taskDISABLE_INTERRUPTS();
interrupt_controller_hal_enable_interrupts(1 << TICK_INTR_IDX);
taskENABLE_INTERRUPTS();
vTaskDelete(NULL);
}
TEST_CASE("Test FreeRTOS Scheduling Round Robin", "[freertos]")
{
core0_run_order_queue = xQueueCreate(SPIN_TASK_NUM_ITER * NUM_PINNED_SPIN_TASK_PER_CORE, sizeof(uint32_t));
core1_run_order_queue = xQueueCreate(SPIN_TASK_NUM_ITER * NUM_PINNED_SPIN_TASK_PER_CORE, sizeof(uint32_t));
/* Increase priority of unity task so that the spin tasks don't preempt us
during task creation. */
vTaskPrioritySet(NULL, SPIN_TASK_PRIO + 1);
/* Create a task on core 1 of the same priority to block core 1 */
volatile bool suspend_blocker = false;
TaskHandle_t blocker_task_hdl;
xTaskCreatePinnedToCore(blocker_task, "blk", TASK_STACK_SIZE, (void *)&suspend_blocker, SPIN_TASK_PRIO + 1, &blocker_task_hdl, 1);
//Disable the tick interrupt on core 0 for the duration of the test
taskDISABLE_INTERRUPTS();
interrupt_controller_hal_disable_interrupts(1 << TICK_INTR_IDX);
taskENABLE_INTERRUPTS();
TaskHandle_t core0_task_hdls[NUM_PINNED_SPIN_TASK_PER_CORE];
TaskHandle_t core1_task_hdls[NUM_PINNED_SPIN_TASK_PER_CORE];
for (int i = 0; i < NUM_PINNED_SPIN_TASK_PER_CORE; i++) {
//Create a spin task pinned to core 0
xTaskCreatePinnedToCore(spin_task, "spin", TASK_STACK_SIZE, (void *)(0x00 + i), SPIN_TASK_PRIO, &core0_task_hdls[i], 0);
//Create a spin task pinned to core 1
xTaskCreatePinnedToCore(spin_task, "spin", TASK_STACK_SIZE, (void *)(0x10 + i), SPIN_TASK_PRIO, &core1_task_hdls[i], 1);
}
/* Start the tasks in a particular order. This order should be reflected in
the round robin scheduling on each core */
for (int i = 0; i < NUM_PINNED_SPIN_TASK_PER_CORE; i++) {
//Start a spin task on core 0
xTaskNotifyGive(core0_task_hdls[i]);
//Start a spin task on core 1
xTaskNotifyGive(core1_task_hdls[i]);
}
//Lower priority of this task and stop blocker task to allow the spin tasks to be scheduled
suspend_blocker = true;
vTaskPrioritySet(NULL, UNITY_FREERTOS_PRIORITY);
//Delay long enough to allow all iterations of the round robin to occur
esp_rom_delay_us(10000);
for (int i = 0; i < SPIN_TASK_NUM_ITER; i++) {
for (int j = 0; j < NUM_PINNED_SPIN_TASK_PER_CORE; j++) {
uint32_t core0_entry;
uint32_t core1_entry;
TEST_ASSERT_EQUAL(pdTRUE, xQueueReceive(core0_run_order_queue, &core0_entry, 0));
TEST_ASSERT_EQUAL(pdTRUE, xQueueReceive(core1_run_order_queue, &core1_entry, 0));
TEST_ASSERT_EQUAL(0x00 + j, core0_entry);
TEST_ASSERT_EQUAL(0x10 + j, core1_entry);
}
}
//Resume the blocker task for cleanup
vTaskResume(blocker_task_hdl);
//Reenable tick interrupt on core 0
taskDISABLE_INTERRUPTS();
interrupt_controller_hal_enable_interrupts(1 << TICK_INTR_IDX);
taskENABLE_INTERRUPTS();
vTaskDelay(10); //Wait for blocker task to clean up
//Clean up queues
vQueueDelete(core0_run_order_queue);
vQueueDelete(core1_run_order_queue);
}
#endif //!defined(CONFIG_FREERTOS_UNICORE) && (defined(CONFIG_FREERTOS_CORETIMER_0) || defined(CONFIG_FREERTOS_CORETIMER_1))

3 binary image files changed but not shown (deleted documentation figures; 29 KiB, 60 KiB, and 48 KiB).


@@ -19,7 +19,7 @@ FreeRTOS and ESP-IDF FreeRTOS. The API reference for vanilla FreeRTOS can be
found via https://www.freertos.org/a00106.html
For information regarding features that are exclusive to ESP-IDF FreeRTOS,
see :doc:`ESP-IDF FreeRTOS Additions<../api-reference/system/freertos_additions>`.
see :doc:`ESP-IDF FreeRTOS Additions</api-reference/system/freertos_additions>`.
.. only:: not CONFIG_FREERTOS_UNICORE
@@ -30,10 +30,7 @@ see :doc:`ESP-IDF FreeRTOS Additions<../api-reference/system/freertos_additions>
``1`` for **APP_CPU**, or ``tskNO_AFFINITY`` which allows the task to run on
both.
:ref:`round-robin-scheduling`: The ESP-IDF FreeRTOS scheduler will skip tasks when
implementing Round-Robin scheduling between multiple tasks in the Ready state
that are of the same priority. To avoid this behavior, ensure that those tasks either
enter a blocked state, or are distributed across a wider range of priorities.
:ref:`round-robin-scheduling`: The ESP-IDF FreeRTOS scheduler implements a "Best Effort Round-Robin Scheduling" instead of the ideal Round-Robin scheduling in vanilla FreeRTOS.
:ref:`scheduler-suspension`: Suspending the scheduler in ESP-IDF FreeRTOS will only
affect the scheduler on the calling core. In other words, calling
@@ -133,91 +130,88 @@ synchronicity.
Round Robin Scheduling
^^^^^^^^^^^^^^^^^^^^^^
Given multiple tasks in the Ready state and of the same priority, vanilla
FreeRTOS implements Round Robin scheduling between each task. This will result
in running those tasks in turn each time the scheduler is called
(e.g. every tick interrupt). On the other hand, the ESP-IDF FreeRTOS scheduler
may skip tasks when Round Robin scheduling multiple Ready state tasks of the
same priority.
Given multiple tasks in the Ready state and of the same priority, vanilla FreeRTOS implements Round Robin scheduling between them. This results in running those tasks in turn each time the scheduler is called (e.g., when the tick interrupt occurs or when a task blocks/yields).
The issue of skipping tasks during Round Robin scheduling arises from the way
the Ready Tasks List is implemented in FreeRTOS. In vanilla FreeRTOS,
``pxReadyTasksList`` is used to store a list of tasks that are in the Ready
state. The list is implemented as an array of length ``configMAX_PRIORITIES``
where each element of the array is a linked list. Each linked list is of type
``List_t`` and contains TCBs of tasks of the same priority that are in the
Ready state. The following diagram illustrates the ``pxReadyTasksList``
structure.

.. figure:: ../../_static/freertos-ready-task-list.png
    :align: center
    :alt: Vanilla FreeRTOS Ready Task List Structure

    Illustration of FreeRTOS Ready Task List Data Structure

Each linked list also contains a ``pxIndex`` which points to the last TCB
returned when the list was queried. This index allows ``vTaskSwitchContext()``
to start traversing the list at the TCB immediately after ``pxIndex``, hence
implementing Round Robin Scheduling between tasks of the same priority.

In ESP-IDF FreeRTOS, the Ready Tasks List is shared between cores, hence
``pxReadyTasksList`` will contain tasks pinned to different cores. When a core
calls the scheduler, it is able to look at the ``xCoreID`` member of each TCB
in the list to determine if a task is allowed to run on the calling core. The
ESP-IDF FreeRTOS ``pxReadyTasksList`` is illustrated below.

.. figure:: ../../_static/freertos-ready-task-list-smp.png
    :align: center
    :alt: ESP-IDF FreeRTOS Ready Task List Structure

    Illustration of FreeRTOS Ready Task List Data Structure in ESP-IDF

Therefore when **PRO_CPU** calls the scheduler, it will only consider the tasks
in blue or purple. Whereas when **APP_CPU** calls the scheduler, it will only
consider the tasks in orange or purple.

Although each TCB has an ``xCoreID`` in ESP-IDF FreeRTOS, the linked list of
each priority only has a single ``pxIndex``. Therefore when the scheduler is
called from a particular core and traverses the linked list, it will skip all
TCBs pinned to the other core and point the ``pxIndex`` at the selected task. If
the other core then calls the scheduler, it will traverse the linked list
starting at the TCB immediately after ``pxIndex``. Therefore, TCBs skipped on
the previous scheduler call from the other core would not be considered on the
current scheduler call. This issue is demonstrated in the following
illustration.

.. figure:: ../../_static/freertos-ready-task-list-smp-pxIndex.png
    :align: center
    :alt: ESP-IDF pxIndex Behavior

    Illustration of pxIndex behavior in ESP-IDF FreeRTOS

Referring to the illustration above, assume that priority 9 is the highest
priority, and none of the tasks in priority 9 will block, hence will always be
either in the Running or Ready state.

1) **PRO_CPU** calls the scheduler and selects Task A to run, hence moves
``pxIndex`` to point to Task A
2) **APP_CPU** calls the scheduler and starts traversing from the task after
``pxIndex``, which is Task B. However, Task B is not selected to run as it is
not pinned to **APP_CPU**, hence it is skipped and Task C is selected instead.
``pxIndex`` now points to Task C
3) **PRO_CPU** calls the scheduler and starts traversing from Task D. It skips
Task D and selects Task E to run and points ``pxIndex`` to Task E. Notice that
Task B isn't traversed because it was skipped the last time **APP_CPU** called
the scheduler to traverse the list.
4) The same situation with Task D will occur if **APP_CPU** calls the
scheduler again, as ``pxIndex`` now points to Task E

One solution to the issue of task skipping is to ensure that every task will
enter a blocked state so that it is removed from the Ready Task List.
Another solution is to distribute tasks across multiple priorities such that
a given priority will not be assigned multiple tasks that are pinned to
different cores.

On the other hand, it is not possible for the ESP-IDF FreeRTOS scheduler to implement perfect Round Robin because a particular task may not be able to run on a particular core for the following reasons:

- The task is pinned to another core.
- For unpinned tasks, the task is already being run by another core.

Therefore, when a core searches the ready state task list for a task to run, the core may need to skip over a few tasks in the same priority list or drop to a lower priority in order to find a ready state task that the core can run.

The ESP-IDF FreeRTOS scheduler implements Best Effort Round Robin scheduling for ready state tasks of the same priority by ensuring that tasks that have been selected to run are placed at the back of the list, thus giving unselected tasks a higher priority on the next scheduling iteration (i.e., the next tick interrupt or yield).

The following example demonstrates the Best Effort Round Robin Scheduling in action. Assume that:

- There are four ready state tasks of the same priority ``AX, B0, C1, D0`` where:
    - The priority is the current highest priority with ready state tasks
    - The first character represents the task's name (i.e., ``A, B, C, D``)
    - The second character represents the task's core pinning (``X`` means unpinned)
- The task list is always searched from the head

.. code-block:: none

    --------------------------------------------------------------------------------

    1. Starting state. None of the ready state tasks have been selected to run

    Head [ AX , B0 , C1 , D0 ] Tail

    --------------------------------------------------------------------------------

    2. Core 0 has a tick interrupt and searches for a task to run.
       Task A is selected and is moved to the back of the list

    Core0--|
    Head [ AX , B0 , C1 , D0 ] Tail

                          0
    Head [ B0 , C1 , D0 , AX ] Tail

    --------------------------------------------------------------------------------

    3. Core 1 has a tick interrupt and searches for a task to run.
       Task B cannot be run due to incompatible affinity, so core 1 skips to Task C.
       Task C is selected and is moved to the back of the list

    Core1-------|         0
    Head [ B0 , C1 , D0 , AX ] Tail

                     0    1
    Head [ B0 , D0 , AX , C1 ] Tail

    --------------------------------------------------------------------------------

    4. Core 0 has another tick interrupt and searches for a task to run.
       Task B is selected and moved to the back of the list

    Core0--|              1
    Head [ B0 , D0 , AX , C1 ] Tail

                 1        0
    Head [ D0 , AX , C1 , B0 ] Tail

    --------------------------------------------------------------------------------

    5. Core 1 has another tick interrupt and searches for a task to run.
       Task D cannot be run due to incompatible affinity, so core 1 skips to Task A.
       Task A is selected and moved to the back of the list

    Core1-------|         0
    Head [ D0 , AX , C1 , B0 ] Tail

                     0    1
    Head [ D0 , C1 , B0 , AX ] Tail

The implications to users regarding the Best Effort Round Robin Scheduling:

- Users cannot expect multiple ready state tasks of the same priority to run sequentially (as is the case in vanilla FreeRTOS). As demonstrated in the example above, a core may need to skip over tasks.
- However, given enough ticks, a task will eventually be given some processing time.
- If a core cannot find a runnable task at the highest ready state priority, it will drop to a lower priority to search for tasks.
- To achieve ideal round robin scheduling, users should ensure that all tasks of a particular priority are pinned to the same core (a minimal sketch of this follows below).
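
A minimal sketch of the pinning approach from the last point above (this example is illustrative and not part of this change; ``worker_task`` and the chosen stack/priority values are placeholders):

.. code-block:: c

    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    /* Placeholder task function */
    static void worker_task(void *arg)
    {
        (void)arg;
        while (1) {
            /* ... do some work ... */
            taskYIELD();    /* yield to the next ready task of the same priority */
        }
    }

    void create_workers(void)
    {
        /* All three tasks share priority 5 and are pinned to core 0, so core 0
           will round robin them sequentially, as in vanilla FreeRTOS. */
        xTaskCreatePinnedToCore(worker_task, "wrk0", 2048, NULL, 5, NULL, 0);
        xTaskCreatePinnedToCore(worker_task, "wrk1", 2048, NULL, 5, NULL, 0);
        xTaskCreatePinnedToCore(worker_task, "wrk2", 2048, NULL, 5, NULL, 0);
    }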
.. _scheduler-suspension: