freertos: Schedule tasks unblocked by an ISR on a core with scheduler running if core affinity matches

FromISR APIs would put an unblocked task on the pending ready list if
the scheduler is suspended on the current core, irrespective of the
task's core affinity and the state of the scheduler on the other core.
This commit updates this behavior by allowing tasks to get scheduled on
a core which has the scheduler running as long as the task's core
affinity allows it.
This commit is contained in:
Sudeep Mohanty 2023-03-06 13:47:09 +01:00 committed by BOT
parent d3d5b77363
commit 9b81e2b189
3 changed files with 186 additions and 20 deletions

View File

@ -269,6 +269,24 @@
#define taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, xCoreID ) taskIS_CURRENTLY_RUNNING( ( pxTCB ) ) #define taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, xCoreID ) taskIS_CURRENTLY_RUNNING( ( pxTCB ) )
#endif /* configNUM_CORES > 1 */ #endif /* configNUM_CORES > 1 */
/*
 * Check if a task can be scheduled on a core.
 * On a dual-core system:
 * - If a task is pinned, check the scheduler suspension state on the task's pinned core. The task can be scheduled
 *   if the scheduler is not suspended on the pinned core.
 * - If a task is unpinned, check the scheduler suspension state on both cores. The task can be scheduled if the
 *   scheduler is not suspended on either of the cores.
 * On a single-core system:
 * - Check the scheduler suspension state on core 0. The task can be scheduled if the scheduler is not suspended.
 *
 * Note: The entire expansion is parenthesized and the macro parameter is wrapped in parentheses at each use, so
 * the macro stays well-formed when embedded in a larger expression (e.g. `x && taskCAN_BE_SCHEDULED( p )`).
 */
#if ( configNUM_CORES > 1 )
    #define taskCAN_BE_SCHEDULED( pxTCB )                                                                                      \
        ( ( ( pxTCB )->xCoreID != tskNO_AFFINITY ) ? ( uxSchedulerSuspended[ ( pxTCB )->xCoreID ] == ( UBaseType_t ) pdFALSE ) \
                                                   : ( ( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) pdFALSE ) || ( uxSchedulerSuspended[ 1 ] == ( UBaseType_t ) pdFALSE ) ) )
#else
    #define taskCAN_BE_SCHEDULED( pxTCB )    ( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) pdFALSE )
#endif /* configNUM_CORES > 1 */
/* /*
* Several functions take a TaskHandle_t parameter that can optionally be NULL, * Several functions take a TaskHandle_t parameter that can optionally be NULL,
* where NULL is used to indicate that the handle of the currently executing * where NULL is used to indicate that the handle of the currently executing
@ -685,7 +703,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
* *
* Scheduling Algorithm: * Scheduling Algorithm:
* This function will bias towards yielding the current core. * This function will bias towards yielding the current core.
* - If the unblocked task has a higher (or equal) priority than then current * - If the unblocked task has a higher (or equal) priority than the current
* core, the current core is yielded regardless of the current priority of the * core, the current core is yielded regardless of the current priority of the
* other core. * other core.
* - A core (current or other) will only yield if their schedulers are not * - A core (current or other) will only yield if their schedulers are not
@ -1363,13 +1381,15 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
/* Indicate whether the current core needs to yield */ /* Indicate whether the current core needs to yield */
BaseType_t xYieldRequiredCurrentCore; BaseType_t xYieldRequiredCurrentCore;
/* If the target task can run on the current core, and has a higher priority than the current core, then yield the current core */ /* If the target task can run on the current core, and has a higher priority than the current core, and the core has not suspended scheduling, then yield the current core */
if( ( ( xTaskCoreID == xCurCoreID ) || ( xTaskCoreID == tskNO_AFFINITY ) ) && ( uxTaskPriority > pxCurrentTCB[ xCurCoreID ]->uxPriority ) ) if( ( ( xTaskCoreID == xCurCoreID ) || ( xTaskCoreID == tskNO_AFFINITY ) ) &&
( uxTaskPriority > pxCurrentTCB[ xCurCoreID ]->uxPriority ) &&
( uxSchedulerSuspended[ xCurCoreID ] == ( UBaseType_t ) pdFALSE ) )
{ {
/* Return true for the caller to yield the current core */ /* Return true for the caller to yield the current core */
xYieldRequiredCurrentCore = pdTRUE; xYieldRequiredCurrentCore = pdTRUE;
} }
/* If the target task can run on the other core, and has a higher priority then the other core, and the other core has not suspended scheduling, the yield the other core */ /* If the target task can run on the other core, and has a higher priority then the other core, and the other core has not suspended scheduling, then yield the other core */
else if( ( ( xTaskCoreID == !xCurCoreID ) || ( xTaskCoreID == tskNO_AFFINITY ) ) && else if( ( ( xTaskCoreID == !xCurCoreID ) || ( xTaskCoreID == tskNO_AFFINITY ) ) &&
( uxTaskPriority > pxCurrentTCB[ !xCurCoreID ]->uxPriority ) && ( uxTaskPriority > pxCurrentTCB[ !xCurCoreID ]->uxPriority ) &&
( uxSchedulerSuspended[ !xCurCoreID ] == ( UBaseType_t ) pdFALSE ) ) ( uxSchedulerSuspended[ !xCurCoreID ] == ( UBaseType_t ) pdFALSE ) )
@ -2261,8 +2281,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
traceTASK_RESUME_FROM_ISR( pxTCB ); traceTASK_RESUME_FROM_ISR( pxTCB );
/* Check the ready lists can be accessed. */ /* Check the ready lists can be accessed. */
/* Known issue IDF-5856. We also need to check if the other core is suspended */ if( taskCAN_BE_SCHEDULED( pxTCB ) )
if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
{ {
/* Ready lists can be accessed so move the task from the /* Ready lists can be accessed so move the task from the
* suspended list to the ready list directly. */ * suspended list to the ready list directly. */
@ -3878,12 +3897,7 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
* has NOT suspended its scheduler. This occurs when: * has NOT suspended its scheduler. This occurs when:
* - The task is pinned, and the pinned core's scheduler is running * - The task is pinned, and the pinned core's scheduler is running
* - The task is unpinned, and at least one of the core's scheduler is running */ * - The task is unpinned, and at least one of the core's scheduler is running */
#if ( configNUM_CORES > 1 ) if( taskCAN_BE_SCHEDULED( pxUnblockedTCB ) )
if( ( ( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) pdFALSE ) && ( taskCAN_RUN_ON_CORE( 0, pxUnblockedTCB->xCoreID ) == pdTRUE ) ) ||
( ( uxSchedulerSuspended[ 1 ] == ( UBaseType_t ) pdFALSE ) && ( taskCAN_RUN_ON_CORE( 1, pxUnblockedTCB->xCoreID ) == pdTRUE ) ) )
#else
if( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) pdFALSE )
#endif /* configNUM_CORES > 1 */
{ {
( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) ); ( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) );
prvAddTaskToReadyList( pxUnblockedTCB ); prvAddTaskToReadyList( pxUnblockedTCB );
@ -6109,7 +6123,7 @@ TickType_t uxTaskResetEventItemValue( void )
/* The task should not have been on an event list. */ /* The task should not have been on an event list. */
configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL ); configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE ) if( taskCAN_BE_SCHEDULED( pxTCB ) )
{ {
( void ) uxListRemove( &( pxTCB->xStateListItem ) ); ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
prvAddTaskToReadyList( pxTCB ); prvAddTaskToReadyList( pxTCB );
@ -6200,7 +6214,7 @@ TickType_t uxTaskResetEventItemValue( void )
/* The task should not have been on an event list. */ /* The task should not have been on an event list. */
configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL ); configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE ) if( taskCAN_BE_SCHEDULED( pxTCB ) )
{ {
( void ) uxListRemove( &( pxTCB->xStateListItem ) ); ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
prvAddTaskToReadyList( pxTCB ); prvAddTaskToReadyList( pxTCB );

View File

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
@ -244,15 +244,20 @@ Purpose:
- While the scheduler on core B is disabled, test that... - While the scheduler on core B is disabled, test that...
- A task on Core A can be unblocked by another task also on core A - A task on Core A can be unblocked by another task also on core A
- A task on Core A can be unblocked by an interrupt on core A - A task on Core A can be unblocked by an interrupt on core A
- A blocked unpinned task can be unblocked by a task on Core B and be scheduled to run on core A.
Procedure: Procedure:
Each core gets tested in the role of core A Each core gets tested in the role of core A
- Create task B1 pinned to core B that will suspend scheduling on core B - Create task B1 pinned to core B that will suspend scheduling on core B
- Create unpinned task B2
- Create task A2 pinned to core A that will test unblocking on core A - Create task A2 pinned to core A that will test unblocking on core A
- Create task A1 pinned to core A that will unblock task A2 - Create task A1 pinned to core A that will unblock task A2
- Register an interrupt on core A that will unblock task A2 - Register an interrupt on core A that will unblock task A2
- Have A2 block - Have A2 block
- Have B1 create B2
- B2 checks it is running on core B and then blocks on core B
- Have B1 disable scheduling on core B. A1 checks that scheduling is still enabled on core A - Have B1 disable scheduling on core B. A1 checks that scheduling is still enabled on core A
- B2 checks that it is running on core A after scheduling is disabled on core B
- Have A1 unblock A2 - Have A1 unblock A2
- Have the core A ISR unblock A2 - Have the core A ISR unblock A2
- Cleanup the tasks - Cleanup the tasks
@ -339,16 +344,38 @@ static void test_unblk_a1_task(void *arg)
vTaskSuspend(NULL); vTaskSuspend(NULL);
} }
/* Unpinned task B2: starts on core B, then verifies it migrates to core A
 * once core B's scheduler is suspended and B1 unblocks it. */
static void test_unpinned_b2_task(void *arg)
{
    // Copy core B's ID out of the argument once; the pointer target belongs to B1's stack
    const BaseType_t core_b_id = *((BaseType_t *)arg);

    // We must have been scheduled on core B to begin with
    TEST_ASSERT_EQUAL(core_b_id, xPortGetCoreID());

    // Block until B1 notifies us (it does so after suspending core B's scheduler)
    ulTaskNotifyTake(pdTRUE, portMAX_DELAY);

    // Core B's scheduler is suspended, so we must now be running on the other core (core A)
    TEST_ASSERT_EQUAL(!core_b_id, xPortGetCoreID());

    // Park here until the test deletes us
    vTaskSuspend(NULL);
}
static void test_unblk_b1_task(void *arg) static void test_unblk_b1_task(void *arg)
{ {
// Wait to be started by A1 // Wait to be started by A1
ulTaskNotifyTake(pdTRUE, portMAX_DELAY); ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
// Create unpinned task B2 to block on this core (i.e., core B). Task should run immediately as it has a higher priority.
TaskHandle_t b2_task_hdl;
BaseType_t xCoreID = xPortGetCoreID();
TEST_ASSERT_EQUAL(pdTRUE, xTaskCreatePinnedToCore(test_unpinned_b2_task, "B2", 8192, (void *)&xCoreID, UNITY_FREERTOS_PRIORITY + 2, &b2_task_hdl, tskNO_AFFINITY));
// Check scheduler is running on core B // Check scheduler is running on core B
TEST_ASSERT_EQUAL(taskSCHEDULER_RUNNING, xTaskGetSchedulerState()); TEST_ASSERT_EQUAL(taskSCHEDULER_RUNNING, xTaskGetSchedulerState());
// Suspend the scheduler on core B // Suspend the scheduler on core B
vTaskSuspendAll(); vTaskSuspendAll();
TEST_ASSERT_EQUAL(taskSCHEDULER_SUSPENDED, xTaskGetSchedulerState()); TEST_ASSERT_EQUAL(taskSCHEDULER_SUSPENDED, xTaskGetSchedulerState());
// Unblock B2. The unpinned task is free to run on the other core
xTaskNotifyGive(b2_task_hdl);
// Indicate to A1 that core B scheduler has been suspended // Indicate to A1 that core B scheduler has been suspended
test_unblk_sync = 1; test_unblk_sync = 1;
@ -361,6 +388,9 @@ static void test_unblk_b1_task(void *arg)
// Indicate to A1 that core B scheduler has been resumed // Indicate to A1 that core B scheduler has been resumed
test_unblk_sync = 3; test_unblk_sync = 3;
// Cleanup B2
vTaskDelete(b2_task_hdl);
// Indicate done and wait to be deleted // Indicate done and wait to be deleted
xSemaphoreGive(test_unblk_done_sem); xSemaphoreGive(test_unblk_done_sem);
vTaskSuspend(NULL); vTaskSuspend(NULL);
@ -399,7 +429,8 @@ Test xTaskResumeAll() resumes pended tasks on the current core
Purpose: Purpose:
- When the scheduler is suspended on a particular core, test that tasks unblocked by an ISR on that core will place - When the scheduler is suspended on a particular core, test that tasks unblocked by an ISR on that core will place
those tasks on the core's pending ready list (regardless of the task's affinity). those tasks on the core's pending ready list if the tasks are pinned to the core with the suspended scheduler.
Tasks which have affinity to the other core must be resumed.
- When the scheduler is resumed on a particular core, test that the tasks on core's pending ready list will be - When the scheduler is resumed on a particular core, test that the tasks on core's pending ready list will be
scheduled. scheduled.
@ -471,11 +502,16 @@ static void test_pended_running_task(void *arg)
trigger_intr_cb(); trigger_intr_cb();
esp_rom_delay_us(2000); // Short busy delay to ensure interrupt has triggered esp_rom_delay_us(2000); // Short busy delay to ensure interrupt has triggered
// Check that all tasks are unblocked (but should not have run since the scheduler is suspend) // Check that tasks which have affinity to the current core are blocked and have not run as the scheduler is suspended.
// While tasks which do not have affinity to the current core are unblocked.
for (int i = 0; i < TEST_PENDED_NUM_BLOCKED_TASKS; i++) { for (int i = 0; i < TEST_PENDED_NUM_BLOCKED_TASKS; i++) {
// Note: We use eBlocked instead of eReady due to a bug in eTaskGetState(). See (IDF-5543) // Note: We use eBlocked instead of eReady due to a bug in eTaskGetState(). See (IDF-5543)
if (xTaskGetAffinity(blkd_tsks[i]) == xPortGetCoreID()) {
TEST_ASSERT_EQUAL(eBlocked, eTaskGetState(blkd_tsks[i])); TEST_ASSERT_EQUAL(eBlocked, eTaskGetState(blkd_tsks[i]));
TEST_ASSERT_EQUAL(false, has_run[i]); TEST_ASSERT_EQUAL(false, has_run[i]);
} else {
TEST_ASSERT_NOT_EQUAL(eBlocked, eTaskGetState(blkd_tsks[i]));
}
} }
// Resume the scheduler on the current core to schedule the unblocked tasks // Resume the scheduler on the current core to schedule the unblocked tasks
@ -511,4 +547,119 @@ TEST_CASE("Test xTaskResumeAll resumes pended tasks", "[freertos]")
// Add a short delay to allow the idle task to free any remaining task memory // Add a short delay to allow the idle task to free any remaining task memory
vTaskDelay(10); vTaskDelay(10);
} }
/* ---------------------------------------------------------------------------------------------------------------------
Test xTaskSuspendAll on both cores pends all tasks and xTaskResumeAll on both cores resumes all tasks
Purpose:
- When the scheduler is suspended on both cores, test that tasks unblocked by an ISR on a core are placed
on that core's pending ready list.
- When the scheduler is resumed on both cores, test that each core will schedule the
tasks from their respective pending ready lists.
Procedure:
- Create some blocking tasks pinned on both cores
- Create a task which suspends the scheduler on the other core
- Suspend the scheduler respectively on both cores
- Unblock pinned tasks on both cores once the scheduler is suspended
- Test that unblocked tasks are not scheduled
- Resume the scheduler respectively on both cores
- Test that unblocked tasks are now scheduled
- Cleanup
Expected:
- When the ISR unblocks the blocked tasks, the task's state should be blocked
- When the scheduler is resumed, the tasks should be scheduled and run without issue
--------------------------------------------------------------------------------------------------------------------- */
#if !CONFIG_FREERTOS_UNICORE
// Handles of the notification-blocked tasks, pinned round-robin across the cores
TaskHandle_t blkd_tasks[TEST_PENDED_NUM_BLOCKED_TASKS];
// Given by test_susp_task when it has finished unblocking/checking its core's tasks
SemaphoreHandle_t done_sem;
/* Runs pinned to the "other" core: suspends that core's scheduler, unblocks the
 * tasks pinned to it (via a FromISR call so they land on the pending ready list),
 * verifies they have not run, then resumes and signals completion. */
static void test_susp_task(void *arg)
{
    bool *task_ran_flags = (bool *)arg;

    // Suspend scheduling on this core before unblocking anything
    vTaskSuspendAll();

    for (int i = 0; i < TEST_PENDED_NUM_BLOCKED_TASKS; i++) {
        // Only handle the blocked tasks pinned to this core
        if ((i % portNUM_PROCESSORS) != xPortGetCoreID()) {
            continue;
        }
        // Use the FromISR() variant to emulate an ISR unblocking the task,
        // forcing it onto this core's pending ready list
        BaseType_t xYield = pdFALSE;
        vTaskNotifyGiveFromISR(blkd_tasks[i], &xYield);
        // With the scheduler suspended, the task must still report blocked and must not have run
        TEST_ASSERT_EQUAL(eBlocked, eTaskGetState(blkd_tasks[i]));
        TEST_ASSERT_EQUAL(false, task_ran_flags[i]);
    }

    // Resume scheduling on this core; pended tasks may now run
    xTaskResumeAll();

    // Tell the test runner we are done, then park until deleted
    xSemaphoreGive(done_sem);
    vTaskSuspend(NULL);
}
TEST_CASE("Test xTaskSuspendAll on all cores pends all tasks and xTaskResumeAll on all cores resumes all tasks", "[freertos]")
{
// Per-task "has run" flags, written by the blocked tasks once they are scheduled
volatile bool has_run[TEST_PENDED_NUM_BLOCKED_TASKS];
done_sem = xSemaphoreCreateBinary();
// Create blocked tasks pinned to each core (round-robin over the cores)
for (int i = 0; i < TEST_PENDED_NUM_BLOCKED_TASKS; i++) {
has_run[i] = false;
TEST_ASSERT_EQUAL(pdTRUE, xTaskCreatePinnedToCore(test_pended_blkd_task, "blkd", 4096, (void *)&has_run[i], UNITY_FREERTOS_PRIORITY + 2, &blkd_tasks[i], i % portNUM_PROCESSORS));
}
// Let the created tasks run and block on their notifications
vTaskDelay(10);
// Create pinned task on the other core which will suspend its scheduler
// NOTE(review): the (void *) cast drops the volatile qualifier on has_run — the
// done_sem handshake appears to order the accesses, but confirm this is intended.
TaskHandle_t susp_task;
TEST_ASSERT_EQUAL(pdTRUE, xTaskCreatePinnedToCore(test_susp_task, "susp_task", 2048, (void *)has_run, UNITY_FREERTOS_PRIORITY, &susp_task, !xPortGetCoreID()));
// Suspend the scheduler on this core
vTaskSuspendAll();
for (int i = 0; i < TEST_PENDED_NUM_BLOCKED_TASKS; i++) {
if ((i % portNUM_PROCESSORS) == xPortGetCoreID()) {
// Unblock the blocked tasks pinned to this core.
// We use the FromISR() call to create an ISR scenario and to force the unblocked task to be placed
// on the pending ready list
BaseType_t yield = pdFALSE;
vTaskNotifyGiveFromISR(blkd_tasks[i], &yield);
// The unblocked task must still be blocked and must not have run
TEST_ASSERT_EQUAL(eBlocked, eTaskGetState(blkd_tasks[i]));
TEST_ASSERT_EQUAL(false, has_run[i]);
}
}
// Resume scheduler on this core
xTaskResumeAll();
// Wait for test completion (test_susp_task does the same sequence on the other core)
xSemaphoreTake(done_sem, portMAX_DELAY);
// Verify that all blocked tasks have resumed and run when the schedulers are resumed on both cores
for (int i = 0; i < TEST_PENDED_NUM_BLOCKED_TASKS; i++) {
TEST_ASSERT_NOT_EQUAL(eBlocked, eTaskGetState(blkd_tasks[i]));
TEST_ASSERT_EQUAL(true, has_run[i]);
}
// Cleanup
for (int i = 0; i < TEST_PENDED_NUM_BLOCKED_TASKS; i++) {
vTaskDelete(blkd_tasks[i]);
}
vTaskDelete(susp_task);
vSemaphoreDelete(done_sem);
}
#endif // !CONFIG_FREERTOS_UNICORE
#endif // !CONFIG_FREERTOS_SMP #endif // !CONFIG_FREERTOS_SMP

View File

@ -295,7 +295,8 @@ In ESP-IDF FreeRTOS, suspending the scheduler across multiple cores is not possi
- Task switching is disabled only on core A but interrupts for core A are left enabled - Task switching is disabled only on core A but interrupts for core A are left enabled
- Calling any blocking/yielding function on core A is forbidden. Time slicing is disabled on core A. - Calling any blocking/yielding function on core A is forbidden. Time slicing is disabled on core A.
- If an interrupt on core A unblocks any tasks, those tasks will go into core A's own pending ready task list - If an interrupt on core A unblocks any tasks, tasks with affinity to core A will go into core A's own pending ready task list. Unpinned tasks or tasks with affinity to other cores can be scheduled on cores with the scheduler running.
- In case the scheduler is suspended on all cores, tasks unblocked by an interrupt will go to the pending ready task lists of their pinned cores or to the pending ready list of the core on which the interrupt is called if the tasks are unpinned.
- If core A is CPU0, the tick count is frozen and a pended tick count is incremented instead. However, the tick interrupt will still occur in order to execute the application tick hook. - If core A is CPU0, the tick count is frozen and a pended tick count is incremented instead. However, the tick interrupt will still occur in order to execute the application tick hook.
When :cpp:func:`xTaskResumeAll` is called on a particular core (e.g., core A): When :cpp:func:`xTaskResumeAll` is called on a particular core (e.g., core A):