freertos: Fix event group task list race condition

FreeRTOS synchronization primitives (e.g., queues, event groups) use various event lists (i.e., task lists) to track which
tasks are blocked on a given primitive. These event lists are normally accessed via one of the event list functions
(such as vTask[PlaceOn|RemoveFrom]UnorderedEventList()), which in turn ensure that the global task list spinlock
(xTaskQueueMutex) is taken while the lists are accessed.
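
For illustration, the pattern those helper functions follow looks roughly like the sketch below. This is a simplified sketch, not the actual tasks.c implementation (the body is an approximation); the point is that the entire list manipulation happens with xTaskQueueMutex held:

/* Sketch only -- not the real tasks.c code. The helper takes the global task
   list spinlock before it touches the event list or the delayed task list. */
void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xItemValue, const TickType_t xTicksToWait )
{
    taskENTER_CRITICAL( &xTaskQueueMutex );
    {
        /* Tag the event list item and put the current task on the event list. */
        listSET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ),
                                 xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
        vListInsertEnd( pxEventList, &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ) );
        /* Move the current task to the delayed list so it blocks until the timeout. */
        prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
    }
    taskEXIT_CRITICAL( &xTaskQueueMutex );
}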

However, some functions in event_groups.c traverse their event lists manually. Thus, if a tick interrupt occurs on
another core during the traversal and that tick interrupt unblocks a task on the event list being traversed, the event
list will be corrupted.
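
For illustration only, the problematic pattern looks roughly like the sketch below (condensed, not the verbatim pre-fix code; prvExampleUnsafeTraversal is a hypothetical name, while the struct and member names follow event_groups.c conventions and the diff further down):

/* Sketch of the race: the wait list is walked while holding only the event
   group's own spinlock, so tasks.c can concurrently modify the same list. */
static void prvExampleUnsafeTraversal( EventGroup_t * pxEventBits )
{
    ListItem_t * pxListItem;
    ListItem_t * pxNext;
    List_t * pxList = &( pxEventBits->xTasksWaitingForBits );

    taskENTER_CRITICAL( &pxEventBits->eventGroupMux );    /* event group spinlock only */
    {
        pxListItem = listGET_HEAD_ENTRY( pxList );
        while( pxListItem != listGET_END_MARKER( pxList ) )
        {
            pxNext = listGET_NEXT( pxListItem );           /* cache the next link...   */
            /* ...but a tick interrupt on the other core can unblock (i.e. unlink)
               this very task right now, because xTaskQueueMutex is not held here.
               The cached pxNext then points into a list that has already changed. */
            pxListItem = pxNext;
        }
    }
    taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
}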

This commit modifies the following event_groups.c functions so that they take the global task list lock before
traversing their event lists (a condensed sketch of the resulting lock nesting follows the list below):

- xEventGroupSetBits()
- vEventGroupDelete()
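
Condensed from the xEventGroupSetBits() hunks below (the traversal itself and error handling omitted), the resulting lock nesting is:

vTaskSuspendAll();                                     /* keep other tasks from running      */
taskENTER_CRITICAL( &pxEventBits->eventGroupMux );     /* event group's own spinlock         */
vTaskTakeEventListLock();                              /* new: tasks.c task list spinlock    */
{
    /* xTasksWaitingForBits can now be traversed safely. */
}
vTaskReleaseEventListLock();                           /* new: released in reverse order     */
taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
( void ) xTaskResumeAll();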
Author: Darian Leung  2022-07-19 12:13:54 +08:00
parent 0bb4b995be
commit 6358f93a51
3 changed files with 42 additions and 0 deletions

event_groups.c

@@ -565,6 +565,10 @@ BaseType_t xMatchFound = pdFALSE;
vTaskSuspendAll();
taskENTER_CRITICAL(&pxEventBits->eventGroupMux);
/* The critical section above only takes the event group's spinlock. However, we are about to traverse a task list.
Thus we need to call the function below to take the task list spinlock located in tasks.c. Not doing so risks
the task list being changed while we are traversing it. */
vTaskTakeEventListLock();
{
traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );
@@ -636,6 +640,8 @@ BaseType_t xMatchFound = pdFALSE;
bit was set in the control word. */
pxEventBits->uxEventBits &= ~uxBitsToClear;
}
/* Release the previously held task list spinlock, then release the event group spinlock. */
vTaskReleaseEventListLock();
taskEXIT_CRITICAL(&pxEventBits->eventGroupMux);
( void ) xTaskResumeAll();
@@ -650,6 +656,10 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup )
vTaskSuspendAll();
taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
/* The critical section above only takes the event group's spinlock. However, we are about to traverse a task list.
Thus we need to call the function below to take the task list spinlock located in tasks.c. Not doing so risks
the task list being changed while we are traversing it. */
vTaskTakeEventListLock();
{
traceEVENT_GROUP_DELETE( xEventGroup );
@@ -661,6 +671,9 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup )
( void ) xTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
}
/* Release the previously held task list spinlock. */
vTaskReleaseEventListLock();
#if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
{
/* The event group can only have been allocated dynamically - free

task.h

@@ -2152,6 +2152,23 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xIte
*/
void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
/*
* THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN
* INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER.
*
* vTaskTakeEventListLock() and vTaskReleaseEventListLock() are wrappers to
* take/release the "xTaskQueueMutex" spinlock of tasks.c. This lock is taken
* whenever any of the task lists or event lists are accessed/modified, such as
* when adding/removing tasks to/from the delayed task list or various event
* lists.
*
* These functions are meant to be called by xEventGroupSetBits() and
* vEventGroupDelete(), as both of those functions access event lists directly
* (instead of delegating the entire responsibility to one of the
* vTask...EventList() functions).
*/
void vTaskTakeEventListLock( void );
void vTaskReleaseEventListLock( void );
/*
* THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN
* INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER.

tasks.c

@@ -3165,6 +3165,18 @@ UBaseType_t i, uxTargetCPU;
}
/*-----------------------------------------------------------*/
void vTaskTakeEventListLock( void )
{
/* We call the tasks.c critical section macro to take xTaskQueueMutex */
taskENTER_CRITICAL(&xTaskQueueMutex);
}
void vTaskReleaseEventListLock( void )
{
/* We call the tasks.c critical section macro to release xTaskQueueMutex */
taskEXIT_CRITICAL(&xTaskQueueMutex);
}
BaseType_t xTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, const TickType_t xItemValue )
{
TCB_t *pxUnblockedTCB;