Mirror of https://github.com/espressif/esp-idf.git
Merge branch 'bugfix/freertos_task_delete_v4.3' into 'release/v4.3'

freertos: Fix race condition using vTaskDelete() cross-core causing resource leak (v4.3)

See merge request espressif/esp-idf!13142
Commit 2335e5026f
@@ -1319,7 +1319,10 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode
        uxTaskNumber++;

        if( pxTCB == curTCB ||
            /* in SMP, we also can't immediately delete the task active on the other core */
            (portNUM_PROCESSORS > 1 && pxTCB == pxCurrentTCB[ !core ]) ||
            /* ... and we can't delete a non-running task pinned to the other core, as
               FPU cleanup has to happen on the same core */
            (portNUM_PROCESSORS > 1 && pxTCB->xCoreID == (!core)) )
        {
            /* A task is deleting itself. This cannot complete within the
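The third branch of the condition above routes tasks that are pinned to the other core onto the deferred-cleanup path, because their FPU state has to be cleaned up on that core. A hypothetical application-level sketch of that case (task and function names are illustrative, not part of this commit):

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

static void parked_task(void *arg)
{
    vTaskSuspend(NULL);                  /* suspends itself and never runs again */
}

void delete_task_pinned_to_other_core(void)
{
    TaskHandle_t handle;
    int other_core = !xPortGetCoreID();  /* the core the caller is NOT running on */

    xTaskCreatePinnedToCore(parked_task, "parked", 2048, NULL, 1, &handle, other_core);
    vTaskDelay(2);                       /* let it run once and suspend */

    /* Even though the task is not running, it is pinned to the other core, so this
       delete takes the deferred path: its memory is reclaimed later by the idle
       task on that core rather than inside vTaskDelete() itself. */
    vTaskDelete(handle);
}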
@@ -1340,6 +1343,21 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode
               hence xYieldPending is used to latch that a context switch is
               required. */
            portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending[core] );

            if (portNUM_PROCESSORS > 1 && pxTCB == pxCurrentTCB[ !core ])
            {
                /* SMP case of deleting a task running on a different core. Same issue
                   as a task deleting itself, but we need to send a yield to this task now
                   before we release xTaskQueueMutex.

                   Specifically there is a case where the other core may already be spinning on
                   xTaskQueueMutex waiting to go into a blocked state. A check is added in
                   prvAddCurrentTaskToDelayedList() to prevent it from removing itself from
                   xTasksWaitingTermination list in this case (instead it will immediately
                   release xTaskQueueMutex again and be yielded before the FreeRTOS function
                   returns.) */
                vPortYieldOtherCore( !core );
            }
        }
        else
        {
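The scenario described in the comment above is exercised by the "FreeRTOS Delete Blocked Tasks" test added at the end of this commit. A condensed, hypothetical sketch of the same pattern at application level (names and priorities are illustrative, not from the commit):

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"

static SemaphoreHandle_t s_shared_mutex;

/* The victim repeatedly takes and gives a contended mutex, so it is frequently
   about to enter the blocked state, which is the window where it may be spinning
   on xTaskQueueMutex when the other core deletes it. */
static void victim_task(void *arg)
{
    for (;;) {
        xSemaphoreTake(s_shared_mutex, portMAX_DELAY);
        xSemaphoreGive(s_shared_mutex);
    }
}

void delete_while_it_blocks(void)
{
    TaskHandle_t victim;
    s_shared_mutex = xSemaphoreCreateMutex();

    /* Pin the victim to the other core (core 1 here, assuming the caller runs on core 0). */
    xTaskCreatePinnedToCore(victim_task, "victim", 2048, NULL, 5, &victim, 1);
    vTaskDelay(2);

    /* With this fix, the victim core is forced to yield before xTaskQueueMutex is
       released, and the victim no longer re-adds itself to a delayed list once it
       has been moved to xTasksWaitingTermination. */
    vTaskDelete(victim);

    vTaskDelay(2);                       /* let the idle task finish any deferred cleanup */
    vSemaphoreDelete(s_shared_mutex);
}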
@@ -1372,25 +1390,6 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode
            configASSERT( xTaskGetSchedulerState() != taskSCHEDULER_SUSPENDED );
            portYIELD_WITHIN_API();
        }
        else if ( portNUM_PROCESSORS > 1 )
        {
            /* Check if the deleted task is currently running on any other core
               and force a yield to take it off.

               (this includes re-checking the core that curTCB was previously
               running on, in case curTCB has migrated to a different core.)
            */
            taskENTER_CRITICAL( &xTaskQueueMutex );
            for(BaseType_t i = 0; i < portNUM_PROCESSORS; i++)
            {
                if(pxTCB == pxCurrentTCB[ i ] )
                {
                    vPortYieldOtherCore( i );
                    break;
                }
            }
            taskEXIT_CRITICAL( &xTaskQueueMutex );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
@@ -5693,6 +5692,13 @@ static void prvAddCurrentTaskToDelayedList( const portBASE_TYPE xCoreID, const T
    TickType_t xTimeToWake;
    const TickType_t xConstTickCount = xTickCount;

    if (portNUM_PROCESSORS > 1 && listIS_CONTAINED_WITHIN(&xTasksWaitingTermination, &( pxCurrentTCB[xCoreID]->xStateListItem))) {
        /* vTaskDelete() has been called to delete this task. This would have happened from the other core while this task was spinning on xTaskQueueMutex,
           so don't move the running task to the delayed list - as soon as this core re-enables interrupts this task will
           be suspended permanently */
        return;
    }

    #if( INCLUDE_xTaskAbortDelay == 1 )
    {
        /* About to enter a delayed list, so ensure the ucDelayAborted flag is
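The guard added above relies on listIS_CONTAINED_WITHIN() to detect that the deleting core has already moved this task's xStateListItem onto xTasksWaitingTermination. A minimal stand-alone sketch of that membership check, using placeholder list objects rather than the kernel's own:

#include "freertos/FreeRTOS.h"
#include "freertos/list.h"

static List_t waiting_termination;   /* placeholder for xTasksWaitingTermination */
static ListItem_t state_item;        /* placeholder for a TCB's xStateListItem */

void list_membership_demo(void)
{
    vListInitialise(&waiting_termination);
    vListInitialiseItem(&state_item);

    /* The item is not on any list yet, so the macro reports "not contained". */
    configASSERT(!listIS_CONTAINED_WITHIN(&waiting_termination, &state_item));

    /* After insertion the item records its container, which is exactly what the
       new early return in prvAddCurrentTaskToDelayedList() tests for. */
    vListInsertEnd(&waiting_termination, &state_item);
    configASSERT(listIS_CONTAINED_WITHIN(&waiting_termination, &state_item));
}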
@@ -2,20 +2,19 @@
 * Test backported deletion behavior by creating tasks of various affinities and
 * check if the task memory is freed immediately under the correct conditions.
 *
 * The behavior of vTaskDelete() has been backported form FreeRTOS v9.0.0. This
 * results in the immediate freeing of task memory and the immediate execution
 * of deletion callbacks under the following conditions...
 * - When deleting a task that is not currently running on either core
 * - When deleting a task that is pinned to the same core (with respect to
 *   the core that calls vTaskDelete()
 * The behavior of vTaskDelete() results in the immediate freeing of task memory
 * and the immediate execution of deletion callbacks for tasks which are not
 * running, provided they are not pinned to the other core (due to FPU cleanup
 * requirements).
 *
 * If the two conditions are not met, freeing of task memory and execution of
 * If the condition is not met, freeing of task memory and execution of
 * deletion callbacks will still be carried out by the Idle Task.
 */
#include <stdio.h>

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "esp_heap_caps.h"

#include "unity.h"
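As a rough illustration of the "immediate free" behavior described in the header comment, the hypothetical helper below (not part of this test file, and assuming the includes above) deletes a suspended task pinned to the calling core and inspects the heap straight afterwards:

/* Create a task pinned to the calling core, let it suspend itself, then delete it.
   Because it is not running and not pinned to the other core, its memory should be
   returned to the heap by vTaskDelete() itself, not later by the idle task. */
static void self_suspending_task(void *arg)
{
    vTaskSuspend(NULL);
}

static void immediate_free_demo(void)
{
    unsigned free_before = heap_caps_get_free_size(MALLOC_CAP_8BIT);

    TaskHandle_t handle;
    xTaskCreatePinnedToCore(self_suspending_task, "tmp", 2048, NULL, 1, &handle, xPortGetCoreID());
    vTaskDelay(2);                       /* give it a chance to run and suspend */

    vTaskDelete(handle);                 /* memory reclaimed here, before any idle-task cleanup */
    printf("heap delta right after delete: %d\n",
           (int)heap_caps_get_free_size(MALLOC_CAP_8BIT) - (int)free_before);
}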
@@ -84,3 +83,71 @@ TEST_CASE("FreeRTOS Delete Tasks", "[freertos]")
    }

}


typedef struct {
    SemaphoreHandle_t sem;
    volatile bool deleted; // Check the deleted task doesn't keep running after being deleted
} tsk_blocks_param_t;

/* Task blocks as often as possible
   (two or more of these can share the same semaphore and "juggle" it around)
*/
static void tsk_blocks_frequently(void *param)
{
    tsk_blocks_param_t *p = (tsk_blocks_param_t *)param;
    SemaphoreHandle_t sem = p->sem;
    srand(xTaskGetTickCount() ^ (int)xTaskGetCurrentTaskHandle());
    while (1) {
        assert(!p->deleted);
        esp_rom_delay_us(rand() % 10);
        assert(!p->deleted);
        xSemaphoreTake(sem, portMAX_DELAY);
        assert(!p->deleted);
        esp_rom_delay_us(rand() % 10);
        assert(!p->deleted);
        xSemaphoreGive(sem);
    }
}

TEST_CASE("FreeRTOS Delete Blocked Tasks", "[freertos]")
{
    TaskHandle_t blocking_tasks[portNUM_PROCESSORS + 1]; // one per CPU, plus one unpinned task
    tsk_blocks_param_t params[portNUM_PROCESSORS + 1] = { 0 };

    unsigned before = heap_caps_get_free_size(MALLOC_CAP_8BIT);
    printf("Free memory at start %u\n", before);

    /* Any bugs will depend on relative timing of destroying the tasks, so create & delete many times.

       Stop early if it looks like some resources have not been properly cleaned up.

       (1000 iterations takes about 9 seconds on ESP32 dual core)
    */
    for(unsigned iter = 0; iter < 1000; iter++) {
        // Create everything
        SemaphoreHandle_t sem = xSemaphoreCreateMutex();
        for(unsigned i = 0; i < portNUM_PROCESSORS + 1; i++) {
            params[i].deleted = false;
            params[i].sem = sem;

            TEST_ASSERT_EQUAL(pdTRUE,
                              xTaskCreatePinnedToCore(tsk_blocks_frequently, "tsk_block", 4096, &params[i],
                                                      UNITY_FREERTOS_PRIORITY - 1, &blocking_tasks[i],
                                                      i < portNUM_PROCESSORS ? i : tskNO_AFFINITY));
        }

        vTaskDelay(5); // Let the tasks juggle the mutex for a bit

        for(unsigned i = 0; i < portNUM_PROCESSORS + 1; i++) {
            vTaskDelete(blocking_tasks[i]);
            params[i].deleted = true;
        }
        vTaskDelay(4); // Yield to the idle task for cleanup

        vSemaphoreDelete(sem);

        // Check we haven't leaked resources yet
        TEST_ASSERT_GREATER_OR_EQUAL(before - 256, heap_caps_get_free_size(MALLOC_CAP_8BIT));
    }
}