mirror of https://github.com/espressif/esp-idf.git
freertos: use xTaskQueueMutex to protect tick count
Having two different spinlocks is problematic because different code paths can take them in different orders, and enforcing a single order would require significant restructuring of kernel code, which is undesirable. An additional place where taking xTickCountMutex was needed was the vApplicationSleep function: without it, the other CPU could sometimes advance the tick count while light sleep entry/exit was in progress. However, taking xTickCountMutex in addition to xTaskQueueMutex there showed that the two spinlocks were indeed acquired in different orders on different code paths, leading to unlikely but possible deadlocks. Protect the tick count with xTaskQueueMutex instead and drop xTickCountMutex.
parent 41caedd0da
commit 845bbc293a
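The ordering hazard the message describes, as a minimal sketch (lockA, lockB, and the two task functions are illustrative names, not identifiers from this commit): if core 0 holds lockA and spins on lockB while core 1 holds lockB and spins on lockA, neither can ever proceed. With a single spinlock such a cycle cannot form.

    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    static portMUX_TYPE lockA = portMUX_INITIALIZER_UNLOCKED;
    static portMUX_TYPE lockB = portMUX_INITIALIZER_UNLOCKED;
    static volatile int shared;

    static void vTaskOnCpu0( void *pvArg )      /* pinned to core 0 */
    {
        for( ;; )
        {
            taskENTER_CRITICAL( &lockA );       /* order: A then B */
            taskENTER_CRITICAL( &lockB );       /* spins here if core 1 holds lockB */
            shared++;
            taskEXIT_CRITICAL( &lockB );
            taskEXIT_CRITICAL( &lockA );
        }
    }

    static void vTaskOnCpu1( void *pvArg )      /* pinned to core 1 */
    {
        for( ;; )
        {
            taskENTER_CRITICAL( &lockB );       /* order: B then A, the opposite */
            taskENTER_CRITICAL( &lockA );       /* spins here if core 0 holds lockA */
            shared++;
            taskEXIT_CRITICAL( &lockA );
            taskEXIT_CRITICAL( &lockB );
        }
    }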
@@ -302,10 +302,8 @@ when the scheduler is unsuspended. The pending ready list itself can only be
 accessed from a critical section. */
 PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended[ portNUM_PROCESSORS ] = { ( UBaseType_t ) pdFALSE };
 
-/* For now, we use just one mux for all the critical sections. ToDo: give everything a bit more granularity;
-   that could improve performance by not needlessly spinning in spinlocks for unrelated resources. */
+/* We use just one spinlock for all the critical sections. */
 PRIVILEGED_DATA static portMUX_TYPE xTaskQueueMutex = portMUX_INITIALIZER_UNLOCKED;
-PRIVILEGED_DATA static portMUX_TYPE xTickCountMutex = portMUX_INITIALIZER_UNLOCKED;
 
 #if ( configGENERATE_RUN_TIME_STATS == 1 )
 
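For reference, the portMUX_TYPE critical-section pattern that the kernel now applies uniformly through this one lock; a sketch with hypothetical names (s_myMux, s_counter), equally usable from application code:

    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    static portMUX_TYPE s_myMux = portMUX_INITIALIZER_UNLOCKED;
    static volatile uint32_t s_counter;

    void vIncrementCounter( void )
    {
        /* On this SMP port, taskENTER_CRITICAL() disables interrupts on the
           calling core and takes the spinlock, locking out the other core too. */
        taskENTER_CRITICAL( &s_myMux );
        s_counter++;
        taskEXIT_CRITICAL( &s_myMux );
    }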
@@ -1347,9 +1345,7 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode
 {
 /* Minor optimisation. The tick count cannot change in this
 block. */
-// portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
 const TickType_t xConstTickCount = xTickCount;
-// portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
 
 /* Generate the tick time at which the task wants to wake. */
 xTimeToWake = *pxPreviousWakeTime + xTimeIncrement;
@@ -1456,9 +1452,7 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode
 
 /* Calculate the time to wake - this may overflow but this is
 not a problem. */
-// portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
 xTimeToWake = xTickCount + xTicksToDelay;
-// portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
 
 /* We must remove ourselves from the ready list before adding
 ourselves to the blocked list as the same list item is used for
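The "may overflow but this is not a problem" comment relies on unsigned modular arithmetic; a small standalone illustration, assuming a 32-bit TickType_t as on this port:

    #include <stdint.h>
    #include <assert.h>

    typedef uint32_t TickType_t;    /* stand-in for a 32-bit tick type */

    int main( void )
    {
        TickType_t xTickCount = 0xFFFFFFFEu;    /* 2 ticks before the counter wraps */
        TickType_t xTicksToDelay = 5u;
        TickType_t xTimeToWake = xTickCount + xTicksToDelay;

        assert( xTimeToWake == 3u );            /* wrapped, as expected */
        assert( xTimeToWake < xTickCount );     /* the wrap is detectable, so the
                                                   kernel can place the task on the
                                                   overflow delayed list instead */
        return 0;
    }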
@@ -2198,9 +2192,7 @@ void vTaskSuspendAll( void )
 }
 else
 {
-portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
 xReturn = xNextTaskUnblockTime - xTickCount;
-portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
 }
 taskEXIT_CRITICAL(&xTaskQueueMutex);
 
@@ -2306,31 +2298,13 @@ BaseType_t xAlreadyYielded = pdFALSE;
 
 TickType_t xTaskGetTickCount( void )
 {
-TickType_t xTicks;
-
-/* Critical section required if running on a 16 bit processor. */
-portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
-{
-xTicks = xTickCount;
-}
-portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
-
-return xTicks;
+return xTickCount;
 }
 /*-----------------------------------------------------------*/
 
 TickType_t xTaskGetTickCountFromISR( void )
 {
-TickType_t xReturn;
-
-taskENTER_CRITICAL_ISR(&xTickCountMutex);
-{
-xReturn = xTickCount;
-// vPortCPUReleaseMutex( &xTickCountMutex );
-}
-taskEXIT_CRITICAL_ISR(&xTickCountMutex);
-
-return xReturn;
+return xTickCount;
 }
 /*-----------------------------------------------------------*/
 
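The deleted comment explains why the critical section existed at all: only a processor narrower than TickType_t can tear the read. On this 32-bit port an aligned 32-bit load is atomic, so returning xTickCount directly is safe. Typical caller-side usage, where unsigned subtraction also stays correct across a tick-count wrap:

    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    void vTimeSomething( void )
    {
        const TickType_t xStart = xTaskGetTickCount();  /* plain atomic read now */

        vTaskDelay( pdMS_TO_TICKS( 100 ) );             /* the work being timed */

        /* Unsigned subtraction gives the right elapsed time even if
           xTickCount wrapped between the two reads. */
        const TickType_t xElapsed = xTaskGetTickCount() - xStart;
        ( void ) xElapsed;
    }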
@@ -2465,10 +2439,10 @@ implementations require configUSE_TICKLESS_IDLE to be set to a value other than
 /* Correct the tick count value after a period during which the tick
 was suppressed. Note this does *not* call the tick hook function for
 each stepped tick. */
-portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
+portENTER_CRITICAL( &xTaskQueueMutex );
 configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );
 xTickCount += xTicksToJump;
-portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
+portEXIT_CRITICAL( &xTaskQueueMutex );
 traceINCREASE_TICK_COUNT( xTicksToJump );
 }
 
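This is the path the commit message mentions: during tickless idle the port's sleep hook suppresses the tick interrupt and afterwards calls vTaskStepTick(), which per the hunk above now adjusts xTickCount under xTaskQueueMutex, so the other CPU cannot advance the count mid-sleep. A rough sketch of such a hook, not the actual esp-idf implementation (prvEnterLightSleepUs is a hypothetical helper):

    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    extern uint64_t prvEnterLightSleepUs( uint64_t max_us );    /* hypothetical */

    void vApplicationSleep( TickType_t xExpectedIdleTime )
    {
        const uint64_t us_per_tick = 1000000ULL / configTICK_RATE_HZ;

        /* Sleep with the tick interrupt suppressed; returns time actually slept. */
        uint64_t slept_us = prvEnterLightSleepUs( ( uint64_t ) xExpectedIdleTime * us_per_tick );

        TickType_t xSleptTicks = ( TickType_t ) ( slept_us / us_per_tick );
        if( xSleptTicks > xExpectedIdleTime )
        {
            xSleptTicks = xExpectedIdleTime;    /* never step past the next unblock time */
        }

        vTaskStepTick( xSleptTicks );   /* serialized by xTaskQueueMutex after this commit */
    }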
@@ -2508,14 +2482,10 @@ BaseType_t xSwitchRequired = pdFALSE;
 
 if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
 {
-portTICK_TYPE_ENTER_CRITICAL( &xTickCountMutex );
+taskENTER_CRITICAL_ISR( &xTaskQueueMutex );
 /* Increment the RTOS tick, switching the delayed and overflowed
 delayed lists if it wraps to 0. */
 ++xTickCount;
-portTICK_TYPE_EXIT_CRITICAL( &xTickCountMutex );
-
-//The other CPU may decide to mess with the task queues, so this needs a mux.
-taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
 {
 /* Minor optimisation. The tick count cannot change in this
 block. */
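xTaskIncrementTick runs from the tick interrupt, hence the _ISR critical-section variants above. The same pattern applies to any ISR that shares state across cores; a sketch with hypothetical names (s_isrMux, s_events, my_isr_handler):

    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"
    #include "esp_attr.h"

    static portMUX_TYPE s_isrMux = portMUX_INITIALIZER_UNLOCKED;
    static volatile uint32_t s_events;

    void IRAM_ATTR my_isr_handler( void *pvArg )    /* hypothetical ISR */
    {
        /* The _ISR variants take the same spinlock but are safe to call
           from interrupt context. */
        taskENTER_CRITICAL_ISR( &s_isrMux );
        s_events++;
        taskEXIT_CRITICAL_ISR( &s_isrMux );
    }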
@@ -3280,7 +3250,7 @@ BaseType_t xReturn;
 configASSERT( pxTimeOut );
 configASSERT( pxTicksToWait );
 
-taskENTER_CRITICAL(&xTickCountMutex);
+taskENTER_CRITICAL(&xTaskQueueMutex);
 {
 /* Minor optimisation. The tick count cannot change in this block. */
 const TickType_t xConstTickCount = xTickCount;
@@ -3316,7 +3286,7 @@ BaseType_t xReturn;
 xReturn = pdTRUE;
 }
 }
-taskEXIT_CRITICAL(&xTickCountMutex);
+taskEXIT_CRITICAL(&xTaskQueueMutex);
 
 return xReturn;
 }
@@ -4077,7 +4047,7 @@ TCB_t *pxTCB;
 {
 TCB_t * const pxTCB = ( TCB_t * ) pxMutexHolder;
 
-taskENTER_CRITICAL(&xTickCountMutex);
+taskENTER_CRITICAL(&xTaskQueueMutex);
 /* If the mutex was given back by an interrupt while the queue was
 locked then the mutex holder might now be NULL. */
 if( pxMutexHolder != NULL )
@@ -4134,7 +4104,7 @@ TCB_t *pxTCB;
 mtCOVERAGE_TEST_MARKER();
 }
 
-taskEXIT_CRITICAL(&xTickCountMutex);
+taskEXIT_CRITICAL(&xTaskQueueMutex);
 
 }
 
@@ -4147,7 +4117,7 @@ TCB_t *pxTCB;
 {
 TCB_t * const pxTCB = ( TCB_t * ) pxMutexHolder;
 BaseType_t xReturn = pdFALSE;
-taskENTER_CRITICAL(&xTickCountMutex);
+taskENTER_CRITICAL(&xTaskQueueMutex);
 
 if( pxMutexHolder != NULL )
 {
@@ -4211,7 +4181,7 @@ TCB_t *pxTCB;
 mtCOVERAGE_TEST_MARKER();
 }
 
-taskEXIT_CRITICAL(&xTickCountMutex);
+taskEXIT_CRITICAL(&xTaskQueueMutex);
 return xReturn;
 }
 