freertos(IDF): Use common macros for SMP-specific critical sections

In IDF FreeRTOS, when building for SMP, numerous functions require different
critical sections than their single-core counterparts. This commit encapsulates
those differences in a common set of macros whose behavior depends on
"configNUM_CORES > 1" (see the before/after sketch below). As a result:

- Vanilla behavior is restored for some functions when building for single
  core (i.e., where they used to call taskENTER_CRITICAL, they now suspend the
  scheduler or mask interrupts, matching vanilla behavior).
- The number of "#if ( configNUM_CORES > 1 )" blocks inside functions is reduced.
- Any SMP-only critical sections are now wrapped in
  "#if ( configNUM_CORES > 1 )" and documented via comments.
Darian Leung 2022-11-14 15:27:00 +08:00
parent 4c1ff6016a
commit 087e4318a6
7 changed files with 372 additions and 367 deletions

@ -80,9 +80,7 @@ typedef struct EventGroupDef_t
uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */ uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */
#endif #endif
#ifdef ESP_PLATFORM portMUX_TYPE xEventGroupLock; /* Spinlock required for SMP critical sections */
portMUX_TYPE xEventGroupLock; /* Spinlock required for SMP critical sections */
#endif // ESP_PLATFORM
} EventGroup_t; } EventGroup_t;
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
@ -225,11 +223,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
} }
#endif #endif
#ifdef ESP_PLATFORM /* IDF-3755 */ prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
{ {
uxOriginalBitValue = pxEventBits->uxEventBits; uxOriginalBitValue = pxEventBits->uxEventBits;
@ -272,12 +266,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
} }
} }
} }
#ifdef ESP_PLATFORM /* IDF-3755 */ xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );
taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
xAlreadyYielded = pdFALSE;
#else
xAlreadyYielded = xTaskResumeAll();
#endif // ESP_PLATFORM
if( xTicksToWait != ( TickType_t ) 0 ) if( xTicksToWait != ( TickType_t ) 0 )
{ {
@ -361,11 +350,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
} }
#endif #endif
#ifdef ESP_PLATFORM /* IDF-3755 */ prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
{ {
const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits; const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits;
@ -433,12 +418,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor ); traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
} }
} }
#ifdef ESP_PLATFORM /* IDF-3755 */ xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );
taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
xAlreadyYielded = pdFALSE;
#else
xAlreadyYielded = xTaskResumeAll();
#endif // ESP_PLATFORM
if( xTicksToWait != ( TickType_t ) 0 ) if( xTicksToWait != ( TickType_t ) 0 )
{ {
@ -581,15 +561,14 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
pxList = &( pxEventBits->xTasksWaitingForBits ); pxList = &( pxEventBits->xTasksWaitingForBits );
pxListEnd = listGET_END_MARKER( pxList ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */ pxListEnd = listGET_END_MARKER( pxList ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */
#ifdef ESP_PLATFORM /* IDF-3755 */
taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) ); prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
#if ( configNUM_CORES > 1 )
/* We are about to traverse a task list which is a kernel data structure. /* We are about to traverse a task list which is a kernel data structure.
* Thus we need to call vTaskTakeKernelLock() to take the kernel lock. */ * Thus we need to call vTaskTakeKernelLock() to take the kernel lock. */
vTaskTakeKernelLock(); vTaskTakeKernelLock();
#else #endif /* configNUM_CORES > 1 */
vTaskSuspendAll();
#endif // ESP_PLATFORM
{ {
traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet ); traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );
@ -661,13 +640,11 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
* bit was set in the control word. */ * bit was set in the control word. */
pxEventBits->uxEventBits &= ~uxBitsToClear; pxEventBits->uxEventBits &= ~uxBitsToClear;
} }
#ifdef ESP_PLATFORM /* IDF-3755 */ #if ( configNUM_CORES > 1 )
/* Release the previously taken kernel lock, then release the event group spinlock. */ /* Release the previously taken kernel lock. */
vTaskReleaseKernelLock(); vTaskReleaseKernelLock();
taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) ); #endif /* configNUM_CORES > 1 */
#else ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
return pxEventBits->uxEventBits; return pxEventBits->uxEventBits;
} }
@ -678,18 +655,16 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup )
EventGroup_t * pxEventBits = xEventGroup; EventGroup_t * pxEventBits = xEventGroup;
const List_t * pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits ); const List_t * pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );
prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
#if ( configNUM_CORES > 1 )
/* We are about to traverse a task list which is a kernel data structure.
* Thus we need to call vTaskTakeKernelLock() to take the kernel lock. */
vTaskTakeKernelLock();
#endif /* configNUM_CORES > 1 */
{ {
traceEVENT_GROUP_DELETE( xEventGroup ); traceEVENT_GROUP_DELETE( xEventGroup );
/* IDF-3755 */
taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
#ifdef ESP_PLATFORM
/* We are about to traverse a task list which is a kernel data structure.
* Thus we need to call vTaskTakeKernelLock() to take the kernel lock. */
vTaskTakeKernelLock();
#endif
while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 ) while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 )
{ {
/* Unblock the task, returning 0 as the event list is being deleted /* Unblock the task, returning 0 as the event list is being deleted
@ -697,34 +672,33 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup )
configASSERT( pxTasksWaitingForBits->xListEnd.pxNext != ( const ListItem_t * ) &( pxTasksWaitingForBits->xListEnd ) ); configASSERT( pxTasksWaitingForBits->xListEnd.pxNext != ( const ListItem_t * ) &( pxTasksWaitingForBits->xListEnd ) );
vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET ); vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
} }
}
#if ( configNUM_CORES > 1 )
/* Release the previously taken kernel lock. */
vTaskReleaseKernelLock();
#endif /* configNUM_CORES > 1 */
prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );
#ifdef ESP_PLATFORM #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
/* Release the previously taken kernel lock. */ {
vTaskReleaseKernelLock(); /* The event group can only have been allocated dynamically - free
#endif * it again. */
taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) ); vPortFree( pxEventBits );
}
#if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) ) #elif ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
{
/* The event group could have been allocated statically or
* dynamically, so check before attempting to free the memory. */
if( pxEventBits->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
{ {
/* The event group can only have been allocated dynamically - free
* it again. */
vPortFree( pxEventBits ); vPortFree( pxEventBits );
} }
#elif ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) else
{ {
/* The event group could have been allocated statically or mtCOVERAGE_TEST_MARKER();
* dynamically, so check before attempting to free the memory. */
if( pxEventBits->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
{
vPortFree( pxEventBits );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
} }
#endif /* configSUPPORT_DYNAMIC_ALLOCATION */ }
} #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
} }
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/

@ -3407,6 +3407,32 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) PRIVILEGED_FUNCTION;
*----------------------------------------------------------*/ *----------------------------------------------------------*/
/** @cond !DOC_EXCLUDE_HEADER_SECTION */ /** @cond !DOC_EXCLUDE_HEADER_SECTION */
/*
* Various convenience macros for critical sections and scheduler suspension
* called by other FreeRTOS sources and not meant to be called by the
* application. The behavior of each macro depends on whether FreeRTOS is
* currently configured for SMP or single core.
*/
#if ( configNUM_CORES > 1 )
#define prvENTER_CRITICAL_OR_SUSPEND_ALL( x ) taskENTER_CRITICAL( ( x ) )
#define prvEXIT_CRITICAL_OR_RESUME_ALL( x ) ( { taskEXIT_CRITICAL( ( x ) ); pdFALSE; } )
#define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxInterruptStatus ) \
taskENTER_CRITICAL_ISR( ( pxLock ) ); \
( void ) ( uxInterruptStatus );
#define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxInterruptStatus ) \
taskEXIT_CRITICAL_ISR( ( pxLock ) ); \
( void ) ( uxInterruptStatus );
#else /* configNUM_CORES > 1 */
#define prvENTER_CRITICAL_OR_SUSPEND_ALL( x ) ( { vTaskSuspendAll(); ( void ) ( x ); } )
#define prvEXIT_CRITICAL_OR_RESUME_ALL( x ) xTaskResumeAll()
#define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxInterruptStatus ) \
( uxSavedInterruptStatus ) = portSET_INTERRUPT_MASK_FROM_ISR(); \
( void ) ( pxLock );
#define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxInterruptStatus ) \
portCLEAR_INTERRUPT_MASK_FROM_ISR( ( uxSavedInterruptStatus ) ); \
( void ) ( pxLock );
#endif /* configNUM_CORES > 1 */
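
As a usage sketch (not part of the diff), a FromISR-style function is expected to call the ISR variants as shown below. vExampleFromISR and xExampleLock are hypothetical names; the declaration of uxSavedInterruptStatus mirrors the queue.c and tasks.c callers changed in this commit:

    static portMUX_TYPE xExampleLock = portMUX_INITIALIZER_UNLOCKED; /* Hypothetical spinlock */

    void vExampleFromISR( void )
    {
        UBaseType_t uxSavedInterruptStatus; /* Only consumed by the single-core expansion (saved interrupt mask) */

        prvENTER_CRITICAL_OR_MASK_ISR( &xExampleLock, uxSavedInterruptStatus );
        {
            /* ... access data shared with tasks or the other core ... */
        }
        prvEXIT_CRITICAL_OR_UNMASK_ISR( &xExampleLock, uxSavedInterruptStatus );
    }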
/* /*
* Return the handle of the task running on a certain CPU. Because of * Return the handle of the task running on a certain CPU. Because of
* the nature of SMP processing, there is no guarantee that this * the nature of SMP processing, there is no guarantee that this
@ -3519,6 +3545,8 @@ void vTaskPlaceOnEventListRestricted( List_t * const pxEventList,
TickType_t xTicksToWait, TickType_t xTicksToWait,
const BaseType_t xWaitIndefinitely ) PRIVILEGED_FUNCTION; const BaseType_t xWaitIndefinitely ) PRIVILEGED_FUNCTION;
#if ( configNUM_CORES > 1 )
/* /*
* THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN
* INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER. * INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER.
@ -3533,8 +3561,9 @@ void vTaskPlaceOnEventListRestricted( List_t * const pxEventList,
* of delegating the entire responsibility to one of vTask...EventList() * of delegating the entire responsibility to one of vTask...EventList()
* functions). * functions).
*/ */
void vTaskTakeKernelLock( void ); void vTaskTakeKernelLock( void );
void vTaskReleaseKernelLock( void ); void vTaskReleaseKernelLock( void );
#endif /* configNUM_CORES > 1 */
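
For reference, the intended calling pattern (condensed from the xEventGroupSetBits() changes earlier in this commit) combines the common macros above with the kernel lock whenever a task list is about to be traversed:

    prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
    #if ( configNUM_CORES > 1 )
        /* About to traverse a task list (a kernel data structure), so also take the kernel lock. */
        vTaskTakeKernelLock();
    #endif /* configNUM_CORES > 1 */
    {
        /* ... walk pxEventBits->xTasksWaitingForBits ... */
    }
    #if ( configNUM_CORES > 1 )
        /* Release the previously taken kernel lock. */
        vTaskReleaseKernelLock();
    #endif /* configNUM_CORES > 1 */
    ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );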
/* /*
* THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN

@ -447,6 +447,13 @@ FORCE_INLINE_ATTR BaseType_t xPortGetCoreID(void);
#define portASSERT_IF_IN_ISR() vPortAssertIfInISR() #define portASSERT_IF_IN_ISR() vPortAssertIfInISR()
/**
* @brief Used by FreeRTOS functions to call the correct version of critical section API
*/
#if ( configNUM_CORES > 1 )
#define portCHECK_IF_IN_ISR() xPortInIsrContext()
#endif
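
A brief sketch of how this macro is used, mirroring the queue set and xTaskRemoveFromEventList() hunks below; xSomeLock stands in for whichever spinlock is being taken:

    #if ( configNUM_CORES > 1 )
        /* Pick the task or ISR flavour of the critical section API depending on context. */
        if( portCHECK_IF_IN_ISR() == pdFALSE )
        {
            taskENTER_CRITICAL( &xSomeLock );
        }
        else
        {
            taskENTER_CRITICAL_ISR( &xSomeLock );
        }
    #endif /* configNUM_CORES > 1 */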
// ------------------ Critical Sections -------------------- // ------------------ Critical Sections --------------------
/** /**

@ -1095,12 +1095,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
* read, instead return a flag to say whether a context switch is required or * read, instead return a flag to say whether a context switch is required or
* not (i.e. has a task with a higher priority than us been woken by this * not (i.e. has a task with a higher priority than us been woken by this
* post). */ * post). */
#if ( configNUM_CORES > 1 ) prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
( void ) uxSavedInterruptStatus;
#else
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
#endif
{ {
if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) ) if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
{ {
@ -1236,11 +1231,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
xReturn = errQUEUE_FULL; xReturn = errQUEUE_FULL;
} }
} }
#if ( configNUM_CORES > 1 ) prvEXIT_CRITICAL_OR_UNMASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
#else
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
#endif
return xReturn; return xReturn;
} }
@ -1286,12 +1277,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
* link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
#if ( configNUM_CORES > 1 ) prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
( void ) uxSavedInterruptStatus;
#else
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
#endif
{ {
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
@ -1422,11 +1408,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
xReturn = errQUEUE_FULL; xReturn = errQUEUE_FULL;
} }
} }
#if ( configNUM_CORES > 1 ) prvEXIT_CRITICAL_OR_UNMASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
#else
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
#endif
return xReturn; return xReturn;
} }
@ -2094,12 +2076,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
* link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
#if ( configNUM_CORES > 1 ) prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
( void ) uxSavedInterruptStatus;
#else
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
#endif
{ {
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
@ -2170,12 +2147,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ); traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
} }
} }
#if ( configNUM_CORES > 1 ) prvEXIT_CRITICAL_OR_UNMASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
#else
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
#endif
return xReturn; return xReturn;
} }
@ -2209,12 +2181,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
* link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
#if ( configNUM_CORES > 1 ) prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
( void ) uxSavedInterruptStatus;
#else
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
#endif
{ {
/* Cannot block in an ISR, so check there is data available. */ /* Cannot block in an ISR, so check there is data available. */
if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
@ -2235,12 +2202,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue ); traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
} }
} }
#if ( configNUM_CORES > 1 ) prvEXIT_CRITICAL_OR_UNMASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
#else
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
#endif
return xReturn; return xReturn;
} }
@ -3269,8 +3231,21 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
configASSERT( pxQueueSetContainer ); configASSERT( pxQueueSetContainer );
configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength ); configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );
/* We need to also acquire the queue set's spinlock as well. */ #if ( configNUM_CORES > 1 )
taskENTER_CRITICAL( &( pxQueueSetContainer->xQueueLock ) );
/* In SMP, queue sets have their own spinlock. Thus we need to also
* acquire the queue set's spinlock before accessing it. This
* function can also be called from an ISR context, so we need to
* check whether we are in an ISR. */
if( portCHECK_IF_IN_ISR() == pdFALSE )
{
taskENTER_CRITICAL( &( pxQueueSetContainer->xQueueLock ) );
}
else
{
taskENTER_CRITICAL_ISR( &( pxQueueSetContainer->xQueueLock ) );
}
#endif /* configNUM_CORES > 1 */
if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength ) if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
{ {
@ -3321,8 +3296,17 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
/* Release the previously acquired queue set's spinlock. */ #if ( configNUM_CORES > 1 )
taskEXIT_CRITICAL( &( pxQueueSetContainer->xQueueLock ) ); /* Release the previously acquired queue set's spinlock. */
if( portCHECK_IF_IN_ISR() == pdFALSE )
{
taskEXIT_CRITICAL( &( pxQueueSetContainer->xQueueLock ) );
}
else
{
taskEXIT_CRITICAL_ISR( &( pxQueueSetContainer->xQueueLock ) );
}
#endif /* configNUM_CORES > 1 */
return xReturn; return xReturn;
} }

@ -60,35 +60,19 @@
* or #defined the notification macros away, then provide default implementations * or #defined the notification macros away, then provide default implementations
* that uses task notifications. */ * that uses task notifications. */
/*lint -save -e9026 Function like macros allowed and needed here so they can be overridden. */ /*lint -save -e9026 Function like macros allowed and needed here so they can be overridden. */
#ifndef sbRECEIVE_COMPLETED #ifndef sbRECEIVE_COMPLETED
#ifdef ESP_PLATFORM /* IDF-3775 */ #define sbRECEIVE_COMPLETED( pxStreamBuffer ) \
#define sbRECEIVE_COMPLETED( pxStreamBuffer ) \ prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxStreamBuffer->xStreamBufferLock ) ); \
taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) ); \ { \
{ \ if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \
if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \ { \
{ \ ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToSend, \
( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToSend, \ ( uint32_t ) 0, \
( uint32_t ) 0, \ eNoAction ); \
eNoAction ); \ ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \
( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ } \
} \ } \
} \ ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxStreamBuffer->xStreamBufferLock ) );
taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
#else /* ifdef ESP_PLATFORM */
#define sbRECEIVE_COMPLETED( pxStreamBuffer ) \
vTaskSuspendAll(); \
{ \
if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \
{ \
( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToSend, \
( uint32_t ) 0, \
eNoAction ); \
( pxStreamBuffer )->xTaskWaitingToSend = NULL; \
} \
} \
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
#endif /* sbRECEIVE_COMPLETED */ #endif /* sbRECEIVE_COMPLETED */
#ifndef sbRECEIVE_COMPLETED_FROM_ISR #ifndef sbRECEIVE_COMPLETED_FROM_ISR
@ -116,33 +100,18 @@
* or #defined the notification macro away, them provide a default implementation * or #defined the notification macro away, them provide a default implementation
* that uses task notifications. */ * that uses task notifications. */
#ifndef sbSEND_COMPLETED #ifndef sbSEND_COMPLETED
#ifdef ESP_PLATFORM /* IDF-3755 */ #define sbSEND_COMPLETED( pxStreamBuffer ) \
#define sbSEND_COMPLETED( pxStreamBuffer ) \ prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxStreamBuffer->xStreamBufferLock ) ); \
taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) ); \ { \
{ \ if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \
if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \ { \
{ \ ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToReceive, \
( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToReceive, \ ( uint32_t ) 0, \
( uint32_t ) 0, \ eNoAction ); \
eNoAction ); \ ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \
( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ } \
} \ } \
} \ ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxStreamBuffer->xStreamBufferLock ) );
taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
#else /* ifdef ESP_PLATFORM */
#define sbSEND_COMPLETED( pxStreamBuffer ) \
vTaskSuspendAll(); \
{ \
if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \
{ \
( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToReceive, \
( uint32_t ) 0, \
eNoAction ); \
( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \
} \
} \
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
#endif /* sbSEND_COMPLETED */ #endif /* sbSEND_COMPLETED */
#ifndef sbSEND_COMPLETE_FROM_ISR #ifndef sbSEND_COMPLETE_FROM_ISR
@ -309,7 +278,6 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
pucAllocatedMemory = NULL; pucAllocatedMemory = NULL;
} }
if( pucAllocatedMemory != NULL ) if( pucAllocatedMemory != NULL )
{ {
prvInitialiseNewStreamBuffer( ( StreamBuffer_t * ) pucAllocatedMemory, /* Structure at the start of the allocated memory. */ /*lint !e9087 Safe cast as allocated memory is aligned. */ /*lint !e826 Area is not too small and alignment is guaranteed provided malloc() behaves as expected and returns aligned buffer. */ prvInitialiseNewStreamBuffer( ( StreamBuffer_t * ) pucAllocatedMemory, /* Structure at the start of the allocated memory. */ /*lint !e9087 Safe cast as allocated memory is aligned. */ /*lint !e826 Area is not too small and alignment is guaranteed provided malloc() behaves as expected and returns aligned buffer. */

@ -403,12 +403,9 @@ PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /*< Poi
PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */ PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
PRIVILEGED_DATA static List_t xPendingReadyList[ configNUM_CORES ]; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */ PRIVILEGED_DATA static List_t xPendingReadyList[ configNUM_CORES ]; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */
#ifdef ESP_PLATFORM
/* Spinlock required for SMP critical sections. This lock protects all of the /* Spinlock required for SMP critical sections. This lock protects all of the
* kernel's data structures such as various tasks lists, flags, and tick counts. */ * kernel's data structures such as various tasks lists, flags, and tick counts. */
PRIVILEGED_DATA static portMUX_TYPE xKernelLock = portMUX_INITIALIZER_UNLOCKED; PRIVILEGED_DATA static portMUX_TYPE xKernelLock = portMUX_INITIALIZER_UNLOCKED;
#endif // ESP_PLATFORM
#if ( INCLUDE_vTaskDelete == 1 ) #if ( INCLUDE_vTaskDelete == 1 )
@ -1537,11 +1534,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
configASSERT( ( xTimeIncrement > 0U ) ); configASSERT( ( xTimeIncrement > 0U ) );
configASSERT( xTaskGetSchedulerState() != taskSCHEDULER_SUSPENDED ); configASSERT( xTaskGetSchedulerState() != taskSCHEDULER_SUSPENDED );
#ifdef ESP_PLATFORM /* IDF-3755 */ prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock );
taskENTER_CRITICAL( &xKernelLock );
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
{ {
/* Minor optimisation. The tick count cannot change in this /* Minor optimisation. The tick count cannot change in this
* block. */ * block. */
@ -1597,12 +1590,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
#ifdef ESP_PLATFORM /* IDF-3755 */ xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock );
taskEXIT_CRITICAL( &xKernelLock );
xAlreadyYielded = pdFALSE;
#else
xAlreadyYielded = xTaskResumeAll();
#endif // ESP_PLATFORM
/* Force a reschedule if xTaskResumeAll has not already done so, we may /* Force a reschedule if xTaskResumeAll has not already done so, we may
* have put ourselves to sleep. */ * have put ourselves to sleep. */
@ -1631,11 +1619,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
if( xTicksToDelay > ( TickType_t ) 0U ) if( xTicksToDelay > ( TickType_t ) 0U )
{ {
configASSERT( xTaskGetSchedulerState() != taskSCHEDULER_SUSPENDED ); configASSERT( xTaskGetSchedulerState() != taskSCHEDULER_SUSPENDED );
#ifdef ESP_PLATFORM /* IDF-3755 */ prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock );
taskENTER_CRITICAL( &xKernelLock );
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
{ {
traceTASK_DELAY(); traceTASK_DELAY();
@ -1648,12 +1632,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
* executing task. */ * executing task. */
prvAddCurrentTaskToDelayedList( xTicksToDelay, pdFALSE ); prvAddCurrentTaskToDelayedList( xTicksToDelay, pdFALSE );
} }
#ifdef ESP_PLATFORM /* IDF-3755 */ xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock );
taskEXIT_CRITICAL( &xKernelLock );
xAlreadyYielded = pdFALSE;
#else
xAlreadyYielded = xTaskResumeAll();
#endif // ESP_PLATFORM
} }
else else
{ {
@ -2836,11 +2815,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
/* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */ /* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */
configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN ); configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN );
#ifdef ESP_PLATFORM /* IDF-3755 */ prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock );
taskENTER_CRITICAL( &xKernelLock );
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
{ {
/* Search the ready lists. */ /* Search the ready lists. */
do do
@ -2886,11 +2861,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
} }
#endif #endif
} }
#ifdef ESP_PLATFORM /* IDF-3755 */ ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock );
taskEXIT_CRITICAL( &xKernelLock );
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
return pxTCB; return pxTCB;
} }
@ -2906,11 +2877,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
{ {
UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES; UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES;
#ifdef ESP_PLATFORM /* IDF-3755 */ prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock );
taskENTER_CRITICAL( &xKernelLock );
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
{ {
/* Is there a space in the array for each task in the system? */ /* Is there a space in the array for each task in the system? */
if( uxArraySize >= uxCurrentNumberOfTasks ) if( uxArraySize >= uxCurrentNumberOfTasks )
@ -2969,11 +2936,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
#ifdef ESP_PLATFORM /* IDF-3755 */ ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock );
taskEXIT_CRITICAL( &xKernelLock );
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
return uxTask; return uxTask;
} }
@ -3008,10 +2971,12 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
void vTaskStepTick( const TickType_t xTicksToJump ) void vTaskStepTick( const TickType_t xTicksToJump )
{ {
#ifdef ESP_PLATFORM #if ( configNUM_CORES > 1 )
/* For SMP, we require a critical section to access xTickCount */
/* Although this is called with the scheduler suspended. For SMP, we
* still need to take the kernel lock to access xTickCount. */
taskENTER_CRITICAL( &xKernelLock ); taskENTER_CRITICAL( &xKernelLock );
#endif #endif /* configNUM_CORES > 1 */
/* Correct the tick count value after a period during which the tick /* Correct the tick count value after a period during which the tick
* was suppressed. Note this does *not* call the tick hook function for * was suppressed. Note this does *not* call the tick hook function for
@ -3019,9 +2984,11 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime ); configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );
xTickCount += xTicksToJump; xTickCount += xTicksToJump;
traceINCREASE_TICK_COUNT( xTicksToJump ); traceINCREASE_TICK_COUNT( xTicksToJump );
#ifdef ESP_PLATFORM
#if ( configNUM_CORES > 1 )
/* Release the previously taken kernel lock. */
taskEXIT_CRITICAL( &xKernelLock ); taskEXIT_CRITICAL( &xKernelLock );
#endif #endif /* configNUM_CORES > 1 */
} }
#endif /* configUSE_TICKLESS_IDLE */ #endif /* configUSE_TICKLESS_IDLE */
@ -3042,16 +3009,17 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
/* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occurring when /* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occurring when
* the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */ * the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */
vTaskSuspendAll(); vTaskSuspendAll();
#ifdef ESP_PLATFORM #if ( configNUM_CORES > 1 )
/* For SMP, we still require a critical section to access xPendedTicks even /* Although the scheduler is suspended. For SMP, we still need to take
* if the scheduler is disabled. */ * the kernel lock to access xPendedTicks. */
taskENTER_CRITICAL( &xKernelLock ); taskENTER_CRITICAL( &xKernelLock );
xPendedTicks += xTicksToCatchUp; #endif /* configNUM_CORES > 1 */
xPendedTicks += xTicksToCatchUp;
#if ( configNUM_CORES > 1 )
/* Release the previously taken kernel lock. */
taskEXIT_CRITICAL( &xKernelLock ); taskEXIT_CRITICAL( &xKernelLock );
#else // ESP_PLATFORM #endif /* configNUM_CORES > 1 */
xPendedTicks += xTicksToCatchUp;
#endif // ESP_PLATFORM
xYieldOccurred = xTaskResumeAll(); xYieldOccurred = xTaskResumeAll();
return xYieldOccurred; return xYieldOccurred;
@ -3067,11 +3035,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
configASSERT( pxTCB ); configASSERT( pxTCB );
#ifdef ESP_PLATFORM /* IDF-3755 */ prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock );
taskENTER_CRITICAL( &xKernelLock );
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
{ {
/* A task can only be prematurely removed from the Blocked state if /* A task can only be prematurely removed from the Blocked state if
* it is actually in the Blocked state. */ * it is actually in the Blocked state. */
@ -3134,11 +3098,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
xReturn = pdFAIL; xReturn = pdFAIL;
} }
} }
#ifdef ESP_PLATFORM /* IDF-3755 */ ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock );
taskEXIT_CRITICAL( &xKernelLock );
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
return xReturn; return xReturn;
} }
@ -3508,26 +3468,18 @@ BaseType_t xTaskIncrementTick( void )
{ {
TCB_t * pxTCB; TCB_t * pxTCB;
TaskHookFunction_t xReturn; TaskHookFunction_t xReturn;
UBaseType_t uxSavedInterruptStatus;
/* If xTask is NULL then set the calling task's hook. */ /* If xTask is NULL then set the calling task's hook. */
pxTCB = prvGetTCBFromHandle( xTask ); pxTCB = prvGetTCBFromHandle( xTask );
/* Save the hook function in the TCB. A critical section is required as /* Save the hook function in the TCB. A critical section is required as
* the value can be accessed from an interrupt. */ * the value can be accessed from an interrupt. */
#if ( configNUM_CORES > 1 ) prvENTER_CRITICAL_OR_MASK_ISR( &xKernelLock, uxSavedInterruptStatus );
taskENTER_CRITICAL_ISR( &xKernelLock );
#else
UBaseType_t uxSavedInterruptStatus;
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
#endif
{ {
xReturn = pxTCB->pxTaskTag; xReturn = pxTCB->pxTaskTag;
} }
#if ( configNUM_CORES > 1 ) prvEXIT_CRITICAL_OR_UNMASK_ISR( &xKernelLock, uxSavedInterruptStatus );
taskEXIT_CRITICAL_ISR( &xKernelLock );
#else
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
#endif
return xReturn; return xReturn;
} }
@ -3768,8 +3720,12 @@ void vTaskPlaceOnEventList( List_t * const pxEventList,
{ {
configASSERT( pxEventList ); configASSERT( pxEventList );
/* Take the kernel lock as we are about to access the task lists. */ #if ( configNUM_CORES > 1 )
taskENTER_CRITICAL( &xKernelLock );
/* In SMP, we need to take the kernel lock as we are about to access the
* task lists. */
taskENTER_CRITICAL( &xKernelLock );
#endif /* configNUM_CORES > 1 */
/* THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED OR THE /* THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED OR THE
* SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */ * SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */
@ -3782,7 +3738,10 @@ void vTaskPlaceOnEventList( List_t * const pxEventList,
prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
taskEXIT_CRITICAL( &xKernelLock ); #if ( configNUM_CORES > 1 )
/* Release the previously taken kernel lock. */
taskEXIT_CRITICAL( &xKernelLock );
#endif /* configNUM_CORES > 1 */
} }
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
@ -3792,14 +3751,18 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
{ {
configASSERT( pxEventList ); configASSERT( pxEventList );
/* Take the kernel lock as we are about to access the task lists. */ #if ( configNUM_CORES > 1 )
taskENTER_CRITICAL( &xKernelLock );
/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by /* In SMP, the event groups haven't suspended the scheduler at this
* the event groups implementation. */ * point. We need to take the kernel lock instead as we are about to
* access the task lists. */
taskENTER_CRITICAL( &xKernelLock );
#else /* configNUM_CORES > 1 */
/* Note. We currently don't always suspend the scheduler. Todo: IDF-3755 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
* configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] != 0 ); */ * the event groups implementation. */
configASSERT( uxSchedulerSuspended[ 0 ] != 0 );
#endif /* configNUM_CORES > 1 */
/* Store the item value in the event list item. It is safe to access the /* Store the item value in the event list item. It is safe to access the
* event list item here as interrupts won't access the event list item of a * event list item here as interrupts won't access the event list item of a
@ -3815,7 +3778,10 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
taskEXIT_CRITICAL( &xKernelLock ); #if ( configNUM_CORES > 1 )
/* Release the previously taken kernel lock. */
taskEXIT_CRITICAL( &xKernelLock );
#endif /* configNUM_CORES > 1 */
} }
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
@ -3827,8 +3793,12 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
{ {
configASSERT( pxEventList ); configASSERT( pxEventList );
/* Take the kernel lock as we are about to access the task lists. */ #if ( configNUM_CORES > 1 )
taskENTER_CRITICAL( &xKernelLock );
/* In SMP, we need to take the kernel lock as we are about to access
* the task lists. */
taskENTER_CRITICAL( &xKernelLock );
#endif /* configNUM_CORES > 1 */
/* This function should not be called by application code hence the /* This function should not be called by application code hence the
* 'Restricted' in its name. It is not part of the public API. It is * 'Restricted' in its name. It is not part of the public API. It is
@ -3853,7 +3823,10 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) ); traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) );
prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely ); prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely );
taskEXIT_CRITICAL( &xKernelLock ); #if ( configNUM_CORES > 1 )
/* Release the previously taken kernel lock. */
taskEXIT_CRITICAL( &xKernelLock );
#endif /* configNUM_CORES > 1 */
} }
#endif /* configUSE_TIMERS */ #endif /* configUSE_TIMERS */
@ -3865,12 +3838,24 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
BaseType_t xReturn; BaseType_t xReturn;
/* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
* called from a critical section within an ISR. * called from a critical section within an ISR. */
*
* However, we still need to take the kernel lock as we are about to access #if ( configNUM_CORES > 1 )
* kernel data structures. Note that we use the ISR version of the macro as
* this function could be called from an ISR critical section. */ /* In SMP, we need to take the kernel lock (even if the caller is
taskENTER_CRITICAL_ISR( &xKernelLock ); * already in a critical section by taking a different lock) as we are
* about to access the task lists, which are protected by the kernel
* lock. This function can also be called from an ISR context, so we
* need to check whether we are in an ISR.*/
if( portCHECK_IF_IN_ISR() == pdFALSE )
{
taskENTER_CRITICAL( &xKernelLock );
}
else
{
taskENTER_CRITICAL_ISR( &xKernelLock );
}
#endif /* configNUM_CORES > 1 */
{ {
/* Before taking the kernel lock, another task/ISR could have already /* Before taking the kernel lock, another task/ISR could have already
* emptied the pxEventList. So we insert a check here to see if * emptied the pxEventList. So we insert a check here to see if
@ -3965,13 +3950,23 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
xReturn = pdFALSE; xReturn = pdFALSE;
} }
} }
taskEXIT_CRITICAL_ISR( &xKernelLock ); #if ( configNUM_CORES > 1 )
/* Release the previously taken kernel lock. */
if( portCHECK_IF_IN_ISR() == pdFALSE )
{
taskEXIT_CRITICAL( &xKernelLock );
}
else
{
taskEXIT_CRITICAL_ISR( &xKernelLock );
}
#endif /* configNUM_CORES > 1 */
return xReturn; return xReturn;
} }
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
#ifdef ESP_PLATFORM #if ( configNUM_CORES > 1 )
void vTaskTakeKernelLock( void ) void vTaskTakeKernelLock( void )
{ {
/* We call the tasks.c critical section macro to take xKernelLock */ /* We call the tasks.c critical section macro to take xKernelLock */
@ -3983,7 +3978,7 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
/* We call the tasks.c critical section macro to release xKernelLock */ /* We call the tasks.c critical section macro to release xKernelLock */
taskEXIT_CRITICAL( &xKernelLock ); taskEXIT_CRITICAL( &xKernelLock );
} }
#endif // ESP_PLATFORM #endif /* configNUM_CORES > 1 */
void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
const TickType_t xItemValue ) const TickType_t xItemValue )
@ -3991,14 +3986,17 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
TCB_t * pxUnblockedTCB; TCB_t * pxUnblockedTCB;
BaseType_t xCurCoreID = xPortGetCoreID(); BaseType_t xCurCoreID = xPortGetCoreID();
/* THIS FUNCTION MUST BE CALLED WITH THE KERNEL LOCK ALREADY TAKEN. #if ( configNUM_CORES > 1 )
* It is used by the event flags implementation, thus those functions
* should call vTaskTakeKernelLock() before calling this function. */
/* /* THIS FUNCTION MUST BE CALLED WITH THE KERNEL LOCK ALREADY TAKEN.
* Todo: IDF-5785 * It is used by the event flags implementation, thus those functions
* configASSERT( uxSchedulerSuspended[ xCurCoreID ] != pdFALSE ); * should call vTaskTakeKernelLock() before calling this function. */
*/ #else /* configNUM_CORES > 1 */
/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
* the event flags implementation. */
configASSERT( uxSchedulerSuspended != pdFALSE );
#endif /* configNUM_CORES > 1 */
/* Store the new item value in the event list. */ /* Store the new item value in the event list. */
listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE ); listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
@ -4066,18 +4064,19 @@ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )
* On a single core configuration, this problem doesn't appear as this function is meant to be called from * On a single core configuration, this problem doesn't appear as this function is meant to be called from
* a critical section, disabling the (tick) interrupts. * a critical section, disabling the (tick) interrupts.
*/ */
#if ( ( ESP_PLATFORM == 1 ) && ( configNUM_CORES > 1 ) ) #if ( configNUM_CORES > 1 )
configASSERT( pxTimeOut ); configASSERT( pxTimeOut );
taskENTER_CRITICAL( &xKernelLock ); taskENTER_CRITICAL( &xKernelLock );
#endif // ( ( ESP_PLATFORM == 1 ) && ( configNUM_CORES > 1 ) ) #endif /* configNUM_CORES > 1 */
/* For internal use only as it does not use a critical section. */ /* For internal use only as it does not use a critical section. */
pxTimeOut->xOverflowCount = xNumOfOverflows; pxTimeOut->xOverflowCount = xNumOfOverflows;
pxTimeOut->xTimeOnEntering = xTickCount; pxTimeOut->xTimeOnEntering = xTickCount;
#if ( ( ESP_PLATFORM == 1 ) && ( configNUM_CORES > 1 ) ) #if ( configNUM_CORES > 1 )
/* Release the previously taken kernel lock. */
taskEXIT_CRITICAL( &xKernelLock ); taskEXIT_CRITICAL( &xKernelLock );
#endif // ( ( ESP_PLATFORM == 1 ) && ( configNUM_CORES > 1 ) ) #endif /* configNUM_CORES > 1 */
} }
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
@ -4288,11 +4287,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP ) if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
{ {
#ifdef ESP_PLATFORM /* IDF-3755 */ prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock );
taskENTER_CRITICAL( &xKernelLock );
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
{ {
/* Now the scheduler is suspended, the expected idle /* Now the scheduler is suspended, the expected idle
* time can be sampled again, and this time its value can * time can be sampled again, and this time its value can
@ -4316,11 +4311,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
#ifdef ESP_PLATFORM /* IDF-3755 */ ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock );
taskEXIT_CRITICAL( &xKernelLock );
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
} }
else else
{ {
@ -4389,11 +4380,22 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
{ {
taskENTER_CRITICAL( &xKernelLock ); #if ( configNUM_CORES > 1 )
/* For SMP, we need to take the kernel lock here as we
* another core could also update this task's TLSP at the
* same time. */
taskENTER_CRITICAL( &xKernelLock );
#endif /* ( configNUM_CORES > 1 ) */
pxTCB = prvGetTCBFromHandle( xTaskToSet ); pxTCB = prvGetTCBFromHandle( xTaskToSet );
pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue; pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
pxTCB->pvThreadLocalStoragePointersDelCallback[ xIndex ] = xDelCallback; pxTCB->pvThreadLocalStoragePointersDelCallback[ xIndex ] = xDelCallback;
taskEXIT_CRITICAL( &xKernelLock );
#if ( configNUM_CORES > 1 )
/* Release the previously taken kernel lock. */
taskEXIT_CRITICAL( &xKernelLock );
#endif /* configNUM_CORES > 1 */
} }
} }
@ -4414,10 +4416,22 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
{ {
taskENTER_CRITICAL( &xKernelLock ); #if ( configNUM_CORES > 1 )
/* For SMP, we need to take the kernel lock here as we
* another core could also update this task's TLSP at the
* same time. */
taskENTER_CRITICAL( &xKernelLock );
#endif /* ( configNUM_CORES > 1 ) */
pxTCB = prvGetTCBFromHandle( xTaskToSet ); pxTCB = prvGetTCBFromHandle( xTaskToSet );
configASSERT( pxTCB != NULL );
pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue; pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
taskEXIT_CRITICAL( &xKernelLock );
#if ( configNUM_CORES > 1 )
/* Release the previously taken kernel lock. */
taskEXIT_CRITICAL( &xKernelLock );
#endif /* configNUM_CORES > 1 */
} }
} }
#endif /* configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS == 1 */ #endif /* configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS == 1 */
@ -4634,22 +4648,14 @@ static void prvCheckTasksWaitingTermination( void )
* it should be reported as being in the Blocked state. */ * it should be reported as being in the Blocked state. */
if( eState == eSuspended ) if( eState == eSuspended )
{ {
#ifdef ESP_PLATFORM /* IDF-3755 */ prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock );
taskENTER_CRITICAL( &xKernelLock );
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
{ {
if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
{ {
pxTaskStatus->eCurrentState = eBlocked; pxTaskStatus->eCurrentState = eBlocked;
} }
} }
#ifdef ESP_PLATFORM /* IDF-3755 */ ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock );
taskEXIT_CRITICAL( &xKernelLock );
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
} }
} }
#endif /* INCLUDE_vTaskSuspend */ #endif /* INCLUDE_vTaskSuspend */
@ -5006,7 +5012,12 @@ static void prvResetNextTaskUnblockTime( void )
TCB_t * const pxMutexHolderTCB = pxMutexHolder; TCB_t * const pxMutexHolderTCB = pxMutexHolder;
BaseType_t xReturn = pdFALSE; BaseType_t xReturn = pdFALSE;
taskENTER_CRITICAL( &xKernelLock ); #if ( configNUM_CORES > 1 )
/* For SMP, we need to take the kernel lock here as we are about to
* access kernel data structures. */
taskENTER_CRITICAL( &xKernelLock );
#endif /* ( configNUM_CORES > 1 ) */
/* If the mutex was given back by an interrupt while the queue was /* If the mutex was given back by an interrupt while the queue was
* locked then the mutex holder might now be NULL. _RB_ Is this still * locked then the mutex holder might now be NULL. _RB_ Is this still
@ -5085,7 +5096,10 @@ static void prvResetNextTaskUnblockTime( void )
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
taskEXIT_CRITICAL( &xKernelLock ); #if ( configNUM_CORES > 1 )
/* Release the previously taken kernel lock. */
taskEXIT_CRITICAL_ISR( &xKernelLock );
#endif /* ( configNUM_CORES > 1 ) */
return xReturn; return xReturn;
} }
@ -5100,7 +5114,12 @@ static void prvResetNextTaskUnblockTime( void )
TCB_t * const pxTCB = pxMutexHolder; TCB_t * const pxTCB = pxMutexHolder;
BaseType_t xReturn = pdFALSE; BaseType_t xReturn = pdFALSE;
taskENTER_CRITICAL( &xKernelLock ); #if ( configNUM_CORES > 1 )
/* For SMP, we need to take the kernel lock here as we are about to
* access kernel data structures. */
taskENTER_CRITICAL( &xKernelLock );
#endif /* ( configNUM_CORES > 1 ) */
if( pxMutexHolder != NULL ) if( pxMutexHolder != NULL )
{ {
@ -5169,7 +5188,10 @@ static void prvResetNextTaskUnblockTime( void )
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
taskEXIT_CRITICAL( &xKernelLock ); #if ( configNUM_CORES > 1 )
/* Release the previously taken kernel lock. */
taskEXIT_CRITICAL_ISR( &xKernelLock );
#endif /* ( configNUM_CORES > 1 ) */
return xReturn; return xReturn;
} }
@ -5186,7 +5208,12 @@ static void prvResetNextTaskUnblockTime( void )
UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse; UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse;
const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1; const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1;
taskENTER_CRITICAL( &xKernelLock ); #if ( configNUM_CORES > 1 )
/* For SMP, we need to take the kernel lock here as we are about to
* access kernel data structures. */
taskENTER_CRITICAL( &xKernelLock );
#endif /* ( configNUM_CORES > 1 ) */
if( pxMutexHolder != NULL ) if( pxMutexHolder != NULL )
{ {
@ -5281,7 +5308,10 @@ static void prvResetNextTaskUnblockTime( void )
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
taskEXIT_CRITICAL( &xKernelLock ); #if ( configNUM_CORES > 1 )
/* Release the previously taken kernel lock. */
taskEXIT_CRITICAL( &xKernelLock );
#endif /* ( configNUM_CORES > 1 ) */
} }
#endif /* configUSE_MUTEXES */ #endif /* configUSE_MUTEXES */
@ -5615,18 +5645,27 @@ static void prvResetNextTaskUnblockTime( void )
TickType_t uxTaskResetEventItemValue( void ) TickType_t uxTaskResetEventItemValue( void )
{ {
TickType_t uxReturn; TickType_t uxReturn;
TCB_t * pxCurTCB; BaseType_t xCoreID;
taskENTER_CRITICAL( &xKernelLock ); #if ( configNUM_CORES > 1 )
pxCurTCB = pxCurrentTCB[ xPortGetCoreID() ];
uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurTCB->xEventListItem ) ); /* For SMP, we need to take the kernel lock here to ensure nothing else
* modifies the task's event item value simultaneously. */
taskENTER_CRITICAL( &xKernelLock );
#endif /* ( configNUM_CORES > 1 ) */
xCoreID = xPortGetCoreID();
uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xCoreID ]->xEventListItem ) );
/* Reset the event list item to its normal value - so it can be used with /* Reset the event list item to its normal value - so it can be used with
* queues and semaphores. */ * queues and semaphores. */
listSET_LIST_ITEM_VALUE( &( pxCurTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurTCB->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ listSET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xCoreID ]->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB[ xCoreID ]->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
taskEXIT_CRITICAL( &xKernelLock ); #if ( configNUM_CORES > 1 )
/* Release the previously taken kernel lock. */
taskEXIT_CRITICAL_ISR( &xKernelLock );
#endif /* ( configNUM_CORES > 1 ) */
return uxReturn; return uxReturn;
} }
@ -5636,21 +5675,31 @@ TickType_t uxTaskResetEventItemValue( void )
TaskHandle_t pvTaskIncrementMutexHeldCount( void ) TaskHandle_t pvTaskIncrementMutexHeldCount( void )
{ {
TCB_t * curTCB; TCB_t * pxCurTCB;
BaseType_t xCoreID;
#if ( configNUM_CORES > 1 )
/* For SMP, we need to take the kernel lock here as we are about to
* access kernel data structures. */
taskENTER_CRITICAL( &xKernelLock );
#endif /* ( configNUM_CORES > 1 ) */
xCoreID = xPortGetCoreID();
/* If xSemaphoreCreateMutex() is called before any tasks have been created /* If xSemaphoreCreateMutex() is called before any tasks have been created
* then pxCurrentTCB will be NULL. */ * then pxCurrentTCB will be NULL. */
taskENTER_CRITICAL( &xKernelLock ); if( pxCurrentTCB[ xCoreID ] != NULL )
if( pxCurrentTCB[ xPortGetCoreID() ] != NULL )
{ {
( pxCurrentTCB[ xPortGetCoreID() ]->uxMutexesHeld )++; ( pxCurrentTCB[ xCoreID ]->uxMutexesHeld )++;
} }
curTCB = pxCurrentTCB[ xPortGetCoreID() ]; pxCurTCB = pxCurrentTCB[ xCoreID ];
taskEXIT_CRITICAL( &xKernelLock ); #if ( configNUM_CORES > 1 )
/* Release the previously taken kernel lock. */
taskEXIT_CRITICAL( &xKernelLock );
#endif /* ( configNUM_CORES > 1 ) */
return curTCB; return pxCurTCB;
} }
#endif /* configUSE_MUTEXES */ #endif /* configUSE_MUTEXES */
@ -5971,6 +6020,7 @@ TickType_t uxTaskResetEventItemValue( void )
TCB_t * pxTCB; TCB_t * pxTCB;
uint8_t ucOriginalNotifyState; uint8_t ucOriginalNotifyState;
BaseType_t xReturn = pdPASS; BaseType_t xReturn = pdPASS;
UBaseType_t uxSavedInterruptStatus;
configASSERT( xTaskToNotify ); configASSERT( xTaskToNotify );
configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES ); configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );
@ -5995,7 +6045,7 @@ TickType_t uxTaskResetEventItemValue( void )
pxTCB = xTaskToNotify; pxTCB = xTaskToNotify;
taskENTER_CRITICAL_ISR( &xKernelLock ); prvENTER_CRITICAL_OR_MASK_ISR( &xKernelLock, uxSavedInterruptStatus );
{ {
if( pulPreviousNotificationValue != NULL ) if( pulPreviousNotificationValue != NULL )
{ {
@ -6089,7 +6139,7 @@ TickType_t uxTaskResetEventItemValue( void )
} }
} }
} }
taskEXIT_CRITICAL_ISR( &xKernelLock ); prvEXIT_CRITICAL_OR_UNMASK_ISR( &xKernelLock, uxSavedInterruptStatus );
return xReturn; return xReturn;
} }
@ -6105,7 +6155,7 @@ TickType_t uxTaskResetEventItemValue( void )
{ {
TCB_t * pxTCB; TCB_t * pxTCB;
uint8_t ucOriginalNotifyState; uint8_t ucOriginalNotifyState;
UBaseType_t uxSavedInterruptStatus;
configASSERT( xTaskToNotify ); configASSERT( xTaskToNotify );
configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES ); configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );
@ -6130,7 +6180,7 @@ TickType_t uxTaskResetEventItemValue( void )
pxTCB = xTaskToNotify; pxTCB = xTaskToNotify;
taskENTER_CRITICAL_ISR( &xKernelLock ); prvENTER_CRITICAL_OR_MASK_ISR( &xKernelLock, uxSavedInterruptStatus );
{ {
ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ]; ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];
pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED; pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;
@ -6180,7 +6230,7 @@ TickType_t uxTaskResetEventItemValue( void )
} }
} }
} }
taskEXIT_CRITICAL_ISR( &xKernelLock ); prvEXIT_CRITICAL_OR_UNMASK_ISR( &xKernelLock, uxSavedInterruptStatus );
} }
#endif /* configUSE_TASK_NOTIFICATIONS */ #endif /* configUSE_TASK_NOTIFICATIONS */
@ -6252,11 +6302,23 @@ TickType_t uxTaskResetEventItemValue( void )
uint32_t ulTaskGetIdleRunTimeCounter( void ) uint32_t ulTaskGetIdleRunTimeCounter( void )
{ {
taskENTER_CRITICAL( &xKernelLock ); uint32_t ulRunTimeCounter;
tskTCB * pxTCB = ( tskTCB * ) xIdleTaskHandle[ xPortGetCoreID() ];
taskEXIT_CRITICAL( &xKernelLock );
return pxTCB->ulRunTimeCounter; #if ( configNUM_CORES > 1 )
/* For SMP, we need to take the kernel lock here as we are about to
* access kernel data structures. */
taskENTER_CRITICAL( &xKernelLock );
#endif /* ( configNUM_CORES > 1 ) */
ulRunTimeCounter = xIdleTaskHandle[ xPortGetCoreID() ]->ulRunTimeCounter;
#if ( configNUM_CORES > 1 )
/* Release the previously taken kernel lock. */
taskEXIT_CRITICAL( &xKernelLock );
#endif /* ( configNUM_CORES > 1 ) */
return ulRunTimeCounter;
} }
#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */

@ -606,11 +606,7 @@
TickType_t xTimeNow; TickType_t xTimeNow;
BaseType_t xTimerListsWereSwitched; BaseType_t xTimerListsWereSwitched;
#ifdef ESP_PLATFORM prvENTER_CRITICAL_OR_SUSPEND_ALL( &xTimerLock );
taskENTER_CRITICAL( &xTimerLock );
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
{ {
/* Obtain the time now to make an assessment as to whether the timer /* Obtain the time now to make an assessment as to whether the timer
* has expired or not. If obtaining the time causes the lists to switch * has expired or not. If obtaining the time causes the lists to switch
@ -624,11 +620,7 @@
/* The tick count has not overflowed, has the timer expired? */ /* The tick count has not overflowed, has the timer expired? */
if( ( xListWasEmpty == pdFALSE ) && ( xNextExpireTime <= xTimeNow ) ) if( ( xListWasEmpty == pdFALSE ) && ( xNextExpireTime <= xTimeNow ) )
{ {
#ifdef ESP_PLATFORM ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xTimerLock );
taskEXIT_CRITICAL( &xTimerLock );
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
prvProcessExpiredTimer( xNextExpireTime, xTimeNow ); prvProcessExpiredTimer( xNextExpireTime, xTimeNow );
} }
else else
@ -648,11 +640,7 @@
vQueueWaitForMessageRestricted( xTimerQueue, ( xNextExpireTime - xTimeNow ), xListWasEmpty ); vQueueWaitForMessageRestricted( xTimerQueue, ( xNextExpireTime - xTimeNow ), xListWasEmpty );
#ifdef ESP_PLATFORM /* IDF-3755 */ if( prvEXIT_CRITICAL_OR_RESUME_ALL( &xTimerLock ) == pdFALSE )
taskEXIT_CRITICAL( &xTimerLock );
#else
if( xTaskResumeAll() == pdFALSE )
#endif // ESP_PLATFORM
{ {
/* Yield to wait for either a command to arrive, or the /* Yield to wait for either a command to arrive, or the
* block time to expire. If a command arrived between the * block time to expire. If a command arrived between the
@ -660,22 +648,15 @@
* will not cause the task to block. */ * will not cause the task to block. */
portYIELD_WITHIN_API(); portYIELD_WITHIN_API();
} }
else
#ifndef ESP_PLATFORM /* IDF-3755 */ {
else mtCOVERAGE_TEST_MARKER();
{ }
mtCOVERAGE_TEST_MARKER();
}
#endif // ESP_PLATFORM
} }
} }
else else
{ {
#ifdef ESP_PLATFORM /* IDF-3755 */ ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xTimerLock );
taskEXIT_CRITICAL( &xTimerLock );
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
} }
} }
} }