Merge branch 'change/freertos_local_crit_section_macro' into 'master'

change(freertos/idf): Refactor thread safety convenience macros

Closes IDF-8161

See merge request espressif/esp-idf!26805
Author: Darian
Date: 2023-11-08 10:29:22 +08:00
Commit: c4eea80c07
5 changed files with 144 additions and 198 deletions
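At a glance, the change removes the file-local task*_SMP_ONLY / task*_SC_ONLY convenience macros that queue.c and tasks.c each defined for themselves, and replaces them with a single prv*-prefixed family provided once by the private ESP-IDF additions header; kernel call sites are renamed accordingly. The sketch below shows the typical call-site pattern after the change (illustrative, assembled from the hunks that follow, not a verbatim excerpt):

    /* On SMP builds ( configNUM_CORES > 1 ) this expands to a real
     * taskENTER_CRITICAL()/taskEXIT_CRITICAL() pair on the given spinlock;
     * on single-core builds both macros expand to nothing. */
    prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
    {
        /* ... access kernel data structures ... */
    }
    prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );

The _SC_ONLY, _ISR_SMP_ONLY, _SAFE_SMP_ONLY and interrupt-mask variants follow the same pattern; their full definitions appear in the last file of this diff.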

View File

@@ -534,11 +534,11 @@ EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup )
     EventGroup_t const * const pxEventBits = xEventGroup;
     EventBits_t uxReturn;
 
-    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+    prvENTER_CRITICAL_OR_MASK_ISR( ( portMUX_TYPE * ) &( pxEventBits->xEventGroupLock ), uxSavedInterruptStatus );
     {
         uxReturn = pxEventBits->uxEventBits;
     }
-    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
+    prvEXIT_CRITICAL_OR_UNMASK_ISR( ( portMUX_TYPE * ) &( pxEventBits->xEventGroupLock ), uxSavedInterruptStatus );
 
     return uxReturn;
 } /*lint !e818 EventGroupHandle_t is a typedef used in other functions to so can't be pointer to const. */
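The two replaced lines fold the raw interrupt mask/unmask pair into prvENTER_CRITICAL_OR_MASK_ISR()/prvEXIT_CRITICAL_OR_UNMASK_ISR() on the event group's own spinlock. Going by the macro definitions added in the last file of this diff, the call site reduces to roughly the following in the two build configurations (paraphrased, not verbatim):

    /* SMP ( configNUM_CORES > 1 ): take the event group's spinlock from ISR
     * context; the saved-status argument is unused. */
    taskENTER_CRITICAL_ISR( ( portMUX_TYPE * ) &( pxEventBits->xEventGroupLock ) );
    uxReturn = pxEventBits->uxEventBits;
    taskEXIT_CRITICAL_ISR( ( portMUX_TYPE * ) &( pxEventBits->xEventGroupLock ) );

    /* Single-core ( configNUM_CORES == 1 ): fall back to plain interrupt
     * masking; the lock argument is unused. */
    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    uxReturn = pxEventBits->uxEventBits;
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );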

View File

@@ -54,56 +54,6 @@
  * correct privileged Vs unprivileged linkage and placement. */
 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */
 
-/* Some code sections require extra critical sections when building for SMP
- * ( configNUMBER_OF_CORES > 1 ). */
-#if ( configNUMBER_OF_CORES > 1 )
-
-/* Macros that Enter/exit a critical section only when building for SMP */
-    #define taskENTER_CRITICAL_SMP_ONLY( pxLock )         taskENTER_CRITICAL( pxLock )
-    #define taskEXIT_CRITICAL_SMP_ONLY( pxLock )          taskEXIT_CRITICAL( pxLock )
-    #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock )    prvTaskEnterCriticalSafeSMPOnly( pxLock )
-    #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock )     prvTaskExitCriticalSafeSMPOnly( pxLock )
-
-    static inline __attribute__( ( always_inline ) )
-    void prvTaskEnterCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
-    {
-        if( portCHECK_IF_IN_ISR() == pdFALSE )
-        {
-            taskENTER_CRITICAL( pxLock );
-        }
-        else
-        {
-            #ifdef __clang_analyzer__
-                /* Teach clang-tidy that ISR version macro can be different */
-                configASSERT( 1 );
-            #endif
-            taskENTER_CRITICAL_ISR( pxLock );
-        }
-    }
-
-    static inline __attribute__( ( always_inline ) )
-    void prvTaskExitCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
-    {
-        if( portCHECK_IF_IN_ISR() == pdFALSE )
-        {
-            taskEXIT_CRITICAL( pxLock );
-        }
-        else
-        {
-            #ifdef __clang_analyzer__
-                /* Teach clang-tidy that ISR version macro can be different */
-                configASSERT( 1 );
-            #endif
-            taskEXIT_CRITICAL_ISR( pxLock );
-        }
-    }
-#else /* configNUMBER_OF_CORES > 1 */
-
-/* Macros that Enter/exit a critical section only when building for SMP */
-    #define taskENTER_CRITICAL_SMP_ONLY( pxLock )
-    #define taskEXIT_CRITICAL_SMP_ONLY( pxLock )
-    #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock )
-    #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock )
-#endif /* configNUMBER_OF_CORES > 1 */
-
 /* Single core FreeRTOS uses queue locks to ensure that vTaskPlaceOnEventList()
  * calls are deterministic (as queue locks use scheduler suspension instead of
  * critical sections). However, the SMP implementation is non-deterministic
@@ -3109,7 +3059,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
         /* For SMP, we need to take the queue registry lock in case another
          * core updates the register simultaneously. */
-        taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
         {
             if( pcQueueName != NULL )
             {
@@ -3145,7 +3095,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
             }
         }
         /* Release the previously taken queue registry lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
     }
 #endif /* configQUEUE_REGISTRY_SIZE */
@@ -3162,7 +3112,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
         /* For SMP, we need to take the queue registry lock in case another
          * core updates the register simultaneously. */
-        taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
         {
             /* Note there is nothing here to protect against another task adding or
              * removing entries from the registry while it is being searched. */
@@ -3181,7 +3131,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
                 }
             }
         /* Release the previously taken queue registry lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
         return pcReturn;
     } /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */
@@ -3199,7 +3149,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
         /* For SMP, we need to take the queue registry lock in case another
          * core updates the register simultaneously. */
-        taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
         {
             /* See if the handle of the queue being unregistered in actually in the
              * registry. */
@@ -3223,7 +3173,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
             }
         }
         /* Release the previously taken queue registry lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
     } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
 #endif /* configQUEUE_REGISTRY_SIZE */
@@ -3247,7 +3197,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
         /* For SMP, we need to take the queue's xQueueLock as we are about to
          * access the queue. */
-        taskENTER_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) );
+        prvENTER_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) );
         {
             #if ( queueUSE_LOCKS == 1 )
             {
@@ -3278,7 +3228,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
             #endif /* queueUSE_LOCKS == 1 */
         }
         /* Release the previously taken xQueueLock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) );
+        prvEXIT_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) );
     }
 #endif /* configUSE_TIMERS */
@@ -3413,7 +3363,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
         /* In SMP, queue sets have their own xQueueLock. Thus we need to also
          * acquire the queue set's xQueueLock before accessing it. */
-        taskENTER_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) );
+        prvENTER_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) );
         {
             if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
             {
@@ -3463,7 +3413,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
             }
         }
         /* Release the previously acquired queue set's xQueueLock. */
-        taskEXIT_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) );
+        prvEXIT_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) );
         return xReturn;
     }
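The queue-set hunk above uses the _SAFE_ variant because that code path can run from either a task or an ISR. On SMP builds the macro resolves to the run-time dispatch helper below, reproduced (condensed) from the private header hunk at the end of this diff; on single-core builds the whole call compiles away.

    /* Condensed from prvTaskEnterCriticalSafeSMPOnly() in the private
     * additions header added by this commit: choose the task or ISR
     * critical-section variant at run time so one call site works in both
     * contexts. The exit helper mirrors this with taskEXIT_CRITICAL*(). */
    static inline __attribute__( ( always_inline ) )
    void prvTaskEnterCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
    {
        if( portCHECK_IF_IN_ISR() == pdFALSE )
        {
            taskENTER_CRITICAL( pxLock );       /* running in a task */
        }
        else
        {
            taskENTER_CRITICAL_ISR( pxLock );   /* running in an ISR */
        }
    }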

View File

@@ -65,74 +65,6 @@
     #include <stdio.h>
 #endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */
 
-/* Some code sections require extra critical sections when building for SMP
- * ( configNUMBER_OF_CORES > 1 ). */
-#if ( configNUMBER_OF_CORES > 1 )
-
-/* Macros that enter/exit a critical section only when building for SMP */
-    #define taskENTER_CRITICAL_SMP_ONLY( pxLock )           taskENTER_CRITICAL( pxLock )
-    #define taskEXIT_CRITICAL_SMP_ONLY( pxLock )            taskEXIT_CRITICAL( pxLock )
-    #define taskENTER_CRITICAL_ISR_SMP_ONLY( pxLock )       taskENTER_CRITICAL_ISR( pxLock )
-    #define taskEXIT_CRITICAL_ISR_SMP_ONLY( pxLock )        taskEXIT_CRITICAL_ISR( pxLock )
-    #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock )      prvTaskEnterCriticalSafeSMPOnly( pxLock )
-    #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock )       prvTaskExitCriticalSafeSMPOnly( pxLock )
-
-/* Macros that enter/exit a critical section only when building for single-core */
-    #define taskENTER_CRITICAL_SC_ONLY( pxLock )            taskENTER_CRITICAL( pxLock )
-    #define taskEXIT_CRITICAL_SC_ONLY( pxLock )             taskEXIT_CRITICAL( pxLock )
-
-/* Macros that enable/disable interrupts only when building for SMP */
-    #define taskDISABLE_INTERRUPTS_ISR_SMP_ONLY()           portSET_INTERRUPT_MASK_FROM_ISR()
-    #define taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxStatus )  portCLEAR_INTERRUPT_MASK_FROM_ISR( uxStatus )
-
-    static inline __attribute__( ( always_inline ) )
-    void prvTaskEnterCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
-    {
-        if( portCHECK_IF_IN_ISR() == pdFALSE )
-        {
-            taskENTER_CRITICAL( pxLock );
-        }
-        else
-        {
-            #ifdef __clang_analyzer__
-                /* Teach clang-tidy that ISR version macro can be different */
-                configASSERT( 1 );
-            #endif
-            taskENTER_CRITICAL_ISR( pxLock );
-        }
-    }
-
-    static inline __attribute__( ( always_inline ) )
-    void prvTaskExitCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
-    {
-        if( portCHECK_IF_IN_ISR() == pdFALSE )
-        {
-            taskEXIT_CRITICAL( pxLock );
-        }
-        else
-        {
-            #ifdef __clang_analyzer__
-                /* Teach clang-tidy that ISR version macro can be different */
-                configASSERT( 1 );
-            #endif
-            taskEXIT_CRITICAL_ISR( pxLock );
-        }
-    }
-#else /* configNUMBER_OF_CORES > 1 */
-
-/* Macros that enter/exit a critical section only when building for SMP */
-    #define taskENTER_CRITICAL_SMP_ONLY( pxLock )
-    #define taskEXIT_CRITICAL_SMP_ONLY( pxLock )
-    #define taskENTER_CRITICAL_ISR_SMP_ONLY( pxLock )
-    #define taskEXIT_CRITICAL_ISR_SMP_ONLY( pxLock )
-    #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock )
-    #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock )
-
-/* Macros that enter/exit a critical section only when building for single-core */
-    #define taskENTER_CRITICAL_SC_ONLY( pxLock )            taskENTER_CRITICAL( pxLock )
-    #define taskEXIT_CRITICAL_SC_ONLY( pxLock )             taskEXIT_CRITICAL( pxLock )
-
-/* Macros that enable/disable interrupts only when building for SMP */
-    #define taskDISABLE_INTERRUPTS_ISR_SMP_ONLY()           ( ( UBaseType_t ) 0 )
-    #define taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxStatus )  ( ( void ) uxStatus )
-#endif /* configNUMBER_OF_CORES > 1 */
-
 #if ( configUSE_PREEMPTION == 0 )
 
     /* If the cooperative scheduler is being used then a yield should not be
@@ -1475,7 +1407,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
         /* For SMP, we need to take the kernel lock here as we are about to
          * access kernel data structures. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
         {
             /* Force a reschedule if it is the currently running task that has just
              * been deleted. */
@@ -1493,7 +1425,7 @@
             }
         }
         /* Release the previously taken kernel lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
     }
 #endif /* INCLUDE_vTaskDelete */
@@ -2448,7 +2380,7 @@ void vTaskStartScheduler( void )
     /* For SMP, we need to take the kernel lock here as we are about to
      * access kernel data structures. */
-    taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
     {
         #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) )
         {
@@ -2463,7 +2395,7 @@
         xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;
     }
     /* Release the previously taken kernel lock. */
-    taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
     /* If configGENERATE_RUN_TIME_STATS is defined then the following
      * macro must be defined to configure the timer/counter used to generate
@@ -2513,12 +2445,12 @@ void vTaskEndScheduler( void )
     /* For SMP, we need to take the kernel lock here as we are about to access
      * kernel data structures. */
-    taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
     {
         xSchedulerRunning = pdFALSE;
     }
     /* Release the previously taken kernel lock. */
-    taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
     vPortEndScheduler();
 }
 /*----------------------------------------------------------*/
@@ -2768,7 +2700,7 @@ TickType_t xTaskGetTickCountFromISR( void )
     /* For SMP, we need to take the kernel lock here as we are about to access
      * kernel data structures. */
-    taskENTER_CRITICAL_ISR_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_ISR_SMP_ONLY( &xKernelLock );
     {
         uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR();
         {
@@ -2777,7 +2709,7 @@
         portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
     }
     /* Release the previously taken kernel lock. */
-    taskEXIT_CRITICAL_ISR_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_ISR_SMP_ONLY( &xKernelLock );
     return xReturn;
 }
@@ -3176,7 +3108,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
                  * the event list too. Interrupts can touch the event list item,
                  * even though the scheduler is suspended, so a critical section
                  * is used. */
-                taskENTER_CRITICAL_SC_ONLY( &xKernelLock );
+                prvENTER_CRITICAL_SC_ONLY( &xKernelLock );
                 {
                     if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
                     {
@@ -3192,7 +3124,7 @@
                         mtCOVERAGE_TEST_MARKER();
                     }
                 }
-                taskEXIT_CRITICAL_SC_ONLY( &xKernelLock );
+                prvEXIT_CRITICAL_SC_ONLY( &xKernelLock );
                 /* Place the unblocked task into the appropriate ready list. */
                 prvAddTaskToReadyList( pxTCB );
@@ -3255,7 +3187,7 @@ BaseType_t xTaskIncrementTick( void )
     /* For SMP, we need to take the kernel lock here as we are about to access
      * kernel data structures (unlike single core which calls this function with
      * interrupts disabled). */
-    taskENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
     {
         if( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) pdFALSE )
         {
@@ -3423,7 +3355,7 @@
     /* Release the previously taken kernel lock as we have finished accessing
      * the kernel data structures. */
-    taskEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
     #if ( configUSE_TICK_HOOK == 1 )
     {
@@ -3646,7 +3578,7 @@ void vTaskSwitchContext( void )
     /* For SMP, we need to take the kernel lock here as we are about to access
      * kernel data structures (unlike single core which calls this function with
      * either interrupts disabled or when the scheduler hasn't started yet). */
-    taskENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
     {
         /* Get current core ID as we can no longer be preempted. */
         const BaseType_t xCurCoreID = portGET_CORE_ID();
@@ -3731,7 +3663,7 @@
     /* Release the previously taken kernel lock as we have finished accessing
      * the kernel data structures. */
-    taskEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
 }
 /*-----------------------------------------------------------*/
@@ -3746,7 +3678,7 @@ void vTaskPlaceOnEventList( List_t * const pxEventList,
     /* For SMP, we need to take the kernel lock here as we are about to access
      * kernel data structures. */
-    taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
     {
         /* Place the event list item of the TCB in the appropriate event list.
          * This is placed in the list in priority order so the highest priority task
@@ -3764,7 +3696,7 @@
         prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
     }
     /* Release the previously taken kernel lock. */
-    taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
 }
 /*-----------------------------------------------------------*/
@@ -3792,7 +3724,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
     /* For SMP, we need to take the kernel lock here as we are about to access
      * kernel data structures. */
-    taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
     {
         /* Store the item value in the event list item. It is safe to access the
          * event list item here as interrupts won't access the event list item of a
@@ -3809,7 +3741,7 @@
         prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
     }
     /* Release the previously taken kernel lock. */
-    taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
 }
 /*-----------------------------------------------------------*/
@@ -3829,7 +3761,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
     /* For SMP, we need to take the kernel lock here as we are about to access
      * kernel data structures. */
-    taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
     {
         /* Place the event list item of the TCB in the appropriate event list.
         * In this case it is assume that this is the only task that is going to
@@ -4628,7 +4560,7 @@ static void prvCheckTasksWaitingTermination( void )
         /* A critical section is required for SMP in case another core modifies
          * the task simultaneously. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
         {
             pxTaskStatus->xHandle = ( TaskHandle_t ) pxTCB;
             pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName[ 0 ] );
@@ -4730,7 +4662,7 @@
             }
         }
         /* Exit the previously entered critical section. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
     }
 #endif /* configUSE_TRACE_FACILITY */
@@ -4952,11 +4884,11 @@ static void prvResetNextTaskUnblockTime( void )
         * For single-core a critical section is not required as this is not
         * called from an interrupt and the current TCB will always be the same
        * for any individual execution thread. */
-        uxSavedInterruptStatus = taskDISABLE_INTERRUPTS_ISR_SMP_ONLY();
+        uxSavedInterruptStatus = prvDISABLE_INTERRUPTS_ISR_SMP_ONLY();
        {
            xReturn = pxCurrentTCBs[ portGET_CORE_ID() ];
        }
-        taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus );
+        prvENABLE_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus );
        return xReturn;
    }
@@ -4980,7 +4912,7 @@
        *
        * We use the ISR versions of interrupt macros as this function could be
        * called inside critical sections. */
-        uxSavedInterruptStatus = taskDISABLE_INTERRUPTS_ISR_SMP_ONLY();
+        uxSavedInterruptStatus = prvDISABLE_INTERRUPTS_ISR_SMP_ONLY();
        {
            if( xSchedulerRunning == pdFALSE )
            {
@@ -4998,7 +4930,7 @@
                }
            }
        }
-        taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus );
+        prvENABLE_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus );
        return xReturn;
    }
@@ -5015,7 +4947,7 @@
        /* For SMP, we need to take the kernel lock here as we are about to
         * access kernel data structures. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
        {
            /* Get current core ID as we can no longer be preempted. */
            const BaseType_t xCurCoreID = portGET_CORE_ID();
@@ -5098,7 +5030,7 @@
            }
        }
        /* Release the previously taken kernel lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
        return xReturn;
    }
@@ -5115,7 +5047,7 @@
        /* For SMP, we need to take the kernel lock here as we are about to
         * access kernel data structures. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
        {
            if( pxMutexHolder != NULL )
            {
@@ -5185,7 +5117,7 @@
            }
        }
        /* Release the previously taken kernel lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
        return xReturn;
    }
@@ -5204,7 +5136,7 @@
        /* For SMP, we need to take the kernel lock here as we are about to
         * access kernel data structures. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
        {
            if( pxMutexHolder != NULL )
            {
@@ -5300,7 +5232,7 @@
            }
        }
        /* Release the previously taken kernel lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
    }
 #endif /* configUSE_MUTEXES */
@@ -5635,7 +5567,7 @@ TickType_t uxTaskResetEventItemValue( void )
    /* For SMP, we need to take the kernel lock here to ensure nothing else
     * modifies the task's event item value simultaneously. */
-    taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
    {
        /* Get current core ID as we can no longer be preempted. */
        const BaseType_t xCurCoreID = portGET_CORE_ID();
@@ -5646,7 +5578,7 @@
        * queues and semaphores. */
        listSET_LIST_ITEM_VALUE( &( pxCurrentTCBs[ xCurCoreID ]->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCBs[ xCurCoreID ]->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
    }
-    taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
    /* Release the previously taken kernel lock. */
    return uxReturn;
@@ -5661,7 +5593,7 @@
    /* For SMP, we need to take the kernel lock here as we are about to
     * access kernel data structures. */
-    taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
    {
        /* Get current core ID as we can no longer be preempted. */
        const BaseType_t xCurCoreID = portGET_CORE_ID();
@@ -5676,7 +5608,7 @@
        xReturn = pxCurrentTCBs[ xCurCoreID ];
    }
    /* Release the previously taken kernel lock. */
-    taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
    return xReturn;
 }
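The last few hunks also rename the interrupt-mask-only helpers (prvDISABLE_INTERRUPTS_ISR_SMP_ONLY()/prvENABLE_INTERRUPTS_ISR_SMP_ONLY()). Per the definitions in the private header at the end of this diff, the getter code above reduces to roughly the following in each configuration (paraphrased, not verbatim):

    /* SMP: mask interrupts with the ISR-safe port macros so the read of
     * pxCurrentTCBs[] stays consistent even when called from inside a
     * critical section or an ISR. */
    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    xReturn = pxCurrentTCBs[ portGET_CORE_ID() ];
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    /* Single-core: no masking is needed (the current TCB cannot change under
     * the caller), so the macros collapse to a dummy status value. */
    uxSavedInterruptStatus = ( UBaseType_t ) 0;
    xReturn = pxCurrentTCBs[ portGET_CORE_ID() ];
    ( void ) uxSavedInterruptStatus;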

View File

@@ -507,12 +507,12 @@ BaseType_t xTaskGetCoreID( TaskHandle_t xTask )
         /* For SMP, we need to take the kernel lock here as we are about to
          * access kernel data structures. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
         {
             ulRunTimeCounter = xIdleTaskHandle[ xCoreID ]->ulRunTimeCounter;
         }
         /* Release the previously taken kernel lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
         return ulRunTimeCounter;
     }
@@ -539,12 +539,12 @@ BaseType_t xTaskGetCoreID( TaskHandle_t xTask )
         {
             /* For SMP, we need to take the kernel lock here as we are about
              * to access kernel data structures. */
-            taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+            prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
             {
                 ulReturn = xIdleTaskHandle[ xCoreID ]->ulRunTimeCounter / ulTotalTime;
             }
             /* Release the previously taken kernel lock. */
-            taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+            prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
         }
         else
         {

View File

@@ -13,6 +13,7 @@
 #include "sdkconfig.h"
 #include "freertos/FreeRTOS.h"
+#include "freertos/task.h"
 
 /* *INDENT-OFF* */
 #ifdef __cplusplus
@@ -26,52 +27,117 @@
 /*
  * The following macros are convenience macros used to account for different
- * thread safety behavior between Vanilla FreeRTOS (i.e., single-core) and ESP-IDF
- * FreeRTOS (i.e., multi-core SMP).
+ * thread safety behavior between single-core and SMP in ESP-IDF FreeRTOS.
  *
  * For thread saftey...
  *
- * - Vanilla FreeRTOS will use the following for thread safety (depending on situation)
+ * - Single-core will use the following for thread safety (depending on situation)
  *   - `vTaskSuspendAll()`/`xTaskResumeAll()` for non-deterministic operations
  *   - Critical sections or disabling interrupts for deterministic operations
- * - ESP-IDF FreeRTOS will always use critical sections (determinism is not supported)
- *
- * [refactor-todo]: Define these locally in each kernel source file (IDF-8161)
+ * - SMP will always use critical sections (determinism is not supported)
  */
 #if ( !CONFIG_FREERTOS_SMP && ( configNUM_CORES > 1 ) )
 
-    #define prvENTER_CRITICAL_OR_SUSPEND_ALL( x )    taskENTER_CRITICAL( ( x ) )
-    #define prvEXIT_CRITICAL_OR_RESUME_ALL( x )      ( { taskEXIT_CRITICAL( ( x ) ); pdFALSE; } )
-    #define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxInterruptStatus ) \
-    { \
-        taskENTER_CRITICAL_ISR( ( pxLock ) ); \
-        ( void ) ( uxInterruptStatus ); \
-    }
-    #define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxInterruptStatus ) \
-    { \
-        taskEXIT_CRITICAL_ISR( ( pxLock ) ); \
-        ( void ) ( uxInterruptStatus ); \
-    }
+/* Macros that use a critical section when building for SMP */
+    #define prvENTER_CRITICAL_OR_SUSPEND_ALL( pxLock )    taskENTER_CRITICAL( ( pxLock ) )
+    #define prvEXIT_CRITICAL_OR_RESUME_ALL( pxLock )      ( { taskEXIT_CRITICAL( ( pxLock ) ); pdFALSE; } )
+    #define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxStatus ) \
+    { \
+        taskENTER_CRITICAL_ISR( ( pxLock ) ); \
+        ( void ) ( uxStatus ); \
+    }
+    #define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxStatus ) \
+    { \
+        taskEXIT_CRITICAL_ISR( ( pxLock ) ); \
+        ( void ) ( uxStatus ); \
+    }
+
+/* Macros that enter/exit a critical section only when building for SMP */
+    #define prvENTER_CRITICAL_SMP_ONLY( pxLock )          taskENTER_CRITICAL( pxLock )
+    #define prvEXIT_CRITICAL_SMP_ONLY( pxLock )           taskEXIT_CRITICAL( pxLock )
+    #define prvENTER_CRITICAL_ISR_SMP_ONLY( pxLock )      taskENTER_CRITICAL_ISR( pxLock )
+    #define prvEXIT_CRITICAL_ISR_SMP_ONLY( pxLock )       taskEXIT_CRITICAL_ISR( pxLock )
+    #define prvENTER_CRITICAL_SAFE_SMP_ONLY( pxLock )     prvTaskEnterCriticalSafeSMPOnly( pxLock )
+    #define prvEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock )      prvTaskExitCriticalSafeSMPOnly( pxLock )
+
+    static inline __attribute__( ( always_inline ) )
+    void prvTaskEnterCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
+    {
+        if( portCHECK_IF_IN_ISR() == pdFALSE )
+        {
+            taskENTER_CRITICAL( pxLock );
+        }
+        else
+        {
+            #ifdef __clang_analyzer__
+                /* Teach clang-tidy that ISR version macro can be different */
+                configASSERT( 1 );
+            #endif
+            taskENTER_CRITICAL_ISR( pxLock );
+        }
+    }
+
+    static inline __attribute__( ( always_inline ) )
+    void prvTaskExitCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
+    {
+        if( portCHECK_IF_IN_ISR() == pdFALSE )
+        {
+            taskEXIT_CRITICAL( pxLock );
+        }
+        else
+        {
+            #ifdef __clang_analyzer__
+                /* Teach clang-tidy that ISR version macro can be different */
+                configASSERT( 1 );
+            #endif
+            taskEXIT_CRITICAL_ISR( pxLock );
+        }
+    }
+
+/* Macros that enter/exit a critical section only when building for single-core */
+    #define prvENTER_CRITICAL_SC_ONLY( pxLock )
+    #define prvEXIT_CRITICAL_SC_ONLY( pxLock )
+
+/* Macros that enable/disable interrupts only when building for SMP */
+    #define prvDISABLE_INTERRUPTS_ISR_SMP_ONLY()             portSET_INTERRUPT_MASK_FROM_ISR()
+    #define prvENABLE_INTERRUPTS_ISR_SMP_ONLY( uxStatus )    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxStatus )
 
 #elif ( !CONFIG_FREERTOS_SMP && ( configNUM_CORES == 1 ) )
 
-    #define prvENTER_CRITICAL_OR_SUSPEND_ALL( x )    ( { vTaskSuspendAll(); ( void ) ( x ); } )
-    #define prvEXIT_CRITICAL_OR_RESUME_ALL( x )      xTaskResumeAll()
-    #define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxInterruptStatus ) \
-    { \
-        ( uxInterruptStatus ) = portSET_INTERRUPT_MASK_FROM_ISR(); \
-        ( void ) ( pxLock ); \
-    }
-    #define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxInterruptStatus ) \
-    { \
-        portCLEAR_INTERRUPT_MASK_FROM_ISR( ( uxInterruptStatus ) ); \
-        ( void ) ( pxLock ); \
-    }
+/* Macros that suspend the scheduler or disables interrupts when building for single-core */
+    #define prvENTER_CRITICAL_OR_SUSPEND_ALL( pxLock )    ( { vTaskSuspendAll(); ( void ) ( pxLock ); } )
+    #define prvEXIT_CRITICAL_OR_RESUME_ALL( pxLock )      xTaskResumeAll()
+    #define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxStatus ) \
+    { \
+        ( uxStatus ) = portSET_INTERRUPT_MASK_FROM_ISR(); \
+        ( void ) ( pxLock ); \
+    }
+    #define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxStatus ) \
+    { \
+        portCLEAR_INTERRUPT_MASK_FROM_ISR( ( uxStatus ) ); \
+        ( void ) ( pxLock ); \
+    }
+
+/* Macros that enter/exit a critical section only when building for SMP */
+    #define prvENTER_CRITICAL_SMP_ONLY( pxLock )
+    #define prvEXIT_CRITICAL_SMP_ONLY( pxLock )
+    #define prvENTER_CRITICAL_ISR_SMP_ONLY( pxLock )
+    #define prvEXIT_CRITICAL_ISR_SMP_ONLY( pxLock )
+    #define prvENTER_CRITICAL_SAFE_SMP_ONLY( pxLock )
+    #define prvEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock )
+
+/* Macros that enter/exit a critical section only when building for single-core */
+    #define prvENTER_CRITICAL_SC_ONLY( pxLock )           taskENTER_CRITICAL( pxLock )
+    #define prvEXIT_CRITICAL_SC_ONLY( pxLock )            taskEXIT_CRITICAL( pxLock )
+
+/* Macros that enable/disable interrupts only when building for SMP */
+    #define prvDISABLE_INTERRUPTS_ISR_SMP_ONLY()             ( ( UBaseType_t ) 0 )
+    #define prvENABLE_INTERRUPTS_ISR_SMP_ONLY( uxStatus )    ( ( void ) uxStatus )
 
 #endif /* ( !CONFIG_FREERTOS_SMP && ( configNUM_CORES == 1 ) ) */
 
 /*
- * In ESP-IDF FreeRTOS (i.e., multi-core SMP) uses spinlocks to protect different
+ * In ESP-IDF FreeRTOS under SMP builds, spinlocks are used to protect different
  * groups of data. This function is a wrapper to take the "xKernelLock" spinlock
  * of tasks.c.
  *
@@ -87,8 +153,6 @@
  * vEventGroupDelete() as both those functions will directly access event lists
  * (which are kernel data structures). Thus, a wrapper function must be provided
  * to take/release the "xKernelLock" from outside tasks.c.
- *
- * [refactor-todo]: Extern this locally in event groups (IDF-8161)
  */
 #if ( !CONFIG_FREERTOS_SMP && ( configNUM_CORES > 1 ) )
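The comment above describes the wrapper that lets event_groups.c take and release tasks.c's private xKernelLock on SMP builds. The wrapper's prototype sits outside this hunk, so the names used in the sketch below (prvTakeKernelLock()/prvReleaseKernelLock()) are assumptions for illustration only; the pattern is what the comment describes:

    /* Hypothetical SMP-only call site in event_groups.c, e.g. inside
     * vEventGroupDelete(), which walks event lists (kernel data structures).
     * The wrapper function names are assumed for this sketch. */
    #if ( !CONFIG_FREERTOS_SMP && ( configNUM_CORES > 1 ) )
        prvTakeKernelLock();        /* take tasks.c's "xKernelLock" */
    #endif
    {
        /* ... directly access the event lists ... */
    }
    #if ( !CONFIG_FREERTOS_SMP && ( configNUM_CORES > 1 ) )
        prvReleaseKernelLock();     /* release tasks.c's "xKernelLock" */
    #endif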