Mirror of https://github.com/espressif/esp-idf.git (synced 2024-10-05 20:47:46 -04:00)

commit 681439b85a

Merge branch 'change/freertos_local_crit_section_macro_v5.2' into 'release/v5.2'

change(freertos/idf): Refactor thread safety convenience macros (v5.2)

See merge request espressif/esp-idf!26997
@@ -534,11 +534,11 @@ EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup )
     EventGroup_t const * const pxEventBits = xEventGroup;
     EventBits_t uxReturn;

-    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+    prvENTER_CRITICAL_OR_MASK_ISR( ( portMUX_TYPE * ) &( pxEventBits->xEventGroupLock ), uxSavedInterruptStatus );
     {
         uxReturn = pxEventBits->uxEventBits;
     }
-    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
+    prvEXIT_CRITICAL_OR_UNMASK_ISR( ( portMUX_TYPE * ) &( pxEventBits->xEventGroupLock ), uxSavedInterruptStatus );

     return uxReturn;
 } /*lint !e818 EventGroupHandle_t is a typedef used in other functions to so can't be pointer to const. */
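This first hunk is in xEventGroupGetBitsFromISR(): instead of calling the port-level interrupt-mask macros directly, the function now goes through the prvENTER_CRITICAL_OR_MASK_ISR()/prvEXIT_CRITICAL_OR_UNMASK_ISR() convenience macros. A minimal sketch of what the call site reduces to under the two build configurations, paraphrased from the macro definitions that appear later in this diff (the sketch itself is illustrative, not part of the change):

    /* SMP build ( configNUM_CORES > 1 ): take the event group's spinlock;
     * the saved-interrupt-status argument is unused. */
    taskENTER_CRITICAL_ISR( ( portMUX_TYPE * ) &( pxEventBits->xEventGroupLock ) );
    ( void ) uxSavedInterruptStatus;

    /* Single-core build: mask interrupts as before; the lock argument is unused. */
    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    ( void ) ( ( portMUX_TYPE * ) &( pxEventBits->xEventGroupLock ) );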
@@ -54,56 +54,6 @@
  * correct privileged Vs unprivileged linkage and placement. */
 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */

-/* Some code sections require extra critical sections when building for SMP
- * ( configNUMBER_OF_CORES > 1 ). */
-#if ( configNUMBER_OF_CORES > 1 )
-    /* Macros that Enter/exit a critical section only when building for SMP */
-    #define taskENTER_CRITICAL_SMP_ONLY( pxLock )         taskENTER_CRITICAL( pxLock )
-    #define taskEXIT_CRITICAL_SMP_ONLY( pxLock )          taskEXIT_CRITICAL( pxLock )
-    #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock )    prvTaskEnterCriticalSafeSMPOnly( pxLock )
-    #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock )     prvTaskExitCriticalSafeSMPOnly( pxLock )
-
-    static inline __attribute__( ( always_inline ) )
-    void prvTaskEnterCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
-    {
-        if( portCHECK_IF_IN_ISR() == pdFALSE )
-        {
-            taskENTER_CRITICAL( pxLock );
-        }
-        else
-        {
-            #ifdef __clang_analyzer__
-                /* Teach clang-tidy that ISR version macro can be different */
-                configASSERT( 1 );
-            #endif
-            taskENTER_CRITICAL_ISR( pxLock );
-        }
-    }
-
-    static inline __attribute__( ( always_inline ) )
-    void prvTaskExitCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
-    {
-        if( portCHECK_IF_IN_ISR() == pdFALSE )
-        {
-            taskEXIT_CRITICAL( pxLock );
-        }
-        else
-        {
-            #ifdef __clang_analyzer__
-                /* Teach clang-tidy that ISR version macro can be different */
-                configASSERT( 1 );
-            #endif
-            taskEXIT_CRITICAL_ISR( pxLock );
-        }
-    }
-#else /* configNUMBER_OF_CORES > 1 */
-    /* Macros that Enter/exit a critical section only when building for SMP */
-    #define taskENTER_CRITICAL_SMP_ONLY( pxLock )
-    #define taskEXIT_CRITICAL_SMP_ONLY( pxLock )
-    #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock )
-    #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock )
-#endif /* configNUMBER_OF_CORES > 1 */
-
 /* Single core FreeRTOS uses queue locks to ensure that vTaskPlaceOnEventList()
  * calls are deterministic (as queue locks use scheduler suspension instead of
  * critical sections). However, the SMP implementation is non-deterministic
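The block removed above deletes queue.c's private copies of the taskENTER_CRITICAL_SMP_ONLY()-style helpers; the remaining hunks in this file only rename call sites to the shared, prv-prefixed versions that this merge adds to the common ESP-IDF additions header shown at the end of the diff. The pattern, shown here purely for illustration, is:

    /* Before: macro defined locally in queue.c */
    taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
    {
        /* ... access the queue registry ... */
    }
    taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );

    /* After: shared prv-prefixed macro from the common header */
    prvENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
    {
        /* ... access the queue registry ... */
    }
    prvEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );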
@@ -3109,7 +3059,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )

     /* For SMP, we need to take the queue registry lock in case another
      * core updates the register simultaneously. */
-    taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
+    prvENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
     {
         if( pcQueueName != NULL )
         {
@@ -3145,7 +3095,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
         }
     }
     /* Release the previously taken queue registry lock. */
-    taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
+    prvEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
 }

 #endif /* configQUEUE_REGISTRY_SIZE */
@@ -3162,7 +3112,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )

     /* For SMP, we need to take the queue registry lock in case another
      * core updates the register simultaneously. */
-    taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
+    prvENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
     {
         /* Note there is nothing here to protect against another task adding or
          * removing entries from the registry while it is being searched. */
@@ -3181,7 +3131,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
         }
     }
     /* Release the previously taken queue registry lock. */
-    taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
+    prvEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );

     return pcReturn;
 } /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */
@@ -3199,7 +3149,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )

     /* For SMP, we need to take the queue registry lock in case another
      * core updates the register simultaneously. */
-    taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
+    prvENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
     {
         /* See if the handle of the queue being unregistered in actually in the
          * registry. */
@@ -3223,7 +3173,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
         }
     }
     /* Release the previously taken queue registry lock. */
-    taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
+    prvEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */

 #endif /* configQUEUE_REGISTRY_SIZE */
@@ -3247,7 +3197,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )

     /* For SMP, we need to take the queue's xQueueLock as we are about to
      * access the queue. */
-    taskENTER_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) );
+    prvENTER_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) );
     {
         #if ( queueUSE_LOCKS == 1 )
         {
@@ -3278,7 +3228,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
         #endif /* queueUSE_LOCKS == 1 */
     }
     /* Release the previously taken xQueueLock. */
-    taskEXIT_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) );
+    prvEXIT_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) );
 }

 #endif /* configUSE_TIMERS */
@@ -3413,7 +3363,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )

     /* In SMP, queue sets have their own xQueueLock. Thus we need to also
      * acquire the queue set's xQueueLock before accessing it. */
-    taskENTER_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) );
+    prvENTER_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) );
     {
         if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
         {
@@ -3463,7 +3413,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
         }
     }
     /* Release the previously acquired queue set's xQueueLock. */
-    taskEXIT_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) );
+    prvEXIT_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) );

     return xReturn;
 }
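The last two queue.c hunks, in the queue set notification code, use the _SAFE_ variant because that path can run from either a task or an ISR; the SAFE helpers check portCHECK_IF_IN_ISR() at run time and pick the matching critical-section call. A condensed sketch of the dispatch done by prvTaskEnterCriticalSafeSMPOnly() as it appears in this change (the wrapper name below is made up for illustration):

    /* Sketch of the run-time dispatch performed by the SAFE helpers. */
    static inline void prv_enter_critical_safe_sketch( portMUX_TYPE * pxLock )
    {
        if( portCHECK_IF_IN_ISR() == pdFALSE )
        {
            taskENTER_CRITICAL( pxLock );      /* task context */
        }
        else
        {
            taskENTER_CRITICAL_ISR( pxLock );  /* ISR context */
        }
    }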
@@ -65,74 +65,6 @@
     #include <stdio.h>
 #endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */

-/* Some code sections require extra critical sections when building for SMP
- * ( configNUMBER_OF_CORES > 1 ). */
-#if ( configNUMBER_OF_CORES > 1 )
-    /* Macros that enter/exit a critical section only when building for SMP */
-    #define taskENTER_CRITICAL_SMP_ONLY( pxLock )         taskENTER_CRITICAL( pxLock )
-    #define taskEXIT_CRITICAL_SMP_ONLY( pxLock )          taskEXIT_CRITICAL( pxLock )
-    #define taskENTER_CRITICAL_ISR_SMP_ONLY( pxLock )     taskENTER_CRITICAL_ISR( pxLock )
-    #define taskEXIT_CRITICAL_ISR_SMP_ONLY( pxLock )      taskEXIT_CRITICAL_ISR( pxLock )
-    #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock )    prvTaskEnterCriticalSafeSMPOnly( pxLock )
-    #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock )     prvTaskExitCriticalSafeSMPOnly( pxLock )
-    /* Macros that enter/exit a critical section only when building for single-core */
-    #define taskENTER_CRITICAL_SC_ONLY( pxLock )          taskENTER_CRITICAL( pxLock )
-    #define taskEXIT_CRITICAL_SC_ONLY( pxLock )           taskEXIT_CRITICAL( pxLock )
-    /* Macros that enable/disable interrupts only when building for SMP */
-    #define taskDISABLE_INTERRUPTS_ISR_SMP_ONLY()             portSET_INTERRUPT_MASK_FROM_ISR()
-    #define taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxStatus )    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxStatus )
-
-    static inline __attribute__( ( always_inline ) )
-    void prvTaskEnterCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
-    {
-        if( portCHECK_IF_IN_ISR() == pdFALSE )
-        {
-            taskENTER_CRITICAL( pxLock );
-        }
-        else
-        {
-            #ifdef __clang_analyzer__
-                /* Teach clang-tidy that ISR version macro can be different */
-                configASSERT( 1 );
-            #endif
-            taskENTER_CRITICAL_ISR( pxLock );
-        }
-    }
-
-    static inline __attribute__( ( always_inline ) )
-    void prvTaskExitCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
-    {
-        if( portCHECK_IF_IN_ISR() == pdFALSE )
-        {
-            taskEXIT_CRITICAL( pxLock );
-        }
-        else
-        {
-            #ifdef __clang_analyzer__
-                /* Teach clang-tidy that ISR version macro can be different */
-                configASSERT( 1 );
-            #endif
-            taskEXIT_CRITICAL_ISR( pxLock );
-        }
-    }
-
-#else /* configNUMBER_OF_CORES > 1 */
-    /* Macros that enter/exit a critical section only when building for SMP */
-    #define taskENTER_CRITICAL_SMP_ONLY( pxLock )
-    #define taskEXIT_CRITICAL_SMP_ONLY( pxLock )
-    #define taskENTER_CRITICAL_ISR_SMP_ONLY( pxLock )
-    #define taskEXIT_CRITICAL_ISR_SMP_ONLY( pxLock )
-    #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock )
-    #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock )
-    /* Macros that enter/exit a critical section only when building for single-core */
-    #define taskENTER_CRITICAL_SC_ONLY( pxLock )          taskENTER_CRITICAL( pxLock )
-    #define taskEXIT_CRITICAL_SC_ONLY( pxLock )           taskEXIT_CRITICAL( pxLock )
-    /* Macros that enable/disable interrupts only when building for SMP */
-    #define taskDISABLE_INTERRUPTS_ISR_SMP_ONLY()             ( ( UBaseType_t ) 0 )
-    #define taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxStatus )    ( ( void ) uxStatus )
-
-#endif /* configNUMBER_OF_CORES > 1 */
-
 #if ( configUSE_PREEMPTION == 0 )

     /* If the cooperative scheduler is being used then a yield should not be
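tasks.c loses the same private helper block, including the interrupt-masking pair whose inconsistent taskEnable_INTERRUPTS_ISR_SMP_ONLY() casing becomes prvENABLE_INTERRUPTS_ISR_SMP_ONLY() in the new header. The usage pattern that the tasks.c hunks below switch to looks roughly like this (illustrative only; on single-core builds the two macros collapse to a dummy status value and a ( void ) cast):

    UBaseType_t uxSavedInterruptStatus;

    uxSavedInterruptStatus = prvDISABLE_INTERRUPTS_ISR_SMP_ONLY();
    {
        /* ... read state that an ISR on another core could also touch ... */
    }
    prvENABLE_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus );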
@@ -1475,7 +1407,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )

         /* For SMP, we need to take the kernel lock here as we are about to
          * access kernel data structures. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
         {
             /* Force a reschedule if it is the currently running task that has just
              * been deleted. */
@@ -1493,7 +1425,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
             }
         }
         /* Release the previously taken kernel lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
     }

 #endif /* INCLUDE_vTaskDelete */
@@ -2447,7 +2379,7 @@ void vTaskStartScheduler( void )

         /* For SMP, we need to take the kernel lock here as we are about to
          * access kernel data structures. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
         {
             #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) )
             {
@@ -2462,7 +2394,7 @@ void vTaskStartScheduler( void )
             xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;
         }
         /* Release the previously taken kernel lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );

         /* If configGENERATE_RUN_TIME_STATS is defined then the following
          * macro must be defined to configure the timer/counter used to generate
@@ -2512,12 +2444,12 @@ void vTaskEndScheduler( void )

     /* For SMP, we need to take the kernel lock here as we are about to access
      * kernel data structures. */
-    taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
     {
         xSchedulerRunning = pdFALSE;
     }
     /* Release the previously taken kernel lock. */
-    taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
     vPortEndScheduler();
 }
 /*----------------------------------------------------------*/
@@ -2767,7 +2699,7 @@ TickType_t xTaskGetTickCountFromISR( void )

     /* For SMP, we need to take the kernel lock here as we are about to access
      * kernel data structures. */
-    taskENTER_CRITICAL_ISR_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_ISR_SMP_ONLY( &xKernelLock );
     {
         uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR();
         {
@@ -2776,7 +2708,7 @@ TickType_t xTaskGetTickCountFromISR( void )
         portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
     }
     /* Release the previously taken kernel lock. */
-    taskEXIT_CRITICAL_ISR_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_ISR_SMP_ONLY( &xKernelLock );

     return xReturn;
 }
@@ -3175,7 +3107,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
              * the event list too. Interrupts can touch the event list item,
              * even though the scheduler is suspended, so a critical section
              * is used. */
-            taskENTER_CRITICAL_SC_ONLY( &xKernelLock );
+            prvENTER_CRITICAL_SC_ONLY( &xKernelLock );
             {
                 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
                 {
@@ -3191,7 +3123,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
                     mtCOVERAGE_TEST_MARKER();
                 }
             }
-            taskEXIT_CRITICAL_SC_ONLY( &xKernelLock );
+            prvEXIT_CRITICAL_SC_ONLY( &xKernelLock );

             /* Place the unblocked task into the appropriate ready list. */
             prvAddTaskToReadyList( pxTCB );
@@ -3254,7 +3186,7 @@ BaseType_t xTaskIncrementTick( void )
     /* For SMP, we need to take the kernel lock here as we are about to access
      * kernel data structures (unlike single core which calls this function with
      * interrupts disabled). */
-    taskENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
     {
         if( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) pdFALSE )
         {
@@ -3422,7 +3354,7 @@ BaseType_t xTaskIncrementTick( void )

     /* Release the previously taken kernel lock as we have finished accessing
      * the kernel data structures. */
-    taskEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );

     #if ( configUSE_TICK_HOOK == 1 )
     {
@@ -3645,7 +3577,7 @@ void vTaskSwitchContext( void )
     /* For SMP, we need to take the kernel lock here as we are about to access
      * kernel data structures (unlike single core which calls this function with
      * either interrupts disabled or when the scheduler hasn't started yet). */
-    taskENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
     {
         /* Get current core ID as we can no longer be preempted. */
         const BaseType_t xCurCoreID = portGET_CORE_ID();
@@ -3730,7 +3662,7 @@ void vTaskSwitchContext( void )

     /* Release the previously taken kernel lock as we have finished accessing
      * the kernel data structures. */
-    taskEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
 }
 /*-----------------------------------------------------------*/

@@ -3745,7 +3677,7 @@ void vTaskPlaceOnEventList( List_t * const pxEventList,

     /* For SMP, we need to take the kernel lock here as we are about to access
      * kernel data structures. */
-    taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
     {
         /* Place the event list item of the TCB in the appropriate event list.
          * This is placed in the list in priority order so the highest priority task
@@ -3763,7 +3695,7 @@ void vTaskPlaceOnEventList( List_t * const pxEventList,
         prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
     }
     /* Release the previously taken kernel lock. */
-    taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
 }
 /*-----------------------------------------------------------*/

@@ -3791,7 +3723,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,

     /* For SMP, we need to take the kernel lock here as we are about to access
      * kernel data structures. */
-    taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
     {
         /* Store the item value in the event list item. It is safe to access the
          * event list item here as interrupts won't access the event list item of a
@@ -3808,7 +3740,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
         prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
     }
     /* Release the previously taken kernel lock. */
-    taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
 }
 /*-----------------------------------------------------------*/

@@ -3828,7 +3760,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,

     /* For SMP, we need to take the kernel lock here as we are about to access
      * kernel data structures. */
-    taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
     {
         /* Place the event list item of the TCB in the appropriate event list.
          * In this case it is assume that this is the only task that is going to
@@ -4627,7 +4559,7 @@ static void prvCheckTasksWaitingTermination( void )

         /* A critical section is required for SMP in case another core modifies
          * the task simultaneously. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
         {
             pxTaskStatus->xHandle = ( TaskHandle_t ) pxTCB;
             pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName[ 0 ] );
@@ -4729,7 +4661,7 @@ static void prvCheckTasksWaitingTermination( void )
             }
         }
         /* Exit the previously entered critical section. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
     }

 #endif /* configUSE_TRACE_FACILITY */
@@ -4951,11 +4883,11 @@ static void prvResetNextTaskUnblockTime( void )
          * For single-core a critical section is not required as this is not
          * called from an interrupt and the current TCB will always be the same
          * for any individual execution thread. */
-        uxSavedInterruptStatus = taskDISABLE_INTERRUPTS_ISR_SMP_ONLY();
+        uxSavedInterruptStatus = prvDISABLE_INTERRUPTS_ISR_SMP_ONLY();
         {
             xReturn = pxCurrentTCBs[ portGET_CORE_ID() ];
         }
-        taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus );
+        prvENABLE_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus );

         return xReturn;
     }
@@ -4979,7 +4911,7 @@ static void prvResetNextTaskUnblockTime( void )
          *
          * We use the ISR versions of interrupt macros as this function could be
          * called inside critical sections. */
-        uxSavedInterruptStatus = taskDISABLE_INTERRUPTS_ISR_SMP_ONLY();
+        uxSavedInterruptStatus = prvDISABLE_INTERRUPTS_ISR_SMP_ONLY();
         {
             if( xSchedulerRunning == pdFALSE )
             {
@@ -4997,7 +4929,7 @@ static void prvResetNextTaskUnblockTime( void )
                 }
             }
         }
-        taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus );
+        prvENABLE_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus );

         return xReturn;
     }
@@ -5014,7 +4946,7 @@ static void prvResetNextTaskUnblockTime( void )

         /* For SMP, we need to take the kernel lock here as we are about to
          * access kernel data structures. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
         {
             /* Get current core ID as we can no longer be preempted. */
             const BaseType_t xCurCoreID = portGET_CORE_ID();
@@ -5097,7 +5029,7 @@ static void prvResetNextTaskUnblockTime( void )
             }
         }
         /* Release the previously taken kernel lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );

         return xReturn;
     }
@@ -5114,7 +5046,7 @@ static void prvResetNextTaskUnblockTime( void )

         /* For SMP, we need to take the kernel lock here as we are about to
          * access kernel data structures. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
         {
             if( pxMutexHolder != NULL )
             {
@@ -5184,7 +5116,7 @@ static void prvResetNextTaskUnblockTime( void )
             }
         }
         /* Release the previously taken kernel lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );

         return xReturn;
     }
@@ -5203,7 +5135,7 @@ static void prvResetNextTaskUnblockTime( void )

         /* For SMP, we need to take the kernel lock here as we are about to
          * access kernel data structures. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
         {
             if( pxMutexHolder != NULL )
             {
@@ -5299,7 +5231,7 @@ static void prvResetNextTaskUnblockTime( void )
             }
         }
         /* Release the previously taken kernel lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
     }

 #endif /* configUSE_MUTEXES */
@@ -5634,7 +5566,7 @@ TickType_t uxTaskResetEventItemValue( void )

     /* For SMP, we need to take the kernel lock here to ensure nothing else
      * modifies the task's event item value simultaneously. */
-    taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
     {
         /* Get current core ID as we can no longer be preempted. */
         const BaseType_t xCurCoreID = portGET_CORE_ID();
@@ -5645,7 +5577,7 @@ TickType_t uxTaskResetEventItemValue( void )
          * queues and semaphores. */
         listSET_LIST_ITEM_VALUE( &( pxCurrentTCBs[ xCurCoreID ]->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCBs[ xCurCoreID ]->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
     }
-    taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
     /* Release the previously taken kernel lock. */

     return uxReturn;
@@ -5660,7 +5592,7 @@ TickType_t uxTaskResetEventItemValue( void )

     /* For SMP, we need to take the kernel lock here as we are about to
      * access kernel data structures. */
-    taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
     {
         /* Get current core ID as we can no longer be preempted. */
         const BaseType_t xCurCoreID = portGET_CORE_ID();
@@ -5675,7 +5607,7 @@ TickType_t uxTaskResetEventItemValue( void )
         xReturn = pxCurrentTCBs[ xCurCoreID ];
     }
     /* Release the previously taken kernel lock. */
-    taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );

     return xReturn;
 }
@@ -503,12 +503,12 @@ BaseType_t xTaskGetCoreID( TaskHandle_t xTask )

     /* For SMP, we need to take the kernel lock here as we are about to
      * access kernel data structures. */
-    taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
     {
         ulRunTimeCounter = xIdleTaskHandle[ xCoreID ]->ulRunTimeCounter;
     }
     /* Release the previously taken kernel lock. */
-    taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );

     return ulRunTimeCounter;
 }
@@ -534,12 +534,12 @@ BaseType_t xTaskGetCoreID( TaskHandle_t xTask )
     {
         /* For SMP, we need to take the kernel lock here as we are about
          * to access kernel data structures. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
         {
             ulReturn = xIdleTaskHandle[ xCoreID ]->ulRunTimeCounter / ulTotalTime;
         }
         /* Release the previously taken kernel lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
     }
     else
     {
@@ -13,6 +13,7 @@
 #include "sdkconfig.h"
 #include "freertos/FreeRTOS.h"
+#include "freertos/task.h"

 /* *INDENT-OFF* */
 #ifdef __cplusplus
@@ -26,52 +27,117 @@

 /*
  * The following macros are convenience macros used to account for different
- * thread safety behavior between Vanilla FreeRTOS (i.e., single-core) and ESP-IDF
- * FreeRTOS (i.e., multi-core SMP).
+ * thread safety behavior between single-core and SMP in ESP-IDF FreeRTOS.
  *
  * For thread saftey...
  *
- * - Vanilla FreeRTOS will use the following for thread safety (depending on situation)
+ * - Single-core will use the following for thread safety (depending on situation)
  *   - `vTaskSuspendAll()`/`xTaskResumeAll()` for non-deterministic operations
  *   - Critical sections or disabling interrupts for deterministic operations
- * - ESP-IDF FreeRTOS will always use critical sections (determinism is not supported)
- *
- * [refactor-todo]: Define these locally in each kernel source file (IDF-8161)
+ * - SMP will always use critical sections (determinism is not supported)
  */
 #if ( !CONFIG_FREERTOS_SMP && ( configNUM_CORES > 1 ) )

-    #define prvENTER_CRITICAL_OR_SUSPEND_ALL( x )    taskENTER_CRITICAL( ( x ) )
-    #define prvEXIT_CRITICAL_OR_RESUME_ALL( x )      ( { taskEXIT_CRITICAL( ( x ) ); pdFALSE; } )
-    #define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxInterruptStatus ) \
-    { \
-        taskENTER_CRITICAL_ISR( ( pxLock ) ); \
-        ( void ) ( uxInterruptStatus ); \
-    }
-    #define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxInterruptStatus ) \
-    { \
-        taskEXIT_CRITICAL_ISR( ( pxLock ) ); \
-        ( void ) ( uxInterruptStatus ); \
-    }
+    /* Macros that use a critical section when building for SMP */
+    #define prvENTER_CRITICAL_OR_SUSPEND_ALL( pxLock )    taskENTER_CRITICAL( ( pxLock ) )
+    #define prvEXIT_CRITICAL_OR_RESUME_ALL( pxLock )      ( { taskEXIT_CRITICAL( ( pxLock ) ); pdFALSE; } )
+    #define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxStatus ) \
+    { \
+        taskENTER_CRITICAL_ISR( ( pxLock ) ); \
+        ( void ) ( uxStatus ); \
+    }
+    #define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxStatus ) \
+    { \
+        taskEXIT_CRITICAL_ISR( ( pxLock ) ); \
+        ( void ) ( uxStatus ); \
+    }
+
+    /* Macros that enter/exit a critical section only when building for SMP */
+    #define prvENTER_CRITICAL_SMP_ONLY( pxLock )          taskENTER_CRITICAL( pxLock )
+    #define prvEXIT_CRITICAL_SMP_ONLY( pxLock )           taskEXIT_CRITICAL( pxLock )
+    #define prvENTER_CRITICAL_ISR_SMP_ONLY( pxLock )      taskENTER_CRITICAL_ISR( pxLock )
+    #define prvEXIT_CRITICAL_ISR_SMP_ONLY( pxLock )       taskEXIT_CRITICAL_ISR( pxLock )
+    #define prvENTER_CRITICAL_SAFE_SMP_ONLY( pxLock )     prvTaskEnterCriticalSafeSMPOnly( pxLock )
+    #define prvEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock )      prvTaskExitCriticalSafeSMPOnly( pxLock )
+
+    static inline __attribute__( ( always_inline ) )
+    void prvTaskEnterCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
+    {
+        if( portCHECK_IF_IN_ISR() == pdFALSE )
+        {
+            taskENTER_CRITICAL( pxLock );
+        }
+        else
+        {
+            #ifdef __clang_analyzer__
+                /* Teach clang-tidy that ISR version macro can be different */
+                configASSERT( 1 );
+            #endif
+            taskENTER_CRITICAL_ISR( pxLock );
+        }
+    }
+
+    static inline __attribute__( ( always_inline ) )
+    void prvTaskExitCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
+    {
+        if( portCHECK_IF_IN_ISR() == pdFALSE )
+        {
+            taskEXIT_CRITICAL( pxLock );
+        }
+        else
+        {
+            #ifdef __clang_analyzer__
+                /* Teach clang-tidy that ISR version macro can be different */
+                configASSERT( 1 );
+            #endif
+            taskEXIT_CRITICAL_ISR( pxLock );
+        }
+    }
+
+    /* Macros that enter/exit a critical section only when building for single-core */
+    #define prvENTER_CRITICAL_SC_ONLY( pxLock )
+    #define prvEXIT_CRITICAL_SC_ONLY( pxLock )
+
+    /* Macros that enable/disable interrupts only when building for SMP */
+    #define prvDISABLE_INTERRUPTS_ISR_SMP_ONLY()             portSET_INTERRUPT_MASK_FROM_ISR()
+    #define prvENABLE_INTERRUPTS_ISR_SMP_ONLY( uxStatus )    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxStatus )

 #elif ( !CONFIG_FREERTOS_SMP && ( configNUM_CORES == 1 ) )

-    #define prvENTER_CRITICAL_OR_SUSPEND_ALL( x )    ( { vTaskSuspendAll(); ( void ) ( x ); } )
-    #define prvEXIT_CRITICAL_OR_RESUME_ALL( x )      xTaskResumeAll()
-    #define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxInterruptStatus ) \
-    { \
-        ( uxInterruptStatus ) = portSET_INTERRUPT_MASK_FROM_ISR(); \
-        ( void ) ( pxLock ); \
-    }
-    #define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxInterruptStatus ) \
-    { \
-        portCLEAR_INTERRUPT_MASK_FROM_ISR( ( uxInterruptStatus ) ); \
-        ( void ) ( pxLock ); \
-    }
+    /* Macros that suspend the scheduler or disables interrupts when building for single-core */
+    #define prvENTER_CRITICAL_OR_SUSPEND_ALL( pxLock )    ( { vTaskSuspendAll(); ( void ) ( pxLock ); } )
+    #define prvEXIT_CRITICAL_OR_RESUME_ALL( pxLock )      xTaskResumeAll()
+    #define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxStatus ) \
+    { \
+        ( uxStatus ) = portSET_INTERRUPT_MASK_FROM_ISR(); \
+        ( void ) ( pxLock ); \
+    }
+    #define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxStatus ) \
+    { \
+        portCLEAR_INTERRUPT_MASK_FROM_ISR( ( uxStatus ) ); \
+        ( void ) ( pxLock ); \
+    }
+
+    /* Macros that enter/exit a critical section only when building for SMP */
+    #define prvENTER_CRITICAL_SMP_ONLY( pxLock )
+    #define prvEXIT_CRITICAL_SMP_ONLY( pxLock )
+    #define prvENTER_CRITICAL_ISR_SMP_ONLY( pxLock )
+    #define prvEXIT_CRITICAL_ISR_SMP_ONLY( pxLock )
+    #define prvENTER_CRITICAL_SAFE_SMP_ONLY( pxLock )
+    #define prvEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock )
+
+    /* Macros that enter/exit a critical section only when building for single-core */
+    #define prvENTER_CRITICAL_SC_ONLY( pxLock )           taskENTER_CRITICAL( pxLock )
+    #define prvEXIT_CRITICAL_SC_ONLY( pxLock )            taskEXIT_CRITICAL( pxLock )
+
+    /* Macros that enable/disable interrupts only when building for SMP */
+    #define prvDISABLE_INTERRUPTS_ISR_SMP_ONLY()             ( ( UBaseType_t ) 0 )
+    #define prvENABLE_INTERRUPTS_ISR_SMP_ONLY( uxStatus )    ( ( void ) uxStatus )

 #endif /* ( !CONFIG_FREERTOS_SMP && ( configNUM_CORES == 1 ) ) */

 /*
- * In ESP-IDF FreeRTOS (i.e., multi-core SMP) uses spinlocks to protect different
+ * In ESP-IDF FreeRTOS under SMP builds, spinlocks are to protect different
  * groups of data. This function is a wrapper to take the "xKernelLock" spinlock
  * of tasks.c.
  *
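With the helpers centralized in this header, each kernel source file uses one shared set of prv-prefixed macros instead of maintaining its own copy. A minimal sketch of the resulting call-site pattern (the function name below is hypothetical and not part of the change):

    static void prvExampleKernelAccess( void )
    {
        /* Expands to a real critical section only when configNUM_CORES > 1;
         * expands to nothing on single-core builds. */
        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
        {
            /* ... touch kernel data shared between cores ... */
        }
        prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
    }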
@@ -87,8 +153,6 @@
  * vEventGroupDelete() as both those functions will directly access event lists
  * (which are kernel data structures). Thus, a wrapper function must be provided
  * to take/release the "xKernelLock" from outside tasks.c.
- *
- * [refactor-todo]: Extern this locally in event groups (IDF-8161)
  */
 #if ( !CONFIG_FREERTOS_SMP && ( configNUM_CORES > 1 ) )
