Mirror of https://github.com/espressif/esp-idf.git
Merge branch 'feature/freertos_10.4.3_sync_critical_sections_vs_suspension' into 'master'

FreeRTOS(IDF): Resolve critical section (multi-core) vs scheduler suspension (single-core)

Closes IDF-3755

See merge request espressif/esp-idf!21002
commit 74ed2aa2ee
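Note: the substance of this change is one idea applied across the kernel. Vanilla single-core FreeRTOS protects kernel data structures by suspending the scheduler (vTaskSuspendAll()/xTaskResumeAll()); the ESP-IDF SMP build cannot rely on that, because the other core keeps running, so it must use spinlock-based critical sections instead. The commit hides the choice behind a family of prv...() convenience macros added to task.h (see the task.h hunk below). Their essence, quoted from that hunk:

    #if ( configNUM_CORES > 1 )
        /* SMP: a true critical section on the given spinlock. Exiting a
         * critical section never yields, so the exit macro evaluates to
         * pdFALSE ("no yield has occurred yet"). */
        #define prvENTER_CRITICAL_OR_SUSPEND_ALL( x )    taskENTER_CRITICAL( ( x ) )
        #define prvEXIT_CRITICAL_OR_RESUME_ALL( x )      ( { taskEXIT_CRITICAL( ( x ) ); pdFALSE; } )
    #else
        /* Single core: classic scheduler suspension; the lock argument is
         * unused. xTaskResumeAll() reports whether it already yielded. */
        #define prvENTER_CRITICAL_OR_SUSPEND_ALL( x )    ( { vTaskSuspendAll(); ( void ) ( x ); } )
        #define prvEXIT_CRITICAL_OR_RESUME_ALL( x )      xTaskResumeAll()
    #endif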
File: event_groups.c

@@ -80,9 +80,7 @@ typedef struct EventGroupDef_t
         uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */
     #endif

-    #ifdef ESP_PLATFORM
     portMUX_TYPE xEventGroupLock; /* Spinlock required for SMP critical sections */
-    #endif // ESP_PLATFORM
 } EventGroup_t;

 /*-----------------------------------------------------------*/

@@ -225,11 +223,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
         }
     #endif

-    #ifdef ESP_PLATFORM /* IDF-3755 */
-        taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
-    #else
-        vTaskSuspendAll();
-    #endif // ESP_PLATFORM
+    prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
     {
         uxOriginalBitValue = pxEventBits->uxEventBits;

@@ -272,12 +266,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
             }
         }
     }
-    #ifdef ESP_PLATFORM /* IDF-3755 */
-        taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
-        xAlreadyYielded = pdFALSE;
-    #else
-        xAlreadyYielded = xTaskResumeAll();
-    #endif // ESP_PLATFORM
+    xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );

     if( xTicksToWait != ( TickType_t ) 0 )
     {
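Note: the exit path above is where the macro's return value earns its keep. On single core, xTaskResumeAll() may itself perform the context switch and returns pdTRUE if it did; on SMP, taskEXIT_CRITICAL() never switches, so the macro yields pdFALSE and the code that follows requests the switch explicitly. A sketch of the standard caller pattern (illustrative, following the usual FreeRTOS shape rather than quoting this file verbatim):

    xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );

    if( xTicksToWait != ( TickType_t ) 0 )
    {
        if( xAlreadyYielded == pdFALSE )
        {
            /* The task was put on the event list while the lock was held;
             * if nothing has yielded yet, do it now. */
            portYIELD_WITHIN_API();
        }
    }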
@@ -361,11 +350,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
         }
     #endif

-    #ifdef ESP_PLATFORM /* IDF-3755 */
-        taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
-    #else
-        vTaskSuspendAll();
-    #endif // ESP_PLATFORM
+    prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
     {
         const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits;

@@ -433,12 +418,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
                 traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
             }
         }
-    #ifdef ESP_PLATFORM /* IDF-3755 */
-        taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
-        xAlreadyYielded = pdFALSE;
-    #else
-        xAlreadyYielded = xTaskResumeAll();
-    #endif // ESP_PLATFORM
+    xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );

     if( xTicksToWait != ( TickType_t ) 0 )
     {

@@ -581,15 +561,14 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,

     pxList = &( pxEventBits->xTasksWaitingForBits );
     pxListEnd = listGET_END_MARKER( pxList ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */
-    #ifdef ESP_PLATFORM /* IDF-3755 */
-        taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
+    prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
+    #if ( configNUM_CORES > 1 )

         /* We are about to traverse a task list which is a kernel data structure.
          * Thus we need to call vTaskTakeKernelLock() to take the kernel lock. */
         vTaskTakeKernelLock();
-    #else
-        vTaskSuspendAll();
-    #endif // ESP_PLATFORM
+    #endif /* configNUM_CORES > 1 */
     {
         traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );

@@ -661,13 +640,11 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
              * bit was set in the control word. */
             pxEventBits->uxEventBits &= ~uxBitsToClear;
         }
-    #ifdef ESP_PLATFORM /* IDF-3755 */
-        /* Release the previously taken kernel lock, then release the event group spinlock. */
+    #if ( configNUM_CORES > 1 )
+        /* Release the previously taken kernel lock. */
         vTaskReleaseKernelLock();
-        taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
-    #else
-        ( void ) xTaskResumeAll();
-    #endif // ESP_PLATFORM
+    #endif /* configNUM_CORES > 1 */
+    ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );

     return pxEventBits->uxEventBits;
 }

@@ -678,17 +655,15 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup )
     EventGroup_t * pxEventBits = xEventGroup;
     const List_t * pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );

-    {
-        traceEVENT_GROUP_DELETE( xEventGroup );
-
-        /* IDF-3755 */
-        taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
-        #ifdef ESP_PLATFORM
+    prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
+    #if ( configNUM_CORES > 1 )

         /* We are about to traverse a task list which is a kernel data structure.
          * Thus we need to call vTaskTakeKernelLock() to take the kernel lock. */
         vTaskTakeKernelLock();
-        #endif
+    #endif /* configNUM_CORES > 1 */
+    {
+        traceEVENT_GROUP_DELETE( xEventGroup );

         while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 )
         {
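Note: xEventGroupSetBits() and vEventGroupDelete() now follow a fixed two-level locking order, visible in the hunks above and below: the event group's own spinlock first, then the kernel lock (which guards the task lists), released in the reverse order. Condensed from the surrounding hunks:

    prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
    #if ( configNUM_CORES > 1 )
        vTaskTakeKernelLock();     /* task lists are kernel data structures */
    #endif

    /* ... walk xTasksWaitingForBits, unblocking tasks ... */

    #if ( configNUM_CORES > 1 )
        vTaskReleaseKernelLock();  /* release in reverse acquisition order */
    #endif
    ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );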
@@ -697,12 +672,12 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup )
             configASSERT( pxTasksWaitingForBits->xListEnd.pxNext != ( const ListItem_t * ) &( pxTasksWaitingForBits->xListEnd ) );
             vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
         }
+    }
-    #ifdef ESP_PLATFORM
+    #if ( configNUM_CORES > 1 )
         /* Release the previously taken kernel lock. */
         vTaskReleaseKernelLock();
-    #endif
-    taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
+    #endif /* configNUM_CORES > 1 */
+    prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );

     #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
         {

@@ -724,7 +699,6 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup )
             }
         }
     #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
-    }
 }
 /*-----------------------------------------------------------*/

File: task.h

@@ -3407,6 +3407,32 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) PRIVILEGED_FUNCTION;
  *----------------------------------------------------------*/

 /** @cond !DOC_EXCLUDE_HEADER_SECTION */

+/*
+ * Various convenience macros for critical sections and scheduler suspension
+ * called by other FreeRTOS sources and not meant to be called by the
+ * application. The behavior of each macro depends on whether FreeRTOS is
+ * currently configured for SMP or single core.
+ */
+#if ( configNUM_CORES > 1 )
+    #define prvENTER_CRITICAL_OR_SUSPEND_ALL( x )    taskENTER_CRITICAL( ( x ) )
+    #define prvEXIT_CRITICAL_OR_RESUME_ALL( x )      ( { taskEXIT_CRITICAL( ( x ) ); pdFALSE; } )
+    #define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxInterruptStatus ) \
+        taskENTER_CRITICAL_ISR( ( pxLock ) ); \
+        ( void ) ( uxInterruptStatus );
+    #define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxInterruptStatus ) \
+        taskEXIT_CRITICAL_ISR( ( pxLock ) ); \
+        ( void ) ( uxInterruptStatus );
+#else /* configNUM_CORES > 1 */
+    #define prvENTER_CRITICAL_OR_SUSPEND_ALL( x )    ( { vTaskSuspendAll(); ( void ) ( x ); } )
+    #define prvEXIT_CRITICAL_OR_RESUME_ALL( x )      xTaskResumeAll()
+    #define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxInterruptStatus ) \
+        ( uxSavedInterruptStatus ) = portSET_INTERRUPT_MASK_FROM_ISR(); \
+        ( void ) ( pxLock );
+    #define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxInterruptStatus ) \
+        portCLEAR_INTERRUPT_MASK_FROM_ISR( ( uxSavedInterruptStatus ) ); \
+        ( void ) ( pxLock );
+#endif /* configNUM_CORES > 1 */
+
 /*
  * Return the handle of the task running on a certain CPU. Because of
  * the nature of SMP processing, there is no guarantee that this
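Note on the ISR-flavoured pair just added: in the single-core branch the macro body assigns to uxSavedInterruptStatus directly rather than to its uxInterruptStatus parameter, so callers must use a local variable with exactly that name (the queue.c call sites below do). Expanded at a call site, the two configurations behave like this (illustrative expansion):

    /* Call site: */
    prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );

    /* SMP expansion: take the spinlock; the status argument is unused. */
    taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
    ( void ) ( uxSavedInterruptStatus );

    /* Single-core expansion: mask interrupts; the lock argument is unused. */
    ( uxSavedInterruptStatus ) = portSET_INTERRUPT_MASK_FROM_ISR();
    ( void ) ( &( pxQueue->xQueueLock ) );

Note also that prvEXIT_CRITICAL_OR_RESUME_ALL() relies on GCC statement expressions (the ( { ... } ) form) so the SMP variant can appear on the right-hand side of an assignment.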
@@ -3519,6 +3545,8 @@ void vTaskPlaceOnEventListRestricted( List_t * const pxEventList,
                                       TickType_t xTicksToWait,
                                       const BaseType_t xWaitIndefinitely ) PRIVILEGED_FUNCTION;

+#if ( configNUM_CORES > 1 )
+
 /*
  * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN
  * INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER.

@@ -3533,8 +3561,9 @@ void vTaskPlaceOnEventListRestricted( List_t * const pxEventList,
  * of delegating the entire responsibility to one of vTask...EventList()
  * functions).
  */
 void vTaskTakeKernelLock( void );
 void vTaskReleaseKernelLock( void );
+#endif /* configNUM_CORES > 1 */

 /*
  * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN

File: port.c

@@ -180,14 +180,24 @@ BaseType_t xPortSysTickHandler(void)

     // Call FreeRTOS Increment tick function
     BaseType_t xSwitchRequired;
-#if CONFIG_FREERTOS_UNICORE
-    xSwitchRequired = xTaskIncrementTick();
-#else
+#if ( configNUM_CORES > 1 )
+    /*
+    For SMP, xTaskIncrementTick() will internally enter a critical section. But only core 0 calls xTaskIncrementTick()
+    while core 1 should call xTaskIncrementTickOtherCores().
+    */
     if (xPortGetCoreID() == 0) {
         xSwitchRequired = xTaskIncrementTick();
     } else {
         xSwitchRequired = xTaskIncrementTickOtherCores();
     }
+#else // configNUM_CORES > 1
+    /*
+    Vanilla (single core) FreeRTOS expects that xTaskIncrementTick() cannot be interrupted (i.e., no nested interrupts).
+    Thus we have to disable interrupts before calling it.
+    */
+    UBaseType_t uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+    xSwitchRequired = xTaskIncrementTick();
+    portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedInterruptStatus);
 #endif

     // Check if yield is required

File: portmacro.h

@@ -447,6 +447,13 @@ FORCE_INLINE_ATTR BaseType_t xPortGetCoreID(void);

 #define portASSERT_IF_IN_ISR() vPortAssertIfInISR()

+/**
+ * @brief Used by FreeRTOS functions to call the correct version of critical section API
+ */
+#if ( configNUM_CORES > 1 )
+#define portCHECK_IF_IN_ISR() xPortInIsrContext()
+#endif
+
 // ------------------ Critical Sections --------------------

 /**
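Note: portCHECK_IF_IN_ISR() gives generic kernel code a run-time way to choose between the task-level and ISR-level critical-section APIs when the same function can be entered from both contexts. The queue-set and event-list changes later in this diff use exactly this shape:

    #if ( configNUM_CORES > 1 )
        if( portCHECK_IF_IN_ISR() == pdFALSE )
        {
            taskENTER_CRITICAL( &xSomeLock );       /* task context */
        }
        else
        {
            taskENTER_CRITICAL_ISR( &xSomeLock );   /* ISR context */
        }
    #endif

(xSomeLock is a placeholder name, not an identifier from the source.)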
File: queue.c

@@ -1095,12 +1095,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
      * read, instead return a flag to say whether a context switch is required or
      * not (i.e. has a task with a higher priority than us been woken by this
      * post). */
-    #if ( configNUM_CORES > 1 )
-        taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
-        ( void ) uxSavedInterruptStatus;
-    #else
-        uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
-    #endif
+    prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
     {
         if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
         {
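Note: because the single-core expansion of the enter macro assigns to uxSavedInterruptStatus (see the task.h note above), each FromISR function keeps its local declaration of that variable even though the SMP build only casts it to void. A minimal sketch of the resulting function shape (hypothetical helper, not from the source):

    BaseType_t xExampleFromISR( Queue_t * pxQueue )
    {
        BaseType_t xReturn = pdPASS;
        UBaseType_t uxSavedInterruptStatus;  /* written by the single-core expansion */

        prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
        {
            /* ... short, bounded work on the queue ... */
        }
        prvEXIT_CRITICAL_OR_UNMASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );

        return xReturn;
    }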
@@ -1236,11 +1231,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
                 xReturn = errQUEUE_FULL;
             }
         }
-    #if ( configNUM_CORES > 1 )
-        taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
-    #else
-        portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
-    #endif
+    prvEXIT_CRITICAL_OR_UNMASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );

     return xReturn;
 }

@@ -1286,12 +1277,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
      * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

-    #if ( configNUM_CORES > 1 )
-        taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
-        ( void ) uxSavedInterruptStatus;
-    #else
-        uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
-    #endif
+    prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
     {
         const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

@@ -1422,11 +1408,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
                 xReturn = errQUEUE_FULL;
             }
         }
-    #if ( configNUM_CORES > 1 )
-        taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
-    #else
-        portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
-    #endif
+    prvEXIT_CRITICAL_OR_UNMASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );

     return xReturn;
 }

@@ -2094,12 +2076,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
      * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

-    #if ( configNUM_CORES > 1 )
-        taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
-        ( void ) uxSavedInterruptStatus;
-    #else
-        uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
-    #endif
+    prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
     {
         const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

@@ -2170,12 +2147,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
             traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
         }
     }
-    #if ( configNUM_CORES > 1 )
-        taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
-    #else
-        portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
-    #endif
+    prvEXIT_CRITICAL_OR_UNMASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );

     return xReturn;
 }

@@ -2209,12 +2181,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
      * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

-    #if ( configNUM_CORES > 1 )
-        taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
-        ( void ) uxSavedInterruptStatus;
-    #else
-        uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
-    #endif
+    prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
     {
         /* Cannot block in an ISR, so check there is data available. */
         if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )

@@ -2235,12 +2202,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
             traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
         }
     }
-    #if ( configNUM_CORES > 1 )
-        taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
-    #else
-        portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
-    #endif
+    prvEXIT_CRITICAL_OR_UNMASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );

     return xReturn;
 }

@@ -3269,8 +3231,21 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
     configASSERT( pxQueueSetContainer );
     configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );

-    /* We need to also acquire the queue set's spinlock as well. */
+    #if ( configNUM_CORES > 1 )
+        /* In SMP, queue sets have their own spinlock. Thus we need to also
+         * acquire the queue set's spinlock before accessing it. This
+         * function can also be called from an ISR context, so we need to
+         * check whether we are in an ISR. */
+        if( portCHECK_IF_IN_ISR() == pdFALSE )
+        {
             taskENTER_CRITICAL( &( pxQueueSetContainer->xQueueLock ) );
+        }
+        else
+        {
+            taskENTER_CRITICAL_ISR( &( pxQueueSetContainer->xQueueLock ) );
+        }
+    #endif /* configNUM_CORES > 1 */

     if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
     {

@@ -3321,8 +3296,17 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
             mtCOVERAGE_TEST_MARKER();
         }

+    #if ( configNUM_CORES > 1 )
         /* Release the previously acquired queue set's spinlock. */
+        if( portCHECK_IF_IN_ISR() == pdFALSE )
+        {
             taskEXIT_CRITICAL( &( pxQueueSetContainer->xQueueLock ) );
+        }
+        else
+        {
+            taskEXIT_CRITICAL_ISR( &( pxQueueSetContainer->xQueueLock ) );
+        }
+    #endif /* configNUM_CORES > 1 */

     return xReturn;
 }

File: stream_buffer.c

@@ -60,11 +60,9 @@
  * or #defined the notification macros away, then provide default implementations
  * that uses task notifications. */
 /*lint -save -e9026 Function like macros allowed and needed here so they can be overridden. */

 #ifndef sbRECEIVE_COMPLETED
-    #ifdef ESP_PLATFORM /* IDF-3775 */
     #define sbRECEIVE_COMPLETED( pxStreamBuffer ) \
-        taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) ); \
+        prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxStreamBuffer->xStreamBufferLock ) ); \
         { \
             if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \
             { \

@@ -74,21 +72,7 @@
                 ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \
             } \
         } \
-        taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
-    #else /* ifdef ESP_PLATFORM */
-        #define sbRECEIVE_COMPLETED( pxStreamBuffer ) \
-            vTaskSuspendAll(); \
-            { \
-                if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \
-                { \
-                    ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToSend, \
-                                          ( uint32_t ) 0, \
-                                          eNoAction ); \
-                    ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \
-                } \
-            } \
-            ( void ) xTaskResumeAll();
-    #endif // ESP_PLATFORM
+        ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxStreamBuffer->xStreamBufferLock ) );
 #endif /* sbRECEIVE_COMPLETED */

 #ifndef sbRECEIVE_COMPLETED_FROM_ISR
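Note: the ( void ) cast in the new sbRECEIVE_COMPLETED() deliberately discards the already-yielded flag, since these macros run in task context where nothing further depends on it. Callers that do care capture it instead; both uses appear in this diff:

    /* Flag needed (event groups, task delays): */
    xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock );

    /* Flag irrelevant (stream buffer notification macros): */
    ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxStreamBuffer->xStreamBufferLock ) );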
@@ -116,9 +100,8 @@
  * or #defined the notification macro away, them provide a default implementation
  * that uses task notifications. */
 #ifndef sbSEND_COMPLETED
-    #ifdef ESP_PLATFORM /* IDF-3755 */
     #define sbSEND_COMPLETED( pxStreamBuffer ) \
-        taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) ); \
+        prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxStreamBuffer->xStreamBufferLock ) ); \
         { \
             if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \
             { \

@@ -128,21 +111,7 @@
                 ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \
             } \
         } \
-        taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
-    #else /* ifdef ESP_PLATFORM */
-        #define sbSEND_COMPLETED( pxStreamBuffer ) \
-            vTaskSuspendAll(); \
-            { \
-                if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \
-                { \
-                    ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToReceive, \
-                                          ( uint32_t ) 0, \
-                                          eNoAction ); \
-                    ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \
-                } \
-            } \
-            ( void ) xTaskResumeAll();
-    #endif // ESP_PLATFORM
+        ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxStreamBuffer->xStreamBufferLock ) );
 #endif /* sbSEND_COMPLETED */

 #ifndef sbSEND_COMPLETE_FROM_ISR

@@ -309,7 +278,6 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
             pucAllocatedMemory = NULL;
         }

-
         if( pucAllocatedMemory != NULL )
         {
             prvInitialiseNewStreamBuffer( ( StreamBuffer_t * ) pucAllocatedMemory, /* Structure at the start of the allocated memory. */ /*lint !e9087 Safe cast as allocated memory is aligned. */ /*lint !e826 Area is not too small and alignment is guaranteed provided malloc() behaves as expected and returns aligned buffer. */

File: tasks.c

@@ -403,12 +403,9 @@ PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /*< Poi
 PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
 PRIVILEGED_DATA static List_t xPendingReadyList[ configNUM_CORES ]; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */

-#ifdef ESP_PLATFORM
 /* Spinlock required for SMP critical sections. This lock protects all of the
  * kernel's data structures such as various tasks lists, flags, and tick counts. */
 PRIVILEGED_DATA static portMUX_TYPE xKernelLock = portMUX_INITIALIZER_UNLOCKED;
-#endif // ESP_PLATFORM

 #if ( INCLUDE_vTaskDelete == 1 )

@@ -1537,11 +1534,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
         configASSERT( ( xTimeIncrement > 0U ) );
         configASSERT( xTaskGetSchedulerState() != taskSCHEDULER_SUSPENDED );

-        #ifdef ESP_PLATFORM /* IDF-3755 */
-            taskENTER_CRITICAL( &xKernelLock );
-        #else
-            vTaskSuspendAll();
-        #endif // ESP_PLATFORM
+        prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock );
         {
             /* Minor optimisation. The tick count cannot change in this
              * block. */

@@ -1597,12 +1590,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
                 mtCOVERAGE_TEST_MARKER();
             }
         }
-        #ifdef ESP_PLATFORM /* IDF-3755 */
-            taskEXIT_CRITICAL( &xKernelLock );
-            xAlreadyYielded = pdFALSE;
-        #else
-            xAlreadyYielded = xTaskResumeAll();
-        #endif // ESP_PLATFORM
+        xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock );

         /* Force a reschedule if xTaskResumeAll has not already done so, we may
          * have put ourselves to sleep. */

@@ -1631,11 +1619,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
         if( xTicksToDelay > ( TickType_t ) 0U )
         {
             configASSERT( xTaskGetSchedulerState() != taskSCHEDULER_SUSPENDED );
-            #ifdef ESP_PLATFORM /* IDF-3755 */
-                taskENTER_CRITICAL( &xKernelLock );
-            #else
-                vTaskSuspendAll();
-            #endif // ESP_PLATFORM
+            prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock );
             {
                 traceTASK_DELAY();

@@ -1648,12 +1632,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
                  * executing task. */
                 prvAddCurrentTaskToDelayedList( xTicksToDelay, pdFALSE );
             }
-            #ifdef ESP_PLATFORM /* IDF-3755 */
-                taskEXIT_CRITICAL( &xKernelLock );
-                xAlreadyYielded = pdFALSE;
-            #else
-                xAlreadyYielded = xTaskResumeAll();
-            #endif // ESP_PLATFORM
+            xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock );
         }
         else
         {

@@ -2836,11 +2815,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
         /* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */
         configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN );

-        #ifdef ESP_PLATFORM /* IDF-3755 */
-            taskENTER_CRITICAL( &xKernelLock );
-        #else
-            vTaskSuspendAll();
-        #endif // ESP_PLATFORM
+        prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock );
         {
             /* Search the ready lists. */
             do

@@ -2886,11 +2861,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
                 }
             #endif
         }
-        #ifdef ESP_PLATFORM /* IDF-3755 */
-            taskEXIT_CRITICAL( &xKernelLock );
-        #else
-            ( void ) xTaskResumeAll();
-        #endif // ESP_PLATFORM
+        ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock );

         return pxTCB;
     }

@@ -2906,11 +2877,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
     {
         UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES;

-        #ifdef ESP_PLATFORM /* IDF-3755 */
-            taskENTER_CRITICAL( &xKernelLock );
-        #else
-            vTaskSuspendAll();
-        #endif // ESP_PLATFORM
+        prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock );
         {
             /* Is there a space in the array for each task in the system? */
             if( uxArraySize >= uxCurrentNumberOfTasks )

@@ -2969,11 +2936,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
                 mtCOVERAGE_TEST_MARKER();
             }
         }
-        #ifdef ESP_PLATFORM /* IDF-3755 */
-            taskEXIT_CRITICAL( &xKernelLock );
-        #else
-            ( void ) xTaskResumeAll();
-        #endif // ESP_PLATFORM
+        ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock );

         return uxTask;
     }

@@ -3008,10 +2971,12 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char

     void vTaskStepTick( const TickType_t xTicksToJump )
     {
-        #ifdef ESP_PLATFORM
-            /* For SMP, we require a critical section to access xTickCount */
+        #if ( configNUM_CORES > 1 )
+            /* Although this is called with the scheduler suspended. For SMP, we
+             * still need to take the kernel lock to access xTickCount. */
             taskENTER_CRITICAL( &xKernelLock );
-        #endif
+        #endif /* configNUM_CORES > 1 */

         /* Correct the tick count value after a period during which the tick
          * was suppressed. Note this does *not* call the tick hook function for

@@ -3019,9 +2984,11 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
         configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );
         xTickCount += xTicksToJump;
         traceINCREASE_TICK_COUNT( xTicksToJump );
-        #ifdef ESP_PLATFORM
+
+        #if ( configNUM_CORES > 1 )
+            /* Release the previously taken kernel lock. */
             taskEXIT_CRITICAL( &xKernelLock );
-        #endif
+        #endif /* configNUM_CORES > 1 */
     }

 #endif /* configUSE_TICKLESS_IDLE */

@@ -3042,16 +3009,17 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
     /* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occurring when
      * the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */
     vTaskSuspendAll();
-    #ifdef ESP_PLATFORM
+    #if ( configNUM_CORES > 1 )

-        /* For SMP, we still require a critical section to access xPendedTicks even
-         * if the scheduler is disabled. */
+        /* Although the scheduler is suspended. For SMP, we still need to take
+         * the kernel lock to access xPendedTicks. */
         taskENTER_CRITICAL( &xKernelLock );
+    #endif /* configNUM_CORES > 1 */
         xPendedTicks += xTicksToCatchUp;
+    #if ( configNUM_CORES > 1 )
+        /* Release the previously taken kernel lock. */
         taskEXIT_CRITICAL( &xKernelLock );
-    #else // ESP_PLATFORM
-        xPendedTicks += xTicksToCatchUp;
-    #endif // ESP_PLATFORM
+    #endif /* configNUM_CORES > 1 */
     xYieldOccurred = xTaskResumeAll();

     return xYieldOccurred;
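Note: xTaskCatchUpTicks() above is the clearest illustration of why scheduler suspension alone is insufficient on SMP: vTaskSuspendAll() only stops the scheduler on the calling core, while the other core may still be manipulating xPendedTicks. The new shape, condensed:

    vTaskSuspendAll();                       /* stops scheduling on this core only */
    #if ( configNUM_CORES > 1 )
        taskENTER_CRITICAL( &xKernelLock );  /* the other core keeps running */
    #endif
    xPendedTicks += xTicksToCatchUp;
    #if ( configNUM_CORES > 1 )
        taskEXIT_CRITICAL( &xKernelLock );
    #endif
    xYieldOccurred = xTaskResumeAll();       /* pended ticks are replayed here */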
@@ -3067,11 +3035,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )

         configASSERT( pxTCB );

-        #ifdef ESP_PLATFORM /* IDF-3755 */
-            taskENTER_CRITICAL( &xKernelLock );
-        #else
-            vTaskSuspendAll();
-        #endif // ESP_PLATFORM
+        prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock );
         {
             /* A task can only be prematurely removed from the Blocked state if
              * it is actually in the Blocked state. */

@@ -3134,11 +3098,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
                 xReturn = pdFAIL;
             }
         }
-        #ifdef ESP_PLATFORM /* IDF-3755 */
-            taskEXIT_CRITICAL( &xKernelLock );
-        #else
-            ( void ) xTaskResumeAll();
-        #endif // ESP_PLATFORM
+        ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock );

         return xReturn;
     }

@@ -3148,14 +3108,11 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )

 BaseType_t xTaskIncrementTick( void )
 {
-    #ifdef ESP_PLATFORM
     #if ( configNUM_CORES > 1 )
-        {
         /* Only Core 0 should ever call this function. */
         configASSERT( xPortGetCoreID() == 0 );
-        }
     #endif /* ( configNUM_CORES > 1 ) */
-    #endif // ESP_PLATFORM
     TCB_t * pxTCB;
     TickType_t xItemValue;
     BaseType_t xSwitchRequired = pdFALSE;

@@ -3165,15 +3122,13 @@ BaseType_t xTaskIncrementTick( void )
      * tasks to be unblocked. */
     traceTASK_INCREMENT_TICK( xTickCount );

-    #ifdef ESP_PLATFORM
+    #if ( configNUM_CORES > 1 )

-        /* We need a critical section here as we are about to access kernel data
-         * structures:
-         * - Other cores could be accessing them simultaneously
-         * - Unlike other ports, we call xTaskIncrementTick() without disabling nested
-         *   interrupts, which in turn is disabled by the critical section. */
+        /* For SMP, we need to take the kernel lock here as we are about to
+         * access kernel data structures (unlike single core which calls this
+         * function with interrupts disabled). */
         taskENTER_CRITICAL_ISR( &xKernelLock );
-    #endif // ESP_PLATFORM
+    #endif /* ( configNUM_CORES > 1 ) */

     if( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) pdFALSE )
     {

@@ -3261,16 +3216,12 @@ BaseType_t xTaskIncrementTick( void )
                     /* Preemption is on, but a context switch should
                      * only be performed if the unblocked task has a
                      * priority that is equal to or higher than the
-                     * currently executing task. */
-                    #if defined( ESP_PLATFORM ) && ( configNUM_CORES > 1 )
-                        /* Since this function is only run on core 0, we
-                         * only need to switch contexts if the unblocked task
-                         * can run on core 0. */
-                        if( ( ( pxTCB->xCoreID == 0 ) || ( pxTCB->xCoreID == tskNO_AFFINITY ) ) && ( pxTCB->uxPriority >= pxCurrentTCB[ 0 ]->uxPriority ) )
-                    #else
-                        if( pxTCB->uxPriority >= pxCurrentTCB[ 0 ]->uxPriority )
-                    #endif
+                     * currently executing task.
+                     *
+                     * For SMP, since this function is only run on core
+                     * 0, only need to switch contexts if the unblocked
+                     * task can run on core 0. */
+                    if( ( taskCAN_RUN_ON_CORE( 0, pxTCB->xCoreID ) == pdTRUE ) && ( pxTCB->uxPriority >= pxCurrentTCB[ 0 ]->uxPriority ) )
                     {
                         xSwitchRequired = pdTRUE;
                     }
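Note: taskCAN_RUN_ON_CORE( 0, pxTCB->xCoreID ) replaces the open-coded affinity test deleted above. Judging from the removed condition, it is equivalent to the following (a reconstruction for readability; the macro's actual definition is not part of this diff):

    /* Reconstructed semantics, inferred from the removed code: */
    #define taskCAN_RUN_ON_CORE( xCore, xCoreID ) \
        ( ( ( ( xCoreID ) == ( xCore ) ) || ( ( xCoreID ) == tskNO_AFFINITY ) ) ? pdTRUE : pdFALSE )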
@ -3300,23 +3251,22 @@ BaseType_t xTaskIncrementTick( void )
|
|||||||
}
|
}
|
||||||
#endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
|
#endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
|
||||||
|
|
||||||
#ifdef ESP_PLATFORM
|
|
||||||
#if ( configUSE_TICK_HOOK == 1 )
|
#if ( configUSE_TICK_HOOK == 1 )
|
||||||
TickType_t xPendedCounts = xPendedTicks; /* Non-volatile copy. */
|
TickType_t xPendedTicksTemp = xPendedTicks; /* Non-volatile copy. */
|
||||||
#endif /* configUSE_TICK_HOOK */
|
#endif /* configUSE_TICK_HOOK */
|
||||||
/* Exit the critical section as we have finished accessing the kernel data structures. */
|
|
||||||
|
#if ( configNUM_CORES > 1 )
|
||||||
|
|
||||||
|
/* Release the previously taken kernel lock as we have finished
|
||||||
|
* accessing the kernel data structures. */
|
||||||
taskEXIT_CRITICAL_ISR( &xKernelLock );
|
taskEXIT_CRITICAL_ISR( &xKernelLock );
|
||||||
#endif // ESP_PLATFORM
|
#endif /* ( configNUM_CORES > 1 ) */
|
||||||
|
|
||||||
#if ( configUSE_TICK_HOOK == 1 )
|
#if ( configUSE_TICK_HOOK == 1 )
|
||||||
{
|
{
|
||||||
/* Guard against the tick hook being called when the pended tick
|
/* Guard against the tick hook being called when the pended tick
|
||||||
* count is being unwound (when the scheduler is being unlocked). */
|
* count is being unwound (when the scheduler is being unlocked). */
|
||||||
#ifdef ESP_PLATFORM
|
if( xPendedTicksTemp == ( TickType_t ) 0 )
|
||||||
if( xPendedCounts == ( TickType_t ) 0 )
|
|
||||||
#else
|
|
||||||
if( xPendedTicks == ( TickType_t ) 0 )
|
|
||||||
#endif
|
|
||||||
{
|
{
|
||||||
vApplicationTickHook();
|
vApplicationTickHook();
|
||||||
}
|
}
|
||||||
@ -3343,10 +3293,12 @@ BaseType_t xTaskIncrementTick( void )
|
|||||||
else
|
else
|
||||||
{
|
{
|
||||||
++xPendedTicks;
|
++xPendedTicks;
|
||||||
#ifdef ESP_PLATFORM
|
#if ( configNUM_CORES > 1 )
|
||||||
/* Exit the critical section as we have finished accessing the kernel data structures. */
|
|
||||||
|
/* Release the previously taken kernel lock as we have finished
|
||||||
|
* accessing the kernel data structures. */
|
||||||
taskEXIT_CRITICAL_ISR( &xKernelLock );
|
taskEXIT_CRITICAL_ISR( &xKernelLock );
|
||||||
#endif // ESP_PLATFORM
|
#endif /* ( configNUM_CORES > 1 ) */
|
||||||
|
|
||||||
/* The tick hook gets called at regular intervals, even if the
|
/* The tick hook gets called at regular intervals, even if the
|
||||||
* scheduler is locked. */
|
* scheduler is locked. */
|
||||||
@ -3378,12 +3330,8 @@ BaseType_t xTaskIncrementTick( void )
|
|||||||
|
|
||||||
if( uxSchedulerSuspended[ xCoreID ] == ( UBaseType_t ) pdFALSE )
|
if( uxSchedulerSuspended[ xCoreID ] == ( UBaseType_t ) pdFALSE )
|
||||||
{
|
{
|
||||||
/* We need a critical section here as we are about to access kernel data
|
/* We need take the kernel lock here as we are about to access
|
||||||
* structures:
|
* kernel data structures. */
|
||||||
* - Other cores could be accessing them simultaneously
|
|
||||||
* - Unlike other ports, we call xTaskIncrementTick() without disabling
|
|
||||||
* nested interrupts, which in turn is disabled by the critical
|
|
||||||
* section. */
|
|
||||||
taskENTER_CRITICAL_ISR( &xKernelLock );
|
taskENTER_CRITICAL_ISR( &xKernelLock );
|
||||||
|
|
||||||
/* A task being unblocked cannot cause an immediate context switch
|
/* A task being unblocked cannot cause an immediate context switch
|
||||||
@ -3419,7 +3367,8 @@ BaseType_t xTaskIncrementTick( void )
|
|||||||
}
|
}
|
||||||
#endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
|
#endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
|
||||||
|
|
||||||
/* Exit the critical section as we have finished accessing the kernel data structures. */
|
/* Release the previously taken kernel lock as we have finished
|
||||||
|
* accessing the kernel data structures. */
|
||||||
taskEXIT_CRITICAL_ISR( &xKernelLock );
|
taskEXIT_CRITICAL_ISR( &xKernelLock );
|
||||||
|
|
||||||
#if ( configUSE_PREEMPTION == 1 )
|
#if ( configUSE_PREEMPTION == 1 )
|
||||||
@ -3508,26 +3457,18 @@ BaseType_t xTaskIncrementTick( void )
|
|||||||
{
|
{
|
||||||
TCB_t * pxTCB;
|
TCB_t * pxTCB;
|
||||||
TaskHookFunction_t xReturn;
|
TaskHookFunction_t xReturn;
|
||||||
|
UBaseType_t uxSavedInterruptStatus;
|
||||||
|
|
||||||
/* If xTask is NULL then set the calling task's hook. */
|
/* If xTask is NULL then set the calling task's hook. */
|
||||||
pxTCB = prvGetTCBFromHandle( xTask );
|
pxTCB = prvGetTCBFromHandle( xTask );
|
||||||
|
|
||||||
/* Save the hook function in the TCB. A critical section is required as
|
/* Save the hook function in the TCB. A critical section is required as
|
||||||
* the value can be accessed from an interrupt. */
|
* the value can be accessed from an interrupt. */
|
||||||
#if ( configNUM_CORES > 1 )
|
prvENTER_CRITICAL_OR_MASK_ISR( &xKernelLock, uxSavedInterruptStatus );
|
||||||
taskENTER_CRITICAL_ISR( &xKernelLock );
|
|
||||||
#else
|
|
||||||
UBaseType_t uxSavedInterruptStatus;
|
|
||||||
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
|
|
||||||
#endif
|
|
||||||
{
|
{
|
||||||
xReturn = pxTCB->pxTaskTag;
|
xReturn = pxTCB->pxTaskTag;
|
||||||
}
|
}
|
||||||
#if ( configNUM_CORES > 1 )
|
prvEXIT_CRITICAL_OR_UNMASK_ISR( &xKernelLock, uxSavedInterruptStatus );
|
||||||
taskEXIT_CRITICAL_ISR( &xKernelLock );
|
|
||||||
#else
|
|
||||||
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
|
|
||||||
#endif
|
|
||||||
|
|
||||||
return xReturn;
|
return xReturn;
|
||||||
}
|
}
|
||||||
@ -3660,14 +3601,14 @@ get_next_task:
|
|||||||
|
|
||||||
void vTaskSwitchContext( void )
|
void vTaskSwitchContext( void )
|
||||||
{
|
{
|
||||||
#ifdef ESP_PLATFORM
|
#if ( configNUM_CORES > 1 )
|
||||||
|
|
||||||
/* vTaskSwitchContext is called either from:
|
/* For SMP, we need to take the kernel lock here as we are about to
|
||||||
* - ISR dispatcher when return from an ISR (interrupts will already be disabled)
|
* access kernel data structures (unlike single core which calls this
|
||||||
* - vTaskSuspend() which is not in a critical section
|
* function with either interrupts disabled or when the scheduler hasn't
|
||||||
* Therefore, we enter a critical section ISR version to ensure safety */
|
* started yet). */
|
||||||
taskENTER_CRITICAL_ISR( &xKernelLock );
|
taskENTER_CRITICAL_ISR( &xKernelLock );
|
||||||
#endif // ESP_PLATFORM
|
#endif /* ( configNUM_CORES > 1 ) */
|
||||||
|
|
||||||
if( uxSchedulerSuspended[ xPortGetCoreID() ] != ( UBaseType_t ) pdFALSE )
|
if( uxSchedulerSuspended[ xPortGetCoreID() ] != ( UBaseType_t ) pdFALSE )
|
||||||
{
|
{
|
||||||
@ -3756,10 +3697,12 @@ void vTaskSwitchContext( void )
|
|||||||
#endif // ESP_PLATFORM
|
#endif // ESP_PLATFORM
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef ESP_PLATFORM
|
#if ( configNUM_CORES > 1 )
|
||||||
/* Exit the critical section previously entered */
|
|
||||||
|
/* Release the previously taken kernel lock as we have finished
|
||||||
|
* accessing the kernel data structures. */
|
||||||
taskEXIT_CRITICAL_ISR( &xKernelLock );
|
taskEXIT_CRITICAL_ISR( &xKernelLock );
|
||||||
#endif // ESP_PLATFORM
|
#endif /* ( configNUM_CORES > 1 ) */
|
||||||
}
|
}
|
||||||
/*-----------------------------------------------------------*/
|
/*-----------------------------------------------------------*/
|
||||||
|
|
||||||
@ -3768,8 +3711,12 @@ void vTaskPlaceOnEventList( List_t * const pxEventList,
|
|||||||
{
|
{
|
||||||
configASSERT( pxEventList );
|
configASSERT( pxEventList );
|
||||||
|
|
||||||
/* Take the kernel lock as we are about to access the task lists. */
|
#if ( configNUM_CORES > 1 )
|
||||||
|
|
||||||
|
/* In SMP, we need to take the kernel lock as we are about to access the
|
||||||
|
* task lists. */
|
||||||
taskENTER_CRITICAL( &xKernelLock );
|
taskENTER_CRITICAL( &xKernelLock );
|
||||||
|
#endif /* configNUM_CORES > 1 */
|
||||||
|
|
||||||
/* THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED OR THE
|
/* THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED OR THE
|
||||||
* SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */
|
* SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */
|
||||||
@ -3782,7 +3729,10 @@ void vTaskPlaceOnEventList( List_t * const pxEventList,
|
|||||||
|
|
||||||
prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
|
prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
|
||||||
|
|
||||||
|
#if ( configNUM_CORES > 1 )
|
||||||
|
/* Release the previously taken kernel lock. */
|
||||||
taskEXIT_CRITICAL( &xKernelLock );
|
taskEXIT_CRITICAL( &xKernelLock );
|
||||||
|
#endif /* configNUM_CORES > 1 */
|
||||||
}
|
}
|
||||||
/*-----------------------------------------------------------*/
|
/*-----------------------------------------------------------*/
|
||||||
|
|
||||||
@ -3792,14 +3742,18 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
|
|||||||
{
|
{
|
||||||
configASSERT( pxEventList );
|
configASSERT( pxEventList );
|
||||||
|
|
||||||
/* Take the kernel lock as we are about to access the task lists. */
|
#if ( configNUM_CORES > 1 )
|
||||||
|
|
||||||
|
/* In SMP, the event groups haven't suspended the scheduler at this
|
||||||
|
* point. We need to take the kernel lock instead as we are about to
|
||||||
|
* access the task lists. */
|
||||||
taskENTER_CRITICAL( &xKernelLock );
|
taskENTER_CRITICAL( &xKernelLock );
|
||||||
|
#else /* configNUM_CORES > 1 */
|
||||||
|
|
||||||
/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
|
/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
|
||||||
* the event groups implementation. */
|
* the event groups implementation. */
|
||||||
|
configASSERT( uxSchedulerSuspended[ 0 ] != 0 );
|
||||||
/* Note. We currently don't always suspend the scheduler. Todo: IDF-3755
|
#endif /* configNUM_CORES > 1 */
|
||||||
* configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] != 0 ); */
|
|
||||||
|
|
||||||
/* Store the item value in the event list item. It is safe to access the
|
/* Store the item value in the event list item. It is safe to access the
|
||||||
* event list item here as interrupts won't access the event list item of a
|
* event list item here as interrupts won't access the event list item of a
|
||||||
@ -3815,7 +3769,10 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
|
|||||||
|
|
||||||
prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
|
prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
|
||||||
|
|
||||||
|
#if ( configNUM_CORES > 1 )
|
||||||
|
/* Release the previously taken kernel lock. */
|
||||||
taskEXIT_CRITICAL( &xKernelLock );
|
taskEXIT_CRITICAL( &xKernelLock );
|
||||||
|
#endif /* configNUM_CORES > 1 */
|
||||||
}
|
}
|
||||||
/*-----------------------------------------------------------*/
|
/*-----------------------------------------------------------*/
|
||||||
|
|
||||||
@ -3827,8 +3784,12 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
|
|||||||
{
|
{
|
||||||
configASSERT( pxEventList );
|
configASSERT( pxEventList );
|
||||||
|
|
||||||
/* Take the kernel lock as we are about to access the task lists. */
|
#if ( configNUM_CORES > 1 )
|
||||||
|
|
||||||
|
/* In SMP, we need to take the kernel lock as we are about to access
|
||||||
|
* the task lists. */
|
||||||
taskENTER_CRITICAL( &xKernelLock );
|
taskENTER_CRITICAL( &xKernelLock );
|
||||||
|
#endif /* configNUM_CORES > 1 */
|
||||||
|
|
||||||
/* This function should not be called by application code hence the
|
/* This function should not be called by application code hence the
|
||||||
* 'Restricted' in its name. It is not part of the public API. It is
|
* 'Restricted' in its name. It is not part of the public API. It is
|
||||||
@ -3853,7 +3814,10 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
|
|||||||
traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) );
|
traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) );
|
||||||
prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely );
|
prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely );
|
||||||
|
|
||||||
|
#if ( configNUM_CORES > 1 )
|
||||||
|
/* Release the previously taken kernel lock. */
|
||||||
taskEXIT_CRITICAL( &xKernelLock );
|
taskEXIT_CRITICAL( &xKernelLock );
|
||||||
|
#endif /* configNUM_CORES > 1 */
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif /* configUSE_TIMERS */
|
#endif /* configUSE_TIMERS */
|
||||||
@ -3865,12 +3829,24 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
|
|||||||
BaseType_t xReturn;
|
BaseType_t xReturn;
|
||||||
|
|
||||||
/* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
|
/* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
|
||||||
* called from a critical section within an ISR.
|
* called from a critical section within an ISR. */
|
||||||
*
|
|
||||||
* However, we still need to take the kernel lock as we are about to access
|
#if ( configNUM_CORES > 1 )
|
||||||
* kernel data structures. Note that we use the ISR version of the macro as
|
|
||||||
* this function could be called from an ISR critical section. */
|
/* In SMP, we need to take the kernel lock (even if the caller is
|
||||||
|
* already in a critical section by taking a different lock) as we are
|
||||||
|
* about to access the task lists, which are protected by the kernel
|
||||||
|
* lock. This function can also be called from an ISR context, so we
|
||||||
|
* need to check whether we are in an ISR.*/
|
||||||
|
if( portCHECK_IF_IN_ISR() == pdFALSE )
|
||||||
|
{
|
||||||
|
taskENTER_CRITICAL( &xKernelLock );
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
taskENTER_CRITICAL_ISR( &xKernelLock );
|
taskENTER_CRITICAL_ISR( &xKernelLock );
|
||||||
|
}
|
||||||
|
#endif /* configNUM_CORES > 1 */
|
||||||
{
|
{
|
||||||
/* Before taking the kernel lock, another task/ISR could have already
|
/* Before taking the kernel lock, another task/ISR could have already
|
||||||
* emptied the pxEventList. So we insert a check here to see if
|
* emptied the pxEventList. So we insert a check here to see if
|
||||||
@@ -3965,13 +3941,23 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
             xReturn = pdFALSE;
         }
     }
+    #if ( configNUM_CORES > 1 )
+        /* Release the previously taken kernel lock. */
+        if( portCHECK_IF_IN_ISR() == pdFALSE )
+        {
+            taskEXIT_CRITICAL( &xKernelLock );
+        }
+        else
+        {
             taskEXIT_CRITICAL_ISR( &xKernelLock );
+        }
+    #endif /* configNUM_CORES > 1 */
 
     return xReturn;
 }
 /*-----------------------------------------------------------*/
 
-#ifdef ESP_PLATFORM
+#if ( configNUM_CORES > 1 )
 void vTaskTakeKernelLock( void )
 {
     /* We call the tasks.c critical section macro to take xKernelLock */
@@ -3983,7 +3969,7 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
     /* We call the tasks.c critical section macro to release xKernelLock */
     taskEXIT_CRITICAL( &xKernelLock );
 }
-#endif // ESP_PLATFORM
+#endif /* configNUM_CORES > 1 */
 
 void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
                                         const TickType_t xItemValue )
@@ -3991,14 +3977,17 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
     TCB_t * pxUnblockedTCB;
     BaseType_t xCurCoreID = xPortGetCoreID();
 
+    #if ( configNUM_CORES > 1 )
 
         /* THIS FUNCTION MUST BE CALLED WITH THE KERNEL LOCK ALREADY TAKEN.
          * It is used by the event flags implementation, thus those functions
         * should call vTaskTakeKernelLock() before calling this function. */
-    /*
-     * Todo: IDF-5785
-     * configASSERT( uxSchedulerSuspended[ xCurCoreID ] != pdFALSE );
-     */
+    #else /* configNUM_CORES > 1 */
+
+        /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
+         * the event flags implementation. */
+        configASSERT( uxSchedulerSuspended != pdFALSE );
+    #endif /* configNUM_CORES > 1 */
 
     /* Store the new item value in the event list. */
     listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
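The comment block above spells out a caller contract that differs per configuration: on SMP the event-groups code must bracket the call with the kernel lock, on single core it must suspend the scheduler. A sketch of a caller honoring that contract; vTaskReleaseKernelLock() is assumed here to be the matching release function whose body appears in the hunk above, and prvLOCK()/prvUNLOCK() are hypothetical wrappers, not part of the API:

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

#if ( configNUM_CORES > 1 )
    #define prvLOCK()      vTaskTakeKernelLock()
    #define prvUNLOCK()    vTaskReleaseKernelLock()
#else
    #define prvLOCK()      vTaskSuspendAll()
    #define prvUNLOCK()    ( void ) xTaskResumeAll()
#endif

static void prvUnblockWaiter( ListItem_t * pxItem, TickType_t xValue )
{
    prvLOCK(); /* kernel lock on SMP, scheduler suspension on single core */
    {
        vTaskRemoveFromUnorderedEventList( pxItem, xValue );
    }
    prvUNLOCK();
}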
@@ -4066,18 +4055,19 @@ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )
      * On a single core configuration, this problem doesn't appear as this function is meant to be called from
      * a critical section, disabling the (tick) interrupts.
      */
-    #if ( ( ESP_PLATFORM == 1 ) && ( configNUM_CORES > 1 ) )
+    #if ( configNUM_CORES > 1 )
         configASSERT( pxTimeOut );
         taskENTER_CRITICAL( &xKernelLock );
-    #endif // ( ( ESP_PLATFORM == 1 ) && ( configNUM_CORES > 1 ) )
+    #endif /* configNUM_CORES > 1 */
 
     /* For internal use only as it does not use a critical section. */
     pxTimeOut->xOverflowCount = xNumOfOverflows;
     pxTimeOut->xTimeOnEntering = xTickCount;
 
-    #if ( ( ESP_PLATFORM == 1 ) && ( configNUM_CORES > 1 ) )
+    #if ( configNUM_CORES > 1 )
+        /* Release the previously taken kernel lock. */
         taskEXIT_CRITICAL( &xKernelLock );
-    #endif // ( ( ESP_PLATFORM == 1 ) && ( configNUM_CORES > 1 ) )
+    #endif /* configNUM_CORES > 1 */
 }
 /*-----------------------------------------------------------*/
 
@@ -4288,11 +4278,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
 
                 if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
                 {
-                    #ifdef ESP_PLATFORM /* IDF-3755 */
-                        taskENTER_CRITICAL( &xKernelLock );
-                    #else
-                        vTaskSuspendAll();
-                    #endif // ESP_PLATFORM
+                    prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock );
                     {
                         /* Now the scheduler is suspended, the expected idle
                          * time can be sampled again, and this time its value can
@@ -4316,11 +4302,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
                             mtCOVERAGE_TEST_MARKER();
                         }
                     }
-                    #ifdef ESP_PLATFORM /* IDF-3755 */
-                        taskEXIT_CRITICAL( &xKernelLock );
-                    #else
-                        ( void ) xTaskResumeAll();
-                    #endif // ESP_PLATFORM
+                    ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock );
                 }
                 else
                 {
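These two hunks replace the repeated #ifdef ladders with a single pair of helpers. Their real definitions live elsewhere in this commit; the following is only a sketch of their apparent intent, assuming GCC statement expressions (which ESP-IDF builds permit), not a quote of the actual macros:

#if ( configNUM_CORES > 1 )
    /* SMP: suspending the scheduler does not stop the other core, so a
     * spinlock-based critical section is used instead. Nothing can yield
     * inside it, hence the "already yielded" result is always pdFALSE. */
    #define prvENTER_CRITICAL_OR_SUSPEND_ALL( pxLock )    taskENTER_CRITICAL( ( pxLock ) )
    #define prvEXIT_CRITICAL_OR_RESUME_ALL( pxLock )      ( { taskEXIT_CRITICAL( ( pxLock ) ); pdFALSE; } )
#else
    /* Single core: the classic suspend/resume pair suffices, and
     * xTaskResumeAll() reports whether a yield already happened while the
     * scheduler was suspended. The lock argument is unused. */
    #define prvENTER_CRITICAL_OR_SUSPEND_ALL( pxLock )    ( { ( void ) ( pxLock ); vTaskSuspendAll(); } )
    #define prvEXIT_CRITICAL_OR_RESUME_ALL( pxLock )      ( { ( void ) ( pxLock ); xTaskResumeAll(); } )
#endif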
@@ -4389,11 +4371,22 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
 
         if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
         {
+            #if ( configNUM_CORES > 1 )
+
+                /* For SMP, we need to take the kernel lock here as
+                 * another core could also update this task's TLSP at the
+                 * same time. */
                 taskENTER_CRITICAL( &xKernelLock );
+            #endif /* ( configNUM_CORES > 1 ) */
+
             pxTCB = prvGetTCBFromHandle( xTaskToSet );
             pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
             pxTCB->pvThreadLocalStoragePointersDelCallback[ xIndex ] = xDelCallback;
 
+            #if ( configNUM_CORES > 1 )
+                /* Release the previously taken kernel lock. */
                 taskEXIT_CRITICAL( &xKernelLock );
+            #endif /* configNUM_CORES > 1 */
         }
     }
 
@@ -4414,10 +4407,22 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
 
         if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
         {
+            #if ( configNUM_CORES > 1 )
+
+                /* For SMP, we need to take the kernel lock here as
+                 * another core could also update this task's TLSP at the
+                 * same time. */
                 taskENTER_CRITICAL( &xKernelLock );
+            #endif /* ( configNUM_CORES > 1 ) */
+
             pxTCB = prvGetTCBFromHandle( xTaskToSet );
+            configASSERT( pxTCB != NULL );
             pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
 
+            #if ( configNUM_CORES > 1 )
+                /* Release the previously taken kernel lock. */
                 taskEXIT_CRITICAL( &xKernelLock );
+            #endif /* configNUM_CORES > 1 */
         }
     }
 #endif /* configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS == 1 */
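For context, the code being locked here backs the thread-local-storage-pointer (TLSP) API. A hedged usage sketch, assuming configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS is enabled and that the callback type takes the slot index and the stored pointer (slot 0 being a free slot is an assumption of the example, not a guarantee of the kernel):

#include <stdlib.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

static void prvTlsCleanup( int iIndex, void * pvTls )
{
    ( void ) iIndex;
    free( pvTls ); /* runs automatically when the owning task is deleted */
}

void vStorePerTaskBuffer( void )
{
    void * pvBuf = malloc( 128 );

    /* A NULL handle means "the calling task". */
    vTaskSetThreadLocalStoragePointerAndDelCallback( NULL, 0, pvBuf, prvTlsCleanup );
}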
@@ -4634,22 +4639,14 @@ static void prvCheckTasksWaitingTermination( void )
              * it should be reported as being in the Blocked state. */
             if( eState == eSuspended )
             {
-                #ifdef ESP_PLATFORM /* IDF-3755 */
-                    taskENTER_CRITICAL( &xKernelLock );
-                #else
-                    vTaskSuspendAll();
-                #endif // ESP_PLATFORM
+                prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock );
                 {
                     if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
                     {
                         pxTaskStatus->eCurrentState = eBlocked;
                     }
                 }
-                #ifdef ESP_PLATFORM /* IDF-3755 */
-                    taskEXIT_CRITICAL( &xKernelLock );
-                #else
-                    ( void ) xTaskResumeAll();
-                #endif // ESP_PLATFORM
+                ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock );
             }
         }
 #endif /* INCLUDE_vTaskSuspend */
@@ -5006,7 +5003,12 @@ static void prvResetNextTaskUnblockTime( void )
         TCB_t * const pxMutexHolderTCB = pxMutexHolder;
         BaseType_t xReturn = pdFALSE;
 
+        #if ( configNUM_CORES > 1 )
+
+            /* For SMP, we need to take the kernel lock here as we are about to
+             * access kernel data structures. */
             taskENTER_CRITICAL( &xKernelLock );
+        #endif /* ( configNUM_CORES > 1 ) */
 
         /* If the mutex was given back by an interrupt while the queue was
          * locked then the mutex holder might now be NULL. _RB_ Is this still
@@ -5085,7 +5087,10 @@ static void prvResetNextTaskUnblockTime( void )
             mtCOVERAGE_TEST_MARKER();
         }
 
-        taskEXIT_CRITICAL( &xKernelLock );
+        #if ( configNUM_CORES > 1 )
+            /* Release the previously taken kernel lock. */
+            taskEXIT_CRITICAL_ISR( &xKernelLock );
+        #endif /* ( configNUM_CORES > 1 ) */
 
         return xReturn;
     }
@@ -5100,7 +5105,12 @@ static void prvResetNextTaskUnblockTime( void )
         TCB_t * const pxTCB = pxMutexHolder;
         BaseType_t xReturn = pdFALSE;
 
+        #if ( configNUM_CORES > 1 )
+
+            /* For SMP, we need to take the kernel lock here as we are about to
+             * access kernel data structures. */
             taskENTER_CRITICAL( &xKernelLock );
+        #endif /* ( configNUM_CORES > 1 ) */
 
         if( pxMutexHolder != NULL )
         {
@@ -5169,7 +5179,10 @@ static void prvResetNextTaskUnblockTime( void )
             mtCOVERAGE_TEST_MARKER();
         }
 
-        taskEXIT_CRITICAL( &xKernelLock );
+        #if ( configNUM_CORES > 1 )
+            /* Release the previously taken kernel lock. */
+            taskEXIT_CRITICAL_ISR( &xKernelLock );
+        #endif /* ( configNUM_CORES > 1 ) */
 
         return xReturn;
     }
@@ -5186,7 +5199,12 @@ static void prvResetNextTaskUnblockTime( void )
         UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse;
         const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1;
 
+        #if ( configNUM_CORES > 1 )
+
+            /* For SMP, we need to take the kernel lock here as we are about to
+             * access kernel data structures. */
             taskENTER_CRITICAL( &xKernelLock );
+        #endif /* ( configNUM_CORES > 1 ) */
 
         if( pxMutexHolder != NULL )
         {
@@ -5281,7 +5299,10 @@ static void prvResetNextTaskUnblockTime( void )
             mtCOVERAGE_TEST_MARKER();
         }
 
+        #if ( configNUM_CORES > 1 )
+            /* Release the previously taken kernel lock. */
             taskEXIT_CRITICAL( &xKernelLock );
+        #endif /* ( configNUM_CORES > 1 ) */
     }
 
 #endif /* configUSE_MUTEXES */
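The three guarded regions above all sit on the mutex priority-inheritance path, which on SMP can be entered from both cores at once. For orientation, a minimal scenario that exercises those paths indirectly through the public mutex API (task names, stack sizes and priorities are illustrative only):

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"

static SemaphoreHandle_t xMutex;

static void vLowPrioTask( void * pvArg )
{
    ( void ) pvArg;

    for( ;; )
    {
        xSemaphoreTake( xMutex, portMAX_DELAY );
        /* While the high priority task blocks on xMutex, this task is
         * temporarily boosted (xTaskPriorityInherit). */
        vTaskDelay( pdMS_TO_TICKS( 10 ) );
        xSemaphoreGive( xMutex ); /* boost undone (xTaskPriorityDisinherit) */
    }
}

static void vHighPrioTask( void * pvArg )
{
    ( void ) pvArg;

    for( ;; )
    {
        xSemaphoreTake( xMutex, portMAX_DELAY );
        xSemaphoreGive( xMutex );
        vTaskDelay( pdMS_TO_TICKS( 10 ) );
    }
}

void app_main( void )
{
    xMutex = xSemaphoreCreateMutex();
    xTaskCreate( vLowPrioTask, "low", 2048, NULL, 1, NULL );
    xTaskCreate( vHighPrioTask, "high", 2048, NULL, 5, NULL );
}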
@@ -5615,18 +5636,27 @@ static void prvResetNextTaskUnblockTime( void )
 TickType_t uxTaskResetEventItemValue( void )
 {
     TickType_t uxReturn;
-    TCB_t * pxCurTCB;
+    BaseType_t xCoreID;
 
+    #if ( configNUM_CORES > 1 )
+
+        /* For SMP, we need to take the kernel lock here to ensure nothing else
+         * modifies the task's event item value simultaneously. */
         taskENTER_CRITICAL( &xKernelLock );
-    pxCurTCB = pxCurrentTCB[ xPortGetCoreID() ];
+    #endif /* ( configNUM_CORES > 1 ) */
 
-    uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurTCB->xEventListItem ) );
+    xCoreID = xPortGetCoreID();
 
+    uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xCoreID ]->xEventListItem ) );
+
     /* Reset the event list item to its normal value - so it can be used with
      * queues and semaphores. */
-    listSET_LIST_ITEM_VALUE( &( pxCurTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurTCB->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
+    listSET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xCoreID ]->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB[ xCoreID ]->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
 
-    taskEXIT_CRITICAL( &xKernelLock );
+    #if ( configNUM_CORES > 1 )
+        /* Release the previously taken kernel lock. */
+        taskEXIT_CRITICAL_ISR( &xKernelLock );
+    #endif /* ( configNUM_CORES > 1 ) */
 
     return uxReturn;
 }
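Note that this hunk (like the mutex hunks above) pairs taskENTER_CRITICAL with taskEXIT_CRITICAL_ISR. That only works under the assumption, specific to the ESP-IDF port, that the task-level and ISR-level critical section macros resolve to the same spinlock-plus-interrupt-mask primitive; upstream FreeRTOS ports do distinguish the two, so the following sketch is IDF-only:

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

static portMUX_TYPE xSketchLock = portMUX_INITIALIZER_UNLOCKED;

void vSketchMixedPair( void )
{
    taskENTER_CRITICAL( &xSketchLock );    /* takes the spinlock, masks interrupts */
    taskEXIT_CRITICAL_ISR( &xSketchLock ); /* releases the same spinlock */
}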
@@ -5636,21 +5666,31 @@ TickType_t uxTaskResetEventItemValue( void )
 
 TaskHandle_t pvTaskIncrementMutexHeldCount( void )
 {
-    TCB_t * curTCB;
+    TCB_t * pxCurTCB;
+    BaseType_t xCoreID;
 
+    #if ( configNUM_CORES > 1 )
+
+        /* For SMP, we need to take the kernel lock here as we are about to
+         * access kernel data structures. */
+        taskENTER_CRITICAL( &xKernelLock );
+    #endif /* ( configNUM_CORES > 1 ) */
+
+    xCoreID = xPortGetCoreID();
+
     /* If xSemaphoreCreateMutex() is called before any tasks have been created
      * then pxCurrentTCB will be NULL. */
-    taskENTER_CRITICAL( &xKernelLock );
-
-    if( pxCurrentTCB[ xPortGetCoreID() ] != NULL )
+    if( pxCurrentTCB[ xCoreID ] != NULL )
     {
-        ( pxCurrentTCB[ xPortGetCoreID() ]->uxMutexesHeld )++;
+        ( pxCurrentTCB[ xCoreID ]->uxMutexesHeld )++;
     }
 
-    curTCB = pxCurrentTCB[ xPortGetCoreID() ];
+    pxCurTCB = pxCurrentTCB[ xCoreID ];
+    #if ( configNUM_CORES > 1 )
+        /* Release the previously taken kernel lock. */
         taskEXIT_CRITICAL( &xKernelLock );
+    #endif /* ( configNUM_CORES > 1 ) */
 
-    return curTCB;
+    return pxCurTCB;
 }
 
 #endif /* configUSE_MUTEXES */
@@ -5971,6 +6011,7 @@ TickType_t uxTaskResetEventItemValue( void )
         TCB_t * pxTCB;
         uint8_t ucOriginalNotifyState;
         BaseType_t xReturn = pdPASS;
+        UBaseType_t uxSavedInterruptStatus;
 
         configASSERT( xTaskToNotify );
         configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );
@@ -5995,7 +6036,7 @@ TickType_t uxTaskResetEventItemValue( void )
 
         pxTCB = xTaskToNotify;
 
-        taskENTER_CRITICAL_ISR( &xKernelLock );
+        prvENTER_CRITICAL_OR_MASK_ISR( &xKernelLock, uxSavedInterruptStatus );
         {
             if( pulPreviousNotificationValue != NULL )
             {
@@ -6089,7 +6130,7 @@ TickType_t uxTaskResetEventItemValue( void )
                 }
             }
         }
-        taskEXIT_CRITICAL_ISR( &xKernelLock );
+        prvEXIT_CRITICAL_OR_UNMASK_ISR( &xKernelLock, uxSavedInterruptStatus );
 
         return xReturn;
     }
@@ -6105,7 +6146,7 @@ TickType_t uxTaskResetEventItemValue( void )
     {
         TCB_t * pxTCB;
         uint8_t ucOriginalNotifyState;
+        UBaseType_t uxSavedInterruptStatus;
 
         configASSERT( xTaskToNotify );
         configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );
@@ -6130,7 +6171,7 @@ TickType_t uxTaskResetEventItemValue( void )
 
         pxTCB = xTaskToNotify;
 
-        taskENTER_CRITICAL_ISR( &xKernelLock );
+        prvENTER_CRITICAL_OR_MASK_ISR( &xKernelLock, uxSavedInterruptStatus );
         {
             ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];
             pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;
@@ -6180,7 +6221,7 @@ TickType_t uxTaskResetEventItemValue( void )
                 }
             }
         }
-        taskEXIT_CRITICAL_ISR( &xKernelLock );
+        prvEXIT_CRITICAL_OR_UNMASK_ISR( &xKernelLock, uxSavedInterruptStatus );
     }
 
 #endif /* configUSE_TASK_NOTIFICATIONS */
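The notify-from-ISR hunks introduce a second pair of helpers, this time parameterized by a saved interrupt status. Their real definitions are elsewhere in this commit; a sketch of the apparent intent, using the standard FreeRTOS port macros and treating the exact expansion as an assumption:

#if ( configNUM_CORES > 1 )
    /* SMP: the spinlock is still required; the status variable is unused. */
    #define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxStatus ) \
        { ( uxStatus ) = 0; taskENTER_CRITICAL_ISR( ( pxLock ) ); }
    #define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxStatus ) \
        { taskEXIT_CRITICAL_ISR( ( pxLock ) ); ( void ) ( uxStatus ); }
#else
    /* Single core: the ISR-safe critical section degenerates to saving and
     * restoring the interrupt mask; the lock argument is unused. */
    #define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxStatus ) \
        { ( uxStatus ) = portSET_INTERRUPT_MASK_FROM_ISR(); ( void ) ( pxLock ); }
    #define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxStatus ) \
        { portCLEAR_INTERRUPT_MASK_FROM_ISR( ( uxStatus ) ); ( void ) ( pxLock ); }
#endif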
@@ -6252,11 +6293,23 @@ TickType_t uxTaskResetEventItemValue( void )
 
     uint32_t ulTaskGetIdleRunTimeCounter( void )
     {
-        taskENTER_CRITICAL( &xKernelLock );
-        tskTCB * pxTCB = ( tskTCB * ) xIdleTaskHandle[ xPortGetCoreID() ];
-        taskEXIT_CRITICAL( &xKernelLock );
+        uint32_t ulRunTimeCounter;
 
-        return pxTCB->ulRunTimeCounter;
+        #if ( configNUM_CORES > 1 )
+
+            /* For SMP, we need to take the kernel lock here as we are about to
+             * access kernel data structures. */
+            taskENTER_CRITICAL( &xKernelLock );
+        #endif /* ( configNUM_CORES > 1 ) */
+
+        ulRunTimeCounter = xIdleTaskHandle[ xPortGetCoreID() ]->ulRunTimeCounter;
+
+        #if ( configNUM_CORES > 1 )
+            /* Release the previously taken kernel lock. */
+            taskEXIT_CRITICAL( &xKernelLock );
+        #endif /* ( configNUM_CORES > 1 ) */
+
+        return ulRunTimeCounter;
     }
 
 #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
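An illustrative use of the reworked getter: estimating idle load on the calling core. This assumes configGENERATE_RUN_TIME_STATS is enabled; the one-second window and percentage math are example choices, not part of the API:

#include <stdio.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

void vReportIdleLoad( void )
{
    uint32_t ulIdleBefore = ulTaskGetIdleRunTimeCounter();
    uint32_t ulTotalBefore = portGET_RUN_TIME_COUNTER_VALUE();

    vTaskDelay( pdMS_TO_TICKS( 1000 ) );

    uint32_t ulIdleDelta = ulTaskGetIdleRunTimeCounter() - ulIdleBefore;
    uint32_t ulTotalDelta = portGET_RUN_TIME_COUNTER_VALUE() - ulTotalBefore;

    if( ulTotalDelta > 0 )
    {
        printf( "idle: %lu%%\n", ( unsigned long ) ( ( 100 * ulIdleDelta ) / ulTotalDelta ) );
    }
}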
@@ -606,11 +606,7 @@
         TickType_t xTimeNow;
         BaseType_t xTimerListsWereSwitched;
 
-        #ifdef ESP_PLATFORM
-            taskENTER_CRITICAL( &xTimerLock );
-        #else
-            vTaskSuspendAll();
-        #endif // ESP_PLATFORM
+        prvENTER_CRITICAL_OR_SUSPEND_ALL( &xTimerLock );
         {
             /* Obtain the time now to make an assessment as to whether the timer
              * has expired or not. If obtaining the time causes the lists to switch
@@ -624,11 +620,7 @@
             /* The tick count has not overflowed, has the timer expired? */
             if( ( xListWasEmpty == pdFALSE ) && ( xNextExpireTime <= xTimeNow ) )
             {
-                #ifdef ESP_PLATFORM
-                    taskEXIT_CRITICAL( &xTimerLock );
-                #else
-                    ( void ) xTaskResumeAll();
-                #endif // ESP_PLATFORM
+                ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xTimerLock );
                 prvProcessExpiredTimer( xNextExpireTime, xTimeNow );
             }
             else
@@ -648,11 +640,7 @@
 
                 vQueueWaitForMessageRestricted( xTimerQueue, ( xNextExpireTime - xTimeNow ), xListWasEmpty );
 
-                #ifdef ESP_PLATFORM /* IDF-3755 */
-                    taskEXIT_CRITICAL( &xTimerLock );
-                #else
-                    if( xTaskResumeAll() == pdFALSE )
-                #endif // ESP_PLATFORM
+                if( prvEXIT_CRITICAL_OR_RESUME_ALL( &xTimerLock ) == pdFALSE )
                 {
                     /* Yield to wait for either a command to arrive, or the
                      * block time to expire. If a command arrived between the
@@ -660,22 +648,15 @@
                      * will not cause the task to block. */
                     portYIELD_WITHIN_API();
                 }
 
-                #ifndef ESP_PLATFORM /* IDF-3755 */
                 else
                 {
                     mtCOVERAGE_TEST_MARKER();
                 }
-                #endif // ESP_PLATFORM
             }
         }
         else
         {
-            #ifdef ESP_PLATFORM /* IDF-3755 */
-                taskEXIT_CRITICAL( &xTimerLock );
-            #else
-                ( void ) xTaskResumeAll();
-            #endif // ESP_PLATFORM
+            ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xTimerLock );
         }
     }
 }
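These timer hunks rely on the pdFALSE/pdTRUE result of prvEXIT_CRITICAL_OR_RESUME_ALL(): on single core it maps to xTaskResumeAll(), whose pdTRUE result means a context switch already happened while the lists were protected, so the explicit yield can be skipped; in the SMP critical-section variant nothing can yield inside, so the result is presumably always pdFALSE and the yield always runs. A reduced sketch of that wait shape, where xHelperExitOrResume() is a stand-in for the helper, not a real API:

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

extern BaseType_t xHelperExitOrResume( void ); /* stand-in for prvEXIT_CRITICAL_OR_RESUME_ALL() */

static void prvWaitSketch( void )
{
    if( xHelperExitOrResume() == pdFALSE )
    {
        /* Nothing yielded while the timer lists were protected: block now
         * until a command arrives or the next timer expires. */
        portYIELD_WITHIN_API();
    }
}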