freertos: Reduce ESP specific code

Zim Kalinowski 2021-08-30 14:45:31 +08:00
parent ed8df94915
commit 5f2a66a8a5
6 changed files with 314 additions and 223 deletions
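The pattern applied throughout this commit: each kernel source file defines a file-local taskCRITICAL_MUX and re-maps the argument-less taskENTER_CRITICAL()/taskEXIT_CRITICAL() macros onto the ESP port macros, so the call sites can stay identical to upstream FreeRTOS instead of passing a spinlock at every critical section. A minimal sketch of the pattern, using the names from the event_groups.c hunks below:

    /* Per-file override: pick the lock this file protects itself with... */
    #ifdef ESP_PLATFORM
        #define taskCRITICAL_MUX        &pxEventBits->eventGroupMux
        #undef  taskENTER_CRITICAL
        #undef  taskEXIT_CRITICAL
        #define taskENTER_CRITICAL( )   portENTER_CRITICAL( taskCRITICAL_MUX )
        #define taskEXIT_CRITICAL( )    portEXIT_CRITICAL( taskCRITICAL_MUX )
    #endif

    /* ...so that call sites read exactly like vanilla FreeRTOS: */
    taskENTER_CRITICAL();
    {
        uxOriginalBitValue = pxEventBits->uxEventBits;
    }
    taskEXIT_CRITICAL();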

File: event_groups.c

@@ -38,6 +38,18 @@
#include "timers.h"
#include "event_groups.h"
+#ifdef ESP_PLATFORM
+#define taskCRITICAL_MUX &pxEventBits->eventGroupMux
+#undef taskENTER_CRITICAL
+#undef taskEXIT_CRITICAL
+#undef taskENTER_CRITICAL_ISR
+#undef taskEXIT_CRITICAL_ISR
+#define taskENTER_CRITICAL( ) portENTER_CRITICAL( taskCRITICAL_MUX )
+#define taskEXIT_CRITICAL( ) portEXIT_CRITICAL( taskCRITICAL_MUX )
+#define taskENTER_CRITICAL_ISR( ) portENTER_CRITICAL_ISR( taskCRITICAL_MUX )
+#define taskEXIT_CRITICAL_ISR( ) portEXIT_CRITICAL_ISR( taskCRITICAL_MUX )
+#endif
/* Lint e961, e750 and e9021 are suppressed as a MISRA exception justified
* because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
* for the header files above, but not in this file, in order to generate the
@@ -212,7 +224,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
}
#endif
-taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
+taskENTER_CRITICAL();
{
uxOriginalBitValue = pxEventBits->uxEventBits;
@@ -255,7 +267,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
}
}
}
-taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
+taskEXIT_CRITICAL();
if( xTicksToWait != ( TickType_t ) 0 )
{
@@ -270,7 +282,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
{
/* The task timed out, just return the current event bit value. */
-taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
+taskENTER_CRITICAL();
{
uxReturn = pxEventBits->uxEventBits;
@@ -287,7 +299,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
mtCOVERAGE_TEST_MARKER();
}
}
-taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
+taskEXIT_CRITICAL();
xTimeoutOccurred = pdTRUE;
}
@@ -332,7 +344,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
}
#endif
-taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
+taskENTER_CRITICAL();
{
const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits;
@@ -400,7 +412,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
}
}
-taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
+taskEXIT_CRITICAL();
if( xTicksToWait != ( TickType_t ) 0 )
{
@@ -414,7 +426,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
{
-taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
+taskENTER_CRITICAL();
{
/* The task timed out, just return the current event bit value. */
uxReturn = pxEventBits->uxEventBits;
@@ -439,7 +451,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
xTimeoutOccurred = pdTRUE;
}
-taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
+taskEXIT_CRITICAL();
}
else
{
@@ -470,7 +482,7 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup,
configASSERT( xEventGroup );
configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
-taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
+taskENTER_CRITICAL();
{
traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear );
@@ -481,7 +493,7 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup,
/* Clear the bits. */
pxEventBits->uxEventBits &= ~uxBitsToClear;
}
-taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
+taskEXIT_CRITICAL();
return uxReturn;
}
@@ -536,7 +548,7 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
pxList = &( pxEventBits->xTasksWaitingForBits );
pxListEnd = listGET_END_MARKER( pxList ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */
-taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
+taskENTER_CRITICAL();
{
traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );
@@ -608,7 +620,7 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
* bit was set in the control word. */
pxEventBits->uxEventBits &= ~uxBitsToClear;
}
-taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
+taskEXIT_CRITICAL();
return pxEventBits->uxEventBits;
}
@@ -621,7 +633,7 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup )
traceEVENT_GROUP_DELETE( xEventGroup );
-taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
+taskENTER_CRITICAL();
{
while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 )
{
@@ -631,7 +643,7 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup )
xTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
}
}
-taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
+taskEXIT_CRITICAL();
#if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
{

File: task.h

@@ -203,9 +203,18 @@ typedef enum
* @endcond
* \ingroup SchedulerControl
*/
-#define taskENTER_CRITICAL( x ) portENTER_CRITICAL( x )
+#ifdef ESP_PLATFORM
+#define taskENTER_CRITICAL( x ) portENTER_CRITICAL( x )
+#else
+#define taskENTER_CRITICAL( ) portENTER_CRITICAL( )
+#endif // ESP_PLATFORM
#define taskENTER_CRITICAL_FROM_ISR( ) portSET_INTERRUPT_MASK_FROM_ISR()
-#define taskENTER_CRITICAL_ISR(mux) portENTER_CRITICAL_ISR(mux)
+#ifdef ESP_PLATFORM
+#define taskENTER_CRITICAL_ISR( x ) portENTER_CRITICAL_ISR( x )
+#else
+#define taskENTER_CRITICAL_ISR( ) portENTER_CRITICAL_ISR( )
+#endif // ESP_PLATFORM
/**
* task. h
@@ -221,10 +230,19 @@ typedef enum
* @endcond
* \ingroup SchedulerControl
*/
-#define taskEXIT_CRITICAL( x ) portEXIT_CRITICAL( x )
-#define taskEXIT_CRITICAL_FROM_ISR( x ) portCLEAR_INTERRUPT_MASK_FROM_ISR( x )
-#define taskEXIT_CRITICAL_ISR(mux) portEXIT_CRITICAL_ISR(mux)
+#ifdef ESP_PLATFORM
+#define taskEXIT_CRITICAL( x ) portEXIT_CRITICAL( x )
+#else
+#define taskEXIT_CRITICAL( ) portEXIT_CRITICAL( )
+#endif // ESP_PLATFORM
+#define taskEXIT_CRITICAL_FROM_ISR( x ) portCLEAR_INTERRUPT_MASK_FROM_ISR( x )
+#ifdef ESP_PLATFORM
+#define taskEXIT_CRITICAL_ISR( x ) portEXIT_CRITICAL_ISR( x )
+#else
+#define taskEXIT_CRITICAL_ISR( ) portEXIT_CRITICAL_ISR( )
+#endif // ESP_PLATFORM
/**
* task. h
*
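With these conditionals, task.h keeps the ESP-IDF signatures (the macros take a portMUX_TYPE spinlock) when ESP_PLATFORM is defined and falls back to the upstream zero-argument signatures otherwise. A hedged sketch of what a caller looks like under each configuration; the xExampleMux lock and vProtectedUpdate() function are illustrative, not part of the diff:

    #ifdef ESP_PLATFORM
    static portMUX_TYPE xExampleMux = portMUX_INITIALIZER_UNLOCKED;  /* hypothetical lock */

    static void vProtectedUpdate( void )
    {
        taskENTER_CRITICAL( &xExampleMux );   /* ESP-IDF form: pass the spinlock */
        /* ... touch shared state ... */
        taskEXIT_CRITICAL( &xExampleMux );
    }
    #else
    static void vProtectedUpdate( void )
    {
        taskENTER_CRITICAL();                 /* vanilla FreeRTOS form: no argument */
        /* ... touch shared state ... */
        taskEXIT_CRITICAL();
    }
    #endif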

File: queue.c

@@ -40,6 +40,18 @@
#include "croutine.h"
#endif
+#ifdef ESP_PLATFORM
+#define taskCRITICAL_MUX &((Queue_t *)pxQueue)->mux
+#undef taskENTER_CRITICAL
+#undef taskEXIT_CRITICAL
+#undef taskENTER_CRITICAL_ISR
+#undef taskEXIT_CRITICAL_ISR
+#define taskENTER_CRITICAL( ) portENTER_CRITICAL( taskCRITICAL_MUX )
+#define taskEXIT_CRITICAL( ) portEXIT_CRITICAL( taskCRITICAL_MUX )
+#define taskENTER_CRITICAL_ISR( ) portENTER_CRITICAL_ISR( taskCRITICAL_MUX )
+#define taskEXIT_CRITICAL_ISR( ) portEXIT_CRITICAL_ISR( taskCRITICAL_MUX )
+#endif
/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
* because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
* for the header files above, but not in this file, in order to generate the
@@ -252,7 +264,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
* accessing the queue event lists.
*/
#define prvLockQueue( pxQueue ) \
-taskENTER_CRITICAL( &pxQueue->mux ); \
+taskENTER_CRITICAL(); \
{ \
if( ( pxQueue )->cRxLock == queueUNLOCKED ) \
{ \
@@ -263,7 +275,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \
} \
} \
-taskEXIT_CRITICAL( &pxQueue->mux )
+taskEXIT_CRITICAL()
/*-----------------------------------------------------------*/
BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
@@ -278,7 +290,7 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
vPortCPUInitializeMutex(&pxQueue->mux);
}
-taskENTER_CRITICAL( &pxQueue->mux );
+taskENTER_CRITICAL();
{
pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
@@ -317,7 +329,7 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
}
}
-taskEXIT_CRITICAL( &pxQueue->mux);
+taskEXIT_CRITICAL();
/* A value is returned for calling semantic consistency with previous
* versions. */
@@ -585,7 +597,10 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
* calling task is the mutex holder, but not a good way of determining the
* identity of the mutex holder, as the holder may change between the
* following critical section exiting and the function returning. */
-taskENTER_CRITICAL( &pxSemaphore->mux );
+#ifdef ESP_PLATFORM
+Queue_t * const pxQueue = (Queue_t *)pxSemaphore;
+#endif
+taskENTER_CRITICAL();
{
if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX )
{
@@ -596,7 +611,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
pxReturn = NULL;
}
}
-taskEXIT_CRITICAL( &pxSemaphore->mux );
+taskEXIT_CRITICAL();
return pxReturn;
} /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */
@@ -816,7 +831,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
* interest of execution time efficiency. */
for( ; ; )
{
-taskENTER_CRITICAL( &pxQueue->mux );
+taskENTER_CRITICAL();
{
/* Is there room on the queue now? The running task must be the
* highest priority task wanting to access the queue. If the head item
@@ -922,7 +937,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
}
#endif /* configUSE_QUEUE_SETS */
-taskEXIT_CRITICAL( &pxQueue->mux );
+taskEXIT_CRITICAL();
return pdPASS;
}
else
@@ -931,7 +946,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
{
/* The queue was full and no block time is specified (or
* the block time has expired) so leave now. */
-taskEXIT_CRITICAL( &pxQueue->mux);
+taskEXIT_CRITICAL();
/* Return to the original privilege level before exiting
* the function. */
@@ -952,12 +967,12 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
}
}
}
-taskEXIT_CRITICAL( &pxQueue->mux);
+taskEXIT_CRITICAL();
/* Interrupts and other tasks can send to and receive from the queue
* now the critical section has been exited. */
-taskENTER_CRITICAL( &pxQueue->mux);
+taskENTER_CRITICAL();
prvLockQueue( pxQueue );
/* Update the timeout state to see if it has expired yet. */
@@ -980,7 +995,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
* task is already in the ready list before it yields - in which
* case the yield will not cause a context switch unless there
* is also a higher priority task in the pending ready list. */
-taskEXIT_CRITICAL( &pxQueue->mux );
+taskEXIT_CRITICAL();
portYIELD_WITHIN_API();
}
@@ -988,14 +1003,14 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
{
/* Try again. */
prvUnlockQueue( pxQueue );
-taskEXIT_CRITICAL( &pxQueue->mux );
+taskEXIT_CRITICAL();
}
}
else
{
/* The timeout has expired. */
prvUnlockQueue( pxQueue );
-taskEXIT_CRITICAL( &pxQueue->mux );
+taskEXIT_CRITICAL();
traceQUEUE_SEND_FAILED( pxQueue );
return errQUEUE_FULL;
@@ -1040,7 +1055,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
* post). */
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
{
-taskENTER_CRITICAL_ISR(&pxQueue->mux);
+taskENTER_CRITICAL_ISR();
if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
{
@@ -1154,7 +1169,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
xReturn = errQUEUE_FULL;
}
-taskEXIT_CRITICAL_ISR( &pxQueue->mux );
+taskEXIT_CRITICAL_ISR();
}
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
@@ -1204,7 +1219,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
{
-taskENTER_CRITICAL_ISR(&pxQueue->mux);
+taskENTER_CRITICAL_ISR();
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
@@ -1323,7 +1338,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
traceQUEUE_GIVE_FROM_ISR_FAILED( pxQueue );
xReturn = errQUEUE_FULL;
}
-taskEXIT_CRITICAL_ISR(&pxQueue->mux);
+taskEXIT_CRITICAL_ISR();
}
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
@@ -1358,7 +1373,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
* interest of execution time efficiency. */
for( ; ; )
{
-taskENTER_CRITICAL( &pxQueue->mux );
+taskENTER_CRITICAL();
{
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
@@ -1390,7 +1405,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
mtCOVERAGE_TEST_MARKER();
}
-taskEXIT_CRITICAL( &pxQueue->mux);
+taskEXIT_CRITICAL();
return pdPASS;
}
else
@@ -1399,7 +1414,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
{
/* The queue was empty and no block time is specified (or
* the block time has expired) so leave now. */
-taskEXIT_CRITICAL( &pxQueue->mux );
+taskEXIT_CRITICAL();
traceQUEUE_RECEIVE_FAILED( pxQueue );
return errQUEUE_EMPTY;
}
@@ -1417,12 +1432,12 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
}
}
}
-taskEXIT_CRITICAL( &pxQueue->mux );
+taskEXIT_CRITICAL();
/* Interrupts and other tasks can send to and receive from the queue
* now the critical section has been exited. */
-taskENTER_CRITICAL( &pxQueue->mux);
+taskENTER_CRITICAL();
prvLockQueue( pxQueue );
/* Update the timeout state to see if it has expired yet. */
@@ -1435,7 +1450,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
prvUnlockQueue( pxQueue );
-taskEXIT_CRITICAL( &pxQueue->mux);
+taskEXIT_CRITICAL();
portYIELD_WITHIN_API();
}
else
@@ -1443,7 +1458,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
/* The queue contains data again. Loop back to try and read the
* data. */
prvUnlockQueue( pxQueue );
-taskEXIT_CRITICAL( &pxQueue->mux);
+taskEXIT_CRITICAL();
}
}
else
@@ -1451,7 +1466,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
/* Timed out. If there is no data in the queue exit, otherwise loop
* back and attempt to read the data. */
prvUnlockQueue( pxQueue );
-taskEXIT_CRITICAL( &pxQueue->mux);
+taskEXIT_CRITICAL();
if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
{
@@ -1497,7 +1512,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
* of execution time efficiency. */
for( ; ; )
{
-taskENTER_CRITICAL( &pxQueue->mux );
+taskENTER_CRITICAL();
{
/* Semaphores are queues with an item size of 0, and where the
* number of messages in the queue is the semaphore's count value. */
@@ -1546,7 +1561,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
mtCOVERAGE_TEST_MARKER();
}
-taskEXIT_CRITICAL( &pxQueue->mux );
+taskEXIT_CRITICAL();
return pdPASS;
}
else
@@ -1564,7 +1579,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
/* The semaphore count was 0 and no block time is specified
* (or the block time has expired) so exit now. */
-taskEXIT_CRITICAL( &pxQueue->mux );
+taskEXIT_CRITICAL();
traceQUEUE_RECEIVE_FAILED( pxQueue );
return errQUEUE_EMPTY;
}
@@ -1582,12 +1597,12 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
}
}
}
-taskEXIT_CRITICAL( &pxQueue->mux );
+taskEXIT_CRITICAL();
/* Interrupts and other tasks can give to and take from the semaphore
* now the critical section has been exited. */
-taskENTER_CRITICAL( &pxQueue->mux );
+taskENTER_CRITICAL();
prvLockQueue( pxQueue );
/* Update the timeout state to see if it has expired yet. */
@@ -1605,11 +1620,11 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
{
if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
{
-taskENTER_CRITICAL( &pxQueue->mux);
+taskENTER_CRITICAL();
{
xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder );
}
-taskEXIT_CRITICAL( &pxQueue->mux);
+taskEXIT_CRITICAL();
}
else
{
@@ -1620,7 +1635,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
prvUnlockQueue( pxQueue );
-taskEXIT_CRITICAL( &pxQueue->mux);
+taskEXIT_CRITICAL();
portYIELD_WITHIN_API();
}
else
@@ -1628,14 +1643,14 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
/* There was no timeout and the semaphore count was not 0, so
* attempt to take the semaphore again. */
prvUnlockQueue( pxQueue );
-taskEXIT_CRITICAL( &pxQueue->mux );
+taskEXIT_CRITICAL();
}
}
else
{
/* Timed out. */
prvUnlockQueue( pxQueue );
-taskEXIT_CRITICAL( &pxQueue->mux);
+taskEXIT_CRITICAL();
/* If the semaphore count is 0 exit now as the timeout has
* expired. Otherwise return to attempt to take the semaphore that is
@@ -1650,7 +1665,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
* test the mutex type again to check it is actually a mutex. */
if( xInheritanceOccurred != pdFALSE )
{
-taskENTER_CRITICAL( &pxQueue->mux );
+taskENTER_CRITICAL();
{
UBaseType_t uxHighestWaitingPriority;
@@ -1662,7 +1677,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue );
vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority );
}
-taskEXIT_CRITICAL( &pxQueue->mux );
+taskEXIT_CRITICAL();
}
}
#endif /* configUSE_MUTEXES */
@@ -1707,7 +1722,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
* interest of execution time efficiency. */
for( ; ; )
{
-taskENTER_CRITICAL( &pxQueue->mux );
+taskENTER_CRITICAL();
{
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
@@ -1745,7 +1760,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
mtCOVERAGE_TEST_MARKER();
}
-taskEXIT_CRITICAL( &pxQueue->mux );
+taskEXIT_CRITICAL();
return pdPASS;
}
else
@@ -1754,7 +1769,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
{
/* The queue was empty and no block time is specified (or
* the block time has expired) so leave now. */
-taskEXIT_CRITICAL( &pxQueue->mux );
+taskEXIT_CRITICAL();
traceQUEUE_PEEK_FAILED( pxQueue );
return errQUEUE_EMPTY;
}
@@ -1773,12 +1788,12 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
}
}
}
-taskEXIT_CRITICAL( &pxQueue->mux );
+taskEXIT_CRITICAL();
/* Interrupts and other tasks can send to and receive from the queue
* now the critical section has been exited. */
-taskENTER_CRITICAL( &pxQueue->mux );
+taskENTER_CRITICAL();
prvLockQueue( pxQueue );
/* Update the timeout state to see if it has expired yet. */
@@ -1791,7 +1806,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
traceBLOCKING_ON_QUEUE_PEEK( pxQueue );
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
prvUnlockQueue( pxQueue );
-taskEXIT_CRITICAL( &pxQueue->mux );
+taskEXIT_CRITICAL();
portYIELD_WITHIN_API();
}
else
@@ -1799,7 +1814,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
/* There is data in the queue now, so don't enter the blocked
* state, instead return to try and obtain the data. */
prvUnlockQueue( pxQueue );
-taskEXIT_CRITICAL( &pxQueue->mux );
+taskEXIT_CRITICAL();
}
}
else
@@ -1807,7 +1822,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
/* The timeout has expired. If there is still no data in the queue
* exit, otherwise go back and try to read the data again. */
prvUnlockQueue( pxQueue );
-taskEXIT_CRITICAL( &pxQueue->mux );
+taskEXIT_CRITICAL();
if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
{
@@ -1852,7 +1867,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
{
-taskENTER_CRITICAL_ISR(&pxQueue->mux);
+taskENTER_CRITICAL_ISR();
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
@@ -1911,7 +1926,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
xReturn = pdFAIL;
traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
}
-taskEXIT_CRITICAL_ISR(&pxQueue->mux);
+taskEXIT_CRITICAL_ISR();
}
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
@@ -1948,7 +1963,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
-taskENTER_CRITICAL_ISR(&pxQueue->mux);
+taskENTER_CRITICAL_ISR();
{
/* Cannot block in an ISR, so check there is data available. */
if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
@@ -1969,7 +1984,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
}
}
-taskEXIT_CRITICAL_ISR(&pxQueue->mux);
+taskEXIT_CRITICAL_ISR();
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
return xReturn;
@@ -1983,11 +1998,11 @@ UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
configASSERT( xQueue );
-taskENTER_CRITICAL( &pxQueue->mux );
+taskENTER_CRITICAL();
{
uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
}
-taskEXIT_CRITICAL( &pxQueue->mux );
+taskEXIT_CRITICAL();
return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
@@ -2000,11 +2015,11 @@ UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
configASSERT( pxQueue );
-taskENTER_CRITICAL( &pxQueue->mux );
+taskENTER_CRITICAL();
{
uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
}
-taskEXIT_CRITICAL( &pxQueue->mux );
+taskEXIT_CRITICAL();
return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
@@ -2234,7 +2249,7 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
* removed from the queue while the queue was locked. When a queue is
* locked items can be added or removed, but the event lists cannot be
* updated. */
-taskENTER_CRITICAL( &pxQueue->mux );
+taskENTER_CRITICAL();
{
int8_t cTxLock = pxQueue->cTxLock;
@@ -2312,10 +2327,10 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
pxQueue->cTxLock = queueUNLOCKED;
}
-taskEXIT_CRITICAL( &pxQueue->mux );
+taskEXIT_CRITICAL();
/* Do the same for the Rx lock. */
-taskENTER_CRITICAL( &pxQueue->mux );
+taskENTER_CRITICAL();
{
int8_t cRxLock = pxQueue->cRxLock;
@@ -2342,15 +2357,14 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
pxQueue->cRxLock = queueUNLOCKED;
}
-taskEXIT_CRITICAL( &pxQueue->mux );
+taskEXIT_CRITICAL();
}
/*-----------------------------------------------------------*/
static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
{
BaseType_t xReturn;
-Queue_t * pxQ = (Queue_t *)pxQueue;
-taskENTER_CRITICAL( &pxQ->mux );
+taskENTER_CRITICAL();
{
if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
{
@@ -2361,7 +2375,7 @@ static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
xReturn = pdFALSE;
}
}
-taskEXIT_CRITICAL( &pxQ->mux );
+taskEXIT_CRITICAL();
return xReturn;
}
@@ -2872,8 +2886,11 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
QueueSetHandle_t xQueueSet )
{
BaseType_t xReturn;
+#ifdef ESP_PLATFORM
+Queue_t * pxQueue = (Queue_t * )xQueueOrSemaphore;
+#endif
-taskENTER_CRITICAL(&(((Queue_t * )xQueueOrSemaphore)->mux));
+taskENTER_CRITICAL();
{
if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
{
@@ -2892,7 +2909,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
xReturn = pdPASS;
}
}
-taskEXIT_CRITICAL(&(((Queue_t * )xQueueOrSemaphore)->mux));
+taskEXIT_CRITICAL();
return xReturn;
}
@@ -2922,12 +2939,15 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
}
else
{
-taskENTER_CRITICAL(&(pxQueueOrSemaphore->mux));
+#ifdef ESP_PLATFORM
+Queue_t* pxQueue = (Queue_t*)pxQueueOrSemaphore;
+#endif
+taskENTER_CRITICAL();
{
/* The queue is no longer contained in the set. */
pxQueueOrSemaphore->pxQueueSetContainer = NULL;
}
-taskEXIT_CRITICAL(&(pxQueueOrSemaphore->mux));
+taskEXIT_CRITICAL();
xReturn = pdPASS;
}
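Note the last two queue.c hunks above: where a function receives its handle as xQueueOrSemaphore or pxQueueOrSemaphore rather than pxQueue, the ESP_PLATFORM build adds a local Queue_t * pxQueue purely so that the file-local taskCRITICAL_MUX expansion ( &((Queue_t *)pxQueue)->mux ) has a pxQueue symbol to resolve against. A hedged, abbreviated sketch of why that local is needed (the function appears to be xQueueAddToSet, inferred from its parameter list; the elided body is not reproduced here):

    /* queue.c maps the critical-section macros onto the queue's own spinlock: */
    #define taskCRITICAL_MUX        &((Queue_t *)pxQueue)->mux
    #define taskENTER_CRITICAL( )   portENTER_CRITICAL( taskCRITICAL_MUX )
    #define taskEXIT_CRITICAL( )    portEXIT_CRITICAL( taskCRITICAL_MUX )

    BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
                               QueueSetHandle_t xQueueSet )
    {
        BaseType_t xReturn = pdFAIL;
    #ifdef ESP_PLATFORM
        Queue_t * pxQueue = ( Queue_t * ) xQueueOrSemaphore; /* gives taskCRITICAL_MUX its pxQueue */
    #endif

        taskENTER_CRITICAL();   /* expands to portENTER_CRITICAL( &pxQueue->mux ) on ESP_PLATFORM */
        /* ... link the member into the set and set xReturn ... */
        taskEXIT_CRITICAL();

        return xReturn;
    }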

File: stream_buffer.c

@@ -38,6 +38,18 @@
#include "task.h"
#include "stream_buffer.h"
+#ifdef ESP_PLATFORM
+#define taskCRITICAL_MUX &pxStreamBuffer->xStreamBufferMux
+#undef taskENTER_CRITICAL
+#undef taskEXIT_CRITICAL
+#undef taskENTER_CRITICAL_ISR
+#undef taskEXIT_CRITICAL_ISR
+#define taskENTER_CRITICAL( ) portENTER_CRITICAL( taskCRITICAL_MUX )
+#define taskEXIT_CRITICAL( ) portEXIT_CRITICAL( taskCRITICAL_MUX )
+#define taskENTER_CRITICAL_ISR( ) portENTER_CRITICAL_ISR( taskCRITICAL_MUX )
+#define taskEXIT_CRITICAL_ISR( ) portEXIT_CRITICAL_ISR( taskCRITICAL_MUX )
+#endif
#if ( configUSE_TASK_NOTIFICATIONS != 1 )
#error configUSE_TASK_NOTIFICATIONS must be set to 1 to build stream_buffer.c
#endif
@@ -54,7 +66,7 @@
/*lint -save -e9026 Function like macros allowed and needed here so they can be overridden. */
#ifndef sbRECEIVE_COMPLETED
#define sbRECEIVE_COMPLETED( pxStreamBuffer ) \
-taskENTER_CRITICAL( &pxStreamBuffer->xStreamBufferMux ); \
+taskENTER_CRITICAL(); \
{ \
if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \
{ \
@@ -64,7 +76,7 @@
( pxStreamBuffer )->xTaskWaitingToSend = NULL; \
} \
} \
-taskEXIT_CRITICAL( &pxStreamBuffer->xStreamBufferMux );
+taskEXIT_CRITICAL();
#endif /* sbRECEIVE_COMPLETED */
#ifndef sbRECEIVE_COMPLETED_FROM_ISR
@@ -93,7 +105,7 @@
* that uses task notifications. */
#ifndef sbSEND_COMPLETED
#define sbSEND_COMPLETED( pxStreamBuffer ) \
-taskENTER_CRITICAL( &pxStreamBuffer->xStreamBufferMux ); \
+taskENTER_CRITICAL(); \
{ \
if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \
{ \
@@ -103,7 +115,7 @@
( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \
} \
} \
-taskEXIT_CRITICAL( &pxStreamBuffer->xStreamBufferMux );
+taskEXIT_CRITICAL();
#endif /* sbSEND_COMPLETED */
#ifndef sbSEND_COMPLETE_FROM_ISR
@@ -422,7 +434,7 @@ BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer )
#endif
/* Can only reset a message buffer if there are no tasks blocked on it. */
-taskENTER_CRITICAL( &pxStreamBuffer->xStreamBufferMux );
+taskENTER_CRITICAL();
{
if( pxStreamBuffer->xTaskWaitingToReceive == NULL )
{
@@ -445,7 +457,7 @@ BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer )
}
}
}
-taskEXIT_CRITICAL( &pxStreamBuffer->xStreamBufferMux );
+taskEXIT_CRITICAL();
return xReturn;
}
@@ -554,7 +566,7 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
{
/* Wait until the required number of bytes are free in the message
* buffer. */
-taskENTER_CRITICAL( &pxStreamBuffer->xStreamBufferMux );
+taskENTER_CRITICAL();
{
xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer );
@@ -569,11 +581,11 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
}
else
{
-taskEXIT_CRITICAL( &pxStreamBuffer->xStreamBufferMux );
+taskEXIT_CRITICAL();
break;
}
}
-taskEXIT_CRITICAL( &pxStreamBuffer->xStreamBufferMux );
+taskEXIT_CRITICAL();
traceBLOCKING_ON_STREAM_BUFFER_SEND( xStreamBuffer );
( void ) xTaskNotifyWait( ( uint32_t ) 0, ( uint32_t ) 0, NULL, xTicksToWait );
@@ -752,7 +764,7 @@ size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
{
/* Checking if there is data and clearing the notification state must be
* performed atomically. */
-taskENTER_CRITICAL( &pxStreamBuffer->xStreamBufferMux );
+taskENTER_CRITICAL();
{
xBytesAvailable = prvBytesInBuffer( pxStreamBuffer );
@@ -775,7 +787,7 @@ size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
mtCOVERAGE_TEST_MARKER();
}
}
-taskEXIT_CRITICAL( &pxStreamBuffer->xStreamBufferMux );
+taskEXIT_CRITICAL();
if( xBytesAvailable <= xBytesToStoreMessageLength )
{

File: tasks.c

@ -39,6 +39,18 @@
#include "timers.h" #include "timers.h"
#include "stack_macros.h" #include "stack_macros.h"
#ifdef ESP_PLATFORM
#define taskCRITICAL_MUX &xTaskQueueMutex
#undef taskENTER_CRITICAL
#undef taskEXIT_CRITICAL
#undef taskENTER_CRITICAL_ISR
#undef taskEXIT_CRITICAL_ISR
#define taskENTER_CRITICAL( ) portENTER_CRITICAL( taskCRITICAL_MUX )
#define taskEXIT_CRITICAL( ) portEXIT_CRITICAL( taskCRITICAL_MUX )
#define taskENTER_CRITICAL_ISR( ) portENTER_CRITICAL_ISR( taskCRITICAL_MUX )
#define taskEXIT_CRITICAL_ISR( ) portEXIT_CRITICAL_ISR( taskCRITICAL_MUX )
#endif
/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified /* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
* because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined * because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
* for the header files above, but not in this file, in order to generate the * for the header files above, but not in this file, in order to generate the
@ -358,7 +370,10 @@ PRIVILEGED_DATA static List_t xDelayedTaskList2; /*< Del
PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /*< Points to the delayed task list currently being used. */ PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /*< Points to the delayed task list currently being used. */
PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */ PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
PRIVILEGED_DATA static List_t xPendingReadyList[ portNUM_PROCESSORS ]; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */ PRIVILEGED_DATA static List_t xPendingReadyList[ portNUM_PROCESSORS ]; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */
#ifdef ESP_PLATFORM
PRIVILEGED_DATA static portMUX_TYPE xTaskQueueMutex = portMUX_INITIALIZER_UNLOCKED; PRIVILEGED_DATA static portMUX_TYPE xTaskQueueMutex = portMUX_INITIALIZER_UNLOCKED;
#endif // ESP_PLATFORM
#if ( INCLUDE_vTaskDelete == 1 ) #if ( INCLUDE_vTaskDelete == 1 )
@ -1173,7 +1188,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
/* Ensure interrupts don't access the task lists while the lists are being /* Ensure interrupts don't access the task lists while the lists are being
* updated. */ * updated. */
taskENTER_CRITICAL( &xTaskQueueMutex ); taskENTER_CRITICAL();
{ {
uxCurrentNumberOfTasks++; uxCurrentNumberOfTasks++;
@ -1267,13 +1282,13 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
portSETUP_TCB( pxNewTCB ); portSETUP_TCB( pxNewTCB );
} }
taskEXIT_CRITICAL( &xTaskQueueMutex ); taskEXIT_CRITICAL();
if( xSchedulerRunning != pdFALSE ) if( xSchedulerRunning != pdFALSE )
{ {
/* If the created task is of a higher priority than the current task /* If the created task is of a higher priority than the current task
* then it should run now. */ * then it should run now. */
taskENTER_CRITICAL(&xTaskQueueMutex); taskENTER_CRITICAL();
curTCB = pxCurrentTCB[ xCoreID ]; curTCB = pxCurrentTCB[ xCoreID ];
if( curTCB == NULL || curTCB->uxPriority < pxNewTCB->uxPriority ) if( curTCB == NULL || curTCB->uxPriority < pxNewTCB->uxPriority )
@ -1290,7 +1305,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
{ {
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
taskEXIT_CRITICAL(&xTaskQueueMutex); taskEXIT_CRITICAL();
} }
else else
{ {
@ -1308,7 +1323,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
BaseType_t core; BaseType_t core;
BaseType_t xFreeNow = 0; BaseType_t xFreeNow = 0;
taskENTER_CRITICAL( &xTaskQueueMutex ); taskENTER_CRITICAL();
{ {
core = xPortGetCoreID(); core = xPortGetCoreID();
curTCB = pxCurrentTCB[core]; curTCB = pxCurrentTCB[core];
@ -1396,7 +1411,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
traceTASK_DELETE( pxTCB ); traceTASK_DELETE( pxTCB );
} }
taskEXIT_CRITICAL( &xTaskQueueMutex ); taskEXIT_CRITICAL();
if(xFreeNow == pdTRUE) { if(xFreeNow == pdTRUE) {
#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS ) #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
@ -1437,7 +1452,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
configASSERT( ( xTimeIncrement > 0U ) ); configASSERT( ( xTimeIncrement > 0U ) );
configASSERT( uxSchedulerSuspended[xPortGetCoreID()] == 0 ); configASSERT( uxSchedulerSuspended[xPortGetCoreID()] == 0 );
taskENTER_CRITICAL( &xTaskQueueMutex ); taskENTER_CRITICAL();
{ {
/* Minor optimisation. The tick count cannot change in this /* Minor optimisation. The tick count cannot change in this
* block. */ * block. */
@ -1493,7 +1508,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
taskEXIT_CRITICAL( &xTaskQueueMutex ); taskEXIT_CRITICAL();
/* Force a reschedule, we may have put ourselves to sleep. */ /* Force a reschedule, we may have put ourselves to sleep. */
portYIELD_WITHIN_API(); portYIELD_WITHIN_API();
@ -1510,7 +1525,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
if( xTicksToDelay > ( TickType_t ) 0U ) if( xTicksToDelay > ( TickType_t ) 0U )
{ {
configASSERT( uxSchedulerSuspended[xPortGetCoreID()] == 0 ); configASSERT( uxSchedulerSuspended[xPortGetCoreID()] == 0 );
taskENTER_CRITICAL( &xTaskQueueMutex ); taskENTER_CRITICAL();
{ {
traceTASK_DELAY(); traceTASK_DELAY();
@ -1523,7 +1538,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
* executing task. */ * executing task. */
prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTicksToDelay ); prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTicksToDelay );
} }
taskEXIT_CRITICAL( &xTaskQueueMutex ); taskEXIT_CRITICAL();
} }
else else
{ {
@ -1547,7 +1562,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
configASSERT( pxTCB ); configASSERT( pxTCB );
taskENTER_CRITICAL( &xTaskQueueMutex ); //Need critical section incase either core context switches in between taskENTER_CRITICAL(); //Need critical section incase either core context switches in between
if( pxTCB == pxCurrentTCB[xPortGetCoreID()]) if( pxTCB == pxCurrentTCB[xPortGetCoreID()])
{ {
/* The task calling this function is querying its own state. */ /* The task calling this function is querying its own state. */
@ -1626,7 +1641,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
eReturn = eReady; eReturn = eReady;
} }
} }
taskEXIT_CRITICAL( &xTaskQueueMutex ); taskEXIT_CRITICAL();
return eReturn; return eReturn;
} /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */ } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
@ -1641,14 +1656,14 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
TCB_t const * pxTCB; TCB_t const * pxTCB;
UBaseType_t uxReturn; UBaseType_t uxReturn;
taskENTER_CRITICAL( &xTaskQueueMutex ); taskENTER_CRITICAL();
{ {
/* If null is passed in here then it is the priority of the task /* If null is passed in here then it is the priority of the task
* that called uxTaskPriorityGet() that is being queried. */ * that called uxTaskPriorityGet() that is being queried. */
pxTCB = prvGetTCBFromHandle( xTask ); pxTCB = prvGetTCBFromHandle( xTask );
uxReturn = pxTCB->uxPriority; uxReturn = pxTCB->uxPriority;
} }
taskEXIT_CRITICAL( &xTaskQueueMutex ); taskEXIT_CRITICAL();
return uxReturn; return uxReturn;
} }
@ -1717,7 +1732,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
taskENTER_CRITICAL( &xTaskQueueMutex ); taskENTER_CRITICAL();
{ {
/* If null is passed in here then it is the priority of the calling /* If null is passed in here then it is the priority of the calling
* task that is being changed. */ * task that is being changed. */
@ -1878,7 +1893,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
( void ) uxPriorityUsedOnEntry; ( void ) uxPriorityUsedOnEntry;
} }
} }
taskEXIT_CRITICAL( &xTaskQueueMutex ); taskEXIT_CRITICAL();
} }
#endif /* INCLUDE_vTaskPrioritySet */ #endif /* INCLUDE_vTaskPrioritySet */
@ -1891,7 +1906,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
TCB_t * pxTCB; TCB_t * pxTCB;
TCB_t * curTCB; TCB_t * curTCB;
taskENTER_CRITICAL( &xTaskQueueMutex ); taskENTER_CRITICAL();
{ {
/* If null is passed in here then it is the running task that is /* If null is passed in here then it is the running task that is
* being suspended. */ * being suspended. */
@ -1934,17 +1949,17 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
} }
#endif #endif
} }
taskEXIT_CRITICAL( &xTaskQueueMutex ); taskEXIT_CRITICAL();
if( xSchedulerRunning != pdFALSE ) if( xSchedulerRunning != pdFALSE )
{ {
/* Reset the next expected unblock time in case it referred to the /* Reset the next expected unblock time in case it referred to the
* task that is now in the Suspended state. */ * task that is now in the Suspended state. */
taskENTER_CRITICAL( &xTaskQueueMutex ); taskENTER_CRITICAL();
{ {
prvResetNextTaskUnblockTime(); prvResetNextTaskUnblockTime();
} }
taskEXIT_CRITICAL( &xTaskQueueMutex ); taskEXIT_CRITICAL();
} }
else else
{ {
@ -1956,9 +1971,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
if( xSchedulerRunning != pdFALSE ) if( xSchedulerRunning != pdFALSE )
{ {
/* The current task has just been suspended. */ /* The current task has just been suspended. */
taskENTER_CRITICAL(&xTaskQueueMutex); taskENTER_CRITICAL();
BaseType_t suspended = uxSchedulerSuspended[xPortGetCoreID()]; BaseType_t suspended = uxSchedulerSuspended[xPortGetCoreID()];
taskEXIT_CRITICAL(&xTaskQueueMutex); taskEXIT_CRITICAL();
configASSERT( suspended == 0 ); configASSERT( suspended == 0 );
(void)suspended; (void)suspended;
@ -1975,9 +1990,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
* NULL so when the next task is created pxCurrentTCB will * NULL so when the next task is created pxCurrentTCB will
* be set to point to it no matter what its relative priority * be set to point to it no matter what its relative priority
* is. */ * is. */
taskENTER_CRITICAL(&xTaskQueueMutex); taskENTER_CRITICAL();
pxCurrentTCB[ xPortGetCoreID() ] = NULL; pxCurrentTCB[ xPortGetCoreID() ] = NULL;
taskEXIT_CRITICAL(&xTaskQueueMutex); taskEXIT_CRITICAL();
} }
else else
{ {
@ -1992,11 +2007,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
/* A task other than the currently running task was suspended, /* A task other than the currently running task was suspended,
* reset the next expected unblock time in case it referred to the * reset the next expected unblock time in case it referred to the
* task that is now in the Suspended state. */ * task that is now in the Suspended state. */
taskENTER_CRITICAL(&xTaskQueueMutex); taskENTER_CRITICAL();
{ {
prvResetNextTaskUnblockTime(); prvResetNextTaskUnblockTime();
} }
taskEXIT_CRITICAL(&xTaskQueueMutex); taskEXIT_CRITICAL();
} }
else else
{ {
@ -2063,7 +2078,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
/* It does not make sense to resume the calling task. */ /* It does not make sense to resume the calling task. */
configASSERT( xTaskToResume ); configASSERT( xTaskToResume );
taskENTER_CRITICAL( &xTaskQueueMutex ); taskENTER_CRITICAL();
/* The parameter cannot be NULL as it is impossible to resume the /* The parameter cannot be NULL as it is impossible to resume the
* currently executing task. */ * currently executing task. */
@ -2106,7 +2121,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
{ {
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
taskEXIT_CRITICAL( &xTaskQueueMutex ); taskEXIT_CRITICAL();
} }
#endif /* INCLUDE_vTaskSuspend */ #endif /* INCLUDE_vTaskSuspend */
@ -2140,7 +2155,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
* https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
//portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); //portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
taskENTER_CRITICAL_ISR(&xTaskQueueMutex); taskENTER_CRITICAL_ISR();
{ {
if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
{ {
@ -2180,7 +2195,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
taskEXIT_CRITICAL_ISR(&xTaskQueueMutex); taskEXIT_CRITICAL_ISR();
return xYieldRequired; return xYieldRequired;
} }
@ -2394,7 +2409,7 @@ void vTaskSuspendAll( void )
TickType_t xReturn; TickType_t xReturn;
taskENTER_CRITICAL(&xTaskQueueMutex); taskENTER_CRITICAL();
if( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority > tskIDLE_PRIORITY ) if( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority > tskIDLE_PRIORITY )
{ {
xReturn = 0; xReturn = 0;
@ -2421,7 +2436,7 @@ void vTaskSuspendAll( void )
{ {
xReturn = xNextTaskUnblockTime - xTickCount; xReturn = xNextTaskUnblockTime - xTickCount;
} }
taskEXIT_CRITICAL(&xTaskQueueMutex); taskEXIT_CRITICAL();
return xReturn; return xReturn;
} }
@ -2436,7 +2451,7 @@ BaseType_t xTaskResumeAll( void )
TickType_t xTicksToNextUnblockTime; TickType_t xTicksToNextUnblockTime;
/* If uxSchedulerSuspended[xPortGetCoreID()] is zero then this function does not match a /* If uxSchedulerSuspended[xPortGetCoreID()] is zero then this function does not match a
* previous call to taskENTER_CRITICAL( &xTaskQueueMutex ). */ * previous call to taskENTER_CRITICAL(). */
configASSERT( uxSchedulerSuspended[xPortGetCoreID()] ); configASSERT( uxSchedulerSuspended[xPortGetCoreID()] );
/* It is possible that an ISR caused a task to be removed from an event /* It is possible that an ISR caused a task to be removed from an event
@ -2444,7 +2459,7 @@ BaseType_t xTaskResumeAll( void )
* removed task will have been added to the xPendingReadyList. Once the * removed task will have been added to the xPendingReadyList. Once the
* scheduler has been resumed it is safe to move all the pending ready * scheduler has been resumed it is safe to move all the pending ready
* tasks from this list into their appropriate ready list. */ * tasks from this list into their appropriate ready list. */
taskENTER_CRITICAL( &xTaskQueueMutex ); taskENTER_CRITICAL();
{ {
--uxSchedulerSuspended[xPortGetCoreID()]; --uxSchedulerSuspended[xPortGetCoreID()];
@ -2555,7 +2570,7 @@ BaseType_t xTaskResumeAll( void )
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
taskEXIT_CRITICAL( &xTaskQueueMutex ); taskEXIT_CRITICAL();
return xAlreadyYielded; return xAlreadyYielded;
} }
@ -2701,7 +2716,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
/* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */ /* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */
configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN ); configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN );
taskENTER_CRITICAL( &xTaskQueueMutex ); taskENTER_CRITICAL();
{ {
/* Search the ready lists. */ /* Search the ready lists. */
do do
@ -2747,7 +2762,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
} }
#endif #endif
} }
taskEXIT_CRITICAL( &xTaskQueueMutex ); taskEXIT_CRITICAL();
return pxTCB; return pxTCB;
} }
@ -2763,7 +2778,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
{ {
UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES; UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES;
taskENTER_CRITICAL( &xTaskQueueMutex ); taskENTER_CRITICAL();
{ {
/* Is there a space in the array for each task in the system? */ /* Is there a space in the array for each task in the system? */
if( uxArraySize >= uxCurrentNumberOfTasks ) if( uxArraySize >= uxCurrentNumberOfTasks )
@ -2822,7 +2837,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
taskEXIT_CRITICAL( &xTaskQueueMutex ); taskEXIT_CRITICAL();
return uxTask; return uxTask;
} }
@ -2860,11 +2875,11 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
/* Correct the tick count value after a period during which the tick /* Correct the tick count value after a period during which the tick
* was suppressed. Note this does *not* call the tick hook function for * was suppressed. Note this does *not* call the tick hook function for
* each stepped tick. */ * each stepped tick. */
taskENTER_CRITICAL(&xTaskQueueMutex); taskENTER_CRITICAL();
configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime ); configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );
xTickCount += xTicksToJump; xTickCount += xTicksToJump;
traceINCREASE_TICK_COUNT( xTicksToJump ); traceINCREASE_TICK_COUNT( xTicksToJump );
taskEXIT_CRITICAL(&xTaskQueueMutex); taskEXIT_CRITICAL();
} }
#endif /* configUSE_TICKLESS_IDLE */ #endif /* configUSE_TICKLESS_IDLE */
@ -2880,9 +2895,9 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
/* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occuring when /* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occuring when
* the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */ * the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */
taskENTER_CRITICAL( &xTaskQueueMutex ); taskENTER_CRITICAL();
xPendedTicks += xTicksToCatchUp; xPendedTicks += xTicksToCatchUp;
taskEXIT_CRITICAL( &xTaskQueueMutex ); taskEXIT_CRITICAL();
return xYieldRequired; return xYieldRequired;
} }
@ -2897,7 +2912,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
configASSERT( pxTCB ); configASSERT( pxTCB );
taskENTER_CRITICAL( &xTaskQueueMutex ); taskENTER_CRITICAL();
{ {
/* A task can only be prematurely removed from the Blocked state if /* A task can only be prematurely removed from the Blocked state if
* it is actually in the Blocked state. */ * it is actually in the Blocked state. */
@ -2914,7 +2929,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
* the event list too. Interrupts can touch the event list item, * the event list too. Interrupts can touch the event list item,
* even though the scheduler is suspended, so a critical section * even though the scheduler is suspended, so a critical section
* is used. */ * is used. */
taskENTER_CRITICAL( &xTaskQueueMutex ); taskENTER_CRITICAL();
{ {
if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
{ {
@ -2926,7 +2941,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
taskEXIT_CRITICAL( &xTaskQueueMutex ); taskEXIT_CRITICAL();
/* Place the unblocked task into the appropriate ready list. */ /* Place the unblocked task into the appropriate ready list. */
prvAddTaskToReadyList( pxTCB ); prvAddTaskToReadyList( pxTCB );
@ -2960,7 +2975,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
xReturn = pdFAIL; xReturn = pdFAIL;
} }
} }
taskEXIT_CRITICAL( &xTaskQueueMutex ); taskEXIT_CRITICAL();
return xReturn; return xReturn;
} }
@ -2998,7 +3013,7 @@ BaseType_t xTaskIncrementTick( void )
traceTASK_INCREMENT_TICK( xTickCount ); traceTASK_INCREMENT_TICK( xTickCount );
if( uxSchedulerSuspended[xPortGetCoreID()] == ( UBaseType_t ) pdFALSE ) if( uxSchedulerSuspended[xPortGetCoreID()] == ( UBaseType_t ) pdFALSE )
{ {
taskENTER_CRITICAL_ISR( &xTaskQueueMutex ); taskENTER_CRITICAL_ISR();
/* Minor optimisation. The tick count cannot change in this /* Minor optimisation. The tick count cannot change in this
* block. */ * block. */
const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1;
@ -3113,7 +3128,7 @@ BaseType_t xTaskIncrementTick( void )
} }
} }
#endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */ #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
taskEXIT_CRITICAL_ISR(&xTaskQueueMutex); taskEXIT_CRITICAL_ISR();
} }
else else
{ {
@ -3156,11 +3171,11 @@ BaseType_t xTaskIncrementTick( void )
/* Save the hook function in the TCB. A critical section is required as /* Save the hook function in the TCB. A critical section is required as
* the value can be accessed from an interrupt. */ * the value can be accessed from an interrupt. */
taskENTER_CRITICAL( &xTaskQueueMutex ); taskENTER_CRITICAL();
{ {
xTCB->pxTaskTag = pxHookFunction; xTCB->pxTaskTag = pxHookFunction;
} }
taskEXIT_CRITICAL( &xTaskQueueMutex ); taskEXIT_CRITICAL();
} }
#endif /* configUSE_APPLICATION_TASK_TAG */ #endif /* configUSE_APPLICATION_TASK_TAG */
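The two hunks above wrap reads and writes of pxTaskTag in a critical section because the tag can also be used from interrupt context, for example via xTaskCallApplicationTaskHook(). A short hedged usage sketch, assuming configUSE_APPLICATION_TASK_TAG is 1; the hook body and function names are illustrative:

    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    /* Illustrative hook; a real one might toggle a trace pin, etc. */
    static BaseType_t prvMyTaskHook( void * pvParameter )
    {
        ( void ) pvParameter;
        return pdPASS;
    }

    void vRegisterHookForCurrentTask( void )
    {
        /* NULL means "the calling task"; the store is protected by the
         * critical section shown in the hunk above. */
        vTaskSetApplicationTaskTag( NULL, prvMyTaskHook );
    }

    void vInvokeHookOfTask( TaskHandle_t xTask )
    {
        /* Runs xTask's tag, if one has been registered. */
        ( void ) xTaskCallApplicationTaskHook( xTask, NULL );
    }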
@ -3178,11 +3193,11 @@ BaseType_t xTaskIncrementTick( void )
/* Save the hook function in the TCB. A critical section is required as /* Save the hook function in the TCB. A critical section is required as
* the value can be accessed from an interrupt. */ * the value can be accessed from an interrupt. */
taskENTER_CRITICAL( &xTaskQueueMutex ); taskENTER_CRITICAL();
{ {
xReturn = pxTCB->pxTaskTag; xReturn = pxTCB->pxTaskTag;
} }
taskEXIT_CRITICAL( &xTaskQueueMutex ); taskEXIT_CRITICAL();
return xReturn; return xReturn;
} }
@ -3280,7 +3295,7 @@ void vTaskSwitchContext( void )
* overflows. The guard against negative values is to protect * overflows. The guard against negative values is to protect
* against suspect run time stat counter implementations - which * against suspect run time stat counter implementations - which
* are provided by the application, not the kernel. */ * are provided by the application, not the kernel. */
taskENTER_CRITICAL_ISR(&xTaskQueueMutex); taskENTER_CRITICAL_ISR();
if( ulTotalRunTime > ulTaskSwitchedInTime[ xPortGetCoreID() ] ) if( ulTotalRunTime > ulTaskSwitchedInTime[ xPortGetCoreID() ] )
{ {
pxCurrentTCB[ xPortGetCoreID() ]->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime[ xPortGetCoreID() ] ); pxCurrentTCB[ xPortGetCoreID() ]->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime[ xPortGetCoreID() ] );
@ -3289,7 +3304,7 @@ void vTaskSwitchContext( void )
{ {
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
taskEXIT_CRITICAL_ISR(&xTaskQueueMutex); taskEXIT_CRITICAL_ISR();
ulTaskSwitchedInTime[ xPortGetCoreID() ] = ulTotalRunTime; ulTaskSwitchedInTime[ xPortGetCoreID() ] = ulTotalRunTime;
} }
#endif /* configGENERATE_RUN_TIME_STATS */ #endif /* configGENERATE_RUN_TIME_STATS */
@ -3301,7 +3316,7 @@ void vTaskSwitchContext( void )
/* Select a new task to run */ /* Select a new task to run */
/* /*
We cannot do taskENTER_CRITICAL_ISR(&xTaskQueueMutex); here because it saves the interrupt context to the task tcb, and we're We cannot do taskENTER_CRITICAL_ISR(); here because it saves the interrupt context to the task tcb, and we're
swapping that out here. Instead, we're going to do the work here ourselves. Because interrupts are already disabled, we only swapping that out here. Instead, we're going to do the work here ourselves. Because interrupts are already disabled, we only
need to acquire the mutex. need to acquire the mutex.
*/ */
@ -3405,9 +3420,11 @@ void vTaskSwitchContext( void )
traceTASK_SWITCHED_IN(); traceTASK_SWITCHED_IN();
xSwitchingContext[ xPortGetCoreID() ] = pdFALSE; xSwitchingContext[ xPortGetCoreID() ] = pdFALSE;
#ifdef ESP_PLATFORM
//Exit critical region manually as well: release the mux now, interrupts will be re-enabled when we //Exit critical region manually as well: release the mux now, interrupts will be re-enabled when we
//exit the function. //exit the function.
vPortCPUReleaseMutex( &xTaskQueueMutex ); vPortCPUReleaseMutex( &xTaskQueueMutex );
#endif // ESP_PLATFORM
#if CONFIG_FREERTOS_WATCHPOINT_END_OF_STACK #if CONFIG_FREERTOS_WATCHPOINT_END_OF_STACK
vPortSetStackWatchpoint(pxCurrentTCB[xPortGetCoreID()]->pxStack); vPortSetStackWatchpoint(pxCurrentTCB[xPortGetCoreID()]->pxStack);
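The comment in this hunk explains why vTaskSwitchContext() cannot simply use taskENTER_CRITICAL_ISR(): that macro saves interrupt state into the current TCB, which is exactly what is being swapped out, so the spinlock is taken and released by hand while interrupts are already disabled. A rough sketch of that shape is below; it assumes vPortCPUAcquireMutex() is the ESP-IDF counterpart of the vPortCPUReleaseMutex() call visible above, and the mux is an illustrative stand-in for the kernel's xTaskQueueMutex:

    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    /* Illustrative stand-in for the kernel's task-queue spinlock. */
    static portMUX_TYPE xIllustrativeKernelMux = portMUX_INITIALIZER_UNLOCKED;

    /* Sketch only: interrupts are already disabled when the scheduler gets
     * here, so only the cross-core spinlock needs handling, and it is taken
     * directly instead of via taskENTER_CRITICAL_ISR(). */
    void vExampleSwitchContextShape( void )
    {
        vPortCPUAcquireMutex( &xIllustrativeKernelMux );

        /* ... select the next task and update pxCurrentTCB for this core ... */

        vPortCPUReleaseMutex( &xIllustrativeKernelMux );
    }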
@ -3421,7 +3438,7 @@ void vTaskSwitchContext( void )
void vTaskPlaceOnEventList( List_t * const pxEventList, const TickType_t xTicksToWait ) void vTaskPlaceOnEventList( List_t * const pxEventList, const TickType_t xTicksToWait )
{ {
configASSERT( pxEventList ); configASSERT( pxEventList );
taskENTER_CRITICAL(&xTaskQueueMutex); taskENTER_CRITICAL();
/* THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED OR THE /* THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED OR THE
* SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */ * SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */
@ -3433,14 +3450,14 @@ void vTaskPlaceOnEventList( List_t * const pxEventList, const TickType_t xTicksT
vListInsert( pxEventList, &( pxCurrentTCB[xPortGetCoreID()]->xEventListItem ) ); vListInsert( pxEventList, &( pxCurrentTCB[xPortGetCoreID()]->xEventListItem ) );
prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTicksToWait); prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTicksToWait);
taskEXIT_CRITICAL(&xTaskQueueMutex); taskEXIT_CRITICAL();
} }
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xItemValue, const TickType_t xTicksToWait ) void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xItemValue, const TickType_t xTicksToWait )
{ {
configASSERT( pxEventList ); configASSERT( pxEventList );
taskENTER_CRITICAL(&xTaskQueueMutex); taskENTER_CRITICAL();
/* Store the item value in the event list item. It is safe to access the /* Store the item value in the event list item. It is safe to access the
* event list item here as interrupts won't access the event list item of a * event list item here as interrupts won't access the event list item of a
@ -3455,7 +3472,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xIte
vListInsertEnd( pxEventList, &( pxCurrentTCB[xPortGetCoreID()]->xEventListItem ) ); vListInsertEnd( pxEventList, &( pxCurrentTCB[xPortGetCoreID()]->xEventListItem ) );
prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTicksToWait ); prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTicksToWait );
taskEXIT_CRITICAL(&xTaskQueueMutex); taskEXIT_CRITICAL();
} }
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
@ -3463,7 +3480,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xIte
void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely ) void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
{ {
taskENTER_CRITICAL(&xTaskQueueMutex); taskENTER_CRITICAL();
configASSERT( pxEventList ); configASSERT( pxEventList );
/* This function should not be called by application code hence the /* This function should not be called by application code hence the
@ -3488,7 +3505,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xIte
traceTASK_DELAY_UNTIL( ); traceTASK_DELAY_UNTIL( );
prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTicksToWait ); prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTicksToWait );
taskEXIT_CRITICAL(&xTaskQueueMutex); taskEXIT_CRITICAL();
} }
#endif /* configUSE_TIMERS */ #endif /* configUSE_TIMERS */
@ -3501,7 +3518,7 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
BaseType_t xTaskCanBeReady; BaseType_t xTaskCanBeReady;
UBaseType_t i, uxTargetCPU; UBaseType_t i, uxTargetCPU;
taskENTER_CRITICAL_ISR(&xTaskQueueMutex); taskENTER_CRITICAL_ISR();
/* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
* called from a critical section within an ISR. */ * called from a critical section within an ISR. */
@ -3523,7 +3540,7 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
} }
else else
{ {
taskEXIT_CRITICAL_ISR(&xTaskQueueMutex); taskEXIT_CRITICAL_ISR();
return pdFALSE; return pdFALSE;
} }
@ -3593,7 +3610,7 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
} }
#endif #endif
taskEXIT_CRITICAL_ISR(&xTaskQueueMutex); taskEXIT_CRITICAL_ISR();
return xReturn; return xReturn;
} }
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
@ -3603,7 +3620,7 @@ BaseType_t xTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, cons
TCB_t * pxUnblockedTCB; TCB_t * pxUnblockedTCB;
BaseType_t xReturn; BaseType_t xReturn;
taskENTER_CRITICAL(&xTaskQueueMutex); taskENTER_CRITICAL();
/* Store the new item value in the event list. */ /* Store the new item value in the event list. */
listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE ); listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
@ -3642,7 +3659,7 @@ BaseType_t xTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, cons
xReturn = pdFALSE; xReturn = pdFALSE;
} }
taskEXIT_CRITICAL(&xTaskQueueMutex); taskEXIT_CRITICAL();
return xReturn; return xReturn;
} }
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
@ -3650,12 +3667,12 @@ BaseType_t xTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, cons
void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut )
{ {
configASSERT( pxTimeOut ); configASSERT( pxTimeOut );
taskENTER_CRITICAL( &xTaskQueueMutex ); taskENTER_CRITICAL();
{ {
pxTimeOut->xOverflowCount = xNumOfOverflows; pxTimeOut->xOverflowCount = xNumOfOverflows;
pxTimeOut->xTimeOnEntering = xTickCount; pxTimeOut->xTimeOnEntering = xTickCount;
} }
taskEXIT_CRITICAL( &xTaskQueueMutex ); taskEXIT_CRITICAL();
} }
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
@ -3674,7 +3691,7 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const
configASSERT( pxTimeOut ); configASSERT( pxTimeOut );
configASSERT( pxTicksToWait ); configASSERT( pxTicksToWait );
taskENTER_CRITICAL( &xTaskQueueMutex ); taskENTER_CRITICAL();
{ {
/* Minor optimisation. The tick count cannot change in this block. */ /* Minor optimisation. The tick count cannot change in this block. */
const TickType_t xConstTickCount = xTickCount; const TickType_t xConstTickCount = xTickCount;
@ -3724,7 +3741,7 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const
xReturn = pdTRUE; xReturn = pdTRUE;
} }
} }
taskEXIT_CRITICAL( &xTaskQueueMutex ); taskEXIT_CRITICAL();
return xReturn; return xReturn;
} }
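vTaskSetTimeOutState() and xTaskCheckForTimeOut() above form a pair: the first snapshots the tick count and overflow counter, the second later adjusts a remaining block time and reports whether it has expired, with tick-count wrap handled inside the critical section. A hedged sketch of the usual calling pattern follows; the byte-read helper is a hypothetical stub, not a kernel API:

    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    /* Hypothetical stub standing in for a driver-level poll. */
    static BaseType_t prvTryReadByte( uint8_t * pucByte )
    {
        ( void ) pucByte;
        return pdFAIL;
    }

    BaseType_t xReceiveByte( uint8_t * pucByte, TickType_t xTicksToWait )
    {
        TimeOut_t xTimeOut;

        vTaskSetTimeOutState( &xTimeOut );          /* capture the start point */

        while( prvTryReadByte( pucByte ) == pdFAIL )
        {
            /* Reduces xTicksToWait by the time already spent waiting and
             * returns pdTRUE once the full block time has elapsed. */
            if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) != pdFALSE )
            {
                return pdFAIL;
            }

            vTaskDelay( 1 );                        /* back off and retry */
        }

        return pdPASS;
    }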
@ -3872,7 +3889,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP ) if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
{ {
taskENTER_CRITICAL( &xTaskQueueMutex ); taskENTER_CRITICAL();
{ {
/* Now the scheduler is suspended, the expected idle /* Now the scheduler is suspended, the expected idle
* time can be sampled again, and this time its value can * time can be sampled again, and this time its value can
@ -3896,7 +3913,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
taskEXIT_CRITICAL( &xTaskQueueMutex ); taskEXIT_CRITICAL();
} }
else else
{ {
@ -3916,7 +3933,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
const UBaseType_t uxNonApplicationTasks = 1; const UBaseType_t uxNonApplicationTasks = 1;
eSleepModeStatus eReturn = eStandardSleep; eSleepModeStatus eReturn = eStandardSleep;
taskEXIT_CRITICAL(&xTaskQueueMutex); taskEXIT_CRITICAL();
if( listCURRENT_LIST_LENGTH( &xPendingReadyList[xPortGetCoreID()] ) != 0 ) if( listCURRENT_LIST_LENGTH( &xPendingReadyList[xPortGetCoreID()] ) != 0 )
{ {
/* A task was made ready while the scheduler was suspended. */ /* A task was made ready while the scheduler was suspended. */
@ -3942,7 +3959,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
taskEXIT_CRITICAL(&xTaskQueueMutex); taskEXIT_CRITICAL();
return eReturn; return eReturn;
} }
@ -3960,11 +3977,11 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
{ {
taskENTER_CRITICAL(&xTaskQueueMutex); taskENTER_CRITICAL();
pxTCB = prvGetTCBFromHandle( xTaskToSet ); pxTCB = prvGetTCBFromHandle( xTaskToSet );
pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue; pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
pxTCB->pvThreadLocalStoragePointersDelCallback[ xIndex ] = xDelCallback; pxTCB->pvThreadLocalStoragePointersDelCallback[ xIndex ] = xDelCallback;
taskEXIT_CRITICAL(&xTaskQueueMutex); taskEXIT_CRITICAL();
} }
} }
@ -3983,10 +4000,10 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
{ {
taskENTER_CRITICAL(&xTaskQueueMutex); taskENTER_CRITICAL();
pxTCB = prvGetTCBFromHandle( xTaskToSet ); pxTCB = prvGetTCBFromHandle( xTaskToSet );
pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue; pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
taskEXIT_CRITICAL(&xTaskQueueMutex); taskEXIT_CRITICAL();
} }
} }
#endif /* configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS */ #endif /* configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS */
@ -4083,13 +4100,13 @@ static void prvCheckTasksWaitingTermination( void )
BaseType_t xListIsEmpty; BaseType_t xListIsEmpty;
BaseType_t core = xPortGetCoreID(); BaseType_t core = xPortGetCoreID();
/* uxDeletedTasksWaitingCleanUp is used to prevent taskENTER_CRITICAL( &xTaskQueueMutex ) /* uxDeletedTasksWaitingCleanUp is used to prevent taskENTER_CRITICAL()
* being called too often in the idle task. */ * being called too often in the idle task. */
while( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U ) while( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U )
{ {
TCB_t *pxTCB = NULL; TCB_t *pxTCB = NULL;
taskENTER_CRITICAL(&xTaskQueueMutex); taskENTER_CRITICAL();
{ {
xListIsEmpty = listLIST_IS_EMPTY( &xTasksWaitingTermination ); xListIsEmpty = listLIST_IS_EMPTY( &xTasksWaitingTermination );
if( xListIsEmpty == pdFALSE ) if( xListIsEmpty == pdFALSE )
@ -4116,7 +4133,7 @@ static void prvCheckTasksWaitingTermination( void )
} }
} }
} }
taskEXIT_CRITICAL(&xTaskQueueMutex); //Need to call deletion callbacks outside critical section taskEXIT_CRITICAL(); //Need to call deletion callbacks outside critical section
if (pxTCB != NULL) { //Call deletion callbacks and free TCB memory if (pxTCB != NULL) { //Call deletion callbacks and free TCB memory
#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS ) #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
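prvCheckTasksWaitingTermination() above shows a pattern worth noting: one TCB is detached from the termination list while the critical section is held, and the slow work (deletion callbacks, freeing memory) runs only after taskEXIT_CRITICAL(). A generic hedged sketch of the same detach-under-lock, free-outside-lock idea, written against the ESP-IDF port-level calls; the node type, list head, and mux are illustrative, not kernel state:

    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    typedef struct Node
    {
        struct Node * pxNext;
        void * pvPayload;
    } Node_t;

    static Node_t * pxPendingList = NULL;
    static portMUX_TYPE xListMux = portMUX_INITIALIZER_UNLOCKED;

    void vDrainPendingList( void )
    {
        for( ;; )
        {
            Node_t * pxNode;

            portENTER_CRITICAL( &xListMux );
            {
                pxNode = pxPendingList;                 /* detach one node */
                if( pxNode != NULL )
                {
                    pxPendingList = pxNode->pxNext;
                }
            }
            portEXIT_CRITICAL( &xListMux );             /* then work unlocked */

            if( pxNode == NULL )
            {
                break;
            }

            vPortFree( pxNode->pvPayload );             /* slow work outside the
                                                           critical section */
            vPortFree( pxNode );
        }
    }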
@ -4197,14 +4214,14 @@ static void prvCheckTasksWaitingTermination( void )
* it should be reported as being in the Blocked state. */ * it should be reported as being in the Blocked state. */
if( eState == eSuspended ) if( eState == eSuspended )
{ {
taskENTER_CRITICAL( &xTaskQueueMutex ); taskENTER_CRITICAL();
{ {
if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
{ {
pxTaskStatus->eCurrentState = eBlocked; pxTaskStatus->eCurrentState = eBlocked;
} }
} }
taskEXIT_CRITICAL( &xTaskQueueMutex ); taskEXIT_CRITICAL();
} }
} }
#endif /* INCLUDE_vTaskSuspend */ #endif /* INCLUDE_vTaskSuspend */
@ -4553,7 +4570,7 @@ TCB_t *pxTCB;
TCB_t * const pxMutexHolderTCB = pxMutexHolder; TCB_t * const pxMutexHolderTCB = pxMutexHolder;
BaseType_t xReturn = pdFALSE; BaseType_t xReturn = pdFALSE;
taskENTER_CRITICAL(&xTaskQueueMutex); taskENTER_CRITICAL();
/* If the mutex was given back by an interrupt while the queue was /* If the mutex was given back by an interrupt while the queue was
* locked then the mutex holder might now be NULL. _RB_ Is this still * locked then the mutex holder might now be NULL. _RB_ Is this still
* needed as interrupts can no longer use mutexes? */ * needed as interrupts can no longer use mutexes? */
@ -4630,7 +4647,7 @@ TCB_t *pxTCB;
{ {
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
taskEXIT_CRITICAL(&xTaskQueueMutex); taskEXIT_CRITICAL();
return xReturn; return xReturn;
} }
@ -4645,7 +4662,7 @@ TCB_t *pxTCB;
TCB_t * const pxTCB = pxMutexHolder; TCB_t * const pxTCB = pxMutexHolder;
BaseType_t xReturn = pdFALSE; BaseType_t xReturn = pdFALSE;
taskENTER_CRITICAL(&xTaskQueueMutex); taskENTER_CRITICAL();
if( pxMutexHolder != NULL ) if( pxMutexHolder != NULL )
{ {
/* A task can only have an inherited priority if it holds the mutex. /* A task can only have an inherited priority if it holds the mutex.
@ -4712,7 +4729,7 @@ TCB_t *pxTCB;
{ {
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
taskEXIT_CRITICAL(&xTaskQueueMutex); taskEXIT_CRITICAL();
return xReturn; return xReturn;
} }
@ -4729,7 +4746,7 @@ TCB_t *pxTCB;
UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse; UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse;
const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1; const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1;
taskENTER_CRITICAL(&xTaskQueueMutex); taskENTER_CRITICAL();
if( pxMutexHolder != NULL ) if( pxMutexHolder != NULL )
{ {
/* If pxMutexHolder is not NULL then the holder must hold at least /* If pxMutexHolder is not NULL then the holder must hold at least
@ -4822,7 +4839,7 @@ TCB_t *pxTCB;
{ {
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
taskEXIT_CRITICAL(&xTaskQueueMutex); taskEXIT_CRITICAL();
} }
#endif /* configUSE_MUTEXES */ #endif /* configUSE_MUTEXES */
@ -5156,13 +5173,13 @@ TickType_t uxTaskResetEventItemValue( void )
{ {
TickType_t uxReturn; TickType_t uxReturn;
taskENTER_CRITICAL(&xTaskQueueMutex); taskENTER_CRITICAL();
uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ) ); uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ) );
/* Reset the event list item to its normal value - so it can be used with /* Reset the event list item to its normal value - so it can be used with
* queues and semaphores. */ * queues and semaphores. */
listSET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB[ xPortGetCoreID() ]->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ listSET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB[ xPortGetCoreID() ]->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
taskEXIT_CRITICAL(&xTaskQueueMutex); taskEXIT_CRITICAL();
return uxReturn; return uxReturn;
} }
@ -5176,13 +5193,13 @@ TickType_t uxTaskResetEventItemValue( void )
/* If xSemaphoreCreateMutex() is called before any tasks have been created /* If xSemaphoreCreateMutex() is called before any tasks have been created
* then pxCurrentTCB will be NULL. */ * then pxCurrentTCB will be NULL. */
taskENTER_CRITICAL(&xTaskQueueMutex); taskENTER_CRITICAL();
if( pxCurrentTCB[ xPortGetCoreID() ] != NULL ) if( pxCurrentTCB[ xPortGetCoreID() ] != NULL )
{ {
( pxCurrentTCB[ xPortGetCoreID() ]->uxMutexesHeld )++; ( pxCurrentTCB[ xPortGetCoreID() ]->uxMutexesHeld )++;
} }
curTCB = pxCurrentTCB[ xPortGetCoreID() ]; curTCB = pxCurrentTCB[ xPortGetCoreID() ];
taskEXIT_CRITICAL(&xTaskQueueMutex); taskEXIT_CRITICAL();
return curTCB; return curTCB;
} }
@ -5196,7 +5213,7 @@ TickType_t uxTaskResetEventItemValue( void )
{ {
uint32_t ulReturn; uint32_t ulReturn;
taskENTER_CRITICAL( &xTaskQueueMutex ); taskENTER_CRITICAL();
{ {
/* Only block if the notification count is not already non-zero. */ /* Only block if the notification count is not already non-zero. */
if( pxCurrentTCB[xPortGetCoreID()]->ulNotifiedValue == 0UL ) if( pxCurrentTCB[xPortGetCoreID()]->ulNotifiedValue == 0UL )
@ -5225,9 +5242,9 @@ TickType_t uxTaskResetEventItemValue( void )
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
taskEXIT_CRITICAL( &xTaskQueueMutex ); taskEXIT_CRITICAL();
taskENTER_CRITICAL( &xTaskQueueMutex ); taskENTER_CRITICAL();
{ {
traceTASK_NOTIFY_TAKE(); traceTASK_NOTIFY_TAKE();
ulReturn = pxCurrentTCB[xPortGetCoreID()]->ulNotifiedValue; ulReturn = pxCurrentTCB[xPortGetCoreID()]->ulNotifiedValue;
@ -5250,7 +5267,7 @@ TickType_t uxTaskResetEventItemValue( void )
pxCurrentTCB[xPortGetCoreID()]->ucNotifyState = taskNOT_WAITING_NOTIFICATION; pxCurrentTCB[xPortGetCoreID()]->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
} }
taskEXIT_CRITICAL( &xTaskQueueMutex ); taskEXIT_CRITICAL();
return ulReturn; return ulReturn;
} }
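ulTaskNotifyTake() above blocks only while the notification count is zero, then consumes the count inside a second critical section, which makes the task notification a light-weight stand-in for a counting semaphore in deferred interrupt handling. A hedged usage sketch follows; the ISR and handler-task names are illustrative, and the handle is assumed to be filled in when the task is created:

    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    static TaskHandle_t xHandlerTask = NULL;    /* set when the task is created */

    /* ISR: one "give" per interrupt occurrence. */
    void vExampleISR( void )
    {
        BaseType_t xHigherPriorityTaskWoken = pdFALSE;

        vTaskNotifyGiveFromISR( xHandlerTask, &xHigherPriorityTaskWoken );

        if( xHigherPriorityTaskWoken != pdFALSE )
        {
            portYIELD_FROM_ISR();
        }
    }

    /* Handler task: each take returns the number of outstanding events. */
    void vHandlerTask( void * pvParameters )
    {
        ( void ) pvParameters;

        for( ;; )
        {
            uint32_t ulEventCount = ulTaskNotifyTake( pdTRUE, portMAX_DELAY );

            while( ulEventCount-- > 0 )
            {
                /* ... process one interrupt's worth of work ... */
            }
        }
    }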
@ -5264,7 +5281,7 @@ TickType_t uxTaskResetEventItemValue( void )
{ {
BaseType_t xReturn; BaseType_t xReturn;
taskENTER_CRITICAL( &xTaskQueueMutex ); taskENTER_CRITICAL();
{ {
/* Only block if a notification is not already pending. */ /* Only block if a notification is not already pending. */
if( pxCurrentTCB[xPortGetCoreID()]->ucNotifyState != taskNOTIFICATION_RECEIVED ) if( pxCurrentTCB[xPortGetCoreID()]->ucNotifyState != taskNOTIFICATION_RECEIVED )
@ -5298,9 +5315,9 @@ TickType_t uxTaskResetEventItemValue( void )
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
taskEXIT_CRITICAL( &xTaskQueueMutex ); taskEXIT_CRITICAL();
taskENTER_CRITICAL( &xTaskQueueMutex ); taskENTER_CRITICAL();
{ {
traceTASK_NOTIFY_WAIT(); traceTASK_NOTIFY_WAIT();
@ -5330,7 +5347,7 @@ TickType_t uxTaskResetEventItemValue( void )
pxCurrentTCB[xPortGetCoreID()]->ucNotifyState = taskNOT_WAITING_NOTIFICATION; pxCurrentTCB[xPortGetCoreID()]->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
} }
taskEXIT_CRITICAL( &xTaskQueueMutex ); taskEXIT_CRITICAL();
return xReturn; return xReturn;
} }
@ -5349,7 +5366,7 @@ TickType_t uxTaskResetEventItemValue( void )
configASSERT( xTaskToNotify ); configASSERT( xTaskToNotify );
pxTCB = xTaskToNotify; pxTCB = xTaskToNotify;
taskENTER_CRITICAL( &xTaskQueueMutex ); taskENTER_CRITICAL();
{ {
if( pulPreviousNotificationValue != NULL ) if( pulPreviousNotificationValue != NULL )
{ {
@ -5448,7 +5465,7 @@ TickType_t uxTaskResetEventItemValue( void )
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
taskEXIT_CRITICAL( &xTaskQueueMutex ); taskEXIT_CRITICAL();
return xReturn; return xReturn;
} }
@ -5486,7 +5503,7 @@ TickType_t uxTaskResetEventItemValue( void )
pxTCB = xTaskToNotify; pxTCB = xTaskToNotify;
taskENTER_CRITICAL_ISR(&xTaskQueueMutex); taskENTER_CRITICAL_ISR();
{ {
if( pulPreviousNotificationValue != NULL ) if( pulPreviousNotificationValue != NULL )
{ {
@ -5576,7 +5593,7 @@ TickType_t uxTaskResetEventItemValue( void )
} }
} }
taskEXIT_CRITICAL_ISR(&xTaskQueueMutex); taskEXIT_CRITICAL_ISR();
return xReturn; return xReturn;
} }
@ -5614,7 +5631,7 @@ TickType_t uxTaskResetEventItemValue( void )
pxTCB = xTaskToNotify; pxTCB = xTaskToNotify;
taskENTER_CRITICAL_ISR(&xTaskQueueMutex); taskENTER_CRITICAL_ISR();
{ {
ucOriginalNotifyState = pxTCB->ucNotifyState; ucOriginalNotifyState = pxTCB->ucNotifyState;
pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED; pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED;
@ -5665,7 +5682,7 @@ TickType_t uxTaskResetEventItemValue( void )
} }
} }
taskEXIT_CRITICAL_ISR(&xTaskQueueMutex); taskEXIT_CRITICAL_ISR();
} }
#endif /* configUSE_TASK_NOTIFICATIONS */ #endif /* configUSE_TASK_NOTIFICATIONS */
@ -5683,7 +5700,7 @@ TickType_t uxTaskResetEventItemValue( void )
its notification state cleared. */ its notification state cleared. */
pxTCB = prvGetTCBFromHandle( xTask ); pxTCB = prvGetTCBFromHandle( xTask );
taskENTER_CRITICAL( &xTaskQueueMutex ); taskENTER_CRITICAL();
{ {
if( pxTCB->ucNotifyState == taskNOTIFICATION_RECEIVED ) if( pxTCB->ucNotifyState == taskNOTIFICATION_RECEIVED )
{ {
@ -5695,7 +5712,7 @@ TickType_t uxTaskResetEventItemValue( void )
xReturn = pdFAIL; xReturn = pdFAIL;
} }
} }
taskEXIT_CRITICAL( &xTaskQueueMutex ); taskEXIT_CRITICAL();
return xReturn; return xReturn;
} }
@ -5707,9 +5724,9 @@ TickType_t uxTaskResetEventItemValue( void )
uint32_t ulTaskGetIdleRunTimeCounter( void ) uint32_t ulTaskGetIdleRunTimeCounter( void )
{ {
taskENTER_CRITICAL(&xTaskQueueMutex); taskENTER_CRITICAL();
tskTCB *pxTCB = (tskTCB *)xIdleTaskHandle[xPortGetCoreID()]; tskTCB *pxTCB = (tskTCB *)xIdleTaskHandle[xPortGetCoreID()];
taskEXIT_CRITICAL(&xTaskQueueMutex); taskEXIT_CRITICAL();
return pxTCB->ulRunTimeCounter; return pxTCB->ulRunTimeCounter;
} }
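ulTaskGetIdleRunTimeCounter() above just snapshots the idle task's run-time counter for the calling core under a critical section. A hedged sketch of using it to approximate CPU load between two sample points; it assumes configGENERATE_RUN_TIME_STATS is enabled and that portGET_RUN_TIME_COUNTER_VALUE() is the port's total-time source, and it ignores counter wrap for brevity:

    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    /* Rough idle percentage since the previous call (sketch only). */
    uint32_t ulIdlePercentSinceLastCall( void )
    {
        static uint32_t ulLastIdle = 0, ulLastTotal = 0;

        uint32_t ulIdle  = ulTaskGetIdleRunTimeCounter();
        uint32_t ulTotal = portGET_RUN_TIME_COUNTER_VALUE();

        uint32_t ulIdleDelta  = ulIdle - ulLastIdle;
        uint32_t ulTotalDelta = ulTotal - ulLastTotal;

        ulLastIdle  = ulIdle;
        ulLastTotal = ulTotal;

        return ( ulTotalDelta == 0 ) ? 0 : ( ulIdleDelta * 100UL ) / ulTotalDelta;
    }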

View File

@ -41,6 +41,18 @@
#error configUSE_TIMERS must be set to 1 to make the xTimerPendFunctionCall() function available. #error configUSE_TIMERS must be set to 1 to make the xTimerPendFunctionCall() function available.
#endif #endif
#ifdef ESP_PLATFORM
#define taskCRITICAL_MUX &xTimerMux
#undef taskENTER_CRITICAL
#undef taskEXIT_CRITICAL
#undef taskENTER_CRITICAL_ISR
#undef taskEXIT_CRITICAL_ISR
#define taskENTER_CRITICAL( ) portENTER_CRITICAL( taskCRITICAL_MUX )
#define taskEXIT_CRITICAL( ) portEXIT_CRITICAL( taskCRITICAL_MUX )
#define taskENTER_CRITICAL_ISR( ) portENTER_CRITICAL_ISR( taskCRITICAL_MUX )
#define taskEXIT_CRITICAL_ISR( ) portEXIT_CRITICAL_ISR( taskCRITICAL_MUX )
#endif
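The #ifdef ESP_PLATFORM block above is the core of the change: each kernel source file keeps calling the stock, parameterless taskENTER_CRITICAL()/taskEXIT_CRITICAL(), and a file-local redefinition routes those calls to the port spinlock API with that file's own mux (here &xTimerMux), instead of sprinkling ESP-specific arguments through the code. A hedged sketch of the same technique applied to an arbitrary ESP-IDF translation unit; the file-local mux, counter, and function names are illustrative:

    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    /* File-local spinlock protecting this module's shared state. */
    static portMUX_TYPE xMyModuleMux = portMUX_INITIALIZER_UNLOCKED;

    #ifdef ESP_PLATFORM
        /* Rebind the parameterless macros to the port calls with our mux, so
         * the body of the file can stay identical to upstream FreeRTOS code. */
        #undef taskENTER_CRITICAL
        #undef taskEXIT_CRITICAL
        #define taskENTER_CRITICAL( )    portENTER_CRITICAL( &xMyModuleMux )
        #define taskEXIT_CRITICAL( )     portEXIT_CRITICAL( &xMyModuleMux )
    #endif

    static uint32_t ulSharedCounter = 0;

    void vIncrementSharedCounter( void )
    {
        taskENTER_CRITICAL();
        {
            ulSharedCounter++;
        }
        taskEXIT_CRITICAL();
    }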
/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified /* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
* because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined * because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
* for the header files above, but not in this file, in order to generate the * for the header files above, but not in this file, in order to generate the
@ -449,7 +461,7 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
Timer_t * pxTimer = xTimer; Timer_t * pxTimer = xTimer;
configASSERT( xTimer ); configASSERT( xTimer );
taskENTER_CRITICAL( &xTimerMux ); taskENTER_CRITICAL();
{ {
if( uxAutoReload != pdFALSE ) if( uxAutoReload != pdFALSE )
{ {
@ -460,7 +472,7 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
pxTimer->ucStatus &= ~tmrSTATUS_IS_AUTORELOAD; pxTimer->ucStatus &= ~tmrSTATUS_IS_AUTORELOAD;
} }
} }
taskEXIT_CRITICAL( &xTimerMux ); taskEXIT_CRITICAL();
} }
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
@ -470,7 +482,7 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
UBaseType_t uxReturn; UBaseType_t uxReturn;
configASSERT( xTimer ); configASSERT( xTimer );
taskENTER_CRITICAL( &xTimerMux ); taskENTER_CRITICAL( );
{ {
if( ( pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD ) == 0 ) if( ( pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD ) == 0 )
{ {
@ -483,7 +495,7 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
uxReturn = ( UBaseType_t ) pdTRUE; uxReturn = ( UBaseType_t ) pdTRUE;
} }
} }
taskEXIT_CRITICAL( &xTimerMux ); taskEXIT_CRITICAL();
return uxReturn; return uxReturn;
} }
@ -592,7 +604,7 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
TickType_t xTimeNow; TickType_t xTimeNow;
BaseType_t xTimerListsWereSwitched; BaseType_t xTimerListsWereSwitched;
taskENTER_CRITICAL( &xTimerMux); taskENTER_CRITICAL();
{ {
/* Obtain the time now to make an assessment as to whether the timer /* Obtain the time now to make an assessment as to whether the timer
* has expired or not. If obtaining the time causes the lists to switch * has expired or not. If obtaining the time causes the lists to switch
@ -606,7 +618,7 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
/* The tick count has not overflowed, has the timer expired? */ /* The tick count has not overflowed, has the timer expired? */
if( ( xListWasEmpty == pdFALSE ) && ( xNextExpireTime <= xTimeNow ) ) if( ( xListWasEmpty == pdFALSE ) && ( xNextExpireTime <= xTimeNow ) )
{ {
taskEXIT_CRITICAL( &xTimerMux); taskEXIT_CRITICAL();
prvProcessExpiredTimer( xNextExpireTime, xTimeNow ); prvProcessExpiredTimer( xNextExpireTime, xTimeNow );
} }
else else
@ -626,7 +638,7 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
vQueueWaitForMessageRestricted( xTimerQueue, ( xNextExpireTime - xTimeNow ), xListWasEmpty ); vQueueWaitForMessageRestricted( xTimerQueue, ( xNextExpireTime - xTimeNow ), xListWasEmpty );
taskEXIT_CRITICAL( &xTimerMux); taskEXIT_CRITICAL();
/* Yield to wait for either a command to arrive, or the /* Yield to wait for either a command to arrive, or the
* block time to expire. If a command arrived between the * block time to expire. If a command arrived between the
@ -638,7 +650,7 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
} }
else else
{ {
taskEXIT_CRITICAL( &xTimerMux); taskEXIT_CRITICAL();
} }
} }
} }
@ -956,7 +968,7 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
if( xTimerQueue == NULL ) vPortCPUInitializeMutex( &xTimerMux ); if( xTimerQueue == NULL ) vPortCPUInitializeMutex( &xTimerMux );
taskENTER_CRITICAL( &xTimerMux); taskENTER_CRITICAL();
{ {
if( xTimerQueue == NULL ) if( xTimerQueue == NULL )
{ {
@ -998,7 +1010,7 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
taskEXIT_CRITICAL( &xTimerMux ); taskEXIT_CRITICAL();
} }
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
@ -1010,7 +1022,7 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
configASSERT( xTimer ); configASSERT( xTimer );
/* Is the timer in the list of active timers? */ /* Is the timer in the list of active timers? */
taskENTER_CRITICAL( &xTimerMux ); taskENTER_CRITICAL();
{ {
if( ( pxTimer->ucStatus & tmrSTATUS_IS_ACTIVE ) == 0 ) if( ( pxTimer->ucStatus & tmrSTATUS_IS_ACTIVE ) == 0 )
{ {
@ -1021,7 +1033,7 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
xReturn = pdTRUE; xReturn = pdTRUE;
} }
} }
taskEXIT_CRITICAL( &xTimerMux ); taskEXIT_CRITICAL();
return xReturn; return xReturn;
} /*lint !e818 Can't be pointer to const due to the typedef. */ } /*lint !e818 Can't be pointer to const due to the typedef. */
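xTimerIsTimerActive() above only inspects the tmrSTATUS_IS_ACTIVE flag under the critical section; a timer counts as active from the moment it is started until it expires (for a one-shot) or is stopped. A short hedged usage sketch; the timer handle, purpose, and block time are illustrative:

    #include "freertos/FreeRTOS.h"
    #include "freertos/timers.h"

    void vRestartIfIdle( TimerHandle_t xBackoffTimer )
    {
        /* Only (re)start the timer if it is not already counting down. */
        if( xTimerIsTimerActive( xBackoffTimer ) == pdFALSE )
        {
            ( void ) xTimerStart( xBackoffTimer, pdMS_TO_TICKS( 10 ) );
        }
    }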
@ -1034,11 +1046,11 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
configASSERT( xTimer ); configASSERT( xTimer );
taskENTER_CRITICAL( &xTimerMux ); taskENTER_CRITICAL();
{ {
pvReturn = pxTimer->pvTimerID; pvReturn = pxTimer->pvTimerID;
} }
taskEXIT_CRITICAL( &xTimerMux ); taskEXIT_CRITICAL();
return pvReturn; return pvReturn;
} }
@ -1051,11 +1063,11 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
configASSERT( xTimer ); configASSERT( xTimer );
taskENTER_CRITICAL( &xTimerMux ); taskENTER_CRITICAL();
{ {
pxTimer->pvTimerID = pvNewID; pxTimer->pvTimerID = pvNewID;
} }
taskEXIT_CRITICAL( &xTimerMux ); taskEXIT_CRITICAL();
} }
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/