Revert "Merge branch 'feature/freertos_10.4.3_sync_various_functions' into 'master'"
This reverts merge request !19761
This commit is contained in:
parent
80af04372b
commit
0332b8db07
@@ -220,7 +220,7 @@ Notes:
 #if ( configUSE_QUEUE_SETS != 1 )
     #define traceQUEUE_SEND( pxQueue ) SYSVIEW_RecordU32x4(apiFastID_OFFSET + apiID_XQUEUEGENERICSEND, SEGGER_SYSVIEW_ShrinkId((U32)pxQueue), (U32)pvItemToQueue, xTicksToWait, xCopyPosition)
 #else
-    #define traceQUEUE_SEND( pxQueue ) SYSVIEW_RecordU32x4(apiFastID_OFFSET + apiID_XQUEUEGENERICSEND, SEGGER_SYSVIEW_ShrinkId((U32)pxQueue), 0, 0, 0)
+    #define traceQUEUE_SEND( pxQueue ) SYSVIEW_RecordU32x4(apiFastID_OFFSET + apiID_XQUEUEGENERICSEND, SEGGER_SYSVIEW_ShrinkId((U32)pxQueue), 0, 0, xCopyPosition)
 #endif

 #endif // CONFIG_FREERTOS_SMP
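This hunk restores xCopyPosition as the last argument of the SystemView traceQUEUE_SEND event when queue sets are enabled. As a reminder of the mechanism (not part of this commit), FreeRTOS trace macros are plain preprocessor hooks: any traceQUEUE_SEND definition visible when the kernel is compiled expands inline inside xQueueGenericSend(), where xCopyPosition is in scope. A minimal sketch, with a hypothetical logging function standing in for SystemView:

/* Sketch only; my_log_send is an illustrative name, not a real API. */
#include <stdio.h>

void my_log_send( void * pxQueue, unsigned uxCopyPosition );

/* Expanded inside xQueueGenericSend(), where xCopyPosition is a parameter. */
#define traceQUEUE_SEND( pxQueue ) my_log_send( ( pxQueue ), ( unsigned ) xCopyPosition )

void my_log_send( void * pxQueue, unsigned uxCopyPosition )
{
    /* uxCopyPosition distinguishes queueSEND_TO_BACK / queueSEND_TO_FRONT /
     * queueOVERWRITE - exactly the information this commit re-exposes. */
    printf( "send to queue %p, copy position %u\n", pxQueue, uxCopyPosition );
}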
@@ -214,7 +214,9 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
 {
     EventBits_t uxOriginalBitValue, uxReturn;
     EventGroup_t * pxEventBits = xEventGroup;
+#ifndef ESP_PLATFORM
     BaseType_t xAlreadyYielded;
+#endif // ESP_PLATFORM
     BaseType_t xTimeoutOccurred = pdFALSE;

     configASSERT( ( uxBitsToWaitFor & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
@@ -274,13 +276,15 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
         }
 #ifdef ESP_PLATFORM // IDF-3755
         taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
-        xAlreadyYielded = pdFALSE;
 #else
         xAlreadyYielded = xTaskResumeAll();
 #endif // ESP_PLATFORM

         if( xTicksToWait != ( TickType_t ) 0 )
         {
+#ifdef ESP_PLATFORM
+            portYIELD_WITHIN_API();
+#else
             if( xAlreadyYielded == pdFALSE )
             {
                 portYIELD_WITHIN_API();
@@ -289,6 +293,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
             {
                 mtCOVERAGE_TEST_MARKER();
             }
+#endif // ESP_PLATFORM

             /* The task blocked to wait for its required bits to be set - at this
              * point either the required bits were set or the block time expired. If
@@ -347,7 +352,11 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
 {
     EventGroup_t * pxEventBits = xEventGroup;
     EventBits_t uxReturn, uxControlBits = 0;
+#ifdef ESP_PLATFORM
+    BaseType_t xWaitConditionMet;
+#else
     BaseType_t xWaitConditionMet, xAlreadyYielded;
+#endif // ESP_PLATFORM
     BaseType_t xTimeoutOccurred = pdFALSE;

     /* Check the user is not attempting to wait on the bits used by the kernel
@@ -435,13 +444,15 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
         }
 #ifdef ESP_PLATFORM // IDF-3755
         taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
-        xAlreadyYielded = pdFALSE;
 #else
         xAlreadyYielded = xTaskResumeAll();
 #endif // ESP_PLATFORM

         if( xTicksToWait != ( TickType_t ) 0 )
         {
+#ifdef ESP_PLATFORM
+            portYIELD_WITHIN_API();
+#else
             if( xAlreadyYielded == pdFALSE )
             {
                 portYIELD_WITHIN_API();
@@ -450,6 +461,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
             {
                 mtCOVERAGE_TEST_MARKER();
             }
+#endif // ESP_PLATFORM

             /* The task blocked to wait for its required bits to be set - at this
              * point either the required bits were set or the block time expired. If
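The public behavior of both functions is unchanged; on ESP_PLATFORM the upstream suspend/resume scheme (xTaskResumeAll plus conditional yield) is replaced by the spinlock-protected critical section and an unconditional portYIELD_WITHIN_API(). For context, a usage sketch of the rendezvous API these hunks touch - task and bit names are illustrative:

#include "freertos/FreeRTOS.h"
#include "freertos/event_groups.h"

#define TASK_A_BIT    ( 1UL << 0 )
#define TASK_B_BIT    ( 1UL << 1 )

static EventGroupHandle_t xSyncGroup; /* created elsewhere with xEventGroupCreate() */

/* Rendezvous point for task A: set our bit, then block until B's bit is
 * also set, or the timeout expires. */
void task_a_sync_point( void )
{
    EventBits_t uxBits = xEventGroupSync( xSyncGroup,
                                          TASK_A_BIT,              /* bit we set      */
                                          TASK_A_BIT | TASK_B_BIT, /* bits we wait on */
                                          pdMS_TO_TICKS( 100 ) );

    if( ( uxBits & ( TASK_A_BIT | TASK_B_BIT ) ) == ( TASK_A_BIT | TASK_B_BIT ) )
    {
        /* Both tasks reached the sync point before the timeout. */
    }
}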
@@ -14,7 +14,6 @@ extern "C" {
 #endif

 #define portBYTE_ALIGNMENT 16
-#define portTICK_TYPE_IS_ATOMIC 1

 /* Type definitions. */
 #define portCHAR uint8_t
@@ -108,7 +108,6 @@ typedef uint32_t TickType_t;
 #define portSTACK_GROWTH (-1)
 #define portTICK_PERIOD_MS ((TickType_t) (1000 / configTICK_RATE_HZ))
 #define portBYTE_ALIGNMENT 16
-#define portTICK_TYPE_IS_ATOMIC 1
 #define portNOP() __asm volatile (" nop ")

@@ -140,7 +140,6 @@ typedef uint32_t TickType_t;
 #define portSTACK_GROWTH ( -1 )
 #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
 #define portBYTE_ALIGNMENT 4
-#define portTICK_TYPE_IS_ATOMIC 1
 #define portNOP() XT_NOP()
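Dropping portTICK_TYPE_IS_ATOMIC from the port headers makes the kernel fall back to guarding tick-count reads. A paraphrase of the upstream fallback (see the portTICK_TYPE_* wiring in the FreeRTOS headers; shown here only to illustrate what the define buys a port, not copied verbatim from this tree):

/* Paraphrased sketch of the upstream logic. */
#if ( portTICK_TYPE_IS_ATOMIC == 1 )
    /* TickType_t loads are atomic on this CPU, so readers of xTickCount
     * need no locking at all. */
    #define portTICK_TYPE_ENTER_CRITICAL()
    #define portTICK_TYPE_EXIT_CRITICAL()
#else
    /* A 32-bit tick on a 16-bit CPU (or similar) can tear, so reads of
     * xTickCount must be bracketed by a real critical section. */
    #define portTICK_TYPE_ENTER_CRITICAL()    portENTER_CRITICAL()
    #define portTICK_TYPE_EXIT_CRITICAL()     portEXIT_CRITICAL()
#endif

This connects to the xTaskGetTickCount() hunk further below, where the revert also removes the portTICK_TYPE critical section around the tick read.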
@@ -220,7 +220,7 @@ static void prvCopyDataFromQueue( Queue_t * const pxQueue,
  * Checks to see if a queue is a member of a queue set, and if so, notifies
  * the queue set that the queue contains data.
  */
-    static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
+    static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
 #endif

 /*
@@ -362,8 +362,10 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
          * variable of type StaticQueue_t or StaticSemaphore_t equals the size of
          * the real queue and semaphore structures. */
         volatile size_t xSize = sizeof( StaticQueue_t );
-        configASSERT( xSize == sizeof( Queue_t ) );
-        ( void ) xSize; /* Keeps lint quiet when configASSERT() is not defined. */
+
+        /* This assertion cannot be branch covered in unit tests */
+        configASSERT( xSize == sizeof( Queue_t ) ); /* LCOV_EXCL_BR_LINE */
+        ( void ) xSize; /* Keeps lint quiet when configASSERT() is not defined. */
     }
 #endif /* configASSERT_DEFINED */
@@ -403,22 +405,30 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
                                    const UBaseType_t uxItemSize,
                                    const uint8_t ucQueueType )
 {
-    Queue_t * pxNewQueue;
+    Queue_t * pxNewQueue = NULL;
     size_t xQueueSizeInBytes;
     uint8_t * pucQueueStorage;

     configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

-    /* Allocate enough space to hold the maximum number of items that
-     * can be in the queue at any time. It is valid for uxItemSize to be
-     * zero in the case the queue is used as a semaphore. */
-    xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
+    if( uxItemSize == ( UBaseType_t ) 0 )
+    {
+        /* There is not going to be a queue storage area. */
+        xQueueSizeInBytes = ( size_t ) 0;
+    }
+    else
+    {
+        /* Allocate enough space to hold the maximum number of items that
+         * can be in the queue at any time. It is valid for uxItemSize to be
+         * zero in the case the queue is used as a semaphore. */
+        xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
+    }

-    /* Check for multiplication overflow. */
-    configASSERT( ( uxItemSize == 0 ) || ( uxQueueLength == ( xQueueSizeInBytes / uxItemSize ) ) );
-
-    /* Check for addition overflow. */
-    configASSERT( ( sizeof( Queue_t ) + xQueueSizeInBytes ) > xQueueSizeInBytes );
+    configASSERT( ( sizeof( Queue_t ) + xQueueSizeInBytes ) > xQueueSizeInBytes );

     /* Allocate the queue and storage area. Justification for MISRA
      * deviation as follows: pvPortMalloc() always ensures returned memory
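The reverted upstream code validated uxQueueLength * uxItemSize by dividing the product back; the restored code keeps only the addition check. A stand-alone restatement of the idiom, with header_size standing in for sizeof( Queue_t ) (illustrative helper, not from the tree):

#include <assert.h>
#include <stddef.h>

static size_t checked_queue_bytes( size_t length, size_t item_size, size_t header_size )
{
    size_t bytes = length * item_size;

    /* Multiplication overflow: dividing the product back by one factor must
     * recover the other factor (skipped when item_size is 0, i.e. the queue
     * is used as a semaphore with no storage area). */
    assert( ( item_size == 0 ) || ( length == bytes / item_size ) );

    /* Addition overflow: header + storage must not wrap around. */
    assert( header_size + bytes > bytes );

    return header_size + bytes;
}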
@@ -588,7 +598,10 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
          * calling task is the mutex holder, but not a good way of determining the
          * identity of the mutex holder, as the holder may change between the
          * following critical section exiting and the function returning. */
-        taskENTER_CRITICAL( &( pxSemaphore->xQueueLock ) );
+        #ifdef ESP_PLATFORM
+            Queue_t * const pxQueue = (Queue_t *)pxSemaphore;
+        #endif
+        taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
         {
             if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX )
             {
@@ -599,7 +612,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
                 pxReturn = NULL;
             }
         }
-        taskEXIT_CRITICAL( &( pxSemaphore->xQueueLock ) );
+        taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

         return pxReturn;
     } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */
@@ -737,7 +750,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
                                                      const UBaseType_t uxInitialCount,
                                                      StaticQueue_t * pxStaticQueue )
     {
-        QueueHandle_t xHandle;
+        QueueHandle_t xHandle = NULL;

         configASSERT( uxMaxCount != 0 );
         configASSERT( uxInitialCount <= uxMaxCount );
@@ -766,7 +779,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
     QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount,
                                                  const UBaseType_t uxInitialCount )
     {
-        QueueHandle_t xHandle;
+        QueueHandle_t xHandle = NULL;

         configASSERT( uxMaxCount != 0 );
         configASSERT( uxInitialCount <= uxMaxCount );
@@ -831,7 +844,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,

                 #if ( configUSE_QUEUE_SETS == 1 )
                     {
-                        const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;
+                        UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;

                         xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

@@ -844,7 +857,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
                          * in the queue has not changed. */
                         mtCOVERAGE_TEST_MARKER();
                     }
-                    else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
+                    else if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE )
                     {
                         /* The queue is a member of a queue set, and posting
                          * to the queue set caused a higher priority task to
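These hunks thread xCopyPosition back through to the queue-set notification. For orientation, a usage sketch of the queue-set API the change affects (real FreeRTOS calls; handle names and sizes are illustrative):

#include "freertos/FreeRTOS.h"
#include "freertos/queue.h"

/* Block on two queues at once via a queue set. */
void reader_task( void * pvParameters )
{
    ( void ) pvParameters;

    QueueHandle_t xQueue1 = xQueueCreate( 8, sizeof( uint32_t ) );
    QueueHandle_t xQueue2 = xQueueCreate( 8, sizeof( uint32_t ) );

    /* The set must be able to hold every item of every member queue. */
    QueueSetHandle_t xSet = xQueueCreateSet( 8 + 8 );

    xQueueAddToSet( xQueue1, xSet );
    xQueueAddToSet( xQueue2, xSet );

    for( ;; )
    {
        /* Blocks until a member queue contains data; the returned handle is
         * the queue that became ready, so the receive below cannot block. */
        QueueSetMemberHandle_t xReady = xQueueSelectFromSet( xSet, portMAX_DELAY );
        uint32_t ulValue;

        if( ( xReady == xQueue1 ) || ( xReady == xQueue2 ) )
        {
            xQueueReceive( ( QueueHandle_t ) xReady, &ulValue, 0 );
        }
    }
}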
@@ -1066,7 +1079,6 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
         if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
         {
             const int8_t cTxLock = pxQueue->cTxLock;
-            const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;

             traceQUEUE_SEND_FROM_ISR( pxQueue );

@@ -1085,14 +1097,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
                 {
                     if( pxQueue->pxQueueSetContainer != NULL )
                     {
-                        if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
-                        {
-                            /* Do not notify the queue set as an existing item
-                             * was overwritten in the queue so the number of items
-                             * in the queue has not changed. */
-                            mtCOVERAGE_TEST_MARKER();
-                        }
-                        else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
+                        if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE )
                         {
                             /* The queue is a member of a queue set, and posting
                              * to the queue set caused a higher priority task to
@@ -1165,9 +1170,6 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
                     {
                         mtCOVERAGE_TEST_MARKER();
                     }
-
-                    /* Not used in this path. */
-                    ( void ) uxPreviousMessagesWaiting;
                 }
             #endif /* configUSE_QUEUE_SETS */
         }
@@ -1265,7 +1267,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
                 {
                     if( pxQueue->pxQueueSetContainer != NULL )
                     {
-                        if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
+                        if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE )
                         {
                             /* The semaphore is a member of a queue set, and
                              * posting to the queue set caused a higher priority
@@ -1345,8 +1347,6 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
             {
                 /* Increment the lock count so the task that unlocks the queue
                  * knows that data was posted while it was locked. */
-                configASSERT( cTxLock != queueINT8_MAX );
-
                 pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );
             }

@@ -2007,8 +2007,6 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
             {
                 /* Increment the lock count so the task that unlocks the queue
                  * knows that data was removed while it was locked. */
-                configASSERT( cRxLock != queueINT8_MAX );
-
                 pxQueue->cRxLock = ( int8_t ) ( cRxLock + 1 );
             }

@@ -2087,14 +2085,15 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
 UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
 {
     UBaseType_t uxReturn;
+    Queue_t * const pxQueue = ( Queue_t * ) xQueue;

     configASSERT( xQueue );

-    taskENTER_CRITICAL( &( ( ( Queue_t * ) xQueue )->xQueueLock ) );
+    taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
     {
         uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
     }
-    taskEXIT_CRITICAL( &( ( ( Queue_t * ) xQueue )->xQueueLock ) );
+    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

     return uxReturn;
 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
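With xCopyPosition forwarded again, an ISR-side queueOVERWRITE on a set member updates the queue-set container using the same copy position, instead of skipping the notification as the reverted upstream code did when an existing item was replaced. A usage sketch of the overwrite API on a length-1 "mailbox" queue (real FreeRTOS calls; the ISR body and handle are illustrative):

#include "freertos/FreeRTOS.h"
#include "freertos/queue.h"

/* A length-1 mailbox: xQueueOverwriteFromISR() never blocks and simply
 * replaces the stored value. Assumed created elsewhere with
 * xQueueCreate( 1, sizeof( uint32_t ) ). */
static QueueHandle_t xMailbox;

void sensor_isr( void )
{
    uint32_t ulLatest = 42; /* e.g. a freshly sampled value */
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    xQueueOverwriteFromISR( xMailbox, &ulLatest, &xHigherPriorityTaskWoken );

    if( xHigherPriorityTaskWoken == pdTRUE )
    {
        portYIELD_FROM_ISR();
    }
}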
@@ -2354,7 +2353,7 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
             {
                 if( pxQueue->pxQueueSetContainer != NULL )
                 {
-                    if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
+                    if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE )
                     {
                         /* The queue is a member of a queue set, and posting to
                          * the queue set caused a higher priority task to unblock.
@@ -2497,9 +2496,6 @@ static BaseType_t prvIsQueueFull( const Queue_t * pxQueue )
 {
     BaseType_t xReturn;

-    #ifndef ESP_PLATFORM
-        taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
-    #endif
     {
         if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
         {
@@ -2510,9 +2506,6 @@ static BaseType_t prvIsQueueFull( const Queue_t * pxQueue )
             xReturn = pdFALSE;
         }
     }
-    #ifndef ESP_PLATFORM
-        taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
-    #endif

     return xReturn;
 }
@@ -2984,8 +2977,11 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
                               QueueSetHandle_t xQueueSet )
     {
         BaseType_t xReturn;
+        #ifdef ESP_PLATFORM
+            Queue_t * pxQueue = (Queue_t * )xQueueOrSemaphore;
+        #endif

-        taskENTER_CRITICAL( &( ( ( Queue_t * ) xQueueOrSemaphore )->xQueueLock ) );
+        taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
         {
             if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
             {
@@ -3004,7 +3000,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
                 xReturn = pdPASS;
             }
         }
-        taskEXIT_CRITICAL( &( ( ( Queue_t * ) xQueueOrSemaphore )->xQueueLock ) );
+        taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

         return xReturn;
     }
@@ -3034,12 +3030,15 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
         }
         else
         {
-            taskENTER_CRITICAL( &( ( ( Queue_t * ) pxQueueOrSemaphore )->xQueueLock ) );
+            #ifdef ESP_PLATFORM
+                Queue_t* pxQueue = (Queue_t*)pxQueueOrSemaphore;
+            #endif
+            taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
             {
                 /* The queue is no longer contained in the set. */
                 pxQueueOrSemaphore->pxQueueSetContainer = NULL;
             }
-            taskEXIT_CRITICAL( &( ( ( Queue_t * ) pxQueueOrSemaphore )->xQueueLock ) );
+            taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
             xReturn = pdPASS;
         }

@@ -3078,16 +3077,20 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )

 #if ( configUSE_QUEUE_SETS == 1 )

-    static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue )
+    static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue,
+                                                  const BaseType_t xCopyPosition )
     {
         Queue_t * pxQueueSetContainer = pxQueue->pxQueueSetContainer;
         BaseType_t xReturn = pdFALSE;

-        /* This function must be called form a critical section. */
-
-        configASSERT( pxQueueSetContainer );
+        /* The following line is not reachable in unit tests because every call
+         * to prvNotifyQueueSetContainer is preceded by a check that
+         * pxQueueSetContainer != NULL */
+        configASSERT( pxQueueSetContainer ); /* LCOV_EXCL_BR_LINE */

-        /* Acquire the Queue set's spinlock */
+        //Acquire the Queue set's spinlock
         taskENTER_CRITICAL( &( pxQueueSetContainer->xQueueLock ) );

         configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );
@@ -3096,10 +3099,10 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
         {
             const int8_t cTxLock = pxQueueSetContainer->cTxLock;

-            traceQUEUE_SET_SEND( pxQueueSetContainer );
+            traceQUEUE_SEND( pxQueueSetContainer );

             /* The data copied is the handle of the queue that contains data. */
-            xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, queueSEND_TO_BACK );
+            xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );

             if( cTxLock == queueUNLOCKED )
             {
@@ -3122,8 +3125,6 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
             }
             else
             {
-                configASSERT( cTxLock != queueINT8_MAX );
-
                 pxQueueSetContainer->cTxLock = ( int8_t ) ( cTxLock + 1 );
             }
         }
@@ -3132,7 +3133,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
             mtCOVERAGE_TEST_MARKER();
         }

-        /* Release the Queue set's spinlock */
+        //Release the Queue set's spinlock
         taskEXIT_CRITICAL( &( pxQueueSetContainer->xQueueLock ) );

         return xReturn;
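A recurring pattern in these queue.c hunks is the introduction of a pxQueue local whose only job is to name a lock: on ESP-IDF's SMP FreeRTOS, taskENTER_CRITICAL takes the spinlock protecting the specific object, unlike vanilla FreeRTOS where it takes no argument. A sketch of the two flavors (function and variable names are illustrative, not from the tree):

/* Vanilla single-core FreeRTOS: one global interrupt-disable section. */
void increment_counter_vanilla( volatile int * counter )
{
    taskENTER_CRITICAL();      /* no argument */
    ( *counter )++;
    taskEXIT_CRITICAL();
}

/* ESP-IDF SMP FreeRTOS: each shared object carries its own spinlock, and
 * the macro takes that lock so the other core is excluded as well. */
static portMUX_TYPE my_lock = portMUX_INITIALIZER_UNLOCKED;

void increment_counter_smp( volatile int * counter )
{
    taskENTER_CRITICAL( &my_lock );
    ( *counter )++;
    taskEXIT_CRITICAL( &my_lock );
}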
@@ -588,14 +588,15 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
     size_t xReturn, xSpace = 0;
     size_t xRequiredSpace = xDataLengthBytes;
     TimeOut_t xTimeOut;
-
-    /* The maximum amount of space a stream buffer will ever report is its length
-     * minus 1. */
-    const size_t xMaxReportedSpace = pxStreamBuffer->xLength - ( size_t ) 1;
+    size_t xMaxReportedSpace = 0;

     configASSERT( pvTxData );
     configASSERT( pxStreamBuffer );

+    /* The maximum amount of space a stream buffer will ever report is its length
+     * minus 1. */
+    xMaxReportedSpace = pxStreamBuffer->xLength - ( size_t ) 1;
+
     /* This send function is used to write to both message buffers and stream
      * buffers. If this is a message buffer then the space needed must be
      * increased by the amount of bytes needed to store the length of the
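The restored code only dereferences pxStreamBuffer after the configASSERT on the handle; behavior is otherwise the same, including the "length minus 1" rule (a completely full ring would be indistinguishable from an empty one, so one byte is always kept free). A sender sketch using the real API, with illustrative sizes:

#include "freertos/FreeRTOS.h"
#include "freertos/stream_buffer.h"

/* A 128-byte stream buffer will report at most 127 bytes of free space. */
void writer_task( void * pvParameters )
{
    ( void ) pvParameters;

    StreamBufferHandle_t xStream = xStreamBufferCreate( 128,  /* total size    */
                                                        1 );  /* trigger level */
    const char msg[] = "hello";

    /* Blocks up to 100 ms for enough free space, then copies the bytes in. */
    size_t sent = xStreamBufferSend( xStream, msg, sizeof msg, pdMS_TO_TICKS( 100 ) );
    ( void ) sent;
}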
@@ -259,15 +259,6 @@ extern void esp_vApplicationIdleHook(void);
     #define taskCAN_RUN_ON_CORE( xCore, xCoreID ) ( pdTRUE )
 #endif /* configNUM_CORES > 1 */

-/* Check if a task is a currently running task. */
-#if ( configNUM_CORES > 1 )
-    #define taskIS_CURRENTLY_RUNNING( pxTCB ) ( ( ( pxTCB ) == pxCurrentTCB[ 0 ] ) || ( ( pxTCB ) == pxCurrentTCB[ 1 ] ) )
-    #define taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, xCoreID ) ( ( pxTCB ) == pxCurrentTCB[ ( xCoreID ) ] )
-#else
-    #define taskIS_CURRENTLY_RUNNING( pxTCB ) ( ( pxTCB ) == pxCurrentTCB[ 0 ] )
-    #define taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, xCoreID ) taskIS_CURRENTLY_RUNNING( ( pxTCB ) )
-#endif /* configNUM_CORES > 1 */
-
 /*
  * Several functions take a TaskHandle_t parameter that can optionally be NULL,
  * where NULL is used to indicate that the handle of the currently executing
@@ -699,21 +690,21 @@ static BaseType_t prvCheckForYieldUsingPrioritySMP( UBaseType_t uxTaskPriority,

 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )

-    TaskHandle_t xTaskCreateStaticPinnedToCore( TaskFunction_t pxTaskCode,
+    TaskHandle_t xTaskCreateStaticPinnedToCore( TaskFunction_t pvTaskCode,
                                                 const char * const pcName,
                                                 const uint32_t ulStackDepth,
                                                 void * const pvParameters,
                                                 UBaseType_t uxPriority,
-                                                StackType_t * const puxStackBuffer,
+                                                StackType_t * const pxStackBuffer,
                                                 StaticTask_t * const pxTaskBuffer,
                                                 const BaseType_t xCoreID )
     {
         TCB_t *pxNewTCB;
         TaskHandle_t xReturn;

-        configASSERT( portVALID_STACK_MEM( puxStackBuffer ) );
-        configASSERT( portVALID_TCB_MEM( pxTaskBuffer ) );
-        configASSERT( ( ( xCoreID >= 0 ) && ( xCoreID < configNUM_CORES ) ) || ( xCoreID == tskNO_AFFINITY ) );
+        configASSERT( portVALID_TCB_MEM(pxTaskBuffer) );
+        configASSERT( portVALID_STACK_MEM(pxStackBuffer) );
+        configASSERT( (xCoreID>=0 && xCoreID<configNUM_CORES) || (xCoreID==tskNO_AFFINITY) );

         #if ( configASSERT_DEFINED == 1 )
             {
@@ -726,12 +717,13 @@ static BaseType_t prvCheckForYieldUsingPrioritySMP( UBaseType_t uxTaskPriority,
             }
         #endif /* configASSERT_DEFINED */

-        if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) )
+
+        if( ( pxTaskBuffer != NULL ) && ( pxStackBuffer != NULL ) )
         {
             /* The memory used for the task's TCB and stack are passed into this
              * function - use them. */
             pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */
-            pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer;
+            pxNewTCB->pxStack = ( StackType_t * ) pxStackBuffer;

             #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
                 {
@@ -741,7 +733,7 @@ static BaseType_t prvCheckForYieldUsingPrioritySMP( UBaseType_t uxTaskPriority,
                 }
             #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */

-            prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL, xCoreID );
+            prvInitialiseNewTask( pvTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL, xCoreID );
             prvAddNewTaskToReadyList( pxNewTCB );
         }
         else
@@ -856,13 +848,13 @@ static BaseType_t prvCheckForYieldUsingPrioritySMP( UBaseType_t uxTaskPriority,

 #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )

-    BaseType_t xTaskCreatePinnedToCore( TaskFunction_t pxTaskCode,
-                                        const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
-                                        const configSTACK_DEPTH_TYPE usStackDepth,
-                                        void * const pvParameters,
-                                        UBaseType_t uxPriority,
-                                        TaskHandle_t * const pxCreatedTask,
-                                        const BaseType_t xCoreID)
+    BaseType_t xTaskCreatePinnedToCore( TaskFunction_t pvTaskCode,
+                                        const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
+                                        const uint32_t usStackDepth,
+                                        void * const pvParameters,
+                                        UBaseType_t uxPriority,
+                                        TaskHandle_t * const pvCreatedTask,
+                                        const BaseType_t xCoreID)
     {
         TCB_t * pxNewTCB;
         BaseType_t xReturn;
@@ -933,7 +925,7 @@ static BaseType_t prvCheckForYieldUsingPrioritySMP( UBaseType_t uxTaskPriority,
             }
         #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */

-        prvInitialiseNewTask( pxTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL, xCoreID );
+        prvInitialiseNewTask( pvTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pvCreatedTask, pxNewTCB, NULL, xCoreID );
         prvAddNewTaskToReadyList( pxNewTCB );
         xReturn = pdPASS;
     }
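The revert only renames parameters (back to pvTaskCode / pvCreatedTask) and loosens the stack-depth type to uint32_t, so call sites are unaffected. Typical usage of the ESP-IDF API, with an illustrative task body:

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

static void blink_task( void * pvParameters )  /* illustrative task body */
{
    ( void ) pvParameters;

    for( ;; )
    {
        vTaskDelay( pdMS_TO_TICKS( 500 ) );
    }
}

void start_blink( void )
{
    TaskHandle_t xHandle = NULL;

    /* Pin the task to core 1; tskNO_AFFINITY would allow either core.
     * Note the ESP-IDF convention: stack depth is given in bytes. */
    BaseType_t ok = xTaskCreatePinnedToCore( blink_task,
                                             "blink",
                                             2048,    /* stack depth */
                                             NULL,    /* parameter   */
                                             5,       /* priority    */
                                             &xHandle,
                                             1 );     /* core ID     */
    configASSERT( ok == pdPASS );
}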
@@ -961,10 +953,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
     StackType_t * pxTopOfStack;
     UBaseType_t x;

-    #if ( configNUM_CORES == 1 )
-        {
-            xCoreID = 0;
-        }
+    #if (configNUM_CORES < 2)
+        xCoreID = 0;
     #endif

     #if ( portUSING_MPU_WRAPPERS == 1 )
@@ -1373,17 +1363,14 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
 void vTaskDelete( TaskHandle_t xTaskToDelete )
 {
     TCB_t * pxTCB;
-    BaseType_t xFreeNow;
+    TCB_t * curTCB;
+    BaseType_t core;
+    BaseType_t xFreeNow = 0;

     taskENTER_CRITICAL( &xKernelLock );
     {
-        BaseType_t xCurCoreID;
-        #if ( configNUM_CORES > 1 )
-            xCurCoreID = xPortGetCoreID();
-        #else
-            xCurCoreID = 0;
-            ( void ) xCurCoreID;
-        #endif
+        core = xPortGetCoreID();
+        curTCB = pxCurrentTCB[core];

         /* If null is passed in here then it is the calling task that is
          * being deleted. */
@@ -1415,19 +1402,12 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
         * not return. */
         uxTaskNumber++;

-        /*
-         * We cannot immediately delete a task that is
-         * - Currently running on either core
-         * - If the task is not currently running but is pinned to the other (due to FPU cleanup)
-         * Todo: Allow deletion of tasks pinned to other core (IDF-5803)
-         */
-        #if ( configNUM_CORES > 1 )
-            xFreeNow = ( taskIS_CURRENTLY_RUNNING( pxTCB ) || ( pxTCB->xCoreID == !xCurCoreID ) ) ? pdFALSE : pdTRUE;
-        #else
-            xFreeNow = ( taskIS_CURRENTLY_RUNNING( pxTCB ) ) ? pdFALSE : pdTRUE;
-        #endif /* configNUM_CORES > 1 */
-
-        if( xFreeNow == pdFALSE )
+        if( pxTCB == curTCB ||
+            /* in SMP, we also can't immediately delete the task active on the other core */
+            (configNUM_CORES > 1 && pxTCB == pxCurrentTCB[ !core ]) ||
+            /* ... and we can't delete a non-running task pinned to the other core, as
+               FPU cleanup has to happen on the same core */
+            (configNUM_CORES > 1 && pxTCB->xCoreID == (!core)) )
         {
             /* A task is deleting itself. This cannot complete within the
              * task itself, as a context switch to another task is required.
@@ -1441,47 +1421,43 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
             * check the xTasksWaitingTermination list. */
             ++uxDeletedTasksWaitingCleanUp;

-            /* Call the delete hook before portPRE_TASK_DELETE_HOOK() as
-             * portPRE_TASK_DELETE_HOOK() does not return in the Win32 port. */
-            traceTASK_DELETE( pxTCB );
-
             /* The pre-delete hook is primarily for the Windows simulator,
              * in which Windows specific clean up operations are performed,
              * after which it is not possible to yield away from this task -
             * hence xYieldPending is used to latch that a context switch is
             * required. */
-            portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending[ xCurCoreID ] );
+            portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending[core] );

-            #if ( configNUM_CORES > 1 )
-                if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, !xCurCoreID ) )
-                {
-                    /* SMP case of deleting a task running on a different core. Same issue
-                    as a task deleting itself, but we need to send a yield to this task now
-                    before we release xKernelLock.
-
-                    Specifically there is a case where the other core may already be spinning on
-                    xKernelLock waiting to go into a blocked state. A check is added in
-                    prvAddCurrentTaskToDelayedList() to prevent it from removing itself from
-                    xTasksWaitingTermination list in this case (instead it will immediately
-                    release xKernelLock again and be yielded before the FreeRTOS function
-                    returns.) */
-                    vPortYieldOtherCore( !xCurCoreID );
-                }
-            #endif /* configNUM_CORES > 1 */
+            if (configNUM_CORES > 1 && pxTCB == pxCurrentTCB[ !core ])
+            {
+                /* SMP case of deleting a task running on a different core. Same issue
+                as a task deleting itself, but we need to send a yield to this task now
+                before we release xKernelLock.
+
+                Specifically there is a case where the other core may already be spinning on
+                xKernelLock waiting to go into a blocked state. A check is added in
+                prvAddCurrentTaskToDelayedList() to prevent it from removing itself from
+                xTasksWaitingTermination list in this case (instead it will immediately
+                release xKernelLock again and be yielded before the FreeRTOS function
+                returns.) */
+                vPortYieldOtherCore( !core );
+            }
         }
         else
         {
             --uxCurrentNumberOfTasks;
-            traceTASK_DELETE( pxTCB );
+            xFreeNow = pdTRUE;

             /* Reset the next expected unblock time in case it referred to
              * the task that has just been deleted. */
             prvResetNextTaskUnblockTime();
         }
+
+        traceTASK_DELETE( pxTCB );
     }
     taskEXIT_CRITICAL( &xKernelLock );

-    if( xFreeNow == pdTRUE ) {
+    if(xFreeNow == pdTRUE) {
         #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
             prvDeleteTLS( pxTCB );
         #endif
@@ -1493,8 +1469,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
     * been deleted. */
     if( xSchedulerRunning != pdFALSE )
     {
-        taskENTER_CRITICAL( &xKernelLock );
-        if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, xPortGetCoreID() ) )
+        if( pxTCB == curTCB )
         {
             configASSERT( xTaskGetSchedulerState() != taskSCHEDULER_SUSPENDED );
             portYIELD_WITHIN_API();
@@ -1503,7 +1478,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
         {
             mtCOVERAGE_TEST_MARKER();
         }
-        taskEXIT_CRITICAL( &xKernelLock );
     }
 }
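In both versions, deleting the running task (or, restored here, a task active on or pinned to the other core) only moves it to xTasksWaitingTermination; the TCB and stack are freed later by the idle task. A minimal usage sketch of the self-delete pattern this implies:

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

/* Illustrative worker that deletes itself when done. Passing NULL deletes
 * the calling task; cleanup is deferred to the idle task, which is one
 * reason the idle task must never be starved. */
static void worker_task( void * pvParameters )
{
    ( void ) pvParameters;

    /* ... do the work ... */

    vTaskDelete( NULL ); /* does not return */
}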
@@ -1525,7 +1499,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
                             const TickType_t xTimeIncrement )
 {
     TickType_t xTimeToWake;
+    #ifdef ESP_PLATFORM
+    BaseType_t xShouldDelay = pdFALSE;
+    #else
     BaseType_t xAlreadyYielded, xShouldDelay = pdFALSE;
+    #endif // ESP_PLATFORM

     configASSERT( pxPreviousWakeTime );
     configASSERT( ( xTimeIncrement > 0U ) );
@@ -1593,13 +1571,15 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
     }
     #ifdef ESP_PLATFORM // IDF-3755
         taskEXIT_CRITICAL( &xKernelLock );
-        xAlreadyYielded = pdFALSE;
     #else
         xAlreadyYielded = xTaskResumeAll();
     #endif // ESP_PLATFORM

     /* Force a reschedule if xTaskResumeAll has not already done so, we may
      * have put ourselves to sleep. */
+    #ifdef ESP_PLATFORM
+    portYIELD_WITHIN_API();
+    #else
     if( xAlreadyYielded == pdFALSE )
     {
         portYIELD_WITHIN_API();
@@ -1608,7 +1588,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
     {
         mtCOVERAGE_TEST_MARKER();
     }
-
+    #endif // ESP_PLATFORM
     return xShouldDelay;
 }
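On ESP_PLATFORM the function now always yields after queueing the wakeup, but the calling pattern stays the standard one. A fixed-rate loop sketch (illustrative task body):

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

static void periodic_task( void * pvParameters )
{
    TickType_t xLastWakeTime = xTaskGetTickCount();
    const TickType_t xPeriod = pdMS_TO_TICKS( 10 );

    ( void ) pvParameters;

    for( ;; )
    {
        /* Wakes at an absolute time, so the period does not drift with the
         * amount of work done in the loop body. */
        vTaskDelayUntil( &xLastWakeTime, xPeriod );
        /* ... periodic work ... */
    }
}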
@@ -1619,8 +1599,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )

 void vTaskDelay( const TickType_t xTicksToDelay )
 {
-    BaseType_t xAlreadyYielded = pdFALSE;
-
     /* A delay time of zero just forces a reschedule. */
     if( xTicksToDelay > ( TickType_t ) 0U )
     {
@@ -1644,7 +1622,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
         }
         #ifdef ESP_PLATFORM // IDF-3755
             taskEXIT_CRITICAL( &xKernelLock );
-            xAlreadyYielded = pdFALSE;
         #else
             xAlreadyYielded = xTaskResumeAll();
         #endif // ESP_PLATFORM
@@ -1654,16 +1631,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
         mtCOVERAGE_TEST_MARKER();
     }

-    /* Force a reschedule if xTaskResumeAll has not already done so, we may
-     * have put ourselves to sleep. */
-    if( xAlreadyYielded == pdFALSE )
-    {
-        portYIELD_WITHIN_API();
-    }
-    else
-    {
-        mtCOVERAGE_TEST_MARKER();
-    }
+    /* Force a reschedule, we may have put ourselves to sleep. */
+    portYIELD_WITHIN_API();
 }

 #endif /* INCLUDE_vTaskDelay */
@@ -1680,11 +1649,18 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
     configASSERT( pxTCB );

     taskENTER_CRITICAL( &xKernelLock ); //Need critical section in case either core context switches in between
-    if( taskIS_CURRENTLY_RUNNING( pxTCB ) )
+    if( pxTCB == pxCurrentTCB[xPortGetCoreID()])
     {
         /* The task calling this function is querying its own state. */
         eReturn = eRunning;
     }
+    #if (configNUM_CORES > 1)
+    else if (pxTCB == pxCurrentTCB[!xPortGetCoreID()])
+    {
+        /* The task calling this function is querying its own state. */
+        eReturn = eRunning;
+    }
+    #endif
     else
     {
         pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) );
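With the restored code, a task that is the current task of either core reports eRunning. Usage sketch of the API this hunk touches (xWorker is assumed to be a handle obtained at task creation):

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

void check_worker_state( TaskHandle_t xWorker )
{
    switch( eTaskGetState( xWorker ) )
    {
        case eRunning:   /* current task on this core, or the other core */  break;
        case eReady:     /* runnable, waiting for a core */                  break;
        case eBlocked:   /* waiting on a delay, queue, semaphore, ... */     break;
        case eSuspended: /* vTaskSuspend()ed, or blocked with no timeout */  break;
        case eDeleted:   /* deleted but not yet cleaned up */                break;
        default:                                                             break;
    }
}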
@@ -1871,7 +1847,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
             * priority than the calling task. */
            if( uxNewPriority > uxCurrentBasePriority )
            {
-               if( !taskIS_CURRENTLY_RUNNING( pxTCB ) )
+               if( pxTCB != pxCurrentTCB[ xPortGetCoreID() ] )
                {
                    /* The priority of a task other than the currently
                     * running task is being raised. Is the priority being
@@ -1892,22 +1868,13 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
                     * priority task able to run so no yield is required. */
                }
            }
-           else if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, 0 ) )
+           else if( pxTCB == pxCurrentTCB[ xPortGetCoreID() ] )
            {
                /* Setting the priority of the running task down means
                 * there may now be another task of higher priority that
                 * is ready to execute. */
                xYieldRequired = pdTRUE;
            }
-           #if ( configNUM_CORES > 1 )
-               else if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, 1 ) )
-               {
-                   /* Setting the priority of the running task on the other
-                    * core down means there may now be another task of
-                    * higher priority that is ready to execute. */
-                   vPortYieldOtherCore( 1 );
-               }
-           #endif /* configNUM_CORES > 1 */
            else
            {
                /* Setting the priority of any other task down does not
@@ -2006,6 +1973,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
 void vTaskSuspend( TaskHandle_t xTaskToSuspend )
 {
     TCB_t * pxTCB;
+    TCB_t * curTCB;

     taskENTER_CRITICAL( &xKernelLock );
     {
@@ -2037,6 +2005,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
         }

         vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xStateListItem ) );
+        curTCB = pxCurrentTCB[ xPortGetCoreID() ];

         #if ( configUSE_TASK_NOTIFICATIONS == 1 )
             {
@@ -2053,70 +2022,76 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
             }
         }
         #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
     }
     taskEXIT_CRITICAL( &xKernelLock );

-    if( xSchedulerRunning != pdFALSE )
-    {
-        /* Reset the next expected unblock time in case it referred to the
-         * task that is now in the Suspended state. */
-        taskENTER_CRITICAL( &xKernelLock );
-        {
-            prvResetNextTaskUnblockTime();
-        }
-        taskEXIT_CRITICAL( &xKernelLock );
-    }
-    else
-    {
-        mtCOVERAGE_TEST_MARKER();
-    }
-
-    if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, xPortGetCoreID() ) )
-    {
-        if( xSchedulerRunning != pdFALSE )
-        {
-            /* The current task has just been suspended. */
-            configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] == 0 );
-            portYIELD_WITHIN_API();
-        }
-        else
-        {
-            /* The scheduler is not running, but the task that was pointed
-             * to by pxCurrentTCB has just been suspended and pxCurrentTCB
-             * must be adjusted to point to a different task. */
-            if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks ) /*lint !e931 Right has no side effect, just volatile. */
-            {
-                /* No other tasks are ready, so set pxCurrentTCB back to
-                 * NULL so when the next task is created pxCurrentTCB will
-                 * be set to point to it no matter what its relative priority
-                 * is. */
-                pxCurrentTCB[ xPortGetCoreID() ] = NULL;
-            }
-            else
-            {
-                vTaskSwitchContext();
-            }
-        }
-    }
-    #if ( configNUM_CORES > 1 )
-        else if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, !xPortGetCoreID() ) )
-        {
-            /* The other core's current task has just been suspended */
-            if( xSchedulerRunning != pdFALSE )
-            {
-                vPortYieldOtherCore( !xPortGetCoreID() );
-            }
-            else
-            {
-                /* The scheduler is not running, but the task that was pointed
-                 * to by pxCurrentTCB[ otherCore ] has just been suspended.
-                 * We simply set the pxCurrentTCB[ otherCore ] to NULL for now.
-                 * Todo: Update vTaskSwitchContext() to be runnable on
-                 * behalf of the other core. */
-                pxCurrentTCB[ !xPortGetCoreID() ] = NULL;
-            }
-        }
-    #endif /* configNUM_CORES > 1 */
-    else
-    {
-        mtCOVERAGE_TEST_MARKER();
-    }
+    if( pxTCB == curTCB )
+    {
+        if( xSchedulerRunning != pdFALSE )
+        {
+            /* Reset the next expected unblock time in case it referred to the
+             * task that is now in the Suspended state. */
+            prvResetNextTaskUnblockTime();
+
+            /* The current task has just been suspended. */
+            taskENTER_CRITICAL( &xKernelLock );
+            BaseType_t suspended = uxSchedulerSuspended[xPortGetCoreID()];
+            taskEXIT_CRITICAL( &xKernelLock );
+
+            configASSERT( suspended == 0 );
+            (void)suspended;
+            portYIELD_WITHIN_API();
+        }
+        else
+        {
+            /* The scheduler is not running, but the task that was pointed
+             * to by pxCurrentTCB has just been suspended and pxCurrentTCB
+             * must be adjusted to point to a different task. */
+            if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks ) /*lint !e931 Right has no side effect, just volatile. */
+            {
+                /* No other tasks are ready, so set pxCurrentTCB back to
+                 * NULL so when the next task is created pxCurrentTCB will
+                 * be set to point to it no matter what its relative priority
+                 * is. */
+                taskENTER_CRITICAL( &xKernelLock );
+                pxCurrentTCB[ xPortGetCoreID() ] = NULL;
+                taskEXIT_CRITICAL( &xKernelLock );
+            }
+            else
+            {
+                vTaskSwitchContext();
+            }
+        }
+    }
+    else
+    {
+        if( xSchedulerRunning != pdFALSE )
+        {
+            /* A task other than the currently running task was suspended,
+             * reset the next expected unblock time in case it referred to the
+             * task that is now in the Suspended state. */
+            taskENTER_CRITICAL( &xKernelLock );
+            {
+                prvResetNextTaskUnblockTime();
+            }
+            taskEXIT_CRITICAL( &xKernelLock );
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }
 }

 #endif /* INCLUDE_vTaskSuspend */
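Regardless of which core's task is targeted, the public contract of the pair is unchanged. A usage sketch (xWorker is an illustrative handle):

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

/* Suspension is unconditional and does not nest: one vTaskResume() undoes
 * any number of vTaskSuspend() calls on the same task. */
void pause_and_resume( TaskHandle_t xWorker )
{
    vTaskSuspend( xWorker );   /* xWorker stops running until resumed */

    /* ... */

    vTaskResume( xWorker );    /* back onto the ready list */
}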
@@ -2139,12 +2114,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
     if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xStateListItem ) ) != pdFALSE )
     {
         /* Has the task already been resumed from within an ISR? */
-        #if ( configNUM_CORES > 1 )
-            if( ( listIS_CONTAINED_WITHIN( &xPendingReadyList[ 0 ], &( pxTCB->xEventListItem ) ) == pdFALSE )
-                && ( listIS_CONTAINED_WITHIN( &xPendingReadyList[ 1 ], &( pxTCB->xEventListItem ) ) == pdFALSE ) )
-        #else
-            if( listIS_CONTAINED_WITHIN( &xPendingReadyList[ 0 ], &( pxTCB->xEventListItem ) ) == pdFALSE )
-        #endif
+        if( listIS_CONTAINED_WITHIN( &xPendingReadyList[xPortGetCoreID()], &( pxTCB->xEventListItem )) == pdFALSE &&
+            listIS_CONTAINED_WITHIN( &xPendingReadyList[!xPortGetCoreID()], &( pxTCB->xEventListItem )) == pdFALSE )
         {
             /* Is it in the suspended list because it is in the Suspended
              * state, or because it is blocked with no timeout? */
@@ -2186,7 +2157,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
     {
         /* The parameter cannot be NULL as it is impossible to resume the
          * currently executing task. */
-        if( !taskIS_CURRENTLY_RUNNING( pxTCB ) && ( pxTCB != NULL ) )
+        if( ( pxTCB != pxCurrentTCB[xPortGetCoreID()] ) && ( pxTCB != NULL ) )
        {
            if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
            {
@@ -2261,7 +2232,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
         traceTASK_RESUME_FROM_ISR( pxTCB );

         /* Check the ready lists can be accessed. */
-        /* Known issue IDF-5856. We also need to check if the other core is suspended */
         if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
         {
             /* Ready lists can be accessed so move the task from the
@@ -2310,7 +2280,7 @@ void vTaskStartScheduler( void )

     #ifdef ESP_PLATFORM
         /* Create an IDLE task for each core */
-        for( BaseType_t xCoreID = 0; xCoreID < configNUM_CORES; xCoreID++ )
+        for(BaseType_t xCoreID = 0; xCoreID < configNUM_CORES; xCoreID++)
     #endif //ESP_PLATFORM
     /* Add the idle task at the lowest priority. */
     #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
@@ -2460,12 +2430,12 @@ void vTaskSuspendAll( void )
     * BaseType_t. Please read Richard Barry's reply in the following link to a
     * post in the FreeRTOS support forum before reporting this as a bug! -
     * https://goo.gl/wu4acr */

-   #if ( configNUM_CORES > 1 )
-       /* For SMP, although each core has their own uxSchedulerSuspended, we still
-        * need to enter a critical section when accessing. */
-       taskENTER_CRITICAL( &xKernelLock );
-   #endif
+   #ifdef ESP_PLATFORM
+       /* For SMP, although each core has their own uxSchedulerSuspended, we still
+        * need to disable interrupts or enter a critical section when accessing. */
+       unsigned state;
+       state = portSET_INTERRUPT_MASK_FROM_ISR();
+   #endif

    /* portSOFTWARE_BARRIER() is only implemented for emulated/simulated ports that
     * do not otherwise exhibit real time behaviour. */
@@ -2479,53 +2449,55 @@ void vTaskSuspendAll( void )
     * the above increment elsewhere. */
    portMEMORY_BARRIER();

-   #if ( configNUM_CORES > 1 )
-       taskEXIT_CRITICAL( &xKernelLock );
-   #endif
+   #ifdef ESP_PLATFORM
+       portCLEAR_INTERRUPT_MASK_FROM_ISR( state );
+   #endif
}
/*----------------------------------------------------------*/

#if ( configUSE_TICKLESS_IDLE != 0 )

+   #if ( configNUM_CORES > 1 )
+
+       static BaseType_t xHaveReadyTasks( void )
+       {
+           for (int i = tskIDLE_PRIORITY + 1; i < configMAX_PRIORITIES; ++i)
+           {
+               if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ i ] ) ) > 0 )
+               {
+                   return pdTRUE;
+               }
+               else
+               {
+                   mtCOVERAGE_TEST_MARKER();
+               }
+           }
+           return pdFALSE;
+       }
+
+   #endif // configNUM_CORES > 1
+
    static TickType_t prvGetExpectedIdleTime( void )
    {
-       TickType_t xReturn;
        UBaseType_t uxHigherPriorityReadyTasks = pdFALSE;
+       TickType_t xReturn;
+
+       /* We need a critical section here as we are about to access kernel data structures */
+       taskENTER_CRITICAL( &xKernelLock );

        /* uxHigherPriorityReadyTasks takes care of the case where
         * configUSE_PREEMPTION is 0, so there may be tasks above the idle priority
         * task that are in the Ready state, even though the idle task is
         * running. */
        #if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )
            {
                if( uxTopReadyPriority > tskIDLE_PRIORITY )
                {
                    uxHigherPriorityReadyTasks = pdTRUE;
                }
            }
        #else
            {
                const UBaseType_t uxLeastSignificantBit = ( UBaseType_t ) 0x01;

                /* When port optimised task selection is used the uxTopReadyPriority
                 * variable is used as a bit map. If bits other than the least
                 * significant bit are set then there are tasks that have a priority
                 * above the idle priority that are in the Ready state. This takes
                 * care of the case where the co-operative scheduler is in use. */
                if( uxTopReadyPriority > uxLeastSignificantBit )
                {
                    uxHigherPriorityReadyTasks = pdTRUE;
                }
            }
        #endif /* if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) */

        if( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority > tskIDLE_PRIORITY )
        {
            xReturn = 0;
        }
+       #if configNUM_CORES > 1
+       /* This function is called from Idle task; in single core case this
+        * means that no higher priority tasks are ready to run, and we can
+        * enter sleep. In SMP case, there might be ready tasks waiting for
+        * the other CPU, so need to check all ready lists.
+        */
+       else if( xHaveReadyTasks() )
+       {
+           xReturn = 0;
+       }
+       #endif // configNUM_CORES > 1
        else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > configNUM_CORES )
        {
            /* There are other idle priority tasks in the ready state. If
@@ -2533,18 +2505,10 @@ void vTaskSuspendAll( void )
             * processed. */
            xReturn = 0;
        }
-       else if( uxHigherPriorityReadyTasks != pdFALSE )
-       {
-           /* There are tasks in the Ready state that have a priority above the
-            * idle priority. This path can only be reached if
-            * configUSE_PREEMPTION is 0. */
-           xReturn = 0;
-       }
        else
        {
            xReturn = xNextTaskUnblockTime - xTickCount;
        }

+       taskEXIT_CRITICAL( &xKernelLock );
+
        return xReturn;
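The vTaskSuspendAll() hunk above swaps a kernel-lock critical section for a plain interrupt mask while touching this core's uxSchedulerSuspended entry. The caveat that motivates the per-core bookkeeping is worth keeping in mind when using the API - a usage sketch:

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

void update_shared_state( void )
{
    /* On ESP-IDF, vTaskSuspendAll() only stops the scheduler on the calling
     * core: this guards against preemption here, but not against the other
     * core touching the same data. Cross-core protection still needs a
     * critical section or lock. */
    vTaskSuspendAll();
    {
        /* ... non-ISR work that must not be preempted on this core ... */
    }
    ( void ) xTaskResumeAll(); /* pdTRUE if a context switch already occurred */
}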
@@ -2569,9 +2533,12 @@ BaseType_t xTaskResumeAll( void )
     * tasks from this list into their appropriate ready list. */
    taskENTER_CRITICAL( &xKernelLock );
    {
+       #ifdef ESP_PLATFORM
        /* Minor optimization. Core ID can't change while inside a critical section */
        BaseType_t xCoreID = xPortGetCoreID();
+       #else
+       BaseType_t xCoreID = 0;
+       #endif
        --uxSchedulerSuspended[ xCoreID ];

        if( uxSchedulerSuspended[ xCoreID ] == ( UBaseType_t ) pdFALSE )
@@ -2614,9 +2581,11 @@ BaseType_t xTaskResumeAll( void )
                 * they should be processed now. This ensures the tick count does
                 * not slip, and that any delayed tasks are resumed at the correct
                 * time. */
+               #ifdef ESP_PLATFORM
+               /* Core 0 is solely responsible for managing tick count, thus it
+                * must be the only core to unwind the pended ticks */
+               if ( xCoreID == 0 )
+               #endif
                {
                    TickType_t xPendedCounts = xPendedTicks; /* Non-volatile copy. */
@@ -2674,12 +2643,7 @@ TickType_t xTaskGetTickCount( void )
{
    TickType_t xTicks;

-   /* Critical section required if running on a 16 bit processor. */
-   portTICK_TYPE_ENTER_CRITICAL();
-   {
-       xTicks = xTickCount;
-   }
-   portTICK_TYPE_EXIT_CRITICAL();
+   xTicks = xTickCount;

    return xTicks;
}
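This pairs with the portTICK_TYPE_IS_ATOMIC removal earlier: on the 32-bit ESP ports a tick read is a single aligned load, so the portTICK_TYPE critical section is dropped outright. A trivial consumer for illustration:

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

uint32_t uptime_ms( void )
{
    /* TickType_t is uint32_t on these ports, so the read cannot tear. */
    return ( uint32_t ) ( xTaskGetTickCount() * portTICK_PERIOD_MS );
}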
@@ -2688,6 +2652,7 @@ TickType_t xTaskGetTickCount( void )
TickType_t xTaskGetTickCountFromISR( void )
{
    TickType_t xReturn;
+   UBaseType_t uxSavedInterruptStatus;

    /* RTOS ports that support interrupt nesting have the concept of a maximum
     * system call (or maximum API call) interrupt priority. Interrupts that are
@@ -2705,21 +2670,11 @@ TickType_t xTaskGetTickCountFromISR( void )
     * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

-   #if ( configNUM_CORES > 1 )
-       /* We need a critical section here as we are about to access kernel data structures */
-       taskENTER_CRITICAL_ISR( &xKernelLock );
-   #else
-       UBaseType_t uxSavedInterruptStatus;
-       uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR();
-   #endif
+   uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR();
    {
        xReturn = xTickCount;
    }
-   #if ( configNUM_CORES > 1 )
-       taskEXIT_CRITICAL_ISR( &xKernelLock );
-   #else
-       portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
-   #endif
+   portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    return xReturn;
}
@@ -3490,26 +3445,28 @@ BaseType_t xTaskIncrementTick( void )
    {
        TCB_t * pxTCB;
        TaskHookFunction_t xReturn;
+       #ifndef ESP_PLATFORM
        UBaseType_t uxSavedInterruptStatus;
+       #endif

        /* If xTask is NULL then set the calling task's hook. */
        pxTCB = prvGetTCBFromHandle( xTask );

        /* Save the hook function in the TCB. A critical section is required as
         * the value can be accessed from an interrupt. */
-       #if ( configNUM_CORES > 1 )
-           taskENTER_CRITICAL_ISR( &xKernelLock );
-       #else
-           UBaseType_t uxSavedInterruptStatus;
-           uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
-       #endif
+       #ifdef ESP_PLATFORM
+           taskENTER_CRITICAL_ISR( &xKernelLock );
+       #else
+           uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+       #endif
        {
            xReturn = pxTCB->pxTaskTag;
        }
-       #if ( configNUM_CORES > 1 )
-           taskEXIT_CRITICAL_ISR( &xKernelLock );
-       #else
-           portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
-       #endif
+       #ifdef ESP_PLATFORM
+           taskEXIT_CRITICAL_ISR( &xKernelLock );
+       #else
+           portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
+       #endif

        return xReturn;
    }
@@ -4097,7 +4054,6 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
             * around and gone past again. This passed since vTaskSetTimeout()
             * was called. */
            xReturn = pdTRUE;
-           *pxTicksToWait = ( TickType_t ) 0;
        }
        else if( xElapsedTime < *pxTicksToWait ) /*lint !e961 Explicit casting is only redundant with some compilers, whereas others require it to prevent integer conversion errors. */
        {
@@ -4108,7 +4064,7 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
        }
        else
        {
-           *pxTicksToWait = ( TickType_t ) 0;
+           *pxTicksToWait = 0;
            xReturn = pdTRUE;
        }
    }
@@ -4545,8 +4501,9 @@ static void prvCheckTasksWaitingTermination( void )
        pxTaskStatus->uxCurrentPriority = pxTCB->uxPriority;
        pxTaskStatus->pxStackBase = pxTCB->pxStack;
        pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber;
+
        #if ( configTASKLIST_INCLUDE_COREID == 1 )
-           pxTaskStatus->xCoreID = pxTCB->xCoreID;
+       pxTaskStatus->xCoreID = pxTCB->xCoreID;
        #endif /* configTASKLIST_INCLUDE_COREID */

        #if ( configUSE_MUTEXES == 1 )
@@ -4925,7 +4882,6 @@ static void prvResetNextTaskUnblockTime( void )
    BaseType_t xReturn;
    unsigned state;

-   /* Known issue. This should use critical sections. See IDF-5889 */
    state = portSET_INTERRUPT_MASK_FROM_ISR();
    if( xSchedulerRunning == pdFALSE )
    {
@@ -5560,17 +5516,13 @@ static void prvResetNextTaskUnblockTime( void )
TickType_t uxTaskResetEventItemValue( void )
{
    TickType_t uxReturn;
-   TCB_t *pxCurTCB;

    taskENTER_CRITICAL( &xKernelLock );
-   pxCurTCB = pxCurrentTCB[ xPortGetCoreID() ];
-
-   uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurTCB->xEventListItem ) );
+   uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ) );

    /* Reset the event list item to its normal value - so it can be used with
     * queues and semaphores. */
-   listSET_LIST_ITEM_VALUE( &( pxCurTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurTCB->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
-
+   listSET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB[ xPortGetCoreID() ]->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
    taskEXIT_CRITICAL( &xKernelLock );

    return uxReturn;
@@ -6212,15 +6164,13 @@ static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait,
    const TickType_t xConstTickCount = xTickCount;
    BaseType_t xCurCoreID = xPortGetCoreID();

-   #if ( configNUM_CORES > 1 )
-       if( listIS_CONTAINED_WITHIN( &xTasksWaitingTermination, &( pxCurrentTCB[ xCurCoreID ]->xStateListItem ) ) == pdTRUE )
-       {
-           /* vTaskDelete() has been called to delete this task. This would have happened from the other core while this task was spinning on xTaskQueueMutex,
-              so don't move the running task to the delayed list - as soon as this core re-enables interrupts this task will
-              be suspended permanently. Todo: IDF-5844. */
-           return;
-       }
-   #endif
+   if( ( configNUM_CORES > 1 ) && listIS_CONTAINED_WITHIN( &xTasksWaitingTermination, &( pxCurrentTCB[ xCurCoreID ]->xStateListItem ) ) )
+   {
+       /* vTaskDelete() has been called to delete this task. This would have happened from the other core while this task was spinning on xTaskQueueMutex,
+          so don't move the running task to the delayed list - as soon as this core re-enables interrupts this task will
+          be suspended permanently */
+       return;
+   }

    #if ( INCLUDE_xTaskAbortDelay == 1 )
        {