2016-08-17 11:08:22 -04:00
/*
 * FreeRTOS Kernel V10.2.1
 * Copyright (C) 2019 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * http://www.FreeRTOS.org
 * http://aws.amazon.com/freertos
 *
 * 1 tab == 4 spaces!
 */
2016-08-17 11:08:22 -04:00
/* Standard includes. */
# include <stdlib.h>
/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers . That should only be done when
task . h is included from an application file . */
# define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
/* FreeRTOS includes. */
# include "FreeRTOS.h"
# include "task.h"
# include "timers.h"
# include "event_groups.h"
2019-11-28 13:27:47 -05:00
/* Lint e961, e750 and e9021 are suppressed as a MISRA exception justified
because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
for the header files above , but not in this file , in order to generate the
correct privileged Vs unprivileged linkage and placement . */
# undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021 See comment above. */
2016-08-17 11:08:22 -04:00
/* The following bit fields convey control information in a task's event list
item value.  It is important they don't clash with the
taskEVENT_LIST_ITEM_VALUE_IN_USE definition.  With 16-bit ticks the event
list item value is 16 bits wide, so the control bits occupy the top byte;
otherwise they occupy the top byte of a 32-bit value. */
#if configUSE_16_BIT_TICKS == 1
	#define eventCLEAR_EVENTS_ON_EXIT_BIT	0x0100U
	#define eventUNBLOCKED_DUE_TO_BIT_SET	0x0200U
	#define eventWAIT_FOR_ALL_BITS			0x0400U
	#define eventEVENT_BITS_CONTROL_BYTES	0xff00U
#else
	#define eventCLEAR_EVENTS_ON_EXIT_BIT	0x01000000UL
	#define eventUNBLOCKED_DUE_TO_BIT_SET	0x02000000UL
	#define eventWAIT_FOR_ALL_BITS			0x04000000UL
	#define eventEVENT_BITS_CONTROL_BYTES	0xff000000UL
#endif
2019-11-28 13:27:47 -05:00
typedef struct EventGroupDef_t
2016-08-17 11:08:22 -04:00
{
EventBits_t uxEventBits ;
List_t xTasksWaitingForBits ; /*< List of tasks waiting for a bit to be set. */
# if( configUSE_TRACE_FACILITY == 1 )
UBaseType_t uxEventGroupNumber ;
# endif
2017-10-20 07:03:01 -04:00
# if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
uint8_t ucStaticallyAllocated ; /*< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */
# endif
portMUX_TYPE eventGroupMux ; //Mutex required due to SMP
2016-08-17 11:08:22 -04:00
} EventGroup_t ;
/*-----------------------------------------------------------*/
/*
* Test the bits set in uxCurrentEventBits to see if the wait condition is met .
* The wait condition is defined by xWaitForAllBits . If xWaitForAllBits is
* pdTRUE then the wait condition is met if all the bits set in uxBitsToWaitFor
* are also set in uxCurrentEventBits . If xWaitForAllBits is pdFALSE then the
* wait condition is met if any of the bits set in uxBitsToWait for are also set
* in uxCurrentEventBits .
*/
2019-11-28 13:27:47 -05:00
static BaseType_t prvTestWaitCondition ( const EventBits_t uxCurrentEventBits , const EventBits_t uxBitsToWaitFor , const BaseType_t xWaitForAllBits ) PRIVILEGED_FUNCTION ;
2016-08-17 11:08:22 -04:00
/*-----------------------------------------------------------*/
2017-10-20 07:03:01 -04:00
#if( configSUPPORT_STATIC_ALLOCATION == 1 )

	/*
	 * Create an event group using caller-supplied (statically allocated)
	 * storage.  Returns the new event group's handle, or NULL on failure.
	 */
	EventGroupHandle_t xEventGroupCreateStatic( StaticEventGroup_t *pxEventGroupBuffer )
	{
	EventGroup_t *pxEventBits;

		/* A StaticEventGroup_t object must be provided. */
		configASSERT( pxEventGroupBuffer );

		#if( configASSERT_DEFINED == 1 )
		{
			/* Sanity check that the size of the structure used to declare a
			variable of type StaticEventGroup_t equals the size of the real
			event group structure. */
			volatile size_t xSize = sizeof( StaticEventGroup_t );
			configASSERT( xSize == sizeof( EventGroup_t ) );
		} /*lint !e529 xSize is referenced if configASSERT() is defined. */
		#endif /* configASSERT_DEFINED */

		/* The user has provided a statically allocated event group - use it. */
		pxEventBits = ( EventGroup_t * ) pxEventGroupBuffer; /*lint !e740 !e9087 EventGroup_t and StaticEventGroup_t are deliberately aliased for data hiding purposes and guaranteed to have the same size and alignment requirement - checked by configASSERT(). */

		if( pxEventBits != NULL )
		{
			pxEventBits->uxEventBits = 0;
			vListInitialise( &( pxEventBits->xTasksWaitingForBits ) );

			#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
			{
				/* Both static and dynamic allocation can be used, so note that
				this event group was created statically in case the event group
				is later deleted. */
				pxEventBits->ucStaticallyAllocated = pdTRUE;
			}
			#endif /* configSUPPORT_DYNAMIC_ALLOCATION */

			traceEVENT_GROUP_CREATE( pxEventBits );

			/* Initialise this event group's spinlock - required for SMP. */
			vPortCPUInitializeMutex( &pxEventBits->eventGroupMux );
		}
		else
		{
			/* xEventGroupCreateStatic should only ever be called with
			pxEventGroupBuffer pointing to a pre-allocated (compile time
			allocated) StaticEventGroup_t variable. */
			traceEVENT_GROUP_CREATE_FAILED();
		}

		return pxEventBits;
	}

#endif /* configSUPPORT_STATIC_ALLOCATION */
/*-----------------------------------------------------------*/
#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )

	/*
	 * Create an event group using dynamically allocated storage.  Returns
	 * the new event group's handle, or NULL if the allocation failed.
	 */
	EventGroupHandle_t xEventGroupCreate( void )
	{
	EventGroup_t *pxEventBits;

		/* Allocate the event group.  Justification for MISRA deviation as
		follows:  pvPortMalloc() always ensures returned memory blocks are
		aligned per the requirements of the MCU stack.  In this case
		pvPortMalloc() must return a pointer that is guaranteed to meet the
		alignment requirements of the EventGroup_t structure - which (if you
		follow it through) is the alignment requirements of the TickType_t type
		(EventBits_t being of TickType_t itself).  Therefore, whenever the
		stack alignment requirements are greater than or equal to the
		TickType_t alignment requirements the cast is safe.  In other cases,
		where the natural word size of the architecture is less than
		sizeof( TickType_t ), the TickType_t variables will be accessed in two
		or more reads operations, and the alignment requirements is only that
		of each individual read. */
		pxEventBits = ( EventGroup_t * ) pvPortMalloc( sizeof( EventGroup_t ) ); /*lint !e9087 !e9079 see comment above. */

		if( pxEventBits != NULL )
		{
			pxEventBits->uxEventBits = 0;
			vListInitialise( &( pxEventBits->xTasksWaitingForBits ) );

			#if( configSUPPORT_STATIC_ALLOCATION == 1 )
			{
				/* Both static and dynamic allocation can be used, so note this
				event group was allocated statically in case the event group is
				later deleted. */
				pxEventBits->ucStaticallyAllocated = pdFALSE;
			}
			#endif /* configSUPPORT_STATIC_ALLOCATION */

			/* Initialise this event group's spinlock - required for SMP. */
			vPortCPUInitializeMutex( &pxEventBits->eventGroupMux );

			traceEVENT_GROUP_CREATE( pxEventBits );
		}
		else
		{
			traceEVENT_GROUP_CREATE_FAILED(); /*lint !e9063 Else branch only exists to allow tracing and does not generate code if trace macros are not defined. */
		}

		return pxEventBits;
	}

#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
2016-08-17 11:08:22 -04:00
/*-----------------------------------------------------------*/
/*
 * Rendezvous point:  atomically set uxBitsToSet then wait for all the bits in
 * uxBitsToWaitFor to be set, blocking for at most xTicksToWait ticks.  The
 * wait bits are always cleared before returning when the rendezvous is met.
 * Returns the event bit value at the time the calling task left the Blocked
 * state (control bits stripped), or the current value on timeout.
 */
EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, const EventBits_t uxBitsToWaitFor, TickType_t xTicksToWait )
{
EventBits_t uxOriginalBitValue, uxReturn;
EventGroup_t *pxEventBits = xEventGroup;
BaseType_t xAlreadyYielded = pdFALSE;
BaseType_t xTimeoutOccurred = pdFALSE;

	/* Check the user is not attempting to wait on the bits used by the kernel
	itself, and that at least one bit is being requested. */
	configASSERT( ( uxBitsToWaitFor & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
	configASSERT( uxBitsToWaitFor != 0 );
	#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
	{
		/* A non-zero block time cannot be honoured while the scheduler is
		suspended. */
		configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
	}
	#endif

	taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
	{
		uxOriginalBitValue = pxEventBits->uxEventBits;

		( void ) xEventGroupSetBits( xEventGroup, uxBitsToSet );

		if( ( ( uxOriginalBitValue | uxBitsToSet ) & uxBitsToWaitFor ) == uxBitsToWaitFor )
		{
			/* All the rendezvous bits are now set - no need to block. */
			uxReturn = ( uxOriginalBitValue | uxBitsToSet );

			/* Rendezvous always clear the bits.  They will have been cleared
			already unless this is the only task in the rendezvous. */
			pxEventBits->uxEventBits &= ~uxBitsToWaitFor;

			xTicksToWait = 0;
		}
		else
		{
			if( xTicksToWait != ( TickType_t ) 0 )
			{
				traceEVENT_GROUP_SYNC_BLOCK( xEventGroup, uxBitsToSet, uxBitsToWaitFor );

				/* Store the bits that the calling task is waiting for in the
				task's event list item so the kernel knows when a match is
				found.  Then enter the blocked state. */
				vTaskPlaceOnUnorderedEventList( &( pxEventBits->xTasksWaitingForBits ), ( uxBitsToWaitFor | eventCLEAR_EVENTS_ON_EXIT_BIT | eventWAIT_FOR_ALL_BITS ), xTicksToWait );

				/* This assignment is obsolete as uxReturn will get set after
				the task unblocks, but some compilers mistakenly generate a
				warning about uxReturn being returned without being set if the
				assignment is omitted. */
				uxReturn = 0;
			}
			else
			{
				/* The rendezvous bits were not set, but no block time was
				specified - just return the current event bit value. */
				uxReturn = pxEventBits->uxEventBits;
				xTimeoutOccurred = pdTRUE;
			}
		}
	}
	taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );

	if( xTicksToWait != ( TickType_t ) 0 )
	{
		if( xAlreadyYielded == pdFALSE )
		{
			portYIELD_WITHIN_API();
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		/* The task blocked to wait for its required bits to be set - at this
		point either the required bits were set or the block time expired.  If
		the required bits were set they will have been stored in the task's
		event list item, and they should now be retrieved then cleared. */
		uxReturn = uxTaskResetEventItemValue();

		if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
		{
			/* The task timed out, just return the current event bit value. */
			taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
			{
				uxReturn = pxEventBits->uxEventBits;

				/* Although the task got here because it timed out before the
				bits it was waiting for were set, it is possible that since it
				unblocked another task has set the bits.  If this is the case
				then it needs to clear the bits before exiting. */
				if( ( uxReturn & uxBitsToWaitFor ) == uxBitsToWaitFor )
				{
					pxEventBits->uxEventBits &= ~uxBitsToWaitFor;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );

			xTimeoutOccurred = pdTRUE;
		}
		else
		{
			/* The task unblocked because the bits were set. */
		}

		/* Control bits might be set as the task had blocked should not be
		returned. */
		uxReturn &= ~eventEVENT_BITS_CONTROL_BYTES;
	}

	traceEVENT_GROUP_SYNC_END( xEventGroup, uxBitsToSet, uxBitsToWaitFor, xTimeoutOccurred );

	/* Prevent compiler warnings when trace macros are not used. */
	( void ) xTimeoutOccurred;

	return uxReturn;
}
/*-----------------------------------------------------------*/
/*
 * Block the calling task (for at most xTicksToWait ticks) until either any
 * (xWaitForAllBits == pdFALSE) or all (pdTRUE) of the bits in uxBitsToWaitFor
 * are set.  If xClearOnExit is pdTRUE the wait bits are cleared before
 * returning when the wait condition is met.  Returns the event bit value at
 * the time the task left the Blocked state (control bits stripped), or the
 * current value on timeout.
 */
EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToWaitFor, const BaseType_t xClearOnExit, const BaseType_t xWaitForAllBits, TickType_t xTicksToWait )
{
EventGroup_t *pxEventBits = xEventGroup;
EventBits_t uxReturn, uxControlBits = 0;
BaseType_t xWaitConditionMet;
BaseType_t xTimeoutOccurred = pdFALSE;

	/* Check the user is not attempting to wait on the bits used by the kernel
	itself, and that at least one bit is being requested. */
	configASSERT( xEventGroup );
	configASSERT( ( uxBitsToWaitFor & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
	configASSERT( uxBitsToWaitFor != 0 );
	#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
	{
		/* A non-zero block time cannot be honoured while the scheduler is
		suspended. */
		configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
	}
	#endif

	taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
	{
		const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits;

		/* Check to see if the wait condition is already met or not. */
		xWaitConditionMet = prvTestWaitCondition( uxCurrentEventBits, uxBitsToWaitFor, xWaitForAllBits );

		if( xWaitConditionMet != pdFALSE )
		{
			/* The wait condition has already been met so there is no need to
			block. */
			uxReturn = uxCurrentEventBits;
			xTicksToWait = ( TickType_t ) 0;

			/* Clear the wait bits if requested to do so. */
			if( xClearOnExit != pdFALSE )
			{
				pxEventBits->uxEventBits &= ~uxBitsToWaitFor;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else if( xTicksToWait == ( TickType_t ) 0 )
		{
			/* The wait condition has not been met, but no block time was
			specified, so just return the current value. */
			uxReturn = uxCurrentEventBits;
			xTimeoutOccurred = pdTRUE;
		}
		else
		{
			/* The task is going to block to wait for its required bits to be
			set.  uxControlBits are used to remember the specified behaviour of
			this call to xEventGroupWaitBits() - for use when the event bits
			unblock the task. */
			if( xClearOnExit != pdFALSE )
			{
				uxControlBits |= eventCLEAR_EVENTS_ON_EXIT_BIT;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			if( xWaitForAllBits != pdFALSE )
			{
				uxControlBits |= eventWAIT_FOR_ALL_BITS;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			/* Store the bits that the calling task is waiting for in the
			task's event list item so the kernel knows when a match is
			found.  Then enter the blocked state. */
			vTaskPlaceOnUnorderedEventList( &( pxEventBits->xTasksWaitingForBits ), ( uxBitsToWaitFor | uxControlBits ), xTicksToWait );

			/* This is obsolete as it will get set after the task unblocks, but
			some compilers mistakenly generate a warning about the variable
			being returned without being set if it is not done. */
			uxReturn = 0;

			traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
		}
	}
	taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );

	if( xTicksToWait != ( TickType_t ) 0 )
	{
		portYIELD_WITHIN_API();

		/* The task blocked to wait for its required bits to be set - at this
		point either the required bits were set or the block time expired.  If
		the required bits were set they will have been stored in the task's
		event list item, and they should now be retrieved then cleared. */
		uxReturn = uxTaskResetEventItemValue();

		if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
		{
			taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
			{
				/* The task timed out, just return the current event bit value. */
				uxReturn = pxEventBits->uxEventBits;

				/* It is possible that the event bits were updated between this
				task leaving the Blocked state and running again. */
				if( prvTestWaitCondition( uxReturn, uxBitsToWaitFor, xWaitForAllBits ) != pdFALSE )
				{
					if( xClearOnExit != pdFALSE )
					{
						pxEventBits->uxEventBits &= ~uxBitsToWaitFor;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}

				xTimeoutOccurred = pdTRUE;
			}
			taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
		}
		else
		{
			/* The task unblocked because the bits were set. */
		}

		/* The task blocked so control bits may have been set. */
		uxReturn &= ~eventEVENT_BITS_CONTROL_BYTES;
	}

	traceEVENT_GROUP_WAIT_BITS_END( xEventGroup, uxBitsToWaitFor, xTimeoutOccurred );

	/* Prevent compiler warnings when trace macros are not used. */
	( void ) xTimeoutOccurred;

	return uxReturn;
}
/*-----------------------------------------------------------*/
/*
 * Clear the bits in uxBitsToClear from the event group.  Returns the event
 * group value BEFORE the bits were cleared.
 */
EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear )
{
EventGroup_t *pxEventBits = xEventGroup;
EventBits_t uxReturn;

	/* Check the user is not attempting to clear the bits used by the kernel
	itself. */
	configASSERT( xEventGroup );
	configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 );

	taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
	{
		traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear );

		/* The value returned is the event group value prior to the bits being
		cleared. */
		uxReturn = pxEventBits->uxEventBits;

		/* Clear the bits. */
		pxEventBits->uxEventBits &= ~uxBitsToClear;
	}
	taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );

	return uxReturn;
}
/*-----------------------------------------------------------*/
#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) )

	/*
	 * ISR-safe variant of xEventGroupClearBits():  defers the clear to the
	 * timer/daemon task via the timer command queue.  Returns pdPASS if the
	 * command was successfully queued, otherwise pdFAIL.
	 */
	BaseType_t xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear )
	{
	BaseType_t xReturn;

		traceEVENT_GROUP_CLEAR_BITS_FROM_ISR( xEventGroup, uxBitsToClear );
		xReturn = xTimerPendFunctionCallFromISR( vEventGroupClearBitsCallback, ( void * ) xEventGroup, ( uint32_t ) uxBitsToClear, NULL ); /*lint !e9087 Can't avoid cast to void* as a generic callback function not specific to this use case. Callback casts back to original type so safe. */

		return xReturn;
	}

#endif
/*-----------------------------------------------------------*/
/*
 * Return a snapshot of the event group's current bit value.  Safe to call
 * from an ISR - interrupts are masked while the value is read.
 */
EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup )
{
UBaseType_t uxSavedInterruptStatus;
EventGroup_t const * const pxEventBits = xEventGroup;
EventBits_t uxReturn;

	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		uxReturn = pxEventBits->uxEventBits;
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return uxReturn;
} /*lint !e818 EventGroupHandle_t is a typedef used in other functions to so can't be pointer to const. */
2016-08-17 11:08:22 -04:00
/*-----------------------------------------------------------*/
/*
 * Set the bits in uxBitsToSet, then walk the list of tasks blocked on this
 * event group and unblock every task whose wait condition is now met.  Bits
 * waited for by an unblocked task that requested clear-on-exit are cleared
 * before returning.  Returns the event group value at the time the call
 * returns (which may already have been modified by a higher priority task).
 */
EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet )
{
ListItem_t *pxListItem, *pxNext;
ListItem_t const *pxListEnd;
List_t const * pxList;
EventBits_t uxBitsToClear = 0, uxBitsWaitedFor, uxControlBits;
EventGroup_t *pxEventBits = xEventGroup;
BaseType_t xMatchFound = pdFALSE;

	/* Check the user is not attempting to set the bits used by the kernel
	itself. */
	configASSERT( xEventGroup );
	configASSERT( ( uxBitsToSet & eventEVENT_BITS_CONTROL_BYTES ) == 0 );

	pxList = &( pxEventBits->xTasksWaitingForBits );
	pxListEnd = listGET_END_MARKER( pxList ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM.  This is checked and valid. */

	taskENTER_CRITICAL( &pxEventBits->eventGroupMux );

	/* The critical section above only takes the event group's spinlock.
	However, we are about to traverse a task list, so the task list spinlock
	located in tasks.c must also be taken.  Not doing so risks the task list
	being changed while we are traversing it. */
	vTaskTakeEventListLock();
	{
		traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );

		pxListItem = listGET_HEAD_ENTRY( pxList );

		/* Set the bits. */
		pxEventBits->uxEventBits |= uxBitsToSet;

		/* See if the new bit value should unblock any tasks. */
		while( pxListItem != pxListEnd )
		{
			pxNext = listGET_NEXT( pxListItem );
			uxBitsWaitedFor = listGET_LIST_ITEM_VALUE( pxListItem );
			xMatchFound = pdFALSE;

			/* Split the bits waited for from the control bits. */
			uxControlBits = uxBitsWaitedFor & eventEVENT_BITS_CONTROL_BYTES;
			uxBitsWaitedFor &= ~eventEVENT_BITS_CONTROL_BYTES;

			if( ( uxControlBits & eventWAIT_FOR_ALL_BITS ) == ( EventBits_t ) 0 )
			{
				/* Just looking for single bit being set. */
				if( ( uxBitsWaitedFor & pxEventBits->uxEventBits ) != ( EventBits_t ) 0 )
				{
					xMatchFound = pdTRUE;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else if( ( uxBitsWaitedFor & pxEventBits->uxEventBits ) == uxBitsWaitedFor )
			{
				/* All bits are set. */
				xMatchFound = pdTRUE;
			}
			else
			{
				/* Need all bits to be set, but not all the bits were set. */
			}

			if( xMatchFound != pdFALSE )
			{
				/* The bits match.  Should the bits be cleared on exit? */
				if( ( uxControlBits & eventCLEAR_EVENTS_ON_EXIT_BIT ) != ( EventBits_t ) 0 )
				{
					uxBitsToClear |= uxBitsWaitedFor;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}

				/* Store the actual event flag value in the task's event list
				item before removing the task from the event list.  The
				eventUNBLOCKED_DUE_TO_BIT_SET bit is set so the task knows
				that is was unblocked due to its required bits matching, rather
				than because it timed out. */
				xTaskRemoveFromUnorderedEventList( pxListItem, pxEventBits->uxEventBits | eventUNBLOCKED_DUE_TO_BIT_SET );
			}

			/* Move onto the next list item.  Note pxListItem->pxNext is not
			used here as the list item may have been removed from the event list
			and inserted into the ready/pending reading list. */
			pxListItem = pxNext;
		}

		/* Clear any bits that matched when the eventCLEAR_EVENTS_ON_EXIT_BIT
		bit was set in the control word. */
		pxEventBits->uxEventBits &= ~uxBitsToClear;
	}
	/* Release the previously held task list spinlock, then release the event
	group spinlock. */
	vTaskReleaseEventListLock();
	taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );

	return pxEventBits->uxEventBits;
}
/*-----------------------------------------------------------*/
/*
 * Delete an event group:  unblock every task waiting on it (each unblocked
 * task's event list item value is 0, so it sees a timeout with no bits set),
 * then free the event group's storage if it was dynamically allocated.
 */
void vEventGroupDelete( EventGroupHandle_t xEventGroup )
{
EventGroup_t *pxEventBits = xEventGroup;
const List_t *pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );

	traceEVENT_GROUP_DELETE( xEventGroup );

	taskENTER_CRITICAL( &pxEventBits->eventGroupMux );

	/* The critical section above only takes the event group's spinlock.
	However, we are about to traverse a task list, so the task list spinlock
	located in tasks.c must also be taken.  Not doing so risks the task list
	being changed while we are traversing it. */
	vTaskTakeEventListLock();
	{
		while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 )
		{
			/* Unblock the task, returning 0 as the event list is being deleted
			and cannot therefore have any bits set. */
			configASSERT( pxTasksWaitingForBits->xListEnd.pxNext != ( const ListItem_t * ) &( pxTasksWaitingForBits->xListEnd ) );
			xTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
		}
	}
	/* Release the previously held task list spinlock. */
	vTaskReleaseEventListLock();
	taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );

	#if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
	{
		/* The event group can only have been allocated dynamically - free
		it again. */
		vPortFree( pxEventBits );
	}
	#elif( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
	{
		/* The event group could have been allocated statically or
		dynamically, so check before attempting to free the memory. */
		if( pxEventBits->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
		{
			vPortFree( pxEventBits );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}
	#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
}
/*-----------------------------------------------------------*/
/* For internal use only - execute a 'set bits' command that was pended from
an interrupt . */
void vEventGroupSetBitsCallback ( void * pvEventGroup , const uint32_t ulBitsToSet )
{
2019-11-28 13:27:47 -05:00
( void ) xEventGroupSetBits ( pvEventGroup , ( EventBits_t ) ulBitsToSet ) ; /*lint !e9079 Can't avoid cast to void* as a generic timer callback prototype. Callback casts back to original type so safe. */
2016-08-17 11:08:22 -04:00
}
/*-----------------------------------------------------------*/
/* For internal use only - execute a 'clear bits' command that was pended from
an interrupt . */
void vEventGroupClearBitsCallback ( void * pvEventGroup , const uint32_t ulBitsToClear )
{
2019-11-28 13:27:47 -05:00
( void ) xEventGroupClearBits ( pvEventGroup , ( EventBits_t ) ulBitsToClear ) ; /*lint !e9079 Can't avoid cast to void* as a generic timer callback prototype. Callback casts back to original type so safe. */
2016-08-17 11:08:22 -04:00
}
/*-----------------------------------------------------------*/
static BaseType_t prvTestWaitCondition ( const EventBits_t uxCurrentEventBits , const EventBits_t uxBitsToWaitFor , const BaseType_t xWaitForAllBits )
{
BaseType_t xWaitConditionMet = pdFALSE ;
if ( xWaitForAllBits = = pdFALSE )
{
/* Task only has to wait for one bit within uxBitsToWaitFor to be
set . Is one already set ? */
if ( ( uxCurrentEventBits & uxBitsToWaitFor ) ! = ( EventBits_t ) 0 )
{
xWaitConditionMet = pdTRUE ;
}
else
{
mtCOVERAGE_TEST_MARKER ( ) ;
}
}
else
{
/* Task has to wait for all the bits in uxBitsToWaitFor to be set.
Are they set already ? */
if ( ( uxCurrentEventBits & uxBitsToWaitFor ) = = uxBitsToWaitFor )
{
xWaitConditionMet = pdTRUE ;
}
else
{
mtCOVERAGE_TEST_MARKER ( ) ;
}
}
return xWaitConditionMet ;
}
/*-----------------------------------------------------------*/
#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) )

	/*
	 * ISR-safe variant of xEventGroupSetBits():  defers the set to the
	 * timer/daemon task via the timer command queue.  Returns pdPASS if the
	 * command was successfully queued, otherwise pdFAIL.
	 * pxHigherPriorityTaskWoken is set to pdTRUE if queuing the command
	 * unblocked a higher priority task.
	 */
	BaseType_t xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, BaseType_t *pxHigherPriorityTaskWoken )
	{
	BaseType_t xReturn;

		traceEVENT_GROUP_SET_BITS_FROM_ISR( xEventGroup, uxBitsToSet );
		xReturn = xTimerPendFunctionCallFromISR( vEventGroupSetBitsCallback, ( void * ) xEventGroup, ( uint32_t ) uxBitsToSet, pxHigherPriorityTaskWoken ); /*lint !e9087 Can't avoid cast to void* as a generic callback function not specific to this use case. Callback casts back to original type so safe. */

		return xReturn;
	}

#endif
/*-----------------------------------------------------------*/
#if ( configUSE_TRACE_FACILITY == 1 )

	/*
	 * Return the number assigned to the event group for use by trace tools,
	 * or 0 if xEventGroup is NULL.
	 */
	UBaseType_t uxEventGroupGetNumber( void *xEventGroup )
	{
	UBaseType_t xReturn;
	EventGroup_t const *pxEventBits = ( EventGroup_t * ) xEventGroup; /*lint !e9087 !e9079 EventGroupHandle_t is a pointer to an EventGroup_t, but EventGroupHandle_t is kept opaque outside of this file for data hiding purposes. */

		if( xEventGroup == NULL )
		{
			xReturn = 0;
		}
		else
		{
			xReturn = pxEventBits->uxEventGroupNumber;
		}

		return xReturn;
	}

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/
#if ( configUSE_TRACE_FACILITY == 1 )

	/*
	 * Assign a number to the event group for use by trace tools.  The caller
	 * must pass a valid (non-NULL) event group handle.
	 */
	void vEventGroupSetNumber( void *xEventGroup, UBaseType_t uxEventGroupNumber )
	{
		( ( EventGroup_t * ) xEventGroup )->uxEventGroupNumber = uxEventGroupNumber; /*lint !e9087 !e9079 EventGroupHandle_t is a pointer to an EventGroup_t, but EventGroupHandle_t is kept opaque outside of this file for data hiding purposes. */
	}

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/