commit 20212ee823
parent 290c40a4ab
Author: Angus Gratton
Date:   2017-02-28 12:06:36 +11:00

    freertos: Fix cross-core usage of event groups

    Fixes & re-enables the broken unit tests.
    Adds a per-event-group spinlock in place of the single global lock.

2 changed files with 50 additions and 28 deletions
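For orientation before the diff: the change moves the lock into the EventGroup_t structure itself, so each event group carries its own port spinlock and two cores only contend when they touch the same group. A minimal sketch of that pattern, using the ESP-IDF port's spinlock API (SketchGroup_t and the helper names below are illustrative, not taken from the commit):

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

/* Hypothetical stand-in for EventGroup_t, showing only the locking pattern. */
typedef struct {
    unsigned int uxBits;   /* shared state, touched by both cores */
    portMUX_TYPE mux;      /* one spinlock per object, not one global */
} SketchGroup_t;

void sketch_group_init(SketchGroup_t *g)
{
    g->uxBits = 0;
    vPortCPUInitializeMutex(&g->mux);   /* same init call the commit adds */
}

void sketch_group_set(SketchGroup_t *g, unsigned int bits)
{
    /* In the ESP-IDF port, taskENTER_CRITICAL takes the mux to lock, so
       updates to two different groups no longer contend with each other. */
    taskENTER_CRITICAL(&g->mux);
    g->uxBits |= bits;
    taskEXIT_CRITICAL(&g->mux);
}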

components/freertos/event_groups.c

@@ -119,13 +119,10 @@ typedef struct xEventGroupDefinition
 		UBaseType_t uxEventGroupNumber;
 	#endif
 
+	portMUX_TYPE eventGroupMux;
 } EventGroup_t;
 
-/* Again: one mux for all events. Maybe this can be made more granular. ToDo: look into that. -JD */
-static portMUX_TYPE xEventGroupMux = portMUX_INITIALIZER_UNLOCKED;
-
 /*-----------------------------------------------------------*/
 
 /*
@@ -156,6 +153,8 @@ EventGroup_t *pxEventBits;
 		traceEVENT_GROUP_CREATE_FAILED();
 	}
 
+	vPortCPUInitializeMutex(&pxEventBits->eventGroupMux);
+
 	return ( EventGroupHandle_t ) pxEventBits;
 }
 /*-----------------------------------------------------------*/
@@ -176,6 +175,7 @@ BaseType_t xTimeoutOccurred = pdFALSE;
 	#endif
 
 	vTaskSuspendAll();
+	taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
 	{
 		uxOriginalBitValue = pxEventBits->uxEventBits;
@@ -217,6 +217,7 @@ BaseType_t xTimeoutOccurred = pdFALSE;
 			}
 		}
 	}
+	taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
 	xAlreadyYielded = xTaskResumeAll();
 
 	if( xTicksToWait != ( TickType_t ) 0 )
@@ -239,7 +240,7 @@ BaseType_t xTimeoutOccurred = pdFALSE;
 		if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
 		{
 			/* The task timed out, just return the current event bit value. */
-			taskENTER_CRITICAL( &xEventGroupMux );
+			taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
 			{
 				uxReturn = pxEventBits->uxEventBits;
@@ -256,7 +257,7 @@ BaseType_t xTimeoutOccurred = pdFALSE;
 					mtCOVERAGE_TEST_MARKER();
 				}
 			}
-			taskEXIT_CRITICAL( &xEventGroupMux );
+			taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
 
 			xTimeoutOccurred = pdTRUE;
 		}
@@ -295,6 +296,7 @@ BaseType_t xTimeoutOccurred = pdFALSE;
 	#endif
 
 	vTaskSuspendAll();
+	taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
 	{
 		const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits;
@@ -361,6 +363,7 @@ BaseType_t xTimeoutOccurred = pdFALSE;
 			traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
 		}
 	}
+	taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
 	xAlreadyYielded = xTaskResumeAll();
 
 	if( xTicksToWait != ( TickType_t ) 0 )
@@ -382,7 +385,7 @@ BaseType_t xTimeoutOccurred = pdFALSE;
 		if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
 		{
-			taskENTER_CRITICAL( &xEventGroupMux );
+			taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
 			{
 				/* The task timed out, just return the current event bit value. */
 				uxReturn = pxEventBits->uxEventBits;
@@ -405,7 +408,7 @@ BaseType_t xTimeoutOccurred = pdFALSE;
 				mtCOVERAGE_TEST_MARKER();
 			}
 		}
-		taskEXIT_CRITICAL( &xEventGroupMux );
+		taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
 
 		/* Prevent compiler warnings when trace macros are not used. */
 		xTimeoutOccurred = pdFALSE;
@@ -434,7 +437,7 @@ EventBits_t uxReturn;
 	configASSERT( xEventGroup );
 	configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
 
-	taskENTER_CRITICAL( &xEventGroupMux );
+	taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
 	{
 		traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear );
@@ -445,7 +448,7 @@ EventBits_t uxReturn;
 		/* Clear the bits. */
 		pxEventBits->uxEventBits &= ~uxBitsToClear;
 	}
-	taskEXIT_CRITICAL( &xEventGroupMux );
+	taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
 
 	return uxReturn;
 }
@@ -498,7 +501,9 @@ BaseType_t xMatchFound = pdFALSE;
 	pxList = &( pxEventBits->xTasksWaitingForBits );
 	pxListEnd = listGET_END_MARKER( pxList ); /*lint !e826 !e740 The mini list structure is used as the list end to save RAM. This is checked and valid. */
 
 	vTaskSuspendAll();
+	taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
+
 	{
 		traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );
@@ -570,6 +575,7 @@ BaseType_t xMatchFound = pdFALSE;
 		bit was set in the control word. */
 		pxEventBits->uxEventBits &= ~uxBitsToClear;
 	}
+	taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
 	( void ) xTaskResumeAll();
 
 	return pxEventBits->uxEventBits;
@@ -578,10 +584,11 @@ BaseType_t xMatchFound = pdFALSE;
 void vEventGroupDelete( EventGroupHandle_t xEventGroup )
 {
 EventGroup_t *pxEventBits = ( EventGroup_t * ) xEventGroup;
 const List_t *pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );
 
 	vTaskSuspendAll();
+	taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
 	{
 		traceEVENT_GROUP_DELETE( xEventGroup );
@@ -593,6 +600,7 @@ const List_t *pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );
 			( void ) xTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
 		}
 
+		taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
 		vPortFree( pxEventBits );
 	}
 	( void ) xTaskResumeAll();
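With every event group carrying its own mux, a group can now safely be set from one core and waited on from the other, which is what the re-enabled tests below exercise. A hedged usage sketch (the bit value, task names, stack sizes, and priorities are illustrative, not from the commit):

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/event_groups.h"

#define SKETCH_READY_BIT (1 << 0)   /* illustrative bit */

static EventGroupHandle_t s_group;

static void sketch_producer(void *arg)   /* pinned to core 0 */
{
    xEventGroupSetBits(s_group, SKETCH_READY_BIT);  /* cross-core set, now safe */
    vTaskDelete(NULL);
}

static void sketch_consumer(void *arg)   /* pinned to core 1 */
{
    /* pdTRUE, pdTRUE: clear the bits on exit, wait for all of them. */
    xEventGroupWaitBits(s_group, SKETCH_READY_BIT, pdTRUE, pdTRUE, portMAX_DELAY);
    vTaskDelete(NULL);
}

void sketch_start(void)
{
    s_group = xEventGroupCreate();
    xTaskCreatePinnedToCore(sketch_producer, "sketch_prod", 2048, NULL, 5, NULL, 0);
    xTaskCreatePinnedToCore(sketch_consumer, "sketch_cons", 2048, NULL, 5, NULL, 1);
}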

components/freertos/test/test_freertos_eventgroups.c

@@ -11,9 +11,10 @@
 #define BIT_RESPONSE(TASK) (1 << (TASK+1))
 #define ALL_RESPONSE_BITS (((1 << NUM_TASKS) - 1) << 1)
 
-static const int NUM_TASKS = 4;
-static const int COUNT = 4000;
+static const int NUM_TASKS = 8;
+static const int COUNT = 1000;
 static EventGroupHandle_t eg;
+static SemaphoreHandle_t done_sem;
 
 static void task_event_group_call_response(void *param)
 {
@@ -32,15 +33,14 @@ static void task_event_group_call_response(void *param)
 	}
 
 	printf("Task %d done\n", task_num);
+	xSemaphoreGive(done_sem);
 
-	/* Delay is due to not-yet-fixed bug with deleting tasks at same time */
-	vTaskDelay(100 / portTICK_PERIOD_MS);
 	vTaskDelete(NULL);
 }
 
-TEST_CASE("FreeRTOS Event Groups", "[freertos][ignore]")
+TEST_CASE("FreeRTOS Event Groups", "[freertos]")
 {
 	eg = xEventGroupCreate();
+	done_sem = xSemaphoreCreateCounting(NUM_TASKS, 0);
 
 	/* Note: task_event_group_call_response all have higher priority than us, so will block together.
@@ -50,7 +50,7 @@ TEST_CASE("FreeRTOS Event Groups", "[freertos]")
 	for (int c = 0; c < NUM_TASKS; c++) {
 		xTaskCreatePinnedToCore(task_event_group_call_response, "tsk_call_resp", 4096, (void *)c, configMAX_PRIORITIES - 1, NULL, c % portNUM_PROCESSORS);
 	}
-	/* Scheduler weirdness, if we don't sleep a few ticks here then the tasks on the other CPU aren't running yet... */
+	/* Scheduler weirdness (bug?), if we don't sleep a few ticks here then the tasks on the other CPU aren't running yet... */
 	vTaskDelay(10);
 
 	for (int i = 0; i < COUNT; i++) {
@@ -60,11 +60,17 @@ TEST_CASE("FreeRTOS Event Groups", "[freertos]")
 		/* signal all tasks with "CALL" bit... */
 		xEventGroupSetBits(eg, BIT_CALL);
 
-		while (xEventGroupWaitBits(eg, ALL_RESPONSE_BITS, true, true, portMAX_DELAY) != ALL_RESPONSE_BITS) {
-		}
+		TEST_ASSERT_EQUAL(ALL_RESPONSE_BITS, xEventGroupWaitBits(eg, ALL_RESPONSE_BITS, true, true, 100 / portTICK_PERIOD_MS));
 	}
+
+	/* Ensure all tasks cleaned up correctly */
+	for (int c = 0; c < NUM_TASKS; c++) {
+		TEST_ASSERT( xSemaphoreTake(done_sem, 100 / portTICK_PERIOD_MS) );
+	}
+	vSemaphoreDelete(done_sem);
+	vEventGroupDelete(eg);
 }
 
 #define BIT_DONE(X) (1<<(NUM_TASKS+1+X))
@@ -82,24 +88,32 @@ static void task_test_sync(void *param)
 	}
 
 	int after_done = xEventGroupSetBits(eg, BIT_DONE(task_num));
-	printf("Done %d = %x\n", task_num, after_done);
+	printf("Done %d = 0x%08x\n", task_num, after_done);
+	xSemaphoreGive(done_sem);
 
-	/* Delay is due to not-yet-fixed bug with deleting tasks at same time */
-	vTaskDelay(100 / portTICK_PERIOD_MS);
 	vTaskDelete(NULL);
 }
 
-TEST_CASE("FreeRTOS Event Group Sync", "[freertos][ignore]")
+TEST_CASE("FreeRTOS Event Group Sync", "[freertos]")
 {
 	eg = xEventGroupCreate();
+	done_sem = xSemaphoreCreateCounting(NUM_TASKS, 0);
 
 	for (int c = 0; c < NUM_TASKS; c++) {
 		xTaskCreatePinnedToCore(task_test_sync, "task_test_sync", 4096, (void *)c, configMAX_PRIORITIES - 1, NULL, c % portNUM_PROCESSORS);
 	}
 
 	for (int c = 0; c < NUM_TASKS; c++) {
-		printf("Waiting on %d (%x)\n", c, BIT_DONE(c));
-		xEventGroupWaitBits(eg, BIT_DONE(c), false, false, portMAX_DELAY);
+		printf("Waiting on %d (0x%08x)\n", c, BIT_DONE(c));
+		TEST_ASSERT( xEventGroupWaitBits(eg, BIT_DONE(c), false, false, portMAX_DELAY) );
 	}
+
+	/* Ensure all tasks cleaned up correctly */
+	for (int c = 0; c < NUM_TASKS; c++) {
+		TEST_ASSERT( xSemaphoreTake(done_sem, 100 / portTICK_PERIOD_MS) );
+	}
+	vSemaphoreDelete(done_sem);
+	vEventGroupDelete(eg);
 }
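The teardown pattern the tests adopt, generalized: each worker gives a counting semaphore immediately before deleting itself, and the test joins on that semaphore before freeing shared objects, instead of the old fixed delay. A minimal sketch of that pattern (all names here are illustrative, not from the commit):

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"

#define SKETCH_NUM_TASKS 4   /* illustrative; the test uses NUM_TASKS = 8 */

static SemaphoreHandle_t s_done;

static void sketch_worker(void *arg)
{
    /* ... test body ... */
    xSemaphoreGive(s_done);   /* signal completion right before self-delete */
    vTaskDelete(NULL);
}

void sketch_run(void)
{
    s_done = xSemaphoreCreateCounting(SKETCH_NUM_TASKS, 0);
    for (int i = 0; i < SKETCH_NUM_TASKS; i++) {
        xTaskCreate(sketch_worker, "sketch_wkr", 2048, NULL, 5, NULL);
    }
    /* "Join": take the semaphore once per worker before freeing anything,
       with the same 100 ms per-task timeout the test uses. */
    for (int i = 0; i < SKETCH_NUM_TASKS; i++) {
        configASSERT(xSemaphoreTake(s_done, pdMS_TO_TICKS(100)) == pdTRUE);
    }
    vSemaphoreDelete(s_done);
}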