From 20212ee8231476e2030944cb29d269620f701401 Mon Sep 17 00:00:00 2001 From: Angus Gratton Date: Tue, 28 Feb 2017 12:06:36 +1100 Subject: [PATCH] freertos: Fix cross-core usage of event groups Fixes & re-enabled broken unit tests Adds per-event-group spinlock instead of single global lock --- components/freertos/event_groups.c | 32 ++++++++----- .../freertos/test/test_freertos_eventgroups.c | 46 ++++++++++++------- 2 files changed, 50 insertions(+), 28 deletions(-) diff --git a/components/freertos/event_groups.c b/components/freertos/event_groups.c index 902a4ad72a..69903817f4 100644 --- a/components/freertos/event_groups.c +++ b/components/freertos/event_groups.c @@ -119,13 +119,10 @@ typedef struct xEventGroupDefinition UBaseType_t uxEventGroupNumber; #endif + portMUX_TYPE eventGroupMux; } EventGroup_t; -/* Again: one mux for all events. Maybe this can be made more granular. ToDo: look into that. -JD */ -static portMUX_TYPE xEventGroupMux = portMUX_INITIALIZER_UNLOCKED; - - /*-----------------------------------------------------------*/ /* @@ -156,6 +153,8 @@ EventGroup_t *pxEventBits; traceEVENT_GROUP_CREATE_FAILED(); } + if( pxEventBits != NULL ) { vPortCPUInitializeMutex(&pxEventBits->eventGroupMux); } + return ( EventGroupHandle_t ) pxEventBits; } /*-----------------------------------------------------------*/ @@ -176,6 +175,7 @@ BaseType_t xTimeoutOccurred = pdFALSE; #endif vTaskSuspendAll(); + taskENTER_CRITICAL(&pxEventBits->eventGroupMux); { uxOriginalBitValue = pxEventBits->uxEventBits; @@ -217,6 +217,7 @@ BaseType_t xTimeoutOccurred = pdFALSE; } } } + taskEXIT_CRITICAL( &pxEventBits->eventGroupMux ); xAlreadyYielded = xTaskResumeAll(); if( xTicksToWait != ( TickType_t ) 0 ) @@ -239,7 +240,7 @@ BaseType_t xTimeoutOccurred = pdFALSE; if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 ) { /* The task timed out, just return the current event bit value. 
*/ - taskENTER_CRITICAL( &xEventGroupMux ); + taskENTER_CRITICAL( &pxEventBits->eventGroupMux ); { uxReturn = pxEventBits->uxEventBits; @@ -256,7 +257,7 @@ BaseType_t xTimeoutOccurred = pdFALSE; mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL( &xEventGroupMux ); + taskEXIT_CRITICAL( &pxEventBits->eventGroupMux ); xTimeoutOccurred = pdTRUE; } @@ -295,6 +296,7 @@ BaseType_t xTimeoutOccurred = pdFALSE; #endif vTaskSuspendAll(); + taskENTER_CRITICAL( &pxEventBits->eventGroupMux ); { const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits; @@ -361,6 +363,7 @@ BaseType_t xTimeoutOccurred = pdFALSE; traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor ); } } + taskEXIT_CRITICAL( &pxEventBits->eventGroupMux ); xAlreadyYielded = xTaskResumeAll(); if( xTicksToWait != ( TickType_t ) 0 ) @@ -382,7 +385,7 @@ BaseType_t xTimeoutOccurred = pdFALSE; if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 ) { - taskENTER_CRITICAL( &xEventGroupMux ); + taskENTER_CRITICAL( &pxEventBits->eventGroupMux ); { /* The task timed out, just return the current event bit value. */ uxReturn = pxEventBits->uxEventBits; @@ -405,7 +408,7 @@ BaseType_t xTimeoutOccurred = pdFALSE; mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL( &xEventGroupMux ); + taskEXIT_CRITICAL( &pxEventBits->eventGroupMux ); /* Prevent compiler warnings when trace macros are not used. */ xTimeoutOccurred = pdFALSE; @@ -434,7 +437,7 @@ EventBits_t uxReturn; configASSERT( xEventGroup ); configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 ); - taskENTER_CRITICAL( &xEventGroupMux ); + taskENTER_CRITICAL( &pxEventBits->eventGroupMux ); { traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear ); @@ -445,7 +448,7 @@ EventBits_t uxReturn; /* Clear the bits. 
*/ pxEventBits->uxEventBits &= ~uxBitsToClear; } - taskEXIT_CRITICAL( &xEventGroupMux ); + taskEXIT_CRITICAL( &pxEventBits->eventGroupMux ); return uxReturn; } @@ -498,7 +501,9 @@ BaseType_t xMatchFound = pdFALSE; pxList = &( pxEventBits->xTasksWaitingForBits ); pxListEnd = listGET_END_MARKER( pxList ); /*lint !e826 !e740 The mini list structure is used as the list end to save RAM. This is checked and valid. */ + vTaskSuspendAll(); + taskENTER_CRITICAL(&pxEventBits->eventGroupMux); { traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet ); @@ -570,6 +575,7 @@ BaseType_t xMatchFound = pdFALSE; bit was set in the control word. */ pxEventBits->uxEventBits &= ~uxBitsToClear; } + taskEXIT_CRITICAL(&pxEventBits->eventGroupMux); ( void ) xTaskResumeAll(); return pxEventBits->uxEventBits; @@ -578,10 +584,11 @@ BaseType_t xMatchFound = pdFALSE; void vEventGroupDelete( EventGroupHandle_t xEventGroup ) { -EventGroup_t *pxEventBits = ( EventGroup_t * ) xEventGroup; -const List_t *pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits ); + EventGroup_t *pxEventBits = ( EventGroup_t * ) xEventGroup; + const List_t *pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits ); vTaskSuspendAll(); + taskENTER_CRITICAL( &pxEventBits->eventGroupMux ); { traceEVENT_GROUP_DELETE( xEventGroup ); @@ -593,6 +600,7 @@ const List_t *pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits ); ( void ) xTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET ); } + taskEXIT_CRITICAL( &pxEventBits->eventGroupMux ); vPortFree( pxEventBits ); } ( void ) xTaskResumeAll(); diff --git a/components/freertos/test/test_freertos_eventgroups.c b/components/freertos/test/test_freertos_eventgroups.c index 32dee2d201..dce770af53 100644 --- a/components/freertos/test/test_freertos_eventgroups.c +++ b/components/freertos/test/test_freertos_eventgroups.c @@ -11,9 +11,10 @@ #define BIT_RESPONSE(TASK) (1 << (TASK+1)) #define ALL_RESPONSE_BITS (((1 << 
NUM_TASKS) - 1) << 1) -static const int NUM_TASKS = 4; -static const int COUNT = 4000; +static const int NUM_TASKS = 8; +static const int COUNT = 1000; static EventGroupHandle_t eg; +static SemaphoreHandle_t done_sem; static void task_event_group_call_response(void *param) { @@ -32,15 +33,14 @@ static void task_event_group_call_response(void *param) } printf("Task %d done\n", task_num); - - /* Delay is due to not-yet-fixed bug with deleting tasks at same time */ - vTaskDelay(100 / portTICK_PERIOD_MS); + xSemaphoreGive(done_sem); vTaskDelete(NULL); } -TEST_CASE("FreeRTOS Event Groups", "[freertos][ignore]") +TEST_CASE("FreeRTOS Event Groups", "[freertos]") { eg = xEventGroupCreate(); + done_sem = xSemaphoreCreateCounting(NUM_TASKS, 0); /* Note: task_event_group_call_response all have higher priority than us, so will block together. @@ -50,7 +50,7 @@ TEST_CASE("FreeRTOS Event Groups", "[freertos][ignore]") for (int c = 0; c < NUM_TASKS; c++) { xTaskCreatePinnedToCore(task_event_group_call_response, "tsk_call_resp", 4096, (void *)c, configMAX_PRIORITIES - 1, NULL, c % portNUM_PROCESSORS); } - /* Scheduler weirdness, if we don't sleep a few ticks here then the tasks on the other CPU aren't running yet... */ + /* Scheduler weirdness (bug?), if we don't sleep a few ticks here then the tasks on the other CPU aren't running yet... */ vTaskDelay(10); for (int i = 0; i < COUNT; i++) { @@ -60,11 +60,17 @@ TEST_CASE("FreeRTOS Event Groups", "[freertos][ignore]") /* signal all tasks with "CALL" bit... 
*/ xEventGroupSetBits(eg, BIT_CALL); - while (xEventGroupWaitBits(eg, ALL_RESPONSE_BITS, true, true, portMAX_DELAY) != ALL_RESPONSE_BITS) { - } + TEST_ASSERT_EQUAL(ALL_RESPONSE_BITS, xEventGroupWaitBits(eg, ALL_RESPONSE_BITS, true, true, 100 / portTICK_PERIOD_MS)); } -} + /* Ensure all tasks cleaned up correctly */ + for (int c = 0; c < NUM_TASKS; c++) { + TEST_ASSERT( xSemaphoreTake(done_sem, 100/portTICK_PERIOD_MS) ); + } + + vSemaphoreDelete(done_sem); + vEventGroupDelete(eg); +} #define BIT_DONE(X) (1<<(NUM_TASKS+1+X)) @@ -82,24 +88,32 @@ static void task_test_sync(void *param) } int after_done = xEventGroupSetBits(eg, BIT_DONE(task_num)); - printf("Done %d = %x\n", task_num, after_done); + printf("Done %d = 0x%08x\n", task_num, after_done); - /* Delay is due to not-yet-fixed bug with deleting tasks at same time */ - vTaskDelay(100 / portTICK_PERIOD_MS); + xSemaphoreGive(done_sem); vTaskDelete(NULL); } -TEST_CASE("FreeRTOS Event Group Sync", "[freertos][ignore]") +TEST_CASE("FreeRTOS Event Group Sync", "[freertos]") { eg = xEventGroupCreate(); + done_sem = xSemaphoreCreateCounting(NUM_TASKS, 0); for (int c = 0; c < NUM_TASKS; c++) { xTaskCreatePinnedToCore(task_test_sync, "task_test_sync", 4096, (void *)c, configMAX_PRIORITIES - 1, NULL, c % portNUM_PROCESSORS); } for (int c = 0; c < NUM_TASKS; c++) { printf("Waiting on %d (0x%08x)\n", c, BIT_DONE(c)); TEST_ASSERT( xEventGroupWaitBits(eg, BIT_DONE(c), false, false, portMAX_DELAY) ); } + + /* Ensure all tasks cleaned up correctly */ + for (int c = 0; c < NUM_TASKS; c++) { + TEST_ASSERT( xSemaphoreTake(done_sem, 100/portTICK_PERIOD_MS) ); + } + + vSemaphoreDelete(done_sem); + vEventGroupDelete(eg); }