Merge branch 'bugfix/freertos_event_groups' into 'master'

FreeRTOS: Fix cross-core event group sync

As above

Also includes scheduler fixes that allowed removing some semi-hacky workarounds from the event group unit tests; specifically, higher-priority tasks are now always started immediately, even when they run on the opposite core.

See merge request !535
Ivan Grokhotkov 2017-03-03 11:45:59 +08:00
commit ccbc6183c3
4 changed files with 120 additions and 79 deletions
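
For context on what the scheduler part of this change enables (a hypothetical snippet, not part of the commit: high_prio_worker and example_spawn_on_other_core are made-up names, the APIs are the ESP-IDF ones used in the tests below): a task created at higher priority and pinned to the opposite core should now be started immediately, instead of that core only picking it up on its next tick interrupt.

#include <stdio.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

/* Hypothetical worker: with this fix it starts running as soon as it is created,
   even though it was created from the other core. */
static void high_prio_worker(void *param)
{
    printf("worker running on core %d\n", xPortGetCoreID());
    vTaskDelete(NULL);
}

void example_spawn_on_other_core(void)
{
    const BaseType_t other_core = (xPortGetCoreID() == 0) ? 1 : 0;  /* assumes a dual-core target */
    /* The scheduler change below asks the other core to yield right away if the
       new task outranks whatever that core is currently running. */
    xTaskCreatePinnedToCore(high_prio_worker, "worker", 4096, NULL,
                            configMAX_PRIORITIES - 1, NULL, other_core);
}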


@@ -119,13 +119,10 @@ typedef struct xEventGroupDefinition
UBaseType_t uxEventGroupNumber;
#endif
portMUX_TYPE eventGroupMux;
} EventGroup_t;
/* Again: one mux for all events. Maybe this can be made more granular. ToDo: look into that. -JD */
static portMUX_TYPE xEventGroupMux = portMUX_INITIALIZER_UNLOCKED;
/*-----------------------------------------------------------*/
/*
@@ -156,6 +153,8 @@ EventGroup_t *pxEventBits;
traceEVENT_GROUP_CREATE_FAILED();
}
vPortCPUInitializeMutex(&pxEventBits->eventGroupMux);
return ( EventGroupHandle_t ) pxEventBits;
}
/*-----------------------------------------------------------*/
@@ -176,6 +175,7 @@ BaseType_t xTimeoutOccurred = pdFALSE;
#endif
vTaskSuspendAll();
taskENTER_CRITICAL(&pxEventBits->eventGroupMux);
{
uxOriginalBitValue = pxEventBits->uxEventBits;
@@ -217,6 +217,7 @@ BaseType_t xTimeoutOccurred = pdFALSE;
}
}
}
taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
xAlreadyYielded = xTaskResumeAll();
if( xTicksToWait != ( TickType_t ) 0 )
@@ -239,7 +240,7 @@ BaseType_t xTimeoutOccurred = pdFALSE;
if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
{
/* The task timed out, just return the current event bit value. */
taskENTER_CRITICAL( &xEventGroupMux );
taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
{
uxReturn = pxEventBits->uxEventBits;
@@ -256,7 +257,7 @@ BaseType_t xTimeoutOccurred = pdFALSE;
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL( &xEventGroupMux );
taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
xTimeoutOccurred = pdTRUE;
}
@@ -295,6 +296,7 @@ BaseType_t xTimeoutOccurred = pdFALSE;
#endif
vTaskSuspendAll();
taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
{
const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits;
@@ -361,6 +363,7 @@ BaseType_t xTimeoutOccurred = pdFALSE;
traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
}
}
taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
xAlreadyYielded = xTaskResumeAll();
if( xTicksToWait != ( TickType_t ) 0 )
@@ -382,7 +385,7 @@ BaseType_t xTimeoutOccurred = pdFALSE;
if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
{
taskENTER_CRITICAL( &xEventGroupMux );
taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
{
/* The task timed out, just return the current event bit value. */
uxReturn = pxEventBits->uxEventBits;
@@ -405,7 +408,7 @@ BaseType_t xTimeoutOccurred = pdFALSE;
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL( &xEventGroupMux );
taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
/* Prevent compiler warnings when trace macros are not used. */
xTimeoutOccurred = pdFALSE;
@@ -434,7 +437,7 @@ EventBits_t uxReturn;
configASSERT( xEventGroup );
configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
taskENTER_CRITICAL( &xEventGroupMux );
taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
{
traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear );
@@ -445,7 +448,7 @@ EventBits_t uxReturn;
/* Clear the bits. */
pxEventBits->uxEventBits &= ~uxBitsToClear;
}
taskEXIT_CRITICAL( &xEventGroupMux );
taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
return uxReturn;
}
@@ -498,7 +501,9 @@ BaseType_t xMatchFound = pdFALSE;
pxList = &( pxEventBits->xTasksWaitingForBits );
pxListEnd = listGET_END_MARKER( pxList ); /*lint !e826 !e740 The mini list structure is used as the list end to save RAM. This is checked and valid. */
vTaskSuspendAll();
taskENTER_CRITICAL(&pxEventBits->eventGroupMux);
{
traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );
@@ -570,6 +575,7 @@ BaseType_t xMatchFound = pdFALSE;
bit was set in the control word. */
pxEventBits->uxEventBits &= ~uxBitsToClear;
}
taskEXIT_CRITICAL(&pxEventBits->eventGroupMux);
( void ) xTaskResumeAll();
return pxEventBits->uxEventBits;
@@ -582,6 +588,7 @@ EventGroup_t *pxEventBits = ( EventGroup_t * ) xEventGroup;
const List_t *pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );
vTaskSuspendAll();
taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
{
traceEVENT_GROUP_DELETE( xEventGroup );
@@ -593,6 +600,7 @@ const List_t *pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );
( void ) xTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
}
taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
vPortFree( pxEventBits );
}
( void ) xTaskResumeAll();
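
Taken together, the changes above drop the old file-wide xEventGroupMux in favour of a per-group eventGroupMux that xEventGroupCreate() initialises with vPortCPUInitializeMutex(), and they take that mux in addition to suspending the scheduler wherever the group's bits or wait list are touched. The point of the extra spinlock is that, on this port, vTaskSuspendAll() only stops preemption on the calling core, so the other core could still race on the event bits; making the mux per-group also keeps unrelated event groups from contending with each other. A rough, hypothetical sketch of the resulting pattern (sketch_event_group_t and sketch_set_bits are made-up stand-ins, not the real EventGroup_t or API):

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/event_groups.h"

typedef struct {
    EventBits_t uxEventBits;
    portMUX_TYPE eventGroupMux;   /* per-group spinlock, as added by this change;
                                     initialise with vPortCPUInitializeMutex() before use,
                                     as xEventGroupCreate() does */
} sketch_event_group_t;

static EventBits_t sketch_set_bits(sketch_event_group_t *grp, EventBits_t bits)
{
    vTaskSuspendAll();                          /* no preemption on this core */
    taskENTER_CRITICAL(&grp->eventGroupMux);    /* and lock out the other core */
    {
        grp->uxEventBits |= bits;
        /* ...the real code walks the wait list and unblocks matching tasks here... */
    }
    taskEXIT_CRITICAL(&grp->eventGroupMux);
    ( void ) xTaskResumeAll();
    return grp->uxEventBits;
}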


@@ -1046,25 +1046,59 @@ UBaseType_t x;
}
/*-----------------------------------------------------------*/
static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode, const BaseType_t xCoreID )
static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode, BaseType_t xCoreID )
{
TCB_t *curTCB;
BaseType_t i;
TCB_t *curTCB, *tcb0, *tcb1;
/* Ensure interrupts don't access the task lists while the lists are being
updated. */
taskENTER_CRITICAL(&xTaskQueueMutex);
{
uxCurrentNumberOfTasks++;
//If the task has no affinity and nothing is scheduled on this core, just throw it this core.
//If it has affinity, throw it on the core that needs it if nothing is already scheduled there.
BaseType_t xMyCore = xCoreID;
if ( xMyCore == tskNO_AFFINITY) xMyCore = xPortGetCoreID();
if( pxCurrentTCB[ xMyCore ] == NULL )
// Determine which core this task starts on
if ( xCoreID == tskNO_AFFINITY )
{
if ( portNUM_PROCESSORS == 1 )
{
xCoreID = 0;
}
else
{
// if the task has no affinity, put it on either core if nothing is currently scheduled there. Failing that,
// put it on the core where it will preempt the lowest priority running task. If neither of these are true,
// queue it on the currently running core.
tcb0 = pxCurrentTCB[0];
tcb1 = pxCurrentTCB[1];
if ( tcb0 == NULL )
{
xCoreID = 0;
}
else if ( tcb1 == NULL )
{
xCoreID = 1;
}
else if ( tcb0->uxPriority < pxNewTCB->uxPriority && tcb0->uxPriority < tcb1->uxPriority )
{
xCoreID = 0;
}
else if ( tcb1->uxPriority < pxNewTCB->uxPriority )
{
xCoreID = 1;
}
else
{
xCoreID = xPortGetCoreID(); // Both CPU have higher priority tasks running on them, so this won't run yet
}
}
}
// If nothing is running on this core, put the new task there now
if( pxCurrentTCB[ xCoreID ] == NULL )
{
/* There are no other tasks, or all the other tasks are in
the suspended state - make this the current task. */
pxCurrentTCB[ xMyCore ] = pxNewTCB;
pxCurrentTCB[ xCoreID ] = pxNewTCB;
if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 )
{
@@ -1090,19 +1124,11 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode
so far. */
if( xSchedulerRunning == pdFALSE )
{
/* Scheduler isn't running yet. We need to determine on which CPU to run this task. */
for ( i=0; i<portNUM_PROCESSORS; i++ )
/* Scheduler isn't running yet. We need to determine on which CPU to run this task.
Schedule now if either nothing is scheduled yet or we can replace a task of lower prio. */
if ( pxCurrentTCB[xCoreID] == NULL || pxCurrentTCB[xCoreID]->uxPriority <= pxNewTCB->uxPriority )
{
/* Can we schedule this task on core i? */
if (xCoreID == tskNO_AFFINITY || xCoreID == i)
{
/* Schedule if nothing is scheduled yet, or overwrite a task of lower prio. */
if ( pxCurrentTCB[i] == NULL || pxCurrentTCB[i]->uxPriority <= pxNewTCB->uxPriority )
{
pxCurrentTCB[i] = pxNewTCB;
break;
}
}
pxCurrentTCB[xCoreID] = pxNewTCB;
}
}
else
@@ -1131,30 +1157,20 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode
if( xSchedulerRunning != pdFALSE )
{
taskENTER_CRITICAL(&xTaskQueueMutex);
curTCB = pxCurrentTCB[ xPortGetCoreID() ];
curTCB = pxCurrentTCB[ xCoreID ];
/* Scheduler is running. If the created task is of a higher priority than an executing task
then it should run now.
ToDo: This only works for the current core. If a task is scheduled on an other processor,
the other processor will keep running the task it's working on, and only switch to the newer
task on a timer interrupt. */
//No mux here, uxPriority is mostly atomic and there's not really any harm if this check misfires.
if( curTCB->uxPriority < pxNewTCB->uxPriority )
{
/* Scheduler is running. If the created task is of a higher priority than an executing task
then it should run now.
No mux here, uxPriority is mostly atomic and there's not really any harm if this check misfires.
*/
if( tskCAN_RUN_HERE( xCoreID ) && curTCB->uxPriority < pxNewTCB->uxPriority )
if( curTCB == NULL || curTCB->uxPriority < pxNewTCB->uxPriority )
{
if( xCoreID == xPortGetCoreID() )
{
taskYIELD_IF_USING_PREEMPTION_MUX(&xTaskQueueMutex);
}
else if( xCoreID != xPortGetCoreID() ) {
else {
taskYIELD_OTHER_CORE(xCoreID, pxNewTCB->uxPriority);
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
else
{
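
Condensing the new selection rules in prvAddNewTaskToReadyList: a single-core build always uses core 0; otherwise an unpinned task goes to an idle core if there is one, then to the core whose running task has the lowest priority below the new task's, and failing that it is queued on the calling core. A hypothetical restatement of that decision tree (sketch_tcb_t and sketch_select_core are made-up stand-ins for TCB_t and the inline logic above):

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

typedef struct {
    UBaseType_t uxPriority;
} sketch_tcb_t;

/* Pick a core for an unpinned task in the dual-core case (illustration only;
   the real code operates on TCB_t and pxCurrentTCB[] directly). */
static BaseType_t sketch_select_core(const sketch_tcb_t *running0,   /* task on core 0, or NULL */
                                     const sketch_tcb_t *running1,   /* task on core 1, or NULL */
                                     UBaseType_t new_task_priority)
{
    if (running0 == NULL) {
        return 0;                       /* core 0 idle: start there now */
    }
    if (running1 == NULL) {
        return 1;                       /* core 1 idle: start there now */
    }
    if (running0->uxPriority < new_task_priority && running0->uxPriority < running1->uxPriority) {
        return 0;                       /* preempt the lowest-priority running task */
    }
    if (running1->uxPriority < new_task_priority) {
        return 1;
    }
    return xPortGetCoreID();            /* both cores run higher-priority work: queue locally */
}

Once a core is chosen, the diff either installs the task directly when that core has nothing running, yields the local core via taskYIELD_IF_USING_PREEMPTION_MUX, or pokes the other core with taskYIELD_OTHER_CORE, which is what lets higher-priority tasks start immediately across cores.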


@@ -11,9 +11,10 @@
#define BIT_RESPONSE(TASK) (1 << (TASK+1))
#define ALL_RESPONSE_BITS (((1 << NUM_TASKS) - 1) << 1)
static const int NUM_TASKS = 4;
static const int COUNT = 4000;
static const int NUM_TASKS = 8;
static const int COUNT = 1000;
static EventGroupHandle_t eg;
static SemaphoreHandle_t done_sem;
static void task_event_group_call_response(void *param)
{
@@ -24,47 +25,51 @@ static void task_event_group_call_response(void *param)
for (int i = 0; i < COUNT; i++) {
/* Wait until the common "call" bit is set, starts off all tasks
(clear on return) */
while (!xEventGroupWaitBits(eg, BIT_CALL, true, false, portMAX_DELAY)) {
}
TEST_ASSERT( xEventGroupWaitBits(eg, BIT_CALL, true, false, portMAX_DELAY) );
/* Set our individual "response" bit */
xEventGroupSetBits(eg, BIT_RESPONSE(task_num));
}
printf("Task %d done\n", task_num);
/* Delay is due to not-yet-fixed bug with deleting tasks at same time */
vTaskDelay(100 / portTICK_PERIOD_MS);
xSemaphoreGive(done_sem);
vTaskDelete(NULL);
}
TEST_CASE("FreeRTOS Event Groups", "[freertos][ignore]")
TEST_CASE("FreeRTOS Event Groups", "[freertos]")
{
eg = xEventGroupCreate();
done_sem = xSemaphoreCreateCounting(NUM_TASKS, 0);
/* Note: task_event_group_call_response all have higher priority than us, so will block together.
/* Note: task_event_group_call_response all have higher priority than this task, so on this core
they will always preempt this task.
This is important because we need to know they'll all have blocked on BIT_CALL each time we
signal it, or they get out of sync.
This is important because we need to know all tasks have blocked on BIT_CALL each time we signal it,
or they get out of sync.
*/
for (int c = 0; c < NUM_TASKS; c++) {
xTaskCreatePinnedToCore(task_event_group_call_response, "tsk_call_resp", 4096, (void *)c, configMAX_PRIORITIES - 1, NULL, c % portNUM_PROCESSORS);
}
/* Scheduler weirdness, if we don't sleep a few ticks here then the tasks on the other CPU aren't running yet... */
vTaskDelay(10);
/* Tasks all start instantly, but this task will resume running at the same time as the higher priority tasks on the
other processor may still be setting up, so give a tick for them to also block on BIT_CALL... */
vTaskDelay(1);
for (int i = 0; i < COUNT; i++) {
if (i % 100 == 0) {
//printf("Call %d\n", i);
}
/* signal all tasks with "CALL" bit... */
xEventGroupSetBits(eg, BIT_CALL);
while (xEventGroupWaitBits(eg, ALL_RESPONSE_BITS, true, true, portMAX_DELAY) != ALL_RESPONSE_BITS) {
}
}
TEST_ASSERT_EQUAL_HEX16(ALL_RESPONSE_BITS, xEventGroupWaitBits(eg, ALL_RESPONSE_BITS, true, true, 100 / portTICK_PERIOD_MS));
}
/* Ensure all tasks cleaned up correctly */
for (int c = 0; c < NUM_TASKS; c++) {
TEST_ASSERT( xSemaphoreTake(done_sem, 100/portTICK_PERIOD_MS) );
}
vSemaphoreDelete(done_sem);
vEventGroupDelete(eg);
}
#define BIT_DONE(X) (1<<(NUM_TASKS+1+X))
@@ -82,24 +87,32 @@ static void task_test_sync(void *param)
}
int after_done = xEventGroupSetBits(eg, BIT_DONE(task_num));
printf("Done %d = %x\n", task_num, after_done);
printf("Done %d = 0x%08x\n", task_num, after_done);
/* Delay is due to not-yet-fixed bug with deleting tasks at same time */
vTaskDelay(100 / portTICK_PERIOD_MS);
xSemaphoreGive(done_sem);
vTaskDelete(NULL);
}
TEST_CASE("FreeRTOS Event Group Sync", "[freertos][ignore]")
TEST_CASE("FreeRTOS Event Group Sync", "[freertos]")
{
eg = xEventGroupCreate();
done_sem = xSemaphoreCreateCounting(NUM_TASKS, 0);
for (int c = 0; c < NUM_TASKS; c++) {
xTaskCreatePinnedToCore(task_test_sync, "task_test_sync", 4096, (void *)c, configMAX_PRIORITIES - 1, NULL, c % portNUM_PROCESSORS);
}
for (int c = 0; c < NUM_TASKS; c++) {
printf("Waiting on %d (%x)\n", c, BIT_DONE(c));
xEventGroupWaitBits(eg, BIT_DONE(c), false, false, portMAX_DELAY);
}
printf("Waiting on %d (0x%08x)\n", c, BIT_DONE(c));
TEST_ASSERT( xEventGroupWaitBits(eg, BIT_DONE(c), false, false, portMAX_DELAY) );
}
/* Ensure all tasks cleaned up correctly */
for (int c = 0; c < NUM_TASKS; c++) {
TEST_ASSERT( xSemaphoreTake(done_sem, 100/portTICK_PERIOD_MS) );
}
vSemaphoreDelete(done_sem);
vEventGroupDelete(eg);
}
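
One design note on the updated tests (an observation, not text from the commit): each worker now gives done_sem before calling vTaskDelete(NULL), and the test body takes the counting semaphore once per task before deleting the event group and semaphore, so cleanup waits deterministically for every worker. A minimal hypothetical sketch of that handshake (the sketch_* names are made up):

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "unity.h"

#define SKETCH_NUM_TASKS 8

static SemaphoreHandle_t sketch_done_sem;

static void sketch_setup(void)
{
    sketch_done_sem = xSemaphoreCreateCounting(SKETCH_NUM_TASKS, 0);
}

/* Worker side: report completion, then delete self. */
static void sketch_worker(void *param)
{
    /* ...per-task work... */
    xSemaphoreGive(sketch_done_sem);
    vTaskDelete(NULL);
}

/* Test side: block until every worker has checked in before freeing shared state. */
static void sketch_wait_for_workers(void)
{
    for (int c = 0; c < SKETCH_NUM_TASKS; c++) {
        TEST_ASSERT(xSemaphoreTake(sketch_done_sem, 100 / portTICK_PERIOD_MS));
    }
    vSemaphoreDelete(sketch_done_sem);
}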


@@ -10,13 +10,17 @@
static void task_delete_self(void *param)
{
printf("Task %p running on core %d. Deleting shortly...\n", xTaskGetCurrentTaskHandle(), xPortGetCoreID());
vTaskDelay(5);
vTaskDelete(NULL);
}
TEST_CASE("FreeRTOS Delete Tasks", "[freertos][ignore]")
TEST_CASE("FreeRTOS Delete Tasks", "[freertos]")
{
uint32_t before_count = uxTaskGetNumberOfTasks();
xTaskCreatePinnedToCore(task_delete_self, "tsk_self_a", 4096, NULL, configMAX_PRIORITIES - 1, NULL, 0);
xTaskCreatePinnedToCore(task_delete_self, "tsk_self_a", 4096, NULL, configMAX_PRIORITIES - 1, NULL, 0);
TEST_ASSERT_EQUAL(before_count + 2, uxTaskGetNumberOfTasks());
vTaskDelay(200 / portTICK_PERIOD_MS);
printf("Done?\n");
TEST_ASSERT_EQUAL(before_count, uxTaskGetNumberOfTasks());
}