Merge branch 'feature/freertos_optimize_xtaskincrementtickothercores' into 'master'

feat(freertos): Optimized xTaskIncrementTickOtherCores()

Closes IDF-7905

See merge request espressif/esp-idf!28232
Sudeep Mohanty 2024-01-08 16:07:49 +08:00
commit 5a5869a4a6
2 changed files with 55 additions and 33 deletions
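In summary, before this change xTaskIncrementTick() on core 0 only requested a context switch when the unblocked task could run on core 0 at a higher priority, and xTaskIncrementTickOtherCores() then compared uxTopReadyPriority against the current task on the other core on every tick. With this change, core 0 makes the cross-core decision at the moment a task is unblocked: a task that can run on core 0 and outranks it still triggers a local switch, while a task pinned to the other core, or an unpinned task that outranks that core's current task, sets xYieldPending[ 1 ] instead, which allows the per-tick priority check to be dropped from xTaskIncrementTickOtherCores(). The sketch below is a minimal, self-contained model of the core 0 decision, not the kernel code; the names task_t, handle_unblocked_on_core0() and the hard-coded two-core setup are illustrative assumptions only.

#include <stdio.h>
#include <stdbool.h>

/* Simplified standalone model of the cross-core unblock decision made by
 * core 0 in xTaskIncrementTick(). NO_AFFINITY mirrors tskNO_AFFINITY. */
#define NO_AFFINITY    ( -1 )

typedef struct
{
    int core_id;   /* 0, 1, or NO_AFFINITY */
    int priority;  /* higher number = higher priority */
} task_t;

/* Decide, on core 0, what to do when 'unblocked' becomes ready.
 * Returns true if core 0 itself should context switch; sets
 * *yield_pending_core1 when the other core should yield instead. */
static bool handle_unblocked_on_core0( const task_t * unblocked,
                                       int cur_prio_core0,
                                       int cur_prio_core1,
                                       bool * yield_pending_core1 )
{
    bool switch_core0 = false;

    if( ( unblocked->core_id == 0 ) || ( unblocked->core_id == NO_AFFINITY ) )
    {
        /* Task can run on core 0: switch locally if it outranks core 0. */
        if( unblocked->priority > cur_prio_core0 )
        {
            switch_core0 = true;
        }
        /* An unpinned task that does not preempt core 0 may still
         * outrank whatever is running on core 1. */
        else if( ( unblocked->core_id == NO_AFFINITY ) &&
                 ( unblocked->priority > cur_prio_core1 ) )
        {
            *yield_pending_core1 = true;
        }
    }
    else /* Pinned to core 1. */
    {
        if( unblocked->priority > cur_prio_core1 )
        {
            *yield_pending_core1 = true;
        }
    }

    return switch_core0;
}

int main( void )
{
    bool yield_core1 = false;
    task_t t = { .core_id = NO_AFFINITY, .priority = 7 };

    /* Core 0 runs priority 9, core 1 runs priority 5: expect no switch on
     * core 0, but a pending yield flagged for core 1. */
    bool switch0 = handle_unblocked_on_core0( &t, 9, 5, &yield_core1 );
    printf( "switch core0: %d, yield pending core1: %d\n", switch0, yield_core1 );
    return 0;
}

In the kernel code, taskIS_AFFINITY_COMPATIBLE( 0, pxTCB ) is true both for a task pinned to core 0 and for a tskNO_AFFINITY task, which is why the additional comparison against core 1 appears as an else-if inside that branch.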


@@ -6,7 +6,7 @@
*
* SPDX-License-Identifier: MIT
*
* SPDX-FileContributor: 2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileContributor: 2023-2024 Espressif Systems (Shanghai) CO LTD
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
@@ -3284,15 +3284,52 @@ BaseType_t xTaskIncrementTick( void )
* For SMP, since this function is only run on core
* 0, we only need to context switch if the unblocked
* task can run on core 0 and has a higher priority
* than the current task. */
if( ( taskIS_AFFINITY_COMPATIBLE( 0, pxTCB ) == pdTRUE ) && ( pxTCB->uxPriority > pxCurrentTCBs[ 0 ]->uxPriority ) )
* than the current task.
*
* If the unblocked task has affinity to the other
* core or no affinity then we need to set xYieldPending
* for the other core if the unblocked task has a priority
* higher than the priority of the currently running task
* on the other core. */
if( taskIS_AFFINITY_COMPATIBLE( 0, pxTCB ) == pdTRUE )
{
xSwitchRequired = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
if( pxTCB->uxPriority > pxCurrentTCBs[ 0 ]->uxPriority )
{
xSwitchRequired = pdTRUE;
}
#if ( configNUMBER_OF_CORES > 1 )
else if( pxTCB->xCoreID == tskNO_AFFINITY )
{
if( pxTCB->uxPriority > pxCurrentTCBs[ 1 ]->uxPriority )
{
xYieldPending[ 1 ] = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* if ( configNUMBER_OF_CORES > 1 ) */
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#if ( configNUMBER_OF_CORES > 1 )
else
{
if( pxTCB->uxPriority > pxCurrentTCBs[ 1 ]->uxPriority )
{
xYieldPending[ 1 ] = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* if ( configNUMBER_OF_CORES > 1 ) */
}
#endif /* configUSE_PREEMPTION */
}
@@ -5440,7 +5477,7 @@ static void prvResetNextTaskUnblockTime( void )
/* Write the rest of the string. */
#if ( configTASKLIST_INCLUDE_COREID == 1 )
{
const BaseType_t xCoreID = ( pxTaskStatusArray[ x ].xCoreID == tskNO_AFFINITY ) ? -1 : pxTaskStatusArray[ x ].xCoreID ;
const BaseType_t xCoreID = ( pxTaskStatusArray[ x ].xCoreID == tskNO_AFFINITY ) ? -1 : pxTaskStatusArray[ x ].xCoreID;
sprintf( pcWriteBuffer, "\t%c\t%u\t%d\t%u\t%u\r\n", cStatus, ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority, ( int ) xCoreID, ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark, ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
}
#else /* configTASKLIST_INCLUDE_COREID == 1 */


@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -87,9 +87,8 @@ _Static_assert( tskNO_AFFINITY == ( BaseType_t ) CONFIG_FREERTOS_NO_AFFINITY, "C
/* This function should never be called by Core 0. */
configASSERT( xCoreID != 0 );
/* Called by the portable layer each time a tick interrupt occurs.
* Increments the tick then checks to see if the new tick value will
* cause any tasks to be unblocked. */
/* Called by the portable layer each time a tick interrupt occurs
* on a core other than core 0. */
traceTASK_INCREMENT_TICK( xTickCount );
if( uxSchedulerSuspended[ xCoreID ] == ( UBaseType_t ) 0U )
@@ -98,23 +97,6 @@ _Static_assert( tskNO_AFFINITY == ( BaseType_t ) CONFIG_FREERTOS_NO_AFFINITY, "C
* kernel data structures. */
taskENTER_CRITICAL_ISR( &xKernelLock );
/* A task being unblocked cannot cause an immediate context switch
* if preemption is turned off. */
#if ( configUSE_PREEMPTION == 1 )
{
/* Check if core 0 calling xTaskIncrementTick() has
* unblocked a task that can be run. */
if( uxTopReadyPriority > pxCurrentTCBs[ xCoreID ]->uxPriority )
{
xSwitchRequired = pdTRUE;
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* if ( configUSE_PREEMPTION == 1 ) */
/* Tasks of equal priority to the currently running task will share
* processing time (time slice) if preemption is on, and the application
* writer has not explicitly turned time slicing off. */
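The preemption block removed above was the old per-tick check on the other core; since core 0 now flags the other core directly via xYieldPending[ 1 ], that comparison against uxTopReadyPriority is no longer needed here. Below is a toy producer/consumer model of the idea, an illustration only: it assumes, which this diff does not show, that the other core's tick path consumes its xYieldPending entry, and real kernel code would do this inside a critical section (see taskENTER_CRITICAL_ISR above) rather than with a bare volatile flag.

#include <stdio.h>
#include <stdbool.h>

/* Toy model of a per-core pending-yield flag: core 0 is the producer,
 * core 1 the consumer. Models xYieldPending[]; not the kernel code. */
static volatile bool yield_pending[ 2 ];

/* Producer side, run on core 0 when it unblocks a task for core 1. */
static void core0_flag_other_core( void )
{
    yield_pending[ 1 ] = true;
}

/* Consumer side, run from core 1's tick path: a context switch is requested
 * only when core 0 has flagged one, so no per-tick priority comparison
 * against uxTopReadyPriority is needed any more. */
static bool core1_tick_switch_required( void )
{
    bool switch_required = yield_pending[ 1 ];

    yield_pending[ 1 ] = false;
    return switch_required;
}

int main( void )
{
    printf( "tick 1: switch=%d\n", core1_tick_switch_required() ); /* prints 0 */
    core0_flag_other_core();
    printf( "tick 2: switch=%d\n", core1_tick_switch_required() ); /* prints 1 */
    return 0;
}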
@@ -891,7 +873,8 @@ uint8_t * pxTaskGetStackStart( TaskHandle_t xTask )
*
* @note There are currently differing number of task list between SMP FreeRTOS and ESP-IDF FreeRTOS
*/
static List_t * non_ready_task_lists[] = {
static List_t * non_ready_task_lists[] =
{
#ifdef CONFIG_FREERTOS_SMP
&xPendingReadyList,
#else /* CONFIG_FREERTOS_SMP */
@@ -1104,7 +1087,8 @@ UBaseType_t uxTaskGetSnapshotAll( TaskSnapshot_t * const pxTaskSnapshotArray,
pxCurTaskList = pxGetNextTaskList( pxCurTaskList );
}
if (pxTCBSize != NULL) {
if( pxTCBSize != NULL )
{
*pxTCBSize = sizeof( TCB_t );
}
@@ -1151,7 +1135,8 @@ void * pvTaskGetCurrentTCBForCore( BaseType_t xCoreID )
ESP_FREERTOS_DEBUG_TABLE_END,
};
const DRAM_ATTR uint8_t FreeRTOS_openocd_params[ ESP_FREERTOS_DEBUG_TABLE_END ] = {
const DRAM_ATTR uint8_t FreeRTOS_openocd_params[ ESP_FREERTOS_DEBUG_TABLE_END ] =
{
ESP_FREERTOS_DEBUG_TABLE_END, /* table size */
1, /* table version */
tskKERNEL_VERSION_MAJOR,