diff --git a/components/esp_gdbstub/src/port/riscv/gdbstub_riscv.c b/components/esp_gdbstub/src/port/riscv/gdbstub_riscv.c index 7a405d6edb..9e2d1b7846 100644 --- a/components/esp_gdbstub/src/port/riscv/gdbstub_riscv.c +++ b/components/esp_gdbstub/src/port/riscv/gdbstub_riscv.c @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD + * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD * * SPDX-License-Identifier: Apache-2.0 */ @@ -85,8 +85,15 @@ void esp_gdbstub_int(__attribute__((unused)) void *frame) /* Pointer to saved frame is in pxCurrentTCB * See rtos_int_enter function */ - extern void *pxCurrentTCB; - dummy_tcb_t *tcb = pxCurrentTCB; + /* Todo: Provide IDF interface for getting pxCurrentTCB (IDF-8182) */ + int core_id = esp_cpu_get_core_id(); +#if CONFIG_FREERTOS_USE_KERNEL_10_5_1 + extern void **pxCurrentTCBs; + dummy_tcb_t *tcb = pxCurrentTCBs[core_id]; +#else + extern void **pxCurrentTCB; + dummy_tcb_t *tcb = pxCurrentTCB[core_id]; +#endif /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */ gdbstub_handle_uart_int((esp_gdbstub_frame_t *)tcb->top_of_stack); } diff --git a/components/freertos/CMakeLists.txt b/components/freertos/CMakeLists.txt index 9ac34ce96a..39a9634432 100644 --- a/components/freertos/CMakeLists.txt +++ b/components/freertos/CMakeLists.txt @@ -32,7 +32,7 @@ if(CONFIG_FREERTOS_SMP) set(kernel_impl "FreeRTOS-Kernel-SMP") else() if(CONFIG_FREERTOS_USE_KERNEL_10_5_1) - message(FATAL_ERROR "FreeRTOS v10.5.1 is not buildable yet. Still under development") + set(kernel_impl "FreeRTOS-Kernel-V10.5.1") else() set(kernel_impl "FreeRTOS-Kernel") endif() @@ -71,30 +71,54 @@ list(APPEND srcs "${kernel_impl}/queue.c" "${kernel_impl}/tasks.c" "${kernel_impl}/timers.c" - "${kernel_impl}/croutine.c" "${kernel_impl}/event_groups.c" "${kernel_impl}/stream_buffer.c") +if(NOT CONFIG_FREERTOS_USE_KERNEL_10_5_1) + list(APPEND srcs "${kernel_impl}/croutine.c") +endif() # Add port source files -list(APPEND srcs - "${kernel_impl}/portable/${arch}/port.c") - -if(arch STREQUAL "linux") +if(CONFIG_FREERTOS_USE_KERNEL_10_5_1) list(APPEND srcs - "${kernel_impl}/portable/${arch}/utils/wait_for_event.c") - if(kernel_impl STREQUAL "FreeRTOS-Kernel") - list(APPEND srcs - "${kernel_impl}/portable/${arch}/port_idf.c") - endif() + "FreeRTOS-Kernel/portable/${arch}/port.c") else() list(APPEND srcs - "${kernel_impl}/portable/${arch}/portasm.S") + "${kernel_impl}/portable/${arch}/port.c") +endif() + +if(arch STREQUAL "linux") + if(CONFIG_FREERTOS_USE_KERNEL_10_5_1) + list(APPEND srcs + "FreeRTOS-Kernel/portable/${arch}/utils/wait_for_event.c" + "FreeRTOS-Kernel/portable/${arch}/port_idf.c") + else() + list(APPEND srcs + "${kernel_impl}/portable/${arch}/utils/wait_for_event.c") + if(kernel_impl STREQUAL "FreeRTOS-Kernel") + list(APPEND srcs + "${kernel_impl}/portable/${arch}/port_idf.c") + endif() + endif() +else() + if(CONFIG_FREERTOS_USE_KERNEL_10_5_1) + list(APPEND srcs + "FreeRTOS-Kernel/portable/${arch}/portasm.S") + else() + list(APPEND srcs + "${kernel_impl}/portable/${arch}/portasm.S") + endif() endif() if(arch STREQUAL "xtensa") - list(APPEND srcs - "${kernel_impl}/portable/${arch}/xtensa_init.c" - "${kernel_impl}/portable/${arch}/xtensa_overlay_os_hook.c") + if(CONFIG_FREERTOS_USE_KERNEL_10_5_1) + list(APPEND srcs + "FreeRTOS-Kernel/portable/${arch}/xtensa_init.c" + "FreeRTOS-Kernel/portable/${arch}/xtensa_overlay_os_hook.c") + else() + list(APPEND srcs + "${kernel_impl}/portable/${arch}/xtensa_init.c" + 
"${kernel_impl}/portable/${arch}/xtensa_overlay_os_hook.c") + endif() endif() # Add ESP-additions source files @@ -127,9 +151,15 @@ list(APPEND include_dirs "${kernel_impl}/include") # FreeRTOS headers via `#include "freertos/xxx.h"` # Add port public include directories -list(APPEND include_dirs - "${kernel_impl}/portable/${arch}/include" # For port headers via `#include "freertos/...h"` - "${kernel_impl}/portable/${arch}/include/freertos") # For port headers via `#include "...h"` +if(CONFIG_FREERTOS_USE_KERNEL_10_5_1) + list(APPEND include_dirs + "FreeRTOS-Kernel/portable/${arch}/include" # For port headers via `#include "freertos/...h"` + "FreeRTOS-Kernel/portable/${arch}/include/freertos") # For port headers via `#include "...h"` +else() + list(APPEND include_dirs + "${kernel_impl}/portable/${arch}/include" # For port headers via `#include "freertos/...h"` + "${kernel_impl}/portable/${arch}/include/freertos") # For port headers via `#include "...h"` +endif() # Add ESP-additions public include directories list(APPEND include_dirs @@ -151,8 +181,13 @@ list(APPEND private_include_dirs # Add port private include directories if(arch STREQUAL "linux") - list(APPEND private_include_dirs - "${kernel_impl}/portable/${arch}/") # Linux port `#include "utils/wait_for_event.h"` + if(CONFIG_FREERTOS_USE_KERNEL_10_5_1) + list(APPEND private_include_dirs + "FreeRTOS-Kernel/portable/${arch}/") # Linux port `#include "utils/wait_for_event.h"` + else() + list(APPEND private_include_dirs + "${kernel_impl}/portable/${arch}/") # Linux port `#include "utils/wait_for_event.h"` + endif() endif() # Add ESP-additions private include directories diff --git a/components/freertos/FreeRTOS-Kernel-V10.5.1/event_groups.c b/components/freertos/FreeRTOS-Kernel-V10.5.1/event_groups.c index 6e4e23ea66..4ad031aefd 100644 --- a/components/freertos/FreeRTOS-Kernel-V10.5.1/event_groups.c +++ b/components/freertos/FreeRTOS-Kernel-V10.5.1/event_groups.c @@ -43,6 +43,8 @@ #include "task.h" #include "timers.h" #include "event_groups.h" +/* Include private IDF API additions for critical thread safety macros */ +#include "esp_private/freertos_idf_additions_priv.h" /* Lint e961, e750 and e9021 are suppressed as a MISRA exception justified * because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined @@ -77,6 +79,8 @@ typedef struct EventGroupDef_t #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */ #endif + + portMUX_TYPE xEventGroupLock; /* Spinlock required for SMP critical sections */ } EventGroup_t; /*-----------------------------------------------------------*/ @@ -131,6 +135,9 @@ static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits, } #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + /* Initialize the event group's spinlock. */ + portMUX_INITIALIZE( &pxEventBits->xEventGroupLock ); + traceEVENT_GROUP_CREATE( pxEventBits ); } else @@ -182,6 +189,9 @@ static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits, } #endif /* configSUPPORT_STATIC_ALLOCATION */ + /* Initialize the event group's spinlock. 
*/ + portMUX_INITIALIZE( &pxEventBits->xEventGroupLock ); + traceEVENT_GROUP_CREATE( pxEventBits ); } else @@ -213,7 +223,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup, } #endif - vTaskSuspendAll(); + prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) ); { uxOriginalBitValue = pxEventBits->uxEventBits; @@ -256,7 +266,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup, } } } - xAlreadyYielded = xTaskResumeAll(); + xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) ); if( xTicksToWait != ( TickType_t ) 0 ) { @@ -278,7 +288,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup, if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 ) { /* The task timed out, just return the current event bit value. */ - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) ); { uxReturn = pxEventBits->uxEventBits; @@ -295,7 +305,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup, mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) ); xTimeoutOccurred = pdTRUE; } @@ -340,7 +350,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup, } #endif - vTaskSuspendAll(); + prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) ); { const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits; @@ -408,7 +418,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup, traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor ); } } - xAlreadyYielded = xTaskResumeAll(); + xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) ); if( xTicksToWait != ( TickType_t ) 0 ) { @@ -429,7 +439,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup, if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 ) { - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) ); { /* The task timed out, just return the current event bit value. */ uxReturn = pxEventBits->uxEventBits; @@ -454,7 +464,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup, xTimeoutOccurred = pdTRUE; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) ); } else { @@ -485,7 +495,7 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup, configASSERT( xEventGroup ); configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) ); { traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear ); @@ -496,7 +506,7 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup, /* Clear the bits. */ pxEventBits->uxEventBits &= ~uxBitsToClear; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) ); return uxReturn; } @@ -552,7 +562,14 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup, pxList = &( pxEventBits->xTasksWaitingForBits ); pxListEnd = listGET_END_MARKER( pxList ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */ - vTaskSuspendAll(); + + prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) ); + #if ( configNUMBER_OF_CORES > 1 ) + + /* We are about to traverse a task list which is a kernel data structure. + * Thus we need to call prvTakeKernelLock() to take the kernel lock. 
*/ + prvTakeKernelLock(); + #endif /* configNUMBER_OF_CORES > 1 */ { traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet ); @@ -624,7 +641,12 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup, * bit was set in the control word. */ pxEventBits->uxEventBits &= ~uxBitsToClear; } - ( void ) xTaskResumeAll(); + #if ( configNUMBER_OF_CORES > 1 ) + /* Release the previously taken kernel lock. */ + prvReleaseKernelLock(); + #endif /* configNUMBER_OF_CORES > 1 */ + ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) ); + return pxEventBits->uxEventBits; } @@ -639,7 +661,13 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup ) pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits ); - vTaskSuspendAll(); + prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) ); + #if ( configNUMBER_OF_CORES > 1 ) + + /* We are about to traverse a task list which is a kernel data structure. + * Thus we need to call prvTakeKernelLock() to take the kernel lock. */ + prvTakeKernelLock(); + #endif /* configNUMBER_OF_CORES > 1 */ { traceEVENT_GROUP_DELETE( xEventGroup ); @@ -651,7 +679,11 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup ) vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET ); } } - ( void ) xTaskResumeAll(); + #if ( configNUMBER_OF_CORES > 1 ) + /* Release the previously taken kernel lock. */ + prvReleaseKernelLock(); + #endif /* configNUMBER_OF_CORES > 1 */ + prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) ); #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) ) { diff --git a/components/freertos/FreeRTOS-Kernel-V10.5.1/idf_changes.md b/components/freertos/FreeRTOS-Kernel-V10.5.1/idf_changes.md index 2187fd068f..cbaf5d6e00 100644 --- a/components/freertos/FreeRTOS-Kernel-V10.5.1/idf_changes.md +++ b/components/freertos/FreeRTOS-Kernel-V10.5.1/idf_changes.md @@ -2,6 +2,8 @@ This document is used to track all changes made the to FreeRTOS V10.5.1 source code when adding dual core SMP support or IDF additional features. +Todo: Add these to ESP-IDF docs once v10.5.1 becomes default kernel (IDF-8203) + ## License Headers - Added `SPDX-FileCopyrightText` and `SPDX-FileContributor` tags to all files to pass ESP-IDF pre-commit checks. @@ -13,4 +15,131 @@ This document is used to track all changes made the to FreeRTOS V10.5.1 source c ## Changes from Upstream Main Branch not Included in v10.5.1 -- Added ...GetStaticBuffers functions that were upstreamed but not included in v10.5.1 \ No newline at end of file +- Added ...GetStaticBuffers functions that were upstreamed but not included in v10.5.1 + +## Kernel SMP Changes + +List of changes to the Vanilla FreeRTOS V10.5.1 kernel in order to support dual-core SMP + +### Scheduling Behavior Changes + +- The kernel now executes two tasks concurrently +- The kernel now creates two IDLE tasks (pinned to each core) +- Tasks can be pinned to either core, or have no affinity (can run on both cores) +- Each core receives a tick interrupt, but only core 0 increments the tick count and unblocks timed out tasks + - Core 0 calls `xTaskIncrementTick()` + - Core 1 calls `xTaskIncrementTickOtherCores()` +- Each core independently calls `vTaskSwitchContext()` to pick the highest priority task it can currently run + - In single-core scheduling algorithm `taskSELECT_HIGHEST_PRIORITY_TASK()` unchanged + - In SMP, `prvSelectHighestPriorityTaskSMP()` is called. 
This will select the highest priority ready state task that...
+    - Has a compatible core affinity
+    - Is not being run by another core
+- Each core can suspend scheduling independently (i.e., `vTaskSuspendAll()`)
+
+### Configuration
+
+The following configurations have been added:
+
+- Added `configNUMBER_OF_CORES` to specify the number of cores to build. Can be `1` for vanilla, or `2` for SMP; any other value is an error
+- Disabled `configUSE_PORT_OPTIMISED_TASK_SELECTION` for SMP
+
+### Data Structure Changes (`tasks.c`)
+
+The following data fields have been expanded to have `configNUMBER_OF_CORES` copies:
+
+- `pxCurrentTCBs`: Each core now has its own currently running task
+- `xPendingReadyList`: Each core has its own list to pend ready tasks if the scheduler is suspended on the core
+- `xYieldPending`: Each core has its own flag to track whether it has a pending yield
+- `xIdleTaskHandle`: Each core now has its own idle task
+- `uxSchedulerSuspended`: Each core can independently suspend scheduling on its core
+- `ulTaskSwitchedInTime`: Each core tracks its own "task switched in" time
+
+Access to these fields is now indexed by `xCoreID` in SMP, or fixed to `0` in single-core builds.
+
+The following data structures have been added:
+
+- `TCB_t.xCoreID`: All tasks now store their core affinity in a TCB member. Always set to 0 in single-core
+
+### API Additions
+
+The following APIs have been added to support SMP:
+
+- `xTaskCreatePinnedToCore()` and `xTaskCreateStaticPinnedToCore()` to create tasks with a core affinity (a short usage sketch follows the function list below)
+  - In single-core, core affinity is ignored. Same behavior as `xTaskCreate()`
+- `xTaskGetCoreID()` to get a task's affinity
+- Added `ForCore()` versions of the following APIs
+  - `xTaskGetIdleTaskHandleForCore()`
+  - `xTaskGetCurrentTaskHandleForCore()`
+  - `ulTaskGetIdleRunTimeCounterForCore()`
+
+### API Modifications
+
+Added the following macros that abstract away single-core and SMP differences:
+
+- `taskYIELD_CORE()` triggers a particular core to yield
+- `taskIS_YIELD_REQUIRED()`/`taskIS_YIELD_REQUIRED_USING_PRIORITY()` check whether the current core requires a yield after a task is unblocked
+- `taskIS_AFFINITY_COMPATIBLE()` checks if a task has compatible affinity
+- `taskIS_CURRENTLY_RUNNING()`/`taskIS_CURRENTLY_RUNNING_ON_CORE()` check if a task is running on either core
+- `taskCAN_BE_SCHEDULED()` checks if an unblocked task can be scheduled on any core
+- `taskIS_SCHEDULER_SUSPENDED()` checks if the scheduler on the current core is suspended
+- `taskSELECT_HIGHEST_PRIORITY_TASK()` selects the highest priority task to execute for the current core
+- `prvGetTCBFromHandle()` updated in SMP to call `xTaskGetCurrentTaskHandle()` when the handle is `NULL`. Done so for thread safety (in case the current task switches cores at the same time).
+
+The following functions were modified to accommodate SMP behavior:
+
+- `prvInitialiseNewTask()`
+  - Added an `xCoreID` argument to pin the task on creation
+  - For single-core, `xCoreID` is hard coded to `0`
+- `prvAddNewTaskToReadyList()`
+  - Checks if the new task can be scheduled on core 1
+- `vTaskDelete()`
+  - Checks if the deleted task is currently running on the other core.
+  - If so, sends a yield to the other core.
+- `vTaskPrioritySet()`
+  - Checks if the task is currently running on either core, and yields the appropriate core if so
+- `vTaskSuspend()`
+  - Checks if the task is currently running on the other core, and yields the other core if so.
+- `prvTaskIsTaskSuspended()`
+  - Checks the `xPendingReadyList` of both cores to see if a task is suspended
+- `xTaskResumeAll()`
+  - Limits catching up of tick counts to core 0 (given only core 0 calls `xTaskIncrementTick()`)
+- `xTaskIncrementTick()`
+  - Limited to core 0
+- `vTaskSwitchContext()`
+  - Switches context for the current core
+- `xTaskRemoveFromEventList()`
+  - Created an SMP copy of the function
+  - Checks if `pxEventList` has already been emptied by the other core before removing
+  - Checks if the task can be scheduled on both cores, and adds it to the appropriate core's pending list if it can't be scheduled.
+- `vTaskRemoveFromUnorderedEventList()`
+  - In SMP, checks if the task can be scheduled before adding it to the appropriate list. In single-core, the scheduler is always suspended, so the unblocked task always goes onto the pending ready list.
+- `eTaskConfirmSleepModeStatus()`
+  - Updated logic to determine whether sleep is possible in SMP by checking the status of both cores.
+- `prvCheckTasksWaitingTermination()`
+  - Updated logic so that we don't delete tasks on `xTasksWaitingTermination` which are still currently running on the other core.
+- `prvAddCurrentTaskToDelayedList()`
+  - Added an extra check to see if the current blocking task has already been deleted by the other core.
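+
+As a rough usage sketch of the task creation additions described above (the task body, names, stack sizes, and priorities here are illustrative only and are not part of the kernel changes):
+
+```c
+#include "freertos/FreeRTOS.h"
+#include "freertos/task.h"
+
+/* Illustrative task body. FreeRTOS tasks must never return. */
+static void worker_task( void * pvParameters )
+{
+    for( ; ; )
+    {
+        vTaskDelay( pdMS_TO_TICKS( 1000 ) );
+    }
+}
+
+void app_main( void )
+{
+    /* Pin a task to core 0. Note that the stack depth is specified in bytes in ESP-IDF. */
+    xTaskCreatePinnedToCore( worker_task, "worker_core0", 4096, NULL, 5, NULL, 0 );
+
+    /* Create an unpinned task; the scheduler may run it on either core. */
+    xTaskCreatePinnedToCore( worker_task, "worker_any", 4096, NULL, 5, NULL, tskNO_AFFINITY );
+}
+```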
+
+### Critical Section Changes
+
+- Granular Locks: The following objects are now given their own spinlocks
+  - Kernel objects (i.e., `tasks.c`): `xKernelLock`
+  - Each queue: `xQueueLock`
+  - Queue Registry: `xQueueRegistryLock`
+  - Each event group: `xEventGroupLock`
+  - Each stream buffer: `xStreamBufferLock`
+  - All timers: `xTimerLock`
+- Critical sections now target the appropriate spinlocks
+- Added missing critical sections for SMP (see `..._SMP_ONLY()` critical section calls)
+- Queues no longer use queue locks (see `queueUSE_LOCKS`)
+  - Queues now just use critical sections and skip queue locking
+  - Queue functions can now execute within a single critical section block
+
+## Single Core Differences
+
+List of differences between Vanilla FreeRTOS V10.5.1 and building the dual-core SMP kernel with `configNUMBER_OF_CORES == 1`.
+
+- `prvAddNewTaskToReadyList()`
+  - Extended critical section so that SMP can check for yields while still inside the critical section
+- `vTaskStepTick()`
+  - Extended critical section so that SMP can access `xTickCount` while still inside the critical section
diff --git a/components/freertos/FreeRTOS-Kernel-V10.5.1/include/freertos/FreeRTOS.h b/components/freertos/FreeRTOS-Kernel-V10.5.1/include/freertos/FreeRTOS.h index 5fca962bb7..dd8a67905a 100644 --- a/components/freertos/FreeRTOS-Kernel-V10.5.1/include/freertos/FreeRTOS.h +++ b/components/freertos/FreeRTOS-Kernel-V10.5.1/include/freertos/FreeRTOS.h @@ -135,6 +135,24 @@ * within FreeRTOSConfig.h. */ +#ifndef configNUMBER_OF_CORES + #error Missing definition: configNUMBER_OF_CORES must be defined in FreeRTOSConfig.h +#endif + +#if ( configNUMBER_OF_CORES > 1 ) + #ifndef portGET_CORE_ID + #error "Missing definition: portGET_CORE_ID() must be defined in portmacro.h if configNUMBER_OF_CORES > 1" + #endif + #ifndef portYIELD_CORE + #error "Missing definition: portYIELD_CORE() must be defined in portmacro.h if configNUMBER_OF_CORES > 1" + #endif +#elif ( configNUMBER_OF_CORES == 1 ) + #undef portGET_CORE_ID + #define portGET_CORE_ID() 0 +#else + #error configNUMBER_OF_CORES must be defined to either 1 or > 1.
+#endif /* if ( configNUMBER_OF_CORES > 1 ) */ + #ifndef configMINIMAL_STACK_SIZE #error Missing definition: configMINIMAL_STACK_SIZE must be defined in FreeRTOSConfig.h. configMINIMAL_STACK_SIZE defines the size (in words) of the stack allocated to the idle task. Refer to the demo project provided for your port for a suitable value. #endif @@ -1007,6 +1025,10 @@ #error configUSE_MUTEXES must be set to 1 to use recursive mutexes #endif +#if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PORT_OPTIMISED_TASK_SELECTION != 0 ) ) + #error configUSE_PORT_OPTIMISED_TASK_SELECTION is not supported in SMP +#endif + #ifndef configINITIAL_TICK_COUNT #define configINITIAL_TICK_COUNT 0 #endif @@ -1267,6 +1289,8 @@ typedef struct xSTATIC_TCB UBaseType_t uxDummy5; void * pxDummy6; uint8_t ucDummy7[ configMAX_TASK_NAME_LEN ]; + /* Todo: Remove xCoreID for single core builds (IDF-7894) */ + BaseType_t xDummyCoreID; #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) void * pxDummy8; #endif @@ -1347,6 +1371,7 @@ typedef struct xSTATIC_QUEUE UBaseType_t uxDummy8; uint8_t ucDummy9; #endif + portMUX_TYPE xDummyQueueLock; } StaticQueue_t; typedef StaticQueue_t StaticSemaphore_t; @@ -1376,6 +1401,7 @@ typedef struct xSTATIC_EVENT_GROUP #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) uint8_t ucDummy4; #endif + portMUX_TYPE xDummyEventGroupLock; } StaticEventGroup_t; /* @@ -1430,6 +1456,7 @@ typedef struct xSTATIC_STREAM_BUFFER #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) void * pvDummy5[ 2 ]; #endif + portMUX_TYPE xDummyStreamBufferLock; } StaticStreamBuffer_t; /* Message buffers are built on stream buffers. */ @@ -1441,4 +1468,69 @@ typedef StaticStreamBuffer_t StaticMessageBuffer_t; #endif /* *INDENT-ON* */ +/*----------------------------------------------------------- +* IDF Compatibility +*----------------------------------------------------------*/ + +#ifdef ESP_PLATFORM + +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ + +/* + * Default values for trace macros added by ESP-IDF and are not part of Vanilla FreeRTOS + */ + + #ifndef traceISR_EXIT_TO_SCHEDULER + #define traceISR_EXIT_TO_SCHEDULER() + #endif + + #ifndef traceISR_EXIT + #define traceISR_EXIT() + #endif + + #ifndef traceISR_ENTER + #define traceISR_ENTER( _n_ ) + #endif + + #ifndef traceQUEUE_SEMAPHORE_RECEIVE + #define traceQUEUE_SEMAPHORE_RECEIVE( pxQueue ) + #endif + + #ifndef traceQUEUE_GIVE_FROM_ISR + #define traceQUEUE_GIVE_FROM_ISR( pxQueue ) + #endif + + #ifndef traceQUEUE_GIVE_FROM_ISR_FAILED + #define traceQUEUE_GIVE_FROM_ISR_FAILED( pxQueue ) + #endif + +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ + +/* + * Include ESP-IDF API additions implicitly for compatibility reasons. + * + * ESP-IDF API additions were previously added directly to FreeRTOS headers + * (e.g., task.h, queue.h). These APIs have now been moved to + * idf_additions.h. + * + * To ensure there are no breaking changes, we include idf_additions.h + * implicitly here so that those API additions are still accessible. Given + * that FreeRTOS.h must be included first before calling any FreeRTOS API, + * any existing source code can continue using these relocated APIs without + * any additional header inclusions via this implicit inclusion. 
+ * + * Todo: Deprecate this implicit inclusion by ESP-IDF v6.0 (IDF-8126) + */ + #include "freertos/idf_additions.h" + +#endif /* ESP_PLATFORM */ + #endif /* INC_FREERTOS_H */ diff --git a/components/freertos/FreeRTOS-Kernel-V10.5.1/include/freertos/projdefs.h b/components/freertos/FreeRTOS-Kernel-V10.5.1/include/freertos/projdefs.h index c234d4f2a4..eac820910b 100644 --- a/components/freertos/FreeRTOS-Kernel-V10.5.1/include/freertos/projdefs.h +++ b/components/freertos/FreeRTOS-Kernel-V10.5.1/include/freertos/projdefs.h @@ -46,6 +46,15 @@ typedef void (* TaskFunction_t)( void * ); #define pdMS_TO_TICKS( xTimeInMs ) ( ( TickType_t ) ( ( ( TickType_t ) ( xTimeInMs ) * ( TickType_t ) configTICK_RATE_HZ ) / ( TickType_t ) 1000U ) ) #endif +/* Converts a time in ticks to milliseconds. This macro can be + * overridden by a macro of the same name defined in FreeRTOSConfig.h in case the + * definition here is not suitable for your application. + * + * Todo: Upstream this macro (IDF-8181) */ +#ifndef pdTICKS_TO_MS + #define pdTICKS_TO_MS( xTicks ) ( ( TickType_t ) ( ( uint64_t ) ( xTicks ) * 1000 / configTICK_RATE_HZ ) ) +#endif + #define pdFALSE ( ( BaseType_t ) 0 ) #define pdTRUE ( ( BaseType_t ) 1 ) diff --git a/components/freertos/FreeRTOS-Kernel-V10.5.1/include/freertos/stack_macros.h b/components/freertos/FreeRTOS-Kernel-V10.5.1/include/freertos/stack_macros.h index ea8d23cb59..741b8ad76a 100644 --- a/components/freertos/FreeRTOS-Kernel-V10.5.1/include/freertos/stack_macros.h +++ b/components/freertos/FreeRTOS-Kernel-V10.5.1/include/freertos/stack_macros.h @@ -60,13 +60,13 @@ #if ( ( configCHECK_FOR_STACK_OVERFLOW == 1 ) && ( portSTACK_GROWTH < 0 ) ) /* Only the current stack state is to be checked. */ - #define taskCHECK_FOR_STACK_OVERFLOW() \ - { \ - /* Is the currently saved stack pointer within the stack limit? */ \ - if( pxCurrentTCB->pxTopOfStack <= pxCurrentTCB->pxStack + portSTACK_LIMIT_PADDING ) \ - { \ - vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ - } \ + #define taskCHECK_FOR_STACK_OVERFLOW( xCurCoreID ) \ + { \ + /* Is the currently saved stack pointer within the stack limit? */ \ + if( pxCurrentTCBs[ xCurCoreID ]->pxTopOfStack <= pxCurrentTCBs[ xCurCoreID ]->pxStack + portSTACK_LIMIT_PADDING ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCBs[ xCurCoreID ], pxCurrentTCBs[ xCurCoreID ]->pcTaskName ); \ + } \ } #endif /* configCHECK_FOR_STACK_OVERFLOW == 1 */ @@ -75,14 +75,14 @@ #if ( ( configCHECK_FOR_STACK_OVERFLOW == 1 ) && ( portSTACK_GROWTH > 0 ) ) /* Only the current stack state is to be checked. */ - #define taskCHECK_FOR_STACK_OVERFLOW() \ - { \ - \ - /* Is the currently saved stack pointer within the stack limit? */ \ - if( pxCurrentTCB->pxTopOfStack >= pxCurrentTCB->pxEndOfStack - portSTACK_LIMIT_PADDING ) \ - { \ - vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ - } \ + #define taskCHECK_FOR_STACK_OVERFLOW( xCurCoreID ) \ + { \ + \ + /* Is the currently saved stack pointer within the stack limit? 
*/ \ + if( pxCurrentTCBs[ xCurCoreID ]->pxTopOfStack >= pxCurrentTCBs[ xCurCoreID ]->pxEndOfStack - portSTACK_LIMIT_PADDING ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCBs[ xCurCoreID ], pxCurrentTCBs[ xCurCoreID ]->pcTaskName ); \ + } \ } #endif /* configCHECK_FOR_STACK_OVERFLOW == 1 */ @@ -90,18 +90,18 @@ #if ( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) && ( portSTACK_GROWTH < 0 ) ) - #define taskCHECK_FOR_STACK_OVERFLOW() \ - { \ - const uint32_t * const pulStack = ( uint32_t * ) pxCurrentTCB->pxStack; \ - const uint32_t ulCheckValue = ( uint32_t ) 0xa5a5a5a5; \ - \ - if( ( pulStack[ 0 ] != ulCheckValue ) || \ - ( pulStack[ 1 ] != ulCheckValue ) || \ - ( pulStack[ 2 ] != ulCheckValue ) || \ - ( pulStack[ 3 ] != ulCheckValue ) ) \ - { \ - vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ - } \ + #define taskCHECK_FOR_STACK_OVERFLOW( xCurCoreID ) \ + { \ + const uint32_t * const pulStack = ( uint32_t * ) pxCurrentTCBs[ xCurCoreID ]->pxStack; \ + const uint32_t ulCheckValue = ( uint32_t ) 0xa5a5a5a5; \ + \ + if( ( pulStack[ 0 ] != ulCheckValue ) || \ + ( pulStack[ 1 ] != ulCheckValue ) || \ + ( pulStack[ 2 ] != ulCheckValue ) || \ + ( pulStack[ 3 ] != ulCheckValue ) ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCBs[ xCurCoreID ], pxCurrentTCBs[ xCurCoreID ]->pcTaskName ); \ + } \ } #endif /* #if( configCHECK_FOR_STACK_OVERFLOW > 1 ) */ @@ -109,9 +109,9 @@ #if ( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) && ( portSTACK_GROWTH > 0 ) ) - #define taskCHECK_FOR_STACK_OVERFLOW() \ + #define taskCHECK_FOR_STACK_OVERFLOW( xCurCoreID ) \ { \ - int8_t * pcEndOfStack = ( int8_t * ) pxCurrentTCB->pxEndOfStack; \ + int8_t * pcEndOfStack = ( int8_t * ) pxCurrentTCBs[ xCurCoreID ]->pxEndOfStack; \ static const uint8_t ucExpectedStackBytes[] = { tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ @@ -124,7 +124,7 @@ /* Has the extremity of the task stack ever been written over? */ \ if( memcmp( ( void * ) pcEndOfStack, ( void * ) ucExpectedStackBytes, sizeof( ucExpectedStackBytes ) ) != 0 ) \ { \ - vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCBs[ xCurCoreID ], pxCurrentTCBs[ xCurCoreID ]->pcTaskName ); \ } \ } @@ -133,7 +133,7 @@ /* Remove stack overflow macro if not being used. */ #ifndef taskCHECK_FOR_STACK_OVERFLOW - #define taskCHECK_FOR_STACK_OVERFLOW() + #define taskCHECK_FOR_STACK_OVERFLOW( xCurCoreID ) #endif diff --git a/components/freertos/FreeRTOS-Kernel-V10.5.1/include/freertos/task.h b/components/freertos/FreeRTOS-Kernel-V10.5.1/include/freertos/task.h index ff55eafb61..a3427087af 100644 --- a/components/freertos/FreeRTOS-Kernel-V10.5.1/include/freertos/task.h +++ b/components/freertos/FreeRTOS-Kernel-V10.5.1/include/freertos/task.h @@ -170,6 +170,7 @@ typedef struct xTASK_STATUS StackType_t * pxEndOfStack; /* Points to the end address of the task's stack area. */ #endif configSTACK_DEPTH_TYPE usStackHighWaterMark; /* The minimum amount of stack space that has remained for the task since the task was created. The closer this value is to zero the closer the task has come to overflowing its stack. */ + BaseType_t xCoreID; /*!< Core this task is pinned to (0, 1, or tskNO_AFFINITY). 
If configNUMBER_OF_CORES == 1, this will always be 0. */ } TaskStatus_t; /* Possible return values for eTaskConfirmSleepModeStatus(). */ @@ -189,6 +190,14 @@ typedef enum */ #define tskIDLE_PRIORITY ( ( UBaseType_t ) 0U ) +/** + * Macro representing and unpinned (i.e., "no affinity") task in xCoreID parameters + * + * \ingroup Tasks + */ +#define tskNO_AFFINITY ( ( BaseType_t ) 0x7FFFFFFF ) +/* Todo: Update tskNO_AFFINITY value to -1 (IDF-7908) */ + /** * task. h * @@ -211,8 +220,9 @@ typedef enum * \defgroup taskENTER_CRITICAL taskENTER_CRITICAL * \ingroup SchedulerControl */ -#define taskENTER_CRITICAL() portENTER_CRITICAL() +#define taskENTER_CRITICAL( x ) portENTER_CRITICAL( x ) #define taskENTER_CRITICAL_FROM_ISR() portSET_INTERRUPT_MASK_FROM_ISR() +#define taskENTER_CRITICAL_ISR( x ) portENTER_CRITICAL_ISR( x ) /** * task. h @@ -226,8 +236,9 @@ typedef enum * \defgroup taskEXIT_CRITICAL taskEXIT_CRITICAL * \ingroup SchedulerControl */ -#define taskEXIT_CRITICAL() portEXIT_CRITICAL() +#define taskEXIT_CRITICAL( x ) portEXIT_CRITICAL( x ) #define taskEXIT_CRITICAL_FROM_ISR( x ) portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) +#define taskEXIT_CRITICAL_ISR( x ) portEXIT_CRITICAL_ISR( x ) /** * task. h @@ -294,6 +305,9 @@ typedef enum * support can alternatively create an MPU constrained task using * xTaskCreateRestricted(). * + * @note If configNUMBER_OF_CORES > 1, this function will create an unpinned + * task (see tskNO_AFFINITY for more details). + * * @param pxTaskCode Pointer to the task entry function. Tasks * must be implemented to never return (i.e. continuous loop). * @@ -301,10 +315,8 @@ typedef enum * facilitate debugging. Max length defined by configMAX_TASK_NAME_LEN - default * is 16. * - * @param usStackDepth The size of the task stack specified as the number of - * variables the stack can hold - not the number of bytes. For example, if - * the stack is 16 bits wide and usStackDepth is defined as 100, 200 bytes - * will be allocated for stack storage. + * @param usStackDepth The size of the task stack specified as the NUMBER OF + * BYTES. Note that this differs from vanilla FreeRTOS. * * @param pvParameters Pointer that will be used as the parameter for the task * being created. @@ -321,6 +333,9 @@ typedef enum * @return pdPASS if the task was successfully created and added to a ready * list, otherwise an error code defined in the file projdefs.h * + * @note If program uses thread local variables (ones specified with "__thread" + * keyword) then storage for them will be allocated on the task's stack. + * * Example usage: * @code{c} * // Task to be created. @@ -356,13 +371,39 @@ typedef enum * \ingroup Tasks */ #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + static inline __attribute__( ( always_inline ) ) BaseType_t xTaskCreate( TaskFunction_t pxTaskCode, const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ const configSTACK_DEPTH_TYPE usStackDepth, void * const pvParameters, UBaseType_t uxPriority, - TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION; -#endif + TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION + { + /* + * The idf_additions.h has not been included here yet due to inclusion + * order. Thus we manually declare the function here. 
+ */ + extern BaseType_t xTaskCreatePinnedToCore( TaskFunction_t pxTaskCode, + const char * const pcName, + const configSTACK_DEPTH_TYPE usStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * const pvCreatedTask, + const BaseType_t xCoreID ); + + /* + * Call the "PinnedToCore" version with tskNO_AFFINITY to create + * an unpinned task. + */ + return xTaskCreatePinnedToCore( pxTaskCode, + pcName, + usStackDepth, + pvParameters, + uxPriority, + pxCreatedTask, + tskNO_AFFINITY ); + } +#endif /* configSUPPORT_DYNAMIC_ALLOCATION == 1 */ /** * task. h @@ -388,6 +429,9 @@ typedef enum * memory. xTaskCreateStatic() therefore allows a task to be created without * using any dynamic memory allocation. * + * @note If configNUMBER_OF_CORES > 1, this function will create an unpinned + * task (see tskNO_AFFINITY for more details). + * * @param pxTaskCode Pointer to the task entry function. Tasks * must be implemented to never return (i.e. continuous loop). * @@ -395,10 +439,8 @@ typedef enum * facilitate debugging. The maximum length of the string is defined by * configMAX_TASK_NAME_LEN in FreeRTOSConfig.h. * - * @param ulStackDepth The size of the task stack specified as the number of - * variables the stack can hold - not the number of bytes. For example, if - * the stack is 32-bits wide and ulStackDepth is defined as 100 then 400 bytes - * will be allocated for stack storage. + * @param ulStackDepth The size of the task stack specified as the NUMBER OF + * BYTES. Note that this differs from vanilla FreeRTOS. * * @param pvParameters Pointer that will be used as the parameter for the task * being created. @@ -418,6 +460,9 @@ typedef enum * puxStackBuffer or pxTaskBuffer are NULL then the task will not be created and * NULL is returned. * + * @note If program uses thread local variables (ones specified with "__thread" + * keyword) then storage for them will be allocated on the task's stack. + * * Example usage: * @code{c} * @@ -473,13 +518,41 @@ typedef enum * \ingroup Tasks */ #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + static inline __attribute__( ( always_inline ) ) TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode, const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ const uint32_t ulStackDepth, void * const pvParameters, UBaseType_t uxPriority, StackType_t * const puxStackBuffer, - StaticTask_t * const pxTaskBuffer ) PRIVILEGED_FUNCTION; + StaticTask_t * const pxTaskBuffer ) PRIVILEGED_FUNCTION + { + /* + * The idf_additions.h has not been included here yet due to inclusion + * order. Thus we manually declare the function here. + */ + extern TaskHandle_t xTaskCreateStaticPinnedToCore( TaskFunction_t pxTaskCode, + const char * const pcName, + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + StackType_t * const pxStackBuffer, + StaticTask_t * const pxTaskBuffer, + const BaseType_t xCoreID ); + + /* + * Call the "PinnedToCore" version with tskNO_AFFINITY to create + * an unpinned task. + */ + return xTaskCreateStaticPinnedToCore( pxTaskCode, + pcName, + ulStackDepth, + pvParameters, + uxPriority, + puxStackBuffer, + pxTaskBuffer, + tskNO_AFFINITY ); + } #endif /* configSUPPORT_STATIC_ALLOCATION */ /** @@ -1737,8 +1810,8 @@ BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, * xTaskGetIdleTaskHandle() is only available if * INCLUDE_xTaskGetIdleTaskHandle is set to 1 in FreeRTOSConfig.h. * - * Simply returns the handle of the idle task. 
It is not valid to call - * xTaskGetIdleTaskHandle() before the scheduler has been started. + * Simply returns the handle of the idle task of the current core. It is not + * valid to call xTaskGetIdleTaskHandle() before the scheduler has been started. */ TaskHandle_t xTaskGetIdleTaskHandle( void ) PRIVILEGED_FUNCTION; @@ -1979,6 +2052,9 @@ void vTaskGetRunTimeStats( char * pcWriteBuffer ) PRIVILEGED_FUNCTION; /*lint !e * system if there are no other tasks executing at the idle priority, tickless * idle is not used, and configIDLE_SHOULD_YIELD is set to 0. * + * @note If configNUMBER_OF_CORES > 1, calling this function will query the idle + * task of the current core. + * * @return The total run time of the idle task or the percentage of the total * run time consumed by the idle task. This is the amount of time the * idle task has actually been executing. The unit of time is dependent on the @@ -2954,6 +3030,9 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) PRIVILEGED_FUNCTION; * or * + Time slicing is in use and there is a task of equal priority to the * currently running task. + * + * Note: If configNUMBER_OF_CORES > 1, this function must only be called by + * core 0. Other cores should call xTaskIncrementTickOtherCores() instead. */ BaseType_t xTaskIncrementTick( void ) PRIVILEGED_FUNCTION; diff --git a/components/freertos/FreeRTOS-Kernel-V10.5.1/queue.c b/components/freertos/FreeRTOS-Kernel-V10.5.1/queue.c index fa36ad70e6..32e787a26d 100644 --- a/components/freertos/FreeRTOS-Kernel-V10.5.1/queue.c +++ b/components/freertos/FreeRTOS-Kernel-V10.5.1/queue.c @@ -41,6 +41,8 @@ #include "FreeRTOS.h" #include "task.h" #include "queue.h" +/* Include private IDF API additions for critical thread safety macros */ +#include "esp_private/freertos_idf_additions_priv.h" #if ( configUSE_CO_ROUTINES == 1 ) #include "croutine.h" @@ -52,11 +54,71 @@ * correct privileged Vs unprivileged linkage and placement. */ #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */ +/* Some code sections require extra critical sections when building for SMP + * ( configNUMBER_OF_CORES > 1 ). */ +#if ( configNUMBER_OF_CORES > 1 ) +/* Macros that Enter/exit a critical section only when building for SMP */ + #define taskENTER_CRITICAL_SMP_ONLY( pxLock ) taskENTER_CRITICAL( pxLock ) + #define taskEXIT_CRITICAL_SMP_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock ) + #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskEnterCriticalSafeSMPOnly( pxLock ) + #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskExitCriticalSafeSMPOnly( pxLock ) -/* Constants used with the cRxLock and cTxLock structure members. 
*/ -#define queueUNLOCKED ( ( int8_t ) -1 ) -#define queueLOCKED_UNMODIFIED ( ( int8_t ) 0 ) -#define queueINT8_MAX ( ( int8_t ) 127 ) + static inline __attribute__( ( always_inline ) ) + void prvTaskEnterCriticalSafeSMPOnly( portMUX_TYPE * pxLock ) + { + if( portCHECK_IF_IN_ISR() == pdFALSE ) + { + taskENTER_CRITICAL( pxLock ); + } + else + { + #ifdef __clang_analyzer__ + /* Teach clang-tidy that ISR version macro can be different */ + configASSERT( 1 ); + #endif + taskENTER_CRITICAL_ISR( pxLock ); + } + } + + static inline __attribute__( ( always_inline ) ) + void prvTaskExitCriticalSafeSMPOnly( portMUX_TYPE * pxLock ) + { + if( portCHECK_IF_IN_ISR() == pdFALSE ) + { + taskEXIT_CRITICAL( pxLock ); + } + else + { + #ifdef __clang_analyzer__ + /* Teach clang-tidy that ISR version macro can be different */ + configASSERT( 1 ); + #endif + taskEXIT_CRITICAL_ISR( pxLock ); + } + } +#else /* configNUMBER_OF_CORES > 1 */ + /* Macros that Enter/exit a critical section only when building for SMP */ + #define taskENTER_CRITICAL_SMP_ONLY( pxLock ) + #define taskEXIT_CRITICAL_SMP_ONLY( pxLock ) + #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock ) + #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock ) +#endif /* configNUMBER_OF_CORES > 1 */ + +/* Single core FreeRTOS uses queue locks to ensure that vTaskPlaceOnEventList() + * calls are deterministic (as queue locks use scheduler suspension instead of + * critical sections). However, the SMP implementation is non-deterministic + * anyways, thus SMP can forego the use of queue locks (replaced with a critical + * sections) in exchange for better queue performance. */ +#if ( configNUMBER_OF_CORES > 1 ) + #define queueUSE_LOCKS 0 + #define queueUNLOCKED ( ( int8_t ) 0 ) +#else /* configNUMBER_OF_CORES > 1 */ + #define queueUSE_LOCKS 1 + /* Constants used with the cRxLock and cTxLock structure members. */ + #define queueUNLOCKED ( ( int8_t ) -1 ) + #define queueLOCKED_UNMODIFIED ( ( int8_t ) 0 ) + #define queueINT8_MAX ( ( int8_t ) 127 ) +#endif /* configNUMBER_OF_CORES > 1 */ /* When the Queue_t structure is used to represent a base queue its pcHead and * pcTail members are used as pointers into the queue storage area. When the @@ -67,8 +129,8 @@ * is maintained. The QueuePointers_t and SemaphoreData_t types are used to form * a union as their usage is mutually exclusive dependent on what the queue is * being used for. */ -#define uxQueueType pcHead -#define queueQUEUE_IS_MUTEX NULL +#define uxQueueType pcHead +#define queueQUEUE_IS_MUTEX NULL typedef struct QueuePointers { @@ -119,8 +181,10 @@ typedef struct QueueDefinition /* The old naming convention is used to prevent b UBaseType_t uxLength; /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */ UBaseType_t uxItemSize; /*< The size of each items that the queue will hold. */ - volatile int8_t cRxLock; /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ - volatile int8_t cTxLock; /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ + #if ( queueUSE_LOCKS == 1 ) + volatile int8_t cRxLock; /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. 
*/ + volatile int8_t cTxLock; /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ + #endif /* queueUSE_LOCKS == 1 */ #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */ @@ -134,6 +198,8 @@ typedef struct QueueDefinition /* The old naming convention is used to prevent b UBaseType_t uxQueueNumber; uint8_t ucQueueType; #endif + + portMUX_TYPE xQueueLock; /* Spinlock required for SMP critical sections */ } xQUEUE; /* The old xQUEUE name is maintained above then typedefed to the new Queue_t @@ -167,8 +233,15 @@ typedef xQUEUE Queue_t; * array position being vacant. */ PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ]; + #if ( configNUMBER_OF_CORES > 1 ) +/* Spinlock required in SMP when accessing the queue registry */ + static portMUX_TYPE xQueueRegistryLock = portMUX_INITIALIZER_UNLOCKED; + #endif /* configNUMBER_OF_CORES > 1 */ + #endif /* configQUEUE_REGISTRY_SIZE */ +#if ( queueUSE_LOCKS == 1 ) + /* * Unlocks a queue locked by a call to prvLockQueue. Locking a queue does not * prevent an ISR from adding or removing items to the queue, but does prevent @@ -177,21 +250,22 @@ typedef xQUEUE Queue_t; * to indicate that a task may require unblocking. When the queue in unlocked * these lock counts are inspected, and the appropriate action taken. */ -static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION; + static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION; /* * Uses a critical section to determine if there is any data in a queue. * * @return pdTRUE if the queue contains no items, otherwise pdFALSE. */ -static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION; + static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION; /* * Uses a critical section to determine if there is any space in a queue. * * @return pdTRUE if there is no space, otherwise pdFALSE; */ -static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION; + static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION; +#endif /* queueUSE_LOCKS == 1 */ /* * Copies an item into the queue, either at the front of the queue or the @@ -248,12 +322,14 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, #endif /*-----------------------------------------------------------*/ +#if ( queueUSE_LOCKS == 1 ) + /* * Macro to mark a queue as locked. Locking a queue prevents an ISR from * accessing the queue event lists. */ -#define prvLockQueue( pxQueue ) \ - taskENTER_CRITICAL(); \ + #define prvLockQueue( pxQueue ) \ + taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); \ { \ if( ( pxQueue )->cRxLock == queueUNLOCKED ) \ { \ @@ -264,14 +340,14 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \ } \ } \ - taskEXIT_CRITICAL() + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ) /* * Macro to increment cTxLock member of the queue data structure. It is * capped at the number of tasks in the system as we cannot unblock more * tasks than the number of tasks in the system. 
*/ -#define prvIncrementQueueTxLock( pxQueue, cTxLock ) \ + #define prvIncrementQueueTxLock( pxQueue, cTxLock ) \ { \ const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks(); \ if( ( UBaseType_t ) ( cTxLock ) < uxNumberOfTasks ) \ @@ -286,7 +362,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, * capped at the number of tasks in the system as we cannot unblock more * tasks than the number of tasks in the system. */ -#define prvIncrementQueueRxLock( pxQueue, cRxLock ) \ + #define prvIncrementQueueRxLock( pxQueue, cRxLock ) \ { \ const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks(); \ if( ( UBaseType_t ) ( cRxLock ) < uxNumberOfTasks ) \ @@ -295,6 +371,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, ( pxQueue )->cRxLock = ( int8_t ) ( ( cRxLock ) + ( int8_t ) 1 ); \ } \ } +#endif /* queueUSE_LOCKS == 1 */ /*-----------------------------------------------------------*/ BaseType_t xQueueGenericReset( QueueHandle_t xQueue, @@ -305,19 +382,28 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue, configASSERT( pxQueue ); + if( xNewQueue == pdTRUE ) + { + portMUX_INITIALIZE( &( pxQueue->xQueueLock ) ); + } + if( ( pxQueue != NULL ) && ( pxQueue->uxLength >= 1U ) && /* Check for multiplication overflow. */ ( ( SIZE_MAX / pxQueue->uxLength ) >= pxQueue->uxItemSize ) ) { - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); { pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */ pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U; pxQueue->pcWriteTo = pxQueue->pcHead; pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - 1U ) * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */ - pxQueue->cRxLock = queueUNLOCKED; - pxQueue->cTxLock = queueUNLOCKED; + #if ( queueUSE_LOCKS == 1 ) + { + pxQueue->cRxLock = queueUNLOCKED; + pxQueue->cTxLock = queueUNLOCKED; + } + #endif /* queueUSE_LOCKS == 1 */ if( xNewQueue == pdFALSE ) { @@ -349,7 +435,7 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue, vListInitialise( &( pxQueue->xTasksWaitingToReceive ) ); } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); } else { @@ -606,6 +692,9 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, /* In case this is a recursive mutex. */ pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0; + /* Initialize the mutex's spinlock */ + portMUX_INITIALIZE( &( pxNewQueue->xQueueLock ) ); + traceCREATE_MUTEX( pxNewQueue ); /* Start with the semaphore in the expected state. */ @@ -671,7 +760,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, * calling task is the mutex holder, but not a good way of determining the * identity of the mutex holder, as the holder may change between the * following critical section exiting and the function returning. */ - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( pxSemaphore->xQueueLock ) ); { if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX ) { @@ -682,7 +771,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, pxReturn = NULL; } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxSemaphore->xQueueLock ) ); return pxReturn; } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. 
*/ @@ -908,7 +997,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue, * interest of execution time efficiency. */ for( ; ; ) { - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); { /* Is there room on the queue now? The running task must be the * highest priority task wanting to access the queue. If the head item @@ -1014,7 +1103,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue, } #endif /* configUSE_QUEUE_SETS */ - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); return pdPASS; } else @@ -1023,7 +1112,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue, { /* The queue was full and no block time is specified (or * the block time has expired) so leave now. */ - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); /* Return to the original privilege level before exiting * the function. */ @@ -1043,56 +1132,88 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue, mtCOVERAGE_TEST_MARKER(); } } - } - taskEXIT_CRITICAL(); - /* Interrupts and other tasks can send to and receive from the queue - * now the critical section has been exited. */ - - vTaskSuspendAll(); - prvLockQueue( pxQueue ); - - /* Update the timeout state to see if it has expired yet. */ - if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) - { - if( prvIsQueueFull( pxQueue ) != pdFALSE ) + /* If queue locks ARE NOT being used: + * - At this point, the queue is full and entry time has been set + * - We simply check for a time out, block if not timed out, or + * return an error if we have timed out. */ + #if ( queueUSE_LOCKS == 0 ) { - traceBLOCKING_ON_QUEUE_SEND( pxQueue ); - vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait ); - - /* Unlocking the queue means queue events can effect the - * event list. It is possible that interrupts occurring now - * remove this task from the event list again - but as the - * scheduler is suspended the task will go onto the pending - * ready list instead of the actual ready list. */ - prvUnlockQueue( pxQueue ); - - /* Resuming the scheduler will move tasks from the pending - * ready list into the ready list - so it is feasible that this - * task is already in the ready list before it yields - in which - * case the yield will not cause a context switch unless there - * is also a higher priority task in the pending ready list. */ - if( xTaskResumeAll() == pdFALSE ) + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) { + /* Not timed out yet. Block the current task. */ + traceBLOCKING_ON_QUEUE_SEND( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait ); portYIELD_WITHIN_API(); } + else + { + /* We have timed out. Return an error. */ + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + traceQUEUE_SEND_FAILED( pxQueue ); + return errQUEUE_FULL; + } + } + #endif /* queueUSE_LOCKS == 0 */ + } + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + + /* If queue locks ARE being used: + * - At this point, the queue is full and entry time has been set + * - We follow the original procedure of locking the queue before + * attempting to block. */ + #if ( queueUSE_LOCKS == 1 ) + { + /* Interrupts and other tasks can send to and receive from the queue + * now the critical section has been exited. */ + + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. 
*/ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + if( prvIsQueueFull( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_SEND( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait ); + + /* Unlocking the queue means queue events can effect the + * event list. It is possible that interrupts occurring now + * remove this task from the event list again - but as the + * scheduler is suspended the task will go onto the pending + * ready list instead of the actual ready list. */ + prvUnlockQueue( pxQueue ); + + /* Resuming the scheduler will move tasks from the pending + * ready list into the ready list - so it is feasible that this + * task is already in the ready list before it yields - in which + * case the yield will not cause a context switch unless there + * is also a higher priority task in the pending ready list. */ + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + } + else + { + /* Try again. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } } else { - /* Try again. */ + /* The timeout has expired. */ prvUnlockQueue( pxQueue ); ( void ) xTaskResumeAll(); + + traceQUEUE_SEND_FAILED( pxQueue ); + return errQUEUE_FULL; } } - else - { - /* The timeout has expired. */ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); - - traceQUEUE_SEND_FAILED( pxQueue ); - return errQUEUE_FULL; - } + #endif /* queueUSE_LOCKS == 1 */ } /*lint -restore */ } /*-----------------------------------------------------------*/ @@ -1131,11 +1252,16 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, * read, instead return a flag to say whether a context switch is required or * not (i.e. has a task with a higher priority than us been woken by this * post). */ - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus ); { if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) ) { - const int8_t cTxLock = pxQueue->cTxLock; + #if ( queueUSE_LOCKS == 1 ) + const int8_t cTxLock = pxQueue->cTxLock; + #else + /* Queue locks not used, so we treat it as unlocked. */ + const int8_t cTxLock = queueUNLOCKED; + #endif /* queueUSE_LOCKS == 1 */ const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting; traceQUEUE_SEND_FROM_ISR( pxQueue ); @@ -1243,9 +1369,13 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, } else { - /* Increment the lock count so the task that unlocks the queue - * knows that data was posted while it was locked. */ - prvIncrementQueueTxLock( pxQueue, cTxLock ); + #if ( queueUSE_LOCKS == 1 ) + { + /* Increment the lock count so the task that unlocks the queue + * knows that data was posted while it was locked. 
*/ + prvIncrementQueueTxLock( pxQueue, cTxLock ); + } + #endif /* queueUSE_LOCKS == 1 */ } xReturn = pdPASS; @@ -1256,7 +1386,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, xReturn = errQUEUE_FULL; } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + prvEXIT_CRITICAL_OR_UNMASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus ); return xReturn; } @@ -1302,7 +1432,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus ); { const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; @@ -1311,9 +1441,15 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, * space'. */ if( uxMessagesWaiting < pxQueue->uxLength ) { - const int8_t cTxLock = pxQueue->cTxLock; + #if ( queueUSE_LOCKS == 1 ) + const int8_t cTxLock = pxQueue->cTxLock; + #else + /* Queue locks not used, so we treat it as unlocked. */ + const int8_t cTxLock = queueUNLOCKED; + #endif /* queueUSE_LOCKS == 1 */ - traceQUEUE_SEND_FROM_ISR( pxQueue ); + /* Todo: Reconcile tracing differences (IDF-8183) */ + traceQUEUE_GIVE_FROM_ISR( pxQueue ); /* A task can only have an inherited priority if it is a mutex * holder - and if there is a mutex holder then the mutex cannot be @@ -1409,20 +1545,25 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, } else { - /* Increment the lock count so the task that unlocks the queue - * knows that data was posted while it was locked. */ - prvIncrementQueueTxLock( pxQueue, cTxLock ); + #if ( queueUSE_LOCKS == 1 ) + { + /* Increment the lock count so the task that unlocks the queue + * knows that data was posted while it was locked. */ + prvIncrementQueueTxLock( pxQueue, cTxLock ); + } + #endif /* queueUSE_LOCKS == 1 */ } xReturn = pdPASS; } else { - traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ); + /* Todo: Reconcile tracing differences (IDF-8183) */ + traceQUEUE_GIVE_FROM_ISR_FAILED( pxQueue ); xReturn = errQUEUE_FULL; } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + prvEXIT_CRITICAL_OR_UNMASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus ); return xReturn; } @@ -1455,7 +1596,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue, * interest of execution time efficiency. */ for( ; ; ) { - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); { const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; @@ -1487,7 +1628,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue, mtCOVERAGE_TEST_MARKER(); } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); return pdPASS; } else @@ -1496,7 +1637,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue, { /* The queue was empty and no block time is specified (or * the block time has expired) so leave now. */ - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); traceQUEUE_RECEIVE_FAILED( pxQueue ); return errQUEUE_EMPTY; } @@ -1513,60 +1654,92 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue, mtCOVERAGE_TEST_MARKER(); } } - } - taskEXIT_CRITICAL(); - /* Interrupts and other tasks can send to and receive from the queue - * now the critical section has been exited. */ - - vTaskSuspendAll(); - prvLockQueue( pxQueue ); - - /* Update the timeout state to see if it has expired yet. */ - if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) - { - /* The timeout has not expired. 
If the queue is still empty place - * the task on the list of tasks waiting to receive from the queue. */ - if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + /* If queue locks ARE NOT being used: + * - At this point, the queue is empty and entry time has been set + * - We simply check for a time out, block if not timed out, or + * return an error if we have timed out. */ + #if ( queueUSE_LOCKS == 0 ) { - traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); - vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); - prvUnlockQueue( pxQueue ); - - if( xTaskResumeAll() == pdFALSE ) + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) { + /* Not timed out yet. Block the current task. */ + traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); portYIELD_WITHIN_API(); } else + { + /* We have timed out. Return an error. */ + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + } + #endif /* queueUSE_LOCKS == 0 */ + } + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + + /* If queue locks ARE being used: + * - At this point, the queue is empty and entry time has been set + * - We follow the original procedure for locking the queue before + * attempting to block. */ + #if ( queueUSE_LOCKS == 1 ) + { + /* Interrupts and other tasks can send to and receive from the queue + * now the critical section has been exited. */ + + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + /* The timeout has not expired. If the queue is still empty place + * the task on the list of tasks waiting to receive from the queue. */ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); + prvUnlockQueue( pxQueue ); + + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* The queue contains data again. Loop back to try and read the + * data. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } + } + else + { + /* Timed out. If there is no data in the queue exit, otherwise loop + * back and attempt to read the data. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else { mtCOVERAGE_TEST_MARKER(); } } - else - { - /* The queue contains data again. Loop back to try and read the - * data. */ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); - } - } - else - { - /* Timed out. If there is no data in the queue exit, otherwise loop - * back and attempt to read the data. */ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); - - if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) - { - traceQUEUE_RECEIVE_FAILED( pxQueue ); - return errQUEUE_EMPTY; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } } + #endif /* queueUSE_LOCKS == 1 */ } /*lint -restore */ } /*-----------------------------------------------------------*/ @@ -1601,7 +1774,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, * of execution time efficiency. 
*/ for( ; ; ) { - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); { /* Semaphores are queues with an item size of 0, and where the * number of messages in the queue is the semaphore's count value. */ @@ -1611,7 +1784,8 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, * must be the highest priority task wanting to access the queue. */ if( uxSemaphoreCount > ( UBaseType_t ) 0 ) { - traceQUEUE_RECEIVE( pxQueue ); + /* Todo: Reconcile tracing differences (IDF-8183) */ + traceQUEUE_SEMAPHORE_RECEIVE( pxQueue ); /* Semaphores are queues with a data size of zero and where the * messages waiting is the semaphore's count. Reduce the count. */ @@ -1650,7 +1824,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, mtCOVERAGE_TEST_MARKER(); } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); return pdPASS; } else @@ -1659,7 +1833,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, { /* The semaphore count was 0 and no block time is specified * (or the block time has expired) so exit now. */ - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); traceQUEUE_RECEIVE_FAILED( pxQueue ); return errQUEUE_EMPTY; } @@ -1676,107 +1850,165 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, mtCOVERAGE_TEST_MARKER(); } } - } - taskEXIT_CRITICAL(); - /* Interrupts and other tasks can give to and take from the semaphore - * now the critical section has been exited. */ - - vTaskSuspendAll(); - prvLockQueue( pxQueue ); - - /* Update the timeout state to see if it has expired yet. */ - if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) - { - /* A block time is specified and not expired. If the semaphore - * count is 0 then enter the Blocked state to wait for a semaphore to - * become available. As semaphores are implemented with queues the - * queue being empty is equivalent to the semaphore count being 0. */ - if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + /* If queue locks ARE NOT being used: + * - At this point, the semaphore/mutex is empty/held and entry time + * has been set. + * - We simply check for a time out, inherit priority and block if + * not timed out, or return an error if we have timed out. */ + #if ( queueUSE_LOCKS == 0 ) { - traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); - - #if ( configUSE_MUTEXES == 1 ) + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) { - if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) + /* Not timed out yet. If this is a mutex, make the holder + * inherit our priority, then block the current task. */ + traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); + #if ( configUSE_MUTEXES == 1 ) { - taskENTER_CRITICAL(); + if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) { xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder ); } - taskEXIT_CRITICAL(); + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* if ( configUSE_MUTEXES == 1 ) */ + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); + portYIELD_WITHIN_API(); + } + else + { + /* We have timed out. If this is a mutex, make the holder + * disinherit our priority, then return an error. 
*/ + #if ( configUSE_MUTEXES == 1 ) + { + if( xInheritanceOccurred != pdFALSE ) + { + UBaseType_t uxHighestWaitingPriority; + uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue ); + vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority ); + } + } + #endif /* configUSE_MUTEXES */ + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + } + #endif /* queueUSE_LOCKS == 0 */ + } + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + + /* If queue locks ARE being used: + * - At this point, the semaphore/mutex is empty/held and entry time + * has been set. + * - We follow the original procedure for locking the queue, inheriting + * priority, then attempting to block. */ + #if ( queueUSE_LOCKS == 1 ) + { + /* Interrupts and other tasks can give to and take from the semaphore + * now the critical section has been exited. */ + + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + /* A block time is specified and not expired. If the semaphore + * count is 0 then enter the Blocked state to wait for a semaphore to + * become available. As semaphores are implemented with queues the + * queue being empty is equivalent to the semaphore count being 0. */ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); + + #if ( configUSE_MUTEXES == 1 ) + { + if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) + { + taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); + { + xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder ); + } + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* if ( configUSE_MUTEXES == 1 ) */ + + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); + prvUnlockQueue( pxQueue ); + + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); } else { mtCOVERAGE_TEST_MARKER(); } } - #endif /* if ( configUSE_MUTEXES == 1 ) */ - - vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); - prvUnlockQueue( pxQueue ); - - if( xTaskResumeAll() == pdFALSE ) + else { - portYIELD_WITHIN_API(); + /* There was no timeout and the semaphore count was not 0, so + * attempt to take the semaphore again. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } + } + else + { + /* Timed out. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + + /* If the semaphore count is 0 exit now as the timeout has + * expired. Otherwise return to attempt to take the semaphore that is + * known to be available. As semaphores are implemented by queues the + * queue being empty is equivalent to the semaphore count being 0. */ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + #if ( configUSE_MUTEXES == 1 ) + { + /* xInheritanceOccurred could only have be set if + * pxQueue->uxQueueType == queueQUEUE_IS_MUTEX so no need to + * test the mutex type again to check it is actually a mutex. */ + if( xInheritanceOccurred != pdFALSE ) + { + taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); + { + UBaseType_t uxHighestWaitingPriority; + + /* This task blocking on the mutex caused another + * task to inherit this task's priority. Now this task + * has timed out the priority should be disinherited + * again, but only as low as the next highest priority + * task that is waiting for the same mutex. 
*/ + uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue ); + vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority ); + } + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + } + } + #endif /* configUSE_MUTEXES */ + + traceQUEUE_RECEIVE_FAILED( pxQueue ); + return errQUEUE_EMPTY; } else { mtCOVERAGE_TEST_MARKER(); } } - else - { - /* There was no timeout and the semaphore count was not 0, so - * attempt to take the semaphore again. */ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); - } - } - else - { - /* Timed out. */ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); - - /* If the semaphore count is 0 exit now as the timeout has - * expired. Otherwise return to attempt to take the semaphore that is - * known to be available. As semaphores are implemented by queues the - * queue being empty is equivalent to the semaphore count being 0. */ - if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) - { - #if ( configUSE_MUTEXES == 1 ) - { - /* xInheritanceOccurred could only have be set if - * pxQueue->uxQueueType == queueQUEUE_IS_MUTEX so no need to - * test the mutex type again to check it is actually a mutex. */ - if( xInheritanceOccurred != pdFALSE ) - { - taskENTER_CRITICAL(); - { - UBaseType_t uxHighestWaitingPriority; - - /* This task blocking on the mutex caused another - * task to inherit this task's priority. Now this task - * has timed out the priority should be disinherited - * again, but only as low as the next highest priority - * task that is waiting for the same mutex. */ - uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue ); - vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority ); - } - taskEXIT_CRITICAL(); - } - } - #endif /* configUSE_MUTEXES */ - - traceQUEUE_RECEIVE_FAILED( pxQueue ); - return errQUEUE_EMPTY; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } } + #endif /* queueUSE_LOCKS == 1 */ } /*lint -restore */ } /*-----------------------------------------------------------*/ @@ -1809,7 +2041,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue, * interest of execution time efficiency. */ for( ; ; ) { - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); { const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; @@ -1847,7 +2079,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue, mtCOVERAGE_TEST_MARKER(); } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); return pdPASS; } else @@ -1856,7 +2088,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue, { /* The queue was empty and no block time is specified (or * the block time has expired) so leave now. */ - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); traceQUEUE_PEEK_FAILED( pxQueue ); return errQUEUE_EMPTY; } @@ -1874,60 +2106,92 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue, mtCOVERAGE_TEST_MARKER(); } } - } - taskEXIT_CRITICAL(); - /* Interrupts and other tasks can send to and receive from the queue - * now that the critical section has been exited. */ - - vTaskSuspendAll(); - prvLockQueue( pxQueue ); - - /* Update the timeout state to see if it has expired yet. */ - if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) - { - /* Timeout has not expired yet, check to see if there is data in the - * queue now, and if not enter the Blocked state to wait for data. 
*/ - if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + /* If queue locks ARE NOT being used: + * - At this point, the queue is empty and entry time has been set + * - We simply check for a time out, block if not timed out, or + * return an error if we have timed out. */ + #if ( queueUSE_LOCKS == 0 ) { - traceBLOCKING_ON_QUEUE_PEEK( pxQueue ); - vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); - prvUnlockQueue( pxQueue ); - - if( xTaskResumeAll() == pdFALSE ) + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) { + /* Not timed out yet. Block the current task. */ + traceBLOCKING_ON_QUEUE_PEEK( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); portYIELD_WITHIN_API(); } else + { + /* We have timed out. Return an error. */ + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + traceQUEUE_PEEK_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + } + #endif /* queueUSE_LOCKS == 0 */ + } + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + + /* If queue locks ARE being used: + * - At this point, the queue is empty and entry time has been set + * - We follow the original procedure for locking the queue before + * attempting to block. */ + #if ( queueUSE_LOCKS == 1 ) + { + /* Interrupts and other tasks can send to and receive from the queue + * now that the critical section has been exited. */ + + vTaskSuspendAll(); + prvLockQueue( pxQueue ); + + /* Update the timeout state to see if it has expired yet. */ + if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) + { + /* Timeout has not expired yet, check to see if there is data in the + * queue now, and if not enter the Blocked state to wait for data. */ + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceBLOCKING_ON_QUEUE_PEEK( pxQueue ); + vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); + prvUnlockQueue( pxQueue ); + + if( xTaskResumeAll() == pdFALSE ) + { + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* There is data in the queue now, so don't enter the blocked + * state, instead return to try and obtain the data. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + } + } + else + { + /* The timeout has expired. If there is still no data in the queue + * exit, otherwise go back and try to read the data again. */ + prvUnlockQueue( pxQueue ); + ( void ) xTaskResumeAll(); + + if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) + { + traceQUEUE_PEEK_FAILED( pxQueue ); + return errQUEUE_EMPTY; + } + else { mtCOVERAGE_TEST_MARKER(); } } - else - { - /* There is data in the queue now, so don't enter the blocked - * state, instead return to try and obtain the data. */ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); - } - } - else - { - /* The timeout has expired. If there is still no data in the queue - * exit, otherwise go back and try to read the data again. 
*/ - prvUnlockQueue( pxQueue ); - ( void ) xTaskResumeAll(); - - if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) - { - traceQUEUE_PEEK_FAILED( pxQueue ); - return errQUEUE_EMPTY; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } } + #endif /* queueUSE_LOCKS == 1 */ } /*lint -restore */ } /*-----------------------------------------------------------*/ @@ -1959,14 +2223,19 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus ); { const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; /* Cannot block in an ISR, so check there is data available. */ if( uxMessagesWaiting > ( UBaseType_t ) 0 ) { - const int8_t cRxLock = pxQueue->cRxLock; + #if ( queueUSE_LOCKS == 1 ) + const int8_t cRxLock = pxQueue->cRxLock; + #else + /* Queue locks not used, so we treat it as unlocked. */ + const int8_t cRxLock = queueUNLOCKED; + #endif /* queueUSE_LOCKS == 1 */ traceQUEUE_RECEIVE_FROM_ISR( pxQueue ); @@ -2006,9 +2275,13 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, } else { - /* Increment the lock count so the task that unlocks the queue - * knows that data was removed while it was locked. */ - prvIncrementQueueRxLock( pxQueue, cRxLock ); + #if ( queueUSE_LOCKS == 1 ) + { + /* Increment the lock count so the task that unlocks the queue + * knows that data was removed while it was locked. */ + prvIncrementQueueRxLock( pxQueue, cRxLock ); + } + #endif /* queueUSE_LOCKS == 1 */ } xReturn = pdPASS; @@ -2019,7 +2292,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ); } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + prvEXIT_CRITICAL_OR_UNMASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus ); return xReturn; } @@ -2053,7 +2326,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus ); { /* Cannot block in an ISR, so check there is data available. */ if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) @@ -2074,7 +2347,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue ); } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + prvEXIT_CRITICAL_OR_UNMASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus ); return xReturn; } @@ -2086,11 +2359,11 @@ UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue ) configASSERT( xQueue ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( ( ( Queue_t * ) xQueue )->xQueueLock ) ); { uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( ( ( Queue_t * ) xQueue )->xQueueLock ) ); return uxReturn; } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. 
*/ @@ -2103,11 +2376,11 @@ UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue ) configASSERT( pxQueue ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); { uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); return uxReturn; } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */ @@ -2329,50 +2602,74 @@ static void prvCopyDataFromQueue( Queue_t * const pxQueue, } /*-----------------------------------------------------------*/ -static void prvUnlockQueue( Queue_t * const pxQueue ) -{ - /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */ - - /* The lock counts contains the number of extra data items placed or - * removed from the queue while the queue was locked. When a queue is - * locked items can be added or removed, but the event lists cannot be - * updated. */ - taskENTER_CRITICAL(); +#if ( queueUSE_LOCKS == 1 ) + static void prvUnlockQueue( Queue_t * const pxQueue ) { - int8_t cTxLock = pxQueue->cTxLock; + /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */ - /* See if data was added to the queue while it was locked. */ - while( cTxLock > queueLOCKED_UNMODIFIED ) + /* The lock counts contains the number of extra data items placed or + * removed from the queue while the queue was locked. When a queue is + * locked items can be added or removed, but the event lists cannot be + * updated. */ + taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); { - /* Data was posted while the queue was locked. Are any tasks - * blocked waiting for data to become available? */ - #if ( configUSE_QUEUE_SETS == 1 ) + int8_t cTxLock = pxQueue->cTxLock; + + /* See if data was added to the queue while it was locked. */ + while( cTxLock > queueLOCKED_UNMODIFIED ) { - if( pxQueue->pxQueueSetContainer != NULL ) + /* Data was posted while the queue was locked. Are any tasks + * blocked waiting for data to become available? */ + #if ( configUSE_QUEUE_SETS == 1 ) { - if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE ) + if( pxQueue->pxQueueSetContainer != NULL ) { - /* The queue is a member of a queue set, and posting to - * the queue set caused a higher priority task to unblock. - * A context switch is required. */ - vTaskMissedYield(); + if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE ) + { + /* The queue is a member of a queue set, and posting to + * the queue set caused a higher priority task to unblock. + * A context switch is required. */ + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } else { - mtCOVERAGE_TEST_MARKER(); + /* Tasks that are removed from the event list will get + * added to the pending ready list as the scheduler is still + * suspended. */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority so record that a + * context switch is required. */ + vTaskMissedYield(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + break; + } } } - else + #else /* configUSE_QUEUE_SETS */ { - /* Tasks that are removed from the event list will get - * added to the pending ready list as the scheduler is still - * suspended. */ + /* Tasks that are removed from the event list will get added to + * the pending ready list as the scheduler is still suspended. 
*/ if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) { if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) { - /* The task waiting has a higher priority so record that a - * context switch is required. */ + /* The task waiting has a higher priority so record that + * a context switch is required. */ vTaskMissedYield(); } else @@ -2385,89 +2682,69 @@ static void prvUnlockQueue( Queue_t * const pxQueue ) break; } } + #endif /* configUSE_QUEUE_SETS */ + + --cTxLock; } - #else /* configUSE_QUEUE_SETS */ + + pxQueue->cTxLock = queueUNLOCKED; + } + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); + + /* Do the same for the Rx lock. */ + taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); + { + int8_t cRxLock = pxQueue->cRxLock; + + while( cRxLock > queueLOCKED_UNMODIFIED ) { - /* Tasks that are removed from the event list will get added to - * the pending ready list as the scheduler is still suspended. */ - if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) { - /* The task waiting has a higher priority so record that - * a context switch is required. */ vTaskMissedYield(); } else { mtCOVERAGE_TEST_MARKER(); } + + --cRxLock; } else { break; } } - #endif /* configUSE_QUEUE_SETS */ - --cTxLock; + pxQueue->cRxLock = queueUNLOCKED; } - - pxQueue->cTxLock = queueUNLOCKED; + taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); } - taskEXIT_CRITICAL(); +#endif /* queueUSE_LOCKS == 1 */ +/*-----------------------------------------------------------*/ - /* Do the same for the Rx lock. */ - taskENTER_CRITICAL(); +#if ( queueUSE_LOCKS == 1 ) + static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) { - int8_t cRxLock = pxQueue->cRxLock; + BaseType_t xReturn; - while( cRxLock > queueLOCKED_UNMODIFIED ) + taskENTER_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) ); { - if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 ) { - if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) - { - vTaskMissedYield(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - --cRxLock; + xReturn = pdTRUE; } else { - break; + xReturn = pdFALSE; } } + taskEXIT_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) ); - pxQueue->cRxLock = queueUNLOCKED; + return xReturn; } - taskEXIT_CRITICAL(); -} -/*-----------------------------------------------------------*/ - -static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) -{ - BaseType_t xReturn; - - taskENTER_CRITICAL(); - { - if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 ) - { - xReturn = pdTRUE; - } - else - { - xReturn = pdFALSE; - } - } - taskEXIT_CRITICAL(); - - return xReturn; -} +#endif /* queueUSE_LOCKS == 1 */ /*-----------------------------------------------------------*/ BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) @@ -2490,25 +2767,27 @@ BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) } /*lint !e818 xQueue could not be pointer to const because it is a typedef. 
*/ /*-----------------------------------------------------------*/ -static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) -{ - BaseType_t xReturn; - - taskENTER_CRITICAL(); +#if ( queueUSE_LOCKS == 1 ) + static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) { - if( pxQueue->uxMessagesWaiting == pxQueue->uxLength ) - { - xReturn = pdTRUE; - } - else - { - xReturn = pdFALSE; - } - } - taskEXIT_CRITICAL(); + BaseType_t xReturn; - return xReturn; -} + taskENTER_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) ); + { + if( pxQueue->uxMessagesWaiting == pxQueue->uxLength ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + taskEXIT_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) ); + + return xReturn; + } +#endif /* queueUSE_LOCKS == 1 */ /*-----------------------------------------------------------*/ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) @@ -2828,38 +3107,45 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) configASSERT( xQueue ); - if( pcQueueName != NULL ) + /* For SMP, we need to take the queue registry lock in case another + * core updates the register simultaneously. */ + taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); { - /* See if there is an empty space in the registry. A NULL name denotes - * a free slot. */ - for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ ) + if( pcQueueName != NULL ) { - /* Replace an existing entry if the queue is already in the registry. */ - if( xQueue == xQueueRegistry[ ux ].xHandle ) + /* See if there is an empty space in the registry. A NULL name denotes + * a free slot. */ + for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ ) { - pxEntryToWrite = &( xQueueRegistry[ ux ] ); - break; - } - /* Otherwise, store in the next empty location */ - else if( ( pxEntryToWrite == NULL ) && ( xQueueRegistry[ ux ].pcQueueName == NULL ) ) - { - pxEntryToWrite = &( xQueueRegistry[ ux ] ); - } - else - { - mtCOVERAGE_TEST_MARKER(); + /* Replace an existing entry if the queue is already in the registry. */ + if( xQueue == xQueueRegistry[ ux ].xHandle ) + { + pxEntryToWrite = &( xQueueRegistry[ ux ] ); + break; + } + /* Otherwise, store in the next empty location */ + else if( ( pxEntryToWrite == NULL ) && ( xQueueRegistry[ ux ].pcQueueName == NULL ) ) + { + pxEntryToWrite = &( xQueueRegistry[ ux ] ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } } - } - if( pxEntryToWrite != NULL ) - { - /* Store the information on this queue. */ - pxEntryToWrite->pcQueueName = pcQueueName; - pxEntryToWrite->xHandle = xQueue; + if( pxEntryToWrite != NULL ) + { + /* Store the information on this queue. */ + pxEntryToWrite->pcQueueName = pcQueueName; + pxEntryToWrite->xHandle = xQueue; - traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName ); + traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName ); + } } + /* Release the previously taken queue registry lock. */ + taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); } #endif /* configQUEUE_REGISTRY_SIZE */ @@ -2874,21 +3160,28 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) configASSERT( xQueue ); - /* Note there is nothing here to protect against another task adding or - * removing entries from the registry while it is being searched. */ - - for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ ) + /* For SMP, we need to take the queue registry lock in case another + * core updates the register simultaneously. 
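/*
 * Illustrative sketch (assumed, declared alongside xQueueRegistry earlier in
 * this file rather than in this hunk): the registry lock used by the
 * registry functions in this change is expected to be a plain spinlock,
 * mirroring the other locks this patch introduces.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )
    PRIVILEGED_DATA static portMUX_TYPE xQueueRegistryLock = portMUX_INITIALIZER_UNLOCKED;
#endif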
*/ + taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); { - if( xQueueRegistry[ ux ].xHandle == xQueue ) + /* Note there is nothing here to protect against another task adding or + * removing entries from the registry while it is being searched. */ + + for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ ) { - pcReturn = xQueueRegistry[ ux ].pcQueueName; - break; - } - else - { - mtCOVERAGE_TEST_MARKER(); + if( xQueueRegistry[ ux ].xHandle == xQueue ) + { + pcReturn = xQueueRegistry[ ux ].pcQueueName; + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } } + /* Release the previously taken queue registry lock. */ + taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); return pcReturn; } /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */ @@ -2904,26 +3197,33 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) configASSERT( xQueue ); - /* See if the handle of the queue being unregistered in actually in the - * registry. */ - for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ ) + /* For SMP, we need to take the queue registry lock in case another + * core updates the register simultaneously. */ + taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); { - if( xQueueRegistry[ ux ].xHandle == xQueue ) + /* See if the handle of the queue being unregistered in actually in the + * registry. */ + for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ ) { - /* Set the name to NULL to show that this slot if free again. */ - xQueueRegistry[ ux ].pcQueueName = NULL; + if( xQueueRegistry[ ux ].xHandle == xQueue ) + { + /* Set the name to NULL to show that this slot if free again. */ + xQueueRegistry[ ux ].pcQueueName = NULL; - /* Set the handle to NULL to ensure the same queue handle cannot - * appear in the registry twice if it is added, removed, then - * added again. */ - xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0; - break; - } - else - { - mtCOVERAGE_TEST_MARKER(); + /* Set the handle to NULL to ensure the same queue handle cannot + * appear in the registry twice if it is added, removed, then + * added again. */ + xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0; + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } } + /* Release the previously taken queue registry lock. */ + taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */ #endif /* configQUEUE_REGISTRY_SIZE */ @@ -2945,25 +3245,40 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) * so it should be called with the scheduler locked and not from a critical * section. */ - /* Only do anything if there are no messages in the queue. This function - * will not actually cause the task to block, just place it on a blocked - * list. It will not block until the scheduler is unlocked - at which - * time a yield will be performed. If an item is added to the queue while - * the queue is locked, and the calling task blocks on the queue, then the - * calling task will be immediately unblocked when the queue is unlocked. */ - prvLockQueue( pxQueue ); - - if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U ) + /* For SMP, we need to take the queue's xQueueLock as we are about to + * access the queue. */ + taskENTER_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) ); { - /* There is nothing in the queue, block for the specified period. 
*/ - vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + #if ( queueUSE_LOCKS == 1 ) + { + /* Only do anything if there are no messages in the queue. This function + * will not actually cause the task to block, just place it on a blocked + * list. It will not block until the scheduler is unlocked - at which + * time a yield will be performed. If an item is added to the queue while + * the queue is locked, and the calling task blocks on the queue, then the + * calling task will be immediately unblocked when the queue is unlocked. */ + prvLockQueue( pxQueue ); + } + #endif /* queueUSE_LOCKS == 1 */ - prvUnlockQueue( pxQueue ); + if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U ) + { + /* There is nothing in the queue, block for the specified period. */ + vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + #if ( queueUSE_LOCKS == 1 ) + { + prvUnlockQueue( pxQueue ); + } + #endif /* queueUSE_LOCKS == 1 */ + } + /* Release the previously taken xQueueLock. */ + taskEXIT_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) ); } #endif /* configUSE_TIMERS */ @@ -2990,7 +3305,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) { BaseType_t xReturn; - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( ( ( Queue_t * ) xQueueOrSemaphore )->xQueueLock ) ); { if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL ) { @@ -3009,7 +3324,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) xReturn = pdPASS; } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( ( ( Queue_t * ) xQueueOrSemaphore )->xQueueLock ) ); return xReturn; } @@ -3039,12 +3354,12 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) } else { - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( ( ( Queue_t * ) pxQueueOrSemaphore )->xQueueLock ) ); { /* The queue is no longer contained in the set. */ pxQueueOrSemaphore->pxQueueSetContainer = NULL; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( ( ( Queue_t * ) pxQueueOrSemaphore )->xQueueLock ) ); xReturn = pdPASS; } @@ -3096,23 +3411,37 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) configASSERT( pxQueueSetContainer ); /* LCOV_EXCL_BR_LINE */ configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength ); - if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength ) + /* In SMP, queue sets have their own xQueueLock. Thus we need to also + * acquire the queue set's xQueueLock before accessing it. */ + taskENTER_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) ); { - const int8_t cTxLock = pxQueueSetContainer->cTxLock; - - traceQUEUE_SET_SEND( pxQueueSetContainer ); - - /* The data copied is the handle of the queue that contains data. */ - xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, queueSEND_TO_BACK ); - - if( cTxLock == queueUNLOCKED ) + if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength ) { - if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE ) + #if ( queueUSE_LOCKS == 1 ) + const int8_t cTxLock = pxQueueSetContainer->cTxLock; + #else + /* Queue locks not used, so we treat it as unlocked. */ + const int8_t cTxLock = queueUNLOCKED; + #endif /* queueUSE_LOCKS == 1 */ + + traceQUEUE_SET_SEND( pxQueueSetContainer ); + + /* The data copied is the handle of the queue that contains data. 
*/ + xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, queueSEND_TO_BACK ); + + if( cTxLock == queueUNLOCKED ) { - if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE ) + if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE ) { - /* The task waiting has a higher priority. */ - xReturn = pdTRUE; + if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The task waiting has a higher priority. */ + xReturn = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } else { @@ -3121,18 +3450,20 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) } else { - mtCOVERAGE_TEST_MARKER(); + #if ( queueUSE_LOCKS == 1 ) + { + prvIncrementQueueTxLock( pxQueueSetContainer, cTxLock ); + } + #endif /* queueUSE_LOCKS == 1 */ } } else { - prvIncrementQueueTxLock( pxQueueSetContainer, cTxLock ); + mtCOVERAGE_TEST_MARKER(); } } - else - { - mtCOVERAGE_TEST_MARKER(); - } + /* Release the previously acquired queue set's xQueueLock. */ + taskEXIT_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) ); return xReturn; } diff --git a/components/freertos/FreeRTOS-Kernel-V10.5.1/stream_buffer.c b/components/freertos/FreeRTOS-Kernel-V10.5.1/stream_buffer.c index 236729cee6..f474580ebc 100644 --- a/components/freertos/FreeRTOS-Kernel-V10.5.1/stream_buffer.c +++ b/components/freertos/FreeRTOS-Kernel-V10.5.1/stream_buffer.c @@ -43,6 +43,8 @@ #include "FreeRTOS.h" #include "task.h" #include "stream_buffer.h" +/* Include private IDF API additions for critical thread safety macros */ +#include "esp_private/freertos_idf_additions_priv.h" #if ( configUSE_TASK_NOTIFICATIONS != 1 ) #error configUSE_TASK_NOTIFICATIONS must be set to 1 to build stream_buffer.c @@ -63,18 +65,18 @@ * that uses task notifications. */ /*lint -save -e9026 Function like macros allowed and needed here so they can be overridden. */ #ifndef sbRECEIVE_COMPLETED - #define sbRECEIVE_COMPLETED( pxStreamBuffer ) \ - vTaskSuspendAll(); \ - { \ - if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \ - { \ - ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToSend, \ - ( uint32_t ) 0, \ - eNoAction ); \ - ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ - } \ - } \ - ( void ) xTaskResumeAll(); + #define sbRECEIVE_COMPLETED( pxStreamBuffer ) \ + prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxStreamBuffer->xStreamBufferLock ) ); \ + { \ + if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \ + { \ + ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToSend, \ + ( uint32_t ) 0, \ + eNoAction ); \ + ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ + } \ + } \ + ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxStreamBuffer->xStreamBufferLock ) ); #endif /* sbRECEIVE_COMPLETED */ /* If user has provided a per-instance receive complete callback, then @@ -140,18 +142,18 @@ * implementation that uses task notifications. 
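/*
 * Illustrative sketch (assumed, provided by the private IDF additions header,
 * not by this hunk): the prvENTER_CRITICAL_OR_SUSPEND_ALL()/
 * prvEXIT_CRITICAL_OR_RESUME_ALL() pair used in the reworked
 * sbRECEIVE_COMPLETED() above and sbSEND_COMPLETED() below is expected to
 * take the stream buffer's spinlock on SMP and to fall back to the upstream
 * scheduler suspend/resume on single core. The SMP exit form is written as a
 * GCC statement expression so that it still yields a value for the
 * "( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( ... );" call sites.
 */
#if ( configNUMBER_OF_CORES > 1 )
    #define prvENTER_CRITICAL_OR_SUSPEND_ALL( pxLock )    taskENTER_CRITICAL( ( pxLock ) )
    #define prvEXIT_CRITICAL_OR_RESUME_ALL( pxLock )      ( { taskEXIT_CRITICAL( ( pxLock ) ); pdFALSE; } )
#else
    #define prvENTER_CRITICAL_OR_SUSPEND_ALL( pxLock )    vTaskSuspendAll()
    #define prvEXIT_CRITICAL_OR_RESUME_ALL( pxLock )      xTaskResumeAll()
#endif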
*/ #ifndef sbSEND_COMPLETED - #define sbSEND_COMPLETED( pxStreamBuffer ) \ - vTaskSuspendAll(); \ - { \ - if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \ - { \ - ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToReceive, \ - ( uint32_t ) 0, \ - eNoAction ); \ - ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ - } \ - } \ - ( void ) xTaskResumeAll(); + #define sbSEND_COMPLETED( pxStreamBuffer ) \ + prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxStreamBuffer->xStreamBufferLock ) ); \ + { \ + if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \ + { \ + ( void ) xTaskNotify( ( pxStreamBuffer )->xTaskWaitingToReceive, \ + ( uint32_t ) 0, \ + eNoAction ); \ + ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ + } \ + } \ + ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxStreamBuffer->xStreamBufferLock ) ); #endif /* sbSEND_COMPLETED */ /* If user has provided a per-instance send completed callback, then @@ -243,6 +245,8 @@ typedef struct StreamBufferDef_t /*lint !e9058 Style convention StreamBufferCallbackFunction_t pxSendCompletedCallback; /* Optional callback called on send complete. sbSEND_COMPLETED is called if this is NULL. */ StreamBufferCallbackFunction_t pxReceiveCompletedCallback; /* Optional callback called on receive complete. sbRECEIVE_COMPLETED is called if this is NULL. */ #endif + + portMUX_TYPE xStreamBufferLock; /* Spinlock required for SMP critical sections */ } StreamBuffer_t; /* @@ -385,6 +389,11 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, pxSendCompletedCallback, pxReceiveCompletedCallback ); + /* Initialize the stream buffer's spinlock separately, as + * prvInitialiseNewStreamBuffer() is also called from + * xStreamBufferReset(). */ + portMUX_INITIALIZE( &( ( ( StreamBuffer_t * ) pucAllocatedMemory )->xStreamBufferLock ) ); + traceSTREAM_BUFFER_CREATE( ( ( StreamBuffer_t * ) pucAllocatedMemory ), xIsMessageBuffer ); } else @@ -463,6 +472,11 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, * again. */ pxStreamBuffer->ucFlags |= sbFLAGS_IS_STATICALLY_ALLOCATED; + /* Initialize the stream buffer's spinlock separately, as + * prvInitialiseNewStreamBuffer() is also called from + * xStreamBufferReset(). */ + portMUX_INITIALIZE( &( pxStreamBuffer->xStreamBufferLock ) ); + traceSTREAM_BUFFER_CREATE( pxStreamBuffer, xIsMessageBuffer ); xReturn = ( StreamBufferHandle_t ) pxStaticStreamBuffer; /*lint !e9087 Data hiding requires cast to opaque type. */ @@ -560,7 +574,7 @@ BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) #endif /* Can only reset a message buffer if there are no tasks blocked on it. */ - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) ); { if( ( pxStreamBuffer->xTaskWaitingToReceive == NULL ) && ( pxStreamBuffer->xTaskWaitingToSend == NULL ) ) { @@ -590,7 +604,7 @@ BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) xReturn = pdPASS; } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) ); return xReturn; } @@ -736,7 +750,7 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, { /* Wait until the required number of bytes are free in the message * buffer. 
*/ - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) ); { xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer ); @@ -751,11 +765,11 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, } else { - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) ); break; } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) ); traceBLOCKING_ON_STREAM_BUFFER_SEND( xStreamBuffer ); ( void ) xTaskNotifyWait( ( uint32_t ) 0, ( uint32_t ) 0, NULL, xTicksToWait ); @@ -932,7 +946,7 @@ size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, { /* Checking if there is data and clearing the notification state must be * performed atomically. */ - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) ); { xBytesAvailable = prvBytesInBuffer( pxStreamBuffer ); @@ -955,7 +969,7 @@ size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) ); if( xBytesAvailable <= xBytesToStoreMessageLength ) { @@ -1409,7 +1423,17 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, } /*lint !e529 !e438 xWriteValue is only used if configASSERT() is defined. */ #endif - ( void ) memset( ( void * ) pxStreamBuffer, 0x00, sizeof( StreamBuffer_t ) ); /*lint !e9087 memset() requires void *. */ + /* This function could be called from xStreamBufferReset(), so we reset the + * stream buffer fields manually in order to avoid clearing + * xStreamBufferLock. The xStreamBufferLock is initialized separately on + * stream buffer creation. */ + pxStreamBuffer->xTail = ( size_t ) 0; + pxStreamBuffer->xHead = ( size_t ) 0; + pxStreamBuffer->xTaskWaitingToReceive = ( TaskHandle_t ) 0; + pxStreamBuffer->xTaskWaitingToSend = ( TaskHandle_t ) 0; + #if ( configUSE_TRACE_FACILITY == 1 ) + pxStreamBuffer->uxStreamBufferNumber = ( UBaseType_t ) 0; + #endif pxStreamBuffer->pucBuffer = pucBuffer; pxStreamBuffer->xLength = xBufferSizeBytes; pxStreamBuffer->xTriggerLevelBytes = xTriggerLevelBytes; diff --git a/components/freertos/FreeRTOS-Kernel-V10.5.1/tasks.c b/components/freertos/FreeRTOS-Kernel-V10.5.1/tasks.c index f71ffdd63d..2644f5c1e4 100644 --- a/components/freertos/FreeRTOS-Kernel-V10.5.1/tasks.c +++ b/components/freertos/FreeRTOS-Kernel-V10.5.1/tasks.c @@ -44,6 +44,9 @@ #include "task.h" #include "timers.h" #include "stack_macros.h" +/* Include private IDF API additions for critical thread safety macros */ +#include "esp_private/freertos_idf_additions_priv.h" +#include "freertos/idf_additions.h" /* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified * because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined @@ -62,6 +65,67 @@ #include #endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */ +/* Some code sections require extra critical sections when building for SMP + * ( configNUMBER_OF_CORES > 1 ). 
*/ +#if ( configNUMBER_OF_CORES > 1 ) + /* Macros that Enter/exit a critical section only when building for SMP */ + #define taskENTER_CRITICAL_SMP_ONLY( pxLock ) taskENTER_CRITICAL( pxLock ) + #define taskEXIT_CRITICAL_SMP_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock ) + #define taskENTER_CRITICAL_ISR_SMP_ONLY( pxLock ) taskENTER_CRITICAL_ISR( pxLock ) + #define taskEXIT_CRITICAL_ISR_SMP_ONLY( pxLock ) taskEXIT_CRITICAL_ISR( pxLock ) + #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskEnterCriticalSafeSMPOnly( pxLock ) + #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskExitCriticalSafeSMPOnly( pxLock ) + /* Macros that Enter/exit a critical section only when building for single-core */ + #define taskENTER_CRITICAL_SC_ONLY( pxLock ) taskENTER_CRITICAL( pxLock ) + #define taskEXIT_CRITICAL_SC_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock ) + + static inline __attribute__( ( always_inline ) ) + void prvTaskEnterCriticalSafeSMPOnly( portMUX_TYPE * pxLock ) + { + if( portCHECK_IF_IN_ISR() == pdFALSE ) + { + taskENTER_CRITICAL( pxLock ); + } + else + { + #ifdef __clang_analyzer__ + /* Teach clang-tidy that ISR version macro can be different */ + configASSERT( 1 ); + #endif + taskENTER_CRITICAL_ISR( pxLock ); + } + } + + static inline __attribute__( ( always_inline ) ) + void prvTaskExitCriticalSafeSMPOnly( portMUX_TYPE * pxLock ) + { + if( portCHECK_IF_IN_ISR() == pdFALSE ) + { + taskEXIT_CRITICAL( pxLock ); + } + else + { + #ifdef __clang_analyzer__ + /* Teach clang-tidy that ISR version macro can be different */ + configASSERT( 1 ); + #endif + taskEXIT_CRITICAL_ISR( pxLock ); + } + } + +#else /* configNUMBER_OF_CORES > 1 */ + /* Macros that Enter/exit a critical section only when building for SMP */ + #define taskENTER_CRITICAL_SMP_ONLY( pxLock ) + #define taskEXIT_CRITICAL_SMP_ONLY( pxLock ) + #define taskENTER_CRITICAL_ISR_SMP_ONLY( pxLock ) + #define taskEXIT_CRITICAL_ISR_SMP_ONLY( pxLock ) + #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock ) + #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock ) + /* Macros that Enter/exit a critical section only when building for single-core */ + #define taskENTER_CRITICAL_SC_ONLY( pxLock ) taskENTER_CRITICAL( pxLock ) + #define taskEXIT_CRITICAL_SC_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock ) +#endif /* configNUMBER_OF_CORES > 1 */ + #if ( configUSE_PREEMPTION == 0 ) /* If the cooperative scheduler is being used then a yield should not be @@ -71,6 +135,10 @@ #define taskYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API() #endif +#if ( configNUMBER_OF_CORES > 1 ) + #define taskYIELD_CORE( xCoreID ) portYIELD_CORE( xCoreID ) +#endif /* configNUMBER_OF_CORES > 1 */ + /* Values that can be assigned to the ucNotifyState member of the TCB. */ #define taskNOT_WAITING_NOTIFICATION ( ( uint8_t ) 0 ) /* Must be zero as it is the initialised value. */ #define taskWAITING_NOTIFICATION ( ( uint8_t ) 1 ) @@ -119,6 +187,80 @@ #define configIDLE_TASK_NAME "IDLE" #endif +/*-----------------------------------------------------------*/ + +/* Macros to check if an unblocked task causes a yield on the current core. + * - pxTCB is the TCB of the task to check + * - xCurCoreID is the current core's ID + * - xYieldEqualPriority indicates whether a yield should occur if the unblocked + * task's priority is equal to the priority of the task currently running on the + * current core. 
+ * - uxTaskPriority is the task's priority + * - xTaskCoreID is the task's core affinity */ +#if ( configNUMBER_OF_CORES > 1 ) + #define taskIS_YIELD_REQUIRED( pxTCB, xCurCoreID, xYieldEqualPriority ) prvIsYieldUsingPrioritySMP( ( pxTCB )->uxPriority, ( pxTCB )->xCoreID, xCurCoreID, xYieldEqualPriority ) + #define taskIS_YIELD_REQUIRED_USING_PRIORITY( uxTaskPriority, xTaskCoreID, xCurCoreID, xYieldEqualPriority ) prvIsYieldUsingPrioritySMP( uxTaskPriority, xTaskCoreID, xCurCoreID, xYieldEqualPriority ) +#else + #define taskIS_YIELD_REQUIRED( pxTCB, xCurCoreID, xYieldEqualPriority ) ( ( ( ( pxTCB )->uxPriority + ( ( xYieldEqualPriority == pdTRUE ) ? 1 : 0 ) ) > pxCurrentTCBs[ 0 ]->uxPriority ) ? pdTRUE : pdFALSE ) + #define taskIS_YIELD_REQUIRED_USING_PRIORITY( uxTaskPriority, xTaskCoreID, xCurCoreID, xYieldEqualPriority ) ( ( ( uxTaskPriority + ( ( xYieldEqualPriority == pdTRUE ) ? 1 : 0 ) ) >= pxCurrentTCBs[ 0 ]->uxPriority ) ? pdTRUE : pdFALSE ) +#endif /* configNUMBER_OF_CORES > 1 */ +/*-----------------------------------------------------------*/ + +/* Macros to check if a task has a compatible affinity with a particular core. + * - xCore is the target core + * - xCoreID is the affinity of the task to check + * + * This macro will always return true on single core as the concept of core + * affinity doesn't exist. */ +#if ( configNUMBER_OF_CORES > 1 ) + #define taskIS_AFFINITY_COMPATIBLE( xCore, xCoreID ) ( ( ( ( xCoreID ) == xCore ) || ( ( xCoreID ) == tskNO_AFFINITY ) ) ? pdTRUE : pdFALSE ) +#else + #define taskIS_AFFINITY_COMPATIBLE( xCore, xCoreID ) ( pdTRUE ) +#endif /* configNUMBER_OF_CORES > 1 */ +/*-----------------------------------------------------------*/ + +/* Macros to check if a particular task is a currently running. */ +#if ( configNUMBER_OF_CORES > 1 ) + #define taskIS_CURRENTLY_RUNNING( pxTCB ) ( ( ( ( pxTCB ) == pxCurrentTCBs[ 0 ] ) || ( ( pxTCB ) == pxCurrentTCBs[ 1 ] ) ) ? pdTRUE : pdFALSE ) + #define taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, xCoreID ) ( ( ( pxTCB ) == pxCurrentTCBs[ ( xCoreID ) ] ) ? pdTRUE : pdFALSE ) +#else + #define taskIS_CURRENTLY_RUNNING( pxTCB ) ( ( ( pxTCB ) == pxCurrentTCBs[ 0 ] ) ? pdTRUE : pdFALSE ) + #define taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, xCoreID ) taskIS_CURRENTLY_RUNNING( pxTCB ) +#endif /* configNUMBER_OF_CORES > 1 */ +/*-----------------------------------------------------------*/ + +/* Macro to check if a particular task can currently be scheduled (i.e., is + * the scheduler suspended). */ +#if ( configNUMBER_OF_CORES > 1 ) + #define taskCAN_BE_SCHEDULED( pxTCB ) prvCheckTaskCanBeScheduledSMP( pxTCB ) +#else + #define taskCAN_BE_SCHEDULED( pxTCB ) ( ( ( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) 0U ) ) ? pdTRUE : pdFALSE ) +#endif /* configNUMBER_OF_CORES > 1 */ +/*-----------------------------------------------------------*/ + +/* Macro to check if the scheduler is suspended (on the current core) + * + * There are various blocking tasks.c APIs that call configASSERT() to check if + * the API is being called while the scheduler is suspended. However, these + * asserts are done outside a critical section or interrupt disabled block. + * Directly checking uxSchedulerSuspended[ portGET_CORE_ID() ] outside a + * critical section can lead to false positives in SMP. Thus for SMP, we call + * xTaskGetSchedulerState() instead. 
+ * + * Take the following example of an unpinned Task A in SMP calling + * uxSchedulerSuspended[ portGET_CORE_ID() ]: + * - Task A calls portGET_CORE_ID() which is 0 + * - Task A gets preempted by Task B, Task A switches to core 1 + * - Task B on core 0 calls vTaskSuspendAll() + * - Task A checks uxSchedulerSuspended[ 0 ] leading to a false positive + */ +#if ( configNUMBER_OF_CORES > 1 ) + #define taskIS_SCHEDULER_SUSPENDED() ( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) ? pdTRUE : pdFALSE ) +#else + #define taskIS_SCHEDULER_SUSPENDED() ( ( ( uxSchedulerSuspended[ 0 ] != ( UBaseType_t ) 0U ) ) ? pdTRUE : pdFALSE ) +#endif /* configNUMBER_OF_CORES > 1 */ +/*-----------------------------------------------------------*/ + #if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 0 then task selection is @@ -137,7 +279,10 @@ /*-----------------------------------------------------------*/ - #define taskSELECT_HIGHEST_PRIORITY_TASK() \ + #if ( configNUMBER_OF_CORES > 1 ) + #define taskSELECT_HIGHEST_PRIORITY_TASK() prvSelectHighestPriorityTaskSMP() + #else /* if ( configNUMBER_OF_CORES > 1 ) */ + #define taskSELECT_HIGHEST_PRIORITY_TASK() \ { \ UBaseType_t uxTopPriority = uxTopReadyPriority; \ \ @@ -149,10 +294,11 @@ } \ \ /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of \ - * the same priority get an equal share of the processor time. */ \ - listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ - uxTopReadyPriority = uxTopPriority; \ + * the same priority get an equal share of the processor time. */ \ + listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCBs[ 0 ], &( pxReadyTasksLists[ uxTopPriority ] ) ); \ + uxTopReadyPriority = uxTopPriority; \ } /* taskSELECT_HIGHEST_PRIORITY_TASK */ + #endif /* if ( configNUMBER_OF_CORES > 1 ) */ /*-----------------------------------------------------------*/ @@ -173,14 +319,14 @@ /*-----------------------------------------------------------*/ - #define taskSELECT_HIGHEST_PRIORITY_TASK() \ - { \ - UBaseType_t uxTopPriority; \ - \ - /* Find the highest priority list that contains ready tasks. */ \ - portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority ); \ - configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 ); \ - listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ + #define taskSELECT_HIGHEST_PRIORITY_TASK() \ + { \ + UBaseType_t uxTopPriority; \ + \ + /* Find the highest priority list that contains ready tasks. */ \ + portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority ); \ + configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 ); \ + listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCBs[ 0 ], &( pxReadyTasksLists[ uxTopPriority ] ) ); \ } /* taskSELECT_HIGHEST_PRIORITY_TASK() */ /*-----------------------------------------------------------*/ @@ -234,8 +380,14 @@ * where NULL is used to indicate that the handle of the currently executing * task should be used in place of the parameter. This macro simply checks to * see if the parameter is NULL and returns a pointer to the appropriate TCB. + * + * In SMP, calling xTaskGetCurrentTaskHandle() ensures atomic access to pxCurrentTCBs */ -#define prvGetTCBFromHandle( pxHandle ) ( ( ( pxHandle ) == NULL ) ? pxCurrentTCB : ( pxHandle ) ) +#if ( configNUMBER_OF_CORES > 1 ) + #define prvGetTCBFromHandle( pxHandle ) ( ( ( pxHandle ) == NULL ) ? 
xTaskGetCurrentTaskHandle() : ( pxHandle ) ) +#else + #define prvGetTCBFromHandle( pxHandle ) ( ( ( pxHandle ) == NULL ) ? pxCurrentTCBs[ 0 ] : ( pxHandle ) ) +#endif /* The item value of the event list item is normally used to hold the priority * of the task to which it belongs (coded to allow it to be held in reverse @@ -270,6 +422,9 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to StackType_t * pxStack; /*< Points to the start of the stack. */ char pcTaskName[ configMAX_TASK_NAME_LEN ]; /*< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + /* Todo: Remove xCoreID for single core builds (IDF-7894) */ + BaseType_t xCoreID; /*< The core that this task is pinned to */ + #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) StackType_t * pxEndOfStack; /*< Points to the highest valid address for the stack. */ #endif @@ -330,18 +485,18 @@ typedef tskTCB TCB_t; /*lint -save -e956 A manual analysis and inspection has been used to determine * which static variables must be declared volatile. */ -portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL; +portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCBs[ configNUMBER_OF_CORES ] = { NULL }; /* Lists for ready and blocked tasks. -------------------- * xDelayedTaskList1 and xDelayedTaskList2 could be moved to function scope but * doing so breaks some kernel aware debuggers and debuggers that rely on removing * the static qualifier. */ -PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ]; /*< Prioritised ready tasks. */ -PRIVILEGED_DATA static List_t xDelayedTaskList1; /*< Delayed tasks. */ -PRIVILEGED_DATA static List_t xDelayedTaskList2; /*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */ -PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /*< Points to the delayed task list currently being used. */ -PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */ -PRIVILEGED_DATA static List_t xPendingReadyList; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */ +PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ]; /*< Prioritised ready tasks. */ +PRIVILEGED_DATA static List_t xDelayedTaskList1; /*< Delayed tasks. */ +PRIVILEGED_DATA static List_t xDelayedTaskList2; /*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */ +PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /*< Points to the delayed task list currently being used. */ +PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */ +PRIVILEGED_DATA static List_t xPendingReadyList[ configNUMBER_OF_CORES ]; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. 
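/*
 * Illustrative sketch (hypothetical helper, not part of this patch): with the
 * kernel state converted to per-core arrays above, accesses are expected to
 * be indexed by the running core and, where atomic access to pxCurrentTCBs
 * matters, wrapped in a critical section on the kernel spinlock (xKernelLock)
 * that this change introduces further below.
 */
static inline TCB_t * prvExampleGetCurrentTCB( void )
{
    TCB_t * pxTCB;

    taskENTER_CRITICAL( &xKernelLock );
    {
        /* portGET_CORE_ID() returns the ID of the core this code runs on. */
        pxTCB = pxCurrentTCBs[ portGET_CORE_ID() ];
    }
    taskEXIT_CRITICAL( &xKernelLock );

    return pxTCB;
}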
*/ #if ( INCLUDE_vTaskDelete == 1 ) @@ -368,11 +523,11 @@ PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) configINI PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY; PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE; PRIVILEGED_DATA static volatile TickType_t xPendedTicks = ( TickType_t ) 0U; -PRIVILEGED_DATA static volatile BaseType_t xYieldPending = pdFALSE; +PRIVILEGED_DATA static volatile BaseType_t xYieldPending[ configNUMBER_OF_CORES ] = { pdFALSE }; PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0; PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U; -PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */ -PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle = NULL; /*< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */ +PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */ +PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle[ configNUMBER_OF_CORES ] = { NULL }; /*< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */ /* Improve support for OpenOCD. The kernel tracks Ready tasks via priority lists. * For tracking the state of remote threads, OpenOCD uses uxTopUsedPriority @@ -387,23 +542,97 @@ const volatile UBaseType_t uxTopUsedPriority = configMAX_PRIORITIES - 1U; * kernel to move the task from the pending ready list into the real ready list * when the scheduler is unsuspended. The pending ready list itself can only be * accessed from a critical section. */ -PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t ) pdFALSE; +PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended[ configNUMBER_OF_CORES ] = { ( UBaseType_t ) pdFALSE }; #if ( configGENERATE_RUN_TIME_STATS == 1 ) /* Do not move these variables to function scope as doing so prevents the * code working with debuggers that need to remove the static qualifier. */ - PRIVILEGED_DATA static configRUN_TIME_COUNTER_TYPE ulTaskSwitchedInTime = 0UL; /*< Holds the value of a timer/counter the last time a task was switched in. */ - PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime = 0UL; /*< Holds the total amount of execution time as defined by the run time counter clock. */ +PRIVILEGED_DATA static configRUN_TIME_COUNTER_TYPE ulTaskSwitchedInTime[ configNUMBER_OF_CORES ] = { 0UL }; /*< Holds the value of a timer/counter the last time a task was switched in. */ +PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime = 0UL; /*< Holds the total amount of execution time as defined by the run time counter clock. */ #endif +/* Spinlock required for SMP critical sections. This lock protects all of the + * kernel's data structures such as various tasks lists, flags, and tick counts. */ +PRIVILEGED_DATA static portMUX_TYPE xKernelLock = portMUX_INITIALIZER_UNLOCKED; + /*lint -restore */ /*-----------------------------------------------------------*/ /* File private functions. --------------------------------*/ +/** + * Utility function to check whether a yield (on either core) is required after + * unblocking (or changing the priority of) a particular task. 
+ * + * - This function is the SMP replacement for checking if an unblocked task has + * a higher (or equal) priority than the current task. + * - It should be called before calling taskYIELD_IF_USING_PREEMPTION() or + * before setting xYieldRequired + * - If it is the other core that requires a yield, this function will + * internally trigger the other core to yield + * + * Note: In some special instances, a yield is triggered if the unblocked task + * has an equal priority (such as in xTaskResumeAll). Thus the + * xYieldEqualPriority parameter specifies whether to yield if the current + * task has equal priority. + * + * Scheduling Algorithm: + * This function will bias towards yielding the current core. + * - If the unblocked task has a higher (or equal) priority than the current + * core, the current core is yielded regardless of the current priority of the + * other core. + * - A core (current or other) will only yield if their schedulers are not + * suspended. + * + * Todo: This can be optimized (IDF-5772) + * + * Entry: + * - This function must be called in a critical section + * - A task must just have been unblocked, or its priority raised + * Exit: + * - Returns pdTRUE if the current core requires yielding + * - The other core will be triggered to yield if required + */ +#if ( configNUMBER_OF_CORES > 1 ) + + static BaseType_t prvIsYieldUsingPrioritySMP( UBaseType_t uxTaskPriority, + BaseType_t xTaskCoreID, + BaseType_t xCurCoreID, + BaseType_t xYieldEqualPriority ) PRIVILEGED_FUNCTION; + +#endif /* configNUMBER_OF_CORES > 1 */ + +/** + * Utility function to check whether a task can currently be scheduled on one + * or more cores. This function is the SMP replacement for checking if + * `uxSchedulerSuspended == 0`. + * + * - If a task is pinned, check the scheduler suspension state on the task's + * pinned core. The task can be scheduled if the scheduler is not suspended on + * the pinned core. + * - If a task is unpinned, check the scheduler suspension state on both cores. + * The task can be scheduled if the scheduler is not suspended on either of + * the cores. + */ +#if ( configNUMBER_OF_CORES > 1 ) + + static BaseType_t prvCheckTaskCanBeScheduledSMP( TCB_t * pxTCB ) PRIVILEGED_FUNCTION; + +#endif /* configNUMBER_OF_CORES > 1 */ + +/** + * Utility function to select the highest priority and runnable task for the + * current core. + */ +#if ( configNUMBER_OF_CORES > 1 ) + + static void prvSelectHighestPriorityTaskSMP( void ) PRIVILEGED_FUNCTION; + +#endif /* configNUMBER_OF_CORES > 1 */ + /** * Utility task that simply returns pdTRUE if the task referenced by xTask is * currently in the Suspended state, or pdFALSE if the task referenced by xTask @@ -542,7 +771,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, UBaseType_t uxPriority, TaskHandle_t * const pxCreatedTask, TCB_t * pxNewTCB, - const MemoryRegion_t * const xRegions ) PRIVILEGED_FUNCTION; + const MemoryRegion_t * const xRegions, + BaseType_t xCoreID ) PRIVILEGED_FUNCTION; /* * Called after a new task has been created and initialised to place the task @@ -563,65 +793,98 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; /*-----------------------------------------------------------*/ -#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) +#if ( configNUMBER_OF_CORES > 1 ) - TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode, - const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. 
*/ - const uint32_t ulStackDepth, - void * const pvParameters, - UBaseType_t uxPriority, - StackType_t * const puxStackBuffer, - StaticTask_t * const pxTaskBuffer ) + static BaseType_t prvIsYieldUsingPrioritySMP( UBaseType_t uxTaskPriority, + BaseType_t xTaskCoreID, + BaseType_t xCurCoreID, + BaseType_t xYieldEqualPriority ) { - TCB_t * pxNewTCB; - TaskHandle_t xReturn; + configASSERT( uxTaskPriority < configMAX_PRIORITIES ); - configASSERT( puxStackBuffer != NULL ); - configASSERT( pxTaskBuffer != NULL ); - - #if ( configASSERT_DEFINED == 1 ) + if( xYieldEqualPriority == pdTRUE ) { - /* Sanity check that the size of the structure used to declare a - * variable of type StaticTask_t equals the size of the real task - * structure. */ - volatile size_t xSize = sizeof( StaticTask_t ); - configASSERT( xSize == sizeof( TCB_t ) ); - ( void ) xSize; /* Prevent lint warning when configASSERT() is not used. */ + /* Increment the task priority to achieve the same affect as + * if( uxTaskPriority >= pxCurrentTCBs->uxPriority ). */ + uxTaskPriority++; } - #endif /* configASSERT_DEFINED */ - if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) ) + /* Indicate whether the current core needs to yield */ + BaseType_t xYieldRequiredCurrentCore; + + /* If the target task can run on the current core, and has a higher + * priority than the current core, and the core has not suspended + * scheduling, then yield the current core. + * Todo: Make fair scheduling a configurable option (IDF-5772). */ + if( ( taskIS_AFFINITY_COMPATIBLE( xCurCoreID, xTaskCoreID ) == pdTRUE ) && + ( uxTaskPriority > pxCurrentTCBs[ xCurCoreID ]->uxPriority ) && + ( uxSchedulerSuspended[ xCurCoreID ] == ( UBaseType_t ) 0U ) ) { - /* The memory used for the task's TCB and stack are passed into this - * function - use them. */ - pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */ - memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) ); - pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer; + /* Return true for the caller to yield the current core */ + xYieldRequiredCurrentCore = pdTRUE; + } - #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */ - { - /* Tasks can be created statically or dynamically, so note this - * task was created statically in case the task is later deleted. 
*/ - pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB; - } - #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ - - prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL ); - prvAddNewTaskToReadyList( pxNewTCB ); + /* If the target task can run on the other core, and has a higher + * priority then the other core, and the other core has not suspended + * scheduling, then yield the other core */ + else if( ( taskIS_AFFINITY_COMPATIBLE( !xCurCoreID, xTaskCoreID ) == pdTRUE ) && + ( uxTaskPriority > pxCurrentTCBs[ !xCurCoreID ]->uxPriority ) && + ( uxSchedulerSuspended[ !xCurCoreID ] == ( UBaseType_t ) 0U ) ) + { + /* Signal the other core to yield */ + taskYIELD_CORE( !xCurCoreID ); + xYieldRequiredCurrentCore = pdFALSE; } else { - xReturn = NULL; + xYieldRequiredCurrentCore = pdFALSE; + } + + return xYieldRequiredCurrentCore; + } + +#endif /* configNUMBER_OF_CORES > 1 */ +/*-----------------------------------------------------------*/ + +#if ( configNUMBER_OF_CORES > 1 ) + + static BaseType_t prvCheckTaskCanBeScheduledSMP( TCB_t * pxTCB ) + { + BaseType_t xReturn; + + if( pxTCB->xCoreID == tskNO_AFFINITY ) + { + /* Task is unpinned. As long as one core has not suspended + * scheduling, the task can be scheduled. */ + if( ( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) 0U ) || ( uxSchedulerSuspended[ 1 ] == ( UBaseType_t ) 0U ) ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + else if( uxSchedulerSuspended[ pxTCB->xCoreID ] == ( UBaseType_t ) 0U ) + { + /* The task is pinned to a core. If it's pinned core has not + * suspended scheduling, the task can be scheduled. */ + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; } return xReturn; } -#endif /* SUPPORT_STATIC_ALLOCATION */ +#endif /* configNUMBER_OF_CORES > 1 */ /*-----------------------------------------------------------*/ #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) +/* Todo: Add support for task restricted API (IDF-7895) */ BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t * pxCreatedTask ) { @@ -670,6 +933,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) +/* Todo: Add support for task restricted API (IDF-7895) */ BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t * pxCreatedTask ) { @@ -720,103 +984,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; #endif /* portUSING_MPU_WRAPPERS */ /*-----------------------------------------------------------*/ -#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) - - BaseType_t xTaskCreate( TaskFunction_t pxTaskCode, - const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ - const configSTACK_DEPTH_TYPE usStackDepth, - void * const pvParameters, - UBaseType_t uxPriority, - TaskHandle_t * const pxCreatedTask ) - { - TCB_t * pxNewTCB; - BaseType_t xReturn; - - /* If the stack grows down then allocate the stack then the TCB so the stack - * does not grow into the TCB. Likewise if the stack grows up then allocate - * the TCB then the stack. */ - #if ( portSTACK_GROWTH > 0 ) - { - /* Allocate space for the TCB. Where the memory comes from depends on - * the implementation of the port malloc function and whether or not static - * allocation is being used. 
*/ - pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); - - if( pxNewTCB != NULL ) - { - memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) ); - - /* Allocate space for the stack used by the task being created. - * The base of the stack memory stored in the TCB so the task can - * be deleted later if required. */ - pxNewTCB->pxStack = ( StackType_t * ) pvPortMallocStack( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ - - if( pxNewTCB->pxStack == NULL ) - { - /* Could not allocate the stack. Delete the allocated TCB. */ - vPortFree( pxNewTCB ); - pxNewTCB = NULL; - } - } - } - #else /* portSTACK_GROWTH */ - { - StackType_t * pxStack; - - /* Allocate space for the stack used by the task being created. */ - pxStack = pvPortMallocStack( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation is the stack. */ - - if( pxStack != NULL ) - { - /* Allocate space for the TCB. */ - pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); /*lint !e9087 !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack, and the first member of TCB_t is always a pointer to the task's stack. */ - - if( pxNewTCB != NULL ) - { - memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) ); - - /* Store the stack location in the TCB. */ - pxNewTCB->pxStack = pxStack; - } - else - { - /* The stack cannot be used as the TCB was not created. Free - * it again. */ - vPortFreeStack( pxStack ); - } - } - else - { - pxNewTCB = NULL; - } - } - #endif /* portSTACK_GROWTH */ - - if( pxNewTCB != NULL ) - { - #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e9029 !e731 Macro has been consolidated for readability reasons. */ - { - /* Tasks can be created statically or dynamically, so note this - * task was created dynamically in case it is later deleted. */ - pxNewTCB->ucStaticallyAllocated = tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB; - } - #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ - - prvInitialiseNewTask( pxTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL ); - prvAddNewTaskToReadyList( pxNewTCB ); - xReturn = pdPASS; - } - else - { - xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; - } - - return xReturn; - } - -#endif /* configSUPPORT_DYNAMIC_ALLOCATION */ -/*-----------------------------------------------------------*/ - static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ const uint32_t ulStackDepth, @@ -824,11 +991,20 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, UBaseType_t uxPriority, TaskHandle_t * const pxCreatedTask, TCB_t * pxNewTCB, - const MemoryRegion_t * const xRegions ) + const MemoryRegion_t * const xRegions, + BaseType_t xCoreID ) { StackType_t * pxTopOfStack; UBaseType_t x; + #if ( configNUMBER_OF_CORES > 1 ) + /* Check that xCoreID is valid */ + configASSERT( ( ( xCoreID >= 0 ) && ( xCoreID < configNUMBER_OF_CORES ) ) || ( xCoreID == tskNO_AFFINITY ) ); + #else + /* Hard code xCoreID to 0 */ + xCoreID = 0; + #endif + #if ( portUSING_MPU_WRAPPERS == 1 ) /* Should the task be created in privileged mode? 
*/ BaseType_t xRunPrivileged; @@ -927,6 +1103,7 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } pxNewTCB->uxPriority = uxPriority; + pxNewTCB->xCoreID = xCoreID; /* Todo: Remove xCoreID for single core builds (IDF-7894) */ #if ( configUSE_MUTEXES == 1 ) { pxNewTCB->uxBasePriority = uxPriority; @@ -1031,39 +1208,59 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { /* Ensure interrupts don't access the task lists while the lists are being * updated. */ - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { uxCurrentNumberOfTasks++; - if( pxCurrentTCB == NULL ) + if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 ) { - /* There are no other tasks, or all the other tasks are in - * the suspended state - make this the current task. */ - pxCurrentTCB = pxNewTCB; - - if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 ) - { - /* This is the first task to be created so do the preliminary - * initialisation required. We will not recover if this call - * fails, but we will report the failure. */ - prvInitialiseTaskLists(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + /* This is the first task to be created so do the preliminary + * initialisation required. We will not recover if this call + * fails, but we will report the failure. */ + prvInitialiseTaskLists(); } else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( ( pxCurrentTCBs[ 0 ] == NULL ) && ( taskIS_AFFINITY_COMPATIBLE( 0, pxNewTCB->xCoreID ) == pdTRUE ) ) + { + /* On core 0, there are no other tasks, or all the other tasks + * are in the suspended state - make this the current task. */ + pxCurrentTCBs[ 0 ] = pxNewTCB; + } + + #if ( configNUMBER_OF_CORES > 1 ) + else if( ( pxCurrentTCBs[ 1 ] == NULL ) && ( taskIS_AFFINITY_COMPATIBLE( 1, pxNewTCB->xCoreID ) == pdTRUE ) ) + { + /* On core 1, there are no other tasks, or all the other tasks + * are in the suspended state - make this the current task. */ + pxCurrentTCBs[ 1 ] = pxNewTCB; + } + #endif /* configNUMBER_OF_CORES > 1 */ + else { /* If the scheduler is not already running, make this task the * current task if it is the highest priority task to be created * so far. */ if( xSchedulerRunning == pdFALSE ) { - if( pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority ) + if( ( pxCurrentTCBs[ 0 ] != NULL ) && + ( taskIS_AFFINITY_COMPATIBLE( 0, pxNewTCB->xCoreID ) == pdTRUE ) && + ( pxCurrentTCBs[ 0 ]->uxPriority <= pxNewTCB->uxPriority ) ) { - pxCurrentTCB = pxNewTCB; + pxCurrentTCBs[ 0 ] = pxNewTCB; } + + #if ( configNUMBER_OF_CORES > 1 ) + else if( ( pxCurrentTCBs[ 1 ] != NULL ) && + ( taskIS_AFFINITY_COMPATIBLE( 1, pxNewTCB->xCoreID ) == pdTRUE ) && + ( pxCurrentTCBs[ 1 ]->uxPriority <= pxNewTCB->uxPriority ) ) + { + pxCurrentTCBs[ 1 ] = pxNewTCB; + } + #endif /* configNUMBER_OF_CORES > 1 */ else { mtCOVERAGE_TEST_MARKER(); @@ -1088,26 +1285,29 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) prvAddTaskToReadyList( pxNewTCB ); portSETUP_TCB( pxNewTCB ); - } - taskEXIT_CRITICAL(); - if( xSchedulerRunning != pdFALSE ) - { - /* If the created task is of a higher priority than the current task - * then it should run now. */ - if( pxCurrentTCB->uxPriority < pxNewTCB->uxPriority ) + if( xSchedulerRunning != pdFALSE ) { - taskYIELD_IF_USING_PREEMPTION(); + /* If the created task is of a higher priority than the current task + * then it should run now. 
*/ + if( taskIS_YIELD_REQUIRED( pxNewTCB, portGET_CORE_ID(), pdTRUE ) == pdTRUE ) + { + taskYIELD_IF_USING_PREEMPTION(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } else { mtCOVERAGE_TEST_MARKER(); } } - else - { - mtCOVERAGE_TEST_MARKER(); - } + taskEXIT_CRITICAL( &xKernelLock ); + + /* SINGLE-CORE MODIFICATION: Extended critical section so that SMP can check + * for yield inside critical section. */ } /*-----------------------------------------------------------*/ @@ -1116,9 +1316,14 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) void vTaskDelete( TaskHandle_t xTaskToDelete ) { TCB_t * pxTCB; + BaseType_t xSelfDelete; + BaseType_t xIsCurRunning; - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { + /* Get current core ID as we can no longer be preempted. */ + const BaseType_t xCurCoreID = portGET_CORE_ID(); + /* If null is passed in here then it is the calling task that is * being deleted. */ pxTCB = prvGetTCBFromHandle( xTaskToDelete ); @@ -1149,13 +1354,35 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * not return. */ uxTaskNumber++; - if( pxTCB == pxCurrentTCB ) + /* Check if the task is deleting itself, or is currently running on + * the other core. */ + if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, xCurCoreID ) == pdTRUE ) { - /* A task is deleting itself. This cannot complete within the - * task itself, as a context switch to another task is required. - * Place the task in the termination list. The idle task will - * check the termination list and free up any memory allocated by - * the scheduler for the TCB and stack of the deleted task. */ + xSelfDelete = pdTRUE; + xIsCurRunning = pdTRUE; + } + + #if ( configNUMBER_OF_CORES > 1 ) + else if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, !xCurCoreID ) == pdTRUE ) + { + xSelfDelete = pdFALSE; + xIsCurRunning = pdTRUE; + } + #endif /* configNUMBER_OF_CORES > 1 */ + else + { + xSelfDelete = pdFALSE; + xIsCurRunning = pdFALSE; + } + + if( xIsCurRunning == pdTRUE ) + { + /* A task is deleting itself or is currently running. This + * cannot complete within the task itself, as a context switch + * to another task is required. Place the task in the + * termination list. The idle task will check the termination + * list and free up any memory allocated by the scheduler for + * the TCB and stack of the deleted task. */ vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xStateListItem ) ); /* Increment the ucTasksDeleted variable so the idle task knows @@ -1172,7 +1399,20 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * after which it is not possible to yield away from this task - * hence xYieldPending is used to latch that a context switch is * required. */ - portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending ); + portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending[ xCurCoreID ] ); + + #if ( configNUMBER_OF_CORES > 1 ) + if( xSelfDelete == pdFALSE ) + { + /* The task that is being deleted is currently running + * on the other core. Send a yield request to the other + * core so that the task is swapped out. */ + taskYIELD_CORE( !xCurCoreID ); + } + #else /* configNUMBER_OF_CORES > 1 */ + /* xCurCoreID is unused */ + ( void ) xCurCoreID; + #endif /* configNUMBER_OF_CORES > 1 */ } else { @@ -1184,30 +1424,38 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) prvResetNextTaskUnblockTime(); } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); - /* If the task is not deleting itself, call prvDeleteTCB from outside of - * critical section. 
If a task deletes itself, prvDeleteTCB is called - * from prvCheckTasksWaitingTermination which is called from Idle task. */ - if( pxTCB != pxCurrentTCB ) + /* If the task is currently running, call prvDeleteTCB from outside of + * critical section. If a task is currently running, prvDeleteTCB is + * called from prvCheckTasksWaitingTermination which is called from + * Idle task. */ + if( xIsCurRunning == pdFALSE ) { prvDeleteTCB( pxTCB ); } - /* Force a reschedule if it is the currently running task that has just - * been deleted. */ - if( xSchedulerRunning != pdFALSE ) + /* For SMP, we need to take the kernel lock here as we are about to + * access kernel data structures. */ + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { - if( pxTCB == pxCurrentTCB ) + /* Force a reschedule if it is the currently running task that has just + * been deleted. */ + if( xSchedulerRunning != pdFALSE ) { - configASSERT( uxSchedulerSuspended == 0 ); - portYIELD_WITHIN_API(); - } - else - { - mtCOVERAGE_TEST_MARKER(); + if( xSelfDelete == pdTRUE ) + { + configASSERT( taskIS_SCHEDULER_SUSPENDED() == pdFALSE ); + portYIELD_WITHIN_API(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } } + /* Release the previously taken kernel lock. */ + taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); } #endif /* INCLUDE_vTaskDelete */ @@ -1223,9 +1471,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) configASSERT( pxPreviousWakeTime ); configASSERT( ( xTimeIncrement > 0U ) ); - configASSERT( uxSchedulerSuspended == 0 ); + configASSERT( taskIS_SCHEDULER_SUSPENDED() == pdFALSE ); - vTaskSuspendAll(); + prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock ); { /* Minor optimisation. The tick count cannot change in this * block. */ @@ -1281,7 +1529,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) mtCOVERAGE_TEST_MARKER(); } } - xAlreadyYielded = xTaskResumeAll(); + xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock ); /* Force a reschedule if xTaskResumeAll has not already done so, we may * have put ourselves to sleep. */ @@ -1309,8 +1557,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) /* A delay time of zero just forces a reschedule. */ if( xTicksToDelay > ( TickType_t ) 0U ) { - configASSERT( uxSchedulerSuspended == 0 ); - vTaskSuspendAll(); + configASSERT( taskIS_SCHEDULER_SUSPENDED() == pdFALSE ); + prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock ); { traceTASK_DELAY(); @@ -1323,7 +1571,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * executing task. */ prvAddCurrentTaskToDelayedList( xTicksToDelay, pdFALSE ); } - xAlreadyYielded = xTaskResumeAll(); + xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock ); } else { @@ -1357,86 +1605,86 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) configASSERT( pxTCB ); - if( pxTCB == pxCurrentTCB ) + taskENTER_CRITICAL( &xKernelLock ); { - /* The task calling this function is querying its own state. */ - eReturn = eRunning; - } - else - { - taskENTER_CRITICAL(); + if( taskIS_CURRENTLY_RUNNING( pxTCB ) == pdTRUE ) + { + /* The task calling this function is querying its own state. */ + eReturn = eRunning; + } + else { pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) ); pxDelayedList = pxDelayedTaskList; pxOverflowedDelayedList = pxOverflowDelayedTaskList; - } - taskEXIT_CRITICAL(); - if( ( pxStateList == pxDelayedList ) || ( pxStateList == pxOverflowedDelayedList ) ) - { - /* The task being queried is referenced from one of the Blocked - * lists. 
*/ - eReturn = eBlocked; - } - - #if ( INCLUDE_vTaskSuspend == 1 ) - else if( pxStateList == &xSuspendedTaskList ) + if( ( pxStateList == pxDelayedList ) || ( pxStateList == pxOverflowedDelayedList ) ) { - /* The task being queried is referenced from the suspended - * list. Is it genuinely suspended or is it blocked - * indefinitely? */ - if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL ) + /* The task being queried is referenced from one of the Blocked + * lists. */ + eReturn = eBlocked; + } + + #if ( INCLUDE_vTaskSuspend == 1 ) + else if( pxStateList == &xSuspendedTaskList ) { - #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + /* The task being queried is referenced from the suspended + * list. Is it genuinely suspended or is it blocked + * indefinitely? */ + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL ) { - BaseType_t x; - - /* The task does not appear on the event list item of - * and of the RTOS objects, but could still be in the - * blocked state if it is waiting on its notification - * rather than waiting on an object. If not, is - * suspended. */ - eReturn = eSuspended; - - for( x = 0; x < configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ ) + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) { - if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION ) + BaseType_t x; + + /* The task does not appear on the event list item of + * and of the RTOS objects, but could still be in the + * blocked state if it is waiting on its notification + * rather than waiting on an object. If not, is + * suspended. */ + eReturn = eSuspended; + + for( x = 0; x < configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ ) { - eReturn = eBlocked; - break; + if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION ) + { + eReturn = eBlocked; + break; + } } } + #else /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ + { + eReturn = eSuspended; + } + #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ } - #else /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ + else { - eReturn = eSuspended; + eReturn = eBlocked; } - #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ } - else + #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ + + #if ( INCLUDE_vTaskDelete == 1 ) + else if( ( pxStateList == &xTasksWaitingTermination ) || ( pxStateList == NULL ) ) { - eReturn = eBlocked; + /* The task being queried is referenced from the deleted + * tasks list, or it is not referenced from any lists at + * all. */ + eReturn = eDeleted; } - } - #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ + #endif - #if ( INCLUDE_vTaskDelete == 1 ) - else if( ( pxStateList == &xTasksWaitingTermination ) || ( pxStateList == NULL ) ) + else /*lint !e525 Negative indentation is intended to make use of pre-processor clearer. */ { - /* The task being queried is referenced from the deleted - * tasks list, or it is not referenced from any lists at - * all. */ - eReturn = eDeleted; + /* If the task is not in any other state, it must be in the + * Ready (including pending ready) state. */ + eReturn = eReady; } - #endif - - else /*lint !e525 Negative indentation is intended to make use of pre-processor clearer. */ - { - /* If the task is not in any other state, it must be in the - * Ready (including pending ready) state. */ - eReturn = eReady; } } + taskEXIT_CRITICAL( &xKernelLock ); return eReturn; } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. 
*/ @@ -1451,14 +1699,14 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) TCB_t const * pxTCB; UBaseType_t uxReturn; - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { /* If null is passed in here then it is the priority of the task * that called uxTaskPriorityGet() that is being queried. */ pxTCB = prvGetTCBFromHandle( xTask ); uxReturn = pxTCB->uxPriority; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); return uxReturn; } @@ -1491,14 +1739,14 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptState = portSET_INTERRUPT_MASK_FROM_ISR(); + prvENTER_CRITICAL_OR_MASK_ISR( &xKernelLock, uxSavedInterruptState ); { /* If null is passed in here then it is the priority of the calling * task that is being queried. */ pxTCB = prvGetTCBFromHandle( xTask ); uxReturn = pxTCB->uxPriority; } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptState ); + prvEXIT_CRITICAL_OR_UNMASK_ISR( &xKernelLock, uxSavedInterruptState ); return uxReturn; } @@ -1527,8 +1775,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) mtCOVERAGE_TEST_MARKER(); } - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { + /* Get current core ID as we can no longer be preempted. */ + const BaseType_t xCurCoreID = portGET_CORE_ID(); + /* If null is passed in here then it is the priority of the calling * task that is being changed. */ pxTCB = prvGetTCBFromHandle( xTask ); @@ -1551,12 +1802,12 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * priority than the calling task. */ if( uxNewPriority > uxCurrentBasePriority ) { - if( pxTCB != pxCurrentTCB ) + if( taskIS_CURRENTLY_RUNNING( pxTCB ) == pdFALSE ) { /* The priority of a task other than the currently * running task is being raised. Is the priority being * raised above that of the running task? */ - if( uxNewPriority >= pxCurrentTCB->uxPriority ) + if( taskIS_YIELD_REQUIRED_USING_PRIORITY( uxNewPriority, pxTCB->xCoreID, portGET_CORE_ID(), pdTRUE ) == pdTRUE ) { xYieldRequired = pdTRUE; } @@ -1572,13 +1823,23 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * priority task able to run so no yield is required. */ } } - else if( pxTCB == pxCurrentTCB ) + else if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, xCurCoreID ) == pdTRUE ) { - /* Setting the priority of the running task down means - * there may now be another task of higher priority that - * is ready to execute. */ + /* Lowering the priority of task currently running on the + * current core means there may now be another task of + * higher priority that is ready to execute. */ xYieldRequired = pdTRUE; } + + #if ( configNUMBER_OF_CORES > 1 ) + else if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, !xCurCoreID ) == pdTRUE ) + { + /* Lowering the priority of task currently running on the + * other core also means there may now be another task of + * higher priority that is ready to execute. 
*/ + taskYIELD_CORE( !xCurCoreID ); + } + #endif /* configNUMBER_OF_CORES > 1 */ else { /* Setting the priority of any other task down does not @@ -1666,7 +1927,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) ( void ) uxPriorityUsedOnEntry; } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); } #endif /* INCLUDE_vTaskPrioritySet */ @@ -1678,8 +1939,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { TCB_t * pxTCB; - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { + /* Get current core ID as we can no longer be preempted. */ + const BaseType_t xCurCoreID = portGET_CORE_ID(); + /* If null is passed in here then it is the running task that is * being suspended. */ pxTCB = prvGetTCBFromHandle( xTaskToSuspend ); @@ -1724,55 +1988,73 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) } } #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ - } - taskEXIT_CRITICAL(); - if( xSchedulerRunning != pdFALSE ) - { - /* Reset the next expected unblock time in case it referred to the - * task that is now in the Suspended state. */ - taskENTER_CRITICAL(); - { - prvResetNextTaskUnblockTime(); - } - taskEXIT_CRITICAL(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - if( pxTCB == pxCurrentTCB ) - { if( xSchedulerRunning != pdFALSE ) { - /* The current task has just been suspended. */ - configASSERT( uxSchedulerSuspended == 0 ); - portYIELD_WITHIN_API(); + /* Reset the next expected unblock time in case it referred to the + * task that is now in the Suspended state. */ + prvResetNextTaskUnblockTime(); } else { - /* The scheduler is not running, but the task that was pointed - * to by pxCurrentTCB has just been suspended and pxCurrentTCB - * must be adjusted to point to a different task. */ - if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks ) /*lint !e931 Right has no side effect, just volatile. */ + mtCOVERAGE_TEST_MARKER(); + } + + if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, xCurCoreID ) == pdTRUE ) + { + if( xSchedulerRunning != pdFALSE ) { - /* No other tasks are ready, so set pxCurrentTCB back to - * NULL so when the next task is created pxCurrentTCB will - * be set to point to it no matter what its relative priority - * is. */ - pxCurrentTCB = NULL; + /* The current task has just been suspended. */ + configASSERT( uxSchedulerSuspended[ xCurCoreID ] == 0 ); + portYIELD_WITHIN_API(); } else { - vTaskSwitchContext(); + /* The scheduler is not running, but the task that was pointed + * to by pxCurrentTCBs has just been suspended and pxCurrentTCBs + * must be adjusted to point to a different task. */ + if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks ) /*lint !e931 Right has no side effect, just volatile. */ + { + /* No other tasks are ready, so set pxCurrentTCBs back to + * NULL so when the next task is created pxCurrentTCBs will + * be set to point to it no matter what its relative priority + * is. */ + pxCurrentTCBs[ xCurCoreID ] = NULL; + } + else + { + vTaskSwitchContext(); + } } } + + #if ( configNUMBER_OF_CORES > 1 ) + else if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, !xCurCoreID ) == pdTRUE ) + { + /* The other core's current task has just been suspended */ + if( xSchedulerRunning != pdFALSE ) + { + taskYIELD_CORE( !xCurCoreID ); + } + else + { + /* The scheduler is not running, but the task that was + * pointed to by pxCurrentTCBs[ otherCore ] has just been + * suspended. We simply set the + * pxCurrentTCBs[ otherCore ] to NULL for now. 
+ * + * Todo: Update vTaskSwitchContext() to be able to run + * on behalf of the other core. */ + pxCurrentTCBs[ !xCurCoreID ] = NULL; + } + } + #endif /* configNUMBER_OF_CORES > 1 */ + else + { + mtCOVERAGE_TEST_MARKER(); + } } - else - { - mtCOVERAGE_TEST_MARKER(); - } + taskEXIT_CRITICAL( &xKernelLock ); } #endif /* INCLUDE_vTaskSuspend */ @@ -1795,7 +2077,12 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xStateListItem ) ) != pdFALSE ) { /* Has the task already been resumed from within an ISR? */ - if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) == pdFALSE ) + #if ( configNUMBER_OF_CORES > 1 ) + if( ( listIS_CONTAINED_WITHIN( &xPendingReadyList[ 0 ], &( pxTCB->xEventListItem ) ) == pdFALSE ) && + ( listIS_CONTAINED_WITHIN( &xPendingReadyList[ 1 ], &( pxTCB->xEventListItem ) ) == pdFALSE ) ) + #else + if( listIS_CONTAINED_WITHIN( &xPendingReadyList[ 0 ], &( pxTCB->xEventListItem ) ) == pdFALSE ) + #endif /* configNUMBER_OF_CORES > 1 */ { /* Is it in the suspended list because it is in the Suspended * state, or because is is blocked with no timeout? */ @@ -1833,11 +2120,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) /* It does not make sense to resume the calling task. */ configASSERT( xTaskToResume ); - /* The parameter cannot be NULL as it is impossible to resume the - * currently executing task. */ - if( ( pxTCB != pxCurrentTCB ) && ( pxTCB != NULL ) ) + taskENTER_CRITICAL( &xKernelLock ); { - taskENTER_CRITICAL(); + /* The parameter cannot be NULL as it is impossible to resume the + * currently executing task. */ + if( ( taskIS_CURRENTLY_RUNNING( pxTCB ) == pdFALSE ) && ( pxTCB != NULL ) ) { if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) { @@ -1849,7 +2136,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) prvAddTaskToReadyList( pxTCB ); /* A higher priority task may have just been resumed. */ - if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + if( taskIS_YIELD_REQUIRED( pxTCB, portGET_CORE_ID(), pdTRUE ) == pdTRUE ) { /* This yield may not cause the task just resumed to run, * but will leave the lists in the correct state for the @@ -1866,12 +2153,12 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); - } - else - { - mtCOVERAGE_TEST_MARKER(); + else + { + mtCOVERAGE_TEST_MARKER(); + } } + taskEXIT_CRITICAL( &xKernelLock ); } #endif /* INCLUDE_vTaskSuspend */ @@ -1906,25 +2193,28 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + prvENTER_CRITICAL_OR_MASK_ISR( &xKernelLock, uxSavedInterruptStatus ); { if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) { + /* Get current core ID as we can no longer be preempted. */ + const BaseType_t xCurCoreID = portGET_CORE_ID(); + traceTASK_RESUME_FROM_ISR( pxTCB ); /* Check the ready lists can be accessed. */ - if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + if( taskCAN_BE_SCHEDULED( pxTCB ) == pdTRUE ) { /* Ready lists can be accessed so move the task from the * suspended list to the ready list directly. 
*/ - if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + if( taskIS_YIELD_REQUIRED( pxTCB, xCurCoreID, pdTRUE ) == pdTRUE ) { xYieldRequired = pdTRUE; /* Mark that a yield is pending in case the user is not * using the return value to initiate a context switch * from the ISR using portYIELD_FROM_ISR. */ - xYieldPending = pdTRUE; + xYieldPending[ xCurCoreID ] = pdTRUE; } else { @@ -1939,7 +2229,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) /* The delayed or ready lists cannot be accessed so the task * is held in the pending ready list until the scheduler is * unsuspended. */ - vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); + vListInsertEnd( &( xPendingReadyList[ xCurCoreID ] ), &( pxTCB->xEventListItem ) ); } } else @@ -1947,7 +2237,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) mtCOVERAGE_TEST_MARKER(); } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + prvEXIT_CRITICAL_OR_UNMASK_ISR( &xKernelLock, uxSavedInterruptStatus ); return xYieldRequired; } @@ -1958,45 +2248,57 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) void vTaskStartScheduler( void ) { BaseType_t xReturn; + UBaseType_t x; - /* Add the idle task at the lowest priority. */ - #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + /* Create idle tasks that are pinned to each core */ + for( x = 0; x < configNUMBER_OF_CORES; x++ ) { - StaticTask_t * pxIdleTaskTCBBuffer = NULL; - StackType_t * pxIdleTaskStackBuffer = NULL; - uint32_t ulIdleTaskStackSize; - - /* The Idle task is created using user provided RAM - obtain the - * address of the RAM then create the idle task. */ - vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize ); - xIdleTaskHandle = xTaskCreateStatic( prvIdleTask, - configIDLE_TASK_NAME, - ulIdleTaskStackSize, - ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ - portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ - pxIdleTaskStackBuffer, - pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ - - if( xIdleTaskHandle != NULL ) + /* Add the idle task at the lowest priority. */ + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) { - xReturn = pdPASS; + StaticTask_t * pxIdleTaskTCBBuffer = NULL; + StackType_t * pxIdleTaskStackBuffer = NULL; + uint32_t ulIdleTaskStackSize; + + /* The Idle task is created using user provided RAM - obtain the + * address of the RAM then create the idle task. */ + vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize ); + xIdleTaskHandle[ x ] = xTaskCreateStaticPinnedToCore( prvIdleTask, + configIDLE_TASK_NAME, + ulIdleTaskStackSize, + ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + pxIdleTaskStackBuffer, + pxIdleTaskTCBBuffer, /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + x ); + + if( xIdleTaskHandle[ x ] != NULL ) + { + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + break; + } } - else + #else /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ { - xReturn = pdFAIL; - } + /* The Idle task is being created using dynamically allocated RAM. 
*/ + xReturn = xTaskCreatePinnedToCore( prvIdleTask, + configIDLE_TASK_NAME, + configMINIMAL_STACK_SIZE, + ( void * ) NULL, + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + &xIdleTaskHandle[ xCoreID ], /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + xCoreID ); + + if( xReturn == pdFAIL ) + { + break; + } + #endif /* configSUPPORT_STATIC_ALLOCATION */ } - #else /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ - { - /* The Idle task is being created using dynamically allocated RAM. */ - xReturn = xTaskCreate( prvIdleTask, - configIDLE_TASK_NAME, - configMINIMAL_STACK_SIZE, - ( void * ) NULL, - portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ - &xIdleTaskHandle ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ - } - #endif /* configSUPPORT_STATIC_ALLOCATION */ #if ( configUSE_TIMERS == 1 ) { @@ -2029,17 +2331,24 @@ void vTaskStartScheduler( void ) * starts to run. */ portDISABLE_INTERRUPTS(); - #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) + /* For SMP, we need to take the kernel lock here as we are about to + * access kernel data structures. */ + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { - /* Switch C-Runtime's TLS Block to point to the TLS - * block specific to the task that will run first. */ - configSET_TLS_BLOCK( pxCurrentTCB->xTLSBlock ); - } - #endif + #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) + { + /* Switch C-Runtime's TLS Block to point to the TLS + * block specific to the task that will run first. */ + configSET_TLS_BLOCK( pxCurrentTCBs[ portGET_CORE_ID() ]->xTLSBlock ); + } + #endif - xNextTaskUnblockTime = portMAX_DELAY; - xSchedulerRunning = pdTRUE; - xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT; + xNextTaskUnblockTime = portMAX_DELAY; + xSchedulerRunning = pdTRUE; + xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT; + } + /* Release the previously taken kernel lock. */ + taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); /* If configGENERATE_RUN_TIME_STATS is defined then the following * macro must be defined to configure the timer/counter used to generate @@ -2072,7 +2381,7 @@ void vTaskStartScheduler( void ) /* Prevent compiler warnings if INCLUDE_xTaskGetIdleTaskHandle is set to 0, * meaning xIdleTaskHandle is not used anywhere else. */ - ( void ) xIdleTaskHandle; + ( void ) xIdleTaskHandle[ 0 ]; /* OpenOCD makes use of uxTopUsedPriority for thread debugging. Prevent uxTopUsedPriority * from getting optimized out as it is no longer used by the kernel. */ @@ -2086,7 +2395,15 @@ void vTaskEndScheduler( void ) * routine so the original ISRs can be restored if necessary. The port * layer must ensure interrupts enable bit is left in the correct state. */ portDISABLE_INTERRUPTS(); - xSchedulerRunning = pdFALSE; + + /* For SMP, we need to take the kernel lock here as we are about to access + * kernel data structures. */ + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + { + xSchedulerRunning = pdFALSE; + } + /* Release the previously taken kernel lock. */ + taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); vPortEndScheduler(); } /*----------------------------------------------------------*/ @@ -2104,7 +2421,7 @@ void vTaskSuspendAll( void ) /* The scheduler is suspended if uxSchedulerSuspended is non-zero. 
An increment * is used to allow calls to vTaskSuspendAll() to nest. */ - ++uxSchedulerSuspended; + ++uxSchedulerSuspended[ portGET_CORE_ID() ]; /* Enforces ordering for ports and optimised compilers that may otherwise place * the above increment elsewhere. */ @@ -2146,11 +2463,11 @@ void vTaskSuspendAll( void ) } #endif /* if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) */ - if( pxCurrentTCB->uxPriority > tskIDLE_PRIORITY ) + if( pxCurrentTCBs[ portGET_CORE_ID() ]->uxPriority > tskIDLE_PRIORITY ) { xReturn = 0; } - else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > 1 ) + else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > configNUMBER_OF_CORES ) { /* There are other idle priority tasks in the ready state. If * time slicing is used then the very next tick interrupt must be @@ -2182,26 +2499,29 @@ BaseType_t xTaskResumeAll( void ) /* If uxSchedulerSuspended is zero then this function does not match a * previous call to vTaskSuspendAll(). */ - configASSERT( uxSchedulerSuspended ); + configASSERT( taskIS_SCHEDULER_SUSPENDED() == pdTRUE ); /* It is possible that an ISR caused a task to be removed from an event * list while the scheduler was suspended. If this was the case then the * removed task will have been added to the xPendingReadyList. Once the * scheduler has been resumed it is safe to move all the pending ready * tasks from this list into their appropriate ready list. */ - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { - --uxSchedulerSuspended; + /* Get current core ID as we can no longer be preempted. */ + const BaseType_t xCurCoreID = portGET_CORE_ID(); - if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + --uxSchedulerSuspended[ xCurCoreID ]; + + if( uxSchedulerSuspended[ xCurCoreID ] == ( UBaseType_t ) pdFALSE ) { if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U ) { /* Move any readied tasks from the pending list into the * appropriate ready list. */ - while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE ) + while( listLIST_IS_EMPTY( &xPendingReadyList[ xCurCoreID ] ) == pdFALSE ) { - pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList[ xCurCoreID ] ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ listREMOVE_ITEM( &( pxTCB->xEventListItem ) ); portMEMORY_BARRIER(); listREMOVE_ITEM( &( pxTCB->xStateListItem ) ); @@ -2209,9 +2529,9 @@ BaseType_t xTaskResumeAll( void ) /* If the moved task has a priority higher than or equal to * the current task then a yield must be performed. */ - if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + if( taskIS_YIELD_REQUIRED( pxTCB, xCurCoreID, pdTRUE ) == pdTRUE ) { - xYieldPending = pdTRUE; + xYieldPending[ xCurCoreID ] = pdTRUE; } else { @@ -2230,6 +2550,13 @@ BaseType_t xTaskResumeAll( void ) prvResetNextTaskUnblockTime(); } + #if ( configNUMBER_OF_CORES > 1 ) + + /* Core 0 is solely responsible for managing tick count, thus it + * must be the only core to unwind the pended ticks */ + if( xCurCoreID == 0 ) + #endif + /* If any ticks occurred while the scheduler was suspended then * they should be processed now. 
This ensures the tick count does * not slip, and that any delayed tasks are resumed at the correct @@ -2243,7 +2570,7 @@ BaseType_t xTaskResumeAll( void ) { if( xTaskIncrementTick() != pdFALSE ) { - xYieldPending = pdTRUE; + xYieldPending[ xCurCoreID ] = pdTRUE; } else { @@ -2261,7 +2588,7 @@ BaseType_t xTaskResumeAll( void ) } } - if( xYieldPending != pdFALSE ) + if( xYieldPending[ xCurCoreID ] != pdFALSE ) { #if ( configUSE_PREEMPTION != 0 ) { @@ -2281,7 +2608,7 @@ BaseType_t xTaskResumeAll( void ) mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); return xAlreadyYielded; } @@ -2323,11 +2650,18 @@ TickType_t xTaskGetTickCountFromISR( void ) * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR(); + /* For SMP, we need to take the kernel lock here as we are about to access + * kernel data structures. */ + taskENTER_CRITICAL_ISR_SMP_ONLY( &xKernelLock ); { - xReturn = xTickCount; + uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR(); + { + xReturn = xTickCount; + } + portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); } - portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + /* Release the previously taken kernel lock. */ + taskEXIT_CRITICAL_ISR_SMP_ONLY( &xKernelLock ); return xReturn; } @@ -2434,7 +2768,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char /* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */ configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN ); - vTaskSuspendAll(); + prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock ); { /* Search the ready lists. */ do @@ -2480,7 +2814,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char } #endif } - ( void ) xTaskResumeAll(); + ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock ); return pxTCB; } @@ -2544,7 +2878,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char { UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES; - vTaskSuspendAll(); + prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock ); { /* Is there a space in the array for each task in the system? */ if( uxArraySize >= uxCurrentNumberOfTasks ) @@ -2603,7 +2937,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char mtCOVERAGE_TEST_MARKER(); } } - ( void ) xTaskResumeAll(); + ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock ); return uxTask; } @@ -2615,10 +2949,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char TaskHandle_t xTaskGetIdleTaskHandle( void ) { - /* If xTaskGetIdleTaskHandle() is called before the scheduler has been - * started, then xIdleTaskHandle will be NULL. */ - configASSERT( ( xIdleTaskHandle != NULL ) ); - return xIdleTaskHandle; + return xTaskGetIdleTaskHandleForCore( portGET_CORE_ID() ); } #endif /* INCLUDE_xTaskGetIdleTaskHandle */ @@ -2632,34 +2963,36 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char void vTaskStepTick( TickType_t xTicksToJump ) { - /* Correct the tick count value after a period during which the tick - * was suppressed. Note this does *not* call the tick hook function for - * each stepped tick. 
*/ - configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime ); - - if( ( xTickCount + xTicksToJump ) == xNextTaskUnblockTime ) + /* SINGLE-CORE MODIFICATION: Expanded critical section so that SMP + * accesses xTickCount inside a critical section. */ + taskENTER_CRITICAL( &xKernelLock ); { - /* Arrange for xTickCount to reach xNextTaskUnblockTime in - * xTaskIncrementTick() when the scheduler resumes. This ensures - * that any delayed tasks are resumed at the correct time. */ - configASSERT( uxSchedulerSuspended ); - configASSERT( xTicksToJump != ( TickType_t ) 0 ); + /* Correct the tick count value after a period during which the tick + * was suppressed. Note this does *not* call the tick hook function for + * each stepped tick. */ + configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime ); - /* Prevent the tick interrupt modifying xPendedTicks simultaneously. */ - taskENTER_CRITICAL(); + if( ( xTickCount + xTicksToJump ) == xNextTaskUnblockTime ) { - xPendedTicks++; - } - taskEXIT_CRITICAL(); - xTicksToJump--; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + /* Arrange for xTickCount to reach xNextTaskUnblockTime in + * xTaskIncrementTick() when the scheduler resumes. This ensures + * that any delayed tasks are resumed at the correct time. */ + configASSERT( taskIS_SCHEDULER_SUSPENDED() == pdTRUE ); + configASSERT( xTicksToJump != ( TickType_t ) 0 ); - xTickCount += xTicksToJump; - traceINCREASE_TICK_COUNT( xTicksToJump ); + xPendedTicks++; + xTicksToJump--; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xTickCount += xTicksToJump; + traceINCREASE_TICK_COUNT( xTicksToJump ); + } + /* SINGLE-CORE MODIFICATION: Expanded critical section */ + taskEXIT_CRITICAL( &xKernelLock ); } #endif /* configUSE_TICKLESS_IDLE */ @@ -2671,18 +3004,18 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) /* Must not be called with the scheduler suspended as the implementation * relies on xPendedTicks being wound down to 0 in xTaskResumeAll(). */ - configASSERT( uxSchedulerSuspended == 0 ); + configASSERT( taskIS_SCHEDULER_SUSPENDED() == pdFALSE ); /* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occurring when * the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */ vTaskSuspendAll(); /* Prevent the tick interrupt modifying xPendedTicks simultaneously. */ - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { xPendedTicks += xTicksToCatchUp; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); xYieldOccurred = xTaskResumeAll(); return xYieldOccurred; @@ -2698,7 +3031,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) configASSERT( pxTCB ); - vTaskSuspendAll(); + prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock ); { /* A task can only be prematurely removed from the Blocked state if * it is actually in the Blocked state. */ @@ -2715,7 +3048,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) * the event list too. Interrupts can touch the event list item, * even though the scheduler is suspended, so a critical section * is used. */ - taskENTER_CRITICAL(); + taskENTER_CRITICAL_SC_ONLY( &xKernelLock ); { if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) { @@ -2731,7 +3064,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL_SC_ONLY( &xKernelLock ); /* Place the unblocked task into the appropriate ready list. 
*/ prvAddTaskToReadyList( pxTCB ); @@ -2740,14 +3073,17 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) * switch if preemption is turned off. */ #if ( configUSE_PREEMPTION == 1 ) { + /* Get current core ID as we can no longer be preempted. */ + const BaseType_t xCurCoreID = portGET_CORE_ID(); + /* Preemption is on, but a context switch should only be * performed if the unblocked task has a priority that is * higher than the currently executing task. */ - if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + if( taskIS_YIELD_REQUIRED( pxTCB, xCurCoreID, pdFALSE ) == pdTRUE ) { /* Pend the yield to be performed when the scheduler * is unsuspended. */ - xYieldPending = pdTRUE; + xYieldPending[ xCurCoreID ] = pdTRUE; } else { @@ -2761,7 +3097,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) xReturn = pdFAIL; } } - ( void ) xTaskResumeAll(); + ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock ); return xReturn; } @@ -2771,176 +3107,204 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) BaseType_t xTaskIncrementTick( void ) { + #if ( configNUMBER_OF_CORES > 1 ) + /* Only Core 0 should ever call this function. */ + configASSERT( portGET_CORE_ID() == 0 ); + #endif /* configNUMBER_OF_CORES > 1 */ + TCB_t * pxTCB; TickType_t xItemValue; BaseType_t xSwitchRequired = pdFALSE; + #if ( configUSE_TICK_HOOK == 1 ) + BaseType_t xCallTickHook; + #endif /* configUSE_TICK_HOOK == 1 */ /* Called by the portable layer each time a tick interrupt occurs. * Increments the tick then checks to see if the new tick value will cause any * tasks to be unblocked. */ traceTASK_INCREMENT_TICK( xTickCount ); - if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + /* For SMP, we need to take the kernel lock here as we are about to access + * kernel data structures (unlike single core which calls this function with + * interrupts disabled). */ + taskENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock ); { - /* Minor optimisation. The tick count cannot change in this - * block. */ - const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; - - /* Increment the RTOS tick, switching the delayed and overflowed - * delayed lists if it wraps to 0. */ - xTickCount = xConstTickCount; - - if( xConstTickCount == ( TickType_t ) 0U ) /*lint !e774 'if' does not always evaluate to false as it is looking for an overflow. */ + if( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) pdFALSE ) { - taskSWITCH_DELAYED_LISTS(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + /* Minor optimisation. The tick count cannot change in this + * block. */ + const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; - /* See if this tick has made a timeout expire. Tasks are stored in - * the queue in the order of their wake time - meaning once one task - * has been found whose block time has not expired there is no need to - * look any further down the list. */ - if( xConstTickCount >= xNextTaskUnblockTime ) - { - for( ; ; ) + /* Increment the RTOS tick, switching the delayed and overflowed + * delayed lists if it wraps to 0. */ + xTickCount = xConstTickCount; + + if( xConstTickCount == ( TickType_t ) 0U ) /*lint !e774 'if' does not always evaluate to false as it is looking for an overflow. */ { - if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE ) - { - /* The delayed list is empty. Set xNextTaskUnblockTime - * to the maximum possible value so it is extremely - * unlikely that the - * if( xTickCount >= xNextTaskUnblockTime ) test will pass - * next time through. 
*/ - xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ - break; - } - else - { - /* The delayed list is not empty, get the value of the - * item at the head of the delayed list. This is the time - * at which the task at the head of the delayed list must - * be removed from the Blocked state. */ - pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ - xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) ); + taskSWITCH_DELAYED_LISTS(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } - if( xConstTickCount < xItemValue ) + /* See if this tick has made a timeout expire. Tasks are stored in + * the queue in the order of their wake time - meaning once one task + * has been found whose block time has not expired there is no need to + * look any further down the list. */ + if( xConstTickCount >= xNextTaskUnblockTime ) + { + for( ; ; ) + { + if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE ) { - /* It is not time to unblock this item yet, but the - * item value is the time at which the task at the head - * of the blocked list must be removed from the Blocked - * state - so record the item value in - * xNextTaskUnblockTime. */ - xNextTaskUnblockTime = xItemValue; - break; /*lint !e9011 Code structure here is deemed easier to understand with multiple breaks. */ + /* The delayed list is empty. Set xNextTaskUnblockTime + * to the maximum possible value so it is extremely + * unlikely that the + * if( xTickCount >= xNextTaskUnblockTime ) test will pass + * next time through. */ + xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + break; } else { - mtCOVERAGE_TEST_MARKER(); - } + /* The delayed list is not empty, get the value of the + * item at the head of the delayed list. This is the time + * at which the task at the head of the delayed list must + * be removed from the Blocked state. */ + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) ); - /* It is time to remove the item from the Blocked state. */ - listREMOVE_ITEM( &( pxTCB->xStateListItem ) ); - - /* Is the task waiting on an event also? If so remove - * it from the event list. */ - if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) - { - listREMOVE_ITEM( &( pxTCB->xEventListItem ) ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - /* Place the unblocked task into the appropriate ready - * list. */ - prvAddTaskToReadyList( pxTCB ); - - /* A task being unblocked cannot cause an immediate - * context switch if preemption is turned off. */ - #if ( configUSE_PREEMPTION == 1 ) - { - /* Preemption is on, but a context switch should - * only be performed if the unblocked task's - * priority is higher than the currently executing - * task. 
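The loop above is the heart of xTaskIncrementTick(): it walks the wake-time-ordered delayed list, unblocks every task whose wake time has passed, and stops at the first task that is still blocked, recording that task's wake time as the new xNextTaskUnblockTime. A condensed sketch with a sorted array standing in for the delayed list (all names assumed):

```c
/* Sketch of the wake-time-ordered delayed-list scan in xTaskIncrementTick().
 * An ascending array of wake times stands in for the delayed list. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_MAX_DELAY    0xFFFFFFFFu

static const uint32_t ulWakeTimes[] = { 105, 110, 200 }; /* sorted ascending */

int main( void )
{
    const size_t xNumDelayed = sizeof( ulWakeTimes ) / sizeof( ulWakeTimes[ 0 ] );
    uint32_t ulTickCount = 110;                 /* the tick that just occurred */
    uint32_t ulNextUnblockTime = SKETCH_MAX_DELAY;

    for( size_t x = 0; x < xNumDelayed; x++ )
    {
        if( ulTickCount < ulWakeTimes[ x ] )
        {
            /* First task that is still blocked: its wake time becomes the
             * next unblock time and the scan stops. */
            ulNextUnblockTime = ulWakeTimes[ x ];
            break;
        }

        printf( "unblocking task with wake time %u\n", (unsigned) ulWakeTimes[ x ] );
    }

    printf( "next unblock time: %u\n", (unsigned) ulNextUnblockTime ); /* 200 */
    return 0;
}
```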
- * The case of equal priority tasks sharing - * processing time (which happens when both - * preemption and time slicing are on) is - * handled below.*/ - if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + if( xConstTickCount < xItemValue ) { - xSwitchRequired = pdTRUE; + /* It is not time to unblock this item yet, but the + * item value is the time at which the task at the head + * of the blocked list must be removed from the Blocked + * state - so record the item value in + * xNextTaskUnblockTime. */ + xNextTaskUnblockTime = xItemValue; + break; /*lint !e9011 Code structure here is deemed easier to understand with multiple breaks. */ } else { mtCOVERAGE_TEST_MARKER(); } + + /* It is time to remove the item from the Blocked state. */ + listREMOVE_ITEM( &( pxTCB->xStateListItem ) ); + + /* Is the task waiting on an event also? If so remove + * it from the event list. */ + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + listREMOVE_ITEM( &( pxTCB->xEventListItem ) ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Place the unblocked task into the appropriate ready + * list. */ + prvAddTaskToReadyList( pxTCB ); + + /* A task being unblocked cannot cause an immediate + * context switch if preemption is turned off. */ + #if ( configUSE_PREEMPTION == 1 ) + { + /* Preemption is on, but a context switch should + * only be performed if the unblocked task has a + * priority that is equal to or higher than the + * currently executing task. + * + * For SMP, since this function is only run on core + * 0, we only need to context switch if the unblocked + * task can run on core 0 and has a higher priority + * than the current task. */ + if( ( taskIS_AFFINITY_COMPATIBLE( 0, pxTCB->xCoreID ) == pdTRUE ) && ( pxTCB->uxPriority > pxCurrentTCBs[ 0 ]->uxPriority ) ) + { + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_PREEMPTION */ } - #endif /* configUSE_PREEMPTION */ } } - } - /* Tasks of equal priority to the currently running task will share - * processing time (time slice) if preemption is on, and the application - * writer has not explicitly turned time slicing off. */ - #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) - { - if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > ( UBaseType_t ) 1 ) + /* Tasks of equal priority to the currently running task will share + * processing time (time slice) if preemption is on, and the application + * writer has not explicitly turned time slicing off. */ + #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) { - xSwitchRequired = pdTRUE; + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCBs[ 0 ]->uxPriority ] ) ) > ( UBaseType_t ) 1 ) + { + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */ + #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */ - #if ( configUSE_TICK_HOOK == 1 ) - { - /* Guard against the tick hook being called when the pended tick - * count is being unwound (when the scheduler is being unlocked). */ - if( xPendedTicks == ( TickType_t ) 0 ) + #if ( configUSE_TICK_HOOK == 1 ) { - vApplicationTickHook(); + /* Guard against the tick hook being called when the pended tick + * count is being unwound (when the scheduler is being unlocked). 
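Because xTaskIncrementTick() is only ever run on core 0 in this port, the preemption test above is narrowed: a switch is requested only when the unblocked task is allowed to run on core 0 (taskIS_AFFINITY_COMPATIBLE) and outranks what core 0 is currently running. A small sketch of that decision, with a plain struct standing in for the TCB and -1 standing in for tskNO_AFFINITY (assumed values):

```c
/* Sketch of the affinity + priority test made on core 0 during the tick.
 * SketchTask_t is a stand-in for the TCB; -1 mimics tskNO_AFFINITY. */
#include <stdbool.h>
#include <stdio.h>

typedef struct
{
    int iCoreID;            /* -1 = unpinned, otherwise the pinned core */
    unsigned uxPriority;
} SketchTask_t;

static bool xSketchAffinityCompatible( int iCore, int iTaskCoreID )
{
    return ( iTaskCoreID == -1 ) || ( iTaskCoreID == iCore );
}

static bool xSketchTickNeedsSwitch( const SketchTask_t * pxUnblocked,
                                    const SketchTask_t * pxRunningOnCore0 )
{
    return xSketchAffinityCompatible( 0, pxUnblocked->iCoreID ) &&
           ( pxUnblocked->uxPriority > pxRunningOnCore0->uxPriority );
}

int main( void )
{
    SketchTask_t xRunning   = { .iCoreID = 0,  .uxPriority = 3 };
    SketchTask_t xPinnedTo1 = { .iCoreID = 1,  .uxPriority = 7 };
    SketchTask_t xUnpinned  = { .iCoreID = -1, .uxPriority = 7 };

    printf( "pinned-to-core-1 task needs switch: %d\n", xSketchTickNeedsSwitch( &xPinnedTo1, &xRunning ) ); /* 0 */
    printf( "unpinned task needs switch:         %d\n", xSketchTickNeedsSwitch( &xUnpinned, &xRunning ) );  /* 1 */
    return 0;
}
```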
*/ + if( xPendedTicksTemp == ( TickType_t ) 0 ) + { + xCallTickHook = pdTRUE; + } + else + { + xCallTickHook = pdFALSE; + } } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - #endif /* configUSE_TICK_HOOK */ + #endif /* configUSE_TICK_HOOK */ - #if ( configUSE_PREEMPTION == 1 ) - { - if( xYieldPending != pdFALSE ) + #if ( configUSE_PREEMPTION == 1 ) { - xSwitchRequired = pdTRUE; - } - else - { - mtCOVERAGE_TEST_MARKER(); + if( xYieldPending[ 0 ] != pdFALSE ) + { + xSwitchRequired = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } + #endif /* configUSE_PREEMPTION */ + } + else + { + ++xPendedTicks; + + /* The tick hook gets called at regular intervals, even if the + * scheduler is locked. */ + #if ( configUSE_TICK_HOOK == 1 ) + { + xCallTickHook = pdTRUE; + } + #endif } - #endif /* configUSE_PREEMPTION */ } - else - { - ++xPendedTicks; - /* The tick hook gets called at regular intervals, even if the - * scheduler is locked. */ - #if ( configUSE_TICK_HOOK == 1 ) + /* Release the previously taken kernel lock as we have finished accessing + * the kernel data structures. */ + taskEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock ); + + #if ( configUSE_TICK_HOOK == 1 ) + { + if( xCallTickHook == pdTRUE ) { vApplicationTickHook(); } - #endif } + #endif return xSwitchRequired; } @@ -2957,7 +3321,7 @@ BaseType_t xTaskIncrementTick( void ) * getting set. */ if( xTask == NULL ) { - xTCB = ( TCB_t * ) pxCurrentTCB; + xTCB = ( TCB_t * ) xTaskGetCurrentTaskHandle(); } else { @@ -2966,11 +3330,11 @@ BaseType_t xTaskIncrementTick( void ) /* Save the hook function in the TCB. A critical section is required as * the value can be accessed from an interrupt. */ - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { xTCB->pxTaskTag = pxHookFunction; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); } #endif /* configUSE_APPLICATION_TASK_TAG */ @@ -2988,11 +3352,11 @@ BaseType_t xTaskIncrementTick( void ) /* Save the hook function in the TCB. A critical section is required as * the value can be accessed from an interrupt. */ - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { xReturn = pxTCB->pxTaskTag; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); return xReturn; } @@ -3013,11 +3377,11 @@ BaseType_t xTaskIncrementTick( void ) /* Save the hook function in the TCB. A critical section is required as * the value can be accessed from an interrupt. */ - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + prvENTER_CRITICAL_OR_MASK_ISR( &xKernelLock, uxSavedInterruptStatus ); { xReturn = pxTCB->pxTaskTag; } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + prvEXIT_CRITICAL_OR_UNMASK_ISR( &xKernelLock, uxSavedInterruptStatus ); return xReturn; } @@ -3036,7 +3400,7 @@ BaseType_t xTaskIncrementTick( void ) /* If xTask is NULL then we are calling our own task hook. */ if( xTask == NULL ) { - xTCB = pxCurrentTCB; + xTCB = xTaskGetCurrentTaskHandle(); } else { @@ -3058,77 +3422,181 @@ BaseType_t xTaskIncrementTick( void ) #endif /* configUSE_APPLICATION_TASK_TAG */ /*-----------------------------------------------------------*/ +#if ( configNUMBER_OF_CORES > 1 ) + + static void prvSelectHighestPriorityTaskSMP( void ) + { + /* This function is called from a critical section. So some optimizations are made */ + BaseType_t uxCurPriority; + BaseType_t xTaskScheduled = pdFALSE; + BaseType_t xNewTopPrioritySet = pdFALSE; + BaseType_t xCurCoreID = portGET_CORE_ID(); + + /* Search for tasks, starting form the highest ready priority. 
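Note how the reworked xTaskIncrementTick() above only records whether the tick hook should run (xCallTickHook) while the kernel lock is held, and invokes vApplicationTickHook() after the lock has been released, so application code never executes inside the kernel's critical section. A generic sketch of that record-then-call-after-unlock pattern (the lock and hook names are placeholders):

```c
/* Sketch of the "decide under the lock, call the hook after unlocking" pattern. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t xSketchKernelLock = PTHREAD_MUTEX_INITIALIZER;
static unsigned uxPendedWork = 0;

static void vSketchApplicationHook( void )
{
    printf( "application hook runs outside the critical section\n" );
}

static void vSketchTick( void )
{
    bool xCallHook;

    pthread_mutex_lock( &xSketchKernelLock );
    {
        /* Only decide here; never call out to application code while locked. */
        xCallHook = ( uxPendedWork == 0 );
    }
    pthread_mutex_unlock( &xSketchKernelLock );

    if( xCallHook )
    {
        vSketchApplicationHook();
    }
}

int main( void )
{
    vSketchTick();
    return 0;
}
```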
If nothing is + * found, we eventually default to the IDLE tasks at priority 0 */ + + for( uxCurPriority = uxTopReadyPriority; uxCurPriority >= 0 && xTaskScheduled == pdFALSE; uxCurPriority-- ) + { + /* Check if current priority has one or more ready tasks. Skip if none */ + if( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxCurPriority ] ) ) ) + { + continue; + } + + /* Save a copy of highest priority that has a ready state task */ + if( xNewTopPrioritySet == pdFALSE ) + { + xNewTopPrioritySet = pdTRUE; + uxTopReadyPriority = uxCurPriority; + } + + /* We now search this priority's ready task list for a runnable task. + * We always start searching from the head of the list, so we reset + * pxIndex to point to the tail so that we start walking the list from + * the first item */ + pxReadyTasksLists[ uxCurPriority ].pxIndex = ( ListItem_t * ) &( pxReadyTasksLists[ uxCurPriority ].xListEnd ); + + /* Get the first item on the list */ + TCB_t * pxTCBCur; + TCB_t * pxTCBFirst; + listGET_OWNER_OF_NEXT_ENTRY( pxTCBCur, &( pxReadyTasksLists[ uxCurPriority ] ) ); + pxTCBFirst = pxTCBCur; + + do + { + /* Check if the current task is currently being executed. However, if + * it's being executed by the current core, we can still schedule it. + * Todo: Each task can store a xTaskRunState, instead of needing to + * check each core */ + UBaseType_t x; + + for( x = 0; x < configNUMBER_OF_CORES; x++ ) + { + if( x == xCurCoreID ) + { + continue; + } + else if( pxCurrentTCBs[ x ] == pxTCBCur ) + { + /* Current task is already being executed. Get the next task */ + goto get_next_task; + } + } + + /* Check if the current task has a compatible affinity */ + if( taskIS_AFFINITY_COMPATIBLE( xCurCoreID, pxTCBCur->xCoreID ) == pdFALSE ) + { + goto get_next_task; + } + + /* The current task is runnable. Schedule it */ + pxCurrentTCBs[ xCurCoreID ] = pxTCBCur; + xTaskScheduled = pdTRUE; + + /* Move the current tasks list item to the back of the list in order + * to implement best effort round robin. To do this, we need to reset + * the pxIndex to point to the tail again. */ + pxReadyTasksLists[ uxCurPriority ].pxIndex = ( ListItem_t * ) &( pxReadyTasksLists[ uxCurPriority ].xListEnd ); + listREMOVE_ITEM( &( pxTCBCur->xStateListItem ) ); + listINSERT_END( &( pxReadyTasksLists[ uxCurPriority ] ), &( pxTCBCur->xStateListItem ) ); + break; + +get_next_task: + /* The current task cannot be scheduled. Get the next task in the list */ + listGET_OWNER_OF_NEXT_ENTRY( pxTCBCur, &( pxReadyTasksLists[ uxCurPriority ] ) ); + } while( pxTCBCur != pxTCBFirst ); /* Check to see if we've walked the entire list */ + } + + configASSERT( xTaskScheduled == pdTRUE ); /* At this point, a task MUST have been scheduled */ + } + +#endif /* configNUMBER_OF_CORES > 1 */ +/*-----------------------------------------------------------*/ + void vTaskSwitchContext( void ) { - if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE ) + /* For SMP, we need to take the kernel lock here as we are about to access + * kernel data structures (unlike single core which calls this function with + * either interrupts disabled or when the scheduler hasn't started yet). */ + taskENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock ); { - /* The scheduler is currently suspended - do not allow a context - * switch. */ - xYieldPending = pdTRUE; - } - else - { - xYieldPending = pdFALSE; - traceTASK_SWITCHED_OUT(); + /* Get current core ID as we can no longer be preempted. 
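prvSelectHighestPriorityTaskSMP() above scans the highest non-empty ready list, skips any task that is already running on the other core or is pinned elsewhere, and then rotates the chosen task to the back of its list. A heavily condensed sketch of that selection over a plain array (the real code walks ListItem_t lists and always falls back to the idle task; everything here is hypothetical):

```c
/* Heavily simplified sketch of SMP task selection: pick the first ready task
 * of the scanned priority that is neither running on another core nor pinned
 * to a different core, then rotate it to the back of the ready "list". */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define SKETCH_CORES     2
#define SKETCH_NO_AFF    ( -1 )

typedef struct
{
    const char * pcName;
    int iCoreID;                    /* SKETCH_NO_AFF or the pinned core */
} SketchTask_t;

/* One ready list (a single priority level), head at index 0. */
static SketchTask_t xReady[] = { { "A", SKETCH_NO_AFF }, { "B", 1 }, { "C", SKETCH_NO_AFF } };
static const int iNumReady = (int) ( sizeof( xReady ) / sizeof( xReady[ 0 ] ) );

/* Names of the tasks currently running on each core ("A" is busy on core 1). */
static const char * pcRunning[ SKETCH_CORES ] = { NULL, "A" };

static const char * pcSelectForCore( int iCore )
{
    for( int i = 0; i < iNumReady; i++ )
    {
        SketchTask_t xCandidate = xReady[ i ];
        bool xSkip = false;

        /* Skip tasks already running on some other core. */
        for( int c = 0; c < SKETCH_CORES; c++ )
        {
            if( ( c != iCore ) && ( pcRunning[ c ] != NULL ) &&
                ( strcmp( pcRunning[ c ], xCandidate.pcName ) == 0 ) )
            {
                xSkip = true;
            }
        }

        /* Skip tasks pinned to a different core. */
        if( ( xCandidate.iCoreID != SKETCH_NO_AFF ) && ( xCandidate.iCoreID != iCore ) )
        {
            xSkip = true;
        }

        if( xSkip )
        {
            continue;
        }

        /* Rotate the chosen task to the back to approximate round robin. */
        for( int j = i; j < ( iNumReady - 1 ); j++ )
        {
            xReady[ j ] = xReady[ j + 1 ];
        }

        xReady[ iNumReady - 1 ] = xCandidate;
        return xCandidate.pcName;
    }

    return "idle"; /* the real kernel always finds at least the idle task */
}

int main( void )
{
    pcRunning[ 0 ] = pcSelectForCore( 0 );            /* "A" busy elsewhere, "B" pinned to core 1 */
    printf( "core 0 now runs %s\n", pcRunning[ 0 ] ); /* prints "C" */
    return 0;
}
```

The rotate-to-back step is what the comment above calls best effort round robin: fairness within a priority level is approximate because pinned or already-running tasks may be skipped on any given core.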
*/ + const BaseType_t xCurCoreID = portGET_CORE_ID(); - #if ( configGENERATE_RUN_TIME_STATS == 1 ) + if( uxSchedulerSuspended[ xCurCoreID ] != ( UBaseType_t ) pdFALSE ) { - #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE - portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime ); - #else - ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE(); + /* The scheduler is currently suspended - do not allow a context + * switch. */ + xYieldPending[ xCurCoreID ] = pdTRUE; + } + else + { + xYieldPending[ xCurCoreID ] = pdFALSE; + traceTASK_SWITCHED_OUT(); + + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + { + #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE + portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime ); + #else + ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE(); + #endif + + /* Add the amount of time the task has been running to the + * accumulated time so far. The time the task started running was + * stored in ulTaskSwitchedInTime. Note that there is no overflow + * protection here so count values are only valid until the timer + * overflows. The guard against negative values is to protect + * against suspect run time stat counter implementations - which + * are provided by the application, not the kernel. */ + if( ulTotalRunTime > ulTaskSwitchedInTime[ xCurCoreID ] ) + { + pxCurrentTCBs[ xCurCoreID ]->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime[ xCurCoreID ] ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + ulTaskSwitchedInTime[ xCurCoreID ] = ulTotalRunTime; + } + #endif /* configGENERATE_RUN_TIME_STATS */ + + /* Check for stack overflow, if configured. */ + taskCHECK_FOR_STACK_OVERFLOW( xCurCoreID ); + + /* Before the currently running task is switched out, save its errno. */ + #if ( configUSE_POSIX_ERRNO == 1 ) + { + pxCurrentTCBs[ xCurCoreID ]->iTaskErrno = FreeRTOS_errno; + } #endif - /* Add the amount of time the task has been running to the - * accumulated time so far. The time the task started running was - * stored in ulTaskSwitchedInTime. Note that there is no overflow - * protection here so count values are only valid until the timer - * overflows. The guard against negative values is to protect - * against suspect run time stat counter implementations - which - * are provided by the application, not the kernel. */ - if( ulTotalRunTime > ulTaskSwitchedInTime ) + /* Select a new task to run using either the generic C or port + * optimised asm code. */ + taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + traceTASK_SWITCHED_IN(); + + /* After the new task is switched in, update the global errno. */ + #if ( configUSE_POSIX_ERRNO == 1 ) { - pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime ); + FreeRTOS_errno = pxCurrentTCBs[ xCurCoreID ]->iTaskErrno; } - else + #endif + + #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) { - mtCOVERAGE_TEST_MARKER(); + /* Switch C-Runtime's TLS Block to point to the TLS + * Block specific to this task. */ + configSET_TLS_BLOCK( pxCurrentTCBs[ xCurCoreID ]->xTLSBlock ); } - - ulTaskSwitchedInTime = ulTotalRunTime; + #endif } - #endif /* configGENERATE_RUN_TIME_STATS */ - - /* Check for stack overflow, if configured. */ - taskCHECK_FOR_STACK_OVERFLOW(); - - /* Before the currently running task is switched out, save its errno. 
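With two cores, the run-time-statistics block above keeps a per-core switched-in timestamp and credits the outgoing task with the elapsed delta at every context switch. A minimal sketch of that bookkeeping (the timer source, array sizes and names are placeholders):

```c
/* Sketch of per-core run time accounting: each core credits the task it is
 * switching out with (now - time the task was switched in on that core). */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_CORES    2

static uint32_t ulSwitchedInTime[ SKETCH_CORES ];   /* per-core timestamp */
static uint32_t ulRunTimeCounter[ 3 ];              /* per-task accumulated time */

static void vSketchContextSwitch( int iCore, int iOutgoingTask, uint32_t ulNow )
{
    if( ulNow > ulSwitchedInTime[ iCore ] )          /* guard against suspect counters */
    {
        ulRunTimeCounter[ iOutgoingTask ] += ( ulNow - ulSwitchedInTime[ iCore ] );
    }

    ulSwitchedInTime[ iCore ] = ulNow;               /* the incoming task starts here */
}

int main( void )
{
    vSketchContextSwitch( 0, 0, 100 );   /* task 0 ran on core 0 for 100 units */
    vSketchContextSwitch( 1, 1, 130 );   /* task 1 ran on core 1 for 130 units */
    vSketchContextSwitch( 0, 2, 150 );   /* task 2 ran on core 0 for 50 units */

    printf( "task0=%u task1=%u task2=%u\n",
            (unsigned) ulRunTimeCounter[ 0 ],
            (unsigned) ulRunTimeCounter[ 1 ],
            (unsigned) ulRunTimeCounter[ 2 ] );      /* 100 130 50 */
    return 0;
}
```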
*/ - #if ( configUSE_POSIX_ERRNO == 1 ) - { - pxCurrentTCB->iTaskErrno = FreeRTOS_errno; - } - #endif - - /* Select a new task to run using either the generic C or port - * optimised asm code. */ - taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ - traceTASK_SWITCHED_IN(); - - /* After the new task is switched in, update the global errno. */ - #if ( configUSE_POSIX_ERRNO == 1 ) - { - FreeRTOS_errno = pxCurrentTCB->iTaskErrno; - } - #endif - - #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) - { - /* Switch C-Runtime's TLS Block to point to the TLS - * Block specific to this task. */ - configSET_TLS_BLOCK( pxCurrentTCB->xTLSBlock ); - } - #endif } + + /* Release the previously taken kernel lock as we have finished accessing + * the kernel data structures. */ + taskEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock ); } /*-----------------------------------------------------------*/ @@ -3137,23 +3605,31 @@ void vTaskPlaceOnEventList( List_t * const pxEventList, { configASSERT( pxEventList ); - /* THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED OR THE - * SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */ + /* IN SINGLE-CORE THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED + * OR THE SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. IN SMP + * THIS FUNCTION MUST BE CALLED WITH THE QUEUE'S xQueueLock TAKEN. */ - /* Place the event list item of the TCB in the appropriate event list. - * This is placed in the list in priority order so the highest priority task - * is the first to be woken by the event. - * - * Note: Lists are sorted in ascending order by ListItem_t.xItemValue. - * Normally, the xItemValue of a TCB's ListItem_t members is: - * xItemValue = ( configMAX_PRIORITIES - uxPriority ) - * Therefore, the event list is sorted in descending priority order. - * - * The queue that contains the event list is locked, preventing - * simultaneous access from interrupts. */ - vListInsert( pxEventList, &( pxCurrentTCB->xEventListItem ) ); + /* For SMP, we need to take the kernel lock here as we are about to access + * kernel data structures. */ + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + { + /* Place the event list item of the TCB in the appropriate event list. + * This is placed in the list in priority order so the highest priority task + * is the first to be woken by the event. + * + * Note: Lists are sorted in ascending order by ListItem_t.xItemValue. + * Normally, the xItemValue of a TCB's ListItem_t members is: + * xItemValue = ( configMAX_PRIORITIES - uxPriority ) + * Therefore, the event list is sorted in descending priority order. + * + * The queue that contains the event list is locked, preventing + * simultaneous access from interrupts. */ + vListInsert( pxEventList, &( pxCurrentTCBs[ portGET_CORE_ID() ]->xEventListItem ) ); - prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); + prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); + } + /* Release the previously taken kernel lock. */ + taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); } /*-----------------------------------------------------------*/ @@ -3161,25 +3637,44 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xItemValue, const TickType_t xTicksToWait ) { + /* Get current core ID as we can no longer be preempted. 
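vTaskPlaceOnEventList() above depends on event lists being sorted by ListItem_t.xItemValue, and a waiter's item value is configMAX_PRIORITIES minus its priority, so ascending item-value order is descending priority order and the highest-priority waiter sits at the head. A tiny sketch of that ordering trick (constants and names assumed):

```c
/* Sketch: sorting waiters by (configMAX_PRIORITIES - priority) in ascending
 * order puts the highest-priority waiter at the head of the event list. */
#include <stdio.h>
#include <stdlib.h>

#define SKETCH_MAX_PRIORITIES    25

typedef struct { const char * pcName; unsigned uxPriority; unsigned uxItemValue; } SketchWaiter_t;

static int iCompareItemValue( const void * a, const void * b )
{
    const SketchWaiter_t * pa = a;
    const SketchWaiter_t * pb = b;

    return ( pa->uxItemValue > pb->uxItemValue ) - ( pa->uxItemValue < pb->uxItemValue );
}

int main( void )
{
    SketchWaiter_t xWaiters[] = { { "low", 2, 0 }, { "high", 10, 0 }, { "mid", 5, 0 } };
    const size_t xNum = sizeof( xWaiters ) / sizeof( xWaiters[ 0 ] );

    for( size_t i = 0; i < xNum; i++ )
    {
        xWaiters[ i ].uxItemValue = SKETCH_MAX_PRIORITIES - xWaiters[ i ].uxPriority;
    }

    qsort( xWaiters, xNum, sizeof( SketchWaiter_t ), iCompareItemValue );

    printf( "woken first: %s\n", xWaiters[ 0 ].pcName ); /* "high" */
    return 0;
}
```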
*/ + const BaseType_t xCurCoreID = portGET_CORE_ID(); + configASSERT( pxEventList ); - /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by - * the event groups implementation. */ - configASSERT( uxSchedulerSuspended != 0 ); + #if ( configNUMBER_OF_CORES > 1 ) + { + /* IN SMP, THIS FUNCTION MUST BE CALLED WITH THE EVENT GROUP'S + * xEventGroupLock ALREADY TAKEN. */ + } + #else /* configNUMBER_OF_CORES > 1 */ + { + /* IN SINGLE-CORE, THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. + * It is used by the event groups implementation. */ + configASSERT( uxSchedulerSuspended[ xCurCoreID ] != 0 ); + } + #endif /* configNUMBER_OF_CORES > 1 */ - /* Store the item value in the event list item. It is safe to access the - * event list item here as interrupts won't access the event list item of a - * task that is not in the Blocked state. */ - listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE ); + /* For SMP, we need to take the kernel lock here as we are about to access + * kernel data structures. */ + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + { + /* Store the item value in the event list item. It is safe to access the + * event list item here as interrupts won't access the event list item of a + * task that is not in the Blocked state. */ + listSET_LIST_ITEM_VALUE( &( pxCurrentTCBs[ xCurCoreID ]->xEventListItem ), xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE ); - /* Place the event list item of the TCB at the end of the appropriate event - * list. It is safe to access the event list here because it is part of an - * event group implementation - and interrupts don't access event groups - * directly (instead they access them indirectly by pending function calls to - * the task level). */ - listINSERT_END( pxEventList, &( pxCurrentTCB->xEventListItem ) ); + /* Place the event list item of the TCB at the end of the appropriate event + * list. It is safe to access the event list here because it is part of an + * event group implementation - and interrupts don't access event groups + * directly (instead they access them indirectly by pending function calls to + * the task level). */ + listINSERT_END( pxEventList, &( pxCurrentTCBs[ xCurCoreID ]->xEventListItem ) ); - prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); + prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); + } + /* Release the previously taken kernel lock. */ + taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); } /*-----------------------------------------------------------*/ @@ -3194,106 +3689,235 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, /* This function should not be called by application code hence the * 'Restricted' in its name. It is not part of the public API. It is * designed for use by kernel code, and has special calling requirements - - * it should be called with the scheduler suspended. */ + * it should be called with the scheduler suspended in single-core, or + * with the queue's xQueueLock already taken in SMP. */ - - /* Place the event list item of the TCB in the appropriate event list. - * In this case it is assume that this is the only task that is going to - * be waiting on this event list, so the faster vListInsertEnd() function - * can be used in place of vListInsert. 
*/ - listINSERT_END( pxEventList, &( pxCurrentTCB->xEventListItem ) ); - - /* If the task should block indefinitely then set the block time to a - * value that will be recognised as an indefinite delay inside the - * prvAddCurrentTaskToDelayedList() function. */ - if( xWaitIndefinitely != pdFALSE ) + /* For SMP, we need to take the kernel lock here as we are about to access + * kernel data structures. */ + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { - xTicksToWait = portMAX_DELAY; - } + /* Place the event list item of the TCB in the appropriate event list. + * In this case it is assume that this is the only task that is going to + * be waiting on this event list, so the faster vListInsertEnd() function + * can be used in place of vListInsert. */ + listINSERT_END( pxEventList, &( pxCurrentTCBs[ portGET_CORE_ID() ]->xEventListItem ) ); - traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) ); - prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely ); + /* If the task should block indefinitely then set the block time to a + * value that will be recognised as an indefinite delay inside the + * prvAddCurrentTaskToDelayedList() function. */ + if( xWaitIndefinitely != pdFALSE ) + { + xTicksToWait = portMAX_DELAY; + } + + traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) ); + prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely ); + } + /* Release the previously taken kernel lock. */ + taskEXIT_CRITICAL( &xKernelLock ); } #endif /* configUSE_TIMERS */ /*-----------------------------------------------------------*/ -BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) -{ - TCB_t * pxUnblockedTCB; - BaseType_t xReturn; +#if ( configNUMBER_OF_CORES > 1 ) - /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be - * called from a critical section within an ISR. */ - - /* The event list is sorted in priority order, so the first in the list can - * be removed as it is known to be the highest priority. Remove the TCB from - * the delayed list, and add it to the ready list. - * - * If an event is for a queue that is locked then this function will never - * get called - the lock count on the queue will get modified instead. This - * means exclusive access to the event list is guaranteed here. - * - * This function assumes that a check has already been made to ensure that - * pxEventList is not empty. */ - pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ - configASSERT( pxUnblockedTCB ); - listREMOVE_ITEM( &( pxUnblockedTCB->xEventListItem ) ); - - if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) { - listREMOVE_ITEM( &( pxUnblockedTCB->xStateListItem ) ); - prvAddTaskToReadyList( pxUnblockedTCB ); + TCB_t * pxUnblockedTCB; + BaseType_t xReturn; - #if ( configUSE_TICKLESS_IDLE != 0 ) + + /* For SMP, we need to take the kernel lock here as we are about to access + * kernel data structures. + * This function can also be called from an ISR context, so we + * need to check whether we are in an ISR.*/ + if( portCHECK_IF_IN_ISR() == pdFALSE ) { - /* If a task is blocked on a kernel object then xNextTaskUnblockTime - * might be set to the blocked task's time out time. 
If the task is - * unblocked for a reason other than a timeout xNextTaskUnblockTime is - * normally left unchanged, because it is automatically reset to a new - * value when the tick count equals xNextTaskUnblockTime. However if - * tickless idling is used it might be more important to enter sleep mode - * at the earliest possible time - so reset xNextTaskUnblockTime here to - * ensure it is updated at the earliest possible time. */ - prvResetNextTaskUnblockTime(); + taskENTER_CRITICAL( &xKernelLock ); } - #endif - } - else - { - /* The delayed and ready lists cannot be accessed, so hold this task - * pending until the scheduler is resumed. */ - listINSERT_END( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) ); + else + { + taskENTER_CRITICAL_ISR( &xKernelLock ); + } + + { + /* Before taking the kernel lock, another task/ISR could have already + * emptied the pxEventList. So we insert a check here to see if + * pxEventList is empty before attempting to remove an item from it. */ + if( listLIST_IS_EMPTY( pxEventList ) == pdFALSE ) + { + /* Get current core ID as we can no longer be preempted. */ + const BaseType_t xCurCoreID = portGET_CORE_ID(); + + /* Remove the task from its current event list */ + pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList ); + configASSERT( pxUnblockedTCB ); + listREMOVE_ITEM( &( pxUnblockedTCB->xEventListItem ) ); + + /* Add the task to the ready list if a core with compatible affinity + * has NOT suspended its scheduler. This occurs when: + * - The task is pinned, and the pinned core's scheduler is running + * - The task is unpinned, and at least one of the core's scheduler is running */ + if( taskCAN_BE_SCHEDULED( pxUnblockedTCB ) == pdTRUE ) + { + listREMOVE_ITEM( &( pxUnblockedTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxUnblockedTCB ); + + #if ( configUSE_TICKLESS_IDLE != 0 ) + { + /* If a task is blocked on a kernel object then xNextTaskUnblockTime + * might be set to the blocked task's time out time. If the task is + * unblocked for a reason other than a timeout xNextTaskUnblockTime is + * normally left unchanged, because it is automatically reset to a new + * value when the tick count equals xNextTaskUnblockTime. However if + * tickless idling is used it might be more important to enter sleep mode + * at the earliest possible time - so reset xNextTaskUnblockTime here to + * ensure it is updated at the earliest possible time. */ + prvResetNextTaskUnblockTime(); + } + #endif + } + else + { + /* We arrive here due to one of the following possibilities: + * - The task is pinned to core X and core X has suspended its scheduler + * - The task is unpinned and both cores have suspend their schedulers + * Therefore, we add the task to one of the pending lists: + * - If the task is pinned to core X, add it to core X's pending list + * - If the task is unpinned, add it to the current core's pending list */ + UBaseType_t uxPendCore = ( ( pxUnblockedTCB->xCoreID == tskNO_AFFINITY ) ? 
xCurCoreID : pxUnblockedTCB->xCoreID ); + configASSERT( uxSchedulerSuspended[ uxPendCore ] != ( UBaseType_t ) 0U ); + + /* Add the task to the current core's pending list */ + listINSERT_END( &( xPendingReadyList[ uxPendCore ] ), &( pxUnblockedTCB->xEventListItem ) ); + } + + if( taskIS_YIELD_REQUIRED( pxUnblockedTCB, xCurCoreID, pdFALSE ) == pdTRUE ) + { + /* The unblocked task requires a the current core to yield */ + xReturn = pdTRUE; + + /* Mark that a yield is pending in case the user is not using the + * "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */ + xYieldPending[ xCurCoreID ] = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + else + { + /* The pxEventList was emptied before we entered the critical + * section, Nothing to do except return pdFALSE. */ + xReturn = pdFALSE; + } + } + + /* Release the previously taken kernel lock. */ + if( portCHECK_IF_IN_ISR() == pdFALSE ) + { + taskEXIT_CRITICAL( &xKernelLock ); + } + else + { + taskEXIT_CRITICAL_ISR( &xKernelLock ); + } + + return xReturn; } - if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority ) - { - /* Return true if the task removed from the event list has a higher - * priority than the calling task. This allows the calling task to know if - * it should force a context switch now. */ - xReturn = pdTRUE; +#else /* configNUMBER_OF_CORES > 1 */ - /* Mark that a yield is pending in case the user is not using the - * "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */ - xYieldPending = pdTRUE; - } - else + BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) { - xReturn = pdFALSE; + TCB_t * pxUnblockedTCB; + BaseType_t xReturn; + + /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be + * called from a critical section within an ISR. */ + + /* The event list is sorted in priority order, so the first in the list can + * be removed as it is known to be the highest priority. Remove the TCB from + * the delayed list, and add it to the ready list. + * + * If an event is for a queue that is locked then this function will never + * get called - the lock count on the queue will get modified instead. This + * means exclusive access to the event list is guaranteed here. + * + * This function assumes that a check has already been made to ensure that + * pxEventList is not empty. */ + pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + configASSERT( pxUnblockedTCB ); + listREMOVE_ITEM( &( pxUnblockedTCB->xEventListItem ) ); + + if( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) pdFALSE ) + { + listREMOVE_ITEM( &( pxUnblockedTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxUnblockedTCB ); + + #if ( configUSE_TICKLESS_IDLE != 0 ) + { + /* If a task is blocked on a kernel object then xNextTaskUnblockTime + * might be set to the blocked task's time out time. If the task is + * unblocked for a reason other than a timeout xNextTaskUnblockTime is + * normally left unchanged, because it is automatically reset to a new + * value when the tick count equals xNextTaskUnblockTime. However if + * tickless idling is used it might be more important to enter sleep mode + * at the earliest possible time - so reset xNextTaskUnblockTime here to + * ensure it is updated at the earliest possible time. 
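In the SMP version of xTaskRemoveFromEventList() above, a task unblocked while the relevant scheduler is suspended is parked on a per-core pending-ready list: a pinned task goes to its pinned core's list, an unpinned task to the list of the core doing the unblocking. A sketch of just that choice, with -1 modelling tskNO_AFFINITY (names assumed):

```c
/* Sketch of choosing which core's pending-ready list receives an unblocked
 * task while schedulers are suspended. -1 models tskNO_AFFINITY. */
#include <stdio.h>

#define SKETCH_NO_AFF    ( -1 )

static int iSelectPendingList( int iTaskCoreID, int iCurrentCore )
{
    /* Pinned tasks wait on their own core's list; unpinned tasks wait on the
     * list of the core that unblocked them. */
    return ( iTaskCoreID == SKETCH_NO_AFF ) ? iCurrentCore : iTaskCoreID;
}

int main( void )
{
    printf( "unpinned, unblocked on core 1 -> list %d\n", iSelectPendingList( SKETCH_NO_AFF, 1 ) ); /* 1 */
    printf( "pinned to core 0, unblocked on core 1 -> list %d\n", iSelectPendingList( 0, 1 ) );     /* 0 */
    return 0;
}
```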
*/ + prvResetNextTaskUnblockTime(); + } + #endif + } + else + { + /* The delayed and ready lists cannot be accessed, so hold this task + * pending until the scheduler is resumed. */ + listINSERT_END( &( xPendingReadyList[ 0 ] ), &( pxUnblockedTCB->xEventListItem ) ); + } + + if( pxUnblockedTCB->uxPriority > pxCurrentTCBs[ 0 ]->uxPriority ) + { + /* Return true if the task removed from the event list has a higher + * priority than the calling task. This allows the calling task to know if + * it should force a context switch now. */ + xReturn = pdTRUE; + + /* Mark that a yield is pending in case the user is not using the + * "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */ + xYieldPending[ 0 ] = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; } - return xReturn; -} +#endif /* configNUMBER_OF_CORES > 1 */ /*-----------------------------------------------------------*/ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, const TickType_t xItemValue ) { TCB_t * pxUnblockedTCB; + /* Get current core ID as we can no longer be preempted. */ + const BaseType_t xCurCoreID = portGET_CORE_ID(); - /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by - * the event flags implementation. */ - configASSERT( uxSchedulerSuspended != pdFALSE ); + #if ( configNUM_CORES > 1 ) + + /* THIS FUNCTION MUST BE CALLED WITH THE KERNEL LOCK ALREADY TAKEN. + * It is used by the event flags implementation, thus those functions + * should call prvTakeKernelLock() before calling this function. */ + #else /* configNUM_CORES > 1 */ + + /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by + * the event flags implementation. */ + configASSERT( uxSchedulerSuspended[ 0 ] != ( UBaseType_t ) 0U ); + #endif /* configNUM_CORES > 1 */ /* Store the new item value in the event list. */ listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE ); @@ -3318,19 +3942,50 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, } #endif - /* Remove the task from the delayed list and add it to the ready list. The - * scheduler is suspended so interrupts will not be accessing the ready - * lists. */ - listREMOVE_ITEM( &( pxUnblockedTCB->xStateListItem ) ); - prvAddTaskToReadyList( pxUnblockedTCB ); + #if ( configNUM_CORES > 1 ) - if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority ) + /* Add the task to the ready list if a core with compatible affinity + * has NOT suspended its scheduler. This occurs when: + * - The task is pinned, and the pinned core's scheduler is running + * - The task is unpinned, and at least one of the core's scheduler is + * running */ + if( taskCAN_BE_SCHEDULED( pxUnblockedTCB ) == pdFALSE ) + { + /* We arrive here due to one of the following possibilities: + * - The task is pinned to core X and core X has suspended its scheduler + * - The task is unpinned and both cores have suspend their schedulers + * Therefore, we add the task to one of the pending lists: + * - If the task is pinned to core X, add it to core X's pending list + * - If the task is unpinned, add it to the current core's pending list */ + BaseType_t xPendingListCore = ( ( pxUnblockedTCB->xCoreID == tskNO_AFFINITY ) ? xCurCoreID : pxUnblockedTCB->xCoreID ); + configASSERT( uxSchedulerSuspended[ xPendingListCore ] != ( UBaseType_t ) 0U ); + + /* The delayed and ready lists cannot be accessed, so hold this task + * pending until the scheduler is resumed. 
*/ + listINSERT_END( &( xPendingReadyList[ xPendingListCore ] ), &( pxUnblockedTCB->xEventListItem ) ); + } + else + #else /* configNUM_CORES > 1 */ + + /* In single core, the caller of this function has already suspended the + * scheduler, which means we have exclusive access to the ready list. + * We add the unblocked task to the ready list directly. */ + #endif /* configNUM_CORES > 1 */ { - /* The unblocked task has a priority above that of the calling task, so - * a context switch is required. This function is called with the - * scheduler suspended so xYieldPending is set so the context switch - * occurs immediately that the scheduler is resumed (unsuspended). */ - xYieldPending = pdTRUE; + /* Remove the task from the delayed list and add it to the ready list. The + * scheduler is suspended so interrupts will not be accessing the ready + * lists. */ + listREMOVE_ITEM( &( pxUnblockedTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxUnblockedTCB ); + + if( taskIS_YIELD_REQUIRED( pxUnblockedTCB, xCurCoreID, pdFALSE ) == pdTRUE ) + { + /* The unblocked task has a priority above that of the calling task, so + * a context switch is required. This function is called with the + * scheduler suspended so xYieldPending is set so the context switch + * occurs immediately that the scheduler is resumed (unsuspended). */ + xYieldPending[ xCurCoreID ] = pdTRUE; + } } } /*-----------------------------------------------------------*/ @@ -3338,12 +3993,12 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) { configASSERT( pxTimeOut ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { pxTimeOut->xOverflowCount = xNumOfOverflows; pxTimeOut->xTimeOnEntering = xTickCount; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); } /*-----------------------------------------------------------*/ @@ -3363,18 +4018,20 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, configASSERT( pxTimeOut ); configASSERT( pxTicksToWait ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { /* Minor optimisation. The tick count cannot change in this block. */ const TickType_t xConstTickCount = xTickCount; const TickType_t xElapsedTime = xConstTickCount - pxTimeOut->xTimeOnEntering; + /* Get current core ID as we can no longer be preempted. */ + const BaseType_t xCurCoreID = portGET_CORE_ID(); #if ( INCLUDE_xTaskAbortDelay == 1 ) - if( pxCurrentTCB->ucDelayAborted != ( uint8_t ) pdFALSE ) + if( pxCurrentTCBs[ xCurCoreID ]->ucDelayAborted != ( uint8_t ) pdFALSE ) { /* The delay was aborted, which is not the same as a time out, * but has the same result. */ - pxCurrentTCB->ucDelayAborted = pdFALSE; + pxCurrentTCBs[ xCurCoreID ]->ucDelayAborted = pdFALSE; xReturn = pdTRUE; } else @@ -3414,7 +4071,7 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, xReturn = pdTRUE; } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); return xReturn; } @@ -3422,7 +4079,7 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, void vTaskMissedYield( void ) { - xYieldPending = pdTRUE; + xYieldPending[ portGET_CORE_ID() ] = pdTRUE; } /*-----------------------------------------------------------*/ @@ -3540,6 +4197,10 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) } #endif /* configUSE_IDLE_HOOK */ + /* Call the esp-idf idle hook system. 
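xTaskCheckForTimeOut() above captures the tick count (and the overflow counter) when the wait starts and, on every wake-up, works out how much of the requested wait remains. The sketch below covers only the basic elapsed-time arithmetic, relying on unsigned wrap-around; the names, types and single-overflow handling are simplifications of the real function:

```c
/* Sketch of the timeout bookkeeping: capture the tick count on entry, then on
 * each wake-up work out how much of the wait remains. Unsigned subtraction
 * keeps the arithmetic correct across a single tick-counter wrap. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t ulTimeOnEntering; } SketchTimeOut_t;

static void vSketchSetTimeOutState( SketchTimeOut_t * pxTimeOut, uint32_t ulNow )
{
    pxTimeOut->ulTimeOnEntering = ulNow;
}

static bool xSketchCheckForTimeOut( const SketchTimeOut_t * pxTimeOut,
                                    uint32_t * pulTicksToWait,
                                    uint32_t ulNow )
{
    uint32_t ulElapsed = ulNow - pxTimeOut->ulTimeOnEntering; /* wraps safely */

    if( ulElapsed < *pulTicksToWait )
    {
        *pulTicksToWait -= ulElapsed;   /* keep waiting for the remainder */
        return false;
    }

    return true;                        /* the full wait has elapsed */
}

int main( void )
{
    SketchTimeOut_t xTimeOut;
    uint32_t ulTicksToWait = 100;

    vSketchSetTimeOutState( &xTimeOut, 0xFFFFFFF0u );              /* entered just before a wrap */
    printf( "timed out: %d, remaining: %u\n",
            xSketchCheckForTimeOut( &xTimeOut, &ulTicksToWait, 40 ),
            (unsigned) ulTicksToWait );                            /* 0, 44 */
    return 0;
}
```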
Todo IDF-8180 */ + extern void esp_vApplicationIdleHook( void ); + esp_vApplicationIdleHook(); + /* This conditional compilation should use inequality to 0, not equality * to 1. This is to ensure portSUPPRESS_TICKS_AND_SLEEP() is called when * user defined low power mode implementations require @@ -3557,7 +4218,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP ) { - vTaskSuspendAll(); + prvENTER_CRITICAL_OR_SUSPEND_ALL( &xKernelLock ); { /* Now the scheduler is suspended, the expected idle * time can be sampled again, and this time its value can @@ -3581,7 +4242,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) mtCOVERAGE_TEST_MARKER(); } } - ( void ) xTaskResumeAll(); + ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xKernelLock ); } else { @@ -3606,23 +4267,28 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) /* This function must be called from a critical section. */ - if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0 ) + /* Get current core ID as we can no longer be preempted. */ + const BaseType_t xCurCoreID = portGET_CORE_ID(); + + if( listCURRENT_LIST_LENGTH( &xPendingReadyList[ xCurCoreID ] ) != 0 ) { /* A task was made ready while the scheduler was suspended. */ eReturn = eAbortSleep; } - else if( xYieldPending != pdFALSE ) + else if( xYieldPending[ xCurCoreID ] != pdFALSE ) { /* A yield was pended while the scheduler was suspended. */ eReturn = eAbortSleep; } - else if( xPendedTicks != 0 ) - { - /* A tick interrupt has already occurred but was held pending - * because the scheduler is suspended. */ - eReturn = eAbortSleep; - } + #if ( configNUMBER_OF_CORES == 1 ) + else if( xPendedTicks != 0 ) + { + /* A tick interrupt has already occurred but was held pending + * because the scheduler is suspended. */ + eReturn = eAbortSleep; + } + #endif /* configNUMBER_OF_CORES == 1 */ #if ( INCLUDE_vTaskSuspend == 1 ) else if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == ( uxCurrentNumberOfTasks - uxNonApplicationTasks ) ) { @@ -3709,6 +4375,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) static void prvInitialiseTaskLists( void ) { UBaseType_t uxPriority; + UBaseType_t x; for( uxPriority = ( UBaseType_t ) 0U; uxPriority < ( UBaseType_t ) configMAX_PRIORITIES; uxPriority++ ) { @@ -3717,7 +4384,11 @@ static void prvInitialiseTaskLists( void ) vListInitialise( &xDelayedTaskList1 ); vListInitialise( &xDelayedTaskList2 ); - vListInitialise( &xPendingReadyList ); + + for( x = 0; x < configNUMBER_OF_CORES; x++ ) + { + vListInitialise( &xPendingReadyList[ x ] ); + } #if ( INCLUDE_vTaskDelete == 1 ) { @@ -3750,16 +4421,58 @@ static void prvCheckTasksWaitingTermination( void ) * being called too often in the idle task. */ while( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U ) { - taskENTER_CRITICAL(); + #if ( configNUMBER_OF_CORES > 1 ) { - pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ - ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); - --uxCurrentNumberOfTasks; - --uxDeletedTasksWaitingCleanUp; - } - taskEXIT_CRITICAL(); + pxTCB = NULL; + taskENTER_CRITICAL( &xKernelLock ); + { + /* List may have already been cleared by the other core. Check again */ + if( listLIST_IS_EMPTY( &xTasksWaitingTermination ) == pdFALSE ) + { + /* We can't delete a task if it is still running on + * the other core. 
Keep walking the list until we + * find a task we can free, or until we walk the + * entire list. */ + ListItem_t * xEntry; - prvDeleteTCB( pxTCB ); + for( xEntry = listGET_HEAD_ENTRY( &xTasksWaitingTermination ); xEntry != listGET_END_MARKER( &xTasksWaitingTermination ); xEntry = listGET_NEXT( xEntry ) ) + { + if( taskIS_CURRENTLY_RUNNING( ( ( TCB_t * ) listGET_LIST_ITEM_OWNER( xEntry ) ) ) == pdFALSE ) + { + pxTCB = ( TCB_t * ) listGET_LIST_ITEM_OWNER( xEntry ); + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + --uxCurrentNumberOfTasks; + --uxDeletedTasksWaitingCleanUp; + break; + } + } + } + } + taskEXIT_CRITICAL( &xKernelLock ); + + if( pxTCB != NULL ) + { + prvDeleteTCB( pxTCB ); + } + else + { + /* No task found to delete, break out of loop */ + break; + } + } + #else /* configNUMBER_OF_CORES > 1 */ + { + taskENTER_CRITICAL( &xKernelLock ); + { + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + --uxCurrentNumberOfTasks; + --uxDeletedTasksWaitingCleanUp; + } + taskEXIT_CRITICAL( &xKernelLock ); + prvDeleteTCB( pxTCB ); + } + #endif /* configNUMBER_OF_CORES > 1 */ } } #endif /* INCLUDE_vTaskDelete */ @@ -3778,92 +4491,111 @@ static void prvCheckTasksWaitingTermination( void ) /* xTask is NULL then get the state of the calling task. */ pxTCB = prvGetTCBFromHandle( xTask ); - pxTaskStatus->xHandle = ( TaskHandle_t ) pxTCB; - pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName[ 0 ] ); - pxTaskStatus->uxCurrentPriority = pxTCB->uxPriority; - pxTaskStatus->pxStackBase = pxTCB->pxStack; - #if ( ( portSTACK_GROWTH > 0 ) && ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) - pxTaskStatus->pxTopOfStack = pxTCB->pxTopOfStack; - pxTaskStatus->pxEndOfStack = pxTCB->pxEndOfStack; - #endif - pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber; + /* A critical section is required for SMP in case another core modifies + * the task simultaneously. */ + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + { + pxTaskStatus->xHandle = ( TaskHandle_t ) pxTCB; + pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName[ 0 ] ); + pxTaskStatus->uxCurrentPriority = pxTCB->uxPriority; + pxTaskStatus->pxStackBase = pxTCB->pxStack; + #if ( ( portSTACK_GROWTH > 0 ) && ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) + pxTaskStatus->pxTopOfStack = pxTCB->pxTopOfStack; + pxTaskStatus->pxEndOfStack = pxTCB->pxEndOfStack; + #endif + pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber; + /* Todo: Remove xCoreID for single core builds (IDF-7894) */ + pxTaskStatus->xCoreID = pxTCB->xCoreID; - #if ( configUSE_MUTEXES == 1 ) - { - pxTaskStatus->uxBasePriority = pxTCB->uxBasePriority; - } - #else - { - pxTaskStatus->uxBasePriority = 0; - } - #endif - - #if ( configGENERATE_RUN_TIME_STATS == 1 ) - { - pxTaskStatus->ulRunTimeCounter = pxTCB->ulRunTimeCounter; - } - #else - { - pxTaskStatus->ulRunTimeCounter = ( configRUN_TIME_COUNTER_TYPE ) 0; - } - #endif - - /* Obtaining the task state is a little fiddly, so is only done if the - * value of eState passed into this function is eInvalid - otherwise the - * state is just set to whatever is passed in. 
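prvCheckTasksWaitingTermination() above cannot free a deleted task's TCB while that task is still running on the other core, so the SMP path walks the termination list for an entry that is idle on every core and gives up until the next idle pass if none is found. A sketch of that filter with arrays standing in for the kernel lists (all names assumed):

```c
/* Sketch of deferred deletion on SMP: only reclaim a task that is not
 * currently running on any core, otherwise retry on a later idle pass. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define SKETCH_CORES    2

static const int iRunningTask[ SKETCH_CORES ] = { 3, 1 };  /* task IDs per core */
static const int iWaitingTermination[] = { 1, 4 };         /* deleted, not yet freed */

static bool xIsRunningSomewhere( int iTask )
{
    for( int c = 0; c < SKETCH_CORES; c++ )
    {
        if( iRunningTask[ c ] == iTask )
        {
            return true;
        }
    }

    return false;
}

int main( void )
{
    /* One idle-task cleanup pass: free the first waiting task that is idle on
     * every core. Task 1 is still running on core 1, so task 4 is freed. */
    for( size_t i = 0; i < ( sizeof( iWaitingTermination ) / sizeof( int ) ); i++ )
    {
        if( xIsRunningSomewhere( iWaitingTermination[ i ] ) == false )
        {
            printf( "freeing TCB of task %d\n", iWaitingTermination[ i ] );
            break;
        }
    }

    return 0;
}
```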
*/ - if( eState != eInvalid ) - { - if( pxTCB == pxCurrentTCB ) + #if ( configUSE_MUTEXES == 1 ) { - pxTaskStatus->eCurrentState = eRunning; - } - else - { - pxTaskStatus->eCurrentState = eState; - - #if ( INCLUDE_vTaskSuspend == 1 ) - { - /* If the task is in the suspended list then there is a - * chance it is actually just blocked indefinitely - so really - * it should be reported as being in the Blocked state. */ - if( eState == eSuspended ) - { - vTaskSuspendAll(); - { - if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) - { - pxTaskStatus->eCurrentState = eBlocked; - } - } - ( void ) xTaskResumeAll(); - } - } - #endif /* INCLUDE_vTaskSuspend */ - } - } - else - { - pxTaskStatus->eCurrentState = eTaskGetState( pxTCB ); - } - - /* Obtaining the stack space takes some time, so the xGetFreeStackSpace - * parameter is provided to allow it to be skipped. */ - if( xGetFreeStackSpace != pdFALSE ) - { - #if ( portSTACK_GROWTH > 0 ) - { - pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxEndOfStack ); + pxTaskStatus->uxBasePriority = pxTCB->uxBasePriority; } #else { - pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxStack ); + pxTaskStatus->uxBasePriority = 0; } #endif + + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + { + pxTaskStatus->ulRunTimeCounter = pxTCB->ulRunTimeCounter; + } + #else + { + pxTaskStatus->ulRunTimeCounter = ( configRUN_TIME_COUNTER_TYPE ) 0; + } + #endif + + /* Obtaining the task state is a little fiddly, so is only done if the + * value of eState passed into this function is eInvalid - otherwise the + * state is just set to whatever is passed in. */ + if( eState != eInvalid ) + { + if( pxTCB == pxCurrentTCBs[ portGET_CORE_ID() ] ) + { + pxTaskStatus->eCurrentState = eRunning; + } + else + { + pxTaskStatus->eCurrentState = eState; + + #if ( INCLUDE_vTaskSuspend == 1 ) + { + /* If the task is in the suspended list then there is a + * chance it is actually just blocked indefinitely - so really + * it should be reported as being in the Blocked state. */ + if( eState == eSuspended ) + { + #if ( configNUMBER_OF_CORES == 1 ) + { + /* Single core uses a scheduler suspension to + * atomically check if the task task is blocked. */ + vTaskSuspendAll(); + } + #endif /* configNUMBER_OF_CORES == 1 */ + { + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + pxTaskStatus->eCurrentState = eBlocked; + } + } + #if ( configNUMBER_OF_CORES == 1 ) + { + ( void ) xTaskResumeAll(); + } + #endif /* configNUMBER_OF_CORES == 1 */ + } + } + #endif /* INCLUDE_vTaskSuspend */ + } + } + else + { + pxTaskStatus->eCurrentState = eTaskGetState( pxTCB ); + } + + /* Obtaining the stack space takes some time, so the xGetFreeStackSpace + * parameter is provided to allow it to be skipped. */ + if( xGetFreeStackSpace != pdFALSE ) + { + #if ( portSTACK_GROWTH > 0 ) + { + pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxEndOfStack ); + } + #else + { + pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxStack ); + } + #endif + } + else + { + pxTaskStatus->usStackHighWaterMark = 0; + } } - else - { - pxTaskStatus->usStackHighWaterMark = 0; - } + /* Exit the previously entered critical section. 
*/ + taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); } #endif /* configUSE_TRACE_FACILITY */ @@ -4005,7 +4737,8 @@ static void prvCheckTasksWaitingTermination( void ) #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) { /* Free up the memory allocated for the task's TLS Block. */ - configDEINIT_TLS_BLOCK( pxCurrentTCB->xTLSBlock ); + /* Note: Fixed bug in upstream. Free TLS block of pxTCB, NOT pxCurrentTCBs */ + configDEINIT_TLS_BLOCK( pxTCB->xTLSBlock ); } #endif @@ -4073,14 +4806,7 @@ static void prvResetNextTaskUnblockTime( void ) TaskHandle_t xTaskGetCurrentTaskHandle( void ) { - TaskHandle_t xReturn; - - /* A critical section is not required as this is not called from - * an interrupt and the current TCB will always be the same for any - * individual execution thread. */ - xReturn = pxCurrentTCB; - - return xReturn; + return xTaskGetCurrentTaskHandleForCore( portGET_CORE_ID() ); } #endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ @@ -4092,21 +4818,28 @@ static void prvResetNextTaskUnblockTime( void ) { BaseType_t xReturn; - if( xSchedulerRunning == pdFALSE ) + /* For SMP, we need to take the kernel lock here as we are about to + * access kernel data structures. */ + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { - xReturn = taskSCHEDULER_NOT_STARTED; - } - else - { - if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + if( xSchedulerRunning == pdFALSE ) { - xReturn = taskSCHEDULER_RUNNING; + xReturn = taskSCHEDULER_NOT_STARTED; } else { - xReturn = taskSCHEDULER_SUSPENDED; + if( uxSchedulerSuspended[ portGET_CORE_ID() ] == ( UBaseType_t ) pdFALSE ) + { + xReturn = taskSCHEDULER_RUNNING; + } + else + { + xReturn = taskSCHEDULER_SUSPENDED; + } } } + /* Release the previously taken kernel lock. */ + taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); return xReturn; } @@ -4121,82 +4854,92 @@ static void prvResetNextTaskUnblockTime( void ) TCB_t * const pxMutexHolderTCB = pxMutexHolder; BaseType_t xReturn = pdFALSE; - /* If the mutex was given back by an interrupt while the queue was - * locked then the mutex holder might now be NULL. _RB_ Is this still - * needed as interrupts can no longer use mutexes? */ - if( pxMutexHolder != NULL ) + /* For SMP, we need to take the kernel lock here as we are about to + * access kernel data structures. */ + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { - /* If the holder of the mutex has a priority below the priority of - * the task attempting to obtain the mutex then it will temporarily - * inherit the priority of the task attempting to obtain the mutex. */ - if( pxMutexHolderTCB->uxPriority < pxCurrentTCB->uxPriority ) - { - /* Adjust the mutex holder state to account for its new - * priority. Only reset the event list item value if the value is - * not being used for anything else. */ - if( ( listGET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL ) - { - listSET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + /* Get current core ID as we can no longer be preempted. */ + const BaseType_t xCurCoreID = portGET_CORE_ID(); - /* If the task being modified is in the ready state it will need - * to be moved into a new list. 
*/ - if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxMutexHolderTCB->uxPriority ] ), &( pxMutexHolderTCB->xStateListItem ) ) != pdFALSE ) + /* If the mutex was given back by an interrupt while the queue was + * locked then the mutex holder might now be NULL. _RB_ Is this still + * needed as interrupts can no longer use mutexes? */ + if( pxMutexHolder != NULL ) + { + /* If the holder of the mutex has a priority below the priority of + * the task attempting to obtain the mutex then it will temporarily + * inherit the priority of the task attempting to obtain the mutex. */ + if( pxMutexHolderTCB->uxPriority < pxCurrentTCBs[ xCurCoreID ]->uxPriority ) { - if( uxListRemove( &( pxMutexHolderTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + /* Adjust the mutex holder state to account for its new + * priority. Only reset the event list item value if the value is + * not being used for anything else. */ + if( ( listGET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL ) { - /* It is known that the task is in its ready list so - * there is no need to check again and the port level - * reset macro can be called directly. */ - portRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority, uxTopReadyPriority ); + listSET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCBs[ xCurCoreID ]->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ } else { mtCOVERAGE_TEST_MARKER(); } - /* Inherit the priority before being moved into the new list. */ - pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority; - prvAddTaskToReadyList( pxMutexHolderTCB ); - } - else - { - /* Just inherit the priority. */ - pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority; - } + /* If the task being modified is in the ready state it will need + * to be moved into a new list. */ + if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxMutexHolderTCB->uxPriority ] ), &( pxMutexHolderTCB->xStateListItem ) ) != pdFALSE ) + { + if( uxListRemove( &( pxMutexHolderTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + /* It is known that the task is in its ready list so + * there is no need to check again and the port level + * reset macro can be called directly. */ + portRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority, uxTopReadyPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } - traceTASK_PRIORITY_INHERIT( pxMutexHolderTCB, pxCurrentTCB->uxPriority ); + /* Inherit the priority before being moved into the new list. */ + pxMutexHolderTCB->uxPriority = pxCurrentTCBs[ xCurCoreID ]->uxPriority; + prvAddTaskToReadyList( pxMutexHolderTCB ); + } + else + { + /* Just inherit the priority. */ + pxMutexHolderTCB->uxPriority = pxCurrentTCBs[ xCurCoreID ]->uxPriority; + } - /* Inheritance occurred. */ - xReturn = pdTRUE; - } - else - { - if( pxMutexHolderTCB->uxBasePriority < pxCurrentTCB->uxPriority ) - { - /* The base priority of the mutex holder is lower than the - * priority of the task attempting to take the mutex, but the - * current priority of the mutex holder is not lower than the - * priority of the task attempting to take the mutex. - * Therefore the mutex holder must have already inherited a - * priority, but inheritance would have occurred if that had - * not been the case. */ + traceTASK_PRIORITY_INHERIT( pxMutexHolderTCB, pxCurrentTCBs[ xCurCoreID ]->uxPriority ); + + /* Inheritance occurred. 
*/ xReturn = pdTRUE; } else { - mtCOVERAGE_TEST_MARKER(); + if( pxMutexHolderTCB->uxBasePriority < pxCurrentTCBs[ xCurCoreID ]->uxPriority ) + { + /* The base priority of the mutex holder is lower than the + * priority of the task attempting to take the mutex, but the + * current priority of the mutex holder is not lower than the + * priority of the task attempting to take the mutex. + * Therefore the mutex holder must have already inherited a + * priority, but inheritance would have occurred if that had + * not been the case. */ + xReturn = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } } + else + { + mtCOVERAGE_TEST_MARKER(); + } } - else - { - mtCOVERAGE_TEST_MARKER(); - } + /* Release the previously taken kernel lock. */ + taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); return xReturn; } @@ -4211,57 +4954,66 @@ static void prvResetNextTaskUnblockTime( void ) TCB_t * const pxTCB = pxMutexHolder; BaseType_t xReturn = pdFALSE; - if( pxMutexHolder != NULL ) + /* For SMP, we need to take the kernel lock here as we are about to + * access kernel data structures. */ + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { - /* A task can only have an inherited priority if it holds the mutex. - * If the mutex is held by a task then it cannot be given from an - * interrupt, and if a mutex is given by the holding task then it must - * be the running state task. */ - configASSERT( pxTCB == pxCurrentTCB ); - configASSERT( pxTCB->uxMutexesHeld ); - ( pxTCB->uxMutexesHeld )--; - - /* Has the holder of the mutex inherited the priority of another - * task? */ - if( pxTCB->uxPriority != pxTCB->uxBasePriority ) + if( pxMutexHolder != NULL ) { - /* Only disinherit if no other mutexes are held. */ - if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 ) + /* A task can only have an inherited priority if it holds the mutex. + * If the mutex is held by a task then it cannot be given from an + * interrupt, and if a mutex is given by the holding task then it must + * be the running state task. */ + configASSERT( pxTCB == pxCurrentTCBs[ portGET_CORE_ID() ] ); + configASSERT( pxTCB->uxMutexesHeld ); + ( pxTCB->uxMutexesHeld )--; + + /* Has the holder of the mutex inherited the priority of another + * task? */ + if( pxTCB->uxPriority != pxTCB->uxBasePriority ) { - /* A task can only have an inherited priority if it holds - * the mutex. If the mutex is held by a task then it cannot be - * given from an interrupt, and if a mutex is given by the - * holding task then it must be the running state task. Remove - * the holding task from the ready list. */ - if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + /* Only disinherit if no other mutexes are held. */ + if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 ) { - portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority ); + /* A task can only have an inherited priority if it holds + * the mutex. If the mutex is held by a task then it cannot be + * given from an interrupt, and if a mutex is given by the + * holding task then it must be the running state task. Remove + * the holding task from the ready list. */ + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Disinherit the priority before adding the task into the + * new ready list. */ + traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority ); + pxTCB->uxPriority = pxTCB->uxBasePriority; + + /* Reset the event list item value. 
It cannot be in use for + * any other purpose if this task is running, and it must be + * running to give back the mutex. */ + listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + prvAddTaskToReadyList( pxTCB ); + + /* Return true to indicate that a context switch is required. + * This is only actually required in the corner case whereby + * multiple mutexes were held and the mutexes were given back + * in an order different to that in which they were taken. + * If a context switch did not occur when the first mutex was + * returned, even if a task was waiting on it, then a context + * switch should occur when the last mutex is returned whether + * a task is waiting on it or not. */ + xReturn = pdTRUE; } else { mtCOVERAGE_TEST_MARKER(); } - - /* Disinherit the priority before adding the task into the - * new ready list. */ - traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority ); - pxTCB->uxPriority = pxTCB->uxBasePriority; - - /* Reset the event list item value. It cannot be in use for - * any other purpose if this task is running, and it must be - * running to give back the mutex. */ - listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ - prvAddTaskToReadyList( pxTCB ); - - /* Return true to indicate that a context switch is required. - * This is only actually required in the corner case whereby - * multiple mutexes were held and the mutexes were given back - * in an order different to that in which they were taken. - * If a context switch did not occur when the first mutex was - * returned, even if a task was waiting on it, then a context - * switch should occur when the last mutex is returned whether - * a task is waiting on it or not. */ - xReturn = pdTRUE; } else { @@ -4273,10 +5025,8 @@ static void prvResetNextTaskUnblockTime( void ) mtCOVERAGE_TEST_MARKER(); } } - else - { - mtCOVERAGE_TEST_MARKER(); - } + /* Release the previously taken kernel lock. */ + taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); return xReturn; } @@ -4293,78 +5043,87 @@ static void prvResetNextTaskUnblockTime( void ) UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse; const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1; - if( pxMutexHolder != NULL ) + /* For SMP, we need to take the kernel lock here as we are about to + * access kernel data structures. */ + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { - /* If pxMutexHolder is not NULL then the holder must hold at least - * one mutex. */ - configASSERT( pxTCB->uxMutexesHeld ); + if( pxMutexHolder != NULL ) + { + /* If pxMutexHolder is not NULL then the holder must hold at least + * one mutex. */ + configASSERT( pxTCB->uxMutexesHeld ); - /* Determine the priority to which the priority of the task that - * holds the mutex should be set. This will be the greater of the - * holding task's base priority and the priority of the highest - * priority task that is waiting to obtain the mutex. */ - if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask ) - { - uxPriorityToUse = uxHighestPriorityWaitingTask; - } - else - { - uxPriorityToUse = pxTCB->uxBasePriority; - } - - /* Does the priority need to change? */ - if( pxTCB->uxPriority != uxPriorityToUse ) - { - /* Only disinherit if no other mutexes are held. 
This is a - * simplification in the priority inheritance implementation. If - * the task that holds the mutex is also holding other mutexes then - * the other mutexes may have caused the priority inheritance. */ - if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld ) + /* Determine the priority to which the priority of the task that + * holds the mutex should be set. This will be the greater of the + * holding task's base priority and the priority of the highest + * priority task that is waiting to obtain the mutex. */ + if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask ) { - /* If a task has timed out because it already holds the - * mutex it was trying to obtain then it cannot of inherited - * its own priority. */ - configASSERT( pxTCB != pxCurrentTCB ); + uxPriorityToUse = uxHighestPriorityWaitingTask; + } + else + { + uxPriorityToUse = pxTCB->uxBasePriority; + } - /* Disinherit the priority, remembering the previous - * priority to facilitate determining the subject task's - * state. */ - traceTASK_PRIORITY_DISINHERIT( pxTCB, uxPriorityToUse ); - uxPriorityUsedOnEntry = pxTCB->uxPriority; - pxTCB->uxPriority = uxPriorityToUse; + /* Does the priority need to change? */ + if( pxTCB->uxPriority != uxPriorityToUse ) + { + /* Only disinherit if no other mutexes are held. This is a + * simplification in the priority inheritance implementation. If + * the task that holds the mutex is also holding other mutexes then + * the other mutexes may have caused the priority inheritance. */ + if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld ) + { + /* If a task has timed out because it already holds the + * mutex it was trying to obtain then it cannot of inherited + * its own priority. */ + configASSERT( pxTCB != pxCurrentTCBs[ portGET_CORE_ID() ] ); - /* Only reset the event list item value if the value is not - * being used for anything else. */ - if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL ) - { - listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ - } - else - { - mtCOVERAGE_TEST_MARKER(); - } + /* Disinherit the priority, remembering the previous + * priority to facilitate determining the subject task's + * state. */ + traceTASK_PRIORITY_DISINHERIT( pxTCB, uxPriorityToUse ); + uxPriorityUsedOnEntry = pxTCB->uxPriority; + pxTCB->uxPriority = uxPriorityToUse; - /* If the running task is not the task that holds the mutex - * then the task that holds the mutex could be in either the - * Ready, Blocked or Suspended states. Only remove the task - * from its current state list if it is in the Ready state as - * the task's priority is going to change and there is one - * Ready list per priority. */ - if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE ) - { - if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + /* Only reset the event list item value if the value is not + * being used for anything else. */ + if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL ) { - /* It is known that the task is in its ready list so - * there is no need to check again and the port level - * reset macro can be called directly. 
*/ - portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority ); + listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ } else { mtCOVERAGE_TEST_MARKER(); } - prvAddTaskToReadyList( pxTCB ); + /* If the running task is not the task that holds the mutex + * then the task that holds the mutex could be in either the + * Ready, Blocked or Suspended states. Only remove the task + * from its current state list if it is in the Ready state as + * the task's priority is going to change and there is one + * Ready list per priority. */ + if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE ) + { + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + /* It is known that the task is in its ready list so + * there is no need to check again and the port level + * reset macro can be called directly. */ + portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + prvAddTaskToReadyList( pxTCB ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } else { @@ -4381,10 +5140,8 @@ static void prvResetNextTaskUnblockTime( void ) mtCOVERAGE_TEST_MARKER(); } } - else - { - mtCOVERAGE_TEST_MARKER(); - } + /* Release the previously taken kernel lock. */ + taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); } #endif /* configUSE_MUTEXES */ @@ -4398,7 +5155,10 @@ static void prvResetNextTaskUnblockTime( void ) if( xSchedulerRunning != pdFALSE ) { - ( pxCurrentTCB->uxCriticalNesting )++; + /* Get current core ID as we can no longer be preempted. */ + const BaseType_t xCurCoreID = portGET_CORE_ID(); + + ( pxCurrentTCBs[ xCurCoreID ]->uxCriticalNesting )++; /* This is not the interrupt safe version of the enter critical * function so assert() if it is being called from an interrupt @@ -4406,7 +5166,7 @@ static void prvResetNextTaskUnblockTime( void ) * interrupt. Only assert if the critical nesting count is 1 to * protect against recursive calls if the assert function also uses a * critical section. */ - if( pxCurrentTCB->uxCriticalNesting == 1 ) + if( pxCurrentTCBs[ xCurCoreID ]->uxCriticalNesting == 1 ) { portASSERT_IF_IN_ISR(); } @@ -4426,11 +5186,14 @@ static void prvResetNextTaskUnblockTime( void ) { if( xSchedulerRunning != pdFALSE ) { - if( pxCurrentTCB->uxCriticalNesting > 0U ) - { - ( pxCurrentTCB->uxCriticalNesting )--; + /* Get current core ID as we can no longer be preempted. */ + const BaseType_t xCurCoreID = portGET_CORE_ID(); - if( pxCurrentTCB->uxCriticalNesting == 0U ) + if( pxCurrentTCBs[ xCurCoreID ]->uxCriticalNesting > 0U ) + { + ( pxCurrentTCBs[ xCurCoreID ]->uxCriticalNesting )--; + + if( pxCurrentTCBs[ xCurCoreID ]->uxCriticalNesting == 0U ) { portENABLE_INTERRUPTS(); } @@ -4569,8 +5332,8 @@ static void prvResetNextTaskUnblockTime( void ) pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName ); /* Write the rest of the string. */ - sprintf( pcWriteBuffer, "\t%c\t%u\t%u\t%u\r\n", cStatus, ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority, ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark, ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. 
*/ - pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */ + sprintf( pcWriteBuffer, "\t%c\t%u\t%d\t%u\t%u\r\n", cStatus, ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority, pxTaskStatusArray[ x ].xCoreID, ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark, ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */ + pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */ } /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION @@ -4711,11 +5474,21 @@ TickType_t uxTaskResetEventItemValue( void ) { TickType_t uxReturn; - uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ) ); + /* For SMP, we need to take the kernel lock here to ensure nothing else + * modifies the task's event item value simultaneously. */ + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + { + /* Get current core ID as we can no longer be preempted. */ + const BaseType_t xCurCoreID = portGET_CORE_ID(); - /* Reset the event list item to its normal value - so it can be used with - * queues and semaphores. */ - listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCBs[ xCurCoreID ]->xEventListItem ) ); + + /* Reset the event list item to its normal value - so it can be used with + * queues and semaphores. */ + listSET_LIST_ITEM_VALUE( &( pxCurrentTCBs[ xCurCoreID ]->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCBs[ xCurCoreID ]->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + } + taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + /* Release the previously taken kernel lock. */ return uxReturn; } @@ -4725,14 +5498,28 @@ TickType_t uxTaskResetEventItemValue( void ) TaskHandle_t pvTaskIncrementMutexHeldCount( void ) { - /* If xSemaphoreCreateMutex() is called before any tasks have been created - * then pxCurrentTCB will be NULL. */ - if( pxCurrentTCB != NULL ) - { - ( pxCurrentTCB->uxMutexesHeld )++; - } + TaskHandle_t xReturn; - return pxCurrentTCB; + /* For SMP, we need to take the kernel lock here as we are about to + * access kernel data structures. */ + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + { + /* Get current core ID as we can no longer be preempted. */ + const BaseType_t xCurCoreID = portGET_CORE_ID(); + + /* If xSemaphoreCreateMutex() is called before any tasks have been created + * then pxCurrentTCBs will be NULL. */ + if( pxCurrentTCBs[ xCurCoreID ] != NULL ) + { + ( pxCurrentTCBs[ xCurCoreID ]->uxMutexesHeld )++; + } + + xReturn = pxCurrentTCBs[ xCurCoreID ]; + } + /* Release the previously taken kernel lock. */ + taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + + return xReturn; } #endif /* configUSE_MUTEXES */ @@ -4748,13 +5535,16 @@ TickType_t uxTaskResetEventItemValue( void ) configASSERT( uxIndexToWait < configTASK_NOTIFICATION_ARRAY_ENTRIES ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { + /* Get current core ID as we can no longer be preempted. 
*/ + const BaseType_t xCurCoreID = portGET_CORE_ID(); + /* Only block if the notification count is not already non-zero. */ - if( pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] == 0UL ) + if( pxCurrentTCBs[ xCurCoreID ]->ulNotifiedValue[ uxIndexToWait ] == 0UL ) { /* Mark this task as waiting for a notification. */ - pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskWAITING_NOTIFICATION; + pxCurrentTCBs[ xCurCoreID ]->ucNotifyState[ uxIndexToWait ] = taskWAITING_NOTIFICATION; if( xTicksToWait > ( TickType_t ) 0 ) { @@ -4777,22 +5567,25 @@ TickType_t uxTaskResetEventItemValue( void ) mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { + /* Get current core ID as we can no longer be preempted. */ + const BaseType_t xCurCoreID = portGET_CORE_ID(); + traceTASK_NOTIFY_TAKE( uxIndexToWait ); - ulReturn = pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ]; + ulReturn = pxCurrentTCBs[ xCurCoreID ]->ulNotifiedValue[ uxIndexToWait ]; if( ulReturn != 0UL ) { if( xClearCountOnExit != pdFALSE ) { - pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] = 0UL; + pxCurrentTCBs[ xCurCoreID ]->ulNotifiedValue[ uxIndexToWait ] = 0UL; } else { - pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] = ulReturn - ( uint32_t ) 1; + pxCurrentTCBs[ xCurCoreID ]->ulNotifiedValue[ uxIndexToWait ] = ulReturn - ( uint32_t ) 1; } } else @@ -4800,9 +5593,9 @@ TickType_t uxTaskResetEventItemValue( void ) mtCOVERAGE_TEST_MARKER(); } - pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskNOT_WAITING_NOTIFICATION; + pxCurrentTCBs[ xCurCoreID ]->ucNotifyState[ uxIndexToWait ] = taskNOT_WAITING_NOTIFICATION; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); return ulReturn; } @@ -4822,18 +5615,21 @@ TickType_t uxTaskResetEventItemValue( void ) configASSERT( uxIndexToWait < configTASK_NOTIFICATION_ARRAY_ENTRIES ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { + /* Get current core ID as we can no longer be preempted. */ + const BaseType_t xCurCoreID = portGET_CORE_ID(); + /* Only block if a notification is not already pending. */ - if( pxCurrentTCB->ucNotifyState[ uxIndexToWait ] != taskNOTIFICATION_RECEIVED ) + if( pxCurrentTCBs[ xCurCoreID ]->ucNotifyState[ uxIndexToWait ] != taskNOTIFICATION_RECEIVED ) { /* Clear bits in the task's notification value as bits may get * set by the notifying task or interrupt. This can be used to * clear the value to zero. */ - pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] &= ~ulBitsToClearOnEntry; + pxCurrentTCBs[ xCurCoreID ]->ulNotifiedValue[ uxIndexToWait ] &= ~ulBitsToClearOnEntry; /* Mark this task as waiting for a notification. */ - pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskWAITING_NOTIFICATION; + pxCurrentTCBs[ xCurCoreID ]->ucNotifyState[ uxIndexToWait ] = taskWAITING_NOTIFICATION; if( xTicksToWait > ( TickType_t ) 0 ) { @@ -4856,24 +5652,27 @@ TickType_t uxTaskResetEventItemValue( void ) mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { + /* Get current core ID as we can no longer be preempted. */ + const BaseType_t xCurCoreID = portGET_CORE_ID(); + traceTASK_NOTIFY_WAIT( uxIndexToWait ); if( pulNotificationValue != NULL ) { /* Output the current notification value, which may or may not * have changed. 
*/ - *pulNotificationValue = pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ]; + *pulNotificationValue = pxCurrentTCBs[ xCurCoreID ]->ulNotifiedValue[ uxIndexToWait ]; } /* If ucNotifyValue is set then either the task never entered the * blocked state (because a notification was already pending) or the * task unblocked because of a notification. Otherwise the task * unblocked because of a timeout. */ - if( pxCurrentTCB->ucNotifyState[ uxIndexToWait ] != taskNOTIFICATION_RECEIVED ) + if( pxCurrentTCBs[ xCurCoreID ]->ucNotifyState[ uxIndexToWait ] != taskNOTIFICATION_RECEIVED ) { /* A notification was not received. */ xReturn = pdFALSE; @@ -4882,13 +5681,13 @@ TickType_t uxTaskResetEventItemValue( void ) { /* A notification was already pending or a notification was * received while the task was waiting. */ - pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] &= ~ulBitsToClearOnExit; + pxCurrentTCBs[ xCurCoreID ]->ulNotifiedValue[ uxIndexToWait ] &= ~ulBitsToClearOnExit; xReturn = pdTRUE; } - pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskNOT_WAITING_NOTIFICATION; + pxCurrentTCBs[ xCurCoreID ]->ucNotifyState[ uxIndexToWait ] = taskNOT_WAITING_NOTIFICATION; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); return xReturn; } @@ -4912,7 +5711,7 @@ TickType_t uxTaskResetEventItemValue( void ) configASSERT( xTaskToNotify ); pxTCB = xTaskToNotify; - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { if( pulPreviousNotificationValue != NULL ) { @@ -4995,7 +5794,7 @@ TickType_t uxTaskResetEventItemValue( void ) } #endif - if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + if( taskIS_YIELD_REQUIRED( pxTCB, portGET_CORE_ID(), pdFALSE ) == pdTRUE ) { /* The notified task has a priority above the currently * executing task so a yield is required. */ @@ -5011,7 +5810,7 @@ TickType_t uxTaskResetEventItemValue( void ) mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); return xReturn; } @@ -5056,8 +5855,11 @@ TickType_t uxTaskResetEventItemValue( void ) pxTCB = xTaskToNotify; - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + prvENTER_CRITICAL_OR_MASK_ISR( &xKernelLock, uxSavedInterruptStatus ); { + /* Get current core ID as we can no longer be preempted. */ + const BaseType_t xCurCoreID = portGET_CORE_ID(); + if( pulPreviousNotificationValue != NULL ) { *pulPreviousNotificationValue = pxTCB->ulNotifiedValue[ uxIndexToNotify ]; @@ -5118,7 +5920,7 @@ TickType_t uxTaskResetEventItemValue( void ) /* The task should not have been on an event list. */ configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL ); - if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + if( taskCAN_BE_SCHEDULED( pxTCB ) == pdTRUE ) { listREMOVE_ITEM( &( pxTCB->xStateListItem ) ); prvAddTaskToReadyList( pxTCB ); @@ -5127,10 +5929,10 @@ TickType_t uxTaskResetEventItemValue( void ) { /* The delayed and ready lists cannot be accessed, so hold * this task pending until the scheduler is resumed. */ - listINSERT_END( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); + listINSERT_END( &( xPendingReadyList[ xCurCoreID ] ), &( pxTCB->xEventListItem ) ); } - if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + if( taskIS_YIELD_REQUIRED( pxTCB, xCurCoreID, pdFALSE ) == pdTRUE ) { /* The notified task has a priority above the currently * executing task so a yield is required. 
*/ @@ -5142,7 +5944,7 @@ TickType_t uxTaskResetEventItemValue( void ) /* Mark that a yield is pending in case the user is not * using the "xHigherPriorityTaskWoken" parameter to an ISR * safe FreeRTOS function. */ - xYieldPending = pdTRUE; + xYieldPending[ xCurCoreID ] = pdTRUE; } else { @@ -5150,7 +5952,7 @@ TickType_t uxTaskResetEventItemValue( void ) } } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + prvEXIT_CRITICAL_OR_UNMASK_ISR( &xKernelLock, uxSavedInterruptStatus ); return xReturn; } @@ -5191,8 +5993,11 @@ TickType_t uxTaskResetEventItemValue( void ) pxTCB = xTaskToNotify; - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + prvENTER_CRITICAL_OR_MASK_ISR( &xKernelLock, uxSavedInterruptStatus ); { + /* Get current core ID as we can no longer be preempted. */ + const BaseType_t xCurCoreID = portGET_CORE_ID(); + ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ]; pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED; @@ -5209,7 +6014,7 @@ TickType_t uxTaskResetEventItemValue( void ) /* The task should not have been on an event list. */ configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL ); - if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + if( taskCAN_BE_SCHEDULED( pxTCB ) == pdTRUE ) { listREMOVE_ITEM( &( pxTCB->xStateListItem ) ); prvAddTaskToReadyList( pxTCB ); @@ -5218,10 +6023,10 @@ TickType_t uxTaskResetEventItemValue( void ) { /* The delayed and ready lists cannot be accessed, so hold * this task pending until the scheduler is resumed. */ - listINSERT_END( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); + listINSERT_END( &( xPendingReadyList[ xCurCoreID ] ), &( pxTCB->xEventListItem ) ); } - if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) + if( taskIS_YIELD_REQUIRED( pxTCB, xCurCoreID, pdFALSE ) == pdTRUE ) { /* The notified task has a priority above the currently * executing task so a yield is required. */ @@ -5233,7 +6038,7 @@ TickType_t uxTaskResetEventItemValue( void ) /* Mark that a yield is pending in case the user is not * using the "xHigherPriorityTaskWoken" parameter in an ISR * safe FreeRTOS function. */ - xYieldPending = pdTRUE; + xYieldPending[ xCurCoreID ] = pdTRUE; } else { @@ -5241,7 +6046,7 @@ TickType_t uxTaskResetEventItemValue( void ) } } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + prvEXIT_CRITICAL_OR_UNMASK_ISR( &xKernelLock, uxSavedInterruptStatus ); } #endif /* configUSE_TASK_NOTIFICATIONS */ @@ -5261,7 +6066,7 @@ TickType_t uxTaskResetEventItemValue( void ) * its notification state cleared. */ pxTCB = prvGetTCBFromHandle( xTask ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { if( pxTCB->ucNotifyState[ uxIndexToClear ] == taskNOTIFICATION_RECEIVED ) { @@ -5273,7 +6078,7 @@ TickType_t uxTaskResetEventItemValue( void ) xReturn = pdFAIL; } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); return xReturn; } @@ -5294,14 +6099,14 @@ TickType_t uxTaskResetEventItemValue( void ) * its notification state cleared. */ pxTCB = prvGetTCBFromHandle( xTask ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xKernelLock ); { /* Return the notification as it was before the bits were cleared, * then clear the bit mask. 
*/ ulReturn = pxTCB->ulNotifiedValue[ uxIndexToClear ]; pxTCB->ulNotifiedValue[ uxIndexToClear ] &= ~ulBitsToClear; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xKernelLock ); return ulReturn; } @@ -5313,7 +6118,7 @@ TickType_t uxTaskResetEventItemValue( void ) configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimeCounter( void ) { - return xIdleTaskHandle->ulRunTimeCounter; + return ulTaskGetIdleRunTimeCounterForCore( portGET_CORE_ID() ); } #endif @@ -5323,24 +6128,7 @@ TickType_t uxTaskResetEventItemValue( void ) configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimePercent( void ) { - configRUN_TIME_COUNTER_TYPE ulTotalTime, ulReturn; - - ulTotalTime = portGET_RUN_TIME_COUNTER_VALUE(); - - /* For percentage calculations. */ - ulTotalTime /= ( configRUN_TIME_COUNTER_TYPE ) 100; - - /* Avoid divide by zero errors. */ - if( ulTotalTime > ( configRUN_TIME_COUNTER_TYPE ) 0 ) - { - ulReturn = xIdleTaskHandle->ulRunTimeCounter / ulTotalTime; - } - else - { - ulReturn = 0; - } - - return ulReturn; + return ulTaskGetIdleRunTimePercentForCore( portGET_CORE_ID() ); } #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ @@ -5351,23 +6139,39 @@ static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, { TickType_t xTimeToWake; const TickType_t xConstTickCount = xTickCount; + /* Get current core ID as we can no longer be preempted. */ + const BaseType_t xCurCoreID = portGET_CORE_ID(); + + #if ( configNUMBER_OF_CORES > 1 ) + { + if( listIS_CONTAINED_WITHIN( &xTasksWaitingTermination, &( pxCurrentTCBs[ xCurCoreID ]->xStateListItem ) ) == pdTRUE ) + { + /* In SMP, it is possible that another core has already deleted the + * current task (via vTaskDelete()) which will result in the current + * task being placed on the waiting termination list. In this case, + * we do nothing and return, the current task will yield as soon + * as it re-enables interrupts. */ + return; + } + } + #endif /* configNUMBER_OF_CORES > 1 */ #if ( INCLUDE_xTaskAbortDelay == 1 ) { /* About to enter a delayed list, so ensure the ucDelayAborted flag is * reset to pdFALSE so it can be detected as having been set to pdTRUE * when the task leaves the Blocked state. */ - pxCurrentTCB->ucDelayAborted = pdFALSE; + pxCurrentTCBs[ xCurCoreID ]->ucDelayAborted = pdFALSE; } #endif /* Remove the task from the ready list before adding it to the blocked list * as the same list item is used for both lists. */ - if( uxListRemove( &( pxCurrentTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + if( uxListRemove( &( pxCurrentTCBs[ xCurCoreID ]->xStateListItem ) ) == ( UBaseType_t ) 0 ) { /* The current task must be in a ready list, so there is no need to * check, and the port reset macro can be called directly. */ - portRESET_READY_PRIORITY( pxCurrentTCB->uxPriority, uxTopReadyPriority ); /*lint !e931 pxCurrentTCB cannot change as it is the calling task. pxCurrentTCB->uxPriority and uxTopReadyPriority cannot change as called with scheduler suspended or in a critical section. */ + portRESET_READY_PRIORITY( pxCurrentTCBs[ xCurCoreID ]->uxPriority, uxTopReadyPriority ); /*lint !e931 pxCurrentTCBs cannot change as it is the calling task. pxCurrentTCBs->uxPriority and uxTopReadyPriority cannot change as called with scheduler suspended or in a critical section. */ } else { @@ -5381,7 +6185,7 @@ static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, /* Add the task to the suspended task list instead of a delayed task * list to ensure it is not woken by a timing event. It will block * indefinitely. 
*/ - listINSERT_END( &xSuspendedTaskList, &( pxCurrentTCB->xStateListItem ) ); + listINSERT_END( &xSuspendedTaskList, &( pxCurrentTCBs[ xCurCoreID ]->xStateListItem ) ); } else { @@ -5391,19 +6195,19 @@ xTimeToWake = xConstTickCount + xTicksToWait; /* The list item will be inserted in wake time order. */ - listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake ); + listSET_LIST_ITEM_VALUE( &( pxCurrentTCBs[ xCurCoreID ]->xStateListItem ), xTimeToWake ); if( xTimeToWake < xConstTickCount ) { /* Wake time has overflowed. Place this item in the overflow * list. */ - vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) ); + vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCBs[ xCurCoreID ]->xStateListItem ) ); } else { /* The wake time has not overflowed, so the current block list * is used. */ - vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) ); + vListInsert( pxDelayedTaskList, &( pxCurrentTCBs[ xCurCoreID ]->xStateListItem ) ); /* If the task entering the blocked state was placed at the * head of the list of blocked tasks then xNextTaskUnblockTime @@ -5427,17 +6231,17 @@ xTimeToWake = xConstTickCount + xTicksToWait; /* The list item will be inserted in wake time order. */ - listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake ); + listSET_LIST_ITEM_VALUE( &( pxCurrentTCBs[ xCurCoreID ]->xStateListItem ), xTimeToWake ); if( xTimeToWake < xConstTickCount ) { /* Wake time has overflowed. Place this item in the overflow list. */ - vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) ); + vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCBs[ xCurCoreID ]->xStateListItem ) ); } else { /* The wake time has not overflowed, so the current block list is used. */ - vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) ); + vListInsert( pxDelayedTaskList, &( pxCurrentTCBs[ xCurCoreID ]->xStateListItem ) ); /* If the task entering the blocked state was placed at the head of the * list of blocked tasks then xNextTaskUnblockTime needs to be updated diff --git a/components/freertos/FreeRTOS-Kernel-V10.5.1/timers.c b/components/freertos/FreeRTOS-Kernel-V10.5.1/timers.c index 6803cb8e63..084c7d9b48 100644 --- a/components/freertos/FreeRTOS-Kernel-V10.5.1/timers.c +++ b/components/freertos/FreeRTOS-Kernel-V10.5.1/timers.c @@ -42,6 +42,8 @@ #include "task.h" #include "queue.h" #include "timers.h" +/* Include private IDF API additions for critical thread safety macros */ +#include "esp_private/freertos_idf_additions_priv.h" #if ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 0 ) #error configUSE_TIMERS must be set to 1 to make the xTimerPendFunctionCall() function available. @@ -147,6 +149,10 @@ PRIVILEGED_DATA static QueueHandle_t xTimerQueue = NULL; PRIVILEGED_DATA static TaskHandle_t xTimerTaskHandle = NULL; +/* Spinlock required in SMP when accessing the timers. For now we use a single lock. + * Todo: Each timer could possibly have its own lock for increased granularity. */ + PRIVILEGED_DATA static portMUX_TYPE xTimerLock = portMUX_INITIALIZER_UNLOCKED; + /*lint -restore */ /*-----------------------------------------------------------*/ @@ -252,14 +258,16 @@ StackType_t * pxTimerTaskStackBuffer = NULL; uint32_t ulTimerTaskStackSize; + /* The timer task is always pinned to core 0. 
Todo: IDF-7906 */ vApplicationGetTimerTaskMemory( &pxTimerTaskTCBBuffer, &pxTimerTaskStackBuffer, &ulTimerTaskStackSize ); - xTimerTaskHandle = xTaskCreateStatic( prvTimerTask, - configTIMER_SERVICE_TASK_NAME, - ulTimerTaskStackSize, - NULL, - ( ( UBaseType_t ) configTIMER_TASK_PRIORITY ) | portPRIVILEGE_BIT, - pxTimerTaskStackBuffer, - pxTimerTaskTCBBuffer ); + xTimerTaskHandle = xTaskCreateStaticPinnedToCore( prvTimerTask, + configTIMER_SERVICE_TASK_NAME, + ulTimerTaskStackSize, + NULL, + ( ( UBaseType_t ) configTIMER_TASK_PRIORITY ) | portPRIVILEGE_BIT, + pxTimerTaskStackBuffer, + pxTimerTaskTCBBuffer, + 0 ); if( xTimerTaskHandle != NULL ) { @@ -268,12 +276,14 @@ } #else /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ { - xReturn = xTaskCreate( prvTimerTask, - configTIMER_SERVICE_TASK_NAME, - configTIMER_TASK_STACK_DEPTH, - NULL, - ( ( UBaseType_t ) configTIMER_TASK_PRIORITY ) | portPRIVILEGE_BIT, - &xTimerTaskHandle ); + /* Timer tasks is always pinned to core 0. Todo: IDF-7906 */ + xReturn = xTaskCreatePinnedToCore( prvTimerTask, + configTIMER_SERVICE_TASK_NAME, + configTIMER_TASK_STACK_DEPTH, + NULL, + ( ( UBaseType_t ) configTIMER_TASK_PRIORITY ) | portPRIVILEGE_BIT, + &xTimerTaskHandle, + 0 ); } #endif /* configSUPPORT_STATIC_ALLOCATION */ } @@ -458,7 +468,7 @@ Timer_t * pxTimer = xTimer; configASSERT( xTimer ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xTimerLock ); { if( xAutoReload != pdFALSE ) { @@ -469,7 +479,7 @@ pxTimer->ucStatus &= ( ( uint8_t ) ~tmrSTATUS_IS_AUTORELOAD ); } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xTimerLock ); } /*-----------------------------------------------------------*/ @@ -479,7 +489,7 @@ BaseType_t xReturn; configASSERT( xTimer ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xTimerLock ); { if( ( pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD ) == 0 ) { @@ -492,7 +502,7 @@ xReturn = pdTRUE; } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xTimerLock ); return xReturn; } @@ -635,7 +645,7 @@ TickType_t xTimeNow; BaseType_t xTimerListsWereSwitched; - vTaskSuspendAll(); + prvENTER_CRITICAL_OR_SUSPEND_ALL( &xTimerLock ); { /* Obtain the time now to make an assessment as to whether the timer * has expired or not. If obtaining the time causes the lists to switch @@ -649,7 +659,7 @@ /* The tick count has not overflowed, has the timer expired? */ if( ( xListWasEmpty == pdFALSE ) && ( xNextExpireTime <= xTimeNow ) ) { - ( void ) xTaskResumeAll(); + ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xTimerLock ); prvProcessExpiredTimer( xNextExpireTime, xTimeNow ); } else @@ -669,7 +679,7 @@ vQueueWaitForMessageRestricted( xTimerQueue, ( xNextExpireTime - xTimeNow ), xListWasEmpty ); - if( xTaskResumeAll() == pdFALSE ) + if( prvEXIT_CRITICAL_OR_RESUME_ALL( &xTimerLock ) == pdFALSE ) { /* Yield to wait for either a command to arrive, or the * block time to expire. If a command arrived between the @@ -685,7 +695,7 @@ } else { - ( void ) xTaskResumeAll(); + ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xTimerLock ); } } } @@ -963,7 +973,7 @@ /* Check that the list from which active timers are referenced, and the * queue used to communicate with the timer service, have been * initialised. */ - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xTimerLock ); { if( xTimerQueue == NULL ) { @@ -1005,7 +1015,7 @@ mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xTimerLock ); } /*-----------------------------------------------------------*/ @@ -1017,7 +1027,7 @@ configASSERT( xTimer ); /* Is the timer in the list of active timers? 
*/ - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xTimerLock ); { if( ( pxTimer->ucStatus & tmrSTATUS_IS_ACTIVE ) == 0 ) { @@ -1028,7 +1038,7 @@ xReturn = pdTRUE; } } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xTimerLock ); return xReturn; } /*lint !e818 Can't be pointer to const due to the typedef. */ @@ -1041,11 +1051,11 @@ configASSERT( xTimer ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xTimerLock ); { pvReturn = pxTimer->pvTimerID; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xTimerLock ); return pvReturn; } @@ -1058,11 +1068,11 @@ configASSERT( xTimer ); - taskENTER_CRITICAL(); + taskENTER_CRITICAL( &xTimerLock ); { pxTimer->pvTimerID = pvNewID; } - taskEXIT_CRITICAL(); + taskEXIT_CRITICAL( &xTimerLock ); } /*-----------------------------------------------------------*/ diff --git a/components/freertos/FreeRTOS-Kernel/portable/riscv/include/freertos/portmacro.h b/components/freertos/FreeRTOS-Kernel/portable/riscv/include/freertos/portmacro.h index 08e489d3c0..b026bf49b9 100644 --- a/components/freertos/FreeRTOS-Kernel/portable/riscv/include/freertos/portmacro.h +++ b/components/freertos/FreeRTOS-Kernel/portable/riscv/include/freertos/portmacro.h @@ -444,6 +444,11 @@ void vPortTCBPreDeleteHook( void *pxTCB ); * - Maps to forward declared functions * ------------------------------------------------------------------------------------------------------------------ */ +#if CONFIG_FREERTOS_USE_KERNEL_10_5_1 +#define portGET_CORE_ID() xPortGetCoreID() +#define portYIELD_CORE( x ) vPortYieldOtherCore( x ) +#endif + // --------------------- Interrupts ------------------------ #define portDISABLE_INTERRUPTS() portSET_INTERRUPT_MASK_FROM_ISR() diff --git a/components/freertos/FreeRTOS-Kernel/portable/riscv/portasm.S b/components/freertos/FreeRTOS-Kernel/portable/riscv/portasm.S index 214303e4f8..2b215156c7 100644 --- a/components/freertos/FreeRTOS-Kernel/portable/riscv/portasm.S +++ b/components/freertos/FreeRTOS-Kernel/portable/riscv/portasm.S @@ -8,6 +8,13 @@ #include "freertos/FreeRTOSConfig.h" #include "soc/soc_caps.h" +#if CONFIG_FREERTOS_USE_KERNEL_10_5_1 +#define pxCurrentTCB pxCurrentTCBs +.extern pxCurrentTCBs +#else +.extern pxCurrentTCB +#endif + #if CONFIG_ESP_SYSTEM_HW_STACK_GUARD #include "esp_private/hw_stack_guard.h" #endif diff --git a/components/freertos/FreeRTOS-Kernel/portable/xtensa/include/freertos/portmacro.h b/components/freertos/FreeRTOS-Kernel/portable/xtensa/include/freertos/portmacro.h index 6a44c2817a..2c107da027 100644 --- a/components/freertos/FreeRTOS-Kernel/portable/xtensa/include/freertos/portmacro.h +++ b/components/freertos/FreeRTOS-Kernel/portable/xtensa/include/freertos/portmacro.h @@ -427,6 +427,11 @@ void vPortTCBPreDeleteHook( void *pxTCB ); * - Maps to forward declared functions * ------------------------------------------------------------------------------------------------------------------ */ +#if CONFIG_FREERTOS_USE_KERNEL_10_5_1 +#define portGET_CORE_ID() xPortGetCoreID() +#define portYIELD_CORE( x ) vPortYieldOtherCore( x ) +#endif + // --------------------- Interrupts ------------------------ /** diff --git a/components/freertos/FreeRTOS-Kernel/portable/xtensa/portasm.S b/components/freertos/FreeRTOS-Kernel/portable/xtensa/portasm.S index b7fe0f36a7..931aa3dc31 100644 --- a/components/freertos/FreeRTOS-Kernel/portable/xtensa/portasm.S +++ b/components/freertos/FreeRTOS-Kernel/portable/xtensa/portasm.S @@ -3,7 +3,7 @@ * * SPDX-License-Identifier: MIT * - * SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD + * 
SPDX-FileContributor: 2016-2023 Espressif Systems (Shanghai) CO LTD */ /* * Copyright (c) 2015-2019 Cadence Design Systems, Inc. @@ -33,7 +33,13 @@ #define TOPOFSTACK_OFFS 0x00 /* StackType_t *pxTopOfStack */ +#if CONFIG_FREERTOS_USE_KERNEL_10_5_1 +#define pxCurrentTCB pxCurrentTCBs +.extern pxCurrentTCBs +#else .extern pxCurrentTCB +#endif + #if XCHAL_CP_NUM > 0 /* Offsets used to get a task's coprocessor save area (CPSA) from its TCB */ .extern offset_pxEndOfStack diff --git a/components/freertos/Kconfig b/components/freertos/Kconfig index 678ba052fc..ebe716b483 100644 --- a/components/freertos/Kconfig +++ b/components/freertos/Kconfig @@ -4,11 +4,12 @@ menu "FreeRTOS" # Upstream FreeRTOS configurations go here config FREERTOS_USE_KERNEL_10_5_1 - bool "Use v10.5.1 Kernel (EXPERIMENTAL)" - depends on IDF_EXPERIMENTAL_FEATURES + bool "Use v10.5.1 Kernel (BETA)" default n help - Hidden option for development/testing purposes to enable building with the v10.5.1 kernel + This option enables building with the FreeRTOS v10.5.1 kernel. + + Note: The v10.5.1 kernel is still in BETA and is therefore not production ready. config FREERTOS_SMP bool "Run the Amazon SMP FreeRTOS kernel instead (FEATURE UNDER DEVELOPMENT)" diff --git a/components/freertos/config/include/freertos/FreeRTOSConfig.h b/components/freertos/config/include/freertos/FreeRTOSConfig.h index 23c7e21bbe..83f337af7d 100644 --- a/components/freertos/config/include/freertos/FreeRTOSConfig.h +++ b/components/freertos/config/include/freertos/FreeRTOSConfig.h @@ -107,7 +107,7 @@ /* ----------------------- System -------------------------- */ -#define configMAX_TASK_NAME_LEN CONFIG_FREERTOS_MAX_TASK_NAME_LEN +#define configMAX_TASK_NAME_LEN CONFIG_FREERTOS_MAX_TASK_NAME_LEN /* If deletion callbacks are enabled, the number of TLSP's are doubled (i.e., * the length of the TCB's pvThreadLocalStoragePointersThis array). 
This allows @@ -199,6 +199,7 @@ #define INCLUDE_xTaskResumeFromISR 1 #define INCLUDE_xTimerPendFunctionCall 1 #define INCLUDE_xTaskGetSchedulerState 1 +#define INCLUDE_xTaskGetCurrentTaskHandle 1 /* -------------------- Trace Macros ----------------------- */ @@ -257,10 +258,12 @@ #if !CONFIG_FREERTOS_SMP #ifdef CONFIG_FREERTOS_UNICORE - #define configNUM_CORES 1 + #define configNUMBER_OF_CORES 1 #else - #define configNUM_CORES 2 + #define configNUMBER_OF_CORES 2 #endif /* CONFIG_FREERTOS_UNICORE */ + /* For compatibility */ + #define configNUM_CORES configNUMBER_OF_CORES #ifdef CONFIG_FREERTOS_VTASKLIST_INCLUDE_COREID #define configTASKLIST_INCLUDE_COREID 1 #endif /* CONFIG_FREERTOS_VTASKLIST_INCLUDE_COREID */ diff --git a/components/freertos/config/linux/include/freertos/FreeRTOSConfig_arch.h b/components/freertos/config/linux/include/freertos/FreeRTOSConfig_arch.h index 4530bc0fd3..bd0276b06b 100644 --- a/components/freertos/config/linux/include/freertos/FreeRTOSConfig_arch.h +++ b/components/freertos/config/linux/include/freertos/FreeRTOSConfig_arch.h @@ -52,7 +52,7 @@ /* -------------------- API Includes ----------------------- */ -#define INCLUDE_xTaskGetCurrentTaskHandle 0 /* not defined in POSIX simulator */ +/* Todo: Reconcile INCLUDE_option differences (IDF-8186) */ #define INCLUDE_vTaskDelayUntil 1 #define INCLUDE_uxTaskGetStackHighWaterMark2 0 diff --git a/components/freertos/config/riscv/include/freertos/FreeRTOSConfig_arch.h b/components/freertos/config/riscv/include/freertos/FreeRTOSConfig_arch.h index 75e4e75948..e0288dd5c8 100644 --- a/components/freertos/config/riscv/include/freertos/FreeRTOSConfig_arch.h +++ b/components/freertos/config/riscv/include/freertos/FreeRTOSConfig_arch.h @@ -28,20 +28,31 @@ /* ---------------- Amazon SMP FreeRTOS -------------------- */ #if CONFIG_FREERTOS_SMP - #define configUSE_CORE_AFFINITY 1 + #define configUSE_CORE_AFFINITY 1 /* This is always enabled to call IDF style idle hooks, by can be "--Wl,--wrap" * if users enable CONFIG_FREERTOS_USE_MINIMAL_IDLE_HOOK. */ - #define configUSE_MINIMAL_IDLE_HOOK 1 + #define configUSE_MINIMAL_IDLE_HOOK 1 - /* IDF Newlib supports dynamic reentrancy. We provide our own __getreent() - * function. */ - #define configNEWLIB_REENTRANT_IS_DYNAMIC 1 +/* IDF Newlib supports dynamic reentrancy. We provide our own __getreent() + * function. */ + #define configNEWLIB_REENTRANT_IS_DYNAMIC 1 #endif /* ----------------------- System -------------------------- */ -#define configUSE_NEWLIB_REENTRANT 1 +#define configUSE_NEWLIB_REENTRANT 1 +#if CONFIG_FREERTOS_USE_KERNEL_10_5_1 + +/* - FreeRTOS provides default for configTLS_BLOCK_TYPE. + * - We simply provide our own INIT and DEINIT functions + * - We set "SET" to a blank macro since there is no need to set the reentrancy + * pointer. All newlib functions calls __getreent. 
*/ + #define configINIT_TLS_BLOCK( xTLSBlock ) esp_reent_init( &( xTLSBlock ) ) + #define configSET_TLS_BLOCK( xTLSBlock ) + #define configDEINIT_TLS_BLOCK( xTLSBlock ) _reclaim_reent( &( xTLSBlock ) ) + +#endif /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */ #define configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H 1 @@ -61,9 +72,9 @@ /* -------------------- API Includes ----------------------- */ -#define INCLUDE_xTaskDelayUntil 1 -#define INCLUDE_xTaskGetCurrentTaskHandle 1 -#define INCLUDE_uxTaskGetStackHighWaterMark2 1 +/* Todo: Reconcile INCLUDE_option differences (IDF-8186) */ +#define INCLUDE_xTaskDelayUntil 1 +#define INCLUDE_uxTaskGetStackHighWaterMark2 1 /* ------------------------------------------------ ESP-IDF Additions -------------------------------------------------- * diff --git a/components/freertos/config/xtensa/include/freertos/FreeRTOSConfig_arch.h b/components/freertos/config/xtensa/include/freertos/FreeRTOSConfig_arch.h index c93bb977e5..2349c6d49b 100644 --- a/components/freertos/config/xtensa/include/freertos/FreeRTOSConfig_arch.h +++ b/components/freertos/config/xtensa/include/freertos/FreeRTOSConfig_arch.h @@ -55,20 +55,31 @@ /* ---------------- Amazon SMP FreeRTOS -------------------- */ #if CONFIG_FREERTOS_SMP - #define configUSE_CORE_AFFINITY 1 + #define configUSE_CORE_AFFINITY 1 /* This is always enabled to call IDF style idle hooks, by can be "--Wl,--wrap" * if users enable CONFIG_FREERTOS_USE_MINIMAL_IDLE_HOOK. */ - #define configUSE_MINIMAL_IDLE_HOOK 1 + #define configUSE_MINIMAL_IDLE_HOOK 1 - /* IDF Newlib supports dynamic reentrancy. We provide our own __getreent() - * function. */ - #define configNEWLIB_REENTRANT_IS_DYNAMIC 1 +/* IDF Newlib supports dynamic reentrancy. We provide our own __getreent() + * function. */ + #define configNEWLIB_REENTRANT_IS_DYNAMIC 1 #endif /* ----------------------- System -------------------------- */ -#define configUSE_NEWLIB_REENTRANT 1 +#define configUSE_NEWLIB_REENTRANT 1 +#if CONFIG_FREERTOS_USE_KERNEL_10_5_1 + +/* - FreeRTOS provides default for configTLS_BLOCK_TYPE. + * - We simply provide our own INIT and DEINIT functions + * - We set "SET" to a blank macro since there is no need to set the reentrancy + * pointer. All newlib functions calls __getreent. */ + #define configINIT_TLS_BLOCK( xTLSBlock ) esp_reent_init( &( xTLSBlock ) ) + #define configSET_TLS_BLOCK( xTLSBlock ) + #define configDEINIT_TLS_BLOCK( xTLSBlock ) _reclaim_reent( &( xTLSBlock ) ) + +#endif /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */ #define configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H 1 @@ -88,9 +99,9 @@ /* -------------------- API Includes ----------------------- */ -#define INCLUDE_xTaskDelayUntil 1 -#define INCLUDE_xTaskGetCurrentTaskHandle 1 -#define INCLUDE_uxTaskGetStackHighWaterMark2 1 +/* Todo: Reconcile INCLUDE_option differences (IDF-8186) */ +#define INCLUDE_xTaskDelayUntil 1 +#define INCLUDE_uxTaskGetStackHighWaterMark2 1 /* ------------------------------------------------ ESP-IDF Additions -------------------------------------------------- * diff --git a/components/freertos/esp_additions/freertos_tasks_c_additions.h b/components/freertos/esp_additions/freertos_tasks_c_additions.h index f93d36e04a..e0266a53ef 100644 --- a/components/freertos/esp_additions/freertos_tasks_c_additions.h +++ b/components/freertos/esp_additions/freertos_tasks_c_additions.h @@ -20,6 +20,11 @@ * additional API. 
*/ +#if CONFIG_FREERTOS_USE_KERNEL_10_5_1 + #define pxCurrentTCB pxCurrentTCBs +#else +#endif + /* ------------------------------------------------- Static Asserts ------------------------------------------------- */ /* @@ -222,10 +227,23 @@ _Static_assert( offsetof( StaticTask_t, pxDummy8 ) == offsetof( TCB_t, pxEndOfSt if( pxNewTCB != NULL ) { - /* Allocate space for the stack used by the task being created. - * The base of the stack memory stored in the TCB so the task can - * be deleted later if required. */ - pxNewTCB->pxStack = ( StackType_t * ) pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + #if CONFIG_FREERTOS_USE_KERNEL_10_5_1 + { + memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) ); + + /* Allocate space for the stack used by the task being created. + * The base of the stack memory stored in the TCB so the task can + * be deleted later if required. */ + pxNewTCB->pxStack = ( StackType_t * ) pvPortMallocStack( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + } + #else /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */ + { + /* Allocate space for the stack used by the task being created. + * The base of the stack memory stored in the TCB so the task can + * be deleted later if required. */ + pxNewTCB->pxStack = ( StackType_t * ) pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + } + #endif /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */ if( pxNewTCB->pxStack == NULL ) { @@ -239,8 +257,17 @@ _Static_assert( offsetof( StaticTask_t, pxDummy8 ) == offsetof( TCB_t, pxEndOfSt { StackType_t * pxStack; - /* Allocate space for the stack used by the task being created. */ - pxStack = pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation is the stack. */ + #if CONFIG_FREERTOS_USE_KERNEL_10_5_1 + { + /* Allocate space for the stack used by the task being created. */ + pxStack = pvPortMallocStack( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation is the stack. */ + } + #else /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */ + { + /* Allocate space for the stack used by the task being created. */ + pxStack = pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation is the stack. */ + } + #endif /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */ if( pxStack != NULL ) { @@ -249,6 +276,12 @@ _Static_assert( offsetof( StaticTask_t, pxDummy8 ) == offsetof( TCB_t, pxEndOfSt if( pxNewTCB != NULL ) { + #if CONFIG_FREERTOS_USE_KERNEL_10_5_1 + { + memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) ); + } + #endif /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */ + /* Store the stack location in the TCB. */ pxNewTCB->pxStack = pxStack; } @@ -256,7 +289,15 @@ _Static_assert( offsetof( StaticTask_t, pxDummy8 ) == offsetof( TCB_t, pxEndOfSt { /* The stack cannot be used as the TCB was not created. Free * it again. 
*/ - vPortFree( pxStack ); + #if CONFIG_FREERTOS_USE_KERNEL_10_5_1 + { + vPortFreeStack( pxStack ); + } + #else /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */ + { + vPortFree( pxStack ); + } + #endif /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */ } } else @@ -356,6 +397,13 @@ _Static_assert( offsetof( StaticTask_t, pxDummy8 ) == offsetof( TCB_t, pxEndOfSt /* The memory used for the task's TCB and stack are passed into this * function - use them. */ pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */ + + #if CONFIG_FREERTOS_USE_KERNEL_10_5_1 + { + memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) ); + } + #endif /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */ + pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer; #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */ @@ -405,48 +453,7 @@ _Static_assert( offsetof( StaticTask_t, pxDummy8 ) == offsetof( TCB_t, pxEndOfSt /* ------------------------------------------------- Task Utilities ------------------------------------------------- */ -#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) - - TaskHandle_t xTaskGetIdleTaskHandleForCPU( BaseType_t xCoreID ) - { - configASSERT( xCoreID >= 0 && xCoreID < configNUM_CORES ); - configASSERT( ( xIdleTaskHandle[ xCoreID ] != NULL ) ); - return ( TaskHandle_t ) xIdleTaskHandle[ xCoreID ]; - } - -#endif /* INCLUDE_xTaskGetIdleTaskHandle */ -/*----------------------------------------------------------*/ - -#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) - - TaskHandle_t xTaskGetCurrentTaskHandleForCPU( BaseType_t xCoreID ) - { - TaskHandle_t xReturn; - - #if CONFIG_FREERTOS_SMP - { - xReturn = xTaskGetCurrentTaskHandleCPU( xCoreID ); - } - #else /* CONFIG_FREERTOS_SMP */ - { - if( xCoreID < configNUM_CORES ) - { - xReturn = pxCurrentTCB[ xCoreID ]; - } - else - { - xReturn = NULL; - } - } - #endif /* CONFIG_FREERTOS_SMP */ - - return xReturn; - } - -#endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ -/*----------------------------------------------------------*/ - -BaseType_t xTaskGetAffinity( TaskHandle_t xTask ) +BaseType_t xTaskGetCoreID( TaskHandle_t xTask ) { BaseType_t xReturn; @@ -472,11 +479,10 @@ BaseType_t xTaskGetAffinity( TaskHandle_t xTask ) #else /* CONFIG_FREERTOS_SMP */ TCB_t * pxTCB; + /* Todo: Remove xCoreID for single core builds (IDF-7894) */ pxTCB = prvGetTCBFromHandle( xTask ); - /* Simply read the xCoreID member of the TCB */ - taskENTER_CRITICAL( &xKernelLock ); + xReturn = pxTCB->xCoreID; - taskEXIT_CRITICAL_ISR( &xKernelLock ); #endif /* CONFIG_FREERTOS_SMP */ } #else /* configNUM_CORES > 1 */ @@ -490,6 +496,140 @@ BaseType_t xTaskGetAffinity( TaskHandle_t xTask ) } /*----------------------------------------------------------*/ +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + + TaskHandle_t xTaskGetIdleTaskHandleForCore( BaseType_t xCoreID ) + { + #if CONFIG_FREERTOS_USE_KERNEL_10_5_1 + { + /* If xTaskGetIdleTaskHandle() is called before the scheduler has been + * started, then xIdleTaskHandle will be NULL. 
*/ + configASSERT( ( xCoreID < configNUMBER_OF_CORES ) && ( xCoreID != tskNO_AFFINITY ) ); + configASSERT( ( xIdleTaskHandle[ xCoreID ] != NULL ) ); + return xIdleTaskHandle[ xCoreID ]; + } + #else /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */ + { + configASSERT( xCoreID >= 0 && xCoreID < configNUM_CORES ); + configASSERT( ( xIdleTaskHandle[ xCoreID ] != NULL ) ); + return ( TaskHandle_t ) xIdleTaskHandle[ xCoreID ]; + } + #endif /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */ + } + +#endif /* INCLUDE_xTaskGetIdleTaskHandle */ +/*----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + + TaskHandle_t xTaskGetCurrentTaskHandleForCore( BaseType_t xCoreID ) + { + TaskHandle_t xReturn; + + #if CONFIG_FREERTOS_USE_KERNEL_10_5_1 + { + configASSERT( xCoreID < configNUMBER_OF_CORES ); + configASSERT( xCoreID != tskNO_AFFINITY ); + + /* For SMP, we need to take the kernel lock here as we are about to + * access kernel data structures. For single core, a critical section is + * not required as this is not called from an interrupt and the current + * TCB will always be the same for any individual execution thread. */ + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + { + xReturn = pxCurrentTCBs[ xCoreID ]; + } + /* Release the previously taken kernel lock. */ + taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + } + #else /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */ + { + #if CONFIG_FREERTOS_SMP + { + xReturn = xTaskGetCurrentTaskHandleCPU( xCoreID ); + } + #else /* CONFIG_FREERTOS_SMP */ + { + if( xCoreID < configNUM_CORES ) + { + xReturn = pxCurrentTCB[ xCoreID ]; + } + else + { + xReturn = NULL; + } + } + #endif /* CONFIG_FREERTOS_SMP */ + } + #endif /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */ + + return xReturn; + } + +#endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*----------------------------------------------------------*/ + +#if ( CONFIG_FREERTOS_USE_KERNEL_10_5_1 && ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + + configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimeCounterForCore( BaseType_t xCoreID ) + { + uint32_t ulRunTimeCounter; + + configASSERT( xCoreID < configNUMBER_OF_CORES ); + configASSERT( xCoreID != tskNO_AFFINITY ); + + /* For SMP, we need to take the kernel lock here as we are about to + * access kernel data structures. */ + taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + { + ulRunTimeCounter = xIdleTaskHandle[ xCoreID ]->ulRunTimeCounter; + } + /* Release the previously taken kernel lock. */ + taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + + return ulRunTimeCounter; + } + +#endif /* ( CONFIG_FREERTOS_USE_KERNEL_10_5_1 && ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*----------------------------------------------------------*/ + +#if ( CONFIG_FREERTOS_USE_KERNEL_10_5_1 && ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + + configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimePercentForCore( BaseType_t xCoreID ) + { + configRUN_TIME_COUNTER_TYPE ulTotalTime, ulReturn; + + configASSERT( xCoreID < configNUMBER_OF_CORES ); + configASSERT( xCoreID != tskNO_AFFINITY ); + + ulTotalTime = portGET_RUN_TIME_COUNTER_VALUE(); + + /* For percentage calculations. */ + ulTotalTime /= ( configRUN_TIME_COUNTER_TYPE ) 100; + + /* Avoid divide by zero errors. 
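
The taskENTER_CRITICAL_SMP_ONLY()/taskEXIT_CRITICAL_SMP_ONLY() pair used above is an IDF-side addition rather than upstream kernel API. The sketch below is one plausible shape for such macros, given purely as an assumption to illustrate the intent (take the spinlock only on multi-core builds); the actual IDF definitions may differ.

/* Assumed illustration only, not the actual IDF definition. */
#if ( configNUM_CORES > 1 )
    #define taskENTER_CRITICAL_SMP_ONLY( pxLock )    taskENTER_CRITICAL( pxLock )
    #define taskEXIT_CRITICAL_SMP_ONLY( pxLock )     taskEXIT_CRITICAL( pxLock )
#else
    /* Single core: no other core can touch the data, so the lock is
     * compiled away. */
    #define taskENTER_CRITICAL_SMP_ONLY( pxLock )    ( ( void ) ( pxLock ) )
    #define taskEXIT_CRITICAL_SMP_ONLY( pxLock )     ( ( void ) ( pxLock ) )
#endif
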
*/
+ if( ulTotalTime > ( configRUN_TIME_COUNTER_TYPE ) 0 )
+ {
+ /* For SMP, we need to take the kernel lock here as we are about
+ * to access kernel data structures. */
+ taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+ {
+ ulReturn = xIdleTaskHandle[ xCoreID ]->ulRunTimeCounter / ulTotalTime;
+ }
+ /* Release the previously taken kernel lock. */
+ taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+ }
+ else
+ {
+ ulReturn = 0;
+ }
+
+ return ulReturn;
+ }
+
+#endif /* ( CONFIG_FREERTOS_USE_KERNEL_10_5_1 && ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
+/*-----------------------------------------------------------*/
+
 uint8_t * pxTaskGetStackStart( TaskHandle_t xTask )
 {
 TCB_t * pxTCB;
@@ -800,7 +940,15 @@ uint8_t * pxTaskGetStackStart( TaskHandle_t xTask )
 else
 {
 /* We have a task; return its reentrant struct. */
- ret = &pxCurTask->xNewLib_reent;
+ #if CONFIG_FREERTOS_USE_KERNEL_10_5_1
+ {
+ ret = &pxCurTask->xTLSBlock;
+ }
+ #else /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */
+ {
+ ret = &pxCurTask->xNewLib_reent;
+ }
+ #endif /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */
 }
 
 return ret;
diff --git a/components/freertos/esp_additions/include/freertos/idf_additions.h b/components/freertos/esp_additions/include/freertos/idf_additions.h
index 5f2e431bcf..0da84064d6 100644
--- a/components/freertos/esp_additions/include/freertos/idf_additions.h
+++ b/components/freertos/esp_additions/include/freertos/idf_additions.h
@@ -31,6 +31,7 @@
 #endif
 /* *INDENT-ON* */
 
+
 /* -------------------------------------------------- Task Creation ------------------------------------------------- */
 
 #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
@@ -108,6 +109,35 @@
 
 /* ------------------------------------------------- Task Utilities ------------------------------------------------- */
 
+/**
+ * @brief Get the current core ID of a particular task
+ *
+ * Helper function to get the core ID of a particular task. If the task is
+ * pinned to a particular core, the core ID is returned. If the task is not
+ * pinned to a particular core, tskNO_AFFINITY is returned.
+ *
+ * If CONFIG_FREERTOS_UNICORE is enabled, this function simply returns 0.
+ *
+ * [refactor-todo] See if this needs to be deprecated (IDF-8145)(IDF-8164)
+ *
+ * @note If CONFIG_FREERTOS_SMP is enabled, please call vTaskCoreAffinityGet()
+ * instead.
+ * @note In IDF FreeRTOS, when configNUMBER_OF_CORES == 1, this function will
+ * always return 0.
+ * @param xTask The task to query
+ * @return The task's core ID or tskNO_AFFINITY
+ */
+BaseType_t xTaskGetCoreID( TaskHandle_t xTask );
+
+/** @cond */
+/* Todo: Deprecate this API in favor of xTaskGetCoreID (IDF-8163) */
+static inline __attribute__( ( always_inline ) )
+BaseType_t xTaskGetAffinity( TaskHandle_t xTask )
+{
+ return xTaskGetCoreID( xTask );
+}
+/** @endcond */
+
 /**
 * @brief Get the handle of idle task for the given core.
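
The idle-run-time percentage calculation completed earlier in this hunk divides the total run time by 100 before dividing the idle counter by it. A standalone sketch of the same trade-off (not the kernel code): dividing the total first keeps the arithmetic inside the counter type, whereas multiplying the idle counter by 100 could overflow a 32-bit counter, at the cost of sub-percent resolution.

#include <stdint.h>

/* Illustrative numbers: idle = 1500000 ticks, total = 2000000 ticks.
 * 2000000 / 100 = 20000, then 1500000 / 20000 = 75 % idle time. */
static uint32_t prvIdlePercentSketch( uint32_t ulIdleTime, uint32_t ulTotalTime )
{
    ulTotalTime /= 100U;

    return ( ulTotalTime > 0U ) ? ( ulIdleTime / ulTotalTime ) : 0U;
}
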
 *
@@ -118,7 +148,16 @@
 * @param xCoreID The core to query
 * @return Handle of the idle task for the queried core
 */
-TaskHandle_t xTaskGetIdleTaskHandleForCPU( BaseType_t xCoreID );
+TaskHandle_t xTaskGetIdleTaskHandleForCore( BaseType_t xCoreID );
+
+/** @cond */
+/* Todo: Deprecate this API in favor of xTaskGetIdleTaskHandleForCore (IDF-8163) */
+static inline __attribute__( ( always_inline ) )
+TaskHandle_t xTaskGetIdleTaskHandleForCPU( BaseType_t xCoreID )
+{
+ return xTaskGetIdleTaskHandleForCore( xCoreID );
+}
+/** @endcond */
 
 /**
 * @brief Get the handle of the task currently running on a certain core
@@ -134,25 +173,53 @@ TaskHandle_t xTaskGetIdleTaskHandleForCPU( BaseType_t xCoreID );
 * @param xCoreID The core to query
 * @return Handle of the current task running on the queried core
 */
-TaskHandle_t xTaskGetCurrentTaskHandleForCPU( BaseType_t xCoreID );
+TaskHandle_t xTaskGetCurrentTaskHandleForCore( BaseType_t xCoreID );
+
+/** @cond */
+/* Todo: Deprecate this API in favor of xTaskGetCurrentTaskHandleForCore (IDF-8163) */
+static inline __attribute__( ( always_inline ) )
+TaskHandle_t xTaskGetCurrentTaskHandleForCPU( BaseType_t xCoreID )
+{
+ return xTaskGetCurrentTaskHandleForCore( xCoreID );
+}
+/** @endcond */
+
+#if CONFIG_FREERTOS_USE_KERNEL_10_5_1
 
 /**
- * @brief Get the current core affinity of a particular task
+ * @brief Get the total execution time of a particular core's idle task
 *
- * Helper function to get the core affinity of a particular task. If the task is
- * pinned to a particular core, the core ID is returned. If the task is not
- * pinned to a particular core, tskNO_AFFINITY is returned.
+ * This function is equivalent to ulTaskGetIdleRunTimeCounter() but queries the
+ * idle task of a particular core.
 *
- * If CONFIG_FREERTOS_UNICORE is enabled, this function simply returns 0.
- *
- * [refactor-todo] See if this needs to be deprecated (IDF-8145)(IDF-8164)
- *
- * @note If CONFIG_FREERTOS_SMP is enabled, please call vTaskCoreAffinityGet()
- * instead.
- * @param xTask The task to query
- * @return The tasks coreID or tskNO_AFFINITY
+ * @param xCoreID Core ID of the idle task to query
+ * @return The total run time of the idle task
 */
-BaseType_t xTaskGetAffinity( TaskHandle_t xTask );
+ configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimeCounterForCore( BaseType_t xCoreID );
+
+/**
+ * @brief Get the percentage run time of a particular core's idle task
+ *
+ * This function is equivalent to ulTaskGetIdleRunTimePercent() but queries the
+ * idle task of a particular core.
+ *
+ * @param xCoreID Core ID of the idle task to query
+ * @return The percentage run time of the idle task
+ */
+ configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimePercentForCore( BaseType_t xCoreID );
+
+#else /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */
+
+/* CMock Workaround: CMock currently doesn't preprocess files, thus functions
+ * guarded by ifdef still get mocked. We provide a dummy define here so that
+ * functions using configRUN_TIME_COUNTER_TYPE can still be mocked.
+ *
+ * Todo: Will be removed when V10.5.1 becomes the default kernel. */
+ #ifndef configRUN_TIME_COUNTER_TYPE
+ #define configRUN_TIME_COUNTER_TYPE unsigned int
+ #endif
+
+#endif /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */
 
 /**
 * Returns the start of the stack associated with xTask.
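
Taken together, the declarations above give applications a per-core view of idle time. A usage sketch, assuming CONFIG_FREERTOS_USE_KERNEL_10_5_1 and configGENERATE_RUN_TIME_STATS are enabled; the function name and output format are invented for illustration.

#include <stdio.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/idf_additions.h"

/* Rough per-core load estimate: whatever share the idle task did not get. */
static void vPrintApproxCpuLoad( void )
{
    for( BaseType_t xCore = 0; xCore < configNUMBER_OF_CORES; xCore++ )
    {
        configRUN_TIME_COUNTER_TYPE ulIdlePercent = ulTaskGetIdleRunTimePercentForCore( xCore );

        printf( "core %d: ~%u%% busy\n", ( int ) xCore, ( unsigned ) ( 100U - ulIdlePercent ) );
    }
}
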
diff --git a/components/freertos/linker.lf b/components/freertos/linker.lf index 31a1b39cfb..b66d99bb98 100644 --- a/components/freertos/linker.lf +++ b/components/freertos/linker.lf @@ -171,6 +171,8 @@ entries: tasks:ulTaskGenericNotifyValueClear (default) if FREERTOS_GENERATE_RUN_TIME_STATS = y: tasks:ulTaskGetIdleRunTimeCounter (default) + if FREERTOS_USE_KERNEL_10_5_1 = y: + tasks:ulTaskGetIdleRunTimePercent (default) tasks:prvAddCurrentTaskToDelayedList (default) if FREERTOS_USE_TRACE_FACILITY = y: tasks:uxTaskGetSystemState (default) @@ -193,6 +195,8 @@ entries: timers:uxTimerGetReloadMode (default) timers:xTimerGetExpiryTime (default) timers:pcTimerGetName (default) + if FREERTOS_USE_KERNEL_10_5_1 = y: + timers:prvReloadTimer (default) timers:prvProcessExpiredTimer (default) timers:prvTimerTask (default) timers:prvProcessTimerOrBlockTask (default) diff --git a/components/freertos/linker_common.lf b/components/freertos/linker_common.lf index d836cf4902..c0824ed7e0 100644 --- a/components/freertos/linker_common.lf +++ b/components/freertos/linker_common.lf @@ -25,10 +25,12 @@ entries: tasks:xTaskCreatePinnedToCore (default) tasks:xTaskCreateStaticPinnedToCore (default) # Task Utilities - tasks:xTaskGetCurrentTaskHandleForCPU (default) - tasks:xTaskGetIdleTaskHandleForCPU (default) - tasks:xTaskGetCurrentTaskHandleForCPU (default) - tasks:xTaskGetAffinity (default) + tasks:xTaskGetCoreID (default) + tasks:xTaskGetIdleTaskHandleForCore (default) + tasks:xTaskGetCurrentTaskHandleForCore (default) + if FREERTOS_USE_KERNEL_10_5_1 = y && FREERTOS_GENERATE_RUN_TIME_STATS = y: + tasks:ulTaskGetIdleRunTimeCounterForCore (default) + tasks:ulTaskGetIdleRunTimePercentForCore (default) tasks:pxTaskGetStackStart (default) tasks:prvTaskPriorityRaise (default) tasks:prvTaskPriorityRestore (default) diff --git a/tools/ci/check_public_headers_exceptions.txt b/tools/ci/check_public_headers_exceptions.txt index 39ddc16643..2167932fe2 100644 --- a/tools/ci/check_public_headers_exceptions.txt +++ b/tools/ci/check_public_headers_exceptions.txt @@ -12,6 +12,7 @@ components/freertos/FreeRTOS-Kernel/include/freertos/ components/freertos/FreeRTOS-Kernel/portable/xtensa/include/freertos/ components/freertos/FreeRTOS-Kernel-SMP/include/freertos/ components/freertos/FreeRTOS-Kernel-SMP/portable/xtensa/include/freertos/ +components/freertos/FreeRTOS-Kernel-V10.5.1/include/freertos/ components/log/include/esp_log_internal.h