Merge branch 'feature/freertos_v1051_smp_changes' into 'master'

feat(freertos): Add FreeRTOS v10.5.1 SMP Support (BETA release)

Closes IDF-7877, IDF-7878, IDF-5739, IDF-4142, and IDF-8182

See merge request espressif/esp-idf!24995

commit 4d85cbff6d
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -85,8 +85,15 @@ void esp_gdbstub_int(__attribute__((unused)) void *frame)
     /* Pointer to saved frame is in pxCurrentTCB
      * See rtos_int_enter function
      */
-    extern void *pxCurrentTCB;
-    dummy_tcb_t *tcb = pxCurrentTCB;
+    /* Todo: Provide IDF interface for getting pxCurrentTCB (IDF-8182) */
+    int core_id = esp_cpu_get_core_id();
+#if CONFIG_FREERTOS_USE_KERNEL_10_5_1
+    extern void **pxCurrentTCBs;
+    dummy_tcb_t *tcb = pxCurrentTCBs[core_id];
+#else
+    extern void **pxCurrentTCB;
+    dummy_tcb_t *tcb = pxCurrentTCB[core_id];
+#endif /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */
     gdbstub_handle_uart_int((esp_gdbstub_frame_t *)tcb->top_of_stack);
 }
@@ -32,7 +32,7 @@ if(CONFIG_FREERTOS_SMP)
     set(kernel_impl "FreeRTOS-Kernel-SMP")
 else()
+    if(CONFIG_FREERTOS_USE_KERNEL_10_5_1)
+        message(FATAL_ERROR "FreeRTOS v10.5.1 is not buildable yet. Still under development")
+        set(kernel_impl "FreeRTOS-Kernel-V10.5.1")
+    else()
         set(kernel_impl "FreeRTOS-Kernel")
+    endif()
@@ -71,31 +71,55 @@ list(APPEND srcs
     "${kernel_impl}/queue.c"
     "${kernel_impl}/tasks.c"
     "${kernel_impl}/timers.c"
-    "${kernel_impl}/croutine.c"
     "${kernel_impl}/event_groups.c"
     "${kernel_impl}/stream_buffer.c")
+if(NOT CONFIG_FREERTOS_USE_KERNEL_10_5_1)
+    list(APPEND srcs "${kernel_impl}/croutine.c")
+endif()

 # Add port source files
+if(CONFIG_FREERTOS_USE_KERNEL_10_5_1)
+    list(APPEND srcs
+        "FreeRTOS-Kernel/portable/${arch}/port.c")
+else()
     list(APPEND srcs
         "${kernel_impl}/portable/${arch}/port.c")
+endif()

 if(arch STREQUAL "linux")
+    if(CONFIG_FREERTOS_USE_KERNEL_10_5_1)
+        list(APPEND srcs
+            "FreeRTOS-Kernel/portable/${arch}/utils/wait_for_event.c"
+            "FreeRTOS-Kernel/portable/${arch}/port_idf.c")
+    else()
         list(APPEND srcs
             "${kernel_impl}/portable/${arch}/utils/wait_for_event.c")
+        if(kernel_impl STREQUAL "FreeRTOS-Kernel")
+            list(APPEND srcs
+                "${kernel_impl}/portable/${arch}/port_idf.c")
+        endif()
+    endif()
 else()
+    if(CONFIG_FREERTOS_USE_KERNEL_10_5_1)
+        list(APPEND srcs
+            "FreeRTOS-Kernel/portable/${arch}/portasm.S")
+    else()
         list(APPEND srcs
             "${kernel_impl}/portable/${arch}/portasm.S")
+    endif()
 endif()

 if(arch STREQUAL "xtensa")
+    if(CONFIG_FREERTOS_USE_KERNEL_10_5_1)
+        list(APPEND srcs
+            "FreeRTOS-Kernel/portable/${arch}/xtensa_init.c"
+            "FreeRTOS-Kernel/portable/${arch}/xtensa_overlay_os_hook.c")
+    else()
         list(APPEND srcs
             "${kernel_impl}/portable/${arch}/xtensa_init.c"
             "${kernel_impl}/portable/${arch}/xtensa_overlay_os_hook.c")
+    endif()
 endif()

 # Add ESP-additions source files
 list(APPEND srcs
@@ -127,9 +151,15 @@ list(APPEND include_dirs
     "${kernel_impl}/include") # FreeRTOS headers via `#include "freertos/xxx.h"`

 # Add port public include directories
+if(CONFIG_FREERTOS_USE_KERNEL_10_5_1)
+    list(APPEND include_dirs
+        "FreeRTOS-Kernel/portable/${arch}/include" # For port headers via `#include "freertos/...h"`
+        "FreeRTOS-Kernel/portable/${arch}/include/freertos") # For port headers via `#include "...h"`
+else()
     list(APPEND include_dirs
         "${kernel_impl}/portable/${arch}/include" # For port headers via `#include "freertos/...h"`
         "${kernel_impl}/portable/${arch}/include/freertos") # For port headers via `#include "...h"`
+endif()

 # Add ESP-additions public include directories
 list(APPEND include_dirs
@@ -151,9 +181,14 @@ list(APPEND private_include_dirs

 # Add port private include directories
 if(arch STREQUAL "linux")
+    if(CONFIG_FREERTOS_USE_KERNEL_10_5_1)
+        list(APPEND private_include_dirs
+            "FreeRTOS-Kernel/portable/${arch}/") # Linux port `#include "utils/wait_for_event.h"`
+    else()
         list(APPEND private_include_dirs
             "${kernel_impl}/portable/${arch}/") # Linux port `#include "utils/wait_for_event.h"`
+    endif()
 endif()

 # Add ESP-additions private include directories
 list(APPEND private_include_dirs
@@ -43,6 +43,8 @@
 #include "task.h"
 #include "timers.h"
 #include "event_groups.h"
+/* Include private IDF API additions for critical thread safety macros */
+#include "esp_private/freertos_idf_additions_priv.h"

 /* Lint e961, e750 and e9021 are suppressed as a MISRA exception justified
  * because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
@@ -77,6 +79,8 @@ typedef struct EventGroupDef_t
     #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
         uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */
     #endif

+    portMUX_TYPE xEventGroupLock; /* Spinlock required for SMP critical sections */
 } EventGroup_t;

 /*-----------------------------------------------------------*/
@@ -131,6 +135,9 @@ static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits,
         }
         #endif /* configSUPPORT_DYNAMIC_ALLOCATION */

+        /* Initialize the event group's spinlock. */
+        portMUX_INITIALIZE( &pxEventBits->xEventGroupLock );

         traceEVENT_GROUP_CREATE( pxEventBits );
     }
     else
@@ -182,6 +189,9 @@ static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits,
         }
         #endif /* configSUPPORT_STATIC_ALLOCATION */

+        /* Initialize the event group's spinlock. */
+        portMUX_INITIALIZE( &pxEventBits->xEventGroupLock );

         traceEVENT_GROUP_CREATE( pxEventBits );
     }
     else
@@ -213,7 +223,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
     }
     #endif

-    vTaskSuspendAll();
+    prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
     {
         uxOriginalBitValue = pxEventBits->uxEventBits;
@@ -256,7 +266,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
             }
         }
     }
-    xAlreadyYielded = xTaskResumeAll();
+    xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );

     if( xTicksToWait != ( TickType_t ) 0 )
     {
@@ -278,7 +288,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
         if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
         {
             /* The task timed out, just return the current event bit value. */
-            taskENTER_CRITICAL();
+            taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
             {
                 uxReturn = pxEventBits->uxEventBits;
@@ -295,7 +305,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
                     mtCOVERAGE_TEST_MARKER();
                 }
             }
-            taskEXIT_CRITICAL();
+            taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );

             xTimeoutOccurred = pdTRUE;
         }
@@ -340,7 +350,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
     }
     #endif

-    vTaskSuspendAll();
+    prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
     {
         const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits;
@@ -408,7 +418,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
             traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
         }
     }
-    xAlreadyYielded = xTaskResumeAll();
+    xAlreadyYielded = prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );

     if( xTicksToWait != ( TickType_t ) 0 )
     {
@@ -429,7 +439,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,

         if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
         {
-            taskENTER_CRITICAL();
+            taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
             {
                 /* The task timed out, just return the current event bit value. */
                 uxReturn = pxEventBits->uxEventBits;
@@ -454,7 +464,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,

             xTimeoutOccurred = pdTRUE;
         }
-        taskEXIT_CRITICAL();
+        taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
     }
     else
     {
@@ -485,7 +495,7 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup,
     configASSERT( xEventGroup );
     configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 );

-    taskENTER_CRITICAL();
+    taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
     {
         traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear );
@@ -496,7 +506,7 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup,
         /* Clear the bits. */
         pxEventBits->uxEventBits &= ~uxBitsToClear;
     }
-    taskEXIT_CRITICAL();
+    taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );

     return uxReturn;
 }
@@ -552,7 +562,14 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,

     pxList = &( pxEventBits->xTasksWaitingForBits );
     pxListEnd = listGET_END_MARKER( pxList ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */
-    vTaskSuspendAll();
+    prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
+    #if ( configNUMBER_OF_CORES > 1 )

+        /* We are about to traverse a task list which is a kernel data structure.
+         * Thus we need to call prvTakeKernelLock() to take the kernel lock. */
+        prvTakeKernelLock();
+    #endif /* configNUMBER_OF_CORES > 1 */
     {
         traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );
@@ -624,7 +641,12 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
          * bit was set in the control word. */
         pxEventBits->uxEventBits &= ~uxBitsToClear;
     }
-    ( void ) xTaskResumeAll();
+    #if ( configNUMBER_OF_CORES > 1 )
+        /* Release the previously taken kernel lock. */
+        prvReleaseKernelLock();
+    #endif /* configNUMBER_OF_CORES > 1 */
+    ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );

     return pxEventBits->uxEventBits;
 }
@@ -639,7 +661,13 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup )

     pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );

-    vTaskSuspendAll();
+    prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxEventBits->xEventGroupLock ) );
+    #if ( configNUMBER_OF_CORES > 1 )

+        /* We are about to traverse a task list which is a kernel data structure.
+         * Thus we need to call prvTakeKernelLock() to take the kernel lock. */
+        prvTakeKernelLock();
+    #endif /* configNUMBER_OF_CORES > 1 */
     {
         traceEVENT_GROUP_DELETE( xEventGroup );
@@ -651,7 +679,11 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup )
             vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
         }
     }
-    ( void ) xTaskResumeAll();
+    #if ( configNUMBER_OF_CORES > 1 )
+        /* Release the previously taken kernel lock. */
+        prvReleaseKernelLock();
+    #endif /* configNUMBER_OF_CORES > 1 */
+    prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxEventBits->xEventGroupLock ) );

     #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
     {
@@ -2,6 +2,8 @@

This document is used to track all changes made to the FreeRTOS V10.5.1 source code when adding dual-core SMP support or IDF additional features.

Todo: Add these to the ESP-IDF docs once v10.5.1 becomes the default kernel (IDF-8203)

## License Headers

- Added `SPDX-FileCopyrightText` and `SPDX-FileContributor` tags to all files to pass ESP-IDF pre-commit checks.

@@ -14,3 +16,130 @@

## Changes from Upstream Main Branch not Included in v10.5.1

- Added the `...GetStaticBuffers` functions that were upstreamed but not included in v10.5.1
## Kernel SMP Changes

List of changes made to the Vanilla FreeRTOS V10.5.1 kernel in order to support dual-core SMP:

### Scheduling Behavior Changes

- The kernel now executes two tasks concurrently
- The kernel now creates two IDLE tasks (one pinned to each core)
- Tasks can be pinned to either core, or have no affinity (i.e., they can run on both cores)
- Each core receives a tick interrupt, but only core 0 increments the tick count and unblocks timed-out tasks
    - Core 0 calls `xTaskIncrementTick()`
    - Core 1 calls `xTaskIncrementTickOtherCores()`
- Each core independently calls `vTaskSwitchContext()` to pick the highest-priority task it can currently run (see the sketch after this list)
    - In single-core builds, the scheduling algorithm in `taskSELECT_HIGHEST_PRIORITY_TASK()` is unchanged
    - In SMP, `prvSelectHighestPriorityTaskSMP()` is called instead. This selects the highest-priority ready-state task that...
        - has a compatible core affinity
        - is not being run by another core
- Each core can suspend scheduling independently (i.e., `vTaskSuspendAll()`)
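A minimal sketch of what the SMP selection step described above amounts to (illustrative only, not the actual kernel source; it assumes the dual-core case, the ready-list structures follow vanilla FreeRTOS, and the affinity/running checks stand in for the `taskIS_...()` macros listed under "API Modifications" below):

```c
static void prvSelectHighestPriorityTaskSMP( BaseType_t xCoreID )
{
    UBaseType_t uxPriority;

    /* Walk the ready lists from the highest priority downwards. The idle
     * task pinned to this core sits at priority 0, so the loop always
     * finds something. */
    for( uxPriority = uxTopReadyPriority; ; uxPriority-- )
    {
        List_t * const pxList = &( pxReadyTasksLists[ uxPriority ] );
        ListItem_t * pxIterator;

        for( pxIterator = listGET_HEAD_ENTRY( pxList );
             pxIterator != listGET_END_MARKER( pxList );
             pxIterator = listGET_NEXT( pxIterator ) )
        {
            TCB_t * pxTCB = listGET_LIST_ITEM_OWNER( pxIterator );

            /* The task must be pinned to this core or unpinned, and must
             * not already be running on the other core (assumes two cores,
             * so "the other core" is simply !xCoreID). */
            if( ( ( pxTCB->xCoreID == xCoreID ) || ( pxTCB->xCoreID == tskNO_AFFINITY ) ) &&
                ( pxTCB != pxCurrentTCBs[ !xCoreID ] ) )
            {
                pxCurrentTCBs[ xCoreID ] = pxTCB;
                return;
            }
        }
    }
}
```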
### Configuration

The following configurations have been added (a minimal `FreeRTOSConfig.h` fragment is sketched below):

- Added `configNUMBER_OF_CORES` to specify the number of cores to build for. Can be `1` for vanilla behavior or `2` for SMP; any other value is an error
- Disabled `configUSE_PORT_OPTIMISED_TASK_SELECTION` for SMP
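For example (hypothetical fragment; `configNUMBER_OF_CORES` is the new option, and port-optimised selection must stay disabled when building for SMP):

```c
/* FreeRTOSConfig.h (example values, not ESP-IDF defaults) */
#define configNUMBER_OF_CORES                      2    /* build the dual-core SMP kernel */
#define configUSE_PORT_OPTIMISED_TASK_SELECTION    0    /* not supported in SMP */
```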
|
||||
|
||||
### Data Structure Changes (`tasks.c`)
|
||||
|
||||
The following data fields have been expanded to have `configNUMBER_OF_CORES` copies:
|
||||
|
||||
- `pxCurrentTCBs`: Each core now has its own currently running task
|
||||
- `xPendingReadyList`: Each core has its own list to pend ready tasks if the scheduler is suspended on the core
|
||||
- `xYieldPending`: Each core has its own flag to track whether it has a pending yield
|
||||
- `xIdleTaskHandle`: Each core now has its own idle task
|
||||
- `uxSchedulerSuspended`: Each core can independently suspend scheduling on its core
|
||||
- `ulTaskSwitchedInTime`: Each core tracks its own "task switched in" time
|
||||
|
||||
Their access is now indexed by a `xCoreID` if in SMP, or set to `0` in single core.
|
||||
|
||||
The following data structures have been added:
|
||||
|
||||
- `TCB_t.xCoreID`: All tasks now store their core affinity in a TCB member. Always set to 0 in single-core
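A sketch of the expansion pattern (illustrative declarations, not the exact kernel code; `portGET_CORE_ID()` collapses to `0` in single-core builds, as enforced in the `FreeRTOS.h` changes further down):

```c
/* Per-core copies of state that used to be a single variable. */
PRIVILEGED_DATA TCB_t * volatile pxCurrentTCBs[ configNUMBER_OF_CORES ];
PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended[ configNUMBER_OF_CORES ];

/* Accesses index by core ID, so the same expression serves both the
 * single-core and SMP builds. */
#define prvGET_CURRENT_TCB()    ( pxCurrentTCBs[ portGET_CORE_ID() ] )
```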
### API Additions

The following APIs have been added to support SMP (see the usage example after this list):

- `xTaskCreatePinnedToCore()` and `xTaskCreateStaticPinnedToCore()` to create tasks with a core affinity
    - In single-core builds, the core affinity is ignored. Same behavior as `xTaskCreate()`
- `xTaskGetCoreID()` to get a task's affinity
- Added `ForCore()` versions of the following APIs:
    - `xTaskGetIdleTaskHandleForCore()`
    - `xTaskGetCurrentTaskHandleForCore()`
    - `ulTaskGetIdleRunTimeCounterForCore()`
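For example (task body and names are made up for illustration; note that in ESP-IDF the stack depth is given in bytes, per the `task.h` changes further down):

```c
static void vBlinkTask( void * pvParameters )
{
    for( ; ; )
    {
        /* ... do some periodic work ... */
        vTaskDelay( pdMS_TO_TICKS( 500 ) );
    }
}

void vStartExampleTasks( void )
{
    /* One instance pinned to core 0, one left unpinned so the scheduler
     * may run it on either core. */
    xTaskCreatePinnedToCore( vBlinkTask, "blink0", 2048, NULL, 5, NULL, 0 );
    xTaskCreatePinnedToCore( vBlinkTask, "blinkN", 2048, NULL, 5, NULL, tskNO_AFFINITY );
}
```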
### API Modifications

Added the following macros to abstract away single-core and SMP differences:

- `taskYIELD_CORE()` triggers a particular core to yield
- `taskIS_YIELD_REQUIRED()`/`taskIS_YIELD_REQUIRED_USING_PRIORITY()` check whether the current core requires a yield after a task is unblocked
- `taskIS_AFFINITY_COMPATIBLE()` checks whether a task has a compatible core affinity
- `taskIS_CURRENTLY_RUNNING()`/`taskIS_CURRENTLY_RUNNING_ON_CORE()` check whether a task is running on either core, or on a particular core
- `taskCAN_BE_SCHEDULED()` checks whether an unblocked task can be scheduled on any core
- `taskIS_SCHEDULER_SUSPENDED()` checks whether the scheduler on the current core is suspended
- `taskSELECT_HIGHEST_PRIORITY_TASK()` selects the highest-priority task to execute on the current core
- `prvGetTCBFromHandle()` updated in SMP to call `xTaskGetCurrentTaskHandle()` when the handle is `NULL`. Done for thread safety (in case the current task switches cores at the same time)

The following functions were modified to accommodate SMP behavior:

- `prvInitialiseNewTask()`
    - Added an `xCoreID` argument to pin the task on creation
    - For single-core builds, `xCoreID` is hard-coded to `0`
- `prvAddNewTaskToReadyList()`
    - Checks whether the new task can be scheduled on core 1
- `vTaskDelete()`
    - Checks whether the deleted task is currently running on the other core
    - If so, sends a yield to the other core (see the sketch after this list)
- `vTaskPrioritySet()`
    - Checks whether the task is currently running on either core, and yields the appropriate core if so
- `vTaskSuspend()`
    - Checks whether the task is currently running on the other core, and yields the other core if so
- `prvTaskIsTaskSuspended()`
    - Checks the `xPendingReadyList` of both cores to see whether a task is suspended
- `xTaskResumeAll()`
    - Limits catching up of tick counts to core 0 (given that only core 0 calls `xTaskIncrementTick()`)
- `xTaskIncrementTick()`
    - Limited to core 0
- `vTaskSwitchContext()`
    - Switches context for the current core
- `xTaskRemoveFromEventList()`
    - Created an SMP copy of the function
    - Checks whether `pxEventList` has already been emptied by the other core before removing
    - Checks whether the task can be scheduled on both cores, and adds it to the appropriate core's pending list if it cannot be scheduled
- `vTaskRemoveFromUnorderedEventList()`
    - In SMP, checks whether the task can be scheduled before adding it to the appropriate list, whereas in single-core builds the scheduler is always suspended, so the unblocked task always goes onto the pending ready list
- `eTaskConfirmSleepModeStatus()`
    - Updated logic to determine whether sleep is possible in SMP by checking the status of both cores
- `prvCheckTasksWaitingTermination()`
    - Updated logic so that tasks on `xTasksWaitingTermination` that are still running on the other core are not deleted
- `prvAddCurrentTaskToDelayedList()`
    - Added an extra check to see whether the currently blocking task has already been deleted by the other core
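A sketch of the cross-core yield pattern shared by `vTaskDelete()`, `vTaskPrioritySet()` and `vTaskSuspend()` above (illustrative only; the helper name is made up, while `pxCurrentTCBs`, `portGET_CORE_ID()` and `taskYIELD_CORE()` come from this document):

```c
static void prvYieldCoreIfTaskIsRunning( TCB_t * pxTCB )
{
    BaseType_t xCoreID;

    for( xCoreID = 0; xCoreID < configNUMBER_OF_CORES; xCoreID++ )
    {
        if( ( pxCurrentTCBs[ xCoreID ] == pxTCB ) && ( xCoreID != portGET_CORE_ID() ) )
        {
            /* The target task is mid-execution on the other core. Force a
             * context switch there so the delete/suspend/priority change
             * takes effect promptly. */
            taskYIELD_CORE( xCoreID );
        }
    }
}
```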
### Critical Section Changes

- Granular locks: the following objects are now given their own spinlocks (see the sketch after this list)
    - Kernel objects (i.e., `tasks.c`): `xKernelLock`
    - Each queue: `xQueueLock`
    - Queue registry: `xQueueRegistryLock`
    - Each event group: `xEventGroupLock`
    - Each stream buffer: `xStreamBufferLock`
    - All timers: `xTimerLock`
- Critical sections now target the appropriate spinlocks
- Added missing critical sections for SMP (see the `..._SMP_ONLY()` critical section calls)
- Queues no longer use queue locks (see `queueUSE_LOCKS`)
    - Queues now just use critical sections and skip queue locking
    - Queue functions can now execute within a single critical-section block
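The granular-lock pattern, condensed from the `event_groups.c` changes earlier in this diff (simplified sketch; the real function also carries asserts, tracing, and lint comments):

```c
EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup,
                                  const EventBits_t uxBitsToClear )
{
    EventGroup_t * pxEventBits = xEventGroup;
    EventBits_t uxReturn;

    /* Take only this event group's spinlock, so unrelated event groups,
     * queues, and the kernel data structures stay accessible to the
     * other core. */
    taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
    {
        uxReturn = pxEventBits->uxEventBits;
        pxEventBits->uxEventBits &= ~uxBitsToClear;
    }
    taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );

    return uxReturn;
}
```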
## Single Core Differences

List of differences between Vanilla FreeRTOS V10.5.1 and building the dual-core SMP kernel with `configNUMBER_OF_CORES == 1`:

- `prvAddNewTaskToReadyList()`
    - Extended critical section so that SMP can check for yields while still inside the critical section
- `vTaskStepTick()`
    - Extended critical section so that SMP can access `xTickCount` while still inside the critical section
@@ -135,6 +135,24 @@
  * within FreeRTOSConfig.h.
  */

+#ifndef configNUMBER_OF_CORES
+    #error Missing definition: configNUMBER_OF_CORES must be defined in FreeRTOSConfig.h
+#endif
+
+#if ( configNUMBER_OF_CORES > 1 )
+    #ifndef portGET_CORE_ID
+        #error "Missing definition: portGET_CORE_ID() must be defined in portmacro.h if configNUMBER_OF_CORES > 1"
+    #endif
+    #ifndef portYIELD_CORE
+        #error "Missing definition: portYIELD_CORE() must be defined in portmacro.h if configNUMBER_OF_CORES > 1"
+    #endif
+#elif ( configNUMBER_OF_CORES == 1 )
+    #undef portGET_CORE_ID
+    #define portGET_CORE_ID()    0
+#else
+    #error configNUMBER_OF_CORES must be defined to either 1 or > 1.
+#endif /* if ( configNUMBER_OF_CORES > 1 ) */
+
 #ifndef configMINIMAL_STACK_SIZE
     #error Missing definition: configMINIMAL_STACK_SIZE must be defined in FreeRTOSConfig.h. configMINIMAL_STACK_SIZE defines the size (in words) of the stack allocated to the idle task. Refer to the demo project provided for your port for a suitable value.
 #endif
@@ -1007,6 +1025,10 @@
     #error configUSE_MUTEXES must be set to 1 to use recursive mutexes
 #endif

+#if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PORT_OPTIMISED_TASK_SELECTION != 0 ) )
+    #error configUSE_PORT_OPTIMISED_TASK_SELECTION is not supported in SMP
+#endif
+
 #ifndef configINITIAL_TICK_COUNT
     #define configINITIAL_TICK_COUNT    0
 #endif
@@ -1267,6 +1289,8 @@ typedef struct xSTATIC_TCB
     UBaseType_t uxDummy5;
     void * pxDummy6;
     uint8_t ucDummy7[ configMAX_TASK_NAME_LEN ];
+    /* Todo: Remove xCoreID for single core builds (IDF-7894) */
+    BaseType_t xDummyCoreID;
     #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
         void * pxDummy8;
     #endif
@@ -1347,6 +1371,7 @@ typedef struct xSTATIC_QUEUE
         UBaseType_t uxDummy8;
         uint8_t ucDummy9;
     #endif
+    portMUX_TYPE xDummyQueueLock;
 } StaticQueue_t;
 typedef StaticQueue_t StaticSemaphore_t;
@@ -1376,6 +1401,7 @@ typedef struct xSTATIC_EVENT_GROUP
     #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
         uint8_t ucDummy4;
     #endif
+    portMUX_TYPE xDummyEventGroupLock;
 } StaticEventGroup_t;

 /*
@@ -1430,6 +1456,7 @@ typedef struct xSTATIC_STREAM_BUFFER
     #if ( configUSE_SB_COMPLETED_CALLBACK == 1 )
         void * pvDummy5[ 2 ];
     #endif
+    portMUX_TYPE xDummyStreamBufferLock;
 } StaticStreamBuffer_t;

 /* Message buffers are built on stream buffers. */
@@ -1441,4 +1468,69 @@ typedef StaticStreamBuffer_t StaticMessageBuffer_t;
 #endif
 /* *INDENT-ON* */

+/*-----------------------------------------------------------
+ * IDF Compatibility
+ *----------------------------------------------------------*/
+
+#ifdef ESP_PLATFORM
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+    extern "C" {
+#endif
+/* *INDENT-ON* */
+
+/*
+ * Default values for trace macros added by ESP-IDF that are not part of
+ * Vanilla FreeRTOS.
+ */
+
+#ifndef traceISR_EXIT_TO_SCHEDULER
+    #define traceISR_EXIT_TO_SCHEDULER()
+#endif
+
+#ifndef traceISR_EXIT
+    #define traceISR_EXIT()
+#endif
+
+#ifndef traceISR_ENTER
+    #define traceISR_ENTER( _n_ )
+#endif
+
+#ifndef traceQUEUE_SEMAPHORE_RECEIVE
+    #define traceQUEUE_SEMAPHORE_RECEIVE( pxQueue )
+#endif
+
+#ifndef traceQUEUE_GIVE_FROM_ISR
+    #define traceQUEUE_GIVE_FROM_ISR( pxQueue )
+#endif
+
+#ifndef traceQUEUE_GIVE_FROM_ISR_FAILED
+    #define traceQUEUE_GIVE_FROM_ISR_FAILED( pxQueue )
+#endif
+
+/* *INDENT-OFF* */
+#ifdef __cplusplus
+    }
+#endif
+/* *INDENT-ON* */
+
+/*
+ * Include ESP-IDF API additions implicitly for compatibility reasons.
+ *
+ * ESP-IDF API additions were previously added directly to FreeRTOS headers
+ * (e.g., task.h, queue.h). These APIs have now been moved to
+ * idf_additions.h.
+ *
+ * To ensure there are no breaking changes, we include idf_additions.h
+ * implicitly here so that those API additions are still accessible. Given
+ * that FreeRTOS.h must be included first before calling any FreeRTOS API,
+ * any existing source code can continue using these relocated APIs without
+ * any additional header inclusions via this implicit inclusion.
+ *
+ * Todo: Deprecate this implicit inclusion by ESP-IDF v6.0 (IDF-8126)
+ */
+#include "freertos/idf_additions.h"
+
+#endif /* ESP_PLATFORM */
+
 #endif /* INC_FREERTOS_H */
@@ -46,6 +46,15 @@ typedef void (* TaskFunction_t)( void * );
     #define pdMS_TO_TICKS( xTimeInMs )    ( ( TickType_t ) ( ( ( TickType_t ) ( xTimeInMs ) * ( TickType_t ) configTICK_RATE_HZ ) / ( TickType_t ) 1000U ) )
 #endif

+/* Converts a time in ticks to milliseconds. This macro can be
+ * overridden by a macro of the same name defined in FreeRTOSConfig.h in case
+ * the definition here is not suitable for your application.
+ *
+ * Todo: Upstream this macro (IDF-8181) */
+#ifndef pdTICKS_TO_MS
+    #define pdTICKS_TO_MS( xTicks )    ( ( TickType_t ) ( ( uint64_t ) ( xTicks ) * 1000 / configTICK_RATE_HZ ) )
+#endif
+
 #define pdFALSE    ( ( BaseType_t ) 0 )
 #define pdTRUE     ( ( BaseType_t ) 1 )
@@ -60,12 +60,12 @@
 #if ( ( configCHECK_FOR_STACK_OVERFLOW == 1 ) && ( portSTACK_GROWTH < 0 ) )

 /* Only the current stack state is to be checked. */
-    #define taskCHECK_FOR_STACK_OVERFLOW() \
+    #define taskCHECK_FOR_STACK_OVERFLOW( xCurCoreID ) \
     { \
         /* Is the currently saved stack pointer within the stack limit? */ \
-        if( pxCurrentTCB->pxTopOfStack <= pxCurrentTCB->pxStack + portSTACK_LIMIT_PADDING ) \
+        if( pxCurrentTCBs[ xCurCoreID ]->pxTopOfStack <= pxCurrentTCBs[ xCurCoreID ]->pxStack + portSTACK_LIMIT_PADDING ) \
         { \
-            vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \
+            vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCBs[ xCurCoreID ], pxCurrentTCBs[ xCurCoreID ]->pcTaskName ); \
         } \
     }
@@ -75,13 +75,13 @@
 #if ( ( configCHECK_FOR_STACK_OVERFLOW == 1 ) && ( portSTACK_GROWTH > 0 ) )

 /* Only the current stack state is to be checked. */
-    #define taskCHECK_FOR_STACK_OVERFLOW() \
+    #define taskCHECK_FOR_STACK_OVERFLOW( xCurCoreID ) \
     { \
         \
         /* Is the currently saved stack pointer within the stack limit? */ \
-        if( pxCurrentTCB->pxTopOfStack >= pxCurrentTCB->pxEndOfStack - portSTACK_LIMIT_PADDING ) \
+        if( pxCurrentTCBs[ xCurCoreID ]->pxTopOfStack >= pxCurrentTCBs[ xCurCoreID ]->pxEndOfStack - portSTACK_LIMIT_PADDING ) \
         { \
-            vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \
+            vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCBs[ xCurCoreID ], pxCurrentTCBs[ xCurCoreID ]->pcTaskName ); \
         } \
     }
@@ -90,9 +90,9 @@

 #if ( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) && ( portSTACK_GROWTH < 0 ) )

-    #define taskCHECK_FOR_STACK_OVERFLOW() \
+    #define taskCHECK_FOR_STACK_OVERFLOW( xCurCoreID ) \
     { \
-        const uint32_t * const pulStack = ( uint32_t * ) pxCurrentTCB->pxStack; \
+        const uint32_t * const pulStack = ( uint32_t * ) pxCurrentTCBs[ xCurCoreID ]->pxStack; \
         const uint32_t ulCheckValue = ( uint32_t ) 0xa5a5a5a5; \
         \
         if( ( pulStack[ 0 ] != ulCheckValue ) || \
@@ -100,7 +100,7 @@
             ( pulStack[ 2 ] != ulCheckValue ) || \
             ( pulStack[ 3 ] != ulCheckValue ) ) \
         { \
-            vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \
+            vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCBs[ xCurCoreID ], pxCurrentTCBs[ xCurCoreID ]->pcTaskName ); \
         } \
     }
@@ -109,9 +109,9 @@

 #if ( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) && ( portSTACK_GROWTH > 0 ) )

-    #define taskCHECK_FOR_STACK_OVERFLOW() \
+    #define taskCHECK_FOR_STACK_OVERFLOW( xCurCoreID ) \
     { \
-        int8_t * pcEndOfStack = ( int8_t * ) pxCurrentTCB->pxEndOfStack; \
+        int8_t * pcEndOfStack = ( int8_t * ) pxCurrentTCBs[ xCurCoreID ]->pxEndOfStack; \
         static const uint8_t ucExpectedStackBytes[] = { tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \
                                                         tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \
                                                         tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \
@@ -124,7 +124,7 @@
         /* Has the extremity of the task stack ever been written over? */ \
         if( memcmp( ( void * ) pcEndOfStack, ( void * ) ucExpectedStackBytes, sizeof( ucExpectedStackBytes ) ) != 0 ) \
         { \
-            vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \
+            vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCBs[ xCurCoreID ], pxCurrentTCBs[ xCurCoreID ]->pcTaskName ); \
         } \
     }
@@ -133,7 +133,7 @@

 /* Remove stack overflow macro if not being used. */
 #ifndef taskCHECK_FOR_STACK_OVERFLOW
-    #define taskCHECK_FOR_STACK_OVERFLOW()
+    #define taskCHECK_FOR_STACK_OVERFLOW( xCurCoreID )
 #endif
@@ -170,6 +170,7 @@ typedef struct xTASK_STATUS
         StackType_t * pxEndOfStack;       /* Points to the end address of the task's stack area. */
     #endif
     configSTACK_DEPTH_TYPE usStackHighWaterMark; /* The minimum amount of stack space that has remained for the task since the task was created. The closer this value is to zero the closer the task has come to overflowing its stack. */
+    BaseType_t xCoreID;                   /*!< Core this task is pinned to (0, 1, or tskNO_AFFINITY). If configNUMBER_OF_CORES == 1, this will always be 0. */
 } TaskStatus_t;

 /* Possible return values for eTaskConfirmSleepModeStatus(). */
@@ -189,6 +190,14 @@ typedef enum
  */
 #define tskIDLE_PRIORITY    ( ( UBaseType_t ) 0U )

+/**
+ * Macro representing an unpinned (i.e., "no affinity") task in xCoreID parameters
+ *
+ * \ingroup Tasks
+ */
+#define tskNO_AFFINITY    ( ( BaseType_t ) 0x7FFFFFFF )
+/* Todo: Update tskNO_AFFINITY value to -1 (IDF-7908) */
+
 /**
  * task. h
  *
@@ -211,8 +220,9 @@ typedef enum
  * \defgroup taskENTER_CRITICAL taskENTER_CRITICAL
  * \ingroup SchedulerControl
  */
-#define taskENTER_CRITICAL()               portENTER_CRITICAL()
+#define taskENTER_CRITICAL( x )            portENTER_CRITICAL( x )
 #define taskENTER_CRITICAL_FROM_ISR()      portSET_INTERRUPT_MASK_FROM_ISR()
+#define taskENTER_CRITICAL_ISR( x )        portENTER_CRITICAL_ISR( x )

 /**
  * task. h
@@ -226,8 +236,9 @@ typedef enum
  * \defgroup taskEXIT_CRITICAL taskEXIT_CRITICAL
  * \ingroup SchedulerControl
  */
-#define taskEXIT_CRITICAL()                portEXIT_CRITICAL()
+#define taskEXIT_CRITICAL( x )             portEXIT_CRITICAL( x )
 #define taskEXIT_CRITICAL_FROM_ISR( x )    portCLEAR_INTERRUPT_MASK_FROM_ISR( x )
+#define taskEXIT_CRITICAL_ISR( x )         portEXIT_CRITICAL_ISR( x )

 /**
  * task. h
@@ -294,6 +305,9 @@ typedef enum
  * support can alternatively create an MPU constrained task using
  * xTaskCreateRestricted().
  *
+ * @note If configNUMBER_OF_CORES > 1, this function will create an unpinned
+ * task (see tskNO_AFFINITY for more details).
+ *
  * @param pxTaskCode Pointer to the task entry function. Tasks
  * must be implemented to never return (i.e. continuous loop).
  *
@@ -301,10 +315,8 @@ typedef enum
  * facilitate debugging. Max length defined by configMAX_TASK_NAME_LEN - default
  * is 16.
  *
- * @param usStackDepth The size of the task stack specified as the number of
- * variables the stack can hold - not the number of bytes. For example, if
- * the stack is 16 bits wide and usStackDepth is defined as 100, 200 bytes
- * will be allocated for stack storage.
+ * @param usStackDepth The size of the task stack specified as the NUMBER OF
+ * BYTES. Note that this differs from vanilla FreeRTOS.
  *
  * @param pvParameters Pointer that will be used as the parameter for the task
  * being created.
@@ -321,6 +333,9 @@ typedef enum
  * @return pdPASS if the task was successfully created and added to a ready
  * list, otherwise an error code defined in the file projdefs.h
  *
+ * @note If program uses thread local variables (ones specified with "__thread"
+ * keyword) then storage for them will be allocated on the task's stack.
+ *
  * Example usage:
  * @code{c}
  *  // Task to be created.
@@ -356,13 +371,39 @@ typedef enum
  * \ingroup Tasks
  */
 #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
+    static inline __attribute__( ( always_inline ) )
     BaseType_t xTaskCreate( TaskFunction_t pxTaskCode,
                             const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
                             const configSTACK_DEPTH_TYPE usStackDepth,
                             void * const pvParameters,
                             UBaseType_t uxPriority,
-                            TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION;
-#endif
+                            TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION
+    {
+        /*
+         * The idf_additions.h has not been included here yet due to inclusion
+         * order. Thus we manually declare the function here.
+         */
+        extern BaseType_t xTaskCreatePinnedToCore( TaskFunction_t pxTaskCode,
+                                                   const char * const pcName,
+                                                   const configSTACK_DEPTH_TYPE usStackDepth,
+                                                   void * const pvParameters,
+                                                   UBaseType_t uxPriority,
+                                                   TaskHandle_t * const pvCreatedTask,
+                                                   const BaseType_t xCoreID );
+
+        /*
+         * Call the "PinnedToCore" version with tskNO_AFFINITY to create
+         * an unpinned task.
+         */
+        return xTaskCreatePinnedToCore( pxTaskCode,
+                                        pcName,
+                                        usStackDepth,
+                                        pvParameters,
+                                        uxPriority,
+                                        pxCreatedTask,
+                                        tskNO_AFFINITY );
+    }
+#endif /* configSUPPORT_DYNAMIC_ALLOCATION == 1 */

 /**
  * task. h
@@ -388,6 +429,9 @@ typedef enum
  * memory. xTaskCreateStatic() therefore allows a task to be created without
  * using any dynamic memory allocation.
  *
+ * @note If configNUMBER_OF_CORES > 1, this function will create an unpinned
+ * task (see tskNO_AFFINITY for more details).
+ *
  * @param pxTaskCode Pointer to the task entry function. Tasks
  * must be implemented to never return (i.e. continuous loop).
  *
@@ -395,10 +439,8 @@ typedef enum
  * facilitate debugging. The maximum length of the string is defined by
  * configMAX_TASK_NAME_LEN in FreeRTOSConfig.h.
  *
- * @param ulStackDepth The size of the task stack specified as the number of
- * variables the stack can hold - not the number of bytes. For example, if
- * the stack is 32-bits wide and ulStackDepth is defined as 100 then 400 bytes
- * will be allocated for stack storage.
+ * @param ulStackDepth The size of the task stack specified as the NUMBER OF
+ * BYTES. Note that this differs from vanilla FreeRTOS.
 *
 * @param pvParameters Pointer that will be used as the parameter for the task
 * being created.
@@ -418,6 +460,9 @@ typedef enum
  * puxStackBuffer or pxTaskBuffer are NULL then the task will not be created and
  * NULL is returned.
  *
+ * @note If program uses thread local variables (ones specified with "__thread"
+ * keyword) then storage for them will be allocated on the task's stack.
+ *
  * Example usage:
  * @code{c}
  *
@@ -473,13 +518,41 @@ typedef enum
  * \ingroup Tasks
  */
 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
+    static inline __attribute__( ( always_inline ) )
     TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode,
                                     const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
                                     const uint32_t ulStackDepth,
                                     void * const pvParameters,
                                     UBaseType_t uxPriority,
                                     StackType_t * const puxStackBuffer,
-                                    StaticTask_t * const pxTaskBuffer ) PRIVILEGED_FUNCTION;
+                                    StaticTask_t * const pxTaskBuffer ) PRIVILEGED_FUNCTION
+    {
+        /*
+         * The idf_additions.h has not been included here yet due to inclusion
+         * order. Thus we manually declare the function here.
+         */
+        extern TaskHandle_t xTaskCreateStaticPinnedToCore( TaskFunction_t pxTaskCode,
+                                                           const char * const pcName,
+                                                           const uint32_t ulStackDepth,
+                                                           void * const pvParameters,
+                                                           UBaseType_t uxPriority,
+                                                           StackType_t * const pxStackBuffer,
+                                                           StaticTask_t * const pxTaskBuffer,
+                                                           const BaseType_t xCoreID );
+
+        /*
+         * Call the "PinnedToCore" version with tskNO_AFFINITY to create
+         * an unpinned task.
+         */
+        return xTaskCreateStaticPinnedToCore( pxTaskCode,
+                                              pcName,
+                                              ulStackDepth,
+                                              pvParameters,
+                                              uxPriority,
+                                              puxStackBuffer,
+                                              pxTaskBuffer,
+                                              tskNO_AFFINITY );
+    }
 #endif /* configSUPPORT_STATIC_ALLOCATION */

 /**
  * task. h
@@ -1737,8 +1810,8 @@ BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask,
  * xTaskGetIdleTaskHandle() is only available if
  * INCLUDE_xTaskGetIdleTaskHandle is set to 1 in FreeRTOSConfig.h.
  *
- * Simply returns the handle of the idle task. It is not valid to call
- * xTaskGetIdleTaskHandle() before the scheduler has been started.
+ * Simply returns the handle of the idle task of the current core. It is not
+ * valid to call xTaskGetIdleTaskHandle() before the scheduler has been started.
  */
 TaskHandle_t xTaskGetIdleTaskHandle( void ) PRIVILEGED_FUNCTION;
@@ -1979,6 +2052,9 @@ void vTaskGetRunTimeStats( char * pcWriteBuffer ) PRIVILEGED_FUNCTION; /*lint !e
  * system if there are no other tasks executing at the idle priority, tickless
  * idle is not used, and configIDLE_SHOULD_YIELD is set to 0.
  *
+ * @note If configNUMBER_OF_CORES > 1, calling this function will query the idle
+ * task of the current core.
+ *
  * @return The total run time of the idle task or the percentage of the total
  * run time consumed by the idle task. This is the amount of time the
  * idle task has actually been executing. The unit of time is dependent on the
@@ -2954,6 +3030,9 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) PRIVILEGED_FUNCTION;
  * or
  * + Time slicing is in use and there is a task of equal priority to the
  * currently running task.
+ *
+ * Note: If configNUMBER_OF_CORES > 1, this function must only be called by
+ * core 0. Other cores should call xTaskIncrementTickOtherCores() instead.
  */
 BaseType_t xTaskIncrementTick( void ) PRIVILEGED_FUNCTION;
@ -41,6 +41,8 @@
|
||||
#include "FreeRTOS.h"
|
||||
#include "task.h"
|
||||
#include "queue.h"
|
||||
/* Include private IDF API additions for critical thread safety macros */
|
||||
#include "esp_private/freertos_idf_additions_priv.h"
|
||||
|
||||
#if ( configUSE_CO_ROUTINES == 1 )
|
||||
#include "croutine.h"
|
||||
@ -52,11 +54,71 @@
|
||||
* correct privileged Vs unprivileged linkage and placement. */
|
||||
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */
|
||||
|
||||
/* Some code sections require extra critical sections when building for SMP
|
||||
* ( configNUMBER_OF_CORES > 1 ). */
|
||||
#if ( configNUMBER_OF_CORES > 1 )
|
||||
/* Macros that Enter/exit a critical section only when building for SMP */
|
||||
#define taskENTER_CRITICAL_SMP_ONLY( pxLock ) taskENTER_CRITICAL( pxLock )
|
||||
#define taskEXIT_CRITICAL_SMP_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock )
|
||||
#define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskEnterCriticalSafeSMPOnly( pxLock )
|
||||
#define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskExitCriticalSafeSMPOnly( pxLock )
|
||||
|
||||
static inline __attribute__( ( always_inline ) )
|
||||
void prvTaskEnterCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
|
||||
{
|
||||
if( portCHECK_IF_IN_ISR() == pdFALSE )
|
||||
{
|
||||
taskENTER_CRITICAL( pxLock );
|
||||
}
|
||||
else
|
||||
{
|
||||
#ifdef __clang_analyzer__
|
||||
/* Teach clang-tidy that ISR version macro can be different */
|
||||
configASSERT( 1 );
|
||||
#endif
|
||||
taskENTER_CRITICAL_ISR( pxLock );
|
||||
}
|
||||
}
|
||||
|
||||
static inline __attribute__( ( always_inline ) )
|
||||
void prvTaskExitCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
|
||||
{
|
||||
if( portCHECK_IF_IN_ISR() == pdFALSE )
|
||||
{
|
||||
taskEXIT_CRITICAL( pxLock );
|
||||
}
|
||||
else
|
||||
{
|
||||
#ifdef __clang_analyzer__
|
||||
/* Teach clang-tidy that ISR version macro can be different */
|
||||
configASSERT( 1 );
|
||||
#endif
|
||||
taskEXIT_CRITICAL_ISR( pxLock );
|
||||
}
|
||||
}
|
||||
#else /* configNUMBER_OF_CORES > 1 */
|
||||
/* Macros that Enter/exit a critical section only when building for SMP */
|
||||
#define taskENTER_CRITICAL_SMP_ONLY( pxLock )
|
||||
#define taskEXIT_CRITICAL_SMP_ONLY( pxLock )
|
||||
#define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock )
|
||||
#define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock )
|
||||
#endif /* configNUMBER_OF_CORES > 1 */
|
||||
|
||||
/* Single core FreeRTOS uses queue locks to ensure that vTaskPlaceOnEventList()
|
||||
* calls are deterministic (as queue locks use scheduler suspension instead of
|
||||
* critical sections). However, the SMP implementation is non-deterministic
|
||||
* anyways, thus SMP can forego the use of queue locks (replaced with a critical
|
||||
* sections) in exchange for better queue performance. */
|
||||
#if ( configNUMBER_OF_CORES > 1 )
|
||||
#define queueUSE_LOCKS 0
|
||||
#define queueUNLOCKED ( ( int8_t ) 0 )
|
||||
#else /* configNUMBER_OF_CORES > 1 */
|
||||
#define queueUSE_LOCKS 1
|
||||
/* Constants used with the cRxLock and cTxLock structure members. */
|
||||
#define queueUNLOCKED ( ( int8_t ) -1 )
|
||||
#define queueLOCKED_UNMODIFIED ( ( int8_t ) 0 )
|
||||
#define queueINT8_MAX ( ( int8_t ) 127 )
|
||||
#endif /* configNUMBER_OF_CORES > 1 */
|
||||
|
||||
/* When the Queue_t structure is used to represent a base queue its pcHead and
|
||||
* pcTail members are used as pointers into the queue storage area. When the
|
||||
@ -119,8 +181,10 @@ typedef struct QueueDefinition /* The old naming convention is used to prevent b
|
||||
UBaseType_t uxLength; /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
|
||||
UBaseType_t uxItemSize; /*< The size of each items that the queue will hold. */
|
||||
|
||||
#if ( queueUSE_LOCKS == 1 )
|
||||
volatile int8_t cRxLock; /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
|
||||
volatile int8_t cTxLock; /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
|
||||
#endif /* queueUSE_LOCKS == 1 */
|
||||
|
||||
#if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
|
||||
uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */
|
||||
@ -134,6 +198,8 @@ typedef struct QueueDefinition /* The old naming convention is used to prevent b
|
||||
UBaseType_t uxQueueNumber;
|
||||
uint8_t ucQueueType;
|
||||
#endif
|
||||
|
||||
portMUX_TYPE xQueueLock; /* Spinlock required for SMP critical sections */
|
||||
} xQUEUE;
|
||||
|
||||
/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
|
||||
@ -167,8 +233,15 @@ typedef xQUEUE Queue_t;
|
||||
* array position being vacant. */
|
||||
PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];
|
||||
|
||||
#if ( configNUMBER_OF_CORES > 1 )
|
||||
/* Spinlock required in SMP when accessing the queue registry */
|
||||
static portMUX_TYPE xQueueRegistryLock = portMUX_INITIALIZER_UNLOCKED;
|
||||
#endif /* configNUMBER_OF_CORES > 1 */
|
||||
|
||||
#endif /* configQUEUE_REGISTRY_SIZE */
|
||||
|
||||
#if ( queueUSE_LOCKS == 1 )
|
||||
|
||||
/*
|
||||
* Unlocks a queue locked by a call to prvLockQueue. Locking a queue does not
|
||||
* prevent an ISR from adding or removing items to the queue, but does prevent
|
||||
@ -192,6 +265,7 @@ static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION
|
||||
* @return pdTRUE if there is no space, otherwise pdFALSE;
|
||||
*/
|
||||
static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
|
||||
#endif /* queueUSE_LOCKS == 1 */
|
||||
|
||||
/*
|
||||
* Copies an item into the queue, either at the front of the queue or the
|
||||
@ -248,12 +322,14 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
|
||||
#endif
|
||||
/*-----------------------------------------------------------*/
|
||||
|
||||
#if ( queueUSE_LOCKS == 1 )
|
||||
|
||||
/*
|
||||
* Macro to mark a queue as locked. Locking a queue prevents an ISR from
|
||||
* accessing the queue event lists.
|
||||
*/
|
||||
#define prvLockQueue( pxQueue ) \
|
||||
taskENTER_CRITICAL(); \
|
||||
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); \
|
||||
{ \
|
||||
if( ( pxQueue )->cRxLock == queueUNLOCKED ) \
|
||||
{ \
|
||||
@ -264,7 +340,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
|
||||
( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \
|
||||
} \
|
||||
} \
|
||||
taskEXIT_CRITICAL()
|
||||
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) )
|
||||
|
||||
/*
|
||||
* Macro to increment cTxLock member of the queue data structure. It is
|
||||
@ -295,6 +371,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
|
||||
( pxQueue )->cRxLock = ( int8_t ) ( ( cRxLock ) + ( int8_t ) 1 ); \
|
||||
} \
|
||||
}
|
||||
#endif /* queueUSE_LOCKS == 1 */
|
||||
/*-----------------------------------------------------------*/
|
||||
|
||||
BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
|
||||
@ -305,19 +382,28 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
|
||||
|
||||
configASSERT( pxQueue );
|
||||
|
||||
if( xNewQueue == pdTRUE )
|
||||
{
|
||||
portMUX_INITIALIZE( &( pxQueue->xQueueLock ) );
|
||||
}
|
||||
|
||||
if( ( pxQueue != NULL ) &&
|
||||
( pxQueue->uxLength >= 1U ) &&
|
||||
/* Check for multiplication overflow. */
|
||||
( ( SIZE_MAX / pxQueue->uxLength ) >= pxQueue->uxItemSize ) )
|
||||
{
|
||||
taskENTER_CRITICAL();
|
||||
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
|
||||
{
|
||||
pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
|
||||
pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
|
||||
pxQueue->pcWriteTo = pxQueue->pcHead;
|
||||
pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - 1U ) * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
|
||||
#if ( queueUSE_LOCKS == 1 )
|
||||
{
|
||||
pxQueue->cRxLock = queueUNLOCKED;
|
||||
pxQueue->cTxLock = queueUNLOCKED;
|
||||
}
|
||||
#endif /* queueUSE_LOCKS == 1 */
|
||||
|
||||
if( xNewQueue == pdFALSE )
|
||||
{
|
||||
@ -349,7 +435,7 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
|
||||
vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
|
||||
}
|
||||
}
|
||||
taskEXIT_CRITICAL();
|
||||
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -606,6 +692,9 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
|
||||
/* In case this is a recursive mutex. */
|
||||
pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0;
|
||||
|
||||
/* Initialize the mutex's spinlock */
|
||||
portMUX_INITIALIZE( &( pxNewQueue->xQueueLock ) );
|
||||
|
||||
traceCREATE_MUTEX( pxNewQueue );
|
||||
|
||||
/* Start with the semaphore in the expected state. */
|
||||
@ -671,7 +760,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
|
||||
* calling task is the mutex holder, but not a good way of determining the
|
||||
* identity of the mutex holder, as the holder may change between the
|
||||
* following critical section exiting and the function returning. */
|
||||
taskENTER_CRITICAL();
|
||||
taskENTER_CRITICAL( &( pxSemaphore->xQueueLock ) );
|
||||
{
|
||||
if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX )
|
||||
{
|
||||
@ -682,7 +771,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
|
||||
pxReturn = NULL;
|
||||
}
|
||||
}
|
||||
taskEXIT_CRITICAL();
|
||||
taskEXIT_CRITICAL( &( pxSemaphore->xQueueLock ) );
|
||||
|
||||
return pxReturn;
|
||||
} /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */
|
||||
@ -908,7 +997,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
|
||||
* interest of execution time efficiency. */
|
||||
for( ; ; )
|
||||
{
|
||||
taskENTER_CRITICAL();
|
||||
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
|
||||
{
|
||||
/* Is there room on the queue now? The running task must be the
|
||||
* highest priority task wanting to access the queue. If the head item
|
||||
@ -1014,7 +1103,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
|
||||
}
|
||||
#endif /* configUSE_QUEUE_SETS */
|
||||
|
||||
taskEXIT_CRITICAL();
|
||||
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
|
||||
return pdPASS;
|
||||
}
|
||||
else
|
||||
@ -1023,7 +1112,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
|
||||
{
|
||||
/* The queue was full and no block time is specified (or
|
||||
* the block time has expired) so leave now. */
|
||||
taskEXIT_CRITICAL();
|
||||
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
|
||||
|
||||
/* Return to the original privilege level before exiting
|
||||
* the function. */
|
||||
@ -1043,9 +1132,39 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
|
||||
mtCOVERAGE_TEST_MARKER();
|
||||
}
|
||||
}
|
||||
}
|
||||
taskEXIT_CRITICAL();
|
||||
|
||||
/* If queue locks ARE NOT being used:
|
||||
* - At this point, the queue is full and entry time has been set
|
||||
* - We simply check for a time out, block if not timed out, or
|
||||
* return an error if we have timed out. */
|
||||
#if ( queueUSE_LOCKS == 0 )
|
||||
{
|
||||
/* Update the timeout state to see if it has expired yet. */
|
||||
if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
|
||||
{
|
||||
/* Not timed out yet. Block the current task. */
|
||||
traceBLOCKING_ON_QUEUE_SEND( pxQueue );
|
||||
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
|
||||
portYIELD_WITHIN_API();
|
||||
}
|
||||
else
|
||||
{
|
||||
/* We have timed out. Return an error. */
|
||||
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
|
||||
traceQUEUE_SEND_FAILED( pxQueue );
|
||||
return errQUEUE_FULL;
|
||||
}
|
||||
}
|
||||
#endif /* queueUSE_LOCKS == 0 */
|
||||
}
|
||||
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
|
||||
|
||||
/* If queue locks ARE being used:
|
||||
* - At this point, the queue is full and entry time has been set
|
||||
* - We follow the original procedure of locking the queue before
|
||||
* attempting to block. */
|
||||
#if ( queueUSE_LOCKS == 1 )
|
||||
{
|
||||
/* Interrupts and other tasks can send to and receive from the queue
|
||||
* now the critical section has been exited. */
|
||||
|
||||
@ -1093,6 +1212,8 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
|
||||
traceQUEUE_SEND_FAILED( pxQueue );
|
||||
return errQUEUE_FULL;
|
||||
}
|
||||
}
|
||||
#endif /* queueUSE_LOCKS == 1 */
|
||||
} /*lint -restore */
|
||||
}
|
||||
/*-----------------------------------------------------------*/
|
||||
@ -1131,11 +1252,16 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
|
||||
* read, instead return a flag to say whether a context switch is required or
|
||||
* not (i.e. has a task with a higher priority than us been woken by this
|
||||
* post). */
|
||||
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
|
||||
prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
|
||||
{
|
||||
if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
|
||||
{
|
||||
#if ( queueUSE_LOCKS == 1 )
|
||||
const int8_t cTxLock = pxQueue->cTxLock;
|
||||
#else
|
||||
/* Queue locks not used, so we treat it as unlocked. */
|
||||
const int8_t cTxLock = queueUNLOCKED;
|
||||
#endif /* queueUSE_LOCKS == 1 */
|
||||
const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;
|
||||
|
||||
traceQUEUE_SEND_FROM_ISR( pxQueue );
|
||||
@ -1242,11 +1368,15 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
|
||||
#endif /* configUSE_QUEUE_SETS */
|
||||
}
|
||||
else
|
||||
{
|
||||
#if ( queueUSE_LOCKS == 1 )
|
||||
{
|
||||
/* Increment the lock count so the task that unlocks the queue
|
||||
* knows that data was posted while it was locked. */
|
||||
prvIncrementQueueTxLock( pxQueue, cTxLock );
|
||||
}
|
||||
#endif /* queueUSE_LOCKS == 1 */
|
||||
}
|
||||
|
||||
xReturn = pdPASS;
|
||||
}
|
||||
@ -1256,7 +1386,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
|
||||
xReturn = errQUEUE_FULL;
|
||||
}
|
||||
}
|
||||
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
|
||||
prvEXIT_CRITICAL_OR_UNMASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
|
||||
|
||||
return xReturn;
|
||||
}
|
||||
@ -1302,7 +1432,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
* link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
{
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

@ -1311,9 +1441,15 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
* space'. */
if( uxMessagesWaiting < pxQueue->uxLength )
{
#if ( queueUSE_LOCKS == 1 )
const int8_t cTxLock = pxQueue->cTxLock;
#else
/* Queue locks not used, so we treat it as unlocked. */
const int8_t cTxLock = queueUNLOCKED;
#endif /* queueUSE_LOCKS == 1 */

traceQUEUE_SEND_FROM_ISR( pxQueue );
/* Todo: Reconcile tracing differences (IDF-8183) */
traceQUEUE_GIVE_FROM_ISR( pxQueue );

/* A task can only have an inherited priority if it is a mutex
* holder - and if there is a mutex holder then the mutex cannot be
@ -1408,21 +1544,26 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
#endif /* configUSE_QUEUE_SETS */
}
else
{
#if ( queueUSE_LOCKS == 1 )
{
/* Increment the lock count so the task that unlocks the queue
* knows that data was posted while it was locked. */
prvIncrementQueueTxLock( pxQueue, cTxLock );
}
#endif /* queueUSE_LOCKS == 1 */
}

xReturn = pdPASS;
}
else
{
traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
/* Todo: Reconcile tracing differences (IDF-8183) */
traceQUEUE_GIVE_FROM_ISR_FAILED( pxQueue );
xReturn = errQUEUE_FULL;
}
}
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
prvEXIT_CRITICAL_OR_UNMASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );

return xReturn;
}
@ -1455,7 +1596,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
* interest of execution time efficiency. */
for( ; ; )
{
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
{
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

@ -1487,7 +1628,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
mtCOVERAGE_TEST_MARKER();
}

taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
return pdPASS;
}
else
@ -1496,7 +1637,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
{
/* The queue was empty and no block time is specified (or
* the block time has expired) so leave now. */
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
traceQUEUE_RECEIVE_FAILED( pxQueue );
return errQUEUE_EMPTY;
}
@ -1513,9 +1654,39 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
mtCOVERAGE_TEST_MARKER();
}
}
}
taskEXIT_CRITICAL();

/* If queue locks ARE NOT being used:
* - At this point, the queue is empty and entry time has been set
* - We simply check for a time out, block if not timed out, or
* return an error if we have timed out. */
#if ( queueUSE_LOCKS == 0 )
{
/* Update the timeout state to see if it has expired yet. */
if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
{
/* Not timed out yet. Block the current task. */
traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
portYIELD_WITHIN_API();
}
else
{
/* We have timed out. Return an error. */
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
traceQUEUE_RECEIVE_FAILED( pxQueue );
return errQUEUE_EMPTY;
}
}
#endif /* queueUSE_LOCKS == 0 */
}
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

/* If queue locks ARE being used:
* - At this point, the queue is empty and entry time has been set
* - We follow the original procedure for locking the queue before
* attempting to block. */
#if ( queueUSE_LOCKS == 1 )
{
/* Interrupts and other tasks can send to and receive from the queue
* now the critical section has been exited. */

@ -1567,6 +1738,8 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
mtCOVERAGE_TEST_MARKER();
}
}
}
#endif /* queueUSE_LOCKS == 1 */
} /*lint -restore */
}
/*-----------------------------------------------------------*/
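Whichever of the two blocking strategies is compiled in, the public blocking-receive contract is the same. A consumer-side sketch, again with a hypothetical queue handle and item type:

/* Hedged sketch: a consumer task draining the placeholder xSensorQueue. */
void vConsumerTask( void * pvArg )
{
sample_t xSample;

for( ; ; )
{
/* Blocks for up to 100 ms; a timeout surfaces as a pdFALSE return. */
if( xQueueReceive( xSensorQueue, &xSample, pdMS_TO_TICKS( 100 ) ) == pdPASS )
{
/* ... process xSample ... */
}
}
}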
@ -1601,7 +1774,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
* of execution time efficiency. */
for( ; ; )
{
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
{
/* Semaphores are queues with an item size of 0, and where the
* number of messages in the queue is the semaphore's count value. */
@ -1611,7 +1784,8 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
* must be the highest priority task wanting to access the queue. */
if( uxSemaphoreCount > ( UBaseType_t ) 0 )
{
traceQUEUE_RECEIVE( pxQueue );
/* Todo: Reconcile tracing differences (IDF-8183) */
traceQUEUE_SEMAPHORE_RECEIVE( pxQueue );

/* Semaphores are queues with a data size of zero and where the
* messages waiting is the semaphore's count. Reduce the count. */
@ -1650,7 +1824,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
mtCOVERAGE_TEST_MARKER();
}

taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
return pdPASS;
}
else
@ -1659,7 +1833,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
{
/* The semaphore count was 0 and no block time is specified
* (or the block time has expired) so exit now. */
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
traceQUEUE_RECEIVE_FAILED( pxQueue );
return errQUEUE_EMPTY;
}
@ -1676,9 +1850,65 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
mtCOVERAGE_TEST_MARKER();
}
}
}
taskEXIT_CRITICAL();

/* If queue locks ARE NOT being used:
* - At this point, the semaphore/mutex is empty/held and entry time
* has been set.
* - We simply check for a time out, inherit priority and block if
* not timed out, or return an error if we have timed out. */
#if ( queueUSE_LOCKS == 0 )
{
/* Update the timeout state to see if it has expired yet. */
if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
{
/* Not timed out yet. If this is a mutex, make the holder
* inherit our priority, then block the current task. */
traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
#if ( configUSE_MUTEXES == 1 )
{
if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
{
xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* if ( configUSE_MUTEXES == 1 ) */
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
portYIELD_WITHIN_API();
}
else
{
/* We have timed out. If this is a mutex, make the holder
* disinherit our priority, then return an error. */
#if ( configUSE_MUTEXES == 1 )
{
if( xInheritanceOccurred != pdFALSE )
{
UBaseType_t uxHighestWaitingPriority;
uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue );
vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority );
}
}
#endif /* configUSE_MUTEXES */
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
traceQUEUE_RECEIVE_FAILED( pxQueue );
return errQUEUE_EMPTY;
}
}
#endif /* queueUSE_LOCKS == 0 */
}
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

/* If queue locks ARE being used:
* - At this point, the semaphore/mutex is empty/held and entry time
* has been set.
* - We follow the original procedure for locking the queue, inheriting
* priority, then attempting to block. */
#if ( queueUSE_LOCKS == 1 )
{
/* Interrupts and other tasks can give to and take from the semaphore
* now the critical section has been exited. */

@ -1700,11 +1930,11 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
{
if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
{
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
{
xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder );
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
}
else
{
@ -1752,7 +1982,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
* test the mutex type again to check it is actually a mutex. */
if( xInheritanceOccurred != pdFALSE )
{
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
{
UBaseType_t uxHighestWaitingPriority;

@ -1764,7 +1994,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue );
vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority );
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
}
}
#endif /* configUSE_MUTEXES */
@ -1777,6 +2007,8 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
mtCOVERAGE_TEST_MARKER();
}
}
}
#endif /* queueUSE_LOCKS == 1 */
} /*lint -restore */
}
/*-----------------------------------------------------------*/
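xQueueSemaphoreTake() is what xSemaphoreTake() expands to, so the priority inheritance handled above happens transparently behind the usual mutex pattern. A hedged sketch, where xBusMutex is a placeholder created elsewhere with xSemaphoreCreateMutex():

if( xSemaphoreTake( xBusMutex, pdMS_TO_TICKS( 50 ) ) == pdTRUE )
{
/* The holder may be priority-boosted here if a higher priority task contends. */
/* ... access the shared resource ... */
xSemaphoreGive( xBusMutex );
}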
@ -1809,7 +2041,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
* interest of execution time efficiency. */
for( ; ; )
{
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
{
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

@ -1847,7 +2079,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
mtCOVERAGE_TEST_MARKER();
}

taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
return pdPASS;
}
else
@ -1856,7 +2088,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
{
/* The queue was empty and no block time is specified (or
* the block time has expired) so leave now. */
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
traceQUEUE_PEEK_FAILED( pxQueue );
return errQUEUE_EMPTY;
}
@ -1874,9 +2106,39 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
mtCOVERAGE_TEST_MARKER();
}
}
}
taskEXIT_CRITICAL();

/* If queue locks ARE NOT being used:
* - At this point, the queue is empty and entry time has been set
* - We simply check for a time out, block if not timed out, or
* return an error if we have timed out. */
#if ( queueUSE_LOCKS == 0 )
{
/* Update the timeout state to see if it has expired yet. */
if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
{
/* Not timed out yet. Block the current task. */
traceBLOCKING_ON_QUEUE_PEEK( pxQueue );
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
portYIELD_WITHIN_API();
}
else
{
/* We have timed out. Return an error. */
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
traceQUEUE_PEEK_FAILED( pxQueue );
return errQUEUE_EMPTY;
}
}
#endif /* queueUSE_LOCKS == 0 */
}
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

/* If queue locks ARE being used:
* - At this point, the queue is empty and entry time has been set
* - We follow the original procedure for locking the queue before
* attempting to block. */
#if ( queueUSE_LOCKS == 1 )
{
/* Interrupts and other tasks can send to and receive from the queue
* now that the critical section has been exited. */

@ -1928,6 +2190,8 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
mtCOVERAGE_TEST_MARKER();
}
}
}
#endif /* queueUSE_LOCKS == 1 */
} /*lint -restore */
}
/*-----------------------------------------------------------*/
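Peeking follows the same two blocking strategies but leaves the item on the queue, so a later xQueueReceive() from any task or core still sees it. A short sketch with the same placeholder names as above:

sample_t xHead;

/* Non-blocking peek: inspect the head item without consuming it. */
if( xQueuePeek( xSensorQueue, &xHead, 0 ) == pdTRUE )
{
/* ... examine xHead; it remains queued ... */
}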
@ -1959,14 +2223,19 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
* link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
{
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

/* Cannot block in an ISR, so check there is data available. */
if( uxMessagesWaiting > ( UBaseType_t ) 0 )
{
#if ( queueUSE_LOCKS == 1 )
const int8_t cRxLock = pxQueue->cRxLock;
#else
/* Queue locks not used, so we treat it as unlocked. */
const int8_t cRxLock = queueUNLOCKED;
#endif /* queueUSE_LOCKS == 1 */

traceQUEUE_RECEIVE_FROM_ISR( pxQueue );

@ -2005,11 +2274,15 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
}
}
else
{
#if ( queueUSE_LOCKS == 1 )
{
/* Increment the lock count so the task that unlocks the queue
* knows that data was removed while it was locked. */
prvIncrementQueueRxLock( pxQueue, cRxLock );
}
#endif /* queueUSE_LOCKS == 1 */
}

xReturn = pdPASS;
}
@ -2019,7 +2292,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
}
}
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
prvEXIT_CRITICAL_OR_UNMASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );

return xReturn;
}
@ -2053,7 +2326,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
* link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
prvENTER_CRITICAL_OR_MASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );
{
/* Cannot block in an ISR, so check there is data available. */
if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
@ -2074,7 +2347,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
}
}
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
prvEXIT_CRITICAL_OR_UNMASK_ISR( &( pxQueue->xQueueLock ), uxSavedInterruptStatus );

return xReturn;
}
@ -2086,11 +2359,11 @@ UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )

configASSERT( xQueue );

taskENTER_CRITICAL();
taskENTER_CRITICAL( &( ( ( Queue_t * ) xQueue )->xQueueLock ) );
{
uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( ( ( Queue_t * ) xQueue )->xQueueLock ) );

return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
@ -2103,11 +2376,11 @@ UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )

configASSERT( pxQueue );

taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
{
uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
@ -2329,6 +2602,7 @@ static void prvCopyDataFromQueue( Queue_t * const pxQueue,
}
/*-----------------------------------------------------------*/

#if ( queueUSE_LOCKS == 1 )
static void prvUnlockQueue( Queue_t * const pxQueue )
{
/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */
@ -2337,7 +2611,7 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
* removed from the queue while the queue was locked. When a queue is
* locked items can be added or removed, but the event lists cannot be
* updated. */
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
{
int8_t cTxLock = pxQueue->cTxLock;

@ -2415,10 +2689,10 @@ static void prvUnlockQueue( Queue_t * const pxQueue )

pxQueue->cTxLock = queueUNLOCKED;
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );

/* Do the same for the Rx lock. */
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
{
int8_t cRxLock = pxQueue->cRxLock;

@ -2445,15 +2719,17 @@ static void prvUnlockQueue( Queue_t * const pxQueue )

pxQueue->cRxLock = queueUNLOCKED;
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
}
#endif /* queueUSE_LOCKS == 1 */
/*-----------------------------------------------------------*/

#if ( queueUSE_LOCKS == 1 )
static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
{
BaseType_t xReturn;

taskENTER_CRITICAL();
taskENTER_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) );
{
if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
{
@ -2464,10 +2740,11 @@ static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
xReturn = pdFALSE;
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) );

return xReturn;
}
#endif /* queueUSE_LOCKS == 1 */
/*-----------------------------------------------------------*/

BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
@ -2490,11 +2767,12 @@ BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/

#if ( queueUSE_LOCKS == 1 )
static BaseType_t prvIsQueueFull( const Queue_t * pxQueue )
{
BaseType_t xReturn;

taskENTER_CRITICAL();
taskENTER_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) );
{
if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
{
@ -2505,10 +2783,11 @@ static BaseType_t prvIsQueueFull( const Queue_t * pxQueue )
xReturn = pdFALSE;
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) );

return xReturn;
}
#endif /* queueUSE_LOCKS == 1 */
/*-----------------------------------------------------------*/

BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
@ -2828,6 +3107,10 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )

configASSERT( xQueue );

/* For SMP, we need to take the queue registry lock in case another
* core updates the registry simultaneously. */
taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
{
if( pcQueueName != NULL )
{
/* See if there is an empty space in the registry. A NULL name denotes
@ -2861,6 +3144,9 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
}
}
/* Release the previously taken queue registry lock. */
taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
}

#endif /* configQUEUE_REGISTRY_SIZE */
/*-----------------------------------------------------------*/
@ -2874,6 +3160,10 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )

configASSERT( xQueue );

/* For SMP, we need to take the queue registry lock in case another
* core updates the registry simultaneously. */
taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
{
/* Note there is nothing here to protect against another task adding or
* removing entries from the registry while it is being searched. */

@ -2889,6 +3179,9 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
mtCOVERAGE_TEST_MARKER();
}
}
}
/* Release the previously taken queue registry lock. */
taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );

return pcReturn;
} /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */
@ -2904,6 +3197,10 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )

configASSERT( xQueue );

/* For SMP, we need to take the queue registry lock in case another
* core updates the registry simultaneously. */
taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
{
/* See if the handle of the queue being unregistered is actually in the
* registry. */
for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
@ -2924,6 +3221,9 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
mtCOVERAGE_TEST_MARKER();
}
}
}
/* Release the previously taken queue registry lock. */
taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
@ -2945,6 +3245,12 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
* so it should be called with the scheduler locked and not from a critical
* section. */

/* For SMP, we need to take the queue's xQueueLock as we are about to
* access the queue. */
taskENTER_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) );
{
#if ( queueUSE_LOCKS == 1 )
{
/* Only do anything if there are no messages in the queue. This function
* will not actually cause the task to block, just place it on a blocked
* list. It will not block until the scheduler is unlocked - at which
@ -2952,6 +3258,8 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
* the queue is locked, and the calling task blocks on the queue, then the
* calling task will be immediately unblocked when the queue is unlocked. */
prvLockQueue( pxQueue );
}
#endif /* queueUSE_LOCKS == 1 */

if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
{
@ -2963,8 +3271,15 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
mtCOVERAGE_TEST_MARKER();
}

#if ( queueUSE_LOCKS == 1 )
{
prvUnlockQueue( pxQueue );
}
#endif /* queueUSE_LOCKS == 1 */
}
/* Release the previously taken xQueueLock. */
taskEXIT_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) );
}

#endif /* configUSE_TIMERS */
/*-----------------------------------------------------------*/
@ -2990,7 +3305,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
{
BaseType_t xReturn;

taskENTER_CRITICAL();
taskENTER_CRITICAL( &( ( ( Queue_t * ) xQueueOrSemaphore )->xQueueLock ) );
{
if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
{
@ -3009,7 +3324,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
xReturn = pdPASS;
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( ( ( Queue_t * ) xQueueOrSemaphore )->xQueueLock ) );

return xReturn;
}
@ -3039,12 +3354,12 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
}
else
{
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( ( ( Queue_t * ) pxQueueOrSemaphore )->xQueueLock ) );
{
/* The queue is no longer contained in the set. */
pxQueueOrSemaphore->pxQueueSetContainer = NULL;
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( ( ( Queue_t * ) pxQueueOrSemaphore )->xQueueLock ) );
xReturn = pdPASS;
}

@ -3096,9 +3411,18 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
configASSERT( pxQueueSetContainer ); /* LCOV_EXCL_BR_LINE */
configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );

/* In SMP, queue sets have their own xQueueLock. Thus we need to also
* acquire the queue set's xQueueLock before accessing it. */
taskENTER_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) );
{
if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
{
#if ( queueUSE_LOCKS == 1 )
const int8_t cTxLock = pxQueueSetContainer->cTxLock;
#else
/* Queue locks not used, so we treat it as unlocked. */
const int8_t cTxLock = queueUNLOCKED;
#endif /* queueUSE_LOCKS == 1 */

traceQUEUE_SET_SEND( pxQueueSetContainer );

@ -3125,14 +3449,21 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
}
}
else
{
#if ( queueUSE_LOCKS == 1 )
{
prvIncrementQueueTxLock( pxQueueSetContainer, cTxLock );
}
#endif /* queueUSE_LOCKS == 1 */
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
/* Release the previously acquired queue set's xQueueLock. */
taskEXIT_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) );

return xReturn;
}
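With the registry operations above now guarded by xQueueRegistryLock on SMP, registering queues from tasks on different cores is safe. Typical usage is unchanged; a sketch with a hypothetical queue handle:

QueueHandle_t xCmdQueue = xQueueCreate( 8, sizeof( uint32_t ) );

/* The name shows up in kernel-aware debuggers; safe from either core. */
vQueueAddToRegistry( xCmdQueue, "cmd_queue" );
configASSERT( pcQueueGetName( xCmdQueue ) != NULL );
vQueueUnregisterQueue( xCmdQueue );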
@ -43,6 +43,8 @@
#include "FreeRTOS.h"
#include "task.h"
#include "stream_buffer.h"
/* Include private IDF API additions for critical thread safety macros */
#include "esp_private/freertos_idf_additions_priv.h"

#if ( configUSE_TASK_NOTIFICATIONS != 1 )
#error configUSE_TASK_NOTIFICATIONS must be set to 1 to build stream_buffer.c
@ -64,7 +66,7 @@
/*lint -save -e9026 Function like macros allowed and needed here so they can be overridden. */
#ifndef sbRECEIVE_COMPLETED
#define sbRECEIVE_COMPLETED( pxStreamBuffer ) \
vTaskSuspendAll(); \
prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxStreamBuffer->xStreamBufferLock ) ); \
{ \
if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \
{ \
@ -74,7 +76,7 @@
( pxStreamBuffer )->xTaskWaitingToSend = NULL; \
} \
} \
( void ) xTaskResumeAll();
( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxStreamBuffer->xStreamBufferLock ) );
#endif /* sbRECEIVE_COMPLETED */

/* If user has provided a per-instance receive complete callback, then
@ -141,7 +143,7 @@
*/
#ifndef sbSEND_COMPLETED
#define sbSEND_COMPLETED( pxStreamBuffer ) \
vTaskSuspendAll(); \
prvENTER_CRITICAL_OR_SUSPEND_ALL( &( pxStreamBuffer->xStreamBufferLock ) ); \
{ \
if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \
{ \
@ -151,7 +153,7 @@
( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \
} \
} \
( void ) xTaskResumeAll();
( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &( pxStreamBuffer->xStreamBufferLock ) );
#endif /* sbSEND_COMPLETED */

/* If user has provided a per-instance send completed callback, then
@ -243,6 +245,8 @@ typedef struct StreamBufferDef_t /*lint !e9058 Style convention
StreamBufferCallbackFunction_t pxSendCompletedCallback; /* Optional callback called on send complete. sbSEND_COMPLETED is called if this is NULL. */
StreamBufferCallbackFunction_t pxReceiveCompletedCallback; /* Optional callback called on receive complete. sbRECEIVE_COMPLETED is called if this is NULL. */
#endif

portMUX_TYPE xStreamBufferLock; /* Spinlock required for SMP critical sections */
} StreamBuffer_t;

/*
@ -385,6 +389,11 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
pxSendCompletedCallback,
pxReceiveCompletedCallback );

/* Initialize the stream buffer's spinlock separately, as
* prvInitialiseNewStreamBuffer() is also called from
* xStreamBufferReset(). */
portMUX_INITIALIZE( &( ( ( StreamBuffer_t * ) pucAllocatedMemory )->xStreamBufferLock ) );

traceSTREAM_BUFFER_CREATE( ( ( StreamBuffer_t * ) pucAllocatedMemory ), xIsMessageBuffer );
}
else
@ -463,6 +472,11 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
* again. */
pxStreamBuffer->ucFlags |= sbFLAGS_IS_STATICALLY_ALLOCATED;

/* Initialize the stream buffer's spinlock separately, as
* prvInitialiseNewStreamBuffer() is also called from
* xStreamBufferReset(). */
portMUX_INITIALIZE( &( pxStreamBuffer->xStreamBufferLock ) );

traceSTREAM_BUFFER_CREATE( pxStreamBuffer, xIsMessageBuffer );

xReturn = ( StreamBufferHandle_t ) pxStaticStreamBuffer; /*lint !e9087 Data hiding requires cast to opaque type. */
@ -560,7 +574,7 @@ BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer )
#endif

/* Can only reset a message buffer if there are no tasks blocked on it. */
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
{
if( ( pxStreamBuffer->xTaskWaitingToReceive == NULL ) && ( pxStreamBuffer->xTaskWaitingToSend == NULL ) )
{
@ -590,7 +604,7 @@ BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer )
xReturn = pdPASS;
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );

return xReturn;
}
@ -736,7 +750,7 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
{
/* Wait until the required number of bytes are free in the message
* buffer. */
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
{
xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer );

@ -751,11 +765,11 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
}
else
{
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
break;
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );

traceBLOCKING_ON_STREAM_BUFFER_SEND( xStreamBuffer );
( void ) xTaskNotifyWait( ( uint32_t ) 0, ( uint32_t ) 0, NULL, xTicksToWait );
@ -932,7 +946,7 @@ size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
{
/* Checking if there is data and clearing the notification state must be
* performed atomically. */
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
{
xBytesAvailable = prvBytesInBuffer( pxStreamBuffer );

@ -955,7 +969,7 @@ size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );

if( xBytesAvailable <= xBytesToStoreMessageLength )
{
@ -1409,7 +1423,17 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
} /*lint !e529 !e438 xWriteValue is only used if configASSERT() is defined. */
#endif

( void ) memset( ( void * ) pxStreamBuffer, 0x00, sizeof( StreamBuffer_t ) ); /*lint !e9087 memset() requires void *. */
/* This function could be called from xStreamBufferReset(), so we reset the
* stream buffer fields manually in order to avoid clearing
* xStreamBufferLock. The xStreamBufferLock is initialized separately on
* stream buffer creation. */
pxStreamBuffer->xTail = ( size_t ) 0;
pxStreamBuffer->xHead = ( size_t ) 0;
pxStreamBuffer->xTaskWaitingToReceive = ( TaskHandle_t ) 0;
pxStreamBuffer->xTaskWaitingToSend = ( TaskHandle_t ) 0;
#if ( configUSE_TRACE_FACILITY == 1 )
pxStreamBuffer->uxStreamBufferNumber = ( UBaseType_t ) 0;
#endif
pxStreamBuffer->pucBuffer = pucBuffer;
pxStreamBuffer->xLength = xBufferSizeBytes;
pxStreamBuffer->xTriggerLevelBytes = xTriggerLevelBytes;
File diff suppressed because it is too large
@ -42,6 +42,8 @@
#include "task.h"
#include "queue.h"
#include "timers.h"
/* Include private IDF API additions for critical thread safety macros */
#include "esp_private/freertos_idf_additions_priv.h"

#if ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 0 )
#error configUSE_TIMERS must be set to 1 to make the xTimerPendFunctionCall() function available.
@ -147,6 +149,10 @@
PRIVILEGED_DATA static QueueHandle_t xTimerQueue = NULL;
PRIVILEGED_DATA static TaskHandle_t xTimerTaskHandle = NULL;

/* Spinlock required in SMP when accessing the timers. For now we use a single lock.
* Todo: Each timer could possibly have its own lock for increased granularity. */
PRIVILEGED_DATA static portMUX_TYPE xTimerLock = portMUX_INITIALIZER_UNLOCKED;

/*lint -restore */

/*-----------------------------------------------------------*/
@ -252,14 +258,16 @@
StackType_t * pxTimerTaskStackBuffer = NULL;
uint32_t ulTimerTaskStackSize;

/* The timer task is always pinned to core 0. Todo: IDF-7906 */
vApplicationGetTimerTaskMemory( &pxTimerTaskTCBBuffer, &pxTimerTaskStackBuffer, &ulTimerTaskStackSize );
xTimerTaskHandle = xTaskCreateStatic( prvTimerTask,
xTimerTaskHandle = xTaskCreateStaticPinnedToCore( prvTimerTask,
configTIMER_SERVICE_TASK_NAME,
ulTimerTaskStackSize,
NULL,
( ( UBaseType_t ) configTIMER_TASK_PRIORITY ) | portPRIVILEGE_BIT,
pxTimerTaskStackBuffer,
pxTimerTaskTCBBuffer );
pxTimerTaskTCBBuffer,
0 );

if( xTimerTaskHandle != NULL )
{
@ -268,12 +276,14 @@
}
#else /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
{
xReturn = xTaskCreate( prvTimerTask,
/* The timer task is always pinned to core 0. Todo: IDF-7906 */
xReturn = xTaskCreatePinnedToCore( prvTimerTask,
configTIMER_SERVICE_TASK_NAME,
configTIMER_TASK_STACK_DEPTH,
NULL,
( ( UBaseType_t ) configTIMER_TASK_PRIORITY ) | portPRIVILEGE_BIT,
&xTimerTaskHandle );
&xTimerTaskHandle,
0 );
}
#endif /* configSUPPORT_STATIC_ALLOCATION */
}
@ -458,7 +468,7 @@
Timer_t * pxTimer = xTimer;

configASSERT( xTimer );
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xTimerLock );
{
if( xAutoReload != pdFALSE )
{
@ -469,7 +479,7 @@
pxTimer->ucStatus &= ( ( uint8_t ) ~tmrSTATUS_IS_AUTORELOAD );
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xTimerLock );
}
/*-----------------------------------------------------------*/

@ -479,7 +489,7 @@
BaseType_t xReturn;

configASSERT( xTimer );
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xTimerLock );
{
if( ( pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD ) == 0 )
{
@ -492,7 +502,7 @@
xReturn = pdTRUE;
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xTimerLock );

return xReturn;
}
@ -635,7 +645,7 @@
TickType_t xTimeNow;
BaseType_t xTimerListsWereSwitched;

vTaskSuspendAll();
prvENTER_CRITICAL_OR_SUSPEND_ALL( &xTimerLock );
{
/* Obtain the time now to make an assessment as to whether the timer
* has expired or not. If obtaining the time causes the lists to switch
@ -649,7 +659,7 @@
/* The tick count has not overflowed, has the timer expired? */
if( ( xListWasEmpty == pdFALSE ) && ( xNextExpireTime <= xTimeNow ) )
{
( void ) xTaskResumeAll();
( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xTimerLock );
prvProcessExpiredTimer( xNextExpireTime, xTimeNow );
}
else
@ -669,7 +679,7 @@

vQueueWaitForMessageRestricted( xTimerQueue, ( xNextExpireTime - xTimeNow ), xListWasEmpty );

if( xTaskResumeAll() == pdFALSE )
if( prvEXIT_CRITICAL_OR_RESUME_ALL( &xTimerLock ) == pdFALSE )
{
/* Yield to wait for either a command to arrive, or the
* block time to expire. If a command arrived between the
@ -685,7 +695,7 @@
}
else
{
( void ) xTaskResumeAll();
( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xTimerLock );
}
}
}
@ -963,7 +973,7 @@
/* Check that the list from which active timers are referenced, and the
* queue used to communicate with the timer service, have been
* initialised. */
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xTimerLock );
{
if( xTimerQueue == NULL )
{
@ -1005,7 +1015,7 @@
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xTimerLock );
}
/*-----------------------------------------------------------*/

@ -1017,7 +1027,7 @@
configASSERT( xTimer );

/* Is the timer in the list of active timers? */
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xTimerLock );
{
if( ( pxTimer->ucStatus & tmrSTATUS_IS_ACTIVE ) == 0 )
{
@ -1028,7 +1038,7 @@
xReturn = pdTRUE;
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xTimerLock );

return xReturn;
} /*lint !e818 Can't be pointer to const due to the typedef. */
@ -1041,11 +1051,11 @@

configASSERT( xTimer );

taskENTER_CRITICAL();
taskENTER_CRITICAL( &xTimerLock );
{
pvReturn = pxTimer->pvTimerID;
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xTimerLock );

return pvReturn;
}
@ -1058,11 +1068,11 @@

configASSERT( xTimer );

taskENTER_CRITICAL();
taskENTER_CRITICAL( &xTimerLock );
{
pxTimer->pvTimerID = pvNewID;
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xTimerLock );
}
/*-----------------------------------------------------------*/
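The timer service task is now created with the ...PinnedToCore variants, passing core 0 as the final argument. Application tasks use the same IDF API; a sketch where the task function and names are placeholders:

void vBlinkTask( void * pvArg )
{
for( ; ; )
{
vTaskDelay( pdMS_TO_TICKS( 500 ) );
}
}

/* Pin to core 0, mirroring what the timer service task now does. */
xTaskCreatePinnedToCore( vBlinkTask, "blink", 2048, NULL, 5, NULL, 0 );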
@ -444,6 +444,11 @@ void vPortTCBPreDeleteHook( void *pxTCB );
* - Maps to forward declared functions
* ------------------------------------------------------------------------------------------------------------------ */

#if CONFIG_FREERTOS_USE_KERNEL_10_5_1
#define portGET_CORE_ID() xPortGetCoreID()
#define portYIELD_CORE( x ) vPortYieldOtherCore( x )
#endif

// --------------------- Interrupts ------------------------

#define portDISABLE_INTERRUPTS() portSET_INTERRUPT_MASK_FROM_ISR()

@ -8,6 +8,13 @@
#include "freertos/FreeRTOSConfig.h"
#include "soc/soc_caps.h"

#if CONFIG_FREERTOS_USE_KERNEL_10_5_1
#define pxCurrentTCB pxCurrentTCBs
.extern pxCurrentTCBs
#else
.extern pxCurrentTCB
#endif

#if CONFIG_ESP_SYSTEM_HW_STACK_GUARD
#include "esp_private/hw_stack_guard.h"
#endif

@ -427,6 +427,11 @@ void vPortTCBPreDeleteHook( void *pxTCB );
* - Maps to forward declared functions
* ------------------------------------------------------------------------------------------------------------------ */

#if CONFIG_FREERTOS_USE_KERNEL_10_5_1
#define portGET_CORE_ID() xPortGetCoreID()
#define portYIELD_CORE( x ) vPortYieldOtherCore( x )
#endif

// --------------------- Interrupts ------------------------

/**

@ -3,7 +3,7 @@
*
* SPDX-License-Identifier: MIT
*
* SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileContributor: 2016-2023 Espressif Systems (Shanghai) CO LTD
*/
/*
* Copyright (c) 2015-2019 Cadence Design Systems, Inc.
@ -33,7 +33,13 @@

#define TOPOFSTACK_OFFS 0x00 /* StackType_t *pxTopOfStack */

#if CONFIG_FREERTOS_USE_KERNEL_10_5_1
#define pxCurrentTCB pxCurrentTCBs
.extern pxCurrentTCBs
#else
.extern pxCurrentTCB
#endif

#if XCHAL_CP_NUM > 0
/* Offsets used to get a task's coprocessor save area (CPSA) from its TCB */
.extern offset_pxEndOfStack

@ -4,11 +4,12 @@ menu "FreeRTOS"
# Upstream FreeRTOS configurations go here

config FREERTOS_USE_KERNEL_10_5_1
bool "Use v10.5.1 Kernel (EXPERIMENTAL)"
depends on IDF_EXPERIMENTAL_FEATURES
bool "Use v10.5.1 Kernel (BETA)"
default n
help
Hidden option for development/testing purposes to enable building with the v10.5.1 kernel
This option enables building with the FreeRTOS v10.5.1 kernel.

Note: The v10.5.1 kernel is still in BETA, thus is not production ready.

config FREERTOS_SMP
bool "Run the Amazon SMP FreeRTOS kernel instead (FEATURE UNDER DEVELOPMENT)"
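With the new depends clause, the BETA kernel option only appears once experimental features are opted into. A sketch of the relevant sdkconfig lines (the exact project configuration workflow is assumed, e.g. via menuconfig):

CONFIG_IDF_EXPERIMENTAL_FEATURES=y
CONFIG_FREERTOS_USE_KERNEL_10_5_1=y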
@ -199,6 +199,7 @@
#define INCLUDE_xTaskResumeFromISR 1
#define INCLUDE_xTimerPendFunctionCall 1
#define INCLUDE_xTaskGetSchedulerState 1
#define INCLUDE_xTaskGetCurrentTaskHandle 1

/* -------------------- Trace Macros ----------------------- */

@ -257,10 +258,12 @@

#if !CONFIG_FREERTOS_SMP
#ifdef CONFIG_FREERTOS_UNICORE
#define configNUM_CORES 1
#define configNUMBER_OF_CORES 1
#else
#define configNUM_CORES 2
#define configNUMBER_OF_CORES 2
#endif /* CONFIG_FREERTOS_UNICORE */
/* For compatibility */
#define configNUM_CORES configNUMBER_OF_CORES
#ifdef CONFIG_FREERTOS_VTASKLIST_INCLUDE_COREID
#define configTASKLIST_INCLUDE_COREID 1
#endif /* CONFIG_FREERTOS_VTASKLIST_INCLUDE_COREID */

@ -52,7 +52,7 @@

/* -------------------- API Includes ----------------------- */

#define INCLUDE_xTaskGetCurrentTaskHandle 0 /* not defined in POSIX simulator */
/* Todo: Reconcile INCLUDE_option differences (IDF-8186) */
#define INCLUDE_vTaskDelayUntil 1
#define INCLUDE_uxTaskGetStackHighWaterMark2 0

@ -42,6 +42,17 @@
/* ----------------------- System -------------------------- */

#define configUSE_NEWLIB_REENTRANT 1
#if CONFIG_FREERTOS_USE_KERNEL_10_5_1

/* - FreeRTOS provides a default for configTLS_BLOCK_TYPE.
* - We simply provide our own INIT and DEINIT functions
* - We set "SET" to a blank macro since there is no need to set the reentrancy
* pointer. All newlib functions call __getreent. */
#define configINIT_TLS_BLOCK( xTLSBlock ) esp_reent_init( &( xTLSBlock ) )
#define configSET_TLS_BLOCK( xTLSBlock )
#define configDEINIT_TLS_BLOCK( xTLSBlock ) _reclaim_reent( &( xTLSBlock ) )

#endif /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */

#define configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H 1

@ -61,8 +72,8 @@

/* -------------------- API Includes ----------------------- */

/* Todo: Reconcile INCLUDE_option differences (IDF-8186) */
#define INCLUDE_xTaskDelayUntil 1
#define INCLUDE_xTaskGetCurrentTaskHandle 1
#define INCLUDE_uxTaskGetStackHighWaterMark2 1

/* ------------------------------------------------ ESP-IDF Additions --------------------------------------------------

@ -69,6 +69,17 @@
/* ----------------------- System -------------------------- */

#define configUSE_NEWLIB_REENTRANT 1
#if CONFIG_FREERTOS_USE_KERNEL_10_5_1

/* - FreeRTOS provides a default for configTLS_BLOCK_TYPE.
* - We simply provide our own INIT and DEINIT functions
* - We set "SET" to a blank macro since there is no need to set the reentrancy
* pointer. All newlib functions call __getreent. */
#define configINIT_TLS_BLOCK( xTLSBlock ) esp_reent_init( &( xTLSBlock ) )
#define configSET_TLS_BLOCK( xTLSBlock )
#define configDEINIT_TLS_BLOCK( xTLSBlock ) _reclaim_reent( &( xTLSBlock ) )

#endif /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */

#define configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H 1

@ -88,8 +99,8 @@

/* -------------------- API Includes ----------------------- */

/* Todo: Reconcile INCLUDE_option differences (IDF-8186) */
#define INCLUDE_xTaskDelayUntil 1
#define INCLUDE_xTaskGetCurrentTaskHandle 1
#define INCLUDE_uxTaskGetStackHighWaterMark2 1

/* ------------------------------------------------ ESP-IDF Additions --------------------------------------------------
@ -20,6 +20,11 @@
|
||||
* additional API.
|
||||
*/
|
||||
|
||||
#if CONFIG_FREERTOS_USE_KERNEL_10_5_1
|
||||
#define pxCurrentTCB pxCurrentTCBs
|
||||
#else
|
||||
#endif
|
||||
|
||||
/* ------------------------------------------------- Static Asserts ------------------------------------------------- */
|
||||
|
||||
/*
|
||||
@ -221,11 +226,24 @@ _Static_assert( offsetof( StaticTask_t, pxDummy8 ) == offsetof( TCB_t, pxEndOfSt
|
||||
pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) );
|
||||
|
||||
if( pxNewTCB != NULL )
|
||||
{
|
||||
#if CONFIG_FREERTOS_USE_KERNEL_10_5_1
|
||||
{
|
||||
memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );
|
||||
|
||||
/* Allocate space for the stack used by the task being created.
|
||||
* The base of the stack memory stored in the TCB so the task can
|
||||
* be deleted later if required. */
|
||||
pxNewTCB->pxStack = ( StackType_t * ) pvPortMallocStack( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
|
||||
}
|
||||
#else /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */
|
||||
{
|
||||
/* Allocate space for the stack used by the task being created.
|
||||
* The base of the stack memory stored in the TCB so the task can
|
||||
* be deleted later if required. */
|
||||
pxNewTCB->pxStack = ( StackType_t * ) pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
|
||||
}
|
||||
#endif /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */
|
||||
|
||||
if( pxNewTCB->pxStack == NULL )
|
||||
{
|
||||
@ -239,8 +257,17 @@ _Static_assert( offsetof( StaticTask_t, pxDummy8 ) == offsetof( TCB_t, pxEndOfSt
|
||||
{
|
||||
StackType_t * pxStack;
|
||||
|
||||
#if CONFIG_FREERTOS_USE_KERNEL_10_5_1
|
||||
{
|
||||
/* Allocate space for the stack used by the task being created. */
|
||||
pxStack = pvPortMallocStack( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation is the stack. */
|
||||
}
|
||||
#else /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */
|
||||
{
|
||||
/* Allocate space for the stack used by the task being created. */
|
||||
pxStack = pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation is the stack. */
|
||||
}
|
||||
#endif /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */
|
||||
|
||||
if( pxStack != NULL )
|
||||
{
|
||||
@ -249,6 +276,12 @@ _Static_assert( offsetof( StaticTask_t, pxDummy8 ) == offsetof( TCB_t, pxEndOfSt
|
||||
|
||||
if( pxNewTCB != NULL )
|
||||
{
|
||||
#if CONFIG_FREERTOS_USE_KERNEL_10_5_1
|
||||
{
|
||||
memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );
|
||||
}
|
||||
#endif /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */
|
||||
|
||||
/* Store the stack location in the TCB. */
|
||||
pxNewTCB->pxStack = pxStack;
|
||||
}
|
||||
@ -256,8 +289,16 @@ _Static_assert( offsetof( StaticTask_t, pxDummy8 ) == offsetof( TCB_t, pxEndOfSt
|
||||
{
|
||||
/* The stack cannot be used as the TCB was not created. Free
|
||||
* it again. */
|
||||
#if CONFIG_FREERTOS_USE_KERNEL_10_5_1
|
||||
{
|
||||
vPortFreeStack( pxStack );
|
||||
}
|
||||
#else /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */
|
||||
{
|
||||
vPortFree( pxStack );
|
||||
}
|
||||
#endif /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -356,6 +397,13 @@ _Static_assert( offsetof( StaticTask_t, pxDummy8 ) == offsetof( TCB_t, pxEndOfSt
|
||||
/* The memory used for the task's TCB and stack are passed into this
|
||||
* function - use them. */
|
||||
pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */
|
||||
|
||||
#if CONFIG_FREERTOS_USE_KERNEL_10_5_1
|
||||
{
|
||||
memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );
|
||||
}
|
||||
#endif /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */
|
||||
|
||||
pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer;
|
||||
|
||||
#if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
|
||||
@ -405,48 +453,7 @@ _Static_assert( offsetof( StaticTask_t, pxDummy8 ) == offsetof( TCB_t, pxEndOfSt
|
||||
|
||||
/* ------------------------------------------------- Task Utilities ------------------------------------------------- */
|
||||
|
||||
#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
|
||||
|
||||
TaskHandle_t xTaskGetIdleTaskHandleForCPU( BaseType_t xCoreID )
|
||||
{
|
||||
configASSERT( xCoreID >= 0 && xCoreID < configNUM_CORES );
|
||||
configASSERT( ( xIdleTaskHandle[ xCoreID ] != NULL ) );
|
||||
return ( TaskHandle_t ) xIdleTaskHandle[ xCoreID ];
|
||||
}
|
||||
|
||||
#endif /* INCLUDE_xTaskGetIdleTaskHandle */
|
||||
/*----------------------------------------------------------*/
|
||||
|
||||
#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
|
||||
|
||||
TaskHandle_t xTaskGetCurrentTaskHandleForCPU( BaseType_t xCoreID )
|
||||
{
|
||||
TaskHandle_t xReturn;
|
||||
|
||||
#if CONFIG_FREERTOS_SMP
|
||||
{
|
||||
xReturn = xTaskGetCurrentTaskHandleCPU( xCoreID );
|
||||
}
|
||||
#else /* CONFIG_FREERTOS_SMP */
|
||||
{
|
||||
if( xCoreID < configNUM_CORES )
|
||||
{
|
||||
xReturn = pxCurrentTCB[ xCoreID ];
|
||||
}
|
||||
else
|
||||
{
|
||||
xReturn = NULL;
|
||||
}
|
||||
}
|
||||
#endif /* CONFIG_FREERTOS_SMP */
|
||||
|
||||
return xReturn;
|
||||
}
|
||||
|
||||
#endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
|
||||
/*----------------------------------------------------------*/
|
||||
|
||||
BaseType_t xTaskGetAffinity( TaskHandle_t xTask )
|
||||
BaseType_t xTaskGetCoreID( TaskHandle_t xTask )
|
||||
{
|
||||
BaseType_t xReturn;
|
||||
|
||||
@ -472,11 +479,10 @@ BaseType_t xTaskGetAffinity( TaskHandle_t xTask )
|
||||
#else /* CONFIG_FREERTOS_SMP */
|
||||
TCB_t * pxTCB;
|
||||
|
||||
/* Todo: Remove xCoreID for single core builds (IDF-7894) */
|
||||
pxTCB = prvGetTCBFromHandle( xTask );
|
||||
/* Simply read the xCoreID member of the TCB */
|
||||
taskENTER_CRITICAL( &xKernelLock );
|
||||
|
||||
xReturn = pxTCB->xCoreID;
|
||||
taskEXIT_CRITICAL_ISR( &xKernelLock );
|
||||
#endif /* CONFIG_FREERTOS_SMP */
|
||||
}
|
||||
#else /* configNUM_CORES > 1 */
|
||||
@ -490,6 +496,140 @@ BaseType_t xTaskGetAffinity( TaskHandle_t xTask )
|
||||
}
|
||||
/*----------------------------------------------------------*/
|
||||
|
||||
#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
|
||||
|
||||
TaskHandle_t xTaskGetIdleTaskHandleForCore( BaseType_t xCoreID )
|
||||
{
|
||||
#if CONFIG_FREERTOS_USE_KERNEL_10_5_1
|
||||
{
|
||||
/* If xTaskGetIdleTaskHandle() is called before the scheduler has been
|
||||
* started, then xIdleTaskHandle will be NULL. */
|
||||
configASSERT( ( xCoreID < configNUMBER_OF_CORES ) && ( xCoreID != tskNO_AFFINITY ) );
|
||||
configASSERT( ( xIdleTaskHandle[ xCoreID ] != NULL ) );
|
||||
return xIdleTaskHandle[ xCoreID ];
|
||||
}
|
||||
#else /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */
|
||||
{
|
||||
configASSERT( xCoreID >= 0 && xCoreID < configNUM_CORES );
|
||||
configASSERT( ( xIdleTaskHandle[ xCoreID ] != NULL ) );
|
||||
return ( TaskHandle_t ) xIdleTaskHandle[ xCoreID ];
|
||||
}
|
||||
#endif /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */
|
||||
}
|
||||
|
||||
#endif /* INCLUDE_xTaskGetIdleTaskHandle */
|
||||
/*----------------------------------------------------------*/
|
||||
|
||||
#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
|
||||
|
||||
TaskHandle_t xTaskGetCurrentTaskHandleForCore( BaseType_t xCoreID )
|
||||
{
|
||||
TaskHandle_t xReturn;
|
||||
|
||||
#if CONFIG_FREERTOS_USE_KERNEL_10_5_1
|
||||
{
|
||||
configASSERT( xCoreID < configNUMBER_OF_CORES );
|
||||
configASSERT( xCoreID != tskNO_AFFINITY );
|
||||
|
||||
/* For SMP, we need to take the kernel lock here as we are about to
|
||||
* access kernel data structures. For single core, a critical section is
|
||||
* not required as this is not called from an interrupt and the current
|
||||
* TCB will always be the same for any individual execution thread. */
|
||||
taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
|
||||
{
|
||||
xReturn = pxCurrentTCBs[ xCoreID ];
|
||||
}
|
||||
/* Release the previously taken kernel lock. */
|
||||
taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
|
||||
}
|
||||
#else /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */
|
||||
{
|
||||
#if CONFIG_FREERTOS_SMP
|
||||
{
|
||||
xReturn = xTaskGetCurrentTaskHandleCPU( xCoreID );
|
||||
}
|
||||
#else /* CONFIG_FREERTOS_SMP */
|
||||
{
|
||||
if( xCoreID < configNUM_CORES )
|
||||
{
|
||||
xReturn = pxCurrentTCB[ xCoreID ];
|
||||
}
|
||||
else
|
||||
{
|
||||
xReturn = NULL;
|
||||
}
|
||||
}
|
||||
#endif /* CONFIG_FREERTOS_SMP */
|
||||
}
|
||||
#endif /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */
|
||||
|
||||
return xReturn;
|
||||
}
|
||||
|
||||
#endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
|
||||
/*----------------------------------------------------------*/
|
||||
|
||||
#if ( CONFIG_FREERTOS_USE_KERNEL_10_5_1 && ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
|
||||
|
||||
configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimeCounterForCore( BaseType_t xCoreID )
|
||||
{
|
||||
uint32_t ulRunTimeCounter;
|
||||
|
||||
configASSERT( xCoreID < configNUMBER_OF_CORES );
|
||||
configASSERT( xCoreID != tskNO_AFFINITY );
|
||||
|
||||
/* For SMP, we need to take the kernel lock here as we are about to
|
||||
* access kernel data structures. */
|
||||
taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
|
||||
{
|
||||
ulRunTimeCounter = xIdleTaskHandle[ xCoreID ]->ulRunTimeCounter;
|
||||
}
|
||||
/* Release the previously taken kernel lock. */
|
||||
taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
|
||||
|
||||
return ulRunTimeCounter;
|
||||
}
|
||||
|
||||
#endif /* ( CONFIG_FREERTOS_USE_KERNEL_10_5_1 && ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
|
||||
/*----------------------------------------------------------*/
|
||||
|
||||
#if ( CONFIG_FREERTOS_USE_KERNEL_10_5_1 && ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )

    configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimePercentForCore( BaseType_t xCoreID )
    {
        configRUN_TIME_COUNTER_TYPE ulTotalTime, ulReturn;

        configASSERT( xCoreID < configNUMBER_OF_CORES );
        configASSERT( xCoreID != tskNO_AFFINITY );

        ulTotalTime = portGET_RUN_TIME_COUNTER_VALUE();

        /* For percentage calculations. */
        ulTotalTime /= ( configRUN_TIME_COUNTER_TYPE ) 100;

        /* Avoid divide by zero errors. */
        if( ulTotalTime > ( configRUN_TIME_COUNTER_TYPE ) 0 )
        {
            /* For SMP, we need to take the kernel lock here as we are about
             * to access kernel data structures. */
            taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
            {
                ulReturn = xIdleTaskHandle[ xCoreID ]->ulRunTimeCounter / ulTotalTime;
            }
            /* Release the previously taken kernel lock. */
            taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
        }
        else
        {
            ulReturn = 0;
        }

        return ulReturn;
    }

#endif /* ( CONFIG_FREERTOS_USE_KERNEL_10_5_1 && ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */
/*-----------------------------------------------------------*/
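Since the function returns idle time as a percentage of total run time, a rough per-core load figure is simply its complement. A minimal sketch (the helper name is an assumption):

    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    /* Hypothetical helper: approximate CPU load of a core as 100 minus the
     * idle task's percentage run time. Both figures are cumulative since
     * boot, not a sliding window. */
    static unsigned cpu_load_percent(BaseType_t xCoreID)
    {
        configRUN_TIME_COUNTER_TYPE ulIdlePercent = ulTaskGetIdleRunTimePercentForCore(xCoreID);

        return 100u - (unsigned) ulIdlePercent;
    }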
uint8_t * pxTaskGetStackStart( TaskHandle_t xTask )
{
    TCB_t * pxTCB;
@ -800,8 +940,16 @@ uint8_t * pxTaskGetStackStart( TaskHandle_t xTask )
    else
    {
        /* We have a task; return its reentrant struct. */
        #if CONFIG_FREERTOS_USE_KERNEL_10_5_1
        {
            ret = &pxCurTask->xTLSBlock;
        }
        #else /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */
        {
            ret = &pxCurTask->xNewLib_reent;
        }
        #endif /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */
    }

    return ret;
}
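For context, pxTaskGetStackStart above pairs naturally with the stock high-water-mark query; a hedged sketch of a stack report (helper name and output format are assumptions):

    #include <stdio.h>
    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    /* Hypothetical helper: report where a task's stack starts and the
     * smallest amount of free stack ever observed (in words, per the
     * FreeRTOS convention). */
    static void log_stack_info(TaskHandle_t xTask)
    {
        uint8_t *pucStackStart = pxTaskGetStackStart(xTask);
        UBaseType_t uxMinFree = uxTaskGetStackHighWaterMark(xTask);

        printf("stack of \"%s\" starts at %p, min free ever: %u words\n",
               pcTaskGetName(xTask), (void *) pucStackStart, (unsigned) uxMinFree);
    }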
@ -31,6 +31,7 @@
#endif
/* *INDENT-ON* */

/* -------------------------------------------------- Task Creation ------------------------------------------------- */

#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
@ -108,6 +109,35 @@

/* ------------------------------------------------- Task Utilities ------------------------------------------------- */
/**
 * @brief Get the current core ID of a particular task
 *
 * Helper function to get the core ID of a particular task. If the task is
 * pinned to a particular core, the core ID is returned. If the task is not
 * pinned to a particular core, tskNO_AFFINITY is returned.
 *
 * If CONFIG_FREERTOS_UNICORE is enabled, this function simply returns 0.
 *
 * [refactor-todo] See if this needs to be deprecated (IDF-8145)(IDF-8164)
 *
 * @note If CONFIG_FREERTOS_SMP is enabled, please call vTaskCoreAffinityGet()
 * instead.
 * @note In IDF FreeRTOS, when configNUMBER_OF_CORES == 1 this function will
 * always return 0.
 * @param xTask The task to query
 * @return The task's core ID or tskNO_AFFINITY
 */
BaseType_t xTaskGetCoreID( TaskHandle_t xTask );

/** @cond */
/* Todo: Deprecate this API in favor of xTaskGetCoreID (IDF-8163) */
static inline __attribute__( ( always_inline ) )
BaseType_t xTaskGetAffinity( TaskHandle_t xTask )
{
    return xTaskGetCoreID( xTask );
}
/** @endcond */
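As an illustration of the documented tskNO_AFFINITY behavior, a caller-side sketch (the helper is hypothetical):

    #include <stdio.h>
    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    /* Hypothetical helper: branch on a task's pinning before doing
     * per-core work. */
    static void report_pinning(TaskHandle_t xTask)
    {
        BaseType_t xCoreID = xTaskGetCoreID(xTask);

        if (xCoreID == tskNO_AFFINITY)
        {
            printf("\"%s\" is unpinned\n", pcTaskGetName(xTask));
        }
        else
        {
            printf("\"%s\" is pinned to core %d\n", pcTaskGetName(xTask), (int) xCoreID);
        }
    }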
/**
 * @brief Get the handle of idle task for the given core.
 *
@ -118,7 +148,16 @@
 * @param xCoreID The core to query
 * @return Handle of the idle task for the queried core
 */
TaskHandle_t xTaskGetIdleTaskHandleForCPU( BaseType_t xCoreID );
TaskHandle_t xTaskGetIdleTaskHandleForCore( BaseType_t xCoreID );

/** @cond */
/* Todo: Deprecate this API in favor of xTaskGetIdleTaskHandleForCore (IDF-8163) */
static inline __attribute__( ( always_inline ) )
TaskHandle_t xTaskGetIdleTaskHandleForCPU( BaseType_t xCoreID )
{
    return xTaskGetIdleTaskHandleForCore( xCoreID );
}
/** @endcond */
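A typical use of the per-core idle handle is stack monitoring; a hedged sketch (helper name assumed):

    #include <stdio.h>
    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    /* Hypothetical helper: watermark each core's idle task stack, e.g. from
     * a periodic monitor task. */
    static void log_idle_stacks(void)
    {
        for (BaseType_t xCoreID = 0; xCoreID < configNUMBER_OF_CORES; xCoreID++)
        {
            TaskHandle_t xIdle = xTaskGetIdleTaskHandleForCore(xCoreID);

            printf("idle task on core %d: min free stack %u words\n",
                   (int) xCoreID, (unsigned) uxTaskGetStackHighWaterMark(xIdle));
        }
    }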
/**
 * @brief Get the handle of the task currently running on a certain core
 *
@ -134,25 +173,53 @@ TaskHandle_t xTaskGetIdleTaskHandleForCPU( BaseType_t xCoreID );
 * @param xCoreID The core to query
 * @return Handle of the current task running on the queried core
 */
TaskHandle_t xTaskGetCurrentTaskHandleForCPU( BaseType_t xCoreID );
TaskHandle_t xTaskGetCurrentTaskHandleForCore( BaseType_t xCoreID );

/** @cond */
/* Todo: Deprecate this API in favor of xTaskGetCurrentTaskHandleForCore (IDF-8163) */
static inline __attribute__( ( always_inline ) )
TaskHandle_t xTaskGetCurrentTaskHandleForCPU( BaseType_t xCoreID )
{
    return xTaskGetCurrentTaskHandleForCore( xCoreID );
}
/** @endcond */
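The rename stays source-compatible through the inline shims above; a minimal caller-side sketch of the transition (the helper is hypothetical):

    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    /* Sketch: both spellings compile during the transition. The ForCPU name
     * is the deprecated always-inline shim that forwards to the ForCore
     * function. The two results can only differ if a context switch happens
     * between the calls. */
    static void check_rename_compat(void)
    {
        TaskHandle_t xViaOldName = xTaskGetCurrentTaskHandleForCPU(0);  /* deprecated */
        TaskHandle_t xViaNewName = xTaskGetCurrentTaskHandleForCore(0); /* preferred */

        (void) xViaOldName;
        (void) xViaNewName;
    }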
#if CONFIG_FREERTOS_USE_KERNEL_10_5_1

/**
 * @brief Get the current core affinity of a particular task
 * @brief Get the total execution time of a particular core's idle task
 *
 * Helper function to get the core affinity of a particular task. If the task is
 * pinned to a particular core, the core ID is returned. If the task is not
 * pinned to a particular core, tskNO_AFFINITY is returned.
 * This function is equivalent to ulTaskGetIdleRunTimeCounter() but queries the
 * idle task of a particular core.
 *
 * If CONFIG_FREERTOS_UNICORE is enabled, this function simply returns 0.
 *
 * [refactor-todo] See if this needs to be deprecated (IDF-8145)(IDF-8164)
 *
 * @note If CONFIG_FREERTOS_SMP is enabled, please call vTaskCoreAffinityGet()
 * instead.
 * @param xTask The task to query
 * @return The task's core ID or tskNO_AFFINITY
 * @param xCoreID Core ID of the idle task to query
 * @return The total run time of the idle task
 */
BaseType_t xTaskGetAffinity( TaskHandle_t xTask );
configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimeCounterForCore( BaseType_t xCoreID );

/**
 * @brief Get the percentage run time of a particular core's idle task
 *
 * This function is equivalent to ulTaskGetIdleRunTimePercent() but queries the
 * idle task of a particular core.
 *
 * @param xCoreID Core ID of the idle task to query
 * @return The percentage run time of the idle task
 */
configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimePercentForCore( BaseType_t xCoreID );

#else /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */

/* CMock Workaround: CMock currently doesn't preprocess files, thus functions
 * guarded by ifdef still get mocked. We provide a dummy define here so that
 * functions using configRUN_TIME_COUNTER_TYPE can still be mocked.
 *
 * Todo: Will be removed when V10.5.1 becomes the default kernel. */
#ifndef configRUN_TIME_COUNTER_TYPE
    #define configRUN_TIME_COUNTER_TYPE    unsigned int
#endif

#endif /* CONFIG_FREERTOS_USE_KERNEL_10_5_1 */
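To illustrate why the fallback define matters, a hypothetical host test (assuming CMock's usual <function>_ExpectAndReturn naming and a generated mock_tasks.h, neither of which is part of this commit) can still set expectations on the run-time-stats API even when the pre-10.5.1 kernel lacks configRUN_TIME_COUNTER_TYPE:

    #include "unity.h"
    #include "mock_tasks.h"   /* hypothetical CMock-generated mock of this header */

    /* Sketch: thanks to the fallback define, this signature is mockable on
     * both kernel variants. */
    void test_idle_counter_query(void)
    {
        ulTaskGetIdleRunTimeCounterForCore_ExpectAndReturn(0, 1234);

        TEST_ASSERT_EQUAL(1234, ulTaskGetIdleRunTimeCounterForCore(0));
    }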
/**
 * Returns the start of the stack associated with xTask.
@ -171,6 +171,8 @@ entries:
    tasks:ulTaskGenericNotifyValueClear (default)
    if FREERTOS_GENERATE_RUN_TIME_STATS = y:
        tasks:ulTaskGetIdleRunTimeCounter (default)
        if FREERTOS_USE_KERNEL_10_5_1 = y:
            tasks:ulTaskGetIdleRunTimePercent (default)
    tasks:prvAddCurrentTaskToDelayedList (default)
    if FREERTOS_USE_TRACE_FACILITY = y:
        tasks:uxTaskGetSystemState (default)
@ -193,6 +195,8 @@
    timers:uxTimerGetReloadMode (default)
    timers:xTimerGetExpiryTime (default)
    timers:pcTimerGetName (default)
    if FREERTOS_USE_KERNEL_10_5_1 = y:
        timers:prvReloadTimer (default)
    timers:prvProcessExpiredTimer (default)
    timers:prvTimerTask (default)
    timers:prvProcessTimerOrBlockTask (default)
@ -25,10 +25,12 @@ entries:
    tasks:xTaskCreatePinnedToCore (default)
    tasks:xTaskCreateStaticPinnedToCore (default)
    # Task Utilities
    tasks:xTaskGetCurrentTaskHandleForCPU (default)
    tasks:xTaskGetIdleTaskHandleForCPU (default)
    tasks:xTaskGetAffinity (default)
    tasks:xTaskGetCoreID (default)
    tasks:xTaskGetIdleTaskHandleForCore (default)
    tasks:xTaskGetCurrentTaskHandleForCore (default)
    if FREERTOS_USE_KERNEL_10_5_1 = y && FREERTOS_GENERATE_RUN_TIME_STATS = y:
        tasks:ulTaskGetIdleRunTimeCounterForCore (default)
        tasks:ulTaskGetIdleRunTimePercentForCore (default)
    tasks:pxTaskGetStackStart (default)
    tasks:prvTaskPriorityRaise (default)
    tasks:prvTaskPriorityRestore (default)
@ -12,6 +12,7 @@ components/freertos/FreeRTOS-Kernel/include/freertos/
components/freertos/FreeRTOS-Kernel/portable/xtensa/include/freertos/
components/freertos/FreeRTOS-Kernel-SMP/include/freertos/
components/freertos/FreeRTOS-Kernel-SMP/portable/xtensa/include/freertos/
components/freertos/FreeRTOS-Kernel-V10.5.1/include/freertos/

components/log/include/esp_log_internal.h