feat(ipc): Add a new non-blocking IPC call

KonstantinKondrashov 2023-11-25 23:32:47 +08:00 committed by Konstantin Kondrashov
parent 004b037743
commit a3bfb3f857
5 changed files with 134 additions and 36 deletions

View File

@@ -15,7 +15,7 @@
 #include "esp_app_trace.h"
 #include "esp_freertos_hooks.h"
 #include "dbg_stubs.h"
-#include "esp_ipc.h"
+#include "esp_private/esp_ipc.h"
 #include "hal/wdt_hal.h"
 #if CONFIG_IDF_TARGET_ESP32
 #include "esp32/rom/libc_stubs.h"
@@ -82,9 +82,8 @@ void gcov_create_task(void *arg)
 void gcov_create_task_tick_hook(void)
 {
-    extern esp_err_t esp_ipc_start_gcov_from_isr(uint32_t cpu_id, esp_ipc_func_t func, void* arg);
     if (s_create_gcov_task) {
-        if (esp_ipc_start_gcov_from_isr(xPortGetCoreID(), &gcov_create_task, NULL) == ESP_OK) {
+        if (esp_ipc_call_nonblocking(xPortGetCoreID(), &gcov_create_task, NULL) == ESP_OK) {
            s_create_gcov_task = false;
        }
    }

View File

@@ -13,11 +13,14 @@
 #include "esp_ipc.h"
 #include "esp_private/esp_ipc_isr.h"
 #include "esp_attr.h"
+#include "esp_cpu.h"
 #include "freertos/FreeRTOS.h"
 #include "freertos/task.h"
 #include "freertos/semphr.h"
+
+#define IPC_MAX_PRIORITY (configMAX_PRIORITIES - 1)
 
 #if !defined(CONFIG_FREERTOS_UNICORE) || defined(CONFIG_APPTRACE_GCOV_ENABLE)
 #if CONFIG_COMPILER_OPTIMIZATION_NONE
@@ -40,10 +43,11 @@ typedef enum {
     IPC_WAIT_FOR_END,
 } esp_ipc_wait_t;
 
-#if CONFIG_APPTRACE_GCOV_ENABLE
-static volatile esp_ipc_func_t s_gcov_func = NULL; // Gcov dump starter function which should be called by high priority task
-static void * volatile s_gcov_func_arg;            // Argument to pass into s_gcov_func
-#endif
+static esp_ipc_wait_t volatile s_wait_for[portNUM_PROCESSORS];
+static volatile esp_ipc_func_t s_no_block_func[portNUM_PROCESSORS] = { 0 };
+static volatile bool s_no_block_func_and_arg_are_ready[portNUM_PROCESSORS] = { 0 };
+static void * volatile s_no_block_func_arg[portNUM_PROCESSORS];
 
 static void IRAM_ATTR ipc_task(void* arg)
 {
@@ -55,29 +59,23 @@ static void IRAM_ATTR ipc_task(void* arg)
 #endif
 
     while (true) {
-        uint32_t ipc_wait;
-        xTaskNotifyWait(0, ULONG_MAX, &ipc_wait, portMAX_DELAY);
+        ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
 
-#if CONFIG_APPTRACE_GCOV_ENABLE
-        if (s_gcov_func) {
-            (*s_gcov_func)(s_gcov_func_arg);
-            s_gcov_func = NULL;
-            /* we can not interfer with IPC calls so no need for further processing */
-            // esp_ipc API and gcov_from_isr APIs can be processed together if they came at the same time
-            if (ipc_wait == IPC_WAIT_NO) {
-                continue;
-            }
+        if (s_no_block_func_and_arg_are_ready[cpuid] && s_no_block_func[cpuid]) {
+            (*s_no_block_func[cpuid])(s_no_block_func_arg[cpuid]);
+            s_no_block_func_and_arg_are_ready[cpuid] = false;
+            s_no_block_func[cpuid] = NULL;
         }
-#endif // CONFIG_APPTRACE_GCOV_ENABLE
 
 #ifndef CONFIG_FREERTOS_UNICORE
         if (s_func[cpuid]) {
             // we need to cache s_func, s_func_arg and ipc_ack variables locally
             // because they can be changed by a subsequent IPC call (after xTaskNotify(caller_task_handle)).
             esp_ipc_func_t func = s_func[cpuid];
-            s_func[cpuid] = NULL;
             void* func_arg = s_func_arg[cpuid];
+            esp_ipc_wait_t ipc_wait = s_wait_for[cpuid];
             SemaphoreHandle_t ipc_ack = s_ipc_ack[cpuid];
+            s_func[cpuid] = NULL;
 
             if (ipc_wait == IPC_WAIT_FOR_START) {
                 xSemaphoreGive(ipc_ack);
@@ -120,7 +118,7 @@ static void esp_ipc_init(void)
         s_ipc_mutex[i] = xSemaphoreCreateMutexStatic(&s_ipc_mutex_buffer[i]);
         s_ipc_ack[i] = xSemaphoreCreateBinaryStatic(&s_ipc_ack_buffer[i]);
         BaseType_t res = xTaskCreatePinnedToCore(ipc_task, task_name, IPC_STACK_SIZE, (void*) i,
-                                                 configMAX_PRIORITIES - 1, &s_ipc_task_handle[i], i);
+                                                 IPC_MAX_PRIORITY, &s_ipc_task_handle[i], i);
         assert(res == pdTRUE);
         (void)res;
     }
@@ -152,9 +150,11 @@ static esp_err_t esp_ipc_call_and_wait(uint32_t cpu_id, esp_ipc_func_t func, voi
     xSemaphoreTake(s_ipc_mutex[0], portMAX_DELAY);
 #endif
 
-    s_func[cpu_id] = func;
     s_func_arg[cpu_id] = arg;
-    xTaskNotify(s_ipc_task_handle[cpu_id], wait_for, eSetValueWithOverwrite);
+    s_wait_for[cpu_id] = wait_for;
+    // s_func must be set after all the other parameters. The ipc_task uses it as the indicator that the IPC call is ready.
+    s_func[cpu_id] = func;
+    xTaskNotifyGive(s_ipc_task_handle[cpu_id]);
     xSemaphoreTake(s_ipc_ack[cpu_id], portMAX_DELAY);
 
 #ifdef CONFIG_ESP_IPC_USES_CALLERS_PRIORITY
@@ -175,28 +175,33 @@ esp_err_t esp_ipc_call_blocking(uint32_t cpu_id, esp_ipc_func_t func, void* arg)
     return esp_ipc_call_and_wait(cpu_id, func, arg, IPC_WAIT_FOR_END);
 }
 
-// currently this is only called from gcov component
-// the top level guarantees that the next call will be only after the previous one has completed
-#if CONFIG_APPTRACE_GCOV_ENABLE
-esp_err_t esp_ipc_start_gcov_from_isr(uint32_t cpu_id, esp_ipc_func_t func, void* arg)
+esp_err_t esp_ipc_call_nonblocking(uint32_t cpu_id, esp_ipc_func_t func, void* arg)
 {
-    if (xTaskGetSchedulerState() != taskSCHEDULER_RUNNING) {
+    if (cpu_id >= portNUM_PROCESSORS || s_ipc_task_handle[cpu_id] == NULL) {
+        return ESP_ERR_INVALID_ARG;
+    }
+    if (cpu_id == xPortGetCoreID() && xTaskGetSchedulerState() != taskSCHEDULER_RUNNING) {
         return ESP_ERR_INVALID_STATE;
     }
-    // Since it is called from an interrupt, it can not wait for a mutex to be released.
-    if (s_gcov_func == NULL) {
-        s_gcov_func_arg = arg;
-        s_gcov_func = func;
-        // If the target task already has a notification pending then its notification value is not updated (WithoutOverwrite).
-        xTaskNotifyFromISR(s_ipc_task_handle[cpu_id], IPC_WAIT_NO, eSetValueWithoutOverwrite, NULL);
+    // Since it can be called from an interrupt or while the scheduler is suspended, it cannot wait for a mutex to be released.
+    if (esp_cpu_compare_and_set((volatile uint32_t *)&s_no_block_func[cpu_id], 0, (uint32_t)func)) {
+        s_no_block_func_arg[cpu_id] = arg;
+        s_no_block_func_and_arg_are_ready[cpu_id] = true;
+        if (xPortInIsrContext()) {
+            vTaskNotifyGiveFromISR(s_ipc_task_handle[cpu_id], NULL);
+        } else {
+#ifdef CONFIG_ESP_IPC_USES_CALLERS_PRIORITY
+            vTaskPrioritySet(s_ipc_task_handle[cpu_id], IPC_MAX_PRIORITY);
+#endif
+            xTaskNotifyGive(s_ipc_task_handle[cpu_id]);
+        }
         return ESP_OK;
     }
     // the previous call was not completed
     return ESP_FAIL;
 }
-#endif // CONFIG_APPTRACE_GCOV_ENABLE
 
 #endif // !defined(CONFIG_FREERTOS_UNICORE) || defined(CONFIG_APPTRACE_GCOV_ENABLE)
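
Note: the handoff added above is lock-free. The caller claims the per-CPU slot with esp_cpu_compare_and_set() on s_no_block_func, publishes the argument, sets the ready flag, and only then notifies the IPC task; the task consumes in the opposite order and clears s_no_block_func last to release the slot. Below is a minimal host-side sketch of that claim/publish/consume pattern; the GCC __atomic built-in stands in for esp_cpu_compare_and_set, and all names are illustrative, not ESP-IDF APIs.

/* Sketch of the claim/publish/consume handoff (illustrative names, plain C). */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef void (*ipc_func_t)(void *arg);

static ipc_func_t volatile s_slot_func;   /* NULL means the slot is free */
static void * volatile     s_slot_arg;
static volatile bool       s_slot_ready;

/* Caller side: claim the slot atomically, publish the argument, then mark it ready. */
static bool post_call(ipc_func_t func, void *arg)
{
    ipc_func_t expected = NULL;
    if (!__atomic_compare_exchange_n(&s_slot_func, &expected, func, false,
                                     __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)) {
        return false;              /* previous call not consumed yet -> ESP_FAIL in the real API */
    }
    s_slot_arg = arg;
    s_slot_ready = true;           /* set last: the consumer checks this flag */
    /* ...the real code notifies the IPC task here (task notification)... */
    return true;
}

/* Worker side: run the callback only once both the function and its argument are published. */
static void consume_call(void)
{
    if (s_slot_ready && s_slot_func) {
        s_slot_func(s_slot_arg);
        s_slot_ready = false;
        s_slot_func = NULL;        /* clear last: frees the slot for the next caller */
    }
}

static void say_hello(void *arg) { printf("hello from %s\n", (const char *)arg); }

int main(void)
{
    post_call(say_hello, "caller");
    consume_call();                /* in the real code this runs in the IPC task */
    return 0;
}

Setting the flag last on the producer side and clearing the function pointer last on the consumer side is what lets the API report ESP_FAIL instead of blocking when the previous request has not been consumed yet.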

View File

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
  *
  * SPDX-License-Identifier: Apache-2.0
  */

View File

@@ -0,0 +1,47 @@
+/*
+ * SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#pragma once
+
+#include "../esp_ipc.h"
+#include "sdkconfig.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if !defined(CONFIG_FREERTOS_UNICORE) || defined(CONFIG_APPTRACE_GCOV_ENABLE)
+
+/**
+ * @brief Execute a callback on a given CPU without any blocking operations for the caller
+ *
+ * Since it does not perform any blocking operations, it is suitable to be called from interrupts
+ * or even while the scheduler on the current core is suspended.
+ *
+ * The function:
+ * - does not wait for the callback to begin or complete execution,
+ * - does not change the IPC task priority.
+ * It returns as soon as a notification has been sent to the IPC task to execute the callback.
+ *
+ * @param[in] cpu_id CPU where the given function should be executed (0 or 1)
+ * @param[in] func Pointer to a function of type void func(void* arg) to be executed
+ * @param[in] arg Arbitrary argument of type void* to be passed into the function
+ *
+ * @return
+ *  - ESP_ERR_INVALID_ARG if cpu_id is invalid
+ *  - ESP_ERR_INVALID_STATE if 1. the IPC tasks have not been initialized yet, or
+ *                             2. cpu_id requests an IPC on the current core while the FreeRTOS scheduler
+ *                                is not running on it (so the IPC task cannot be executed)
+ *  - ESP_FAIL if the IPC is busy because a previous call has not completed yet
+ *  - ESP_OK otherwise
+ */
+esp_err_t esp_ipc_call_nonblocking(uint32_t cpu_id, esp_ipc_func_t func, void* arg);
+
+#endif // !defined(CONFIG_FREERTOS_UNICORE) || defined(CONFIG_APPTRACE_GCOV_ENABLE)
+
+#ifdef __cplusplus
+}
+#endif
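
A typical call site for the new API might look like the sketch below (assuming a dual-core target with the IPC tasks already running; handle_event, trigger_on_other_core, and the static counter are hypothetical names used only for illustration):

#include "esp_err.h"
#include "esp_log.h"
#include "esp_private/esp_ipc.h"

static const char *TAG = "ipc_example";

/* Hypothetical callback: runs in the IPC task on the target core. */
static void handle_event(void *arg)
{
    int *counter = (int *)arg;
    (*counter)++;
}

/* The argument must stay valid until the callback has run, hence a static variable here. */
static int s_event_counter;

void trigger_on_other_core(void)
{
    /* Queue the callback on core 1 and return immediately; ESP_FAIL means the
     * previous non-blocking call has not been consumed yet, so retry later. */
    esp_err_t err = esp_ipc_call_nonblocking(1, handle_event, &s_event_counter);
    if (err != ESP_OK) {
        ESP_LOGW(TAG, "non-blocking IPC not queued: %s", esp_err_to_name(err));
    }
}

Because the call does not wait for the callback to run, the argument has to outlive the call site, which is why the sketch uses a static counter rather than a stack variable.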

View File

@@ -12,6 +12,7 @@
 #include "unity.h"
 #if !CONFIG_FREERTOS_UNICORE
 #include "esp_ipc.h"
+#include "esp_private/esp_ipc.h"
 #endif
 #include "esp_log.h"
 #include "esp_rom_sys.h"
@@ -162,4 +163,50 @@ TEST_CASE("Test ipc_task can not wake up blocking task early", "[ipc]")
     TEST_ASSERT_EQUAL(31, val2);
 }
 
+TEST_CASE("Test ipc call nonblocking", "[ipc]")
+{
+    int val_for_1_call = 20;
+    TEST_ESP_OK(esp_ipc_call_nonblocking(1, test_func_ipc_cb2, (void*)&val_for_1_call));
+    TEST_ASSERT_EQUAL(20, val_for_1_call);
+
+    int val_for_2_call = 30;
+    TEST_ESP_ERR(ESP_FAIL, esp_ipc_call_nonblocking(1, test_func_ipc_cb3, (void*)&val_for_2_call));
+
+    vTaskDelay(150 / portTICK_PERIOD_MS);
+    TEST_ASSERT_EQUAL(21, val_for_1_call);
+
+    TEST_ESP_OK(esp_ipc_call_nonblocking(1, test_func_ipc_cb3, (void*)&val_for_2_call));
+    vTaskDelay(550 / portTICK_PERIOD_MS);
+    TEST_ASSERT_EQUAL(31, val_for_2_call);
+}
+
+static void test_func_ipc_cb4(void *arg)
+{
+    int *val = (int *)arg;
+    *val = *val + 1;
+}
+
+TEST_CASE("Test ipc call nonblocking when FreeRTOS Scheduler is suspended", "[ipc]")
+{
+#ifdef CONFIG_FREERTOS_SMP
+    // Note: Scheduler suspension behavior changed in FreeRTOS SMP
+    vTaskPreemptionDisable(NULL);
+#else
+    // Disable scheduler on the current CPU
+    vTaskSuspendAll();
+#endif // CONFIG_FREERTOS_SMP
+
+    volatile int value = 20;
+    TEST_ESP_OK(esp_ipc_call_nonblocking(1, test_func_ipc_cb4, (void*)&value));
+    while (value == 20) { };
+    TEST_ASSERT_EQUAL(21, value);
+
+#ifdef CONFIG_FREERTOS_SMP
+    // Note: Scheduler suspension behavior changed in FreeRTOS SMP
+    vTaskPreemptionEnable(NULL);
+#else
+    xTaskResumeAll();
+#endif
+}
+
 #endif /* !CONFIG_FREERTOS_UNICORE */