From dec846a9530a395c164c95484cff8ab7053db964 Mon Sep 17 00:00:00 2001 From: Ivan Grokhotkov Date: Wed, 24 Aug 2016 18:01:01 +0800 Subject: [PATCH 01/10] components/spi_flash: make spi_flash APIs dual-core-compatible See esp_spi_flash.c for implementation notes. --- components/spi_flash/esp_spi_flash.c | 258 ++++++++++++++----- components/spi_flash/include/esp_spi_flash.h | 9 + 2 files changed, 197 insertions(+), 70 deletions(-) diff --git a/components/spi_flash/esp_spi_flash.c b/components/spi_flash/esp_spi_flash.c index df919bc2d0..e3d09acaf0 100644 --- a/components/spi_flash/esp_spi_flash.c +++ b/components/spi_flash/esp_spi_flash.c @@ -21,6 +21,194 @@ #include #include #include +#include "sdkconfig.h" + +/* + Driver for SPI flash read/write/erase operations + + In order to perform some flash operations, we need to make sure both CPUs + are not running any code from flash for the duration of the flash operation. + In a single-core setup this is easy: we disable interrupts/scheduler and do + the flash operation. In the dual-core setup this is slightly more complicated. + We need to make sure that the other CPU doesn't run any code from flash. + + SPI flash driver starts two tasks (spi_flash_op_block_task), one pinned to + each CPU. Each task is associated with its own semaphore. + + When SPI flash API is called on CPU A (can be PRO or APP), we wake up the task + on CPU B by "giving" the semaphore associated with it. Tasks resumes, disables + cache on CPU B, and acknowledges that it has taken the semaphore by setting + a flag (s_flash_op_can_start). Flash API function running on CPU A waits for + this flag to be set. Once the flag is set, it disables cache on CPU A and + starts flash operation. + + While flash operation is running, interrupts can still run on CPU B. + We assume that all interrupt code is placed into RAM. + + Once flash operation is complete, function on CPU A sets another flag, + s_flash_op_complete, to let the task on CPU B know that it can re-enable + cache and release the CPU. Then the function on CPU A re-enables the cache on + CPU A as well and returns control to the calling code. Task on CPU B returns + to suspended state by "taking" the semaphore. + + Additionally, all API functions are protected with a mutex (s_flash_op_mutex). + + In a single core environment (CONFIG_FREERTOS_UNICORE enabled), we simply + disable both caches, no inter-CPU communication takes place. +*/ + +static esp_err_t spi_flash_translate_rc(SpiFlashOpResult rc); +extern void Cache_Flush(int); + +#ifndef CONFIG_FREERTOS_UNICORE + +static TaskHandle_t s_flash_op_tasks[2]; +static SemaphoreHandle_t s_flash_op_mutex; +static SemaphoreHandle_t s_flash_op_sem[2]; +static bool s_flash_op_can_start = false; +static bool s_flash_op_complete = false; + + +// Task whose duty is to block other tasks from running on a given CPU +static void IRAM_ATTR spi_flash_op_block_task(void* arg) +{ + uint32_t cpuid = (uint32_t) arg; + while (true) { + // Wait for flash operation to be initiated. + // This will be indicated by giving the semaphore corresponding to + // this CPU. + if (xSemaphoreTake(s_flash_op_sem[cpuid], portMAX_DELAY) != pdTRUE) { + // TODO: when can this happen? 
+ abort(); + } + // Disable cache on this CPU + Cache_Read_Disable(cpuid); + // Signal to the flash API function that flash operation can start + s_flash_op_can_start = true; + while (!s_flash_op_complete) { + // until we have a way to use interrupts for inter-CPU communication, + // busy loop here and wait for the other CPU to finish flash operation + } + // Flash operation is complete, re-enable cache + Cache_Read_Enable(cpuid); + } + // TODO: currently this is unreachable code. Introduce spi_flash_uninit + // function which will signal to both tasks that they can shut down. + // Not critical at this point, we don't have a use case for stopping + // SPI flash driver yet. + // Also need to delete the semaphore here. + vTaskDelete(NULL); +} + +void spi_flash_init() +{ + s_flash_op_can_start = false; + s_flash_op_complete = false; + s_flash_op_sem[0] = xSemaphoreCreateBinary(); + s_flash_op_sem[1] = xSemaphoreCreateBinary(); + s_flash_op_mutex = xSemaphoreCreateMutex(); + // Start two tasks, one on each CPU, with max priorities + // TODO: optimize stack usage. Stack size 512 is too small. + xTaskCreatePinnedToCore(spi_flash_op_block_task, "flash_op_pro", 1024, (void*) 0, + configMAX_PRIORITIES - 1, &s_flash_op_tasks[0], 0); + xTaskCreatePinnedToCore(spi_flash_op_block_task, "flash_op_app", 1024, (void*) 1, + configMAX_PRIORITIES - 1, &s_flash_op_tasks[1], 1); +} + +static void IRAM_ATTR spi_flash_disable_interrupts_caches_and_other_cpu() +{ + // Take the API lock + xSemaphoreTake(s_flash_op_mutex, portMAX_DELAY); + const uint32_t cpuid = xPortGetCoreID(); + uint32_t other_cpuid = !cpuid; + s_flash_op_can_start = false; + s_flash_op_complete = false; + // Signal to the spi_flash_op_block_task on the other CPU that we need it to + // disable cache there and block other tasks from executing. + xSemaphoreGive(s_flash_op_sem[other_cpuid]); + while (!s_flash_op_can_start) { + // Busy loop and wait for spi_flash_op_block_task to take the semaphore on the + // other CPU. 
+ } + vTaskSuspendAll(); + // Disable cache on this CPU as well + Cache_Read_Disable(cpuid); +} + +static void IRAM_ATTR spi_flash_enable_interrupts_caches_and_other_cpu() +{ + uint32_t cpuid = xPortGetCoreID(); + // Signal to spi_flash_op_block_task that flash operation is complete + s_flash_op_complete = true; + // Re-enable cache on this CPU + Cache_Read_Enable(cpuid); + // Release API lock + xSemaphoreGive(s_flash_op_mutex); + xTaskResumeAll(); +} + +#else // CONFIG_FREERTOS_UNICORE + +void spi_flash_init() +{ + // No-op in single core mode +} + +static void IRAM_ATTR spi_flash_disable_interrupts_caches_and_other_cpu() +{ + vTaskSuspendAll(); + Cache_Read_Disable(0); + Cache_Read_Disable(1); +} + +static void IRAM_ATTR spi_flash_enable_interrupts_caches_and_other_cpu() +{ + Cache_Read_Enable(0); + Cache_Read_Enable(1); + xTaskResumeAll(); +} + +#endif // CONFIG_FREERTOS_UNICORE + + +esp_err_t IRAM_ATTR spi_flash_erase_sector(uint16_t sec) +{ + spi_flash_disable_interrupts_caches_and_other_cpu(); + SpiFlashOpResult rc; + rc = SPIUnlock(); + if (rc == SPI_FLASH_RESULT_OK) { + rc = SPIEraseSector(sec); + } + Cache_Flush(0); + Cache_Flush(1); + spi_flash_enable_interrupts_caches_and_other_cpu(); + return spi_flash_translate_rc(rc); +} + +esp_err_t IRAM_ATTR spi_flash_write(uint32_t dest_addr, const uint32_t *src, uint32_t size) +{ + spi_flash_disable_interrupts_caches_and_other_cpu(); + SpiFlashOpResult rc; + rc = SPIUnlock(); + if (rc == SPI_FLASH_RESULT_OK) { + rc = SPIWrite(dest_addr, src, (int32_t) size); + } + Cache_Flush(0); + Cache_Flush(1); + spi_flash_enable_interrupts_caches_and_other_cpu(); + return spi_flash_translate_rc(rc); +} + +esp_err_t IRAM_ATTR spi_flash_read(uint32_t src_addr, uint32_t *dest, uint32_t size) +{ + spi_flash_disable_interrupts_caches_and_other_cpu(); + SpiFlashOpResult rc; + rc = SPIRead(src_addr, dest, (int32_t) size); + Cache_Flush(0); + Cache_Flush(1); + spi_flash_enable_interrupts_caches_and_other_cpu(); + return spi_flash_translate_rc(rc); +} static esp_err_t spi_flash_translate_rc(SpiFlashOpResult rc) { @@ -34,73 +222,3 @@ static esp_err_t spi_flash_translate_rc(SpiFlashOpResult rc) return ESP_ERR_FLASH_OP_FAIL; } } - -extern void Cache_Flush(int); - -static void IRAM_ATTR spi_flash_disable_interrupts_caches_and_app_cpu() -{ - vTaskSuspendAll(); - // SET_PERI_REG_MASK(APPCPU_CTRL_REG_C, DPORT_APPCPU_RUNSTALL); - Cache_Read_Disable(0); - Cache_Read_Disable(1); -} - -static void IRAM_ATTR spi_flash_enable_interrupts_caches_and_app_cpu() -{ - Cache_Read_Enable(0); - Cache_Read_Enable(1); - // CLEAR_PERI_REG_MASK(APPCPU_CTRL_REG_C, DPORT_APPCPU_RUNSTALL); - xTaskResumeAll(); -} - -esp_err_t IRAM_ATTR spi_flash_erase_sector(uint16_t sec) -{ - spi_flash_disable_interrupts_caches_and_app_cpu(); - SpiFlashOpResult rc; - if (xPortGetCoreID() == 1) { - rc = SPI_FLASH_RESULT_ERR; - } - else { - rc = SPIUnlock(); - if (rc == SPI_FLASH_RESULT_OK) { - rc = SPIEraseSector(sec); - } - } - Cache_Flush(0); - Cache_Flush(1); - spi_flash_enable_interrupts_caches_and_app_cpu(); - return spi_flash_translate_rc(rc); -} - -esp_err_t IRAM_ATTR spi_flash_write(uint32_t dest_addr, const uint32_t *src, uint32_t size) -{ - spi_flash_disable_interrupts_caches_and_app_cpu(); - SpiFlashOpResult rc; - if (xPortGetCoreID() == 1) { - rc = SPI_FLASH_RESULT_ERR; - } - else { - rc = SPIUnlock(); - if (rc == SPI_FLASH_RESULT_OK) { - rc = SPIWrite(dest_addr, src, (int32_t) size); - } - } - Cache_Flush(0); - Cache_Flush(1); - spi_flash_enable_interrupts_caches_and_app_cpu(); - return 
spi_flash_translate_rc(rc); -} - -esp_err_t IRAM_ATTR spi_flash_read(uint32_t src_addr, uint32_t *dest, uint32_t size) -{ - spi_flash_disable_interrupts_caches_and_app_cpu(); - SpiFlashOpResult rc; - if (xPortGetCoreID() == 1) { - rc = SPI_FLASH_RESULT_ERR; - } - else { - rc = SPIRead(src_addr, dest, (int32_t) size); - } - spi_flash_enable_interrupts_caches_and_app_cpu(); - return spi_flash_translate_rc(rc); -} diff --git a/components/spi_flash/include/esp_spi_flash.h b/components/spi_flash/include/esp_spi_flash.h index aa12b2aa42..da6b03c0d1 100644 --- a/components/spi_flash/include/esp_spi_flash.h +++ b/components/spi_flash/include/esp_spi_flash.h @@ -28,6 +28,15 @@ extern "C" { #define SPI_FLASH_SEC_SIZE 4096 /**< SPI Flash sector size */ +/** + * @brief Initialize SPI flash access driver + * + * This function must be called exactly once, before any other + * spi_flash_* functions are called. + * + */ +void spi_flash_init(); + /** * @brief Erase the Flash sector. * From 08ec33c6a2c244582d4735deb1a91b4ecc2cf9ac Mon Sep 17 00:00:00 2001 From: Jeroen Domburg Date: Mon, 5 Sep 2016 11:46:08 +0800 Subject: [PATCH 02/10] Make vTaskEnterCritical callable from ISR --- components/freertos/tasks.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/components/freertos/tasks.c b/components/freertos/tasks.c index f9ad8ff8cd..a7850ba2a6 100644 --- a/components/freertos/tasks.c +++ b/components/freertos/tasks.c @@ -163,6 +163,7 @@ typedef struct tskTaskControlBlock #if ( portCRITICAL_NESTING_IN_TCB == 1 ) UBaseType_t uxCriticalNesting; /*< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */ + uint32_t uxOldInterruptState; /*< Interrupt state before the outer taskEnterCritical was called */ #endif #if ( configUSE_TRACE_FACILITY == 1 ) @@ -2595,8 +2596,7 @@ BaseType_t xReturn; /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be called from a critical section within an ISR. */ -//That makes the taskENTER_CRITICALs here unnecessary, right? -JD -// taskENTER_CRITICAL(&xTaskQueueMutex); + taskENTER_CRITICAL_ISR(&xTaskQueueMutex); /* The event list is sorted in priority order, so the first in the list can be removed as it is known to be the highest priority. Remove the TCB from the delayed list, and add it to the ready list. @@ -2654,7 +2654,7 @@ BaseType_t xReturn; prvResetNextTaskUnblockTime(); } #endif -// taskEXIT_CRITICAL(&xTaskQueueMutex); + taskEXIT_CRITICAL_ISR(&xTaskQueueMutex); return xReturn; } @@ -3761,7 +3761,7 @@ scheduler will re-enable the interrupts instead. */ void vTaskEnterCritical( portMUX_TYPE *mux ) #endif { - portDISABLE_INTERRUPTS(); + portENTER_CRITICAL_NESTED(uxOldInterruptState); #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG vPortCPUAcquireMutex( mux, function, line ); #else @@ -3814,7 +3814,7 @@ scheduler will re-enable the interrupts instead. 
*/ if( pxCurrentTCB[ xPortGetCoreID() ]->uxCriticalNesting == 0U ) { - portENABLE_INTERRUPTS(); + portEXIT_CRITICAL_NESTED(uxOldInterruptState); } else { From 128bb77c5acd661c7ac14d72f2c5ef38d1e66ddc Mon Sep 17 00:00:00 2001 From: Jeroen Domburg Date: Mon, 5 Sep 2016 11:58:40 +0800 Subject: [PATCH 03/10] Fix prev code to not crash horribly when scheduler is not running yet --- components/freertos/tasks.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/components/freertos/tasks.c b/components/freertos/tasks.c index a7850ba2a6..92970b582a 100644 --- a/components/freertos/tasks.c +++ b/components/freertos/tasks.c @@ -3761,7 +3761,10 @@ scheduler will re-enable the interrupts instead. */ void vTaskEnterCritical( portMUX_TYPE *mux ) #endif { - portENTER_CRITICAL_NESTED(uxOldInterruptState); + if( xSchedulerRunning != pdFALSE ) + { + portENTER_CRITICAL_NESTED(pxCurrentTCB[ xPortGetCoreID() ]->uxOldInterruptState); + } #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG vPortCPUAcquireMutex( mux, function, line ); #else @@ -3814,7 +3817,7 @@ scheduler will re-enable the interrupts instead. */ if( pxCurrentTCB[ xPortGetCoreID() ]->uxCriticalNesting == 0U ) { - portEXIT_CRITICAL_NESTED(uxOldInterruptState); + portEXIT_CRITICAL_NESTED(pxCurrentTCB[ xPortGetCoreID() ]->uxOldInterruptState); } else { From 9664de6867f05579a3907ed1c225793085dde6a8 Mon Sep 17 00:00:00 2001 From: Jeroen Domburg Date: Mon, 5 Sep 2016 12:30:57 +0800 Subject: [PATCH 04/10] Add working portASSERT_IF_IN_ISR function, fix enter_critical thing even better. --- components/freertos/include/freertos/portmacro.h | 4 ++++ components/freertos/port.c | 7 +++++-- components/freertos/tasks.c | 9 ++++++++- 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/components/freertos/include/freertos/portmacro.h b/components/freertos/include/freertos/portmacro.h index 908206eb37..ab83b0e05a 100644 --- a/components/freertos/include/freertos/portmacro.h +++ b/components/freertos/include/freertos/portmacro.h @@ -167,6 +167,9 @@ typedef struct { #define portDISABLE_INTERRUPTS() do { XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL); portbenchmarkINTERRUPT_DISABLE(); } while (0) #define portENABLE_INTERRUPTS() do { portbenchmarkINTERRUPT_RESTORE(0); XTOS_SET_INTLEVEL(0); } while (0) +#define portASSERT_IF_IN_ISR() vPortAssertIfInISR() +void vPortAssertIfInISR(); + #define portCRITICAL_NESTING_IN_TCB 1 /* @@ -213,6 +216,7 @@ portBASE_TYPE vPortCPUReleaseMutex(portMUX_TYPE *mux); // Cleaner and preferred solution allows nested interrupts disabling and restoring via local registers or stack. // They can be called from interrupts too. +//NOT SMP-COMPATIBLE! Use only if all you want is to disable the interrupts locally! static inline unsigned portENTER_CRITICAL_NESTED() { unsigned state = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL); portbenchmarkINTERRUPT_DISABLE(); return state; } #define portEXIT_CRITICAL_NESTED(state) do { portbenchmarkINTERRUPT_RESTORE(state); XTOS_RESTORE_JUST_INTLEVEL(state); } while (0) diff --git a/components/freertos/port.c b/components/freertos/port.c index afa4978639..6117a2804e 100644 --- a/components/freertos/port.c +++ b/components/freertos/port.c @@ -248,14 +248,17 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS *xMPUSettings, const struct xMEMOR #endif +void vPortAssertIfInISR() +{ + configASSERT(port_interruptNesting[xPortGetCoreID()]==0) +} + /* * Wrapper for the Xtensa compare-and-set instruction. This subroutine will atomically compare * *mux to compare, and if it's the same, will set *mux to set. 
It will return the old value * of *addr. * - * Note: the NOPs are needed on the ESP31 processor but superfluous on the ESP32. - * * Warning: From the ISA docs: in some (unspecified) cases, the s32c1i instruction may return the * *bitwise inverse* of the old mem if the mem wasn't written. This doesn't seem to happen on the * ESP32, though. (Would show up directly if it did because the magic wouldn't match.) diff --git a/components/freertos/tasks.c b/components/freertos/tasks.c index 92970b582a..153fa3f03f 100644 --- a/components/freertos/tasks.c +++ b/components/freertos/tasks.c @@ -3763,7 +3763,7 @@ scheduler will re-enable the interrupts instead. */ { if( xSchedulerRunning != pdFALSE ) { - portENTER_CRITICAL_NESTED(pxCurrentTCB[ xPortGetCoreID() ]->uxOldInterruptState); + pxCurrentTCB[ xPortGetCoreID() ]->uxOldInterruptState=portENTER_CRITICAL_NESTED(); } #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG vPortCPUAcquireMutex( mux, function, line ); @@ -3775,16 +3775,23 @@ scheduler will re-enable the interrupts instead. */ { ( pxCurrentTCB[ xPortGetCoreID() ]->uxCriticalNesting )++; + /* This is not the interrupt safe version of the enter critical function so assert() if it is being called from an interrupt context. Only API functions that end in "FromISR" can be used in an interrupt. Only assert if the critical nesting count is 1 to protect against recursive calls if the assert function also uses a critical section. */ + /* DISABLED in the esp32 port - because of SMP, vTaskEnterCritical + has to be used in way more places than before, and some are called + both from ISR as well as non-ISR code, thus we re-organized + vTaskEnterCritical to also work in ISRs. */ +#if 0 if( pxCurrentTCB[ xPortGetCoreID() ]->uxCriticalNesting == 1 ) { portASSERT_IF_IN_ISR(); } +#endif } else From 1c6859573b42df4a303935a653c3af11ac41b04b Mon Sep 17 00:00:00 2001 From: Ivan Grokhotkov Date: Mon, 5 Sep 2016 10:22:37 +0800 Subject: [PATCH 05/10] freertos: protect calls to prvAddTaskToReadyList with xTaskQueueMutex --- components/freertos/tasks.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/components/freertos/tasks.c b/components/freertos/tasks.c index 153fa3f03f..f53c67d4b5 100644 --- a/components/freertos/tasks.c +++ b/components/freertos/tasks.c @@ -3614,6 +3614,7 @@ In fact, nothing below this line has/is. { if( pxTCB->uxPriority < pxCurrentTCB[ xPortGetCoreID() ]->uxPriority ) { + taskENTER_CRITICAL(&xTaskQueueMutex); /* Adjust the mutex holder state to account for its new priority. Only reset the event list item value if the value is not being used for anything else. */ @@ -3649,6 +3650,8 @@ In fact, nothing below this line has/is. pxTCB->uxPriority = pxCurrentTCB[ xPortGetCoreID() ]->uxPriority; } + taskEXIT_CRITICAL(&xTaskQueueMutex); + traceTASK_PRIORITY_INHERIT( pxTCB, pxCurrentTCB[ xPortGetCoreID() ]->uxPriority ); } else @@ -3686,6 +3689,7 @@ In fact, nothing below this line has/is. /* Only disinherit if no other mutexes are held. */ if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 ) { + taskENTER_CRITICAL(&xTaskQueueMutex); /* A task can only have an inhertied priority if it holds the mutex. If the mutex is held by a task then it cannot be given from an interrupt, and if a mutex is given by the @@ -3720,6 +3724,7 @@ In fact, nothing below this line has/is. switch should occur when the last mutex is returned whether a task is waiting on it or not. 
*/ xReturn = pdTRUE; + taskEXIT_CRITICAL(&xTaskQueueMutex); } else { From e9f2645b210544e3ddb3977de7ea03f6f59c2089 Mon Sep 17 00:00:00 2001 From: Ivan Grokhotkov Date: Mon, 12 Sep 2016 18:54:45 +0800 Subject: [PATCH 06/10] components/esp32: add inter-processor call API and implement spi_flash through it With this change, flash operations can run on both cores. NVS and WiFi stack can also run in dual core mode now. --- components/esp32/cpu_start.c | 25 ++-- components/esp32/include/esp_err.h | 5 +- components/esp32/include/esp_ipc.h | 84 +++++++++++++ components/esp32/ipc.c | 117 ++++++++++++++++++ components/spi_flash/esp_spi_flash.c | 175 ++++++++++++++++----------- 5 files changed, 321 insertions(+), 85 deletions(-) create mode 100644 components/esp32/include/esp_ipc.h create mode 100644 components/esp32/ipc.c diff --git a/components/esp32/cpu_start.c b/components/esp32/cpu_start.c index 85d3009ef3..d58ba43f88 100644 --- a/components/esp32/cpu_start.c +++ b/components/esp32/cpu_start.c @@ -36,6 +36,8 @@ #include "esp_spi_flash.h" #include "nvs_flash.h" #include "esp_event.h" +#include "esp_spi_flash.h" +#include "esp_ipc.h" static void IRAM_ATTR user_start_cpu0(void); static void IRAM_ATTR call_user_start_cpu1(); @@ -180,37 +182,39 @@ void IRAM_ATTR call_user_start_cpu1() { user_start_cpu1(); } -extern volatile int port_xSchedulerRunning; -extern int xPortStartScheduler(); +extern volatile int port_xSchedulerRunning[2]; -void user_start_cpu1(void) { - //Wait for the freertos initialization is finished on CPU0 - while (port_xSchedulerRunning == 0) ; - ets_printf("Core0 started initializing FreeRTOS. Jumping to scheduler.\n"); - //Okay, start the scheduler! +void IRAM_ATTR user_start_cpu1(void) { + // Wait for FreeRTOS initialization to finish on PRO CPU + while (port_xSchedulerRunning[0] == 0) { + ; + } + ets_printf("Starting scheduler on APP CPU.\n"); + // Start the scheduler on APP CPU xPortStartScheduler(); } extern void (*__init_array_start)(void); extern void (*__init_array_end)(void); -extern esp_err_t app_main(); static void do_global_ctors(void) { void (**p)(void); for(p = &__init_array_start; p != &__init_array_end; ++p) (*p)(); } +extern esp_err_t app_main(); void user_start_cpu0(void) { ets_setup_syscalls(); do_global_ctors(); + esp_ipc_init(); + spi_flash_init(); #if CONFIG_WIFI_ENABLED - ets_printf("nvs_flash_init\n"); esp_err_t ret = nvs_flash_init(5, 3); if (ret != ESP_OK) { - ets_printf("nvs_flash_init fail, ret=%d\n", ret); + printf("nvs_flash_init failed, ret=%d\n", ret); } system_init(); @@ -227,6 +231,7 @@ void user_start_cpu0(void) { app_main(); #endif + ets_printf("Starting scheduler on PRO CPU.\n"); vTaskStartScheduler(); } diff --git a/components/esp32/include/esp_err.h b/components/esp32/include/esp_err.h index 39b60a905a..6ea176991c 100644 --- a/components/esp32/include/esp_err.h +++ b/components/esp32/include/esp_err.h @@ -27,7 +27,10 @@ typedef int32_t esp_err_t; #define ESP_OK 0 #define ESP_FAIL -1 -#define ESP_ERR_NO_MEM 0x101 +#define ESP_ERR_NO_MEM 0x101 +#define ESP_ERR_INVALID_ARG 0x102 +#define ESP_ERR_INVALID_STATE 0x103 + #ifdef __cplusplus } diff --git a/components/esp32/include/esp_ipc.h b/components/esp32/include/esp_ipc.h new file mode 100644 index 0000000000..b28df31285 --- /dev/null +++ b/components/esp32/include/esp_ipc.h @@ -0,0 +1,84 @@ +// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __ESP_IPC_H__ +#define __ESP_IPC_H__ + +#include + +typedef void (*esp_ipc_func_t)(void* arg); + +/** + * @brief Inter-processor call APIs + * + * FreeRTOS provides several APIs which can be used to communicate between + * different tasks, including tasks running on different CPUs. + * This module provides additional APIs to run some code on the other CPU. + */ + + +/** + * @brief Initialize inter-processor call module. + * + * This function start two tasks, one on each CPU. These tasks are started + * with high priority. These tasks are normally inactive, waiting until one of + * the esp_ipc_call_* functions to be used. One of these tasks will be + * woken up to execute the callback provided to esp_ipc_call_nonblocking or + * esp_ipc_call_blocking. + */ +void esp_ipc_init(); + + +/** + * @brief Execute function on the given CPU + * + * This will wake a high-priority task on CPU indicated by cpu_id argument, + * and run func(arg) in the context of that task. + * This function returns as soon as the high-priority task is woken up. + * If another IPC call is already being executed, this function will also wait + * for it to complete. + * + * In single-core mode, returns ESP_ERR_INVALID_ARG for cpu_id 1. + * + * @param cpu_id CPU where function should be executed (0 or 1) + * @param func pointer to a function which should be executed + * @param arg arbitrary argument to be passed into function + * + * @return ESP_ERR_INVALID_ARG if cpu_id is invalid + * ESP_OK otherwise + */ +esp_err_t esp_ipc_call(uint32_t cpu_id, esp_ipc_func_t func, void* arg); + + +/** + * @brief Execute function on the given CPU and wait for it to finish + * + * This will wake a high-priority task on CPU indicated by cpu_id argument, + * and run func(arg) in the context of that task. + * This function waits for func to return. + * + * In single-core mode, returns ESP_ERR_INVALID_ARG for cpu_id 1. + * + * @param cpu_id CPU where function should be executed (0 or 1) + * @param func pointer to a function which should be executed + * @param arg arbitrary argument to be passed into function + * + * @return ESP_ERR_INVALID_ARG if cpu_id is invalid + * ESP_OK otherwise + */ +esp_err_t esp_ipc_call_blocking(uint32_t cpu_id, esp_ipc_func_t func, void* arg); + + + +#endif /* __ESP_IPC_H__ */ diff --git a/components/esp32/ipc.c b/components/esp32/ipc.c new file mode 100644 index 0000000000..b7524cae68 --- /dev/null +++ b/components/esp32/ipc.c @@ -0,0 +1,117 @@ +// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include +#include +#include +#include +#include "esp_err.h" +#include "esp_ipc.h" +#include "esp_attr.h" + +#include "freertos/FreeRTOS.h" +#include "freertos/task.h" +#include "freertos/semphr.h" + + +static TaskHandle_t s_ipc_tasks[portNUM_PROCESSORS]; // Two high priority tasks, one for each CPU +static SemaphoreHandle_t s_ipc_mutex; // This mutex is used as a global lock for esp_ipc_* APIs +static SemaphoreHandle_t s_ipc_sem[portNUM_PROCESSORS]; // Two semaphores used to wake each of s_ipc_tasks +static SemaphoreHandle_t s_ipc_ack; // Semaphore used to acknowledge that task was woken up, + // or function has finished running +static volatile esp_ipc_func_t s_func; // Function which should be called by high priority task +static void * volatile s_func_arg; // Argument to pass into s_func +typedef enum { + IPC_WAIT_FOR_START, + IPC_WAIT_FOR_END +} esp_ipc_wait_t; + +static volatile esp_ipc_wait_t s_ipc_wait; // This variable tells high priority task when it should give + // s_ipc_ack semaphore: before s_func is called, or + // after it returns + +static void IRAM_ATTR ipc_task(void* arg) +{ + const uint32_t cpuid = (uint32_t) arg; + assert(cpuid == xPortGetCoreID()); + while (true) { + // Wait for IPC to be initiated. + // This will be indicated by giving the semaphore corresponding to + // this CPU. + if (xSemaphoreTake(s_ipc_sem[cpuid], portMAX_DELAY) != pdTRUE) { + // TODO: when can this happen? + abort(); + } + + esp_ipc_func_t func = s_func; + void* arg = s_func_arg; + + if (s_ipc_wait == IPC_WAIT_FOR_START) { + xSemaphoreGive(s_ipc_ack); + } + (*func)(arg); + if (s_ipc_wait == IPC_WAIT_FOR_END) { + xSemaphoreGive(s_ipc_ack); + } + } + // TODO: currently this is unreachable code. Introduce esp_ipc_uninit + // function which will signal to both tasks that they can shut down. + // Not critical at this point, we don't have a use case for stopping + // IPC yet. + // Also need to delete the semaphore here. + vTaskDelete(NULL); +} + +void esp_ipc_init() +{ + s_ipc_mutex = xSemaphoreCreateMutex(); + s_ipc_ack = xSemaphoreCreateBinary(); + const char* task_names[2] = {"ipc0", "ipc1"}; + for (int i = 0; i < portNUM_PROCESSORS; ++i) { + s_ipc_sem[i] = xSemaphoreCreateBinary(); + xTaskCreatePinnedToCore(ipc_task, task_names[i], XT_STACK_MIN_SIZE, (void*) i, + configMAX_PRIORITIES - 1, &s_ipc_tasks[i], i); + } +} + +static esp_err_t esp_ipc_call_and_wait(uint32_t cpu_id, esp_ipc_func_t func, void* arg, esp_ipc_wait_t wait_for) +{ + if (cpu_id >= portNUM_PROCESSORS) { + return ESP_ERR_INVALID_ARG; + } + if (xTaskGetSchedulerState() != taskSCHEDULER_RUNNING) { + return ESP_ERR_INVALID_STATE; + } + + xSemaphoreTake(s_ipc_mutex, portMAX_DELAY); + + s_func = func; + s_func_arg = arg; + s_ipc_wait = IPC_WAIT_FOR_START; + xSemaphoreGive(s_ipc_sem[cpu_id]); + xSemaphoreTake(s_ipc_ack, portMAX_DELAY); + xSemaphoreGive(s_ipc_mutex); + return ESP_OK; +} + +esp_err_t esp_ipc_call(uint32_t cpu_id, esp_ipc_func_t func, void* arg) +{ + return esp_ipc_call_and_wait(cpu_id, func, arg, IPC_WAIT_FOR_START); +} + +esp_err_t esp_ipc_call_blocking(uint32_t cpu_id, esp_ipc_func_t func, void* arg) +{ + return esp_ipc_call_and_wait(cpu_id, func, arg, IPC_WAIT_FOR_END); +} + diff --git a/components/spi_flash/esp_spi_flash.c b/components/spi_flash/esp_spi_flash.c index e3d09acaf0..fb34a44872 100644 --- a/components/spi_flash/esp_spi_flash.c +++ b/components/spi_flash/esp_spi_flash.c @@ -12,16 +12,20 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+#include +#include #include #include #include - -#include #include #include -#include +#include #include #include "sdkconfig.h" +#include "esp_ipc.h" +#include "esp_attr.h" +#include "esp_spi_flash.h" + /* Driver for SPI flash read/write/erase operations @@ -42,7 +46,7 @@ this flag to be set. Once the flag is set, it disables cache on CPU A and starts flash operation. - While flash operation is running, interrupts can still run on CPU B. + While flash operation is running, interrupts can still run on CPU B. We assume that all interrupt code is placed into RAM. Once flash operation is complete, function on CPU A sets another flag, @@ -58,93 +62,94 @@ */ static esp_err_t spi_flash_translate_rc(SpiFlashOpResult rc); -extern void Cache_Flush(int); +static void IRAM_ATTR spi_flash_disable_cache(uint32_t cpuid, uint32_t* saved_state); +static void IRAM_ATTR spi_flash_restore_cache(uint32_t cpuid, uint32_t saved_state); + +static uint32_t s_flash_op_cache_state[2]; + +#ifndef CONFIG_FREERTOS_UNICORE +static SemaphoreHandle_t s_flash_op_mutex; +static bool s_flash_op_can_start = false; +static bool s_flash_op_complete = false; +#endif //CONFIG_FREERTOS_UNICORE + #ifndef CONFIG_FREERTOS_UNICORE -static TaskHandle_t s_flash_op_tasks[2]; -static SemaphoreHandle_t s_flash_op_mutex; -static SemaphoreHandle_t s_flash_op_sem[2]; -static bool s_flash_op_can_start = false; -static bool s_flash_op_complete = false; - - -// Task whose duty is to block other tasks from running on a given CPU -static void IRAM_ATTR spi_flash_op_block_task(void* arg) +static void IRAM_ATTR spi_flash_op_block_func(void* arg) { + // Disable scheduler on this CPU + vTaskSuspendAll(); uint32_t cpuid = (uint32_t) arg; - while (true) { - // Wait for flash operation to be initiated. - // This will be indicated by giving the semaphore corresponding to - // this CPU. - if (xSemaphoreTake(s_flash_op_sem[cpuid], portMAX_DELAY) != pdTRUE) { - // TODO: when can this happen? - abort(); - } - // Disable cache on this CPU - Cache_Read_Disable(cpuid); - // Signal to the flash API function that flash operation can start - s_flash_op_can_start = true; - while (!s_flash_op_complete) { - // until we have a way to use interrupts for inter-CPU communication, - // busy loop here and wait for the other CPU to finish flash operation - } - // Flash operation is complete, re-enable cache - Cache_Read_Enable(cpuid); + // Disable cache so that flash operation can start + spi_flash_disable_cache(cpuid, &s_flash_op_cache_state[cpuid]); + s_flash_op_can_start = true; + while (!s_flash_op_complete) { + // until we have a way to use interrupts for inter-CPU communication, + // busy loop here and wait for the other CPU to finish flash operation } - // TODO: currently this is unreachable code. Introduce spi_flash_uninit - // function which will signal to both tasks that they can shut down. - // Not critical at this point, we don't have a use case for stopping - // SPI flash driver yet. - // Also need to delete the semaphore here. - vTaskDelete(NULL); + // Flash operation is complete, re-enable cache + spi_flash_restore_cache(cpuid, s_flash_op_cache_state[cpuid]); + // Re-enable scheduler + xTaskResumeAll(); } void spi_flash_init() { - s_flash_op_can_start = false; - s_flash_op_complete = false; - s_flash_op_sem[0] = xSemaphoreCreateBinary(); - s_flash_op_sem[1] = xSemaphoreCreateBinary(); s_flash_op_mutex = xSemaphoreCreateMutex(); - // Start two tasks, one on each CPU, with max priorities - // TODO: optimize stack usage. Stack size 512 is too small. 
- xTaskCreatePinnedToCore(spi_flash_op_block_task, "flash_op_pro", 1024, (void*) 0, - configMAX_PRIORITIES - 1, &s_flash_op_tasks[0], 0); - xTaskCreatePinnedToCore(spi_flash_op_block_task, "flash_op_app", 1024, (void*) 1, - configMAX_PRIORITIES - 1, &s_flash_op_tasks[1], 1); } static void IRAM_ATTR spi_flash_disable_interrupts_caches_and_other_cpu() { // Take the API lock xSemaphoreTake(s_flash_op_mutex, portMAX_DELAY); + const uint32_t cpuid = xPortGetCoreID(); - uint32_t other_cpuid = !cpuid; - s_flash_op_can_start = false; - s_flash_op_complete = false; - // Signal to the spi_flash_op_block_task on the other CPU that we need it to - // disable cache there and block other tasks from executing. - xSemaphoreGive(s_flash_op_sem[other_cpuid]); - while (!s_flash_op_can_start) { - // Busy loop and wait for spi_flash_op_block_task to take the semaphore on the - // other CPU. + const uint32_t other_cpuid = !cpuid; + + if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) { + // Scheduler hasn't been started yet, so we don't need to worry + // about cached code running on the APP CPU. + spi_flash_disable_cache(other_cpuid, &s_flash_op_cache_state[other_cpuid]); + } else { + // Signal to the spi_flash_op_block_task on the other CPU that we need it to + // disable cache there and block other tasks from executing. + s_flash_op_can_start = false; + s_flash_op_complete = false; + esp_ipc_call(other_cpuid, &spi_flash_op_block_func, (void*) other_cpuid); + while (!s_flash_op_can_start) { + // Busy loop and wait for spi_flash_op_block_func to disable cache + // on the other CPU + } + // Disable scheduler on CPU cpuid + vTaskSuspendAll(); + // This is guaranteed to run on CPU because the other CPU is now + // occupied by highest priority task + assert(xPortGetCoreID() == cpuid); } - vTaskSuspendAll(); // Disable cache on this CPU as well - Cache_Read_Disable(cpuid); + spi_flash_disable_cache(cpuid, &s_flash_op_cache_state[cpuid]); } static void IRAM_ATTR spi_flash_enable_interrupts_caches_and_other_cpu() { - uint32_t cpuid = xPortGetCoreID(); - // Signal to spi_flash_op_block_task that flash operation is complete - s_flash_op_complete = true; + const uint32_t cpuid = xPortGetCoreID(); + const uint32_t other_cpuid = !cpuid; + // Re-enable cache on this CPU - Cache_Read_Enable(cpuid); + spi_flash_restore_cache(cpuid, s_flash_op_cache_state[cpuid]); + + if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) { + // Scheduler is not running yet — just re-enable cache on APP CPU + spi_flash_restore_cache(other_cpuid, s_flash_op_cache_state[other_cpuid]); + } else { + // Signal to spi_flash_op_block_task that flash operation is complete + s_flash_op_complete = true; + // Resume tasks on the current CPU + xTaskResumeAll(); + } // Release API lock xSemaphoreGive(s_flash_op_mutex); - xTaskResumeAll(); } #else // CONFIG_FREERTOS_UNICORE @@ -157,14 +162,12 @@ void spi_flash_init() static void IRAM_ATTR spi_flash_disable_interrupts_caches_and_other_cpu() { vTaskSuspendAll(); - Cache_Read_Disable(0); - Cache_Read_Disable(1); + spi_flash_disable_cache(0, &s_flash_op_cache_state[0]); } static void IRAM_ATTR spi_flash_enable_interrupts_caches_and_other_cpu() { - Cache_Read_Enable(0); - Cache_Read_Enable(1); + spi_flash_restore_cache(0, s_flash_op_cache_state[0]); xTaskResumeAll(); } @@ -179,8 +182,6 @@ esp_err_t IRAM_ATTR spi_flash_erase_sector(uint16_t sec) if (rc == SPI_FLASH_RESULT_OK) { rc = SPIEraseSector(sec); } - Cache_Flush(0); - Cache_Flush(1); spi_flash_enable_interrupts_caches_and_other_cpu(); return 
spi_flash_translate_rc(rc); } @@ -193,8 +194,6 @@ esp_err_t IRAM_ATTR spi_flash_write(uint32_t dest_addr, const uint32_t *src, uin if (rc == SPI_FLASH_RESULT_OK) { rc = SPIWrite(dest_addr, src, (int32_t) size); } - Cache_Flush(0); - Cache_Flush(1); spi_flash_enable_interrupts_caches_and_other_cpu(); return spi_flash_translate_rc(rc); } @@ -204,8 +203,6 @@ esp_err_t IRAM_ATTR spi_flash_read(uint32_t src_addr, uint32_t *dest, uint32_t s spi_flash_disable_interrupts_caches_and_other_cpu(); SpiFlashOpResult rc; rc = SPIRead(src_addr, dest, (int32_t) size); - Cache_Flush(0); - Cache_Flush(1); spi_flash_enable_interrupts_caches_and_other_cpu(); return spi_flash_translate_rc(rc); } @@ -222,3 +219,33 @@ static esp_err_t spi_flash_translate_rc(SpiFlashOpResult rc) return ESP_ERR_FLASH_OP_FAIL; } } + +static void IRAM_ATTR spi_flash_disable_cache(uint32_t cpuid, uint32_t* saved_state) +{ + uint32_t ret = 0; + if (cpuid == 0) { + ret |= GET_PERI_REG_BITS2(PRO_CACHE_CTRL1_REG, 0x1f, 0); + while (GET_PERI_REG_BITS2(PRO_DCACHE_DBUG_REG0, DPORT_PRO_CACHE_STATE, DPORT_PRO_CACHE_STATE_S) != 1) { + ; + } + SET_PERI_REG_BITS(PRO_CACHE_CTRL_REG, 1, 0, DPORT_PRO_CACHE_ENABLE_S); + } else { + ret |= GET_PERI_REG_BITS2(APP_CACHE_CTRL1_REG, 0x1f, 0); + while (GET_PERI_REG_BITS2(APP_DCACHE_DBUG_REG0, DPORT_APP_CACHE_STATE, DPORT_APP_CACHE_STATE_S) != 1) { + ; + } + SET_PERI_REG_BITS(APP_CACHE_CTRL_REG, 1, 0, DPORT_APP_CACHE_ENABLE_S); + } + *saved_state = ret; +} + +static void IRAM_ATTR spi_flash_restore_cache(uint32_t cpuid, uint32_t saved_state) +{ + if (cpuid == 0) { + SET_PERI_REG_BITS(PRO_CACHE_CTRL_REG, 1, 1, DPORT_PRO_CACHE_ENABLE_S); + SET_PERI_REG_BITS(PRO_CACHE_CTRL1_REG, 0x1f, saved_state, 0); + } else { + SET_PERI_REG_BITS(APP_CACHE_CTRL_REG, 1, 1, DPORT_APP_CACHE_ENABLE_S); + SET_PERI_REG_BITS(APP_CACHE_CTRL1_REG, 0x1f, saved_state, 0); + } +} From 1b6022bd07a513089fb83b0407a942dff909d2ef Mon Sep 17 00:00:00 2001 From: Ivan Grokhotkov Date: Mon, 12 Sep 2016 19:53:38 +0800 Subject: [PATCH 07/10] components/esp32: remove dependency of WIFI_ENABLED on FREERTOS_UNICORE --- components/esp32/Kconfig | 1 - 1 file changed, 1 deletion(-) diff --git a/components/esp32/Kconfig b/components/esp32/Kconfig index af34c4a336..154ee4150e 100644 --- a/components/esp32/Kconfig +++ b/components/esp32/Kconfig @@ -7,7 +7,6 @@ config WIFI_ENABLED This compiles in the low-level WiFi stack. Temporarily, this option requires that FreeRTOS runs in single core mode. 
- depends on FREERTOS_UNICORE config WIFI_AUTO_STARTUP bool "Start WiFi with system startup" From 23d5c7579bebb5c69b4beee0b41cd8618ad7c72b Mon Sep 17 00:00:00 2001 From: Ivan Grokhotkov Date: Mon, 12 Sep 2016 19:54:35 +0800 Subject: [PATCH 08/10] components/esp32,spi_flash: update some comments --- components/esp32/cpu_start.c | 1 - components/esp32/include/esp_ipc.h | 4 ++++ components/spi_flash/esp_spi_flash.c | 18 ++++++++---------- 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/components/esp32/cpu_start.c b/components/esp32/cpu_start.c index d58ba43f88..4e922891c3 100644 --- a/components/esp32/cpu_start.c +++ b/components/esp32/cpu_start.c @@ -190,7 +190,6 @@ void IRAM_ATTR user_start_cpu1(void) { ; } ets_printf("Starting scheduler on APP CPU.\n"); - // Start the scheduler on APP CPU xPortStartScheduler(); } diff --git a/components/esp32/include/esp_ipc.h b/components/esp32/include/esp_ipc.h index b28df31285..a77b4932fe 100644 --- a/components/esp32/include/esp_ipc.h +++ b/components/esp32/include/esp_ipc.h @@ -25,6 +25,8 @@ typedef void (*esp_ipc_func_t)(void* arg); * FreeRTOS provides several APIs which can be used to communicate between * different tasks, including tasks running on different CPUs. * This module provides additional APIs to run some code on the other CPU. + * + * These APIs can only be used when FreeRTOS scheduler is running. */ @@ -56,6 +58,7 @@ void esp_ipc_init(); * @param arg arbitrary argument to be passed into function * * @return ESP_ERR_INVALID_ARG if cpu_id is invalid + * ESP_ERR_INVALID_STATE if FreeRTOS scheduler is not running * ESP_OK otherwise */ esp_err_t esp_ipc_call(uint32_t cpu_id, esp_ipc_func_t func, void* arg); @@ -75,6 +78,7 @@ esp_err_t esp_ipc_call(uint32_t cpu_id, esp_ipc_func_t func, void* arg); * @param arg arbitrary argument to be passed into function * * @return ESP_ERR_INVALID_ARG if cpu_id is invalid + * ESP_ERR_INVALID_STATE if FreeRTOS scheduler is not running * ESP_OK otherwise */ esp_err_t esp_ipc_call_blocking(uint32_t cpu_id, esp_ipc_func_t func, void* arg); diff --git a/components/spi_flash/esp_spi_flash.c b/components/spi_flash/esp_spi_flash.c index fb34a44872..a4e6c4c3ea 100644 --- a/components/spi_flash/esp_spi_flash.c +++ b/components/spi_flash/esp_spi_flash.c @@ -36,15 +36,14 @@ the flash operation. In the dual-core setup this is slightly more complicated. We need to make sure that the other CPU doesn't run any code from flash. - SPI flash driver starts two tasks (spi_flash_op_block_task), one pinned to - each CPU. Each task is associated with its own semaphore. - When SPI flash API is called on CPU A (can be PRO or APP), we wake up the task - on CPU B by "giving" the semaphore associated with it. Tasks resumes, disables - cache on CPU B, and acknowledges that it has taken the semaphore by setting - a flag (s_flash_op_can_start). Flash API function running on CPU A waits for - this flag to be set. Once the flag is set, it disables cache on CPU A and - starts flash operation. + When SPI flash API is called on CPU A (can be PRO or APP), we start + spi_flash_op_block_func function on CPU B using esp_ipc_call API. This API + wakes up high priority task on CPU B and tells it to execute given function, + in this case spi_flash_op_block_func. This function disables cache on CPU B and + signals that cache is disabled by setting s_flash_op_can_start flag. + Then the task on CPU A disables cache as well, and proceeds to execute flash + operation. While flash operation is running, interrupts can still run on CPU B. 
We assume that all interrupt code is placed into RAM. @@ -52,8 +51,7 @@ Once flash operation is complete, function on CPU A sets another flag, s_flash_op_complete, to let the task on CPU B know that it can re-enable cache and release the CPU. Then the function on CPU A re-enables the cache on - CPU A as well and returns control to the calling code. Task on CPU B returns - to suspended state by "taking" the semaphore. + CPU A as well and returns control to the calling code. Additionally, all API functions are protected with a mutex (s_flash_op_mutex). From 174a0e3f8b6033b565965bca8f8b0b1521d2f10c Mon Sep 17 00:00:00 2001 From: Jeroen Domburg Date: Tue, 13 Sep 2016 10:22:38 +0800 Subject: [PATCH 09/10] Fix bug where nesting vTaskEnterCritical calls would not re-enable interrupts after vTaskExitCritical sections. --- components/freertos/tasks.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/components/freertos/tasks.c b/components/freertos/tasks.c index f53c67d4b5..a4595154ed 100644 --- a/components/freertos/tasks.c +++ b/components/freertos/tasks.c @@ -3766,9 +3766,13 @@ scheduler will re-enable the interrupts instead. */ void vTaskEnterCritical( portMUX_TYPE *mux ) #endif { + BaseType_t oldInterruptLevel=0; if( xSchedulerRunning != pdFALSE ) { - pxCurrentTCB[ xPortGetCoreID() ]->uxOldInterruptState=portENTER_CRITICAL_NESTED(); + //Interrupts may already be disabled (because we're doing this recursively) but we can't get the interrupt level after + //vPortCPUAquireMutex, because it also may mess with interrupts. Get it here first, then later figure out if we're nesting + //and save for real there. + oldInterruptLevel=portENTER_CRITICAL_NESTED(); } #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG vPortCPUAcquireMutex( mux, function, line ); @@ -3780,13 +3784,20 @@ scheduler will re-enable the interrupts instead. */ { ( pxCurrentTCB[ xPortGetCoreID() ]->uxCriticalNesting )++; + if( xSchedulerRunning != pdFALSE && pxCurrentTCB[ xPortGetCoreID() ]->uxCriticalNesting == 1 ) + { + //This is the first time we get called. Save original interrupt level. + pxCurrentTCB[ xPortGetCoreID() ]->uxOldInterruptState=oldInterruptLevel; + } - /* This is not the interrupt safe version of the enter critical + /* Original FreeRTOS comment, saved for reference: + This is not the interrupt safe version of the enter critical function so assert() if it is being called from an interrupt context. Only API functions that end in "FromISR" can be used in an interrupt. Only assert if the critical nesting count is 1 to protect against recursive calls if the assert function also uses a critical section. 
*/ + /* DISABLED in the esp32 port - because of SMP, vTaskEnterCritical has to be used in way more places than before, and some are called both from ISR as well as non-ISR code, thus we re-organized From ce2153c447d263df021f5f55bb351e62bb4231a7 Mon Sep 17 00:00:00 2001 From: Ivan Grokhotkov Date: Tue, 13 Sep 2016 12:47:21 +0800 Subject: [PATCH 10/10] components/spi_flash: improve comments and readability --- components/spi_flash/esp_spi_flash.c | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/components/spi_flash/esp_spi_flash.c b/components/spi_flash/esp_spi_flash.c index a4e6c4c3ea..65d4c709dd 100644 --- a/components/spi_flash/esp_spi_flash.c +++ b/components/spi_flash/esp_spi_flash.c @@ -103,11 +103,14 @@ static void IRAM_ATTR spi_flash_disable_interrupts_caches_and_other_cpu() xSemaphoreTake(s_flash_op_mutex, portMAX_DELAY); const uint32_t cpuid = xPortGetCoreID(); - const uint32_t other_cpuid = !cpuid; + const uint32_t other_cpuid = (cpuid == 0) ? 1 : 0; if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) { - // Scheduler hasn't been started yet, so we don't need to worry - // about cached code running on the APP CPU. + // Scheduler hasn't been started yet, it means that spi_flash API is being + // called from the 2nd stage bootloader or from user_start_cpu0, i.e. from + // PRO CPU. APP CPU is either in reset or spinning inside user_start_cpu1, + // which is in IRAM. So it is safe to disable cache for the other_cpuid here. + assert(other_cpuid == 1); spi_flash_disable_cache(other_cpuid, &s_flash_op_cache_state[other_cpuid]); } else { // Signal to the spi_flash_op_block_task on the other CPU that we need it to @@ -132,13 +135,16 @@ static void IRAM_ATTR spi_flash_disable_interrupts_caches_and_other_cpu() static void IRAM_ATTR spi_flash_enable_interrupts_caches_and_other_cpu() { const uint32_t cpuid = xPortGetCoreID(); - const uint32_t other_cpuid = !cpuid; + const uint32_t other_cpuid = (cpuid == 0) ? 1 : 0; // Re-enable cache on this CPU spi_flash_restore_cache(cpuid, s_flash_op_cache_state[cpuid]); if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) { - // Scheduler is not running yet — just re-enable cache on APP CPU + // Scheduler is not running yet — this means we are running on PRO CPU. + // other_cpuid is APP CPU, and it is either in reset or is spinning in + // user_start_cpu1, which is in IRAM. So we can simply reenable cache. 
+ assert(other_cpuid == 1); spi_flash_restore_cache(other_cpuid, s_flash_op_cache_state[other_cpuid]); } else { // Signal to spi_flash_op_block_task that flash operation is complete @@ -218,17 +224,21 @@ static esp_err_t spi_flash_translate_rc(SpiFlashOpResult rc) } } +static const uint32_t cache_mask = DPORT_APP_CACHE_MASK_OPSDRAM | DPORT_APP_CACHE_MASK_DROM0 | + DPORT_APP_CACHE_MASK_DRAM1 | DPORT_APP_CACHE_MASK_IROM0 | + DPORT_APP_CACHE_MASK_IRAM1 | DPORT_APP_CACHE_MASK_IRAM0; + static void IRAM_ATTR spi_flash_disable_cache(uint32_t cpuid, uint32_t* saved_state) { uint32_t ret = 0; if (cpuid == 0) { - ret |= GET_PERI_REG_BITS2(PRO_CACHE_CTRL1_REG, 0x1f, 0); + ret |= GET_PERI_REG_BITS2(PRO_CACHE_CTRL1_REG, cache_mask, 0); while (GET_PERI_REG_BITS2(PRO_DCACHE_DBUG_REG0, DPORT_PRO_CACHE_STATE, DPORT_PRO_CACHE_STATE_S) != 1) { ; } SET_PERI_REG_BITS(PRO_CACHE_CTRL_REG, 1, 0, DPORT_PRO_CACHE_ENABLE_S); } else { - ret |= GET_PERI_REG_BITS2(APP_CACHE_CTRL1_REG, 0x1f, 0); + ret |= GET_PERI_REG_BITS2(APP_CACHE_CTRL1_REG, cache_mask, 0); while (GET_PERI_REG_BITS2(APP_DCACHE_DBUG_REG0, DPORT_APP_CACHE_STATE, DPORT_APP_CACHE_STATE_S) != 1) { ; } @@ -241,9 +251,9 @@ static void IRAM_ATTR spi_flash_restore_cache(uint32_t cpuid, uint32_t saved_sta { if (cpuid == 0) { SET_PERI_REG_BITS(PRO_CACHE_CTRL_REG, 1, 1, DPORT_PRO_CACHE_ENABLE_S); - SET_PERI_REG_BITS(PRO_CACHE_CTRL1_REG, 0x1f, saved_state, 0); + SET_PERI_REG_BITS(PRO_CACHE_CTRL1_REG, cache_mask, saved_state, 0); } else { SET_PERI_REG_BITS(APP_CACHE_CTRL_REG, 1, 1, DPORT_APP_CACHE_ENABLE_S); - SET_PERI_REG_BITS(APP_CACHE_CTRL1_REG, 0x1f, saved_state, 0); + SET_PERI_REG_BITS(APP_CACHE_CTRL1_REG, cache_mask, saved_state, 0); } }