diff --git a/components/esp32s3/CMakeLists.txt b/components/esp32s3/CMakeLists.txt index be3b927a8f..34c91b07a5 100644 --- a/components/esp32s3/CMakeLists.txt +++ b/components/esp32s3/CMakeLists.txt @@ -1,4 +1,74 @@ idf_build_get_property(target IDF_TARGET) +idf_build_get_property(sdkconfig_header SDKCONFIG_HEADER) + if(NOT "${target}" STREQUAL "esp32s3") return() endif() + +if(BOOTLOADER_BUILD) + # For bootloader, all we need from esp32s3 is headers + idf_component_register(INCLUDE_DIRS include) + target_linker_script(${COMPONENT_LIB} INTERFACE "ld/esp32s3.peripherals.ld") +else() + # Regular app build + + set(srcs "cache_err_int.c" + "clk.c" + "crosscore_int.c" + "dport_access.c" + "esp_crypto_lock.c" + "hw_random.c" + "intr_alloc.c" + "memprot.c" + "pm_esp32s3.c" + "pm_trace.c" + "sleep_modes.c" + "system_api_esp32s3.c") + set(include_dirs "include") + + set(requires driver efuse soc) #unfortunately rom/uart uses SOC registers directly + + # driver is a public requirement because esp_sleep.h uses gpio_num_t & touch_pad_t + # app_update is added here because cpu_start.c uses esp_ota_get_app_description() function. + # esp_timer is added here because cpu_start.c uses esp_timer + set(priv_requires app_trace app_update bootloader_support log mbedtls nvs_flash pthread + spi_flash vfs espcoredump esp_common perfmon esp_timer esp_ipc) + set(fragments linker.lf ld/esp32s3_fragments.lf) + + idf_component_register(SRCS "${srcs}" + INCLUDE_DIRS "${include_dirs}" + LDFRAGMENTS "${fragments}" + REQUIRES "${requires}" + PRIV_REQUIRES "${priv_requires}" + REQUIRED_IDF_TARGETS esp32s3) + + target_linker_script(${COMPONENT_LIB} INTERFACE "${CMAKE_CURRENT_BINARY_DIR}/esp32s3_out.ld") + + # Process the template file through the linker script generation mechanism, and use the output for linking the + # final binary + target_linker_script(${COMPONENT_LIB} INTERFACE "${CMAKE_CURRENT_LIST_DIR}/ld/esp32s3.project.ld.in" + PROCESS "${CMAKE_CURRENT_BINARY_DIR}/ld/esp32s3.project.ld") + + target_linker_script(${COMPONENT_LIB} INTERFACE "ld/esp32s3.peripherals.ld") + target_link_libraries(${COMPONENT_LIB} PUBLIC gcc) + target_link_libraries(${COMPONENT_LIB} INTERFACE "-u call_user_start_cpu0") + + idf_build_get_property(config_dir CONFIG_DIR) + # Preprocess esp32s3.ld linker script to include configuration, becomes esp32s3_out.ld + set(LD_DIR ${CMAKE_CURRENT_SOURCE_DIR}/ld) + add_custom_command( + OUTPUT esp32s3_out.ld + COMMAND "${CMAKE_C_COMPILER}" -C -P -x c -E -o esp32s3_out.ld -I ${config_dir} ${LD_DIR}/esp32s3.ld + MAIN_DEPENDENCY ${LD_DIR}/esp32s3.ld ${sdkconfig_header} + COMMENT "Generating linker script..." + VERBATIM) + + add_custom_target(esp32s3_linker_script DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/esp32s3_out.ld) + add_dependencies(${COMPONENT_LIB} esp32s3_linker_script) + + # disable stack protection in files which are involved in initialization of that feature + set_source_files_properties( + cpu_start.c + PROPERTIES COMPILE_FLAGS + -fno-stack-protector) +endif() diff --git a/components/esp32s3/cache_err_int.c b/components/esp32s3/cache_err_int.c new file mode 100644 index 0000000000..96c952e731 --- /dev/null +++ b/components/esp32s3/cache_err_int.c @@ -0,0 +1,75 @@ +// Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @file cache_err_int.c + * @brief The cache has an interrupt that can be raised as soon as an access to a cached + * region (Flash, PSRAM) is done without the cache being enabled. + * We use that here to panic the CPU, which from a debugging perspective, + * is better than grabbing bad data from the bus. + */ + +#include +#include "sdkconfig.h" +#include "esp_err.h" +#include "esp_attr.h" +#include "esp_intr_alloc.h" +#include "soc/soc.h" +#include "soc/extmem_reg.h" +#include "soc/periph_defs.h" +#include "hal/cpu_hal.h" +#include "esp32s3/dport_access.h" +#include "esp32s3/rom/ets_sys.h" + +void esp_cache_err_int_init(void) +{ + uint32_t core_id = cpu_hal_get_core_id(); + ESP_INTR_DISABLE(ETS_CACHEERR_INUM); + + // We do not register a handler for the interrupt because it is interrupt + // level 4 which is not serviceable from C. Instead, xtensa_vectors.S has + // a call to the panic handler for this interrupt. + intr_matrix_set(core_id, ETS_CACHE_IA_INTR_SOURCE, ETS_CACHEERR_INUM); + + // Enable invalid cache access interrupt when the cache is disabled. + // When the interrupt happens, we can not determine the CPU where the + // invalid cache access has occurred. We enable the interrupt to catch + // invalid access on both CPUs, but the interrupt is connected to the + // CPU which happens to call this function. + // For this reason, panic handler backtrace will not be correct if the + // interrupt is connected to PRO CPU and invalid access happens on the APP CPU. + + SET_PERI_REG_MASK(EXTMEM_CACHE_ILG_INT_CLR_REG, + EXTMEM_MMU_ENTRY_FAULT_INT_CLR | + EXTMEM_DCACHE_WRITE_FLASH_INT_CLR | + EXTMEM_DCACHE_PRELOAD_OP_FAULT_INT_CLR | + EXTMEM_DCACHE_SYNC_OP_FAULT_INT_CLR | + EXTMEM_ICACHE_PRELOAD_OP_FAULT_INT_CLR | + EXTMEM_ICACHE_SYNC_OP_FAULT_INT_CLR); + SET_PERI_REG_MASK(EXTMEM_CACHE_ILG_INT_ENA_REG, + EXTMEM_MMU_ENTRY_FAULT_INT_ENA | + EXTMEM_DCACHE_WRITE_FLASH_INT_ENA | + EXTMEM_DCACHE_PRELOAD_OP_FAULT_INT_ENA | + EXTMEM_DCACHE_SYNC_OP_FAULT_INT_ENA | + EXTMEM_ICACHE_PRELOAD_OP_FAULT_INT_ENA | + EXTMEM_ICACHE_SYNC_OP_FAULT_INT_ENA); + + ESP_INTR_ENABLE(ETS_CACHEERR_INUM); +} + +int IRAM_ATTR esp_cache_err_get_cpuid(void) +{ + // FIXME + return -1; +} diff --git a/components/esp32s3/clk.c b/components/esp32s3/clk.c new file mode 100644 index 0000000000..0f37b89848 --- /dev/null +++ b/components/esp32s3/clk.c @@ -0,0 +1,52 @@ +// Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
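For reference, a minimal sketch (not part of this patch) of how panic-handling code might consume the cache-error API added in cache_err_int.c above; the helper name is hypothetical, and on ESP32-S3 esp_cache_err_get_cpuid() currently returns -1 per the FIXME:

#include "esp32s3/cache_err_int.h"
#include "soc/soc.h"            // PRO_CPU_NUM / APP_CPU_NUM

// Hypothetical helper for a panic report: map the faulting core ID to a printable name.
static const char *cache_fault_core_name(void)
{
    int core = esp_cache_err_get_cpuid();
    if (core == PRO_CPU_NUM) {
        return "PRO CPU";
    } else if (core == APP_CPU_NUM) {
        return "APP CPU";
    }
    return "unknown CPU";       // -1: faulting core could not be determined on S3 yet
}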
+ +#include +#include + +#include "esp_attr.h" +#include "esp32s3/clk.h" +#include "soc/rtc.h" + +#define MHZ (1000000) + +// g_ticks_us defined in ROMs for PRO and APP CPU +extern uint32_t g_ticks_per_us_pro; +#ifndef CONFIG_FREERTOS_UNICORE +extern uint32_t g_ticks_per_us_app; +#endif + +int IRAM_ATTR esp_clk_cpu_freq(void) +{ + return g_ticks_per_us_pro * MHZ; +} + +int IRAM_ATTR esp_clk_apb_freq(void) +{ + return MIN(g_ticks_per_us_pro, 80) * MHZ; +} + +int IRAM_ATTR esp_clk_xtal_freq(void) +{ + return rtc_clk_xtal_freq_get() * MHZ; +} + +void IRAM_ATTR ets_update_cpu_frequency(uint32_t ticks_per_us) +{ + /* Update scale factors used by ets_delay_us */ + g_ticks_per_us_pro = ticks_per_us; +#ifndef CONFIG_FREERTOS_UNICORE + g_ticks_per_us_app = ticks_per_us; +#endif +} \ No newline at end of file diff --git a/components/esp32s3/crosscore_int.c b/components/esp32s3/crosscore_int.c new file mode 100644 index 0000000000..ea8c45414a --- /dev/null +++ b/components/esp32s3/crosscore_int.c @@ -0,0 +1,113 @@ +// Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#include +#include "esp_attr.h" +#include "esp_err.h" +#include "esp_intr_alloc.h" +#include "esp_debug_helpers.h" +#include "soc/periph_defs.h" +#include "soc/system_reg.h" +#include "hal/cpu_hal.h" +#include "freertos/FreeRTOS.h" +#include "freertos/portmacro.h" + +#define REASON_YIELD BIT(0) +#define REASON_FREQ_SWITCH BIT(1) +#define REASON_PRINT_BACKTRACE BIT(2) + +static portMUX_TYPE reason_spinlock = portMUX_INITIALIZER_UNLOCKED; +static volatile uint32_t reason[portNUM_PROCESSORS]; + +static inline void IRAM_ATTR esp_crosscore_isr_handle_yield(void) +{ + portYIELD_FROM_ISR(); +} + +static void IRAM_ATTR esp_crosscore_isr(void *arg) +{ + uint32_t my_reason_val; + //A pointer to the correct reason array item is passed to this ISR. + volatile uint32_t *my_reason = arg; + + //Clear the interrupt first. + if (cpu_hal_get_core_id() == 0) { + WRITE_PERI_REG(SYSTEM_CPU_INTR_FROM_CPU_0_REG, 0); + } else { + WRITE_PERI_REG(SYSTEM_CPU_INTR_FROM_CPU_1_REG, 0); + } + //Grab the reason and clear it. + portENTER_CRITICAL_ISR(&reason_spinlock); + my_reason_val = *my_reason; + *my_reason = 0; + portEXIT_CRITICAL_ISR(&reason_spinlock); + + //Check what we need to do. + if (my_reason_val & REASON_YIELD) { + esp_crosscore_isr_handle_yield(); + } + if (my_reason_val & REASON_FREQ_SWITCH) { + /* Nothing to do here; the frequency switch event was already + * handled by a hook in xtensa_vectors.S. Could be used in the future + * to allow DFS features without the extra latency of the ISR hook. + */ + } + if (my_reason_val & REASON_PRINT_BACKTRACE) { + esp_backtrace_print(100); + } +} + +// Initialize the crosscore interrupt on this core. 
+void esp_crosscore_int_init(void) +{ + portENTER_CRITICAL(&reason_spinlock); + reason[cpu_hal_get_core_id()] = 0; + portEXIT_CRITICAL(&reason_spinlock); + esp_err_t err; + if (cpu_hal_get_core_id() == 0) { + err = esp_intr_alloc(ETS_FROM_CPU_INTR0_SOURCE, ESP_INTR_FLAG_IRAM, esp_crosscore_isr, (void *)&reason[0], NULL); + } else { + err = esp_intr_alloc(ETS_FROM_CPU_INTR1_SOURCE, ESP_INTR_FLAG_IRAM, esp_crosscore_isr, (void *)&reason[1], NULL); + } + assert(err == ESP_OK); +} + +static void IRAM_ATTR esp_crosscore_int_send(int core_id, uint32_t reason_mask) +{ + assert(core_id < portNUM_PROCESSORS); + //Mark the reason we interrupt the other CPU + portENTER_CRITICAL(&reason_spinlock); + reason[core_id] |= reason_mask; + portEXIT_CRITICAL(&reason_spinlock); + //Poke the other CPU. + if (core_id == 0) { + WRITE_PERI_REG(SYSTEM_CPU_INTR_FROM_CPU_0_REG, SYSTEM_CPU_INTR_FROM_CPU_0); + } else { + WRITE_PERI_REG(SYSTEM_CPU_INTR_FROM_CPU_1_REG, SYSTEM_CPU_INTR_FROM_CPU_1); + } +} + +void IRAM_ATTR esp_crosscore_int_send_yield(int core_id) +{ + esp_crosscore_int_send(core_id, REASON_YIELD); +} + +void IRAM_ATTR esp_crosscore_int_send_freq_switch(int core_id) +{ + esp_crosscore_int_send(core_id, REASON_FREQ_SWITCH); +} + +void IRAM_ATTR esp_crosscore_int_send_print_backtrace(int core_id) +{ + esp_crosscore_int_send(core_id, REASON_PRINT_BACKTRACE); +} diff --git a/components/esp32s3/dport_access.c b/components/esp32s3/dport_access.c new file mode 100644 index 0000000000..51bc5b4623 --- /dev/null +++ b/components/esp32s3/dport_access.c @@ -0,0 +1,25 @@ +// Copyright 2010-2020 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include "soc/dport_access.h" + +// Read a sequence of DPORT registers to the buffer. +void esp_dport_access_read_buffer(uint32_t *buff_out, uint32_t address, uint32_t num_words) +{ + for (uint32_t i = 0; i < num_words; ++i) { + buff_out[i] = DPORT_SEQUENCE_REG_READ(address + i * 4); + } +} diff --git a/components/esp32s3/hw_random.c b/components/esp32s3/hw_random.c new file mode 100644 index 0000000000..251538b878 --- /dev/null +++ b/components/esp32s3/hw_random.c @@ -0,0 +1,70 @@ +// Copyright 2020 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
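As an illustration (not part of this patch), a hedged sketch of reading a block of consecutive registers with esp_dport_access_read_buffer() from dport_access.c above; the base address below is a placeholder for illustration only, not a real peripheral:

#include <stdint.h>
#include "esp32s3/dport_access.h"

// Read four consecutive 32-bit registers starting at a placeholder address
// into a local buffer (one sequential register read per word).
void read_example_block(void)
{
    uint32_t words[4];
    const uint32_t base_addr = 0x600C0000;  // placeholder address, illustration only
    esp_dport_access_read_buffer(words, base_addr, 4);
    (void)words;
}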
+ + +#include <assert.h> +#include <stdint.h> +#include <string.h> +#include <sys/param.h> +#include "esp_attr.h" +#include "esp32s3/clk.h" +#include "soc/wdev_reg.h" +#include "freertos/FreeRTOSConfig.h" +#include "xtensa/core-macros.h" + +uint32_t IRAM_ATTR esp_random(void) +{ + /* The PRNG which implements WDEV_RANDOM register gets 2 bits + * of extra entropy from a hardware randomness source every APB clock cycle + * (provided WiFi or BT are enabled). To make sure entropy is not drained + * faster than it is added, this function needs to wait for at least 16 APB + * clock cycles after reading previous word. This implementation may actually + * wait a bit longer due to extra time spent in arithmetic and branch statements. + * + * As a (probably unnecessary) precaution to avoid returning the + * RNG state as-is, the result is XORed with additional + * WDEV_RND_REG reads while waiting. + */ + + /* This code does not run in a critical section, so a CPU frequency switch may + * happen while this code runs (this will not happen in the current + * implementation, but is possible in the future). However if that happens, + * the number of cycles spent on frequency switching will certainly be more + * than the number of cycles we need to wait here. + */ + uint32_t cpu_to_apb_freq_ratio = esp_clk_cpu_freq() / esp_clk_apb_freq(); + + static uint32_t last_ccount = 0; + uint32_t ccount; + uint32_t result = 0; + do { + ccount = XTHAL_GET_CCOUNT(); + result ^= REG_READ(WDEV_RND_REG); + } while (ccount - last_ccount < cpu_to_apb_freq_ratio * 16); + last_ccount = ccount; + return result ^ REG_READ(WDEV_RND_REG); +} + +void esp_fill_random(void *buf, size_t len) +{ + assert(buf != NULL); + uint8_t *buf_bytes = (uint8_t *)buf; + while (len > 0) { + uint32_t word = esp_random(); + uint32_t to_copy = MIN(sizeof(word), len); + memcpy(buf_bytes, &word, to_copy); + buf_bytes += to_copy; + len -= to_copy; + } +} diff --git a/components/esp32s3/include/esp32s3/brownout.h b/components/esp32s3/include/esp32s3/brownout.h new file mode 100644 index 0000000000..b1124213b6 --- /dev/null +++ b/components/esp32s3/include/esp32s3/brownout.h @@ -0,0 +1,21 @@ +// Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + +#ifndef __ESP_BROWNOUT_H +#define __ESP_BROWNOUT_H + +void esp_brownout_init(void); + +#endif \ No newline at end of file diff --git a/components/esp32s3/include/esp32s3/cache_err_int.h b/components/esp32s3/include/esp32s3/cache_err_int.h new file mode 100644 index 0000000000..9c8d9ddb2f --- /dev/null +++ b/components/esp32s3/include/esp32s3/cache_err_int.h @@ -0,0 +1,33 @@ +// Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
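A brief usage sketch (illustrative only, not part of this patch) for the RNG functions implemented in hw_random.c above; in ESP-IDF of this era both are declared via esp_system.h, and the modulo used for bounding introduces a small bias that is acceptable for non-cryptographic uses:

#include <stdint.h>
#include "esp_system.h"   // declares esp_random() / esp_fill_random()

void rng_examples(void)
{
    uint8_t key[32];
    esp_fill_random(key, sizeof(key));      // fill a buffer with hardware-derived randomness

    uint32_t dice = (esp_random() % 6) + 1; // bounded value; slight modulo bias, fine for non-crypto use
    (void)key;
    (void)dice;
}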
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + +/** + * @brief initialize cache invalid access interrupt + * + * This function enables cache invalid access interrupt source and connects it + * to interrupt input number ETS_CACHEERR_INUM (see soc/soc.h). It is called + * from the startup code. + */ +void esp_cache_err_int_init(void); + + +/** + * @brief get the CPU which caused cache invalid access interrupt + * @return + * - PRO_CPU_NUM, if PRO_CPU has caused cache IA interrupt + * - APP_CPU_NUM, if APP_CPU has caused cache IA interrupt + * - (-1) otherwise + */ +int esp_cache_err_get_cpuid(void); diff --git a/components/esp32s3/include/esp32s3/clk.h b/components/esp32s3/include/esp32s3/clk.h new file mode 100644 index 0000000000..1d4e462bcc --- /dev/null +++ b/components/esp32s3/include/esp32s3/clk.h @@ -0,0 +1,84 @@ +// Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @file esp_clk.h + * + * This file contains declarations of clock related functions. + */ + +/** + * @brief Get the calibration value of RTC slow clock + * + * The value is in the same format as returned by rtc_clk_cal (microseconds, + * in Q13.19 fixed-point format). + * + * @return the calibration value obtained using rtc_clk_cal, at startup time + */ +uint32_t esp_clk_slowclk_cal_get(void); + +/** + * @brief Update the calibration value of RTC slow clock + * + * The value has to be in the same format as returned by rtc_clk_cal (microseconds, + * in Q13.19 fixed-point format). + * This value is used by timekeeping functions (such as gettimeofday) to + * calculate current time based on RTC counter value. + * @param value calibration value obtained using rtc_clk_cal + */ +void esp_clk_slowclk_cal_set(uint32_t value); + +/** + * @brief Return current CPU clock frequency + * When frequency switching is performed, this frequency may change. + * However it is guaranteed that the frequency never changes with a critical + * section. + * + * @return CPU clock frequency, in Hz + */ +int esp_clk_cpu_freq(void); + +/** + * @brief Return current APB clock frequency + * + * When frequency switching is performed, this frequency may change. + * However it is guaranteed that the frequency never changes with a critical + * section. 
+ * + * @return APB clock frequency, in Hz + */ +int esp_clk_apb_freq(void); + + +/** + * @brief Read value of RTC counter, converting it to microseconds + * @attention The value returned by this function may change abruptly when + * calibration value of RTC counter is updated via esp_clk_slowclk_cal_set + * function. This should not happen unless application calls esp_clk_slowclk_cal_set. + * In ESP-IDF, esp_clk_slowclk_cal_set is only called in startup code. + * + * @return Value or RTC counter, expressed in microseconds + */ +uint64_t esp_clk_rtc_time(void); + +#ifdef __cplusplus +} +#endif diff --git a/components/esp32s3/include/esp32s3/dport_access.h b/components/esp32s3/include/esp32s3/dport_access.h new file mode 100644 index 0000000000..96f9363b28 --- /dev/null +++ b/components/esp32s3/include/esp32s3/dport_access.h @@ -0,0 +1,40 @@ +// Copyright 2010-2020 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef _ESP_DPORT_ACCESS_H_ +#define _ESP_DPORT_ACCESS_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief Read a sequence of DPORT registers to the buffer. + * + * @param[out] buff_out Contains the read data. + * @param[in] address Initial address for reading registers. + * @param[in] num_words The number of words. + */ +void esp_dport_access_read_buffer(uint32_t *buff_out, uint32_t address, uint32_t num_words); + +#define DPORT_STALL_OTHER_CPU_START() +#define DPORT_STALL_OTHER_CPU_END() +#define DPORT_INTERRUPT_DISABLE() +#define DPORT_INTERRUPT_RESTORE() + +#ifdef __cplusplus +} +#endif + +#endif /* _ESP_DPORT_ACCESS_H_ */ diff --git a/components/esp32s3/include/esp32s3/memprot.h b/components/esp32s3/include/esp32s3/memprot.h new file mode 100644 index 0000000000..7246826530 --- /dev/null +++ b/components/esp32s3/include/esp32s3/memprot.h @@ -0,0 +1,353 @@ +// Copyright 2020 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + +/* INTERNAL API + * generic interface to MMU memory protection features + */ + +#pragma once + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum { + MEMPROT_IRAM0 = 0x00000000, + MEMPROT_DRAM0 = 0x00000001, + MEMPROT_UNKNOWN +} mem_type_prot_t; + + +/** + * @brief Returns splitting address for required memory region + * + * @param mem_type Memory protection area type (see mem_type_prot_t enum) + * + * @return Splitting address for the memory region required. 
+ * The address is given by region-specific global symbol exported from linker script, + * it is not read out from related configuration register. + */ +uint32_t *IRAM_ATTR esp_memprot_get_split_addr(mem_type_prot_t mem_type); + +/** + * @brief Initializes illegal memory access control (MMU) for required memory section. + * + * All memory access interrupts share ETS_MEMACCESS_ERR_INUM input channel, it is caller's + * responsibility to properly detect actual intr. source as well as possible prioritization in case + * of multiple source reported during one intr.handling routine run + * + * @param mem_type Memory protection area type (see mem_type_prot_t enum) + */ +void esp_memprot_intr_init(mem_type_prot_t mem_type); + +/** + * @brief Enable/disable the memory protection interrupt + * + * @param mem_type Memory protection area type (see mem_type_prot_t enum) + * @param enable enable/disable + */ +void esp_memprot_intr_ena(mem_type_prot_t mem_type, bool enable); + +/** + * @brief Detects whether any of the memory protection interrupts is active + * + * @return true/false + */ +bool esp_memprot_is_assoc_intr_any(void); + +/** + * @brief Detects whether specific memory protection interrupt is active + * + * @param mem_type Memory protection area type (see mem_type_prot_t enum) + * + * @return true/false + */ +bool esp_memprot_is_assoc_intr(mem_type_prot_t mem_type); + +/** + * @brief Sets a request for clearing interrupt-on flag for specified memory region (register write) + * + * @note When called without actual interrupt-on flag set, subsequent occurrence of related interrupt is ignored. + * Should be used only after the real interrupt appears, typically as the last step in interrupt handler's routine. + * + * @param mem_type Memory protection area type (see mem_type_prot_t enum) + */ +void esp_memprot_clear_intr(mem_type_prot_t mem_type); + +/** + * @brief Detects which memory protection interrupt is active, check order: IRAM0, DRAM0 + * + * @return Memory protection area type (see mem_type_prot_t enum) + */ +mem_type_prot_t IRAM_ATTR esp_memprot_get_intr_memtype(void); + +/** + * @brief Gets interrupt status register contents for specified memory region + * + * @param mem_type Memory protection area type (see mem_type_prot_t enum) + * + * @return Contents of status register + */ +uint32_t esp_memprot_get_fault_reg(mem_type_prot_t mem_type); + +/** + * @brief Get details of given interrupt status + * + * @param mem_type Memory protection area type (see mem_type_prot_t enum) + * @param faulting_address Faulting address causing the interrupt [out] + * @param op_type Operation being processed at the faulting address [out] + * IRAM0: 0 - read, 1 - write + * DRAM0: 0 - read, 1 - write + * @param op_subtype Additional info for op_type [out] + * IRAM0: 0 - instruction segment access, 1 - data segment access + * DRAM0: 0 - non-atomic operation, 1 - atomic operation + */ +void IRAM_ATTR esp_memprot_get_fault_status(mem_type_prot_t mem_type, uint32_t **faulting_address, uint32_t *op_type, uint32_t *op_subtype); + +/** + * @brief Gets string representation of required memory region identifier + * + * @param mem_type Memory protection area type (see mem_type_prot_t enum) + * + * @return mem_type as string + */ +const char *IRAM_ATTR esp_memprot_type_to_str(mem_type_prot_t mem_type); + +/** + * @brief Detects whether any of the interrupt locks is active (requires digital system reset to unlock) + * + * @return true/false + */ +bool esp_memprot_is_locked_any(void); + +/** + * @brief Sets lock for 
specified memory region. + * + * Locks can be unlocked only by digital system reset + * + * @param mem_type Memory protection area type (see mem_type_prot_t enum) + */ +void esp_memprot_set_lock(mem_type_prot_t mem_type); + +/** + * @brief Gets lock status for required memory region + * + * @param mem_type Memory protection area type (see mem_type_prot_t enum) + * + * @return true/false (locked/unlocked) + */ +bool esp_memprot_get_lock(mem_type_prot_t mem_type); + +/** + * @brief Gets interrupt permission control register contents for required memory region + * + * @param mem_type Memory protection area type (see mem_type_prot_t enum) + * + * @return Permission control register contents + */ +uint32_t esp_memprot_get_ena_reg(mem_type_prot_t mem_type); + +/** + * @brief Gets interrupt permission settings for unified management block + * + * Gets interrupt permission settings register contents for required memory region, returns settings for unified management blocks + * + * @param mem_type Memory protection area type (see mem_type_prot_t enum) + * + * @return Permission settings register contents + */ +uint32_t esp_memprot_get_perm_uni_reg(mem_type_prot_t mem_type); + +/** + * @brief Gets interrupt permission settings for split management block + * + * Gets interrupt permission settings register contents for required memory region, returns settings for split management blocks + * + * @param mem_type Memory protection area type (see mem_type_prot_t enum) + * + * @return Permission settings register contents + */ +uint32_t esp_memprot_get_perm_split_reg(mem_type_prot_t mem_type); + +/** + * @brief Detects whether any of the memory protection interrupts is enabled + * + * @return true/false + */ +bool esp_memprot_is_intr_ena_any(void); + +/** + * @brief Gets interrupt-enabled flag for given memory region + * + * @param mem_type Memory protection area type (see mem_type_prot_t enum) + * + * @return Interrupt-enabled value + */ +uint32_t esp_memprot_get_intr_ena_bit(mem_type_prot_t mem_type); + +/** + * @brief Gets interrupt-active flag for given memory region + * + * @param mem_type Memory protection area type (see mem_type_prot_t enum) + * + * @return Interrupt-active value + */ +uint32_t esp_memprot_get_intr_on_bit(mem_type_prot_t mem_type); + +/** + * @brief Gets interrupt-clear request flag for given memory region + * + * @param mem_type Memory protection area type (see mem_type_prot_t enum) + * + * @return Interrupt-clear request value + */ +uint32_t esp_memprot_get_intr_clr_bit(mem_type_prot_t mem_type); + +/** + * @brief Gets read permission value for specified block and memory region + * + * Returns read permission bit value for required unified-management block (0-3) in given memory region. + * Applicable to all memory types. + * + * @param mem_type Memory protection area type (see mem_type_prot_t enum) + * @param block Memory block identifier (0-3) + * + * @return Read permission value for required block + */ +uint32_t esp_memprot_get_uni_block_read_bit(mem_type_prot_t mem_type, uint32_t block); + +/** + * @brief Gets write permission value for specified block and memory region + * + * Returns write permission bit value for required unified-management block (0-3) in given memory region. + * Applicable to all memory types. 
+ * + * @param mem_type Memory protection area type (see mem_type_prot_t enum) + * @param block Memory block identifier (0-3) + * + * @return Write permission value for required block + */ +uint32_t esp_memprot_get_uni_block_write_bit(mem_type_prot_t mem_type, uint32_t block); + +/** + * @brief Gets execute permission value for specified block and memory region + * + * Returns execute permission bit value for required unified-management block (0-3) in given memory region. + * Applicable only to IRAM memory types + * + * @param mem_type Memory protection area type (see mem_type_prot_t enum) + * @param block Memory block identifier (0-3) + * + * @return Execute permission value for required block + */ +uint32_t esp_memprot_get_uni_block_exec_bit(mem_type_prot_t mem_type, uint32_t block); + +/** + * @brief Sets permissions for specified block in DRAM region + * + * Sets Read and Write permission for specified unified-management block (0-3) in given memory region. + * Applicable only to DRAM memory types + * + * @param mem_type Memory protection area type (see mem_type_prot_t enum) + * @param block Memory block identifier (0-3) + * @param write_perm Write permission flag + * @param read_perm Read permission flag + */ +void esp_memprot_set_uni_block_perm_dram(mem_type_prot_t mem_type, uint32_t block, bool write_perm, bool read_perm); + +/** + * @brief Sets permissions for high and low memory segment in DRAM region + * + * Sets Read and Write permission for both low and high memory segments given by splitting address. + * The splitting address must be equal to or higher then beginning of block 5 + * Applicable only to DRAM memory types + * + * @param mem_type Memory protection area type (see mem_type_prot_t enum) + * @param split_addr Address to split the memory region to lower and higher segment + * @param lw Low segment Write permission flag + * @param lr Low segment Read permission flag + * @param hw High segment Write permission flag + * @param hr High segment Read permission flag + */ +void esp_memprot_set_prot_dram(mem_type_prot_t mem_type, uint32_t *split_addr, bool lw, bool lr, bool hw, bool hr); + +/** + * @brief Sets permissions for specified block in IRAM region + * + * Sets Read, Write and Execute permission for specified unified-management block (0-3) in given memory region. + * Applicable only to IRAM memory types + * + * @param mem_type Memory protection area type (see mem_type_prot_t enum) + * @param block Memory block identifier (0-3) + * @param write_perm Write permission flag + * @param exec_perm Execute permission flag + */ +void esp_memprot_set_uni_block_perm_iram(mem_type_prot_t mem_type, uint32_t block, bool write_perm, bool read_perm, bool exec_perm); + +/** + * @brief Sets permissions for high and low memory segment in IRAM region + * + * Sets Read, Write and Execute permission for both low and high memory segments given by splitting address. 
+ * The splitting address must be equal to or higher then beginning of block 5 + * Applicable only to IRAM memory types + * + * @param mem_type Memory protection area type (see mem_type_prot_t enum) + * @param split_addr Address to split the memory region to lower and higher segment + * @param lw Low segment Write permission flag + * @param lr Low segment Read permission flag + * @param lx Low segment Execute permission flag + * @param hw High segment Write permission flag + * @param hr High segment Read permission flag + * @param hx High segment Execute permission flag + */ +void esp_memprot_set_prot_iram(mem_type_prot_t mem_type, uint32_t *split_addr, bool lw, bool lr, bool lx, bool hw, bool hr, bool hx); + +/** + * @brief Activates memory protection for all supported memory region types + * + * @note The feature is disabled when JTAG interface is connected + * + * @param invoke_panic_handler map mem.prot interrupt to ETS_MEMACCESS_ERR_INUM and thus invokes panic handler when fired ('true' not suitable for testing) + * @param lock_feature sets LOCK bit, see esp_memprot_set_lock() ('true' not suitable for testing) + */ +void esp_memprot_set_prot(bool invoke_panic_handler, bool lock_feature); + +/** + * @brief Get permission settings bits for IRAM split mgmt based on current split address + * + * @param mem_type Memory protection area type (see mem_type_prot_t enum) + * @param lw Low segment Write permission flag + * @param lr Low segment Read permission flag + * @param lx Low segment Execute permission flag + * @param hw High segment Write permission flag + * @param hr High segment Read permission flag + * @param hx High segment Execute permission flag + */ +void esp_memprot_get_perm_split_bits_iram(mem_type_prot_t mem_type, bool *lw, bool *lr, bool *lx, bool *hw, bool *hr, bool *hx); + +/** + * @brief Get permission settings bits for DRAM split mgmt based on current split address + * + * @param mem_type Memory protection area type (see mem_type_prot_t enum) + * @param lw Low segment Write permission flag + * @param lr Low segment Read permission flag + * @param hw High segment Write permission flag + * @param hr High segment Read permission flag + */ +void esp_memprot_get_perm_split_bits_dram(mem_type_prot_t mem_type, bool *lw, bool *lr, bool *hw, bool *hr); + +#ifdef __cplusplus +} +#endif diff --git a/components/esp32s3/include/esp32s3/pm.h b/components/esp32s3/include/esp32s3/pm.h new file mode 100644 index 0000000000..0b29c66aba --- /dev/null +++ b/components/esp32s3/include/esp32s3/pm.h @@ -0,0 +1,42 @@ +// Copyright 2016-2017 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + +#pragma once +#include +#include +#include "esp_err.h" + +#include "soc/rtc.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +/** + * @brief Power management config for ESP32 + * + * Pass a pointer to this structure as an argument to esp_pm_configure function. 
+ */ +typedef struct { + int max_freq_mhz; /*!< Maximum CPU frequency, in MHz */ + int min_freq_mhz; /*!< Minimum CPU frequency to use when no locks are taken, in MHz */ + bool light_sleep_enable; /*!< Enter light sleep when no locks are taken */ +} esp_pm_config_esp32s3_t; + + +#ifdef __cplusplus +} +#endif diff --git a/components/esp32s3/include/esp32s3/spiram.h b/components/esp32s3/include/esp32s3/spiram.h new file mode 100644 index 0000000000..fdfc7b8c5c --- /dev/null +++ b/components/esp32s3/include/esp32s3/spiram.h @@ -0,0 +1,91 @@ +// Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + +#ifndef __ESP_SPIRAM_H +#define __ESP_SPIRAM_H + +#include +#include +#include +#include "esp_err.h" + +/** + * @brief Initialize spiram interface/hardware. Normally called from cpu_start.c. + * + * @return ESP_OK on success + */ +esp_err_t esp_spiram_init(void); + +/** + * @brief Configure Cache/MMU for access to external SPI RAM. + * + * Normally this function is called from cpu_start, if CONFIG_SPIRAM_BOOT_INIT + * option is enabled. Applications which need to enable SPI RAM at run time + * can disable CONFIG_SPIRAM_BOOT_INIT, and call this function later. + * + * @attention this function must be called with flash cache disabled. + */ +void esp_spiram_init_cache(void); + + +/** + * @brief Memory test for SPI RAM. Should be called after SPI RAM is initialized and + * (in case of a dual-core system) the app CPU is online. This test overwrites the + * memory with crap, so do not call after e.g. the heap allocator has stored important + * stuff in SPI RAM. + * + * @return true on success, false on failed memory test + */ +bool esp_spiram_test(void); + + +/** + * @brief Add the initialized SPI RAM to the heap allocator. + */ +esp_err_t esp_spiram_add_to_heapalloc(void); + + +/** + * @brief Get the size of the attached SPI RAM chip selected in menuconfig + * + * @return Size in bytes, or 0 if no external RAM chip support compiled in. + */ +size_t esp_spiram_get_size(void); + + +/** + * @brief Force a writeback of the data in the SPI RAM cache. This is to be called whenever + * cache is disabled, because disabling cache on the ESP32 discards the data in the SPI + * RAM cache. + * + * This is meant for use from within the SPI flash code. 
+ */ +void esp_spiram_writeback_cache(void); + + + +/** + * @brief Reserve a pool of internal memory for specific DMA/internal allocations + * + * @param size Size of reserved pool in bytes + * + * @return + * - ESP_OK on success + * - ESP_ERR_NO_MEM when no memory available for pool + */ +esp_err_t esp_spiram_reserve_dma_pool(size_t size); + + +#endif diff --git a/components/esp32s3/include/esp_clk.h b/components/esp32s3/include/esp_clk.h new file mode 100644 index 0000000000..78f6678aac --- /dev/null +++ b/components/esp32s3/include/esp_clk.h @@ -0,0 +1,77 @@ +// Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include + +/** + * @file esp_clk.h + * + * This file contains declarations of clock related functions. + */ + +/** + * @brief Get the calibration value of RTC slow clock + * + * The value is in the same format as returned by rtc_clk_cal (microseconds, + * in Q13.19 fixed-point format). + * + * @return the calibration value obtained using rtc_clk_cal, at startup time + */ +uint32_t esp_clk_slowclk_cal_get(void); + +/** + * @brief Update the calibration value of RTC slow clock + * + * The value has to be in the same format as returned by rtc_clk_cal (microseconds, + * in Q13.19 fixed-point format). + * This value is used by timekeeping functions (such as gettimeofday) to + * calculate current time based on RTC counter value. + * @param value calibration value obtained using rtc_clk_cal + */ +void esp_clk_slowclk_cal_set(uint32_t value); + +/** + * @brief Return current CPU clock frequency + * When frequency switching is performed, this frequency may change. + * However it is guaranteed that the frequency never changes with a critical + * section. + * + * @return CPU clock frequency, in Hz + */ +int esp_clk_cpu_freq(void); + +/** + * @brief Return current APB clock frequency + * + * When frequency switching is performed, this frequency may change. + * However it is guaranteed that the frequency never changes with a critical + * section. + * + * @return APB clock frequency, in Hz + */ +int esp_clk_apb_freq(void); + + +/** + * @brief Read value of RTC counter, converting it to microseconds + * @attention The value returned by this function may change abruptly when + * calibration value of RTC counter is updated via esp_clk_slowclk_cal_set + * function. This should not happen unless application calls esp_clk_slowclk_cal_set. + * In ESP-IDF, esp_clk_slowclk_cal_set is only called in startup code. 
+ * + * @return Value or RTC counter, expressed in microseconds + */ +uint64_t esp_clk_rtc_time(void); diff --git a/components/esp32s3/include/esp_intr_alloc.h b/components/esp32s3/include/esp_intr_alloc.h new file mode 100644 index 0000000000..9b3bf662bc --- /dev/null +++ b/components/esp32s3/include/esp_intr_alloc.h @@ -0,0 +1,299 @@ +// Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __ESP_INTR_ALLOC_H__ +#define __ESP_INTR_ALLOC_H__ + +#include +#include +#include "esp_err.h" +#include "freertos/xtensa_api.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +/** @addtogroup Intr_Alloc + * @{ + */ + + +/** @brief Interrupt allocation flags + * + * These flags can be used to specify which interrupt qualities the + * code calling esp_intr_alloc* needs. + * + */ + +//Keep the LEVELx values as they are here; they match up with (1<3 + * is requested, because these types of interrupts aren't C-callable. + * @param arg Optional argument for passed to the interrupt handler + * @param ret_handle Pointer to an intr_handle_t to store a handle that can later be + * used to request details or free the interrupt. Can be NULL if no handle + * is required. + * + * @return ESP_ERR_INVALID_ARG if the combination of arguments is invalid. + * ESP_ERR_NOT_FOUND No free interrupt found with the specified flags + * ESP_OK otherwise + */ +esp_err_t esp_intr_alloc(int source, int flags, intr_handler_t handler, void *arg, intr_handle_t *ret_handle); + + +/** + * @brief Allocate an interrupt with the given parameters. + * + * + * This essentially does the same as esp_intr_alloc, but allows specifying a register and mask + * combo. For shared interrupts, the handler is only called if a read from the specified + * register, ANDed with the mask, returns non-zero. By passing an interrupt status register + * address and a fitting mask, this can be used to accelerate interrupt handling in the case + * a shared interrupt is triggered; by checking the interrupt statuses first, the code can + * decide which ISRs can be skipped + * + * @param source The interrupt source. One of the ETS_*_INTR_SOURCE interrupt mux + * sources, as defined in soc/soc.h, or one of the internal + * ETS_INTERNAL_*_INTR_SOURCE sources as defined in this header. + * @param flags An ORred mask of the ESP_INTR_FLAG_* defines. These restrict the + * choice of interrupts that this routine can choose from. If this value + * is 0, it will default to allocating a non-shared interrupt of level + * 1, 2 or 3. If this is ESP_INTR_FLAG_SHARED, it will allocate a shared + * interrupt of level 1. Setting ESP_INTR_FLAG_INTRDISABLED will return + * from this function with the interrupt disabled. + * @param intrstatusreg The address of an interrupt status register + * @param intrstatusmask A mask. If a read of address intrstatusreg has any of the bits + * that are 1 in the mask set, the ISR will be called. If not, it will be + * skipped. + * @param handler The interrupt handler. 
Must be NULL when an interrupt of level >3 + * is requested, because these types of interrupts aren't C-callable. + * @param arg Optional argument for passed to the interrupt handler + * @param ret_handle Pointer to an intr_handle_t to store a handle that can later be + * used to request details or free the interrupt. Can be NULL if no handle + * is required. + * + * @return ESP_ERR_INVALID_ARG if the combination of arguments is invalid. + * ESP_ERR_NOT_FOUND No free interrupt found with the specified flags + * ESP_OK otherwise + */ +esp_err_t esp_intr_alloc_intrstatus(int source, int flags, uint32_t intrstatusreg, uint32_t intrstatusmask, intr_handler_t handler, void *arg, intr_handle_t *ret_handle); + + +/** + * @brief Disable and free an interrupt. + * + * Use an interrupt handle to disable the interrupt and release the resources + * associated with it. + * + * @note + * When the handler shares its source with other handlers, the interrupt status + * bits it's responsible for should be managed properly before freeing it. see + * ``esp_intr_disable`` for more details. + * + * @param handle The handle, as obtained by esp_intr_alloc or esp_intr_alloc_intrstatus + * + * @return ESP_ERR_INVALID_ARG if handle is invalid, or esp_intr_free runs on another core than + * where the interrupt is allocated on. + * ESP_OK otherwise + */ +esp_err_t esp_intr_free(intr_handle_t handle); + + +/** + * @brief Get CPU number an interrupt is tied to + * + * @param handle The handle, as obtained by esp_intr_alloc or esp_intr_alloc_intrstatus + * + * @return The core number where the interrupt is allocated + */ +int esp_intr_get_cpu(intr_handle_t handle); + +/** + * @brief Get the allocated interrupt for a certain handle + * + * @param handle The handle, as obtained by esp_intr_alloc or esp_intr_alloc_intrstatus + * + * @return The interrupt number + */ +int esp_intr_get_intno(intr_handle_t handle); + +/** + * @brief Disable the interrupt associated with the handle + * + * @note + * 1. For local interrupts (ESP_INTERNAL_* sources), this function has to be called on the + * CPU the interrupt is allocated on. Other interrupts have no such restriction. + * 2. When several handlers sharing a same interrupt source, interrupt status bits, which are + * handled in the handler to be disabled, should be masked before the disabling, or handled + * in other enabled interrupts properly. Miss of interrupt status handling will cause infinite + * interrupt calls and finally system crash. + * + * @param handle The handle, as obtained by esp_intr_alloc or esp_intr_alloc_intrstatus + * + * @return ESP_ERR_INVALID_ARG if the combination of arguments is invalid. + * ESP_OK otherwise + */ +esp_err_t esp_intr_disable(intr_handle_t handle); + +/** + * @brief Enable the interrupt associated with the handle + * + * @note For local interrupts (ESP_INTERNAL_* sources), this function has to be called on the + * CPU the interrupt is allocated on. Other interrupts have no such restriction. + * + * @param handle The handle, as obtained by esp_intr_alloc or esp_intr_alloc_intrstatus + * + * @return ESP_ERR_INVALID_ARG if the combination of arguments is invalid. + * ESP_OK otherwise + */ +esp_err_t esp_intr_enable(intr_handle_t handle); + +/** + * @brief Set the "in IRAM" status of the handler. + * + * @note Does not work on shared interrupts. + * + * @param handle The handle, as obtained by esp_intr_alloc or esp_intr_alloc_intrstatus + * @param is_in_iram Whether the handler associated with this handle resides in IRAM. 
+ * Handlers residing in IRAM can be called when cache is disabled. + * + * @return ESP_ERR_INVALID_ARG if the combination of arguments is invalid. + * ESP_OK otherwise + */ +esp_err_t esp_intr_set_in_iram(intr_handle_t handle, bool is_in_iram); + +/** + * @brief Disable interrupts that aren't specifically marked as running from IRAM + */ +void esp_intr_noniram_disable(void); + + +/** + * @brief Re-enable interrupts disabled by esp_intr_noniram_disable + */ +void esp_intr_noniram_enable(void); + +/**@}*/ + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/components/esp32s3/include/esp_sleep.h b/components/esp32s3/include/esp_sleep.h new file mode 100644 index 0000000000..b1c21f1739 --- /dev/null +++ b/components/esp32s3/include/esp_sleep.h @@ -0,0 +1,358 @@ +// Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include "esp_err.h" +#include "driver/gpio.h" +#include "driver/touch_pad.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief Logic function used for EXT1 wakeup mode. + */ +typedef enum { + ESP_EXT1_WAKEUP_ALL_LOW = 0, //!< Wake the chip when all selected GPIOs go low + ESP_EXT1_WAKEUP_ANY_HIGH = 1 //!< Wake the chip when any of the selected GPIOs go high +} esp_sleep_ext1_wakeup_mode_t; + +/** + * @brief Power domains which can be powered down in sleep mode + */ +typedef enum { + ESP_PD_DOMAIN_RTC_PERIPH, //!< RTC IO, sensors and ULP co-processor + ESP_PD_DOMAIN_RTC_SLOW_MEM, //!< RTC slow memory + ESP_PD_DOMAIN_RTC_FAST_MEM, //!< RTC fast memory + ESP_PD_DOMAIN_XTAL, //!< XTAL oscillator + ESP_PD_DOMAIN_MAX //!< Number of domains +} esp_sleep_pd_domain_t; + +/** + * @brief Power down options + */ +typedef enum { + ESP_PD_OPTION_OFF, //!< Power down the power domain in sleep mode + ESP_PD_OPTION_ON, //!< Keep power domain enabled during sleep mode + ESP_PD_OPTION_AUTO //!< Keep power domain enabled in sleep mode, if it is needed by one of the wakeup options. Otherwise power it down. 
+} esp_sleep_pd_option_t; + +/** + * @brief Sleep wakeup cause + */ +typedef enum { + ESP_SLEEP_WAKEUP_UNDEFINED, //!< In case of deep sleep, reset was not caused by exit from deep sleep + ESP_SLEEP_WAKEUP_ALL, //!< Not a wakeup cause, used to disable all wakeup sources with esp_sleep_disable_wakeup_source + ESP_SLEEP_WAKEUP_EXT0, //!< Wakeup caused by external signal using RTC_IO + ESP_SLEEP_WAKEUP_EXT1, //!< Wakeup caused by external signal using RTC_CNTL + ESP_SLEEP_WAKEUP_TIMER, //!< Wakeup caused by timer + ESP_SLEEP_WAKEUP_TOUCHPAD, //!< Wakeup caused by touchpad + ESP_SLEEP_WAKEUP_ULP, //!< Wakeup caused by ULP program + ESP_SLEEP_WAKEUP_GPIO, //!< Wakeup caused by GPIO (light sleep only) + ESP_SLEEP_WAKEUP_UART, //!< Wakeup caused by UART (light sleep only) + ESP_SLEEP_WAKEUP_WIFI, //!< Wakeup caused by WIFI (light sleep only) + ESP_SLEEP_WAKEUP_COCPU, //!< Wakeup caused by COCPU int + ESP_SLEEP_WAKEUP_COCPU_TRAP_TRIG, //!< Wakeup caused by COCPU crash +} esp_sleep_source_t; + +/* Leave this type define for compatibility */ +typedef esp_sleep_source_t esp_sleep_wakeup_cause_t; + +/** + * @brief Disable wakeup source + * + * This function is used to deactivate wake up trigger for source + * defined as parameter of the function. + * + * @note This function does not modify wake up configuration in RTC. + * It will be performed in esp_sleep_start function. + * + * See docs/sleep-modes.rst for details. + * + * @param source - number of source to disable of type esp_sleep_source_t + * @return + * - ESP_OK on success + * - ESP_ERR_INVALID_STATE if trigger was not active + */ +esp_err_t esp_sleep_disable_wakeup_source(esp_sleep_source_t source); + +/** + * @brief Enable wakeup by ULP coprocessor + * @note In revisions 0 and 1 of the ESP32, ULP wakeup source + * can not be used when RTC_PERIPH power domain is forced + * to be powered on (ESP_PD_OPTION_ON) or when ext0 wakeup + * source is used. + * @return + * - ESP_OK on success + * - ESP_ERR_NOT_SUPPORTED if additional current by touch (CONFIG_ESP32_RTC_EXT_CRYST_ADDIT_CURRENT) is enabled. + * - ESP_ERR_INVALID_STATE if ULP co-processor is not enabled or if wakeup triggers conflict + */ +esp_err_t esp_sleep_enable_ulp_wakeup(void); + +/** + * @brief Enable wakeup by timer + * @param time_in_us time before wakeup, in microseconds + * @return + * - ESP_OK on success + * - ESP_ERR_INVALID_ARG if value is out of range (TBD) + */ +esp_err_t esp_sleep_enable_timer_wakeup(uint64_t time_in_us); + +/** + * @brief Enable wakeup by touch sensor + * + * @note In revisions 0 and 1 of the ESP32, touch wakeup source + * can not be used when RTC_PERIPH power domain is forced + * to be powered on (ESP_PD_OPTION_ON) or when ext0 wakeup + * source is used. + * + * @note The FSM mode of the touch button should be configured + * as the timer trigger mode. + * + * @return + * - ESP_OK on success + * - ESP_ERR_NOT_SUPPORTED if additional current by touch (CONFIG_ESP32_RTC_EXT_CRYST_ADDIT_CURRENT) is enabled. + * - ESP_ERR_INVALID_STATE if wakeup triggers conflict + */ +esp_err_t esp_sleep_enable_touchpad_wakeup(void); + +/** + * @brief Get the touch pad which caused wakeup + * + * If wakeup was caused by another source, this function will return TOUCH_PAD_MAX; + * + * @return touch pad which caused wakeup + */ +touch_pad_t esp_sleep_get_touchpad_wakeup_status(void); + +/** + * @brief Enable wakeup using a pin + * + * This function uses external wakeup feature of RTC_IO peripheral. + * It will work only if RTC peripherals are kept on during sleep. 
+ * + * This feature can monitor any pin which is an RTC IO. Once the pin transitions + * into the state given by level argument, the chip will be woken up. + * + * @note This function does not modify pin configuration. The pin is + * configured in esp_sleep_start, immediately before entering sleep mode. + * + * @note In revisions 0 and 1 of the ESP32, ext0 wakeup source + * can not be used together with touch or ULP wakeup sources. + * + * @param gpio_num GPIO number used as wakeup source. Only GPIOs which are have RTC + * functionality can be used: 0,2,4,12-15,25-27,32-39. + * @param level input level which will trigger wakeup (0=low, 1=high) + * @return + * - ESP_OK on success + * - ESP_ERR_INVALID_ARG if the selected GPIO is not an RTC GPIO, + * or the mode is invalid + * - ESP_ERR_INVALID_STATE if wakeup triggers conflict + */ +esp_err_t esp_sleep_enable_ext0_wakeup(gpio_num_t gpio_num, int level); + +/** + * @brief Enable wakeup using multiple pins + * + * This function uses external wakeup feature of RTC controller. + * It will work even if RTC peripherals are shut down during sleep. + * + * This feature can monitor any number of pins which are in RTC IOs. + * Once any of the selected pins goes into the state given by mode argument, + * the chip will be woken up. + * + * @note This function does not modify pin configuration. The pins are + * configured in esp_sleep_start, immediately before + * entering sleep mode. + * + * @note internal pullups and pulldowns don't work when RTC peripherals are + * shut down. In this case, external resistors need to be added. + * Alternatively, RTC peripherals (and pullups/pulldowns) may be + * kept enabled using esp_sleep_pd_config function. + * + * @param mask bit mask of GPIO numbers which will cause wakeup. Only GPIOs + * which are have RTC functionality can be used in this bit map: + * 0,2,4,12-15,25-27,32-39. + * @param mode select logic function used to determine wakeup condition: + * - ESP_EXT1_WAKEUP_ALL_LOW: wake up when all selected GPIOs are low + * - ESP_EXT1_WAKEUP_ANY_HIGH: wake up when any of the selected GPIOs is high + * @return + * - ESP_OK on success + * - ESP_ERR_INVALID_ARG if any of the selected GPIOs is not an RTC GPIO, + * or mode is invalid + */ +esp_err_t esp_sleep_enable_ext1_wakeup(uint64_t mask, esp_sleep_ext1_wakeup_mode_t mode); + +/** + * @brief Enable wakeup from light sleep using GPIOs + * + * Each GPIO supports wakeup function, which can be triggered on either low level + * or high level. Unlike EXT0 and EXT1 wakeup sources, this method can be used + * both for all IOs: RTC IOs and digital IOs. It can only be used to wakeup from + * light sleep though. + * + * To enable wakeup, first call gpio_wakeup_enable, specifying gpio number and + * wakeup level, for each GPIO which is used for wakeup. + * Then call this function to enable wakeup feature. + * + * @note In revisions 0 and 1 of the ESP32, GPIO wakeup source + * can not be used together with touch or ULP wakeup sources. + * + * @return + * - ESP_OK on success + * - ESP_ERR_INVALID_STATE if wakeup triggers conflict + */ +esp_err_t esp_sleep_enable_gpio_wakeup(void); + +/** + * @brief Enable wakeup from light sleep using UART + * + * Use uart_set_wakeup_threshold function to configure UART wakeup threshold. + * + * Wakeup from light sleep takes some time, so not every character sent + * to the UART can be received by the application. 
+ * + * @param uart_num UART port to wake up from + * @return + * - ESP_OK on success + * - ESP_ERR_INVALID_ARG if wakeup from given UART is not supported + */ +esp_err_t esp_sleep_enable_uart_wakeup(int uart_num); + +/** + * @brief Get the bit mask of GPIOs which caused wakeup (ext1) + * + * If wakeup was caused by another source, this function will return 0. + * + * @return bit mask, if GPIOn caused wakeup, BIT(n) will be set + */ +uint64_t esp_sleep_get_ext1_wakeup_status(void); + +/** + * @brief Set power down mode for an RTC power domain in sleep mode + * + * If not set set using this API, all power domains default to ESP_PD_OPTION_AUTO. + * + * @param domain power domain to configure + * @param option power down option (ESP_PD_OPTION_OFF, ESP_PD_OPTION_ON, or ESP_PD_OPTION_AUTO) + * @return + * - ESP_OK on success + * - ESP_ERR_INVALID_ARG if either of the arguments is out of range + */ +esp_err_t esp_sleep_pd_config(esp_sleep_pd_domain_t domain, + esp_sleep_pd_option_t option); + +/** + * @brief Enter deep sleep with the configured wakeup options + * + * This function does not return. + */ +void esp_deep_sleep_start(void) __attribute__((noreturn)); + +/** + * @brief Enter light sleep with the configured wakeup options + * + * @return + * - ESP_OK on success (returned after wakeup) + * - ESP_ERR_INVALID_STATE if WiFi or BT is not stopped + */ +esp_err_t esp_light_sleep_start(void); + +/** + * @brief Enter deep-sleep mode + * + * The device will automatically wake up after the deep-sleep time + * Upon waking up, the device calls deep sleep wake stub, and then proceeds + * to load application. + * + * Call to this function is equivalent to a call to esp_deep_sleep_enable_timer_wakeup + * followed by a call to esp_deep_sleep_start. + * + * esp_deep_sleep does not shut down WiFi, BT, and higher level protocol + * connections gracefully. + * Make sure relevant WiFi and BT stack functions are called to close any + * connections and deinitialize the peripherals. These include: + * - esp_bluedroid_disable + * - esp_bt_controller_disable + * - esp_wifi_stop + * + * This function does not return. + * + * @param time_in_us deep-sleep time, unit: microsecond + */ +void esp_deep_sleep(uint64_t time_in_us) __attribute__((noreturn)); + + +/** + * @brief Get the wakeup source which caused wakeup from sleep + * + * @return cause of wake up from last sleep (deep sleep or light sleep) + */ +esp_sleep_wakeup_cause_t esp_sleep_get_wakeup_cause(void); + + +/** + * @brief Default stub to run on wake from deep sleep. + * + * Allows for executing code immediately on wake from sleep, before + * the software bootloader or ESP-IDF app has started up. + * + * This function is weak-linked, so you can implement your own version + * to run code immediately when the chip wakes from + * sleep. + * + * See docs/deep-sleep-stub.rst for details. + */ +void esp_wake_deep_sleep(void); + +/** + * @brief Function type for stub to run on wake from sleep. + * + */ +typedef void (*esp_deep_sleep_wake_stub_fn_t)(void); + +/** + * @brief Install a new stub at runtime to run on wake from deep sleep + * + * If implementing esp_wake_deep_sleep() then it is not necessary to + * call this function. + * + * However, it is possible to call this function to substitute a + * different deep sleep stub. Any function used as a deep sleep stub + * must be marked RTC_IRAM_ATTR, and must obey the same rules given + * for esp_wake_deep_sleep(). 
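+ * + * A hedged sketch of substituting a stub at run time (my_wake_stub and install_wake_stub are hypothetical + * user functions, not part of this API): + * @code{.c} + * static void RTC_IRAM_ATTR my_wake_stub(void) + * { + *     // keep this short: only RTC memory and ROM code are available at this point + *     esp_default_wake_deep_sleep(); + * } + * + * void install_wake_stub(void) + * { + *     esp_set_deep_sleep_wake_stub(&my_wake_stub); + * } + * @endcode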
+ */ +void esp_set_deep_sleep_wake_stub(esp_deep_sleep_wake_stub_fn_t new_stub); + +/** + * @brief Get current wake from deep sleep stub + * @return Return current wake from deep sleep stub, or NULL if + * no stub is installed. + */ +esp_deep_sleep_wake_stub_fn_t esp_get_deep_sleep_wake_stub(void); + +/** + * @brief The default esp-idf-provided esp_wake_deep_sleep() stub. + * + * See docs/deep-sleep-stub.rst for details. + */ +void esp_default_wake_deep_sleep(void); + + +#ifdef __cplusplus +} +#endif diff --git a/components/esp32s3/include/esp_spiram.h b/components/esp32s3/include/esp_spiram.h new file mode 100644 index 0000000000..d7a54ccb17 --- /dev/null +++ b/components/esp32s3/include/esp_spiram.h @@ -0,0 +1,90 @@ +// Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + +#ifndef __ESP_SPIRAM_H +#define __ESP_SPIRAM_H + +#include +#include +#include "esp_err.h" + +/** + * @brief Initialize spiram interface/hardware. Normally called from cpu_start.c. + * + * @return ESP_OK on success + */ +esp_err_t esp_spiram_init(void); + +/** + * @brief Configure Cache/MMU for access to external SPI RAM. + * + * Normally this function is called from cpu_start, if CONFIG_SPIRAM_BOOT_INIT + * option is enabled. Applications which need to enable SPI RAM at run time + * can disable CONFIG_SPIRAM_BOOT_INIT, and call this function later. + * + * @attention this function must be called with flash cache disabled. + */ +void esp_spiram_init_cache(void); + + +/** + * @brief Memory test for SPI RAM. Should be called after SPI RAM is initialized and + * (in case of a dual-core system) the app CPU is online. This test overwrites the + * memory with crap, so do not call after e.g. the heap allocator has stored important + * stuff in SPI RAM. + * + * @return true on success, false on failed memory test + */ +bool esp_spiram_test(void); + + +/** + * @brief Add the initialized SPI RAM to the heap allocator. + */ +esp_err_t esp_spiram_add_to_heapalloc(void); + + +/** + * @brief Get the size of the attached SPI RAM chip selected in menuconfig + * + * @return Size in bytes, or 0 if no external RAM chip support compiled in. + */ +size_t esp_spiram_get_size(void); + + +/** + * @brief Force a writeback of the data in the SPI RAM cache. This is to be called whenever + * cache is disabled, because disabling cache on the ESP32 discards the data in the SPI + * RAM cache. + * + * This is meant for use from within the SPI flash code. 
+ */ +void esp_spiram_writeback_cache(void); + + + +/** + * @brief Reserve a pool of internal memory for specific DMA/internal allocations + * + * @param size Size of reserved pool in bytes + * + * @return + * - ESP_OK on success + * - ESP_ERR_NO_MEM when no memory available for pool + */ +esp_err_t esp_spiram_reserve_dma_pool(size_t size); + + +#endif diff --git a/components/esp32s3/intr_alloc.c b/components/esp32s3/intr_alloc.c new file mode 100644 index 0000000000..c61d762608 --- /dev/null +++ b/components/esp32s3/intr_alloc.c @@ -0,0 +1,954 @@ +// Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include +#include +#include +#include "sdkconfig.h" +#include "freertos/FreeRTOS.h" +#include "freertos/task.h" +#include "esp_err.h" +#include "esp_log.h" +#include "esp_intr_alloc.h" +#include "esp_attr.h" +#include "soc/soc.h" + +static const char *TAG = "intr_alloc"; + +#define ETS_INTERNAL_TIMER0_INTR_NO 6 +#define ETS_INTERNAL_TIMER1_INTR_NO 15 +#define ETS_INTERNAL_TIMER2_INTR_NO 16 +#define ETS_INTERNAL_SW0_INTR_NO 7 +#define ETS_INTERNAL_SW1_INTR_NO 29 +#define ETS_INTERNAL_PROFILING_INTR_NO 11 + +/* +Define this to debug the choices made when allocating the interrupt. This leads to much debugging +output within a critical region, which can lead to weird effects like e.g. the interrupt watchdog +being triggered, that is why it is separate from the normal LOG* scheme. +*/ +//define DEBUG_INT_ALLOC_DECISIONS +#ifdef DEBUG_INT_ALLOC_DECISIONS +# define ALCHLOG(...) ESP_EARLY_LOGD(TAG, __VA_ARGS__) +#else +# define ALCHLOG(...) do {} while (0) +#endif + + +typedef enum { + INTDESC_NORMAL = 0, + INTDESC_RESVD, + INTDESC_SPECIAL //for xtensa timers / software ints +} int_desc_flag_t; + +typedef enum { + INTTP_LEVEL = 0, + INTTP_EDGE, + INTTP_NA +} int_type_t; + +typedef struct { + int level; + int_type_t type; + int_desc_flag_t cpuflags[2]; +} int_desc_t; + + +//We should mark the interrupt for the timer used by FreeRTOS as reserved. The specific timer +//is selectable using menuconfig; we use these cpp bits to convert that into something we can use in +//the table below. 
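+//(For example, with CONFIG_FREERTOS_CORETIMER_0 selected and CORETIMER_1 not, internal interrupt 6 becomes +//INTDESC_RESVD in the table below, so esp_intr_alloc() never hands it out, while interrupt 15 stays INTDESC_SPECIAL.)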
+#if CONFIG_FREERTOS_CORETIMER_0 +#define INT6RES INTDESC_RESVD +#else +#define INT6RES INTDESC_SPECIAL +#endif + +#if CONFIG_FREERTOS_CORETIMER_1 +#define INT15RES INTDESC_RESVD +#else +#define INT15RES INTDESC_SPECIAL +#endif + +//This is basically a software-readable version of the interrupt usage table in include/soc/soc.h +const static int_desc_t int_desc[32] = { + { 1, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_RESVD } }, //0 + { 1, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_RESVD } }, //1 + { 1, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //2 + { 1, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //3 + { 1, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_NORMAL} }, //4 + { 1, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_RESVD } }, //5 + { 1, INTTP_NA, {INT6RES, INT6RES } }, //6 + { 1, INTTP_NA, {INTDESC_SPECIAL, INTDESC_SPECIAL}}, //7 + { 1, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_RESVD } }, //8 + { 1, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //9 + { 1, INTTP_EDGE, {INTDESC_NORMAL, INTDESC_NORMAL} }, //10 + { 3, INTTP_NA, {INTDESC_SPECIAL, INTDESC_SPECIAL}}, //11 + { 1, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //12 + { 1, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //13 + { 7, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_RESVD } }, //14, NMI + { 3, INTTP_NA, {INT15RES, INT15RES } }, //15 + { 5, INTTP_NA, {INTDESC_SPECIAL, INTDESC_SPECIAL} }, //16 + { 1, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //17 + { 1, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //18 + { 2, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //19 + { 2, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //20 + { 2, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //21 + { 3, INTTP_EDGE, {INTDESC_RESVD, INTDESC_NORMAL} }, //22 + { 3, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //23 + { 4, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_NORMAL} }, //24 + { 4, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_RESVD } }, //25 + { 5, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_RESVD } }, //26 + { 3, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_RESVD } }, //27 + { 4, INTTP_EDGE, {INTDESC_NORMAL, INTDESC_NORMAL} }, //28 + { 3, INTTP_NA, {INTDESC_SPECIAL, INTDESC_SPECIAL}}, //29 + { 4, INTTP_EDGE, {INTDESC_RESVD, INTDESC_RESVD } }, //30 + { 5, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_RESVD } }, //31 +}; + +typedef struct shared_vector_desc_t shared_vector_desc_t; +typedef struct vector_desc_t vector_desc_t; + +struct shared_vector_desc_t { + int disabled: 1; + int source: 8; + volatile uint32_t *statusreg; + uint32_t statusmask; + intr_handler_t isr; + void *arg; + shared_vector_desc_t *next; +}; + + +#define VECDESC_FL_RESERVED (1<<0) +#define VECDESC_FL_INIRAM (1<<1) +#define VECDESC_FL_SHARED (1<<2) +#define VECDESC_FL_NONSHARED (1<<3) + +//Pack using bitfields for better memory use +struct vector_desc_t { + int flags: 16; //OR of VECDESC_FLAG_* defines + unsigned int cpu: 1; + unsigned int intno: 5; + int source: 8; //Interrupt mux flags, used when not shared + shared_vector_desc_t *shared_vec_info; //used when VECDESC_FL_SHARED + vector_desc_t *next; +}; + +struct intr_handle_data_t { + vector_desc_t *vector_desc; + shared_vector_desc_t *shared_vector_desc; +}; + +typedef struct non_shared_isr_arg_t non_shared_isr_arg_t; + +struct non_shared_isr_arg_t { + intr_handler_t isr; + void *isr_arg; + int source; +}; + +//Linked list of vector descriptions, sorted by cpu.intno value +static vector_desc_t *vector_desc_head = NULL; + +//This bitmask has an 1 if the int should be disabled when the flash is disabled. 
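+//(One mask word per CPU; esp_intr_noniram_disable() below uses it to keep only the interrupts allocated with +//ESP_INTR_FLAG_IRAM enabled while the flash cache is disabled.)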
+static uint32_t non_iram_int_mask[portNUM_PROCESSORS]; +//This bitmask has 1 in it if the int was disabled using esp_intr_noniram_disable. +static uint32_t non_iram_int_disabled[portNUM_PROCESSORS]; +static bool non_iram_int_disabled_flag[portNUM_PROCESSORS]; + +#if CONFIG_SYSVIEW_ENABLE +extern uint32_t port_switch_flag[]; +#endif + +static portMUX_TYPE spinlock = portMUX_INITIALIZER_UNLOCKED; + +//Inserts an item into vector_desc list so that the list is sorted +//with an incrementing cpu.intno value. +static void insert_vector_desc(vector_desc_t *to_insert) +{ + vector_desc_t *vd = vector_desc_head; + vector_desc_t *prev = NULL; + while (vd != NULL) { + if (vd->cpu > to_insert->cpu) { + break; + } + if (vd->cpu == to_insert->cpu && vd->intno >= to_insert->intno) { + break; + } + prev = vd; + vd = vd->next; + } + if ((vector_desc_head == NULL) || (prev == NULL)) { + //First item + to_insert->next = vd; + vector_desc_head = to_insert; + } else { + prev->next = to_insert; + to_insert->next = vd; + } +} + +//Returns a vector_desc entry for an intno/cpu, or NULL if none exists. +static vector_desc_t *find_desc_for_int(int intno, int cpu) +{ + vector_desc_t *vd = vector_desc_head; + while (vd != NULL) { + if (vd->cpu == cpu && vd->intno == intno) { + break; + } + vd = vd->next; + } + return vd; +} + +//Returns a vector_desc entry for an intno/cpu. +//Either returns a preexisting one or allocates a new one and inserts +//it into the list. Returns NULL on malloc fail. +static vector_desc_t *get_desc_for_int(int intno, int cpu) +{ + vector_desc_t *vd = find_desc_for_int(intno, cpu); + if (vd == NULL) { + vector_desc_t *newvd = heap_caps_malloc(sizeof(vector_desc_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT); + if (newvd == NULL) { + return NULL; + } + memset(newvd, 0, sizeof(vector_desc_t)); + newvd->intno = intno; + newvd->cpu = cpu; + insert_vector_desc(newvd); + return newvd; + } else { + return vd; + } +} + +//Returns a vector_desc entry for an source, the cpu parameter is used to tell GPIO_INT and GPIO_NMI from different CPUs +static vector_desc_t *find_desc_for_source(int source, int cpu) +{ + vector_desc_t *vd = vector_desc_head; + while (vd != NULL) { + if ( !(vd->flags & VECDESC_FL_SHARED) ) { + if ( vd->source == source && cpu == vd->cpu ) { + break; + } + } else if ( vd->cpu == cpu ) { + // check only shared vds for the correct cpu, otherwise skip + bool found = false; + shared_vector_desc_t *svd = vd->shared_vec_info; + assert(svd != NULL ); + while (svd) { + if ( svd->source == source ) { + found = true; + break; + } + svd = svd->next; + } + if ( found ) { + break; + } + } + vd = vd->next; + } + return vd; +} + +esp_err_t esp_intr_mark_shared(int intno, int cpu, bool is_int_ram) +{ + if (intno > 31) { + return ESP_ERR_INVALID_ARG; + } + if (cpu >= portNUM_PROCESSORS) { + return ESP_ERR_INVALID_ARG; + } + + portENTER_CRITICAL(&spinlock); + vector_desc_t *vd = get_desc_for_int(intno, cpu); + if (vd == NULL) { + portEXIT_CRITICAL(&spinlock); + return ESP_ERR_NO_MEM; + } + vd->flags = VECDESC_FL_SHARED; + if (is_int_ram) { + vd->flags |= VECDESC_FL_INIRAM; + } + portEXIT_CRITICAL(&spinlock); + + return ESP_OK; +} + +esp_err_t esp_intr_reserve(int intno, int cpu) +{ + if (intno > 31) { + return ESP_ERR_INVALID_ARG; + } + if (cpu >= portNUM_PROCESSORS) { + return ESP_ERR_INVALID_ARG; + } + + portENTER_CRITICAL(&spinlock); + vector_desc_t *vd = get_desc_for_int(intno, cpu); + if (vd == NULL) { + portEXIT_CRITICAL(&spinlock); + return ESP_ERR_NO_MEM; + } + vd->flags = VECDESC_FL_RESERVED; + 
portEXIT_CRITICAL(&spinlock); + + return ESP_OK; +} + +//Interrupt handler table and unhandled uinterrupt routine. Duplicated +//from xtensa_intr.c... it's supposed to be private, but we need to look +//into it in order to see if someone allocated an int using +//xt_set_interrupt_handler. +typedef struct xt_handler_table_entry { + void *handler; + void *arg; +} xt_handler_table_entry; +extern xt_handler_table_entry _xt_interrupt_table[XCHAL_NUM_INTERRUPTS * portNUM_PROCESSORS]; +extern void xt_unhandled_interrupt(void *arg); + +//Returns true if handler for interrupt is not the default unhandled interrupt handler +static bool int_has_handler(int intr, int cpu) +{ + return (_xt_interrupt_table[intr * portNUM_PROCESSORS + cpu].handler != xt_unhandled_interrupt); +} + +static bool is_vect_desc_usable(vector_desc_t *vd, int flags, int cpu, int force) +{ + //Check if interrupt is not reserved by design + int x = vd->intno; + if (int_desc[x].cpuflags[cpu] == INTDESC_RESVD) { + ALCHLOG("....Unusable: reserved"); + return false; + } + if (int_desc[x].cpuflags[cpu] == INTDESC_SPECIAL && force == -1) { + ALCHLOG("....Unusable: special-purpose int"); + return false; + } + //Check if the interrupt level is acceptable + if (!(flags & (1 << int_desc[x].level))) { + ALCHLOG("....Unusable: incompatible level"); + return false; + } + //check if edge/level type matches what we want + if (((flags & ESP_INTR_FLAG_EDGE) && (int_desc[x].type == INTTP_LEVEL)) || + (((!(flags & ESP_INTR_FLAG_EDGE)) && (int_desc[x].type == INTTP_EDGE)))) { + ALCHLOG("....Unusable: incompatible trigger type"); + return false; + } + //check if interrupt is reserved at runtime + if (vd->flags & VECDESC_FL_RESERVED) { + ALCHLOG("....Unusable: reserved at runtime."); + return false; + } + //Ints can't be both shared and non-shared. + assert(!((vd->flags & VECDESC_FL_SHARED) && (vd->flags & VECDESC_FL_NONSHARED))); + //check if interrupt already is in use by a non-shared interrupt + if (vd->flags & VECDESC_FL_NONSHARED) { + ALCHLOG("....Unusable: already in (non-shared) use."); + return false; + } + // check shared interrupt flags + if (vd->flags & VECDESC_FL_SHARED ) { + if (flags & ESP_INTR_FLAG_SHARED) { + bool in_iram_flag = ((flags & ESP_INTR_FLAG_IRAM) != 0); + bool desc_in_iram_flag = ((vd->flags & VECDESC_FL_INIRAM) != 0); + //Bail out if int is shared, but iram property doesn't match what we want. + if ((vd->flags & VECDESC_FL_SHARED) && (desc_in_iram_flag != in_iram_flag)) { + ALCHLOG("....Unusable: shared but iram prop doesn't match"); + return false; + } + } else { + //We need an unshared IRQ; can't use shared ones; bail out if this is shared. + ALCHLOG("...Unusable: int is shared, we need non-shared."); + return false; + } + } else if (int_has_handler(x, cpu)) { + //Check if interrupt already is allocated by xt_set_interrupt_handler + ALCHLOG("....Unusable: already allocated"); + return false; + } + return true; +} + +//Locate a free interrupt compatible with the flags given. +//The 'force' argument can be -1, or 0-31 to force checking a certain interrupt. +//When a CPU is forced, the INTDESC_SPECIAL marked interrupts are also accepted. 
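+//Selection order: an interrupt already connected to this source is used if possible; otherwise the forced interrupt; +//otherwise the usable free (or, for shared allocations, compatible shared) interrupt with the lowest level, +//preferring shared vectors with the fewest ISRs already attached.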
+static int get_available_int(int flags, int cpu, int force, int source) +{ + int x; + int best = -1; + int bestLevel = 9; + int bestSharedCt = INT_MAX; + //Default vector desc, for vectors not in the linked list + vector_desc_t empty_vect_desc; + memset(&empty_vect_desc, 0, sizeof(vector_desc_t)); + + //Level defaults to any low/med interrupt + if (!(flags & ESP_INTR_FLAG_LEVELMASK)) { + flags |= ESP_INTR_FLAG_LOWMED; + } + + ALCHLOG("get_available_int: try to find existing. Cpu: %d, Source: %d", cpu, source); + vector_desc_t *vd = find_desc_for_source(source, cpu); + if ( vd ) { + // if existing vd found, don't need to search any more. + ALCHLOG("get_avalible_int: existing vd found. intno: %d", vd->intno); + if ( force != -1 && force != vd->intno ) { + ALCHLOG("get_avalible_int: intr forced but not matach existing. existing intno: %d, force: %d", vd->intno, force); + } else if ( !is_vect_desc_usable(vd, flags, cpu, force) ) { + ALCHLOG("get_avalible_int: existing vd invalid."); + } else { + best = vd->intno; + } + return best; + } + if (force != -1) { + ALCHLOG("get_available_int: try to find force. Cpu: %d, Source: %d, Force: %d", cpu, source, force); + //if force assigned, don't need to search any more. + vd = find_desc_for_int(force, cpu); + if (vd == NULL ) { + //if existing vd not found, just check the default state for the intr. + empty_vect_desc.intno = force; + vd = &empty_vect_desc; + } + if ( is_vect_desc_usable(vd, flags, cpu, force) ) { + best = vd->intno; + } else { + ALCHLOG("get_avalible_int: forced vd invalid."); + } + return best; + } + + ALCHLOG("get_free_int: start looking. Current cpu: %d", cpu); + //No allocated handlers as well as forced intr, iterate over the 32 possible interrupts + for (x = 0; x < 32; x++) { + //Grab the vector_desc for this vector. + vd = find_desc_for_int(x, cpu); + if (vd == NULL) { + empty_vect_desc.intno = x; + vd = &empty_vect_desc; + } + + ALCHLOG("Int %d reserved %d level %d %s hasIsr %d", + x, int_desc[x].cpuflags[cpu] == INTDESC_RESVD, int_desc[x].level, + int_desc[x].type == INTTP_LEVEL ? "LEVEL" : "EDGE", int_has_handler(x, cpu)); + if ( !is_vect_desc_usable(vd, flags, cpu, force) ) { + continue; + } + + if (flags & ESP_INTR_FLAG_SHARED) { + //We're allocating a shared int. + //See if int already is used as a shared interrupt. + if (vd->flags & VECDESC_FL_SHARED) { + //We can use this already-marked-as-shared interrupt. Count the already attached isrs in order to see + //how useful it is. + int no = 0; + shared_vector_desc_t *svdesc = vd->shared_vec_info; + while (svdesc != NULL) { + no++; + svdesc = svdesc->next; + } + if (no < bestSharedCt || bestLevel > int_desc[x].level) { + //Seems like this shared vector is both okay and has the least amount of ISRs already attached to it. + best = x; + bestSharedCt = no; + bestLevel = int_desc[x].level; + ALCHLOG("...int %d more usable as a shared int: has %d existing vectors", x, no); + } else { + ALCHLOG("...worse than int %d", best); + } + } else { + if (best == -1) { + //We haven't found a feasible shared interrupt yet. This one is still free and usable, even if + //not marked as shared. + //Remember it in case we don't find any other shared interrupt that qualifies. + if (bestLevel > int_desc[x].level) { + best = x; + bestLevel = int_desc[x].level; + ALCHLOG("...int %d usable as a new shared int", x); + } + } else { + ALCHLOG("...already have a shared int"); + } + } + } else { + //Seems this interrupt is feasible. Select it and break out of the loop; no need to search further. 
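+            //(The scan continues over the remaining interrupts so that the candidate with the lowest level ends up selected.)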
+ if (bestLevel > int_desc[x].level) { + best = x; + bestLevel = int_desc[x].level; + } else { + ALCHLOG("...worse than int %d", best); + } + } + } + ALCHLOG("get_available_int: using int %d", best); + + //Okay, by now we have looked at all potential interrupts and hopefully have selected the best one in best. + return best; +} + +//Common shared isr handler. Chain-call all ISRs. +static void IRAM_ATTR shared_intr_isr(void *arg) +{ + vector_desc_t *vd = (vector_desc_t *)arg; + shared_vector_desc_t *sh_vec = vd->shared_vec_info; + portENTER_CRITICAL(&spinlock); + while (sh_vec) { + if (!sh_vec->disabled) { + if ((sh_vec->statusreg == NULL) || (*sh_vec->statusreg & sh_vec->statusmask)) { +#if CONFIG_SYSVIEW_ENABLE + traceISR_ENTER(sh_vec->source + ETS_INTERNAL_INTR_SOURCE_OFF); +#endif + sh_vec->isr(sh_vec->arg); +#if CONFIG_SYSVIEW_ENABLE + // check if we will return to scheduler or to interrupted task after ISR + if (!port_switch_flag[xPortGetCoreID()]) { + traceISR_EXIT(); + } +#endif + } + } + sh_vec = sh_vec->next; + } + portEXIT_CRITICAL(&spinlock); +} + +#if CONFIG_SYSVIEW_ENABLE +//Common non-shared isr handler wrapper. +static void IRAM_ATTR non_shared_intr_isr(void *arg) +{ + non_shared_isr_arg_t *ns_isr_arg = (non_shared_isr_arg_t *)arg; + portENTER_CRITICAL(&spinlock); + traceISR_ENTER(ns_isr_arg->source + ETS_INTERNAL_INTR_SOURCE_OFF); + // FIXME: can we call ISR and check port_switch_flag after releasing spinlock? + // when CONFIG_SYSVIEW_ENABLE = 0 ISRs for non-shared IRQs are called without spinlock + ns_isr_arg->isr(ns_isr_arg->isr_arg); + // check if we will return to scheduler or to interrupted task after ISR + if (!port_switch_flag[xPortGetCoreID()]) { + traceISR_EXIT(); + } + portEXIT_CRITICAL(&spinlock); +} +#endif + +//We use ESP_EARLY_LOG* here because this can be called before the scheduler is running. +esp_err_t esp_intr_alloc_intrstatus(int source, int flags, uint32_t intrstatusreg, uint32_t intrstatusmask, intr_handler_t handler, + void *arg, intr_handle_t *ret_handle) +{ + intr_handle_data_t *ret = NULL; + int force = -1; + ESP_EARLY_LOGV(TAG, "esp_intr_alloc_intrstatus (cpu %d): checking args", xPortGetCoreID()); + //Shared interrupts should be level-triggered. + if ((flags & ESP_INTR_FLAG_SHARED) && (flags & ESP_INTR_FLAG_EDGE)) { + return ESP_ERR_INVALID_ARG; + } + //You can't set an handler / arg for a non-C-callable interrupt. + if ((flags & ESP_INTR_FLAG_HIGH) && (handler)) { + return ESP_ERR_INVALID_ARG; + } + //Shared ints should have handler and non-processor-local source + if ((flags & ESP_INTR_FLAG_SHARED) && (!handler || source < 0)) { + return ESP_ERR_INVALID_ARG; + } + //Statusreg should have a mask + if (intrstatusreg && !intrstatusmask) { + return ESP_ERR_INVALID_ARG; + } + //If the ISR is marked to be IRAM-resident, the handler must not be in the cached region + if ((flags & ESP_INTR_FLAG_IRAM) && + (ptrdiff_t) handler >= SOC_RTC_IRAM_HIGH && + (ptrdiff_t) handler < SOC_RTC_DATA_LOW ) { + return ESP_ERR_INVALID_ARG; + } + + //Default to prio 1 for shared interrupts. Default to prio 1, 2 or 3 for non-shared interrupts. + if ((flags & ESP_INTR_FLAG_LEVELMASK) == 0) { + if (flags & ESP_INTR_FLAG_SHARED) { + flags |= ESP_INTR_FLAG_LEVEL1; + } else { + flags |= ESP_INTR_FLAG_LOWMED; + } + } + ESP_EARLY_LOGV(TAG, "esp_intr_alloc_intrstatus (cpu %d): Args okay. Resulting flags 0x%X", xPortGetCoreID(), flags); + + //Check 'special' interrupt sources. These are tied to one specific interrupt, so we + //have to force get_free_int to only look at that. 
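+    //(Each of these internal sources is tied to a fixed Xtensa interrupt number, given by the ETS_INTERNAL_*_INTR_NO defines at the top of this file.)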
+ if (source == ETS_INTERNAL_TIMER0_INTR_SOURCE) { + force = ETS_INTERNAL_TIMER0_INTR_NO; + } + if (source == ETS_INTERNAL_TIMER1_INTR_SOURCE) { + force = ETS_INTERNAL_TIMER1_INTR_NO; + } + if (source == ETS_INTERNAL_TIMER2_INTR_SOURCE) { + force = ETS_INTERNAL_TIMER2_INTR_NO; + } + if (source == ETS_INTERNAL_SW0_INTR_SOURCE) { + force = ETS_INTERNAL_SW0_INTR_NO; + } + if (source == ETS_INTERNAL_SW1_INTR_SOURCE) { + force = ETS_INTERNAL_SW1_INTR_NO; + } + if (source == ETS_INTERNAL_PROFILING_INTR_SOURCE) { + force = ETS_INTERNAL_PROFILING_INTR_NO; + } + + //Allocate a return handle. If we end up not needing it, we'll free it later on. + ret = heap_caps_malloc(sizeof(intr_handle_data_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT); + if (ret == NULL) { + return ESP_ERR_NO_MEM; + } + + portENTER_CRITICAL(&spinlock); + int cpu = xPortGetCoreID(); + //See if we can find an interrupt that matches the flags. + int intr = get_available_int(flags, cpu, force, source); + if (intr == -1) { + //None found. Bail out. + portEXIT_CRITICAL(&spinlock); + free(ret); + return ESP_ERR_NOT_FOUND; + } + //Get an int vector desc for int. + vector_desc_t *vd = get_desc_for_int(intr, cpu); + if (vd == NULL) { + portEXIT_CRITICAL(&spinlock); + free(ret); + return ESP_ERR_NO_MEM; + } + + //Allocate that int! + if (flags & ESP_INTR_FLAG_SHARED) { + //Populate vector entry and add to linked list. + shared_vector_desc_t *sh_vec = malloc(sizeof(shared_vector_desc_t)); + if (sh_vec == NULL) { + portEXIT_CRITICAL(&spinlock); + free(ret); + return ESP_ERR_NO_MEM; + } + memset(sh_vec, 0, sizeof(shared_vector_desc_t)); + sh_vec->statusreg = (uint32_t *)intrstatusreg; + sh_vec->statusmask = intrstatusmask; + sh_vec->isr = handler; + sh_vec->arg = arg; + sh_vec->next = vd->shared_vec_info; + sh_vec->source = source; + sh_vec->disabled = 0; + vd->shared_vec_info = sh_vec; + vd->flags |= VECDESC_FL_SHARED; + //(Re-)set shared isr handler to new value. + xt_set_interrupt_handler(intr, shared_intr_isr, vd); + } else { + //Mark as unusable for other interrupt sources. This is ours now! + vd->flags = VECDESC_FL_NONSHARED; + if (handler) { +#if CONFIG_SYSVIEW_ENABLE + non_shared_isr_arg_t *ns_isr_arg = malloc(sizeof(non_shared_isr_arg_t)); + if (!ns_isr_arg) { + portEXIT_CRITICAL(&spinlock); + free(ret); + return ESP_ERR_NO_MEM; + } + ns_isr_arg->isr = handler; + ns_isr_arg->isr_arg = arg; + ns_isr_arg->source = source; + xt_set_interrupt_handler(intr, non_shared_intr_isr, ns_isr_arg); +#else + xt_set_interrupt_handler(intr, handler, arg); +#endif + } + if (flags & ESP_INTR_FLAG_EDGE) { + xthal_set_intclear(1 << intr); + } + vd->source = source; + } + if (flags & ESP_INTR_FLAG_IRAM) { + vd->flags |= VECDESC_FL_INIRAM; + non_iram_int_mask[cpu] &= ~(1 << intr); + } else { + vd->flags &= ~VECDESC_FL_INIRAM; + non_iram_int_mask[cpu] |= (1 << intr); + } + if (source >= 0) { + intr_matrix_set(cpu, source, intr); + } + + //Fill return handle data. + ret->vector_desc = vd; + ret->shared_vector_desc = vd->shared_vec_info; + + //Enable int at CPU-level; + ESP_INTR_ENABLE(intr); + + //If interrupt has to be started disabled, do that now; ints won't be enabled for real until the end + //of the critical section. + if (flags & ESP_INTR_FLAG_INTRDISABLED) { + esp_intr_disable(ret); + } + + portEXIT_CRITICAL(&spinlock); + + //Fill return handle if needed, otherwise free handle. 
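+    //(The interrupt stays allocated either way; the handle is only needed if the caller wants to disable or free it later.)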
+    if (ret_handle != NULL) { + *ret_handle = ret; + } else { + free(ret); + } + + ESP_EARLY_LOGD(TAG, "Connected src %d to int %d (cpu %d)", source, intr, cpu); + return ESP_OK; +} + +esp_err_t esp_intr_alloc(int source, int flags, intr_handler_t handler, void *arg, intr_handle_t *ret_handle) +{ + /* + As an optimization, we can create a table with the possible interrupt status registers and masks for every single + source there is. We can then add code here to look up an applicable value and pass that to the + esp_intr_alloc_intrstatus function. + */ + return esp_intr_alloc_intrstatus(source, flags, 0, 0, handler, arg, ret_handle); +} + +esp_err_t IRAM_ATTR esp_intr_set_in_iram(intr_handle_t handle, bool is_in_iram) +{ + if (!handle) { + return ESP_ERR_INVALID_ARG; + } + vector_desc_t *vd = handle->vector_desc; + if (vd->flags & VECDESC_FL_SHARED) { + return ESP_ERR_INVALID_ARG; + } + portENTER_CRITICAL(&spinlock); + uint32_t mask = (1 << vd->intno); + if (is_in_iram) { + vd->flags |= VECDESC_FL_INIRAM; + non_iram_int_mask[vd->cpu] &= ~mask; + } else { + vd->flags &= ~VECDESC_FL_INIRAM; + non_iram_int_mask[vd->cpu] |= mask; + } + portEXIT_CRITICAL(&spinlock); + return ESP_OK; +} + +esp_err_t esp_intr_free(intr_handle_t handle) +{ + bool free_shared_vector = false; + if (!handle) { + return ESP_ERR_INVALID_ARG; + } + //This routine must be called from the CPU core that the interrupt is allocated on. + if (handle->vector_desc->cpu != xPortGetCoreID()) { + return ESP_ERR_INVALID_ARG; + } + + portENTER_CRITICAL(&spinlock); + esp_intr_disable(handle); + if (handle->vector_desc->flags & VECDESC_FL_SHARED) { + //Find and kill the shared int + shared_vector_desc_t *svd = handle->vector_desc->shared_vec_info; + shared_vector_desc_t *prevsvd = NULL; + assert(svd); //should be something in there for a shared int + while (svd != NULL) { + if (svd == handle->shared_vector_desc) { + //Found it. Now kill it. + if (prevsvd) { + prevsvd->next = svd->next; + } else { + handle->vector_desc->shared_vec_info = svd->next; + } + free(svd); + break; + } + prevsvd = svd; + svd = svd->next; + } + //If nothing left, disable interrupt. + if (handle->vector_desc->shared_vec_info == NULL) { + free_shared_vector = true; + } + ESP_LOGV(TAG, "esp_intr_free: Deleting shared int: %s. Shared int is %s", svd ? "not found or last one" : "deleted", free_shared_vector ? "empty now." : "still in use"); + } + + if ((handle->vector_desc->flags & VECDESC_FL_NONSHARED) || free_shared_vector) { + ESP_LOGV(TAG, "esp_intr_free: Disabling int, killing handler"); +#if CONFIG_SYSVIEW_ENABLE + if (!free_shared_vector) { + void *isr_arg = xt_get_interrupt_handler_arg(handle->vector_desc->intno); + if (isr_arg) { + free(isr_arg); + } + } +#endif + //Reset to normal handler + xt_set_interrupt_handler(handle->vector_desc->intno, xt_unhandled_interrupt, (void *)((int)handle->vector_desc->intno)); + //Theoretically, we could free the vector_desc... not sure if that's worth the few bytes of memory + //we save. (We can also not use the same exit path for empty shared ints anymore if we delete + //the desc.) For now, just mark it as free. + handle->vector_desc->flags &= ~(VECDESC_FL_NONSHARED | VECDESC_FL_RESERVED); + //Also kill non_iram mask bit.
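+        //(so esp_intr_noniram_disable() no longer touches this interrupt)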
+        non_iram_int_mask[handle->vector_desc->cpu] &= ~(1 << (handle->vector_desc->intno)); + } + portEXIT_CRITICAL(&spinlock); + free(handle); + return ESP_OK; +} + +int esp_intr_get_intno(intr_handle_t handle) +{ + return handle->vector_desc->intno; +} + +int esp_intr_get_cpu(intr_handle_t handle) +{ + return handle->vector_desc->cpu; +} + +/* + Interrupt disabling strategy: + If the source is >=0 (meaning a muxed interrupt), we disable it by muxing the interrupt to a non-connected + interrupt. If the source is <0 (meaning an internal, per-cpu interrupt), we disable it using ESP_INTR_DISABLE. + This allows us to, for the muxed CPUs, disable an int from the other core. It also allows disabling shared + interrupts. + */ + +//Muxing an interrupt source to interrupt 6, 7, 11, 15, 16 or 29 causes the interrupt to effectively be disabled. +#define INT_MUX_DISABLED_INTNO 6 + +esp_err_t IRAM_ATTR esp_intr_enable(intr_handle_t handle) +{ + if (!handle) { + return ESP_ERR_INVALID_ARG; + } + portENTER_CRITICAL_SAFE(&spinlock); + int source; + if (handle->shared_vector_desc) { + handle->shared_vector_desc->disabled = 0; + source = handle->shared_vector_desc->source; + } else { + source = handle->vector_desc->source; + } + if (source >= 0) { + //Disabled using int matrix; re-connect to enable + intr_matrix_set(handle->vector_desc->cpu, source, handle->vector_desc->intno); + } else { + //Re-enable using cpu int ena reg + if (handle->vector_desc->cpu != xPortGetCoreID()) { + portEXIT_CRITICAL_SAFE(&spinlock); + return ESP_ERR_INVALID_ARG; //Can only enable these ints on this cpu + } + ESP_INTR_ENABLE(handle->vector_desc->intno); + } + portEXIT_CRITICAL_SAFE(&spinlock); + return ESP_OK; +} + +esp_err_t IRAM_ATTR esp_intr_disable(intr_handle_t handle) +{ + if (!handle) { + return ESP_ERR_INVALID_ARG; + } + portENTER_CRITICAL_SAFE(&spinlock); + int source; + bool disabled = 1; + if (handle->shared_vector_desc) { + handle->shared_vector_desc->disabled = 1; + source = handle->shared_vector_desc->source; + + shared_vector_desc_t *svd = handle->vector_desc->shared_vec_info; + assert( svd != NULL ); + while ( svd ) { + if ( svd->source == source && svd->disabled == 0 ) { + disabled = 0; + break; + } + svd = svd->next; + } + } else { + source = handle->vector_desc->source; + } + + if (source >= 0) { + if ( disabled ) { + //Disable using int matrix + intr_matrix_set(handle->vector_desc->cpu, source, INT_MUX_DISABLED_INTNO); + } + } else { + //Disable using per-cpu regs + if (handle->vector_desc->cpu != xPortGetCoreID()) { + portEXIT_CRITICAL_SAFE(&spinlock); + return ESP_ERR_INVALID_ARG; //Can only disable these ints on this cpu + } + ESP_INTR_DISABLE(handle->vector_desc->intno); + } + portEXIT_CRITICAL_SAFE(&spinlock); + return ESP_OK; +} + + +void IRAM_ATTR esp_intr_noniram_disable(void) +{ + int oldint; + int cpu = xPortGetCoreID(); + int intmask = ~non_iram_int_mask[cpu]; + if (non_iram_int_disabled_flag[cpu]) { + abort(); + } + non_iram_int_disabled_flag[cpu] = true; + asm volatile ( + "movi %0,0\n" + "xsr %0,INTENABLE\n" //disable all ints first + "rsync\n" + "and a3,%0,%1\n" //mask ints that need disabling + "wsr a3,INTENABLE\n" //write back + "rsync\n" + :"=&r"(oldint):"r"(intmask):"a3"); + //Save which ints we did disable + non_iram_int_disabled[cpu] = oldint & non_iram_int_mask[cpu]; +} + +void IRAM_ATTR esp_intr_noniram_enable(void) +{ + int cpu = xPortGetCoreID(); + int intmask = non_iram_int_disabled[cpu]; + if (!non_iram_int_disabled_flag[cpu]) { + abort(); + } + non_iram_int_disabled_flag[cpu] = false; + asm volatile ( + "movi a3,0\n" +
"xsr a3,INTENABLE\n" + "rsync\n" + "or a3,a3,%0\n" + "wsr a3,INTENABLE\n" + "rsync\n" + ::"r"(intmask):"a3"); +} + +//These functions are provided in ROM, but the ROM-based functions use non-multicore-capable +//virtualized interrupt levels. Thus, we disable them in the ld file and provide working +//equivalents here. + + +void IRAM_ATTR ets_isr_unmask(unsigned int mask) +{ + xt_ints_on(mask); +} + +void IRAM_ATTR ets_isr_mask(unsigned int mask) +{ + xt_ints_off(mask); +} diff --git a/components/esp32s3/ld/esp32s3.ld b/components/esp32s3/ld/esp32s3.ld new file mode 100644 index 0000000000..ab069788ca --- /dev/null +++ b/components/esp32s3/ld/esp32s3.ld @@ -0,0 +1,111 @@ +/** + * ESP32-S3 Linker Script Memory Layout + * This file describes the memory layout (memory blocks) by virtual memory addresses. + * This linker script is passed through the C preprocessor to include configuration options. + * Please use preprocessor features sparingly! + * Restrict to simple macros with numeric values, and/or #if/#endif blocks. + */ + +#include "sdkconfig.h" + +#define SRAM_IRAM_START 0x40370000 +#define SRAM_DRAM_START 0x3FC80000 +#define I_D_SRAM_OFFSET (SRAM_IRAM_START - SRAM_DRAM_START) +#define SRAM_DRAM_END 0x40054000 - I_D_SRAM_OFFSET /* 2nd stage bootloader iram_loader_seg start address */ + +#define SRAM_IRAM_ORG (SRAM_IRAM_START + CONFIG_ESP32S3_INSTRUCTION_CACHE_SIZE) +#define SRAM_DRAM_ORG (SRAM_DRAM_START + CONFIG_ESP32S3_INSTRUCTION_CACHE_SIZE) + +#define I_D_SRAM_SIZE SRAM_DRAM_END - SRAM_DRAM_ORG + +#if CONFIG_ESP32S3_USE_FIXED_STATIC_RAM_SIZE +ASSERT((CONFIG_ESP32S3_FIXED_STATIC_RAM_SIZE <= I_D_SRAM_SIZE), "Fixed static ram data does not fit.") +#define DRAM0_0_SEG_LEN CONFIG_ESP32S3_FIXED_STATIC_RAM_SIZE +#else +#define DRAM0_0_SEG_LEN I_D_SRAM_SIZE +#endif // CONFIG_ESP32S3_USE_FIXED_STATIC_RAM_SIZE + +MEMORY +{ + /** + * All these values assume the flash cache is on, and have the blocks this uses subtracted from the length + * of the various regions. The 'data access port' dram/drom regions map to the same iram/irom regions but + * are connected to the data port of the CPU and eg allow byte-wise access. + */ + + /* IRAM for PRO CPU. */ + iram0_0_seg (RX) : org = SRAM_IRAM_ORG, len = I_D_SRAM_SIZE + +#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS + /* Flash mapped instruction data */ + iram0_2_seg (RX) : org = 0x42000020, len = 0x8000000-0x20 + + /** + * (0x20 offset above is a convenience for the app binary image generation. + * Flash cache has 64KB pages. The .bin file which is flashed to the chip + * has a 0x18 byte file header, and each segment has a 0x08 byte segment + * header. Setting this offset makes it simple to meet the flash cache MMU's + * constraint that (paddr % 64KB == vaddr % 64KB).) + */ +#endif // CONFIG_APP_BUILD_USE_FLASH_SECTIONS + + /** + * Shared data RAM, excluding memory reserved for ROM bss/data/stack. + * Enabling Bluetooth & Trace Memory features in menuconfig will decrease the amount of RAM available. + */ + dram0_0_seg (RW) : org = SRAM_DRAM_ORG, len = DRAM0_0_SEG_LEN + +#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS + /* Flash mapped constant data */ + drom0_0_seg (R) : org = 0x3C000020, len = 0x8000000-0x20 + + /* (See iram0_2_seg for meaning of 0x20 offset in the above.) */ +#endif // CONFIG_APP_BUILD_USE_FLASH_SECTIONS + + /** + * RTC fast memory (executable). Persists over deep sleep. 
+ */ + rtc_iram_seg(RWX) : org = 0x600fe000, len = 0x2000 + + /** + * RTC fast memory (same block as above), viewed from data bus + */ + rtc_data_seg(RW) : org = 0x3ff80000, len = 0x2000 + + /** + * RTC slow memory (data accessible). Persists over deep sleep. + * Start of RTC slow memory is reserved for ULP co-processor code + data, if enabled. + */ + rtc_slow_seg(RW) : org = 0x50000000 + CONFIG_ESP32S3_ULP_COPROC_RESERVE_MEM, + len = 0x1000 - CONFIG_ESP32S3_ULP_COPROC_RESERVE_MEM +} + +#if CONFIG_ESP32S3_USE_FIXED_STATIC_RAM_SIZE +/* static data ends at defined address */ +_static_data_end = 0x3FCA0000 + DRAM0_0_SEG_LEN; +#else +_static_data_end = _bss_end; +#endif // CONFIG_ESP32S3_USE_FIXED_STATIC_RAM_SIZE + +/* Heap ends at top of dram0_0_seg */ +_heap_end = 0x40000000; + +_data_seg_org = ORIGIN(rtc_data_seg); + +#if CONFIG_ESP32S3_RTCDATA_IN_FAST_MEM +REGION_ALIAS("rtc_data_location", rtc_slow_seg ); +#else +REGION_ALIAS("rtc_data_location", rtc_data_seg ); +#endif // CONFIG_ESP32S3_RTCDATA_IN_FAST_MEM + +#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS +REGION_ALIAS("default_code_seg", iram0_2_seg); +#else +REGION_ALIAS("default_code_seg", iram0_0_seg); +#endif // CONFIG_APP_BUILD_USE_FLASH_SECTIONS + +#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS +REGION_ALIAS("default_rodata_seg", drom0_0_seg); +#else +REGION_ALIAS("default_rodata_seg", dram0_0_seg); +#endif // CONFIG_APP_BUILD_USE_FLASH_SECTIONS diff --git a/components/esp32s3/ld/esp32s3.peripherals.ld b/components/esp32s3/ld/esp32s3.peripherals.ld new file mode 100644 index 0000000000..ac13dfd067 --- /dev/null +++ b/components/esp32s3/ld/esp32s3.peripherals.ld @@ -0,0 +1,31 @@ +PROVIDE ( UART0 = 0x60000000 ); +PROVIDE ( SPIMEM1 = 0x60002000 ); +PROVIDE ( SPIMEM0 = 0x60003000 ); +PROVIDE ( GPIO = 0x60004000 ); +PROVIDE ( SIGMADELTA = 0x60004f00 ); +PROVIDE ( RTCCNTL = 0x60008000 ); +PROVIDE ( RTCIO = 0x60008400 ); +PROVIDE ( SENS = 0x60008800 ); +PROVIDE ( HINF = 0x6000B000 ); +PROVIDE ( I2S0 = 0x6000F000 ); +PROVIDE ( UART1 = 0x60010000 ); +PROVIDE ( I2C0 = 0x60013000 ); +PROVIDE ( UHCI0 = 0x60014000 ); +PROVIDE ( UHCI1 = 0x60014000 ); +PROVIDE ( HOST = 0x60015000 ); +PROVIDE ( RMT = 0x60016000 ); +PROVIDE ( RMTMEM = 0x60016800 ); +PROVIDE ( PCNT = 0x60017000 ); +PROVIDE ( SLC = 0x60018000 ); +PROVIDE ( LEDC = 0x60019000 ); +PROVIDE ( MCP = 0x600c3000 ); +PROVIDE ( TIMERG0 = 0x6001F000 ); +PROVIDE ( TIMERG1 = 0x60020000 ); +PROVIDE ( GPSPI2 = 0x60024000 ); +PROVIDE ( GPSPI3 = 0x60025000 ); +PROVIDE ( SYSCON = 0x60026000 ); +PROVIDE ( I2C1 = 0x60027000 ); +PROVIDE ( GPSPI4 = 0x60037000 ); +PROVIDE ( UART2 = 0x60010000 ); +PROVIDE ( APB_SARADC = 0x60040000 ); +PROVIDE ( LCD_CAM = 0x60041000 ); diff --git a/components/esp32s3/ld/esp32s3.project.ld.in b/components/esp32s3/ld/esp32s3.project.ld.in new file mode 100644 index 0000000000..b85c127ea0 --- /dev/null +++ b/components/esp32s3/ld/esp32s3.project.ld.in @@ -0,0 +1,420 @@ +/* Default entry point */ +ENTRY(call_start_cpu0); + +SECTIONS +{ + /** + * RTC fast memory holds RTC wake stub code, + * including from any source file named rtc_wake_stub*.c + */ + .rtc.text : + { + . = ALIGN(4); + + mapping[rtc_text] + + *rtc_wake_stub*.*(.literal .text .literal.* .text.*) + _rtc_text_end = ABSOLUTE(.); + } > rtc_iram_seg + + /** + * This section is required to skip rtc.text area because rtc_iram_seg and + * rtc_data_seg are reflect the same address space on different buses. + */ + .rtc.dummy : + { + _rtc_dummy_start = ABSOLUTE(.); + _rtc_fast_start = ABSOLUTE(.); + . 
= SIZEOF(.rtc.text); + _rtc_dummy_end = ABSOLUTE(.); + } > rtc_data_seg + + /** + * This section located in RTC FAST Memory area. + * It holds data marked with RTC_FAST_ATTR attribute. + * See the file "esp_attr.h" for more information. + */ + .rtc.force_fast : + { + . = ALIGN(4); + _rtc_force_fast_start = ABSOLUTE(.); + + _coredump_rtc_fast_start = ABSOLUTE(.); + mapping[rtc_fast_coredump] + _coredump_rtc_fast_end = ABSOLUTE(.); + + *(.rtc.force_fast .rtc.force_fast.*) + . = ALIGN(4) ; + _rtc_force_fast_end = ABSOLUTE(.); + } > rtc_data_seg + + /** + * RTC data section holds RTC wake stub + * data/rodata, including from any source file + * named rtc_wake_stub*.c and the data marked with + * RTC_DATA_ATTR, RTC_RODATA_ATTR attributes. + * The memory location of the data is dependent on + * CONFIG_ESP32S3_RTCDATA_IN_FAST_MEM option. + */ + .rtc.data : + { + _rtc_data_start = ABSOLUTE(.); + + /* coredump mapping */ + _coredump_rtc_start = ABSOLUTE(.); + mapping[rtc_coredump] + _coredump_rtc_end = ABSOLUTE(.); + + /* should be placed after coredump mapping */ + mapping[rtc_data] + + *rtc_wake_stub*.*(.data .rodata .data.* .rodata.* .bss .bss.*) + _rtc_data_end = ABSOLUTE(.); + } > rtc_data_location + + /* RTC bss, from any source file named rtc_wake_stub*.c */ + .rtc.bss (NOLOAD) : + { + _rtc_bss_start = ABSOLUTE(.); + *rtc_wake_stub*.*(.bss .bss.*) + *rtc_wake_stub*.*(COMMON) + + mapping[rtc_bss] + + _rtc_bss_end = ABSOLUTE(.); + } > rtc_data_location + + /** + * This section holds data that should not be initialized at power up + * and will be retained during deep sleep. + * User data marked with RTC_NOINIT_ATTR will be placed + * into this section. See the file "esp_attr.h" for more information. + * The memory location of the data is dependent on CONFIG_ESP32S3_RTCDATA_IN_FAST_MEM option. + */ + .rtc_noinit (NOLOAD): + { + . = ALIGN(4); + _rtc_noinit_start = ABSOLUTE(.); + *(.rtc_noinit .rtc_noinit.*) + . = ALIGN(4) ; + _rtc_noinit_end = ABSOLUTE(.); + } > rtc_data_location + + /** + * This section located in RTC SLOW Memory area. + * It holds data marked with RTC_SLOW_ATTR attribute. + * See the file "esp_attr.h" for more information. + */ + .rtc.force_slow : + { + . = ALIGN(4); + _rtc_force_slow_start = ABSOLUTE(.); + *(.rtc.force_slow .rtc.force_slow.*) + . = ALIGN(4) ; + _rtc_force_slow_end = ABSOLUTE(.); + } > rtc_slow_seg + + /* Get size of rtc slow data based on rtc_data_location alias */ + _rtc_slow_length = (ORIGIN(rtc_slow_seg) == ORIGIN(rtc_data_location)) + ? (_rtc_force_slow_end - _rtc_data_start) + : (_rtc_force_slow_end - _rtc_force_slow_start); + + _rtc_fast_length = (ORIGIN(rtc_slow_seg) == ORIGIN(rtc_data_location)) + ? (_rtc_force_fast_end - _rtc_fast_start) + : (_rtc_noinit_end - _rtc_fast_start); + + ASSERT((_rtc_slow_length <= LENGTH(rtc_slow_seg)), + "RTC_SLOW segment data does not fit.") + + ASSERT((_rtc_fast_length <= LENGTH(rtc_data_seg)), + "RTC_FAST segment data does not fit.") + + /* Send .iram0 code to iram */ + .iram0.vectors : + { + _iram_start = ABSOLUTE(.); + /* Vectors go to IRAM */ + _init_start = ABSOLUTE(.); + . = 0x0; + KEEP(*(.WindowVectors.text)); + . = 0x180; + KEEP(*(.Level2InterruptVector.text)); + . = 0x1c0; + KEEP(*(.Level3InterruptVector.text)); + . = 0x200; + KEEP(*(.Level4InterruptVector.text)); + . = 0x240; + KEEP(*(.Level5InterruptVector.text)); + . = 0x280; + KEEP(*(.DebugExceptionVector.text)); + . = 0x2c0; + KEEP(*(.NMIExceptionVector.text)); + . = 0x300; + KEEP(*(.KernelExceptionVector.text)); + . 
= 0x340; + KEEP(*(.UserExceptionVector.text)); + . = 0x3C0; + KEEP(*(.DoubleExceptionVector.text)); + . = 0x400; + _invalid_pc_placeholder = ABSOLUTE(.); + *(.*Vector.literal) + + *(.UserEnter.literal); + *(.UserEnter.text); + . = ALIGN (16); + *(.entry.text) + *(.init.literal) + *(.init) + _init_end = ABSOLUTE(.); + } > iram0_0_seg + + .iram0.text : + { + /* Code marked as running out of IRAM */ + _iram_text_start = ABSOLUTE(.); + + mapping[iram0_text] + + } > iram0_0_seg + + /** + * This section is required to skip .iram0.text area because iram0_0_seg and + * dram0_0_seg reflect the same address space on different buses. + */ + .dram0.dummy (NOLOAD): + { + . = ORIGIN(dram0_0_seg) + _iram_end - _iram_start; + } > dram0_0_seg + + .dram0.data : + { + _data_start = ABSOLUTE(.); + _bt_data_start = ABSOLUTE(.); + *libbt.a:(.data .data.*) + . = ALIGN (4); + _bt_data_end = ABSOLUTE(.); + _btdm_data_start = ABSOLUTE(.); + *libbtdm_app.a:(.data .data.*) + . = ALIGN (4); + _btdm_data_end = ABSOLUTE(.); + _nimble_data_start = ABSOLUTE(.); + *libnimble.a:(.data .data.*) + . = ALIGN (4); + _nimble_data_end = ABSOLUTE(.); + *(.gnu.linkonce.d.*) + *(.data1) + *(.sdata) + *(.sdata.*) + *(.gnu.linkonce.s.*) + *(.sdata2) + *(.sdata2.*) + *(.gnu.linkonce.s2.*) + *(.jcr) + + /* coredump mapping */ + _coredump_dram_start = ABSOLUTE(.); + mapping[dram_coredump] + _coredump_dram_end = ABSOLUTE(.); + + /* should be placed after coredump mapping */ + _esp_system_init_fn_array_start = ABSOLUTE(.); + KEEP (*(SORT(.esp_system_init_fn) SORT(.esp_system_init_fn.*))) + _esp_system_init_fn_array_end = ABSOLUTE(.); + + mapping[dram0_data] + + _data_end = ABSOLUTE(.); + . = ALIGN(4); + } > dram0_0_seg + + /** + * This section holds data that should not be initialized at power up. + * The section located in Internal SRAM memory region. The macro _NOINIT + * can be used as attribute to place data into this section. + * See the "esp_attr.h" file for more information. + */ + .noinit (NOLOAD): + { + . = ALIGN(4); + _noinit_start = ABSOLUTE(.); + *(.noinit .noinit.*) + . = ALIGN(4) ; + _noinit_end = ABSOLUTE(.); + } > dram0_0_seg + + /* Shared RAM */ + .dram0.bss (NOLOAD) : + { + . = ALIGN (8); + _bss_start = ABSOLUTE(.); + *(.ext_ram.bss*) + _bt_bss_start = ABSOLUTE(.); + *libbt.a:(.bss .bss.* COMMON) + . = ALIGN (4); + _bt_bss_end = ABSOLUTE(.); + _btdm_bss_start = ABSOLUTE(.); + *libbtdm_app.a:(.bss .bss.* COMMON) + . = ALIGN (4); + _btdm_bss_end = ABSOLUTE(.); + _nimble_bss_start = ABSOLUTE(.); + *libnimble.a:(.bss .bss.* COMMON) + . = ALIGN (4); + _nimble_bss_end = ABSOLUTE(.); + + mapping[dram0_bss] + + *(.dynsbss) + *(.sbss) + *(.sbss.*) + *(.gnu.linkonce.sb.*) + *(.scommon) + *(.sbss2) + *(.sbss2.*) + *(.gnu.linkonce.sb2.*) + *(.dynbss) + *(.share.mem) + *(.gnu.linkonce.b.*) + + . = ALIGN (8); + _bss_end = ABSOLUTE(.); + } > dram0_0_seg + + ASSERT(((_bss_end - ORIGIN(dram0_0_seg)) <= LENGTH(dram0_0_seg)), "DRAM segment data does not fit.") + + .flash.text : + { + _stext = .; + _text_start = ABSOLUTE(.); + + mapping[flash_text] + + *(.stub .gnu.warning .gnu.linkonce.literal.* .gnu.linkonce.t.*.literal .gnu.linkonce.t.*) + *(.irom0.text) /* catch stray ICACHE_RODATA_ATTR */ + *(.fini.literal) + *(.fini) + *(.gnu.version) + _text_end = ABSOLUTE(.); + _etext = .; + + /** + * Similar to _iram_start, this symbol goes here so it is + * resolved by addr2line in preference to the first symbol in + * the flash.text segment. + */ + _flash_cache_start = ABSOLUTE(0); + } > default_code_seg + + .flash_rodata_dummy (NOLOAD): + { + . 
= SIZEOF(.flash.text); + . = ALIGN(0x10000) + 0x20; + } > drom0_0_seg + + /* When modifying the alignment, don't forget to update tls_section_alignment in pxPortInitialiseStack */ + .flash.rodata : ALIGN(0x10) + { + _rodata_start = ABSOLUTE(.); + + *(.rodata_desc .rodata_desc.*) /* Should be the first. App version info. DO NOT PUT ANYTHING BEFORE IT! */ + *(.rodata_custom_desc .rodata_custom_desc.*) /* Should be the second. Custom app version info. DO NOT PUT ANYTHING BEFORE IT! */ + + mapping[flash_rodata] + + *(.irom1.text) /* catch stray ICACHE_RODATA_ATTR */ + *(.gnu.linkonce.r.*) + *(.rodata1) + __XT_EXCEPTION_TABLE_ = ABSOLUTE(.); + *(.xt_except_table) + *(.gcc_except_table .gcc_except_table.*) + *(.gnu.linkonce.e.*) + *(.gnu.version_r) + . = (. + 3) & ~ 3; + __eh_frame = ABSOLUTE(.); + KEEP(*(.eh_frame)) + . = (. + 7) & ~ 3; + /* C++ constructor and destructor tables */ + /* Don't include anything from crtbegin.o or crtend.o, as IDF doesn't use toolchain crt */ + __init_array_start = ABSOLUTE(.); + KEEP (*(EXCLUDE_FILE (*crtend.* *crtbegin.*) .ctors .ctors.*)) + __init_array_end = ABSOLUTE(.); + KEEP (*crtbegin.*(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.*) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + /* C++ exception handlers table: */ + __XT_EXCEPTION_DESCS_ = ABSOLUTE(.); + *(.xt_except_desc) + *(.gnu.linkonce.h.*) + __XT_EXCEPTION_DESCS_END__ = ABSOLUTE(.); + *(.xt_except_desc_end) + *(.dynamic) + *(.gnu.version_d) + /* Addresses of memory regions reserved via SOC_RESERVE_MEMORY_REGION() */ + soc_reserved_memory_region_start = ABSOLUTE(.); + KEEP (*(.reserved_memory_address)) + soc_reserved_memory_region_end = ABSOLUTE(.); + _rodata_end = ABSOLUTE(.); + /* Literals are also RO data. */ + _lit4_start = ABSOLUTE(.); + *(*.lit4) + *(.lit4.*) + *(.gnu.linkonce.lit4.*) + _lit4_end = ABSOLUTE(.); + . = ALIGN(4); + _thread_local_start = ABSOLUTE(.); + *(.tdata) + *(.tdata.*) + *(.tbss) + *(.tbss.*) + _thread_local_end = ABSOLUTE(.); + . = ALIGN(4); + } > default_rodata_seg + + /* Marks the end of IRAM code segment */ + .iram0.text_end (NOLOAD) : + { + . = ALIGN (4); + _iram_text_end = ABSOLUTE(.); + } > iram0_0_seg + + .iram0.data : + { + . = ALIGN(4); + _iram_data_start = ABSOLUTE(.); + + /* coredump mapping */ + _coredump_iram_start = ABSOLUTE(.); + mapping[iram_coredump] + _coredump_iram_end = ABSOLUTE(.); + + /* should be placed after coredump mapping */ + mapping[iram0_data] + + _iram_data_end = ABSOLUTE(.); + } > iram0_0_seg + + .iram0.bss (NOLOAD) : + { + . = ALIGN(4); + _iram_bss_start = ABSOLUTE(.); + + mapping[iram0_bss] + + _iram_bss_end = ABSOLUTE(.); + . = ALIGN(4); + _iram_end = ABSOLUTE(.); + } > iram0_0_seg + + /* Marks the end of data, bss and possibly rodata */ + .dram0.heap_start (NOLOAD) : + { + . 
= ALIGN (8); + _heap_start = ABSOLUTE(.); + } > dram0_0_seg +} + +ASSERT(((_iram_end - ORIGIN(iram0_0_seg)) <= LENGTH(iram0_0_seg)), + "IRAM0 segment data does not fit.") + +ASSERT(((_heap_start - ORIGIN(dram0_0_seg)) <= LENGTH(dram0_0_seg)), + "DRAM segment data does not fit.") diff --git a/components/esp32s3/ld/esp32s3_fragments.lf b/components/esp32s3/ld/esp32s3_fragments.lf new file mode 100644 index 0000000000..2ab27f1091 --- /dev/null +++ b/components/esp32s3/ld/esp32s3_fragments.lf @@ -0,0 +1,132 @@ +[sections:text] +entries: + .text+ + .literal+ + +[sections:data] +entries: + .data+ + +[sections:bss] +entries: + .bss+ + +[sections:common] +entries: + COMMON + +[sections:rodata] +entries: + .rodata+ + +[sections:rtc_text] +entries: + .rtc.text+ + .rtc.literal + +[sections:rtc_data] +entries: + .rtc.data+ + +[sections:rtc_rodata] +entries: + .rtc.rodata+ + +[sections:rtc_bss] +entries: + .rtc.bss + +[sections:rtc_fast_coredump] +entries: + .rtc.fast.coredump+ + +[sections:rtc_coredump] +entries: + .rtc.coredump+ + +[sections:dram_coredump] +entries: + .dram1.coredump+ + +[sections:iram_coredump] +entries: + .iram.data.coredump+ + +[sections:iram] +entries: + .iram1+ + +[sections:iram_data] +entries: + .iram.data+ + +[sections:iram_bss] +entries: + .iram.bss+ + +[sections:dram] +entries: + .dram1+ + +[sections:wifi_iram] +entries: + .wifi0iram+ + +[sections:wifi_rx_iram] +entries: + .wifirxiram+ + +[scheme:default] +entries: + if APP_BUILD_USE_FLASH_SECTIONS = y: + text -> flash_text + rodata -> flash_rodata + else: + text -> iram0_text + rodata -> dram0_data + data -> dram0_data + bss -> dram0_bss + common -> dram0_bss + iram -> iram0_text + iram_data -> iram0_data + iram_bss -> iram0_bss + dram -> dram0_data + rtc_text -> rtc_text + rtc_data -> rtc_data + rtc_rodata -> rtc_data + rtc_bss -> rtc_bss + wifi_iram -> flash_text + wifi_rx_iram -> flash_text + dram_coredump -> dram_coredump + iram_coredump -> iram_coredump + rtc_coredump -> rtc_coredump + rtc_fast_coredump -> rtc_fast_coredump + +[scheme:rtc] +entries: + text -> rtc_text + data -> rtc_data + rodata -> rtc_data + bss -> rtc_bss + common -> rtc_bss + +[scheme:noflash] +entries: + text -> iram0_text + rodata -> dram0_data + +[scheme:noflash_data] +entries: + rodata -> dram0_data + +[scheme:noflash_text] +entries: + text -> iram0_text + +[scheme:wifi_iram] +entries: + wifi_iram -> iram0_text + +[scheme:wifi_rx_iram] +entries: + wifi_rx_iram -> iram0_text diff --git a/components/esp32s3/linker.lf b/components/esp32s3/linker.lf new file mode 100644 index 0000000000..87e068ccd4 --- /dev/null +++ b/components/esp32s3/linker.lf @@ -0,0 +1,9 @@ +[mapping:gcc] +archive: libgcc.a +entries: + lib2funcs (noflash_text) + +[mapping:gcov] +archive: libgcov.a +entries: + * (noflash) diff --git a/components/esp32s3/memprot.c b/components/esp32s3/memprot.c new file mode 100644 index 0000000000..00dbf2063c --- /dev/null +++ b/components/esp32s3/memprot.c @@ -0,0 +1,24 @@ +// Copyright 2020 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +/* INTERNAL API + * implementation of generic interface to MMU memory protection features + */ + +#include + +bool esp_memprot_is_assoc_intr_any() +{ + return true; +} diff --git a/components/esp32s3/pm_esp32s3.c b/components/esp32s3/pm_esp32s3.c new file mode 100644 index 0000000000..7f96c5617d --- /dev/null +++ b/components/esp32s3/pm_esp32s3.c @@ -0,0 +1,643 @@ +// Copyright 2016-2020 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include +#include + +#include "esp_attr.h" +#include "esp_err.h" +#include "esp_pm.h" +#include "esp_log.h" +#include "esp32s3/clk.h" +#include "esp_private/crosscore_int.h" + +#include "soc/rtc.h" + +#include "freertos/FreeRTOS.h" +#include "freertos/task.h" +#include "freertos/xtensa_timer.h" +#include "xtensa/core-macros.h" + +#include "esp_private/pm_impl.h" +#include "esp_private/pm_trace.h" +#include "esp_private/esp_timer_private.h" +#include "esp32s3/pm.h" +#include "esp_sleep.h" + +/* CCOMPARE update timeout, in CPU cycles. Any value above ~600 cycles will work + * for the purpose of detecting a deadlock. + */ +#define CCOMPARE_UPDATE_TIMEOUT 1000000 + +/* When changing CCOMPARE, don't allow changes if the difference is less + * than this. This is to prevent setting CCOMPARE below CCOUNT. + */ +#define CCOMPARE_MIN_CYCLES_IN_FUTURE 1000 + +/* When light sleep is used, wake this number of microseconds earlier than + * the next tick. + */ +#define LIGHT_SLEEP_EARLY_WAKEUP_US 100 + +/* Minimal divider at which REF_CLK_FREQ can be obtained */ +#define REF_CLK_DIV_MIN 2 + +#ifdef CONFIG_PM_PROFILING +#define WITH_PROFILING +#endif + + +static portMUX_TYPE s_switch_lock = portMUX_INITIALIZER_UNLOCKED; +/* The following state variables are protected using s_switch_lock: */ +/* Current sleep mode; When switching, contains old mode until switch is complete */ +static pm_mode_t s_mode = PM_MODE_CPU_MAX; +/* True when switch is in progress */ +static volatile bool s_is_switching; +/* When switch is in progress, this is the mode we are switching into */ +static pm_mode_t s_new_mode = PM_MODE_CPU_MAX; +/* Number of times each mode was locked */ +static size_t s_mode_lock_counts[PM_MODE_COUNT]; +/* Bit mask of locked modes. BIT(i) is set iff s_mode_lock_counts[i] > 0. */ +static uint32_t s_mode_mask; + +/* Divider and multiplier used to adjust (ccompare - ccount) duration. + * Only set to non-zero values when switch is in progress. + */ +static uint32_t s_ccount_div; +static uint32_t s_ccount_mul; + +#if CONFIG_FREERTOS_USE_TICKLESS_IDLE +#define PERIPH_SKIP_LIGHT_SLEEP_NO 1 + +/* Indicates if light sleep shoule be skipped by peripherals. */ +static skip_light_sleep_cb_t s_periph_skip_light_sleep_cb[PERIPH_SKIP_LIGHT_SLEEP_NO]; + +/* Indicates if light sleep entry was skipped in vApplicationSleep for given CPU. 
+ * This in turn gets used in IDLE hook to decide if `waiti` needs + * to be invoked or not. + */ +static bool s_skipped_light_sleep[portNUM_PROCESSORS]; + +#if portNUM_PROCESSORS == 2 +/* When light sleep is finished on one CPU, it is possible that the other CPU + * will enter light sleep again very soon, before interrupts on the first CPU + * get a chance to run. To avoid such situation, set a flag for the other CPU to + * skip light sleep attempt. + */ +static bool s_skip_light_sleep[portNUM_PROCESSORS]; +#endif // portNUM_PROCESSORS == 2 +#endif // CONFIG_FREERTOS_USE_TICKLESS_IDLE + +/* Indicates to the ISR hook that CCOMPARE needs to be updated on the given CPU. + * Used in conjunction with cross-core interrupt to update CCOMPARE on the other CPU. + */ +static volatile bool s_need_update_ccompare[portNUM_PROCESSORS]; + +/* A flag indicating that Idle hook has run on a given CPU; + * Next interrupt on the same CPU will take s_rtos_lock_handle. + */ +static bool s_core_idle[portNUM_PROCESSORS]; + +/* When no RTOS tasks are active, these locks are released to allow going into + * a lower power mode. Used by ISR hook and idle hook. + */ +static esp_pm_lock_handle_t s_rtos_lock_handle[portNUM_PROCESSORS]; + +/* Lookup table of CPU frequency configs to be used in each mode. + * Initialized by esp_pm_impl_init and modified by esp_pm_configure. + */ +rtc_cpu_freq_config_t s_cpu_freq_by_mode[PM_MODE_COUNT]; + +/* Whether automatic light sleep is enabled */ +static bool s_light_sleep_en = false; + +/* When configuration is changed, current frequency may not match the + * newly configured frequency for the current mode. This is an indicator + * to the mode switch code to get the actual current frequency instead of + * relying on the current mode. + */ +static bool s_config_changed = false; + +#ifdef WITH_PROFILING +/* Time, in microseconds, spent so far in each mode */ +static pm_time_t s_time_in_mode[PM_MODE_COUNT]; +/* Timestamp, in microseconds, when the mode switch last happened */ +static pm_time_t s_last_mode_change_time; +/* User-readable mode names, used by esp_pm_impl_dump_stats */ +static const char *s_mode_names[] = { + "SLEEP", + "APB_MIN", + "APB_MAX", + "CPU_MAX" +}; +#endif // WITH_PROFILING + + +static const char *TAG = "pm_esp32s3"; + +static void update_ccompare(void); +static void do_switch(pm_mode_t new_mode); +static void leave_idle(void); +static void on_freq_update(uint32_t old_ticks_per_us, uint32_t ticks_per_us); + + +pm_mode_t esp_pm_impl_get_mode(esp_pm_lock_type_t type, int arg) +{ + (void) arg; + if (type == ESP_PM_CPU_FREQ_MAX) { + return PM_MODE_CPU_MAX; + } else if (type == ESP_PM_APB_FREQ_MAX) { + return PM_MODE_APB_MAX; + } else if (type == ESP_PM_NO_LIGHT_SLEEP) { + return PM_MODE_APB_MIN; + } else { + // unsupported mode + abort(); + } +} + +esp_err_t esp_pm_configure(const void *vconfig) +{ +#ifndef CONFIG_PM_ENABLE + return ESP_ERR_NOT_SUPPORTED; +#endif + + const esp_pm_config_esp32s3_t *config = (const esp_pm_config_esp32s3_t *) vconfig; +#ifndef CONFIG_FREERTOS_USE_TICKLESS_IDLE + if (config->light_sleep_enable) { + return ESP_ERR_NOT_SUPPORTED; + } +#endif + + int min_freq_mhz = config->min_freq_mhz; + int max_freq_mhz = config->max_freq_mhz; + + if (min_freq_mhz > max_freq_mhz) { + return ESP_ERR_INVALID_ARG; + } + + rtc_cpu_freq_config_t freq_config; + if (!rtc_clk_cpu_freq_mhz_to_config(min_freq_mhz, &freq_config)) { + ESP_LOGW(TAG, "invalid min_freq_mhz value (%d)", min_freq_mhz); + return ESP_ERR_INVALID_ARG; + } + + int xtal_freq_mhz = (int) 
rtc_clk_xtal_freq_get(); + if (min_freq_mhz < xtal_freq_mhz && min_freq_mhz * MHZ / REF_CLK_FREQ < REF_CLK_DIV_MIN) { + ESP_LOGW(TAG, "min_freq_mhz should be >= %d", REF_CLK_FREQ * REF_CLK_DIV_MIN / MHZ); + return ESP_ERR_INVALID_ARG; + } + + if (!rtc_clk_cpu_freq_mhz_to_config(max_freq_mhz, &freq_config)) { + ESP_LOGW(TAG, "invalid max_freq_mhz value (%d)", max_freq_mhz); + return ESP_ERR_INVALID_ARG; + } + + int apb_max_freq = MIN(max_freq_mhz, 80); /* CPU frequency in APB_MAX mode */ + apb_max_freq = MAX(apb_max_freq, min_freq_mhz); + + ESP_LOGI(TAG, "Frequency switching config: " + "CPU_MAX: %d, APB_MAX: %d, APB_MIN: %d, Light sleep: %s", + max_freq_mhz, + apb_max_freq, + min_freq_mhz, + config->light_sleep_enable ? "ENABLED" : "DISABLED"); + + portENTER_CRITICAL(&s_switch_lock); + bool res; + res = rtc_clk_cpu_freq_mhz_to_config(max_freq_mhz, &s_cpu_freq_by_mode[PM_MODE_CPU_MAX]); + assert(res); + res = rtc_clk_cpu_freq_mhz_to_config(apb_max_freq, &s_cpu_freq_by_mode[PM_MODE_APB_MAX]); + assert(res); + res = rtc_clk_cpu_freq_mhz_to_config(min_freq_mhz, &s_cpu_freq_by_mode[PM_MODE_APB_MIN]); + assert(res); + s_cpu_freq_by_mode[PM_MODE_LIGHT_SLEEP] = s_cpu_freq_by_mode[PM_MODE_APB_MIN]; + s_light_sleep_en = config->light_sleep_enable; + s_config_changed = true; + portEXIT_CRITICAL(&s_switch_lock); + + return ESP_OK; +} + +static pm_mode_t IRAM_ATTR get_lowest_allowed_mode(void) +{ + /* TODO: optimize using ffs/clz */ + if (s_mode_mask >= BIT(PM_MODE_CPU_MAX)) { + return PM_MODE_CPU_MAX; + } else if (s_mode_mask >= BIT(PM_MODE_APB_MAX)) { + return PM_MODE_APB_MAX; + } else if (s_mode_mask >= BIT(PM_MODE_APB_MIN) || !s_light_sleep_en) { + return PM_MODE_APB_MIN; + } else { + return PM_MODE_LIGHT_SLEEP; + } +} + +void IRAM_ATTR esp_pm_impl_switch_mode(pm_mode_t mode, + pm_mode_switch_t lock_or_unlock, pm_time_t now) +{ + bool need_switch = false; + uint32_t mode_mask = BIT(mode); + portENTER_CRITICAL_SAFE(&s_switch_lock); + uint32_t count; + if (lock_or_unlock == MODE_LOCK) { + count = ++s_mode_lock_counts[mode]; + } else { + count = s_mode_lock_counts[mode]--; + } + if (count == 1) { + if (lock_or_unlock == MODE_LOCK) { + s_mode_mask |= mode_mask; + } else { + s_mode_mask &= ~mode_mask; + } + need_switch = true; + } + + pm_mode_t new_mode = s_mode; + if (need_switch) { + new_mode = get_lowest_allowed_mode(); +#ifdef WITH_PROFILING + if (s_last_mode_change_time != 0) { + pm_time_t diff = now - s_last_mode_change_time; + s_time_in_mode[s_mode] += diff; + } + s_last_mode_change_time = now; +#endif // WITH_PROFILING + } + portEXIT_CRITICAL_SAFE(&s_switch_lock); + if (need_switch && new_mode != s_mode) { + do_switch(new_mode); + } +} + +/** + * @brief Update clock dividers in esp_timer and FreeRTOS, and adjust CCOMPARE + * values on both CPUs. 
+ * @param old_ticks_per_us old CPU frequency + * @param ticks_per_us new CPU frequency + */ +static void IRAM_ATTR on_freq_update(uint32_t old_ticks_per_us, uint32_t ticks_per_us) +{ + uint32_t old_apb_ticks_per_us = MIN(old_ticks_per_us, 80); + uint32_t apb_ticks_per_us = MIN(ticks_per_us, 80); + /* Update APB frequency value used by the timer */ + if (old_apb_ticks_per_us != apb_ticks_per_us) { + esp_timer_private_update_apb_freq(apb_ticks_per_us); + } + + /* Calculate new tick divisor */ + _xt_tick_divisor = ticks_per_us * MHZ / XT_TICK_PER_SEC; + + int core_id = xPortGetCoreID(); + if (s_rtos_lock_handle[core_id] != NULL) { + ESP_PM_TRACE_ENTER(CCOMPARE_UPDATE, core_id); + /* ccount_div and ccount_mul are used in esp_pm_impl_update_ccompare + * to calculate new CCOMPARE value. + */ + s_ccount_div = old_ticks_per_us; + s_ccount_mul = ticks_per_us; + + /* Update CCOMPARE value on this CPU */ + update_ccompare(); + +#if portNUM_PROCESSORS == 2 + /* Send interrupt to the other CPU to update CCOMPARE value */ + int other_core_id = (core_id == 0) ? 1 : 0; + + s_need_update_ccompare[other_core_id] = true; + esp_crosscore_int_send_freq_switch(other_core_id); + + int timeout = 0; + while (s_need_update_ccompare[other_core_id]) { + if (++timeout == CCOMPARE_UPDATE_TIMEOUT) { + assert(false && "failed to update CCOMPARE, possible deadlock"); + } + } +#endif // portNUM_PROCESSORS == 2 + + s_ccount_mul = 0; + s_ccount_div = 0; + ESP_PM_TRACE_EXIT(CCOMPARE_UPDATE, core_id); + } +} + +/** + * Perform the switch to new power mode. + * Currently only changes the CPU frequency and adjusts clock dividers. + * No light sleep yet. + * @param new_mode mode to switch to + */ +static void IRAM_ATTR do_switch(pm_mode_t new_mode) +{ + const int core_id = xPortGetCoreID(); + + do { + portENTER_CRITICAL_ISR(&s_switch_lock); + if (!s_is_switching) { + break; + } + if (s_new_mode <= new_mode) { + portEXIT_CRITICAL_ISR(&s_switch_lock); + return; + } + if (s_need_update_ccompare[core_id]) { + s_need_update_ccompare[core_id] = false; + } + portEXIT_CRITICAL_ISR(&s_switch_lock); + } while (true); + s_new_mode = new_mode; + s_is_switching = true; + bool config_changed = s_config_changed; + s_config_changed = false; + portEXIT_CRITICAL_ISR(&s_switch_lock); + + rtc_cpu_freq_config_t new_config = s_cpu_freq_by_mode[new_mode]; + rtc_cpu_freq_config_t old_config; + + if (!config_changed) { + old_config = s_cpu_freq_by_mode[s_mode]; + } else { + rtc_clk_cpu_freq_get_config(&old_config); + } + + if (new_config.freq_mhz != old_config.freq_mhz) { + uint32_t old_ticks_per_us = old_config.freq_mhz; + uint32_t new_ticks_per_us = new_config.freq_mhz; + + bool switch_down = new_ticks_per_us < old_ticks_per_us; + + ESP_PM_TRACE_ENTER(FREQ_SWITCH, core_id); + if (switch_down) { + on_freq_update(old_ticks_per_us, new_ticks_per_us); + } + rtc_clk_cpu_freq_set_config_fast(&new_config); + if (!switch_down) { + on_freq_update(old_ticks_per_us, new_ticks_per_us); + } + ESP_PM_TRACE_EXIT(FREQ_SWITCH, core_id); + } + + portENTER_CRITICAL_ISR(&s_switch_lock); + s_mode = new_mode; + s_is_switching = false; + portEXIT_CRITICAL_ISR(&s_switch_lock); +} + +/** + * @brief Calculate new CCOMPARE value based on s_ccount_{mul,div} + * + * Adjusts CCOMPARE value so that the interrupt happens at the same time as it + * would happen without the frequency change. + * Assumes that the new_frequency = old_frequency * s_ccount_mul / s_ccount_div. 
+ */ +static void IRAM_ATTR update_ccompare(void) +{ + uint32_t ccount = XTHAL_GET_CCOUNT(); + uint32_t ccompare = XTHAL_GET_CCOMPARE(XT_TIMER_INDEX); + if ((ccompare - CCOMPARE_MIN_CYCLES_IN_FUTURE) - ccount < UINT32_MAX / 2) { + uint32_t diff = ccompare - ccount; + uint32_t diff_scaled = (diff * s_ccount_mul + s_ccount_div - 1) / s_ccount_div; + if (diff_scaled < _xt_tick_divisor) { + uint32_t new_ccompare = ccount + diff_scaled; + XTHAL_SET_CCOMPARE(XT_TIMER_INDEX, new_ccompare); + } + } +} + +static void IRAM_ATTR leave_idle(void) +{ + int core_id = xPortGetCoreID(); + if (s_core_idle[core_id]) { + // TODO: possible optimization: raise frequency here first + esp_pm_lock_acquire(s_rtos_lock_handle[core_id]); + s_core_idle[core_id] = false; + } +} + +void esp_pm_impl_idle_hook(void) +{ + int core_id = xPortGetCoreID(); + uint32_t state = portENTER_CRITICAL_NESTED(); + if (!s_core_idle[core_id]) { + esp_pm_lock_release(s_rtos_lock_handle[core_id]); + s_core_idle[core_id] = true; + } + portEXIT_CRITICAL_NESTED(state); + ESP_PM_TRACE_ENTER(IDLE, core_id); +} + +void IRAM_ATTR esp_pm_impl_isr_hook(void) +{ + int core_id = xPortGetCoreID(); + ESP_PM_TRACE_ENTER(ISR_HOOK, core_id); + /* Prevent higher level interrupts (than the one this function was called from) + * from happening in this section, since they will also call into esp_pm_impl_isr_hook. + */ + uint32_t state = portENTER_CRITICAL_NESTED(); +#if portNUM_PROCESSORS == 2 + if (s_need_update_ccompare[core_id]) { + update_ccompare(); + s_need_update_ccompare[core_id] = false; + } else { + leave_idle(); + } +#else + leave_idle(); +#endif // portNUM_PROCESSORS == 2 + portEXIT_CRITICAL_NESTED(state); + ESP_PM_TRACE_EXIT(ISR_HOOK, core_id); +} + +void esp_pm_impl_waiti(void) +{ +#if CONFIG_FREERTOS_USE_TICKLESS_IDLE + int core_id = xPortGetCoreID(); + if (s_skipped_light_sleep[core_id]) { + asm("waiti 0"); + /* Interrupt took the CPU out of waiti and s_rtos_lock_handle[core_id] + * is now taken. However since we are back to idle task, we can release + * the lock so that vApplicationSleep can attempt to enter light sleep. 
+ */ + esp_pm_impl_idle_hook(); + s_skipped_light_sleep[core_id] = false; + } +#else + asm("waiti 0"); +#endif // CONFIG_FREERTOS_USE_TICKLESS_IDLE +} + +#if CONFIG_FREERTOS_USE_TICKLESS_IDLE + +esp_err_t esp_pm_register_skip_light_sleep_callback(skip_light_sleep_cb_t cb) +{ + for (int i = 0; i < PERIPH_SKIP_LIGHT_SLEEP_NO; i++) { + if (s_periph_skip_light_sleep_cb[i] == cb) { + return ESP_OK; + } else if (s_periph_skip_light_sleep_cb[i] == NULL) { + s_periph_skip_light_sleep_cb[i] = cb; + return ESP_OK; + } + } + return ESP_ERR_NO_MEM; +} + +esp_err_t esp_pm_unregister_skip_light_sleep_callback(skip_light_sleep_cb_t cb) +{ + for (int i = 0; i < PERIPH_SKIP_LIGHT_SLEEP_NO; i++) { + if (s_periph_skip_light_sleep_cb[i] == cb) { + s_periph_skip_light_sleep_cb[i] = NULL; + return ESP_OK; + } + } + return ESP_ERR_INVALID_STATE; +} + +static inline bool IRAM_ATTR periph_should_skip_light_sleep(void) +{ + for (int i = 0; i < PERIPH_SKIP_LIGHT_SLEEP_NO; i++) { + if (s_periph_skip_light_sleep_cb[i]) { + if (s_periph_skip_light_sleep_cb[i]() == true) { + return true; + } + } + } + return false; +} +static inline bool IRAM_ATTR should_skip_light_sleep(int core_id) +{ +#if portNUM_PROCESSORS == 2 + if (s_skip_light_sleep[core_id]) { + s_skip_light_sleep[core_id] = false; + s_skipped_light_sleep[core_id] = true; + return true; + } +#endif // portNUM_PROCESSORS == 2 + if (s_mode != PM_MODE_LIGHT_SLEEP || s_is_switching || periph_should_skip_light_sleep()) { + s_skipped_light_sleep[core_id] = true; + } else { + s_skipped_light_sleep[core_id] = false; + } + return s_skipped_light_sleep[core_id]; +} + +static inline void IRAM_ATTR other_core_should_skip_light_sleep(int core_id) +{ +#if portNUM_PROCESSORS == 2 + s_skip_light_sleep[!core_id] = true; +#endif +} + +void IRAM_ATTR vApplicationSleep( TickType_t xExpectedIdleTime ) +{ + portENTER_CRITICAL(&s_switch_lock); + int core_id = xPortGetCoreID(); + if (!should_skip_light_sleep(core_id)) { + /* Calculate how much we can sleep */ + int64_t next_esp_timer_alarm = esp_timer_get_next_alarm(); + int64_t now = esp_timer_get_time(); + int64_t time_until_next_alarm = next_esp_timer_alarm - now; + int64_t wakeup_delay_us = portTICK_PERIOD_MS * 1000LL * xExpectedIdleTime; + int64_t sleep_time_us = MIN(wakeup_delay_us, time_until_next_alarm); + if (sleep_time_us >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP * portTICK_PERIOD_MS * 1000LL) { + esp_sleep_enable_timer_wakeup(sleep_time_us - LIGHT_SLEEP_EARLY_WAKEUP_US); +#ifdef CONFIG_PM_TRACE + /* to force tracing GPIOs to keep state */ + esp_sleep_pd_config(ESP_PD_DOMAIN_RTC_PERIPH, ESP_PD_OPTION_ON); +#endif + /* Enter sleep */ + ESP_PM_TRACE_ENTER(SLEEP, core_id); + int64_t sleep_start = esp_timer_get_time(); + esp_light_sleep_start(); + int64_t slept_us = esp_timer_get_time() - sleep_start; + ESP_PM_TRACE_EXIT(SLEEP, core_id); + + uint32_t slept_ticks = slept_us / (portTICK_PERIOD_MS * 1000LL); + if (slept_ticks > 0) { + /* Adjust RTOS tick count based on the amount of time spent in sleep */ + vTaskStepTick(slept_ticks); + + /* Trigger tick interrupt, since sleep time was longer + * than portTICK_PERIOD_MS. Note that setting INTSET does not + * work for timer interrupt, and changing CCOMPARE would clear + * the interrupt flag. 
+ */ + XTHAL_SET_CCOUNT(XTHAL_GET_CCOMPARE(XT_TIMER_INDEX) - 16); + while (!(XTHAL_GET_INTERRUPT() & BIT(XT_TIMER_INTNUM))) { + ; + } + } + other_core_should_skip_light_sleep(core_id); + } + } + portEXIT_CRITICAL(&s_switch_lock); +} +#endif //CONFIG_FREERTOS_USE_TICKLESS_IDLE + +#ifdef WITH_PROFILING +void esp_pm_impl_dump_stats(FILE *out) +{ + pm_time_t time_in_mode[PM_MODE_COUNT]; + + portENTER_CRITICAL_ISR(&s_switch_lock); + memcpy(time_in_mode, s_time_in_mode, sizeof(time_in_mode)); + pm_time_t last_mode_change_time = s_last_mode_change_time; + pm_mode_t cur_mode = s_mode; + pm_time_t now = pm_get_time(); + portEXIT_CRITICAL_ISR(&s_switch_lock); + + time_in_mode[cur_mode] += now - last_mode_change_time; + + fprintf(out, "Mode stats:\n"); + for (int i = 0; i < PM_MODE_COUNT; ++i) { + if (i == PM_MODE_LIGHT_SLEEP && !s_light_sleep_en) { + /* don't display light sleep mode if it's not enabled */ + continue; + } + fprintf(out, "%8s %3dM %12lld %2d%%\n", + s_mode_names[i], + s_cpu_freq_by_mode[i].freq_mhz, + time_in_mode[i], + (int) (time_in_mode[i] * 100 / now)); + } +} +#endif // WITH_PROFILING + +void esp_pm_impl_init(void) +{ +#ifdef CONFIG_PM_TRACE + esp_pm_trace_init(); +#endif + ESP_ERROR_CHECK(esp_pm_lock_create(ESP_PM_CPU_FREQ_MAX, 0, "rtos0", + &s_rtos_lock_handle[0])); + ESP_ERROR_CHECK(esp_pm_lock_acquire(s_rtos_lock_handle[0])); +#if portNUM_PROCESSORS == 2 + ESP_ERROR_CHECK(esp_pm_lock_create(ESP_PM_CPU_FREQ_MAX, 0, "rtos1", + &s_rtos_lock_handle[1])); + ESP_ERROR_CHECK(esp_pm_lock_acquire(s_rtos_lock_handle[1])); +#endif // portNUM_PROCESSORS == 2 + + /* Configure all modes to use the default CPU frequency. + * This will be modified later by a call to esp_pm_configure. + */ + rtc_cpu_freq_config_t default_config; + if (!rtc_clk_cpu_freq_mhz_to_config(CONFIG_ESP32S3_DEFAULT_CPU_FREQ_MHZ, &default_config)) { + assert(false && "unsupported frequency"); + } + for (size_t i = 0; i < PM_MODE_COUNT; ++i) { + s_cpu_freq_by_mode[i] = default_config; + } +} diff --git a/components/esp32s3/pm_trace.c b/components/esp32s3/pm_trace.c new file mode 100644 index 0000000000..9cca096192 --- /dev/null +++ b/components/esp32s3/pm_trace.c @@ -0,0 +1,51 @@ +// Copyright 2016-2020 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "esp_private/pm_trace.h" +#include "driver/gpio.h" +#include "soc/gpio_reg.h" + +/* GPIOs to use for tracing of esp_pm events. + * Two entries in the array for each type, one for each CPU. + * Feel free to change when debugging. 
+ */ +static const int DRAM_ATTR s_trace_io[] = { + BIT(4), BIT(5), // ESP_PM_TRACE_IDLE + BIT(16), BIT(17), // ESP_PM_TRACE_TICK + BIT(18), BIT(18), // ESP_PM_TRACE_FREQ_SWITCH + BIT(19), BIT(19), // ESP_PM_TRACE_CCOMPARE_UPDATE + BIT(25), BIT(26), // ESP_PM_TRACE_ISR_HOOK + BIT(27), BIT(27), // ESP_PM_TRACE_SLEEP +}; + +void esp_pm_trace_init(void) +{ + for (size_t i = 0; i < sizeof(s_trace_io) / sizeof(s_trace_io[0]); ++i) { + int io = __builtin_ffs(s_trace_io[i]); + if (io == 0) { + continue; + } + gpio_set_direction(io - 1, GPIO_MODE_OUTPUT); + } +} + +void IRAM_ATTR esp_pm_trace_enter(esp_pm_trace_event_t event, int core_id) +{ + REG_WRITE(GPIO_OUT_W1TS_REG, s_trace_io[2 * event + core_id]); +} + +void IRAM_ATTR esp_pm_trace_exit(esp_pm_trace_event_t event, int core_id) +{ + REG_WRITE(GPIO_OUT_W1TC_REG, s_trace_io[2 * event + core_id]); +} diff --git a/components/esp32s3/sleep_modes.c b/components/esp32s3/sleep_modes.c new file mode 100644 index 0000000000..ee71b65243 --- /dev/null +++ b/components/esp32s3/sleep_modes.c @@ -0,0 +1,717 @@ +// Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include +#include "esp_attr.h" +#include "esp_sleep.h" +#include "esp_private/esp_timer_private.h" +#include "esp_log.h" +#include "esp32s3/clk.h" +#include "esp_newlib.h" +#include "esp_spi_flash.h" +#include "esp32s3/rom/cache.h" +#include "esp32s3/rom/rtc.h" +#include "esp32s3/rom/ets_sys.h" +#include "esp_rom_uart.h" +#include "soc/cpu.h" +#include "soc/rtc.h" +#include "soc/spi_periph.h" +#include "soc/dport_reg.h" +#include "soc/extmem_reg.h" +#include "soc/soc_memory_layout.h" +#include "soc/uart_caps.h" +#include "hal/wdt_hal.h" +#include "hal/clk_gate_ll.h" +#include "driver/rtc_io.h" +#include "freertos/FreeRTOS.h" +#include "freertos/task.h" +#include "sdkconfig.h" + +// If light sleep time is less than that, don't power down flash +#define FLASH_PD_MIN_SLEEP_TIME_US 2000 + +// Time from VDD_SDIO power up to first flash read in ROM code +#define VDD_SDIO_POWERUP_TO_FLASH_READ_US 700 + +// Extra time it takes to enter and exit light sleep and deep sleep +// For deep sleep, this is until the wake stub runs (not the app). 
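/* Worked example (illustrative, assuming CONFIG_ESP32S3_DEFAULT_CPU_FREQ_MHZ = 160):
 * with the external 32 kHz crystal selected, the overhead macros defined below
 * evaluate to 650 + 30 * 240 / 160 = 695 us for light sleep and
 * 650 + 100 * 240 / 160 = 800 us for deep sleep; without the external crystal
 * the base term is 250 us instead.
 */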
+#ifdef CONFIG_ESP32S3_RTC_CLK_SRC_EXT_CRYS +#define LIGHT_SLEEP_TIME_OVERHEAD_US (650 + 30 * 240 / CONFIG_ESP32S3_DEFAULT_CPU_FREQ_MHZ) +#define DEEP_SLEEP_TIME_OVERHEAD_US (650 + 100 * 240 / CONFIG_ESP32S3_DEFAULT_CPU_FREQ_MHZ) +#else +#define LIGHT_SLEEP_TIME_OVERHEAD_US (250 + 30 * 240 / CONFIG_ESP32S3_DEFAULT_CPU_FREQ_MHZ) +#define DEEP_SLEEP_TIME_OVERHEAD_US (250 + 100 * 240 / CONFIG_ESP32S3_DEFAULT_CPU_FREQ_MHZ) +#endif // CONFIG_ESP32S3_RTC_CLK_SRC_EXT_CRYS + +// Minimal amount of time we can sleep for +#define LIGHT_SLEEP_MIN_TIME_US 200 + +#define CHECK_SOURCE(source, value, mask) ((s_config.wakeup_triggers & mask) && \ + (source == value)) + +/** + * Internal structure which holds all requested deep sleep parameters + */ +typedef struct { + esp_sleep_pd_option_t pd_options[ESP_PD_DOMAIN_MAX]; + uint64_t sleep_duration; + uint32_t wakeup_triggers : 15; + uint32_t ext1_trigger_mode : 1; + uint32_t ext1_rtc_gpio_mask : 18; + uint32_t ext0_trigger_level : 1; + uint32_t ext0_rtc_gpio_num : 5; + uint32_t sleep_time_adjustment; + uint64_t rtc_ticks_at_sleep_start; +} sleep_config_t; + +static sleep_config_t s_config = { + .pd_options = { ESP_PD_OPTION_AUTO, ESP_PD_OPTION_AUTO, ESP_PD_OPTION_AUTO }, + .wakeup_triggers = 0 +}; + +/* Internal variable used to track if light sleep wakeup sources are to be + expected when determining wakeup cause. */ +static bool s_light_sleep_wakeup = false; + +/* Updating RTC_MEMORY_CRC_REG register via set_rtc_memory_crc() + is not thread-safe. */ +static _lock_t lock_rtc_memory_crc; + +static const char *TAG = "sleep"; + +static uint32_t get_power_down_flags(void); +static void ext0_wakeup_prepare(void); +static void ext1_wakeup_prepare(void); +static void timer_wakeup_prepare(void); +static void touch_wakeup_prepare(void); + +/* Wake from deep sleep stub + See esp_deepsleep.h esp_wake_deep_sleep() comments for details. 
+*/ +esp_deep_sleep_wake_stub_fn_t esp_get_deep_sleep_wake_stub(void) +{ + _lock_acquire(&lock_rtc_memory_crc); + uint32_t stored_crc = REG_READ(RTC_MEMORY_CRC_REG); + set_rtc_memory_crc(); + uint32_t calc_crc = REG_READ(RTC_MEMORY_CRC_REG); + REG_WRITE(RTC_MEMORY_CRC_REG, stored_crc); + _lock_release(&lock_rtc_memory_crc); + + if (stored_crc != calc_crc) { + return NULL; + } + esp_deep_sleep_wake_stub_fn_t stub_ptr = (esp_deep_sleep_wake_stub_fn_t) REG_READ(RTC_ENTRY_ADDR_REG); + if (!esp_ptr_executable(stub_ptr)) { + return NULL; + } + return stub_ptr; +} + +void esp_set_deep_sleep_wake_stub(esp_deep_sleep_wake_stub_fn_t new_stub) +{ + _lock_acquire(&lock_rtc_memory_crc); + REG_WRITE(RTC_ENTRY_ADDR_REG, (uint32_t)new_stub); + set_rtc_memory_crc(); + _lock_release(&lock_rtc_memory_crc); +} + +void RTC_IRAM_ATTR esp_default_wake_deep_sleep(void) +{ + REG_SET_BIT(EXTMEM_CACHE_CONF_MISC_REG, EXTMEM_CACHE_TRACE_ENA); +} + +void __attribute__((weak, alias("esp_default_wake_deep_sleep"))) esp_wake_deep_sleep(void); + +void esp_deep_sleep(uint64_t time_in_us) +{ + esp_sleep_enable_timer_wakeup(time_in_us); + esp_deep_sleep_start(); +} + +static void IRAM_ATTR flush_uarts(void) +{ + for (int i = 0; i < SOC_UART_NUM; ++i) { + if (periph_ll_periph_enabled(PERIPH_UART0_MODULE + i)) { + esp_rom_uart_tx_wait_idle(i); + } + } +} + +static void IRAM_ATTR suspend_uarts(void) +{ + for (int i = 0; i < SOC_UART_NUM; ++i) { + if (periph_ll_periph_enabled(PERIPH_UART0_MODULE + i)) { + REG_CLR_BIT(UART_FLOW_CONF_REG(i), UART_FORCE_XON); + REG_SET_BIT(UART_FLOW_CONF_REG(i), UART_SW_FLOW_CON_EN | UART_FORCE_XOFF); + while (REG_GET_FIELD(UART_FSM_STATUS_REG(i), UART_ST_UTX_OUT) != 0) { + ; + } + } + } +} + +static void IRAM_ATTR resume_uarts(void) +{ + for (int i = 0; i < SOC_UART_NUM; ++i) { + if (periph_ll_periph_enabled(PERIPH_UART0_MODULE + i)) { + REG_CLR_BIT(UART_FLOW_CONF_REG(i), UART_FORCE_XOFF); + REG_SET_BIT(UART_FLOW_CONF_REG(i), UART_FORCE_XON); + REG_CLR_BIT(UART_FLOW_CONF_REG(i), UART_SW_FLOW_CON_EN | UART_FORCE_XON); + } + } +} + +static uint32_t IRAM_ATTR esp_sleep_start(uint32_t pd_flags) +{ + // Stop UART output so that output is not lost due to APB frequency change. + // For light sleep, suspend UART output — it will resume after wakeup. + // For deep sleep, wait for the contents of UART FIFO to be sent. + if (pd_flags & RTC_SLEEP_PD_DIG) { + flush_uarts(); + } else { + suspend_uarts(); + } + + // Save current frequency and switch to XTAL + // Save current frequency and switch to XTAL + rtc_cpu_freq_config_t cpu_freq_config; + rtc_clk_cpu_freq_get_config(&cpu_freq_config); + rtc_clk_cpu_freq_set_xtal(); + + // Configure pins for external wakeup + if (s_config.wakeup_triggers & RTC_EXT0_TRIG_EN) { + ext0_wakeup_prepare(); + } + if (s_config.wakeup_triggers & RTC_EXT1_TRIG_EN) { + ext1_wakeup_prepare(); + } + // Enable ULP wakeup + if (s_config.wakeup_triggers & RTC_ULP_TRIG_EN) { + // no-op for esp32s3 + } + // Enable Touch wakeup + if (s_config.wakeup_triggers & RTC_TOUCH_TRIG_EN) { + touch_wakeup_prepare(); + } + + uint32_t reject_triggers = 0; + if ((pd_flags & RTC_SLEEP_PD_DIG) == 0) { + /* Light sleep, enable sleep reject for faster return from this function, + * in case the wakeup is already triggerred. 
+ */ + reject_triggers = s_config.wakeup_triggers; + } + + // Enter sleep + rtc_sleep_config_t config = RTC_SLEEP_CONFIG_DEFAULT(pd_flags); + rtc_sleep_init(config); + + // Configure timer wakeup + if ((s_config.wakeup_triggers & RTC_TIMER_TRIG_EN) && + s_config.sleep_duration > 0) { + timer_wakeup_prepare(); + } + + uint32_t result = rtc_sleep_start(s_config.wakeup_triggers, reject_triggers, 1); + + // Restore CPU frequency + rtc_clk_cpu_freq_set_config(&cpu_freq_config); + + // re-enable UART output + resume_uarts(); + + return result; +} + +void IRAM_ATTR esp_deep_sleep_start(void) +{ + // record current RTC time + s_config.rtc_ticks_at_sleep_start = rtc_time_get(); + esp_sync_counters_rtc_and_frc(); + // Configure wake stub + if (esp_get_deep_sleep_wake_stub() == NULL) { + esp_set_deep_sleep_wake_stub(esp_wake_deep_sleep); + } + + // Decide which power domains can be powered down + uint32_t pd_flags = get_power_down_flags(); + + // Correct the sleep time + s_config.sleep_time_adjustment = DEEP_SLEEP_TIME_OVERHEAD_US; + + // Enter sleep + esp_sleep_start(RTC_SLEEP_PD_DIG | RTC_SLEEP_PD_VDDSDIO | pd_flags); + + // Because RTC is in a slower clock domain than the CPU, it + // can take several CPU cycles for the sleep mode to start. + while (1) { + ; + } +} + +/** + * Helper function which handles entry to and exit from light sleep + * Placed into IRAM as flash may need some time to be powered on. + */ +static esp_err_t esp_light_sleep_inner(uint32_t pd_flags, + uint32_t flash_enable_time_us, + rtc_vddsdio_config_t vddsdio_config) IRAM_ATTR __attribute__((noinline)); + +static esp_err_t esp_light_sleep_inner(uint32_t pd_flags, + uint32_t flash_enable_time_us, + rtc_vddsdio_config_t vddsdio_config) +{ + // Enter sleep + esp_err_t err = esp_sleep_start(pd_flags); + + // If VDDSDIO regulator was controlled by RTC registers before sleep, + // restore the configuration. + if (vddsdio_config.force) { + rtc_vddsdio_set_config(vddsdio_config); + } + + // If SPI flash was powered down, wait for it to become ready + if (pd_flags & RTC_SLEEP_PD_VDDSDIO) { + // Wait for the flash chip to start up + ets_delay_us(flash_enable_time_us); + } + return err; +} + +esp_err_t esp_light_sleep_start(void) +{ + static portMUX_TYPE light_sleep_lock = portMUX_INITIALIZER_UNLOCKED; + portENTER_CRITICAL(&light_sleep_lock); + /* We will be calling esp_timer_private_advance inside DPORT access critical + * section. Make sure the code on the other CPU is not holding esp_timer + * lock, otherwise there will be deadlock. + */ + esp_timer_private_lock(); + s_config.rtc_ticks_at_sleep_start = rtc_time_get(); + uint64_t frc_time_at_start = esp_timer_get_time(); + DPORT_STALL_OTHER_CPU_START(); + + // Decide which power domains can be powered down + uint32_t pd_flags = get_power_down_flags(); + + // Amount of time to subtract from actual sleep time. + // This is spent on entering and leaving light sleep. + s_config.sleep_time_adjustment = LIGHT_SLEEP_TIME_OVERHEAD_US; + + // Decide if VDD_SDIO needs to be powered down; + // If it needs to be powered down, adjust sleep time. 
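// Illustrative note: when PSRAM is not in use, the check below powers down
// VDD_SDIO only if the requested sleep duration exceeds
// MAX(FLASH_PD_MIN_SLEEP_TIME_US,
//     flash_enable_time_us + LIGHT_SLEEP_TIME_OVERHEAD_US + LIGHT_SLEEP_MIN_TIME_US);
// shorter sleeps keep the flash powered so the ~700 us power-up delay is avoided.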
+ const uint32_t flash_enable_time_us = VDD_SDIO_POWERUP_TO_FLASH_READ_US; + +#ifndef CONFIG_SPIRAM + const uint32_t vddsdio_pd_sleep_duration = MAX(FLASH_PD_MIN_SLEEP_TIME_US, + flash_enable_time_us + LIGHT_SLEEP_TIME_OVERHEAD_US + LIGHT_SLEEP_MIN_TIME_US); + + if (s_config.sleep_duration > vddsdio_pd_sleep_duration) { + pd_flags |= RTC_SLEEP_PD_VDDSDIO; + s_config.sleep_time_adjustment += flash_enable_time_us; + } +#endif //CONFIG_SPIRAM + + rtc_vddsdio_config_t vddsdio_config = rtc_vddsdio_get_config(); + + // Safety net: enable WDT in case exit from light sleep fails + wdt_hal_context_t rtc_wdt_ctx = {.inst = WDT_RWDT, .rwdt_dev = &RTCCNTL}; + bool wdt_was_enabled = wdt_hal_is_enabled(&rtc_wdt_ctx); // If WDT was enabled in the user code, then do not change it here. + if (!wdt_was_enabled) { + wdt_hal_init(&rtc_wdt_ctx, WDT_RWDT, 0, false); + uint32_t stage_timeout_ticks = (uint32_t)(1000ULL * rtc_clk_slow_freq_get_hz() / 1000ULL); + wdt_hal_write_protect_disable(&rtc_wdt_ctx); + wdt_hal_config_stage(&rtc_wdt_ctx, WDT_STAGE0, stage_timeout_ticks, WDT_STAGE_ACTION_RESET_RTC); + wdt_hal_enable(&rtc_wdt_ctx); + wdt_hal_write_protect_enable(&rtc_wdt_ctx); + } + + // Enter sleep, then wait for flash to be ready on wakeup + esp_err_t err = esp_light_sleep_inner(pd_flags, + flash_enable_time_us, vddsdio_config); + + s_light_sleep_wakeup = true; + + // FRC1 has been clock gated for the duration of the sleep, correct for that. + uint64_t rtc_ticks_at_end = rtc_time_get(); + uint64_t frc_time_at_end = esp_timer_get_time(); + + uint64_t rtc_time_diff = rtc_time_slowclk_to_us(rtc_ticks_at_end - s_config.rtc_ticks_at_sleep_start, + esp_clk_slowclk_cal_get()); + uint64_t frc_time_diff = frc_time_at_end - frc_time_at_start; + + int64_t time_diff = rtc_time_diff - frc_time_diff; + /* Small negative values (up to 1 RTC_SLOW clock period) are possible, + * for very small values of sleep_duration. Ignore those to keep esp_timer + * monotonic. + */ + if (time_diff > 0) { + esp_timer_private_advance(time_diff); + } + esp_set_time_from_rtc(); + + esp_timer_private_unlock(); + DPORT_STALL_OTHER_CPU_END(); + if (!wdt_was_enabled) { + wdt_hal_write_protect_disable(&rtc_wdt_ctx); + wdt_hal_disable(&rtc_wdt_ctx); + wdt_hal_write_protect_enable(&rtc_wdt_ctx); + } + portEXIT_CRITICAL(&light_sleep_lock); + return err; +} + +esp_err_t esp_sleep_disable_wakeup_source(esp_sleep_source_t source) +{ + // For most of sources it is enough to set trigger mask in local + // configuration structure. The actual RTC wake up options + // will be updated by esp_sleep_start(). 
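// Rough usage sketch (illustrative only, not part of this function's logic):
//
//     esp_sleep_enable_timer_wakeup(1000 * 1000);              // arm a ~1 s timer wakeup
//     esp_light_sleep_start();                                 // sleep until the timer fires
//     esp_sleep_disable_wakeup_source(ESP_SLEEP_WAKEUP_TIMER); // drop the trigger afterwards
//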
+ if (source == ESP_SLEEP_WAKEUP_ALL) { + s_config.wakeup_triggers = 0; + } else if (CHECK_SOURCE(source, ESP_SLEEP_WAKEUP_TIMER, RTC_TIMER_TRIG_EN)) { + s_config.wakeup_triggers &= ~RTC_TIMER_TRIG_EN; + s_config.sleep_duration = 0; + } else if (CHECK_SOURCE(source, ESP_SLEEP_WAKEUP_EXT0, RTC_EXT0_TRIG_EN)) { + s_config.ext0_rtc_gpio_num = 0; + s_config.ext0_trigger_level = 0; + s_config.wakeup_triggers &= ~RTC_EXT0_TRIG_EN; + } else if (CHECK_SOURCE(source, ESP_SLEEP_WAKEUP_EXT1, RTC_EXT1_TRIG_EN)) { + s_config.ext1_rtc_gpio_mask = 0; + s_config.ext1_trigger_mode = 0; + s_config.wakeup_triggers &= ~RTC_EXT1_TRIG_EN; + } else if (CHECK_SOURCE(source, ESP_SLEEP_WAKEUP_TOUCHPAD, RTC_TOUCH_TRIG_EN)) { + s_config.wakeup_triggers &= ~RTC_TOUCH_TRIG_EN; + } else if (CHECK_SOURCE(source, ESP_SLEEP_WAKEUP_GPIO, RTC_GPIO_TRIG_EN)) { + s_config.wakeup_triggers &= ~RTC_GPIO_TRIG_EN; + } else if (CHECK_SOURCE(source, ESP_SLEEP_WAKEUP_UART, (RTC_UART0_TRIG_EN | RTC_UART1_TRIG_EN))) { + s_config.wakeup_triggers &= ~(RTC_UART0_TRIG_EN | RTC_UART1_TRIG_EN); + } +#ifdef CONFIG_ESP32S3_ULP_COPROC_ENABLED + else if (CHECK_SOURCE(source, ESP_SLEEP_WAKEUP_ULP, RTC_ULP_TRIG_EN)) { + s_config.wakeup_triggers &= ~RTC_ULP_TRIG_EN; + } +#endif + else { + ESP_LOGE(TAG, "Incorrect wakeup source (%d) to disable.", (int) source); + return ESP_ERR_INVALID_STATE; + } + return ESP_OK; +} + +esp_err_t esp_sleep_enable_ulp_wakeup(void) +{ + s_config.wakeup_triggers |= (RTC_ULP_TRIG_EN | RTC_COCPU_TRIG_EN | RTC_COCPU_TRAP_TRIG_EN); + return ESP_OK; +} + +esp_err_t esp_sleep_enable_timer_wakeup(uint64_t time_in_us) +{ + s_config.wakeup_triggers |= RTC_TIMER_TRIG_EN; + s_config.sleep_duration = time_in_us; + return ESP_OK; +} + +static void timer_wakeup_prepare(void) +{ + uint32_t period = esp_clk_slowclk_cal_get(); + int64_t sleep_duration = (int64_t) s_config.sleep_duration - (int64_t) s_config.sleep_time_adjustment; + if (sleep_duration < 0) { + sleep_duration = 0; + } + int64_t rtc_count_delta = rtc_time_us_to_slowclk(sleep_duration, period); + rtc_sleep_set_wakeup_time(s_config.rtc_ticks_at_sleep_start + rtc_count_delta); + SET_PERI_REG_MASK(RTC_CNTL_INT_CLR_REG, RTC_CNTL_MAIN_TIMER_INT_CLR_M); + SET_PERI_REG_MASK(RTC_CNTL_SLP_TIMER1_REG, RTC_CNTL_MAIN_TIMER_ALARM_EN_M); +} + +/* In deep sleep mode, only the sleep channel is supported, and other touch channels should be turned off. 
*/ +static void touch_wakeup_prepare(void) +{ + touch_pad_sleep_channel_t slp_config; + touch_pad_fsm_stop(); + touch_pad_clear_channel_mask(SOC_TOUCH_SENSOR_BIT_MASK_MAX); + touch_pad_sleep_channel_get_info(&slp_config); + touch_pad_set_channel_mask(BIT(slp_config.touch_num)); + touch_pad_fsm_start(); +} + +esp_err_t esp_sleep_enable_touchpad_wakeup(void) +{ + if (s_config.wakeup_triggers & (RTC_EXT0_TRIG_EN)) { + ESP_LOGE(TAG, "Conflicting wake-up trigger: ext0"); + return ESP_ERR_INVALID_STATE; + } + s_config.wakeup_triggers |= RTC_TOUCH_TRIG_EN; + return ESP_OK; +} + +touch_pad_t esp_sleep_get_touchpad_wakeup_status(void) +{ + if (esp_sleep_get_wakeup_cause() != ESP_SLEEP_WAKEUP_TOUCHPAD) { + return TOUCH_PAD_MAX; + } + touch_pad_t pad_num; + esp_err_t ret = touch_pad_get_wakeup_status(&pad_num); //TODO 723diff commit id:fda9ada1b + assert(ret == ESP_OK && "wakeup reason is RTC_TOUCH_TRIG_EN but SENS_TOUCH_MEAS_EN is zero"); + return pad_num; +} + +esp_err_t esp_sleep_enable_ext0_wakeup(gpio_num_t gpio_num, int level) +{ + if (level < 0 || level > 1) { + return ESP_ERR_INVALID_ARG; + } + if (!RTC_GPIO_IS_VALID_GPIO(gpio_num)) { + return ESP_ERR_INVALID_ARG; + } + if (s_config.wakeup_triggers & (RTC_TOUCH_TRIG_EN | RTC_ULP_TRIG_EN)) { + ESP_LOGE(TAG, "Conflicting wake-up triggers: touch / ULP"); + return ESP_ERR_INVALID_STATE; + } + s_config.ext0_rtc_gpio_num = rtc_io_number_get(gpio_num); + s_config.ext0_trigger_level = level; + s_config.wakeup_triggers |= RTC_EXT0_TRIG_EN; + return ESP_OK; +} + +static void ext0_wakeup_prepare(void) +{ + int rtc_gpio_num = s_config.ext0_rtc_gpio_num; + // Set GPIO to be used for wakeup + REG_SET_FIELD(RTC_IO_EXT_WAKEUP0_REG, RTC_IO_EXT_WAKEUP0_SEL, rtc_gpio_num); + // Set level which will trigger wakeup + SET_PERI_REG_BITS(RTC_CNTL_EXT_WAKEUP_CONF_REG, 0x1, + s_config.ext0_trigger_level, RTC_CNTL_EXT_WAKEUP0_LV_S); + // Find GPIO descriptor in the rtc_io_desc table and configure the pad + const rtc_io_desc_t *desc = &rtc_io_desc[rtc_gpio_num]; + REG_SET_BIT(desc->reg, desc->mux); + SET_PERI_REG_BITS(desc->reg, 0x3, 0, desc->func); + REG_SET_BIT(desc->reg, desc->ie); +} + +esp_err_t esp_sleep_enable_ext1_wakeup(uint64_t mask, esp_sleep_ext1_wakeup_mode_t mode) +{ + if (mode > ESP_EXT1_WAKEUP_ANY_HIGH) { + return ESP_ERR_INVALID_ARG; + } + // Translate bit map of GPIO numbers into the bit map of RTC IO numbers + uint32_t rtc_gpio_mask = 0; + for (int gpio = 0; mask; ++gpio, mask >>= 1) { + if ((mask & 1) == 0) { + continue; + } + if (!RTC_GPIO_IS_VALID_GPIO(gpio)) { + ESP_LOGE(TAG, "Not an RTC IO: GPIO%d", gpio); + return ESP_ERR_INVALID_ARG; + } + rtc_gpio_mask |= BIT(rtc_io_number_get(gpio)); + } + s_config.ext1_rtc_gpio_mask = rtc_gpio_mask; + s_config.ext1_trigger_mode = mode; + s_config.wakeup_triggers |= RTC_EXT1_TRIG_EN; + return ESP_OK; +} + +static void ext1_wakeup_prepare(void) +{ + // Configure all RTC IOs selected as ext1 wakeup inputs + uint32_t rtc_gpio_mask = s_config.ext1_rtc_gpio_mask; + for (int gpio = 0; gpio < GPIO_PIN_COUNT && rtc_gpio_mask != 0; ++gpio) { + int rtc_pin = rtc_io_number_get(gpio); + if ((rtc_gpio_mask & BIT(rtc_pin)) == 0) { + continue; + } + const rtc_io_desc_t *desc = &rtc_io_desc[rtc_pin]; + // Route pad to RTC + REG_SET_BIT(desc->reg, desc->mux); + SET_PERI_REG_BITS(desc->reg, 0x3, 0, desc->func); + // set input enable in sleep mode + REG_SET_BIT(desc->reg, desc->ie); + // Pad configuration depends on RTC_PERIPH state in sleep mode + if (s_config.pd_options[ESP_PD_DOMAIN_RTC_PERIPH] != ESP_PD_OPTION_ON) { + // 
RTC_PERIPH will be powered down, so RTC_IO_ registers will + // loose their state. Lock pad configuration. + // Pullups/pulldowns also need to be disabled. + REG_CLR_BIT(desc->reg, desc->pulldown); + REG_CLR_BIT(desc->reg, desc->pullup); + REG_SET_BIT(RTC_CNTL_PAD_HOLD_REG, desc->hold_force); + } + // Keep track of pins which are processed to bail out early + rtc_gpio_mask &= ~BIT(rtc_pin); + } + // Clear state from previous wakeup + REG_SET_BIT(RTC_CNTL_EXT_WAKEUP1_REG, RTC_CNTL_EXT_WAKEUP1_STATUS_CLR); + // Set pins to be used for wakeup + REG_SET_FIELD(RTC_CNTL_EXT_WAKEUP1_REG, RTC_CNTL_EXT_WAKEUP1_SEL, s_config.ext1_rtc_gpio_mask); + // Set logic function (any low, all high) + SET_PERI_REG_BITS(RTC_CNTL_EXT_WAKEUP_CONF_REG, 0x1, + s_config.ext1_trigger_mode, RTC_CNTL_EXT_WAKEUP1_LV_S); +} + +uint64_t esp_sleep_get_ext1_wakeup_status(void) +{ + if (esp_sleep_get_wakeup_cause() != ESP_SLEEP_WAKEUP_EXT1) { + return 0; + } + uint32_t status = REG_GET_FIELD(RTC_CNTL_EXT_WAKEUP1_STATUS_REG, RTC_CNTL_EXT_WAKEUP1_STATUS); + // Translate bit map of RTC IO numbers into the bit map of GPIO numbers + uint64_t gpio_mask = 0; + for (int gpio = 0; gpio < GPIO_PIN_COUNT; ++gpio) { + if (!RTC_GPIO_IS_VALID_GPIO(gpio)) { + continue; + } + int rtc_pin = rtc_io_number_get(gpio); + if ((status & BIT(rtc_pin)) == 0) { + continue; + } + gpio_mask |= 1ULL << gpio; + } + return gpio_mask; +} + +esp_err_t esp_sleep_enable_gpio_wakeup(void) +{ + if (s_config.wakeup_triggers & (RTC_TOUCH_TRIG_EN | RTC_ULP_TRIG_EN)) { + ESP_LOGE(TAG, "Conflicting wake-up triggers: touch / ULP"); + return ESP_ERR_INVALID_STATE; + } + s_config.wakeup_triggers |= RTC_GPIO_TRIG_EN; + return ESP_OK; +} + +esp_err_t esp_sleep_enable_uart_wakeup(int uart_num) +{ + if (uart_num == 0) { + s_config.wakeup_triggers |= RTC_UART0_TRIG_EN; + } else if (uart_num == 1) { + s_config.wakeup_triggers |= RTC_UART1_TRIG_EN; + } else { + return ESP_ERR_INVALID_ARG; + } + + return ESP_OK; +} + +esp_err_t esp_sleep_enable_wifi_wakeup(void) +{ + s_config.wakeup_triggers |= RTC_MAC_TRIG_EN; + return ESP_OK; +} + +esp_sleep_wakeup_cause_t esp_sleep_get_wakeup_cause(void) +{ + if (rtc_get_reset_reason(0) != DEEPSLEEP_RESET && !s_light_sleep_wakeup) { + return ESP_SLEEP_WAKEUP_UNDEFINED; + } + + uint32_t wakeup_cause = REG_GET_FIELD(RTC_CNTL_WAKEUP_STATE_REG, RTC_CNTL_WAKEUP_CAUSE); + if (wakeup_cause & RTC_EXT0_TRIG_EN) { + return ESP_SLEEP_WAKEUP_EXT0; + } else if (wakeup_cause & RTC_EXT1_TRIG_EN) { + return ESP_SLEEP_WAKEUP_EXT1; + } else if (wakeup_cause & RTC_TIMER_TRIG_EN) { + return ESP_SLEEP_WAKEUP_TIMER; + } else if (wakeup_cause & RTC_TOUCH_TRIG_EN) { + return ESP_SLEEP_WAKEUP_TOUCHPAD; + } else if (wakeup_cause & RTC_ULP_TRIG_EN) { + return ESP_SLEEP_WAKEUP_ULP; + } else if (wakeup_cause & RTC_GPIO_TRIG_EN) { + return ESP_SLEEP_WAKEUP_GPIO; + } else if (wakeup_cause & (RTC_UART0_TRIG_EN | RTC_UART1_TRIG_EN)) { + return ESP_SLEEP_WAKEUP_UART; + } else if (wakeup_cause & RTC_MAC_TRIG_EN) { + return ESP_SLEEP_WAKEUP_WIFI; + } else if (wakeup_cause & RTC_COCPU_TRIG_EN) { + return ESP_SLEEP_WAKEUP_ULP; + } else if (wakeup_cause & RTC_COCPU_TRAP_TRIG_EN) { + return ESP_SLEEP_WAKEUP_COCPU_TRAP_TRIG; + } else { + return ESP_SLEEP_WAKEUP_UNDEFINED; + } +} + +esp_err_t esp_sleep_pd_config(esp_sleep_pd_domain_t domain, + esp_sleep_pd_option_t option) +{ + if (domain >= ESP_PD_DOMAIN_MAX || option > ESP_PD_OPTION_AUTO) { + return ESP_ERR_INVALID_ARG; + } + s_config.pd_options[domain] = option; + return ESP_OK; +} + +static uint32_t get_power_down_flags(void) 
+{ + // Where needed, convert AUTO options to ON. Later interpret AUTO as OFF. + + // RTC_SLOW_MEM is needed for the ULP, so keep RTC_SLOW_MEM powered up if ULP + // is used and RTC_SLOW_MEM is Auto. + // If there is any data placed into .rtc.data or .rtc.bss segments, and + // RTC_SLOW_MEM is Auto, keep it powered up as well. + + // Labels are defined in the linker script, see esp32s3.ld. + extern int _rtc_slow_length; + + if ((s_config.pd_options[ESP_PD_DOMAIN_RTC_SLOW_MEM] == ESP_PD_OPTION_AUTO) && + ((size_t) &_rtc_slow_length > 0 || + (s_config.wakeup_triggers & RTC_ULP_TRIG_EN))) { + s_config.pd_options[ESP_PD_DOMAIN_RTC_SLOW_MEM] = ESP_PD_OPTION_ON; + } + + // RTC_FAST_MEM is needed for deep sleep stub. + // If RTC_FAST_MEM is Auto, keep it powered on, so that deep sleep stub + // can run. + // In the new chip revision, deep sleep stub will be optional, + // and this can be changed. + if (s_config.pd_options[ESP_PD_DOMAIN_RTC_FAST_MEM] == ESP_PD_OPTION_AUTO) { + s_config.pd_options[ESP_PD_DOMAIN_RTC_FAST_MEM] = ESP_PD_OPTION_ON; + } + + // RTC_PERIPH is needed for EXT0 wakeup and GPIO wakeup. + // If RTC_PERIPH is auto, and EXT0/GPIO aren't enabled, power down RTC_PERIPH. + if (s_config.pd_options[ESP_PD_DOMAIN_RTC_PERIPH] == ESP_PD_OPTION_AUTO) { + if (s_config.wakeup_triggers & (RTC_EXT0_TRIG_EN | RTC_GPIO_TRIG_EN)) { + s_config.pd_options[ESP_PD_DOMAIN_RTC_PERIPH] = ESP_PD_OPTION_ON; + } else if (s_config.wakeup_triggers & (RTC_TOUCH_TRIG_EN | RTC_ULP_TRIG_EN)) { + // In both rev. 0 and rev. 1 of ESP32, forcing power up of RTC_PERIPH + // prevents ULP timer and touch FSMs from working correctly. + s_config.pd_options[ESP_PD_DOMAIN_RTC_PERIPH] = ESP_PD_OPTION_OFF; + } + } + + if (s_config.pd_options[ESP_PD_DOMAIN_XTAL] == ESP_PD_OPTION_AUTO) { + s_config.pd_options[ESP_PD_DOMAIN_XTAL] = ESP_PD_OPTION_OFF; + } + + const char *option_str[] = {"OFF", "ON", "AUTO(OFF)" /* Auto works as OFF */}; + ESP_LOGD(TAG, "RTC_PERIPH: %s, RTC_SLOW_MEM: %s, RTC_FAST_MEM: %s", + option_str[s_config.pd_options[ESP_PD_DOMAIN_RTC_PERIPH]], + option_str[s_config.pd_options[ESP_PD_DOMAIN_RTC_SLOW_MEM]], + option_str[s_config.pd_options[ESP_PD_DOMAIN_RTC_FAST_MEM]]); + + // Prepare flags based on the selected options + uint32_t pd_flags = 0; + if (s_config.pd_options[ESP_PD_DOMAIN_RTC_FAST_MEM] != ESP_PD_OPTION_ON) { + pd_flags |= RTC_SLEEP_PD_RTC_FAST_MEM; + } + if (s_config.pd_options[ESP_PD_DOMAIN_RTC_SLOW_MEM] != ESP_PD_OPTION_ON) { + pd_flags |= RTC_SLEEP_PD_RTC_SLOW_MEM; + } + if (s_config.pd_options[ESP_PD_DOMAIN_RTC_PERIPH] != ESP_PD_OPTION_ON) { + pd_flags |= RTC_SLEEP_PD_RTC_PERIPH; + } + return pd_flags; +} diff --git a/components/esp32s3/system_api_esp32s3.c b/components/esp32s3/system_api_esp32s3.c new file mode 100644 index 0000000000..252a171bcf --- /dev/null +++ b/components/esp32s3/system_api_esp32s3.c @@ -0,0 +1,143 @@ +// Copyright 2013-2020 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
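/**
 * @file system_api_esp32s3.c
 * @brief ESP32-S3 implementations of esp_restart_noos() and esp_chip_info().
 */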
+ +#include +#include "sdkconfig.h" +#include "esp_system.h" +#include "esp_private/system_internal.h" +#include "esp_attr.h" +#include "esp_wifi.h" +#include "esp_log.h" +#include "esp32s3/rom/cache.h" +#include "esp_rom_uart.h" +#include "soc/dport_reg.h" +#include "soc/gpio_reg.h" +#include "soc/rtc_cntl_reg.h" +#include "soc/timer_group_reg.h" +#include "soc/cpu.h" +#include "soc/rtc.h" +#include "soc/syscon_reg.h" +#include "hal/wdt_hal.h" +#include "freertos/xtensa_api.h" + +/* "inner" restart function for after RTOS, interrupts & anything else on this + * core are already stopped. Stalls other core, resets hardware, + * triggers restart. +*/ +void IRAM_ATTR esp_restart_noos(void) +{ + // Disable interrupts + xt_ints_off(0xFFFFFFFF); + + // Enable RTC watchdog for 1 second + wdt_hal_context_t rtc_wdt_ctx; + wdt_hal_init(&rtc_wdt_ctx, WDT_RWDT, 0, false); + uint32_t stage_timeout_ticks = (uint32_t)(1000ULL * rtc_clk_slow_freq_get_hz() / 1000ULL); + wdt_hal_write_protect_disable(&rtc_wdt_ctx); + wdt_hal_config_stage(&rtc_wdt_ctx, WDT_STAGE0, stage_timeout_ticks, WDT_STAGE_ACTION_RESET_SYSTEM); + wdt_hal_config_stage(&rtc_wdt_ctx, WDT_STAGE1, stage_timeout_ticks, WDT_STAGE_ACTION_RESET_RTC); + //Enable flash boot mode so that flash booting after restart is protected by the RTC WDT. + wdt_hal_set_flashboot_en(&rtc_wdt_ctx, true); + wdt_hal_write_protect_enable(&rtc_wdt_ctx); + + // Reset and stall the other CPU. + // CPU must be reset before stalling, in case it was running a s32c1i + // instruction. This would cause memory pool to be locked by arbiter + // to the stalled CPU, preventing current CPU from accessing this pool. + const uint32_t core_id = xPortGetCoreID(); +#if !CONFIG_FREERTOS_UNICORE + const uint32_t other_core_id = (core_id == 0) ? 1 : 0; + esp_cpu_reset(other_core_id); + esp_cpu_stall(other_core_id); +#endif + + // Disable TG0/TG1 watchdogs + wdt_hal_context_t wdt0_context = {.inst = WDT_MWDT0, .mwdt_dev = &TIMERG0}; + wdt_hal_write_protect_disable(&wdt0_context); + wdt_hal_disable(&wdt0_context); + wdt_hal_write_protect_enable(&wdt0_context); + + wdt_hal_context_t wdt1_context = {.inst = WDT_MWDT1, .mwdt_dev = &TIMERG1}; + wdt_hal_write_protect_disable(&wdt1_context); + wdt_hal_disable(&wdt1_context); + wdt_hal_write_protect_enable(&wdt1_context); + + // Flush any data left in UART FIFOs + esp_rom_uart_tx_wait_idle(0); + esp_rom_uart_tx_wait_idle(1); + // Disable cache + Cache_Disable_ICache(); + Cache_Disable_DCache(); + + // 2nd stage bootloader reconfigures SPI flash signals. + // Reset them to the defaults expected by ROM. 
+ WRITE_PERI_REG(GPIO_FUNC0_IN_SEL_CFG_REG, 0x30); + WRITE_PERI_REG(GPIO_FUNC1_IN_SEL_CFG_REG, 0x30); + WRITE_PERI_REG(GPIO_FUNC2_IN_SEL_CFG_REG, 0x30); + WRITE_PERI_REG(GPIO_FUNC3_IN_SEL_CFG_REG, 0x30); + WRITE_PERI_REG(GPIO_FUNC4_IN_SEL_CFG_REG, 0x30); + WRITE_PERI_REG(GPIO_FUNC5_IN_SEL_CFG_REG, 0x30); + + // Reset wifi/bluetooth/ethernet/sdio (bb/mac) + SET_PERI_REG_MASK(SYSTEM_CORE_RST_EN_REG, + SYSTEM_BB_RST | SYSTEM_FE_RST | SYSTEM_MAC_RST | + SYSTEM_BT_RST | SYSTEM_BTMAC_RST | SYSTEM_SDIO_RST | + SYSTEM_SDIO_HOST_RST | SYSTEM_EMAC_RST | SYSTEM_MACPWR_RST | + SYSTEM_RW_BTMAC_RST | SYSTEM_RW_BTLP_RST); + REG_WRITE(SYSTEM_CORE_RST_EN_REG, 0); + + // Reset timer/spi/uart + SET_PERI_REG_MASK(SYSTEM_PERIP_RST_EN0_REG, + SYSTEM_TIMERS_RST | SYSTEM_SPI01_RST | SYSTEM_UART_RST); + REG_WRITE(SYSTEM_PERIP_RST_EN0_REG, 0); + + // Set CPU back to XTAL source, no PLL, same as hard reset +#if !CONFIG_IDF_ENV_FPGA + rtc_clk_cpu_freq_set_xtal(); +#endif + +#if !CONFIG_FREERTOS_UNICORE + // Clear entry point for APP CPU + REG_WRITE(SYSTEM_CORE_1_CONTROL_1_REG, 0); +#endif + + // Reset CPUs + if (core_id == 0) { + // Running on PRO CPU: APP CPU is stalled. Can reset both CPUs. +#if !CONFIG_FREERTOS_UNICORE + esp_cpu_reset(1); +#endif + esp_cpu_reset(0); + } +#if !CONFIG_FREERTOS_UNICORE + else { + // Running on APP CPU: need to reset PRO CPU and unstall it, + // then reset APP CPU + esp_cpu_reset(0); + esp_cpu_unstall(0); + esp_cpu_reset(1); + } +#endif + while (true) { + ; + } +} + +void esp_chip_info(esp_chip_info_t *out_info) +{ + memset(out_info, 0, sizeof(*out_info)); + out_info->model = CHIP_ESP32S3; + out_info->cores = 2; + out_info->features = CHIP_FEATURE_WIFI_BGN; +} diff --git a/components/esp_system/port/esp32s3/clk.c b/components/esp_system/port/esp32s3/clk.c new file mode 100644 index 0000000000..d449399fbf --- /dev/null +++ b/components/esp_system/port/esp32s3/clk.c @@ -0,0 +1,320 @@ + +// Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include +#include +#include "sdkconfig.h" +#include "esp_attr.h" +#include "esp_log.h" +#include "esp32s3/clk.h" +#include "esp_clk_internal.h" +#include "esp32s3/rom/ets_sys.h" +#include "esp32s3/rom/rtc.h" +#include "esp_rom_uart.h" +#include "soc/system_reg.h" +#include "soc/dport_access.h" +#include "soc/soc.h" +#include "soc/rtc.h" +#include "hal/wdt_hal.h" +#include "soc/rtc_periph.h" +#include "soc/i2s_reg.h" +#include "driver/periph_ctrl.h" +#include "xtensa/core-macros.h" +#include "bootloader_clock.h" +#include "soc/syscon_reg.h" + +static const char *TAG = "clk"; + +/* Number of cycles to wait from the 32k XTAL oscillator to consider it running. + * Larger values increase startup delay. Smaller values may cause false positive + * detection (i.e. oscillator runs for a few cycles and then stops). 
+ */ +#define SLOW_CLK_CAL_CYCLES CONFIG_ESP32S3_RTC_CLK_CAL_CYCLES + +#ifdef CONFIG_ESP32S3_RTC_XTAL_CAL_RETRY +#define RTC_XTAL_CAL_RETRY CONFIG_ESP32S3_RTC_XTAL_CAL_RETRY +#else +#define RTC_XTAL_CAL_RETRY 1 +#endif + +/* Lower threshold for a reasonably-looking calibration value for a 32k XTAL. + * The ideal value (assuming 32768 Hz frequency) is 1000000/32768*(2**19) = 16*10^6. + */ +#define MIN_32K_XTAL_CAL_VAL 15000000L + +/* Indicates that this 32k oscillator gets input from external oscillator, rather + * than a crystal. + */ +#define EXT_OSC_FLAG BIT(3) + +/* This is almost the same as rtc_slow_freq_t, except that we define + * an extra enum member for the external 32k oscillator. + * For convenience, lower 2 bits should correspond to rtc_slow_freq_t values. + */ +typedef enum { + SLOW_CLK_RTC = RTC_SLOW_FREQ_RTC, //!< Internal 90 kHz RC oscillator + SLOW_CLK_32K_XTAL = RTC_SLOW_FREQ_32K_XTAL, //!< External 32 kHz XTAL + SLOW_CLK_8MD256 = RTC_SLOW_FREQ_8MD256, //!< Internal 8 MHz RC oscillator, divided by 256 + SLOW_CLK_32K_EXT_OSC = RTC_SLOW_FREQ_32K_XTAL | EXT_OSC_FLAG //!< External 32k oscillator connected to 32K_XP pin +} slow_clk_sel_t; + +static void select_rtc_slow_clk(slow_clk_sel_t slow_clk); + +void esp_clk_init(void) +{ + rtc_config_t cfg = RTC_CONFIG_DEFAULT(); + rtc_init(cfg); + + assert(rtc_clk_xtal_freq_get() == RTC_XTAL_FREQ_40M); + + rtc_clk_fast_freq_set(RTC_FAST_FREQ_8M); + +#ifdef CONFIG_BOOTLOADER_WDT_ENABLE + // WDT uses a SLOW_CLK clock source. After a function select_rtc_slow_clk a frequency of this source can changed. + // If the frequency changes from 90kHz to 32kHz, then the timeout set for the WDT will increase 2.8 times. + // Therefore, for the time of frequency change, set a new lower timeout value (1.6 sec). + // This prevents excessive delay before resetting in case the supply voltage is drawdown. + // (If frequency is changed from 90kHz to 32kHz then WDT timeout will increased to 1.6sec * 90/32 = 4.5 sec). + wdt_hal_context_t rtc_wdt_ctx = {.inst = WDT_RWDT, .rwdt_dev = &RTCCNTL}; + uint32_t stage_timeout_ticks = (uint32_t)(1600ULL * rtc_clk_slow_freq_get_hz() / 1000ULL); + wdt_hal_write_protect_disable(&rtc_wdt_ctx); + wdt_hal_feed(&rtc_wdt_ctx); + //Bootloader has enabled RTC WDT until now. We're only modifying timeout, so keep the stage and timeout action the same + wdt_hal_config_stage(&rtc_wdt_ctx, WDT_STAGE0, stage_timeout_ticks, WDT_STAGE_ACTION_RESET_RTC); + wdt_hal_write_protect_enable(&rtc_wdt_ctx); +#endif + +#if defined(CONFIG_ESP32S3_RTC_CLK_SRC_EXT_CRYS) + select_rtc_slow_clk(SLOW_CLK_32K_XTAL); +#elif defined(CONFIG_ESP32S3_RTC_CLK_SRC_EXT_OSC) + select_rtc_slow_clk(SLOW_CLK_32K_EXT_OSC); +#elif defined(CONFIG_ESP32S3_RTC_CLK_SRC_INT_8MD256) + select_rtc_slow_clk(SLOW_CLK_8MD256); +#else + select_rtc_slow_clk(RTC_SLOW_FREQ_RTC); +#endif + +#ifdef CONFIG_BOOTLOADER_WDT_ENABLE + // After changing a frequency WDT timeout needs to be set for new frequency. 
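// Worked example (illustrative, assuming a 9000 ms bootloader WDT timeout and
// the 90 kHz internal oscillator): 9000 * 90000 / 1000 = 810000 slow-clock
// ticks for stage 0 below.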
+ stage_timeout_ticks = (uint32_t)((uint64_t)CONFIG_BOOTLOADER_WDT_TIME_MS * rtc_clk_slow_freq_get_hz() / 1000ULL); + wdt_hal_write_protect_disable(&rtc_wdt_ctx); + wdt_hal_feed(&rtc_wdt_ctx); + wdt_hal_config_stage(&rtc_wdt_ctx, WDT_STAGE0, stage_timeout_ticks, WDT_STAGE_ACTION_RESET_RTC); + wdt_hal_write_protect_enable(&rtc_wdt_ctx); +#endif + + rtc_cpu_freq_config_t old_config, new_config; + rtc_clk_cpu_freq_get_config(&old_config); + const uint32_t old_freq_mhz = old_config.freq_mhz; + const uint32_t new_freq_mhz = CONFIG_ESP32S3_DEFAULT_CPU_FREQ_MHZ; + + bool res = rtc_clk_cpu_freq_mhz_to_config(new_freq_mhz, &new_config); + assert(res); + + // Wait for UART TX to finish, otherwise some UART output will be lost + // when switching APB frequency + if (CONFIG_ESP_CONSOLE_UART_NUM >= 0) { + esp_rom_uart_tx_wait_idle(CONFIG_ESP_CONSOLE_UART_NUM); + } + + rtc_clk_cpu_freq_set_config(&new_config); + + // Re calculate the ccount to make time calculation correct. + XTHAL_SET_CCOUNT( (uint64_t)XTHAL_GET_CCOUNT() * new_freq_mhz / old_freq_mhz ); +} + +static void select_rtc_slow_clk(slow_clk_sel_t slow_clk) +{ + rtc_slow_freq_t rtc_slow_freq = slow_clk & RTC_CNTL_ANA_CLK_RTC_SEL_V; + uint32_t cal_val = 0; + /* number of times to repeat 32k XTAL calibration + * before giving up and switching to the internal RC + */ + int retry_32k_xtal = RTC_XTAL_CAL_RETRY; + + do { + if (rtc_slow_freq == RTC_SLOW_FREQ_32K_XTAL) { + /* 32k XTAL oscillator needs to be enabled and running before it can + * be used. Hardware doesn't have a direct way of checking if the + * oscillator is running. Here we use rtc_clk_cal function to count + * the number of main XTAL cycles in the given number of 32k XTAL + * oscillator cycles. If the 32k XTAL has not started up, calibration + * will time out, returning 0. + */ + ESP_EARLY_LOGD(TAG, "waiting for 32k oscillator to start up"); + if (slow_clk == SLOW_CLK_32K_XTAL) { + rtc_clk_32k_enable(true); + } else if (slow_clk == SLOW_CLK_32K_EXT_OSC) { + rtc_clk_32k_enable_external(); + } + // When SLOW_CLK_CAL_CYCLES is set to 0, clock calibration will not be performed at startup. + if (SLOW_CLK_CAL_CYCLES > 0) { + cal_val = rtc_clk_cal(RTC_CAL_32K_XTAL, SLOW_CLK_CAL_CYCLES); + if (cal_val == 0 || cal_val < MIN_32K_XTAL_CAL_VAL) { + if (retry_32k_xtal-- > 0) { + continue; + } + ESP_EARLY_LOGW(TAG, "32 kHz XTAL not found, switching to internal 90 kHz oscillator"); + rtc_slow_freq = RTC_SLOW_FREQ_RTC; + } + } + } else if (rtc_slow_freq == RTC_SLOW_FREQ_8MD256) { + rtc_clk_8m_enable(true, true); + } + rtc_clk_slow_freq_set(rtc_slow_freq); + + if (SLOW_CLK_CAL_CYCLES > 0) { + /* TODO: 32k XTAL oscillator has some frequency drift at startup. + * Improve calibration routine to wait until the frequency is stable. + */ + cal_val = rtc_clk_cal(RTC_CAL_RTC_MUX, SLOW_CLK_CAL_CYCLES); + } else { + const uint64_t cal_dividend = (1ULL << RTC_CLK_CAL_FRACT) * 1000000ULL; + cal_val = (uint32_t) (cal_dividend / rtc_clk_slow_freq_get_hz()); + } + } while (cal_val == 0); + ESP_EARLY_LOGD(TAG, "RTC_SLOW_CLK calibration value: %d", cal_val); + esp_clk_slowclk_cal_set(cal_val); +} + +void rtc_clk_select_rtc_slow_clk(void) +{ + select_rtc_slow_clk(RTC_SLOW_FREQ_32K_XTAL); +} + +/* This function is not exposed as an API at this point. + * All peripheral clocks are default enabled after chip is powered on. + * This function disables some peripheral clocks when cpu starts. + * These peripheral clocks are enabled when the peripherals are initialized + * and disabled when they are de-initialized. 
+ */
+void esp_perip_clk_init(void)
+{
+    uint32_t common_perip_clk, hwcrypto_perip_clk, wifi_bt_sdio_clk = 0;
+    uint32_t common_perip_clk1 = 0;
+
+#if CONFIG_FREERTOS_UNICORE
+    RESET_REASON rst_reas[1];
+#else
+    RESET_REASON rst_reas[2];
+#endif
+
+    rst_reas[0] = rtc_get_reset_reason(0);
+#if !CONFIG_FREERTOS_UNICORE
+    rst_reas[1] = rtc_get_reset_reason(1);
+#endif
+
+    /* For reset reasons that only reset the CPU, do not disable the clocks
+     * that were enabled before the reset.
+     */
+    if ((rst_reas[0] >= TG0WDT_CPU_RESET && rst_reas[0] <= RTCWDT_CPU_RESET && rst_reas[0] != RTCWDT_BROWN_OUT_RESET)
+#if !CONFIG_FREERTOS_UNICORE
+            || (rst_reas[1] >= TG0WDT_CPU_RESET && rst_reas[1] <= RTCWDT_CPU_RESET)
+#endif
+       ) {
+        common_perip_clk = ~READ_PERI_REG(SYSTEM_PERIP_CLK_EN0_REG);
+        hwcrypto_perip_clk = ~READ_PERI_REG(SYSTEM_PERIP_CLK_EN1_REG);
+        wifi_bt_sdio_clk = ~READ_PERI_REG(SYSTEM_WIFI_CLK_EN_REG);
+    } else {
+        common_perip_clk = SYSTEM_WDG_CLK_EN |
+                           SYSTEM_I2S0_CLK_EN |
+#if CONFIG_CONSOLE_UART_NUM != 0
+                           SYSTEM_UART_CLK_EN |
+#endif
+#if CONFIG_CONSOLE_UART_NUM != 1
+                           SYSTEM_UART1_CLK_EN |
+#endif
+#if CONFIG_CONSOLE_UART_NUM != 2
+                           SYSTEM_UART2_CLK_EN |
+#endif
+                           SYSTEM_USB_CLK_EN |
+                           SYSTEM_SPI2_CLK_EN |
+                           SYSTEM_I2C_EXT0_CLK_EN |
+                           SYSTEM_UHCI0_CLK_EN |
+                           SYSTEM_RMT_CLK_EN |
+                           SYSTEM_PCNT_CLK_EN |
+                           SYSTEM_LEDC_CLK_EN |
+                           SYSTEM_TIMERGROUP1_CLK_EN |
+                           SYSTEM_SPI3_CLK_EN |
+                           SYSTEM_SPI4_CLK_EN |
+                           SYSTEM_PWM0_CLK_EN |
+                           SYSTEM_CAN_CLK_EN |
+                           SYSTEM_PWM1_CLK_EN |
+                           SYSTEM_I2S1_CLK_EN |
+                           SYSTEM_SPI2_DMA_CLK_EN |
+                           SYSTEM_SPI3_DMA_CLK_EN |
+                           SYSTEM_PWM2_CLK_EN |
+                           SYSTEM_PWM3_CLK_EN;
+        common_perip_clk1 = 0;
+        hwcrypto_perip_clk = SYSTEM_CRYPTO_AES_CLK_EN |
+                             SYSTEM_CRYPTO_SHA_CLK_EN |
+                             SYSTEM_CRYPTO_RSA_CLK_EN;
+        wifi_bt_sdio_clk = SYSTEM_WIFI_CLK_WIFI_EN |
+                           SYSTEM_WIFI_CLK_BT_EN_M |
+                           SYSTEM_WIFI_CLK_UNUSED_BIT5 |
+                           SYSTEM_WIFI_CLK_UNUSED_BIT12 |
+                           SYSTEM_WIFI_CLK_SDIO_HOST_EN;
+    }
+
+    //Reset the communication peripherals like I2C, SPI, UART, I2S and bring them to a known state.
+    common_perip_clk |= SYSTEM_I2S0_CLK_EN |
+#if CONFIG_CONSOLE_UART_NUM != 0
+                        SYSTEM_UART_CLK_EN |
+#endif
+#if CONFIG_CONSOLE_UART_NUM != 1
+                        SYSTEM_UART1_CLK_EN |
+#endif
+#if CONFIG_CONSOLE_UART_NUM != 2
+                        SYSTEM_UART2_CLK_EN |
+#endif
+                        SYSTEM_USB_CLK_EN |
+                        SYSTEM_SPI2_CLK_EN |
+                        SYSTEM_I2C_EXT0_CLK_EN |
+                        SYSTEM_UHCI0_CLK_EN |
+                        SYSTEM_RMT_CLK_EN |
+                        SYSTEM_UHCI1_CLK_EN |
+                        SYSTEM_SPI3_CLK_EN |
+                        SYSTEM_SPI4_CLK_EN |
+                        SYSTEM_I2C_EXT1_CLK_EN |
+                        SYSTEM_I2S1_CLK_EN |
+                        SYSTEM_SPI2_DMA_CLK_EN |
+                        SYSTEM_SPI3_DMA_CLK_EN;
+    common_perip_clk1 = 0;
+
+    /* Switch the I2S clock to the audio PLL first, because if I2S keeps using the
+     * 160 MHz clock, the current draw is not reduced when the I2S clock is disabled.
+     */
+    REG_SET_FIELD(I2S_CLKM_CONF_REG(0), I2S_CLK_SEL, I2S_CLK_AUDIO_PLL);
+    REG_SET_FIELD(I2S_CLKM_CONF_REG(1), I2S_CLK_SEL, I2S_CLK_AUDIO_PLL);
+
+    /* Disable some peripheral clocks. */
+    CLEAR_PERI_REG_MASK(SYSTEM_PERIP_CLK_EN0_REG, common_perip_clk);
+    SET_PERI_REG_MASK(SYSTEM_PERIP_RST_EN0_REG, common_perip_clk);
+
+    CLEAR_PERI_REG_MASK(SYSTEM_PERIP_CLK_EN1_REG, common_perip_clk1);
+    SET_PERI_REG_MASK(SYSTEM_PERIP_RST_EN1_REG, common_perip_clk1);
+
+    /* Disable hardware crypto clocks. */
+    CLEAR_PERI_REG_MASK(SYSTEM_PERIP_CLK_EN1_REG, hwcrypto_perip_clk);
+    SET_PERI_REG_MASK(SYSTEM_PERIP_RST_EN1_REG, hwcrypto_perip_clk);
+
+    /* Disable WiFi/BT/SDIO clocks. */
+    CLEAR_PERI_REG_MASK(SYSTEM_WIFI_CLK_EN_REG, wifi_bt_sdio_clk);
+
+    /* Enable RNG clock.
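+       The RNG is kept clocked so that esp_random() can read entropy from it at any time.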
*/ + periph_module_enable(PERIPH_RNG_MODULE); +} \ No newline at end of file diff --git a/components/esp_system/port/esp32s3/reset_reason.c b/components/esp_system/port/esp32s3/reset_reason.c new file mode 100644 index 0000000000..25f8f0a90d --- /dev/null +++ b/components/esp_system/port/esp32s3/reset_reason.c @@ -0,0 +1,120 @@ +// Copyright 2018 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "esp_system.h" +#include "esp32s3/rom/rtc.h" +#include "esp_private/system_internal.h" +#include "soc/rtc_periph.h" + +static void esp_reset_reason_clear_hint(void); + +static esp_reset_reason_t s_reset_reason; + +static esp_reset_reason_t get_reset_reason(RESET_REASON rtc_reset_reason, esp_reset_reason_t reset_reason_hint) +{ + switch (rtc_reset_reason) { + case POWERON_RESET: + return ESP_RST_POWERON; + + case RTC_SW_CPU_RESET: + case RTC_SW_SYS_RESET: + if (reset_reason_hint == ESP_RST_PANIC || + reset_reason_hint == ESP_RST_BROWNOUT || + reset_reason_hint == ESP_RST_TASK_WDT || + reset_reason_hint == ESP_RST_INT_WDT) { + return reset_reason_hint; + } + return ESP_RST_SW; + + case DEEPSLEEP_RESET: + return ESP_RST_DEEPSLEEP; + + case TG0WDT_SYS_RESET: + return ESP_RST_TASK_WDT; + + case TG1WDT_SYS_RESET: + return ESP_RST_INT_WDT; + + case RTCWDT_SYS_RESET: + case RTCWDT_RTC_RESET: + case SUPER_WDT_RESET: + case RTCWDT_CPU_RESET: /* unused */ + case TG0WDT_CPU_RESET: /* unused */ + case TG1WDT_CPU_RESET: /* unused */ + return ESP_RST_WDT; + + case RTCWDT_BROWN_OUT_RESET: + return ESP_RST_BROWNOUT; + + case INTRUSION_RESET: /* unused */ + default: + return ESP_RST_UNKNOWN; + } +} + +static void __attribute__((constructor)) esp_reset_reason_init(void) +{ + esp_reset_reason_t hint = esp_reset_reason_get_hint(); + s_reset_reason = get_reset_reason(rtc_get_reset_reason(PRO_CPU_NUM), + hint); + if (hint != ESP_RST_UNKNOWN) { + esp_reset_reason_clear_hint(); + } +} + +esp_reset_reason_t esp_reset_reason(void) +{ + return s_reset_reason; +} + +/* Reset reason hint is stored in RTC_RESET_CAUSE_REG, a.k.a. RTC_CNTL_STORE6_REG, + * a.k.a. RTC_ENTRY_ADDR_REG. It is safe to use this register both for the + * deep sleep wake stub entry address and for reset reason hint, since wake stub + * is only used for deep sleep reset, and in this case the reason provided by + * rtc_get_reset_reason is unambiguous. + * + * Same layout is used as for RTC_APB_FREQ_REG (a.k.a. RTC_CNTL_STORE5_REG): + * the value is replicated in low and high half-words. In addition to that, + * MSB is set to 1, which doesn't happen when RTC_CNTL_STORE6_REG contains + * deep sleep wake stub address. 
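+ *
+ * For example, a hint value of 0x0004 is stored as 0x80040004: bit 31 set, with
+ * 0x0004 replicated in the low and high half-words.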
+ */ + +#define RST_REASON_BIT 0x80000000 +#define RST_REASON_MASK 0x7FFF +#define RST_REASON_SHIFT 16 + +/* in IRAM, can be called from panic handler */ +void IRAM_ATTR esp_reset_reason_set_hint(esp_reset_reason_t hint) +{ + assert((hint & (~RST_REASON_MASK)) == 0); + uint32_t val = hint | (hint << RST_REASON_SHIFT) | RST_REASON_BIT; + REG_WRITE(RTC_RESET_CAUSE_REG, val); +} + +/* in IRAM, can be called from panic handler */ +esp_reset_reason_t IRAM_ATTR esp_reset_reason_get_hint(void) +{ + uint32_t reset_reason_hint = REG_READ(RTC_RESET_CAUSE_REG); + uint32_t high = (reset_reason_hint >> RST_REASON_SHIFT) & RST_REASON_MASK; + uint32_t low = reset_reason_hint & RST_REASON_MASK; + if ((reset_reason_hint & RST_REASON_BIT) == 0 || high != low) { + return ESP_RST_UNKNOWN; + } + return (esp_reset_reason_t) low; +} +static void esp_reset_reason_clear_hint(void) +{ + REG_WRITE(RTC_RESET_CAUSE_REG, 0); +} + diff --git a/components/freertos/xtensa/port.c b/components/freertos/xtensa/port.c index 9c9fa0dc3d..62eb61fdf5 100644 --- a/components/freertos/xtensa/port.c +++ b/components/freertos/xtensa/port.c @@ -547,7 +547,9 @@ void start_app_other_cores(void) #endif esp_crosscore_int_init(); +#if CONFIG_IDF_TARGET_ESP32 esp_dport_access_int_init(); +#endif ESP_EARLY_LOGI(TAG, "Starting scheduler on APP CPU."); xPortStartScheduler(); @@ -570,7 +572,9 @@ void start_app(void) esp_crosscore_int_init(); #ifndef CONFIG_FREERTOS_UNICORE +#if CONFIG_IDF_TARGET_ESP32 esp_dport_access_int_init(); +#endif #endif portBASE_TYPE res = xTaskCreatePinnedToCore(&main_task, "main", diff --git a/components/mbedtls/port/esp32/esp_sha1.c b/components/mbedtls/port/esp32/esp_sha1.c index 893afb44fb..ba5231b9ce 100644 --- a/components/mbedtls/port/esp32/esp_sha1.c +++ b/components/mbedtls/port/esp32/esp_sha1.c @@ -47,11 +47,7 @@ #endif /* MBEDTLS_PLATFORM_C */ #endif /* MBEDTLS_SELF_TEST */ -#if CONFIG_IDF_TARGET_ESP32 #include "esp32/sha.h" -#elif CONFIG_IDF_TARGET_ESP32S2 -#include "esp32s2/sha.h" -#endif /* Implementation that should never be optimized out by the compiler */ static void mbedtls_zeroize( void *v, size_t n ) { diff --git a/components/mbedtls/port/esp32/esp_sha256.c b/components/mbedtls/port/esp32/esp_sha256.c index df31ec022e..7a1e2a1475 100644 --- a/components/mbedtls/port/esp32/esp_sha256.c +++ b/components/mbedtls/port/esp32/esp_sha256.c @@ -48,11 +48,7 @@ #endif /* MBEDTLS_PLATFORM_C */ #endif /* MBEDTLS_SELF_TEST */ -#if CONFIG_IDF_TARGET_ESP32 #include "esp32/sha.h" -#elif CONFIG_IDF_TARGET_ESP32S2 -#include "esp32s2/sha.h" -#endif /* Implementation that should never be optimized out by the compiler */ static void mbedtls_zeroize( void *v, size_t n ) { diff --git a/components/mbedtls/port/esp32/esp_sha512.c b/components/mbedtls/port/esp32/esp_sha512.c index d2b89122d8..58cd79097f 100644 --- a/components/mbedtls/port/esp32/esp_sha512.c +++ b/components/mbedtls/port/esp32/esp_sha512.c @@ -54,11 +54,7 @@ #endif /* MBEDTLS_PLATFORM_C */ #endif /* MBEDTLS_SELF_TEST */ -#if CONFIG_IDF_TARGET_ESP32 #include "esp32/sha.h" -#elif CONFIG_IDF_TARGET_ESP32S2 -#include "esp32s2/sha.h" -#endif inline static esp_sha_type sha_type(const mbedtls_sha512_context *ctx) { diff --git a/components/soc/include/hal/systimer_types.h b/components/soc/include/hal/systimer_types.h index ebe5fb4e1a..2ae7c4a2e7 100644 --- a/components/soc/include/hal/systimer_types.h +++ b/components/soc/include/hal/systimer_types.h @@ -46,7 +46,7 @@ _Static_assert(sizeof(systimer_counter_value_t) == 8, "systimer_counter_value_t typedef enum { 
     SYSTIMER_COUNTER_0, /*!< systimer counter 0 */
 #if SOC_SYSTIMER_COUNTER_NUM > 1
-    SYSTIEMR_COUNTER_1, /*!< systimer counter 1 */
+    SYSTIMER_COUNTER_1, /*!< systimer counter 1 */
 #endif
 } systimer_counter_id_t;
diff --git a/components/soc/src/esp32s3/soc_memory_layout.c b/components/soc/src/esp32s3/soc_memory_layout.c
new file mode 100644
index 0000000000..b9a4784b15
--- /dev/null
+++ b/components/soc/src/esp32s3/soc_memory_layout.c
@@ -0,0 +1,111 @@
+// Copyright 2010-2020 Espressif Systems (Shanghai) PTE LTD
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#ifndef BOOTLOADER_BUILD
+
+#include <stdint.h>
+#include <stdlib.h>
+#include "sdkconfig.h"
+#include "esp_attr.h"
+#include "soc/soc.h"
+#include "soc/soc_memory_layout.h"
+#include "esp_heap_caps.h"
+
+/**
+ * @brief Memory type descriptors. These describe the capabilities of a type of memory in the SoC.
+ * Each type of memory maps to one or more regions in the address space.
+ * Each type contains an array of prioritized capabilities.
+ * Types with later entries are only taken if earlier ones can't fulfill the memory request.
+ *
+ * - For a normal malloc (MALLOC_CAP_DEFAULT), give away the DRAM-only memory first, then pass off any dual-use IRAM regions, finally eat into the application memory.
+ * - For a malloc where 32-bit-aligned-only access is okay, first allocate IRAM, then DRAM, finally application IRAM.
+ * - Application mallocs (PIDx) will allocate IRAM first, if possible, then DRAM.
+ * - Most other malloc caps only fit in one region anyway.
+ *
+ */
+const soc_memory_type_desc_t soc_memory_types[] = {
+    // Type 0: DRAM
+    { "DRAM", { MALLOC_CAP_8BIT | MALLOC_CAP_DEFAULT, MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA | MALLOC_CAP_32BIT, 0 }, false, false},
+    // Type 1: DRAM used for startup stacks
+    { "STACK/DRAM", { MALLOC_CAP_8BIT | MALLOC_CAP_DEFAULT, MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA | MALLOC_CAP_32BIT, 0 }, false, true},
+    // Type 2: DRAM which has an alias on the I-port
+    { "D/IRAM", { 0, MALLOC_CAP_DMA | MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL | MALLOC_CAP_DEFAULT, MALLOC_CAP_32BIT | MALLOC_CAP_EXEC }, true, false},
+    // Type 3: IRAM
+    { "IRAM", { MALLOC_CAP_EXEC | MALLOC_CAP_32BIT | MALLOC_CAP_INTERNAL, 0, 0 }, false, false},
+    // Type 4: SPI SRAM data
+    { "SPIRAM", { MALLOC_CAP_SPIRAM | MALLOC_CAP_DEFAULT, 0, MALLOC_CAP_8BIT | MALLOC_CAP_32BIT}, false, false},
+};
+
+const size_t soc_memory_type_count = sizeof(soc_memory_types) / sizeof(soc_memory_type_desc_t);
+
+/**
+ * @brief Region descriptors. These describe all regions of memory available, and map each of them to one of the memory types defined above.
+ *
+ * @note Because of requirements in the coalescing code which merges adjacent regions,
+ *       this list should always be sorted from low to high by start address.
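+ *
+ * @note Each entry is { start address, size in bytes, index into the soc_memory_types[]
+ *       array, and, for D/IRAM regions, the address of the same memory on the instruction bus }.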
+ *
+ */
+const soc_memory_region_t soc_memory_regions[] = {
+#ifdef CONFIG_SPIRAM
+    { SOC_EXTRAM_DATA_LOW, SOC_EXTRAM_DATA_HIGH - SOC_EXTRAM_DATA_LOW, 4, 0}, //SPI SRAM, if available
+#endif
+#if CONFIG_ESP32S3_INSTRUCTION_CACHE_16KB
+    { 0x40374000, 0x4000, 3, 0}, //Level 1, IRAM
+#endif
+    { 0x3FC88000, 0x8000, 2, 0x40378000}, //Level 2, IDRAM, can be used as trace memory
+    { 0x3FC90000, 0x10000, 2, 0x40380000}, //Level 3, IDRAM, can be used as trace memory
+    { 0x3FCA0000, 0x10000, 2, 0x40390000}, //Level 4, IDRAM, can be used as trace memory
+    { 0x3FCB0000, 0x10000, 2, 0x403A0000}, //Level 5, IDRAM, can be used as trace memory
+    { 0x3FCC0000, 0x10000, 2, 0x403B0000}, //Level 6, IDRAM, can be used as trace memory
+    { 0x3FCD0000, 0x10000, 2, 0x403C0000}, //Level 7, IDRAM, can be used as trace memory
+    { 0x3FCE0000, 0x10000, 1, 0}, //Level 8, IDRAM, can be used as trace memory, contains stacks used by the startup flow, recycled by the heap allocator in the app_main task
+#if CONFIG_ESP32S3_DATA_CACHE_32KB
+    { 0x3FCF0000, 0x8000, 0, 0}, //Level 9, DRAM
+#endif
+};
+
+const size_t soc_memory_region_count = sizeof(soc_memory_regions) / sizeof(soc_memory_region_t);
+
+extern int _dram0_rtos_reserved_start; // defined in esp32s3.rom.ld
+extern int _data_start, _heap_start, _iram_start, _iram_end; // defined in esp32s3.project.ld.in
+
+/**
+ * Reserved memory regions.
+ * These are removed from the soc_memory_regions array when heaps are created.
+ *
+ */
+//ROM data region
+SOC_RESERVE_MEMORY_REGION((intptr_t)&_dram0_rtos_reserved_start, SOC_DIRAM_DRAM_HIGH, rom_data_region);
+
+// Static data region. DRAM used by data+bss and possibly rodata
+SOC_RESERVE_MEMORY_REGION((intptr_t)&_data_start, (intptr_t)&_heap_start, dram_data);
+
+// The ESP32-S3 has a large D/IRAM region; the part used by code is reserved.
+// The D and I buses map this memory in the same order, so the DRAM address to
+// reserve is obtained by shifting the IRAM address down by I_D_OFFSET.
+#define I_D_OFFSET (SOC_DIRAM_IRAM_LOW - SOC_DIRAM_DRAM_LOW)
+#if CONFIG_ESP32S3_INSTRUCTION_CACHE_16KB
+SOC_RESERVE_MEMORY_REGION((intptr_t)&_iram_start, (intptr_t)&_iram_start + 0x4000, iram_code_1);
+SOC_RESERVE_MEMORY_REGION((intptr_t)&_iram_start + 0x4000 - I_D_OFFSET, (intptr_t)&_iram_end - I_D_OFFSET, iram_code_2);
+#else
+SOC_RESERVE_MEMORY_REGION((intptr_t)&_iram_start - I_D_OFFSET, (intptr_t)&_iram_end - I_D_OFFSET, iram_code);
+#endif
+
+#ifdef CONFIG_SPIRAM
+SOC_RESERVE_MEMORY_REGION( SOC_EXTRAM_DATA_LOW, SOC_EXTRAM_DATA_HIGH, extram_data_region); //SPI RAM gets added later if needed, in spiram.c; reserve it for now
+#endif
+
+#if CONFIG_ESP32S3_TRACEMEM_RESERVE_DRAM > 0
+SOC_RESERVE_MEMORY_REGION(0x3fffc000 - CONFIG_ESP32S3_TRACEMEM_RESERVE_DRAM, 0x3fffc000, trace_mem);
+#endif
+
+#endif // BOOTLOADER_BUILD