Merge branch 'feat/esp32p4_xip_psram' into 'master'

psram: support xip_psram on esp32p4

Closes IDF-7556

See merge request espressif/esp-idf!26903
Armando (Dou Yiwen) 2024-05-22 21:18:19 +08:00
commit 92f9301ceb
52 changed files with 980 additions and 154 deletions

View File

@ -22,7 +22,7 @@
#include "hal/cache_hal.h"
#include "hal/cache_ll.h"
void bootloader_flash_update_id()
void IRAM_ATTR bootloader_flash_update_id()
{
esp_rom_spiflash_chip_t *chip = &rom_spiflash_legacy_data->chip;
chip->device_id = bootloader_read_flash_id();

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2010-2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2010-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -10,6 +10,7 @@
#include <stdbool.h>
#include "soc/soc.h"
#include "soc/ext_mem_defs.h"
#include "soc/soc_caps.h"
#include "sdkconfig.h"
#include "esp_attr.h"
@ -179,6 +180,31 @@ inline static bool esp_ptr_in_tcm(const void *p) {
#endif //#if SOC_MEM_TCM_SUPPORTED
/** End of the common section that has to be in sync with esp_memory_utils.h **/
/**
* @brief Check if the pointer is in PSRAM vaddr space
*
* @note This function is only used in the bootloader, where the PSRAM isn't initialised.
* This function simply checks whether the pointer is in the PSRAM vaddr space.
* The PSRAM vaddr space is not always the same as the actual PSRAM vaddr range used by the app
*
* @param p pointer
*
* @return true: is in PSRAM; false: not in PSRAM
*/
__attribute__((always_inline))
inline static bool esp_ptr_in_extram(const void *p) {
bool valid = false;
#if SOC_IRAM_PSRAM_ADDRESS_LOW
valid |= ((intptr_t)p >= SOC_IRAM_PSRAM_ADDRESS_LOW && (intptr_t)p < SOC_IRAM_PSRAM_ADDRESS_HIGH);
#endif
#if SOC_DRAM_PSRAM_ADDRESS_LOW
valid |= ((intptr_t)p >= SOC_DRAM_PSRAM_ADDRESS_LOW && (intptr_t)p < SOC_DRAM_PSRAM_ADDRESS_HIGH);
#endif
return valid;
}
/** Don't add new functions below **/
#ifdef __cplusplus

View File

@ -25,6 +25,7 @@
#include "soc/rtc_periph.h"
#include "soc/timer_periph.h"
#include "hal/mmu_hal.h"
#include "hal/mmu_ll.h"
#include "hal/cache_types.h"
#include "hal/cache_ll.h"
#include "hal/cache_hal.h"
@ -46,6 +47,7 @@
#include "bootloader_sha.h"
#include "bootloader_console.h"
#include "bootloader_soc.h"
#include "bootloader_memory_utils.h"
#include "esp_efuse.h"
#include "esp_fault.h"
@ -718,10 +720,20 @@ static void unpack_load_app(const esp_image_metadata_t *data)
// Find DROM & IROM addresses, to configure MMU mappings
for (int i = 0; i < data->image.segment_count; i++) {
const esp_image_segment_header_t *header = &data->segments[i];
bool text_or_rodata = false;
//`SOC_DROM_LOW` and `SOC_DROM_HIGH` are the same as `SOC_IROM_LOW` and `SOC_IROM_HIGH`, see the `note` above for the reasons
if (header->load_addr >= SOC_DROM_LOW && header->load_addr < SOC_DROM_HIGH) {
text_or_rodata = true;
}
#if SOC_MMU_PER_EXT_MEM_TARGET
if (header->load_addr >= SOC_EXTRAM_LOW && header->load_addr < SOC_EXTRAM_HIGH) {
text_or_rodata = true;
}
#endif
if (text_or_rodata) {
/**
* D/I are shared, but there should not be a third segment on flash
* D/I are shared, but there should not be a third segment on flash/psram
*/
assert(rom_index < 2);
rom_addr[rom_index] = data->segment_data[i];
@ -788,6 +800,20 @@ static void unpack_load_app(const esp_image_metadata_t *data)
}
#endif //#if SOC_MMU_DI_VADDR_SHARED
//unused for esp32
__attribute__((unused))
static bool s_flash_seg_needs_map(uint32_t vaddr)
{
#if SOC_MMU_PER_EXT_MEM_TARGET
//For these chips, segments on PSRAM will be mapped in app
bool is_psram = esp_ptr_in_extram((void *)vaddr);
return !is_psram;
#else
//For these chips, segments on Flash always need to be mapped
return true;
#endif
}
static void set_cache_and_start_app(
uint32_t drom_addr,
uint32_t drom_load_addr,
@ -825,8 +851,13 @@ static void set_cache_and_start_app(
ESP_EARLY_LOGV(TAG, "after mapping rodata, starting from paddr=0x%08" PRIx32 " and vaddr=0x%08" PRIx32 ", 0x%" PRIx32 " bytes are mapped", drom_addr_aligned, drom_load_addr_aligned, drom_page_count * SPI_FLASH_MMU_PAGE_SIZE);
#else
uint32_t actual_mapped_len = 0;
mmu_hal_map_region(0, MMU_TARGET_FLASH0, drom_load_addr_aligned, drom_addr_aligned, drom_size, &actual_mapped_len);
ESP_EARLY_LOGV(TAG, "after mapping rodata, starting from paddr=0x%08" PRIx32 " and vaddr=0x%08" PRIx32 ", 0x%" PRIx32 " bytes are mapped", drom_addr_aligned, drom_load_addr_aligned, actual_mapped_len);
if (s_flash_seg_needs_map(drom_load_addr_aligned)) {
mmu_hal_map_region(0, MMU_TARGET_FLASH0, drom_load_addr_aligned, drom_addr_aligned, drom_size, &actual_mapped_len);
ESP_EARLY_LOGV(TAG, "after mapping rodata, starting from paddr=0x%08" PRIx32 " and vaddr=0x%08" PRIx32 ", 0x%" PRIx32 " bytes are mapped", drom_addr_aligned, drom_load_addr_aligned, actual_mapped_len);
}
//we use the MMU_LL_END_DROM_ENTRY_ID mmu entry as a mapped page for the app to find the boot partition
mmu_hal_map_region(0, MMU_TARGET_FLASH0, MMU_LL_END_DROM_ENTRY_VADDR, drom_addr_aligned, CONFIG_MMU_PAGE_SIZE, &actual_mapped_len);
ESP_EARLY_LOGV(TAG, "mapped one page of the rodata, from paddr=0x%08" PRIx32 " and vaddr=0x%08" PRIx32 ", 0x%" PRIx32 " bytes are mapped", drom_addr_aligned, drom_load_addr_aligned, actual_mapped_len);
#endif
//-----------------------MAP IROM--------------------------
@ -843,8 +874,10 @@ static void set_cache_and_start_app(
ESP_LOGV(TAG, "rc=%d", rc);
ESP_EARLY_LOGV(TAG, "after mapping text, starting from paddr=0x%08" PRIx32 " and vaddr=0x%08" PRIx32 ", 0x%" PRIx32 " bytes are mapped", irom_addr_aligned, irom_load_addr_aligned, irom_page_count * SPI_FLASH_MMU_PAGE_SIZE);
#else
mmu_hal_map_region(0, MMU_TARGET_FLASH0, irom_load_addr_aligned, irom_addr_aligned, irom_size, &actual_mapped_len);
ESP_EARLY_LOGV(TAG, "after mapping text, starting from paddr=0x%08" PRIx32 " and vaddr=0x%08" PRIx32 ", 0x%" PRIx32 " bytes are mapped", irom_addr_aligned, irom_load_addr_aligned, actual_mapped_len);
if (s_flash_seg_needs_map(irom_load_addr_aligned)) {
mmu_hal_map_region(0, MMU_TARGET_FLASH0, irom_load_addr_aligned, irom_addr_aligned, irom_size, &actual_mapped_len);
ESP_EARLY_LOGV(TAG, "after mapping text, starting from paddr=0x%08" PRIx32 " and vaddr=0x%08" PRIx32 ", 0x%" PRIx32 " bytes are mapped", irom_addr_aligned, irom_load_addr_aligned, actual_mapped_len);
}
#endif
//----------------------Enable corresponding buses----------------
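For clarity, the extra mapping at MMU_LL_END_DROM_ENTRY_VADDR above is a hand-off between the two stages: the 2nd stage bootloader parks the rodata segment's flash paddr in the last DROM MMU entry, and the application later reads that entry back to find the boot partition before doing its own mapping. A rough sketch of the two sides, using only calls that appear in this commit (surrounding code elided):

/* 2nd stage bootloader side (set_cache_and_start_app above): park the rodata paddr
 * in the last DROM MMU entry so the app can find the boot partition. */
mmu_hal_map_region(0, MMU_TARGET_FLASH0, MMU_LL_END_DROM_ENTRY_VADDR,
                   drom_addr_aligned, CONFIG_MMU_PAGE_SIZE, &actual_mapped_len);

/* App side (image_process() later in this commit): recover the paddr from that entry,
 * process the image, then invalidate the entry. */
uint32_t part_offset = mmu_ll_entry_id_to_paddr_base(0, MMU_LL_END_DROM_ENTRY_ID);
/* ... read the image header and segments through a temporary flash mapping ... */
mmu_ll_set_entry_invalid(0, MMU_LL_END_DROM_ENTRY_ID);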

View File

@ -771,8 +771,14 @@ static esp_err_t verify_segment_header(int index, const esp_image_segment_header
static bool should_map(uint32_t load_addr)
{
return (load_addr >= SOC_IROM_LOW && load_addr < SOC_IROM_HIGH)
|| (load_addr >= SOC_DROM_LOW && load_addr < SOC_DROM_HIGH);
bool is_irom = (load_addr >= SOC_IROM_LOW) && (load_addr < SOC_IROM_HIGH);
bool is_drom = (load_addr >= SOC_DROM_LOW) && (load_addr < SOC_DROM_HIGH);
bool is_psram = false;
#if SOC_MMU_PER_EXT_MEM_TARGET
is_psram = (load_addr >= SOC_EXTRAM_LOW) && (load_addr < SOC_EXTRAM_HIGH);
#endif
return (is_irom || is_drom || is_psram);
}
static bool should_load(uint32_t load_addr)
@ -857,7 +863,7 @@ static esp_err_t process_appended_hash_and_sig(esp_image_metadata_t *data, uint3
// Case I: Bootloader part
if (part_offset == ESP_BOOTLOADER_OFFSET) {
// For bootloader with secure boot v1, signature stays in an independant flash
// For bootloader with secure boot v1, signature stays in an independent flash
// sector (offset 0x0) and does not get appended to the image.
#if CONFIG_SECURE_BOOT_V2_ENABLED
// Sanity check - secure boot v2 signature block starts on 4K boundary

View File

@ -185,7 +185,7 @@ idf_component_register(SRCS ${srcs}
PRIV_INCLUDE_DIRS port/include include/esp_private
REQUIRES ${requires}
PRIV_REQUIRES "${priv_requires}"
LDFRAGMENTS linker.lf dma/linker.lf)
LDFRAGMENTS linker.lf dma/linker.lf ldo/linker.lf)
idf_build_get_property(target IDF_TARGET)
add_subdirectory(port/${target})

View File

@ -10,6 +10,8 @@
#include "esp_ldo_regulator.h"
#include "esp_private/esp_clk_tree_common.h"
#include "esp_check.h"
#include "hal/clk_tree_hal.h"
#include "hal/clk_tree_ll.h"
#if SOC_CLK_MPLL_SUPPORTED
#include "rtc_clk.h"
#endif
@ -137,7 +139,7 @@ esp_err_t periph_rtc_apll_freq_set(uint32_t expt_freq, uint32_t *real_freq)
#endif // SOC_CLK_APLL_SUPPORTED
#if SOC_CLK_MPLL_SUPPORTED
esp_err_t periph_rtc_mpll_acquire(void)
esp_err_t IRAM_ATTR periph_rtc_mpll_acquire(void)
{
// power up LDO for the MPLL
#if defined(CONFIG_ESP_LDO_CHAN_PSRAM_DOMAIN) && CONFIG_ESP_LDO_CHAN_PSRAM_DOMAIN != -1
@ -176,7 +178,7 @@ void periph_rtc_mpll_release(void)
portEXIT_CRITICAL(&periph_spinlock);
}
esp_err_t periph_rtc_mpll_freq_set(uint32_t expt_freq, uint32_t *real_freq)
esp_err_t IRAM_ATTR periph_rtc_mpll_freq_set(uint32_t expt_freq, uint32_t *real_freq)
{
esp_err_t ret = ESP_OK;
@ -190,10 +192,9 @@ esp_err_t periph_rtc_mpll_freq_set(uint32_t expt_freq, uint32_t *real_freq)
/* If MPLL is not in use or only one peripheral is in use, its frequency can be changed at will
* But when more than one peripheral refers to MPLL, its frequency is not allowed to change once it is set */
if (s_cur_mpll_freq == 0 || s_mpll_ref_cnt < 2) {
uint32_t xtal_freq = 0;
ESP_ERROR_CHECK(esp_clk_tree_src_get_freq_hz(SOC_MOD_CLK_XTAL, ESP_CLK_TREE_SRC_FREQ_PRECISION_EXACT, &xtal_freq));
rtc_clk_mpll_configure(xtal_freq / MHZ, expt_freq / MHZ);
ESP_ERROR_CHECK(esp_clk_tree_src_get_freq_hz(SOC_MOD_CLK_MPLL, ESP_CLK_TREE_SRC_FREQ_PRECISION_EXACT, &s_cur_mpll_freq));
uint32_t xtal_freq_mhz = clk_ll_xtal_load_freq_mhz();
rtc_clk_mpll_configure(xtal_freq_mhz, expt_freq / MHZ);
s_cur_mpll_freq = clk_ll_mpll_get_freq_mhz(xtal_freq_mhz);
} else {
ret = ESP_ERR_INVALID_STATE;
}
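For context, a minimal usage sketch of the MPLL reference-counting APIs touched above. The include path is an assumption (it is not shown in this commit); the call signatures match the diff, and the 500 MHz figure is only an example:

#include "esp_err.h"
#include "esp_private/periph_ctrl.h"   // assumed header for the periph_rtc_mpll_* declarations

static void example_use_mpll(void)
{
    uint32_t real_freq_hz = 0;
    ESP_ERROR_CHECK(periph_rtc_mpll_acquire());                          // powers the LDO and enables MPLL
    ESP_ERROR_CHECK(periph_rtc_mpll_freq_set(500 * 1000000, &real_freq_hz));
    // ... use the peripheral clocked from MPLL ...
    periph_rtc_mpll_release();                                           // disables MPLL once the last user releases it
}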

View File

@ -0,0 +1,6 @@
[mapping:ldo_driver]
archive: libesp_hw_support.a
entries:
if SOC_GP_LDO_SUPPORTED = y:
if SPIRAM_FLASH_LOAD_TO_PSRAM = y:
esp_ldo_regulator: esp_ldo_acquire_channel (noflash)

View File

@ -3,5 +3,4 @@ CONFIG_SPIRAM_MODE_OCT=y
CONFIG_SPIRAM_SPEED_80M=y
# Enable the XIP-PSRAM feature, so the ext-mem cache won't be disabled when SPI1 is operating the main flash
CONFIG_SPIRAM_FETCH_INSTRUCTIONS=y
CONFIG_SPIRAM_RODATA=y
CONFIG_SPIRAM_XIP_FROM_PSRAM=y

View File

@ -38,7 +38,7 @@
#define MEM_REGION_MERGED -1
/**
* We have some hw related tests for vaddr region capabilites
* We have some hw related tests for vaddr region capabilities
* Use this macro to disable paddr check as we need to reuse certain paddr blocks
*/
#define ENABLE_PADDR_CHECK !ESP_MMAP_TEST_ALLOW_MAP_TO_MAPPED_PADDR
@ -185,6 +185,13 @@ static void s_reserve_drom_region(mem_region_t *hw_mem_regions, int region_nums)
}
#endif //#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
#if SOC_MMU_PER_EXT_MEM_TARGET
static inline uint32_t s_get_mmu_id_from_target(mmu_target_t target)
{
return (target == MMU_TARGET_FLASH0) ? MMU_LL_FLASH_MMU_ID : MMU_LL_PSRAM_MMU_ID;
}
#endif
void esp_mmu_map_init(void)
{
mem_region_t hw_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {};
@ -381,16 +388,11 @@ static void IRAM_ATTR NOINLINE_ATTR s_do_cache_invalidate(uint32_t vaddr_start,
#endif // CONFIG_IDF_TARGET_ESP32
}
#if MMU_LL_MMU_PER_TARGET
#if SOC_MMU_PER_EXT_MEM_TARGET
FORCE_INLINE_ATTR uint32_t s_mapping_operation(mmu_target_t target, uint32_t vaddr_start, esp_paddr_t paddr_start, uint32_t size)
{
uint32_t actual_mapped_len = 0;
uint32_t mmu_id = 0;
if (target == MMU_TARGET_FLASH0) {
mmu_id = MMU_LL_FLASH_MMU_ID;
} else {
mmu_id = MMU_LL_PSRAM_MMU_ID;
}
uint32_t mmu_id = s_get_mmu_id_from_target(target);
mmu_hal_map_region(mmu_id, target, vaddr_start, paddr_start, size, &actual_mapped_len);
return actual_mapped_len;
@ -584,17 +586,11 @@ err:
return ret;
}
#if MMU_LL_MMU_PER_TARGET
#if SOC_MMU_PER_EXT_MEM_TARGET
FORCE_INLINE_ATTR void s_unmapping_operation(uint32_t vaddr_start, uint32_t size)
{
uint32_t mmu_id = 0;
mmu_target_t target = mmu_ll_vaddr_to_target(vaddr_start);
if (target == MMU_TARGET_FLASH0) {
mmu_id = MMU_LL_FLASH_MMU_ID;
} else {
mmu_id = MMU_LL_PSRAM_MMU_ID;
}
uint32_t mmu_id = s_get_mmu_id_from_target(target);
mmu_hal_unmap_region(mmu_id, vaddr_start, size);
}
#else
@ -748,8 +744,12 @@ static bool NOINLINE_ATTR IRAM_ATTR s_vaddr_to_paddr(uint32_t vaddr, esp_paddr_t
{
//we call this for now, but this will be refactored to move out of `spi_flash`
spi_flash_disable_interrupts_caches_and_other_cpu();
//On ESP32, core 1 settings should be the same as the core 0
bool is_mapped = mmu_hal_vaddr_to_paddr(0, vaddr, out_paddr, out_target);
#if SOC_MMU_PER_EXT_MEM_TARGET
if (!is_mapped) {
is_mapped = mmu_hal_vaddr_to_paddr(1, vaddr, out_paddr, out_target);
}
#endif
spi_flash_enable_interrupts_caches_and_other_cpu();
return is_mapped;
@ -776,8 +776,11 @@ static bool NOINLINE_ATTR IRAM_ATTR s_paddr_to_vaddr(esp_paddr_t paddr, mmu_targ
{
//we call this for now, but this will be refactored to move out of `spi_flash`
spi_flash_disable_interrupts_caches_and_other_cpu();
//On ESP32, core 1 settings should be the same as the core 0
bool found = mmu_hal_paddr_to_vaddr(0, paddr, target, type, out_vaddr);
uint32_t mmu_id = 0;
#if SOC_MMU_PER_EXT_MEM_TARGET
mmu_id = s_get_mmu_id_from_target(target);
#endif
bool found = mmu_hal_paddr_to_vaddr(mmu_id, paddr, target, type, out_vaddr);
spi_flash_enable_interrupts_caches_and_other_cpu();
return found;

View File

@ -7,3 +7,10 @@ entries:
if IDF_TARGET_ESP32 = y:
cache_esp32 (noflash)
if SPIRAM_FLASH_LOAD_TO_PSRAM = y:
esp_mmu_map: s_get_bus_mask (noflash)
esp_mmu_map: s_reserve_irom_region (noflash)
esp_mmu_map: s_reserve_drom_region (noflash)
esp_mmu_map: esp_mmu_map_init (noflash)
ext_mem_layout (noflash)

View File

@ -8,7 +8,6 @@ set(includes "include")
set(priv_requires heap spi_flash esp_mm)
if(${target} STREQUAL "esp32")
list(APPEND priv_requires bootloader_support)
# [refactor-todo]: requires "driver" for `spicommon_periph_claim`
list(APPEND priv_requires driver)
endif()
@ -16,14 +15,25 @@ endif()
set(srcs)
if(CONFIG_SPIRAM)
list(APPEND srcs "esp_psram.c"
"mmu_psram_flash.c")
list(APPEND srcs "esp_psram.c")
if(${target} STREQUAL "esp32")
list(APPEND srcs "esp32/esp_psram_extram_cache.c"
"esp32/esp_himem.c")
endif()
if(${target} STREQUAL "esp32s2")
list(APPEND srcs "mmu_psram_flash.c")
endif()
if(${target} STREQUAL "esp32s3")
list(APPEND srcs "mmu_psram_flash.c")
endif()
if(CONFIG_SPIRAM_FLASH_LOAD_TO_PSRAM)
list(APPEND srcs "mmu_psram_flash_v2.c")
endif()
if(CONFIG_SPIRAM_MODE_QUAD)
list(APPEND srcs "${target}/esp_psram_impl_quad.c")
elseif(CONFIG_SPIRAM_MODE_OCT)
@ -37,6 +47,7 @@ endif()
idf_component_register(SRCS ${srcs}
INCLUDE_DIRS ${includes}
REQUIRES bootloader_support
PRIV_REQUIRES ${priv_requires}
LDFRAGMENTS linker.lf)

View File

@ -44,6 +44,40 @@ menu "PSRAM config"
default 100 if SPIRAM_SPEED_100M
default 200 if SPIRAM_SPEED_200M
config SPIRAM_FETCH_INSTRUCTIONS
bool
help
Enabling this option allows moving the application's instruction segment from SPI Flash to
PSRAM
config SPIRAM_RODATA
bool
help
Enabling this option allows moving the application's rodata segment from SPI Flash to
PSRAM
config SPIRAM_XIP_FROM_PSRAM
bool "Enable Executable in place from (XiP) from PSRAM feature"
default n
select SPIRAM_FETCH_INSTRUCTIONS
select SPIRAM_RODATA
select SPIRAM_FLASH_LOAD_TO_PSRAM
help
If enabled, the firmware in flash, including instructions and data, will be moved into PSRAM on
startup, and the firmware code will execute directly from PSRAM.
With this option enabled, code that requires execution during an MSPI1 Flash operation
does not have to be placed in IRAM, so code that needs to run during Flash
operations can continue working normally.
Enabling this option also gives better performance (see the External RAM documentation for more details).
config SPIRAM_FLASH_LOAD_TO_PSRAM
bool
help
This is a helper indicating this condition:
`CONFIG_SPIRAM_XIP_FROM_PSRAM && CONFIG_IDF_TARGET_ESP32P4`
config SPIRAM_ECC_ENABLE
bool "Enable PSRAM ECC"
default n
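As a hedged illustration of what the SPIRAM_XIP_FROM_PSRAM help text above means in practice (the callback below is hypothetical, not part of this commit): with the option enabled, code that may run while MSPI1 is busy with a flash erase/write no longer has to be pinned in IRAM, because the instruction segment executes from PSRAM.

#include "sdkconfig.h"
#include "esp_attr.h"

#if CONFIG_SPIRAM_XIP_FROM_PSRAM
// .text executes from PSRAM, so this can stay in the normal text segment
static void on_tick_during_flash_write(void *arg)
#else
// without XIP from PSRAM, anything that may run during a flash operation must live in IRAM
static void IRAM_ATTR on_tick_during_flash_write(void *arg)
#endif
{
    (void)arg;
    // ... work that may coincide with an MSPI1 flash operation ...
}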

View File

@ -47,25 +47,39 @@ menu "SPI RAM config"
int
default 26
config SPIRAM_XIP_FROM_PSRAM
bool "Enable Executable in place from (XiP) from PSRAM feature"
default n
select SPIRAM_FETCH_INSTRUCTIONS
select SPIRAM_RODATA
help
Helper for selecting both `SPIRAM_FETCH_INSTRUCTIONS` and `SPIRAM_RODATA`
config SPIRAM_FETCH_INSTRUCTIONS
bool "Move Instructions in Flash to PSRAM"
default n
help
If enabled, instructions in flash will be moved into PSRAM on startup.
If SPIRAM_RODATA is also enabled, code that requires execution during an SPI1 Flash operation
can forgo being placed in IRAM, thus optimizing RAM usage (see External RAM documentation
for more details).
If SPIRAM_RODATA is also enabled, code that requires execution during an MSPI1 Flash operation
can forgo being placed in IRAM, so code that needs to run during Flash
operations can continue working normally.
This feature is useful for applications that involve high-throughput peripherals, as it improves
performance during MSPI1 flash operations: PSRAM access is faster than Flash access,
so such code runs faster (see the External RAM documentation for more details).
config SPIRAM_RODATA
bool "Move Read-Only Data in Flash to PSRAM"
default n
help
If enabled, rodata in flash will be moved into PSRAM on startup.
If SPIRAM_FETCH_INSTRUCTIONS is also enabled, code that requires execution during an SPI1 Flash operation
can forgo being placed in IRAM, thus optimizing RAM usage (see External RAM documentation
for more details).
If SPIRAM_FETCH_INSTRUCTIONS is also enabled, code that requires execution during an MSPI1 Flash operation
does not need to be placed in IRAM, so code that needs to run during Flash
operations can continue working normally.
This feature is useful for applications that involve high-throughput peripherals, as it improves
performance during MSPI1 flash operations: PSRAM access is faster than Flash access,
so such code runs faster (see the External RAM documentation for more details).
choice SPIRAM_SPEED
prompt "Set RAM clock speed"

View File

@ -56,24 +56,39 @@ menu "SPI RAM config"
int
default 26
config SPIRAM_XIP_FROM_PSRAM
bool "Enable Executable in place from (XiP) from PSRAM feature"
default n
select SPIRAM_FETCH_INSTRUCTIONS
select SPIRAM_RODATA
help
Helper for selecting both `SPIRAM_FETCH_INSTRUCTIONS` and `SPIRAM_RODATA`
config SPIRAM_FETCH_INSTRUCTIONS
bool "Move Instructions in Flash to PSRAM"
default n
help
If enabled, instructions in flash will be moved into PSRAM on startup.
If SPIRAM_RODATA is also enabled, code that requires execution during an SPI1 Flash operation
can forgo being placed in IRAM, thus optimizing RAM usage (see External RAM documentation
for more details).
If SPIRAM_RODATA is also enabled, code that requires execution during an MSPI1 Flash operation
can forgo being placed in IRAM, so code that needs to run during Flash
operations can continue working normally.
This feature is useful for applications that involve high-throughput peripherals, as it improves
performance during MSPI1 flash operations: PSRAM access is faster than Flash access,
so such code runs faster (see the External RAM documentation for more details).
config SPIRAM_RODATA
bool "Move Read-Only Data in Flash to PSRAM"
default n
help
If enabled, rodata in flash will be moved into PSRAM on startup.
If SPIRAM_FETCH_INSTRUCTIONS is also enabled, code that requires execution during an SPI1 Flash operation
can forgo being placed in IRAM, thus optimizing RAM usage (see External RAM documentation
for more details).
If SPIRAM_FETCH_INSTRUCTIONS is also enabled, code that requires execution during an MSPI1 Flash operation
does not need to be placed in IRAM, so code that needs to run during Flash
operations can continue working normally.
This feature is useful for applications that involve high-throughput peripherals, as it improves
performance during MSPI1 flash operations: PSRAM access is faster than Flash access,
so such code runs faster (see the External RAM documentation for more details).
choice SPIRAM_SPEED
prompt "Set RAM clock speed"

View File

@ -5,7 +5,7 @@
*/
/*----------------------------------------------------------------------------------------------------
* Abstraction layer for PSRAM. PSRAM device related registers and MMU/Cache related code shouls be
* Abstraction layer for PSRAM. PSRAM device related registers and MMU/Cache related code should be
* abstracted to lower layers.
*
* When we add more types of external RAM memory, this can be made into a more intelligent dispatcher.
@ -20,6 +20,7 @@
#include "hal/mmu_hal.h"
#include "hal/mmu_ll.h"
#include "hal/cache_ll.h"
#include "soc/soc_caps.h"
#include "esp_private/esp_psram_io.h"
#include "esp_private/esp_psram_extram.h"
#include "esp_private/mmu_psram_flash.h"
@ -43,6 +44,12 @@
#define PSRAM_MEM_8BIT_ALIGNED 0
#define PSRAM_MEM_32BIT_ALIGNED 1
#if CONFIG_SPIRAM_FLASH_LOAD_TO_PSRAM
#define PSRAM_EARLY_LOGI ESP_DRAM_LOGI
#else
#define PSRAM_EARLY_LOGI ESP_EARLY_LOGI
#endif
#if CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY
extern uint8_t _ext_ram_bss_start;
extern uint8_t _ext_ram_bss_end;
@ -80,7 +87,7 @@ typedef struct {
} psram_ctx_t;
static psram_ctx_t s_psram_ctx;
static const char* TAG = "esp_psram";
static const DRAM_ATTR char TAG[] = "esp_psram";
ESP_SYSTEM_INIT_FN(init_psram, CORE, BIT(0), 103)
{
@ -120,7 +127,7 @@ static void IRAM_ATTR s_mapping(int v_start, int size)
}
#endif //CONFIG_IDF_TARGET_ESP32
esp_err_t esp_psram_init(void)
static esp_err_t s_psram_chip_init(uint32_t *out_available_size)
{
if (s_psram_ctx.is_initialised) {
return ESP_ERR_INVALID_STATE;
@ -140,8 +147,8 @@ esp_err_t esp_psram_init(void)
ret = esp_psram_impl_get_physical_size(&psram_physical_size);
assert(ret == ESP_OK);
ESP_EARLY_LOGI(TAG, "Found %" PRIu32 "MB PSRAM device", psram_physical_size / (1024 * 1024));
ESP_EARLY_LOGI(TAG, "Speed: %dMHz", CONFIG_SPIRAM_SPEED);
PSRAM_EARLY_LOGI(TAG, "Found %" PRIu32 "MB PSRAM device", psram_physical_size / (1024 * 1024));
PSRAM_EARLY_LOGI(TAG, "Speed: %dMHz", CONFIG_SPIRAM_SPEED);
#if CONFIG_IDF_TARGET_ESP32
#if CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE
ESP_EARLY_LOGI(TAG, "PSRAM initialized, cache is in normal (1-core) mode.");
@ -154,7 +161,16 @@ esp_err_t esp_psram_init(void)
ret = esp_psram_impl_get_available_size(&psram_available_size);
assert(ret == ESP_OK);
__attribute__((unused)) uint32_t total_available_size = psram_available_size;
*out_available_size = psram_available_size;
return ESP_OK;
}
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS || CONFIG_SPIRAM_RODATA
static void s_xip_psram_placement(uint32_t *psram_available_size, uint32_t *out_start_page)
{
__attribute__((unused)) uint32_t total_available_size = *psram_available_size;
uint32_t available_size = *psram_available_size;
/**
* `start_page` is the psram physical address in MMU page size.
* MMU page size on ESP32S2 is 64KB
@ -162,22 +178,9 @@ esp_err_t esp_psram_init(void)
*
* Here we plan to copy FLASH instructions to psram physical address 0, which is the No.0 page.
*/
__attribute__((unused)) uint32_t start_page = 0;
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS || CONFIG_SPIRAM_RODATA
uint32_t start_page = 0;
uint32_t used_page = 0;
#endif
//------------------------------------Copy Flash .text to PSRAM-------------------------------------//
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
ret = mmu_config_psram_text_segment(start_page, total_available_size, &used_page);
if (ret != ESP_OK) {
ESP_EARLY_LOGE(TAG, "No enough psram memory for instructon!");
abort();
}
start_page += used_page;
psram_available_size -= MMU_PAGE_TO_BYTES(used_page);
ESP_EARLY_LOGV(TAG, "after copy .text, used page is %" PRIu32 ", start_page is %" PRIu32 ", psram_available_size is %" PRIu32 " B", used_page, start_page, psram_available_size);
#endif //#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
esp_err_t ret = ESP_FAIL;
//------------------------------------Copy Flash .rodata to PSRAM-------------------------------------//
#if CONFIG_SPIRAM_RODATA
@ -187,10 +190,30 @@ esp_err_t esp_psram_init(void)
abort();
}
start_page += used_page;
psram_available_size -= MMU_PAGE_TO_BYTES(used_page);
ESP_EARLY_LOGV(TAG, "after copy .rodata, used page is %" PRIu32 ", start_page is %" PRIu32 ", psram_available_size is %" PRIu32 " B", used_page, start_page, psram_available_size);
available_size -= MMU_PAGE_TO_BYTES(used_page);
ESP_EARLY_LOGV(TAG, "after copy .rodata, used page is %d, start_page is %d, available_size is %d B", used_page, start_page, available_size);
#endif //#if CONFIG_SPIRAM_RODATA
//------------------------------------Copy Flash .text to PSRAM-------------------------------------//
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
ret = mmu_config_psram_text_segment(start_page, total_available_size, &used_page);
if (ret != ESP_OK) {
ESP_EARLY_LOGE(TAG, "No enough psram memory for instructon!");
abort();
}
start_page += used_page;
available_size -= MMU_PAGE_TO_BYTES(used_page);
ESP_EARLY_LOGV(TAG, "after copy .text, used page is %" PRIu32 ", start_page is %" PRIu32 ", psram_available_size is %" PRIu32 " B", used_page, start_page, psram_available_size);
#endif //#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
*psram_available_size = available_size;
*out_start_page = start_page;
}
#endif //#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS || CONFIG_SPIRAM_RODATA
static void s_psram_mapping(uint32_t psram_available_size, uint32_t start_page)
{
esp_err_t ret = ESP_FAIL;
//----------------------------------Map the PSRAM physical range to MMU-----------------------------//
/**
* @note 2
@ -213,7 +236,7 @@ esp_err_t esp_psram_init(void)
s_mapping((int)v_start_8bit_aligned, size_to_map);
#else
uint32_t actual_mapped_len = 0;
#if MMU_LL_MMU_PER_TARGET
#if SOC_MMU_PER_EXT_MEM_TARGET
mmu_hal_map_region(1, MMU_TARGET_PSRAM0, (intptr_t)v_start_8bit_aligned, MMU_PAGE_TO_BYTES(start_page), size_to_map, &actual_mapped_len);
#else
mmu_hal_map_region(0, MMU_TARGET_PSRAM0, (intptr_t)v_start_8bit_aligned, MMU_PAGE_TO_BYTES(start_page), size_to_map, &actual_mapped_len);
@ -280,7 +303,7 @@ esp_err_t esp_psram_init(void)
}
/*------------------------------------------------------------------------------
* After mapping, we DON'T care about the PSRAM PHYSICAL ADDRESSS ANYMORE!
* After mapping, we DON'T care about the PSRAM PHYSICAL ADDRESS ANYMORE!
*----------------------------------------------------------------------------*/
//------------------------------------Configure .bss in PSRAM-------------------------------------//
@ -302,6 +325,31 @@ esp_err_t esp_psram_init(void)
#if CONFIG_IDF_TARGET_ESP32
s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].size -= esp_himem_reserved_area_size() - 1;
#endif
}
esp_err_t esp_psram_init(void)
{
esp_err_t ret = ESP_FAIL;
uint32_t psram_available_size = 0;
ret = s_psram_chip_init(&psram_available_size);
if (ret != ESP_OK) {
return ret;
}
/**
* `start_page` is the psram physical address in MMU page size.
* MMU page size on ESP32S2 is 64KB
* e.g.: psram physical address 16 is in page 0
*
* Here we plan to copy FLASH instructions to psram physical address 0, which is the No.0 page.
*/
__attribute__((unused)) uint32_t start_page = 0;
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS || CONFIG_SPIRAM_RODATA
s_xip_psram_placement(&psram_available_size, &start_page);
#endif
s_psram_mapping(psram_available_size, start_page);
//will be removed, TODO: IDF-6944
#if CONFIG_IDF_TARGET_ESP32

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2021-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -63,6 +63,18 @@ esp_err_t mmu_config_psram_rodata_segment(uint32_t start_page, uint32_t psram_si
/*----------------------------------------------------------------------------
Part 2 APIs (See @Backgrounds on top of this file)
-------------------------------------------------------------------------------*/
#if CONFIG_SPIRAM_FLASH_LOAD_TO_PSRAM
/**
* TODO: IDF-9049
* @brief Convert a vaddr to its paddr, when XIP from PSRAM is enabled
* @note This API only works for the original flash.text and flash.rodata; other vaddrs will return UINT32_MAX
*
* @param[in] ptr Pointer
*
* @return Physical address corresponding to the pointer
*/
size_t mmu_xip_psram_flash_vaddr_to_paddr(const void *ptr);
#else
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
/**
* @brief Init other file requested MMU variables
@ -130,6 +142,7 @@ uint32_t rodata_flash_end_page_get(void);
*/
int rodata_flash2spiram_offset(void);
#endif // #if CONFIG_SPIRAM_RODATA
#endif // #if CONFIG_SPIRAM_FLASH_LOAD_TO_PSRAM
#ifdef __cplusplus
}
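A small hedged sketch of the conversion API declared above; `s_lookup_table` is a hypothetical rodata object and the check mirrors the UINT32_MAX convention documented in the comment:

#include <stdint.h>
#include <stddef.h>
#include "esp_private/mmu_psram_flash.h"

static const uint32_t s_lookup_table[4] = {1, 2, 3, 4};   // hypothetical constant living in .rodata

static void example_lookup(void)
{
    // With XIP from PSRAM the table resides in PSRAM, but this returns its original flash paddr
    size_t paddr = mmu_xip_psram_flash_vaddr_to_paddr(s_lookup_table);
    if (paddr == UINT32_MAX) {
        // the pointer is outside the original flash.text/flash.rodata vaddr ranges
    }
    (void)paddr;
}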

View File

@ -16,3 +16,10 @@ entries:
if SPIRAM_MODE_HEX = y:
esp_psram_impl_ap_hex (noflash)
if SPIRAM_FLASH_LOAD_TO_PSRAM = y:
esp_psram_impl_ap_hex (noflash)
mmu_psram_flash_v2 (noflash)
esp_psram: esp_psram_init (noflash)
esp_psram: s_psram_chip_init (noflash)
esp_psram: s_xip_psram_placement (noflash)

View File

@ -0,0 +1,173 @@
/*
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @brief
* This is for the ESP32-P4 and future chips with a similar architecture.
* XIP from PSRAM is done here by CPU copy; v1 (see mmu_psram_flash.c) is done by Cache copy
*/
#include <sys/param.h>
#include <string.h>
#include "sdkconfig.h"
#include "esp_log.h"
#include "esp_attr.h"
#include "esp_err.h"
#include "soc/ext_mem_defs.h"
#include "hal/mmu_hal.h"
#include "hal/mmu_ll.h"
#include "hal/cache_hal.h"
#include "esp_private/mmu_psram_flash.h"
#include "esp_mmu_map.h"
#include "esp_heap_caps.h"
#include "esp_private/image_process.h"
#define ALIGN_UP_BY(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
#define ALIGN_DOWN_BY(num, align) ((num) & (~((align) - 1)))
extern int _instruction_reserved_start;
extern int _instruction_reserved_end;
extern int _rodata_reserved_start;
extern int _rodata_reserved_end;
const static char *TAG = "mmu_psram";
static uint32_t s_irom_vaddr_start;
static uint32_t s_drom_vaddr_start;
static size_t s_irom_size;
static size_t s_drom_size;
static int s_irom_paddr_offset;
static int s_drom_paddr_offset;
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS || CONFIG_SPIRAM_RODATA
static uint32_t s_do_load_from_flash(uint32_t flash_paddr_start, uint32_t size, uint32_t target_vaddr_start, uint32_t target_paddr_start)
{
uint32_t flash_end_page_vaddr = SOC_DRAM_FLASH_ADDRESS_HIGH - CONFIG_MMU_PAGE_SIZE;
ESP_EARLY_LOGV(TAG, "flash_paddr_start: 0x%"PRIx32", flash_end_page_vaddr: 0x%"PRIx32", size: 0x%"PRIx32", target_vaddr_start: 0x%"PRIx32, flash_paddr_start, flash_end_page_vaddr, size, target_vaddr_start);
assert((flash_paddr_start % CONFIG_MMU_PAGE_SIZE) == 0);
assert((flash_end_page_vaddr % CONFIG_MMU_PAGE_SIZE) == 0);
assert((target_vaddr_start % CONFIG_MMU_PAGE_SIZE) == 0);
uint32_t mapped_size = 0;
while (mapped_size < size) {
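/* Each iteration handles one MMU page:
 *  1. map the next PSRAM page at the segment's final vaddr (target_vaddr_start),
 *  2. map the matching flash page at a scratch vaddr at the end of the flash region (flash_end_page_vaddr),
 *  3. invalidate both cache ranges so no stale lines are used,
 *  4. copy the page with the CPU, then advance by one page. */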
uint32_t actual_mapped_len = 0;
mmu_hal_map_region(MMU_LL_PSRAM_MMU_ID, MMU_TARGET_PSRAM0, target_vaddr_start, target_paddr_start + mapped_size, CONFIG_MMU_PAGE_SIZE, &actual_mapped_len);
assert(actual_mapped_len == CONFIG_MMU_PAGE_SIZE);
mmu_hal_map_region(MMU_LL_FLASH_MMU_ID, MMU_TARGET_FLASH0, flash_end_page_vaddr, flash_paddr_start + mapped_size, CONFIG_MMU_PAGE_SIZE, &actual_mapped_len);
assert(actual_mapped_len == CONFIG_MMU_PAGE_SIZE);
cache_hal_invalidate_addr(target_vaddr_start, CONFIG_MMU_PAGE_SIZE);
cache_hal_invalidate_addr(flash_end_page_vaddr, CONFIG_MMU_PAGE_SIZE);
memcpy((void *)target_vaddr_start, (void *)flash_end_page_vaddr, CONFIG_MMU_PAGE_SIZE);
ESP_EARLY_LOGV(TAG, "target_vaddr_start: 0x%"PRIx32, target_vaddr_start);
mapped_size += CONFIG_MMU_PAGE_SIZE;
target_vaddr_start += CONFIG_MMU_PAGE_SIZE;
}
ESP_EARLY_LOGV(TAG, "mapped_size: 0x%"PRIx32, mapped_size);
assert(mapped_size == ALIGN_UP_BY(size, CONFIG_MMU_PAGE_SIZE));
return mapped_size;
}
#endif //#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS || CONFIG_SPIRAM_RODATA
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
esp_err_t mmu_config_psram_text_segment(uint32_t start_page, uint32_t psram_size, uint32_t *out_page)
{
size_t irom_size = ALIGN_UP_BY((uint32_t)&_instruction_reserved_end, CONFIG_MMU_PAGE_SIZE) - ALIGN_DOWN_BY((uint32_t)&_instruction_reserved_start, CONFIG_MMU_PAGE_SIZE);
s_irom_size = irom_size;
uint32_t flash_drom_paddr_start = 0;
uint32_t flash_irom_paddr_start = 0;
image_process_get_flash_segments_info(&flash_drom_paddr_start, &flash_irom_paddr_start);
flash_irom_paddr_start = ALIGN_DOWN_BY(flash_irom_paddr_start, CONFIG_MMU_PAGE_SIZE);
ESP_EARLY_LOGI(TAG, "flash_irom_paddr_start: 0x%x", flash_irom_paddr_start);
if ((MMU_PAGE_TO_BYTES(start_page) + irom_size) > psram_size) {
ESP_EARLY_LOGE(TAG, "PSRAM space not enough for the Flash instructions, need %"PRId32" B, from %"PRId32" B to %"PRId32" B", irom_size, MMU_PAGE_TO_BYTES(start_page), MMU_PAGE_TO_BYTES(start_page) + irom_size);
return ESP_ERR_NO_MEM;
}
uint32_t irom_load_addr_aligned = ALIGN_DOWN_BY((uint32_t)&_instruction_reserved_start, CONFIG_MMU_PAGE_SIZE);
s_irom_paddr_offset = flash_irom_paddr_start - MMU_PAGE_TO_BYTES(start_page);
s_irom_vaddr_start = irom_load_addr_aligned;
ESP_EARLY_LOGV(TAG, "flash_irom_paddr_start: 0x%"PRIx32", MMU_PAGE_TO_BYTES(start_page): 0x%"PRIx32", s_irom_paddr_offset: 0x%"PRIx32", s_irom_vaddr_start: 0x%"PRIx32, flash_irom_paddr_start, MMU_PAGE_TO_BYTES(start_page), s_irom_paddr_offset, s_irom_vaddr_start);
uint32_t mapped_size = 0;
mapped_size = s_do_load_from_flash(flash_irom_paddr_start, irom_size, irom_load_addr_aligned, MMU_PAGE_TO_BYTES(start_page));
cache_hal_writeback_addr(irom_load_addr_aligned, irom_size);
ESP_EARLY_LOGV(TAG, "after mapping text, starting from paddr=0x%08"PRIx32" and vaddr=0x%08"PRIx32", 0x%"PRIx32" bytes are mapped", MMU_PAGE_TO_BYTES(start_page), irom_load_addr_aligned, mapped_size);
start_page += BYTES_TO_MMU_PAGE(irom_size);
*out_page = start_page;
return ESP_OK;
}
#endif //#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
#if CONFIG_SPIRAM_RODATA
esp_err_t mmu_config_psram_rodata_segment(uint32_t start_page, uint32_t psram_size, uint32_t *out_page)
{
size_t drom_size = ALIGN_UP_BY((uint32_t)&_rodata_reserved_end, CONFIG_MMU_PAGE_SIZE) - ALIGN_DOWN_BY((uint32_t)&_rodata_reserved_start, CONFIG_MMU_PAGE_SIZE);
s_drom_size = drom_size;
uint32_t flash_drom_paddr_start = 0;
uint32_t flash_irom_paddr_start = 0;
image_process_get_flash_segments_info(&flash_drom_paddr_start, &flash_irom_paddr_start);
flash_drom_paddr_start = ALIGN_DOWN_BY(flash_drom_paddr_start, CONFIG_MMU_PAGE_SIZE);
ESP_EARLY_LOGI(TAG, "flash_drom_paddr_start: 0x%x", flash_drom_paddr_start);
if ((MMU_PAGE_TO_BYTES(start_page) + drom_size) > psram_size) {
ESP_EARLY_LOGE(TAG, "PSRAM space not enough for the Flash rodata, need %"PRId32" B, from %"PRId32" B to %"PRId32" B", drom_size, MMU_PAGE_TO_BYTES(start_page), MMU_PAGE_TO_BYTES(start_page) + drom_size);
return ESP_ERR_NO_MEM;
}
uint32_t drom_load_addr_aligned = ALIGN_DOWN_BY((uint32_t)&_rodata_reserved_start, CONFIG_MMU_PAGE_SIZE);
s_drom_paddr_offset = flash_drom_paddr_start - MMU_PAGE_TO_BYTES(start_page);
s_drom_vaddr_start = drom_load_addr_aligned;
ESP_EARLY_LOGV(TAG, "flash_drom_paddr_start: 0x%"PRIx32", MMU_PAGE_TO_BYTES(start_page): 0x%"PRIx32", s_drom_paddr_offset: 0x%"PRIx32", s_drom_vaddr_start: 0x%"PRIx32, flash_drom_paddr_start, MMU_PAGE_TO_BYTES(start_page), s_drom_paddr_offset, s_drom_vaddr_start);
uint32_t mapped_size = 0;
mapped_size = s_do_load_from_flash(flash_drom_paddr_start, drom_size, drom_load_addr_aligned, MMU_PAGE_TO_BYTES(start_page));
cache_hal_writeback_addr(drom_load_addr_aligned, drom_size);
ESP_EARLY_LOGV(TAG, "after mapping rodata, starting from paddr=0x%08"PRIx32" and vaddr=0x%08"PRIx32", 0x%"PRIx32" bytes are mapped", MMU_PAGE_TO_BYTES(start_page), drom_load_addr_aligned, mapped_size);
start_page += BYTES_TO_MMU_PAGE(drom_size);
*out_page = start_page;
return ESP_OK;
}
#endif //#if CONFIG_SPIRAM_RODATA
size_t mmu_xip_psram_flash_vaddr_to_paddr(const void *ptr)
{
if (ptr == NULL) {
return UINT32_MAX;
}
size_t paddr_on_flash = 0;
uint32_t psram_paddr = 0;
mmu_target_t target = MMU_TARGET_FLASH0;
if ((uint32_t)ptr >= s_irom_vaddr_start && (uint32_t)ptr < (s_irom_vaddr_start + s_irom_size)) {
bool is_mapped = mmu_hal_vaddr_to_paddr(MMU_LL_PSRAM_MMU_ID, (uint32_t)ptr, &psram_paddr, &target);
assert(is_mapped);
assert(target == MMU_TARGET_PSRAM0);
paddr_on_flash = psram_paddr + s_irom_paddr_offset;
} else if ((uint32_t)ptr >= s_drom_vaddr_start && (uint32_t)ptr < (s_drom_vaddr_start + s_drom_size)) {
bool is_mapped = mmu_hal_vaddr_to_paddr(MMU_LL_PSRAM_MMU_ID, (uint32_t)ptr, &psram_paddr, &target);
assert(is_mapped);
assert(target == MMU_TARGET_PSRAM0);
paddr_on_flash = psram_paddr + s_drom_paddr_offset;
} else {
paddr_on_flash = UINT32_MAX;
}
return paddr_on_flash;
}

View File

@ -1,6 +1,5 @@
# SPDX-FileCopyrightText: 2021-2024 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: CC0-1.0
import pytest
from pytest_embedded import Dut
@ -81,6 +80,7 @@ def test_psram_esp32s3_octal(dut: Dut) -> None:
'config',
[
'esp32p4_200m_release',
'esp32p4_xip'
],
indirect=True,
)

View File

@ -0,0 +1,14 @@
CONFIG_IDF_TARGET="esp32p4"
CONFIG_COMPILER_OPTIMIZATION_SIZE=y
CONFIG_BOOTLOADER_COMPILER_OPTIMIZATION_SIZE=y
CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_SILENT=y
CONFIG_SPIRAM=y
CONFIG_IDF_EXPERIMENTAL_FEATURES=y
CONFIG_SPIRAM_SPEED_200M=y
CONFIG_SPIRAM_XIP_FROM_PSRAM=y
CONFIG_PARTITION_TABLE_CUSTOM=y
CONFIG_PARTITION_TABLE_CUSTOM_FILENAME="partitions.csv"
CONFIG_PARTITION_TABLE_FILENAME="partitions.csv"

View File

@ -47,6 +47,10 @@ MEMORY
tcm_idram_seg (RX) : org = 0x30100000, len = 0x2000
#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
/* PSRAM mapped instruction data */
irom_seg (RX) : org = 0x48000020, len = IDROM_SEG_SIZE - 0x20
#else
/* Flash mapped instruction data */
irom_seg (RX) : org = 0x40000020, len = IDROM_SEG_SIZE - 0x20
@ -57,6 +61,7 @@ MEMORY
* header. Setting this offset makes it simple to meet the flash cache MMU's
* constraint that (paddr % 64KB == vaddr % 64KB).)
*/
#endif // CONFIG_SPIRAM_FETCH_INSTRUCTIONS
#endif // CONFIG_APP_BUILD_USE_FLASH_SECTIONS
/**
@ -68,8 +73,13 @@ MEMORY
sram_high (RW) : org = SRAM_HIGH_START, len = SRAM_HIGH_SIZE
#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
#if CONFIG_SPIRAM_RODATA
/* PSRAM mapped constant data */
drom_seg (R) : org = 0x48000020, len = IDROM_SEG_SIZE - 0x20
#else
/* Flash mapped constant data */
drom_seg (R) : org = 0x40000020, len = IDROM_SEG_SIZE - 0x20
#endif // CONFIG_SPIRAM_RODATA
/* (See irom_seg for meaning of 0x20 offset in the above.) */
#endif // CONFIG_APP_BUILD_USE_FLASH_SECTIONS

View File

@ -30,6 +30,9 @@ entries:
usb_console:esp_usb_console_before_restart (noflash)
usb_console:esp_usb_console_on_restart_timeout (noflash)
if APP_BUILD_TYPE_RAM = n:
image_process (noflash)
[mapping:vfs_cdcacm]
archive: libvfs.a
entries:

View File

@ -8,6 +8,10 @@ target_include_directories(${COMPONENT_LIB} PRIVATE ${INCLUDE_FILES} include/pri
set(srcs "cpu_start.c" "panic_handler.c" "esp_system_chip.c")
if(NOT CONFIG_APP_BUILD_TYPE_PURE_RAM_APP)
list(APPEND srcs "image_process.c")
endif()
if(CONFIG_SOC_BOD_SUPPORTED)
list(APPEND srcs "brownout.c")
endif()

View File

@ -82,6 +82,7 @@
#endif // SOC_INT_CLIC_SUPPORTED
#include "esp_private/esp_mmu_map_private.h"
#include "esp_private/image_process.h"
#if CONFIG_SPIRAM
#include "esp_psram.h"
#include "esp_private/mmu_psram_flash.h"
@ -440,17 +441,10 @@ void IRAM_ATTR call_start_cpu0(void)
}
#endif
#if !CONFIG_APP_BUILD_TYPE_PURE_RAM_APP
#if CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE
ESP_EARLY_LOGI(TAG, "Unicore app");
#else
ESP_EARLY_LOGI(TAG, "Multicore app");
#if !SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
#if !CONFIG_APP_BUILD_TYPE_PURE_RAM_APP && !CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE && !SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
// It helps to fix missed cache settings for other cores. It happens when bootloader is unicore.
do_multicore_settings();
#endif // !SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
#endif
#endif // !CONFIG_APP_BUILD_TYPE_PURE_RAM_APP
// When the APP is loaded into ram for execution, some hardware initialization behaviors
// in the bootloader are still necessary
@ -538,7 +532,7 @@ void IRAM_ATTR call_start_cpu0(void)
#if CONFIG_ESPTOOLPY_OCT_FLASH && !CONFIG_ESPTOOLPY_FLASH_MODE_AUTO_DETECT
bool efuse_opflash_en = efuse_ll_get_flash_type();
if (!efuse_opflash_en) {
ESP_EARLY_LOGE(TAG, "Octal Flash option selected, but EFUSE not configured!");
ESP_DRAM_LOGE(TAG, "Octal Flash option selected, but EFUSE not configured!");
abort();
}
#endif
@ -568,23 +562,44 @@ void IRAM_ATTR call_start_cpu0(void)
esp_mmu_map_init();
#if !CONFIG_APP_BUILD_TYPE_ELF_RAM
#if CONFIG_SPIRAM_FLASH_LOAD_TO_PSRAM
ESP_ERROR_CHECK(image_process());
#endif
#endif
#if CONFIG_SPIRAM_BOOT_INIT
if (esp_psram_init() != ESP_OK) {
#if CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY
ESP_EARLY_LOGE(TAG, "Failed to init external RAM, needed for external .bss segment");
ESP_DRAM_LOGE(TAG, "Failed to init external RAM, needed for external .bss segment");
abort();
#endif
#if CONFIG_SPIRAM_IGNORE_NOTFOUND
ESP_EARLY_LOGI(TAG, "Failed to init external RAM; continuing without it.");
#else
ESP_EARLY_LOGE(TAG, "Failed to init external RAM!");
ESP_DRAM_LOGE(TAG, "Failed to init external RAM!");
abort();
#endif
}
#endif
//----------------------------------Separator-----------------------------//
/**
* @note
* After this stage, you can access the flash through the cache, i.e. run code that is not placed in IRAM
* or print strings that are located in flash
*/
esp_mspi_pin_reserve();
#endif // !CONFIG_APP_BUILD_TYPE_PURE_RAM_APP
#if CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE
ESP_EARLY_LOGI(TAG, "Unicore app");
#else
ESP_EARLY_LOGI(TAG, "Multicore app");
#endif
bootloader_init_mem();
#if !CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE

View File

@ -0,0 +1,226 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "esp_types.h"
#include "sdkconfig.h"
#include "esp_err.h"
#include "esp_check.h"
#include "esp_log.h"
#include "esp_check.h"
#include "esp_image_format.h"
#include "esp_app_format.h"
#include "esp_flash_partitions.h"
#include "hal/cache_hal.h"
#include "hal/cache_ll.h"
#include "hal/mmu_hal.h"
#include "hal/mmu_ll.h"
#include "soc/soc.h"
#include "soc/soc_caps.h"
#include "soc/ext_mem_defs.h"
#include "esp_private/image_process.h"
#include "esp_private/esp_cache_esp32_private.h"
/**
* The ESP32 bootloader is not large enough, so this feature is not enabled for it for now
*/
#define IMAGE_PROCESS_SUPPORTED_TARGETS (!CONFIG_IDF_TARGET_ESP32)
#if CONFIG_IDF_TARGET_ESP32
#define MMAP_MMU_SIZE 0x320000
#elif CONFIG_IDF_TARGET_ESP32S2
#define MMAP_MMU_SIZE (SOC_DRAM0_CACHE_ADDRESS_HIGH - SOC_DRAM0_CACHE_ADDRESS_LOW)
#else
#define MMAP_MMU_SIZE (SOC_DRAM_FLASH_ADDRESS_HIGH - SOC_DRAM_FLASH_ADDRESS_LOW)
#endif
#if CONFIG_IDF_TARGET_ESP32
#define FLASH_READ_VADDR (SOC_DROM_LOW + MMAP_MMU_SIZE)
#else
#define FLASH_READ_VADDR (SOC_DROM_LOW + MMAP_MMU_SIZE - CONFIG_MMU_PAGE_SIZE)
#endif
#define MMU_FLASH_MASK (~(CONFIG_MMU_PAGE_SIZE - 1))
const static char *TAG = "image_process";
static uint32_t s_current_read_mapping = UINT32_MAX;
static uint32_t s_flash_drom_paddr_start = 0;
static uint32_t s_flash_irom_paddr_start = 0;
static esp_err_t process_segments(esp_image_metadata_t *data);
static image_process_driver_t s_image_process_driver = {
process_segments,
};
static esp_err_t flash_read(size_t src_addr, void *dest, size_t size)
{
if (src_addr & 3) {
ESP_EARLY_LOGE(TAG, "flash_read src_addr 0x%x not 4-byte aligned", src_addr);
return ESP_ERR_INVALID_ARG;
}
if (size & 3) {
ESP_EARLY_LOGE(TAG, "flash_read size 0x%x not 4-byte aligned", size);
return ESP_ERR_INVALID_ARG;
}
if ((intptr_t)dest & 3) {
ESP_EARLY_LOGE(TAG, "flash_read dest 0x%x not 4-byte aligned", (intptr_t)dest);
return ESP_ERR_INVALID_ARG;
}
uint32_t *dest_words = (uint32_t *)dest;
for (size_t word = 0; word < size / 4; word++) {
uint32_t word_src = src_addr + word * 4; /* Read this offset from flash */
uint32_t map_at = word_src & MMU_FLASH_MASK; /* Map this 64KB block from flash */
uint32_t *map_ptr;
/* Move the 64KB mmu mapping window to fit map_at */
if (map_at != s_current_read_mapping) {
cache_hal_suspend(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL);
uint32_t actual_mapped_len = 0;
mmu_hal_map_region(0, MMU_TARGET_FLASH0, FLASH_READ_VADDR, map_at, CONFIG_MMU_PAGE_SIZE - 1, &actual_mapped_len);
s_current_read_mapping = map_at;
ESP_EARLY_LOGD(TAG, "starting from paddr=0x%" PRIx32 " and vaddr=0x%" PRIx32 ", 0x%" PRIx32 " bytes are mapped", map_at, FLASH_READ_VADDR, actual_mapped_len);
#if CONFIG_IDF_TARGET_ESP32
cache_sync();
#else
cache_hal_invalidate_addr(FLASH_READ_VADDR, actual_mapped_len);
#endif
cache_hal_resume(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL);
}
map_ptr = (uint32_t *)(FLASH_READ_VADDR + (word_src - map_at));
dest_words[word] = *map_ptr;
}
return ESP_OK;
}
#if IMAGE_PROCESS_SUPPORTED_TARGETS
static esp_err_t process_image_header(esp_image_metadata_t *data, uint32_t part_offset)
{
bzero(data, sizeof(esp_image_metadata_t));
data->start_addr = part_offset;
ESP_RETURN_ON_ERROR_ISR(flash_read(data->start_addr, &data->image, sizeof(esp_image_header_t)), TAG, "failed to read image");
data->image_len = sizeof(esp_image_header_t);
ESP_EARLY_LOGD(TAG, "reading image header=0x%"PRIx32" image_len=0x%"PRIx32" image.segment_count=0x%x", data->start_addr, data->image_len, data->image.segment_count);
return ESP_OK;
}
#endif
static esp_err_t process_segment(int index, uint32_t flash_addr, esp_image_segment_header_t *header, esp_image_metadata_t *metadata, int *cnt)
{
/* read segment header */
ESP_RETURN_ON_ERROR_ISR(flash_read(flash_addr, header, sizeof(esp_image_segment_header_t)), TAG, "failed to do flash read");
intptr_t load_addr = header->load_addr;
uint32_t data_len = header->data_len;
uint32_t data_addr = flash_addr + sizeof(esp_image_segment_header_t);
#if SOC_MMU_DI_VADDR_SHARED
#if CONFIG_SPIRAM_FLASH_LOAD_TO_PSRAM
if (load_addr >= SOC_DRAM_PSRAM_ADDRESS_LOW && load_addr < SOC_DRAM_PSRAM_ADDRESS_HIGH) {
if (*cnt == 0) {
s_flash_drom_paddr_start = data_addr;
} else if (*cnt == 1) {
s_flash_irom_paddr_start = data_addr;
}
(*cnt)++;
}
#else
if (load_addr >= SOC_DRAM_FLASH_ADDRESS_LOW && load_addr < SOC_DRAM_FLASH_ADDRESS_HIGH) {
if (*cnt == 0) {
s_flash_drom_paddr_start = data_addr;
} else if (*cnt == 1) {
s_flash_irom_paddr_start = data_addr;
}
(*cnt)++;
}
#endif
#else
if (load_addr >= SOC_IRAM_FLASH_ADDRESS_LOW && load_addr < SOC_IRAM_FLASH_ADDRESS_HIGH) {
s_flash_drom_paddr_start = data_addr;
(*cnt)++;
}
if (load_addr >= SOC_DRAM_FLASH_ADDRESS_LOW && load_addr < SOC_DRAM_FLASH_ADDRESS_HIGH) {
s_flash_irom_paddr_start = data_addr;
(*cnt)++;
}
#endif
ESP_EARLY_LOGD(TAG, "load_addr: %x, data_len: %x, flash_addr: 0x%x, data_addr: %x", load_addr, data_len, flash_addr, data_addr);
if (data_len % 4 != 0) {
ESP_RETURN_ON_FALSE_ISR(false, ESP_ERR_INVALID_STATE, TAG, "unaligned segment length 0x%"PRIx32, data_len);
}
return ESP_OK;
}
static esp_err_t process_segments(esp_image_metadata_t *data)
{
uint32_t start_segments = data->start_addr + data->image_len;
uint32_t next_addr = start_segments;
int cnt = 0;
for (int i = 0; i < data->image.segment_count; i++) {
esp_image_segment_header_t *header = &data->segments[i];
ESP_EARLY_LOGD(TAG, "loading segment header %d at offset 0x%"PRIx32, i, next_addr);
ESP_RETURN_ON_ERROR_ISR(process_segment(i, next_addr, header, data, &cnt), TAG, "failed to process segment");
next_addr += sizeof(esp_image_segment_header_t);
data->segment_data[i] = next_addr;
next_addr += header->data_len;
}
assert(cnt == 2);
uint32_t end_addr = next_addr;
if (end_addr < data->start_addr) {
return ESP_FAIL;
}
data->image_len += end_addr - start_segments;
return ESP_OK;
}
void image_process_get_flash_segments_info(uint32_t *out_drom_paddr_start, uint32_t *out_irom_paddr_start)
{
assert(out_drom_paddr_start && out_irom_paddr_start);
*out_drom_paddr_start = s_flash_drom_paddr_start;
*out_irom_paddr_start = s_flash_irom_paddr_start;
}
esp_err_t image_process(void)
{
#if IMAGE_PROCESS_SUPPORTED_TARGETS
esp_err_t ret = ESP_FAIL;
/**
* We use the MMU_LL_END_DROM_ENTRY_ID mmu entry as a mapped page for the app to find the boot partition.
* This relies on the 2nd stage bootloader setting that entry.
*/
uint32_t paddr_base = mmu_ll_entry_id_to_paddr_base(0, MMU_LL_END_DROM_ENTRY_ID);
uint32_t part_offset = paddr_base;
esp_image_metadata_t image_data = {0};
ret = process_image_header(&image_data, part_offset);
if (ret != ESP_OK) {
ESP_EARLY_LOGE(TAG, "failed to process image header");
abort();
}
ret = s_image_process_driver.process_segments(&image_data);
if (ret != ESP_OK) {
ESP_EARLY_LOGE(TAG, "failed to process segments");
return ESP_FAIL;
}
mmu_ll_set_entry_invalid(0, MMU_LL_END_DROM_ENTRY_ID);
#else
(void)s_image_process_driver;
#endif
return ESP_OK;
}

View File

@ -0,0 +1,64 @@
/*
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include "esp_err.h"
#include "esp_image_format.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Image process driver
*/
typedef struct image_process_driver_s image_process_driver_t;
/**
* @brief Image process driver
*/
struct image_process_driver_s {
/**
* @brief Process segments
*
* @param[in] data image meta data
*
* @return
* - ESP_OK
* - ESP_ERR_INVALID_ARG: invalid argument
* - ESP_ERR_INVALID_STATE: invalid state
*/
esp_err_t (*process_segments)(esp_image_metadata_t *data);
};
/**
* @brief Image process flow
* @note This API first reads the image header, then processes the segments described by it.
* Additional image-processing steps can be inserted into this flow by registering
* more function pointers in `image_process_driver_t`.
*
* @return
* - ESP_OK
* - ESP_FAIL: image process flow fails
*/
esp_err_t image_process(void);
/**
* @brief Get flash segment info; only valid after image_process() has been called
*
* @param[out] out_drom_paddr_start drom paddr start
* @param[out] out_irom_paddr_start irom paddr start
*/
void image_process_get_flash_segments_info(uint32_t *out_drom_paddr_start, uint32_t *out_irom_paddr_start);
#ifdef __cplusplus
}
#endif
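A minimal sketch of how this header is consumed elsewhere in this commit (cpu_start.c calls image_process() early in startup, and mmu_psram_flash_v2.c then queries the recorded segment paddrs); the wrapper function name is illustrative only:

#include <stdint.h>
#include "esp_err.h"
#include "esp_private/image_process.h"

static void example_image_process_step(void)
{
    ESP_ERROR_CHECK(image_process());   // scan the image header and segments once, before PSRAM init

    uint32_t drom_paddr = 0;
    uint32_t irom_paddr = 0;
    image_process_get_flash_segments_info(&drom_paddr, &irom_paddr);
    // drom_paddr / irom_paddr are the flash paddrs of .rodata / .text that get copied into PSRAM
}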

View File

@ -20,6 +20,9 @@ extern "C" {
#endif
#define MMU_LL_PSRAM_ENTRY_START_ID 1152
#define MMU_LL_END_DROM_ENTRY_VADDR (SOC_DRAM_FLASH_ADDRESS_HIGH - SOC_MMU_PAGE_SIZE)
#define MMU_LL_END_DROM_ENTRY_ID (64 - 1)
/**
* Convert MMU virtual address to linear address
@ -302,7 +305,7 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return Ture for MMU entry is valid; False for invalid
* @return True for MMU entry is valid; False for invalid
*/
static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id)
{

View File

@ -19,6 +19,9 @@
extern "C" {
#endif
#define MMU_LL_END_DROM_ENTRY_VADDR (SOC_DRAM_FLASH_ADDRESS_HIGH - SOC_MMU_PAGE_SIZE)
#define MMU_LL_END_DROM_ENTRY_ID (SOC_MMU_ENTRY_NUM - 1)
/**
* Convert MMU virtual address to linear address
*
@ -263,7 +266,7 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return Ture for MMU entry is valid; False for invalid
* @return True for MMU entry is valid; False for invalid
*/
static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id)
{

View File

@ -19,6 +19,8 @@
extern "C" {
#endif
#define MMU_LL_END_DROM_ENTRY_VADDR (SOC_DRAM_FLASH_ADDRESS_HIGH - 0x10000)
#define MMU_LL_END_DROM_ENTRY_ID (SOC_MMU_ENTRY_NUM - 1)
/**
* Convert MMU virtual address to linear address
*
@ -230,7 +232,7 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return Ture for MMU entry is valid; False for invalid
* @return True for MMU entry is valid; False for invalid
*/
static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id)
{

View File

@ -24,6 +24,9 @@
extern "C" {
#endif
#define MMU_LL_END_DROM_ENTRY_VADDR (SOC_DRAM_FLASH_ADDRESS_HIGH - SOC_MMU_PAGE_SIZE)
#define MMU_LL_END_DROM_ENTRY_ID (SOC_MMU_ENTRY_NUM - 1)
/**
* Convert MMU virtual address to linear address
*
@ -284,7 +287,7 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return Ture for MMU entry is valid; False for invalid
* @return True for MMU entry is valid; False for invalid
*/
static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id)
{

View File

@ -19,6 +19,9 @@
extern "C" {
#endif
#define MMU_LL_END_DROM_ENTRY_VADDR (SOC_DRAM_FLASH_ADDRESS_HIGH - SOC_MMU_PAGE_SIZE)
#define MMU_LL_END_DROM_ENTRY_ID (SOC_MMU_ENTRY_NUM - 1)
/**
* Convert MMU virtual address to linear address
*
@ -275,7 +278,7 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return Ture for MMU entry is valid; False for invalid
* @return True for MMU entry is valid; False for invalid
*/
static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id)
{

View File

@ -20,6 +20,9 @@
extern "C" {
#endif
#define MMU_LL_END_DROM_ENTRY_VADDR (SOC_DRAM_FLASH_ADDRESS_HIGH - SOC_MMU_PAGE_SIZE)
#define MMU_LL_END_DROM_ENTRY_ID (SOC_MMU_ENTRY_NUM - 1)
/**
* Convert MMU virtual address to linear address
*
@ -277,7 +280,7 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return Ture for MMU entry is valid; False for invalid
* @return True for MMU entry is valid; False for invalid
*/
static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id)
{

View File

@ -19,6 +19,8 @@
extern "C" {
#endif
#define MMU_LL_END_DROM_ENTRY_VADDR (SOC_DRAM_FLASH_ADDRESS_HIGH - SOC_MMU_PAGE_SIZE)
#define MMU_LL_END_DROM_ENTRY_ID (SOC_MMU_ENTRY_NUM - 1)
/**
* Convert MMU virtual address to linear address
@ -282,7 +284,7 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return Ture for MMU entry is valid; False for invalid
* @return True for MMU entry is valid; False for invalid
*/
static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id)
{

View File

@ -4,6 +4,7 @@
* SPDX-License-Identifier: Apache-2.0
*/
#include "esp_attr.h"
#include "hal/clk_tree_hal.h"
#include "hal/clk_tree_ll.h"
#include "hal/assert.h"
@ -70,7 +71,7 @@ uint32_t clk_hal_lp_slow_get_freq_hz(void)
}
}
uint32_t clk_hal_xtal_get_freq_mhz(void)
IRAM_ATTR uint32_t clk_hal_xtal_get_freq_mhz(void)
{
uint32_t freq = clk_ll_xtal_load_freq_mhz();
if (freq == 0) {

View File

@ -42,17 +42,6 @@ extern "C" {
#define LDO_LL_EXT_LDO_MUL_VOL_BASE 1000
#define LDO_LL_EXT_LDO_MUL_VOL_STEP 250
/**
* Trick to be adapted to the LDO register structure
*
* In pmu_ext_ldo_info_t ext_ldo[6] registers:
* - ext_ldo[0] is LDO1
* - ext_ldo[3] is LDO2
* - ext_ldo[1] is LDO3
* - ext_ldo[4] is LDO4
*/
#define LDO_ID2INDEX(id) (uint8_t[]){0,3,1,4}[id]
/**
* LDO ID to real unit ID
*/
@ -79,8 +68,8 @@ __attribute__((always_inline))
static inline void ldo_ll_enable(int ldo_id, bool enable)
{
HAL_ASSERT(ldo_id < LDO_LL_UNIT_NUM);
PMU.ext_ldo[LDO_ID2INDEX(ldo_id)].pmu_ext_ldo.xpd = enable;
uint8_t index_array[LDO_LL_UNIT_NUM] = {0,3,1,4};
PMU.ext_ldo[index_array[ldo_id]].pmu_ext_ldo.xpd = enable;
}
/**
@ -138,11 +127,12 @@ static inline void ldo_ll_set_output_voltage_mv(int ldo_id, int voltage_mv)
* - 0: efuse
* - 1: tieh_sel
*/
PMU.ext_ldo[LDO_ID2INDEX(ldo_id)].pmu_ext_ldo.tieh_sel = 0;
PMU.ext_ldo[LDO_ID2INDEX(ldo_id)].pmu_ext_ldo.tieh = 0;
PMU.ext_ldo[LDO_ID2INDEX(ldo_id)].pmu_ext_ldo.force_tieh_sel = 1;
PMU.ext_ldo[LDO_ID2INDEX(ldo_id)].pmu_ext_ldo_ana.dref = dref;
PMU.ext_ldo[LDO_ID2INDEX(ldo_id)].pmu_ext_ldo_ana.mul = mul;
uint8_t index_array[LDO_LL_UNIT_NUM] = {0,3,1,4};
PMU.ext_ldo[index_array[ldo_id]].pmu_ext_ldo.tieh_sel = 0;
PMU.ext_ldo[index_array[ldo_id]].pmu_ext_ldo.tieh = 0;
PMU.ext_ldo[index_array[ldo_id]].pmu_ext_ldo.force_tieh_sel = 1;
PMU.ext_ldo[index_array[ldo_id]].pmu_ext_ldo_ana.dref = dref;
PMU.ext_ldo[index_array[ldo_id]].pmu_ext_ldo_ana.mul = mul;
}
#ifdef __cplusplus

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -20,11 +20,12 @@
extern "C" {
#endif
///< MMU is per target
#define MMU_LL_MMU_PER_TARGET 1
#define MMU_LL_FLASH_MMU_ID 0
#define MMU_LL_PSRAM_MMU_ID 1
#define MMU_LL_FLASH_VADDR_TO_PSRAM_VADDR(flash_vaddr) ((flash_vaddr) + SOC_IRAM_FLASH_PSRAM_OFFSET)
#define MMU_LL_PSRAM_VADDR_TO_FLASH_VADDR(psram_vaddr) ((psram_vaddr) - SOC_IRAM_FLASH_PSRAM_OFFSET)
#define MMU_LL_END_DROM_ENTRY_VADDR (SOC_DRAM_FLASH_ADDRESS_HIGH - SOC_MMU_PAGE_SIZE)
#define MMU_LL_END_DROM_ENTRY_ID (SOC_MMU_ENTRY_NUM - 1)
/**
* Convert MMU virtual address to linear address
@ -344,7 +345,7 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return Ture for MMU entry is valid; False for invalid
* @return True for MMU entry is valid; False for invalid
*/
static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id)
{

View File

@ -20,6 +20,9 @@
extern "C" {
#endif
#define MMU_LL_END_DROM_ENTRY_VADDR (SOC_DRAM_FLASH_ADDRESS_HIGH - 0x10000)
#define MMU_LL_END_DROM_ENTRY_ID (192 - 1)
/**
* Convert MMU virtual address to linear address
*
@ -249,7 +252,7 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return Ture for MMU entry is valid; False for invalid
* @return True for MMU entry is valid; False for invalid
*/
static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id)
{

View File

@ -19,6 +19,8 @@
extern "C" {
#endif
#define MMU_LL_END_DROM_ENTRY_VADDR (SOC_DRAM_FLASH_ADDRESS_HIGH - 0x10000)
#define MMU_LL_END_DROM_ENTRY_ID (SOC_MMU_ENTRY_NUM - 1)
/**
* Convert MMU virtual address to linear address
*
@ -230,7 +232,7 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return Ture for MMU entry is valid; False for invalid
* @return True for MMU entry is valid; False for invalid
*/
static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id)
{

View File

@ -9,6 +9,7 @@
#include <stdbool.h>
#include <stdint.h>
#include "soc/soc_caps.h"
#include "hal/cache_types.h"
#ifdef __cplusplus

View File

@ -24,5 +24,3 @@ entries:
spi_flash_hal_gpspi (noflash)
if SOC_PMU_SUPPORTED = y:
pmu_hal (noflash)
if SOC_CLK_MPLL_SUPPORTED = y:
clk_tree_hal: clk_hal_xtal_get_freq_mhz (noflash)

View File

@ -12,6 +12,7 @@
#include "hal/assert.h"
#include "hal/mmu_hal.h"
#include "hal/mmu_ll.h"
#include "soc/soc_caps.h"
#include "rom/cache.h"
void mmu_hal_init(void)
@ -26,7 +27,7 @@ void mmu_hal_init(void)
void mmu_hal_unmap_all(void)
{
#if MMU_LL_MMU_PER_TARGET
#if SOC_MMU_PER_EXT_MEM_TARGET
mmu_ll_unmap_all(MMU_LL_FLASH_MMU_ID);
mmu_ll_unmap_all(MMU_LL_PSRAM_MMU_ID);
#else

View File

@ -34,9 +34,6 @@ extern "C" {
#define SOC_DRAM_FLASH_ADDRESS_LOW SOC_DROM0_CACHE_ADDRESS_LOW
#define SOC_DRAM_FLASH_ADDRESS_HIGH SOC_DROM0_CACHE_ADDRESS_HIGH
#define SOC_DRAM_PSRAM_ADDRESS_LOW SOC_DRAM1_CACHE_ADDRESS_LOW
#define SOC_DRAM_PSRAM_ADDRESS_HIGH SOC_DRAM1_CACHE_ADDRESS_HIGH
#define SOC_BUS_SIZE(bus_name) (bus_name##_ADDRESS_HIGH - bus_name##_ADDRESS_LOW)
#define SOC_ADDRESS_IN_BUS(bus_name, vaddr) ((vaddr) >= bus_name##_ADDRESS_LOW && (vaddr) < bus_name##_ADDRESS_HIGH)
#define SOC_ADDRESS_IN_IRAM0_CACHE(vaddr) SOC_ADDRESS_IN_BUS(SOC_IRAM0_CACHE, vaddr)

View File

@ -815,6 +815,10 @@ config SOC_MMU_DI_VADDR_SHARED
bool
default y
config SOC_MMU_PER_EXT_MEM_TARGET
bool
default y
config SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED
bool
default n

View File

@ -43,6 +43,9 @@ extern "C" {
#define SOC_DRAM_PSRAM_ADDRESS_LOW SOC_IRAM_PSRAM_ADDRESS_LOW
#define SOC_DRAM_PSRAM_ADDRESS_HIGH SOC_IRAM_PSRAM_ADDRESS_HIGH
#define SOC_IRAM_FLASH_PSRAM_OFFSET (SOC_IRAM_PSRAM_ADDRESS_LOW - SOC_IRAM_FLASH_ADDRESS_LOW)
#define SOC_DRAM_FLASH_PSRAM_OFFSET SOC_IRAM_FLASH_PSRAM_OFFSET
#define SOC_BUS_SIZE(bus_name) (bus_name##_ADDRESS_HIGH - bus_name##_ADDRESS_LOW)
#define SOC_ADDRESS_IN_BUS(bus_name, vaddr) ((vaddr) >= bus_name##_ADDRESS_LOW && (vaddr) < bus_name##_ADDRESS_HIGH)

View File

@ -332,6 +332,7 @@
#define SOC_MMU_PERIPH_NUM (2U)
#define SOC_MMU_LINEAR_ADDRESS_REGION_NUM (2U)
#define SOC_MMU_DI_VADDR_SHARED (1) /*!< D/I vaddr are shared */
#define SOC_MMU_PER_EXT_MEM_TARGET (1) /*!< MMU is per physical external memory target (flash, psram) */
/*-------------------------- MPU CAPS ----------------------------------------*/
#define SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED 0

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2020-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -29,6 +29,12 @@ extern "C" {
#define SOC_DRAM_FLASH_ADDRESS_LOW SOC_DRAM0_CACHE_ADDRESS_LOW
#define SOC_DRAM_FLASH_ADDRESS_HIGH SOC_DRAM0_CACHE_ADDRESS_HIGH
#define SOC_IRAM_PSRAM_ADDRESS_LOW SOC_IRAM0_CACHE_ADDRESS_LOW
#define SOC_IRAM_PSRAM_ADDRESS_HIGH SOC_IRAM0_CACHE_ADDRESS_HIGH
#define SOC_DRAM_PSRAM_ADDRESS_LOW SOC_DRAM0_CACHE_ADDRESS_LOW
#define SOC_DRAM_PSRAM_ADDRESS_HIGH SOC_DRAM0_CACHE_ADDRESS_HIGH
#define SOC_BUS_SIZE(bus_name) (bus_name##_ADDRESS_HIGH - bus_name##_ADDRESS_LOW)
#define SOC_ADDRESS_IN_BUS(bus_name, vaddr) ((vaddr) >= bus_name##_ADDRESS_LOW && (vaddr) < bus_name##_ADDRESS_HIGH)

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -277,6 +277,7 @@ const void * spi_flash_phys2cache(size_t phys_offs, spi_flash_mmap_memory_t memo
mmu_target_t target = MMU_TARGET_FLASH0;
__attribute__((unused)) uint32_t phys_page = phys_offs / CONFIG_MMU_PAGE_SIZE;
#if !SOC_MMU_PER_EXT_MEM_TARGET
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
if (phys_page >= instruction_flash_start_page_get() && phys_page <= instruction_flash_end_page_get()) {
target = MMU_TARGET_PSRAM0;
@ -290,6 +291,7 @@ const void * spi_flash_phys2cache(size_t phys_offs, spi_flash_mmap_memory_t memo
phys_offs -= rodata_flash2spiram_offset() * CONFIG_MMU_PAGE_SIZE;
}
#endif
#endif //#if !SOC_MMU_PER_EXT_MEM_TARGET
mmu_vaddr_t type = (memory == SPI_FLASH_MMAP_DATA) ? MMU_VADDR_DATA : MMU_VADDR_INSTRUCTION;
ret = esp_mmu_paddr_to_vaddr(phys_offs, target, type, &ptr);
@ -366,12 +368,22 @@ size_t spi_flash_cache2phys(const void *cached)
uint32_t paddr = 0;
mmu_target_t target = 0;
#if CONFIG_SPIRAM_FLASH_LOAD_TO_PSRAM //TODO: IDF-9049
paddr = mmu_xip_psram_flash_vaddr_to_paddr(cached);
//SPI_FLASH_CACHE2PHYS_FAIL is UINT32_MAX
if (paddr != SPI_FLASH_CACHE2PHYS_FAIL) {
return paddr;
}
#endif
ret = esp_mmu_vaddr_to_paddr((void *)cached, &paddr, &target);
if (ret != ESP_OK) {
return SPI_FLASH_CACHE2PHYS_FAIL;
}
int offset = 0;
#if !SOC_MMU_PER_EXT_MEM_TARGET //TODO: IDF-9049
#if CONFIG_SPIRAM_RODATA
if ((uint32_t)cached >= (uint32_t)&_rodata_reserved_start && (uint32_t)cached <= (uint32_t)&_rodata_reserved_end) {
offset = rodata_flash2spiram_offset();
@ -382,6 +394,7 @@ size_t spi_flash_cache2phys(const void *cached)
offset = instruction_flash2spiram_offset();
}
#endif
#endif //#if !SOC_MMU_PER_EXT_MEM_TARGET
return paddr + offset * CONFIG_MMU_PAGE_SIZE;
}

View File

@ -150,7 +150,10 @@ void IRAM_ATTR esp_mspi_pin_init(void)
}
//Set F4R4 board pin drive strength. TODO: IDF-3663
#endif
/* Reserve the GPIO pins */
}
void esp_mspi_pin_reserve(void)
{
uint64_t reserve_pin_mask = 0;
for (esp_mspi_io_t i = 0; i < ESP_MSPI_IO_MAX; i++) {
reserve_pin_mask |= BIT64(esp_mspi_get_io(i));

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2019-2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2019-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -55,6 +55,11 @@ esp_err_t spi_flash_init_chip_state(void);
*/
void esp_mspi_pin_init(void);
/**
* @brief Reserve MSPI IOs
*/
void esp_mspi_pin_reserve(void);
/**
* @brief Get the number of the GPIO corresponding to the given MSPI io
*

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Unlicense OR CC0-1.0
*/
@ -23,7 +23,7 @@
static uint32_t buffer[1024];
/* read-only region used for mmap tests, intialised in setup_mmap_tests() */
/* read-only region used for mmap tests, initialised in setup_mmap_tests() */
static uint32_t start;
static uint32_t end;
@ -359,7 +359,14 @@ TEST_CASE("phys2cache/cache2phys basic checks", "[spi_flash][mmap]")
/* esp_partition_find is in IROM */
uint32_t phys = spi_flash_cache2phys(esp_partition_find);
TEST_ASSERT_NOT_EQUAL(SPI_FLASH_CACHE2PHYS_FAIL, phys);
#if !CONFIG_SPIRAM_FLASH_LOAD_TO_PSRAM
/**
     * On CONFIG_SPIRAM_FLASH_LOAD_TO_PSRAM=y,
     * spi_flash_phys2cache will return the vaddr corresponding to the flash paddr,
     * whereas `constant_data` is now actually in PSRAM.
*/
TEST_ASSERT_EQUAL_PTR(esp_partition_find, spi_flash_phys2cache(phys, SPI_FLASH_MMAP_INST));
#endif
#if CONFIG_IDF_TARGET_ESP32 || CONFIG_IDF_TARGET_ESP32S2
TEST_ASSERT_EQUAL_PTR(NULL, spi_flash_phys2cache(phys, SPI_FLASH_MMAP_DATA));
#endif //#if CONFIG_IDF_TARGET_ESP32 || CONFIG_IDF_TARGET_ESP32S2
@ -371,8 +378,14 @@ TEST_CASE("phys2cache/cache2phys basic checks", "[spi_flash][mmap]")
/* 'constant_data' should be in DROM */
phys = spi_flash_cache2phys(&constant_data);
TEST_ASSERT_NOT_EQUAL(SPI_FLASH_CACHE2PHYS_FAIL, phys);
TEST_ASSERT_EQUAL_PTR(&constant_data,
spi_flash_phys2cache(phys, SPI_FLASH_MMAP_DATA));
#if !CONFIG_SPIRAM_FLASH_LOAD_TO_PSRAM
/**
     * On CONFIG_SPIRAM_FLASH_LOAD_TO_PSRAM=y,
     * spi_flash_phys2cache will return the vaddr corresponding to the flash paddr,
     * whereas `constant_data` is now actually in PSRAM.
*/
TEST_ASSERT_EQUAL_PTR(&constant_data, spi_flash_phys2cache(phys, SPI_FLASH_MMAP_DATA));
#endif
#if CONFIG_IDF_TARGET_ESP32 || CONFIG_IDF_TARGET_ESP32S2
TEST_ASSERT_EQUAL_PTR(NULL, spi_flash_phys2cache(phys, SPI_FLASH_MMAP_INST));
#endif //#if CONFIG_IDF_TARGET_ESP32 || CONFIG_IDF_TARGET_ESP32S2
@ -404,7 +417,18 @@ TEST_CASE("mmap consistent with phys2cache/cache2phys", "[spi_flash][mmap]")
spi_flash_munmap(handle1);
handle1 = 0;
esp_rom_printf("ptr; 0x%x\n", ptr);
#if !CONFIG_SPIRAM_FLASH_LOAD_TO_PSRAM
/**
     * On CONFIG_SPIRAM_FLASH_LOAD_TO_PSRAM=y, this is expected because there are two MMUs.
     * After unmapping the flash one, a vaddr that is XIP from PSRAM can still be resolved via `spi_flash_cache2phys`.
     *
     * TODO: design a new API dedicated to `esp_ota_get_running_partition` usage, then `spi_flash_cache2phys`
     * here can be restored to its normal behaviour.
*/
TEST_ASSERT_EQUAL_HEX(SPI_FLASH_CACHE2PHYS_FAIL, spi_flash_cache2phys(ptr));
#endif
}
TEST_CASE("munmap followed by mmap flushes cache", "[spi_flash][mmap]")

View File

@ -137,35 +137,55 @@ Remaining external RAM can also be added to the capability heap allocator using
.. only:: SOC_SPIRAM_XIP_SUPPORTED
.. _external_ram_config_instructions:
.. only:: esp32s2 or esp32s3
Move Instructions in Flash to PSRAM
-----------------------------------
.. _external_ram_config_instructions:
The :ref:`CONFIG_SPIRAM_FETCH_INSTRUCTIONS` option allows the flash ``.text`` sections (use for instructions) to be placed in PSRAM.
Move Instructions in Flash to PSRAM
-----------------------------------
By enabling the :ref:`CONFIG_SPIRAM_FETCH_INSTRUCTIONS` option
The :ref:`CONFIG_SPIRAM_FETCH_INSTRUCTIONS` option allows the flash ``.text`` sections (for instructions) to be placed in PSRAM.
- Instructions from the ``.text`` sections of flash are moved into PSRAM on system startup.
By enabling the :ref:`CONFIG_SPIRAM_FETCH_INSTRUCTIONS` option,
- The corresponding virtual memory range of those instructions will also be re-mapped to PSRAM.
- Instructions from the ``.text`` sections of flash are moved into PSRAM on system startup.
If :ref:`CONFIG_SPIRAM_RODATA` is also enabled, the cache will not be disabled during an SPI1 flash operation. You do not need to make sure ISRs, ISR callbacks and involved data are placed in internal RAM, thus internal RAM usage can be optimized.
- The corresponding virtual memory range of those instructions will also be re-mapped to PSRAM.
.. _external_ram_config_rodata:
.. _external_ram_config_rodata:
Move Read-Only Data in Flash to PSRAM
---------------------------------------
Move Read-Only Data in Flash to PSRAM
---------------------------------------
The :ref:`CONFIG_SPIRAM_RODATA` option allows the flash ``.rodata`` sections (use for read only data) to be placed in PSRAM.
The :ref:`CONFIG_SPIRAM_RODATA` option allows the flash ``.rodata`` sections (for read only data) to be placed in PSRAM.
By enabling the :ref:`CONFIG_SPIRAM_RODATA` option
By enabling the :ref:`CONFIG_SPIRAM_RODATA` option,
- Instructions from the ``.rodata`` sections of flash are moved into PSRAM on system startup.
- Instructions from the ``.rodata`` sections of flash are moved into PSRAM on system startup.
- The corresponding virtual memory range of those rodata will also be re-mapped to PSRAM.
- The corresponding virtual memory range of those rodata will also be re-mapped to PSRAM.
Execute In Place (XiP) from PSRAM
------------------------------------
The :ref:`CONFIG_SPIRAM_XIP_FROM_PSRAM` is a helper option that selects both :ref:`CONFIG_SPIRAM_FETCH_INSTRUCTIONS` and :ref:`CONFIG_SPIRAM_RODATA`.
The benefits of XiP from PSRAM are:
- PSRAM access is faster than flash access, so the performance is better.
- The cache will not be disabled during an SPI1 flash operation, which keeps code execution performant during such operations. ISRs, ISR callbacks, and data that might be accessed during this period do not need to be placed in internal RAM, so internal RAM usage can be optimized. This is useful for applications with high-throughput peripherals that must keep running during SPI1 flash operations.
.. only:: esp32p4
Execute In Place (XiP) from PSRAM
------------------------------------
    The :ref:`CONFIG_SPIRAM_XIP_FROM_PSRAM` option enables the execute in place (XiP) from PSRAM feature. With this option, sections that are normally placed in flash, ``.text`` (for instructions) and ``.rodata`` (for read-only data), will be loaded into PSRAM.
    With this option enabled, the cache will not be disabled during an SPI1 flash operation, so code that needs to execute during an SPI1 flash operation does not have to be placed in internal RAM. Because the ESP32-P4 flash and PSRAM use two separate SPI buses, moving flash content to PSRAM actually increases the load on the PSRAM MSPI bus, so access is relatively slower. The exact impact on performance depends heavily on your app's usage of PSRAM, so we suggest doing performance profiling to determine whether enabling this option significantly impacts your app's performance.
If :ref:`CONFIG_SPIRAM_FETCH_INSTRUCTIONS` is also enabled, the cache will not be disabled during an SPI1 flash operation. You do not need to make sure ISRs, ISR callbacks and involved data are placed in internal RAM, thus internal RAM usage can be optimized.
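    A minimal sketch, assuming the ``esp_ptr_external_ram()`` helper from ``esp_memory_utils.h``, of how an application could verify at runtime that its code and read-only data are indeed served from PSRAM once XiP from PSRAM is enabled:

    .. code-block:: c

        #include <stdio.h>
        #include "esp_memory_utils.h"

        /* Constant data ends up in .rodata; with XiP from PSRAM it is mapped to PSRAM. */
        static const char s_rodata_probe[] = "xip-psram-probe";

        void app_main(void)
        {
            /* Both the address of this function (.text) and the constant string
             * (.rodata) are expected to fall inside the external RAM vaddr range. */
            printf("app_main in external RAM: %d\n", esp_ptr_external_ram((const void *)&app_main));
            printf("rodata   in external RAM: %d\n", esp_ptr_external_ram(s_rodata_probe));
        }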
Restrictions
============

View File

@ -3,3 +3,5 @@ sha256_coredump
gcc
clang_rt_builtins
freertos_common
esp_psram
esp_mm