mmu: driver framework for vaddr maintenance

This commit adds a basic MMU driver framework. It is now able to maintain
MMU virtual address usage on esp32, esp32s2 and esp32s3. Code that uses
external virtual addresses should rely on the MMU functions to learn which
address ranges are available, instead of hardcoding them.

This commit also improves how PSRAM memory is added to the heap
allocator: regions are now added to the heap according to their memory
alignment.

Closes https://github.com/espressif/esp-idf/issues/8295
Armando 2022-08-18 14:00:46 +08:00
parent dc5cab7730
commit 2d44dc1eed
27 changed files with 882 additions and 179 deletions
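
For example, a component that needs a window into external memory can now query the MMU driver for a free virtual address range instead of hardcoding SOC_EXTRAM_DATA_LOW/SOC_EXTRAM_DATA_HIGH. A minimal sketch against the internal APIs added in this commit (error handling reduced to ESP_ERROR_CHECK; `mmu.h` is private to esp_psram for now):

#include <sys/param.h>
#include "esp_err.h"
#include "mmu.h"

//Find a readable, writable, byte-accessible external vaddr window of at most `wanted` bytes
static const void *find_ext_vaddr_window(size_t wanted)
{
    const uint32_t caps = MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_8BIT;
    size_t largest = 0;
    ESP_ERROR_CHECK(esp_mmu_get_largest_free_block(caps, &largest));
    const void *vaddr = NULL;
    //The range comes back MMU-page-aligned and is not yet backed by a physical mapping
    ESP_ERROR_CHECK(esp_mmu_find_vaddr_range(MIN(wanted, largest), caps, &vaddr));
    return vaddr;
}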

View File

@ -23,10 +23,7 @@ bool esp_ptr_dma_ext_capable(const void *p)
return false;
#endif //!SOC_PSRAM_DMA_CAPABLE
#if CONFIG_SPIRAM
intptr_t vaddr_start = 0;
intptr_t vaddr_end = 0;
esp_psram_extram_get_mapped_range(&vaddr_start, &vaddr_end);
return (intptr_t)p >= vaddr_start && (intptr_t)p < vaddr_end;
return esp_psram_check_ptr_addr(p);
#else
return false;
#endif //CONFIG_SPIRAM
@ -44,10 +41,7 @@ bool esp_ptr_byte_accessible(const void *p)
r |= (ip >= SOC_RTC_DRAM_LOW && ip < SOC_RTC_DRAM_HIGH);
#endif
#if CONFIG_SPIRAM
intptr_t vaddr_start = 0;
intptr_t vaddr_end = 0;
esp_psram_extram_get_mapped_range(&vaddr_start, &vaddr_end);
r |= (ip >= vaddr_start && ip < vaddr_end);
r |= esp_psram_check_ptr_addr(p);
#endif
return r;
}
@ -58,10 +52,7 @@ bool esp_ptr_external_ram(const void *p)
return false;
#endif //!SOC_SPIRAM_SUPPORTED
#if CONFIG_SPIRAM
intptr_t vaddr_start = 0;
intptr_t vaddr_end = 0;
esp_psram_extram_get_mapped_range(&vaddr_start, &vaddr_end);
return (intptr_t)p >= vaddr_start && (intptr_t)p < vaddr_end;
return esp_psram_check_ptr_addr(p);
#else
return false;
#endif //CONFIG_SPIRAM
@ -70,10 +61,7 @@ bool esp_ptr_external_ram(const void *p)
#if CONFIG_SPIRAM_ALLOW_STACK_EXTERNAL_MEMORY
bool esp_stack_ptr_in_extram(uint32_t sp)
{
intptr_t vaddr_start = 0;
intptr_t vaddr_end = 0;
esp_psram_extram_get_mapped_range(&vaddr_start, &vaddr_end);
//Check if stack ptr is in between SOC_EXTRAM_DATA_LOW and SOC_EXTRAM_DATA_HIGH, and 16 byte aligned.
return !(sp < vaddr_start + 0x10 || sp > vaddr_end - 0x10 || ((sp & 0xF) != 0));
//Check if stack ptr is on PSRAM, and 16 byte aligned.
return (esp_psram_check_ptr_addr((void *)sp) && ((sp & 0xF) == 0));
}
#endif

View File

@ -14,7 +14,8 @@ set(srcs)
if(CONFIG_SPIRAM)
list(APPEND srcs "esp_psram.c"
"mmu.c"
"mmu_psram_flash.c")
"mmu_psram_flash.c"
"ext_mem_layout.c")
if(${target} STREQUAL "esp32")
list(APPEND srcs "esp32/esp_psram_extram_cache.c"

View File

@ -26,6 +26,7 @@
#include "esp_private/mmu_psram_flash.h"
#include "esp_psram_impl.h"
#include "esp_psram.h"
#include "mmu.h"
#if CONFIG_IDF_TARGET_ESP32
#include "esp32/himem.h"
@ -43,8 +44,14 @@
#define PSRAM_MODE PSRAM_VADDR_MODE_NORMAL
#endif
#if CONFIG_SPIRAM
/**
* Two types of PSRAM memory regions for now:
* - 8bit aligned
* - 32bit aligned
*/
#define PSRAM_MEM_TYPE_NUM 2
#define PSRAM_MEM_8BIT_ALIGNED 0
#define PSRAM_MEM_32BIT_ALIGNED 1
#if CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY
extern uint8_t _ext_ram_bss_start;
@ -56,13 +63,33 @@ extern uint8_t _ext_ram_noinit_start;
extern uint8_t _ext_ram_noinit_end;
#endif //#if CONFIG_SPIRAM_ALLOW_NOINIT_SEG_EXTERNAL_MEMORY
//These variables are in bytes
static intptr_t s_allocable_vaddr_start;
static intptr_t s_allocable_vaddr_end;
static intptr_t s_mapped_vaddr_start;
static intptr_t s_mapped_vaddr_end;
typedef struct {
intptr_t vaddr_start;
intptr_t vaddr_end;
size_t size; //in bytes
} psram_mem_t;
static bool s_spiram_inited;
typedef struct {
bool is_initialised;
/**
* @note 1
* As the heap allocator can't be used at this stage, these regions need to be declared statically.
* Luckily, only the S2 has two different types of memory regions:
* - byte-aligned memory
* - word-aligned memory
* and the number of region types is unlikely to grow very big.
*
* On other chips, only one region is needed,
* so for chips other than the S2, the sizes of `regions_to_heap[1]` and `mapped_regions[1]` will always be zero.
*
* If this situation ever gets worse (DBUS memory isn't consecutive), this context needs to be delegated
* to chip-specific files, keeping only a (void *) pointer here that points to those chip-specific contexts
*/
psram_mem_t regions_to_heap[PSRAM_MEM_TYPE_NUM]; //memory regions that are available to be added to the heap allocator
psram_mem_t mapped_regions[PSRAM_MEM_TYPE_NUM]; //mapped memory regions
} psram_ctx_t;
static psram_ctx_t s_psram_ctx;
static const char* TAG = "esp_psram";
@ -86,26 +113,28 @@ static void IRAM_ATTR s_mapping(int v_start, int size)
}
#endif //CONFIG_IDF_TARGET_ESP32
esp_err_t esp_psram_init(void)
{
if (s_spiram_inited) {
if (s_psram_ctx.is_initialised) {
return ESP_ERR_INVALID_STATE;
}
esp_err_t ret;
esp_err_t ret = ESP_FAIL;
ret = esp_psram_impl_enable(PSRAM_MODE);
if (ret != ESP_OK) {
#if CONFIG_SPIRAM_IGNORE_NOTFOUND
ESP_EARLY_LOGE(TAG, "SPI RAM enabled but initialization failed. Bailing out.");
ESP_EARLY_LOGE(TAG, "PSRAM enabled but initialization failed. Bailing out.");
#endif
return ret;
}
s_spiram_inited = true;
s_psram_ctx.is_initialised = true;
uint32_t psram_physical_size = 0;
ret = esp_psram_impl_get_physical_size(&psram_physical_size);
assert(ret == ESP_OK);
ESP_EARLY_LOGI(TAG, "Found %dMB SPI RAM device", psram_physical_size / (1024 * 1024));
ESP_EARLY_LOGI(TAG, "Found %dMB PSRAM device", psram_physical_size / (1024 * 1024));
ESP_EARLY_LOGI(TAG, "Speed: %dMHz", CONFIG_SPIRAM_SPEED);
#if CONFIG_IDF_TARGET_ESP32
ESP_EARLY_LOGI(TAG, "PSRAM initialized, cache is in %s mode.", \
@ -155,99 +184,168 @@ esp_err_t esp_psram_init(void)
ESP_EARLY_LOGV(TAG, "after copy .rodata, used page is %d, start_page is %d, psram_available_size is %d B", used_page, start_page, psram_available_size);
#endif //#if CONFIG_SPIRAM_RODATA
/**
* For now,
* - we only need the MMU driver when PSRAM is enabled
* - the MMU driver isn't public
*
* So we call `esp_mmu_init()` here, instead of calling it in startup code.
*/
esp_mmu_init();
//----------------------------------Map the PSRAM physical range to MMU-----------------------------//
intptr_t vaddr_start = mmu_get_psram_vaddr_start();
if (vaddr_start + psram_available_size > mmu_get_psram_vaddr_end()) {
psram_available_size = mmu_get_psram_vaddr_end() - vaddr_start;
ESP_EARLY_LOGV(TAG, "Virtual address not enough for PSRAM, map as much as we can. %dMB is mapped", psram_available_size / 1024 / 1024);
}
/**
* @note 2
* Similar to @note 1, we expect the HW DBUS memory to be consecutive.
*
* If the situation gets worse in the future (the memory region isn't consecutive), this logic needs to move into chip-specific files
*/
size_t total_mapped_size = 0;
size_t size_to_map = 0;
size_t byte_aligned_size = 0;
ret = esp_mmu_get_largest_free_block(MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_8BIT | MMU_MEM_CAP_32BIT, &byte_aligned_size);
assert(ret == ESP_OK);
size_to_map = MIN(byte_aligned_size, psram_available_size);
const void *v_start_8bit_aligned = NULL;
ret = esp_mmu_find_vaddr_range(size_to_map, MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_8BIT | MMU_MEM_CAP_32BIT, &v_start_8bit_aligned);
assert(ret == ESP_OK);
#if CONFIG_IDF_TARGET_ESP32
s_mapping(vaddr_start, psram_available_size);
s_mapping((int)v_start_8bit_aligned, size_to_map);
#else
uint32_t actual_mapped_len = 0;
mmu_hal_map_region(0, MMU_TARGET_PSRAM0, vaddr_start, MMU_PAGE_TO_BYTES(start_page), psram_available_size, &actual_mapped_len);
ESP_EARLY_LOGV(TAG, "actual_mapped_len is 0x%x bytes", actual_mapped_len);
mmu_hal_map_region(0, MMU_TARGET_PSRAM0, (intptr_t)v_start_8bit_aligned, MMU_PAGE_TO_BYTES(start_page), size_to_map, &actual_mapped_len);
start_page += BYTES_TO_MMU_PAGE(actual_mapped_len);
ESP_EARLY_LOGV(TAG, "8bit-aligned-region: actual_mapped_len is 0x%x bytes", actual_mapped_len);
cache_bus_mask_t bus_mask = cache_ll_l1_get_bus(0, vaddr_start, actual_mapped_len);
cache_bus_mask_t bus_mask = cache_ll_l1_get_bus(0, (uint32_t)v_start_8bit_aligned, actual_mapped_len);
cache_ll_l1_enable_bus(0, bus_mask);
#if !CONFIG_FREERTOS_UNICORE
bus_mask = cache_ll_l1_get_bus(1, vaddr_start, actual_mapped_len);
bus_mask = cache_ll_l1_get_bus(1, (uint32_t)v_start_8bit_aligned, actual_mapped_len);
cache_ll_l1_enable_bus(1, bus_mask);
#endif
#endif //#if CONFIG_IDF_TARGET_ESP32
s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].size = size_to_map;
s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].vaddr_start = (intptr_t)v_start_8bit_aligned;
s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].vaddr_end = (intptr_t)v_start_8bit_aligned + size_to_map;
s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].size = size_to_map;
s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].vaddr_start = (intptr_t)v_start_8bit_aligned;
s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].vaddr_end = (intptr_t)v_start_8bit_aligned + size_to_map;
ESP_EARLY_LOGV(TAG, "8bit-aligned-range: 0x%x B, starting from: 0x%x", s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].size, v_start_8bit_aligned);
total_mapped_size += size_to_map;
#if CONFIG_IDF_TARGET_ESP32S2
/**
* On ESP32S2, there are 2 types of DBUS memory:
* - byte-aligned memory
* - word-aligned memory
*
* If the byte-aligned memory isn't enough, we search for word-aligned memory to do the mapping
*/
if (total_mapped_size < psram_available_size) {
size_to_map = psram_available_size - total_mapped_size;
size_t word_aligned_size = 0;
ret = esp_mmu_get_largest_free_block(MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT, &word_aligned_size);
assert(ret == ESP_OK);
size_to_map = MIN(word_aligned_size, size_to_map);
const void *v_start_32bit_aligned = NULL;
ret = esp_mmu_find_vaddr_range(size_to_map, MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT, &v_start_32bit_aligned);
assert(ret == ESP_OK);
mmu_hal_map_region(0, MMU_TARGET_PSRAM0, (intptr_t)v_start_32bit_aligned, MMU_PAGE_TO_BYTES(start_page), size_to_map, &actual_mapped_len);
ESP_EARLY_LOGV(TAG, "32bit-aligned-region: actual_mapped_len is 0x%x bytes", actual_mapped_len);
cache_bus_mask_t bus_mask = cache_ll_l1_get_bus(0, (uint32_t)v_start_32bit_aligned, actual_mapped_len);
cache_ll_l1_enable_bus(0, bus_mask);
s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].size = size_to_map;
s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].vaddr_start = (intptr_t)v_start_32bit_aligned;
s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].vaddr_end = (intptr_t)v_start_32bit_aligned + size_to_map;
s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].size = size_to_map;
s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].vaddr_start = (intptr_t)v_start_32bit_aligned;
s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].vaddr_end = (intptr_t)v_start_32bit_aligned + size_to_map;
ESP_EARLY_LOGV(TAG, "32bit-aligned-range: 0x%x B, starting from: 0x%x", s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].size, v_start_32bit_aligned);
total_mapped_size += size_to_map;
}
#endif // #if CONFIG_IDF_TARGET_ESP32S2
if (total_mapped_size < psram_available_size) {
ESP_EARLY_LOGW(TAG, "Virtual address not enough for PSRAM, map as much as we can. %dMB is mapped", total_mapped_size / 1024 / 1024);
}
/*------------------------------------------------------------------------------
* After mapping, we DON'T care about the PSRAM PHYSICAL ADDRESS ANYMORE!
*----------------------------------------------------------------------------*/
s_mapped_vaddr_start = vaddr_start;
s_mapped_vaddr_end = vaddr_start + psram_available_size;
s_allocable_vaddr_start = vaddr_start;
s_allocable_vaddr_end = vaddr_start + psram_available_size;
//------------------------------------Configure .bss in PSRAM-------------------------------------//
#if CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY
//should never be a negative number
uint32_t ext_bss_size = ((intptr_t)&_ext_ram_bss_end - (intptr_t)&_ext_ram_bss_start);
ESP_EARLY_LOGV(TAG, "ext_bss_size is %d", ext_bss_size);
s_allocable_vaddr_start += ext_bss_size;
s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].vaddr_start += ext_bss_size;
s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].size -= ext_bss_size;
#endif //#if CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY
#if CONFIG_SPIRAM_ALLOW_NOINIT_SEG_EXTERNAL_MEMORY
uint32_t ext_noinit_size = ((intptr_t)&_ext_ram_noinit_end - (intptr_t)&_ext_ram_noinit_start);
ESP_EARLY_LOGV(TAG, "ext_noinit_size is %d", ext_noinit_size);
s_allocable_vaddr_start += ext_noinit_size;
s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].vaddr_start += ext_noinit_size;
s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].size -= ext_noinit_size;
#endif
#if CONFIG_IDF_TARGET_ESP32
s_allocable_vaddr_end -= esp_himem_reserved_area_size() - 1;
s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].size -= esp_himem_reserved_area_size() - 1;
#endif
ESP_EARLY_LOGV(TAG, "s_allocable_vaddr_start is 0x%x, s_allocable_vaddr_end is 0x%x", s_allocable_vaddr_start, s_allocable_vaddr_end);
return ESP_OK;
}
/**
* Add the available PSRAM regions to the heap allocator. The heap allocator knows the capabilities of this type of memory,
* so there's no need to specify them explicitly.
*/
esp_err_t esp_psram_extram_add_to_heap_allocator(void)
{
ESP_EARLY_LOGI(TAG, "Adding pool of %dK of external SPI memory to heap allocator", (s_allocable_vaddr_end - s_allocable_vaddr_start) / 1024);
return heap_caps_add_region(s_allocable_vaddr_start, s_allocable_vaddr_end);
}
esp_err_t ret = ESP_FAIL;
esp_err_t IRAM_ATTR esp_psram_extram_get_mapped_range(intptr_t *out_vstart, intptr_t *out_vend)
{
if (!out_vstart || !out_vend) {
return ESP_ERR_INVALID_ARG;
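//Each caps array below has one entry per heap capability priority tier (3 tiers), the format heap_caps_add_region_with_caps() expects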
uint32_t byte_aligned_caps[] = {MALLOC_CAP_SPIRAM|MALLOC_CAP_DEFAULT, 0, MALLOC_CAP_8BIT|MALLOC_CAP_32BIT};
ret = heap_caps_add_region_with_caps(byte_aligned_caps,
s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].vaddr_start,
s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].vaddr_end);
if (ret != ESP_OK) {
return ret;
}
if (!s_spiram_inited) {
return ESP_ERR_INVALID_STATE;
if (s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].size) {
assert(s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].vaddr_start);
uint32_t word_aligned_caps[] = {MALLOC_CAP_SPIRAM|MALLOC_CAP_DEFAULT, 0, MALLOC_CAP_32BIT};
ret = heap_caps_add_region_with_caps(word_aligned_caps,
s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].vaddr_start,
s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].vaddr_end);
if (ret != ESP_OK) {
return ret;
}
}
*out_vstart = s_mapped_vaddr_start;
*out_vend = s_mapped_vaddr_end;
ESP_EARLY_LOGI(TAG, "Adding pool of %dK of PSRAM memory to heap allocator",
(s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].size + s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].size) / 1024);
return ESP_OK;
}
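Once the regions are registered, ordinary heap_caps allocations can be served from PSRAM. A brief usage sketch (capability macros as used above; whether a word-aligned-only region exists depends on the target):

void *bytes = heap_caps_malloc(4096, MALLOC_CAP_SPIRAM | MALLOC_CAP_8BIT);  //byte-accessible PSRAM
void *words = heap_caps_malloc(4096, MALLOC_CAP_SPIRAM | MALLOC_CAP_32BIT); //may come from the 32bit-aligned region on ESP32S2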
esp_err_t esp_psram_extram_get_alloced_range(intptr_t *out_vstart, intptr_t *out_vend)
bool IRAM_ATTR esp_psram_check_ptr_addr(const void *p)
{
if (!out_vstart || !out_vend) {
return ESP_ERR_INVALID_ARG;
if (!s_psram_ctx.is_initialised) {
return false;
}
if (!s_spiram_inited) {
return ESP_ERR_INVALID_STATE;
}
*out_vstart = s_allocable_vaddr_start;
*out_vend = s_allocable_vaddr_end;
return ESP_OK;
return ((intptr_t)p >= s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].vaddr_start && (intptr_t)p < s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].vaddr_end) ||
((intptr_t)p >= s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].vaddr_start && (intptr_t)p < s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].vaddr_end);
}
esp_err_t esp_psram_extram_reserve_dma_pool(size_t size)
{
if (size == 0) {
@ -276,9 +374,9 @@ esp_err_t esp_psram_extram_reserve_dma_pool(size_t size)
return ESP_OK;
}
bool IRAM_ATTR esp_psram_is_initialized(void)
bool IRAM_ATTR __attribute__((pure)) esp_psram_is_initialized(void)
{
return s_spiram_inited;
return s_psram_ctx.is_initialised;
}
size_t esp_psram_get_size(void)
@ -302,45 +400,68 @@ uint8_t esp_psram_io_get_cs_io(void)
true when RAM seems OK, false when test fails. WARNING: Do not run this before the 2nd cpu has been
initialized (in a two-core system) or after the heap allocator has taken ownership of the memory.
*/
bool esp_psram_extram_test(void)
static bool s_test_psram(intptr_t v_start, size_t size, intptr_t reserved_start, intptr_t reserved_end)
{
#if CONFIG_SPIRAM_ALLOW_NOINIT_SEG_EXTERNAL_MEMORY
const void *keepout_addr_low = (const void*)&_ext_ram_noinit_start;
const void *keepout_addr_high = (const void*)&_ext_ram_noinit_end;
#else
const void *keepout_addr_low = 0;
const void *keepout_addr_high = 0;
#endif
volatile int *spiram = (volatile int *)s_mapped_vaddr_start;
volatile int *spiram = (volatile int *)v_start;
size_t p;
size_t s = s_mapped_vaddr_end - s_mapped_vaddr_start;
int errct=0;
int initial_err=-1;
for (p=0; p<(s/sizeof(int)); p+=8) {
const void *addr = (const void *)&spiram[p];
if ((keepout_addr_low <= addr) && (addr < keepout_addr_high)) {
int errct = 0;
int initial_err = -1;
for (p = 0; p < (size / sizeof(int)); p += 8) {
intptr_t addr = (intptr_t)&spiram[p];
if ((reserved_start <= addr) && (addr < reserved_end)) {
continue;
}
spiram[p]=p^0xAAAAAAAA;
spiram[p] = p ^ 0xAAAAAAAA;
}
for (p=0; p<(s/sizeof(int)); p+=8) {
const void *addr = (const void *)&spiram[p];
if ((keepout_addr_low <= addr) && (addr < keepout_addr_high)) {
for (p = 0; p < (size / sizeof(int)); p += 8) {
intptr_t addr = (intptr_t)&spiram[p];
if ((reserved_start <= addr) && (addr < reserved_end)) {
continue;
}
if (spiram[p]!=(p^0xAAAAAAAA)) {
if (spiram[p] != (p ^ 0xAAAAAAAA)) {
errct++;
if (errct==1) initial_err=p*4;
if (errct == 1) {
initial_err = p * 4;
}
}
}
if (errct) {
ESP_EARLY_LOGE(TAG, "SPI SRAM memory test fail. %d/%d writes failed, first @ %X\n", errct, s/32, initial_err + s_mapped_vaddr_start);
ESP_EARLY_LOGE(TAG, "SPI SRAM memory test fail. %d/%d writes failed, first @ %X\n", errct, size/32, initial_err + v_start);
return false;
} else {
ESP_EARLY_LOGI(TAG, "SPI SRAM memory test OK");
return true;
}
}
#endif //#if CONFIG_SPIRAM
bool esp_psram_extram_test(void)
{
bool test_success = false;
#if CONFIG_SPIRAM_ALLOW_NOINIT_SEG_EXTERNAL_MEMORY
intptr_t noinit_vstart = (intptr_t)&_ext_ram_noinit_start;
intptr_t noinit_vend = (intptr_t)&_ext_ram_noinit_end;
#else
intptr_t noinit_vstart = 0;
intptr_t noinit_vend = 0;
#endif
test_success = s_test_psram(s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].vaddr_start,
s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].size,
noinit_vstart,
noinit_vend);
if (!test_success) {
return false;
}
if (s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].size) {
test_success = s_test_psram(s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].vaddr_start,
s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].size,
0,
0);
}
if (!test_success) {
return false;
}
return true;
}

View File

@ -0,0 +1,77 @@
/*
* SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdlib.h>
#include <stdint.h>
#include "sdkconfig.h"
#include "soc/ext_mem_defs.h"
#include "ext_mem_layout.h"
#include "mmu.h"
#if CONFIG_IDF_TARGET_ESP32
/**
* These regions refer to linear addresses
* The start addresses in this list should always be sorted from low to high, as the MMU driver will need to
* coalesce adjacent regions
*/
const mmu_mem_region_t g_mmu_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {
/*linear start linear end bus size bus ID, bus capabilities */
//Can be used for text
{SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW, SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_IRAM0_LINEAR), CACHE_BUS_IBUS0, MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT},
//Can be used for text
{SOC_MMU_IRAM1_LINEAR_ADDRESS_LOW, SOC_MMU_IRAM1_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_IRAM1_LINEAR), CACHE_BUS_IBUS1, MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT},
//Can be used for text
{SOC_MMU_IROM0_LINEAR_ADDRESS_LOW, SOC_MMU_IROM0_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_IROM0_LINEAR), CACHE_BUS_IBUS2, MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT},
//Can be used for rodata
{SOC_MMU_DROM0_LINEAR_ADDRESS_LOW, SOC_MMU_DROM0_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_DROM0_LINEAR), CACHE_BUS_DBUS0, MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT},
//Can be used for PSRAM
{SOC_MMU_DRAM1_LINEAR_ADDRESS_LOW, SOC_MMU_DRAM1_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_DRAM1_LINEAR), CACHE_BUS_DBUS1, MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT},
};
#elif CONFIG_IDF_TARGET_ESP32S2
/**
* These regions refer to linear addresses
* The start addresses in this list should always be sorted from low to high, as the MMU driver will need to
* coalesce adjacent regions
*/
const mmu_mem_region_t g_mmu_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {
/*linear start linear end bus size bus ID, bus capabilities */
//Can be used for text
{SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW, SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_IRAM0_LINEAR), CACHE_BUS_IBUS0, MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT},
//Can be used for text
{SOC_MMU_IRAM1_LINEAR_ADDRESS_LOW, SOC_MMU_IRAM1_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_IRAM1_LINEAR), CACHE_BUS_IBUS1, MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT},
//Can be used for Flash rodata, connected by IBUS
{SOC_MMU_DROM0_LINEAR_ADDRESS_LOW, SOC_MMU_DROM0_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_DROM0_LINEAR), CACHE_BUS_IBUS2, MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT},
//Can be used for PSRAM
{SOC_MMU_DPORT_LINEAR_ADDRESS_LOW, SOC_MMU_DPORT_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_DPORT_LINEAR), CACHE_BUS_DBUS2, MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT},
//Can be used for PSRAM
{SOC_MMU_DRAM1_LINEAR_ADDRESS_LOW, SOC_MMU_DRAM1_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_DRAM1_LINEAR), CACHE_BUS_DBUS1, MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT},
//Can be used for PSRAM
{SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW, SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_DRAM0_LINEAR), CACHE_BUS_DBUS0, MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT},
};
#elif CONFIG_IDF_TARGET_ESP32S3
/**
* The start addresses in this list should always be sorted from low to high, as the MMU driver will need to
* coalesce adjacent regions
*/
const mmu_mem_region_t g_mmu_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {
/*linear start linear end bus size bus ID, bus capabilities */
/**
* Can be used for Flash text, rodata, and PSRAM
* IRAM0 linear address should always be the same as DRAM0 linear address
*/
{SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW, SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_IRAM0_LINEAR), CACHE_BUS_IBUS0 | CACHE_BUS_DBUS0, MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT},
};
#endif

View File

@ -0,0 +1,32 @@
/*
* SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include "sdkconfig.h"
#include "soc/soc_caps.h"
#include "hal/cache_types.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct {
intptr_t start;
intptr_t end;
size_t size;
cache_bus_mask_t bus_id;
uint32_t caps;
} mmu_mem_region_t;
//These regions refer to linear addresses
extern const mmu_mem_region_t g_mmu_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM];
#ifdef __cplusplus
}
#endif

View File

@ -15,32 +15,15 @@ extern "C" {
#endif
/**
* @brief Get the psram mapped vaddr range
* @brief Check if the pointer is on PSRAM
*
* @param[out] out_vstart PSRAM virtual address start
* @param[out] out_vend PSRAM virtual address end
*
* @note [out_vstart, out_vend), `out_vend` isn't included.
* @param[in] p The pointer to check
*
* @return
* - ESP_OK On success
* - ESP_ERR_INVALID_STATE PSRAM is not initialized successfully
* - False: the pointer isn't on PSRAM, or PSRAM isn't initialised successfully
* - True: the pointer is on PSRAM
*/
esp_err_t esp_psram_extram_get_mapped_range(intptr_t *out_vstart, intptr_t *out_vend);
/**
* @brief Get the psram alloced vaddr range
*
* @param[out] out_vstart PSRAM virtual address start
* @param[out] out_vend PSRAM virtual address end
*
* @note [out_vstart, out_vend), `out_vend` isn't included.
*
* @return
* - ESP_OK On success
* - ESP_ERR_INVALID_STATE PSRAM is not initialized successfully
*/
esp_err_t esp_psram_extram_get_alloced_range(intptr_t *out_vstart, intptr_t *out_vend);
bool esp_psram_check_ptr_addr(const void *p);
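
A minimal usage sketch (assuming PSRAM was initialised successfully during startup):

#include <assert.h>
#include "esp_heap_caps.h"
#include "esp_psram.h"

void example(void)
{
    uint8_t *ext = heap_caps_malloc(128, MALLOC_CAP_SPIRAM);
    assert(esp_psram_check_ptr_addr(ext));             //allocated from PSRAM
    static uint32_t internal_word;
    assert(!esp_psram_check_ptr_addr(&internal_word)); //in internal DRAM, not PSRAM
}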
/**
* @brief Add the initialized PSRAM to the heap allocator.

View File

@ -35,20 +35,6 @@ extern "C" {
#define BYTES_TO_MMU_PAGE(bytes) ((bytes) / MMU_PAGE_SIZE)
#endif
/**
* @brief Get the vaddr start for PSRAM
*
* @return PSRAM vaddr start address
*/
intptr_t mmu_get_psram_vaddr_start(void);
/**
* @brief Get the vaddr end for PSRAM
*
* @return PSRAM vaddr end address
*/
intptr_t mmu_get_psram_vaddr_end(void);
/*----------------------------------------------------------------------------
Part 1 APIs (See @Backgrounds on top of this file)
-------------------------------------------------------------------------------*/

View File

@ -15,52 +15,253 @@
*/
#include <stdint.h>
#include <string.h>
#include <sys/param.h>
#include "sdkconfig.h"
#include "esp_attr.h"
#include "esp_log.h"
#include "soc/ext_mem_defs.h"
#include "esp_private/mmu_psram_flash.h"
#if CONFIG_IDF_TARGET_ESP32S2
#include "soc/extmem_reg.h"
#include "esp32s2/rom/cache.h"
#elif CONFIG_IDF_TARGET_ESP32S3
#include "soc/extmem_reg.h"
#include "esp32s3/rom/cache.h"
#endif
#include "esp_check.h"
#include "soc/soc_caps.h"
#include "ext_mem_layout.h"
#include "freertos/FreeRTOS.h"
#include "hal/cache_types.h"
#include "hal/cache_ll.h"
#include "hal/mmu_types.h"
#include "hal/mmu_ll.h"
#include "mmu.h"
#define ALIGN_UP_BY(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
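//e.g. ALIGN_UP_BY(0x12345, 0x10000) == 0x20000; `align` must be a power of two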
__attribute__((unused)) static const char *TAG = "mmu";
#define MMU_PAGE_SIZE CONFIG_MMU_PAGE_SIZE
//This flag indicates the memory region is merged, we don't care about it anymore
#define MEM_REGION_MERGED -1
static const char *TAG = "mmu";
extern int _instruction_reserved_start;
extern int _instruction_reserved_end;
extern int _rodata_reserved_start;
extern int _rodata_reserved_end;
typedef struct mmu_linear_mem_ {
cache_bus_mask_t bus_id;
intptr_t start;
intptr_t end;
size_t pool_size;
intptr_t free_head;
size_t free_size;
int caps;
} mmu_linear_mem_t;
intptr_t mmu_get_psram_vaddr_start(void)
typedef struct {
/**
* Number of memory regions that are available after coalescing; this is always smaller than or equal to `SOC_MMU_LINEAR_ADDRESS_REGION_NUM`
*/
uint32_t num_regions;
/**
* This saves the available MMU linear address regions,
* after reserving flash .rodata and .text, and after coalescing.
* Only the first `num_regions` items are valid
*/
mmu_linear_mem_t mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM];
} mmu_ctx_t;
static mmu_ctx_t s_mmu_ctx;
static void s_reserve_irom_region(mmu_linear_mem_t *hw_mem_regions, int region_nums)
{
#if CONFIG_IDF_TARGET_ESP32S3
/**
* We follow the way the 1st bootloader loads flash .text:
*
* - IBUS addresses (between `_instruction_reserved_start` and `_instruction_reserved_end`) are currently consecutive on all chips,
* and we strongly rely on this to calculate the .text length
*/
size_t irom_len_to_reserve = (uint32_t)&_instruction_reserved_end - (uint32_t)&_instruction_reserved_start;
assert((mmu_ll_vaddr_to_laddr((uint32_t)&_instruction_reserved_end) - mmu_ll_vaddr_to_laddr((uint32_t)&_instruction_reserved_start)) == irom_len_to_reserve);
intptr_t rodata_end_aligned = ALIGN_UP_BY((intptr_t)&_rodata_reserved_end, MMU_PAGE_SIZE);
ESP_EARLY_LOGV(TAG, "rodata_end_aligned is 0x%x bytes", rodata_end_aligned);
return rodata_end_aligned;
irom_len_to_reserve = ALIGN_UP_BY(irom_len_to_reserve, MMU_PAGE_SIZE);
cache_bus_mask_t bus_mask = cache_ll_l1_get_bus(0, (uint32_t)&_instruction_reserved_start, irom_len_to_reserve);
#elif CONFIG_IDF_TARGET_ESP32S2
return DPORT_CACHE_ADDRESS_LOW;
#else //CONFIG_IDF_TARGET_ESP32
return DRAM1_CACHE_ADDRESS_LOW;
#endif
for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
if (bus_mask & hw_mem_regions[i].bus_id) {
if (hw_mem_regions[i].pool_size <= irom_len_to_reserve) {
hw_mem_regions[i].free_head = hw_mem_regions[i].end;
hw_mem_regions[i].free_size = 0;
irom_len_to_reserve -= hw_mem_regions[i].pool_size;
} else {
hw_mem_regions[i].free_head = hw_mem_regions[i].free_head + irom_len_to_reserve;
hw_mem_regions[i].free_size -= irom_len_to_reserve;
}
}
}
}
intptr_t mmu_get_psram_vaddr_end(void)
static void s_reserve_drom_region(mmu_linear_mem_t *hw_mem_regions, int region_nums)
{
#if CONFIG_IDF_TARGET_ESP32S3
return DRAM0_CACHE_ADDRESS_HIGH;
#elif CONFIG_IDF_TARGET_ESP32S2
return DRAM0_CACHE_ADDRESS_HIGH;
#else //CONFIG_IDF_TARGET_ESP32
return DRAM1_CACHE_ADDRESS_HIGH;
#endif
/**
* Similarly, we follow the way the 1st bootloader loads flash .rodata:
*/
size_t drom_len_to_reserve = (uint32_t)&_rodata_reserved_end - (uint32_t)&_rodata_reserved_start;
assert((mmu_ll_vaddr_to_laddr((uint32_t)&_rodata_reserved_end) - mmu_ll_vaddr_to_laddr((uint32_t)&_rodata_reserved_start)) == drom_len_to_reserve);
drom_len_to_reserve = ALIGN_UP_BY(drom_len_to_reserve, MMU_PAGE_SIZE);
cache_bus_mask_t bus_mask = cache_ll_l1_get_bus(0, (uint32_t)&_rodata_reserved_start, drom_len_to_reserve);
for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
if (bus_mask & hw_mem_regions[i].bus_id) {
if (hw_mem_regions[i].pool_size <= drom_len_to_reserve) {
hw_mem_regions[i].free_head = hw_mem_regions[i].end;
hw_mem_regions[i].free_size = 0;
drom_len_to_reserve -= hw_mem_regions[i].pool_size;
} else {
hw_mem_regions[i].free_head = hw_mem_regions[i].free_head + drom_len_to_reserve;
hw_mem_regions[i].free_size -= drom_len_to_reserve;
}
}
}
}
void esp_mmu_init(void)
{
mmu_linear_mem_t hw_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {};
for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
hw_mem_regions[i].start = g_mmu_mem_regions[i].start;
hw_mem_regions[i].end = g_mmu_mem_regions[i].end;
hw_mem_regions[i].pool_size = g_mmu_mem_regions[i].size;
hw_mem_regions[i].free_size = g_mmu_mem_regions[i].size;
hw_mem_regions[i].free_head = g_mmu_mem_regions[i].start;
hw_mem_regions[i].bus_id = g_mmu_mem_regions[i].bus_id;
hw_mem_regions[i].caps = g_mmu_mem_regions[i].caps;
#if CONFIG_IDF_TARGET_ESP32 || CONFIG_IDF_TARGET_ESP32S2
assert(__builtin_popcount(hw_mem_regions[i].bus_id) == 1);
#endif
assert(hw_mem_regions[i].pool_size % MMU_PAGE_SIZE == 0);
}
//First reserve the memory regions used for irom and drom, as we must follow the way the 1st bootloader loads them
s_reserve_irom_region(hw_mem_regions, SOC_MMU_LINEAR_ADDRESS_REGION_NUM);
s_reserve_drom_region(hw_mem_regions, SOC_MMU_LINEAR_ADDRESS_REGION_NUM);
if (SOC_MMU_LINEAR_ADDRESS_REGION_NUM > 1) {
//Now we can coalesce adjacent regions
for (int i = 1; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
mmu_linear_mem_t *a = &hw_mem_regions[i - 1];
mmu_linear_mem_t *b = &hw_mem_regions[i];
if ((b->free_head == a->end) && (b->caps == a->caps)) {
a->caps = MEM_REGION_MERGED;
b->bus_id |= a->bus_id;
b->start = a->start;
b->pool_size += a->pool_size;
b->free_head = a->free_head;
b->free_size += a->free_size;
}
}
}
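/**
 * Worked example (hypothetical numbers, using the ESP32S2 layout from ext_mem_layout.c): if DRAM1 ends
 * exactly where DRAM0 starts and both carry the same caps, DRAM1 is marked MEM_REGION_MERGED and DRAM0
 * absorbs its bus mask, pool size and free space, leaving one contiguous region.
 */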
//Count the mem regions left after coalescing
uint32_t region_num = 0;
for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
if (hw_mem_regions[i].caps != MEM_REGION_MERGED) {
region_num++;
}
}
ESP_EARLY_LOGV(TAG, "after coalescing, %d regions are left", region_num);
//All the static reservations are done; now initialise `s_mmu_ctx.mem_regions[]` with the available virtual memory regions
uint32_t available_region_idx = 0;
s_mmu_ctx.num_regions = region_num;
for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
if (hw_mem_regions[i].caps == MEM_REGION_MERGED) {
continue;
}
memcpy(&s_mmu_ctx.mem_regions[available_region_idx], &hw_mem_regions[i], sizeof(mmu_linear_mem_t));
available_region_idx++;
}
assert(available_region_idx == region_num);
}
esp_err_t esp_mmu_get_largest_free_block(int caps, size_t *out_len)
{
ESP_RETURN_ON_FALSE(out_len, ESP_ERR_INVALID_ARG, TAG, "null pointer");
if (caps & MMU_MEM_CAP_EXEC) {
if ((caps & MMU_MEM_CAP_8BIT) || (caps & MMU_MEM_CAP_WRITE)) {
//None of the executable memory is expected to be 8-bit accessible or writable.
return ESP_ERR_INVALID_ARG;
}
}
*out_len = 0;
size_t max = 0;
for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
if ((s_mmu_ctx.mem_regions[i].caps & caps) == caps) {
if (s_mmu_ctx.mem_regions[i].free_size > max) {
max = s_mmu_ctx.mem_regions[i].free_size;
}
}
}
*out_len = max;
return ESP_OK;
}
esp_err_t esp_mmu_find_vaddr_range(size_t size, uint32_t caps, const void **out_ptr)
{
ESP_RETURN_ON_FALSE(out_ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer");
if (caps & MMU_MEM_CAP_EXEC) {
if ((caps & MMU_MEM_CAP_8BIT) || (caps & MMU_MEM_CAP_WRITE)) {
//None of the executable memory is expected to be 8-bit accessible or writable.
return ESP_ERR_INVALID_ARG;
}
caps |= MMU_MEM_CAP_32BIT;
}
size_t aligned_size = ALIGN_UP_BY(size, MMU_PAGE_SIZE);
bool is_match = false;
uint32_t laddr = 0;
for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
if ((s_mmu_ctx.mem_regions[i].caps & caps) == caps) {
if (s_mmu_ctx.mem_regions[i].free_size < aligned_size) {
continue;
} else {
laddr = (uint32_t)s_mmu_ctx.mem_regions[i].free_head;
s_mmu_ctx.mem_regions[i].free_head += aligned_size;
s_mmu_ctx.mem_regions[i].free_size -= aligned_size;
is_match = true;
break;
}
}
}
ESP_RETURN_ON_FALSE(is_match, ESP_ERR_NOT_FOUND, TAG, "no such vaddr range");
ESP_EARLY_LOGV(TAG, "found laddr is 0x%x", laddr);
if (caps & MMU_MEM_CAP_EXEC) {
laddr = mmu_ll_laddr_to_vaddr(laddr, MMU_VADDR_INSTRUCTION);
} else {
laddr = mmu_ll_laddr_to_vaddr(laddr, MMU_VADDR_DATA);
}
*out_ptr = (void *)laddr;
return ESP_OK;
}
esp_err_t esp_mmu_dump_region_usage(void)
{
for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
ESP_EARLY_LOGI(TAG, "bus_id: 0x%x", s_mmu_ctx.mem_regions[i].bus_id);
ESP_EARLY_LOGI(TAG, "start: 0x%x", s_mmu_ctx.mem_regions[i].start);
ESP_EARLY_LOGI(TAG, "end: 0x%x", s_mmu_ctx.mem_regions[i].end);
ESP_EARLY_LOGI(TAG, "pool_size: 0x%x", s_mmu_ctx.mem_regions[i].pool_size);
ESP_EARLY_LOGI(TAG, "free_head: 0x%x", s_mmu_ctx.mem_regions[i].free_head);
ESP_EARLY_LOGI(TAG, "free_size: 0x%x", s_mmu_ctx.mem_regions[i].free_size);
ESP_EARLY_LOGI(TAG, "caps: 0x%x\n", s_mmu_ctx.mem_regions[i].caps);
}
return ESP_OK;
}

View File

@ -0,0 +1,76 @@
/*
* SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdlib.h>
#include <stdint.h>
#include "esp_err.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* This file will be moved out of the `esp_psram` component and will become the
* future MMU driver, maintaining all the external memory contexts including:
* - Flash
* - PSRAM
* - DDR
*
* For now it only supports virtual address maintenance on ESP32, ESP32S2 and ESP32S3, and is internal
*/
#define MMU_MEM_CAP_EXEC (1<<0)
#define MMU_MEM_CAP_READ (1<<1)
#define MMU_MEM_CAP_WRITE (1<<2)
#define MMU_MEM_CAP_32BIT (1<<3)
#define MMU_MEM_CAP_8BIT (1<<4)
/**
* @brief Initialise the MMU driver
*
* This is called once in the IDF startup code. Don't call it in applications
*/
void esp_mmu_init(void);
/**
* @brief Get largest consecutive free external virtual memory block, with given capabilities
*
* @param[in] caps Bitwise OR of MMU_MEM_CAP_* flags indicating the memory block
* @param[out] out_len Largest free block length, in bytes.
*
* @return
* - ESP_OK: On success
* - ESP_ERR_INVALID_ARG: Invalid arguments, could be null pointer
*/
esp_err_t esp_mmu_get_largest_free_block(int caps, size_t *out_len);
/**
* @brief Find a consecutive external virtual memory range, with given capabilities and size
*
* @param[in] size Size, in bytes, the amount of memory to find
* @param[in] caps Bitwise OR of MMU_MEM_CAP_* flags indicating the memory block
* @param[out] out_ptr Pointer to the memory range found
*
* @return
* - ESP_OK: On success
* - ESP_ERR_INVALID_ARG: Invalid arguments; could be a wrong caps combination, or a null pointer
* - ESP_ERR_NOT_FOUND: Didn't find enough memory with the given caps
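* @note The returned range is aligned up to the MMU page size and is not yet backed by a physical
* mapping; callers map it afterwards (this commit's esp_psram.c uses mmu_hal_map_region())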
*/
esp_err_t esp_mmu_find_vaddr_range(size_t size, uint32_t caps, const void **out_ptr);
/**
* @brief Dump internal memory region usage
*
* @return
* - ESP_OK: On success
*/
esp_err_t esp_mmu_dump_region_usage(void);
#ifdef __cplusplus
}
#endif

View File

@ -1,2 +1,3 @@
CONFIG_FREERTOS_HZ=1000
CONFIG_ESP_TASK_WDT=n
CONFIG_SPIRAM=y

View File

@ -245,6 +245,7 @@ SECTIONS
.flash.appdesc : ALIGN(0x10)
{
_rodata_reserved_start = ABSOLUTE(.); /* This is a symbol marking the flash.rodata start, this can be used for mmu driver to maintain virtual address */
_rodata_start = ABSOLUTE(.);
*(.rodata_desc .rodata_desc.*) /* Should be the first. App version info. DO NOT PUT ANYTHING BEFORE IT! */
@ -318,6 +319,7 @@ SECTIONS
*(.tbss)
*(.tbss.*)
_thread_local_end = ABSOLUTE(.);
_rodata_reserved_end = ABSOLUTE(.); /* This is a symbol marking the flash.rodata end, this can be used for mmu driver to maintain virtual address */
. = ALIGN(4);
} >default_rodata_seg
@ -332,6 +334,7 @@ SECTIONS
.flash.text :
{
_stext = .;
_instruction_reserved_start = ABSOLUTE(.); /* This is a symbol marking the flash.text start, this can be used for mmu driver to maintain virtual address */
_text_start = ABSOLUTE(.);
mapping[flash_text]
@ -350,6 +353,7 @@ SECTIONS
. += _esp_flash_mmap_prefetch_pad_size;
_text_end = ABSOLUTE(.);
_instruction_reserved_end = ABSOLUTE(.); /* This is a symbol marking the flash.text end, this can be used for mmu driver to maintain virtual address */
_etext = .;
/* Similar to _iram_start, this symbol goes here so it is

View File

@ -114,8 +114,8 @@ MEMORY
rtc_data_seg(RW) : org = 0x3ff9e000, len = 0x2000 - ESP_BOOTLOADER_RESERVE_RTC
/* external memory, covers the dport, dram0, dram1 cacheable address space */
extern_ram_seg(RWX) : org = 0x3F500000,
len = 0xA80000
extern_ram_seg(RWX) : org = 0x3F800000,
len = 0x780000
}
#if defined(CONFIG_ESP32S2_USE_FIXED_STATIC_RAM_SIZE)

View File

@ -264,7 +264,7 @@ SECTIONS
.flash.appdesc : ALIGN(0x10)
{
_rodata_reserved_start = ABSOLUTE(.);
_rodata_reserved_start = ABSOLUTE(.); /* This is a symbol marking the flash.rodata start, this can be used for mmu driver to maintain virtual address */
_rodata_start = ABSOLUTE(.);
*(.rodata_desc .rodata_desc.*) /* Should be the first. App version info. DO NOT PUT ANYTHING BEFORE IT! */
@ -336,7 +336,7 @@ SECTIONS
*(.tbss)
*(.tbss.*)
_thread_local_end = ABSOLUTE(.);
_rodata_reserved_end = ABSOLUTE(.);
_rodata_reserved_end = ABSOLUTE(.); /* This is a symbol marking the flash.rodata end, this can be used for mmu driver to maintain virtual address */
. = ALIGN(4);
} >default_rodata_seg
@ -351,7 +351,7 @@ SECTIONS
.flash.text :
{
_stext = .;
_instruction_reserved_start = ABSOLUTE(.);
_instruction_reserved_start = ABSOLUTE(.); /* This is a symbol marking the flash.text start, this can be used for mmu driver to maintain virtual address */
_text_start = ABSOLUTE(.);
mapping[flash_text]
@ -370,7 +370,7 @@ SECTIONS
. += _esp_flash_mmap_prefetch_pad_size;
_text_end = ABSOLUTE(.);
_instruction_reserved_end = ABSOLUTE(.);
_instruction_reserved_end = ABSOLUTE(.); /* This is a symbol marking the flash.text end, this can be used for mmu driver to maintain virtual address */
_etext = .;
/* Similar to _iram_start, this symbol goes here so it is

View File

@ -244,7 +244,7 @@ SECTIONS
.flash.text :
{
_stext = .;
_instruction_reserved_start = ABSOLUTE(.);
_instruction_reserved_start = ABSOLUTE(.); /* This is a symbol marking the flash.text start, this can be used for mmu driver to maintain virtual address */
_text_start = ABSOLUTE(.);
mapping[flash_text]
@ -263,7 +263,7 @@ SECTIONS
. += _esp_flash_mmap_prefetch_pad_size;
_text_end = ABSOLUTE(.);
_instruction_reserved_end = ABSOLUTE(.);
_instruction_reserved_end = ABSOLUTE(.); /* This is a symbol marking the flash.text end, this can be used for mmu driver to maintain virtual address */
_etext = .;
/**
@ -288,7 +288,7 @@ SECTIONS
/* Prepare the alignment of the section above. Few bytes (0x20) must be
* added for the mapping header. */
. = ALIGN(0x10000) + 0x20;
_rodata_reserved_start = .;
_rodata_reserved_start = .; /* This is a symbol marking the flash.rodata start, this can be used for mmu driver to maintain virtual address */
} > default_rodata_seg
.flash.appdesc : ALIGN(0x10)
@ -361,7 +361,7 @@ SECTIONS
*(.tbss)
*(.tbss.*)
_thread_local_end = ABSOLUTE(.);
_rodata_reserved_end = ABSOLUTE(.);
_rodata_reserved_end = ABSOLUTE(.); /* This is a symbol marking the flash.rodata end, this can be used for mmu driver to maintain virtual address */
. = ALIGN(4);
} > default_rodata_seg

View File

@ -18,6 +18,38 @@
extern "C" {
#endif
/**
* Convert MMU virtual address to linear address
*
* @param vaddr virtual address
*
* @return linear address
*/
static inline uint32_t mmu_ll_vaddr_to_laddr(uint32_t vaddr)
{
return vaddr & SOC_MMU_LINEAR_ADDR_MASK;
}
/**
* Convert MMU linear address to virtual address
*
* @param laddr linear address
* @param vaddr_type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*
* @return virtual address
*/
static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type)
{
uint32_t vaddr_base = 0;
if (vaddr_type == MMU_VADDR_DATA) {
vaddr_base = SOC_MMU_DBUS_VADDR_BASE;
} else {
vaddr_base = SOC_MMU_IBUS_VADDR_BASE;
}
return vaddr_base | laddr;
}
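/**
 * Example round trip, using the ESP32/ESP32S2 constants from this commit as an illustration:
 * 0x3F800000 & SOC_MMU_LINEAR_ADDR_MASK (0x1FFFFFF) gives laddr 0x1800000, and OR-ing
 * SOC_MMU_DBUS_VADDR_BASE (0x3E000000) back in reconstructs vaddr 0x3F800000.
 */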
/**
* Get MMU page size
*

View File

@ -8,6 +8,7 @@
#pragma once
#include "stdint.h"
#include "soc/extmem_reg.h"
#include "soc/ext_mem_defs.h"
#include "hal/assert.h"
@ -18,6 +19,38 @@
extern "C" {
#endif
/**
* Convert MMU virtual address to linear address
*
* @param vaddr virtual address
*
* @return linear address
*/
static inline uint32_t mmu_ll_vaddr_to_laddr(uint32_t vaddr)
{
return vaddr & SOC_MMU_LINEAR_ADDR_MASK;
}
/**
* Convert MMU linear address to virtual address
*
* @param laddr linear address
* @param vaddr_type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*
* @return virtual address
*/
static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type)
{
uint32_t vaddr_base = 0;
if (vaddr_type == MMU_VADDR_DATA) {
vaddr_base = SOC_MMU_DBUS_VADDR_BASE;
} else {
vaddr_base = SOC_MMU_IBUS_VADDR_BASE;
}
return vaddr_base | laddr;
}
/**
* Get MMU page size
*

View File

@ -18,6 +18,38 @@
extern "C" {
#endif
/**
* Convert MMU virtual address to linear address
*
* @param vaddr virtual address
*
* @return linear address
*/
static inline uint32_t mmu_ll_vaddr_to_laddr(uint32_t vaddr)
{
return vaddr & SOC_MMU_LINEAR_ADDR_MASK;
}
/**
* Convert MMU linear address to virtual address
*
* @param laddr linear address
* @param vaddr_type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*
* @return virtual address
*/
static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type)
{
uint32_t vaddr_base = 0;
if (vaddr_type == MMU_VADDR_DATA) {
vaddr_base = SOC_MMU_DBUS_VADDR_BASE;
} else {
vaddr_base = SOC_MMU_IBUS_VADDR_BASE;
}
return vaddr_base | laddr;
}
/**
* Get MMU page size
*

View File

@ -20,6 +20,14 @@ typedef enum {
MMU_PAGE_64KB = 0x10000,
} mmu_page_size_t;
/**
* MMU virtual address type
*/
typedef enum {
MMU_VADDR_DATA,
MMU_VADDR_INSTRUCTION,
} mmu_vaddr_t;
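//Used by `mmu_ll_laddr_to_vaddr()` to select the IBUS or DBUS vaddr base when converting a linear address back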
/**
* External physical memory
*/

View File

@ -207,6 +207,10 @@ config SOC_SHARED_IDCACHE_SUPPORTED
bool
default y
config SOC_MMU_LINEAR_ADDRESS_REGION_NUM
int
default 5
config SOC_CPU_CORES_NUM
int
default 2

View File

@ -29,6 +29,7 @@ extern "C" {
#define DROM0_CACHE_ADDRESS_HIGH 0x3F800000
#define BUS_SIZE(bus_name) (bus_name##_ADDRESS_HIGH - bus_name##_ADDRESS_LOW)
#define ADDRESS_IN_BUS(bus_name, vaddr) ((vaddr) >= bus_name##_ADDRESS_LOW && (vaddr) < bus_name##_ADDRESS_HIGH)
#define ADDRESS_IN_IRAM0_CACHE(vaddr) ADDRESS_IN_BUS(IRAM0_CACHE, vaddr)
#define ADDRESS_IN_IRAM1_CACHE(vaddr) ADDRESS_IN_BUS(IRAM1_CACHE, vaddr)
@ -36,10 +37,43 @@ extern "C" {
#define ADDRESS_IN_DRAM1_CACHE(vaddr) ADDRESS_IN_BUS(DRAM1_CACHE, vaddr)
#define ADDRESS_IN_DROM0_CACHE(vaddr) ADDRESS_IN_BUS(DROM0_CACHE, vaddr)
#define MMU_INVALID BIT(8)
#define MMU_INVALID BIT(8)
//MMU entry num: 384 entries are used in IDF
#define MMU_ENTRY_NUM 384
#define SOC_MMU_DBUS_VADDR_BASE 0x3E000000
#define SOC_MMU_IBUS_VADDR_BASE 0x40000000
/*------------------------------------------------------------------------------
* MMU Linear Address
*----------------------------------------------------------------------------*/
/**
* - 64KB MMU page size: the low 16 bits (0xFFFF) are the in-page offset
* - 384 MMU entries: indexing them needs 9 bits (0x1FF)
*
* Therefore the linear address mask is 0x1FFFFFF
*/
#define SOC_MMU_LINEAR_ADDR_MASK 0x1FFFFFF
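//Example: DRAM1 vaddr 0x3F800000 & 0x1FFFFFF == linear address 0x1800000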
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW (IRAM0_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH (IRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#define SOC_MMU_IRAM1_LINEAR_ADDRESS_LOW (IRAM1_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#define SOC_MMU_IRAM1_LINEAR_ADDRESS_HIGH (IRAM1_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#define SOC_MMU_IROM0_LINEAR_ADDRESS_LOW (IROM0_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#define SOC_MMU_IROM0_LINEAR_ADDRESS_HIGH (IROM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#define SOC_MMU_DROM0_LINEAR_ADDRESS_LOW (DROM0_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#define SOC_MMU_DROM0_LINEAR_ADDRESS_HIGH (DROM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#define SOC_MMU_DRAM1_LINEAR_ADDRESS_LOW (DRAM1_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#define SOC_MMU_DRAM1_LINEAR_ADDRESS_HIGH (DRAM1_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
//MMU entry num
#define MMU_ENTRY_NUM 256
#ifdef __cplusplus
}

View File

@ -130,8 +130,11 @@
#define SOC_BROWNOUT_RESET_SUPPORTED 1
#endif
/*-------------------------- CACHE CAPS --------------------------------------*/
/*-------------------------- CACHE/MMU CAPS ----------------------------------*/
#define SOC_SHARED_IDCACHE_SUPPORTED 1 //Shared Cache for both instructions and data
#define SOC_MMU_LINEAR_ADDRESS_REGION_NUM 5
/*-------------------------- CPU CAPS ----------------------------------------*/
#define SOC_CPU_CORES_NUM 2

View File

@ -235,6 +235,10 @@ config SOC_BROWNOUT_RESET_SUPPORTED
bool
default y
config SOC_MMU_LINEAR_ADDRESS_REGION_NUM
int
default 6
config SOC_CP_DMA_MAX_BUFFER_SIZE
int
default 4095

View File

@ -137,6 +137,39 @@ extern "C" {
#define CACHE_MEMORY_BANK2_ADDR 0x3FFB4000
#define CACHE_MEMORY_BANK3_ADDR 0x3FFB6000
#define SOC_MMU_DBUS_VADDR_BASE 0x3E000000
#define SOC_MMU_IBUS_VADDR_BASE 0x40000000
/*------------------------------------------------------------------------------
* MMU Linear Address
*----------------------------------------------------------------------------*/
/**
* - 64KB MMU page size: the low 16 bits (0xFFFF) are the in-page offset
* - 384 MMU entries: indexing them needs 9 bits (0x1FF)
*
* Therefore the linear address mask is 0x1FFFFFF
*/
#define SOC_MMU_LINEAR_ADDR_MASK 0x1FFFFFF
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW (IRAM0_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH (IRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#define SOC_MMU_IRAM1_LINEAR_ADDRESS_LOW (IRAM1_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#define SOC_MMU_IRAM1_LINEAR_ADDRESS_HIGH (IRAM1_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#define SOC_MMU_DROM0_LINEAR_ADDRESS_LOW (DROM0_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#define SOC_MMU_DROM0_LINEAR_ADDRESS_HIGH (DROM0_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#define SOC_MMU_DPORT_LINEAR_ADDRESS_LOW (DPORT_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#define SOC_MMU_DPORT_LINEAR_ADDRESS_HIGH (DPORT_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#define SOC_MMU_DRAM1_LINEAR_ADDRESS_LOW (DRAM1_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#define SOC_MMU_DRAM1_LINEAR_ADDRESS_HIGH (DRAM1_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW (DRAM0_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH (DRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#ifdef __cplusplus
}
#endif

View File

@ -113,6 +113,9 @@
/*-------------------------- BROWNOUT CAPS -----------------------------------*/
#define SOC_BROWNOUT_RESET_SUPPORTED 1
/*-------------------------- CACHE/MMU CAPS ----------------------------------*/
#define SOC_MMU_LINEAR_ADDRESS_REGION_NUM 6
/*-------------------------- CP-DMA CAPS -------------------------------------*/
#define SOC_CP_DMA_MAX_BUFFER_SIZE (4095) /*!< Maximum size of the buffer that can be attached to descriptor */

View File

@ -299,6 +299,10 @@ config SOC_BROWNOUT_RESET_SUPPORTED
bool
default y
config SOC_MMU_LINEAR_ADDRESS_REGION_NUM
int
default 1
config SOC_CPU_CORES_NUM
int
default 2

View File

@ -104,6 +104,46 @@ extern "C" {
#define CACHE_MEMORY_DBANK0_ADDR 0x3fcf0000
#define CACHE_MEMORY_DBANK1_ADDR 0x3fcf8000
#define SOC_MMU_DBUS_VADDR_BASE 0x3C000000
#define SOC_MMU_IBUS_VADDR_BASE 0x42000000
/*------------------------------------------------------------------------------
* MMU Linear Address
*----------------------------------------------------------------------------*/
/**
* - 64KB MMU page size: the low 16 bits (0xFFFF) are the in-page offset
* - 512 MMU entries: indexing them needs 9 bits (0x1FF)
*
* Therefore the linear address mask is 0x1FFFFFF
*/
#define SOC_MMU_LINEAR_ADDR_MASK 0x1FFFFFF
/**
* - If the high linear address isn't 0, the MMU can recognize these addresses
* - If the high linear address is 0, the MMU linear address range is equal to or smaller than the vaddr range;
* in this case, we use the max linear space.
*/
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW (IRAM0_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#if ((IRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK) > 0)
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH (IRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#else
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH (SOC_MMU_LINEAR_ADDR_MASK + 1)
#endif
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW (DRAM0_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#if ((DRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK) > 0)
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH (DRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#else
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH (SOC_MMU_LINEAR_ADDR_MASK + 1)
#endif
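/**
 * Example: on ESP32S3, IRAM0_CACHE_ADDRESS_HIGH is 0x44000000 and 0x44000000 & 0x1FFFFFF == 0,
 * so the fallback high bound SOC_MMU_LINEAR_ADDR_MASK + 1 (0x2000000, the full 32MB linear window) is used.
 */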
/**
* I/D share the MMU linear address range
*/
_Static_assert(SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW == SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW, "IRAM0 and DRAM0 linear addresses should be the same");
#ifdef __cplusplus
}
#endif

View File

@ -110,6 +110,9 @@
/*-------------------------- BROWNOUT CAPS -----------------------------------*/
#define SOC_BROWNOUT_RESET_SUPPORTED 1
/*-------------------------- CACHE/MMU CAPS ----------------------------------*/
#define SOC_MMU_LINEAR_ADDRESS_REGION_NUM (1U)
/*-------------------------- CPU CAPS ----------------------------------------*/
#define SOC_CPU_CORES_NUM 2
#define SOC_CPU_INTR_NUM 32