Merge branch 'feature/support_shared_vs_non_shared_mmap' into 'master'

esp_mm: shared and non-shared mapping

Closes IDF-6575

See merge request espressif/esp-idf!22320
Armando (Dou Yiwen) 2023-02-18 08:02:15 +08:00
commit 0c635543ff
18 changed files with 243 additions and 51 deletions


@@ -180,7 +180,7 @@ const void *bootloader_mmap(uint32_t src_paddr, uint32_t size)
* For now, simply check that it's a valid vaddr; this doesn't check whether it's readable, writable, or executable.
* TODO: IDF-4710
*/
if (mmu_ll_check_valid_ext_vaddr_region(0, MMU_BLOCK0_VADDR, size_after_paddr_aligned) == 0) {
if (mmu_ll_check_valid_ext_vaddr_region(0, MMU_BLOCK0_VADDR, size_after_paddr_aligned, MMU_VADDR_DATA | MMU_VADDR_INSTRUCTION) == 0) {
ESP_EARLY_LOGE(TAG, "vaddr not valid");
return NULL;
}


@@ -114,8 +114,14 @@ typedef struct {
mem_region_t mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM];
} mmu_ctx_t;
static mmu_ctx_t s_mmu_ctx;
#if ENABLE_PADDR_CHECK
static bool s_is_enclosed(uint32_t block_start, uint32_t block_end, uint32_t new_block_start, uint32_t new_block_size);
static bool s_is_overlapped(uint32_t block_start, uint32_t block_end, uint32_t new_block_start, uint32_t new_block_size);
#endif //#if ENABLE_PADDR_CHECK
#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
static void s_reserve_irom_region(mem_region_t *hw_mem_regions, int region_nums)
@@ -387,7 +393,7 @@ static void IRAM_ATTR NOINLINE_ATTR s_do_mapping(mmu_target_t target, uint32_t v
ESP_EARLY_LOGV(TAG, "actual_mapped_len is 0x%"PRIx32, actual_mapped_len);
}
esp_err_t esp_mmu_map(esp_paddr_t paddr_start, size_t size, mmu_mem_caps_t caps, mmu_target_t target, void **out_ptr)
esp_err_t esp_mmu_map(esp_paddr_t paddr_start, size_t size, mmu_target_t target, mmu_mem_caps_t caps, int flags, void **out_ptr)
{
esp_err_t ret = ESP_FAIL;
ESP_RETURN_ON_FALSE(out_ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer");
@@ -436,22 +442,35 @@ esp_err_t esp_mmu_map(esp_paddr_t paddr_start, size_t size, mmu_mem_caps_t caps,
mem_block_t *mem_block = NULL;
#if ENABLE_PADDR_CHECK
bool is_mapped = false;
bool is_enclosed = false;
bool is_overlapped = false;
bool allow_overlap = flags & ESP_MMU_MMAP_FLAG_PADDR_SHARED;
TAILQ_FOREACH(mem_block, &found_region->mem_block_head, entries) {
if (target == mem_block->target) {
if ((paddr_start >= mem_block->paddr_start) && ((paddr_start + aligned_size) <= mem_block->paddr_end)) {
//the to-be-mapped paddr region is mapped already
is_mapped = true;
if ((s_is_enclosed(mem_block->paddr_start, mem_block->paddr_end, paddr_start, aligned_size))) {
//the to-be-mapped paddr block is mapped already
is_enclosed = true;
break;
}
if (!allow_overlap && (s_is_overlapped(mem_block->paddr_start, mem_block->paddr_end, paddr_start, aligned_size))) {
is_overlapped = true;
break;
}
}
}
if (is_mapped) {
ESP_LOGW(TAG, "paddr region is mapped already, vaddr_start: %p, size: 0x%x", (void *)mem_block->vaddr_start, mem_block->size);
if (is_enclosed) {
ESP_LOGW(TAG, "paddr block is mapped already, vaddr_start: %p, size: 0x%x", (void *)mem_block->vaddr_start, mem_block->size);
*out_ptr = (void *)mem_block->vaddr_start;
return ESP_ERR_INVALID_STATE;
}
if (!allow_overlap && is_overlapped) {
ESP_LOGE(TAG, "paddr block is overlapped with an already mapped paddr block");
return ESP_ERR_INVALID_ARG;
}
#endif //#if ENABLE_PADDR_CHECK
new_block = (mem_block_t *)heap_caps_calloc(1, sizeof(mem_block_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
@@ -511,9 +530,6 @@ esp_err_t esp_mmu_map(esp_paddr_t paddr_start, size_t size, mmu_mem_caps_t caps,
return ESP_OK;
err:
if (new_block) {
free(new_block);
}
if (dummy_tail) {
free(dummy_tail);
}
@@ -682,7 +698,7 @@ static bool NOINLINE_ATTR IRAM_ATTR s_vaddr_to_paddr(uint32_t vaddr, esp_paddr_t
esp_err_t esp_mmu_vaddr_to_paddr(void *vaddr, esp_paddr_t *out_paddr, mmu_target_t *out_target)
{
ESP_RETURN_ON_FALSE(vaddr && out_paddr, ESP_ERR_INVALID_ARG, TAG, "null pointer");
ESP_RETURN_ON_FALSE(mmu_ll_check_valid_ext_vaddr_region(0, (uint32_t)vaddr, 1), ESP_ERR_INVALID_ARG, TAG, "not a valid external virtual address");
ESP_RETURN_ON_FALSE(mmu_hal_check_valid_ext_vaddr_region(0, (uint32_t)vaddr, 1, MMU_VADDR_DATA | MMU_VADDR_INSTRUCTION), ESP_ERR_INVALID_ARG, TAG, "not a valid external virtual address");
esp_paddr_t paddr = 0;
mmu_target_t target = 0;
@@ -722,3 +738,76 @@ esp_err_t esp_mmu_paddr_to_vaddr(esp_paddr_t paddr, mmu_target_t target, mmu_vad
return ESP_OK;
}
#if ENABLE_PADDR_CHECK
/*---------------------------------------------------------------
Helper functions for block checks
---------------------------------------------------------------*/
/**
* Check if a new block is enclosed by an existing block, e.g.:
*
* This is enclosed:
*
* new_block_start new_block_end
* |-------- New Block --------|
* |--------------- Block ---------------|
* block_start block_end
*
* @note Note the difference from `s_is_overlapped()` below
*
* @param block_start An original block start
* @param block_end An original block end
* @param new_block_start New block start
* @param new_block_size New block size
*
* @return True: new block is enclosed; False: new block is not enclosed
*/
static bool s_is_enclosed(uint32_t block_start, uint32_t block_end, uint32_t new_block_start, uint32_t new_block_size)
{
bool is_enclosed = false;
uint32_t new_block_end = new_block_start + new_block_size;
if ((new_block_start >= block_start) && (new_block_end <= block_end)) {
is_enclosed = true;
} else {
is_enclosed = false;
}
return is_enclosed;
}
/**
* Check if a new block overlaps an existing block, e.g.:
*
* This is overlapped:
*
* new_block_start new_block_end
* |---------- New Block ----------|
* |--------------- Block ---------------|
* block_start block_end
*
* @note Note the difference from `s_is_enclosed()` above
*
* @param block_start An original block start
* @param block_end An original block end
* @param new_block_start New block start
* @param new_block_size New block size
*
* @return True: new block is overlapped; False: new block is not overlapped
*/
static bool s_is_overlapped(uint32_t block_start, uint32_t block_end, uint32_t new_block_start, uint32_t new_block_size)
{
bool is_overlapped = false;
uint32_t new_block_end = new_block_start + new_block_size;
if (((new_block_start < block_start) && (new_block_end > block_start)) ||
((new_block_start < block_end) && (new_block_end > block_end))) {
is_overlapped = true;
} else {
is_overlapped = false;
}
return is_overlapped;
}
#endif //#if ENABLE_PADDR_CHECK
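
The distinction between these two predicates drives the new flag handling in esp_mmu_map(): an enclosed paddr block reuses the existing mapping (ESP_ERR_INVALID_STATE plus the existing vaddr), while an overlapped block is only accepted when ESP_MMU_MMAP_FLAG_PADDR_SHARED is set. The host-side sketch below restates the two checks as standalone functions purely for illustration; the function names and block boundaries are made up, not part of the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

// Standalone restatements of s_is_enclosed()/s_is_overlapped(), for illustration only.
static bool is_enclosed(uint32_t start, uint32_t end, uint32_t new_start, uint32_t new_size)
{
    uint32_t new_end = new_start + new_size;
    return (new_start >= start) && (new_end <= end);
}

static bool is_overlapped(uint32_t start, uint32_t end, uint32_t new_start, uint32_t new_size)
{
    uint32_t new_end = new_start + new_size;
    return ((new_start < start) && (new_end > start)) ||
           ((new_start < end) && (new_end > end));
}

int main(void)
{
    // Existing block: [0x1000, 0x3000)
    printf("%d %d\n", is_enclosed(0x1000, 0x3000, 0x1800, 0x800),
           is_overlapped(0x1000, 0x3000, 0x1800, 0x800));   // 1 0: fully inside
    printf("%d %d\n", is_enclosed(0x1000, 0x3000, 0x0800, 0x1000),
           is_overlapped(0x1000, 0x3000, 0x0800, 0x1000));  // 0 1: straddles the start boundary
    printf("%d %d\n", is_enclosed(0x1000, 0x3000, 0x0800, 0x3000),
           is_overlapped(0x1000, 0x3000, 0x0800, 0x3000));  // 0 1: new block encloses the old one
    return 0;
}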


@@ -8,6 +8,7 @@
#include <stdlib.h>
#include <stdint.h>
#include "esp_err.h"
#include "esp_bit_defs.h"
#include "hal/mmu_types.h"
#ifdef __cplusplus
@@ -34,6 +35,24 @@ extern "C" {
* - A Slot is the vaddr range between 2 blocks.
*/
/**
* MMAP flags
*/
/**
* @brief Share this mapping
*
* - If this flag is set, a paddr block can be mapped to multiple vaddr blocks.
* 1. This happens when:
* - the to-be-mapped paddr block overlaps an already mapped paddr block.
* - the to-be-mapped paddr block encloses an already mapped paddr block.
* 2. If the to-be-mapped paddr block is enclosed by an already mapped paddr block, no new mapping will happen and ESP_ERR_INVALID_STATE will be returned. The out pointer will be the vaddr corresponding to the already mapped paddr.
* 3. If the to-be-mapped paddr block is exactly the same as an already mapped paddr block, no new mapping will happen and ESP_ERR_INVALID_STATE will be returned. The out pointer will be the corresponding vaddr.
*
* - If this flag isn't set, an overlapped, enclosed, or identical to-be-mapped paddr block will lead to ESP_ERR_INVALID_ARG.
*/
#define ESP_MMU_MMAP_FLAG_PADDR_SHARED BIT(0)
/**
* @brief Physical memory type
*/
@@ -46,8 +65,9 @@ typedef uint32_t esp_paddr_t;
*
* @param[in] paddr_start Start address of the physical memory block
* @param[in] size Size to be mapped. Size will be rounded up to the nearest multiple of the MMU page size
* @param[in] caps Memory capabilities, see `mmu_mem_caps_t`
* @param[in] target Physical memory target you're going to map to, see `mmu_target_t`
* @param[in] caps Memory capabilities, see `mmu_mem_caps_t`
* @param[in] flags Mmap flags
* @param[out] out_ptr Start address of the mapped virtual memory
*
* @return
@@ -64,7 +84,7 @@ typedef uint32_t esp_paddr_t;
* block_start block_end
*
*/
esp_err_t esp_mmu_map(esp_paddr_t paddr_start, size_t size, mmu_mem_caps_t caps, mmu_target_t target, void **out_ptr);
esp_err_t esp_mmu_map(esp_paddr_t paddr_start, size_t size, mmu_target_t target, mmu_mem_caps_t caps, int flags, void **out_ptr);
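
For illustration, a call under the reordered signature might look like the sketch below. The header name esp_mmu_map.h, the flash offsets, and the sizes are assumptions; both blocks get rounded up to the MMU page size, and the second call only succeeds because the shared flag permits the overlap.

#include "esp_err.h"
#include "esp_mmu_map.h"  // assumed header name for the API above

void example_shared_mapping(void)
{
    void *ptr0 = NULL;
    void *ptr1 = NULL;
    // First mapping of a flash paddr block (hypothetical offset/size).
    ESP_ERROR_CHECK(esp_mmu_map(0x10000, 0x20000, MMU_TARGET_FLASH0,
                                MMU_MEM_CAP_READ, ESP_MMU_MMAP_FLAG_PADDR_SHARED, &ptr0));
    // Overlapping paddr block: accepted only because ESP_MMU_MMAP_FLAG_PADDR_SHARED
    // is set; without the flag this call would return ESP_ERR_INVALID_ARG.
    ESP_ERROR_CHECK(esp_mmu_map(0x20000, 0x20000, MMU_TARGET_FLASH0,
                                MMU_MEM_CAP_READ, ESP_MMU_MMAP_FLAG_PADDR_SHARED, &ptr1));
}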
/**
* @brief Unmap a previously mapped virtual memory block


@@ -38,11 +38,11 @@ TEST_CASE("Can dump mapped block stats", "[mmu]")
ESP_LOGI(TAG, "found partition '%s' at offset 0x%"PRIx32" with size 0x%"PRIx32, part->label, part->address, part->size);
void *ptr0 = NULL;
TEST_ESP_OK(esp_mmu_map(part->address, TEST_BLOCK_SIZE, MMU_MEM_CAP_READ, MMU_TARGET_FLASH0, &ptr0));
TEST_ESP_OK(esp_mmu_map(part->address, TEST_BLOCK_SIZE, MMU_TARGET_FLASH0, MMU_MEM_CAP_READ, 0, &ptr0));
void *ptr1 = NULL;
TEST_ESP_OK(esp_mmu_map(part->address, TEST_BLOCK_SIZE, MMU_MEM_CAP_EXEC, MMU_TARGET_FLASH0, &ptr1));
TEST_ESP_OK(esp_mmu_map(part->address + TEST_BLOCK_SIZE, TEST_BLOCK_SIZE, MMU_TARGET_FLASH0, MMU_MEM_CAP_EXEC, 0, &ptr1));
void *ptr2 = NULL;
TEST_ESP_OK(esp_mmu_map(part->address, TEST_BLOCK_SIZE, MMU_MEM_CAP_READ, MMU_TARGET_FLASH0, &ptr2));
TEST_ESP_OK(esp_mmu_map(part->address + 2 * TEST_BLOCK_SIZE, TEST_BLOCK_SIZE, MMU_TARGET_FLASH0, MMU_MEM_CAP_READ, 0, &ptr2));
esp_mmu_map_dump_mapped_blocks(stdout);


@@ -109,7 +109,8 @@ TEST_CASE("test all readable vaddr can map to flash", "[mmu]")
TEST_ASSERT(block_info && "no mem");
void *ptr = NULL;
ret = esp_mmu_map(part->address, TEST_BLOCK_SIZE, MMU_MEM_CAP_READ, MMU_TARGET_FLASH0, &ptr);
//No need to use the flag; we enabled ESP_MMAP_TEST_ALLOW_MAP_TO_MAPPED_PADDR in this test_app
ret = esp_mmu_map(part->address, TEST_BLOCK_SIZE, MMU_TARGET_FLASH0, MMU_MEM_CAP_READ, 0, &ptr);
if (ret == ESP_OK) {
ESP_LOGI(TAG, "ptr is %p", ptr);
bool success = s_test_mmap_data_by_random((uint8_t *)ptr, sizeof(sector_buf), test_seed);
@@ -156,7 +157,8 @@ TEST_CASE("test all executable vaddr can map to flash", "[mmu]")
TEST_ASSERT(block_info && "no mem");
void *ptr = NULL;
ret = esp_mmu_map(part->address, TEST_BLOCK_SIZE, MMU_MEM_CAP_EXEC, MMU_TARGET_FLASH0, &ptr);
//No need to use the flag; we enabled ESP_MMAP_TEST_ALLOW_MAP_TO_MAPPED_PADDR in this test_app
ret = esp_mmu_map(part->address, TEST_BLOCK_SIZE, MMU_TARGET_FLASH0, MMU_MEM_CAP_EXEC, 0, &ptr);
if (ret == ESP_OK) {
ESP_LOGI(TAG, "ptr is %p", ptr);
for (int i = 0; i < TEST_BLOCK_SIZE; i += 0x100) {


@@ -12,6 +12,7 @@
#include "hal/cache_hal.h"
#include "hal/cache_types.h"
#include "hal/cache_ll.h"
#include "hal/mmu_hal.h"
#include "hal/mmu_ll.h"
#include "soc/soc_caps.h"
@@ -117,6 +118,6 @@ void cache_hal_enable(cache_type_t type)
void cache_hal_invalidate_addr(uint32_t vaddr, uint32_t size)
{
//Currently only esp32 has 2 MMUs; this file doesn't build on esp32
HAL_ASSERT(mmu_ll_check_valid_ext_vaddr_region(0, vaddr, size));
HAL_ASSERT(mmu_hal_check_valid_ext_vaddr_region(0, vaddr, size, MMU_VADDR_DATA | MMU_VADDR_INSTRUCTION));
Cache_Invalidate_Addr(vaddr, size);
}


@@ -86,21 +86,30 @@ static inline void mmu_ll_set_page_size(uint32_t mmu_id, uint32_t size)
* @param mmu_id MMU ID
* @param vaddr_start start of the virtual address
* @param len length, in bytes
* @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*
* @return
* True for valid
*/
__attribute__((always_inline))
static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t vaddr_start, uint32_t len)
static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t vaddr_start, uint32_t len, mmu_vaddr_t type)
{
(void)mmu_id;
uint32_t vaddr_end = vaddr_start + len - 1;
bool valid = false;
return (ADDRESS_IN_IRAM0_CACHE(vaddr_start) && ADDRESS_IN_IRAM0_CACHE(vaddr_end)) ||
(ADDRESS_IN_IRAM1_CACHE(vaddr_start) && ADDRESS_IN_IRAM1_CACHE(vaddr_end)) ||
(ADDRESS_IN_IROM0_CACHE(vaddr_start) && ADDRESS_IN_IROM0_CACHE(vaddr_end)) ||
(ADDRESS_IN_DRAM1_CACHE(vaddr_start) && ADDRESS_IN_DRAM1_CACHE(vaddr_end)) ||
(ADDRESS_IN_DROM0_CACHE(vaddr_start) && ADDRESS_IN_DROM0_CACHE(vaddr_end));
if (type & MMU_VADDR_DATA) {
valid |= (ADDRESS_IN_DRAM1_CACHE(vaddr_start) && ADDRESS_IN_DRAM1_CACHE(vaddr_end)) ||
(ADDRESS_IN_DROM0_CACHE(vaddr_start) && ADDRESS_IN_DROM0_CACHE(vaddr_end));
}
if (type & MMU_VADDR_INSTRUCTION) {
valid |= (ADDRESS_IN_IRAM0_CACHE(vaddr_start) && ADDRESS_IN_IRAM0_CACHE(vaddr_end)) ||
(ADDRESS_IN_IRAM1_CACHE(vaddr_start) && ADDRESS_IN_IRAM1_CACHE(vaddr_end)) ||
(ADDRESS_IN_IROM0_CACHE(vaddr_start) && ADDRESS_IN_IROM0_CACHE(vaddr_end));
}
return valid;
}
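
With the new type argument, a caller can ask whether a range is valid as data, as instruction, or as either; OR-ing both flags simply widens the set of accepted address ranges. A minimal sketch, assuming an ESP-IDF build where the chip-specific LL header is available (classify_vaddr and its parameters are hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include "hal/mmu_ll.h"  // chip-specific LL header providing the inline check

static void classify_vaddr(uint32_t vaddr, uint32_t len)
{
    bool data_ok = mmu_ll_check_valid_ext_vaddr_region(0, vaddr, len, MMU_VADDR_DATA);
    bool exec_ok = mmu_ll_check_valid_ext_vaddr_region(0, vaddr, len, MMU_VADDR_INSTRUCTION);
    // Both flags together accept a range that is valid on either bus:
    bool any_ok = mmu_ll_check_valid_ext_vaddr_region(0, vaddr, len,
                                                      MMU_VADDR_DATA | MMU_VADDR_INSTRUCTION);
    (void)data_ok;
    (void)exec_ok;
    (void)any_ok;
}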
/**


@@ -84,16 +84,27 @@ static inline void mmu_ll_set_page_size(uint32_t mmu_id, uint32_t size)
* @param mmu_id MMU ID
* @param vaddr_start start of the virtual address
* @param len length, in bytes
* @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*
* @return
* True for valid
*/
__attribute__((always_inline))
static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t vaddr_start, uint32_t len)
static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t vaddr_start, uint32_t len, mmu_vaddr_t type)
{
(void)mmu_id;
uint32_t vaddr_end = vaddr_start + len - 1;
return (ADDRESS_IN_IRAM0_CACHE(vaddr_start) && ADDRESS_IN_IRAM0_CACHE(vaddr_end)) || (ADDRESS_IN_DRAM0_CACHE(vaddr_start) && ADDRESS_IN_DRAM0_CACHE(vaddr_end));
bool valid = false;
if (type & MMU_VADDR_INSTRUCTION) {
valid |= (ADDRESS_IN_IRAM0_CACHE(vaddr_start) && ADDRESS_IN_IRAM0_CACHE(vaddr_end));
}
if (type & MMU_VADDR_DATA) {
valid |= (ADDRESS_IN_DRAM0_CACHE(vaddr_start) && ADDRESS_IN_DRAM0_CACHE(vaddr_end));
}
return valid;
}
/**


@@ -85,16 +85,27 @@ static inline void mmu_ll_set_page_size(uint32_t mmu_id, uint32_t size)
* @param mmu_id MMU ID
* @param vaddr_start start of the virtual address
* @param len length, in bytes
* @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*
* @return
* True for valid
*/
__attribute__((always_inline))
static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t vaddr_start, uint32_t len)
static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t vaddr_start, uint32_t len, mmu_vaddr_t type)
{
(void)mmu_id;
uint32_t vaddr_end = vaddr_start + len - 1;
return (ADDRESS_IN_IRAM0_CACHE(vaddr_start) && ADDRESS_IN_IRAM0_CACHE(vaddr_end)) || (ADDRESS_IN_DRAM0_CACHE(vaddr_start) && ADDRESS_IN_DRAM0_CACHE(vaddr_end));
bool valid = false;
if (type & MMU_VADDR_INSTRUCTION) {
valid |= (ADDRESS_IN_IRAM0_CACHE(vaddr_start) && ADDRESS_IN_IRAM0_CACHE(vaddr_end));
}
if (type & MMU_VADDR_DATA) {
valid |= (ADDRESS_IN_DRAM0_CACHE(vaddr_start) && ADDRESS_IN_DRAM0_CACHE(vaddr_end));
}
return valid;
}
/**


@@ -92,14 +92,16 @@ static inline void mmu_ll_set_page_size(uint32_t mmu_id, uint32_t size)
* @param mmu_id MMU ID
* @param vaddr_start start of the virtual address
* @param len length, in bytes
* @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*
* @return
* True for valid
*/
__attribute__((always_inline))
static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t vaddr_start, uint32_t len)
static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t vaddr_start, uint32_t len, mmu_vaddr_t type)
{
(void)mmu_id;
(void)type;
uint32_t vaddr_end = vaddr_start + len;
return (ADDRESS_IN_IRAM0_CACHE(vaddr_start) && ADDRESS_IN_IRAM0_CACHE(vaddr_end)) || (ADDRESS_IN_DRAM0_CACHE(vaddr_start) && ADDRESS_IN_DRAM0_CACHE(vaddr_end));
}


@@ -95,14 +95,16 @@ static inline void mmu_ll_set_page_size(uint32_t mmu_id, uint32_t size)
* @param mmu_id MMU ID
* @param vaddr_start start of the virtual address
* @param len length, in bytes
* @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*
* @return
* True for valid
*/
__attribute__((always_inline))
static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t vaddr_start, uint32_t len)
static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t vaddr_start, uint32_t len, mmu_vaddr_t type)
{
(void)mmu_id;
(void)type;
uint32_t vaddr_end = vaddr_start + len;
return (ADDRESS_IN_IRAM0_CACHE(vaddr_start) && ADDRESS_IN_IRAM0_CACHE(vaddr_end)) || (ADDRESS_IN_DRAM0_CACHE(vaddr_start) && ADDRESS_IN_DRAM0_CACHE(vaddr_end));
}


@@ -85,16 +85,27 @@ static inline void mmu_ll_set_page_size(uint32_t mmu_id, uint32_t size)
* @param mmu_id MMU ID
* @param vaddr_start start of the virtual address
* @param len length, in bytes
* @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*
* @return
* True for valid
*/
__attribute__((always_inline))
static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t vaddr_start, uint32_t len)
static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t vaddr_start, uint32_t len, mmu_vaddr_t type)
{
(void)mmu_id;
uint32_t vaddr_end = vaddr_start + len - 1;
return (ADDRESS_IN_IRAM0_CACHE(vaddr_start) && ADDRESS_IN_IRAM0_CACHE(vaddr_end)) || (ADDRESS_IN_DRAM0_CACHE(vaddr_start) && ADDRESS_IN_DRAM0_CACHE(vaddr_end));
bool valid = false;
if (type & MMU_VADDR_INSTRUCTION) {
valid |= (ADDRESS_IN_IRAM0_CACHE(vaddr_start) && ADDRESS_IN_IRAM0_CACHE(vaddr_end));
}
if (type & MMU_VADDR_DATA) {
valid |= (ADDRESS_IN_DRAM0_CACHE(vaddr_start) && ADDRESS_IN_DRAM0_CACHE(vaddr_end));
}
return valid;
}
/**


@@ -85,23 +85,27 @@ static inline void mmu_ll_set_page_size(uint32_t mmu_id, uint32_t size)
* @param mmu_id MMU ID
* @param vaddr_start start of the virtual address
* @param len length, in bytes
* @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*
* @return
* True for valid
*/
__attribute__((always_inline))
static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t vaddr_start, uint32_t len)
static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t vaddr_start, uint32_t len, mmu_vaddr_t type)
{
(void)mmu_id;
uint32_t vaddr_end = vaddr_start + len - 1;
bool valid = false;
//DROM0 is an alias of the IBUS2
bool on_ibus = ((vaddr_start >= DROM0_ADDRESS_LOW) && (vaddr_end < DROM0_ADDRESS_HIGH)) ||
((vaddr_start >= IRAM0_CACHE_ADDRESS_LOW) && (vaddr_end < IRAM1_ADDRESS_HIGH));
if (type & MMU_VADDR_DATA) {
valid |= ((vaddr_start >= DROM0_ADDRESS_LOW) && (vaddr_end < DROM0_ADDRESS_HIGH)) || ((vaddr_start >= DPORT_CACHE_ADDRESS_LOW) && (vaddr_end < DRAM0_CACHE_ADDRESS_HIGH));
}
bool on_dbus = (vaddr_start >= DPORT_CACHE_ADDRESS_LOW) && (vaddr_end < DRAM0_CACHE_ADDRESS_HIGH);
if (type & MMU_VADDR_INSTRUCTION) {
valid |= ((vaddr_start >= IRAM0_CACHE_ADDRESS_LOW) && (vaddr_end < IRAM1_ADDRESS_HIGH));
}
return (on_ibus || on_dbus);
return valid;
}
/**


@@ -85,16 +85,27 @@ static inline void mmu_ll_set_page_size(uint32_t mmu_id, uint32_t size)
* @param mmu_id MMU ID
* @param vaddr_start start of the virtual address
* @param len length, in bytes
* @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*
* @return
* True for valid
*/
__attribute__((always_inline))
static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t vaddr_start, uint32_t len)
static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t vaddr_start, uint32_t len, mmu_vaddr_t type)
{
(void)mmu_id;
uint32_t vaddr_end = vaddr_start + len - 1;
return (ADDRESS_IN_IRAM0_CACHE(vaddr_start) && ADDRESS_IN_IRAM0_CACHE(vaddr_end)) || (ADDRESS_IN_DRAM0_CACHE(vaddr_start) && ADDRESS_IN_DRAM0_CACHE(vaddr_end));
bool valid = false;
if (type & MMU_VADDR_INSTRUCTION) {
valid |= (ADDRESS_IN_IRAM0_CACHE(vaddr_start) && ADDRESS_IN_IRAM0_CACHE(vaddr_end));
}
if (type & MMU_VADDR_DATA) {
valid |= (ADDRESS_IN_DRAM0_CACHE(vaddr_start) && ADDRESS_IN_DRAM0_CACHE(vaddr_end));
}
return valid;
}
/**


@@ -99,6 +99,20 @@ bool mmu_hal_vaddr_to_paddr(uint32_t mmu_id, uint32_t vaddr, uint32_t *out_paddr
*/
bool mmu_hal_paddr_to_vaddr(uint32_t mmu_id, uint32_t paddr, mmu_target_t target, mmu_vaddr_t type, uint32_t *out_vaddr);
/**
* Check if the vaddr region is valid
*
* @param mmu_id MMU ID
* @param vaddr_start start of the virtual address
* @param len length, in bytes
* @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*
* @return
* True for valid
*/
bool mmu_hal_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t vaddr_start, uint32_t len, mmu_vaddr_t type);
#ifdef __cplusplus
}
#endif


@@ -24,18 +24,18 @@ typedef enum {
* MMU Page size
*/
typedef enum {
MMU_PAGE_8KB = 0x2000,
MMU_PAGE_8KB = 0x2000,
MMU_PAGE_16KB = 0x4000,
MMU_PAGE_32KB = 0x8000,
MMU_PAGE_64KB = 0x10000,
} mmu_page_size_t;
/**
* MMU virtual address type
* MMU virtual address flags type
*/
typedef enum {
MMU_VADDR_DATA,
MMU_VADDR_INSTRUCTION,
MMU_VADDR_DATA = BIT(0),
MMU_VADDR_INSTRUCTION = BIT(1),
} mmu_vaddr_t;
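
Making the enumerators distinct bits is what lets call sites such as mmu_hal_map_region() pass MMU_VADDR_DATA | MMU_VADDR_INSTRUCTION as a single mask. A minimal sketch (mask_accepts_data is a hypothetical helper, not part of the patch):

#include <stdbool.h>
#include "hal/mmu_types.h"

static bool mask_accepts_data(mmu_vaddr_t mask)
{
    // Membership in the mask is now a bitwise AND.
    return (mask & MMU_VADDR_DATA) != 0;
}

// mask_accepts_data(MMU_VADDR_DATA | MMU_VADDR_INSTRUCTION) -> true
// mask_accepts_data(MMU_VADDR_INSTRUCTION)                  -> false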
/**


@@ -67,7 +67,7 @@ void mmu_hal_map_region(uint32_t mmu_id, mmu_target_t mem_type, uint32_t vaddr,
HAL_ASSERT(vaddr % page_size_in_bytes == 0);
HAL_ASSERT(paddr % page_size_in_bytes == 0);
HAL_ASSERT(mmu_ll_check_valid_paddr_region(mmu_id, paddr, len));
HAL_ASSERT(mmu_ll_check_valid_ext_vaddr_region(mmu_id, vaddr, len));
HAL_ASSERT(mmu_hal_check_valid_ext_vaddr_region(mmu_id, vaddr, len, MMU_VADDR_DATA | MMU_VADDR_INSTRUCTION));
uint32_t page_num = (len + page_size_in_bytes - 1) / page_size_in_bytes;
uint32_t entry_id = 0;
@@ -89,7 +89,7 @@ void mmu_hal_unmap_region(uint32_t mmu_id, uint32_t vaddr, uint32_t len)
{
uint32_t page_size_in_bytes = mmu_hal_pages_to_bytes(mmu_id, 1);
HAL_ASSERT(vaddr % page_size_in_bytes == 0);
HAL_ASSERT(mmu_ll_check_valid_ext_vaddr_region(mmu_id, vaddr, len));
HAL_ASSERT(mmu_hal_check_valid_ext_vaddr_region(mmu_id, vaddr, len, MMU_VADDR_DATA | MMU_VADDR_INSTRUCTION));
uint32_t page_num = (len + page_size_in_bytes - 1) / page_size_in_bytes;
uint32_t entry_id = 0;
@@ -103,7 +103,7 @@ void mmu_hal_unmap_region(uint32_t mmu_id, uint32_t vaddr, uint32_t len)
bool mmu_hal_vaddr_to_paddr(uint32_t mmu_id, uint32_t vaddr, uint32_t *out_paddr, mmu_target_t *out_target)
{
HAL_ASSERT(mmu_ll_check_valid_ext_vaddr_region(mmu_id, vaddr, 1));
HAL_ASSERT(mmu_hal_check_valid_ext_vaddr_region(mmu_id, vaddr, 1, MMU_VADDR_DATA | MMU_VADDR_INSTRUCTION));
uint32_t entry_id = mmu_ll_get_entry_id(mmu_id, vaddr);
if (!mmu_ll_check_entry_valid(mmu_id, entry_id)) {
return false;
@@ -139,3 +139,8 @@ bool mmu_hal_paddr_to_vaddr(uint32_t mmu_id, uint32_t paddr, mmu_target_t target
return true;
}
bool mmu_hal_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t vaddr_start, uint32_t len, mmu_vaddr_t type)
{
return mmu_ll_check_valid_ext_vaddr_region(mmu_id, vaddr_start, len, type);
}
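
The HAL wrapper gives upper layers such as esp_mmu_vaddr_to_paddr() a single entry point instead of having each caller include the chip-specific LL header. A caller-side sketch mirroring that guard (is_valid_ext_vaddr is a hypothetical helper):

#include <stdbool.h>
#include <stdint.h>
#include "hal/mmu_hal.h"
#include "hal/mmu_types.h"

static bool is_valid_ext_vaddr(const void *p)
{
    // Probe a single byte at the pointer as either a data or instruction vaddr.
    return mmu_hal_check_valid_ext_vaddr_region(0, (uint32_t)(uintptr_t)p, 1,
                                                MMU_VADDR_DATA | MMU_VADDR_INSTRUCTION);
}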


@@ -92,7 +92,7 @@ esp_err_t spi_flash_mmap(size_t src_addr, size_t size, spi_flash_mmap_memory_t m
} else {
caps = MMU_MEM_CAP_READ | MMU_MEM_CAP_8BIT;
}
ret = esp_mmu_map(src_addr, size, caps, MMU_TARGET_FLASH0, &ptr);
ret = esp_mmu_map(src_addr, size, MMU_TARGET_FLASH0, caps, ESP_MMU_MMAP_FLAG_PADDR_SHARED, &ptr);
if (ret == ESP_OK) {
vaddr_list[0] = (uint32_t)ptr;
block->list_num = 1;
@@ -202,7 +202,7 @@ esp_err_t spi_flash_mmap_pages(const int *pages, size_t page_count, spi_flash_mm
}
for (int i = 0; i < block_num; i++) {
void *ptr = NULL;
ret = esp_mmu_map(paddr_blocks[i][0], paddr_blocks[i][1], caps, MMU_TARGET_FLASH0, &ptr);
ret = esp_mmu_map(paddr_blocks[i][0], paddr_blocks[i][1], MMU_TARGET_FLASH0, caps, ESP_MMU_MMAP_FLAG_PADDR_SHARED, &ptr);
if (ret == ESP_OK) {
vaddr_list[i] = (uint32_t)ptr;
successful_cnt++;
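
Because the legacy spi_flash_mmap() path now always maps with ESP_MMU_MMAP_FLAG_PADDR_SHARED, overlapping mappings of the same flash region continue to work, each with its own handle. A sketch with hypothetical, page-aligned offsets (header name assumed):

#include "esp_err.h"
#include "spi_flash_mmap.h"  // assumed header for the legacy mmap API

void example_overlapping_flash_mmap(void)
{
    const void *p0 = NULL;
    const void *p1 = NULL;
    spi_flash_mmap_handle_t h0;
    spi_flash_mmap_handle_t h1;
    // Two overlapping 128 KB data mappings; the shared flag underneath keeps
    // the second call from failing with ESP_ERR_INVALID_ARG.
    ESP_ERROR_CHECK(spi_flash_mmap(0x10000, 0x20000, SPI_FLASH_MMAP_DATA, &p0, &h0));
    ESP_ERROR_CHECK(spi_flash_mmap(0x20000, 0x20000, SPI_FLASH_MMAP_DATA, &p1, &h1));
    // ... read through p0 / p1 ...
    spi_flash_munmap(h0);
    spi_flash_munmap(h1);
}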