// esp-idf/components/hal/esp32s2/include/hal/mmu_ll.h
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
// The LL layer for MMU register operations
#pragma once
#include "stdint.h"
#include "soc/extmem_reg.h"
#include "soc/ext_mem_defs.h"
#include "hal/assert.h"
#include "hal/mmu_types.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* Convert MMU virtual address to linear address
*
* @param vaddr virtual address
*
* @return linear address
*/
static inline uint32_t mmu_ll_vaddr_to_laddr(uint32_t vaddr)
{
return vaddr & SOC_MMU_LINEAR_ADDR_MASK;
}
/**
* Convert MMU linear address to virtual address
*
* @param laddr linear address
* @param vaddr_type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*
* @return virtual address
*/
static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type)
{
uint32_t vaddr_base = 0;
if (vaddr_type == MMU_VADDR_DATA) {
vaddr_base = SOC_MMU_DBUS_VADDR_BASE;
} else {
vaddr_base = SOC_MMU_IBUS_VADDR_BASE;
}
return vaddr_base | laddr;
}
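
/*
 * Usage sketch (illustrative, not part of the original header): the two
 * helpers above are inverses for addresses that really live on the chosen
 * bus. The address 0x3F500000 below is a hypothetical DBUS vaddr picked for
 * illustration.
 *
 *     uint32_t laddr = mmu_ll_vaddr_to_laddr(0x3F500000);
 *     uint32_t vaddr = mmu_ll_laddr_to_vaddr(laddr, MMU_VADDR_DATA);
 *     // vaddr should equal the original address: the bus bits masked off by
 *     // SOC_MMU_LINEAR_ADDR_MASK are restored from SOC_MMU_DBUS_VADDR_BASE.
 */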
/**
* Get MMU page size
*
* @param mmu_id MMU ID
*
* @return MMU page size code
*/
__attribute__((always_inline))
static inline mmu_page_size_t mmu_ll_get_page_size(uint32_t mmu_id)
{
//On esp32s2, MMU Page size is always 64KB
(void)mmu_id;
return MMU_PAGE_64KB;
}
/**
 * Set MMU page size
 *
 * @param mmu_id MMU ID
 * @param size MMU page size
 *
 * @note On esp32s2, only `MMU_PAGE_64KB` is supported
 */
__attribute__((always_inline))
static inline void mmu_ll_set_page_size(uint32_t mmu_id, uint32_t size)
{
    (void)mmu_id;
    HAL_ASSERT(size == MMU_PAGE_64KB);
}
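
/*
 * Note: the page-size getter/setter exist to keep the HAL interface uniform
 * across chips; on esp32s2 they reduce to a constant and an assert. Sketch:
 *
 *     HAL_ASSERT(mmu_ll_get_page_size(0) == MMU_PAGE_64KB);
 *     mmu_ll_set_page_size(0, MMU_PAGE_64KB);   // any other size asserts
 */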
/**
* Check if the external memory vaddr region is valid
*
* @param mmu_id MMU ID
* @param vaddr_start start of the virtual address
* @param len length, in bytes
*
 * @return
 *         True for a valid region, false otherwise
 */
__attribute__((always_inline))
static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t vaddr_start, uint32_t len)
{
(void)mmu_id;
uint32_t vaddr_end = vaddr_start + len - 1;
//DROM0 is an alias of the IBUS2
bool on_ibus = ((vaddr_start >= DROM0_ADDRESS_LOW) && (vaddr_end < DROM0_ADDRESS_HIGH)) ||
((vaddr_start >= IRAM0_CACHE_ADDRESS_LOW) && (vaddr_end < IRAM1_ADDRESS_HIGH));
bool on_dbus = (vaddr_start >= DPORT_CACHE_ADDRESS_LOW) && (vaddr_end < DRAM0_CACHE_ADDRESS_HIGH);
return (on_ibus || on_dbus);
}
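
/*
 * Usage sketch (illustrative): a caller would typically validate a region
 * before mapping it. The start address (DROM0_ADDRESS_LOW) and the 64 KB
 * length are example values.
 *
 *     if (!mmu_ll_check_valid_ext_vaddr_region(0, DROM0_ADDRESS_LOW, 0x10000)) {
 *         // region is not usable for external memory mapping
 *     }
 */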
/**
 * Get the MMU table entry ID for the virtual address to be mapped
 *
 * @param mmu_id MMU ID
 * @param vaddr virtual address to be mapped
 *
 * @return
 *         MMU table entry ID
 */
__attribute__((always_inline))
static inline uint32_t mmu_ll_get_entry_id(uint32_t mmu_id, uint32_t vaddr)
{
(void)mmu_id;
uint32_t offset = 0;
if (ADDRESS_IN_DROM0(vaddr)) {
offset = PRO_CACHE_IBUS2_MMU_START / 4;
} else if (ADDRESS_IN_IRAM0_CACHE(vaddr)) {
offset = PRO_CACHE_IBUS0_MMU_START / 4;
} else if (ADDRESS_IN_IRAM1(vaddr)) {
offset = PRO_CACHE_IBUS1_MMU_START / 4;
} else if (ADDRESS_IN_DPORT_CACHE(vaddr)) {
offset = PRO_CACHE_DBUS2_MMU_START / 4;
} else if (ADDRESS_IN_DRAM1(vaddr)) {
offset = PRO_CACHE_DBUS1_MMU_START / 4;
} else if (ADDRESS_IN_DRAM0_CACHE(vaddr)) {
offset = PRO_CACHE_DBUS0_MMU_START / 4;
} else {
HAL_ASSERT(false);
}
return offset + ((vaddr & MMU_VADDR_MASK) >> 16);
}
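
/*
 * Derivation note: with the fixed 64 KB page size, the low 16 bits of a
 * vaddr are the in-page offset, so `(vaddr & MMU_VADDR_MASK) >> 16` is the
 * page index within the bus region. `offset` selects that bus's slice of the
 * shared MMU table; the `*_MMU_START` values are byte offsets, hence the
 * division by 4 to turn them into 32-bit entry indices. Sketch, using a
 * vaddr one page above the DROM0 region start (and assuming
 * DROM0_ADDRESS_LOW is aligned to the masked region):
 *
 *     uint32_t entry = mmu_ll_get_entry_id(0, DROM0_ADDRESS_LOW + 0x10000);
 *     // entry is expected to be PRO_CACHE_IBUS2_MMU_START / 4 + 1
 */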
/**
 * Format the physical address so that it can be written into the MMU table
 *
 * @param mmu_id MMU ID
 * @param paddr physical address to be mapped
 *
 * @return
 *         mmu_val - paddr in the format supported by the MMU table
 */
__attribute__((always_inline))
static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr)
{
(void)mmu_id;
return paddr >> 16;
}
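
/*
 * Example (sketch): with 64 KB pages, the MMU table stores physical page
 * numbers, so shifting the paddr right by 16 drops the in-page offset. A
 * hypothetical flash paddr of 0x20000 formats to page number 2:
 *
 *     uint32_t mmu_val = mmu_ll_format_paddr(0, 0x20000);   // == 2
 */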
/**
 * Write an entry into the MMU table, mapping a virtual page to a physical page
 *
 * @param mmu_id MMU ID
 * @param entry_id MMU entry ID
 * @param mmu_val Value to be written into the MMU entry (the formatted physical address)
 * @param target MMU target physical memory
 */
__attribute__((always_inline))
static inline void mmu_ll_write_entry(uint32_t mmu_id, uint32_t entry_id, uint32_t mmu_val, mmu_target_t target)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
uint32_t target_code = (target == MMU_TARGET_FLASH0) ? MMU_ACCESS_FLASH : MMU_ACCESS_SPIRAM;
*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) = mmu_val | target_code | MMU_VALID;
}
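
/*
 * Usage sketch (illustrative; the vaddr/paddr values are hypothetical): map
 * one 64 KB flash page at a DROM0 virtual address by combining the helpers
 * above. A real caller must also handle cache disable/invalidate around the
 * table write, which is omitted here.
 *
 *     uint32_t vaddr = DROM0_ADDRESS_LOW;     // hypothetical target vaddr
 *     uint32_t paddr = 0x120000;              // hypothetical flash paddr
 *     uint32_t entry = mmu_ll_get_entry_id(0, vaddr);
 *     uint32_t val   = mmu_ll_format_paddr(0, paddr);
 *     mmu_ll_write_entry(0, entry, val, MMU_TARGET_FLASH0);
 */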
/**
 * Read the raw value from the MMU table
 *
 * @param mmu_id MMU ID
 * @param entry_id MMU entry ID
 *
 * @return
 *         Raw value of the MMU entry
 */
__attribute__((always_inline))
static inline uint32_t mmu_ll_read_entry(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
return *(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4);
}
/**
* Set MMU table entry as invalid
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*/
__attribute__((always_inline))
static inline void mmu_ll_set_entry_invalid(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) = MMU_INVALID;
}
/**
 * Unmap all the entries in the MMU table
*
* @param mmu_id MMU ID
*/
__attribute__((always_inline))
static inline void mmu_ll_unmap_all(uint32_t mmu_id)
{
for (int i = 0; i < MMU_ENTRY_NUM; i++) {
mmu_ll_set_entry_invalid(mmu_id, i);
}
}
/**
 * Check if an MMU table entry is invalid
 *
 * @param mmu_id MMU ID
 * @param entry_id MMU entry ID
 *
 * @return True if the MMU entry is invalid, false otherwise
 */
__attribute__((always_inline))
static inline bool mmu_ll_get_entry_is_invalid(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
    return (*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) & MMU_INVALID) != 0;
}
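
/*
 * Usage sketch (illustrative; entry 0 is an arbitrary example index): read
 * an entry only after checking that it is mapped.
 *
 *     if (!mmu_ll_get_entry_is_invalid(0, 0)) {
 *         uint32_t raw = mmu_ll_read_entry(0, 0);
 *         // raw holds the physical page number plus target/valid flag bits
 *     }
 */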
#ifdef __cplusplus
}
#endif