Merge branch 'esp32c6/fix_pmp_config' into 'master'

esp32c6: Fix incorrect PMP configuration

Closes IDF-6927

See merge request espressif/esp-idf!22435

Commit 00f30bb199
@@ -12,7 +12,7 @@ set(requires soc)

 # only esp_hw_support/adc_share_hw_ctrl.c requires efuse component
 set(priv_requires efuse spi_flash bootloader_support)

-set(srcs "cpu.c" "esp_memory_utils.c")
+set(srcs "cpu.c" "esp_memory_utils.c" "port/${IDF_TARGET}/cpu_region_protect.c")
 if(NOT BOOTLOADER_BUILD)
     list(APPEND srcs "esp_clk.c"
                      "clk_ctrl_os.c"
@@ -21,20 +21,16 @@
#endif

#include "hal/soc_hal.h"
#include "hal/mpu_hal.h"
#include "esp_bit_defs.h"
#include "esp_attr.h"
#include "esp_err.h"
#include "esp_cpu.h"
#include "esp_memory_utils.h"
#include "esp_fault.h"
#if __XTENSA__
#include "xtensa/config/core-isa.h"
#else
#include "soc/system_reg.h"     // For SYSTEM_CPU_PER_CONF_REG
#include "soc/dport_access.h"   // For Dport access
#include "riscv/semihosting.h"
#include "riscv/csr.h"          // For PMP_ENTRY. [refactor-todo] create PMP abstraction in rv_utils.h
#endif
#if SOC_CPU_HAS_FLEXIBLE_INTC
#include "riscv/instruction_decode.h"
@@ -285,509 +281,6 @@ void esp_cpu_intr_get_desc(int core_id, int intr_num, esp_cpu_intr_desc_t *intr_

#endif // SOC_CPU_HAS_FLEXIBLE_INTC

/* -------------------------------------------------- Memory Ports -----------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

#if SOC_CPU_HAS_PMA
static void esp_cpu_configure_invalid_regions(void)
{
    const unsigned PMA_NONE = PMA_EN;
    __attribute__((unused)) const unsigned PMA_RW = PMA_EN | PMA_R | PMA_W;
    __attribute__((unused)) const unsigned PMA_RX = PMA_EN | PMA_R | PMA_X;
    __attribute__((unused)) const unsigned PMA_RWX = PMA_EN | PMA_R | PMA_W | PMA_X;

    // 1. Gap at bottom of address space
    PMA_ENTRY_SET_TOR(0, SOC_DEBUG_LOW, PMA_TOR | PMA_NONE);

    // 2. Gap between debug region & IROM
    PMA_ENTRY_SET_TOR(1, SOC_DEBUG_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(2, SOC_IROM_MASK_LOW, PMA_TOR | PMA_NONE);

    // 3. Gap between ROM & RAM
    PMA_ENTRY_SET_TOR(3, SOC_DROM_MASK_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(4, SOC_IRAM_LOW, PMA_TOR | PMA_NONE);

    // 4. Gap between DRAM and I_Cache
    PMA_ENTRY_SET_TOR(5, SOC_IRAM_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(6, SOC_IROM_LOW, PMA_TOR | PMA_NONE);

    // 5. Gap between D_Cache & LP_RAM
    PMA_ENTRY_SET_TOR(7, SOC_DROM_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(8, SOC_RTC_IRAM_LOW, PMA_TOR | PMA_NONE);

    // 6. Gap between LP memory & peripheral addresses
    PMA_ENTRY_SET_TOR(9, SOC_RTC_IRAM_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(10, SOC_PERIPHERAL_LOW, PMA_TOR | PMA_NONE);

    // 7. End of address space
    PMA_ENTRY_SET_TOR(11, SOC_PERIPHERAL_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(12, UINT32_MAX, PMA_TOR | PMA_NONE);
}
#endif

#if CONFIG_IDF_TARGET_ESP32 || CONFIG_IDF_TARGET_ESP32S2 || CONFIG_IDF_TARGET_ESP32S3
void esp_cpu_configure_region_protection(void)
{
    /* Note: currently this is configured the same on all Xtensa targets
     *
     * These chips have the address space divided into 8 regions of 512 MB each.
     */
    const int illegal_regions[] = {0, 4, 5, 6, 7}; // 0x00000000, 0x80000000, 0xa0000000, 0xc0000000, 0xe0000000
    for (size_t i = 0; i < sizeof(illegal_regions) / sizeof(illegal_regions[0]); ++i) {
        mpu_hal_set_region_access(illegal_regions[i], MPU_REGION_ILLEGAL);
    }

    mpu_hal_set_region_access(1, MPU_REGION_RW); // 0x20000000
}
#elif CONFIG_IDF_TARGET_ESP32C3 || CONFIG_IDF_TARGET_ESP32H4
void esp_cpu_configure_region_protection(void)
{
    /* Notes on implementation:
     *
     * 1) Note: ESP32-C3/H4 CPU doesn't support overlapping PMP regions
     *
     * 2) Therefore, we use TOR (top of range) entries to map the whole address
     *    space, bottom to top.
     *
     * 3) There are not enough entries to describe all the memory regions 100% accurately.
     *
     * 4) This means some gaps (invalid memory) are accessible. When extending regions to cover
     *    gaps, the preference is to extend read-only or read-execute regions (executing unmapped
     *    addresses should always fault with an invalid instruction; read-only means stores will
     *    correctly fault even if reads may return some invalid value).
     *
     * 5) Entries are grouped in order with some static asserts to try and verify everything is
     *    correct.
     */
    const unsigned NONE = PMP_L | PMP_TOR;
    const unsigned R = PMP_L | PMP_TOR | PMP_R;
    const unsigned RW = PMP_L | PMP_TOR | PMP_R | PMP_W;
    const unsigned RX = PMP_L | PMP_TOR | PMP_R | PMP_X;
    const unsigned RWX = PMP_L | PMP_TOR | PMP_R | PMP_W | PMP_X;

    // 1. Gap at bottom of address space
    PMP_ENTRY_SET(0, SOC_DEBUG_LOW, NONE);

    // 2. Debug region
    PMP_ENTRY_SET(1, SOC_DEBUG_HIGH, RWX);
    _Static_assert(SOC_DEBUG_LOW < SOC_DEBUG_HIGH, "Invalid CPU debug region");

    // 3. Gap between debug region & DROM (flash cache)
    PMP_ENTRY_SET(2, SOC_DROM_LOW, NONE);
    _Static_assert(SOC_DEBUG_HIGH < SOC_DROM_LOW, "Invalid PMP entry order");

    // 4. DROM (flash cache)
    // 5. Gap between DROM & DRAM
    // (Note: To save PMP entries these two are merged into one read-only region)
    PMP_ENTRY_SET(3, SOC_DRAM_LOW, R);
    _Static_assert(SOC_DROM_LOW < SOC_DROM_HIGH, "Invalid DROM region");
    _Static_assert(SOC_DROM_HIGH < SOC_DRAM_LOW, "Invalid PMP entry order");

    // 6. DRAM
    PMP_ENTRY_SET(4, SOC_DRAM_HIGH, RW);
    _Static_assert(SOC_DRAM_LOW < SOC_DRAM_HIGH, "Invalid DRAM region");

    // 7. Gap between DRAM and Mask DROM
    // 8. Mask DROM
    // (Note: to save PMP entries these two are merged into one read-only region)
    PMP_ENTRY_SET(5, SOC_DROM_MASK_HIGH, R);
    _Static_assert(SOC_DRAM_HIGH < SOC_DROM_MASK_LOW, "Invalid PMP entry order");
    _Static_assert(SOC_DROM_MASK_LOW < SOC_DROM_MASK_HIGH, "Invalid mask DROM region");

    // 9. Gap between mask DROM and mask IROM
    // 10. Mask IROM
    // (Note: to save PMP entries these two are merged into one RX region)
    PMP_ENTRY_SET(6, SOC_IROM_MASK_HIGH, RX);
    _Static_assert(SOC_DROM_MASK_HIGH < SOC_IROM_MASK_LOW, "Invalid PMP entry order");
    _Static_assert(SOC_IROM_MASK_LOW < SOC_IROM_MASK_HIGH, "Invalid mask IROM region");

    // 11. Gap between mask IROM & IRAM
    PMP_ENTRY_SET(7, SOC_IRAM_LOW, NONE);
    _Static_assert(SOC_IROM_MASK_HIGH < SOC_IRAM_LOW, "Invalid PMP entry order");

    // 12. IRAM
    PMP_ENTRY_SET(8, SOC_IRAM_HIGH, RWX);
    _Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid IRAM region");

    // 13. Gap between IRAM and IROM
    // 14. IROM (flash cache)
    // (Note: to save PMP entries these two are merged into one RX region)
    PMP_ENTRY_SET(9, SOC_IROM_HIGH, RX);
    _Static_assert(SOC_IRAM_HIGH < SOC_IROM_LOW, "Invalid PMP entry order");
    _Static_assert(SOC_IROM_LOW < SOC_IROM_HIGH, "Invalid IROM region");

    // 15. Gap between IROM & RTC slow memory
    PMP_ENTRY_SET(10, SOC_RTC_IRAM_LOW, NONE);
    _Static_assert(SOC_IROM_HIGH < SOC_RTC_IRAM_LOW, "Invalid PMP entry order");

    // 16. RTC fast memory
    PMP_ENTRY_SET(11, SOC_RTC_IRAM_HIGH, RWX);
    _Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");

    // 17. Gap between RTC fast memory & peripheral addresses
    PMP_ENTRY_SET(12, SOC_PERIPHERAL_LOW, NONE);
    _Static_assert(SOC_RTC_IRAM_HIGH < SOC_PERIPHERAL_LOW, "Invalid PMP entry order");

    // 18. Peripheral addresses
    PMP_ENTRY_SET(13, SOC_PERIPHERAL_HIGH, RW);
    _Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");

    // 19. End of address space
    PMP_ENTRY_SET(14, UINT32_MAX, NONE);            // all but last 4 bytes
    PMP_ENTRY_SET(15, UINT32_MAX, PMP_L | PMP_NA4); // last 4 bytes
}
#elif CONFIG_IDF_TARGET_ESP32C2
#if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT && !BOOTLOADER_BUILD
extern int _iram_end;
extern int _data_start;
#define IRAM_END (int)&_iram_end
#define DRAM_START (int)&_data_start
#else
#define IRAM_END SOC_DIRAM_IRAM_HIGH
#define DRAM_START SOC_DIRAM_DRAM_LOW
#endif

#ifdef BOOTLOADER_BUILD
// Without L bit set
#define CONDITIONAL_NONE 0x0
#define CONDITIONAL_RX PMP_R | PMP_X
#define CONDITIONAL_RW PMP_R | PMP_W
#define CONDITIONAL_RWX PMP_R | PMP_W | PMP_X
#else
// With L bit set
#define CONDITIONAL_NONE NONE
#define CONDITIONAL_RX RX
#define CONDITIONAL_RW RW
#define CONDITIONAL_RWX RWX
#endif

void esp_cpu_configure_region_protection(void)
{
    /* Notes on implementation:
     *
     * 1) The ESP32-C2 CPU supports overlapping PMP regions; configuration relies on the static
     *    priority feature (the lowest-numbered entry has the highest priority).
     *
     * 2) Therefore, we use TOR (top of range) and NAPOT entries to map the effective area.
     *    Finally, any address not covered is left without access permission.
     *
     * 3) PMPADDR entries 3-15 are hardcoded to fixed values; PMPADDR entries 0-2 are programmed
     *    to split the I/D SRAM into IRAM and DRAM. All PMPCFG entries are available.
     *
     * 4) Ideally, the PMPADDR 0-2 entries should be configured twice, once during bootloader startup and again during
     *    app startup. However, the CPU currently always executes in machine mode, and to enforce these permissions in
     *    machine mode we need to set the Lock (L) bit, which, once set, cannot be reconfigured. So we only configure
     *    PMPADDR 0-2 during app startup.
     */
    const unsigned NONE = PMP_L;
    const unsigned R = PMP_L | PMP_R;
    const unsigned X = PMP_L | PMP_X;
    const unsigned RW = PMP_L | PMP_R | PMP_W;
    const unsigned RX = PMP_L | PMP_R | PMP_X;
    const unsigned RWX = PMP_L | PMP_R | PMP_W | PMP_X;

    /* There are 4 configuration scenarios for PMPADDR 0-2
     *
     * 1. Bootloader build:
     *    - We cannot set the lock bit as we need to reconfigure it again for the application.
     *      We configure PMPADDR 0-1 to cover the entire valid IRAM range and PMPADDR 2-3 to cover the entire valid DRAM range.
     *
     * 2. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT enabled
     *    - We split the SRAM into IRAM and DRAM such that the IRAM region cannot be accessed via the DBUS
     *      and the DRAM region cannot be accessed via the IBUS. We use the _iram_end and _data_start markers to set the boundaries.
     *      We also lock these entries so the R/W/X permissions are enforced even in machine mode.
     *
     * 3. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT disabled
     *    - The IRAM-DRAM split is not enabled, so we just need to ensure that only accesses to valid address ranges succeed;
     *      for that we set PMPADDR 0-1 to cover the entire valid IRAM range and PMPADDR 2-3 to cover the entire DRAM region.
     *      We also lock these entries so the R/W/X permissions are enforced even in machine mode.
     *
     * 4. CPU is in OCD debug mode
     *    - The IRAM-DRAM split is not enabled so that OpenOCD can write to and execute from IRAM.
     *      We set PMPADDR 0-1 to cover the entire valid IRAM range and PMPADDR 2-3 to cover the entire DRAM region.
     *      We also lock these entries so the R/W/X permissions are enforced even in machine mode.
     *
     * PMPADDR 3-15 are hard-coded and are applicable to both the bootloader and the application. So we configure and lock
     * these during the BOOTLOADER build itself. During the application build, reconfiguration of these PMPADDR entries
     * is silently ignored by the CPU.
     */

    if (esp_cpu_dbgr_is_attached()) {
        // Anti-FI check that cpu is really in ocd mode
        ESP_FAULT_ASSERT(esp_cpu_dbgr_is_attached());

        // 1. IRAM
        PMP_ENTRY_SET(0, SOC_DIRAM_IRAM_LOW, NONE);
        PMP_ENTRY_SET(1, SOC_DIRAM_IRAM_HIGH, PMP_TOR | RWX);

        // 2. DRAM
        PMP_ENTRY_SET(2, SOC_DIRAM_DRAM_LOW, NONE);
        PMP_ENTRY_CFG_SET(3, PMP_TOR | RW);
    } else {
        // 1. IRAM
        PMP_ENTRY_SET(0, SOC_DIRAM_IRAM_LOW, CONDITIONAL_NONE);

#if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT
        PMP_ENTRY_SET(1, IRAM_END, PMP_TOR | CONDITIONAL_RX);
#else
        PMP_ENTRY_SET(1, IRAM_END, PMP_TOR | CONDITIONAL_RWX);
#endif

        // 2. DRAM
        PMP_ENTRY_SET(2, DRAM_START, CONDITIONAL_NONE);
        PMP_ENTRY_CFG_SET(3, PMP_TOR | CONDITIONAL_RW);
    }

    // 3. Debug region
    PMP_ENTRY_CFG_SET(4, PMP_NAPOT | RWX);

    // 4. DROM (flash dcache)
    PMP_ENTRY_CFG_SET(5, PMP_NAPOT | R);

    // 5. DROM_MASK
    PMP_ENTRY_CFG_SET(6, NONE);
    PMP_ENTRY_CFG_SET(7, PMP_TOR | R);

    // 6. IROM_MASK
    PMP_ENTRY_CFG_SET(8, NONE);
    PMP_ENTRY_CFG_SET(9, PMP_TOR | RX);

    // 7. IROM (flash icache)
    PMP_ENTRY_CFG_SET(10, PMP_NAPOT | RX);

    // 8. Peripheral addresses
    PMP_ENTRY_CFG_SET(11, PMP_NAPOT | RW);

    // 9. SRAM (used as ICache)
    PMP_ENTRY_CFG_SET(12, PMP_NAPOT | X);

    // 10. No access to any remaining address (0x0-0xFFFF_FFFF)
    PMP_ENTRY_CFG_SET(13, PMP_NA4 | NONE); // last 4 bytes (0xFFFFFFFC)
    PMP_ENTRY_CFG_SET(14, NONE);
    PMP_ENTRY_CFG_SET(15, PMP_TOR | NONE);
}
#elif CONFIG_IDF_TARGET_ESP32C6

#ifdef BOOTLOADER_BUILD
// Without L bit set
#define CONDITIONAL_NONE 0x0
#define CONDITIONAL_RX PMP_R | PMP_X
#define CONDITIONAL_RW PMP_R | PMP_W
#define CONDITIONAL_RWX PMP_R | PMP_W | PMP_X
#else
// With L bit set
#define CONDITIONAL_NONE PMP_NONE
#define CONDITIONAL_RX PMP_RX
#define CONDITIONAL_RW PMP_RW
#define CONDITIONAL_RWX PMP_RWX
#endif

void esp_cpu_configure_region_protection(void)
{
    /* Notes on implementation:
     *
     * 1) Note: ESP32-C6 CPU doesn't support overlapping PMP regions
     *
     * 2) ESP32-C6 supports 16 PMA regions, so we use this feature to block all the invalid address ranges
     *
     * 3) We use a combination of NAPOT (Naturally Aligned Power Of Two) and TOR (top of range)
     *    entries to map all the valid address space, bottom to top. This leaves us with some extra PMP entries
     *    which can be used to provide more granular access
     *
     * 4) Entries are grouped in order with some static asserts to try and verify everything is
     *    correct.
     */

    /* There are 4 configuration scenarios for SRAM
     *
     * 1. Bootloader build:
     *    - We cannot set the lock bit as we need to reconfigure it again for the application.
     *      We configure PMP to cover the entire valid IRAM and DRAM range.
     *
     * 2. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT enabled
     *    - We split the SRAM into IRAM and DRAM such that the IRAM region cannot be written to
     *      and the DRAM region cannot be executed. We use the _iram_end and _data_start markers to set the boundaries.
     *      We also lock these entries so the R/W/X permissions are enforced even in machine mode.
     *
     * 3. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT disabled
     *    - The IRAM-DRAM split is not enabled, so we just need to ensure that only accesses to valid address ranges
     *      succeed; for that we set PMP to cover the entire valid IRAM and DRAM region.
     *      We also lock these entries so the R/W/X permissions are enforced even in machine mode.
     *
     * 4. CPU is in OCD debug mode
     *    - The IRAM-DRAM split is not enabled so that OpenOCD can write to and execute from IRAM.
     *      We set PMP to cover the entire valid IRAM and DRAM region.
     *      We also lock these entries so the R/W/X permissions are enforced even in machine mode.
     */
    const unsigned PMP_NONE = PMP_L;
    const unsigned PMP_RW = PMP_L | PMP_R | PMP_W;
    const unsigned PMP_RX = PMP_L | PMP_R | PMP_X;
    const unsigned PMP_RWX = PMP_L | PMP_R | PMP_W | PMP_X;

    //
    // Configure all the invalid address regions using PMA
    //
    esp_cpu_configure_invalid_regions();

    //
    // Configure all the valid address regions using PMP
    //

    // 1. Debug region
    const uint32_t pmpaddr0 = PMPADDR_NAPOT(SOC_DEBUG_LOW, SOC_DEBUG_HIGH);
    PMP_ENTRY_SET(0, pmpaddr0, PMP_NAPOT | PMP_RWX);
    _Static_assert(SOC_DEBUG_LOW < SOC_DEBUG_HIGH, "Invalid CPU debug region");

    // 2.1 I-ROM
    PMP_ENTRY_SET(1, SOC_IROM_MASK_LOW, PMP_NONE);
    PMP_ENTRY_SET(2, SOC_IROM_MASK_HIGH, PMP_TOR | PMP_RX);
    _Static_assert(SOC_IROM_MASK_LOW < SOC_IROM_MASK_HIGH, "Invalid I-ROM region");

    // 2.2 D-ROM
    PMP_ENTRY_SET(3, SOC_DROM_MASK_LOW, PMP_NONE);
    PMP_ENTRY_SET(4, SOC_DROM_MASK_HIGH, PMP_TOR | PMP_R);
    _Static_assert(SOC_DROM_MASK_LOW < SOC_DROM_MASK_HIGH, "Invalid D-ROM region");

    if (esp_cpu_dbgr_is_attached()) {
        // Anti-FI check that cpu is really in ocd mode
        ESP_FAULT_ASSERT(esp_cpu_dbgr_is_attached());

        // 5. IRAM and DRAM
        const uint32_t pmpaddr5 = PMPADDR_NAPOT(SOC_IRAM_LOW, SOC_IRAM_HIGH);
        PMP_ENTRY_SET(5, pmpaddr5, PMP_NAPOT | PMP_RWX);
        _Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid RAM region");
    } else {
#if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT && !BOOTLOADER_BUILD
        extern int _iram_end;
        // 5. IRAM and DRAM
        PMP_ENTRY_SET(5, SOC_IRAM_LOW, PMP_NONE);
        PMP_ENTRY_SET(6, (int)&_iram_end, PMP_TOR | PMP_RX);
        PMP_ENTRY_SET(7, SOC_DRAM_HIGH, PMP_TOR | PMP_RW);
#else
        // 5. IRAM and DRAM
        const uint32_t pmpaddr5 = PMPADDR_NAPOT(SOC_IRAM_LOW, SOC_IRAM_HIGH);
        PMP_ENTRY_SET(5, pmpaddr5, PMP_NAPOT | CONDITIONAL_RWX);
        _Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid RAM region");
#endif
    }

    // 4. I_Cache (flash)
    const uint32_t pmpaddr8 = PMPADDR_NAPOT(SOC_IROM_LOW, SOC_IROM_HIGH);
    PMP_ENTRY_SET(8, pmpaddr8, PMP_NAPOT | PMP_RX);
    _Static_assert(SOC_IROM_LOW < SOC_IROM_HIGH, "Invalid I_Cache region");

    // 5. D_Cache (flash)
    const uint32_t pmpaddr9 = PMPADDR_NAPOT(SOC_DROM_LOW, SOC_DROM_HIGH);
    PMP_ENTRY_SET(9, pmpaddr9, PMP_NAPOT | PMP_R);
    _Static_assert(SOC_DROM_LOW < SOC_DROM_HIGH, "Invalid D_Cache region");

    // 6. LP memory
#if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT && !BOOTLOADER_BUILD
    extern int _rtc_text_end;
    PMP_ENTRY_SET(10, SOC_RTC_IRAM_LOW, PMP_NONE);
#if CONFIG_ULP_COPROC_RESERVE_MEM
    // First part of LP mem is reserved for coprocessor
    PMP_ENTRY_SET(11, SOC_RTC_IRAM_LOW + CONFIG_ULP_COPROC_RESERVE_MEM, PMP_TOR | PMP_RW);
#endif //CONFIG_ULP_COPROC_RESERVE_MEM
    PMP_ENTRY_SET(12, (int)&_rtc_text_end, PMP_TOR | PMP_RX);
    PMP_ENTRY_SET(13, SOC_RTC_IRAM_HIGH, PMP_TOR | PMP_RW);
#else
    const uint32_t pmpaddr10 = PMPADDR_NAPOT(SOC_RTC_IRAM_LOW, SOC_RTC_IRAM_HIGH);
    PMP_ENTRY_SET(10, pmpaddr10, PMP_NAPOT | CONDITIONAL_RWX);
    _Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");
#endif

    // 7. Peripheral addresses
    const uint32_t pmpaddr14 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
    PMP_ENTRY_SET(14, pmpaddr14, PMP_NAPOT | PMP_RW);
    _Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");
}
#elif CONFIG_IDF_TARGET_ESP32H2
// ESP32H2-TODO: IDF-6452
void esp_cpu_configure_region_protection(void)
{
    /* Notes on implementation:
     *
     * 1) Note: ESP32-H2 CPU doesn't support overlapping PMP regions
     *
     * 2) Therefore, we use TOR (top of range) entries to map the whole address
     *    space, bottom to top.
     *
     * 3) There are not enough entries to describe all the memory regions 100% accurately.
     *
     * 4) This means some gaps (invalid memory) are accessible. When extending regions to cover
     *    gaps, the preference is to extend read-only or read-execute regions (executing unmapped
     *    addresses should always fault with an invalid instruction; read-only means stores will
     *    correctly fault even if reads may return some invalid value).
     *
     * 5) Entries are grouped in order with some static asserts to try and verify everything is
     *    correct.
     */
    const unsigned NONE = PMP_L | PMP_TOR;
    const unsigned RW = PMP_L | PMP_TOR | PMP_R | PMP_W;
    const unsigned RX = PMP_L | PMP_TOR | PMP_R | PMP_X;
    const unsigned RWX = PMP_L | PMP_TOR | PMP_R | PMP_W | PMP_X;

    // 1. Gap at bottom of address space
    PMP_ENTRY_SET(0, SOC_DEBUG_LOW, NONE);

    // 2. Debug region
    PMP_ENTRY_SET(1, SOC_DEBUG_HIGH, RWX);
    _Static_assert(SOC_DEBUG_LOW < SOC_DEBUG_HIGH, "Invalid CPU debug region");

    // 3. Gap between debug region & IROM
    PMP_ENTRY_SET(2, SOC_IROM_MASK_LOW, NONE);
    _Static_assert(SOC_DEBUG_HIGH < SOC_IROM_MASK_LOW, "Invalid PMP entry order");

    // 4. ROM
    PMP_ENTRY_SET(3, SOC_DROM_MASK_HIGH, RX);
    _Static_assert(SOC_IROM_MASK_LOW < SOC_DROM_MASK_HIGH, "Invalid ROM region");

    // 5. Gap between ROM & RAM
    PMP_ENTRY_SET(4, SOC_IRAM_LOW, NONE);
    _Static_assert(SOC_DROM_MASK_HIGH < SOC_IRAM_LOW, "Invalid PMP entry order");

    // 6. RAM
    PMP_ENTRY_SET(5, SOC_IRAM_HIGH, RWX);
    _Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid RAM region");

    // 7. Gap between DRAM and I_Cache
    PMP_ENTRY_SET(6, SOC_IROM_LOW, NONE);
    _Static_assert(SOC_IRAM_HIGH < SOC_IROM_LOW, "Invalid PMP entry order");

    // 8. I_Cache (flash)
    PMP_ENTRY_SET(7, SOC_IROM_HIGH, RWX);
    _Static_assert(SOC_IROM_LOW < SOC_IROM_HIGH, "Invalid I_Cache region");

    // 9. D_Cache (flash)
    PMP_ENTRY_SET(8, SOC_DROM_HIGH, RW);
    _Static_assert(SOC_DROM_LOW < SOC_DROM_HIGH, "Invalid D_Cache region");

    // 10. Gap between D_Cache & LP_RAM
    PMP_ENTRY_SET(9, SOC_RTC_IRAM_LOW, NONE);
    _Static_assert(SOC_DROM_HIGH < SOC_RTC_IRAM_LOW, "Invalid PMP entry order");

    // 16. LP memory
    PMP_ENTRY_SET(10, SOC_RTC_IRAM_HIGH, RWX);
    _Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");

    // 17. Gap between LP memory & peripheral addresses
    PMP_ENTRY_SET(11, SOC_PERIPHERAL_LOW, NONE);
    _Static_assert(SOC_RTC_IRAM_HIGH < SOC_PERIPHERAL_LOW, "Invalid PMP entry order");

    // 18. Peripheral addresses
    PMP_ENTRY_SET(12, SOC_PERIPHERAL_HIGH, RW);
    _Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");

    // 19. End of address space
    PMP_ENTRY_SET(13, UINT32_MAX, NONE);            // all but last 4 bytes
    PMP_ENTRY_SET(14, UINT32_MAX, PMP_L | PMP_NA4); // last 4 bytes
}

#endif

/* ---------------------------------------------------- Debugging ------------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

components/esp_hw_support/port/esp32/cpu_region_protect.c (new file, 22 lines)
@@ -0,0 +1,22 @@
/*
 * SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdint.h>
#include "hal/mpu_hal.h"

void esp_cpu_configure_region_protection(void)
{
    /* Note: currently this is configured the same on all Xtensa targets
     *
     * These chips have the address space divided into 8 regions of 512 MB each.
     */
    const int illegal_regions[] = {0, 4, 5, 6, 7}; // 0x00000000, 0x80000000, 0xa0000000, 0xc0000000, 0xe0000000
    for (size_t i = 0; i < sizeof(illegal_regions) / sizeof(illegal_regions[0]); ++i) {
        mpu_hal_set_region_access(illegal_regions[i], MPU_REGION_ILLEGAL);
    }

    mpu_hal_set_region_access(1, MPU_REGION_RW); // 0x20000000
}
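A note on the Xtensa MPU scheme this file uses: with the 4 GB address space carved into eight 512 MB regions, a region index is simply the top three bits of an address. A minimal illustrative sketch follows (not part of the commit; mpu_region_of is a hypothetical helper):

#include <stdint.h>

// Region index = top 3 address bits; each region spans 0x20000000 bytes.
static int mpu_region_of(uint32_t addr)
{
    return (int)(addr >> 29);  // e.g. 0x20000000 -> 1, 0x80000000 -> 4
}

This is why the illegal regions {0, 4, 5, 6, 7} above correspond to 0x00000000, 0x80000000, 0xa0000000, 0xc0000000 and 0xe0000000, while region 1 (0x20000000) stays read-write.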
components/esp_hw_support/port/esp32c2/cpu_region_protect.c (new file, 140 lines)
@@ -0,0 +1,140 @@
/*
 * SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdint.h>
#include "sdkconfig.h"
#include "soc/soc.h"
#include "esp_cpu.h"
#include "esp_fault.h"

#if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT && !BOOTLOADER_BUILD
extern int _iram_end;
extern int _data_start;
#define IRAM_END (int)&_iram_end
#define DRAM_START (int)&_data_start
#else
#define IRAM_END SOC_DIRAM_IRAM_HIGH
#define DRAM_START SOC_DIRAM_DRAM_LOW
#endif

#ifdef BOOTLOADER_BUILD
// Without L bit set
#define CONDITIONAL_NONE 0x0
#define CONDITIONAL_RX PMP_R | PMP_X
#define CONDITIONAL_RW PMP_R | PMP_W
#define CONDITIONAL_RWX PMP_R | PMP_W | PMP_X
#else
// With L bit set
#define CONDITIONAL_NONE NONE
#define CONDITIONAL_RX RX
#define CONDITIONAL_RW RW
#define CONDITIONAL_RWX RWX
#endif

void esp_cpu_configure_region_protection(void)
{
    /* Notes on implementation:
     *
     * 1) The ESP32-C2 CPU supports overlapping PMP regions; configuration relies on the static
     *    priority feature (the lowest-numbered entry has the highest priority).
     *
     * 2) Therefore, we use TOR (top of range) and NAPOT entries to map the effective area.
     *    Finally, any address not covered is left without access permission.
     *
     * 3) PMPADDR entries 3-15 are hardcoded to fixed values; PMPADDR entries 0-2 are programmed
     *    to split the I/D SRAM into IRAM and DRAM. All PMPCFG entries are available.
     *
     * 4) Ideally, the PMPADDR 0-2 entries should be configured twice, once during bootloader startup and again during
     *    app startup. However, the CPU currently always executes in machine mode, and to enforce these permissions in
     *    machine mode we need to set the Lock (L) bit, which, once set, cannot be reconfigured. So we only configure
     *    PMPADDR 0-2 during app startup.
     */
    const unsigned NONE = PMP_L;
    const unsigned R = PMP_L | PMP_R;
    const unsigned X = PMP_L | PMP_X;
    const unsigned RW = PMP_L | PMP_R | PMP_W;
    const unsigned RX = PMP_L | PMP_R | PMP_X;
    const unsigned RWX = PMP_L | PMP_R | PMP_W | PMP_X;

    /* There are 4 configuration scenarios for PMPADDR 0-2
     *
     * 1. Bootloader build:
     *    - We cannot set the lock bit as we need to reconfigure it again for the application.
     *      We configure PMPADDR 0-1 to cover the entire valid IRAM range and PMPADDR 2-3 to cover the entire valid DRAM range.
     *
     * 2. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT enabled
     *    - We split the SRAM into IRAM and DRAM such that the IRAM region cannot be accessed via the DBUS
     *      and the DRAM region cannot be accessed via the IBUS. We use the _iram_end and _data_start markers to set the boundaries.
     *      We also lock these entries so the R/W/X permissions are enforced even in machine mode.
     *
     * 3. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT disabled
     *    - The IRAM-DRAM split is not enabled, so we just need to ensure that only accesses to valid address ranges succeed;
     *      for that we set PMPADDR 0-1 to cover the entire valid IRAM range and PMPADDR 2-3 to cover the entire DRAM region.
     *      We also lock these entries so the R/W/X permissions are enforced even in machine mode.
     *
     * 4. CPU is in OCD debug mode
     *    - The IRAM-DRAM split is not enabled so that OpenOCD can write to and execute from IRAM.
     *      We set PMPADDR 0-1 to cover the entire valid IRAM range and PMPADDR 2-3 to cover the entire DRAM region.
     *      We also lock these entries so the R/W/X permissions are enforced even in machine mode.
     *
     * PMPADDR 3-15 are hard-coded and are applicable to both the bootloader and the application. So we configure and lock
     * these during the BOOTLOADER build itself. During the application build, reconfiguration of these PMPADDR entries
     * is silently ignored by the CPU.
     */

    if (esp_cpu_dbgr_is_attached()) {
        // Anti-FI check that cpu is really in ocd mode
        ESP_FAULT_ASSERT(esp_cpu_dbgr_is_attached());

        // 1. IRAM
        PMP_ENTRY_SET(0, SOC_DIRAM_IRAM_LOW, NONE);
        PMP_ENTRY_SET(1, SOC_DIRAM_IRAM_HIGH, PMP_TOR | RWX);

        // 2. DRAM
        PMP_ENTRY_SET(2, SOC_DIRAM_DRAM_LOW, NONE);
        PMP_ENTRY_CFG_SET(3, PMP_TOR | RW);
    } else {
        // 1. IRAM
        PMP_ENTRY_SET(0, SOC_DIRAM_IRAM_LOW, CONDITIONAL_NONE);

#if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT
        PMP_ENTRY_SET(1, IRAM_END, PMP_TOR | CONDITIONAL_RX);
#else
        PMP_ENTRY_SET(1, IRAM_END, PMP_TOR | CONDITIONAL_RWX);
#endif

        // 2. DRAM
        PMP_ENTRY_SET(2, DRAM_START, CONDITIONAL_NONE);
        PMP_ENTRY_CFG_SET(3, PMP_TOR | CONDITIONAL_RW);
    }

    // 3. Debug region
    PMP_ENTRY_CFG_SET(4, PMP_NAPOT | RWX);

    // 4. DROM (flash dcache)
    PMP_ENTRY_CFG_SET(5, PMP_NAPOT | R);

    // 5. DROM_MASK
    PMP_ENTRY_CFG_SET(6, NONE);
    PMP_ENTRY_CFG_SET(7, PMP_TOR | R);

    // 6. IROM_MASK
    PMP_ENTRY_CFG_SET(8, NONE);
    PMP_ENTRY_CFG_SET(9, PMP_TOR | RX);

    // 7. IROM (flash icache)
    PMP_ENTRY_CFG_SET(10, PMP_NAPOT | RX);

    // 8. Peripheral addresses
    PMP_ENTRY_CFG_SET(11, PMP_NAPOT | RW);

    // 9. SRAM (used as ICache)
    PMP_ENTRY_CFG_SET(12, PMP_NAPOT | X);

    // 10. No access to any remaining address (0x0-0xFFFF_FFFF)
    PMP_ENTRY_CFG_SET(13, PMP_NA4 | NONE); // last 4 bytes (0xFFFFFFFC)
    PMP_ENTRY_CFG_SET(14, NONE);
    PMP_ENTRY_CFG_SET(15, PMP_TOR | NONE);
}
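Since the ESP32-C2 notes lean on the PMP static-priority rule, here is a hedged software model of it (illustrative only; pmp_effective_perm and pmp_region_t are hypothetical, not ESP-IDF or hardware code). The lowest-numbered matching entry decides, which is why the reprogrammable IRAM/DRAM split lives in entries 0-3, ahead of the fixed entries 4-15:

#include <stdbool.h>
#include <stdint.h>

typedef struct {
    uint32_t low, high;  // [low, high) address range covered by the entry
    uint8_t  perm;       // R/W/X permission bits
    bool     valid;
} pmp_region_t;

// First (lowest-numbered) matching entry wins; with all entries locked,
// no match means no access even in machine mode.
static uint8_t pmp_effective_perm(const pmp_region_t *e, int n, uint32_t addr)
{
    for (int i = 0; i < n; i++) {
        if (e[i].valid && addr >= e[i].low && addr < e[i].high) {
            return e[i].perm;
        }
    }
    return 0;
}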
components/esp_hw_support/port/esp32c3/cpu_region_protect.c (new file, 108 lines)
@@ -0,0 +1,108 @@
/*
 * SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdint.h>
#include "sdkconfig.h"
#include "soc/soc.h"
#include "esp_cpu.h"
#include "esp_fault.h"

void esp_cpu_configure_region_protection(void)
{
    /* Notes on implementation:
     *
     * 1) Note: ESP32-C3 CPU doesn't support overlapping PMP regions
     *
     * 2) Therefore, we use TOR (top of range) entries to map the whole address
     *    space, bottom to top.
     *
     * 3) There are not enough entries to describe all the memory regions 100% accurately.
     *
     * 4) This means some gaps (invalid memory) are accessible. When extending regions to cover
     *    gaps, the preference is to extend read-only or read-execute regions (executing unmapped
     *    addresses should always fault with an invalid instruction; read-only means stores will
     *    correctly fault even if reads may return some invalid value).
     *
     * 5) Entries are grouped in order with some static asserts to try and verify everything is
     *    correct.
     */
    const unsigned NONE = PMP_L | PMP_TOR;
    const unsigned R = PMP_L | PMP_TOR | PMP_R;
    const unsigned RW = PMP_L | PMP_TOR | PMP_R | PMP_W;
    const unsigned RX = PMP_L | PMP_TOR | PMP_R | PMP_X;
    const unsigned RWX = PMP_L | PMP_TOR | PMP_R | PMP_W | PMP_X;

    // 1. Gap at bottom of address space
    PMP_ENTRY_SET(0, SOC_DEBUG_LOW, NONE);

    // 2. Debug region
    PMP_ENTRY_SET(1, SOC_DEBUG_HIGH, RWX);
    _Static_assert(SOC_DEBUG_LOW < SOC_DEBUG_HIGH, "Invalid CPU debug region");

    // 3. Gap between debug region & DROM (flash cache)
    PMP_ENTRY_SET(2, SOC_DROM_LOW, NONE);
    _Static_assert(SOC_DEBUG_HIGH < SOC_DROM_LOW, "Invalid PMP entry order");

    // 4. DROM (flash cache)
    // 5. Gap between DROM & DRAM
    // (Note: To save PMP entries these two are merged into one read-only region)
    PMP_ENTRY_SET(3, SOC_DRAM_LOW, R);
    _Static_assert(SOC_DROM_LOW < SOC_DROM_HIGH, "Invalid DROM region");
    _Static_assert(SOC_DROM_HIGH < SOC_DRAM_LOW, "Invalid PMP entry order");

    // 6. DRAM
    PMP_ENTRY_SET(4, SOC_DRAM_HIGH, RW);
    _Static_assert(SOC_DRAM_LOW < SOC_DRAM_HIGH, "Invalid DRAM region");

    // 7. Gap between DRAM and Mask DROM
    // 8. Mask DROM
    // (Note: to save PMP entries these two are merged into one read-only region)
    PMP_ENTRY_SET(5, SOC_DROM_MASK_HIGH, R);
    _Static_assert(SOC_DRAM_HIGH < SOC_DROM_MASK_LOW, "Invalid PMP entry order");
    _Static_assert(SOC_DROM_MASK_LOW < SOC_DROM_MASK_HIGH, "Invalid mask DROM region");

    // 9. Gap between mask DROM and mask IROM
    // 10. Mask IROM
    // (Note: to save PMP entries these two are merged into one RX region)
    PMP_ENTRY_SET(6, SOC_IROM_MASK_HIGH, RX);
    _Static_assert(SOC_DROM_MASK_HIGH < SOC_IROM_MASK_LOW, "Invalid PMP entry order");
    _Static_assert(SOC_IROM_MASK_LOW < SOC_IROM_MASK_HIGH, "Invalid mask IROM region");

    // 11. Gap between mask IROM & IRAM
    PMP_ENTRY_SET(7, SOC_IRAM_LOW, NONE);
    _Static_assert(SOC_IROM_MASK_HIGH < SOC_IRAM_LOW, "Invalid PMP entry order");

    // 12. IRAM
    PMP_ENTRY_SET(8, SOC_IRAM_HIGH, RWX);
    _Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid IRAM region");

    // 13. Gap between IRAM and IROM
    // 14. IROM (flash cache)
    // (Note: to save PMP entries these two are merged into one RX region)
    PMP_ENTRY_SET(9, SOC_IROM_HIGH, RX);
    _Static_assert(SOC_IRAM_HIGH < SOC_IROM_LOW, "Invalid PMP entry order");
    _Static_assert(SOC_IROM_LOW < SOC_IROM_HIGH, "Invalid IROM region");

    // 15. Gap between IROM & RTC slow memory
    PMP_ENTRY_SET(10, SOC_RTC_IRAM_LOW, NONE);
    _Static_assert(SOC_IROM_HIGH < SOC_RTC_IRAM_LOW, "Invalid PMP entry order");

    // 16. RTC fast memory
    PMP_ENTRY_SET(11, SOC_RTC_IRAM_HIGH, RWX);
    _Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");

    // 17. Gap between RTC fast memory & peripheral addresses
    PMP_ENTRY_SET(12, SOC_PERIPHERAL_LOW, NONE);
    _Static_assert(SOC_RTC_IRAM_HIGH < SOC_PERIPHERAL_LOW, "Invalid PMP entry order");

    // 18. Peripheral addresses
    PMP_ENTRY_SET(13, SOC_PERIPHERAL_HIGH, RW);
    _Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");

    // 19. End of address space
    PMP_ENTRY_SET(14, UINT32_MAX, NONE);            // all but last 4 bytes
    PMP_ENTRY_SET(15, UINT32_MAX, PMP_L | PMP_NA4); // last 4 bytes
}
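For readers unfamiliar with the TOR entries the ESP32-C3 file relies on exclusively: with A=TOR, entry i covers addresses from the previous entry's pmpaddr up to its own, so programming one boundary per entry tiles the address space bottom to top. A minimal sketch of that matching rule (illustrative; pmp_tor_entry_for is a hypothetical helper, not hardware or ESP-IDF code):

#include <stdint.h>

// pmpaddr[] holds the raw CSR values (address bits [33:2]); with A=TOR,
// entry i covers [pmpaddr[i-1] << 2, pmpaddr[i] << 2), and entry 0 starts at 0.
static int pmp_tor_entry_for(const uint32_t *pmpaddr, int n, uint32_t addr)
{
    uint64_t base = 0;
    for (int i = 0; i < n; i++) {
        uint64_t top = (uint64_t)pmpaddr[i] << 2;  // 64-bit: the boundary can reach 4 GB
        if (addr >= base && addr < top) {
            return i;  // lowest-numbered match wins
        }
        base = top;
    }
    return -1;  // not covered by any TOR entry
}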
components/esp_hw_support/port/esp32c6/cpu_region_protect.c (new file, 198 lines)
@@ -0,0 +1,198 @@
/*
 * SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdint.h>
#include "sdkconfig.h"
#include "soc/soc.h"
#include "esp_cpu.h"
#include "esp_fault.h"

#ifdef BOOTLOADER_BUILD
// Without L bit set
#define CONDITIONAL_NONE 0x0
#define CONDITIONAL_RX PMP_R | PMP_X
#define CONDITIONAL_RW PMP_R | PMP_W
#define CONDITIONAL_RWX PMP_R | PMP_W | PMP_X
#else
// With L bit set
#define CONDITIONAL_NONE NONE
#define CONDITIONAL_RX RX
#define CONDITIONAL_RW RW
#define CONDITIONAL_RWX RWX
#endif

static void esp_cpu_configure_invalid_regions(void)
{
    const unsigned PMA_NONE = PMA_L | PMA_EN;
    __attribute__((unused)) const unsigned PMA_RW = PMA_L | PMA_EN | PMA_R | PMA_W;
    __attribute__((unused)) const unsigned PMA_RX = PMA_L | PMA_EN | PMA_R | PMA_X;
    __attribute__((unused)) const unsigned PMA_RWX = PMA_L | PMA_EN | PMA_R | PMA_W | PMA_X;

    // 1. Gap at bottom of address space
    PMA_ENTRY_SET_TOR(0, SOC_DEBUG_LOW, PMA_TOR | PMA_NONE);

    // 2. Gap between debug region & IROM
    PMA_ENTRY_SET_TOR(1, SOC_DEBUG_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(2, SOC_IROM_MASK_LOW, PMA_TOR | PMA_NONE);

    // 3. Gap between ROM & RAM
    PMA_ENTRY_SET_TOR(3, SOC_DROM_MASK_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(4, SOC_IRAM_LOW, PMA_TOR | PMA_NONE);

    // 4. Gap between DRAM and I_Cache
    PMA_ENTRY_SET_TOR(5, SOC_IRAM_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(6, SOC_IROM_LOW, PMA_TOR | PMA_NONE);

    // 5. Gap between D_Cache & LP_RAM
    PMA_ENTRY_SET_TOR(7, SOC_DROM_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(8, SOC_RTC_IRAM_LOW, PMA_TOR | PMA_NONE);

    // 6. Gap between LP memory & peripheral addresses
    PMA_ENTRY_SET_TOR(9, SOC_RTC_IRAM_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(10, SOC_PERIPHERAL_LOW, PMA_TOR | PMA_NONE);

    // 7. End of address space
    PMA_ENTRY_SET_TOR(11, SOC_PERIPHERAL_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(12, UINT32_MAX, PMA_TOR | PMA_NONE);
}

void esp_cpu_configure_region_protection(void)
{
    /* Notes on implementation:
     *
     * 1) Note: ESP32-C6 CPU doesn't support overlapping PMP regions
     *
     * 2) ESP32-C6 supports 16 PMA regions, so we use this feature to block all the invalid address ranges
     *
     * 3) We use a combination of NAPOT (Naturally Aligned Power Of Two) and TOR (top of range)
     *    entries to map all the valid address space, bottom to top. This leaves us with some extra PMP entries
     *    which can be used to provide more granular access
     *
     * 4) Entries are grouped in order with some static asserts to try and verify everything is
     *    correct.
     */

    /* There are 4 configuration scenarios for SRAM
     *
     * 1. Bootloader build:
     *    - We cannot set the lock bit as we need to reconfigure it again for the application.
     *      We configure PMP to cover the entire valid IRAM and DRAM range.
     *
     * 2. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT enabled
     *    - We split the SRAM into IRAM and DRAM such that the IRAM region cannot be written to
     *      and the DRAM region cannot be executed. We use the _iram_end and _data_start markers to set the boundaries.
     *      We also lock these entries so the R/W/X permissions are enforced even in machine mode.
     *
     * 3. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT disabled
     *    - The IRAM-DRAM split is not enabled, so we just need to ensure that only accesses to valid address ranges
     *      succeed; for that we set PMP to cover the entire valid IRAM and DRAM region.
     *      We also lock these entries so the R/W/X permissions are enforced even in machine mode.
     *
     * 4. CPU is in OCD debug mode
     *    - The IRAM-DRAM split is not enabled so that OpenOCD can write to and execute from IRAM.
     *      We set PMP to cover the entire valid IRAM and DRAM region.
     *      We also lock these entries so the R/W/X permissions are enforced even in machine mode.
     */
    const unsigned NONE = PMP_L;
    const unsigned R = PMP_L | PMP_R;
    const unsigned RW = PMP_L | PMP_R | PMP_W;
    const unsigned RX = PMP_L | PMP_R | PMP_X;
    const unsigned RWX = PMP_L | PMP_R | PMP_W | PMP_X;

    //
    // Configure all the invalid address regions using PMA
    //
    esp_cpu_configure_invalid_regions();

    //
    // Configure all the valid address regions using PMP
    //

    // 1. Debug region
    const uint32_t pmpaddr0 = PMPADDR_NAPOT(SOC_DEBUG_LOW, SOC_DEBUG_HIGH);
    PMP_ENTRY_SET(0, pmpaddr0, PMP_NAPOT | RWX);
    _Static_assert(SOC_DEBUG_LOW < SOC_DEBUG_HIGH, "Invalid CPU debug region");

    // 2.1 I-ROM
    PMP_ENTRY_SET(1, SOC_IROM_MASK_LOW, NONE);
    PMP_ENTRY_SET(2, SOC_IROM_MASK_HIGH, PMP_TOR | RX);
    _Static_assert(SOC_IROM_MASK_LOW < SOC_IROM_MASK_HIGH, "Invalid I-ROM region");

    // 2.2 D-ROM
    PMP_ENTRY_SET(3, SOC_DROM_MASK_LOW, NONE);
    PMP_ENTRY_SET(4, SOC_DROM_MASK_HIGH, PMP_TOR | R);
    _Static_assert(SOC_DROM_MASK_LOW < SOC_DROM_MASK_HIGH, "Invalid D-ROM region");

    if (esp_cpu_dbgr_is_attached()) {
        // Anti-FI check that cpu is really in ocd mode
        ESP_FAULT_ASSERT(esp_cpu_dbgr_is_attached());

        // 5. IRAM and DRAM
        const uint32_t pmpaddr5 = PMPADDR_NAPOT(SOC_IRAM_LOW, SOC_IRAM_HIGH);
        PMP_ENTRY_SET(5, pmpaddr5, PMP_NAPOT | RWX);
        _Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid RAM region");
    } else {
#if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT && !BOOTLOADER_BUILD
        extern int _iram_end;
        // 5. IRAM and DRAM
        /* Reset the corresponding PMP config because PMP_ENTRY_SET only sets the given bits
         * Bootloader might have given extra permissions and those won't be cleared
         */
        PMP_ENTRY_CFG_RESET(5);
        PMP_ENTRY_CFG_RESET(6);
        PMP_ENTRY_CFG_RESET(7);
        PMP_ENTRY_SET(5, SOC_IRAM_LOW, NONE);
        PMP_ENTRY_SET(6, (int)&_iram_end, PMP_TOR | RX);
        PMP_ENTRY_SET(7, SOC_DRAM_HIGH, PMP_TOR | RW);
#else
        // 5. IRAM and DRAM
        const uint32_t pmpaddr5 = PMPADDR_NAPOT(SOC_IRAM_LOW, SOC_IRAM_HIGH);
        PMP_ENTRY_SET(5, pmpaddr5, PMP_NAPOT | CONDITIONAL_RWX);
        _Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid RAM region");
#endif
    }

    // 4. I_Cache (flash)
    const uint32_t pmpaddr8 = PMPADDR_NAPOT(SOC_IROM_LOW, SOC_IROM_HIGH);
    PMP_ENTRY_SET(8, pmpaddr8, PMP_NAPOT | RX);
    _Static_assert(SOC_IROM_LOW < SOC_IROM_HIGH, "Invalid I_Cache region");

    // 5. D_Cache (flash)
    const uint32_t pmpaddr9 = PMPADDR_NAPOT(SOC_DROM_LOW, SOC_DROM_HIGH);
    PMP_ENTRY_SET(9, pmpaddr9, PMP_NAPOT | R);
    _Static_assert(SOC_DROM_LOW < SOC_DROM_HIGH, "Invalid D_Cache region");

    // 6. LP memory
#if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT && !BOOTLOADER_BUILD
    extern int _rtc_text_end;
    /* Reset the corresponding PMP config because PMP_ENTRY_SET only sets the given bits
     * Bootloader might have given extra permissions and those won't be cleared
     */
    PMP_ENTRY_CFG_RESET(10);
    PMP_ENTRY_CFG_RESET(11);
    PMP_ENTRY_CFG_RESET(12);
    PMP_ENTRY_CFG_RESET(13);
    PMP_ENTRY_SET(10, SOC_RTC_IRAM_LOW, NONE);
#if CONFIG_ULP_COPROC_RESERVE_MEM
    // First part of LP mem is reserved for coprocessor
    PMP_ENTRY_SET(11, SOC_RTC_IRAM_LOW + CONFIG_ULP_COPROC_RESERVE_MEM, PMP_TOR | RW);
    PMP_ENTRY_SET(12, (int)&_rtc_text_end, PMP_TOR | RX);
    PMP_ENTRY_SET(13, SOC_RTC_IRAM_HIGH, PMP_TOR | RW);
#else //CONFIG_ULP_COPROC_RESERVE_MEM
    PMP_ENTRY_SET(11, (int)&_rtc_text_end, PMP_TOR | RX);
    PMP_ENTRY_SET(12, SOC_RTC_IRAM_HIGH, PMP_TOR | RW);
#endif //CONFIG_ULP_COPROC_RESERVE_MEM
#else
    const uint32_t pmpaddr10 = PMPADDR_NAPOT(SOC_RTC_IRAM_LOW, SOC_RTC_IRAM_HIGH);
    PMP_ENTRY_SET(10, pmpaddr10, PMP_NAPOT | CONDITIONAL_RWX);
    _Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");
#endif

    // 7. Peripheral addresses
    const uint32_t pmpaddr14 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
    PMP_ENTRY_SET(14, pmpaddr14, PMP_NAPOT | RW);
    _Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");
}
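The ESP32-C6 file above leans on PMPADDR_NAPOT(low, high), whose definition is not shown in this diff. Here is a hedged sketch of the standard RISC-V NAPOT encoding it presumably implements (pmpaddr_napot is a hypothetical stand-in, not the real ESP-IDF macro): the base address is shifted right by two, and k trailing one-bits select a region size of 2^(k+3) bytes.

#include <assert.h>
#include <stdint.h>

// Encodes a naturally aligned power-of-two region [base, base + size)
// into a single pmpaddr CSR value.
static uint32_t pmpaddr_napot(uint32_t base, uint32_t size)
{
    assert(size >= 8 && (size & (size - 1)) == 0);  // power of two, >= 8 bytes
    assert((base & (size - 1)) == 0);               // base aligned to size
    // base >> 2 supplies address bits [33:2]; (size >> 3) - 1 sets the
    // trailing ones: k trailing ones select a 2^(k+3)-byte region.
    return (base >> 2) | ((size >> 3) - 1);
}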
components/esp_hw_support/port/esp32h2/cpu_region_protect.c (new file, 92 lines)
@@ -0,0 +1,92 @@
/*
 * SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdint.h>
#include "sdkconfig.h"
#include "soc/soc.h"
#include "esp_cpu.h"
#include "esp_fault.h"

// ESP32H2-TODO: IDF-6452
void esp_cpu_configure_region_protection(void)
{
    /* Notes on implementation:
     *
     * 1) Note: ESP32-H2 CPU doesn't support overlapping PMP regions
     *
     * 2) Therefore, we use TOR (top of range) entries to map the whole address
     *    space, bottom to top.
     *
     * 3) There are not enough entries to describe all the memory regions 100% accurately.
     *
     * 4) This means some gaps (invalid memory) are accessible. When extending regions to cover
     *    gaps, the preference is to extend read-only or read-execute regions (executing unmapped
     *    addresses should always fault with an invalid instruction; read-only means stores will
     *    correctly fault even if reads may return some invalid value).
     *
     * 5) Entries are grouped in order with some static asserts to try and verify everything is
     *    correct.
     */
    const unsigned NONE = PMP_L | PMP_TOR;
    const unsigned RW = PMP_L | PMP_TOR | PMP_R | PMP_W;
    const unsigned RX = PMP_L | PMP_TOR | PMP_R | PMP_X;
    const unsigned RWX = PMP_L | PMP_TOR | PMP_R | PMP_W | PMP_X;

    // 1. Gap at bottom of address space
    PMP_ENTRY_SET(0, SOC_DEBUG_LOW, NONE);

    // 2. Debug region
    PMP_ENTRY_SET(1, SOC_DEBUG_HIGH, RWX);
    _Static_assert(SOC_DEBUG_LOW < SOC_DEBUG_HIGH, "Invalid CPU debug region");

    // 3. Gap between debug region & IROM
    PMP_ENTRY_SET(2, SOC_IROM_MASK_LOW, NONE);
    _Static_assert(SOC_DEBUG_HIGH < SOC_IROM_MASK_LOW, "Invalid PMP entry order");

    // 4. ROM
    PMP_ENTRY_SET(3, SOC_DROM_MASK_HIGH, RX);
    _Static_assert(SOC_IROM_MASK_LOW < SOC_DROM_MASK_HIGH, "Invalid ROM region");

    // 5. Gap between ROM & RAM
    PMP_ENTRY_SET(4, SOC_IRAM_LOW, NONE);
    _Static_assert(SOC_DROM_MASK_HIGH < SOC_IRAM_LOW, "Invalid PMP entry order");

    // 6. RAM
    PMP_ENTRY_SET(5, SOC_IRAM_HIGH, RWX);
    _Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid RAM region");

    // 7. Gap between DRAM and I_Cache
    PMP_ENTRY_SET(6, SOC_IROM_LOW, NONE);
    _Static_assert(SOC_IRAM_HIGH < SOC_IROM_LOW, "Invalid PMP entry order");

    // 8. I_Cache (flash)
    PMP_ENTRY_SET(7, SOC_IROM_HIGH, RWX);
    _Static_assert(SOC_IROM_LOW < SOC_IROM_HIGH, "Invalid I_Cache region");

    // 9. D_Cache (flash)
    PMP_ENTRY_SET(8, SOC_DROM_HIGH, RW);
    _Static_assert(SOC_DROM_LOW < SOC_DROM_HIGH, "Invalid D_Cache region");

    // 10. Gap between D_Cache & LP_RAM
    PMP_ENTRY_SET(9, SOC_RTC_IRAM_LOW, NONE);
    _Static_assert(SOC_DROM_HIGH < SOC_RTC_IRAM_LOW, "Invalid PMP entry order");

    // 16. LP memory
    PMP_ENTRY_SET(10, SOC_RTC_IRAM_HIGH, RWX);
    _Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");

    // 17. Gap between LP memory & peripheral addresses
    PMP_ENTRY_SET(11, SOC_PERIPHERAL_LOW, NONE);
    _Static_assert(SOC_RTC_IRAM_HIGH < SOC_PERIPHERAL_LOW, "Invalid PMP entry order");

    // 18. Peripheral addresses
    PMP_ENTRY_SET(12, SOC_PERIPHERAL_HIGH, RW);
    _Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");

    // 19. End of address space
    PMP_ENTRY_SET(13, UINT32_MAX, NONE);            // all but last 4 bytes
    PMP_ENTRY_SET(14, UINT32_MAX, PMP_L | PMP_NA4); // last 4 bytes
}
components/esp_hw_support/port/esp32h4/cpu_region_protect.c (new file, 107 lines)
@@ -0,0 +1,107 @@
/*
 * SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdint.h>
#include "sdkconfig.h"
#include "soc/soc.h"
#include "riscv/csr.h"

void esp_cpu_configure_region_protection(void)
{
    /* Notes on implementation:
     *
     * 1) Note: ESP32-H4 CPU doesn't support overlapping PMP regions
     *
     * 2) Therefore, we use TOR (top of range) entries to map the whole address
     *    space, bottom to top.
     *
     * 3) There are not enough entries to describe all the memory regions 100% accurately.
     *
     * 4) This means some gaps (invalid memory) are accessible. When extending regions to cover
     *    gaps, the preference is to extend read-only or read-execute regions (executing unmapped
     *    addresses should always fault with an invalid instruction; read-only means stores will
     *    correctly fault even if reads may return some invalid value).
     *
     * 5) Entries are grouped in order with some static asserts to try and verify everything is
     *    correct.
     */
    const unsigned NONE = PMP_L | PMP_TOR;
    const unsigned R = PMP_L | PMP_TOR | PMP_R;
    const unsigned RW = PMP_L | PMP_TOR | PMP_R | PMP_W;
    const unsigned RX = PMP_L | PMP_TOR | PMP_R | PMP_X;
    const unsigned RWX = PMP_L | PMP_TOR | PMP_R | PMP_W | PMP_X;

    // 1. Gap at bottom of address space
    PMP_ENTRY_SET(0, SOC_DEBUG_LOW, NONE);

    // 2. Debug region
    PMP_ENTRY_SET(1, SOC_DEBUG_HIGH, RWX);
    _Static_assert(SOC_DEBUG_LOW < SOC_DEBUG_HIGH, "Invalid CPU debug region");

    // 3. Gap between debug region & DROM (flash cache)
    PMP_ENTRY_SET(2, SOC_DROM_LOW, NONE);
    _Static_assert(SOC_DEBUG_HIGH < SOC_DROM_LOW, "Invalid PMP entry order");

    // 4. DROM (flash cache)
    // 5. Gap between DROM & DRAM
    // (Note: To save PMP entries these two are merged into one read-only region)
    PMP_ENTRY_SET(3, SOC_DRAM_LOW, R);
    _Static_assert(SOC_DROM_LOW < SOC_DROM_HIGH, "Invalid DROM region");
    _Static_assert(SOC_DROM_HIGH < SOC_DRAM_LOW, "Invalid PMP entry order");

    // 6. DRAM
    PMP_ENTRY_SET(4, SOC_DRAM_HIGH, RW);
    _Static_assert(SOC_DRAM_LOW < SOC_DRAM_HIGH, "Invalid DRAM region");

    // 7. Gap between DRAM and Mask DROM
    // 8. Mask DROM
    // (Note: to save PMP entries these two are merged into one read-only region)
    PMP_ENTRY_SET(5, SOC_DROM_MASK_HIGH, R);
    _Static_assert(SOC_DRAM_HIGH < SOC_DROM_MASK_LOW, "Invalid PMP entry order");
    _Static_assert(SOC_DROM_MASK_LOW < SOC_DROM_MASK_HIGH, "Invalid mask DROM region");

    // 9. Gap between mask DROM and mask IROM
    // 10. Mask IROM
    // (Note: to save PMP entries these two are merged into one RX region)
    PMP_ENTRY_SET(6, SOC_IROM_MASK_HIGH, RX);
    _Static_assert(SOC_DROM_MASK_HIGH < SOC_IROM_MASK_LOW, "Invalid PMP entry order");
    _Static_assert(SOC_IROM_MASK_LOW < SOC_IROM_MASK_HIGH, "Invalid mask IROM region");

    // 11. Gap between mask IROM & IRAM
    PMP_ENTRY_SET(7, SOC_IRAM_LOW, NONE);
    _Static_assert(SOC_IROM_MASK_HIGH < SOC_IRAM_LOW, "Invalid PMP entry order");

    // 12. IRAM
    PMP_ENTRY_SET(8, SOC_IRAM_HIGH, RWX);
    _Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid IRAM region");

    // 13. Gap between IRAM and IROM
    // 14. IROM (flash cache)
    // (Note: to save PMP entries these two are merged into one RX region)
    PMP_ENTRY_SET(9, SOC_IROM_HIGH, RX);
    _Static_assert(SOC_IRAM_HIGH < SOC_IROM_LOW, "Invalid PMP entry order");
    _Static_assert(SOC_IROM_LOW < SOC_IROM_HIGH, "Invalid IROM region");

    // 15. Gap between IROM & RTC slow memory
    PMP_ENTRY_SET(10, SOC_RTC_IRAM_LOW, NONE);
    _Static_assert(SOC_IROM_HIGH < SOC_RTC_IRAM_LOW, "Invalid PMP entry order");

    // 16. RTC fast memory
    PMP_ENTRY_SET(11, SOC_RTC_IRAM_HIGH, RWX);
    _Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");

    // 17. Gap between RTC fast memory & peripheral addresses
    PMP_ENTRY_SET(12, SOC_PERIPHERAL_LOW, NONE);
    _Static_assert(SOC_RTC_IRAM_HIGH < SOC_PERIPHERAL_LOW, "Invalid PMP entry order");

    // 18. Peripheral addresses
    PMP_ENTRY_SET(13, SOC_PERIPHERAL_HIGH, RW);
    _Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");

    // 19. End of address space
    PMP_ENTRY_SET(14, UINT32_MAX, NONE);            // all but last 4 bytes
    PMP_ENTRY_SET(15, UINT32_MAX, PMP_L | PMP_NA4); // last 4 bytes
}
components/esp_hw_support/port/esp32s2/cpu_region_protect.c (new file, 22 lines)
@@ -0,0 +1,22 @@
/*
 * SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdint.h>
#include "hal/mpu_hal.h"

void esp_cpu_configure_region_protection(void)
{
    /* Note: currently this is configured the same on all Xtensa targets
     *
     * These chips have the address space divided into 8 regions of 512 MB each.
     */
    const int illegal_regions[] = {0, 4, 5, 6, 7}; // 0x00000000, 0x80000000, 0xa0000000, 0xc0000000, 0xe0000000
    for (size_t i = 0; i < sizeof(illegal_regions) / sizeof(illegal_regions[0]); ++i) {
        mpu_hal_set_region_access(illegal_regions[i], MPU_REGION_ILLEGAL);
    }

    mpu_hal_set_region_access(1, MPU_REGION_RW); // 0x20000000
}
components/esp_hw_support/port/esp32s3/cpu_region_protect.c (new file, 22 lines)
@@ -0,0 +1,22 @@
/*
 * SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdint.h>
#include "hal/mpu_hal.h"

void esp_cpu_configure_region_protection(void)
{
    /* Note: currently this is configured the same on all Xtensa targets
     *
     * These chips have the address space divided into 8 regions of 512 MB each.
     */
    const int illegal_regions[] = {0, 4, 5, 6, 7}; // 0x00000000, 0x80000000, 0xa0000000, 0xc0000000, 0xe0000000
    for (size_t i = 0; i < sizeof(illegal_regions) / sizeof(illegal_regions[0]); ++i) {
        mpu_hal_set_region_access(illegal_regions[i], MPU_REGION_ILLEGAL);
    }

    mpu_hal_set_region_access(1, MPU_REGION_RW); // 0x20000000
}
@@ -128,6 +128,11 @@ extern "C" {
     RV_SET_CSR((CSR_PMPCFG0) + (ENTRY)/4, ((CFG)&0xFF) << (ENTRY%4)*8); \
 } while(0)

+/* Reset all permissions of a particular PMPCFG entry */
+#define PMP_ENTRY_CFG_RESET(ENTRY) do {\
+    RV_CLEAR_CSR((CSR_PMPCFG0) + (ENTRY)/4, (0xFF) << (ENTRY%4)*8); \
+} while(0)
+
 /********************************************************
   Trigger Module register fields (Debug specification)
  ********************************************************/
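The new PMP_ENTRY_CFG_RESET macro exists because PMP_ENTRY_SET builds on a CSR-set operation, which can only turn bits on. A hedged model of the two macros' effect, using a plain variable in place of the pmpcfg0 CSR (illustrative only, not the real CSR access):

#include <stdint.h>

static uint32_t pmpcfg0;  // stands in for the real pmpcfg0 CSR

// Models RV_SET_CSR: ORs the 8-bit config into the entry's byte lane.
static void cfg_set(int entry, uint32_t cfg)
{
    pmpcfg0 |= (cfg & 0xFF) << ((entry % 4) * 8);
}

// Models RV_CLEAR_CSR: clears the entry's whole byte lane.
static void cfg_reset(int entry)
{
    pmpcfg0 &= ~(0xFFu << ((entry % 4) * 8));
}

If the bootloader left an entry configured as RWX, a later set of RX would still leave the W bit on; clearing the config byte first is what makes the application's tighter permissions take effect. This is how the new esp32c6 file uses the macro before reprogramming entries 5-7 and 10-13.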
@@ -179,6 +179,9 @@
 #define SOC_DIRAM_DRAM_LOW 0x40800000
 #define SOC_DIRAM_DRAM_HIGH 0x40880000

+#define MAP_DRAM_TO_IRAM(addr) (addr)
+#define MAP_IRAM_TO_DRAM(addr) (addr)
+
 // Region of memory accessible via DMA. See esp_ptr_dma_capable().
 #define SOC_DMA_LOW 0x40800000
 #define SOC_DMA_HIGH 0x40880000
@@ -161,7 +161,7 @@ tools/test_apps/system/panic:
   enable:
     - if: INCLUDE_DEFAULT == 1 or IDF_TARGET == "esp32h4"
   disable_test:
-    - if: IDF_TARGET not in ["esp32", "esp32s2", "esp32c3", "esp32s3", "esp32c2"]
+    - if: IDF_TARGET not in ["esp32", "esp32s2", "esp32c3", "esp32s3", "esp32c2", "esp32c6"]
       temporary: true
       reason: test app not ported to this target yet
@@ -480,11 +480,12 @@ def test_panic_delay(dut: PanicTestDut) -> None:
 #########################

 # Memprot-related tests are supported only on targets with PMS/PMA peripheral;
-# currently ESP32-S2, ESP32-C3 and ESP32-C2 are supported
+# currently ESP32-S2, ESP32-C3, ESP32-C2, and ESP32-C6 are supported
 CONFIGS_MEMPROT_IDRAM = [
     pytest.param('memprot_esp32s2', marks=[pytest.mark.esp32s2]),
     pytest.param('memprot_esp32c3', marks=[pytest.mark.esp32c3]),
-    pytest.param('memprot_esp32c2', marks=[pytest.mark.esp32c2])
+    pytest.param('memprot_esp32c2', marks=[pytest.mark.esp32c2]),
+    pytest.param('memprot_esp32c6', marks=[pytest.mark.esp32c6])
 ]

 CONFIGS_MEMPROT_DCACHE = [
@@ -494,6 +495,7 @@ CONFIGS_MEMPROT_DCACHE = [
 CONFIGS_MEMPROT_RTC_FAST_MEM = [
     pytest.param('memprot_esp32s2', marks=[pytest.mark.esp32s2]),
     pytest.param('memprot_esp32c3', marks=[pytest.mark.esp32c3]),
+    pytest.param('memprot_esp32c6', marks=[pytest.mark.esp32c6])
 ]

 CONFIGS_MEMPROT_RTC_SLOW_MEM = [
@@ -532,7 +534,7 @@ def test_iram_reg1_write_violation(dut: PanicTestDut, test_func_name: str) -> None:
         dut.expect_backtrace()
     elif dut.target == 'esp32c3':
         dut.expect_exact(r'Test error: Test function has returned')
-    elif dut.target == 'esp32c2':
+    elif dut.target in ['esp32c2', 'esp32c6']:
         dut.expect_gme('Store access fault')
         dut.expect_reg_dump(0)
         dut.expect_stack_dump()
@@ -555,7 +557,7 @@ def test_iram_reg2_write_violation(dut: PanicTestDut, test_func_name: str) -> None:
         dut.expect(r' operation type: (\S+)')
         dut.expect_reg_dump(0)
         dut.expect_stack_dump()
-    elif dut.target == 'esp32c2':
+    elif dut.target in ['esp32c2', 'esp32c6']:
         dut.expect_gme('Store access fault')
         dut.expect_reg_dump(0)
         dut.expect_stack_dump()
@@ -578,7 +580,7 @@ def test_iram_reg3_write_violation(dut: PanicTestDut, test_func_name: str) -> None:
         dut.expect(r' operation type: (\S+)')
         dut.expect_reg_dump(0)
         dut.expect_stack_dump()
-    elif dut.target == 'esp32c2':
+    elif dut.target in ['esp32c2', 'esp32c6']:
         dut.expect_gme('Store access fault')
         dut.expect_reg_dump(0)
         dut.expect_stack_dump()
@@ -603,7 +605,7 @@ def test_iram_reg4_write_violation(dut: PanicTestDut, test_func_name: str) -> None:
         dut.expect(r' operation type: (\S+)')
         dut.expect_reg_dump(0)
         dut.expect_stack_dump()
-    elif dut.target == 'esp32c2':
+    elif dut.target in ['esp32c2', 'esp32c6']:
         dut.expect_gme('Store access fault')
         dut.expect_reg_dump(0)
         dut.expect_stack_dump()
@@ -621,7 +623,7 @@ def test_dram_reg1_execute_violation(dut: PanicTestDut, test_func_name: str) -> None:
         dut.expect(r'Unknown operation at address [0-9xa-f]+ not permitted \((\S+)\)')
         dut.expect_reg_dump(0)
         dut.expect_corrupted_backtrace()
-    elif dut.target in ['esp32c3', 'esp32c2']:
+    elif dut.target in ['esp32c3', 'esp32c2', 'esp32c6']:
         dut.expect_gme('Instruction access fault')
         dut.expect_reg_dump(0)
         dut.expect_stack_dump()
@@ -636,7 +638,7 @@ def test_dram_reg2_execute_violation(dut: PanicTestDut, test_func_name: str) -> None:
         dut.expect_gme('InstructionFetchError')
         dut.expect_reg_dump(0)
         dut.expect_corrupted_backtrace()
-    elif dut.target in ['esp32c3', 'esp32c2']:
+    elif dut.target in ['esp32c3', 'esp32c2', 'esp32c6']:
         dut.expect_gme('Instruction access fault')
         dut.expect_reg_dump(0)
         dut.expect_stack_dump()
@@ -651,6 +653,7 @@ def test_rtc_fast_reg1_execute_violation(dut: PanicTestDut, test_func_name: str) -> None:

 @pytest.mark.parametrize('config', CONFIGS_MEMPROT_RTC_FAST_MEM, indirect=True)
 @pytest.mark.generic
+@pytest.mark.skipif('config.getvalue("target") == "esp32c6"', reason='Not a violation condition because it does not have PMS peripheral')
 def test_rtc_fast_reg2_execute_violation(dut: PanicTestDut, test_func_name: str) -> None:
     dut.run_test_func(test_func_name)
     dut.expect_gme('Memory protection fault')
@@ -673,18 +676,23 @@ def test_rtc_fast_reg2_execute_violation(dut: PanicTestDut, test_func_name: str) -> None:
 @pytest.mark.xfail('config.getvalue("target") == "esp32s2"', reason='Multiple panic reasons for the same test may surface', run=False)
 def test_rtc_fast_reg3_execute_violation(dut: PanicTestDut, test_func_name: str) -> None:
     dut.run_test_func(test_func_name)
-    dut.expect_gme('Memory protection fault')

     if dut.target == 'esp32s2':
+        dut.expect_gme('Memory protection fault')
         dut.expect(r'Unknown operation at address [0-9xa-f]+ not permitted \((\S+)\)')
         dut.expect_reg_dump(0)
         dut.expect_backtrace()
     elif dut.target == 'esp32c3':
+        dut.expect_gme('Memory protection fault')
         dut.expect(r' memory type: (\S+)')
         dut.expect(r' faulting address: [0-9xa-f]+')
         dut.expect(r' operation type: (\S+)')
         dut.expect_reg_dump(0)
         dut.expect_stack_dump()
+    elif dut.target == 'esp32c6':
+        dut.expect_gme('Instruction access fault')
+        dut.expect_reg_dump(0)
+        dut.expect_stack_dump()


 @pytest.mark.parametrize('config', CONFIGS_MEMPROT_RTC_SLOW_MEM, indirect=True)
(new file)
@@ -0,0 +1,8 @@
# Restricting to ESP32C6
CONFIG_IDF_TARGET="esp32c6"

# Enabling memory protection
CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT=y

# Enable memprot test
CONFIG_TEST_MEMPROT=y
@@ -16,3 +16,6 @@ CONFIG_FREERTOS_USE_TRACE_FACILITY=y

 # Reduce IRAM size
 CONFIG_FREERTOS_PLACE_FUNCTIONS_INTO_FLASH=y
+
+# Increase main task stack size
+CONFIG_ESP_MAIN_TASK_STACK_SIZE=4096