mirror of https://github.com/espressif/esp-idf.git
fix(esp_hw_support): Fix the flash I/DROM region PMP protection
commit 48503dd39f (parent fc326374e0)
@@ -29,29 +29,6 @@
 #define ALIGN_UP_TO_MMU_PAGE_SIZE(addr) (((addr) + (SOC_MMU_PAGE_SIZE) - 1) & ~((SOC_MMU_PAGE_SIZE) - 1))
 #define ALIGN_DOWN_TO_MMU_PAGE_SIZE(addr) ((addr) & ~((SOC_MMU_PAGE_SIZE) - 1))
 
-/**
- * @brief Generate the PMP address field value for PMPCFG.A == NAPOT
- *
- * NOTE: Here, (end-start) must be a power of 2 size and start must
- * be aligned to this size. This API returns UINT32_MAX on failing
- * these conditions, which when plugged into the PMP entry registers
- * does nothing. This skips the corresponding region's protection.
- *
- * @param start Region starting address
- * @param end Region ending address
- *
- * @return uint32_t PMP address field value
- */
-static inline uint32_t pmpaddr_napot(uint32_t start, uint32_t end)
-{
-    uint32_t size = end - start;
-    if ((size & (size - 1)) || (start % size)) {
-        return UINT32_MAX;
-    }
-
-    return start | ((size - 1) >> 1);
-}
-
 static void esp_cpu_configure_invalid_regions(void)
 {
     const unsigned PMA_NONE = PMA_L | PMA_EN;
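Why the helper above can be dropped: the flash text/rodata boundaries used below are only rounded to the MMU page size, so (end - start) is generally not a power of two; pmpaddr_napot() then returns UINT32_MAX and, as its own comment notes, the corresponding region ends up with no protection at all. A minimal host-side sketch of that failure mode (the 64 KB page size and the 0x42xxxxxx addresses are assumed example values, not taken from this commit):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SOC_MMU_PAGE_SIZE 0x10000 /* assumed 64 KB MMU page, for illustration only */
    #define ALIGN_UP_TO_MMU_PAGE_SIZE(addr) (((addr) + (SOC_MMU_PAGE_SIZE) - 1) & ~((SOC_MMU_PAGE_SIZE) - 1))

    /* Local copy of the helper removed in the hunk above */
    static inline uint32_t pmpaddr_napot(uint32_t start, uint32_t end)
    {
        uint32_t size = end - start;
        if ((size & (size - 1)) || (start % size)) {
            return UINT32_MAX; /* needs power-of-2 size and start aligned to it */
        }
        return start | ((size - 1) >> 1);
    }

    int main(void)
    {
        /* Hypothetical app: text starts at the flash cache base and ends at some
         * page-aligned address that is not a power-of-2 distance from the base. */
        const uint32_t irom_low      = 0x42000000;
        const uint32_t irom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE(0x420A5123); /* -> 0x420B0000 */

        uint32_t pmpaddr = pmpaddr_napot(irom_low, irom_resv_end);
        /* size = 0xB0000 is not a power of two, so the helper bails out and the
         * region would have been left without PMP protection. */
        printf("pmpaddr = 0x%08" PRIx32 "%s\n", pmpaddr,
               (pmpaddr == UINT32_MAX) ? " (protection silently skipped)" : "");
        return 0;
    }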
@@ -180,22 +157,18 @@ void esp_cpu_configure_region_protection(void)
 
 #if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT && !BOOTLOADER_BUILD
     extern int _instruction_reserved_end;
    extern int _rodata_reserved_start;
     extern int _rodata_reserved_end;
 
     const uint32_t irom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_instruction_reserved_end));
     const uint32_t drom_resv_start = ALIGN_DOWN_TO_MMU_PAGE_SIZE((uint32_t)(&_rodata_reserved_start));
     const uint32_t drom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_rodata_reserved_end));
 
-    // 4. I_Cache (flash)
+    // 4. I_Cache / D_Cache (flash)
     PMP_ENTRY_CFG_RESET(8);
-    const uint32_t pmpaddr8 = pmpaddr_napot(SOC_IROM_LOW, irom_resv_end);
-    PMP_ENTRY_SET(8, pmpaddr8, PMP_NAPOT | RX);
-
-    // 5. D_Cache (flash)
     PMP_ENTRY_CFG_RESET(9);
-    const uint32_t pmpaddr9 = pmpaddr_napot(drom_resv_start, drom_resv_end);
-    PMP_ENTRY_SET(9, pmpaddr9, PMP_NAPOT | R);
+    PMP_ENTRY_CFG_RESET(10);
+    PMP_ENTRY_SET(8, SOC_IROM_LOW, NONE);
+    PMP_ENTRY_SET(9, irom_resv_end, PMP_TOR | RX);
+    PMP_ENTRY_SET(10, drom_resv_end, PMP_TOR | R);
 #else
     // 4. I_Cache / D_Cache (flash)
     const uint32_t pmpaddr8 = PMPADDR_NAPOT(SOC_IROM_LOW, SOC_IROM_HIGH);
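The replacement entries use TOR (top-of-range) matching, which has no power-of-two or alignment requirement: a TOR entry matches addresses from the previous entry's pmpaddr up to its own, so entry 8 only records the base (no permissions), entry 9 makes [SOC_IROM_LOW, irom_resv_end) executable and readable, and entry 10 makes [irom_resv_end, drom_resv_end) read-only. The sketch below models only that address-range logic in plain C; it is not the PMP_ENTRY_SET macro or the CSR encoding, and the boundary values are assumed examples.

    #include <stdint.h>
    #include <stdio.h>

    /* Rough model of the three TOR entries configured above. Entry 8 (permissions
     * NONE) only supplies the lower bound for entry 9; a TOR entry N matches
     * pmpaddr[N-1] <= addr < pmpaddr[N]. */
    static const char *flash_access(uint32_t addr,
                                    uint32_t soc_irom_low,   /* entry 8 address  */
                                    uint32_t irom_resv_end,  /* entry 9 address  */
                                    uint32_t drom_resv_end)  /* entry 10 address */
    {
        if (addr >= soc_irom_low && addr < irom_resv_end) {
            return "RX (instruction range)";
        }
        if (addr >= irom_resv_end && addr < drom_resv_end) {
            return "R (read-only data range)";
        }
        return "not covered by entries 8-10";
    }

    int main(void)
    {
        const uint32_t base = 0x42000000; /* assumed SOC_IROM_LOW  */
        const uint32_t iend = 0x420B0000; /* assumed irom_resv_end */
        const uint32_t dend = 0x42100000; /* assumed drom_resv_end */

        printf("0x42010000 -> %s\n", flash_access(0x42010000, base, iend, dend));
        printf("0x420C0000 -> %s\n", flash_access(0x420C0000, base, iend, dend));
        printf("0x42200000 -> %s\n", flash_access(0x42200000, base, iend, dend));
        return 0;
    }

Note that with this layout the read-only window begins at irom_resv_end (the end of the instruction window) rather than at drom_resv_start.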
@@ -209,29 +182,29 @@ void esp_cpu_configure_region_protection(void)
     /* Reset the corresponding PMP config because PMP_ENTRY_SET only sets the given bits
      * Bootloader might have given extra permissions and those won't be cleared
      */
-    PMP_ENTRY_CFG_RESET(10);
     PMP_ENTRY_CFG_RESET(11);
     PMP_ENTRY_CFG_RESET(12);
     PMP_ENTRY_CFG_RESET(13);
-    PMP_ENTRY_SET(10, SOC_RTC_IRAM_LOW, NONE);
+    PMP_ENTRY_CFG_RESET(14);
+    PMP_ENTRY_SET(11, SOC_RTC_IRAM_LOW, NONE);
 #if CONFIG_ULP_COPROC_RESERVE_MEM
     // First part of LP mem is reserved for coprocessor
-    PMP_ENTRY_SET(11, SOC_RTC_IRAM_LOW + CONFIG_ULP_COPROC_RESERVE_MEM, PMP_TOR | RW);
+    PMP_ENTRY_SET(12, SOC_RTC_IRAM_LOW + CONFIG_ULP_COPROC_RESERVE_MEM, PMP_TOR | RW);
 #else // CONFIG_ULP_COPROC_RESERVE_MEM
     // Repeat same previous entry, to ensure next entry has correct base address (TOR)
-    PMP_ENTRY_SET(11, SOC_RTC_IRAM_LOW, NONE);
+    PMP_ENTRY_SET(12, SOC_RTC_IRAM_LOW, NONE);
 #endif // !CONFIG_ULP_COPROC_RESERVE_MEM
-    PMP_ENTRY_SET(12, (int)&_rtc_text_end, PMP_TOR | RX);
-    PMP_ENTRY_SET(13, SOC_RTC_IRAM_HIGH, PMP_TOR | RW);
+    PMP_ENTRY_SET(13, (int)&_rtc_text_end, PMP_TOR | RX);
+    PMP_ENTRY_SET(14, SOC_RTC_IRAM_HIGH, PMP_TOR | RW);
 #else
-    const uint32_t pmpaddr10 = PMPADDR_NAPOT(SOC_RTC_IRAM_LOW, SOC_RTC_IRAM_HIGH);
-    PMP_ENTRY_SET(10, pmpaddr10, PMP_NAPOT | CONDITIONAL_RWX);
+    const uint32_t pmpaddr11 = PMPADDR_NAPOT(SOC_RTC_IRAM_LOW, SOC_RTC_IRAM_HIGH);
+    PMP_ENTRY_SET(11, pmpaddr11, PMP_NAPOT | CONDITIONAL_RWX);
     _Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");
 #endif
 
 
     // 7. Peripheral addresses
-    const uint32_t pmpaddr14 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
-    PMP_ENTRY_SET(14, pmpaddr14, PMP_NAPOT | RW);
+    const uint32_t pmpaddr15 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
+    PMP_ENTRY_SET(15, pmpaddr15, PMP_NAPOT | RW);
     _Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");
 }
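Because flash protection now takes three entries (8-10) instead of two, everything after it in this file shifts up by one slot: the RTC IRAM chain moves from entries 10-13 to 11-14 and the peripheral NAPOT entry from 14 to 15, which is all the remaining hunk does. The enum below is a reading aid only, with hypothetical names summarizing the layout suggested by the hunks above (entries 0-7 are set up earlier in the function and are not part of this diff):

    /* Hypothetical summary of PMP entry usage after this change, for the
     * CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT application build shown above. */
    enum flash_and_rtc_pmp_entries {
        PMP_FLASH_BASE   = 8,   /* SOC_IROM_LOW, no permissions (TOR base)         */
        PMP_FLASH_TEXT   = 9,   /* ..irom_resv_end, PMP_TOR | RX                   */
        PMP_FLASH_RODATA = 10,  /* ..drom_resv_end, PMP_TOR | R                    */
        PMP_RTC_BASE     = 11,  /* SOC_RTC_IRAM_LOW, no permissions                */
        PMP_RTC_RESERVED = 12,  /* ULP-reserved LP mem (TOR | RW) or repeated base */
        PMP_RTC_TEXT     = 13,  /* .._rtc_text_end, PMP_TOR | RX                   */
        PMP_RTC_DATA     = 14,  /* ..SOC_RTC_IRAM_HIGH, PMP_TOR | RW               */
        PMP_PERIPHERAL   = 15,  /* NAPOT over SOC_PERIPHERAL_LOW..HIGH, RW         */
    };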
@@ -29,29 +29,6 @@
 #define ALIGN_UP_TO_MMU_PAGE_SIZE(addr) (((addr) + (SOC_MMU_PAGE_SIZE) - 1) & ~((SOC_MMU_PAGE_SIZE) - 1))
 #define ALIGN_DOWN_TO_MMU_PAGE_SIZE(addr) ((addr) & ~((SOC_MMU_PAGE_SIZE) - 1))
 
-/**
- * @brief Generate the PMP address field value for PMPCFG.A == NAPOT
- *
- * NOTE: Here, (end-start) must be a power of 2 size and start must
- * be aligned to this size. This API returns UINT32_MAX on failing
- * these conditions, which when plugged into the PMP entry registers
- * does nothing. This skips the corresponding region's protection.
- *
- * @param start Region starting address
- * @param end Region ending address
- *
- * @return uint32_t PMP address field value
- */
-static inline uint32_t pmpaddr_napot(uint32_t start, uint32_t end)
-{
-    uint32_t size = end - start;
-    if ((size & (size - 1)) || (start % size)) {
-        return UINT32_MAX;
-    }
-
-    return start | ((size - 1) >> 1);
-}
-
 static void esp_cpu_configure_invalid_regions(void)
 {
     const unsigned PMA_NONE = PMA_L | PMA_EN;
@@ -180,22 +157,18 @@ void esp_cpu_configure_region_protection(void)
 
 #if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT && !BOOTLOADER_BUILD
     extern int _instruction_reserved_end;
     extern int _rodata_reserved_start;
     extern int _rodata_reserved_end;
 
     const uint32_t irom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_instruction_reserved_end));
     const uint32_t drom_resv_start = ALIGN_DOWN_TO_MMU_PAGE_SIZE((uint32_t)(&_rodata_reserved_start));
     const uint32_t drom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_rodata_reserved_end));
 
-    // 4. I_Cache (flash)
+    // 4. I_Cache / D_Cache (flash)
     PMP_ENTRY_CFG_RESET(8);
-    const uint32_t pmpaddr8 = pmpaddr_napot(SOC_IROM_LOW, irom_resv_end);
-    PMP_ENTRY_SET(8, pmpaddr8, PMP_NAPOT | RX);
-
-    // 5. D_Cache (flash)
     PMP_ENTRY_CFG_RESET(9);
-    const uint32_t pmpaddr9 = pmpaddr_napot(drom_resv_start, drom_resv_end);
-    PMP_ENTRY_SET(9, pmpaddr9, PMP_NAPOT | R);
+    PMP_ENTRY_CFG_RESET(10);
+    PMP_ENTRY_SET(8, SOC_IROM_LOW, NONE);
+    PMP_ENTRY_SET(9, irom_resv_end, PMP_TOR | RX);
+    PMP_ENTRY_SET(10, drom_resv_end, PMP_TOR | R);
 #else
     // 4. I_Cache / D_Cache (flash)
     const uint32_t pmpaddr8 = PMPADDR_NAPOT(SOC_IROM_LOW, SOC_IROM_HIGH);
@@ -209,20 +182,20 @@ void esp_cpu_configure_region_protection(void)
     /* Reset the corresponding PMP config because PMP_ENTRY_SET only sets the given bits
      * Bootloader might have given extra permissions and those won't be cleared
      */
-    PMP_ENTRY_CFG_RESET(10);
     PMP_ENTRY_CFG_RESET(11);
     PMP_ENTRY_CFG_RESET(12);
-    PMP_ENTRY_SET(10, SOC_RTC_IRAM_LOW, NONE);
-    PMP_ENTRY_SET(11, (int)&_rtc_text_end, PMP_TOR | RX);
-    PMP_ENTRY_SET(12, SOC_RTC_IRAM_HIGH, PMP_TOR | RW);
+    PMP_ENTRY_CFG_RESET(13);
+    PMP_ENTRY_SET(11, SOC_RTC_IRAM_LOW, NONE);
+    PMP_ENTRY_SET(12, (int)&_rtc_text_end, PMP_TOR | RX);
+    PMP_ENTRY_SET(13, SOC_RTC_IRAM_HIGH, PMP_TOR | RW);
 #else
-    const uint32_t pmpaddr10 = PMPADDR_NAPOT(SOC_RTC_IRAM_LOW, SOC_RTC_IRAM_HIGH);
-    PMP_ENTRY_SET(10, pmpaddr10, PMP_NAPOT | CONDITIONAL_RWX);
+    const uint32_t pmpaddr11 = PMPADDR_NAPOT(SOC_RTC_IRAM_LOW, SOC_RTC_IRAM_HIGH);
+    PMP_ENTRY_SET(11, pmpaddr11, PMP_NAPOT | CONDITIONAL_RWX);
     _Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");
 #endif
 
     // 7. Peripheral addresses
-    const uint32_t pmpaddr13 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
-    PMP_ENTRY_SET(13, pmpaddr13, PMP_NAPOT | RW);
+    const uint32_t pmpaddr14 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
+    PMP_ENTRY_SET(14, pmpaddr14, PMP_NAPOT | RW);
     _Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");
 }
@@ -35,7 +35,7 @@
 
 /**
  * Function called when a cache error occurs. It prints details such as the
- * explanation of why the panic occured.
+ * explanation of why the panic occurred.
  */
 static inline void print_cache_err_details(const void *frame)
 {
@@ -55,7 +55,7 @@ static inline void print_assist_debug_details(const void *frame)
 {
     uint32_t core_id = esp_hw_stack_guard_get_fired_cpu();
     if (core_id == ESP_HW_STACK_GUARD_NOT_FIRED) {
-        panic_print_str("ASSIST_DEBUG is not triggered BUT interrupt occured!\r\n\r\n");
+        panic_print_str("ASSIST_DEBUG is not triggered BUT interrupt occurred!\r\n\r\n");
         core_id = 0;
     }
     uint32_t sp_min, sp_max;
@@ -82,7 +82,7 @@ static inline void print_assist_debug_details(const void *frame)
 
 /**
  * Function called when a memory protection error occurs (PMS). It prints details such as the
- * explanation of why the panic occured.
+ * explanation of why the panic occurred.
  */
 #if CONFIG_ESP_SYSTEM_MEMPROT_FEATURE
 
@@ -214,7 +214,7 @@ void panic_soc_fill_info(void *f, panic_info_t *info)
     info->reason = "Unknown reason";
     info->addr = (void *) frame->mepc;
 
-    /* The mcause has been set by the CPU when the panic occured.
+    /* The mcause has been set by the CPU when the panic occurred.
      * All SoC-level panic will call this function, thus, this register
      * lets us know which error was triggered. */
     if (frame->mcause == ETS_CACHEERR_INUM) {
@@ -807,10 +807,7 @@ def test_rtc_slow_reg2_execute_violation(dut: PanicTestDut, test_func_name: str)
 @pytest.mark.generic
 def test_irom_reg_write_violation(dut: PanicTestDut, test_func_name: str) -> None:
     dut.run_test_func(test_func_name)
-    if dut.target == 'esp32c6':
-        dut.expect_gme('Store access fault')
-    elif dut.target == 'esp32h2':
-        dut.expect_gme('Cache error')
+    dut.expect_gme('Store access fault')
     dut.expect_reg_dump(0)
     dut.expect_cpu_reset()
 
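With the flash text/rodata regions now actually covered by PMP, the write-violation test expects the same 'Store access fault' on both targets instead of a target-specific 'Cache error'. The test application itself is not part of this diff; a hedged sketch of the kind of code such a test typically runs (the function name and address are made up for illustration) would be:

    #include <stdint.h>

    /* Hypothetical trigger for the IROM write-violation test: a store to an
     * address inside the flash mapping that entries 8-10 above now mark RX/R-only.
     * The PMP check rejects the store, the core raises a store access fault
     * (RISC-V mcause 7), and the panic handler reports the "Store access fault"
     * Guru Meditation that the test case expects. */
    void trigger_irom_write_violation(void)
    {
        volatile uint32_t *in_irom = (volatile uint32_t *)0x42000100; /* assumed flash-mapped address */
        *in_irom = 0xDEADBEEF;
    }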
@@ -851,7 +848,7 @@ def test_gdbstub_coredump(dut: PanicTestDut) -> None:
 
 def test_hw_stack_guard_cpu(dut: PanicTestDut, cpu: int) -> None:
     dut.expect_exact(f'Guru Meditation Error: Core {cpu} panic\'ed (Stack protection fault).')
-    dut.expect_none('ASSIST_DEBUG is not triggered BUT interrupt occured!')
+    dut.expect_none('ASSIST_DEBUG is not triggered BUT interrupt occurred!')
     dut.expect_exact(f'Detected in task "HWSG{cpu}"')
     addr = dut.expect('at 0x([0-9a-fA-F]{8})')
     assert addr.group(1) != b'00000000'