Merge branch 'fix/pmp_idcache_prot_c6_h2_v5.2' into 'release/v5.2'

fix(esp_hw_support): Fix the flash I/DROM region PMP protection (v5.2)

See merge request espressif/esp-idf!30022
commit 8a151a51cf
Mahavir Jain, 2024-04-04 12:20:07 +08:00
4 changed files with 34 additions and 91 deletions

View File

@@ -29,29 +29,6 @@
 #define ALIGN_UP_TO_MMU_PAGE_SIZE(addr) (((addr) + (SOC_MMU_PAGE_SIZE) - 1) & ~((SOC_MMU_PAGE_SIZE) - 1))
 #define ALIGN_DOWN_TO_MMU_PAGE_SIZE(addr) ((addr) & ~((SOC_MMU_PAGE_SIZE) - 1))
-/**
- * @brief Generate the PMP address field value for PMPCFG.A == NAPOT
- *
- * NOTE: Here, (end-start) must be a power of 2 size and start must
- * be aligned to this size. This API returns UINT32_MAX on failing
- * these conditions, which when plugged into the PMP entry registers
- * does nothing. This skips the corresponding region's protection.
- *
- * @param start Region starting address
- * @param end Region ending address
- *
- * @return uint32_t PMP address field value
- */
-static inline uint32_t pmpaddr_napot(uint32_t start, uint32_t end)
-{
-    uint32_t size = end - start;
-    if ((size & (size - 1)) || (start % size)) {
-        return UINT32_MAX;
-    }
-    return start | ((size - 1) >> 1);
-}
 static void esp_cpu_configure_invalid_regions(void)
 {
     const unsigned PMA_NONE = PMA_L | PMA_EN;
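
Why the helper above can go away: irom_resv_end and drom_resv_end are only rounded to the MMU page size, so (end - start) is usually not a power of two. As the removed comment itself notes, pmpaddr_napot() then returns UINT32_MAX and the PMP entry it feeds protects nothing, which is the gap this fix closes. A stand-alone sketch of that failure mode, using made-up example addresses rather than the real SOC_IROM_LOW or linker-derived values:

#include <stdint.h>
#include <stdio.h>

// Copy of the helper removed above, kept here only to demonstrate its failure mode.
static inline uint32_t pmpaddr_napot(uint32_t start, uint32_t end)
{
    uint32_t size = end - start;
    if ((size & (size - 1)) || (start % size)) {
        return UINT32_MAX;   // not power-of-two sized or misaligned: the entry ends up protecting nothing
    }
    return start | ((size - 1) >> 1);
}

int main(void)
{
    // Example values only: a flash-cache base plus 37 pages of 64 KB
    // (a 64 KB page size is assumed purely for illustration).
    // 37 * 0x10000 is not a power of two, so the helper gives up.
    uint32_t start = 0x42000000;
    uint32_t end   = start + 37 * 0x10000;
    printf("pmpaddr = 0x%08x\n", (unsigned)pmpaddr_napot(start, end));  // prints 0xffffffff
    return 0;
}
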
@@ -180,22 +157,18 @@ void esp_cpu_configure_region_protection(void)
 #if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT && !BOOTLOADER_BUILD
     extern int _instruction_reserved_end;
     extern int _rodata_reserved_start;
     extern int _rodata_reserved_end;
     const uint32_t irom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_instruction_reserved_end));
     const uint32_t drom_resv_start = ALIGN_DOWN_TO_MMU_PAGE_SIZE((uint32_t)(&_rodata_reserved_start));
     const uint32_t drom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_rodata_reserved_end));
-    // 4. I_Cache (flash)
+    // 4. I_Cache / D_Cache (flash)
     PMP_ENTRY_CFG_RESET(8);
-    const uint32_t pmpaddr8 = pmpaddr_napot(SOC_IROM_LOW, irom_resv_end);
-    PMP_ENTRY_SET(8, pmpaddr8, PMP_NAPOT | RX);
-    // 5. D_Cache (flash)
     PMP_ENTRY_CFG_RESET(9);
-    const uint32_t pmpaddr9 = pmpaddr_napot(drom_resv_start, drom_resv_end);
-    PMP_ENTRY_SET(9, pmpaddr9, PMP_NAPOT | R);
+    PMP_ENTRY_CFG_RESET(10);
+    PMP_ENTRY_SET(8, SOC_IROM_LOW, NONE);
+    PMP_ENTRY_SET(9, irom_resv_end, PMP_TOR | RX);
+    PMP_ENTRY_SET(10, drom_resv_end, PMP_TOR | R);
 #else
     // 4. I_Cache / D_Cache (flash)
     const uint32_t pmpaddr8 = PMPADDR_NAPOT(SOC_IROM_LOW, SOC_IROM_HIGH);
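
The replacement entries use PMP top-of-range (TOR) matching instead of NAPOT: a TOR entry covers every address from the previous entry's pmpaddr up to (but not including) its own, so MMU-page-aligned bounds such as irom_resv_end and drom_resv_end work without any power-of-two constraint. Entry 8 is programmed with no permissions purely to pin the lower bound at SOC_IROM_LOW, entry 9 then grants RX over the instruction range, and entry 10 grants read-only access over the rodata range; since one extra entry is consumed, the RTC and peripheral entries in the following hunk shift up by one index. A rough sketch of the TOR matching rule, written with plain byte addresses for readability (the hardware pmpaddr CSRs actually hold the address shifted right by two):

#include <stdbool.h>
#include <stdint.h>

// Sketch of RISC-V PMP "TOR" (top-of-range) matching for a single entry.
// For entry i configured as TOR, the protected range is
// [pmpaddr[i-1], pmpaddr[i]); entry 8 above exists only to supply the
// lower bound for entry 9, which is why it carries no permissions.
bool pmp_tor_matches(uint32_t addr, uint32_t prev_pmpaddr, uint32_t this_pmpaddr)
{
    return (addr >= prev_pmpaddr) && (addr < this_pmpaddr);
}

// How the three entries set above carve up the flash cache range
// (names reused from the patch; values come from the SoC header and linker):
//   pmp_tor_matches(a, SOC_IROM_LOW,  irom_resv_end)  -> executable, read-only code
//   pmp_tor_matches(a, irom_resv_end, drom_resv_end)  -> read-only data
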
@@ -209,29 +182,29 @@ void esp_cpu_configure_region_protection(void)
     /* Reset the corresponding PMP config because PMP_ENTRY_SET only sets the given bits
      * Bootloader might have given extra permissions and those won't be cleared
      */
-    PMP_ENTRY_CFG_RESET(10);
     PMP_ENTRY_CFG_RESET(11);
     PMP_ENTRY_CFG_RESET(12);
     PMP_ENTRY_CFG_RESET(13);
-    PMP_ENTRY_SET(10, SOC_RTC_IRAM_LOW, NONE);
+    PMP_ENTRY_CFG_RESET(14);
+    PMP_ENTRY_SET(11, SOC_RTC_IRAM_LOW, NONE);
 #if CONFIG_ULP_COPROC_RESERVE_MEM
     // First part of LP mem is reserved for coprocessor
-    PMP_ENTRY_SET(11, SOC_RTC_IRAM_LOW + CONFIG_ULP_COPROC_RESERVE_MEM, PMP_TOR | RW);
+    PMP_ENTRY_SET(12, SOC_RTC_IRAM_LOW + CONFIG_ULP_COPROC_RESERVE_MEM, PMP_TOR | RW);
 #else // CONFIG_ULP_COPROC_RESERVE_MEM
     // Repeat same previous entry, to ensure next entry has correct base address (TOR)
-    PMP_ENTRY_SET(11, SOC_RTC_IRAM_LOW, NONE);
+    PMP_ENTRY_SET(12, SOC_RTC_IRAM_LOW, NONE);
 #endif // !CONFIG_ULP_COPROC_RESERVE_MEM
-    PMP_ENTRY_SET(12, (int)&_rtc_text_end, PMP_TOR | RX);
-    PMP_ENTRY_SET(13, SOC_RTC_IRAM_HIGH, PMP_TOR | RW);
+    PMP_ENTRY_SET(13, (int)&_rtc_text_end, PMP_TOR | RX);
+    PMP_ENTRY_SET(14, SOC_RTC_IRAM_HIGH, PMP_TOR | RW);
 #else
-    const uint32_t pmpaddr10 = PMPADDR_NAPOT(SOC_RTC_IRAM_LOW, SOC_RTC_IRAM_HIGH);
-    PMP_ENTRY_SET(10, pmpaddr10, PMP_NAPOT | CONDITIONAL_RWX);
+    const uint32_t pmpaddr11 = PMPADDR_NAPOT(SOC_RTC_IRAM_LOW, SOC_RTC_IRAM_HIGH);
+    PMP_ENTRY_SET(11, pmpaddr11, PMP_NAPOT | CONDITIONAL_RWX);
     _Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");
 #endif
     // 7. Peripheral addresses
-    const uint32_t pmpaddr14 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
-    PMP_ENTRY_SET(14, pmpaddr14, PMP_NAPOT | RW);
+    const uint32_t pmpaddr15 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
+    PMP_ENTRY_SET(15, pmpaddr15, PMP_NAPOT | RW);
     _Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");
 }

View File

@@ -29,29 +29,6 @@
 #define ALIGN_UP_TO_MMU_PAGE_SIZE(addr) (((addr) + (SOC_MMU_PAGE_SIZE) - 1) & ~((SOC_MMU_PAGE_SIZE) - 1))
 #define ALIGN_DOWN_TO_MMU_PAGE_SIZE(addr) ((addr) & ~((SOC_MMU_PAGE_SIZE) - 1))
-/**
- * @brief Generate the PMP address field value for PMPCFG.A == NAPOT
- *
- * NOTE: Here, (end-start) must be a power of 2 size and start must
- * be aligned to this size. This API returns UINT32_MAX on failing
- * these conditions, which when plugged into the PMP entry registers
- * does nothing. This skips the corresponding region's protection.
- *
- * @param start Region starting address
- * @param end Region ending address
- *
- * @return uint32_t PMP address field value
- */
-static inline uint32_t pmpaddr_napot(uint32_t start, uint32_t end)
-{
-    uint32_t size = end - start;
-    if ((size & (size - 1)) || (start % size)) {
-        return UINT32_MAX;
-    }
-    return start | ((size - 1) >> 1);
-}
 static void esp_cpu_configure_invalid_regions(void)
 {
     const unsigned PMA_NONE = PMA_L | PMA_EN;
@@ -180,22 +157,18 @@ void esp_cpu_configure_region_protection(void)
 #if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT && !BOOTLOADER_BUILD
     extern int _instruction_reserved_end;
     extern int _rodata_reserved_start;
     extern int _rodata_reserved_end;
     const uint32_t irom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_instruction_reserved_end));
     const uint32_t drom_resv_start = ALIGN_DOWN_TO_MMU_PAGE_SIZE((uint32_t)(&_rodata_reserved_start));
     const uint32_t drom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_rodata_reserved_end));
-    // 4. I_Cache (flash)
+    // 4. I_Cache / D_Cache (flash)
     PMP_ENTRY_CFG_RESET(8);
-    const uint32_t pmpaddr8 = pmpaddr_napot(SOC_IROM_LOW, irom_resv_end);
-    PMP_ENTRY_SET(8, pmpaddr8, PMP_NAPOT | RX);
-    // 5. D_Cache (flash)
     PMP_ENTRY_CFG_RESET(9);
-    const uint32_t pmpaddr9 = pmpaddr_napot(drom_resv_start, drom_resv_end);
-    PMP_ENTRY_SET(9, pmpaddr9, PMP_NAPOT | R);
+    PMP_ENTRY_CFG_RESET(10);
+    PMP_ENTRY_SET(8, SOC_IROM_LOW, NONE);
+    PMP_ENTRY_SET(9, irom_resv_end, PMP_TOR | RX);
+    PMP_ENTRY_SET(10, drom_resv_end, PMP_TOR | R);
 #else
     // 4. I_Cache / D_Cache (flash)
     const uint32_t pmpaddr8 = PMPADDR_NAPOT(SOC_IROM_LOW, SOC_IROM_HIGH);
@@ -209,20 +182,20 @@ void esp_cpu_configure_region_protection(void)
     /* Reset the corresponding PMP config because PMP_ENTRY_SET only sets the given bits
      * Bootloader might have given extra permissions and those won't be cleared
      */
-    PMP_ENTRY_CFG_RESET(10);
     PMP_ENTRY_CFG_RESET(11);
     PMP_ENTRY_CFG_RESET(12);
-    PMP_ENTRY_SET(10, SOC_RTC_IRAM_LOW, NONE);
-    PMP_ENTRY_SET(11, (int)&_rtc_text_end, PMP_TOR | RX);
-    PMP_ENTRY_SET(12, SOC_RTC_IRAM_HIGH, PMP_TOR | RW);
+    PMP_ENTRY_CFG_RESET(13);
+    PMP_ENTRY_SET(11, SOC_RTC_IRAM_LOW, NONE);
+    PMP_ENTRY_SET(12, (int)&_rtc_text_end, PMP_TOR | RX);
+    PMP_ENTRY_SET(13, SOC_RTC_IRAM_HIGH, PMP_TOR | RW);
 #else
-    const uint32_t pmpaddr10 = PMPADDR_NAPOT(SOC_RTC_IRAM_LOW, SOC_RTC_IRAM_HIGH);
-    PMP_ENTRY_SET(10, pmpaddr10, PMP_NAPOT | CONDITIONAL_RWX);
+    const uint32_t pmpaddr11 = PMPADDR_NAPOT(SOC_RTC_IRAM_LOW, SOC_RTC_IRAM_HIGH);
+    PMP_ENTRY_SET(11, pmpaddr11, PMP_NAPOT | CONDITIONAL_RWX);
     _Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");
 #endif
     // 7. Peripheral addresses
-    const uint32_t pmpaddr13 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
-    PMP_ENTRY_SET(13, pmpaddr13, PMP_NAPOT | RW);
+    const uint32_t pmpaddr14 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
+    PMP_ENTRY_SET(14, pmpaddr14, PMP_NAPOT | RW);
     _Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");
 }

View File

@@ -82,7 +82,7 @@ static inline bool test_and_print_register_bits(const uint32_t status,
 /**
  * Function called when a cache error occurs. It prints details such as the
- * explanation of why the panic occured.
+ * explanation of why the panic occurred.
  */
 static inline void print_cache_err_details(const void *frame)
 {
@@ -172,7 +172,7 @@ static inline void print_assist_debug_details(const void *frame)
     panic_print_str("\r\n");
     if (!esp_hw_stack_guard_is_fired()) {
-        panic_print_str("ASSIST_DEBUG is not triggered BUT interrupt occured!\r\n\r\n");
+        panic_print_str("ASSIST_DEBUG is not triggered BUT interrupt occurred!\r\n\r\n");
     }
     panic_print_str("Detected in task \"");
@@ -193,7 +193,7 @@ static inline void print_assist_debug_details(const void *frame)
 /**
  * Function called when a memory protection error occurs (PMS). It prints details such as the
- * explanation of why the panic occured.
+ * explanation of why the panic occurred.
  */
 #if CONFIG_ESP_SYSTEM_MEMPROT_FEATURE
@@ -305,7 +305,7 @@ void panic_soc_fill_info(void *f, panic_info_t *info)
     info->reason = "Unknown reason";
     info->addr = (void *) frame->mepc;
-    /* The mcause has been set by the CPU when the panic occured.
+    /* The mcause has been set by the CPU when the panic occurred.
      * All SoC-level panic will call this function, thus, this register
      * lets us know which error was triggered. */
     if (frame->mcause == ETS_CACHEERR_INUM) {

View File

@@ -823,10 +823,7 @@ def test_rtc_slow_reg2_execute_violation(dut: PanicTestDut, test_func_name: str) -> None:
 @pytest.mark.generic
 def test_irom_reg_write_violation(dut: PanicTestDut, test_func_name: str) -> None:
     dut.run_test_func(test_func_name)
-    if dut.target == 'esp32c6':
-        dut.expect_gme('Store access fault')
-    elif dut.target == 'esp32h2':
-        dut.expect_gme('Cache error')
+    dut.expect_gme('Store access fault')
     dut.expect_reg_dump(0)
     dut.expect_cpu_reset()
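
With the instruction range now covered by an RX TOR entry on both targets, a write into flash-mapped code is rejected by the PMP itself, so the test expects the generic RISC-V store access fault on ESP32-C6 and ESP32-H2 alike rather than the cache error it previously accepted on ESP32-H2. Roughly the kind of access the test exercises, as an illustrative C sketch only, with a placeholder address instead of the real test code or SOC_IROM_LOW:

#include <stdint.h>

// Illustration only (not the actual panic test code): a store into the
// flash-mapped instruction region. IROM_TEST_ADDR is a placeholder for
// some address between SOC_IROM_LOW and irom_resv_end.
#define IROM_TEST_ADDR 0x42000000U

void write_to_irom(void)
{
    volatile uint32_t *p = (volatile uint32_t *)IROM_TEST_ADDR;
    *p = 0;  // denied by the PMP entry -> "Store access fault" guru meditation
}
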
@@ -870,7 +867,7 @@ def test_gdbstub_coredump(dut: PanicTestDut) -> None:
 def test_hw_stack_guard_cpu0(dut: PanicTestDut, config: str, test_func_name: str) -> None:
     dut.run_test_func(test_func_name)
     dut.expect_exact('Guru Meditation Error: Core 0 panic\'ed (Stack protection fault).')
-    dut.expect_none('ASSIST_DEBUG is not triggered BUT interrupt occured!')
+    dut.expect_none('ASSIST_DEBUG is not triggered BUT interrupt occurred!')
     dut.expect(r'Detected in task(.*)at 0x')
     dut.expect_exact('Stack pointer: 0x')
     dut.expect(r'Stack bounds: 0x(.*) - 0x')