esp_hw_support: Add esp_cpu.h abstraction and API

This commit updates the esp_cpu.h API. The new API presents an
abstraction where the CPU exposes the following interfaces:

- CPU Control (to stall/unstall/reset the CPU)
- CPU Registers (to read registers commonly used in SW such as SP, PC)
- CPU Interrupts (to inquire/allocate/control the CPUs 32 interrupts)
- Memory Port (to configure the CPU's memory bus for memory protection)
- Debugging (to configure/control the CPU's debugging port)

Note: Also added FORCE_INLINE_ATTR to the DoxyFile in order to pass doc builds for esp_cpu.h
Darian Leung 2022-06-07 14:46:23 +08:00
parent 556ec30457
commit 61eb7baa6b
28 changed files with 1744 additions and 579 deletions
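
For context, a minimal usage sketch of the new abstraction (hypothetical application
code, not part of this commit; it only calls functions declared in the new esp_cpu.h):

    #include "esp_cpu.h"

    static void probe_cpu_example(void)
    {
        int core = esp_cpu_get_core_id();                       // CPU Registers interface
        esp_cpu_cycle_count_t t0 = esp_cpu_get_cycle_count();   // cycle counter

        if (esp_cpu_dbgr_is_attached()) {                       // Debugging interface
            esp_cpu_dbgr_break();                               // trap into the attached debugger
        }

        esp_cpu_cycle_count_t elapsed = esp_cpu_get_cycle_count() - t0;
        (void)core;
        (void)elapsed;
    }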


@@ -4,81 +4,270 @@
* SPDX-License-Identifier: Apache-2.0
*/
#include "esp_attr.h"
#include "esp_cpu.h"
#include "sdkconfig.h"
#include <stdint.h>
#include <assert.h>
#include "soc/soc.h"
#include "soc/rtc_periph.h"
#include "sdkconfig.h"
#include "hal/cpu_hal.h"
#include "hal/cpu_types.h"
#include "hal/mpu_hal.h"
#include "esp_cpu.h"
#include "hal/soc_hal.h"
#include "soc/soc_caps.h"
#include "soc/rtc_cntl_reg.h"
#include "hal/soc_hal.h"
#include "hal/mpu_hal.h"
#include "esp_bit_defs.h"
#include "esp_attr.h"
#include "esp_err.h"
#include "esp_cpu.h"
#include "esp_memory_utils.h"
#if __XTENSA__
#include "xtensa/config/core-isa.h"
#else
#include "soc/system_reg.h" // For SYSTEM_CPU_PER_CONF_REG
#include "soc/dport_access.h" // For Dport access
#include "riscv/semihosting.h"
#include "riscv/csr.h" // For PMP_ENTRY. [refactor-todo] create PMP abstraction in rv_utils.h
#endif
#if SOC_CPU_HAS_FLEXIBLE_INTC
#include "riscv/instruction_decode.h"
#endif
#include "sdkconfig.h"
/* --------------------------------------------------- CPU Control -----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
void esp_cpu_stall(int core_id)
{
assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM > 1 // We don't allow stalling of the current core
/*
We need to write the value "0x86" to stall a particular core. The write location is split into two separate
bit fields named "c0" and "c1", and the two fields are located in different registers. Each core has its own pair of
"c0" and "c1" bit fields.
Note: This function can be called when the cache is disabled. We use "ternary if" instead of an array so that the
"rodata" of the register masks/shifts will be stored in this function's "rodata" section, instead of the source
file's "rodata" section (see IDF-5214).
*/
int rtc_cntl_c0_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C0_M : RTC_CNTL_SW_STALL_APPCPU_C0_M;
int rtc_cntl_c0_s = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C0_S : RTC_CNTL_SW_STALL_APPCPU_C0_S;
int rtc_cntl_c1_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C1_M : RTC_CNTL_SW_STALL_APPCPU_C1_M;
int rtc_cntl_c1_s = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C1_S : RTC_CNTL_SW_STALL_APPCPU_C1_S;
CLEAR_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_c0_m);
SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, 2 << rtc_cntl_c0_s);
CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1_m);
SET_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, 0x21 << rtc_cntl_c1_s);
#endif
}
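/*
Illustration (derived from the values written above, not from the TRM): the 8-bit stall
code 0x86 (0b10000110) is split as c1 = 0x21 (0b100001, the upper 6 bits), written to
RTC_CNTL_SW_CPU_STALL_REG, and c0 = 0x2 (0b10, the lower 2 bits), written to
RTC_CNTL_OPTIONS0_REG, so that (0x21 << 2) | 0x2 == 0x86.
*/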
void esp_cpu_unstall(int core_id)
{
assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM > 1 // We don't allow stalling of the current core
/*
We need to clear the value "0x86" to unstall a particular core. The location of this value is split into
two separate bit fields named "c0" and "c1", and the two fields are located in different registers. Each core has
its own pair of "c0" and "c1" bit fields.
Note: This function can be called when the cache is disabled. We use "ternary if" instead of an array so that the
"rodata" of the register masks/shifts will be stored in this function's "rodata" section, instead of the source
file's "rodata" section (see IDF-5214).
*/
int rtc_cntl_c0_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C0_M : RTC_CNTL_SW_STALL_APPCPU_C0_M;
int rtc_cntl_c1_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C1_M : RTC_CNTL_SW_STALL_APPCPU_C1_M;
CLEAR_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_c0_m);
CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1_m);
#endif
}
void esp_cpu_reset(int core_id)
{
assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM > 1
/*
Note: This function can be called when the cache is disabled. We use "ternary if" instead of an array so that the
"rodata" of the register masks/shifts will be stored in this function's "rodata" section, instead of the source
file's "rodata" section (see IDF-5214).
*/
int rtc_cntl_rst_m = (core_id == 0) ? RTC_CNTL_SW_PROCPU_RST_M : RTC_CNTL_SW_APPCPU_RST_M;
#else // SOC_CPU_CORES_NUM > 1
int rtc_cntl_rst_m = RTC_CNTL_SW_PROCPU_RST_M;
#endif // SOC_CPU_CORES_NUM > 1
SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_rst_m);
}
void esp_cpu_wait_for_intr(void)
{
#if __XTENSA__
xt_utils_wait_for_intr();
#else
if (esp_cpu_dbgr_is_attached() && DPORT_REG_GET_BIT(SYSTEM_CPU_PER_CONF_REG, SYSTEM_CPU_WAIT_MODE_FORCE_ON) == 0) {
/* when SYSTEM_CPU_WAIT_MODE_FORCE_ON is disabled in WFI mode SBA access to memory does not work for debugger,
so do not enter that mode when debugger is connected */
return;
}
rv_utils_wait_for_intr();
#endif // __XTENSA__
}
/* -------------------------------------------------- CPU Registers ----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
/* ------------------------------------------------- CPU Interrupts ----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
// ---------------- Interrupt Descriptors ------------------
#if SOC_CPU_HAS_FLEXIBLE_INTC
static bool is_intr_num_resv(int intr_num)
{
// Workaround to reserve interrupt number 1 for Wi-Fi, 5,8 for Bluetooth, 6 for "permanently disabled interrupt"
// [TODO: IDF-2465]
const uint32_t reserved = BIT(1) | BIT(5) | BIT(6) | BIT(8);
if (reserved & BIT(intr_num)) {
return true;
}
extern int _vector_table;
extern int _interrupt_handler;
const intptr_t pc = (intptr_t)(&_vector_table + intr_num);
/* JAL instructions are relative to the PC they are executed from. */
const intptr_t destination = pc + riscv_decode_offset_from_jal_instruction(pc);
return destination != (intptr_t)&_interrupt_handler;
}
void esp_cpu_intr_get_desc(int core_id, int intr_num, esp_cpu_intr_desc_t *intr_desc_ret)
{
intr_desc_ret->priority = 1; //Todo: We should make this -1
intr_desc_ret->type = ESP_CPU_INTR_TYPE_NA;
#if __riscv
intr_desc_ret->flags = is_intr_num_resv(intr_num) ? ESP_CPU_INTR_DESC_FLAG_RESVD : 0;
#else
intr_desc_ret->flags = 0;
#endif
}
#else // SOC_CPU_HAS_FLEXIBLE_INTC
typedef struct {
int priority;
esp_cpu_intr_type_t type;
uint32_t flags[SOC_CPU_CORES_NUM];
} intr_desc_t;
#if SOC_CPU_CORES_NUM > 1
// Note: We currently only have dual core targets, so the table initializer is hard coded
const static intr_desc_t intr_desc_table [SOC_CPU_INTR_NUM] = {
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //0
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //1
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //2
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //3
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, 0 } }, //4
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //5
#if CONFIG_FREERTOS_CORETIMER_0
{ 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //6
#else
{ 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //6
#endif
{ 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //7
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //8
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //9
{ 1, ESP_CPU_INTR_TYPE_EDGE, { 0, 0 } }, //10
{ 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //11
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0} }, //12
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0} }, //13
{ 7, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //14, NMI
#if CONFIG_FREERTOS_CORETIMER_1
{ 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //15
#else
{ 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //15
#endif
{ 5, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //16
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //17
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //18
{ 2, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //19
{ 2, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //20
{ 2, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //21
{ 3, ESP_CPU_INTR_TYPE_EDGE, { ESP_CPU_INTR_DESC_FLAG_RESVD, 0 } }, //22
{ 3, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //23
{ 4, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, 0 } }, //24
{ 4, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //25
{ 5, ESP_CPU_INTR_TYPE_LEVEL, { 0, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //26
{ 3, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //27
{ 4, ESP_CPU_INTR_TYPE_EDGE, { 0, 0 } }, //28
{ 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //29
{ 4, ESP_CPU_INTR_TYPE_EDGE, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //30
{ 5, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //31
};
#else // SOC_CPU_CORES_NUM > 1
const static intr_desc_t intr_desc_table [SOC_CPU_INTR_NUM] = {
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //0
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //1
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //2
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //3
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //4
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //5
#if CONFIG_FREERTOS_CORETIMER_0
{ 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //6
#else
{ 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //6
#endif
{ 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //7
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //8
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //9
{ 1, ESP_CPU_INTR_TYPE_EDGE, { 0 } }, //10
{ 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //11
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //12
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //13
{ 7, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //14, NMI
#if CONFIG_FREERTOS_CORETIMER_1
{ 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //15
#else
{ 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //15
#endif
{ 5, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //16
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //17
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //18
{ 2, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //19
{ 2, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //20
{ 2, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //21
{ 3, ESP_CPU_INTR_TYPE_EDGE, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //22
{ 3, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //23
{ 4, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //24
{ 4, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //25
{ 5, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //26
{ 3, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //27
{ 4, ESP_CPU_INTR_TYPE_EDGE, { 0 } }, //28
{ 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //29
{ 4, ESP_CPU_INTR_TYPE_EDGE, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //30
{ 5, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //31
};
#endif // SOC_CPU_CORES_NUM > 1
void esp_cpu_intr_get_desc(int core_id, int intr_num, esp_cpu_intr_desc_t *intr_desc_ret)
{
assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM == 1
core_id = 0; //If this is a single core target, hard code CPU ID to 0
#endif
intr_desc_ret->priority = intr_desc_table[intr_num].priority;
intr_desc_ret->type = intr_desc_table[intr_num].type;
intr_desc_ret->flags = intr_desc_table[intr_num].flags[core_id];
}
#endif // SOC_CPU_HAS_FLEXIBLE_INTC
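/*
Usage illustration (hypothetical, not part of this commit): a caller such as an
interrupt allocator can scan the descriptors to find a usable interrupt:

    int find_free_level_intr(int core_id)
    {
        for (int i = 0; i < SOC_CPU_INTR_NUM; i++) {
            esp_cpu_intr_desc_t desc;
            esp_cpu_intr_get_desc(core_id, i, &desc);
            if (desc.type == ESP_CPU_INTR_TYPE_LEVEL &&
                !(desc.flags & (ESP_CPU_INTR_DESC_FLAG_RESVD | ESP_CPU_INTR_DESC_FLAG_SPECIAL))) {
                return i;
            }
        }
        return -1; // no free level interrupt found
    }
*/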
/* -------------------------------------------------- Memory Ports -----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
#if CONFIG_IDF_TARGET_ESP32 || CONFIG_IDF_TARGET_ESP32S2 || CONFIG_IDF_TARGET_ESP32S3
void esp_cpu_configure_region_protection(void)
{
/* Note: currently this is configured the same on all Xtensa targets
@@ -92,5 +281,364 @@ void esp_cpu_configure_region_protection(void)
mpu_hal_set_region_access(1, MPU_REGION_RW); // 0x20000000
}
#elif CONFIG_IDF_TARGET_ESP32C3 || CONFIG_IDF_TARGET_ESP32H2
void esp_cpu_configure_region_protection(void)
{
/* Notes on implementation:
*
* 1) Note: ESP32-C3/H2 CPU doesn't support overlapping PMP regions
*
* 2) Therefore, we use TOR (top of range) entries to map the whole address
* space, bottom to top.
*
* 3) There are not enough entries to describe all the memory regions 100% accurately.
*
* 4) This means some gaps (invalid memory) are accessible. Priority for extending regions
* to cover gaps is to extend read-only or read-execute regions or read-only regions only
* (executing unmapped addresses should always fault with invalid instruction, read-only means
* stores will correctly fault even if reads may return some invalid value.)
*
* 5) Entries are grouped in order with some static asserts to try and verify everything is
* correct.
*/
const unsigned NONE = PMP_L | PMP_TOR;
const unsigned R = PMP_L | PMP_TOR | PMP_R;
const unsigned RW = PMP_L | PMP_TOR | PMP_R | PMP_W;
const unsigned RX = PMP_L | PMP_TOR | PMP_R | PMP_X;
const unsigned RWX = PMP_L | PMP_TOR | PMP_R | PMP_W | PMP_X;
// 1. Gap at bottom of address space
PMP_ENTRY_SET(0, SOC_DEBUG_LOW, NONE);
// 2. Debug region
PMP_ENTRY_SET(1, SOC_DEBUG_HIGH, RWX);
_Static_assert(SOC_DEBUG_LOW < SOC_DEBUG_HIGH, "Invalid CPU debug region");
// 3. Gap between debug region & DROM (flash cache)
PMP_ENTRY_SET(2, SOC_DROM_LOW, NONE);
_Static_assert(SOC_DEBUG_HIGH < SOC_DROM_LOW, "Invalid PMP entry order");
// 4. DROM (flash cache)
// 5. Gap between DROM & DRAM
// (Note: To save PMP entries these two are merged into one read-only region)
PMP_ENTRY_SET(3, SOC_DRAM_LOW, R);
_Static_assert(SOC_DROM_LOW < SOC_DROM_HIGH, "Invalid DROM region");
_Static_assert(SOC_DROM_HIGH < SOC_DRAM_LOW, "Invalid PMP entry order");
// 6. DRAM
PMP_ENTRY_SET(4, SOC_DRAM_HIGH, RW);
_Static_assert(SOC_DRAM_LOW < SOC_DRAM_HIGH, "Invalid DRAM region");
// 7. Gap between DRAM and Mask DROM
// 8. Mask DROM
// (Note: to save PMP entries these two are merged into one read-only region)
PMP_ENTRY_SET(5, SOC_DROM_MASK_HIGH, R);
_Static_assert(SOC_DRAM_HIGH < SOC_DROM_MASK_LOW, "Invalid PMP entry order");
_Static_assert(SOC_DROM_MASK_LOW < SOC_DROM_MASK_HIGH, "Invalid mask DROM region");
// 9. Gap between mask DROM and mask IROM
// 10. Mask IROM
// (Note: to save PMP entries these two are merged into one RX region)
PMP_ENTRY_SET(6, SOC_IROM_MASK_HIGH, RX);
_Static_assert(SOC_DROM_MASK_HIGH < SOC_IROM_MASK_LOW, "Invalid PMP entry order");
_Static_assert(SOC_IROM_MASK_LOW < SOC_IROM_MASK_HIGH, "Invalid mask IROM region");
// 11. Gap between mask IROM & IRAM
PMP_ENTRY_SET(7, SOC_IRAM_LOW, NONE);
_Static_assert(SOC_IROM_MASK_HIGH < SOC_IRAM_LOW, "Invalid PMP entry order");
// 12. IRAM
PMP_ENTRY_SET(8, SOC_IRAM_HIGH, RWX);
_Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid IRAM region");
// 13. Gap between IRAM and IROM
// 14. IROM (flash cache)
// (Note: to save PMP entries these two are merged into one RX region)
PMP_ENTRY_SET(9, SOC_IROM_HIGH, RX);
_Static_assert(SOC_IRAM_HIGH < SOC_IROM_LOW, "Invalid PMP entry order");
_Static_assert(SOC_IROM_LOW < SOC_IROM_HIGH, "Invalid IROM region");
// 15. Gap between IROM & RTC slow memory
PMP_ENTRY_SET(10, SOC_RTC_IRAM_LOW, NONE);
_Static_assert(SOC_IROM_HIGH < SOC_RTC_IRAM_LOW, "Invalid PMP entry order");
// 16. RTC fast memory
PMP_ENTRY_SET(11, SOC_RTC_IRAM_HIGH, RWX);
_Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");
// 17. Gap between RTC fast memory & peripheral addresses
PMP_ENTRY_SET(12, SOC_PERIPHERAL_LOW, NONE);
_Static_assert(SOC_RTC_IRAM_HIGH < SOC_PERIPHERAL_LOW, "Invalid PMP entry order");
// 18. Peripheral addresses
PMP_ENTRY_SET(13, SOC_PERIPHERAL_HIGH, RW);
_Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");
// 19. End of address space
PMP_ENTRY_SET(14, UINT32_MAX, NONE); // all but last 4 bytes
PMP_ENTRY_SET(15, UINT32_MAX, PMP_L | PMP_NA4); // last 4 bytes
}
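/*
Aside (assumes standard RISC-V PMP semantics; ignores the 4-byte granularity encoding
handled by PMP_ENTRY_SET): a TOR entry i matches addresses in [pmpaddr[i-1], pmpaddr[i]),
with pmpaddr[-1] treated as 0. A hypothetical helper showing the matching rule:

    static bool pmp_tor_entry_matches(const uint32_t *pmpaddr, int i, uint32_t addr)
    {
        uint32_t low = (i == 0) ? 0 : pmpaddr[i - 1];
        return (addr >= low) && (addr < pmpaddr[i]);
    }

So with the entries above, entry 0 denies [0, SOC_DEBUG_LOW), entry 1 grants RWX over
[SOC_DEBUG_LOW, SOC_DEBUG_HIGH), and so on up the address space.
*/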
#elif CONFIG_IDF_TARGET_ESP32C2
#if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT && !BOOTLOADER_BUILD
extern int _iram_end;
extern int _data_start;
#define IRAM_END (int)&_iram_end
#define DRAM_START (int)&_data_start
#else
#define IRAM_END SOC_DIRAM_IRAM_HIGH
#define DRAM_START SOC_DIRAM_DRAM_LOW
#endif
#ifdef BOOTLOADER_BUILD
// Without L bit set
#define CONDITIONAL_NONE 0x0
#define CONDITIONAL_RX PMP_R | PMP_X
#define CONDITIONAL_RW PMP_R | PMP_W
#else
// With L bit set
#define CONDITIONAL_NONE NONE
#define CONDITIONAL_RX RX
#define CONDITIONAL_RW RW
#endif
void esp_cpu_configure_region_protection(void)
{
/* Notes on implementation:
*
* 1) The ESP32-C2 CPU supports overlapping PMP regions; matching is based on static priority
* (the lowest numbered entry has the highest priority).
*
* 2) Therefore, we use TOR (top of range) and NAPOT entries to map the effective area,
* then deny access to any remaining address.
*
* 3) PMPADDR entries 3-15 are hardcoded to fixed values, while PMPADDR entries 0-2 are programmed
* to split the internal D/I SRAM into IRAM/DRAM. All PMPCFG entries are used.
*
* 4) Ideally, PMPADDR 0-2 entries should be configured twice, once during bootloader startup and another during app startup.
* However, the CPU currently always executes in machine mode and to enforce these permissions in machine mode, we need
* to set the Lock (L) bit but if set once, it cannot be reconfigured. So, we only configure 0-2 PMPADDR during app startup.
*/
const unsigned NONE = PMP_L ;
const unsigned R = PMP_L | PMP_R;
const unsigned X = PMP_L | PMP_X;
const unsigned RW = PMP_L | PMP_R | PMP_W;
const unsigned RX = PMP_L | PMP_R | PMP_X;
const unsigned RWX = PMP_L | PMP_R | PMP_W | PMP_X;
/* There are 3 configuration scenarios for PMPADDR 0-2
*
* 1. Bootloader build:
* - We cannot set the lock bit as we need to reconfigure it again for the application.
* We configure PMPADDR 0-1 to cover entire valid IRAM range and PMPADDR 2-3 to cover entire valid DRAM range.
*
* 2. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT enabled
* - We split the SRAM into IRAM and DRAM such that IRAM region cannot be accessed via DBUS
* and DRAM region cannot be accessed via IBUS. We use _iram_end and _data_start markers to set the boundaries.
* We also lock these entries so the R/W/X permissions are enforced even for machine mode
*
* 3. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT disabled
* - The IRAM-DRAM split is not enabled, so we just need to ensure that accesses to valid address ranges succeed.
* For that, we set PMPADDR 0-1 to cover the entire valid IRAM range and PMPADDR 2-3 to cover the entire DRAM region.
* We also lock these entries so the R/W/X permissions are enforced even in machine mode.
*
* PMPADDR 3-15 are hard-coded and are applicable to both the bootloader and the application, so we configure and lock
* them during the BOOTLOADER build itself. During the application build, reconfiguration of these PMPADDR entries
* is silently ignored by the CPU.
*/
// 1. IRAM
PMP_ENTRY_SET(0, SOC_DIRAM_IRAM_LOW, CONDITIONAL_NONE);
PMP_ENTRY_SET(1, IRAM_END, PMP_TOR | CONDITIONAL_RX);
// 2. DRAM
PMP_ENTRY_SET(2, DRAM_START, CONDITIONAL_NONE);
PMP_ENTRY_CFG_SET(3, PMP_TOR | CONDITIONAL_RW);
// 3. Debug region
PMP_ENTRY_CFG_SET(4, PMP_NAPOT | RWX);
// 4. DROM (flash dcache)
PMP_ENTRY_CFG_SET(5, PMP_NAPOT | R);
// 5. DROM_MASK
PMP_ENTRY_CFG_SET(6, NONE);
PMP_ENTRY_CFG_SET(7, PMP_TOR | R);
// 6. IROM_MASK
PMP_ENTRY_CFG_SET(8, NONE);
PMP_ENTRY_CFG_SET(9, PMP_TOR | RX);
// 7. IROM (flash icache)
PMP_ENTRY_CFG_SET(10, PMP_NAPOT | RX);
// 8. Peripheral addresses
PMP_ENTRY_CFG_SET(11, PMP_NAPOT | RW);
// 9. SRAM (used as ICache)
PMP_ENTRY_CFG_SET(12, PMP_NAPOT | X);
// 10. no access to any address below(0x0-0xFFFF_FFFF)
PMP_ENTRY_CFG_SET(13, PMP_NA4 | NONE);// last 4 bytes(0xFFFFFFFC)
PMP_ENTRY_CFG_SET(14, NONE);
PMP_ENTRY_CFG_SET(15, PMP_TOR | NONE);
}
#endif
/* ---------------------------------------------------- Debugging ------------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
// --------------- Breakpoints/Watchpoints -----------------
#if SOC_CPU_BREAKPOINTS_NUM > 0
esp_err_t esp_cpu_set_breakpoint(int bp_num, const void *bp_addr)
{
/*
Todo:
- Check that bp_num is in range
*/
#if __XTENSA__
xt_utils_set_breakpoint(bp_num, (uint32_t)bp_addr);
#else
if (esp_cpu_dbgr_is_attached()) {
/* If we want to set breakpoint which when hit transfers control to debugger
* we need to set `action` in `mcontrol` to 1 (Enter Debug Mode).
* That `action` value is supported only when `dmode` of `tdata1` is set.
* But `dmode` can be modified by debugger only (from Debug Mode).
*
* So when debugger is connected we use special syscall to ask it to set breakpoint for us.
*/
long args[] = {true, bp_num, (long)bp_addr};
int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
if (ret == 0) {
return ESP_ERR_INVALID_RESPONSE;
}
}
rv_utils_set_breakpoint(bp_num, (uint32_t)bp_addr);
#endif // __XTENSA__
return ESP_OK;
}
esp_err_t esp_cpu_clear_breakpoint(int bp_num)
{
/*
Todo:
- Check if the bp_num is valid
*/
#if __XTENSA__
xt_utils_clear_breakpoint(bp_num);
#else
if (esp_cpu_dbgr_is_attached()) {
// See description in esp_cpu_set_breakpoint()
long args[] = {false, bp_num};
int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
if (ret == 0) {
return ESP_ERR_INVALID_RESPONSE;
}
}
rv_utils_clear_breakpoint(bp_num);
#endif // __XTENSA__
return ESP_OK;
}
#endif // SOC_CPU_BREAKPOINTS_NUM > 0
#if SOC_CPU_WATCHPOINTS_NUM > 0
esp_err_t esp_cpu_set_watchpoint(int wp_num, const void *wp_addr, size_t size, esp_cpu_watchpoint_trigger_t trigger)
{
/*
Todo:
- Check that wp_num is in range
- Check if the wp_num is already in use
*/
// Check if size is 2^n, where n is in [0...6]
if (size < 1 || size > 64 || (size & (size - 1)) != 0) {
return ESP_ERR_INVALID_ARG;
}
bool on_read = (trigger == ESP_CPU_WATCHPOINT_LOAD || trigger == ESP_CPU_WATCHPOINT_ACCESS);
bool on_write = (trigger == ESP_CPU_WATCHPOINT_STORE || trigger == ESP_CPU_WATCHPOINT_ACCESS);
#if __XTENSA__
xt_utils_set_watchpoint(wp_num, (uint32_t)wp_addr, size, on_read, on_write);
#else
if (esp_cpu_dbgr_is_attached()) {
// See description in esp_cpu_set_breakpoint()
long args[] = {true, wp_num, (long)wp_addr, (long)size,
(long)((on_read ? ESP_SEMIHOSTING_WP_FLG_RD : 0) | (on_write ? ESP_SEMIHOSTING_WP_FLG_WR : 0))
};
int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
if (ret == 0) {
return ESP_ERR_INVALID_RESPONSE;
}
}
rv_utils_set_watchpoint(wp_num, (uint32_t)wp_addr, size, on_read, on_write);
#endif // __XTENSA__
return ESP_OK;
}
esp_err_t esp_cpu_clear_watchpoint(int wp_num)
{
/*
Todo:
- Check if the wp_num is valid
*/
#if __XTENSA__
xt_utils_clear_watchpoint(wp_num);
#else
if (esp_cpu_dbgr_is_attached()) {
// See description in esp_cpu_set_breakpoint()
long args[] = {false, wp_num};
int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
if (ret == 0) {
return ESP_ERR_INVALID_RESPONSE;
}
}
rv_utils_clear_watchpoint(wp_num);
#endif // __XTENSA__
return ESP_OK;
}
#endif // SOC_CPU_WATCHPOINTS_NUM > 0
/* ------------------------------------------------------ Misc ---------------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
#if __XTENSA__ && XCHAL_HAVE_S32C1I && SOC_SPIRAM_SUPPORTED
static DRAM_ATTR uint32_t external_ram_cas_lock = 0;
#endif
bool esp_cpu_compare_and_set(volatile uint32_t *addr, uint32_t compare_value, uint32_t new_value)
{
#if __XTENSA__
bool ret;
#if XCHAL_HAVE_S32C1I && SOC_SPIRAM_SUPPORTED
if (esp_ptr_external_ram((const void *)addr)) {
uint32_t intr_level;
// Atomicity is achieved by disabling interrupts then acquiring an external RAM CAS lock
__asm__ __volatile__ ("rsil %0, " XTSTR(XCHAL_EXCM_LEVEL) "\n"
: "=r"(intr_level));
while (!xt_utils_compare_and_set(&external_ram_cas_lock, 0, 1)) {
;
}
// Now we compare and set the target address
uint32_t old_value;
old_value = *addr;
if (old_value == compare_value) {
*addr = new_value;
}
// Release the external RAM CAS lock and reenable interrupts
external_ram_cas_lock = 0;
__asm__ __volatile__ ("memw \n"
"wsr %0, ps\n"
:: "r"(intr_level));
ret = (old_value == compare_value);
} else
#endif //XCHAL_HAVE_S32C1I && SOC_SPIRAM_SUPPORTED
{
ret = xt_utils_compare_and_set(addr, compare_value, new_value);
}
return ret;
#else
// Single-core targets don't have an atomic CAS instruction, so the access method is the same for internal and external RAM
return rv_utils_compare_and_set(addr, compare_value, new_value);
#endif
}
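/*
Usage illustration (hypothetical, not part of this commit): esp_cpu_compare_and_set()
is the primitive underneath a simple spinlock. A minimal sketch, assuming the lock word
starts at 0:

    static volatile uint32_t my_lock = 0;

    static void my_lock_acquire(void)
    {
        while (!esp_cpu_compare_and_set(&my_lock, 0, 1)) {
            // spin until the lock word transitions from 0 to 1
        }
    }

    static void my_lock_release(void)
    {
        my_lock = 0;
    }
*/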


@@ -1,107 +1,557 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>
#include <assert.h>
#include "sdkconfig.h"
#include "soc/soc_caps.h"
#ifdef __XTENSA__
#include "xtensa/xtensa_api.h"
#include "xt_utils.h"
#elif __riscv
#include "riscv/rv_utils.h"
#endif
#include "esp_err.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief CPU cycle count type
*
* This data type represents the CPU's clock cycle count
*/
typedef uint32_t esp_cpu_cycle_count_t;
/**
* @brief CPU interrupt type
*/
typedef enum {
ESP_CPU_INTR_TYPE_LEVEL,
ESP_CPU_INTR_TYPE_EDGE,
ESP_CPU_INTR_TYPE_NA,
} esp_cpu_intr_type_t;
/**
* @brief CPU interrupt descriptor
*
* Each particular CPU interrupt has an associated descriptor describing that
* particular interrupt's characteristics. Call esp_cpu_intr_get_desc() to get
* the descriptor of a particular interrupt.
*/
typedef struct {
int priority; /**< Priority of the interrupt if it has a fixed priority, (-1) if the priority is configurable. */
esp_cpu_intr_type_t type; /**< Whether the interrupt is an edge or level type interrupt, ESP_CPU_INTR_TYPE_NA if the type is configurable. */
uint32_t flags; /**< Flags indicating extra details. */
} esp_cpu_intr_desc_t;
/**
* @brief Interrupt descriptor flags of esp_cpu_intr_desc_t
*/
#define ESP_CPU_INTR_DESC_FLAG_SPECIAL 0x01 /**< The interrupt is a special interrupt (e.g., a CPU timer interrupt) */
#define ESP_CPU_INTR_DESC_FLAG_RESVD 0x02 /**< The interrupt is reserved for internal use */
/**
* @brief CPU interrupt handler type
*/
typedef void (*esp_cpu_intr_handler_t)(void *arg);
/**
* @brief CPU watchpoint trigger type
*/
typedef enum {
ESP_CPU_WATCHPOINT_LOAD,
ESP_CPU_WATCHPOINT_STORE,
ESP_CPU_WATCHPOINT_ACCESS,
} esp_cpu_watchpoint_trigger_t;
/* --------------------------------------------------- CPU Control -----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
/**
* @brief Stall a CPU core
*
* @param core_id The core's ID
*/
void esp_cpu_stall(int core_id);
/**
* @brief Resume a previously stalled CPU core
*
* @param core_id The core's ID
*/
void esp_cpu_unstall(int core_id);
/**
* @brief Reset a CPU core
*
* @param core_id The core's ID
*/
void esp_cpu_reset(int core_id);
/**
* @brief Wait for Interrupt
*
* This function causes the current CPU core to execute its Wait For Interrupt
* (WFI or equivalent) instruction. After executing this function, the CPU core
* will stop execution until an interrupt occurs.
*/
void esp_cpu_wait_for_intr(void);
/* -------------------------------------------------- CPU Registers ----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
/**
* @brief Get the current core's ID
*
* This function will return the ID of the current CPU (i.e., the CPU that calls
* this function).
*
* @return The current core's ID [0..SOC_CPU_CORES_NUM - 1]
*/
FORCE_INLINE_ATTR __attribute__((pure)) int esp_cpu_get_core_id(void)
{
//Note: Made "pure" to optimize for single core target
#ifdef __XTENSA__
return (int)xt_utils_get_core_id();
#else
return (int)rv_utils_get_core_id();
#endif
}
/**
* @brief Read the current stack pointer address
*
* @return Stack pointer address
*/
FORCE_INLINE_ATTR void *esp_cpu_get_sp(void)
{
#ifdef __XTENSA__
return xt_utils_get_sp();
#else
return rv_utils_get_sp();
#endif
}
/**
* @brief Get the current CPU core's cycle count
*
* Each CPU core maintains an internal counter (i.e., cycle count) that increments
* every CPU clock cycle.
*
* @return Current CPU's cycle count, 0 if not supported.
*/
FORCE_INLINE_ATTR esp_cpu_cycle_count_t esp_cpu_get_cycle_count(void)
{
#ifdef __XTENSA__
return (esp_cpu_cycle_count_t)xt_utils_get_cycle_count();
#else
return (esp_cpu_cycle_count_t)rv_utils_get_cycle_count();
#endif
}
/**
* @brief Set the current CPU core's cycle count
*
* Set the given value into the internal counter that increments every
* CPU clock cycle.
*
* @param cycle_count CPU cycle count
*/
FORCE_INLINE_ATTR void esp_cpu_set_cycle_count(esp_cpu_cycle_count_t cycle_count)
{
#ifdef __XTENSA__
xt_utils_set_cycle_count((uint32_t)cycle_count);
#else
rv_utils_set_cycle_count((uint32_t)cycle_count);
#endif
}
/**
* @brief Convert a program counter (PC) value to an address
*
* If the architecture does not store the true virtual address in the CPU's PC
* or return addresses, this function will convert the PC value to a virtual
* address. Otherwise, the PC is just returned
*
* @param pc PC value
* @return Virtual address
*/
FORCE_INLINE_ATTR __attribute__((pure)) void *esp_cpu_pc_to_addr(uint32_t pc)
{
#ifdef __XTENSA__
// Xtensa stores window rotation in PC[31:30]
return (void *)((pc & 0x3fffffffU) | 0x40000000U);
#else
return (void *)pc;
#endif
}
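/*
Worked example (Xtensa, illustration only): a raw windowed-call return address such as
0x800D1234 carries the window increment in PC[31:30]; masking those bits and re-basing
into the instruction region yields the virtual address:

    esp_cpu_pc_to_addr(0x800D1234) == (void *)0x400D1234
*/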
/* ------------------------------------------------- CPU Interrupts ----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
// ---------------- Interrupt Descriptors ------------------
/**
* @brief Get a CPU interrupt's descriptor
*
* Each CPU interrupt has a descriptor describing the interrupt's capabilities
* and restrictions. This function gets the descriptor of a particular interrupt
* on a particular CPU.
*
* @param[in] core_id The core's ID
* @param[in] intr_num Interrupt number
* @param[out] intr_desc_ret The interrupt's descriptor
*/
void esp_cpu_intr_get_desc(int core_id, int intr_num, esp_cpu_intr_desc_t *intr_desc_ret);
// --------------- Interrupt Configuration -----------------
/**
* @brief Set the base address of the current CPU's Interrupt Vector Table (IVT)
*
* @param ivt_addr Interrupt Vector Table's base address
*/
FORCE_INLINE_ATTR void esp_cpu_intr_set_ivt_addr(const void *ivt_addr)
{
#ifdef __XTENSA__
xt_utils_set_vecbase((uint32_t)ivt_addr);
#else
rv_utils_set_mtvec((uint32_t)ivt_addr);
#endif
}
#if SOC_CPU_HAS_FLEXIBLE_INTC
/**
* @brief Set the interrupt type of a particular interrupt
*
* Set the interrupt type (Level or Edge) of a particular interrupt on the
* current CPU.
*
* @param intr_num Interrupt number (from 0 to 31)
* @param intr_type The interrupt's type
*/
FORCE_INLINE_ATTR void esp_cpu_intr_set_type(int intr_num, esp_cpu_intr_type_t intr_type)
{
assert(intr_num >= 0 && intr_num < SOC_CPU_INTR_NUM);
enum intr_type type = (intr_type == ESP_CPU_INTR_TYPE_LEVEL) ? INTR_TYPE_LEVEL : INTR_TYPE_EDGE;
esprv_intc_int_set_type(intr_num, type);
}
/**
* @brief Get the current configured type of a particular interrupt
*
* Get the currently configured type (i.e., level or edge) of a particular
* interrupt on the current CPU.
*
* @param intr_num Interrupt number (from 0 to 31)
* @return Interrupt type
*/
FORCE_INLINE_ATTR esp_cpu_intr_type_t esp_cpu_intr_get_type(int intr_num)
{
assert(intr_num >= 0 && intr_num < SOC_CPU_INTR_NUM);
enum intr_type type = esprv_intc_int_get_type(intr_num);
return (type == INTR_TYPE_LEVEL) ? ESP_CPU_INTR_TYPE_LEVEL : ESP_CPU_INTR_TYPE_EDGE;
}
/**
* @brief Set the priority of a particular interrupt
*
* Set the priority of a particular interrupt on the current CPU.
*
* @param intr_num Interrupt number (from 0 to 31)
* @param intr_priority The interrupt's priority
*/
FORCE_INLINE_ATTR void esp_cpu_intr_set_priority(int intr_num, int intr_priority)
{
assert(intr_num >= 0 && intr_num < SOC_CPU_INTR_NUM);
esprv_intc_int_set_priority(intr_num, intr_priority);
}
/**
* @brief Get the current configured priority of a particular interrupt
*
* Get the currently configured priority of a particular interrupt on the
* current CPU.
*
* @param intr_num Interrupt number (from 0 to 31)
* @return Interrupt's priority
*/
FORCE_INLINE_ATTR int esp_cpu_intr_get_priority(int intr_num)
{
assert(intr_num >= 0 && intr_num < SOC_CPU_INTR_NUM);
return esprv_intc_int_get_priority(intr_num);
}
#endif // SOC_CPU_HAS_FLEXIBLE_INTC
/**
* @brief Check if a particular interrupt already has a handler function
*
* Check if a particular interrupt on the current CPU already has a handler
* function assigned.
*
* @note This function simply checks if the IVT of the current CPU already has
* a handler assigned.
* @param intr_num Interrupt number (from 0 to 31)
* @return True if the interrupt has a handler function, false otherwise.
*/
FORCE_INLINE_ATTR bool esp_cpu_intr_has_handler(int intr_num)
{
assert(intr_num >= 0 && intr_num < SOC_CPU_INTR_NUM);
bool has_handler;
#ifdef __XTENSA__
has_handler = xt_int_has_handler(intr_num, esp_cpu_get_core_id());
#else
has_handler = intr_handler_get(intr_num);
#endif
return has_handler;
}
/**
* @brief Set the handler function of a particular interrupt
*
* Assign a handler function (i.e., ISR) to a particular interrupt on the
* current CPU.
*
* @note This function simply sets the handler function (in the IVT) and does
* not actually enable the interrupt.
* @param intr_num Interrupt number (from 0 to 31)
* @param handler Handler function
* @param handler_arg Argument passed to the handler function
*/
FORCE_INLINE_ATTR void esp_cpu_intr_set_handler(int intr_num, esp_cpu_intr_handler_t handler, void *handler_arg)
{
assert(intr_num >= 0 && intr_num < SOC_CPU_INTR_NUM);
#ifdef __XTENSA__
xt_set_interrupt_handler(intr_num, (xt_handler)handler, handler_arg);
#else
intr_handler_set(intr_num, (intr_handler_t)handler, handler_arg);
#endif
}
/**
* @brief Get a handler function's argument
*
* Get the argument of a previously assigned handler function on the current CPU.
*
* @param intr_num Interrupt number (from 0 to 31)
* @return The argument passed to the handler function
*/
FORCE_INLINE_ATTR void *esp_cpu_intr_get_handler_arg(int intr_num)
{
assert(intr_num >= 0 && intr_num < SOC_CPU_INTR_NUM);
void *handler_arg;
#ifdef __XTENSA__
handler_arg = xt_get_interrupt_handler_arg(intr_num);
#else
handler_arg = intr_handler_get_arg(intr_num);
#endif
return handler_arg;
}
// ------------------ Interrupt Control --------------------
/**
* @brief Enable particular interrupts on the current CPU
*
* @param intr_mask Bit mask of the interrupts to enable
*/
FORCE_INLINE_ATTR void esp_cpu_intr_enable(uint32_t intr_mask)
{
#ifdef __XTENSA__
xt_ints_on(intr_mask);
#else
rv_utils_intr_enable(intr_mask);
#endif
}
/**
* @brief Disable particular interrupts on the current CPU
*
* @param intr_mask Bit mask of the interrupts to disable
*/
FORCE_INLINE_ATTR void esp_cpu_intr_disable(uint32_t intr_mask)
{
#ifdef __XTENSA__
xt_ints_off(intr_mask);
#else
rv_utils_intr_disable(intr_mask);
#endif
}
/**
* @brief Get the enabled interrupts on the current CPU
*
* @return Bit mask of the enabled interrupts
*/
FORCE_INLINE_ATTR uint32_t esp_cpu_intr_get_enabled_mask(void)
{
#ifdef __XTENSA__
return xt_utils_intr_get_enabled_mask();
#else
return rv_utils_intr_get_enabled_mask();
#endif
}
/**
* @brief Acknowledge an edge interrupt
*
* @param intr_num Interrupt number (from 0 to 31)
*/
FORCE_INLINE_ATTR void esp_cpu_intr_edge_ack(int intr_num)
{
assert(intr_num >= 0 && intr_num < SOC_CPU_INTR_NUM);
#ifdef __XTENSA__
xthal_set_intclear(1 << intr_num);
#else
rv_utils_intr_edge_ack(intr_num);
#endif
}
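/*
Usage illustration (hypothetical): installing and unmasking an ISR on the current CPU
with the functions above, assuming intr_num was chosen/allocated elsewhere:

    static void my_isr(void *arg)
    {
        // ... handle the interrupt; for edge interrupts, also call esp_cpu_intr_edge_ack()
    }

    esp_cpu_intr_set_handler(intr_num, my_isr, NULL); // set the handler in the IVT
    esp_cpu_intr_enable(1U << intr_num);              // then unmask the interrupt
*/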
/* -------------------------------------------------- Memory Ports -----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
/**
* @brief Configure the CPU to disable access to invalid memory regions
*/
void esp_cpu_configure_region_protection(void);
/* ---------------------------------------------------- Debugging ------------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
// --------------- Breakpoints/Watchpoints -----------------
#if SOC_CPU_BREAKPOINTS_NUM > 0
/**
* @brief Set and enable a hardware breakpoint on the current CPU
*
* @note This function is meant to be called by the panic handler to set a
* breakpoint for an attached debugger during a panic.
* @note Overwrites previously set breakpoint with same breakpoint number.
* @param bp_num Hardware breakpoint number [0..SOC_CPU_BREAKPOINTS_NUM - 1]
* @param bp_addr Address to set a breakpoint on
* @return ESP_OK if breakpoint is set. Failure otherwise
*/
esp_err_t esp_cpu_set_breakpoint(int bp_num, const void *bp_addr);
/**
* @brief Clear a hardware breakpoint on the current CPU
*
* @note Clears a breakpoint regardless of whether it was previously set
* @param bp_num Hardware breakpoint number [0..SOC_CPU_BREAKPOINTS_NUM - 1]
* @return ESP_OK if breakpoint is cleared. Failure otherwise
*/
esp_err_t esp_cpu_clear_breakpoint(int bp_num);
#endif // SOC_CPU_BREAKPOINTS_NUM > 0
/**
* @brief Set and enable a hardware watchpoint on the current CPU
*
* Set and enable a hardware watchpoint on the current CPU, specifying the
* memory range and trigger operation. Watchpoints will break/panic the CPU when
* the CPU accesses (according to the trigger type) on a certain memory range.
*
* @note Overwrites previously set watchpoint with same watchpoint number.
* @param wp_num Hardware watchpoint number [0..SOC_CPU_WATCHPOINTS_NUM - 1]
* @param wp_addr Watchpoint's base address
* @param size Size of the region to watch. Must be one of 2^n, with n in [0..6].
* @param trigger Trigger type
* @return ESP_ERR_INVALID_ARG on invalid arg, ESP_OK otherwise
*/
esp_err_t esp_cpu_set_watchpoint(int wp_num, const void *wp_addr, size_t size, esp_cpu_watchpoint_trigger_t trigger);
/**
* @brief Clear a hardware watchpoint on the current CPU
*
* @note Clears a watchpoint regardless of whether it was previously set
* @param wp_num Hardware watchpoint number [0..SOC_CPU_WATCHPOINTS_NUM - 1]
* @return ESP_OK if watchpoint was cleared. Failure otherwise.
*/
esp_err_t esp_cpu_clear_watchpoint(int wp_num);
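/*
Usage illustration (hypothetical): watching a 4-byte variable for writes. The size must
be a power of two, and the base address is assumed to be aligned to that size:

    static uint32_t guarded_var __attribute__((aligned(4)));

    ESP_ERROR_CHECK(esp_cpu_set_watchpoint(0, &guarded_var, 4, ESP_CPU_WATCHPOINT_STORE));
    // ... any store to guarded_var now breaks/panics ...
    ESP_ERROR_CHECK(esp_cpu_clear_watchpoint(0));
*/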
// ---------------------- Debugger -------------------------
/**
* @brief Check if the current CPU has a debugger attached
*
* @return True if debugger is attached, false otherwise
*/
FORCE_INLINE_ATTR bool esp_cpu_dbgr_is_attached(void)
{
#ifdef __XTENSA__
return xt_utils_dbgr_is_attached();
#else
return rv_utils_dbgr_is_attached();
#endif
}
/**
* @brief Trigger a call to the current CPU's attached debugger
*/
FORCE_INLINE_ATTR void esp_cpu_dbgr_break(void)
{
#ifdef __XTENSA__
xt_utils_dbgr_break();
#else
rv_utils_dbgr_break();
#endif
}
/* ------------------------------------------------------ Misc ---------------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
/**
* @brief Atomic compare-and-set operation
*
* @param addr Address of atomic variable
* @param compare_value Value to compare the atomic variable to
* @param new_value New value to set the atomic variable to
* @return Whether the atomic variable was set or not
*/
bool esp_cpu_compare_and_set(volatile uint32_t *addr, uint32_t compare_value, uint32_t new_value);
/* ---------------------------------------------------- Deprecate ------------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
/*
[refactor-todo] Make these deprecated inline
*/
typedef esp_cpu_cycle_count_t esp_cpu_ccount_t;
#define esp_cpu_get_ccount() esp_cpu_get_cycle_count()
#define esp_cpu_set_ccount(ccount) esp_cpu_set_cycle_count(ccount)
/**
* @brief Returns true if a JTAG debugger is attached to CPU OCD (on chip debug) port.
*
* [refactor-todo] See if this can be replaced with esp_cpu_dbgr_is_attached directly
*
* @note Always returns false if CONFIG_ESP_DEBUG_OCDAWARE is not enabled
*/
FORCE_INLINE_ATTR bool esp_cpu_in_ocd_debug_mode(void)
{
#if CONFIG_ESP_DEBUG_OCDAWARE
return esp_cpu_dbgr_is_attached();
#else // CONFIG_ESP_DEBUG_OCDAWARE
return false; // Always return false if "OCD aware" is disabled
#endif // CONFIG_ESP_DEBUG_OCDAWARE
}
#ifdef __cplusplus
}
#endif


@@ -1,7 +1,16 @@
[mapping:esp_hw_support]
archive: libesp_hw_support.a
entries:
cpu: esp_cpu_stall (noflash)
cpu: esp_cpu_unstall (noflash)
cpu: esp_cpu_reset (noflash)
cpu: esp_cpu_wait_for_intr (noflash)
if ESP_PANIC_HANDLER_IRAM = y:
cpu: esp_cpu_set_breakpoint (noflash)
cpu: esp_cpu_clear_breakpoint (noflash)
cpu: esp_cpu_set_watchpoint (noflash)
cpu: esp_cpu_clear_watchpoint (noflash)
cpu: esp_cpu_compare_and_set (noflash)
esp_memory_utils (noflash)
rtc_clk (noflash)
rtc_init:rtc_vddsdio_set_config (noflash)


@@ -1,5 +1,4 @@
set(srcs "cpu_esp32c2.c"
"rtc_clk_init.c"
set(srcs "rtc_clk_init.c"
"rtc_clk.c"
"rtc_init.c"
"rtc_pm.c"


@@ -1,112 +0,0 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <assert.h>
#include "esp_cpu.h"
#include "sdkconfig.h"
#if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT && !BOOTLOADER_BUILD
extern int _iram_end;
extern int _data_start;
#define IRAM_END (int)&_iram_end
#define DRAM_START (int)&_data_start
#else
#define IRAM_END SOC_DIRAM_IRAM_HIGH
#define DRAM_START SOC_DIRAM_DRAM_LOW
#endif
#ifdef BOOTLOADER_BUILD
// Without L bit set
#define CONDITIONAL_NONE 0x0
#define CONDITIONAL_RX PMP_R | PMP_X
#define CONDITIONAL_RW PMP_R | PMP_W
#else
// With L bit set
#define CONDITIONAL_NONE NONE
#define CONDITIONAL_RX RX
#define CONDITIONAL_RW RW
#endif
void esp_cpu_configure_region_protection(void)
{
/* Notes on implementation:
*
* 1) The ESP32-C2 CPU supports overlapping PMP regions; matching is based on static priority
* (the lowest numbered entry has the highest priority).
*
* 2) Therefore, we use TOR (top of range) and NAPOT entries to map the effective area,
* then deny access to any remaining address.
*
* 3) PMPADDR entries 3-15 are hardcoded to fixed values, while PMPADDR entries 0-2 are programmed
* to split the internal D/I SRAM into IRAM/DRAM. All PMPCFG entries are used.
*
* 4) Ideally, PMPADDR 0-2 entries should be configured twice, once during bootloader startup and another during app startup.
* However, the CPU currently always executes in machine mode and to enforce these permissions in machine mode, we need
* to set the Lock (L) bit but if set once, it cannot be reconfigured. So, we only configure 0-2 PMPADDR during app startup.
*/
const unsigned NONE = PMP_L ;
const unsigned R = PMP_L | PMP_R;
const unsigned X = PMP_L | PMP_X;
const unsigned RW = PMP_L | PMP_R | PMP_W;
const unsigned RX = PMP_L | PMP_R | PMP_X;
const unsigned RWX = PMP_L | PMP_R | PMP_W | PMP_X;
/* There are 3 configuration scenarios for PMPADDR 0-2
*
* 1. Bootloader build:
* - We cannot set the lock bit as we need to reconfigure it again for the application.
* We configure PMPADDR 0-1 to cover entire valid IRAM range and PMPADDR 2-3 to cover entire valid DRAM range.
*
* 2. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT enabled
* - We split the SRAM into IRAM and DRAM such that IRAM region cannot be accessed via DBUS
* and DRAM region cannot be accessed via IBUS. We use _iram_end and _data_start markers to set the boundaries.
* We also lock these entries so the R/W/X permissions are enforced even for machine mode
*
* 3. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT disabled
* - The IRAM-DRAM split is not enabled, so we just need to ensure that accesses to valid address ranges succeed.
* For that, we set PMPADDR 0-1 to cover the entire valid IRAM range and PMPADDR 2-3 to cover the entire DRAM region.
* We also lock these entries so the R/W/X permissions are enforced even in machine mode.
*
* PMPADDR 3-15 are hard-coded and are applicable to both the bootloader and the application, so we configure and lock
* them during the BOOTLOADER build itself. During the application build, reconfiguration of these PMPADDR entries
* is silently ignored by the CPU.
*/
// 1. IRAM
PMP_ENTRY_SET(0, SOC_DIRAM_IRAM_LOW, CONDITIONAL_NONE);
PMP_ENTRY_SET(1, IRAM_END, PMP_TOR | CONDITIONAL_RX);
// 2. DRAM
PMP_ENTRY_SET(2, DRAM_START, CONDITIONAL_NONE);
PMP_ENTRY_CFG_SET(3, PMP_TOR | CONDITIONAL_RW);
// 3. Debug region
PMP_ENTRY_CFG_SET(4, PMP_NAPOT | RWX);
// 4. DROM (flash dcache)
PMP_ENTRY_CFG_SET(5, PMP_NAPOT | R);
// 5. DROM_MASK
PMP_ENTRY_CFG_SET(6, NONE);
PMP_ENTRY_CFG_SET(7, PMP_TOR | R);
// 6. IROM_MASK
PMP_ENTRY_CFG_SET(8, NONE);
PMP_ENTRY_CFG_SET(9, PMP_TOR | RX);
// 7. IROM (flash icache)
PMP_ENTRY_CFG_SET(10, PMP_NAPOT | RX);
// 8. Peripheral addresses
PMP_ENTRY_CFG_SET(11, PMP_NAPOT | RW);
// 9. SRAM (used as ICache)
PMP_ENTRY_CFG_SET(12, PMP_NAPOT | X);
// 10. no access to any address below(0x0-0xFFFF_FFFF)
PMP_ENTRY_CFG_SET(13, PMP_NA4 | NONE);// last 4 bytes(0xFFFFFFFC)
PMP_ENTRY_CFG_SET(14, NONE);
PMP_ENTRY_CFG_SET(15, PMP_TOR | NONE);
}


@@ -1,5 +1,4 @@
set(srcs "cpu_esp32c3.c"
"rtc_clk_init.c"
set(srcs "rtc_clk_init.c"
"rtc_clk.c"
"rtc_init.c"
"rtc_pm.c"


@@ -1,104 +0,0 @@
/*
* SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <assert.h>
#include "esp_cpu.h"
void esp_cpu_configure_region_protection(void)
{
/* Notes on implementation:
*
* 1) Note: ESP32-C3 CPU doesn't support overlapping PMP regions
*
* 2) Therefore, we use TOR (top of range) entries to map the whole address
* space, bottom to top.
*
* 3) There are not enough entries to describe all the memory regions 100% accurately.
*
* 4) This means some gaps (invalid memory) are accessible. Priority for extending regions
* to cover gaps is to extend read-only or read-execute regions or read-only regions only
* (executing unmapped addresses should always fault with invalid instruction, read-only means
* stores will correctly fault even if reads may return some invalid value.)
*
* 5) Entries are grouped in order with some static asserts to try and verify everything is
* correct.
*/
const unsigned NONE = PMP_L | PMP_TOR;
const unsigned R = PMP_L | PMP_TOR | PMP_R;
const unsigned RW = PMP_L | PMP_TOR | PMP_R | PMP_W;
const unsigned RX = PMP_L | PMP_TOR | PMP_R | PMP_X;
const unsigned RWX = PMP_L | PMP_TOR | PMP_R | PMP_W | PMP_X;
// 1. Gap at bottom of address space
PMP_ENTRY_SET(0, SOC_DEBUG_LOW, NONE);
// 2. Debug region
PMP_ENTRY_SET(1, SOC_DEBUG_HIGH, RWX);
_Static_assert(SOC_DEBUG_LOW < SOC_DEBUG_HIGH, "Invalid CPU debug region");
// 3. Gap between debug region & DROM (flash cache)
PMP_ENTRY_SET(2, SOC_DROM_LOW, NONE);
_Static_assert(SOC_DEBUG_HIGH < SOC_DROM_LOW, "Invalid PMP entry order");
// 4. DROM (flash cache)
// 5. Gap between DROM & DRAM
// (Note: To save PMP entries these two are merged into one read-only region)
PMP_ENTRY_SET(3, SOC_DRAM_LOW, R);
_Static_assert(SOC_DROM_LOW < SOC_DROM_HIGH, "Invalid DROM region");
_Static_assert(SOC_DROM_HIGH < SOC_DRAM_LOW, "Invalid PMP entry order");
// 6. DRAM
PMP_ENTRY_SET(4, SOC_DRAM_HIGH, RW);
_Static_assert(SOC_DRAM_LOW < SOC_DRAM_HIGH, "Invalid DRAM region");
// 7. Gap between DRAM and Mask DROM
// 8. Mask DROM
// (Note: to save PMP entries these two are merged into one read-only region)
PMP_ENTRY_SET(5, SOC_DROM_MASK_HIGH, R);
_Static_assert(SOC_DRAM_HIGH < SOC_DROM_MASK_LOW, "Invalid PMP entry order");
_Static_assert(SOC_DROM_MASK_LOW < SOC_DROM_MASK_HIGH, "Invalid mask DROM region");
// 9. Gap between mask DROM and mask IROM
// 10. Mask IROM
// (Note: to save PMP entries these two are merged into one RX region)
PMP_ENTRY_SET(6, SOC_IROM_MASK_HIGH, RX);
_Static_assert(SOC_DROM_MASK_HIGH < SOC_IROM_MASK_LOW, "Invalid PMP entry order");
_Static_assert(SOC_IROM_MASK_LOW < SOC_IROM_MASK_HIGH, "Invalid mask IROM region");
// 11. Gap between mask IROM & IRAM
PMP_ENTRY_SET(7, SOC_IRAM_LOW, NONE);
_Static_assert(SOC_IROM_MASK_HIGH < SOC_IRAM_LOW, "Invalid PMP entry order");
// 12. IRAM
PMP_ENTRY_SET(8, SOC_IRAM_HIGH, RWX);
_Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid IRAM region");
// 13. Gap between IRAM and IROM
// 14. IROM (flash cache)
// (Note: to save PMP entries these two are merged into one RX region)
PMP_ENTRY_SET(9, SOC_IROM_HIGH, RX);
_Static_assert(SOC_IRAM_HIGH < SOC_IROM_LOW, "Invalid PMP entry order");
_Static_assert(SOC_IROM_LOW < SOC_IROM_HIGH, "Invalid IROM region");
// 15. Gap between IROM & RTC slow memory
PMP_ENTRY_SET(10, SOC_RTC_IRAM_LOW, NONE);
_Static_assert(SOC_IROM_HIGH < SOC_RTC_IRAM_LOW, "Invalid PMP entry order");
// 16. RTC fast memory
PMP_ENTRY_SET(11, SOC_RTC_IRAM_HIGH, RWX);
_Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");
// 17. Gap between RTC fast memory & peripheral addresses
PMP_ENTRY_SET(12, SOC_PERIPHERAL_LOW, NONE);
_Static_assert(SOC_RTC_IRAM_HIGH < SOC_PERIPHERAL_LOW, "Invalid PMP entry order");
// 18. Peripheral addresses
PMP_ENTRY_SET(13, SOC_PERIPHERAL_HIGH, RW);
_Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");
// 19. End of address space
PMP_ENTRY_SET(14, UINT32_MAX, NONE); // all but last 4 bytes
PMP_ENTRY_SET(15, UINT32_MAX, PMP_L | PMP_NA4); // last 4 bytes
}


@@ -1,5 +1,4 @@
set(srcs "cpu_esp32h2.c"
"rtc_clk_init.c"
set(srcs "rtc_clk_init.c"
"rtc_clk.c"
"rtc_init.c"
"rtc_pm.c"


@@ -1,104 +0,0 @@
/*
* SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <assert.h>
#include "esp_cpu.h"
void esp_cpu_configure_region_protection(void)
{
/* Notes on implementation:
*
* 1) Note: ESP32-C3 CPU doesn't support overlapping PMP regions
*
* 2) Therefore, we use TOR (top of range) entries to map the whole address
* space, bottom to top.
*
* 3) There are not enough entries to describe all the memory regions 100% accurately.
*
* 4) This means some gaps (invalid memory) are accessible. Priority for extending regions
* to cover gaps is to extend read-only or read-execute regions or read-only regions only
* (executing unmapped addresses should always fault with invalid instruction, read-only means
* stores will correctly fault even if reads may return some invalid value.)
*
* 5) Entries are grouped in order with some static asserts to try and verify everything is
* correct.
*/
const unsigned NONE = PMP_L | PMP_TOR;
const unsigned R = PMP_L | PMP_TOR | PMP_R;
const unsigned RW = PMP_L | PMP_TOR | PMP_R | PMP_W;
const unsigned RX = PMP_L | PMP_TOR | PMP_R | PMP_X;
const unsigned RWX = PMP_L | PMP_TOR | PMP_R | PMP_W | PMP_X;
// 1. Gap at bottom of address space
PMP_ENTRY_SET(0, SOC_DEBUG_LOW, NONE);
// 2. Debug region
PMP_ENTRY_SET(1, SOC_DEBUG_HIGH, RWX);
_Static_assert(SOC_DEBUG_LOW < SOC_DEBUG_HIGH, "Invalid CPU debug region");
// 3. Gap between debug region & DROM (flash cache)
PMP_ENTRY_SET(2, SOC_DROM_LOW, NONE);
_Static_assert(SOC_DEBUG_HIGH < SOC_DROM_LOW, "Invalid PMP entry order");
// 4. DROM (flash cache)
// 5. Gap between DROM & DRAM
// (Note: To save PMP entries these two are merged into one read-only region)
PMP_ENTRY_SET(3, SOC_DRAM_LOW, R);
_Static_assert(SOC_DROM_LOW < SOC_DROM_HIGH, "Invalid DROM region");
_Static_assert(SOC_DROM_HIGH < SOC_DRAM_LOW, "Invalid PMP entry order");
// 6. DRAM
PMP_ENTRY_SET(4, SOC_DRAM_HIGH, RW);
_Static_assert(SOC_DRAM_LOW < SOC_DRAM_HIGH, "Invalid DRAM region");
// 7. Gap between DRAM and Mask DROM
// 8. Mask DROM
// (Note: to save PMP entries these two are merged into one read-only region)
PMP_ENTRY_SET(5, SOC_DROM_MASK_HIGH, R);
_Static_assert(SOC_DRAM_HIGH < SOC_DROM_MASK_LOW, "Invalid PMP entry order");
_Static_assert(SOC_DROM_MASK_LOW < SOC_DROM_MASK_HIGH, "Invalid mask DROM region");
// 9. Gap between mask DROM and mask IROM
// 10. Mask IROM
// (Note: to save PMP entries these two are merged into one RX region)
PMP_ENTRY_SET(6, SOC_IROM_MASK_HIGH, RX);
_Static_assert(SOC_DROM_MASK_HIGH < SOC_IROM_MASK_LOW, "Invalid PMP entry order");
_Static_assert(SOC_IROM_MASK_LOW < SOC_IROM_MASK_HIGH, "Invalid mask IROM region");
// 11. Gap between mask IROM & IRAM
PMP_ENTRY_SET(7, SOC_IRAM_LOW, NONE);
_Static_assert(SOC_IROM_MASK_HIGH < SOC_IRAM_LOW, "Invalid PMP entry order");
// 12. IRAM
PMP_ENTRY_SET(8, SOC_IRAM_HIGH, RWX);
_Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid IRAM region");
// 13. Gap between IRAM and IROM
// 14. IROM (flash cache)
// (Note: to save PMP entries these two are merged into one RX region)
PMP_ENTRY_SET(9, SOC_IROM_HIGH, RX);
_Static_assert(SOC_IRAM_HIGH < SOC_IROM_LOW, "Invalid PMP entry order");
_Static_assert(SOC_IROM_LOW < SOC_IROM_HIGH, "Invalid IROM region");
// 15. Gap between IROM & RTC slow memory
PMP_ENTRY_SET(10, SOC_RTC_IRAM_LOW, NONE);
_Static_assert(SOC_IROM_HIGH < SOC_RTC_IRAM_LOW, "Invalid PMP entry order");
// 16. RTC fast memory
PMP_ENTRY_SET(11, SOC_RTC_IRAM_HIGH, RWX);
_Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");
// 17. Gap between RTC fast memory & peripheral addresses
PMP_ENTRY_SET(12, SOC_PERIPHERAL_LOW, NONE);
_Static_assert(SOC_RTC_IRAM_HIGH < SOC_PERIPHERAL_LOW, "Invalid PMP entry order");
// 18. Peripheral addresses
PMP_ENTRY_SET(13, SOC_PERIPHERAL_HIGH, RW);
_Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");
// 19. End of address space
PMP_ENTRY_SET(14, UINT32_MAX, NONE); // all but last 4 bytes
PMP_ENTRY_SET(15, UINT32_MAX, PMP_L | PMP_NA4); // last 4 bytes
}

View File

@ -1,16 +1,8 @@
// Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
* SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
@ -77,24 +69,46 @@ void esprv_intc_int_enable(uint32_t unmask);
void esprv_intc_int_disable(uint32_t mask);
/**
* @brief Set interrupt type, level or edge
*
* @param int intr_num, interrupt number
*
* @param enum intr_type type, interrupt type, the level interrupt
can be cleared automatically once the interrupt source cleared, the edge interrupt should be clear by software after handled
*
* return none
*/
* @brief Set interrupt type
*
* Set the type of a particular interrupt (level or edge).
* - Level interrupts are cleared automatically once their interrupt source has
* been cleared
* - Edge interrupts must be cleared by software when they are handled.
*
* @param intr_num Interrupt number
* @param type Interrupt type
*/
void esprv_intc_int_set_type(int intr_num, enum intr_type type);
/**
* @brief Get the current type of an interrupt
*
* Get the current type of a particular interrupt (level or edge). An interrupt's
* type can be set by calling esprv_intc_int_set_type().
*
* @param intr_num Interrupt number
* @return Interrupt type
*/
enum intr_type esprv_intc_int_get_type(int intr_num);
/**
* Set interrupt priority in the interrupt controller
* @param rv_int_num CPU interrupt number
* @param priority Interrupt priority level, 1 to 7
* @param rv_int_num CPU interrupt number
* @param priority Interrupt priority level, 1 to 7
*/
void esprv_intc_int_set_priority(int rv_int_num, int priority);
/**
* @brief Get the current priority of an interrupt
*
* Get the current priority of an interrupt.
*
* @param rv_int_num CPU interrupt number
* @return Interrupt priority level, 1 to 7
*/
int esprv_intc_int_get_priority(int rv_int_num);
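/*
 * Usage sketch (illustrative, not part of this header): configure CPU interrupt
 * line 30 (an arbitrary example number) as an edge-triggered, priority 5 source,
 * then read the settings back:
 *
 *     esprv_intc_int_set_type(30, INTR_TYPE_EDGE);
 *     esprv_intc_int_set_priority(30, 5);
 *     assert(esprv_intc_int_get_type(30) == INTR_TYPE_EDGE);
 *     assert(esprv_intc_int_get_priority(30) == 5);
 */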
/**
* Set interrupt priority threshold.
* Interrupts with priority levels lower than the threshold are masked.

View File

@ -0,0 +1,205 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdint.h>
#include "soc/soc_caps.h"
#include "soc/assist_debug_reg.h"
#include "soc/interrupt_core0_reg.h"
#include "esp_attr.h"
#include "riscv/csr.h"
#include "riscv/interrupt.h"
#ifdef __cplusplus
extern "C" {
#endif
/*performance counter*/
#define CSR_PCER_MACHINE 0x7e0
#define CSR_PCMR_MACHINE 0x7e1
#define CSR_PCCR_MACHINE 0x7e2
/* --------------------------------------------------- CPU Control -----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
FORCE_INLINE_ATTR void __attribute__((always_inline)) rv_utils_wait_for_intr(void)
{
asm volatile ("wfi\n");
}
/* -------------------------------------------------- CPU Registers ----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
FORCE_INLINE_ATTR __attribute__((pure)) uint32_t rv_utils_get_core_id(void)
{
#if SOC_CPU_CORES_NUM == 1
return 0; // No need to check core ID on single core hardware
#else
uint32_t cpuid;
cpuid = RV_READ_CSR(mhartid);
return cpuid;
#endif
}
FORCE_INLINE_ATTR void *rv_utils_get_sp(void)
{
void *sp;
asm volatile ("mv %0, sp;" : "=r" (sp));
return sp;
}
FORCE_INLINE_ATTR uint32_t __attribute__((always_inline)) rv_utils_get_cycle_count(void)
{
return RV_READ_CSR(CSR_PCCR_MACHINE);
}
FORCE_INLINE_ATTR void __attribute__((always_inline)) rv_utils_set_cycle_count(uint32_t ccount)
{
RV_WRITE_CSR(CSR_PCCR_MACHINE, ccount);
}
/* ------------------------------------------------- CPU Interrupts ----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
// ---------------- Interrupt Descriptors ------------------
// --------------- Interrupt Configuration -----------------
FORCE_INLINE_ATTR void rv_utils_set_mtvec(uint32_t mtvec_val)
{
mtvec_val |= 1; // Set MODE field to treat MTVEC as a vector base address
RV_WRITE_CSR(mtvec, mtvec_val);
}
// ------------------ Interrupt Control --------------------
FORCE_INLINE_ATTR void rv_utils_intr_enable(uint32_t intr_mask)
{
//Disable all interrupts to make updating of the interrupt mask atomic.
unsigned old_mstatus = RV_CLEAR_CSR(mstatus, MSTATUS_MIE);
esprv_intc_int_enable(intr_mask);
RV_SET_CSR(mstatus, old_mstatus & MSTATUS_MIE);
}
FORCE_INLINE_ATTR void rv_utils_intr_disable(uint32_t intr_mask)
{
//Disable all interrupts to make updating of the interrupt mask atomic.
unsigned old_mstatus = RV_CLEAR_CSR(mstatus, MSTATUS_MIE);
esprv_intc_int_disable(intr_mask);
RV_SET_CSR(mstatus, old_mstatus & MSTATUS_MIE);
}
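/*
 * Usage sketch (illustrative): the masks are bitmaps of CPU interrupt numbers, so
 * enabling and later disabling interrupt line 30 (assuming the BIT() helper from
 * esp_bit_defs.h) looks like:
 *
 *     rv_utils_intr_enable(BIT(30));
 *     // ...
 *     rv_utils_intr_disable(BIT(30));
 */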
FORCE_INLINE_ATTR uint32_t rv_utils_intr_get_enabled_mask(void)
{
return REG_READ(INTERRUPT_CORE0_CPU_INT_ENABLE_REG);
}
FORCE_INLINE_ATTR void rv_utils_intr_edge_ack(int intr_num)
{
REG_SET_BIT(INTERRUPT_CORE0_CPU_INT_CLEAR_REG, intr_num);
}
/* -------------------------------------------------- Memory Ports -----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
/* ---------------------------------------------------- Debugging ------------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
// --------------- Breakpoints/Watchpoints -----------------
FORCE_INLINE_ATTR void rv_utils_set_breakpoint(int bp_num, uint32_t bp_addr)
{
/* The code below sets a breakpoint which will trigger a `Breakpoint` exception
* instead of transferring control to the debugger. */
RV_WRITE_CSR(tselect, bp_num);
RV_SET_CSR(CSR_TCONTROL, TCONTROL_MTE);
RV_SET_CSR(CSR_TDATA1, TDATA1_USER | TDATA1_MACHINE | TDATA1_EXECUTE);
RV_WRITE_CSR(tdata2, bp_addr);
}
FORCE_INLINE_ATTR void rv_utils_clear_breakpoint(int bp_num)
{
RV_WRITE_CSR(tselect, bp_num);
RV_CLEAR_CSR(CSR_TCONTROL, TCONTROL_MTE);
RV_CLEAR_CSR(CSR_TDATA1, TDATA1_USER | TDATA1_MACHINE | TDATA1_EXECUTE);
}
FORCE_INLINE_ATTR void rv_utils_set_watchpoint(int wp_num,
uint32_t wp_addr,
size_t size,
bool on_read,
bool on_write)
{
RV_WRITE_CSR(tselect, wp_num);
RV_SET_CSR(CSR_TCONTROL, TCONTROL_MPTE | TCONTROL_MTE);
RV_SET_CSR(CSR_TDATA1, TDATA1_USER | TDATA1_MACHINE);
RV_SET_CSR_FIELD(CSR_TDATA1, (long unsigned int) TDATA1_MATCH, 1);
// Encode the address and size in NAPOT form (size must be a power of two)
uint32_t addr_napot;
addr_napot = ((uint32_t) wp_addr) | ((size >> 1) - 1);
if (on_read) {
RV_SET_CSR(CSR_TDATA1, TDATA1_LOAD);
}
if (on_write) {
RV_SET_CSR(CSR_TDATA1, TDATA1_STORE);
}
RV_WRITE_CSR(tdata2, addr_napot);
}
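/*
 * Worked NAPOT example (illustrative): watching a 64-byte region at 0x40800000 gives
 * (size >> 1) - 1 = 31 = 0b11111, so addr_napot = 0x40800000 | 0x1F = 0x4080001F.
 * The trigger module decodes the trailing ones as "naturally aligned power-of-two
 * region of 64 bytes". For the encoding to be valid, size must be a power of two
 * and wp_addr must be aligned to it.
 */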
FORCE_INLINE_ATTR void rv_utils_clear_watchpoint(int wp_num)
{
RV_WRITE_CSR(tselect, wp_num);
RV_CLEAR_CSR(CSR_TCONTROL, TCONTROL_MTE);
RV_CLEAR_CSR(CSR_TDATA1, TDATA1_USER | TDATA1_MACHINE);
RV_CLEAR_CSR_FIELD(CSR_TDATA1, (long unsigned int) TDATA1_MATCH);
RV_CLEAR_CSR(CSR_TDATA1, TDATA1_MACHINE);
RV_CLEAR_CSR(CSR_TDATA1, TDATA1_LOAD | TDATA1_STORE | TDATA1_EXECUTE);
}
// ---------------------- Debugger -------------------------
FORCE_INLINE_ATTR bool rv_utils_dbgr_is_attached(void)
{
return REG_GET_BIT(ASSIST_DEBUG_CORE_0_DEBUG_MODE_REG, ASSIST_DEBUG_CORE_0_DEBUG_MODULE_ACTIVE);
}
FORCE_INLINE_ATTR void rv_utils_dbgr_break(void)
{
asm volatile("ebreak\n");
}
/* ------------------------------------------------------ Misc ---------------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
FORCE_INLINE_ATTR bool rv_utils_compare_and_set(volatile uint32_t *addr, uint32_t compare_value, uint32_t new_value)
{
// Single core target has no atomic CAS instruction. We can achieve atomicity by disabling interrupts
unsigned old_mstatus;
old_mstatus = RV_CLEAR_CSR(mstatus, MSTATUS_MIE);
// Compare and set
uint32_t old_value;
old_value = *addr;
if (old_value == compare_value) {
*addr = new_value;
}
// Restore interrupts
RV_SET_CSR(mstatus, old_mstatus & MSTATUS_MIE);
return (old_value == compare_value);
}
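/*
 * Usage sketch (illustrative): a minimal spinlock built on rv_utils_compare_and_set().
 * The lock variable and busy-wait loop are example code, not part of this header:
 *
 *     static volatile uint32_t lock = 0;
 *
 *     while (!rv_utils_compare_and_set(&lock, 0, 1)) {
 *         // spin until the lock is observed free and atomically taken
 *     }
 *     // ... critical section ...
 *     lock = 0; // release
 */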
#ifdef __cplusplus
}
#endif

View File

@ -1,19 +1,12 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdint.h>
#include <stddef.h>
#include <assert.h>
#include "soc/soc.h"
#include "riscv/interrupt.h"
#include "soc/interrupt_reg.h"
#include "riscv/csr.h"
@ -89,6 +82,20 @@ uint32_t esprv_intc_get_interrupt_unmask(void)
return REG_READ(INTERRUPT_CORE0_CPU_INT_ENABLE_REG);
}
/*************************** ESP-RV Interrupt Controller ***************************/
enum intr_type esprv_intc_int_get_type(int intr_num)
{
uint32_t intr_type_reg = REG_READ(INTERRUPT_CORE0_CPU_INT_TYPE_REG);
return (intr_type_reg & (1 << intr_num)) ? INTR_TYPE_EDGE : INTR_TYPE_LEVEL;
}
int esprv_intc_int_get_priority(int rv_int_num)
{
uint32_t intr_priority_reg = REG_READ(INTC_INT_PRIO_REG(rv_int_num));
return intr_priority_reg;
}
/*************************** Exception names. Used in .gdbinit file. ***************************/
const char *riscv_excp_names[16] __attribute__((used)) = {

View File

@ -63,10 +63,6 @@ config SOC_EMAC_SUPPORTED
bool
default y
config SOC_CPU_CORES_NUM
int
default 2
config SOC_ULP_SUPPORTED
bool
default y
@ -183,6 +179,18 @@ config SOC_SHARED_IDCACHE_SUPPORTED
bool
default y
config SOC_CPU_CORES_NUM
int
default 2
config SOC_CPU_INTR_NUM
int
default 32
config SOC_CPU_HAS_FPU
bool
default y
config SOC_CPU_BREAKPOINTS_NUM
int
default 2
@ -195,10 +203,6 @@ config SOC_CPU_WATCHPOINT_SIZE
int
default 64
config SOC_CPU_HAS_FPU
bool
default y
config SOC_DAC_PERIPH_NUM
int
default 2

View File

@ -75,7 +75,6 @@
#define SOC_SDIO_SLAVE_SUPPORTED 1
#define SOC_TWAI_SUPPORTED 1
#define SOC_EMAC_SUPPORTED 1
#define SOC_CPU_CORES_NUM 2
#define SOC_ULP_SUPPORTED 1
#define SOC_CCOMP_TIMER_SUPPORTED 1
#define SOC_RTC_FAST_MEM_SUPPORTED 1
@ -135,13 +134,14 @@
#define SOC_SHARED_IDCACHE_SUPPORTED 1 //Shared Cache for both instructions and data
/*-------------------------- CPU CAPS ----------------------------------------*/
#define SOC_CPU_CORES_NUM 2
#define SOC_CPU_INTR_NUM 32
#define SOC_CPU_HAS_FPU 1
#define SOC_CPU_BREAKPOINTS_NUM 2
#define SOC_CPU_WATCHPOINTS_NUM 2
#define SOC_CPU_WATCHPOINT_SIZE 64 // bytes
#define SOC_CPU_HAS_FPU 1
/*-------------------------- DAC CAPS ----------------------------------------*/
#define SOC_DAC_PERIPH_NUM 2
#define SOC_DAC_RESOLUTION 8 // DAC resolution ratio 8 bit

View File

@ -3,10 +3,6 @@
# using gen_soc_caps_kconfig.py, do not edit manually
#####################################################
config SOC_CPU_CORES_NUM
bool
default y
config SOC_ADC_SUPPORTED
bool
default y
@ -139,6 +135,18 @@ config SOC_SHARED_IDCACHE_SUPPORTED
bool
default y
config SOC_CPU_CORES_NUM
int
default 1
config SOC_CPU_INTR_NUM
int
default 32
config SOC_CPU_HAS_FLEXIBLE_INTC
bool
default y
config SOC_CPU_BREAKPOINTS_NUM
int
default 2
@ -147,10 +155,6 @@ config SOC_CPU_WATCHPOINTS_NUM
int
default 2
config SOC_CPU_HAS_FLEXIBLE_INTC
bool
default y
config SOC_CPU_WATCHPOINT_SIZE
hex
default 0x80000000

View File

@ -25,7 +25,6 @@
#pragma once
/*-------------------------- COMMON CAPS ---------------------------------------*/
#define SOC_CPU_CORES_NUM 1
#define SOC_ADC_SUPPORTED 1
#define SOC_DEDICATED_GPIO_SUPPORTED 1
#define SOC_GDMA_SUPPORTED 1
@ -75,10 +74,12 @@
#define SOC_SHARED_IDCACHE_SUPPORTED 1 //Shared Cache for both instructions and data
/*-------------------------- CPU CAPS ----------------------------------------*/
#define SOC_CPU_BREAKPOINTS_NUM 2
#define SOC_CPU_WATCHPOINTS_NUM 2
#define SOC_CPU_CORES_NUM (1U)
#define SOC_CPU_INTR_NUM 32
#define SOC_CPU_HAS_FLEXIBLE_INTC 1
#define SOC_CPU_BREAKPOINTS_NUM 2
#define SOC_CPU_WATCHPOINTS_NUM 2
#define SOC_CPU_WATCHPOINT_SIZE 0x80000000 // bytes
#define SOC_CPU_IDRAM_SPLIT_USING_PMP 1

View File

@ -3,10 +3,6 @@
# using gen_soc_caps_kconfig.py, do not edit manually
#####################################################
config SOC_CPU_CORES_NUM
bool
default y
config SOC_ADC_SUPPORTED
bool
default y
@ -203,6 +199,18 @@ config SOC_SHARED_IDCACHE_SUPPORTED
bool
default y
config SOC_CPU_CORES_NUM
int
default 1
config SOC_CPU_INTR_NUM
int
default 32
config SOC_CPU_HAS_FLEXIBLE_INTC
bool
default y
config SOC_CPU_BREAKPOINTS_NUM
int
default 8
@ -211,10 +219,6 @@ config SOC_CPU_WATCHPOINTS_NUM
int
default 8
config SOC_CPU_HAS_FLEXIBLE_INTC
bool
default y
config SOC_CPU_WATCHPOINT_SIZE
hex
default 0x80000000

View File

@ -25,7 +25,6 @@
#pragma once
/*-------------------------- COMMON CAPS ---------------------------------------*/
#define SOC_CPU_CORES_NUM 1
#define SOC_ADC_SUPPORTED 1
#define SOC_DEDICATED_GPIO_SUPPORTED 1
#define SOC_GDMA_SUPPORTED 1
@ -102,10 +101,12 @@
#define SOC_SHARED_IDCACHE_SUPPORTED 1 //Shared Cache for both instructions and data
/*-------------------------- CPU CAPS ----------------------------------------*/
#define SOC_CPU_BREAKPOINTS_NUM 8
#define SOC_CPU_WATCHPOINTS_NUM 8
#define SOC_CPU_CORES_NUM (1U)
#define SOC_CPU_INTR_NUM 32
#define SOC_CPU_HAS_FLEXIBLE_INTC 1
#define SOC_CPU_BREAKPOINTS_NUM 8
#define SOC_CPU_WATCHPOINTS_NUM 8
#define SOC_CPU_WATCHPOINT_SIZE 0x80000000 // bytes
/*-------------------------- DIGITAL SIGNATURE CAPS ----------------------------------------*/

View File

@ -3,10 +3,6 @@
# using gen_soc_caps_kconfig.py, do not edit manually
#####################################################
config SOC_CPU_CORES_NUM
bool
default y
config SOC_ADC_SUPPORTED
bool
default y
@ -191,6 +187,18 @@ config SOC_SHARED_IDCACHE_SUPPORTED
bool
default y
config SOC_CPU_CORES_NUM
int
default 1
config SOC_CPU_INTR_NUM
int
default 32
config SOC_CPU_HAS_FLEXIBLE_INTC
bool
default y
config SOC_CPU_BREAKPOINTS_NUM
int
default 8
@ -199,10 +207,6 @@ config SOC_CPU_WATCHPOINTS_NUM
int
default 8
config SOC_CPU_HAS_FLEXIBLE_INTC
bool
default y
config SOC_CPU_WATCHPOINT_SIZE
hex
default 0x80000000

View File

@ -33,7 +33,6 @@
#endif
/*-------------------------- COMMON CAPS ---------------------------------------*/
#define SOC_CPU_CORES_NUM 1
#define SOC_ADC_SUPPORTED 1
#define SOC_DEDICATED_GPIO_SUPPORTED 1
#define SOC_GDMA_SUPPORTED 1
@ -103,10 +102,12 @@
#define SOC_SHARED_IDCACHE_SUPPORTED 1 //Shared Cache for both instructions and data
/*-------------------------- CPU CAPS ----------------------------------------*/
#define SOC_CPU_BREAKPOINTS_NUM 8
#define SOC_CPU_WATCHPOINTS_NUM 8
#define SOC_CPU_CORES_NUM (1U)
#define SOC_CPU_INTR_NUM 32
#define SOC_CPU_HAS_FLEXIBLE_INTC 1
#define SOC_CPU_BREAKPOINTS_NUM 8
#define SOC_CPU_WATCHPOINTS_NUM 8
#define SOC_CPU_WATCHPOINT_SIZE 0x80000000 // bytes
/*-------------------------- DIGITAL SIGNATURE CAPS ----------------------------------------*/

View File

@ -19,10 +19,6 @@ config SOC_CP_DMA_SUPPORTED
bool
default y
config SOC_CPU_CORES_NUM
bool
default y
config SOC_DEDICATED_GPIO_SUPPORTED
bool
default y
@ -207,6 +203,14 @@ config SOC_CP_DMA_MAX_BUFFER_SIZE
int
default 4095
config SOC_CPU_CORES_NUM
int
default 1
config SOC_CPU_INTR_NUM
int
default 32
config SOC_CPU_BREAKPOINTS_NUM
int
default 2

View File

@ -43,7 +43,6 @@
#define SOC_DAC_SUPPORTED 1
#define SOC_TWAI_SUPPORTED 1
#define SOC_CP_DMA_SUPPORTED 1
#define SOC_CPU_CORES_NUM 1
#define SOC_DEDICATED_GPIO_SUPPORTED 1
#define SOC_SUPPORTS_SECURE_DL_MODE 1
#define SOC_RISCV_COPROC_SUPPORTED 1
@ -108,9 +107,11 @@
#define SOC_CP_DMA_MAX_BUFFER_SIZE (4095) /*!< Maximum size of the buffer that can be attached to descriptor */
/*-------------------------- CPU CAPS ----------------------------------------*/
#define SOC_CPU_CORES_NUM (1U)
#define SOC_CPU_INTR_NUM 32
#define SOC_CPU_BREAKPOINTS_NUM 2
#define SOC_CPU_WATCHPOINTS_NUM 2
#define SOC_CPU_WATCHPOINT_SIZE 64 // bytes
/*-------------------------- DAC CAPS ----------------------------------------*/

View File

@ -3,22 +3,6 @@
# using gen_soc_caps_kconfig.py, do not edit manually
#####################################################
config SOC_CPU_BREAKPOINTS_NUM
int
default 2
config SOC_CPU_WATCHPOINTS_NUM
int
default 2
config SOC_CPU_WATCHPOINT_SIZE
int
default 64
config SOC_CPU_HAS_FPU
bool
default y
config SOC_LEDC_SUPPORT_APB_CLOCK
bool
default y
@ -91,10 +75,6 @@ config SOC_DEDICATED_GPIO_SUPPORTED
bool
default y
config SOC_CPU_CORES_NUM
int
default 2
config SOC_CACHE_SUPPORT_WRAP
bool
default y
@ -283,6 +263,30 @@ config SOC_BROWNOUT_RESET_SUPPORTED
bool
default y
config SOC_CPU_CORES_NUM
int
default 2
config SOC_CPU_INTR_NUM
int
default 32
config SOC_CPU_HAS_FPU
bool
default y
config SOC_CPU_BREAKPOINTS_NUM
int
default 2
config SOC_CPU_WATCHPOINTS_NUM
int
default 2
config SOC_CPU_WATCHPOINT_SIZE
int
default 64
config SOC_DS_SIGNATURE_MAX_BIT_LEN
int
default 4096

View File

@ -1,22 +0,0 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#define SOC_CPU_BREAKPOINTS_NUM 2
#define SOC_CPU_WATCHPOINTS_NUM 2
#define SOC_CPU_WATCHPOINT_SIZE 64 // bytes
#define SOC_CPU_HAS_FPU 1

View File

@ -33,7 +33,6 @@
#define SOC_LCDCAM_SUPPORTED 1
#define SOC_MCPWM_SUPPORTED 1
#define SOC_DEDICATED_GPIO_SUPPORTED 1
#define SOC_CPU_CORES_NUM 2
#define SOC_CACHE_SUPPORT_WRAP 1
#define SOC_ULP_SUPPORTED 1
#define SOC_RISCV_COPROC_SUPPORTED 1
@ -102,7 +101,13 @@
#define SOC_BROWNOUT_RESET_SUPPORTED 1
/*-------------------------- CPU CAPS ----------------------------------------*/
#include "cpu_caps.h"
#define SOC_CPU_CORES_NUM 2
#define SOC_CPU_INTR_NUM 32
#define SOC_CPU_HAS_FPU 1
#define SOC_CPU_BREAKPOINTS_NUM 2
#define SOC_CPU_WATCHPOINTS_NUM 2
#define SOC_CPU_WATCHPOINT_SIZE 64 // bytes
/*-------------------------- DIGITAL SIGNATURE CAPS ----------------------------------------*/
/** The maximum length of a Digital Signature in bits. */

View File

@ -0,0 +1,245 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdint.h>
#include "soc/soc_caps.h"
#include "xtensa/config/core-isa.h"
#include "xtensa/config/core.h"
#include "xtensa/config/extreg.h"
#include "xtensa/config/specreg.h"
#include "xtensa/xtruntime.h"
#include "xt_instr_macros.h"
#include "esp_bit_defs.h"
#include "esp_attr.h"
#ifdef __cplusplus
extern "C" {
#endif
/* -------------------------------------------------- CPU Registers ----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
FORCE_INLINE_ATTR __attribute__((pure)) uint32_t xt_utils_get_core_id(void)
{
/*
Note: We depend on SOC_CPU_CORES_NUM instead of XCHAL_HAVE_PRID, as some single-core Xtensa targets (such as the
ESP32-S2) have the PRID register even though they are single core.
*/
#if SOC_CPU_CORES_NUM > 1
// Read and extract bit 13 of special register PRID
uint32_t id;
asm volatile (
"rsr.prid %0\n"
"extui %0,%0,13,1"
:"=r"(id));
return id;
#else
return 0;
#endif // SOC_CPU_CORES_NUM > 1
}
FORCE_INLINE_ATTR __attribute__((pure)) uint32_t xt_utils_get_raw_core_id(void)
{
#if XCHAL_HAVE_PRID
// Read the raw value of special register PRID
uint32_t id;
asm volatile (
"rsr.prid %0\n"
:"=r"(id));
return id;
#else
return 0;
#endif // XCHAL_HAVE_PRID
}
FORCE_INLINE_ATTR void *xt_utils_get_sp(void)
{
void *sp;
asm volatile ("mov %0, sp;" : "=r" (sp));
return sp;
}
FORCE_INLINE_ATTR uint32_t xt_utils_get_cycle_count(void)
{
uint32_t ccount;
RSR(CCOUNT, ccount);
return ccount;
}
FORCE_INLINE_ATTR void xt_utils_set_cycle_count(uint32_t ccount)
{
WSR(CCOUNT, ccount);
}
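/*
 * Usage sketch (illustrative): measuring elapsed CPU cycles around a code section
 * via the CCOUNT register. Unsigned subtraction keeps the result correct across a
 * single 32-bit counter wrap-around:
 *
 *     uint32_t start = xt_utils_get_cycle_count();
 *     // ... code under measurement ...
 *     uint32_t elapsed = xt_utils_get_cycle_count() - start;
 */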
FORCE_INLINE_ATTR void xt_utils_wait_for_intr(void)
{
asm volatile ("waiti 0\n");
}
/* ------------------------------------------------- CPU Interrupts ----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
// ---------------- Interrupt Descriptors ------------------
// --------------- Interrupt Configuration -----------------
FORCE_INLINE_ATTR void xt_utils_set_vecbase(uint32_t vecbase)
{
asm volatile ("wsr %0, vecbase" :: "r" (vecbase));
}
// ------------------ Interrupt Control --------------------
FORCE_INLINE_ATTR uint32_t xt_utils_intr_get_enabled_mask(void)
{
uint32_t intr_mask;
RSR(INTENABLE, intr_mask);
return intr_mask;
}
/* -------------------------------------------------- Memory Ports -----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
/* ---------------------------------------------------- Debugging ------------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
// --------------- Breakpoints/Watchpoints -----------------
FORCE_INLINE_ATTR void xt_utils_set_breakpoint(int bp_num, uint32_t bp_addr)
{
//Set the breakpoint's address
if (bp_num == 1) {
WSR(IBREAKA_1, bp_addr);
} else {
WSR(IBREAKA_0, bp_addr);
}
//Enable the breakpoint
uint32_t brk_ena_reg;
RSR(IBREAKENABLE, brk_ena_reg);
brk_ena_reg |= BIT(bp_num);
WSR(IBREAKENABLE, brk_ena_reg);
}
FORCE_INLINE_ATTR void xt_utils_clear_breakpoint(int bp_num)
{
// Disable the breakpoint using the break enable register
uint32_t bp_en = 0;
RSR(IBREAKENABLE, bp_en);
bp_en &= ~BIT(bp_num);
WSR(IBREAKENABLE, bp_en);
// Zero the break address register
uint32_t bp_addr = 0;
if (bp_num == 1) {
WSR(IBREAKA_1, bp_addr);
} else {
WSR(IBREAKA_0, bp_addr);
}
}
FORCE_INLINE_ATTR void xt_utils_set_watchpoint(int wp_num,
uint32_t wp_addr,
size_t size,
bool on_read,
bool on_write)
{
// Initialize DBREAKC bits (see Table 4143 of isa_rm.pdf)
uint32_t dbreakc_reg = 0x3F;
dbreakc_reg = dbreakc_reg << (__builtin_ffs(size) - 1);
dbreakc_reg = dbreakc_reg & 0x3F;
if (on_read) {
dbreakc_reg |= BIT(30);
}
if (on_write) {
dbreakc_reg |= BIT(31);
}
// Write the break address and break control registers
if (wp_num == 1) {
WSR(DBREAKA_1, (uint32_t) wp_addr);
WSR(DBREAKC_1, dbreakc_reg);
} else {
WSR(DBREAKA_0, (uint32_t) wp_addr);
WSR(DBREAKC_0, dbreakc_reg);
}
}
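/*
 * Worked example (illustrative): for size = 4, __builtin_ffs(4) - 1 = 2, so
 * dbreakc_reg = (0x3F << 2) & 0x3F = 0x3C. The two cleared low bits mark address
 * bits 1..0 as "don't care", i.e. a naturally aligned 4-byte region. size must be
 * a power of two (1..64) and wp_addr aligned to it.
 */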
FORCE_INLINE_ATTR void xt_utils_clear_watchpoint(int wp_num)
{
// Clear both break control and break address register
if (wp_num == 1) {
WSR(DBREAKC_1, 0);
WSR(DBREAKA_1, 0);
} else {
WSR(DBREAKC_0, 0);
WSR(DBREAKA_0, 0);
}
}
// ---------------------- Debugger -------------------------
FORCE_INLINE_ATTR bool xt_utils_dbgr_is_attached(void)
{
uint32_t dcr = 0;
uint32_t reg = DSRSET;
RER(reg, dcr);
return (bool)(dcr & 0x1);
}
FORCE_INLINE_ATTR void xt_utils_dbgr_break(void)
{
__asm__ ("break 1,15");
}
/* ------------------------------------------------------ Misc ---------------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
FORCE_INLINE_ATTR bool xt_utils_compare_and_set(volatile uint32_t *addr, uint32_t compare_value, uint32_t new_value)
{
#if XCHAL_HAVE_S32C1I
#ifdef __clang_analyzer__
//Teach clang-tidy that "addr" cannot be const as it can be updated by S32C1I instruction
volatile uint32_t temp;
temp = *addr;
*addr = temp;
#endif
// Atomic compare and set using S32C1I instruction
uint32_t old_value = new_value;
__asm__ __volatile__ (
"WSR %2, SCOMPARE1 \n"
"S32C1I %0, %1, 0 \n"
:"=r"(old_value)
:"r"(addr), "r"(compare_value), "0"(old_value)
);
return (old_value == compare_value);
#else // XCHAL_HAVE_S32C1I
// Single core target has no atomic CAS instruction. We can achieve atomicity by disabling interrupts
uint32_t intr_level;
__asm__ __volatile__ ("rsil %0, " XTSTR(XCHAL_EXCM_LEVEL) "\n"
: "=r"(intr_level));
// Compare and set
uint32_t old_value;
old_value = *addr;
if (old_value == compare_value) {
*addr = new_value;
}
// Restore interrupts
__asm__ __volatile__ ("memw \n"
"wsr %0, ps\n"
:: "r"(intr_level));
return (old_value == compare_value);
#endif // XCHAL_HAVE_S32C1I
}
#ifdef __cplusplus
}
#endif

View File

@ -251,6 +251,7 @@ PREDEFINED = \
_Static_assert()= \
IDF_DEPRECATED(X)= \
IRAM_ATTR= \
FORCE_INLINE_ATTR= \
configSUPPORT_DYNAMIC_ALLOCATION=1 \
configSUPPORT_STATIC_ALLOCATION=1 \
configQUEUE_REGISTRY_SIZE=1 \

View File

@ -1130,11 +1130,9 @@ components/pthread/test/test_pthread_local_storage.c
components/riscv/include/riscv/csr.h
components/riscv/include/riscv/encoding.h
components/riscv/include/riscv/instruction_decode.h
components/riscv/include/riscv/interrupt.h
components/riscv/include/riscv/riscv_interrupts.h
components/riscv/include/riscv/rvruntime-frames.h
components/riscv/instruction_decode.c
components/riscv/interrupt.c
components/sdmmc/sdmmc_common.c
components/sdmmc/sdmmc_common.h
components/sdmmc/sdmmc_init.c