Merge branch 'refactor/esp_hw_support_cpu' into 'master'

esp_hw_support: Add new esp_cpu.h abstraction

Closes IDF-4769

See merge request espressif/esp-idf!17091
This commit is contained in:
Darian 2022-06-14 21:11:30 +08:00
commit e213e66ba3
92 changed files with 2637 additions and 3804 deletions
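For orientation: this merge replaces direct use of hal/cpu_hal.h, hal/cpu_ll.h and cpu_util.c with a single target-agnostic esp_cpu.h API. A minimal sketch of calling the new functions (the diagnostic helper below is hypothetical; only the esp_cpu_* names come from the header added in this merge):

#include <stdio.h>
#include "esp_cpu.h"

// Hypothetical diagnostic helper built on the new abstraction
static void example_print_cpu_state(void)
{
    printf("core:     %d\n", esp_cpu_get_core_id());         // was cpu_ll_get_core_id()
    printf("stack:    %p\n", esp_cpu_get_sp());              // was cpu_hal_get_sp()
    printf("debugger: %d\n", esp_cpu_dbgr_is_attached());    // was cpu_ll_is_debugger_attached()
}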

View File

@ -15,8 +15,6 @@
void bootloader_init_mem(void)
{
cpu_hal_init_hwloop();
#ifdef CONFIG_BOOTLOADER_REGION_PROTECTION_ENABLE
// protect memory region
esp_cpu_configure_region_protection();

View File

@ -27,6 +27,8 @@
#include "soc/rtc.h"
#include "soc/spi_periph.h"
#include "hal/gpio_hal.h"
#include "xtensa/config/core.h"
#include "xt_instr_macros.h"
#include "esp32/rom/cache.h"
#include "esp_rom_gpio.h"
@ -350,6 +352,11 @@ esp_err_t bootloader_init(void)
{
esp_err_t ret = ESP_OK;
#if XCHAL_ERRATUM_572
uint32_t memctl = XCHAL_CACHE_MEMCTL_DEFAULT;
WSR(MEMCTL, memctl);
#endif // XCHAL_ERRATUM_572
bootloader_init_mem();
// check that static RAM is after the stack

View File

@ -37,6 +37,8 @@
#include "esp_efuse.h"
#include "hal/mmu_hal.h"
#include "hal/cache_hal.h"
#include "xtensa/config/core.h"
#include "xt_instr_macros.h"
static const char *TAG = "boot.esp32s3";
@ -316,6 +318,12 @@ static inline void bootloader_ana_reset_config(void)
esp_err_t bootloader_init(void)
{
esp_err_t ret = ESP_OK;
#if XCHAL_ERRATUM_572
uint32_t memctl = XCHAL_CACHE_MEMCTL_DEFAULT;
WSR(MEMCTL, memctl);
#endif // XCHAL_ERRATUM_572
bootloader_ana_reset_config();
bootloader_super_wdt_auto_feed();
// protect memory region

View File

@ -19,7 +19,7 @@
#include "soc/gpio_periph.h"
#include "soc/io_mux_reg.h"
#include "hal/cpu_hal.h"
#include "hal/cpu_ll.h"
#include "hal/dedic_gpio_cpu_ll.h"
#include "hal/gpio_hal.h"
#include "esp_private/periph_ctrl.h"
#include "esp_rom_gpio.h"
@ -271,7 +271,7 @@ esp_err_t dedic_gpio_new_bundle(const dedic_gpio_bundle_config_t *config, dedic_
esp_rom_gpio_connect_out_signal(config->gpio_array[i], dedic_gpio_periph_signals.cores[core_id].out_sig_per_channel[out_offset + i], config->flags.out_invert, false);
}
#if !SOC_DEDIC_GPIO_OUT_AUTO_ENABLE
cpu_ll_enable_dedic_gpio_output(s_platform[core_id]->out_occupied_mask);
dedic_gpio_cpu_ll_enable_output(s_platform[core_id]->out_occupied_mask);
#endif // !SOC_DEDIC_GPIO_OUT_AUTO_ENABLE
}
@ -353,14 +353,14 @@ void dedic_gpio_bundle_write(dedic_gpio_bundle_handle_t bundle, uint32_t mask, u
{
// For performance reasons, we don't want to check the validation of parameters here
// Even didn't check if we're working on the correct CPU core (i.e. bundle->core_id == current core_id)
cpu_ll_write_dedic_gpio_mask(bundle->out_mask & (mask << bundle->out_offset), value << bundle->out_offset);
dedic_gpio_cpu_ll_write_mask(bundle->out_mask & (mask << bundle->out_offset), value << bundle->out_offset);
}
uint32_t dedic_gpio_bundle_read_out(dedic_gpio_bundle_handle_t bundle)
{
// For performance reasons, we don't want to check the validation of parameters here
// Even didn't check if we're working on the correct CPU core (i.e. bundle->core_id == current core_id)
uint32_t value = cpu_ll_read_dedic_gpio_out();
uint32_t value = dedic_gpio_cpu_ll_read_out();
return (value & bundle->out_mask) >> (bundle->out_offset);
}
@ -368,7 +368,7 @@ uint32_t dedic_gpio_bundle_read_in(dedic_gpio_bundle_handle_t bundle)
{
// For performance reasons, we don't want to check the validation of parameters here
// Even didn't check if we're working on the correct CPU core (i.e. bundle->core_id == current core_id)
uint32_t value = cpu_ll_read_dedic_gpio_in();
uint32_t value = dedic_gpio_cpu_ll_read_in();
return (value & bundle->in_mask) >> (bundle->in_offset);
}

View File

@ -74,7 +74,7 @@ esp_err_t dedic_gpio_del_bundle(dedic_gpio_bundle_handle_t bundle);
* - ESP_FAIL: Get channel mask failed because of other error
*
* @note Each bundle should have at least one mask (in or/and out), based on bundle configuration.
* @note With the returned mask, user can directly invoke LL function like "cpu_ll_write_dedic_gpio_mask"
* @note With the returned mask, user can directly invoke LL function like "dedic_gpio_cpu_ll_write_mask"
* or write assembly code with dedicated GPIO instructions, to get better performance on GPIO manipulation.
*/
esp_err_t dedic_gpio_get_out_mask(dedic_gpio_bundle_handle_t bundle, uint32_t *mask);
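As hinted in the note above, a sketch of the fast path through the renamed LL function (assumes a bundle handle was already created with dedic_gpio_new_bundle(); the toggle helper itself is hypothetical):

#include <stdint.h>
#include "esp_err.h"
#include "driver/dedic_gpio.h"
#include "hal/dedic_gpio_cpu_ll.h"

// Hypothetical fast toggle: fetch the bundle's out mask once, then drive the channels
// directly through the CPU LL function instead of dedic_gpio_bundle_write()
static void example_fast_toggle(dedic_gpio_bundle_handle_t bundle)
{
    uint32_t out_mask = 0;
    ESP_ERROR_CHECK(dedic_gpio_get_out_mask(bundle, &out_mask));
    dedic_gpio_cpu_ll_write_mask(out_mask, out_mask);  // set every channel of this bundle
    dedic_gpio_cpu_ll_write_mask(out_mask, 0);         // clear them again
}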

View File

@ -11,7 +11,7 @@
#include "unity_test_utils.h"
#include "esp_rom_sys.h"
#include "soc/soc_caps.h"
#include "hal/cpu_ll.h"
#include "hal/dedic_gpio_cpu_ll.h"
#include "driver/gpio.h"
#include "driver/dedic_gpio.h"
@ -67,7 +67,7 @@ static void test_dedic_gpio_on_specific_core(void *args)
{
test_dedic_task_context_t *ctx = (test_dedic_task_context_t *)args;
uint32_t value = 0;
cpu_ll_write_dedic_gpio_all(0x0); // clear all out channels
dedic_gpio_cpu_ll_write_all(0x0); // clear all out channels
// configure a group of GPIOs, output only
const int bundleA_gpios[] = {ctx->gpios[0], ctx->gpios[1]};
@ -112,21 +112,21 @@ static void test_dedic_gpio_on_specific_core(void *args)
dedic_gpio_bundle_write(bundleA, 0x01, 0x01);
dedic_gpio_bundle_write(bundleB, 0x03, 0x03);
value = cpu_ll_read_dedic_gpio_out();
value = dedic_gpio_cpu_ll_read_out();
TEST_ASSERT_EQUAL(0x0D, value); // 1101
value = cpu_ll_read_dedic_gpio_in();
value = dedic_gpio_cpu_ll_read_in();
TEST_ASSERT_EQUAL(0x03, value); // 11
dedic_gpio_bundle_write(bundleB, 0x02, 0x0);
value = cpu_ll_read_dedic_gpio_out();
value = dedic_gpio_cpu_ll_read_out();
TEST_ASSERT_EQUAL(0x05, value); // 0101
value = cpu_ll_read_dedic_gpio_in();
value = dedic_gpio_cpu_ll_read_in();
TEST_ASSERT_EQUAL(0x01, value); // 01
cpu_ll_write_dedic_gpio_all(0x0F); // Set all out channels
value = cpu_ll_read_dedic_gpio_out();
dedic_gpio_cpu_ll_write_all(0x0F); // Set all out channels
value = dedic_gpio_cpu_ll_read_out();
TEST_ASSERT_EQUAL(0x0F, value);
value = cpu_ll_read_dedic_gpio_in();
value = dedic_gpio_cpu_ll_read_in();
TEST_ASSERT_EQUAL(0x03, value); // 11
TEST_ASSERT_EQUAL(0x03, dedic_gpio_bundle_read_out(bundleA)); // 11
TEST_ASSERT_EQUAL(0x00, dedic_gpio_bundle_read_in(bundleA)); // input is not enabled for bundleA

View File

@ -6,7 +6,7 @@
#include <string.h>
#include "esp_gdbstub_common.h"
#include "esp_cpu.h"
#include "hal/cpu_hal.h"
#include "soc/soc_memory_layout.h"
#include "xtensa/config/specreg.h"
#include "sdkconfig.h"

View File

@ -3,7 +3,7 @@ idf_build_get_property(target IDF_TARGET)
set(requires soc)
set(priv_requires efuse spi_flash bootloader_support)
set(srcs "compare_set.c" "cpu_util.c" "esp_memory_utils.c")
set(srcs "compare_set.c" "cpu.c" "esp_memory_utils.c")
if(NOT BOOTLOADER_BUILD)
list(APPEND srcs "esp_async_memcpy.c"
"esp_clk.c"

View File

@ -17,18 +17,18 @@ void compare_and_set_extram(volatile uint32_t *addr, uint32_t compare, uint32_t
__asm__ __volatile__ ("rsil %0, " XTSTR(XCHAL_EXCM_LEVEL) "\n"
: "=r"(intlevel));
spinlock_acquire(&global_extram_lock, SPINLOCK_WAIT_FOREVER);
spinlock_acquire(&global_extram_lock, SPINLOCK_WAIT_FOREVER);
old_value = *addr;
if (old_value == compare) {
*addr = *set;
}
spinlock_release(&global_extram_lock);
spinlock_release(&global_extram_lock);
__asm__ __volatile__ ("memw \n"
"wsr %0, ps\n"
:: "r"(intlevel));
__asm__ __volatile__ ("memw \n"
"wsr %0, ps\n"
:: "r"(intlevel));
*set = old_value;
}

View File

@ -0,0 +1,644 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "sdkconfig.h"
#include <stdint.h>
#include <assert.h>
#include "soc/soc.h"
#include "soc/soc_caps.h"
#include "soc/rtc_cntl_reg.h"
#include "hal/soc_hal.h"
#include "hal/mpu_hal.h"
#include "esp_bit_defs.h"
#include "esp_attr.h"
#include "esp_err.h"
#include "esp_cpu.h"
#include "esp_memory_utils.h"
#if __XTENSA__
#include "xtensa/config/core-isa.h"
#else
#include "soc/system_reg.h" // For SYSTEM_CPU_PER_CONF_REG
#include "soc/dport_access.h" // For Dport access
#include "riscv/semihosting.h"
#include "riscv/csr.h" // For PMP_ENTRY. [refactor-todo] create PMP abstraction in rv_utils.h
#endif
#if SOC_CPU_HAS_FLEXIBLE_INTC
#include "riscv/instruction_decode.h"
#endif
/* --------------------------------------------------- CPU Control -----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
void esp_cpu_stall(int core_id)
{
assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM > 1 // We don't allow stalling of the current core
/*
We need to write the value "0x86" to stall a particular core. The write location is split into two separate
bit fields named "c0" and "c1", and the two fields are located in different registers. Each core has its own pair of
"c0" and "c1" bit fields.
Note: This function can be called when the cache is disabled. We use "ternary if" instead of an array so that the
"rodata" of the register masks/shifts will be stored in this function's "rodata" section, instead of the source
file's "rodata" section (see IDF-5214).
*/
int rtc_cntl_c0_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C0_M : RTC_CNTL_SW_STALL_APPCPU_C0_M;
int rtc_cntl_c0_s = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C0_S : RTC_CNTL_SW_STALL_APPCPU_C0_S;
int rtc_cntl_c1_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C1_M : RTC_CNTL_SW_STALL_APPCPU_C1_M;
int rtc_cntl_c1_s = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C1_S : RTC_CNTL_SW_STALL_APPCPU_C1_S;
CLEAR_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_c0_m);
SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, 2 << rtc_cntl_c0_s);
CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1_m);
SET_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, 0x21 << rtc_cntl_c1_s);
#endif
}
void esp_cpu_unstall(int core_id)
{
assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM > 1 // We don't allow stalling of the current core
/*
We need to clear the value "0x86" to unstall a particular core. The location of this value is split into
two separate bit fields named "c0" and "c1", and the two fields are located in different registers. Each core has
its own pair of "c0" and "c1" bit fields.
Note: This function can be called when the cache is disabled. We use "ternary if" instead of an array so that the
"rodata" of the register masks/shifts will be stored in this function's "rodata" section, instead of the source
file's "rodata" section (see IDF-5214).
*/
int rtc_cntl_c0_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C0_M : RTC_CNTL_SW_STALL_APPCPU_C0_M;
int rtc_cntl_c1_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C1_M : RTC_CNTL_SW_STALL_APPCPU_C1_M;
CLEAR_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_c0_m);
CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1_m);
#endif
}
void esp_cpu_reset(int core_id)
{
assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM > 1
/*
Note: This function can be called when the cache is disabled. We use "ternary if" instead of an array so that the
"rodata" of the register masks/shifts will be stored in this function's "rodata" section, instead of the source
file's "rodata" section (see IDF-5214).
*/
int rtc_cntl_rst_m = (core_id == 0) ? RTC_CNTL_SW_PROCPU_RST_M : RTC_CNTL_SW_APPCPU_RST_M;
#else // SOC_CPU_CORES_NUM > 1
int rtc_cntl_rst_m = RTC_CNTL_SW_PROCPU_RST_M;
#endif // SOC_CPU_CORES_NUM > 1
SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_rst_m);
}
void esp_cpu_wait_for_intr(void)
{
#if __XTENSA__
xt_utils_wait_for_intr();
#else
if (esp_cpu_dbgr_is_attached() && DPORT_REG_GET_BIT(SYSTEM_CPU_PER_CONF_REG, SYSTEM_CPU_WAIT_MODE_FORCE_ON) == 0) {
/* When SYSTEM_CPU_WAIT_MODE_FORCE_ON is disabled, the debugger's SBA access to memory does not work while the CPU
is in WFI mode, so do not enter WFI when a debugger is connected */
return;
}
rv_utils_wait_for_intr();
#endif // __XTENSA__
}
/* -------------------------------------------------- CPU Registers ----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
/* ------------------------------------------------- CPU Interrupts ----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
// ---------------- Interrupt Descriptors ------------------
#if SOC_CPU_HAS_FLEXIBLE_INTC
static bool is_intr_num_resv(int intr_num)
{
// Workaround to reserve interrupt number 1 for Wi-Fi, 5 and 8 for Bluetooth, and 6 for the "permanently disabled" interrupt
// [TODO: IDF-2465]
const uint32_t reserved = BIT(1) | BIT(5) | BIT(6) | BIT(8);
if (reserved & BIT(intr_num)) {
return true;
}
extern int _vector_table;
extern int _interrupt_handler;
const intptr_t pc = (intptr_t)(&_vector_table + intr_num);
/* JAL instructions are relative to the PC they are executed from. */
const intptr_t destination = pc + riscv_decode_offset_from_jal_instruction(pc);
return destination != (intptr_t)&_interrupt_handler;
}
void esp_cpu_intr_get_desc(int core_id, int intr_num, esp_cpu_intr_desc_t *intr_desc_ret)
{
intr_desc_ret->priority = 1; //Todo: We should make this -1
intr_desc_ret->type = ESP_CPU_INTR_TYPE_NA;
#if __riscv
intr_desc_ret->flags = is_intr_num_resv(intr_num) ? ESP_CPU_INTR_DESC_FLAG_RESVD : 0;
#else
intr_desc_ret->flags = 0;
#endif
}
#else // SOC_CPU_HAS_FLEXIBLE_INTC
typedef struct {
int priority;
esp_cpu_intr_type_t type;
uint32_t flags[SOC_CPU_CORES_NUM];
} intr_desc_t;
#if SOC_CPU_CORES_NUM > 1
// Note: We currently only have dual core targets, so the table initializer is hard coded
const static intr_desc_t intr_desc_table [SOC_CPU_INTR_NUM] = {
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //0
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //1
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //2
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //3
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, 0 } }, //4
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //5
#if CONFIG_FREERTOS_CORETIMER_0
{ 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //6
#else
{ 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //6
#endif
{ 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //7
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //8
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //9
{ 1, ESP_CPU_INTR_TYPE_EDGE, { 0, 0 } }, //10
{ 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //11
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0} }, //12
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0} }, //13
{ 7, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //14, NMI
#if CONFIG_FREERTOS_CORETIMER_1
{ 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //15
#else
{ 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //15
#endif
{ 5, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //16
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //17
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //18
{ 2, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //19
{ 2, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //20
{ 2, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //21
{ 3, ESP_CPU_INTR_TYPE_EDGE, { ESP_CPU_INTR_DESC_FLAG_RESVD, 0 } }, //22
{ 3, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //23
{ 4, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, 0 } }, //24
{ 4, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //25
{ 5, ESP_CPU_INTR_TYPE_LEVEL, { 0, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //26
{ 3, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //27
{ 4, ESP_CPU_INTR_TYPE_EDGE, { 0, 0 } }, //28
{ 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //29
{ 4, ESP_CPU_INTR_TYPE_EDGE, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //30
{ 5, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //31
};
#else // SOC_CPU_CORES_NUM > 1
const static intr_desc_t intr_desc_table [SOC_CPU_INTR_NUM] = {
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //0
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //1
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //2
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //3
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //4
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //5
#if CONFIG_FREERTOS_CORETIMER_0
{ 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //6
#else
{ 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //6
#endif
{ 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //7
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //8
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //9
{ 1, ESP_CPU_INTR_TYPE_EDGE, { 0 } }, //10
{ 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //11
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //12
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //13
{ 7, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //14, NMI
#if CONFIG_FREERTOS_CORETIMER_1
{ 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //15
#else
{ 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //15
#endif
{ 5, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //16
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //17
{ 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //18
{ 2, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //19
{ 2, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //20
{ 2, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //21
{ 3, ESP_CPU_INTR_TYPE_EDGE, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //22
{ 3, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //23
{ 4, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //24
{ 4, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //25
{ 5, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //26
{ 3, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //27
{ 4, ESP_CPU_INTR_TYPE_EDGE, { 0 } }, //28
{ 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //29
{ 4, ESP_CPU_INTR_TYPE_EDGE, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //30
{ 5, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //31
};
#endif // SOC_CPU_CORES_NUM > 1
void esp_cpu_intr_get_desc(int core_id, int intr_num, esp_cpu_intr_desc_t *intr_desc_ret)
{
assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM == 1
core_id = 0; //If this is a single core target, hard code CPU ID to 0
#endif
intr_desc_ret->priority = intr_desc_table[intr_num].priority;
intr_desc_ret->type = intr_desc_table[intr_num].type;
intr_desc_ret->flags = intr_desc_table[intr_num].flags[core_id];
}
#endif // SOC_CPU_HAS_FLEXIBLE_INTC
/* -------------------------------------------------- Memory Ports -----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
#if CONFIG_IDF_TARGET_ESP32 || CONFIG_IDF_TARGET_ESP32S2 || CONFIG_IDF_TARGET_ESP32S3
void esp_cpu_configure_region_protection(void)
{
/* Note: currently this is configured the same on all Xtensa targets
*
* These chips have the address space divided into 8 regions, 512MB each.
*/
const int illegal_regions[] = {0, 4, 5, 6, 7}; // 0x00000000, 0x80000000, 0xa0000000, 0xc0000000, 0xe0000000
for (size_t i = 0; i < sizeof(illegal_regions) / sizeof(illegal_regions[0]); ++i) {
mpu_hal_set_region_access(illegal_regions[i], MPU_REGION_ILLEGAL);
}
mpu_hal_set_region_access(1, MPU_REGION_RW); // 0x20000000
}
#elif CONFIG_IDF_TARGET_ESP32C3 || CONFIG_IDF_TARGET_ESP32H2
void esp_cpu_configure_region_protection(void)
{
/* Notes on implementation:
*
* 1) Note: ESP32-C3/H2 CPU doesn't support overlapping PMP regions
*
* 2) Therefore, we use TOR (top of range) entries to map the whole address
* space, bottom to top.
*
* 3) There are not enough entries to describe all the memory regions 100% accurately.
*
* 4) This means some gaps (invalid memory) are accessible. When extending regions to cover
* these gaps, the priority is to extend read-execute regions or read-only regions only
* (executing unmapped addresses should always fault with an invalid instruction, and read-only means
* stores will correctly fault even if reads may return some invalid value.)
*
* 5) Entries are grouped in order with some static asserts to try and verify everything is
* correct.
*/
const unsigned NONE = PMP_L | PMP_TOR;
const unsigned R = PMP_L | PMP_TOR | PMP_R;
const unsigned RW = PMP_L | PMP_TOR | PMP_R | PMP_W;
const unsigned RX = PMP_L | PMP_TOR | PMP_R | PMP_X;
const unsigned RWX = PMP_L | PMP_TOR | PMP_R | PMP_W | PMP_X;
// 1. Gap at bottom of address space
PMP_ENTRY_SET(0, SOC_DEBUG_LOW, NONE);
// 2. Debug region
PMP_ENTRY_SET(1, SOC_DEBUG_HIGH, RWX);
_Static_assert(SOC_DEBUG_LOW < SOC_DEBUG_HIGH, "Invalid CPU debug region");
// 3. Gap between debug region & DROM (flash cache)
PMP_ENTRY_SET(2, SOC_DROM_LOW, NONE);
_Static_assert(SOC_DEBUG_HIGH < SOC_DROM_LOW, "Invalid PMP entry order");
// 4. DROM (flash cache)
// 5. Gap between DROM & DRAM
// (Note: To save PMP entries these two are merged into one read-only region)
PMP_ENTRY_SET(3, SOC_DRAM_LOW, R);
_Static_assert(SOC_DROM_LOW < SOC_DROM_HIGH, "Invalid DROM region");
_Static_assert(SOC_DROM_HIGH < SOC_DRAM_LOW, "Invalid PMP entry order");
// 6. DRAM
PMP_ENTRY_SET(4, SOC_DRAM_HIGH, RW);
_Static_assert(SOC_DRAM_LOW < SOC_DRAM_HIGH, "Invalid DRAM region");
// 7. Gap between DRAM and Mask DROM
// 8. Mask DROM
// (Note: to save PMP entries these two are merged into one read-only region)
PMP_ENTRY_SET(5, SOC_DROM_MASK_HIGH, R);
_Static_assert(SOC_DRAM_HIGH < SOC_DROM_MASK_LOW, "Invalid PMP entry order");
_Static_assert(SOC_DROM_MASK_LOW < SOC_DROM_MASK_HIGH, "Invalid mask DROM region");
// 9. Gap between mask DROM and mask IROM
// 10. Mask IROM
// (Note: to save PMP entries these two are merged into one RX region)
PMP_ENTRY_SET(6, SOC_IROM_MASK_HIGH, RX);
_Static_assert(SOC_DROM_MASK_HIGH < SOC_IROM_MASK_LOW, "Invalid PMP entry order");
_Static_assert(SOC_IROM_MASK_LOW < SOC_IROM_MASK_HIGH, "Invalid mask IROM region");
// 11. Gap between mask IROM & IRAM
PMP_ENTRY_SET(7, SOC_IRAM_LOW, NONE);
_Static_assert(SOC_IROM_MASK_HIGH < SOC_IRAM_LOW, "Invalid PMP entry order");
// 12. IRAM
PMP_ENTRY_SET(8, SOC_IRAM_HIGH, RWX);
_Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid IRAM region");
// 13. Gap between IRAM and IROM
// 14. IROM (flash cache)
// (Note: to save PMP entries these two are merged into one RX region)
PMP_ENTRY_SET(9, SOC_IROM_HIGH, RX);
_Static_assert(SOC_IRAM_HIGH < SOC_IROM_LOW, "Invalid PMP entry order");
_Static_assert(SOC_IROM_LOW < SOC_IROM_HIGH, "Invalid IROM region");
// 15. Gap between IROM & RTC slow memory
PMP_ENTRY_SET(10, SOC_RTC_IRAM_LOW, NONE);
_Static_assert(SOC_IROM_HIGH < SOC_RTC_IRAM_LOW, "Invalid PMP entry order");
// 16. RTC fast memory
PMP_ENTRY_SET(11, SOC_RTC_IRAM_HIGH, RWX);
_Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");
// 17. Gap between RTC fast memory & peripheral addresses
PMP_ENTRY_SET(12, SOC_PERIPHERAL_LOW, NONE);
_Static_assert(SOC_RTC_IRAM_HIGH < SOC_PERIPHERAL_LOW, "Invalid PMP entry order");
// 18. Peripheral addresses
PMP_ENTRY_SET(13, SOC_PERIPHERAL_HIGH, RW);
_Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");
// 19. End of address space
PMP_ENTRY_SET(14, UINT32_MAX, NONE); // all but last 4 bytes
PMP_ENTRY_SET(15, UINT32_MAX, PMP_L | PMP_NA4); // last 4 bytes
}
#elif CONFIG_IDF_TARGET_ESP32C2
#if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT && !BOOTLOADER_BUILD
extern int _iram_end;
extern int _data_start;
#define IRAM_END (int)&_iram_end
#define DRAM_START (int)&_data_start
#else
#define IRAM_END SOC_DIRAM_IRAM_HIGH
#define DRAM_START SOC_DIRAM_DRAM_LOW
#endif
#ifdef BOOTLOADER_BUILD
// Without L bit set
#define CONDITIONAL_NONE 0x0
#define CONDITIONAL_RX PMP_R | PMP_X
#define CONDITIONAL_RW PMP_R | PMP_W
#else
// With L bit set
#define CONDITIONAL_NONE NONE
#define CONDITIONAL_RX RX
#define CONDITIONAL_RW RW
#endif
void esp_cpu_configure_region_protection(void)
{
/* Notes on implementation:
*
* 1) The ESP32-C2 CPU supports overlapping PMP regions; the configuration is based on the static
* priority feature (the lowest numbered entry has the highest priority).
*
* 2) Therefore, we use TOR (top of range) and NAPOT entries to map the effective areas.
* Finally, any remaining address range is left without access permission.
*
* 3) PMPADDR entries 3-15 are hardcoded to fixed values, while PMPADDR entries 0-2 are programmed to split the
* I/D SRAM into IRAM/DRAM. All PMPCFG entries are available.
*
* 4) Ideally, the PMPADDR 0-2 entries should be configured twice: once during bootloader startup and again during app startup.
* However, the CPU currently always executes in machine mode, and to enforce these permissions in machine mode we need
* to set the Lock (L) bit; once set, it cannot be reconfigured. So, we only configure PMPADDR 0-2 during app startup.
*/
const unsigned NONE = PMP_L ;
const unsigned R = PMP_L | PMP_R;
const unsigned X = PMP_L | PMP_X;
const unsigned RW = PMP_L | PMP_R | PMP_W;
const unsigned RX = PMP_L | PMP_R | PMP_X;
const unsigned RWX = PMP_L | PMP_R | PMP_W | PMP_X;
/* There are 3 configuration scenarios for PMPADDR 0-2
*
* 1. Bootloader build:
* - We cannot set the lock bit as we need to reconfigure it again for the application.
* We configure PMPADDR 0-1 to cover the entire valid IRAM range and PMPADDR 2-3 to cover the entire valid DRAM range.
*
* 2. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT enabled
* - We split the SRAM into IRAM and DRAM such that the IRAM region cannot be accessed via the DBUS
* and the DRAM region cannot be accessed via the IBUS. We use the _iram_end and _data_start markers to set the boundaries.
* We also lock these entries so that the R/W/X permissions are enforced even in machine mode
*
* 3. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT disabled
* - The IRAM-DRAM split is not enabled, so we only need to ensure that accesses to valid address ranges succeed.
* For that, we set PMPADDR 0-1 to cover the entire valid IRAM range and PMPADDR 2-3 to cover the entire DRAM region.
* We also lock these entries so that the R/W/X permissions are enforced even in machine mode
*
* PMPADDR 3-15 are hard-coded and are applicable to both the bootloader and the application, so we configure and lock
* these during the BOOTLOADER build itself. During the application build, reconfiguration of these PMPADDR entries
* is silently ignored by the CPU
*/
// 1. IRAM
PMP_ENTRY_SET(0, SOC_DIRAM_IRAM_LOW, CONDITIONAL_NONE);
PMP_ENTRY_SET(1, IRAM_END, PMP_TOR | CONDITIONAL_RX);
// 2. DRAM
PMP_ENTRY_SET(2, DRAM_START, CONDITIONAL_NONE);
PMP_ENTRY_CFG_SET(3, PMP_TOR | CONDITIONAL_RW);
// 3. Debug region
PMP_ENTRY_CFG_SET(4, PMP_NAPOT | RWX);
// 4. DROM (flash dcache)
PMP_ENTRY_CFG_SET(5, PMP_NAPOT | R);
// 5. DROM_MASK
PMP_ENTRY_CFG_SET(6, NONE);
PMP_ENTRY_CFG_SET(7, PMP_TOR | R);
// 6. IROM_MASK
PMP_ENTRY_CFG_SET(8, NONE);
PMP_ENTRY_CFG_SET(9, PMP_TOR | RX);
// 7. IROM (flash icache)
PMP_ENTRY_CFG_SET(10, PMP_NAPOT | RX);
// 8. Peripheral addresses
PMP_ENTRY_CFG_SET(11, PMP_NAPOT | RW);
// 9. SRAM (used as ICache)
PMP_ENTRY_CFG_SET(12, PMP_NAPOT | X);
// 10. No access to any remaining address (0x0-0xFFFF_FFFF)
PMP_ENTRY_CFG_SET(13, PMP_NA4 | NONE);// last 4 bytes(0xFFFFFFFC)
PMP_ENTRY_CFG_SET(14, NONE);
PMP_ENTRY_CFG_SET(15, PMP_TOR | NONE);
}
#endif
/* ---------------------------------------------------- Debugging ------------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
// --------------- Breakpoints/Watchpoints -----------------
#if SOC_CPU_BREAKPOINTS_NUM > 0
esp_err_t esp_cpu_set_breakpoint(int bp_num, const void *bp_addr)
{
/*
Todo:
- Check that bp_num is in range
*/
#if __XTENSA__
xt_utils_set_breakpoint(bp_num, (uint32_t)bp_addr);
#else
if (esp_cpu_dbgr_is_attached()) {
/* If we want to set breakpoint which when hit transfers control to debugger
* we need to set `action` in `mcontrol` to 1 (Enter Debug Mode).
* That `action` value is supported only when `dmode` of `tdata1` is set.
* But `dmode` can be modified by debugger only (from Debug Mode).
*
* So when debugger is connected we use special syscall to ask it to set breakpoint for us.
*/
long args[] = {true, bp_num, (long)bp_addr};
int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
if (ret == 0) {
return ESP_ERR_INVALID_RESPONSE;
}
}
rv_utils_set_breakpoint(bp_num, (uint32_t)bp_addr);
#endif // __XTENSA__
return ESP_OK;
}
esp_err_t esp_cpu_clear_breakpoint(int bp_num)
{
/*
Todo:
- Check if the bp_num is valid
*/
#if __XTENSA__
xt_utils_clear_breakpoint(bp_num);
#else
if (esp_cpu_dbgr_is_attached()) {
// See description in esp_cpu_set_breakpoint()
long args[] = {false, bp_num};
int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
if (ret == 0) {
return ESP_ERR_INVALID_RESPONSE;
}
}
rv_utils_clear_breakpoint(bp_num);
#endif // __XTENSA__
return ESP_OK;
}
#endif // SOC_CPU_BREAKPOINTS_NUM > 0
#if SOC_CPU_WATCHPOINTS_NUM > 0
esp_err_t esp_cpu_set_watchpoint(int wp_num, const void *wp_addr, size_t size, esp_cpu_watchpoint_trigger_t trigger)
{
/*
Todo:
- Check that wp_num is in range
- Check if the wp_num is already in use
*/
// Check if size is 2^n, where n is in [0...6]
if (size < 1 || size > 64 || (size & (size - 1)) != 0) {
return ESP_ERR_INVALID_ARG;
}
bool on_read = (trigger == ESP_CPU_WATCHPOINT_LOAD || trigger == ESP_CPU_WATCHPOINT_ACCESS);
bool on_write = (trigger == ESP_CPU_WATCHPOINT_STORE || trigger == ESP_CPU_WATCHPOINT_ACCESS);
#if __XTENSA__
xt_utils_set_watchpoint(wp_num, (uint32_t)wp_addr, size, on_read, on_write);
#else
if (esp_cpu_dbgr_is_attached()) {
// See description in esp_cpu_set_breakpoint()
long args[] = {true, wp_num, (long)wp_addr, (long)size,
(long)((on_read ? ESP_SEMIHOSTING_WP_FLG_RD : 0) | (on_write ? ESP_SEMIHOSTING_WP_FLG_WR : 0))
};
int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
if (ret == 0) {
return ESP_ERR_INVALID_RESPONSE;
}
}
rv_utils_set_watchpoint(wp_num, (uint32_t)wp_addr, size, on_read, on_write);
#endif // __XTENSA__
return ESP_OK;
}
esp_err_t esp_cpu_clear_watchpoint(int wp_num)
{
/*
Todo:
- Check if the wp_num is valid
*/
#if __XTENSA__
xt_utils_clear_watchpoint(wp_num);
#else
if (esp_cpu_dbgr_is_attached()) {
// See description in esp_cpu_dbgr_is_attached()
long args[] = {false, wp_num};
int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
if (ret == 0) {
return ESP_ERR_INVALID_RESPONSE;
}
}
rv_utils_clear_watchpoint(wp_num);
#endif // __XTENSA__
return ESP_OK;
}
#endif // SOC_CPU_WATCHPOINTS_NUM > 0
/* ------------------------------------------------------ Misc ---------------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
#if __XTENSA__ && XCHAL_HAVE_S32C1I && SOC_SPIRAM_SUPPORTED
static DRAM_ATTR uint32_t external_ram_cas_lock = 0;
#endif
bool esp_cpu_compare_and_set(volatile uint32_t *addr, uint32_t compare_value, uint32_t new_value)
{
#if __XTENSA__
bool ret;
#if XCHAL_HAVE_S32C1I && SOC_SPIRAM_SUPPORTED
if (esp_ptr_external_ram((const void *)addr)) {
uint32_t intr_level;
// Atomicity is achieved by disabling interrupts and then acquiring an external RAM CAS lock
__asm__ __volatile__ ("rsil %0, " XTSTR(XCHAL_EXCM_LEVEL) "\n"
: "=r"(intr_level));
while (!xt_utils_compare_and_set(&external_ram_cas_lock, 0, 1)) {
;
}
// Now we compare and set the target address
uint32_t old_value;
old_value = *addr;
if (old_value == compare_value) {
*addr = new_value;
}
// Release the external RAM CAS lock and reenable interrupts
external_ram_cas_lock = 0;
__asm__ __volatile__ ("memw \n"
"wsr %0, ps\n"
:: "r"(intr_level));
ret = (old_value == compare_value);
} else
#endif //XCHAL_HAVE_S32C1I && SOC_SPIRAM_SUPPORTED
{
ret = xt_utils_compare_and_set(addr, compare_value, new_value);
}
return ret;
#else
// Single core targets don't have an atomic CAS instruction, so the access method is the same for internal and external RAM
return rv_utils_compare_and_set(addr, compare_value, new_value);
#endif
}

View File

@ -1,96 +0,0 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "esp_attr.h"
#include "esp_cpu.h"
#include "soc/soc.h"
#include "soc/rtc_periph.h"
#include "sdkconfig.h"
#include "hal/cpu_hal.h"
#include "hal/cpu_types.h"
#include "hal/mpu_hal.h"
#include "esp_cpu.h"
#include "hal/soc_hal.h"
#include "soc/soc_caps.h"
#include "sdkconfig.h"
void IRAM_ATTR esp_cpu_stall(int cpu_id)
{
#if SOC_CPU_CORES_NUM > 1
soc_hal_stall_core(cpu_id);
#endif
}
void IRAM_ATTR esp_cpu_unstall(int cpu_id)
{
#if SOC_CPU_CORES_NUM > 1
soc_hal_unstall_core(cpu_id);
#endif
}
void IRAM_ATTR esp_cpu_reset(int cpu_id)
{
soc_hal_reset_core(cpu_id);
}
esp_err_t IRAM_ATTR esp_cpu_set_watchpoint(int no, void *adr, int size, int flags)
{
watchpoint_trigger_t trigger;
switch (flags)
{
case ESP_CPU_WATCHPOINT_LOAD:
trigger = WATCHPOINT_TRIGGER_ON_RO;
break;
case ESP_CPU_WATCHPOINT_STORE:
trigger = WATCHPOINT_TRIGGER_ON_WO;
break;
case ESP_CPU_WATCHPOINT_ACCESS:
trigger = WATCHPOINT_TRIGGER_ON_RW;
break;
default:
return ESP_ERR_INVALID_ARG;
}
cpu_hal_set_watchpoint(no, adr, size, trigger);
return ESP_OK;
}
void IRAM_ATTR esp_cpu_clear_watchpoint(int no)
{
cpu_hal_clear_watchpoint(no);
}
bool IRAM_ATTR esp_cpu_in_ocd_debug_mode(void)
{
#if CONFIG_ESP_DEBUG_OCDAWARE
return cpu_ll_is_debugger_attached();
#else
return false; // Always return false if "OCD aware" is disabled
#endif
}
#if __XTENSA__
void esp_cpu_configure_region_protection(void)
{
/* Note: currently this is configured the same on all Xtensa targets
*
* Both chips have the address space divided into 8 regions, 512MB each.
*/
const int illegal_regions[] = {0, 4, 5, 6, 7}; // 0x00000000, 0x80000000, 0xa0000000, 0xc0000000, 0xe0000000
for (size_t i = 0; i < sizeof(illegal_regions) / sizeof(illegal_regions[0]); ++i) {
mpu_hal_set_region_access(illegal_regions[i], MPU_REGION_ILLEGAL);
}
mpu_hal_set_region_access(1, MPU_REGION_RW); // 0x20000000
}
#endif

View File

@ -8,8 +8,10 @@
#include <stdint.h>
#include <stdbool.h>
#include "esp_attr.h"
#include "esp_cpu.h"
#include "esp_memory_utils.h"
#include "hal/cpu_hal.h"
#ifdef __cplusplus
extern "C" {

View File

@ -1,107 +1,557 @@
/*
* SPDX-FileCopyrightText: 2010-2021 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _ESP_CPU_H
#define _ESP_CPU_H
#pragma once
#include <stdint.h>
#include "sdkconfig.h"
#include <stdbool.h>
#include <stddef.h>
#include "hal/cpu_hal.h"
#include <stdint.h>
#include <assert.h>
#include "soc/soc_caps.h"
#ifdef __XTENSA__
#include "xtensa/xtensa_api.h"
#include "xt_utils.h"
#elif __riscv
#include "riscv/rv_utils.h"
#endif
#include "esp_err.h"
#ifdef __cplusplus
extern "C" {
#endif
#define ESP_CPU_WATCHPOINT_LOAD 0x40000000
#define ESP_CPU_WATCHPOINT_STORE 0x80000000
#define ESP_CPU_WATCHPOINT_ACCESS 0xC0000000
typedef uint32_t esp_cpu_ccount_t;
/** @brief Read current stack pointer address
/**
* @brief CPU cycle count type
*
* This data type represents the CPU's clock cycle count
*/
static inline void *esp_cpu_get_sp(void)
typedef uint32_t esp_cpu_cycle_count_t;
/**
* @brief CPU interrupt type
*/
typedef enum {
ESP_CPU_INTR_TYPE_LEVEL,
ESP_CPU_INTR_TYPE_EDGE,
ESP_CPU_INTR_TYPE_NA,
} esp_cpu_intr_type_t;
/**
* @brief CPU interrupt descriptor
*
* Each particular CPU interrupt has an associated descriptor describing that
* particular interrupt's characteristics. Call esp_cpu_intr_get_desc() to get
* the descriptors of a particular interrupt.
*/
typedef struct {
int priority; /**< Priority of the interrupt if it has a fixed priority, (-1) if the priority is configurable. */
esp_cpu_intr_type_t type; /**< Whether the interrupt is an edge or level type interrupt, ESP_CPU_INTR_TYPE_NA if the type is configurable. */
uint32_t flags; /**< Flags indicating extra details. */
} esp_cpu_intr_desc_t;
/**
* @brief Interrupt descriptor flags of esp_cpu_intr_desc_t
*/
#define ESP_CPU_INTR_DESC_FLAG_SPECIAL 0x01 /**< The interrupt is a special interrupt (e.g., a CPU timer interrupt) */
#define ESP_CPU_INTR_DESC_FLAG_RESVD 0x02 /**< The interrupt is reserved for internal use */
/**
* @brief CPU interrupt handler type
*/
typedef void (*esp_cpu_intr_handler_t)(void *arg);
/**
* @brief CPU watchpoint trigger type
*/
typedef enum {
ESP_CPU_WATCHPOINT_LOAD,
ESP_CPU_WATCHPOINT_STORE,
ESP_CPU_WATCHPOINT_ACCESS,
} esp_cpu_watchpoint_trigger_t;
/* --------------------------------------------------- CPU Control -----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
/**
* @brief Stall a CPU core
*
* @param core_id The core's ID
*/
void esp_cpu_stall(int core_id);
/**
* @brief Resume a previously stalled CPU core
*
* @param core_id The core's ID
*/
void esp_cpu_unstall(int core_id);
/**
* @brief Reset a CPU core
*
* @param core_id The core's ID
*/
void esp_cpu_reset(int core_id);
/**
* @brief Wait for Interrupt
*
* This function causes the current CPU core to execute its Wait For Interrupt
* (WFI or equivalent) instruction. After executing this function, the CPU core
* will stop execution until an interrupt occurs.
*/
void esp_cpu_wait_for_intr(void);
/* -------------------------------------------------- CPU Registers ----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
/**
* @brief Get the current core's ID
*
* This function will return the ID of the current CPU (i.e., the CPU that calls
* this function).
*
* @return The current core's ID [0..SOC_CPU_CORES_NUM - 1]
*/
FORCE_INLINE_ATTR __attribute__((pure)) int esp_cpu_get_core_id(void)
{
return cpu_hal_get_sp();
//Note: Made "pure" to optimize for single core target
#ifdef __XTENSA__
return (int)xt_utils_get_core_id();
#else
return (int)rv_utils_get_core_id();
#endif
}
/**
* @brief Stall CPU using RTC controller
* @param cpu_id ID of the CPU to stall (0 = PRO, 1 = APP)
*/
void esp_cpu_stall(int cpu_id);
/**
* @brief Un-stall CPU using RTC controller
* @param cpu_id ID of the CPU to un-stall (0 = PRO, 1 = APP)
*/
void esp_cpu_unstall(int cpu_id);
/**
* @brief Reset CPU using RTC controller
* @param cpu_id ID of the CPU to reset (0 = PRO, 1 = APP)
*/
void esp_cpu_reset(int cpu_id);
/**
* @brief Returns true if a JTAG debugger is attached to CPU
* OCD (on chip debug) port.
* @brief Read the current stack pointer address
*
* @note If "Make exception and panic handlers JTAG/OCD aware"
* is disabled, this function always returns false.
* @return Stack pointer address
*/
bool esp_cpu_in_ocd_debug_mode(void);
static inline esp_cpu_ccount_t esp_cpu_get_ccount(void)
FORCE_INLINE_ATTR void *esp_cpu_get_sp(void)
{
return cpu_hal_get_cycle_count();
}
static inline void esp_cpu_set_ccount(esp_cpu_ccount_t val)
{
cpu_hal_set_cycle_count(val);
#ifdef __XTENSA__
return xt_utils_get_sp();
#else
return rv_utils_get_sp();
#endif
}
/**
* @brief Configure CPU to disable access to invalid memory regions
* @brief Get the current CPU core's cycle count
*
* Each CPU core maintains an internal counter (i.e., cycle count) that increments
* every CPU clock cycle.
*
* @return Current CPU's cycle count, 0 if not supported.
*/
FORCE_INLINE_ATTR esp_cpu_cycle_count_t esp_cpu_get_cycle_count(void)
{
#ifdef __XTENSA__
return (esp_cpu_cycle_count_t)xt_utils_get_cycle_count();
#else
return (esp_cpu_cycle_count_t)rv_utils_get_cycle_count();
#endif
}
/**
* @brief Set the current CPU core's cycle count
*
* Set the given value into the internal counter that increments every
* CPU clock cycle.
*
* @param cycle_count CPU cycle count
*/
FORCE_INLINE_ATTR void esp_cpu_set_cycle_count(esp_cpu_cycle_count_t cycle_count)
{
#ifdef __XTENSA__
xt_utils_set_cycle_count((uint32_t)cycle_count);
#else
rv_utils_set_cycle_count((uint32_t)cycle_count);
#endif
}
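As a usage sketch (the measurement helper below is hypothetical), the cycle count can be sampled around a code section to measure its duration in CPU cycles; unsigned subtraction stays correct across a single counter wrap:

#include "esp_cpu.h"

// Hypothetical helper: run fn() and return the CPU cycles it consumed
static uint32_t example_measure_cycles(void (*fn)(void))
{
    esp_cpu_cycle_count_t start = esp_cpu_get_cycle_count();
    fn();
    esp_cpu_cycle_count_t end = esp_cpu_get_cycle_count();
    return (uint32_t)(end - start);  // elapsed CPU cycles spent in fn()
}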
/**
* @brief Convert a program counter (PC) value to an address
*
* If the architecture does not store the true virtual address in the CPU's PC
* or return addresses, this function will convert the PC value to a virtual
* address. Otherwise, the PC is just returned
*
* @param pc PC value
* @return Virtual address
*/
FORCE_INLINE_ATTR __attribute__((pure)) void *esp_cpu_pc_to_addr(uint32_t pc)
{
#ifdef __XTENSA__
// Xtensa stores window rotation in PC[31:30]
return (void *)((pc & 0x3fffffffU) | 0x40000000U);
#else
return (void *)pc;
#endif
}
/* ------------------------------------------------- CPU Interrupts ----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
// ---------------- Interrupt Descriptors ------------------
/**
* @brief Get a CPU interrupt's descriptor
*
* Each CPU interrupt has a descriptor describing the interrupt's capabilities
* and restrictions. This function gets the descriptor of a particular interrupt
* on a particular CPU.
*
* @param[in] core_id The core's ID
* @param[in] intr_num Interrupt number
* @param[out] intr_desc_ret The interrupt's descriptor
*/
void esp_cpu_intr_get_desc(int core_id, int intr_num, esp_cpu_intr_desc_t *intr_desc_ret);
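A usage sketch of the descriptor API, e.g. scanning for an interrupt on the current core that is neither reserved nor special and can be used as a level interrupt (the helper and its selection criteria are illustrative only):

#include "esp_cpu.h"

// Hypothetical search for a usable level-type interrupt on the calling core
static int example_find_free_level_intr(void)
{
    const int core_id = esp_cpu_get_core_id();
    for (int i = 0; i < SOC_CPU_INTR_NUM; i++) {
        esp_cpu_intr_desc_t desc;
        esp_cpu_intr_get_desc(core_id, i, &desc);
        // Skip reserved/special interrupts on this core
        if (desc.flags & (ESP_CPU_INTR_DESC_FLAG_RESVD | ESP_CPU_INTR_DESC_FLAG_SPECIAL)) {
            continue;
        }
        // Accept fixed level-type interrupts or ones whose type is configurable
        if (desc.type == ESP_CPU_INTR_TYPE_LEVEL || desc.type == ESP_CPU_INTR_TYPE_NA) {
            return i;
        }
    }
    return -1;  // no suitable interrupt found
}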
// --------------- Interrupt Configuration -----------------
/**
* @brief Set the base address of the current CPU's Interrupt Vector Table (IVT)
*
* @param ivt_addr Interrupt Vector Table's base address
*/
FORCE_INLINE_ATTR void esp_cpu_intr_set_ivt_addr(const void *ivt_addr)
{
#ifdef __XTENSA__
xt_utils_set_vecbase((uint32_t)ivt_addr);
#else
rv_utils_set_mtvec((uint32_t)ivt_addr);
#endif
}
#if SOC_CPU_HAS_FLEXIBLE_INTC
/**
* @brief Set the interrupt type of a particular interrupt
*
* Set the interrupt type (Level or Edge) of a particular interrupt on the
* current CPU.
*
* @param intr_num Interrupt number (from 0 to 31)
* @param intr_type The interrupt's type
*/
FORCE_INLINE_ATTR void esp_cpu_intr_set_type(int intr_num, esp_cpu_intr_type_t intr_type)
{
assert(intr_num >= 0 && intr_num < SOC_CPU_INTR_NUM);
enum intr_type type = (intr_type == ESP_CPU_INTR_TYPE_LEVEL) ? INTR_TYPE_LEVEL : INTR_TYPE_EDGE;
esprv_intc_int_set_type(intr_num, type);
}
/**
* @brief Get the current configured type of a particular interrupt
*
* Get the currently configured type (i.e., level or edge) of a particular
* interrupt on the current CPU.
*
* @param intr_num Interrupt number (from 0 to 31)
* @return Interrupt type
*/
FORCE_INLINE_ATTR esp_cpu_intr_type_t esp_cpu_intr_get_type(int intr_num)
{
assert(intr_num >= 0 && intr_num < SOC_CPU_INTR_NUM);
enum intr_type type = esprv_intc_int_get_type(intr_num);
return (type == INTR_TYPE_LEVEL) ? ESP_CPU_INTR_TYPE_LEVEL : ESP_CPU_INTR_TYPE_EDGE;
}
/**
* @brief Set the priority of a particular interrupt
*
* Set the priority of a particular interrupt on the current CPU.
*
* @param intr_num Interrupt number (from 0 to 31)
* @param intr_priority The interrupt's priority
*/
FORCE_INLINE_ATTR void esp_cpu_intr_set_priority(int intr_num, int intr_priority)
{
assert(intr_num >= 0 && intr_num < SOC_CPU_INTR_NUM);
esprv_intc_int_set_priority(intr_num, intr_priority);
}
/**
* @brief Get the current configured priority of a particular interrupt
*
* Get the currently configured priority of a particular interrupt on the
* current CPU.
*
* @param intr_num Interrupt number (from 0 to 31)
* @return Interrupt's priority
*/
FORCE_INLINE_ATTR int esp_cpu_intr_get_priority(int intr_num)
{
assert(intr_num >= 0 && intr_num < SOC_CPU_INTR_NUM);
return esprv_intc_int_get_priority(intr_num);
}
#endif // SOC_CPU_HAS_FLEXIBLE_INTC
/**
* @brief Check if a particular interrupt already has a handler function
*
* Check if a particular interrupt on the current CPU already has a handler
* function assigned.
*
* @note This function simply checks if the IVT of the current CPU already has
* a handler assigned.
* @param intr_num Interrupt number (from 0 to 31)
* @return True if the interrupt has a handler function, false otherwise.
*/
FORCE_INLINE_ATTR bool esp_cpu_intr_has_handler(int intr_num)
{
assert(intr_num >= 0 && intr_num < SOC_CPU_INTR_NUM);
bool has_handler;
#ifdef __XTENSA__
has_handler = xt_int_has_handler(intr_num, esp_cpu_get_core_id());
#else
has_handler = intr_handler_get(intr_num);
#endif
return has_handler;
}
/**
* @brief Set the handler function of a particular interrupt
*
* Assign a handler function (i.e., ISR) to a particular interrupt on the
* current CPU.
*
* @note This function simply sets the handler function (in the IVT) and does
* not actually enable the interrupt.
* @param intr_num Interrupt number (from 0 to 31)
* @param handler Handler function
* @param handler_arg Argument passed to the handler function
*/
FORCE_INLINE_ATTR void esp_cpu_intr_set_handler(int intr_num, esp_cpu_intr_handler_t handler, void *handler_arg)
{
assert(intr_num >= 0 && intr_num < SOC_CPU_INTR_NUM);
#ifdef __XTENSA__
xt_set_interrupt_handler(intr_num, (xt_handler)handler, handler_arg);
#else
intr_handler_set(intr_num, (intr_handler_t)handler, handler_arg);
#endif
}
/**
* @brief Get the argument of a particular interrupt's handler function
*
* Get the argument of a previously assigned handler function on the current CPU.
*
* @param intr_num Interrupt number (from 0 to 31)
* @return The argument passed to the handler function
*/
FORCE_INLINE_ATTR void *esp_cpu_intr_get_handler_arg(int intr_num)
{
assert(intr_num >= 0 && intr_num < SOC_CPU_INTR_NUM);
void *handler_arg;
#ifdef __XTENSA__
handler_arg = xt_get_interrupt_handler_arg(intr_num);
#else
handler_arg = intr_handler_get_arg(intr_num);
#endif
return handler_arg;
}
// ------------------ Interrupt Control --------------------
/**
* @brief Enable particular interrupts on the current CPU
*
* @param intr_mask Bit mask of the interrupts to enable
*/
FORCE_INLINE_ATTR void esp_cpu_intr_enable(uint32_t intr_mask)
{
#ifdef __XTENSA__
xt_ints_on(intr_mask);
#else
rv_utils_intr_enable(intr_mask);
#endif
}
/**
* @brief Disable particular interrupts on the current CPU
*
* @param intr_mask Bit mask of the interrupts to disable
*/
FORCE_INLINE_ATTR void esp_cpu_intr_disable(uint32_t intr_mask)
{
#ifdef __XTENSA__
xt_ints_off(intr_mask);
#else
rv_utils_intr_disable(intr_mask);
#endif
}
/**
* @brief Get the enabled interrupts on the current CPU
*
* @return Bit mask of the enabled interrupts
*/
FORCE_INLINE_ATTR uint32_t esp_cpu_intr_get_enabled_mask(void)
{
#ifdef __XTENSA__
return xt_utils_intr_get_enabled_mask();
#else
return rv_utils_intr_get_enabled_mask();
#endif
}
/**
* @brief Acknowledge an edge interrupt
*
* @param intr_num Interrupt number (from 0 to 31)
*/
FORCE_INLINE_ATTR void esp_cpu_intr_edge_ack(int intr_num)
{
assert(intr_num >= 0 && intr_num < SOC_CPU_INTR_NUM);
#ifdef __XTENSA__
xthal_set_intclear(1 << intr_num);
#else
rv_utils_intr_edge_ack(intr_num);
#endif
}
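Putting the configuration and control calls together, a sketch of installing and enabling an interrupt handler on the current core (the handler and the chosen interrupt number are hypothetical; the type/priority calls only exist on targets with SOC_CPU_HAS_FLEXIBLE_INTC):

#include "esp_cpu.h"

// Hypothetical ISR; a real handler would service and acknowledge its source
static void example_isr(void *arg)
{
    (void)arg;
}

static void example_install_intr(int intr_num)
{
    esp_cpu_intr_set_handler(intr_num, example_isr, NULL);   // hook the handler into the IVT
#if SOC_CPU_HAS_FLEXIBLE_INTC
    esp_cpu_intr_set_type(intr_num, ESP_CPU_INTR_TYPE_LEVEL);
    esp_cpu_intr_set_priority(intr_num, 1);
#endif
    esp_cpu_intr_enable(1U << intr_num);                     // unmask it on this core
}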
/* -------------------------------------------------- Memory Ports -----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
/**
* @brief Configure the CPU to disable access to invalid memory regions
*/
void esp_cpu_configure_region_protection(void);
/**
* @brief Set a watchpoint to break/panic when a certain memory range is accessed.
/* ---------------------------------------------------- Debugging ------------------------------------------------------
*
* @param no Watchpoint number. On the ESP32, this can be 0 or 1.
* @param adr Base address to watch
* @param size Size of the region, starting at the base address, to watch. Must
* be one of 2^n, with n in [0..6].
* @param flags One of ESP_CPU_WATCHPOINT_* flags
*
* @return ESP_ERR_INVALID_ARG on invalid arg, ESP_OK otherwise
*
* @warning The ESP32 watchpoint hardware watches a region of bytes by effectively
* masking away the lower n bits for a region with size 2^n. If adr does
* not have zero for these lower n bits, you may not be watching the
* region you intended.
*/
esp_err_t esp_cpu_set_watchpoint(int no, void *adr, int size, int flags);
* ------------------------------------------------------------------------------------------------------------------ */
// --------------- Breakpoints/Watchpoints -----------------
#if SOC_CPU_BREAKPOINTS_NUM > 0
/**
* @brief Clear a watchpoint
*
* @param no Watchpoint to clear
* @brief Set and enable a hardware breakpoint on the current CPU
*
* @note This function is meant to be called by the panic handler to set a
* breakpoint for an attached debugger during a panic.
* @note Overwrites previously set breakpoint with same breakpoint number.
* @param bp_num Hardware breakpoint number [0..SOC_CPU_BREAKPOINTS_NUM - 1]
* @param bp_addr Address to set a breakpoint on
* @return ESP_OK if breakpoint is set. Failure otherwise
*/
void esp_cpu_clear_watchpoint(int no);
esp_err_t esp_cpu_set_breakpoint(int bp_num, const void *bp_addr);
/**
* @brief Clear a hardware breakpoint on the current CPU
*
* @note Clears a breakpoint regardless of whether it was previously set
* @param bp_num Hardware breakpoint number [0..SOC_CPU_BREAKPOINTS_NUM - 1]
* @return ESP_OK if breakpoint is cleared. Failure otherwise
*/
esp_err_t esp_cpu_clear_breakpoint(int bp_num);
#endif // SOC_CPU_BREAKPOINTS_NUM > 0
/**
* @brief Set and enable a hardware watchpoint on the current CPU
*
* Set and enable a hardware watchpoint on the current CPU, specifying the
* memory range and trigger operation. Watchpoints will break/panic the CPU when
* the CPU accesses (according to the trigger type) on a certain memory range.
*
* @note Overwrites previously set watchpoint with same watchpoint number.
* @param wp_num Hardware watchpoint number [0..SOC_CPU_WATCHPOINTS_NUM - 1]
* @param wp_addr Watchpoint's base address
* @param size Size of the region to watch. Must be one of 2^n, with n in [0..6].
* @param trigger Trigger type
* @return ESP_ERR_INVALID_ARG on invalid arg, ESP_OK otherwise
*/
esp_err_t esp_cpu_set_watchpoint(int wp_num, const void *wp_addr, size_t size, esp_cpu_watchpoint_trigger_t trigger);
/**
* @brief Clear a hardware watchpoint on the current CPU
*
* @note Clears a watchpoint regardless of whether it was previously set
* @param wp_num Hardware watchpoint number [0..SOC_CPU_WATCHPOINTS_NUM - 1]
* @return ESP_OK if watchpoint was cleared. Failure otherwise.
*/
esp_err_t esp_cpu_clear_watchpoint(int wp_num);
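A usage sketch: break/panic on any write to a 4-byte variable (the variable and watchpoint number are illustrative; since the hardware effectively masks the low address bits for a 2^n-sized region, the watched object should be aligned to that size):

#include <stdint.h>
#include "esp_err.h"
#include "esp_cpu.h"

// Hypothetical 4-byte variable watched for stores, aligned to the watched size
static volatile uint32_t s_watched __attribute__((aligned(4)));

static void example_arm_watchpoint(void)
{
    // Watchpoint 0, 4 bytes, trigger on stores only
    ESP_ERROR_CHECK(esp_cpu_set_watchpoint(0, (const void *)&s_watched, 4, ESP_CPU_WATCHPOINT_STORE));
}

static void example_disarm_watchpoint(void)
{
    ESP_ERROR_CHECK(esp_cpu_clear_watchpoint(0));
}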
// ---------------------- Debugger -------------------------
/**
* @brief Check if the current CPU has a debugger attached
*
* @return True if debugger is attached, false otherwise
*/
FORCE_INLINE_ATTR bool esp_cpu_dbgr_is_attached(void)
{
#ifdef __XTENSA__
return xt_utils_dbgr_is_attached();
#else
return rv_utils_dbgr_is_attached();
#endif
}
/**
* @brief Trigger a call to the current CPU's attached debugger
*/
FORCE_INLINE_ATTR void esp_cpu_dbgr_break(void)
{
#ifdef __XTENSA__
xt_utils_dbgr_break();
#else
rv_utils_dbgr_break();
#endif
}
/* ------------------------------------------------------ Misc ---------------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
/**
* @brief Atomic compare-and-set operation
*
* @param addr Address of atomic variable
* @param compare_value Value to compare the atomic variable to
* @param new_value New value to set the atomic variable to
* @return Whether the atomic variable was set or not
*/
bool esp_cpu_compare_and_set(volatile uint32_t *addr, uint32_t compare_value, uint32_t new_value);
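As a usage sketch, a minimal test-and-set lock built on this primitive (the lock variable and spin policy are illustrative; production code should prefer the existing spinlock/portMUX facilities):

#include <stdint.h>
#include "esp_cpu.h"

static volatile uint32_t s_example_lock = 0;  // 0 = free, 1 = taken

static void example_lock_acquire(void)
{
    // Spin until the lock atomically transitions from 0 (free) to 1 (taken)
    while (!esp_cpu_compare_and_set(&s_example_lock, 0, 1)) {
        ;
    }
}

static void example_lock_release(void)
{
    s_example_lock = 0;  // release the lock; no CAS needed on the release path in this sketch
}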
/* ---------------------------------------------------- Deprecate ------------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
/*
[refactor-todo] Make these deprecated inline
*/
typedef esp_cpu_cycle_count_t esp_cpu_ccount_t;
#define esp_cpu_get_ccount() esp_cpu_get_cycle_count()
#define esp_cpu_set_ccount(ccount) esp_cpu_set_cycle_count(ccount)
/**
* @brief Returns true if a JTAG debugger is attached to CPU OCD (on chip debug) port.
*
* [refactor-todo] See if this can be replaced with esp_cpu_dbgr_is_attached directly
*
* @note Always returns false if CONFIG_ESP_DEBUG_OCDAWARE is not enabled
*/
FORCE_INLINE_ATTR bool esp_cpu_in_ocd_debug_mode(void)
{
#if CONFIG_ESP_DEBUG_OCDAWARE
return esp_cpu_dbgr_is_attached();
#else // CONFIG_ESP_DEBUG_OCDAWARE
return false; // Always return false if "OCD aware" is disabled
#endif // CONFIG_ESP_DEBUG_OCDAWARE
}
#ifdef __cplusplus
}
#endif
#endif // _ESP_CPU_H

View File

@ -6,19 +6,27 @@
#pragma once
/*
Note: This is a compatibility header. Call the interfaces in esp_cpu.h instead
[refactor-todo]: Mark all API in this header as deprecated
*/
#include <stdint.h>
#include <stdbool.h>
#include "esp_err.h"
#include <stddef.h>
#include "soc/soc_caps.h"
#include "hal/cpu_types.h"
#include "hal/cpu_ll.h"
#include "esp_cpu.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef enum {
WATCHPOINT_TRIGGER_ON_RO = ESP_CPU_WATCHPOINT_LOAD, // on read
WATCHPOINT_TRIGGER_ON_WO = ESP_CPU_WATCHPOINT_STORE, // on write
WATCHPOINT_TRIGGER_ON_RW = ESP_CPU_WATCHPOINT_ACCESS, // on either read or write
} watchpoint_trigger_t;
/**
* Return the ID of the core currently executing this code.
*
@ -45,7 +53,7 @@ extern "C" {
* Set the given value into the internal counter that increments
* every processor-clock cycle.
*/
#define cpu_hal_set_cycle_count(val) cpu_ll_set_cycle_count(val)
#define cpu_hal_set_cycle_count(val) cpu_ll_set_cycle_count(val)
/**
* Check if some form of debugger is attached to CPU.
@ -80,14 +88,19 @@ extern "C" {
* @param id breakpoint to set [0..SOC_CPU_BREAKPOINTS_NUM - 1]
* @param addr address to set a breakpoint on
*/
void cpu_hal_set_breakpoint(int id, const void* addr);
static inline void cpu_hal_set_breakpoint(int id, const void *addr)
{
esp_cpu_set_breakpoint(id, addr);
}
/**
* Clear and disable breakpoint.
*
* @param id breakpoint to clear [0..SOC_CPU_BREAKPOINTS_NUM - 1]
*/
void cpu_hal_clear_breakpoint(int id);
static inline void cpu_hal_clear_breakpoint(int id)
{
esp_cpu_clear_breakpoint(id);
}
#endif // SOC_CPU_BREAKPOINTS_NUM > 0
@ -101,14 +114,20 @@ void cpu_hal_clear_breakpoint(int id);
* @param size number of bytes from starting address to watch
* @param trigger operation on specified memory range that triggers the watchpoint (read, write, read/write)
*/
void cpu_hal_set_watchpoint(int id, const void* addr, size_t size, watchpoint_trigger_t trigger);
static inline void cpu_hal_set_watchpoint(int id, const void *addr, size_t size, watchpoint_trigger_t trigger)
{
esp_cpu_set_watchpoint(id, addr, size, (esp_cpu_watchpoint_trigger_t)trigger);
}
/**
* Clear and disable watchpoint.
*
* @param id watchpoint to clear [0..SOC_CPU_WATCHPOINTS_NUM - 1]
*/
void cpu_hal_clear_watchpoint(int id);
static inline void cpu_hal_clear_watchpoint(int id)
{
esp_cpu_clear_watchpoint(id);
}
#endif // SOC_CPU_WATCHPOINTS_NUM > 0
@ -117,7 +136,10 @@ void cpu_hal_clear_watchpoint(int id);
*
* @param base address to move the exception vector table to
*/
void cpu_hal_set_vecbase(const void* base);
static inline void cpu_hal_set_vecbase(const void *base)
{
esp_cpu_intr_set_ivt_addr(base);
}
#ifdef __cplusplus
}

View File

@ -0,0 +1,168 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
/*
Note: This is a compatibility header. Call the interfaces in esp_cpu.h instead
[refactor-todo]: Mark all API in this header as deprecated
*/
#include <stdint.h>
#include <stdbool.h>
#include "soc/soc_caps.h"
#include "esp_attr.h"
#include "esp_cpu.h"
#ifdef __cplusplus
extern "C" {
#endif
FORCE_INLINE_ATTR __attribute__((pure)) uint32_t cpu_ll_get_core_id(void)
{
return esp_cpu_get_core_id();
}
FORCE_INLINE_ATTR uint32_t cpu_ll_get_cycle_count(void)
{
return (uint32_t)esp_cpu_get_ccount();
}
FORCE_INLINE_ATTR void cpu_ll_set_cycle_count(uint32_t val)
{
esp_cpu_set_cycle_count((esp_cpu_ccount_t)val);
}
FORCE_INLINE_ATTR void *cpu_ll_get_sp(void)
{
return esp_cpu_get_sp();
}
FORCE_INLINE_ATTR void cpu_ll_init_hwloop(void)
{
; // Nothing to do. Contents moved to bootloader directly
}
#if SOC_CPU_BREAKPOINTS_NUM > 0
FORCE_INLINE_ATTR void cpu_ll_set_breakpoint(int id, uint32_t pc)
{
esp_cpu_set_breakpoint(id, (const void *)pc);
}
FORCE_INLINE_ATTR void cpu_ll_clear_breakpoint(int id)
{
esp_cpu_clear_breakpoint(id);
}
#endif // SOC_CPU_BREAKPOINTS_NUM > 0
FORCE_INLINE_ATTR __attribute__((pure)) uint32_t cpu_ll_ptr_to_pc(const void *addr)
{
return ((uint32_t) addr);
}
FORCE_INLINE_ATTR __attribute__((pure)) void *cpu_ll_pc_to_ptr(uint32_t pc)
{
return esp_cpu_pc_to_addr(pc);
}
FORCE_INLINE_ATTR void cpu_ll_set_watchpoint(int id,
const void* addr,
size_t size,
bool on_read,
bool on_write)
{
esp_cpu_watchpoint_trigger_t trigger;
if (on_read && on_write) {
trigger = ESP_CPU_WATCHPOINT_ACCESS;
} else if (on_read) {
trigger = ESP_CPU_WATCHPOINT_LOAD;
} else {
trigger = ESP_CPU_WATCHPOINT_STORE;
}
esp_cpu_set_watchpoint(id, addr, size, trigger);
}
FORCE_INLINE_ATTR void cpu_ll_clear_watchpoint(int id)
{
esp_cpu_clear_watchpoint(id);
}
FORCE_INLINE_ATTR bool cpu_ll_is_debugger_attached(void)
{
return esp_cpu_dbgr_is_attached();
}
FORCE_INLINE_ATTR void cpu_ll_break(void)
{
esp_cpu_dbgr_break();
}
FORCE_INLINE_ATTR void cpu_ll_set_vecbase(const void *base)
{
esp_cpu_intr_set_ivt_addr(base);
}
FORCE_INLINE_ATTR void cpu_ll_waiti(void)
{
esp_cpu_wait_for_intr();
}
FORCE_INLINE_ATTR void cpu_ll_compare_and_set_native(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
{
#ifdef __clang_analyzer__
//Teach clang-tidy that "addr" and "set" cannot be const as they can both be updated by S32C1I instruction
volatile uint32_t temp;
temp = *addr;
*addr = temp;
temp = *set;
*set = temp;
#endif
#ifdef __XTENSA__
#if XCHAL_HAVE_S32C1I
__asm__ __volatile__ (
"WSR %2, SCOMPARE1 \n"
"S32C1I %0, %1, 0 \n"
:"=r"(*set)
:"r"(addr), "r"(compare), "0"(*set)
);
#else // XCHAL_HAVE_S32C1I
uint32_t old_value;
// No S32C1I, so do this by disabling and re-enabling interrupts (slower)
uint32_t intlevel;
__asm__ __volatile__ ("rsil %0, " XTSTR(XCHAL_EXCM_LEVEL) "\n"
: "=r"(intlevel));
old_value = *addr;
if (old_value == compare) {
*addr = *set;
}
__asm__ __volatile__ ("memw \n"
"wsr %0, ps\n"
:: "r"(intlevel));
*set = old_value;
#endif // XCHAL_HAVE_S32C1I
#else
uint32_t old_value;
unsigned old_mstatus = RV_CLEAR_CSR(mstatus, MSTATUS_MIE);
old_value = *addr;
if (old_value == compare) {
*addr = *set;
}
RV_SET_CSR(mstatus, old_mstatus & MSTATUS_MIE);
*set = old_value;
#endif
}
#ifdef __cplusplus
}
#endif
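The in/out convention of the compare-and-set wrapper above is easy to misread: the third argument supplies the new value and also receives the old one. A minimal sketch (not part of this merge; the lock variable and the 0/1 encoding are illustrative only):

#include <stdint.h>
#include <stdbool.h>
#include "hal/cpu_ll.h"    // compatibility header shown above

static bool try_take_lock(volatile uint32_t *lock)
{
    uint32_t set = 1;                              // value to store if *lock == 0
    cpu_ll_compare_and_set_native(lock, 0, &set);  // on return, 'set' holds the previous value of *lock
    return (set == 0);                             // previous value 0 means the lock was free and is now ours
}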

View File

@ -0,0 +1,235 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
/*
Note: This is a compatibility header. Call the interfaces in esp_cpu.h instead
[refactor-todo]: Mark all API in this header as deprecated
*/
#include <stdint.h>
#include <stdbool.h>
#include "soc/soc_caps.h"
#include "esp_attr.h"
#include "esp_cpu.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef enum {
INTDESC_NORMAL = 0,
INTDESC_RESVD,
INTDESC_SPECIAL,
} int_desc_flag_t;
typedef enum {
INTTP_LEVEL = ESP_CPU_INTR_TYPE_LEVEL,
INTTP_EDGE = ESP_CPU_INTR_TYPE_EDGE,
INTTP_NA = ESP_CPU_INTR_TYPE_NA,
} int_type_t;
typedef struct {
int level;
int_type_t type;
int_desc_flag_t cpuflags[SOC_CPU_CORES_NUM];
} int_desc_t;
typedef void (*interrupt_handler_t)(void *arg);
// ---------------- Interrupt Descriptors ------------------
/**
* @brief Gets the interrupt type given an interrupt number.
*
* @param interrupt_number Interrupt number 0 to 31
* @return interrupt type
*/
FORCE_INLINE_ATTR int_type_t interrupt_controller_hal_desc_type(int interrupt_number)
{
esp_cpu_intr_desc_t intr_desc;
esp_cpu_intr_get_desc(esp_cpu_get_core_id(), interrupt_number, &intr_desc);
return (int_type_t)intr_desc.type;
}
/**
* @brief Gets the interrupt level given an interrupt number.
*
* @param interrupt_number Interrupt number 0 to 31
* @return interrupt level bitmask
*/
FORCE_INLINE_ATTR int interrupt_controller_hal_desc_level(int interrupt_number)
{
esp_cpu_intr_desc_t intr_desc;
esp_cpu_intr_get_desc(esp_cpu_get_core_id(), interrupt_number, &intr_desc);
return intr_desc.priority;
}
/**
* @brief Gets the cpu flags given the interrupt number and target cpu.
*
* @param interrupt_number Interrupt number 0 to 31
* @param cpu_number CPU number between 0 and SOC_CPU_CORES_NUM - 1
* @return flags for that interrupt number
*/
FORCE_INLINE_ATTR int_desc_flag_t interrupt_controller_hal_desc_flags(int interrupt_number, int cpu_number)
{
esp_cpu_intr_desc_t intr_desc;
esp_cpu_intr_get_desc(cpu_number, interrupt_number, &intr_desc);
int_desc_flag_t ret;
if (intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_SPECIAL) {
ret = INTDESC_SPECIAL;
} else if (intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_RESVD) {
ret = INTDESC_RESVD;
} else {
ret = INTDESC_NORMAL;
}
return ret;
}
/**
* @brief Gets the interrupt type given an interrupt number.
*
* @param interrupt_number Interrupt number 0 to 31
* @return interrupt type
*/
FORCE_INLINE_ATTR int_type_t interrupt_controller_hal_get_type(int interrupt_number)
{
return interrupt_controller_hal_desc_type(interrupt_number);
}
/**
* @brief Gets the interrupt level given an interrupt number.
*
* @param interrupt_number Interrupt number 0 to 31
* @return interrupt level bitmask
*/
FORCE_INLINE_ATTR int interrupt_controller_hal_get_level(int interrupt_number)
{
return interrupt_controller_hal_desc_level(interrupt_number);
}
/**
* @brief Gets the cpu flags given the interrupt number and target cpu.
*
* @param interrupt_number Interrupt number 0 to 31
* @param cpu_number CPU number between 0 and SOC_CPU_CORES_NUM - 1
* @return flags for that interrupt number
*/
FORCE_INLINE_ATTR uint32_t interrupt_controller_hal_get_cpu_desc_flags(int interrupt_number, int cpu_number)
{
return (uint32_t)interrupt_controller_hal_desc_flags(interrupt_number, cpu_number);
}
// --------------- Interrupt Configuration -----------------
#if SOC_CPU_HAS_FLEXIBLE_INTC
/**
* @brief Set the type of an interrupt in the controller.
*
* @param interrupt_number Interrupt number 0 to 31
* @param type interrupt type as edge or level triggered
*/
FORCE_INLINE_ATTR void interrupt_controller_hal_set_int_type(int intr, int_type_t type)
{
esp_cpu_intr_set_type(intr, (esp_cpu_intr_type_t)type);
}
/**
 * @brief Sets the interrupt level in the interrupt controller.
*
* @param interrupt_number Interrupt number 0 to 31
* @param level priority between 1 (lowest) to 7 (highest)
*/
FORCE_INLINE_ATTR void interrupt_controller_hal_set_int_level(int intr, int level)
{
esp_cpu_intr_set_priority(intr, level);
}
#endif // SOC_CPU_HAS_FLEXIBLE_INTC
/**
* @brief checks if given interrupt number has a valid handler
*
* @param intr interrupt number ranged from 0 to 31
* @param cpu this argument is ignored
* @return true for valid handler, false otherwise
*/
FORCE_INLINE_ATTR bool interrupt_controller_hal_has_handler(int intr, int cpu)
{
(void) cpu;
return esp_cpu_intr_has_handler(intr);
}
/**
* @brief sets interrupt handler and optional argument of a given interrupt number
*
* @param intr interrupt number ranged from 0 to 31
* @param handler handler invoked when an interrupt occurs
* @param arg optional argument to pass to the handler
*/
FORCE_INLINE_ATTR void interrupt_controller_hal_set_int_handler(uint8_t intr, interrupt_handler_t handler, void *arg)
{
esp_cpu_intr_set_handler(intr, (esp_cpu_intr_handler_t)handler, arg);
}
/**
* @brief Gets argument passed to handler of a given interrupt number
*
* @param intr interrupt number ranged from 0 to 31
*
* @return argument used by handler of passed interrupt number
*/
FORCE_INLINE_ATTR void *interrupt_controller_hal_get_int_handler_arg(uint8_t intr)
{
return esp_cpu_intr_get_handler_arg(intr);
}
// ------------------ Interrupt Control --------------------
/**
* @brief enable interrupts specified by the mask
*
* @param mask bitmask of interrupts that needs to be enabled
*/
FORCE_INLINE_ATTR void interrupt_controller_hal_enable_interrupts(uint32_t mask)
{
esp_cpu_intr_enable(mask);
}
/**
* @brief disable interrupts specified by the mask
*
* @param mask bitmask of interrupts that needs to be disabled
*/
FORCE_INLINE_ATTR void interrupt_controller_hal_disable_interrupts(uint32_t mask)
{
esp_cpu_intr_disable(mask);
}
/**
* @brief Read the current interrupt mask.
*
* @return The bitmask of current interrupts
*/
FORCE_INLINE_ATTR uint32_t interrupt_controller_hal_read_interrupt_mask(void)
{
return esp_cpu_intr_get_enabled_mask();
}
/**
* @brief Acknowledge an edge-trigger interrupt by clearing its pending flag
*
* @param intr interrupt number ranged from 0 to 31
*/
FORCE_INLINE_ATTR void interrupt_controller_hal_edge_int_acknowledge(int intr)
{
esp_cpu_intr_edge_ack(intr);
}
#ifdef __cplusplus
}
#endif
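For reference, a short sketch of how the legacy per-field queries collapse into a single esp_cpu_intr_get_desc() call (not part of this merge; interrupt number 13 is illustrative only):

#include "hal/interrupt_controller_hal.h"
#include "esp_cpu.h"

static void inspect_intr(void)
{
    // Legacy style: three separate wrapper calls
    int level = interrupt_controller_hal_get_level(13);
    int_type_t type = interrupt_controller_hal_get_type(13);
    uint32_t flags = interrupt_controller_hal_get_cpu_desc_flags(13, esp_cpu_get_core_id());
    (void)level; (void)type; (void)flags;

    // New style: one descriptor fetch carrying the same information
    esp_cpu_intr_desc_t desc;
    esp_cpu_intr_get_desc(esp_cpu_get_core_id(), 13, &desc);
    // desc.priority, desc.type and desc.flags correspond to the three values above
}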

View File

@ -0,0 +1,52 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
/*
Note: This is a compatibility header. Call the interfaces in esp_cpu.h instead
[refactor-todo]: Mark all API in this header as deprecated
*/
#include "soc/soc_caps.h"
#include "hal/soc_ll.h"
#ifdef __cplusplus
extern "C" {
#endif
#if SOC_CPU_CORES_NUM > 1 // We only allow stalling/unstalling of other cores
/**
* Stall the specified CPU core.
*
* @note Has no effect if the core is already stalled - does not return an
* ESP_ERR_INVALID_STATE.
*
* @param core core to stall [0..SOC_CPU_CORES_NUM - 1]
*/
#define soc_hal_stall_core(core) soc_ll_stall_core(core)
/**
* Unstall the specified CPU core.
*
* @note Has no effect if the core is already unstalled - does not return an
* ESP_ERR_INVALID_STATE.
*
* @param core core to unstall [0..SOC_CPU_CORES_NUM - 1]
*/
#define soc_hal_unstall_core(core) soc_ll_unstall_core(core)
#endif // SOC_CPU_CORES_NUM > 1
/**
* Reset the specified core.
*
* @param core core to reset [0..SOC_CPU_CORES_NUM - 1]
*/
#define soc_hal_reset_core(core) soc_ll_reset_core((core))
#ifdef __cplusplus
}
#endif
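A minimal sketch of the stall macros next to their esp_cpu.h equivalents (not part of this merge; assumes a dual-core target, and core 1 is illustrative only):

#include "hal/soc_hal.h"
#include "esp_cpu.h"
#include "soc/soc_caps.h"

static void park_and_resume_other_core(void)
{
#if SOC_CPU_CORES_NUM > 1
    soc_hal_stall_core(1);     // legacy macro, forwards to soc_ll_stall_core()
    soc_hal_unstall_core(1);

    esp_cpu_stall(1);          // preferred calls after this refactor
    esp_cpu_unstall(1);
#endif
}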

View File

@ -0,0 +1,38 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
/*
Note: This is a compatibility header. Call the interfaces in esp_cpu.h instead
[refactor-todo]: Mark all API in this header as deprecated
*/
#include "esp_attr.h"
#include "esp_cpu.h"
#ifdef __cplusplus
extern "C" {
#endif
FORCE_INLINE_ATTR void soc_ll_stall_core(int core)
{
esp_cpu_stall(core);
}
FORCE_INLINE_ATTR void soc_ll_unstall_core(int core)
{
esp_cpu_unstall(core);
}
FORCE_INLINE_ATTR void soc_ll_reset_core(int core)
{
esp_cpu_reset(core);
}
#ifdef __cplusplus
}
#endif

View File

@ -1,7 +1,16 @@
[mapping:esp_hw_support]
archive: libesp_hw_support.a
entries:
cpu_util (noflash_text)
cpu: esp_cpu_stall (noflash)
cpu: esp_cpu_unstall (noflash)
cpu: esp_cpu_reset (noflash)
cpu: esp_cpu_wait_for_intr (noflash)
if ESP_PANIC_HANDLER_IRAM = y:
cpu: esp_cpu_set_breakpoint (noflash)
cpu: esp_cpu_clear_breakpoint (noflash)
cpu: esp_cpu_set_watchpoint (noflash)
cpu: esp_cpu_clear_watchpoint (noflash)
cpu: esp_cpu_compare_and_set (noflash)
esp_memory_utils (noflash)
rtc_clk (noflash)
rtc_init:rtc_vddsdio_set_config (noflash)

View File

@ -1,5 +1,4 @@
set(srcs "cpu_util_esp32c2.c"
"rtc_clk_init.c"
set(srcs "rtc_clk_init.c"
"rtc_clk.c"
"rtc_init.c"
"rtc_pm.c"

View File

@ -1,112 +0,0 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <assert.h>
#include "esp_cpu.h"
#include "sdkconfig.h"
#if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT && !BOOTLOADER_BUILD
extern int _iram_end;
extern int _data_start;
#define IRAM_END (int)&_iram_end
#define DRAM_START (int)&_data_start
#else
#define IRAM_END SOC_DIRAM_IRAM_HIGH
#define DRAM_START SOC_DIRAM_DRAM_LOW
#endif
#ifdef BOOTLOADER_BUILD
// Without L bit set
#define CONDITIONAL_NONE 0x0
#define CONDITIONAL_RX PMP_R | PMP_X
#define CONDITIONAL_RW PMP_R | PMP_W
#else
// With L bit set
#define CONDITIONAL_NONE NONE
#define CONDITIONAL_RX RX
#define CONDITIONAL_RW RW
#endif
void esp_cpu_configure_region_protection(void)
{
/* Notes on implementation:
*
     * 1) The ESP32-C2 CPU supports overlapping PMP regions; configuration relies on the static priority
     *    feature (the lowest-numbered entry has the highest priority).
     *
     * 2) Therefore, we use TOR (top of range) and NAPOT entries to map the effective area.
     *    Finally, any remaining address range is left without access permission.
     *
     * 3) PMPADDR entries 3-15 are hardcoded to fixed values, while PMPADDR 0-2 are programmed to split the I/D SRAM
     *    into IRAM/DRAM. All PMPCFG entries are available.
     *
     * 4) Ideally, PMPADDR entries 0-2 should be configured twice, once during bootloader startup and again during app startup.
     *    However, the CPU currently always executes in machine mode, and to enforce these permissions in machine mode we need
     *    to set the Lock (L) bit; once set, it cannot be reconfigured. So we only configure PMPADDR 0-2 during app startup.
*/
const unsigned NONE = PMP_L ;
const unsigned R = PMP_L | PMP_R;
const unsigned X = PMP_L | PMP_X;
const unsigned RW = PMP_L | PMP_R | PMP_W;
const unsigned RX = PMP_L | PMP_R | PMP_X;
const unsigned RWX = PMP_L | PMP_R | PMP_W | PMP_X;
/* There are 3 configuration scenarios for PMPADDR 0-2
*
* 1. Bootloader build:
* - We cannot set the lock bit as we need to reconfigure it again for the application.
* We configure PMPADDR 0-1 to cover entire valid IRAM range and PMPADDR 2-3 to cover entire valid DRAM range.
*
* 2. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT enabled
* - We split the SRAM into IRAM and DRAM such that IRAM region cannot be accessed via DBUS
* and DRAM region cannot be accessed via IBUS. We use _iram_end and _data_start markers to set the boundaries.
* We also lock these entries so the R/W/X permissions are enforced even for machine mode
*
* 3. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT disabled
     *    - The IRAM-DRAM split is not enabled, so we only need to ensure that accesses to valid address ranges succeed.
     *      For that, we set PMPADDR 0-1 to cover the entire valid IRAM range and PMPADDR 2-3 to cover the entire DRAM region.
* We also lock these entries so the R/W/X permissions are enforced even for machine mode
*
     * PMPADDR 3-15 are hard-coded and are applicable to both the bootloader and the application. So we configure and lock
     * these during the BOOTLOADER build itself. During the application build, reconfiguration of these PMPADDR entries
     * is silently ignored by the CPU
*/
// 1. IRAM
PMP_ENTRY_SET(0, SOC_DIRAM_IRAM_LOW, CONDITIONAL_NONE);
PMP_ENTRY_SET(1, IRAM_END, PMP_TOR | CONDITIONAL_RX);
// 2. DRAM
PMP_ENTRY_SET(2, DRAM_START, CONDITIONAL_NONE);
PMP_ENTRY_CFG_SET(3, PMP_TOR | CONDITIONAL_RW);
// 3. Debug region
PMP_ENTRY_CFG_SET(4, PMP_NAPOT | RWX);
// 4. DROM (flash dcache)
PMP_ENTRY_CFG_SET(5, PMP_NAPOT | R);
// 5. DROM_MASK
PMP_ENTRY_CFG_SET(6, NONE);
PMP_ENTRY_CFG_SET(7, PMP_TOR | R);
// 6. IROM_MASK
PMP_ENTRY_CFG_SET(8, NONE);
PMP_ENTRY_CFG_SET(9, PMP_TOR | RX);
// 7. IROM (flash icache)
PMP_ENTRY_CFG_SET(10, PMP_NAPOT | RX);
// 8. Peripheral addresses
PMP_ENTRY_CFG_SET(11, PMP_NAPOT | RW);
// 9. SRAM (used as ICache)
PMP_ENTRY_CFG_SET(12, PMP_NAPOT | X);
// 10. no access to any address below(0x0-0xFFFF_FFFF)
PMP_ENTRY_CFG_SET(13, PMP_NA4 | NONE);// last 4 bytes(0xFFFFFFFC)
PMP_ENTRY_CFG_SET(14, NONE);
PMP_ENTRY_CFG_SET(15, PMP_TOR | NONE);
}
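To make the TOR scheme described in the notes above concrete, a hedged sketch of a single locked window (not part of this merge; REGION_LOW/REGION_HIGH and entries 0-1 are illustrative placeholders, the real table is the one in the file above):

#include "esp_cpu.h"   // the PMP_* helpers used above are pulled in this way, as in the file being removed

#define REGION_LOW   0x3FC80000   // illustrative start of a read/write window
#define REGION_HIGH  0x3FCE0000   // illustrative end (exclusive)

static void map_one_rw_window(void)
{
    // Entry 0 (TOR): everything below REGION_LOW gets no access
    PMP_ENTRY_SET(0, REGION_LOW, PMP_L | PMP_TOR);
    // Entry 1 (TOR): REGION_LOW up to (but excluding) REGION_HIGH is locked read/write
    PMP_ENTRY_SET(1, REGION_HIGH, PMP_L | PMP_TOR | PMP_R | PMP_W);
}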

View File

@ -1,5 +1,4 @@
set(srcs "cpu_util_esp32c3.c"
"rtc_clk_init.c"
set(srcs "rtc_clk_init.c"
"rtc_clk.c"
"rtc_init.c"
"rtc_pm.c"

View File

@ -1,104 +0,0 @@
/*
* SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <assert.h>
#include "esp_cpu.h"
void esp_cpu_configure_region_protection(void)
{
/* Notes on implementation:
*
* 1) Note: ESP32-C3 CPU doesn't support overlapping PMP regions
*
* 2) Therefore, we use TOR (top of range) entries to map the whole address
* space, bottom to top.
*
* 3) There are not enough entries to describe all the memory regions 100% accurately.
*
* 4) This means some gaps (invalid memory) are accessible. Priority for extending regions
* to cover gaps is to extend read-only or read-execute regions or read-only regions only
* (executing unmapped addresses should always fault with invalid instruction, read-only means
* stores will correctly fault even if reads may return some invalid value.)
*
* 5) Entries are grouped in order with some static asserts to try and verify everything is
* correct.
*/
const unsigned NONE = PMP_L | PMP_TOR;
const unsigned R = PMP_L | PMP_TOR | PMP_R;
const unsigned RW = PMP_L | PMP_TOR | PMP_R | PMP_W;
const unsigned RX = PMP_L | PMP_TOR | PMP_R | PMP_X;
const unsigned RWX = PMP_L | PMP_TOR | PMP_R | PMP_W | PMP_X;
// 1. Gap at bottom of address space
PMP_ENTRY_SET(0, SOC_DEBUG_LOW, NONE);
// 2. Debug region
PMP_ENTRY_SET(1, SOC_DEBUG_HIGH, RWX);
_Static_assert(SOC_DEBUG_LOW < SOC_DEBUG_HIGH, "Invalid CPU debug region");
// 3. Gap between debug region & DROM (flash cache)
PMP_ENTRY_SET(2, SOC_DROM_LOW, NONE);
_Static_assert(SOC_DEBUG_HIGH < SOC_DROM_LOW, "Invalid PMP entry order");
// 4. DROM (flash cache)
// 5. Gap between DROM & DRAM
// (Note: To save PMP entries these two are merged into one read-only region)
PMP_ENTRY_SET(3, SOC_DRAM_LOW, R);
_Static_assert(SOC_DROM_LOW < SOC_DROM_HIGH, "Invalid DROM region");
_Static_assert(SOC_DROM_HIGH < SOC_DRAM_LOW, "Invalid PMP entry order");
// 6. DRAM
PMP_ENTRY_SET(4, SOC_DRAM_HIGH, RW);
_Static_assert(SOC_DRAM_LOW < SOC_DRAM_HIGH, "Invalid DRAM region");
// 7. Gap between DRAM and Mask DROM
// 8. Mask DROM
// (Note: to save PMP entries these two are merged into one read-only region)
PMP_ENTRY_SET(5, SOC_DROM_MASK_HIGH, R);
_Static_assert(SOC_DRAM_HIGH < SOC_DROM_MASK_LOW, "Invalid PMP entry order");
_Static_assert(SOC_DROM_MASK_LOW < SOC_DROM_MASK_HIGH, "Invalid mask DROM region");
// 9. Gap between mask DROM and mask IROM
// 10. Mask IROM
// (Note: to save PMP entries these two are merged into one RX region)
PMP_ENTRY_SET(6, SOC_IROM_MASK_HIGH, RX);
_Static_assert(SOC_DROM_MASK_HIGH < SOC_IROM_MASK_LOW, "Invalid PMP entry order");
_Static_assert(SOC_IROM_MASK_LOW < SOC_IROM_MASK_HIGH, "Invalid mask IROM region");
// 11. Gap between mask IROM & IRAM
PMP_ENTRY_SET(7, SOC_IRAM_LOW, NONE);
_Static_assert(SOC_IROM_MASK_HIGH < SOC_IRAM_LOW, "Invalid PMP entry order");
// 12. IRAM
PMP_ENTRY_SET(8, SOC_IRAM_HIGH, RWX);
_Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid IRAM region");
// 13. Gap between IRAM and IROM
// 14. IROM (flash cache)
// (Note: to save PMP entries these two are merged into one RX region)
PMP_ENTRY_SET(9, SOC_IROM_HIGH, RX);
_Static_assert(SOC_IRAM_HIGH < SOC_IROM_LOW, "Invalid PMP entry order");
_Static_assert(SOC_IROM_LOW < SOC_IROM_HIGH, "Invalid IROM region");
// 15. Gap between IROM & RTC slow memory
PMP_ENTRY_SET(10, SOC_RTC_IRAM_LOW, NONE);
_Static_assert(SOC_IROM_HIGH < SOC_RTC_IRAM_LOW, "Invalid PMP entry order");
// 16. RTC fast memory
PMP_ENTRY_SET(11, SOC_RTC_IRAM_HIGH, RWX);
_Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");
// 17. Gap between RTC fast memory & peripheral addresses
PMP_ENTRY_SET(12, SOC_PERIPHERAL_LOW, NONE);
_Static_assert(SOC_RTC_IRAM_HIGH < SOC_PERIPHERAL_LOW, "Invalid PMP entry order");
// 18. Peripheral addresses
PMP_ENTRY_SET(13, SOC_PERIPHERAL_HIGH, RW);
_Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");
// 19. End of address space
PMP_ENTRY_SET(14, UINT32_MAX, NONE); // all but last 4 bytes
PMP_ENTRY_SET(15, UINT32_MAX, PMP_L | PMP_NA4); // last 4 bytes
}

View File

@ -1,5 +1,4 @@
set(srcs "cpu_util_esp32h2.c"
"rtc_clk_init.c"
set(srcs "rtc_clk_init.c"
"rtc_clk.c"
"rtc_init.c"
"rtc_pm.c"

View File

@ -1,104 +0,0 @@
/*
* SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <assert.h>
#include "esp_cpu.h"
void esp_cpu_configure_region_protection(void)
{
/* Notes on implementation:
*
* 1) Note: ESP32-C3 CPU doesn't support overlapping PMP regions
*
* 2) Therefore, we use TOR (top of range) entries to map the whole address
* space, bottom to top.
*
* 3) There are not enough entries to describe all the memory regions 100% accurately.
*
* 4) This means some gaps (invalid memory) are accessible. Priority for extending regions
* to cover gaps is to extend read-only or read-execute regions or read-only regions only
* (executing unmapped addresses should always fault with invalid instruction, read-only means
* stores will correctly fault even if reads may return some invalid value.)
*
* 5) Entries are grouped in order with some static asserts to try and verify everything is
* correct.
*/
const unsigned NONE = PMP_L | PMP_TOR;
const unsigned R = PMP_L | PMP_TOR | PMP_R;
const unsigned RW = PMP_L | PMP_TOR | PMP_R | PMP_W;
const unsigned RX = PMP_L | PMP_TOR | PMP_R | PMP_X;
const unsigned RWX = PMP_L | PMP_TOR | PMP_R | PMP_W | PMP_X;
// 1. Gap at bottom of address space
PMP_ENTRY_SET(0, SOC_DEBUG_LOW, NONE);
// 2. Debug region
PMP_ENTRY_SET(1, SOC_DEBUG_HIGH, RWX);
_Static_assert(SOC_DEBUG_LOW < SOC_DEBUG_HIGH, "Invalid CPU debug region");
// 3. Gap between debug region & DROM (flash cache)
PMP_ENTRY_SET(2, SOC_DROM_LOW, NONE);
_Static_assert(SOC_DEBUG_HIGH < SOC_DROM_LOW, "Invalid PMP entry order");
// 4. DROM (flash cache)
// 5. Gap between DROM & DRAM
// (Note: To save PMP entries these two are merged into one read-only region)
PMP_ENTRY_SET(3, SOC_DRAM_LOW, R);
_Static_assert(SOC_DROM_LOW < SOC_DROM_HIGH, "Invalid DROM region");
_Static_assert(SOC_DROM_HIGH < SOC_DRAM_LOW, "Invalid PMP entry order");
// 6. DRAM
PMP_ENTRY_SET(4, SOC_DRAM_HIGH, RW);
_Static_assert(SOC_DRAM_LOW < SOC_DRAM_HIGH, "Invalid DRAM region");
// 7. Gap between DRAM and Mask DROM
// 8. Mask DROM
// (Note: to save PMP entries these two are merged into one read-only region)
PMP_ENTRY_SET(5, SOC_DROM_MASK_HIGH, R);
_Static_assert(SOC_DRAM_HIGH < SOC_DROM_MASK_LOW, "Invalid PMP entry order");
_Static_assert(SOC_DROM_MASK_LOW < SOC_DROM_MASK_HIGH, "Invalid mask DROM region");
// 9. Gap between mask DROM and mask IROM
// 10. Mask IROM
// (Note: to save PMP entries these two are merged into one RX region)
PMP_ENTRY_SET(6, SOC_IROM_MASK_HIGH, RX);
_Static_assert(SOC_DROM_MASK_HIGH < SOC_IROM_MASK_LOW, "Invalid PMP entry order");
_Static_assert(SOC_IROM_MASK_LOW < SOC_IROM_MASK_HIGH, "Invalid mask IROM region");
// 11. Gap between mask IROM & IRAM
PMP_ENTRY_SET(7, SOC_IRAM_LOW, NONE);
_Static_assert(SOC_IROM_MASK_HIGH < SOC_IRAM_LOW, "Invalid PMP entry order");
// 12. IRAM
PMP_ENTRY_SET(8, SOC_IRAM_HIGH, RWX);
_Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid IRAM region");
// 13. Gap between IRAM and IROM
// 14. IROM (flash cache)
// (Note: to save PMP entries these two are merged into one RX region)
PMP_ENTRY_SET(9, SOC_IROM_HIGH, RX);
_Static_assert(SOC_IRAM_HIGH < SOC_IROM_LOW, "Invalid PMP entry order");
_Static_assert(SOC_IROM_LOW < SOC_IROM_HIGH, "Invalid IROM region");
// 15. Gap between IROM & RTC slow memory
PMP_ENTRY_SET(10, SOC_RTC_IRAM_LOW, NONE);
_Static_assert(SOC_IROM_HIGH < SOC_RTC_IRAM_LOW, "Invalid PMP entry order");
// 16. RTC fast memory
PMP_ENTRY_SET(11, SOC_RTC_IRAM_HIGH, RWX);
_Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");
// 17. Gap between RTC fast memory & peripheral addresses
PMP_ENTRY_SET(12, SOC_PERIPHERAL_LOW, NONE);
_Static_assert(SOC_RTC_IRAM_HIGH < SOC_PERIPHERAL_LOW, "Invalid PMP entry order");
// 18. Peripheral addresses
PMP_ENTRY_SET(13, SOC_PERIPHERAL_HIGH, RW);
_Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");
// 19. End of address space
PMP_ENTRY_SET(14, UINT32_MAX, NONE); // all but last 4 bytes
PMP_ENTRY_SET(15, UINT32_MAX, PMP_L | PMP_NA4); // last 4 bytes
}

View File

@ -153,8 +153,13 @@ static void panic_handler(void *frame, bool pseudo_excause)
esp_panic_handler_reconfigure_wdts();
esp_rom_delay_us(1);
SOC_HAL_STALL_OTHER_CORES();
#endif
// Stall all other cores
for (uint32_t i = 0; i < SOC_CPU_CORES_NUM; i++) {
if (i != core_id) {
esp_cpu_stall(i);
}
}
#endif // !CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE
esp_ipc_isr_stall_abort();

View File

@ -23,6 +23,7 @@
#include "soc/syscon_reg.h"
#include "soc/system_reg.h"
#include "hal/wdt_hal.h"
#include "hal/cpu_hal.h"
#include "esp_private/cache_err_int.h"
#include "esp32c2/rom/cache.h"

View File

@ -24,6 +24,7 @@
#include "soc/system_reg.h"
#include "soc/uart_reg.h"
#include "hal/wdt_hal.h"
#include "hal/cpu_hal.h"
#include "esp_private/cache_err_int.h"
#include "esp32c3/rom/cache.h"

View File

@ -23,6 +23,7 @@
#include "soc/syscon_reg.h"
#include "soc/system_reg.h"
#include "hal/wdt_hal.h"
#include "hal/cpu_hal.h"
#include "esp_private/cache_err_int.h"
#include "esp32h2/rom/cache.h"

View File

@ -22,6 +22,7 @@
#include "soc/syscon_reg.h"
#include "soc/rtc_periph.h"
#include "hal/wdt_hal.h"
#include "hal/cpu_hal.h"
#include "freertos/xtensa_api.h"
#include "soc/soc_memory_layout.h"
#include "hal/cpu_hal.h"

View File

@ -21,6 +21,7 @@
#include "soc/syscon_reg.h"
#include "soc/rtc_periph.h"
#include "hal/wdt_hal.h"
#include "hal/cpu_hal.h"
#include "freertos/xtensa_api.h"
#include "soc/soc_memory_layout.h"

View File

@ -17,6 +17,7 @@
#include "portbenchmark.h"
#include "esp_macros.h"
#include "hal/cpu_hal.h"
#include "compare_set.h" /* For compare_and_set_native(). [refactor-todo] Use esp_cpu.h instead */
#include "esp_private/crosscore_int.h"
/*

View File

@ -50,6 +50,7 @@
#include "esp_heap_caps.h"
#include "esp_system.h" /* required by esp_get_...() functions in portable.h. [refactor-todo] Update portable.h */
#include "esp_newlib.h"
#include "compare_set.h" /* For compare_and_set_native(). [refactor-todo] Use esp_cpu.h instead */
/* [refactor-todo] These includes are not directly used in this file. They are kept into to prevent a breaking change. Remove these. */
#include <limits.h>

View File

@ -80,6 +80,7 @@
#include "esp_heap_caps.h"
#include "esp_rom_sys.h"
#include "esp_system.h" /* required by esp_get_...() functions in portable.h. [refactor-todo] Update portable.h */
#include "compare_set.h" /* For compare_and_set_native(). [refactor-todo] Use esp_cpu.h instead */
#include "portbenchmark.h"
/* [refactor-todo] These includes are not directly used in this file. They are kept into to prevent a breaking change. Remove these. */

View File

@ -1,7 +1,6 @@
idf_build_get_property(target IDF_TARGET)
set(srcs "mpu_hal.c"
"cpu_hal.c"
"efuse_hal.c"
"${target}/efuse_hal.c"
"mmu_hal.c")
@ -43,8 +42,6 @@ if(NOT BOOTLOADER_BUILD)
"spi_flash_hal.c"
"spi_flash_hal_iram.c"
"spi_flash_encrypt_hal_iram.c"
"soc_hal.c"
"interrupt_controller_hal.c"
"sha_hal.c"
"adc_hal_common.c"
"adc_hal.c")
@ -97,7 +94,6 @@ if(NOT BOOTLOADER_BUILD)
"aes_hal.c"
"esp32/adc_hal.c"
"esp32/brownout_hal.c"
"esp32/interrupt_descriptor_table.c"
"esp32/touch_sensor_hal.c"
"esp32/gpio_hal_workaround.c")
endif()
@ -116,7 +112,6 @@ if(NOT BOOTLOADER_BUILD)
"esp32s2/cp_dma_hal.c"
"esp32s2/touch_sensor_hal.c"
"esp32s2/dac_hal.c"
"esp32s2/interrupt_descriptor_table.c"
"usbh_hal.c")
endif()
@ -132,7 +127,6 @@ if(NOT BOOTLOADER_BUILD)
"aes_hal.c"
"esp32s3/brownout_hal.c"
"esp32s3/hmac_hal.c"
"esp32s3/interrupt_descriptor_table.c"
"esp32s3/touch_sensor_hal.c"
"esp32s3/rtc_cntl_hal.c"
"usbh_hal.c")

View File

@ -1,64 +0,0 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdint.h>
#include <stdlib.h>
#include "sdkconfig.h"
#include "esp_err.h"
#include "hal/cpu_hal.h"
#include "hal/cpu_types.h"
#include "soc/soc_caps.h"
#if SOC_CPU_BREAKPOINTS_NUM > 0
void cpu_hal_set_breakpoint(int id, const void* addr)
{
cpu_ll_set_breakpoint(id, cpu_ll_ptr_to_pc(addr));
}
void cpu_hal_clear_breakpoint(int id)
{
cpu_ll_clear_breakpoint(id);
}
#endif // SOC_CPU_BREAKPOINTS_NUM > 0
#if SOC_CPU_WATCHPOINTS_NUM > 0
void cpu_hal_set_watchpoint(int id, const void* addr, size_t size, watchpoint_trigger_t trigger)
{
bool on_read = false, on_write = false;
if (trigger == WATCHPOINT_TRIGGER_ON_RO) {
on_read = true;
} else if (trigger == WATCHPOINT_TRIGGER_ON_WO) {
on_write = true;
} else {
on_read = on_write = true;
}
cpu_ll_set_watchpoint(id, addr, size, on_read, on_write);
}
void cpu_hal_clear_watchpoint(int id)
{
cpu_ll_clear_watchpoint(id);
}
#endif // SOC_CPU_WATCHPOINTS_NUM > 0
void cpu_hal_set_vecbase(const void* base)
{
cpu_ll_set_vecbase(base);
}

View File

@ -1,190 +0,0 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdint.h>
#include "esp_attr.h"
#include "soc/soc_caps.h"
#include "xt_instr_macros.h"
#include "xtensa/config/specreg.h"
#include "xtensa/config/extreg.h"
#include "esp_bit_defs.h"
#include "xtensa/config/core.h"
#ifdef __cplusplus
extern "C" {
#endif
static inline uint32_t IRAM_ATTR cpu_ll_get_core_id(void)
{
uint32_t id;
asm volatile (
"rsr.prid %0\n"
"extui %0,%0,13,1"
:"=r"(id));
return id;
}
static inline uint32_t IRAM_ATTR cpu_ll_get_cycle_count(void)
{
uint32_t result;
RSR(CCOUNT, result);
return result;
}
static inline void IRAM_ATTR cpu_ll_set_cycle_count(uint32_t val)
{
WSR(CCOUNT, val);
}
static inline void* cpu_ll_get_sp(void)
{
void *sp;
asm volatile ("mov %0, sp;" : "=r" (sp));
return sp;
}
static inline void cpu_ll_init_hwloop(void)
{
#if XCHAL_ERRATUM_572
uint32_t memctl = XCHAL_CACHE_MEMCTL_DEFAULT;
WSR(MEMCTL, memctl);
#endif // XCHAL_ERRATUM_572
}
static inline void cpu_ll_set_breakpoint(int id, uint32_t pc)
{
uint32_t en;
// Set the break address register to the appropriate PC
if (id) {
WSR(IBREAKA_1, pc);
} else {
WSR(IBREAKA_0, pc);
}
// Enable the breakpoint using the break enable register
RSR(IBREAKENABLE, en);
en |= BIT(id);
WSR(IBREAKENABLE, en);
}
static inline void cpu_ll_clear_breakpoint(int id)
{
uint32_t en = 0;
uint32_t pc = 0;
// Set the break address register to the appropriate PC
if (id) {
WSR(IBREAKA_1, pc);
} else {
WSR(IBREAKA_0, pc);
}
// Enable the breakpoint using the break enable register
RSR(IBREAKENABLE, en);
en &= ~BIT(id);
WSR(IBREAKENABLE, en);
}
static inline uint32_t cpu_ll_ptr_to_pc(const void* addr)
{
return ((uint32_t) addr);
}
static inline void* cpu_ll_pc_to_ptr(uint32_t pc)
{
return (void*) ((pc & 0x3fffffffU) | 0x40000000U);
}
static inline void cpu_ll_set_watchpoint(int id,
const void* addr,
size_t size,
bool on_read,
bool on_write)
{
uint32_t dbreakc = 0x3F;
//We support watching 2^n byte values, from 1 to 64. Calculate the mask for that.
for (int x = 0; x < 7; x++) {
if (size == (size_t)(1U << x)) {
break;
}
dbreakc <<= 1;
}
dbreakc = (dbreakc & 0x3F);
if (on_read) {
dbreakc |= BIT(30);
}
if (on_write) {
dbreakc |= BIT(31);
}
// Write the break address register and the size to control
// register.
if (id) {
WSR(DBREAKA_1, (uint32_t) addr);
WSR(DBREAKC_1, dbreakc);
} else {
WSR(DBREAKA_0, (uint32_t) addr);
WSR(DBREAKC_0, dbreakc);
}
}
static inline void cpu_ll_clear_watchpoint(int id)
{
// Clear both break address register and control register
if (id) {
WSR(DBREAKA_1, 0);
WSR(DBREAKC_1, 0);
} else {
WSR(DBREAKA_0, 0);
WSR(DBREAKC_0, 0);
}
}
static inline bool cpu_ll_is_debugger_attached(void)
{
uint32_t dcr = 0;
uint32_t reg = DSRSET;
RER(reg, dcr);
return (dcr&0x1);
}
static inline void cpu_ll_break(void)
{
__asm__ ("break 1,15");
}
static inline void cpu_ll_set_vecbase(const void* vecbase)
{
asm volatile ("wsr %0, vecbase" :: "r" (vecbase));
}
static inline void cpu_ll_waiti(void)
{
asm volatile ("waiti 0\n");
}
static inline void cpu_ll_compare_and_set_native(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
{
__asm__ __volatile__ (
"WSR %2,SCOMPARE1 \n"
"S32C1I %0, %1, 0 \n"
:"=r"(*set)
:"r"(addr), "r"(compare), "0"(*set)
);
}
#ifdef __cplusplus
}
#endif
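The DBREAKC computation in the removed cpu_ll_set_watchpoint() above is compact; as a reference, the same byte-mask logic written as a standalone helper with a few self-checks (not part of this merge; the function name is illustrative only):

#include <stdint.h>
#include <stddef.h>
#include <assert.h>

static uint32_t dbreakc_mask_for_size(size_t size)
{
    uint32_t mask = 0x3F;                    // size 1 -> watch a single byte
    for (int x = 0; x < 7; x++) {
        if (size == (size_t)(1U << x)) {
            break;
        }
        mask <<= 1;                          // each doubling of 'size' clears one more low bit
    }
    return mask & 0x3F;                      // size 64 -> 0x00 (watch a whole 64-byte range)
}

static void check_masks(void)
{
    assert(dbreakc_mask_for_size(1) == 0x3F);
    assert(dbreakc_mask_for_size(4) == 0x3C);
    assert(dbreakc_mask_for_size(64) == 0x00);
}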

View File

@ -1,108 +0,0 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <stdint.h>
#include "soc/soc_caps.h"
#include "soc/soc.h"
#include "xtensa/xtensa_api.h"
#include "xt_instr_macros.h"
#include "xtensa/config/specreg.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief enable interrupts specified by the mask
*
* @param mask bitmask of interrupts that needs to be enabled
*/
static inline void intr_cntrl_ll_enable_interrupts(uint32_t mask)
{
xt_ints_on(mask);
}
/**
* @brief disable interrupts specified by the mask
*
* @param mask bitmask of interrupts that needs to be disabled
*/
static inline void intr_cntrl_ll_disable_interrupts(uint32_t mask)
{
xt_ints_off(mask);
}
/**
* @brief Read the current interrupt mask of the CPU running this code.
*
* @return The current interrupt bitmask.
*/
static inline uint32_t intr_cntrl_ll_read_interrupt_mask(void)
{
uint32_t int_mask;
RSR(INTENABLE, int_mask);
return int_mask;
}
/**
* @brief checks if given interrupt number has a valid handler
*
* @param intr interrupt number ranged from 0 to 31
 * @param cpu CPU number ranging between 0 and SOC_CPU_CORES_NUM - 1
* @return true for valid handler, false otherwise
*/
static inline bool intr_cntrl_ll_has_handler(uint8_t intr, uint8_t cpu)
{
return xt_int_has_handler(intr, cpu);
}
/**
* @brief sets interrupt handler and optional argument of a given interrupt number
*
* @param intr interrupt number ranged from 0 to 31
* @param handler handler invoked when an interrupt occurs
* @param arg optional argument to pass to the handler
*/
static inline void intr_cntrl_ll_set_int_handler(uint8_t intr, interrupt_handler_t handler, void *arg)
{
xt_set_interrupt_handler(intr, (xt_handler)handler, arg);
}
/**
* @brief Gets argument passed to handler of a given interrupt number
*
* @param intr interrupt number ranged from 0 to 31
*
* @return argument used by handler of passed interrupt number
*/
static inline void *intr_cntrl_ll_get_int_handler_arg(uint8_t intr)
{
return xt_get_interrupt_handler_arg(intr);
}
/**
* @brief Acknowledge an edge-trigger interrupt by clearing its pending flag
*
* @param intr interrupt number ranged from 0 to 31
*/
static inline void intr_cntrl_ll_edge_int_acknowledge(int intr)
{
xthal_set_intclear(1 << intr);
}
#ifdef __cplusplus
}
#endif

View File

@ -1,54 +0,0 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "soc/soc.h"
#include "soc/rtc_cntl_reg.h"
#include "soc/soc_caps.h"
#include "soc/rtc.h"
#ifdef __cplusplus
extern "C" {
#endif
static inline void soc_ll_stall_core(int core)
{
const int rtc_cntl_c1_m[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C1_M, RTC_CNTL_SW_STALL_APPCPU_C1_M};
const int rtc_cntl_c1_s[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C1_S, RTC_CNTL_SW_STALL_APPCPU_C1_S};
const int rtc_cntl_c0_m[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C0_M, RTC_CNTL_SW_STALL_APPCPU_C0_M};
const int rtc_cntl_c0_s[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C0_S, RTC_CNTL_SW_STALL_APPCPU_C0_S};
CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1_m[core]);
SET_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, 0x21 << rtc_cntl_c1_s[core]);
CLEAR_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_c0_m[core]);
SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, 2 << rtc_cntl_c0_s[core]);
}
static inline void soc_ll_unstall_core(int core)
{
const int rtc_cntl_c1_m[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C1_M, RTC_CNTL_SW_STALL_APPCPU_C1_M};
const int rtc_cntl_c0_m[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C0_M, RTC_CNTL_SW_STALL_APPCPU_C0_M};
CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1_m[core]);
CLEAR_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_c0_m[core]);
}
static inline void soc_ll_reset_core(int core)
{
SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG,
core == 0 ? RTC_CNTL_SW_PROCPU_RST_M : RTC_CNTL_SW_APPCPU_RST_M);
}
#ifdef __cplusplus
}
#endif

View File

@ -1,75 +0,0 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "sdkconfig.h"
#include "hal/interrupt_controller_hal.h"
#include "hal/interrupt_controller_ll.h"
#include "soc/soc_caps.h"
#include "soc/soc.h"
//We should mark the interrupt for the timer used by FreeRTOS as reserved. The specific timer
//is selectable using menuconfig; we use these cpp bits to convert that into something we can use in
//the table below.
#if CONFIG_FREERTOS_CORETIMER_0
#define INT6RES INTDESC_RESVD
#else
#define INT6RES INTDESC_SPECIAL
#endif
#if CONFIG_FREERTOS_CORETIMER_1
#define INT15RES INTDESC_RESVD
#else
#define INT15RES INTDESC_SPECIAL
#endif
//This is basically a software-readable version of the interrupt usage table in include/soc/soc.h
const static int_desc_t interrupt_descriptor_table [32]={
{ 1, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_RESVD } }, //0
{ 1, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_RESVD } }, //1
{ 1, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //2
{ 1, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //3
{ 1, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_NORMAL} }, //4
{ 1, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_RESVD } }, //5
{ 1, INTTP_NA, {INT6RES, INT6RES } }, //6
{ 1, INTTP_NA, {INTDESC_SPECIAL,INTDESC_SPECIAL}}, //7
{ 1, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_RESVD } }, //8
{ 1, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //9
{ 1, INTTP_EDGE , {INTDESC_NORMAL, INTDESC_NORMAL} }, //10
{ 3, INTTP_NA, {INTDESC_SPECIAL,INTDESC_SPECIAL}}, //11
{ 1, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //12
{ 1, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //13
{ 7, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_RESVD } }, //14, NMI
{ 3, INTTP_NA, {INT15RES, INT15RES } }, //15
{ 5, INTTP_NA, {INTDESC_SPECIAL,INTDESC_SPECIAL} }, //16
{ 1, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //17
{ 1, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //18
{ 2, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //19
{ 2, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //20
{ 2, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //21
{ 3, INTTP_EDGE, {INTDESC_RESVD, INTDESC_NORMAL} }, //22
{ 3, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //23
{ 4, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_NORMAL} }, //24
{ 4, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_RESVD } }, //25
{ 5, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_RESVD } }, //26
{ 3, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_RESVD } }, //27
{ 4, INTTP_EDGE, {INTDESC_NORMAL, INTDESC_NORMAL} }, //28
{ 3, INTTP_NA, {INTDESC_SPECIAL,INTDESC_SPECIAL}}, //29
{ 4, INTTP_EDGE, {INTDESC_RESVD, INTDESC_RESVD } }, //30
{ 5, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_RESVD } }, //31
};
const int_desc_t *interrupt_controller_hal_desc_table(void)
{
return interrupt_descriptor_table;
}

View File

@ -1,252 +0,0 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdint.h>
#include "soc/soc_caps.h"
#include "soc/dport_access.h"
#include "soc/system_reg.h"
#include "esp_bit_defs.h"
#include "soc/assist_debug_reg.h"
#include "esp_attr.h"
#include "riscv/csr.h"
#include "riscv/semihosting.h"
/*performance counter*/
#define CSR_PCER_MACHINE 0x7e0
#define CSR_PCMR_MACHINE 0x7e1
#define CSR_PCCR_MACHINE 0x7e2
/*fast gpio*/
#define CSR_GPIO_OEN_USER 0x803
#define CSR_GPIO_IN_USER 0x804
#define CSR_GPIO_OUT_USER 0x805
#ifdef __cplusplus
extern "C" {
#endif
static inline int IRAM_ATTR cpu_ll_get_core_id(void)
{
#if SOC_CPU_CORES_NUM == 1
return 0; // No need to check core ID on single core hardware
#else
int cpuid;
cpuid = RV_READ_CSR(mhartid);
return cpuid;
#endif
}
static inline void cpu_ll_enable_cycle_count(void)
{
RV_WRITE_CSR(CSR_PCER_MACHINE,1);
RV_WRITE_CSR(CSR_PCMR_MACHINE,1);
return;
}
static inline uint32_t IRAM_ATTR cpu_ll_get_cycle_count(void)
{
uint32_t result;
result = RV_READ_CSR(CSR_PCCR_MACHINE);
return result;
}
static inline void IRAM_ATTR cpu_ll_set_cycle_count(uint32_t val)
{
RV_WRITE_CSR(CSR_PCCR_MACHINE, val);
}
static inline void* cpu_ll_get_sp(void)
{
void *sp;
asm volatile ("mv %0, sp;" : "=r" (sp));
return sp;
}
static inline void cpu_ll_init_hwloop(void)
{
// Nothing needed here for ESP32-C3
}
FORCE_INLINE_ATTR bool cpu_ll_is_debugger_attached(void)
{
return REG_GET_BIT(ASSIST_DEBUG_CORE_0_DEBUG_MODE_REG, ASSIST_DEBUG_CORE_0_DEBUG_MODULE_ACTIVE);
}
static inline void cpu_ll_set_breakpoint(int id, uint32_t pc)
{
if (cpu_ll_is_debugger_attached()) {
        /* If we want to set a breakpoint which, when hit, transfers control to the debugger,
         * we need to set `action` in `mcontrol` to 1 (Enter Debug Mode).
         * That `action` value is supported only when `dmode` of `tdata1` is set,
         * but `dmode` can be modified only by the debugger (from Debug Mode).
         *
         * So when a debugger is connected, we use a special syscall to ask it to set the breakpoint for us.
*/
long args[] = {true, id, (long)pc};
int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
if (ret == 0) {
return;
}
}
    /* The code below sets a breakpoint which will trigger a `Breakpoint` exception
     * instead of transferring control to the debugger. */
RV_WRITE_CSR(tselect,id);
RV_SET_CSR(CSR_TCONTROL,TCONTROL_MTE);
RV_SET_CSR(CSR_TDATA1, TDATA1_USER|TDATA1_MACHINE|TDATA1_EXECUTE);
RV_WRITE_CSR(tdata2,pc);
return;
}
static inline void cpu_ll_clear_breakpoint(int id)
{
if (cpu_ll_is_debugger_attached()) {
/* see description in cpu_ll_set_breakpoint() */
long args[] = {false, id};
int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
if (ret == 0){
return;
}
}
RV_WRITE_CSR(tselect,id);
RV_CLEAR_CSR(CSR_TCONTROL,TCONTROL_MTE);
RV_CLEAR_CSR(CSR_TDATA1, TDATA1_USER|TDATA1_MACHINE|TDATA1_EXECUTE);
return;
}
static inline uint32_t cpu_ll_ptr_to_pc(const void* addr)
{
return ((uint32_t) addr);
}
static inline void* cpu_ll_pc_to_ptr(uint32_t pc)
{
return (void*) ((pc & 0x3fffffff) | 0x40000000);
}
static inline void cpu_ll_set_watchpoint(int id,
const void* addr,
size_t size,
bool on_read,
bool on_write)
{
uint32_t addr_napot;
if (cpu_ll_is_debugger_attached()) {
/* see description in cpu_ll_set_breakpoint() */
long args[] = {true, id, (long)addr, (long)size,
(long)((on_read ? ESP_SEMIHOSTING_WP_FLG_RD : 0) | (on_write ? ESP_SEMIHOSTING_WP_FLG_WR : 0))};
int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
if (ret == 0) {
return;
}
}
RV_WRITE_CSR(tselect,id);
RV_SET_CSR(CSR_TCONTROL, TCONTROL_MPTE | TCONTROL_MTE);
RV_SET_CSR(CSR_TDATA1, TDATA1_USER|TDATA1_MACHINE);
RV_SET_CSR_FIELD(CSR_TDATA1, (long unsigned int) TDATA1_MATCH, 1);
// add 0 in napot encoding
addr_napot = ((uint32_t) addr) | ((size >> 1) - 1);
if (on_read) {
RV_SET_CSR(CSR_TDATA1, TDATA1_LOAD);
}
if (on_write) {
RV_SET_CSR(CSR_TDATA1, TDATA1_STORE);
}
RV_WRITE_CSR(tdata2,addr_napot);
return;
}
static inline void cpu_ll_clear_watchpoint(int id)
{
if (cpu_ll_is_debugger_attached()) {
/* see description in cpu_ll_set_breakpoint() */
long args[] = {false, id};
int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
if (ret == 0){
return;
}
}
RV_WRITE_CSR(tselect,id);
RV_CLEAR_CSR(CSR_TCONTROL,TCONTROL_MTE);
RV_CLEAR_CSR(CSR_TDATA1, TDATA1_USER|TDATA1_MACHINE);
RV_CLEAR_CSR_FIELD(CSR_TDATA1, (long unsigned int) TDATA1_MATCH);
RV_CLEAR_CSR(CSR_TDATA1, TDATA1_MACHINE);
RV_CLEAR_CSR(CSR_TDATA1, TDATA1_LOAD|TDATA1_STORE|TDATA1_EXECUTE);
return;
}
static inline void cpu_ll_break(void)
{
asm volatile("ebreak\n");
return;
}
static inline void cpu_ll_set_vecbase(const void* vecbase)
{
uintptr_t vecbase_int = (uintptr_t)vecbase;
vecbase_int |= 1; // Set MODE field to treat MTVEC as a vector base address
RV_WRITE_CSR(mtvec, vecbase_int);
}
static inline void cpu_ll_waiti(void)
{
if (cpu_ll_is_debugger_attached() && DPORT_REG_GET_BIT(SYSTEM_CPU_PER_CONF_REG, SYSTEM_CPU_WAIT_MODE_FORCE_ON) == 0) {
        /* When SYSTEM_CPU_WAIT_MODE_FORCE_ON is disabled, SBA access to memory does not work for the debugger
           while the CPU is in WFI mode, so do not enter that mode when a debugger is connected */
return;
}
asm volatile ("wfi\n");
}
static inline void cpu_ll_enable_dedic_gpio_output(uint32_t mask)
{
RV_WRITE_CSR(CSR_GPIO_OEN_USER, mask);
}
static inline void cpu_ll_write_dedic_gpio_all(uint32_t value)
{
RV_WRITE_CSR(CSR_GPIO_OUT_USER, value);
}
static inline uint32_t cpu_ll_read_dedic_gpio_in(void)
{
uint32_t value = RV_READ_CSR(CSR_GPIO_IN_USER);
return value;
}
static inline uint32_t cpu_ll_read_dedic_gpio_out(void)
{
uint32_t value = RV_READ_CSR(CSR_GPIO_OUT_USER);
return value;
}
static inline void cpu_ll_write_dedic_gpio_mask(uint32_t mask, uint32_t value)
{
RV_SET_CSR(CSR_GPIO_OUT_USER, mask & value);
RV_CLEAR_CSR(CSR_GPIO_OUT_USER, mask & ~(value));
}
static inline void cpu_ll_compare_and_set_native(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
{
uint32_t old_value;
unsigned old_mstatus = RV_CLEAR_CSR(mstatus, MSTATUS_MIE);
old_value = *addr;
if (old_value == compare) {
*addr = *set;
}
RV_SET_CSR(mstatus, old_mstatus & MSTATUS_MIE);
*set = old_value;
}
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,51 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdint.h>
#include "riscv/csr.h"
/*fast gpio*/
#define CSR_GPIO_OEN_USER 0x803
#define CSR_GPIO_IN_USER 0x804
#define CSR_GPIO_OUT_USER 0x805
#ifdef __cplusplus
extern "C" {
#endif
static inline void dedic_gpio_cpu_ll_enable_output(uint32_t mask)
{
RV_WRITE_CSR(CSR_GPIO_OEN_USER, mask);
}
static inline void dedic_gpio_cpu_ll_write_all(uint32_t value)
{
RV_WRITE_CSR(CSR_GPIO_OUT_USER, value);
}
static inline uint32_t dedic_gpio_cpu_ll_read_in(void)
{
uint32_t value = RV_READ_CSR(CSR_GPIO_IN_USER);
return value;
}
static inline uint32_t dedic_gpio_cpu_ll_read_out(void)
{
uint32_t value = RV_READ_CSR(CSR_GPIO_OUT_USER);
return value;
}
static inline void dedic_gpio_cpu_ll_write_mask(uint32_t mask, uint32_t value)
{
RV_SET_CSR(CSR_GPIO_OUT_USER, mask & value);
RV_CLEAR_CSR(CSR_GPIO_OUT_USER, mask & ~(value));
}
#ifdef __cplusplus
}
#endif
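A short usage sketch for the new header (not part of this merge; assumes the header is installed as hal/dedic_gpio_cpu_ll.h, the channel mask 0x3 is illustrative, and routing the channels to pads is done elsewhere, e.g. by the dedicated GPIO driver):

#include <stdint.h>
#include "hal/dedic_gpio_cpu_ll.h"

static void toggle_two_channels(void)
{
    dedic_gpio_cpu_ll_enable_output(0x3);         // enable CPU output on channels 0 and 1
    dedic_gpio_cpu_ll_write_mask(0x3, 0x1);       // channel 0 high, channel 1 low
    uint32_t out = dedic_gpio_cpu_ll_read_out();  // read back the driven value
    dedic_gpio_cpu_ll_write_all(out ^ 0x3);       // toggle both channels, other channels unchanged
}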

View File

@ -1,124 +0,0 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdint.h>
#include "soc/soc_caps.h"
#include "soc/soc.h"
#include "soc/interrupt_core0_reg.h"
#include "riscv/interrupt.h"
#include "riscv/csr.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief enable interrupts specified by the mask
*
* @param mask bitmask of interrupts that needs to be enabled
*/
static inline void intr_cntrl_ll_enable_interrupts(uint32_t mask)
{
unsigned old_mstatus = RV_CLEAR_CSR(mstatus, MSTATUS_MIE);
esprv_intc_int_enable(mask);
RV_SET_CSR(mstatus, old_mstatus & MSTATUS_MIE);
}
/**
* @brief disable interrupts specified by the mask
*
* @param mask bitmask of interrupts that needs to be disabled
*/
static inline void intr_cntrl_ll_disable_interrupts(uint32_t mask)
{
unsigned old_mstatus = RV_CLEAR_CSR(mstatus, MSTATUS_MIE);
esprv_intc_int_disable(mask);
RV_SET_CSR(mstatus, old_mstatus & MSTATUS_MIE);
}
/**
* @brief Read the current interrupt mask of the CPU running this code.
*
* @return The current interrupt bitmask.
*/
static inline uint32_t intr_cntrl_ll_read_interrupt_mask(void)
{
return REG_READ(INTERRUPT_CORE0_CPU_INT_ENABLE_REG);
}
/**
* @brief checks if given interrupt number has a valid handler
*
* @param intr interrupt number ranged from 0 to 31
 * @param cpu CPU number ranging between 0 and SOC_CPU_CORES_NUM - 1
* @return true for valid handler, false otherwise
*/
static inline bool intr_cntrl_ll_has_handler(uint8_t intr, uint8_t cpu)
{
return intr_handler_get(intr);
}
/**
* @brief sets interrupt handler and optional argument of a given interrupt number
*
* @param intr interrupt number ranged from 0 to 31
* @param handler handler invoked when an interrupt occurs
* @param arg optional argument to pass to the handler
*/
static inline void intr_cntrl_ll_set_int_handler(uint8_t intr, interrupt_handler_t handler, void *arg)
{
intr_handler_set(intr, (void *)handler, arg);
}
/**
* @brief Gets argument passed to handler of a given interrupt number
*
* @param intr interrupt number ranged from 0 to 31
*
* @return argument used by handler of passed interrupt number
*/
static inline void *intr_cntrl_ll_get_int_handler_arg(uint8_t intr)
{
return intr_handler_get_arg(intr);
}
/**
* @brief Acknowledge an edge-trigger interrupt by clearing its pending flag
*
* @param intr interrupt number ranged from 0 to 31
*/
static inline void intr_cntrl_ll_edge_int_acknowledge(int intr)
{
REG_SET_BIT(INTERRUPT_CORE0_CPU_INT_CLEAR_REG, intr);
}
/**
 * @brief Sets the interrupt level in the interrupt controller.
*
* @param interrupt_number Interrupt number 0 to 31
* @param level priority between 1 (lowest) to 7 (highest)
*/
static inline void intr_cntrl_ll_set_int_level(int intr, int level)
{
esprv_intc_int_set_priority(intr, level);
}
/**
* @brief Set the type of an interrupt in the controller.
*
* @param interrupt_number Interrupt number 0 to 31
* @param type interrupt type as edge or level triggered
*/
static inline void intr_cntrl_ll_set_int_type(int intr, int_type_t type)
{
esprv_intc_int_set_type(BIT(intr), type);
}
#ifdef __cplusplus
}
#endif

View File

@ -1,44 +0,0 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include "soc/soc.h"
#include "soc/rtc_cntl_reg.h"
#include "soc/soc_caps.h"
#ifdef __cplusplus
extern "C" {
#endif
static inline void soc_ll_stall_core(int core)
{
const int rtc_cntl_c1_m[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C1_M};
const int rtc_cntl_c1_s[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C1_S};
const int rtc_cntl_c0_m[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C0_M};
const int rtc_cntl_c0_s[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C0_S};
CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1_m[core]);
SET_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, 0x21 << rtc_cntl_c1_s[core]);
CLEAR_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_c0_m[core]);
SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, 2 << rtc_cntl_c0_s[core]);
}
static inline void soc_ll_unstall_core(int core)
{
const int rtc_cntl_c1_m[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C1_M};
const int rtc_cntl_c0_m[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C0_M};
CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1_m[core]);
CLEAR_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_c0_m[core]);
}
static inline void soc_ll_reset_core(int core)
{
SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, RTC_CNTL_SW_PROCPU_RST_M);
}
#ifdef __cplusplus
}
#endif

View File

@ -1,252 +0,0 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdint.h>
#include "soc/soc_caps.h"
#include "soc/dport_access.h"
#include "soc/system_reg.h"
#include "esp_bit_defs.h"
#include "soc/assist_debug_reg.h"
#include "esp_attr.h"
#include "riscv/csr.h"
#include "riscv/semihosting.h"
/*performance counter*/
#define CSR_PCER_MACHINE 0x7e0
#define CSR_PCMR_MACHINE 0x7e1
#define CSR_PCCR_MACHINE 0x7e2
/*fast gpio*/
#define CSR_GPIO_OEN_USER 0x803
#define CSR_GPIO_IN_USER 0x804
#define CSR_GPIO_OUT_USER 0x805
#ifdef __cplusplus
extern "C" {
#endif
static inline int IRAM_ATTR cpu_ll_get_core_id(void)
{
#if SOC_CPU_CORES_NUM == 1
return 0; // No need to check core ID on single core hardware
#else
int cpuid;
cpuid = RV_READ_CSR(mhartid);
return cpuid;
#endif
}
static inline void cpu_ll_enable_cycle_count(void)
{
RV_WRITE_CSR(CSR_PCER_MACHINE,1);
RV_WRITE_CSR(CSR_PCMR_MACHINE,1);
return;
}
static inline uint32_t IRAM_ATTR cpu_ll_get_cycle_count(void)
{
uint32_t result;
result = RV_READ_CSR(CSR_PCCR_MACHINE);
return result;
}
static inline void IRAM_ATTR cpu_ll_set_cycle_count(uint32_t val)
{
RV_WRITE_CSR(CSR_PCCR_MACHINE, val);
}
static inline void* cpu_ll_get_sp(void)
{
void *sp;
asm volatile ("mv %0, sp;" : "=r" (sp));
return sp;
}
static inline void cpu_ll_init_hwloop(void)
{
// Nothing needed here for ESP32-C3
}
FORCE_INLINE_ATTR bool cpu_ll_is_debugger_attached(void)
{
return REG_GET_BIT(ASSIST_DEBUG_CORE_0_DEBUG_MODE_REG, ASSIST_DEBUG_CORE_0_DEBUG_MODULE_ACTIVE);
}
static inline void cpu_ll_set_breakpoint(int id, uint32_t pc)
{
if (cpu_ll_is_debugger_attached()) {
        /* If we want to set a breakpoint which, when hit, transfers control to the debugger,
         * we need to set `action` in `mcontrol` to 1 (Enter Debug Mode).
         * That `action` value is supported only when `dmode` of `tdata1` is set,
         * but `dmode` can be modified only by the debugger (from Debug Mode).
         *
         * So when a debugger is connected, we use a special syscall to ask it to set the breakpoint for us.
*/
long args[] = {true, id, (long)pc};
int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
if (ret == 0) {
return;
}
}
    /* The code below sets a breakpoint which will trigger a `Breakpoint` exception
     * instead of transferring control to the debugger. */
RV_WRITE_CSR(tselect,id);
RV_SET_CSR(CSR_TCONTROL,TCONTROL_MTE);
RV_SET_CSR(CSR_TDATA1, TDATA1_USER|TDATA1_MACHINE|TDATA1_EXECUTE);
RV_WRITE_CSR(tdata2,pc);
return;
}
static inline void cpu_ll_clear_breakpoint(int id)
{
if (cpu_ll_is_debugger_attached()) {
/* see description in cpu_ll_set_breakpoint() */
long args[] = {false, id};
int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
if (ret == 0){
return;
}
}
RV_WRITE_CSR(tselect,id);
RV_CLEAR_CSR(CSR_TCONTROL,TCONTROL_MTE);
RV_CLEAR_CSR(CSR_TDATA1, TDATA1_USER|TDATA1_MACHINE|TDATA1_EXECUTE);
return;
}
static inline uint32_t cpu_ll_ptr_to_pc(const void* addr)
{
return ((uint32_t) addr);
}
static inline void* cpu_ll_pc_to_ptr(uint32_t pc)
{
return (void*) ((pc & 0x3fffffff) | 0x40000000);
}
static inline void cpu_ll_set_watchpoint(int id,
const void* addr,
size_t size,
bool on_read,
bool on_write)
{
uint32_t addr_napot;
if (cpu_ll_is_debugger_attached()) {
/* see description in cpu_ll_set_breakpoint() */
long args[] = {true, id, (long)addr, (long)size,
(long)((on_read ? ESP_SEMIHOSTING_WP_FLG_RD : 0) | (on_write ? ESP_SEMIHOSTING_WP_FLG_WR : 0))};
int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
if (ret == 0) {
return;
}
}
RV_WRITE_CSR(tselect,id);
RV_SET_CSR(CSR_TCONTROL, TCONTROL_MPTE | TCONTROL_MTE);
RV_SET_CSR(CSR_TDATA1, TDATA1_USER|TDATA1_MACHINE);
RV_SET_CSR_FIELD(CSR_TDATA1, (long unsigned int) TDATA1_MATCH, 1);
// add 0 in napot encoding
addr_napot = ((uint32_t) addr) | ((size >> 1) - 1);
if (on_read) {
RV_SET_CSR(CSR_TDATA1, TDATA1_LOAD);
}
if (on_write) {
RV_SET_CSR(CSR_TDATA1, TDATA1_STORE);
}
RV_WRITE_CSR(tdata2,addr_napot);
return;
}
static inline void cpu_ll_clear_watchpoint(int id)
{
if (cpu_ll_is_debugger_attached()) {
/* see description in cpu_ll_set_breakpoint() */
long args[] = {false, id};
int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
if (ret == 0){
return;
}
}
RV_WRITE_CSR(tselect,id);
RV_CLEAR_CSR(CSR_TCONTROL,TCONTROL_MTE);
RV_CLEAR_CSR(CSR_TDATA1, TDATA1_USER|TDATA1_MACHINE);
RV_CLEAR_CSR_FIELD(CSR_TDATA1, (long unsigned int) TDATA1_MATCH);
RV_CLEAR_CSR(CSR_TDATA1, TDATA1_MACHINE);
RV_CLEAR_CSR(CSR_TDATA1, TDATA1_LOAD|TDATA1_STORE|TDATA1_EXECUTE);
return;
}
static inline void cpu_ll_break(void)
{
asm volatile("ebreak\n");
return;
}
static inline void cpu_ll_set_vecbase(const void* vecbase)
{
uintptr_t vecbase_int = (uintptr_t)vecbase;
vecbase_int |= 1; // Set MODE field to treat MTVEC as a vector base address
RV_WRITE_CSR(mtvec, vecbase_int);
}
static inline void cpu_ll_waiti(void)
{
if (cpu_ll_is_debugger_attached() && DPORT_REG_GET_BIT(SYSTEM_CPU_PER_CONF_REG, SYSTEM_CPU_WAIT_MODE_FORCE_ON) == 0) {
/* When SYSTEM_CPU_WAIT_MODE_FORCE_ON is disabled, the debugger's SBA access to memory does not work
while the CPU is in WFI mode, so do not enter WFI when a debugger is connected */
return;
}
asm volatile ("wfi\n");
}
static inline void cpu_ll_enable_dedic_gpio_output(uint32_t mask)
{
RV_WRITE_CSR(CSR_GPIO_OEN_USER, mask);
}
static inline void cpu_ll_write_dedic_gpio_all(uint32_t value)
{
RV_WRITE_CSR(CSR_GPIO_OUT_USER, value);
}
static inline uint32_t cpu_ll_read_dedic_gpio_in(void)
{
uint32_t value = RV_READ_CSR(CSR_GPIO_IN_USER);
return value;
}
static inline uint32_t cpu_ll_read_dedic_gpio_out(void)
{
uint32_t value = RV_READ_CSR(CSR_GPIO_OUT_USER);
return value;
}
static inline void cpu_ll_write_dedic_gpio_mask(uint32_t mask, uint32_t value)
{
RV_SET_CSR(CSR_GPIO_OUT_USER, mask & value);
RV_CLEAR_CSR(CSR_GPIO_OUT_USER, mask & ~(value));
}
static inline void cpu_ll_compare_and_set_native(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
{
uint32_t old_value;
unsigned old_mstatus = RV_CLEAR_CSR(mstatus, MSTATUS_MIE);
old_value = *addr;
if (old_value == compare) {
*addr = *set;
}
RV_SET_CSR(mstatus, old_mstatus & MSTATUS_MIE);
*set = old_value;
}
#ifdef __cplusplus
}
#endif
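
For readers following the trigger-module code above, a minimal worked sketch of the NAPOT encoding used by cpu_ll_set_watchpoint(); the concrete address and size below are illustrative, not taken from this change:

#include <stdint.h>
#include <stddef.h>

/* Assuming a power-of-two size and a size-aligned address, e.g.
 * addr = 0x3FC80000, size = 8:
 *   addr | ((size >> 1) - 1) = 0x3FC80000 | 0x3 = 0x3FC80003
 * The low bits form the 0b011 pattern NAPOT uses for an 8-byte region. */
static inline uint32_t example_napot_encode(uint32_t addr, size_t size)
{
    return addr | ((uint32_t)(size >> 1) - 1);
}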

View File

@ -0,0 +1,51 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdint.h>
#include "riscv/csr.h"
/*fast gpio*/
#define CSR_GPIO_OEN_USER 0x803
#define CSR_GPIO_IN_USER 0x804
#define CSR_GPIO_OUT_USER 0x805
#ifdef __cplusplus
extern "C" {
#endif
static inline void dedic_gpio_cpu_ll_enable_output(uint32_t mask)
{
RV_WRITE_CSR(CSR_GPIO_OEN_USER, mask);
}
static inline void dedic_gpio_cpu_ll_write_all(uint32_t value)
{
RV_WRITE_CSR(CSR_GPIO_OUT_USER, value);
}
static inline uint32_t dedic_gpio_cpu_ll_read_in(void)
{
uint32_t value = RV_READ_CSR(CSR_GPIO_IN_USER);
return value;
}
static inline uint32_t dedic_gpio_cpu_ll_read_out(void)
{
uint32_t value = RV_READ_CSR(CSR_GPIO_OUT_USER);
return value;
}
static inline void dedic_gpio_cpu_ll_write_mask(uint32_t mask, uint32_t value)
{
RV_SET_CSR(CSR_GPIO_OUT_USER, mask & value);
RV_CLEAR_CSR(CSR_GPIO_OUT_USER, mask & ~(value));
}
#ifdef __cplusplus
}
#endif
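
A brief usage sketch of the new helpers, assuming the header is included as hal/dedic_gpio_cpu_ll.h; the channel number is illustrative, and in real code the mask/offset come from the dedic_gpio bundle driver shown earlier:

#include <stdint.h>
#include "hal/dedic_gpio_cpu_ll.h"

// Toggle dedicated-GPIO channel 0 without disturbing the other channels.
static void example_toggle_channel0(void)
{
    uint32_t out = dedic_gpio_cpu_ll_read_out();              // current CPU GPIO_OUT state
    dedic_gpio_cpu_ll_write_mask(1u << 0, ~out & (1u << 0));  // flip only bit 0
}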

View File

@ -1,132 +0,0 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <stdint.h>
#include "soc/soc_caps.h"
#include "soc/soc.h"
#include "soc/interrupt_core0_reg.h"
#include "riscv/interrupt.h"
#include "riscv/csr.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief enable interrupts specified by the mask
*
* @param mask bitmask of interrupts that needs to be enabled
*/
static inline void intr_cntrl_ll_enable_interrupts(uint32_t mask)
{
unsigned old_mstatus = RV_CLEAR_CSR(mstatus, MSTATUS_MIE);
esprv_intc_int_enable(mask);
RV_SET_CSR(mstatus, old_mstatus & MSTATUS_MIE);
}
/**
* @brief disable interrupts specified by the mask
*
* @param mask bitmask of interrupts that needs to be disabled
*/
static inline void intr_cntrl_ll_disable_interrupts(uint32_t mask)
{
unsigned old_mstatus = RV_CLEAR_CSR(mstatus, MSTATUS_MIE);
esprv_intc_int_disable(mask);
RV_SET_CSR(mstatus, old_mstatus & MSTATUS_MIE);
}
/**
* @brief Read the current interrupt mask of the CPU running this code.
*
* @return The current interrupt bitmask.
*/
static inline uint32_t intr_cntrl_ll_read_interrupt_mask(void)
{
return REG_READ(INTERRUPT_CORE0_CPU_INT_ENABLE_REG);
}
/**
* @brief checks if given interrupt number has a valid handler
*
* @param intr interrupt number ranged from 0 to 31
* @param cpu CPU number between 0 and SOC_CPU_CORES_NUM - 1
* @return true for valid handler, false otherwise
*/
static inline bool intr_cntrl_ll_has_handler(uint8_t intr, uint8_t cpu)
{
return intr_handler_get(intr);
}
/**
* @brief sets interrupt handler and optional argument of a given interrupt number
*
* @param intr interrupt number ranged from 0 to 31
* @param handler handler invoked when an interrupt occurs
* @param arg optional argument to pass to the handler
*/
static inline void intr_cntrl_ll_set_int_handler(uint8_t intr, interrupt_handler_t handler, void *arg)
{
intr_handler_set(intr, (void *)handler, arg);
}
/**
* @brief Gets argument passed to handler of a given interrupt number
*
* @param intr interrupt number ranged from 0 to 31
*
* @return argument used by handler of passed interrupt number
*/
static inline void *intr_cntrl_ll_get_int_handler_arg(uint8_t intr)
{
return intr_handler_get_arg(intr);
}
/**
* @brief Acknowledge an edge-trigger interrupt by clearing its pending flag
*
* @param intr interrupt number ranged from 0 to 31
*/
static inline void intr_cntrl_ll_edge_int_acknowledge(int intr)
{
REG_SET_BIT(INTERRUPT_CORE0_CPU_INT_CLEAR_REG, intr);
}
/**
* @brief Sets the interrupt level in the interrupt controller.
*
* @param interrupt_number Interrupt number 0 to 31
* @param level priority between 1 (lowest) to 7 (highest)
*/
static inline void intr_cntrl_ll_set_int_level(int intr, int level)
{
esprv_intc_int_set_priority(intr, level);
}
/**
* @brief Set the type of an interrupt in the controller.
*
* @param interrupt_number Interrupt number 0 to 31
* @param type interrupt type as edge or level triggered
*/
static inline void intr_cntrl_ll_set_int_type(int intr, int_type_t type)
{
esprv_intc_int_set_type(BIT(intr), type);
}
#ifdef __cplusplus
}
#endif
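
A short usage sketch of the LL mask operations removed here; the interrupt number is hypothetical and exists only for illustration:

#include <stdint.h>
#include "esp_bit_defs.h"
#include "hal/interrupt_controller_ll.h"

#define EXAMPLE_INTR_NUM  3   // hypothetical CPU interrupt line

static void example_update_shared_state(void)
{
    intr_cntrl_ll_disable_interrupts(BIT(EXAMPLE_INTR_NUM));  // mask just this line
    // ... touch data shared with the corresponding ISR ...
    intr_cntrl_ll_enable_interrupts(BIT(EXAMPLE_INTR_NUM));   // unmask it again
}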

View File

@ -1,52 +0,0 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "soc/soc.h"
#include "soc/rtc_cntl_reg.h"
#include "soc/soc_caps.h"
#ifdef __cplusplus
extern "C" {
#endif
static inline void soc_ll_stall_core(int core)
{
const int rtc_cntl_c1_m[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C1_M};
const int rtc_cntl_c1_s[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C1_S};
const int rtc_cntl_c0_m[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C0_M};
const int rtc_cntl_c0_s[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C0_S};
CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1_m[core]);
SET_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, 0x21 << rtc_cntl_c1_s[core]);
CLEAR_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_c0_m[core]);
SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, 2 << rtc_cntl_c0_s[core]);
}
static inline void soc_ll_unstall_core(int core)
{
const int rtc_cntl_c1_m[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C1_M};
const int rtc_cntl_c0_m[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C0_M};
CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1_m[core]);
CLEAR_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_c0_m[core]);
}
static inline void soc_ll_reset_core(int core)
{
SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, RTC_CNTL_SW_PROCPU_RST_M);
}
#ifdef __cplusplus
}
#endif

View File

@ -1,245 +0,0 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdint.h>
#include "soc/soc_caps.h"
#include "esp_bit_defs.h"
#include "soc/assist_debug_reg.h"
#include "esp_attr.h"
#include "riscv/csr.h"
#include "riscv/semihosting.h"
/*performance counter*/
#define CSR_PCER_MACHINE 0x7e0
#define CSR_PCMR_MACHINE 0x7e1
#define CSR_PCCR_MACHINE 0x7e2
/*fast gpio*/
#define CSR_GPIO_OEN_USER 0x803
#define CSR_GPIO_IN_USER 0x804
#define CSR_GPIO_OUT_USER 0x805
#ifdef __cplusplus
extern "C" {
#endif
static inline int IRAM_ATTR cpu_ll_get_core_id(void)
{
#if SOC_CPU_CORES_NUM == 1
return 0; // No need to check core ID on single core hardware
#else
int cpuid;
cpuid = RV_READ_CSR(mhartid);
return cpuid;
#endif
}
static inline void cpu_ll_enable_cycle_count(void)
{
RV_WRITE_CSR(CSR_PCER_MACHINE,1);
RV_WRITE_CSR(CSR_PCMR_MACHINE,1);
return;
}
static inline uint32_t IRAM_ATTR cpu_ll_get_cycle_count(void)
{
uint32_t result;
result = RV_READ_CSR(CSR_PCCR_MACHINE);
return result;
}
static inline void IRAM_ATTR cpu_ll_set_cycle_count(uint32_t val)
{
RV_WRITE_CSR(CSR_PCCR_MACHINE, val);
}
static inline void* cpu_ll_get_sp(void)
{
void *sp;
asm volatile ("mv %0, sp;" : "=r" (sp));
return sp;
}
static inline void cpu_ll_init_hwloop(void)
{
// Nothing needed here for ESP32-H2
}
FORCE_INLINE_ATTR bool cpu_ll_is_debugger_attached(void)
{
return REG_GET_BIT(ASSIST_DEBUG_CORE_0_DEBUG_MODE_REG, ASSIST_DEBUG_CORE_0_DEBUG_MODULE_ACTIVE);
}
static inline void cpu_ll_set_breakpoint(int id, uint32_t pc)
{
if (cpu_ll_is_debugger_attached()) {
/* If we want to set a breakpoint which, when hit, transfers control to the debugger,
* we need to set `action` in `mcontrol` to 1 (Enter Debug Mode).
* That `action` value is supported only when `dmode` of `tdata1` is set,
* but `dmode` can be modified only by the debugger (from Debug Mode).
*
* So when a debugger is connected we use a special semihosting call to ask it to set the breakpoint for us.
*/
long args[] = {true, id, (long)pc};
int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
if (ret == 0) {
return;
}
}
/* The code below sets a breakpoint which will trigger a `Breakpoint` exception
* instead of transferring control to the debugger. */
RV_WRITE_CSR(tselect,id);
RV_SET_CSR(CSR_TCONTROL,TCONTROL_MTE);
RV_SET_CSR(CSR_TDATA1, TDATA1_USER|TDATA1_MACHINE|TDATA1_EXECUTE);
RV_WRITE_CSR(tdata2,pc);
return;
}
static inline void cpu_ll_clear_breakpoint(int id)
{
if (cpu_ll_is_debugger_attached()) {
/* see description in cpu_ll_set_breakpoint() */
long args[] = {false, id};
int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
if (ret == 0){
return;
}
}
RV_WRITE_CSR(tselect,id);
RV_CLEAR_CSR(CSR_TCONTROL,TCONTROL_MTE);
RV_CLEAR_CSR(CSR_TDATA1, TDATA1_USER|TDATA1_MACHINE|TDATA1_EXECUTE);
return;
}
static inline uint32_t cpu_ll_ptr_to_pc(const void* addr)
{
return ((uint32_t) addr);
}
static inline void* cpu_ll_pc_to_ptr(uint32_t pc)
{
return (void*) ((pc & 0x3fffffff) | 0x40000000);
}
static inline void cpu_ll_set_watchpoint(int id,
const void* addr,
size_t size,
bool on_read,
bool on_write)
{
uint32_t addr_napot;
if (cpu_ll_is_debugger_attached()) {
/* see description in cpu_ll_set_breakpoint() */
long args[] = {true, id, (long)addr, (long)size,
(long)((on_read ? ESP_SEMIHOSTING_WP_FLG_RD : 0) | (on_write ? ESP_SEMIHOSTING_WP_FLG_WR : 0))};
int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
if (ret == 0) {
return;
}
}
RV_WRITE_CSR(tselect,id);
RV_SET_CSR(CSR_TCONTROL, TCONTROL_MPTE | TCONTROL_MTE);
RV_SET_CSR(CSR_TDATA1, TDATA1_USER|TDATA1_MACHINE);
RV_SET_CSR_FIELD(CSR_TDATA1, (long unsigned int) TDATA1_MATCH, 1);
// NAPOT encoding: fill the low bits with 1s; the required 0 bit comes from the size-aligned address
addr_napot = ((uint32_t) addr) | ((size >> 1) - 1);
if (on_read) {
RV_SET_CSR(CSR_TDATA1, TDATA1_LOAD);
}
if (on_write) {
RV_SET_CSR(CSR_TDATA1, TDATA1_STORE);
}
RV_WRITE_CSR(tdata2,addr_napot);
return;
}
static inline void cpu_ll_clear_watchpoint(int id)
{
if (cpu_ll_is_debugger_attached()) {
/* see description in cpu_ll_set_breakpoint() */
long args[] = {false, id};
int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
if (ret == 0){
return;
}
}
RV_WRITE_CSR(tselect,id);
RV_CLEAR_CSR(CSR_TCONTROL,TCONTROL_MTE);
RV_CLEAR_CSR(CSR_TDATA1, TDATA1_USER|TDATA1_MACHINE);
RV_CLEAR_CSR_FIELD(CSR_TDATA1, (long unsigned int) TDATA1_MATCH);
RV_CLEAR_CSR(CSR_TDATA1, TDATA1_MACHINE);
RV_CLEAR_CSR(CSR_TDATA1, TDATA1_LOAD|TDATA1_STORE|TDATA1_EXECUTE);
return;
}
static inline void cpu_ll_break(void)
{
asm volatile("ebreak\n");
return;
}
static inline void cpu_ll_set_vecbase(const void* vecbase)
{
uintptr_t vecbase_int = (uintptr_t)vecbase;
vecbase_int |= 1; // Set MODE field to treat MTVEC as a vector base address
RV_WRITE_CSR(mtvec, vecbase_int);
}
static inline void cpu_ll_waiti(void)
{
asm volatile ("wfi\n");
}
static inline void cpu_ll_enable_dedic_gpio_output(uint32_t mask)
{
RV_WRITE_CSR(CSR_GPIO_OEN_USER, mask);
}
static inline void cpu_ll_write_dedic_gpio_all(uint32_t value)
{
RV_WRITE_CSR(CSR_GPIO_OUT_USER, value);
}
static inline uint32_t cpu_ll_read_dedic_gpio_in(void)
{
uint32_t value = RV_READ_CSR(CSR_GPIO_IN_USER);
return value;
}
static inline uint32_t cpu_ll_read_dedic_gpio_out(void)
{
uint32_t value = RV_READ_CSR(CSR_GPIO_OUT_USER);
return value;
}
static inline void cpu_ll_write_dedic_gpio_mask(uint32_t mask, uint32_t value)
{
RV_SET_CSR(CSR_GPIO_OUT_USER, mask & value);
RV_CLEAR_CSR(CSR_GPIO_OUT_USER, mask & ~(value));
}
static inline void cpu_ll_compare_and_set_native(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
{
uint32_t old_value;
unsigned old_mstatus = RV_CLEAR_CSR(mstatus, MSTATUS_MIE);
old_value = *addr;
if (old_value == compare) {
*addr = *set;
}
RV_SET_CSR(mstatus, old_mstatus & MSTATUS_MIE);
*set = old_value;
}
#ifdef __cplusplus
}
#endif
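
A usage sketch of the software compare-and-set above; the lock word and helper name are hypothetical, and the sketch relies only on the documented contract that the *set argument returns the old value:

#include <stdint.h>
#include <stdbool.h>

static volatile uint32_t s_lock = 0;   // hypothetical lock word: 0 = free, 1 = taken

static bool example_try_lock(void)
{
    uint32_t desired = 1;                                // value to store on success
    cpu_ll_compare_and_set_native(&s_lock, 0, &desired);
    return desired == 0;                                 // old value 0 -> we took the lock
}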

View File

@ -0,0 +1,51 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdint.h>
#include "riscv/csr.h"
/*fast gpio*/
#define CSR_GPIO_OEN_USER 0x803
#define CSR_GPIO_IN_USER 0x804
#define CSR_GPIO_OUT_USER 0x805
#ifdef __cplusplus
extern "C" {
#endif
static inline void dedic_gpio_cpu_ll_enable_output(uint32_t mask)
{
RV_WRITE_CSR(CSR_GPIO_OEN_USER, mask);
}
static inline void dedic_gpio_cpu_ll_write_all(uint32_t value)
{
RV_WRITE_CSR(CSR_GPIO_OUT_USER, value);
}
static inline uint32_t dedic_gpio_cpu_ll_read_in(void)
{
uint32_t value = RV_READ_CSR(CSR_GPIO_IN_USER);
return value;
}
static inline uint32_t dedic_gpio_cpu_ll_read_out(void)
{
uint32_t value = RV_READ_CSR(CSR_GPIO_OUT_USER);
return value;
}
static inline void dedic_gpio_cpu_ll_write_mask(uint32_t mask, uint32_t value)
{
RV_SET_CSR(CSR_GPIO_OUT_USER, mask & value);
RV_CLEAR_CSR(CSR_GPIO_OUT_USER, mask & ~(value));
}
#ifdef __cplusplus
}
#endif

View File

@ -1,132 +0,0 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <stdint.h>
#include "soc/soc_caps.h"
#include "soc/soc.h"
#include "soc/interrupt_core0_reg.h"
#include "riscv/interrupt.h"
#include "riscv/csr.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief enable interrupts specified by the mask
*
* @param mask bitmask of interrupts that needs to be enabled
*/
static inline void intr_cntrl_ll_enable_interrupts(uint32_t mask)
{
unsigned old_mstatus = RV_CLEAR_CSR(mstatus, MSTATUS_MIE);
esprv_intc_int_enable(mask);
RV_SET_CSR(mstatus, old_mstatus & MSTATUS_MIE);
}
/**
* @brief disable interrupts specified by the mask
*
* @param mask bitmask of interrupts that needs to be disabled
*/
static inline void intr_cntrl_ll_disable_interrupts(uint32_t mask)
{
unsigned old_mstatus = RV_CLEAR_CSR(mstatus, MSTATUS_MIE);
esprv_intc_int_disable(mask);
RV_SET_CSR(mstatus, old_mstatus & MSTATUS_MIE);
}
/**
* @brief Read the current interrupt mask of the CPU running this code.
*
* @return The current interrupt bitmask.
*/
static inline uint32_t intr_cntrl_ll_read_interrupt_mask(void)
{
return REG_READ(INTERRUPT_CORE0_CPU_INT_ENABLE_REG);
}
/**
* @brief checks if given interrupt number has a valid handler
*
* @param intr interrupt number ranged from 0 to 31
* @param cpu CPU number between 0 and SOC_CPU_CORES_NUM - 1
* @return true for valid handler, false otherwise
*/
static inline bool intr_cntrl_ll_has_handler(uint8_t intr, uint8_t cpu)
{
return intr_handler_get(intr);
}
/**
* @brief sets interrupt handler and optional argument of a given interrupt number
*
* @param intr interrupt number ranged from 0 to 31
* @param handler handler invoked when an interrupt occurs
* @param arg optional argument to pass to the handler
*/
static inline void intr_cntrl_ll_set_int_handler(uint8_t intr, interrupt_handler_t handler, void *arg)
{
intr_handler_set(intr, (void *)handler, arg);
}
/**
* @brief Gets argument passed to handler of a given interrupt number
*
* @param intr interrupt number ranged from 0 to 31
*
* @return argument used by handler of passed interrupt number
*/
static inline void *intr_cntrl_ll_get_int_handler_arg(uint8_t intr)
{
return intr_handler_get_arg(intr);
}
/**
* @brief Acknowledge an edge-trigger interrupt by clearing its pending flag
*
* @param intr interrupt number ranged from 0 to 31
*/
static inline void intr_cntrl_ll_edge_int_acknowledge(int intr)
{
REG_SET_BIT(INTERRUPT_CORE0_CPU_INT_CLEAR_REG, intr);
}
/**
* @brief Sets the interrupt level in the interrupt controller.
*
* @param interrupt_number Interrupt number 0 to 31
* @param level priority between 1 (lowest) to 7 (highest)
*/
static inline void intr_cntrl_ll_set_int_level(int intr, int level)
{
esprv_intc_int_set_priority(intr, level);
}
/**
* @brief Set the type of an interrupt in the controller.
*
* @param interrupt_number Interrupt number 0 to 31
* @param type interrupt type as edge or level triggered
*/
static inline void intr_cntrl_ll_set_int_type(int intr, int_type_t type)
{
esprv_intc_int_set_type(BIT(intr), type);
}
#ifdef __cplusplus
}
#endif

View File

@ -1,52 +0,0 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "soc/soc.h"
#include "soc/rtc_cntl_reg.h"
#include "soc/soc_caps.h"
#ifdef __cplusplus
extern "C" {
#endif
static inline void soc_ll_stall_core(int core)
{
const int rtc_cntl_c1_m[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C1_M};
const int rtc_cntl_c1_s[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C1_S};
const int rtc_cntl_c0_m[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C0_M};
const int rtc_cntl_c0_s[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C0_S};
CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1_m[core]);
SET_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, 0x21 << rtc_cntl_c1_s[core]);
CLEAR_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_c0_m[core]);
SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, 2 << rtc_cntl_c0_s[core]);
}
static inline void soc_ll_unstall_core(int core)
{
const int rtc_cntl_c1_m[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C1_M};
const int rtc_cntl_c0_m[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C0_M};
CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1_m[core]);
CLEAR_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_c0_m[core]);
}
static inline void soc_ll_reset_core(int core)
{
SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, RTC_CNTL_SW_PROCPU_RST_M);
}
#ifdef __cplusplus
}
#endif

View File

@ -1,221 +0,0 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdint.h>
#include "esp_attr.h"
#include "soc/soc_caps.h"
#include "xt_instr_macros.h"
#include "xtensa/config/specreg.h"
#include "xtensa/config/extreg.h"
#include "esp_bit_defs.h"
#include "xtensa/config/core.h"
#include "xtensa/xtruntime.h"
#ifdef __cplusplus
extern "C" {
#endif
static inline uint32_t IRAM_ATTR cpu_ll_get_core_id(void)
{
return 0;
}
static inline uint32_t IRAM_ATTR cpu_ll_get_cycle_count(void)
{
uint32_t result;
RSR(CCOUNT, result);
return result;
}
static inline void IRAM_ATTR cpu_ll_set_cycle_count(uint32_t val)
{
WSR(CCOUNT, val);
}
static inline void* cpu_ll_get_sp(void)
{
void *sp;
asm volatile ("mov %0, sp;" : "=r" (sp));
return sp;
}
static inline void cpu_ll_init_hwloop(void)
{
#if XCHAL_ERRATUM_572
uint32_t memctl = XCHAL_CACHE_MEMCTL_DEFAULT;
WSR(MEMCTL, memctl);
#endif // XCHAL_ERRATUM_572
}
static inline void cpu_ll_set_breakpoint(int id, uint32_t pc)
{
uint32_t en;
// Set the break address register to the appropriate PC
if (id) {
WSR(IBREAKA_1, pc);
} else {
WSR(IBREAKA_0, pc);
}
// Enable the breakpoint using the break enable register
RSR(IBREAKENABLE, en);
en |= BIT(id);
WSR(IBREAKENABLE, en);
}
static inline void cpu_ll_clear_breakpoint(int id)
{
uint32_t en = 0;
uint32_t pc = 0;
// Clear the break address register
if (id) {
WSR(IBREAKA_1, pc);
} else {
WSR(IBREAKA_0, pc);
}
// Disable the breakpoint in the break enable register
RSR(IBREAKENABLE, en);
en &= ~BIT(id);
WSR(IBREAKENABLE, en);
}
static inline uint32_t cpu_ll_ptr_to_pc(const void* addr)
{
return ((uint32_t) addr);
}
static inline void* cpu_ll_pc_to_ptr(uint32_t pc)
{
return (void*) ((pc & 0x3fffffffU) | 0x40000000U);
}
static inline void cpu_ll_set_watchpoint(int id,
const void* addr,
size_t size,
bool on_read,
bool on_write)
{
uint32_t dbreakc = 0x3F;
//We support watching 2^n byte values, from 1 to 64. Calculate the mask for that.
for (int x = 0; x < 7; x++) {
if (size == (size_t)(1U << x)) {
break;
}
dbreakc <<= 1;
}
dbreakc = (dbreakc & 0x3F);
if (on_read) {
dbreakc |= BIT(30);
}
if (on_write) {
dbreakc |= BIT(31);
}
// Write the break address register and the size to control
// register.
if (id) {
WSR(DBREAKA_1, (uint32_t) addr);
WSR(DBREAKC_1, dbreakc);
} else {
WSR(DBREAKA_0, (uint32_t) addr);
WSR(DBREAKC_0, dbreakc);
}
}
static inline void cpu_ll_clear_watchpoint(int id)
{
// Clear both break address register and control register
if (id) {
WSR(DBREAKA_1, 0);
WSR(DBREAKC_1, 0);
} else {
WSR(DBREAKA_0, 0);
WSR(DBREAKC_0, 0);
}
}
static inline bool cpu_ll_is_debugger_attached(void)
{
uint32_t dcr = 0;
uint32_t reg = DSRSET;
RER(reg, dcr);
return (dcr & 0x1);
}
static inline void cpu_ll_break(void)
{
__asm__ ("break 1,15");
}
static inline void cpu_ll_set_vecbase(const void* vecbase)
{
asm volatile ("wsr %0, vecbase" :: "r" (vecbase));
}
static inline uint32_t cpu_ll_read_dedic_gpio_in(void)
{
uint32_t value = 0;
asm volatile("get_gpio_in %0" : "=r"(value) : :);
return value;
}
static inline uint32_t cpu_ll_read_dedic_gpio_out(void)
{
uint32_t value = 0;
asm volatile("rur.gpio_out %0" : "=r"(value) : :);
return value;
}
static inline void cpu_ll_write_dedic_gpio_all(uint32_t value)
{
asm volatile("wur.gpio_out %0"::"r"(value):);
}
static inline void cpu_ll_write_dedic_gpio_mask(uint32_t mask, uint32_t value)
{
asm volatile("wr_mask_gpio_out %0, %1" : : "r"(value), "r"(mask):);
}
static inline void cpu_ll_waiti(void)
{
asm volatile ("waiti 0\n");
}
static inline void cpu_ll_compare_and_set_native(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
{
uint32_t old_value;
// No S32C1I, so do this by disabling and re-enabling interrupts (slower)
uint32_t intlevel;
__asm__ __volatile__ ("rsil %0, " XTSTR(XCHAL_EXCM_LEVEL) "\n"
: "=r"(intlevel));
old_value = *addr;
if (old_value == compare) {
*addr = *set;
}
__asm__ __volatile__ ("memw \n"
"wsr %0, ps\n"
:: "r"(intlevel));
*set = old_value;
}
#ifdef __cplusplus
}
#endif
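
A worked example of the DBREAKC mask computation above, using an illustrative 4-byte watch region:

/* Start from 0x3F (watch a single byte). For size == 4 the loop shifts twice:
 *   0x3F -> 0x7E -> 0xFC,  then 0xFC & 0x3F == 0x3C (0b111100)
 * so the two low address bits become "don't care", i.e. a 4-byte window.
 * BIT(30) / BIT(31) are then OR-ed in to trigger on loads and/or stores. */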

View File

@ -0,0 +1,41 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
static inline uint32_t dedic_gpio_cpu_ll_read_in(void)
{
uint32_t value = 0;
asm volatile("get_gpio_in %0" : "=r"(value) : :);
return value;
}
static inline uint32_t dedic_gpio_cpu_ll_read_out(void)
{
uint32_t value = 0;
asm volatile("rur.gpio_out %0" : "=r"(value) : :);
return value;
}
static inline void dedic_gpio_cpu_ll_write_all(uint32_t value)
{
asm volatile("wur.gpio_out %0"::"r"(value):);
}
static inline void dedic_gpio_cpu_ll_write_mask(uint32_t mask, uint32_t value)
{
asm volatile("wr_mask_gpio_out %0, %1" : : "r"(value), "r"(mask):);
}
#ifdef __cplusplus
}
#endif

View File

@ -1,107 +0,0 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <stdint.h>
#include "soc/soc_caps.h"
#include "soc/soc.h"
#include "xtensa/xtensa_api.h"
#include "xtensa/config/specreg.h"
#include "xt_instr_macros.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief enable interrupts specified by the mask
*
* @param mask bitmask of interrupts that needs to be enabled
*/
static inline void intr_cntrl_ll_enable_interrupts(uint32_t mask)
{
xt_ints_on(mask);
}
/**
* @brief disable interrupts specified by the mask
*
* @param mask bitmask of interrupts that needs to be disabled
*/
static inline void intr_cntrl_ll_disable_interrupts(uint32_t mask)
{
xt_ints_off(mask);
}
/**
* @brief Read the current interrupt mask of the CPU running this code.
*
* @return The current interrupt bitmask.
*/
static inline uint32_t intr_cntrl_ll_read_interrupt_mask(void)
{
uint32_t int_mask;
RSR(INTENABLE, int_mask);
return int_mask;
}
/**
* @brief checks if given interrupt number has a valid handler
*
* @param intr interrupt number ranged from 0 to 31
* @param cpu CPU number between 0 and SOC_CPU_CORES_NUM - 1
* @return true for valid handler, false otherwise
*/
static inline bool intr_cntrl_ll_has_handler(uint8_t intr, uint8_t cpu)
{
return xt_int_has_handler(intr, cpu);
}
/**
* @brief sets interrupt handler and optional argument of a given interrupt number
*
* @param intr interrupt number ranged from 0 to 31
* @param handler handler invoked when an interrupt occurs
* @param arg optional argument to pass to the handler
*/
static inline void intr_cntrl_ll_set_int_handler(uint8_t intr, interrupt_handler_t handler, void *arg)
{
xt_set_interrupt_handler(intr, (xt_handler)handler, arg);
}
/**
* @brief Gets argument passed to handler of a given interrupt number
*
* @param intr interrupt number ranged from 0 to 31
*
* @return argument used by handler of passed interrupt number
*/
static inline void *intr_cntrl_ll_get_int_handler_arg(uint8_t intr)
{
return xt_get_interrupt_handler_arg(intr);
}
/**
* @brief Acknowledge an edge-trigger interrupt by clearing its pending flag
*
* @param intr interrupt number ranged from 0 to 31
*/
static inline void intr_cntrl_ll_edge_int_acknowledge (int intr)
{
xthal_set_intclear(1 << intr);
}
#ifdef __cplusplus
}
#endif

View File

@ -1,32 +0,0 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "soc/soc.h"
#include "soc/rtc_cntl_reg.h"
#include "soc/soc_caps.h"
#include "soc/rtc.h"
#ifdef __cplusplus
extern "C" {
#endif
static inline void soc_ll_reset_core(int core)
{
SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, RTC_CNTL_SW_PROCPU_RST_M);
}
#ifdef __cplusplus
}
#endif

View File

@ -1,75 +0,0 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "sdkconfig.h"
#include "hal/interrupt_controller_hal.h"
#include "hal/interrupt_controller_ll.h"
#include "soc/soc_caps.h"
#include "soc/soc.h"
//We should mark the interrupt for the timer used by FreeRTOS as reserved. The specific timer
//is selectable using menuconfig; we use these cpp bits to convert that into something we can use in
//the table below.
#if CONFIG_FREERTOS_CORETIMER_0
#define INT6RES INTDESC_RESVD
#else
#define INT6RES INTDESC_SPECIAL
#endif
#if CONFIG_FREERTOS_CORETIMER_1
#define INT15RES INTDESC_RESVD
#else
#define INT15RES INTDESC_SPECIAL
#endif
//This is basically a software-readable version of the interrupt usage table in include/soc/soc.h
const static int_desc_t interrupt_descriptor_table [32]={
{ 1, INTTP_LEVEL, {INTDESC_RESVD} }, //0
{ 1, INTTP_LEVEL, {INTDESC_RESVD} }, //1
{ 1, INTTP_LEVEL, {INTDESC_NORMAL} }, //2
{ 1, INTTP_LEVEL, {INTDESC_NORMAL} }, //3
{ 1, INTTP_LEVEL, {INTDESC_RESVD} }, //4
{ 1, INTTP_LEVEL, {INTDESC_RESVD} }, //5
{ 1, INTTP_NA, {INT6RES} }, //6
{ 1, INTTP_NA, {INTDESC_SPECIAL}}, //7
{ 1, INTTP_LEVEL, {INTDESC_RESVD } }, //8
{ 1, INTTP_LEVEL, {INTDESC_NORMAL } }, //9
{ 1, INTTP_EDGE , {INTDESC_NORMAL } }, //10
{ 3, INTTP_NA, {INTDESC_SPECIAL }}, //11
{ 1, INTTP_LEVEL, {INTDESC_NORMAL } }, //12
{ 1, INTTP_LEVEL, {INTDESC_NORMAL} }, //13
{ 7, INTTP_LEVEL, {INTDESC_RESVD} }, //14, NMI
{ 3, INTTP_NA, {INT15RES} }, //15
{ 5, INTTP_NA, {INTDESC_SPECIAL } }, //16
{ 1, INTTP_LEVEL, {INTDESC_NORMAL } }, //17
{ 1, INTTP_LEVEL, {INTDESC_NORMAL } }, //18
{ 2, INTTP_LEVEL, {INTDESC_NORMAL } }, //19
{ 2, INTTP_LEVEL, {INTDESC_NORMAL } }, //20
{ 2, INTTP_LEVEL, {INTDESC_NORMAL } }, //21
{ 3, INTTP_EDGE, {INTDESC_RESVD } }, //22
{ 3, INTTP_LEVEL, {INTDESC_NORMAL } }, //23
{ 4, INTTP_LEVEL, {INTDESC_RESVD } }, //24
{ 4, INTTP_LEVEL, {INTDESC_RESVD } }, //25
{ 5, INTTP_LEVEL, {INTDESC_NORMAL } }, //26
{ 3, INTTP_LEVEL, {INTDESC_RESVD } }, //27
{ 4, INTTP_EDGE, {INTDESC_NORMAL } }, //28
{ 3, INTTP_NA, {INTDESC_SPECIAL }}, //29
{ 4, INTTP_EDGE, {INTDESC_RESVD } }, //30
{ 5, INTTP_LEVEL, {INTDESC_RESVD } }, //31
};
const int_desc_t *interrupt_controller_hal_desc_table(void)
{
return interrupt_descriptor_table;
}
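
A small sketch of how this table is consumed through the HAL accessor declared further down in this change; the helper name is hypothetical:

#include <stdbool.h>
#include "hal/interrupt_controller_hal.h"

// True if the given CPU interrupt line is free for general allocation on core 0.
static bool example_int_is_allocatable(int intr_num)
{
    const int_desc_t *table = interrupt_controller_hal_desc_table();
    return table[intr_num].cpuflags[0] == INTDESC_NORMAL;
}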

View File

@ -1,213 +0,0 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdint.h>
#include "soc/soc_caps.h"
#include "xt_instr_macros.h"
#include "xtensa/config/specreg.h"
#include "xtensa/config/extreg.h"
#include "esp_bit_defs.h"
#include "esp_attr.h"
#include "xtensa/config/core.h"
#ifdef __cplusplus
extern "C" {
#endif
static inline uint32_t IRAM_ATTR cpu_ll_get_core_id(void)
{
uint32_t id;
asm volatile (
"rsr.prid %0\n"
"extui %0,%0,13,1"
:"=r"(id));
return id;
}
static inline uint32_t IRAM_ATTR cpu_ll_get_cycle_count(void)
{
uint32_t result;
RSR(CCOUNT, result);
return result;
}
static inline void IRAM_ATTR cpu_ll_set_cycle_count(uint32_t val)
{
WSR(CCOUNT, val);
}
static inline void *cpu_ll_get_sp(void)
{
void *sp;
asm volatile ("mov %0, sp;" : "=r" (sp));
return sp;
}
static inline void cpu_ll_init_hwloop(void)
{
#if XCHAL_ERRATUM_572
uint32_t memctl = XCHAL_CACHE_MEMCTL_DEFAULT;
WSR(MEMCTL, memctl);
#endif // XCHAL_ERRATUM_572
}
static inline void cpu_ll_set_breakpoint(int id, uint32_t pc)
{
uint32_t en;
// Set the break address register to the appropriate PC
if (id) {
WSR(IBREAKA_1, pc);
} else {
WSR(IBREAKA_0, pc);
}
// Enable the breakpoint using the break enable register
RSR(IBREAKENABLE, en);
en |= BIT(id);
WSR(IBREAKENABLE, en);
}
static inline void cpu_ll_clear_breakpoint(int id)
{
uint32_t en = 0;
uint32_t pc = 0;
// Clear the break address register
if (id) {
WSR(IBREAKA_1, pc);
} else {
WSR(IBREAKA_0, pc);
}
// Disable the breakpoint in the break enable register
RSR(IBREAKENABLE, en);
en &= ~BIT(id);
WSR(IBREAKENABLE, en);
}
static inline uint32_t cpu_ll_ptr_to_pc(const void *addr)
{
return ((uint32_t) addr);
}
static inline void *cpu_ll_pc_to_ptr(uint32_t pc)
{
return (void *) ((pc & 0x3fffffff) | 0x40000000);
}
static inline void cpu_ll_set_watchpoint(int id,
const void *addr,
size_t size,
bool on_read,
bool on_write)
{
uint32_t dbreakc = 0x3F;
//We support watching 2^n byte values, from 1 to 64. Calculate the mask for that.
for (int x = 0; x < 7; x++) {
if (size == (size_t)(1U << x)) {
break;
}
dbreakc <<= 1;
}
dbreakc = (dbreakc & 0x3F);
if (on_read) {
dbreakc |= BIT(30);
}
if (on_write) {
dbreakc |= BIT(31);
}
// Write the break address register and the size to control
// register.
if (id) {
WSR(DBREAKA_1, (uint32_t) addr);
WSR(DBREAKC_1, dbreakc);
} else {
WSR(DBREAKA_0, (uint32_t) addr);
WSR(DBREAKC_0, dbreakc);
}
}
static inline void cpu_ll_clear_watchpoint(int id)
{
// Clear both break address register and control register
if (id) {
WSR(DBREAKA_1, 0);
WSR(DBREAKC_1, 0);
} else {
WSR(DBREAKA_0, 0);
WSR(DBREAKC_0, 0);
}
}
static inline bool cpu_ll_is_debugger_attached(void)
{
uint32_t dcr = 0;
uint32_t reg = DSRSET;
RER(reg, dcr);
return (dcr & 0x1);
}
static inline void cpu_ll_break(void)
{
__asm__ ("break 1,15");
}
static inline void cpu_ll_set_vecbase(const void *vecbase)
{
asm volatile ("wsr %0, vecbase" :: "r" (vecbase));
}
static inline void cpu_ll_waiti(void)
{
asm volatile ("waiti 0\n");
}
static inline uint32_t cpu_ll_read_dedic_gpio_in(void)
{
uint32_t value = 0;
asm volatile("ee.get_gpio_in %0" : "=r"(value) : :);
return value;
}
static inline uint32_t cpu_ll_read_dedic_gpio_out(void)
{
uint32_t value = 0;
asm volatile("rur.gpio_out %0" : "=r"(value) : :);
return value;
}
static inline void cpu_ll_write_dedic_gpio_all(uint32_t value)
{
asm volatile("wur.gpio_out %0"::"r"(value):);
}
static inline void cpu_ll_write_dedic_gpio_mask(uint32_t mask, uint32_t value)
{
asm volatile("ee.wr_mask_gpio_out %0, %1" : : "r"(value), "r"(mask):);
}
static inline void cpu_ll_compare_and_set_native(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
{
__asm__ __volatile__ (
"WSR %2,SCOMPARE1 \n"
"S32C1I %0, %1, 0 \n"
:"=r"(*set)
:"r"(addr), "r"(compare), "0"(*set)
);
}
#ifdef __cplusplus
}
#endif
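
A usage sketch for the breakpoint helpers above; the target function is hypothetical:

extern void example_function(void);   // hypothetical function to trap entry into

static void example_install_breakpoint(void)
{
    // Hardware breakpoint 0 fires when the CPU fetches the first instruction of
    // example_function(); without a debugger attached this raises a debug
    // exception that ends up in the panic handler.
    cpu_ll_set_breakpoint(0, cpu_ll_ptr_to_pc((const void *)&example_function));
}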

View File

@ -0,0 +1,50 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdint.h>
#include "soc/soc_caps.h"
#include "xt_instr_macros.h"
#include "xtensa/config/specreg.h"
#include "xtensa/config/extreg.h"
#include "esp_bit_defs.h"
#include "esp_attr.h"
#include "xtensa/config/core.h"
#ifdef __cplusplus
extern "C" {
#endif
static inline uint32_t dedic_gpio_cpu_ll_read_in(void)
{
uint32_t value = 0;
asm volatile("ee.get_gpio_in %0" : "=r"(value) : :);
return value;
}
static inline uint32_t dedic_gpio_cpu_ll_read_out(void)
{
uint32_t value = 0;
asm volatile("rur.gpio_out %0" : "=r"(value) : :);
return value;
}
static inline void dedic_gpio_cpu_ll_write_all(uint32_t value)
{
asm volatile("wur.gpio_out %0"::"r"(value):);
}
static inline void dedic_gpio_cpu_ll_write_mask(uint32_t mask, uint32_t value)
{
asm volatile("ee.wr_mask_gpio_out %0, %1" : : "r"(value), "r"(mask):);
}
#ifdef __cplusplus
}
#endif

View File

@ -1,107 +0,0 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <stdint.h>
#include "soc/soc_caps.h"
#include "soc/soc.h"
#include "xtensa/xtensa_api.h"
#include "xtensa/config/specreg.h"
#include "xt_instr_macros.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief enable interrupts specified by the mask
*
* @param mask bitmask of interrupts that needs to be enabled
*/
static inline void intr_cntrl_ll_enable_interrupts(uint32_t mask)
{
xt_ints_on(mask);
}
/**
* @brief disable interrupts specified by the mask
*
* @param mask bitmask of interrupts that needs to be disabled
*/
static inline void intr_cntrl_ll_disable_interrupts(uint32_t mask)
{
xt_ints_off(mask);
}
/**
* @brief Read the current interrupt mask of the CPU running this code.
*
* @return The current interrupt bitmask.
*/
static inline uint32_t intr_cntrl_ll_read_interrupt_mask(void)
{
uint32_t int_mask;
RSR(INTENABLE, int_mask);
return int_mask;
}
/**
* @brief checks if given interrupt number has a valid handler
*
* @param intr interrupt number ranged from 0 to 31
* @param cpu CPU number between 0 and SOC_CPU_CORES_NUM - 1
* @return true for valid handler, false otherwise
*/
static inline bool intr_cntrl_ll_has_handler(uint8_t intr, uint8_t cpu)
{
return xt_int_has_handler(intr, cpu);
}
/**
* @brief sets interrupt handler and optional argument of a given interrupt number
*
* @param intr interrupt number ranged from 0 to 31
* @param handler handler invoked when an interrupt occurs
* @param arg optional argument to pass to the handler
*/
static inline void intr_cntrl_ll_set_int_handler(uint8_t intr, interrupt_handler_t handler, void *arg)
{
xt_set_interrupt_handler(intr, (xt_handler)handler, arg);
}
/**
* @brief Gets argument passed to handler of a given interrupt number
*
* @param intr interrupt number ranged from 0 to 31
*
* @return argument used by handler of passed interrupt number
*/
static inline void *intr_cntrl_ll_get_int_handler_arg(uint8_t intr)
{
return xt_get_interrupt_handler_arg(intr);
}
/**
* @brief Acknowledge an edge-trigger interrupt by clearing its pending flag
*
* @param intr interrupt number ranged from 0 to 31
*/
static inline void intr_cntrl_ll_edge_int_acknowledge (int intr)
{
xthal_set_intclear(1 << intr);
}
#ifdef __cplusplus
}
#endif

View File

@ -1,54 +0,0 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "soc/soc.h"
#include "soc/rtc_cntl_reg.h"
#include "soc/soc_caps.h"
#include "soc/rtc.h"
#ifdef __cplusplus
extern "C" {
#endif
static inline void soc_ll_stall_core(int core)
{
const int rtc_cntl_c1_m[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C1_M, RTC_CNTL_SW_STALL_APPCPU_C1_M};
const int rtc_cntl_c1_s[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C1_S, RTC_CNTL_SW_STALL_APPCPU_C1_S};
const int rtc_cntl_c0_m[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C0_M, RTC_CNTL_SW_STALL_APPCPU_C0_M};
const int rtc_cntl_c0_s[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C0_S, RTC_CNTL_SW_STALL_APPCPU_C0_S};
CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1_m[core]);
SET_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, 0x21 << rtc_cntl_c1_s[core]);
CLEAR_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_c0_m[core]);
SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, 2 << rtc_cntl_c0_s[core]);
}
static inline void soc_ll_unstall_core(int core)
{
const int rtc_cntl_c1_m[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C1_M, RTC_CNTL_SW_STALL_APPCPU_C1_M};
const int rtc_cntl_c0_m[SOC_CPU_CORES_NUM] = {RTC_CNTL_SW_STALL_PROCPU_C0_M, RTC_CNTL_SW_STALL_APPCPU_C0_M};
CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1_m[core]);
CLEAR_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_c0_m[core]);
}
static inline void soc_ll_reset_core(int core)
{
SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG,
core == 0 ? RTC_CNTL_SW_PROCPU_RST_M : RTC_CNTL_SW_APPCPU_RST_M);
}
#ifdef __cplusplus
}
#endif

View File

@ -1,75 +0,0 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "sdkconfig.h"
#include "hal/interrupt_controller_hal.h"
#include "hal/interrupt_controller_ll.h"
#include "soc/soc_caps.h"
#include "soc/soc.h"
//We should mark the interrupt for the timer used by FreeRTOS as reserved. The specific timer
//is selectable using menuconfig; we use these cpp bits to convert that into something we can use in
//the table below.
#if CONFIG_FREERTOS_CORETIMER_0
#define INT6RES INTDESC_RESVD
#else
#define INT6RES INTDESC_SPECIAL
#endif
#if CONFIG_FREERTOS_CORETIMER_1
#define INT15RES INTDESC_RESVD
#else
#define INT15RES INTDESC_SPECIAL
#endif
//This is basically a software-readable version of the interrupt usage table in include/soc/soc.h
const static int_desc_t interrupt_descriptor_table [32]={
{ 1, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_RESVD } }, //0
{ 1, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_RESVD } }, //1
{ 1, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //2
{ 1, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //3
{ 1, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_NORMAL} }, //4
{ 1, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_RESVD } }, //5
{ 1, INTTP_NA, {INT6RES, INT6RES } }, //6
{ 1, INTTP_NA, {INTDESC_SPECIAL,INTDESC_SPECIAL}}, //7
{ 1, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_RESVD } }, //8
{ 1, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //9
{ 1, INTTP_EDGE , {INTDESC_NORMAL, INTDESC_NORMAL} }, //10
{ 3, INTTP_NA, {INTDESC_SPECIAL,INTDESC_SPECIAL}}, //11
{ 1, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //12
{ 1, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //13
{ 7, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_RESVD } }, //14, NMI
{ 3, INTTP_NA, {INT15RES, INT15RES } }, //15
{ 5, INTTP_NA, {INTDESC_SPECIAL,INTDESC_SPECIAL} }, //16
{ 1, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //17
{ 1, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //18
{ 2, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //19
{ 2, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //20
{ 2, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //21
{ 3, INTTP_EDGE, {INTDESC_RESVD, INTDESC_NORMAL} }, //22
{ 3, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_NORMAL} }, //23
{ 4, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_NORMAL} }, //24
{ 4, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_RESVD } }, //25
{ 5, INTTP_LEVEL, {INTDESC_NORMAL, INTDESC_RESVD } }, //26
{ 3, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_RESVD } }, //27
{ 4, INTTP_EDGE, {INTDESC_NORMAL, INTDESC_NORMAL} }, //28
{ 3, INTTP_NA, {INTDESC_SPECIAL,INTDESC_SPECIAL}}, //29
{ 4, INTTP_EDGE, {INTDESC_RESVD, INTDESC_RESVD } }, //30
{ 5, INTTP_LEVEL, {INTDESC_RESVD, INTDESC_RESVD } }, //31
};
const int_desc_t *interrupt_controller_hal_desc_table(void)
{
return interrupt_descriptor_table;
}

View File

@ -1,21 +0,0 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
typedef enum {
WATCHPOINT_TRIGGER_ON_RO, // on read
WATCHPOINT_TRIGGER_ON_WO, // on write
WATCHPOINT_TRIGGER_ON_RW // on either read or write
} watchpoint_trigger_t;
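
A sketch of how this enum maps onto the on_read/on_write flags accepted by the per-target cpu_ll_set_watchpoint() implementations above, assuming the old hal/cpu_ll.h is still in scope; the wrapper name is hypothetical:

#include <stdbool.h>
#include <stddef.h>
#include "hal/cpu_ll.h"

static inline void example_set_watchpoint(int id, const void *addr, size_t size,
                                          watchpoint_trigger_t trigger)
{
    bool on_read  = (trigger == WATCHPOINT_TRIGGER_ON_RO) || (trigger == WATCHPOINT_TRIGGER_ON_RW);
    bool on_write = (trigger == WATCHPOINT_TRIGGER_ON_WO) || (trigger == WATCHPOINT_TRIGGER_ON_RW);
    cpu_ll_set_watchpoint(id, addr, size, on_read, on_write);
}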

View File

@ -1,196 +0,0 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <stdbool.h>
#include "hal/interrupt_controller_types.h"
#include "hal/interrupt_controller_ll.h"
#include "soc/soc_caps.h"
#ifdef __cplusplus
extern "C" {
#endif
#ifndef SOC_CPU_HAS_FLEXIBLE_INTC
/**
* @brief Gets target platform interrupt descriptor table
*
* @return Address of interrupt descriptor table
*/
__attribute__((pure)) const int_desc_t *interrupt_controller_hal_desc_table(void);
#endif
/**
* @brief Gets the interrupt type given an interrupt number.
*
* @param interrupt_number Interrupt number 0 to 31
* @return interrupt type
*/
__attribute__((pure)) int_type_t interrupt_controller_hal_desc_type(int interrupt_number);
/**
* @brief Gets the interrupt level given an interrupt number.
*
* @param interrupt_number Interrupt number 0 to 31
* @return interrupt level bitmask
*/
__attribute__((pure)) int interrupt_controller_hal_desc_level(int interrupt_number);
/**
* @brief Gets the cpu flags given the interrupt number and target cpu.
*
* @param interrupt_number Interrupt number 0 to 31
* @param cpu_number CPU number between 0 and SOC_CPU_CORES_NUM - 1
* @return flags for that interrupt number
*/
__attribute__((pure)) int_desc_flag_t interrupt_controller_hal_desc_flags(int interrupt_number, int cpu_number);
/**
* @brief Gets the interrupt type given an interrupt number.
*
* @param interrupt_number Interrupt number 0 to 31
* @return interrupt type
*/
static inline int_type_t interrupt_controller_hal_get_type(int interrupt_number)
{
return interrupt_controller_hal_desc_type(interrupt_number);
}
/**
* @brief Gets the interrupt level given an interrupt number.
*
* @param interrupt_number Interrupt number 0 to 31
* @return interrupt level bitmask
*/
static inline int interrupt_controller_hal_get_level(int interrupt_number)
{
return interrupt_controller_hal_desc_level(interrupt_number);
}
#ifdef SOC_CPU_HAS_FLEXIBLE_INTC
/**
* @brief Set the type of an interrupt in the controller.
*
* @param interrupt_number Interrupt number 0 to 31
* @param type interrupt type as edge or level triggered
*/
static inline void interrupt_controller_hal_set_int_type(int intr, int_type_t type)
{
intr_cntrl_ll_set_int_type(intr, type);
}
/**
* @brief Sets the interrupt level in the interrupt controller.
*
* @param interrupt_number Interrupt number 0 to 31
* @param level priority between 1 (lowest) to 7 (highest)
*/
static inline void interrupt_controller_hal_set_int_level(int intr, int level)
{
intr_cntrl_ll_set_int_level(intr, level);
}
#endif
/**
* @brief Gets the cpu flags given the interrupt number and target cpu.
*
* @param interrupt_number Interrupt number 0 to 31
* @param cpu_number CPU number between 0 and SOC_CPU_CORES_NUM - 1
* @return flags for that interrupt number
*/
static inline uint32_t interrupt_controller_hal_get_cpu_desc_flags(int interrupt_number, int cpu_number)
{
return interrupt_controller_hal_desc_flags(interrupt_number, cpu_number);
}
/**
* @brief enable interrupts specified by the mask
*
* @param mask bitmask of interrupts that needs to be enabled
*/
static inline void interrupt_controller_hal_enable_interrupts(uint32_t mask)
{
intr_cntrl_ll_enable_interrupts(mask);
}
/**
* @brief disable interrupts specified by the mask
*
* @param mask bitmask of interrupts that needs to be disabled
*/
static inline void interrupt_controller_hal_disable_interrupts(uint32_t mask)
{
intr_cntrl_ll_disable_interrupts(mask);
}
/**
* @brief Read the current interrupt mask.
*
* @return The bitmask of current interrupts
*/
static inline uint32_t interrupt_controller_hal_read_interrupt_mask(void)
{
return intr_cntrl_ll_read_interrupt_mask();
}
/**
* @brief checks if given interrupt number has a valid handler
*
* @param intr interrupt number ranged from 0 to 31
* @param cpu CPU number between 0 and SOC_CPU_CORES_NUM - 1
* @return true for valid handler, false otherwise
*/
static inline bool interrupt_controller_hal_has_handler(int intr, int cpu)
{
return intr_cntrl_ll_has_handler(intr, cpu);
}
/**
* @brief sets interrupt handler and optional argument of a given interrupt number
*
* @param intr interrupt number ranged from 0 to 31
* @param handler handler invoked when an interrupt occurs
* @param arg optional argument to pass to the handler
*/
static inline void interrupt_controller_hal_set_int_handler(uint8_t intr, interrupt_handler_t handler, void *arg)
{
intr_cntrl_ll_set_int_handler(intr, handler, arg);
}
/**
* @brief Gets argument passed to handler of a given interrupt number
*
* @param intr interrupt number ranged from 0 to 31
*
* @return argument used by handler of passed interrupt number
*/
static inline void * interrupt_controller_hal_get_int_handler_arg(uint8_t intr)
{
return intr_cntrl_ll_get_int_handler_arg(intr);
}
/**
* @brief Acknowledge an edge-trigger interrupt by clearing its pending flag
*
* @param intr interrupt number ranged from 0 to 31
*/
static inline void interrupt_controller_hal_edge_int_acknowledge(int intr)
{
intr_cntrl_ll_edge_int_acknowledge(intr);
}
#ifdef __cplusplus
}
#endif
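
A usage sketch of the HAL calls declared above; in application code esp_intr_alloc() is the normal entry point, so the direct calls and the interrupt number here are purely illustrative:

#include <stddef.h>
#include <stdint.h>
#include "esp_bit_defs.h"
#include "hal/interrupt_controller_hal.h"

#define EXAMPLE_INTR_NUM  13   // hypothetical CPU interrupt line

static void example_isr(void *arg)
{
    (void)arg;
    // ... clear the peripheral's interrupt status, notify a task, etc. ...
}

static void example_install(void)
{
    interrupt_controller_hal_set_int_handler(EXAMPLE_INTR_NUM, example_isr, NULL);
    interrupt_controller_hal_enable_interrupts(BIT(EXAMPLE_INTR_NUM));
}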

View File

@ -1,46 +0,0 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "soc/soc_caps.h"
#include "soc/soc.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef enum {
INTDESC_NORMAL=0,
INTDESC_RESVD,
INTDESC_SPECIAL,
} int_desc_flag_t;
typedef enum {
INTTP_LEVEL=0,
INTTP_EDGE,
INTTP_NA,
} int_type_t;
typedef struct {
int level;
int_type_t type;
int_desc_flag_t cpuflags[SOC_CPU_CORES_NUM];
} int_desc_t;
typedef void (*interrupt_handler_t)(void *arg);
#ifdef __cplusplus
}
#endif

View File

@ -1,75 +0,0 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <stdint.h>
#include <stdbool.h>
#include "esp_err.h"
#include "soc/soc_caps.h"
#include "hal/cpu_hal.h"
#include "hal/soc_ll.h"
#ifdef __cplusplus
extern "C" {
#endif
#if SOC_CPU_CORES_NUM > 1
// Utility functions for multicore targets
#define __SOC_HAL_PERFORM_ON_OTHER_CORES(action) { \
for (uint32_t i = 0, cur = cpu_hal_get_core_id(); i < SOC_CPU_CORES_NUM; i++) { \
if (i != cur) { \
action(i); \
} \
} \
}
#define SOC_HAL_STALL_OTHER_CORES() __SOC_HAL_PERFORM_ON_OTHER_CORES(soc_hal_stall_core);
#define SOC_HAL_UNSTALL_OTHER_CORES() __SOC_HAL_PERFORM_ON_OTHER_CORES(soc_hal_unstall_core);
#define SOC_HAL_RESET_OTHER_CORES() __SOC_HAL_PERFORM_ON_OTHER_CORES(soc_hal_reset_core);
/**
* Stall the specified CPU core.
*
* @note Has no effect if the core is already stalled - does not return an
* ESP_ERR_INVALID_STATE.
*
* @param core core to stall [0..SOC_CPU_CORES_NUM - 1]
*/
void soc_hal_stall_core(int core);
/**
* Unstall the specified CPU core.
*
* @note Has no effect if the core is already unstalled - does not return an
* ESP_ERR_INVALID_STATE.
*
* @param core core to unstall [0..SOC_CPU_CORES_NUM - 1]
*/
void soc_hal_unstall_core(int core);
#endif // SOC_CPU_CORES_NUM > 1
/**
* Reset the specified core.
*
* @param core core to reset [0..SOC_CPU_CORES_NUM - 1]
*/
#define soc_hal_reset_core(core) soc_ll_reset_core((core))
#ifdef __cplusplus
}
#endif
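
For reference, a sketch of what the stall macros do on a dual-core target and how a caller might use them; the exclusive-section framing is an illustrative assumption, not lifted from this change:

#include "hal/soc_hal.h"

// SOC_HAL_STALL_OTHER_CORES() iterates over every core except the caller's
// (via cpu_hal_get_core_id()) and stalls it; the UNSTALL variant reverses it.
static void example_exclusive_section(void)
{
    SOC_HAL_STALL_OTHER_CORES();
    // ... do work that must not run concurrently on the other core ...
    SOC_HAL_UNSTALL_OTHER_CORES();
}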

View File

@ -1,73 +0,0 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "hal/interrupt_controller_hal.h"
#include "soc/soc_caps.h"
#if __riscv
#include "riscv/instruction_decode.h"
static bool is_interrupt_number_reserved(int interrupt_number)
{
// Workaround to reserve interrupt number 1 for Wi-Fi, 5 and 8 for Bluetooth, and 6 for the "permanently disabled interrupt"
// [TODO: IDF-2465]
const uint32_t reserved = BIT(1) | BIT(5) | BIT(6) | BIT(8);
if (reserved & BIT(interrupt_number)) {
return true;
}
extern int _vector_table;
extern int _interrupt_handler;
const intptr_t pc = (intptr_t)(&_vector_table + interrupt_number);
/* JAL instructions are relative to the PC they are executed from. */
const intptr_t destination = pc + riscv_decode_offset_from_jal_instruction(pc);
return destination != (intptr_t)&_interrupt_handler;
}
#endif
int_type_t interrupt_controller_hal_desc_type(int interrupt_number)
{
#ifndef SOC_CPU_HAS_FLEXIBLE_INTC
const int_desc_t *int_desc = interrupt_controller_hal_desc_table();
return (int_desc[interrupt_number].type);
#else
return (INTTP_NA);
#endif
}
int interrupt_controller_hal_desc_level(int interrupt_number)
{
#ifndef SOC_CPU_HAS_FLEXIBLE_INTC
const int_desc_t *int_desc = interrupt_controller_hal_desc_table();
return (int_desc[interrupt_number].level);
#else
return 1;
#endif
}
int_desc_flag_t interrupt_controller_hal_desc_flags(int interrupt_number, int cpu_number)
{
#ifndef SOC_CPU_HAS_FLEXIBLE_INTC
const int_desc_t *int_desc = interrupt_controller_hal_desc_table();
return (int_desc[interrupt_number].cpuflags[cpu_number]);
#else
#if __riscv
return is_interrupt_number_reserved(interrupt_number) ? INTDESC_RESVD : INTDESC_NORMAL;
#else
return INTDESC_NORMAL;
#endif
#endif
}
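
The deleted reservation check above works because every unclaimed slot in the vector table still holds a JAL into the common _interrupt_handler, so decoding the J-type immediate is enough to tell whether a slot has been retargeted. A self-contained sketch of that decode, assuming the standard RV32 JAL encoding (the helper name is illustrative; the project's real implementation is riscv_decode_offset_from_jal_instruction):

#include <stdint.h>

// Decode the signed branch offset encoded in a J-type (JAL) instruction word.
static int32_t jal_offset(uint32_t insn)
{
    uint32_t off = (((insn >> 31) & 0x1)   << 20) |  // imm[20]
                   (((insn >> 12) & 0xFF)  << 12) |  // imm[19:12]
                   (((insn >> 20) & 0x1)   << 11) |  // imm[11]
                   (((insn >> 21) & 0x3FF) << 1);    // imm[10:1]
    // Sign-extend from bit 20
    return (off & (1u << 20)) ? (int32_t)(off | 0xFFE00000u) : (int32_t)off;
}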

View File

@ -14,8 +14,6 @@ entries:
spi_flash_encrypt_hal_iram (noflash)
ledc_hal_iram (noflash)
i2c_hal_iram (noflash)
cpu_hal (noflash)
soc_hal (noflash)
if HAL_WDT_USE_ROM_IMPL = n:
wdt_hal_iram (noflash)
if SOC_SYSTIMER_SUPPORTED = y && HAL_SYSTIMER_USE_ROM_IMPL = n:

View File

@ -1,36 +0,0 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdint.h>
#include <stdlib.h>
#include "esp_err.h"
#include "hal/soc_hal.h"
#include "hal/soc_ll.h"
#include "hal/rtc_cntl_ll.h"
#include "soc/soc_caps.h"
#include "soc/rtc.h"
#if SOC_CPU_CORES_NUM > 1
void soc_hal_stall_core(int core)
{
soc_ll_stall_core(core);
}
void soc_hal_unstall_core(int core)
{
soc_ll_unstall_core(core);
}
#endif // SOC_CPU_CORES_NUM > 1

View File

@ -1,16 +1,8 @@
// Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
* SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
@ -77,24 +69,46 @@ void esprv_intc_int_enable(uint32_t unmask);
void esprv_intc_int_disable(uint32_t mask);
/**
* @brief Set interrupt type, level or edge
*
* @param int intr_num, interrupt number
*
* @param enum intr_type type, interrupt type, the level interrupt
can be cleared automatically once the interrupt source cleared, the edge interrupt should be clear by software after handled
*
* return none
*/
* @brief Set interrupt type
*
* Set the type of a particular interrupt (level or edge).
* - Level interrupts are cleared automatically once their interrupt source has
* been cleared
* - Edge interrupts must be cleared by software when they are handled.
*
* @param intr_num Interrupt number
* @param type Interrupt type
*/
void esprv_intc_int_set_type(int intr_num, enum intr_type type);
/**
* @brief Get the current type of an interrupt
*
* Get the current type of a particular interrupt (level or edge). An interrupt's
* type can be set by calling esprv_intc_int_set_type().
*
* @param intr_num Interrupt number
* @return Interrupt type
*/
enum intr_type esprv_intc_int_get_type(int intr_num);
/**
* Set interrupt priority in the interrupt controller
* @param rv_int_num CPU interrupt number
* @param priority Interrupt priority level, 1 to 7
* @param rv_int_num CPU interrupt number
* @param priority Interrupt priority level, 1 to 7
*/
void esprv_intc_int_set_priority(int rv_int_num, int priority);
/**
* @brief Get the current priority of an interrupt
*
* Get the current priority of an interrupt.
*
* @param rv_int_num CPU interrupt number
* @return Interrupt priority level, 1 to 7
*/
int esprv_intc_int_get_priority(int rv_int_num);
/**
* Set interrupt priority threshold.
* Interrupts with priority levels lower than the threshold are masked.
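
A minimal sketch of driving the setter/getter pairs documented above together; the interrupt number and priority are illustrative values, and on real hardware the CPU interrupt must also be routed from a peripheral source:

#include "riscv/interrupt.h"

// Hypothetical configuration of CPU interrupt 3 as a level-triggered,
// priority-5 line, followed by reading the settings back.
static void configure_cpu_intr3(void)
{
    esprv_intc_int_set_type(3, INTR_TYPE_LEVEL);
    esprv_intc_int_set_priority(3, 5);

    enum intr_type t = esprv_intc_int_get_type(3);      // INTR_TYPE_LEVEL
    int prio         = esprv_intc_int_get_priority(3);  // 5
    (void)t;
    (void)prio;
}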

View File

@ -0,0 +1,205 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdint.h>
#include "soc/soc_caps.h"
#include "soc/assist_debug_reg.h"
#include "soc/interrupt_core0_reg.h"
#include "esp_attr.h"
#include "riscv/csr.h"
#include "riscv/interrupt.h"
#ifdef __cplusplus
extern "C" {
#endif
/*performance counter*/
#define CSR_PCER_MACHINE 0x7e0
#define CSR_PCMR_MACHINE 0x7e1
#define CSR_PCCR_MACHINE 0x7e2
/* --------------------------------------------------- CPU Control -----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
FORCE_INLINE_ATTR void __attribute__((always_inline)) rv_utils_wait_for_intr(void)
{
asm volatile ("wfi\n");
}
/* -------------------------------------------------- CPU Registers ----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
FORCE_INLINE_ATTR __attribute__((pure)) uint32_t rv_utils_get_core_id(void)
{
#if SOC_CPU_CORES_NUM == 1
return 0; // No need to check core ID on single core hardware
#else
uint32_t cpuid;
cpuid = RV_READ_CSR(mhartid);
return cpuid;
#endif
}
FORCE_INLINE_ATTR void *rv_utils_get_sp(void)
{
void *sp;
asm volatile ("mv %0, sp;" : "=r" (sp));
return sp;
}
FORCE_INLINE_ATTR uint32_t __attribute__((always_inline)) rv_utils_get_cycle_count(void)
{
return RV_READ_CSR(CSR_PCCR_MACHINE);
}
FORCE_INLINE_ATTR void __attribute__((always_inline)) rv_utils_set_cycle_count(uint32_t ccount)
{
RV_WRITE_CSR(CSR_PCCR_MACHINE, ccount);
}
/* ------------------------------------------------- CPU Interrupts ----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
// ---------------- Interrupt Descriptors ------------------
// --------------- Interrupt Configuration -----------------
FORCE_INLINE_ATTR void rv_utils_set_mtvec(uint32_t mtvec_val)
{
mtvec_val |= 1; // Set MODE field to treat MTVEC as a vector base address
RV_WRITE_CSR(mtvec, mtvec_val);
}
// ------------------ Interrupt Control --------------------
FORCE_INLINE_ATTR void rv_utils_intr_enable(uint32_t intr_mask)
{
//Disable all interrupts to make updating of the interrupt mask atomic.
unsigned old_mstatus = RV_CLEAR_CSR(mstatus, MSTATUS_MIE);
esprv_intc_int_enable(intr_mask);
RV_SET_CSR(mstatus, old_mstatus & MSTATUS_MIE);
}
FORCE_INLINE_ATTR void rv_utils_intr_disable(uint32_t intr_mask)
{
//Disable all interrupts to make updating of the interrupt mask atomic.
unsigned old_mstatus = RV_CLEAR_CSR(mstatus, MSTATUS_MIE);
esprv_intc_int_disable(intr_mask);
RV_SET_CSR(mstatus, old_mstatus & MSTATUS_MIE);
}
FORCE_INLINE_ATTR uint32_t rv_utils_intr_get_enabled_mask(void)
{
return REG_READ(INTERRUPT_CORE0_CPU_INT_ENABLE_REG);
}
FORCE_INLINE_ATTR void rv_utils_intr_edge_ack(int intr_num)
{
REG_SET_BIT(INTERRUPT_CORE0_CPU_INT_CLEAR_REG, intr_num);
}
/* -------------------------------------------------- Memory Ports -----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
/* ---------------------------------------------------- Debugging ------------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
// --------------- Breakpoints/Watchpoints -----------------
FORCE_INLINE_ATTR void rv_utils_set_breakpoint(int bp_num, uint32_t bp_addr)
{
/* The code below sets a breakpoint which will trigger a `Breakpoint` exception
* instead of transferring control to the debugger. */
RV_WRITE_CSR(tselect, bp_num);
RV_SET_CSR(CSR_TCONTROL, TCONTROL_MTE);
RV_SET_CSR(CSR_TDATA1, TDATA1_USER | TDATA1_MACHINE | TDATA1_EXECUTE);
RV_WRITE_CSR(tdata2, bp_addr);
}
FORCE_INLINE_ATTR void rv_utils_clear_breakpoint(int bp_num)
{
RV_WRITE_CSR(tselect, bp_num);
RV_CLEAR_CSR(CSR_TCONTROL, TCONTROL_MTE);
RV_CLEAR_CSR(CSR_TDATA1, TDATA1_USER | TDATA1_MACHINE | TDATA1_EXECUTE);
}
FORCE_INLINE_ATTR void rv_utils_set_watchpoint(int wp_num,
uint32_t wp_addr,
size_t size,
bool on_read,
bool on_write)
{
RV_WRITE_CSR(tselect, wp_num);
RV_SET_CSR(CSR_TCONTROL, TCONTROL_MPTE | TCONTROL_MTE);
RV_SET_CSR(CSR_TDATA1, TDATA1_USER | TDATA1_MACHINE);
RV_SET_CSR_FIELD(CSR_TDATA1, (long unsigned int) TDATA1_MATCH, 1);
// Encode the watchpoint address and size in NAPOT (naturally aligned power-of-two) form
uint32_t addr_napot;
addr_napot = ((uint32_t) wp_addr) | ((size >> 1) - 1);
if (on_read) {
RV_SET_CSR(CSR_TDATA1, TDATA1_LOAD);
}
if (on_write) {
RV_SET_CSR(CSR_TDATA1, TDATA1_STORE);
}
RV_WRITE_CSR(tdata2, addr_napot);
}
FORCE_INLINE_ATTR void rv_utils_clear_watchpoint(int wp_num)
{
RV_WRITE_CSR(tselect, wp_num);
RV_CLEAR_CSR(CSR_TCONTROL, TCONTROL_MTE);
RV_CLEAR_CSR(CSR_TDATA1, TDATA1_USER | TDATA1_MACHINE);
RV_CLEAR_CSR_FIELD(CSR_TDATA1, (long unsigned int) TDATA1_MATCH);
RV_CLEAR_CSR(CSR_TDATA1, TDATA1_MACHINE);
RV_CLEAR_CSR(CSR_TDATA1, TDATA1_LOAD | TDATA1_STORE | TDATA1_EXECUTE);
}
// ---------------------- Debugger -------------------------
FORCE_INLINE_ATTR bool rv_utils_dbgr_is_attached(void)
{
return REG_GET_BIT(ASSIST_DEBUG_CORE_0_DEBUG_MODE_REG, ASSIST_DEBUG_CORE_0_DEBUG_MODULE_ACTIVE);
}
FORCE_INLINE_ATTR void rv_utils_dbgr_break(void)
{
asm volatile("ebreak\n");
}
/* ------------------------------------------------------ Misc ---------------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
FORCE_INLINE_ATTR bool rv_utils_compare_and_set(volatile uint32_t *addr, uint32_t compare_value, uint32_t new_value)
{
// Single core target has no atomic CAS instruction. We can achieve atomicity by disabling interrupts
unsigned old_mstatus;
old_mstatus = RV_CLEAR_CSR(mstatus, MSTATUS_MIE);
// Compare and set
uint32_t old_value;
old_value = *addr;
if (old_value == compare_value) {
*addr = new_value;
}
// Restore interrupts
RV_SET_CSR(mstatus, old_mstatus & MSTATUS_MIE);
return (old_value == compare_value);
}
#ifdef __cplusplus
}
#endif
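
On single-core RISC-V targets, rv_utils_compare_and_set() above emulates compare-and-swap by masking interrupts around the update; a minimal sketch of a lock built on top of it, assuming the new header is installed as riscv/rv_utils.h (the lock type and function names are illustrative):

#include <stdint.h>
#include "riscv/rv_utils.h"

typedef volatile uint32_t toy_lock_t;   // 0 = free, 1 = taken

static void toy_lock_acquire(toy_lock_t *lock)
{
    // Spin until the 0 -> 1 transition succeeds; the CAS itself is made
    // atomic by rv_utils_compare_and_set() masking interrupts.
    while (!rv_utils_compare_and_set(lock, 0, 1)) {
        // busy-wait; real code would yield or back off here
    }
}

static void toy_lock_release(toy_lock_t *lock)
{
    *lock = 0;
}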

View File

@ -1,19 +1,12 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdint.h>
#include <stddef.h>
#include <assert.h>
#include "soc/soc.h"
#include "riscv/interrupt.h"
#include "soc/interrupt_reg.h"
#include "riscv/csr.h"
@ -89,6 +82,20 @@ uint32_t esprv_intc_get_interrupt_unmask(void)
return REG_READ(INTERRUPT_CORE0_CPU_INT_ENABLE_REG);
}
/*************************** ESP-RV Interrupt Controller ***************************/
enum intr_type esprv_intc_int_get_type(int intr_num)
{
uint32_t intr_type_reg = REG_READ(INTERRUPT_CORE0_CPU_INT_TYPE_REG);
return (intr_type_reg & (1 << intr_num)) ? INTR_TYPE_EDGE : INTR_TYPE_LEVEL;
}
int esprv_intc_int_get_priority(int rv_int_num)
{
uint32_t intr_priority_reg = REG_READ(INTC_INT_PRIO_REG(rv_int_num));
return intr_priority_reg;
}
/*************************** Exception names. Used in .gdbinit file. ***************************/
const char *riscv_excp_names[16] __attribute__((used)) = {

View File

@ -63,10 +63,6 @@ config SOC_EMAC_SUPPORTED
bool
default y
config SOC_CPU_CORES_NUM
int
default 2
config SOC_ULP_SUPPORTED
bool
default y
@ -183,6 +179,18 @@ config SOC_SHARED_IDCACHE_SUPPORTED
bool
default y
config SOC_CPU_CORES_NUM
int
default 2
config SOC_CPU_INTR_NUM
int
default 32
config SOC_CPU_HAS_FPU
bool
default y
config SOC_CPU_BREAKPOINTS_NUM
int
default 2
@ -195,10 +203,6 @@ config SOC_CPU_WATCHPOINT_SIZE
int
default 64
config SOC_CPU_HAS_FPU
bool
default y
config SOC_DAC_PERIPH_NUM
int
default 2

View File

@ -75,7 +75,6 @@
#define SOC_SDIO_SLAVE_SUPPORTED 1
#define SOC_TWAI_SUPPORTED 1
#define SOC_EMAC_SUPPORTED 1
#define SOC_CPU_CORES_NUM 2
#define SOC_ULP_SUPPORTED 1
#define SOC_CCOMP_TIMER_SUPPORTED 1
#define SOC_RTC_FAST_MEM_SUPPORTED 1
@ -135,13 +134,14 @@
#define SOC_SHARED_IDCACHE_SUPPORTED 1 //Shared Cache for both instructions and data
/*-------------------------- CPU CAPS ----------------------------------------*/
#define SOC_CPU_CORES_NUM 2
#define SOC_CPU_INTR_NUM 32
#define SOC_CPU_HAS_FPU 1
#define SOC_CPU_BREAKPOINTS_NUM 2
#define SOC_CPU_WATCHPOINTS_NUM 2
#define SOC_CPU_WATCHPOINT_SIZE 64 // bytes
#define SOC_CPU_HAS_FPU 1
/*-------------------------- DAC CAPS ----------------------------------------*/
#define SOC_DAC_PERIPH_NUM 2
#define SOC_DAC_RESOLUTION 8 // DAC resolution ratio 8 bit
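
The SOC_CPU_* capability macros consolidated above (and mirrored in the generated Kconfig files) are plain compile-time constants, so target-dependent code sizes data structures and selects branches with them directly; a minimal sketch of that pattern, with illustrative names:

#include "soc/soc_caps.h"

// Size a per-core bookkeeping array from the capability macro and fall back
// to a single slot on single-core targets.
static int s_isr_count[SOC_CPU_CORES_NUM];

static void bump_isr_count(int core_id)
{
#if SOC_CPU_CORES_NUM > 1
    s_isr_count[core_id]++;
#else
    (void)core_id;
    s_isr_count[0]++;
#endif
}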

View File

@ -3,10 +3,6 @@
# using gen_soc_caps_kconfig.py, do not edit manually
#####################################################
config SOC_CPU_CORES_NUM
bool
default y
config SOC_ADC_SUPPORTED
bool
default y
@ -139,6 +135,18 @@ config SOC_SHARED_IDCACHE_SUPPORTED
bool
default y
config SOC_CPU_CORES_NUM
int
default 1
config SOC_CPU_INTR_NUM
int
default 32
config SOC_CPU_HAS_FLEXIBLE_INTC
bool
default y
config SOC_CPU_BREAKPOINTS_NUM
int
default 2
@ -147,10 +155,6 @@ config SOC_CPU_WATCHPOINTS_NUM
int
default 2
config SOC_CPU_HAS_FLEXIBLE_INTC
bool
default y
config SOC_CPU_WATCHPOINT_SIZE
hex
default 0x80000000

View File

@ -25,7 +25,6 @@
#pragma once
/*-------------------------- COMMON CAPS ---------------------------------------*/
#define SOC_CPU_CORES_NUM 1
#define SOC_ADC_SUPPORTED 1
#define SOC_DEDICATED_GPIO_SUPPORTED 1
#define SOC_GDMA_SUPPORTED 1
@ -75,10 +74,12 @@
#define SOC_SHARED_IDCACHE_SUPPORTED 1 //Shared Cache for both instructions and data
/*-------------------------- CPU CAPS ----------------------------------------*/
#define SOC_CPU_BREAKPOINTS_NUM 2
#define SOC_CPU_WATCHPOINTS_NUM 2
#define SOC_CPU_CORES_NUM (1U)
#define SOC_CPU_INTR_NUM 32
#define SOC_CPU_HAS_FLEXIBLE_INTC 1
#define SOC_CPU_BREAKPOINTS_NUM 2
#define SOC_CPU_WATCHPOINTS_NUM 2
#define SOC_CPU_WATCHPOINT_SIZE 0x80000000 // bytes
#define SOC_CPU_IDRAM_SPLIT_USING_PMP 1

View File

@ -3,10 +3,6 @@
# using gen_soc_caps_kconfig.py, do not edit manually
#####################################################
config SOC_CPU_CORES_NUM
bool
default y
config SOC_ADC_SUPPORTED
bool
default y
@ -203,6 +199,18 @@ config SOC_SHARED_IDCACHE_SUPPORTED
bool
default y
config SOC_CPU_CORES_NUM
int
default 1
config SOC_CPU_INTR_NUM
int
default 32
config SOC_CPU_HAS_FLEXIBLE_INTC
bool
default y
config SOC_CPU_BREAKPOINTS_NUM
int
default 8
@ -211,10 +219,6 @@ config SOC_CPU_WATCHPOINTS_NUM
int
default 8
config SOC_CPU_HAS_FLEXIBLE_INTC
bool
default y
config SOC_CPU_WATCHPOINT_SIZE
hex
default 0x80000000

View File

@ -25,7 +25,6 @@
#pragma once
/*-------------------------- COMMON CAPS ---------------------------------------*/
#define SOC_CPU_CORES_NUM 1
#define SOC_ADC_SUPPORTED 1
#define SOC_DEDICATED_GPIO_SUPPORTED 1
#define SOC_GDMA_SUPPORTED 1
@ -102,10 +101,12 @@
#define SOC_SHARED_IDCACHE_SUPPORTED 1 //Shared Cache for both instructions and data
/*-------------------------- CPU CAPS ----------------------------------------*/
#define SOC_CPU_BREAKPOINTS_NUM 8
#define SOC_CPU_WATCHPOINTS_NUM 8
#define SOC_CPU_CORES_NUM (1U)
#define SOC_CPU_INTR_NUM 32
#define SOC_CPU_HAS_FLEXIBLE_INTC 1
#define SOC_CPU_BREAKPOINTS_NUM 8
#define SOC_CPU_WATCHPOINTS_NUM 8
#define SOC_CPU_WATCHPOINT_SIZE 0x80000000 // bytes
/*-------------------------- DIGITAL SIGNATURE CAPS ----------------------------------------*/

View File

@ -3,10 +3,6 @@
# using gen_soc_caps_kconfig.py, do not edit manually
#####################################################
config SOC_CPU_CORES_NUM
bool
default y
config SOC_ADC_SUPPORTED
bool
default y
@ -191,6 +187,18 @@ config SOC_SHARED_IDCACHE_SUPPORTED
bool
default y
config SOC_CPU_CORES_NUM
int
default 1
config SOC_CPU_INTR_NUM
int
default 32
config SOC_CPU_HAS_FLEXIBLE_INTC
bool
default y
config SOC_CPU_BREAKPOINTS_NUM
int
default 8
@ -199,10 +207,6 @@ config SOC_CPU_WATCHPOINTS_NUM
int
default 8
config SOC_CPU_HAS_FLEXIBLE_INTC
bool
default y
config SOC_CPU_WATCHPOINT_SIZE
hex
default 0x80000000

View File

@ -33,7 +33,6 @@
#endif
/*-------------------------- COMMON CAPS ---------------------------------------*/
#define SOC_CPU_CORES_NUM 1
#define SOC_ADC_SUPPORTED 1
#define SOC_DEDICATED_GPIO_SUPPORTED 1
#define SOC_GDMA_SUPPORTED 1
@ -103,10 +102,12 @@
#define SOC_SHARED_IDCACHE_SUPPORTED 1 //Shared Cache for both instructions and data
/*-------------------------- CPU CAPS ----------------------------------------*/
#define SOC_CPU_BREAKPOINTS_NUM 8
#define SOC_CPU_WATCHPOINTS_NUM 8
#define SOC_CPU_CORES_NUM (1U)
#define SOC_CPU_INTR_NUM 32
#define SOC_CPU_HAS_FLEXIBLE_INTC 1
#define SOC_CPU_BREAKPOINTS_NUM 8
#define SOC_CPU_WATCHPOINTS_NUM 8
#define SOC_CPU_WATCHPOINT_SIZE 0x80000000 // bytes
/*-------------------------- DIGITAL SIGNATURE CAPS ----------------------------------------*/

View File

@ -19,10 +19,6 @@ config SOC_CP_DMA_SUPPORTED
bool
default y
config SOC_CPU_CORES_NUM
bool
default y
config SOC_DEDICATED_GPIO_SUPPORTED
bool
default y
@ -207,6 +203,14 @@ config SOC_CP_DMA_MAX_BUFFER_SIZE
int
default 4095
config SOC_CPU_CORES_NUM
int
default 1
config SOC_CPU_INTR_NUM
int
default 32
config SOC_CPU_BREAKPOINTS_NUM
int
default 2

View File

@ -43,7 +43,6 @@
#define SOC_DAC_SUPPORTED 1
#define SOC_TWAI_SUPPORTED 1
#define SOC_CP_DMA_SUPPORTED 1
#define SOC_CPU_CORES_NUM 1
#define SOC_DEDICATED_GPIO_SUPPORTED 1
#define SOC_SUPPORTS_SECURE_DL_MODE 1
#define SOC_RISCV_COPROC_SUPPORTED 1
@ -108,9 +107,11 @@
#define SOC_CP_DMA_MAX_BUFFER_SIZE (4095) /*!< Maximum size of the buffer that can be attached to descriptor */
/*-------------------------- CPU CAPS ----------------------------------------*/
#define SOC_CPU_CORES_NUM (1U)
#define SOC_CPU_INTR_NUM 32
#define SOC_CPU_BREAKPOINTS_NUM 2
#define SOC_CPU_WATCHPOINTS_NUM 2
#define SOC_CPU_WATCHPOINT_SIZE 64 // bytes
/*-------------------------- DAC CAPS ----------------------------------------*/

View File

@ -3,22 +3,6 @@
# using gen_soc_caps_kconfig.py, do not edit manually
#####################################################
config SOC_CPU_BREAKPOINTS_NUM
int
default 2
config SOC_CPU_WATCHPOINTS_NUM
int
default 2
config SOC_CPU_WATCHPOINT_SIZE
int
default 64
config SOC_CPU_HAS_FPU
bool
default y
config SOC_LEDC_SUPPORT_APB_CLOCK
bool
default y
@ -91,10 +75,6 @@ config SOC_DEDICATED_GPIO_SUPPORTED
bool
default y
config SOC_CPU_CORES_NUM
int
default 2
config SOC_CACHE_SUPPORT_WRAP
bool
default y
@ -283,6 +263,30 @@ config SOC_BROWNOUT_RESET_SUPPORTED
bool
default y
config SOC_CPU_CORES_NUM
int
default 2
config SOC_CPU_INTR_NUM
int
default 32
config SOC_CPU_HAS_FPU
bool
default y
config SOC_CPU_BREAKPOINTS_NUM
int
default 2
config SOC_CPU_WATCHPOINTS_NUM
int
default 2
config SOC_CPU_WATCHPOINT_SIZE
int
default 64
config SOC_DS_SIGNATURE_MAX_BIT_LEN
int
default 4096

View File

@ -1,22 +0,0 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#define SOC_CPU_BREAKPOINTS_NUM 2
#define SOC_CPU_WATCHPOINTS_NUM 2
#define SOC_CPU_WATCHPOINT_SIZE 64 // bytes
#define SOC_CPU_HAS_FPU 1

View File

@ -33,7 +33,6 @@
#define SOC_LCDCAM_SUPPORTED 1
#define SOC_MCPWM_SUPPORTED 1
#define SOC_DEDICATED_GPIO_SUPPORTED 1
#define SOC_CPU_CORES_NUM 2
#define SOC_CACHE_SUPPORT_WRAP 1
#define SOC_ULP_SUPPORTED 1
#define SOC_RISCV_COPROC_SUPPORTED 1
@ -102,7 +101,13 @@
#define SOC_BROWNOUT_RESET_SUPPORTED 1
/*-------------------------- CPU CAPS ----------------------------------------*/
#include "cpu_caps.h"
#define SOC_CPU_CORES_NUM 2
#define SOC_CPU_INTR_NUM 32
#define SOC_CPU_HAS_FPU 1
#define SOC_CPU_BREAKPOINTS_NUM 2
#define SOC_CPU_WATCHPOINTS_NUM 2
#define SOC_CPU_WATCHPOINT_SIZE 64 // bytes
/*-------------------------- DIGITAL SIGNATURE CAPS ----------------------------------------*/
/** The maximum length of a Digital Signature in bits. */

View File

@ -132,6 +132,16 @@
#define XCHAL_ERRATUM_497 0
#endif
/*
* Erratum 572 (releases TBD, but present in ESP32S3)
* Disable zero-overhead loop buffer to prevent rare illegal instruction
* exceptions while executing zero-overhead loops.
*/
#if ( XCHAL_HAVE_LOOPS && XCHAL_LOOP_BUFFER_SIZE != 0 )
#define XCHAL_ERRATUM_572 1
#else
#define XCHAL_ERRATUM_572 0
#endif
/*----------------------------------------------------------------------
ISA

View File

@ -0,0 +1,245 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdint.h>
#include "soc/soc_caps.h"
#include "xtensa/config/core-isa.h"
#include "xtensa/config/core.h"
#include "xtensa/config/extreg.h"
#include "xtensa/config/specreg.h"
#include "xtensa/xtruntime.h"
#include "xt_instr_macros.h"
#include "esp_bit_defs.h"
#include "esp_attr.h"
#ifdef __cplusplus
extern "C" {
#endif
/* -------------------------------------------------- CPU Registers ----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
FORCE_INLINE_ATTR __attribute__((pure)) uint32_t xt_utils_get_core_id(void)
{
/*
Note: We depend on SOC_CPU_CORES_NUM instead of XCHAL_HAVE_PRID as some single Xtensa targets (such as ESP32-S2) have
the PRID register even though they are single core.
*/
#if SOC_CPU_CORES_NUM > 1
// Read and extract bit 13 of special register PRID
uint32_t id;
asm volatile (
"rsr.prid %0\n"
"extui %0,%0,13,1"
:"=r"(id));
return id;
#else
return 0;
#endif // SOC_CPU_CORES_NUM > 1
}
FORCE_INLINE_ATTR __attribute__((pure)) uint32_t xt_utils_get_raw_core_id(void)
{
#if XCHAL_HAVE_PRID
// Read the raw value of special register PRID
uint32_t id;
asm volatile (
"rsr.prid %0\n"
:"=r"(id));
return id;
#else
return 0;
#endif // XCHAL_HAVE_PRID
}
FORCE_INLINE_ATTR void *xt_utils_get_sp(void)
{
void *sp;
asm volatile ("mov %0, sp;" : "=r" (sp));
return sp;
}
FORCE_INLINE_ATTR uint32_t xt_utils_get_cycle_count(void)
{
uint32_t ccount;
RSR(CCOUNT, ccount);
return ccount;
}
static inline void xt_utils_set_cycle_count(uint32_t ccount)
{
WSR(CCOUNT, ccount);
}
FORCE_INLINE_ATTR void xt_utils_wait_for_intr(void)
{
asm volatile ("waiti 0\n");
}
/* ------------------------------------------------- CPU Interrupts ----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
// ---------------- Interrupt Descriptors ------------------
// --------------- Interrupt Configuration -----------------
FORCE_INLINE_ATTR void xt_utils_set_vecbase(uint32_t vecbase)
{
asm volatile ("wsr %0, vecbase" :: "r" (vecbase));
}
// ------------------ Interrupt Control --------------------
FORCE_INLINE_ATTR uint32_t xt_utils_intr_get_enabled_mask(void)
{
uint32_t intr_mask;
RSR(INTENABLE, intr_mask);
return intr_mask;
}
/* -------------------------------------------------- Memory Ports -----------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
/* ---------------------------------------------------- Debugging ------------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
// --------------- Breakpoints/Watchpoints -----------------
FORCE_INLINE_ATTR void xt_utils_set_breakpoint(int bp_num, uint32_t bp_addr)
{
//Set the breakpoint's address
if (bp_num == 1) {
WSR(IBREAKA_1, bp_addr);
} else {
WSR(IBREAKA_0, bp_addr);
}
//Enable the breakpoint
uint32_t brk_ena_reg;
RSR(IBREAKENABLE, brk_ena_reg);
brk_ena_reg |= BIT(bp_num);
WSR(IBREAKENABLE, brk_ena_reg);
}
FORCE_INLINE_ATTR void xt_utils_clear_breakpoint(int bp_num)
{
// Disable the breakpoint using the break enable register
uint32_t bp_en = 0;
RSR(IBREAKENABLE, bp_en);
bp_en &= ~BIT(bp_num);
WSR(IBREAKENABLE, bp_en);
// Zero the break address register
uint32_t bp_addr = 0;
if (bp_num == 1) {
WSR(IBREAKA_1, bp_addr);
} else {
WSR(IBREAKA_0, bp_addr);
}
}
FORCE_INLINE_ATTR void xt_utils_set_watchpoint(int wp_num,
uint32_t wp_addr,
size_t size,
bool on_read,
bool on_write)
{
// Initialize the DBREAKC bits (see the DBREAKC register description in the Xtensa ISA reference manual, isa_rm.pdf)
uint32_t dbreakc_reg = 0x3F;
dbreakc_reg = dbreakc_reg << (__builtin_ffs(size) - 1);
dbreakc_reg = dbreakc_reg & 0x3F;
if (on_read) {
dbreakc_reg |= BIT(30);
}
if (on_write) {
dbreakc_reg |= BIT(31);
}
// Enable break address and break control register
if (wp_num == 1) {
WSR(DBREAKA_1, (uint32_t) wp_addr);
WSR(DBREAKC_1, dbreakc_reg);
} else {
WSR(DBREAKA_0, (uint32_t) wp_addr);
WSR(DBREAKC_0, dbreakc_reg);
}
}
FORCE_INLINE_ATTR void xt_utils_clear_watchpoint(int wp_num)
{
// Clear both break control and break address register
if (wp_num == 1) {
WSR(DBREAKC_1, 0);
WSR(DBREAKA_1, 0);
} else {
WSR(DBREAKC_0, 0);
WSR(DBREAKA_0, 0);
}
}
// ---------------------- Debugger -------------------------
FORCE_INLINE_ATTR bool xt_utils_dbgr_is_attached(void)
{
uint32_t dcr = 0;
uint32_t reg = DSRSET;
RER(reg, dcr);
return (bool)(dcr & 0x1);
}
FORCE_INLINE_ATTR void xt_utils_dbgr_break(void)
{
__asm__ ("break 1,15");
}
/* ------------------------------------------------------ Misc ---------------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
FORCE_INLINE_ATTR bool xt_utils_compare_and_set(volatile uint32_t *addr, uint32_t compare_value, uint32_t new_value)
{
#if XCHAL_HAVE_S32C1I
#ifdef __clang_analyzer__
//Teach clang-tidy that "addr" cannot be const as it can be updated by S32C1I instruction
volatile uint32_t temp;
temp = *addr;
*addr = temp;
#endif
// Atomic compare and set using S32C1I instruction
uint32_t old_value = new_value;
__asm__ __volatile__ (
"WSR %2, SCOMPARE1 \n"
"S32C1I %0, %1, 0 \n"
:"=r"(old_value)
:"r"(addr), "r"(compare_value), "0"(old_value)
);
return (old_value == compare_value);
#else // XCHAL_HAVE_S32C1I
// Single core target has no atomic CAS instruction. We can achieve atomicity by disabling interrupts
uint32_t intr_level;
__asm__ __volatile__ ("rsil %0, " XTSTR(XCHAL_EXCM_LEVEL) "\n"
: "=r"(intr_level));
// Compare and set
uint32_t old_value;
old_value = *addr;
if (old_value == compare_value) {
*addr = new_value;
}
// Restore interrupts
__asm__ __volatile__ ("memw \n"
"wsr %0, ps\n"
:: "r"(intr_level));
return (old_value == compare_value);
#endif // XCHAL_HAVE_S32C1I
}
#ifdef __cplusplus
}
#endif
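
A minimal sketch of timing a code section with the cycle-count helpers above, assuming the new header is reachable as xt_utils.h; converting cycles to time would additionally need the configured CPU frequency:

#include <stdint.h>
#include "xt_utils.h"

// Roughly measure how many CPU cycles a function takes. CCOUNT wraps around,
// so the unsigned subtraction stays correct across a single wrap.
static uint32_t cycles_spent(void (*fn)(void))
{
    uint32_t start = xt_utils_get_cycle_count();
    fn();
    return xt_utils_get_cycle_count() - start;
}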

View File

@ -251,6 +251,7 @@ PREDEFINED = \
_Static_assert()= \
IDF_DEPRECATED(X)= \
IRAM_ATTR= \
FORCE_INLINE_ATTR= \
configSUPPORT_DYNAMIC_ALLOCATION=1 \
configSUPPORT_STATIC_ALLOCATION=1 \
configQUEUE_REGISTRY_SIZE=1 \

View File

@ -86,7 +86,7 @@ For advanced users, they can always manipulate the GPIOs by writing assembly cod
1. Allocate a GPIO bundle: :cpp:func:`dedic_gpio_new_bundle`
2. Query the mask occupied by that bundle: :cpp:func:`dedic_gpio_get_out_mask` or/and :cpp:func:`dedic_gpio_get_in_mask`
3. Call CPU LL apis (e.g. `cpu_ll_write_dedic_gpio_mask`) or write assembly code with that mask
3. Call CPU LL APIs (e.g. `dedic_gpio_cpu_ll_write_mask`) or write assembly code with that mask
4. The fastest way of toggling IO is to use the dedicated "set/clear" instructions:
.. only:: esp32s2 or esp32s3
@ -113,7 +113,7 @@ For advanced users, they can always manipulate the GPIOs by writing assembly cod
For details of supported dedicated GPIO instructions, please refer to *{IDF_TARGET_NAME} Technical Reference Manual* > *ESP-RISC-V CPU* [`PDF <{IDF_TARGET_TRM_EN_URL}#riscvcpu>`__].
Some of the dedicated CPU instructions are also wrapped inside `hal/dedic_gpio_ll.h` as helper inline functions.
Some of the dedicated CPU instructions are also wrapped inside ``hal/dedic_gpio_cpu_ll.h`` as helper inline functions.
.. note::
Writing assembly code in the application could make your code hard to port between targets, because those customized instructions are not guaranteed to keep the same format on different targets.
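
A minimal sketch of the bundle workflow described in the documentation hunk above, on a target where outputs go through the CPU LL layer; the pin numbers are illustrative, error handling is omitted, and the calls must run on the core that owns the bundle:

#include <stdint.h>
#include "driver/dedic_gpio.h"
#include "hal/dedic_gpio_cpu_ll.h"

// Bundle two output pins, fetch the bundle's mask, then drive both pins
// through the dedicated-GPIO CPU instructions.
static void toggle_bundle_once(void)
{
    const int pins[] = {2, 3};
    dedic_gpio_bundle_handle_t bundle = NULL;
    dedic_gpio_bundle_config_t cfg = {
        .gpio_array = pins,
        .array_size = sizeof(pins) / sizeof(pins[0]),
        .flags = { .out_en = 1 },
    };
    dedic_gpio_new_bundle(&cfg, &bundle);

    uint32_t mask = 0;
    dedic_gpio_get_out_mask(bundle, &mask);

    dedic_gpio_cpu_ll_write_mask(mask, mask);  // set both pins high
    dedic_gpio_cpu_ll_write_mask(mask, 0);     // set both pins low
}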

View File

@ -248,3 +248,10 @@ LCD
- ``mcpwm_sync_enable`` is removed. To configure synchronization, please use :cpp:func:`mcpwm_sync_configure`.
- ``mcpwm_isr_register`` is removed. You can register event callbacks instead, e.g. for capture channels via :cpp:member:`mcpwm_capture_config_t::capture_cb`.
- ``mcpwm_carrier_oneshot_mode_disable`` is removed. Disabling the first pulse (a.k.a. the one-shot pulse) in the carrier is not supported by the hardware.
.. only:: SOC_DEDICATED_GPIO_SUPPORTED
Dedicated GPIO Driver
---------------------
- All of the dedicated GPIO-related LL functions in ``cpu_ll.h`` have been moved to ``dedic_gpio_cpu_ll.h`` and renamed.

View File

@ -113,7 +113,7 @@ GPIO bundle operations
For details of the supported dedicated GPIO instructions, please refer to the *{IDF_TARGET_NAME} Technical Reference Manual* > *ESP-RISC-V CPU* [`PDF <{IDF_TARGET_TRM_CN_URL}#riscvcpu>`__].
Some of the dedicated CPU instructions are also wrapped inside `hal/dedic_gpio_ll.h` as helper inline functions.
Some of the dedicated CPU instructions are also wrapped inside `hal/dedic_gpio_cpu_ll.h` as helper inline functions.
.. note::
Because the customized instructions may have different formats on different targets, writing assembly code in the application may make it hard to port the code between chip architectures.

View File

@ -766,35 +766,29 @@ components/freertos/FreeRTOS-Kernel-SMP/stream_buffer.c
components/freertos/FreeRTOS-Kernel-SMP/tasks.c
components/freertos/FreeRTOS-Kernel-SMP/timers.c
components/hal/aes_hal.c
components/hal/cpu_hal.c
components/hal/dac_hal.c
components/hal/ds_hal.c
components/hal/esp32/gpio_hal_workaround.c
components/hal/esp32/include/hal/aes_ll.h
components/hal/esp32/include/hal/can_hal.h
components/hal/esp32/include/hal/can_types.h
components/hal/esp32/include/hal/interrupt_controller_ll.h
components/hal/esp32/include/hal/mpu_ll.h
components/hal/esp32/include/hal/rtc_io_ll.h
components/hal/esp32/include/hal/rwdt_ll.h
components/hal/esp32/include/hal/sigmadelta_ll.h
components/hal/esp32/include/hal/soc_ll.h
components/hal/esp32/include/hal/spi_flash_encrypted_ll.h
components/hal/esp32/include/hal/touch_sensor_hal.h
components/hal/esp32/include/hal/trace_ll.h
components/hal/esp32/include/hal/uart_ll.h
components/hal/esp32/interrupt_descriptor_table.c
components/hal/esp32c3/hmac_hal.c
components/hal/esp32c3/include/hal/aes_ll.h
components/hal/esp32c3/include/hal/ds_ll.h
components/hal/esp32c3/include/hal/hmac_hal.h
components/hal/esp32c3/include/hal/hmac_ll.h
components/hal/esp32c3/include/hal/interrupt_controller_ll.h
components/hal/esp32c3/include/hal/mpu_ll.h
components/hal/esp32c3/include/hal/rtc_cntl_ll.h
components/hal/esp32c3/include/hal/sha_ll.h
components/hal/esp32c3/include/hal/sigmadelta_ll.h
components/hal/esp32c3/include/hal/soc_ll.h
components/hal/esp32c3/include/hal/spi_flash_encrypted_ll.h
components/hal/esp32c3/include/hal/systimer_ll.h
components/hal/esp32c3/include/hal/uhci_ll.h
@ -805,12 +799,10 @@ components/hal/esp32h2/include/hal/aes_ll.h
components/hal/esp32h2/include/hal/ds_ll.h
components/hal/esp32h2/include/hal/hmac_hal.h
components/hal/esp32h2/include/hal/hmac_ll.h
components/hal/esp32h2/include/hal/interrupt_controller_ll.h
components/hal/esp32h2/include/hal/mpu_ll.h
components/hal/esp32h2/include/hal/rtc_cntl_ll.h
components/hal/esp32h2/include/hal/sha_ll.h
components/hal/esp32h2/include/hal/sigmadelta_ll.h
components/hal/esp32h2/include/hal/soc_ll.h
components/hal/esp32h2/include/hal/spi_flash_encrypted_ll.h
components/hal/esp32h2/include/hal/uhci_ll.h
components/hal/esp32h2/include/hal/uhci_types.h
@ -823,42 +815,33 @@ components/hal/esp32s2/include/hal/crypto_dma_ll.h
components/hal/esp32s2/include/hal/dac_hal.h
components/hal/esp32s2/include/hal/dedic_gpio_ll.h
components/hal/esp32s2/include/hal/i2c_ll.h
components/hal/esp32s2/include/hal/interrupt_controller_ll.h
components/hal/esp32s2/include/hal/memprot_peri_ll.h
components/hal/esp32s2/include/hal/mpu_ll.h
components/hal/esp32s2/include/hal/rtc_io_ll.h
components/hal/esp32s2/include/hal/sha_ll.h
components/hal/esp32s2/include/hal/sigmadelta_ll.h
components/hal/esp32s2/include/hal/soc_ll.h
components/hal/esp32s2/include/hal/spi_flash_encrypted_ll.h
components/hal/esp32s2/include/hal/systimer_ll.h
components/hal/esp32s2/include/hal/trace_ll.h
components/hal/esp32s2/include/hal/usb_ll.h
components/hal/esp32s2/interrupt_descriptor_table.c
components/hal/esp32s2/touch_sensor_hal.c
components/hal/esp32s3/include/hal/aes_ll.h
components/hal/esp32s3/include/hal/interrupt_controller_ll.h
components/hal/esp32s3/include/hal/memprot_ll.h
components/hal/esp32s3/include/hal/mpu_ll.h
components/hal/esp32s3/include/hal/rwdt_ll.h
components/hal/esp32s3/include/hal/sha_ll.h
components/hal/esp32s3/include/hal/sigmadelta_ll.h
components/hal/esp32s3/include/hal/soc_ll.h
components/hal/esp32s3/include/hal/spi_flash_encrypted_ll.h
components/hal/esp32s3/include/hal/systimer_ll.h
components/hal/esp32s3/include/hal/uhci_ll.h
components/hal/esp32s3/include/hal/usb_ll.h
components/hal/esp32s3/include/hal/usb_serial_jtag_ll.h
components/hal/esp32s3/interrupt_descriptor_table.c
components/hal/include/hal/aes_hal.h
components/hal/include/hal/aes_types.h
components/hal/include/hal/cpu_types.h
components/hal/include/hal/dac_hal.h
components/hal/include/hal/dac_types.h
components/hal/include/hal/ds_hal.h
components/hal/include/hal/esp_flash_err.h
components/hal/include/hal/interrupt_controller_hal.h
components/hal/include/hal/interrupt_controller_types.h
components/hal/include/hal/mpu_hal.h
components/hal/include/hal/mpu_types.h
components/hal/include/hal/rtc_io_types.h
@ -866,7 +849,6 @@ components/hal/include/hal/sdio_slave_hal.h
components/hal/include/hal/sdio_slave_ll.h
components/hal/include/hal/sha_hal.h
components/hal/include/hal/sigmadelta_hal.h
components/hal/include/hal/soc_hal.h
components/hal/include/hal/spi_flash_encrypt_hal.h
components/hal/include/hal/spi_slave_hal.h
components/hal/include/hal/spi_slave_hd_hal.h
@ -877,7 +859,6 @@ components/hal/include/hal/usb_hal.h
components/hal/include/hal/usb_types_private.h
components/hal/include/hal/wdt_hal.h
components/hal/include/hal/wdt_types.h
components/hal/interrupt_controller_hal.c
components/hal/ledc_hal_iram.c
components/hal/mpu_hal.c
components/hal/platform_port/include/hal/check.h
@ -885,7 +866,6 @@ components/hal/platform_port/include/hal/misc.h
components/hal/rtc_io_hal.c
components/hal/sha_hal.c
components/hal/sigmadelta_hal.c
components/hal/soc_hal.c
components/hal/spi_flash_encrypt_hal_iram.c
components/hal/spi_flash_hal_gpspi.c
components/hal/spi_slave_hal.c
@ -1129,11 +1109,9 @@ components/pthread/test/test_pthread_local_storage.c
components/riscv/include/riscv/csr.h
components/riscv/include/riscv/encoding.h
components/riscv/include/riscv/instruction_decode.h
components/riscv/include/riscv/interrupt.h
components/riscv/include/riscv/riscv_interrupts.h
components/riscv/include/riscv/rvruntime-frames.h
components/riscv/instruction_decode.c
components/riscv/interrupt.c
components/sdmmc/sdmmc_common.c
components/sdmmc/sdmmc_common.h
components/sdmmc/sdmmc_init.c
@ -1404,7 +1382,6 @@ components/soc/esp32s3/include/soc/boot_mode.h
components/soc/esp32s3/include/soc/brownout_caps.h
components/soc/esp32s3/include/soc/clkout_channel.h
components/soc/esp32s3/include/soc/cpu.h
components/soc/esp32s3/include/soc/cpu_caps.h
components/soc/esp32s3/include/soc/extmem_reg.h
components/soc/esp32s3/include/soc/extmem_struct.h
components/soc/esp32s3/include/soc/fe_reg.h