/*
 * SPDX-FileCopyrightText: 2020-2024 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "sdkconfig.h"
#include <stdint.h>
#include <assert.h>
#include "soc/soc.h"
#include "soc/soc_caps.h"

// TODO: IDF-5645
#if CONFIG_IDF_TARGET_ESP32C6 || CONFIG_IDF_TARGET_ESP32H2 || CONFIG_IDF_TARGET_ESP32C5 || CONFIG_IDF_TARGET_ESP32C61
#include "soc/lp_aon_reg.h"
#include "soc/pcr_reg.h"
#define SYSTEM_CPU_PER_CONF_REG PCR_CPU_WAITI_CONF_REG
#define SYSTEM_CPU_WAIT_MODE_FORCE_ON PCR_CPU_WAIT_MODE_FORCE_ON
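// The aliases above map the PCR register/field names onto the legacy SYSTEM_CPU_*
// names, so the generic WAITI handling in esp_cpu_wait_for_intr() below works
// unchanged on these targets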
#elif CONFIG_IDF_TARGET_ESP32P4
#include "soc/lp_clkrst_reg.h"
#include "soc/pmu_reg.h"
#else
#include "soc/rtc_cntl_reg.h"
#endif

#include "hal/soc_hal.h"
#include "esp_bit_defs.h"
#include "esp_attr.h"
#include "esp_err.h"
#include "esp_cpu.h"
#if __XTENSA__
#include "xtensa/config/core-isa.h"
#else
#include "soc/system_reg.h"   // For SYSTEM_CPU_PER_CONF_REG
#include "soc/dport_access.h" // For DPORT access
#include "riscv/semihosting.h"
#endif

#if SOC_CPU_HAS_FLEXIBLE_INTC
#include "riscv/instruction_decode.h"
#endif

/* --------------------------------------------------- CPU Control -----------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

void esp_cpu_stall(int core_id)
{
    assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM > 1   // We don't allow stalling of the current core
#if CONFIG_IDF_TARGET_ESP32P4
    //TODO: IDF-7848
    if (core_id == 0) {
        REG_SET_FIELD(PMU_CPU_SW_STALL_REG, PMU_HPCORE0_SW_STALL_CODE, 0x86);
    } else {
        REG_SET_FIELD(PMU_CPU_SW_STALL_REG, PMU_HPCORE1_SW_STALL_CODE, 0x86);
    }
#else
    /*
    We need to write the value "0x86" to stall a particular core. The write location is split into two separate
    bit fields named "c0" and "c1", and the two fields are located in different registers. Each core has its own pair of
    "c0" and "c1" bit fields.

    Note: This function can be called when the cache is disabled. We use "ternary if" instead of an array so that the
    "rodata" of the register masks/shifts will be stored in this function's "rodata" section, instead of the source
    file's "rodata" section (see IDF-5214).
    */
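    /* For reference: 0x86 == 0b10000110. The upper six bits (0b100001 == 0x21) go into
       the "c1" field and the lower two bits (0b10 == 2) into the "c0" field, which is
       why the writes below use 0x21 and 2 respectively. */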
    int rtc_cntl_c0_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C0_M : RTC_CNTL_SW_STALL_APPCPU_C0_M;
    int rtc_cntl_c0_s = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C0_S : RTC_CNTL_SW_STALL_APPCPU_C0_S;
    int rtc_cntl_c1_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C1_M : RTC_CNTL_SW_STALL_APPCPU_C1_M;
    int rtc_cntl_c1_s = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C1_S : RTC_CNTL_SW_STALL_APPCPU_C1_S;
    CLEAR_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_c0_m);
    SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, 2 << rtc_cntl_c0_s);
    CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1_m);
    SET_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, 0x21 << rtc_cntl_c1_s);
#endif // CONFIG_IDF_TARGET_ESP32P4
#endif // SOC_CPU_CORES_NUM > 1
}

void esp_cpu_unstall(int core_id)
{
    assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM > 1   // We don't allow stalling of the current core
#if CONFIG_IDF_TARGET_ESP32P4
    //TODO: IDF-7848
    int pmu_core_stall_mask = (core_id == 0) ? PMU_HPCORE0_SW_STALL_CODE_M : PMU_HPCORE1_SW_STALL_CODE_M;
    CLEAR_PERI_REG_MASK(PMU_CPU_SW_STALL_REG, pmu_core_stall_mask);
#else
    /*
    We need to clear the value "0x86" to unstall a particular core. The location of this value is split into
    two separate bit fields named "c0" and "c1", and the two fields are located in different registers. Each core has
    its own pair of "c0" and "c1" bit fields.

    Note: This function can be called when the cache is disabled. We use "ternary if" instead of an array so that the
    "rodata" of the register masks/shifts will be stored in this function's "rodata" section, instead of the source
    file's "rodata" section (see IDF-5214).
    */
    int rtc_cntl_c0_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C0_M : RTC_CNTL_SW_STALL_APPCPU_C0_M;
    int rtc_cntl_c1_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C1_M : RTC_CNTL_SW_STALL_APPCPU_C1_M;
    CLEAR_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_c0_m);
    CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1_m);
#endif // CONFIG_IDF_TARGET_ESP32P4
#endif // SOC_CPU_CORES_NUM > 1
}
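
/*
 Usage sketch (illustrative, not part of this file): stall the other core while
 modifying state it must not touch concurrently, then release it. Assumes a
 two-core target.

     const int other_core = (esp_cpu_get_core_id() == 0) ? 1 : 0;
     esp_cpu_stall(other_core);
     // ... modify shared state ...
     esp_cpu_unstall(other_core);
*/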

void esp_cpu_reset(int core_id)
{
#if CONFIG_IDF_TARGET_ESP32P4
    //TODO: IDF-7848
    if (core_id == 0) {
        REG_SET_BIT(LP_CLKRST_HPCPU_RESET_CTRL0_REG, LP_CLKRST_HPCORE0_SW_RESET);
    } else {
        REG_SET_BIT(LP_CLKRST_HPCPU_RESET_CTRL0_REG, LP_CLKRST_HPCORE1_SW_RESET);
    }
#else
#if CONFIG_IDF_TARGET_ESP32C6 || CONFIG_IDF_TARGET_ESP32H2 || CONFIG_IDF_TARGET_ESP32C5 || CONFIG_IDF_TARGET_ESP32C61 // TODO: IDF-5645
    SET_PERI_REG_MASK(LP_AON_CPUCORE0_CFG_REG, LP_AON_CPU_CORE0_SW_RESET);
#else
    assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM > 1
    /*
    Note: This function can be called when the cache is disabled. We use "ternary if" instead of an array so that the
    "rodata" of the register masks/shifts will be stored in this function's "rodata" section, instead of the source
    file's "rodata" section (see IDF-5214).
    */
    int rtc_cntl_rst_m = (core_id == 0) ? RTC_CNTL_SW_PROCPU_RST_M : RTC_CNTL_SW_APPCPU_RST_M;
#else // SOC_CPU_CORES_NUM > 1
    int rtc_cntl_rst_m = RTC_CNTL_SW_PROCPU_RST_M;
#endif // SOC_CPU_CORES_NUM > 1
    SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_rst_m);
#endif
#endif // CONFIG_IDF_TARGET_ESP32P4
}

void esp_cpu_wait_for_intr(void)
{
#if __XTENSA__
    xt_utils_wait_for_intr();
#else
    //TODO: IDF-7848
#if !CONFIG_IDF_TARGET_ESP32P4
    // TODO: IDF-5645 (better to implement through the LL layer); the C6 register names
    // are aliased to the SYSTEM_CPU_* names in the #include section at the top
    if (esp_cpu_dbgr_is_attached() && DPORT_REG_GET_BIT(SYSTEM_CPU_PER_CONF_REG, SYSTEM_CPU_WAIT_MODE_FORCE_ON) == 0) {
        /* When SYSTEM_CPU_WAIT_MODE_FORCE_ON is disabled, the debugger's SBA accesses to
           memory do not work while the CPU is in WFI mode, so do not enter that mode when
           a debugger is connected */
        return;
    }
#endif
    rv_utils_wait_for_intr();
#endif // __XTENSA__
}

/* ---------------------------------------------------- Debugging ------------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

// --------------- Breakpoints/Watchpoints -----------------

#if SOC_CPU_BREAKPOINTS_NUM > 0
esp_err_t esp_cpu_set_breakpoint(int bp_num, const void *bp_addr)
{
    /*
    Todo:
    - Check that bp_num is in range
    */
#if __XTENSA__
    xt_utils_set_breakpoint(bp_num, (uint32_t)bp_addr);
#else
    if (esp_cpu_dbgr_is_attached()) {
        /* If we want to set a breakpoint which, when hit, transfers control to the debugger,
         * we need to set `action` in `mcontrol` to 1 (Enter Debug Mode).
         * That `action` value is supported only when `dmode` of `tdata1` is set.
         * But `dmode` can be modified only by the debugger (from Debug Mode).
         *
         * So when a debugger is connected, we use a special syscall to ask it to set the breakpoint for us.
         */
        long args[] = {true, bp_num, (long)bp_addr};
        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
        if (ret == 0) {
            return ESP_ERR_INVALID_RESPONSE;
        }
    } else {
        rv_utils_set_breakpoint(bp_num, (uint32_t)bp_addr);
    }
#endif // __XTENSA__
    return ESP_OK;
}
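
/*
 Usage sketch (illustrative): trap entry into a function using hardware breakpoint 0.
 `my_func` is a hypothetical symbol, not part of this file.

     extern void my_func(void);
     ESP_ERROR_CHECK(esp_cpu_set_breakpoint(0, (const void *)&my_func));
     // ... a call to my_func now raises a debug exception / stops the debugger ...
     ESP_ERROR_CHECK(esp_cpu_clear_breakpoint(0));
*/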

esp_err_t esp_cpu_clear_breakpoint(int bp_num)
{
    /*
    Todo:
    - Check if the bp_num is valid
    */
#if __XTENSA__
    xt_utils_clear_breakpoint(bp_num);
#else
    if (esp_cpu_dbgr_is_attached()) {
        // See description in esp_cpu_set_breakpoint()
        long args[] = {false, bp_num};
        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
        if (ret == 0) {
            return ESP_ERR_INVALID_RESPONSE;
        }
    } else {
        rv_utils_clear_breakpoint(bp_num);
    }
#endif // __XTENSA__
    return ESP_OK;
}
#endif // SOC_CPU_BREAKPOINTS_NUM > 0

#if SOC_CPU_WATCHPOINTS_NUM > 0
esp_err_t esp_cpu_set_watchpoint(int wp_num, const void *wp_addr, size_t size, esp_cpu_watchpoint_trigger_t trigger)
{
    /*
    Todo:
    - Check if the wp_num is already in use
    */
    if (wp_num < 0 || wp_num >= SOC_CPU_WATCHPOINTS_NUM) {
        return ESP_ERR_INVALID_ARG;
    }

    // Check that the watched region's start address is naturally aligned to the size of the region
    if ((uint32_t)wp_addr % size) {
        return ESP_ERR_INVALID_ARG;
    }

    // Check that the size is a power of two and in the range [1 ... SOC_CPU_WATCHPOINT_MAX_REGION_SIZE]
    if (size < 1 || size > SOC_CPU_WATCHPOINT_MAX_REGION_SIZE || (size & (size - 1)) != 0) {
        return ESP_ERR_INVALID_ARG;
    }
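    /* Example of the constraints above: watching a uint32_t requires size == 4 and a
       4-byte-aligned wp_addr; a size of 3, or a 4-byte region starting at an address
       ending in 0x2, would be rejected. */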
    bool on_read = (trigger == ESP_CPU_WATCHPOINT_LOAD || trigger == ESP_CPU_WATCHPOINT_ACCESS);
    bool on_write = (trigger == ESP_CPU_WATCHPOINT_STORE || trigger == ESP_CPU_WATCHPOINT_ACCESS);
#if __XTENSA__
    xt_utils_set_watchpoint(wp_num, (uint32_t)wp_addr, size, on_read, on_write);
#else
    if (esp_cpu_dbgr_is_attached()) {
        // See description in esp_cpu_set_breakpoint()
        long args[] = {true, wp_num, (long)wp_addr, (long)size,
                       (long)((on_read ? ESP_SEMIHOSTING_WP_FLG_RD : 0) | (on_write ? ESP_SEMIHOSTING_WP_FLG_WR : 0))
                      };
        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
        if (ret == 0) {
            return ESP_ERR_INVALID_RESPONSE;
        }
    } else {
        rv_utils_set_watchpoint(wp_num, (uint32_t)wp_addr, size, on_read, on_write);
    }
#endif // __XTENSA__
    return ESP_OK;
}
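
/*
 Usage sketch (illustrative): watch a 4-byte variable for writes using watchpoint 0.
 `s_guarded` is a hypothetical variable, not part of this file.

     static uint32_t s_guarded;
     ESP_ERROR_CHECK(esp_cpu_set_watchpoint(0, &s_guarded, sizeof(s_guarded),
                                            ESP_CPU_WATCHPOINT_STORE));
     // ... any store to s_guarded now triggers a debug exception ...
     ESP_ERROR_CHECK(esp_cpu_clear_watchpoint(0));
*/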

esp_err_t esp_cpu_clear_watchpoint(int wp_num)
{
    /*
    Todo:
    - Check if the wp_num is valid
    */
#if __XTENSA__
    xt_utils_clear_watchpoint(wp_num);
#else
    if (esp_cpu_dbgr_is_attached()) {
        // See description in esp_cpu_set_breakpoint()
        long args[] = {false, wp_num};
        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
        if (ret == 0) {
            return ESP_ERR_INVALID_RESPONSE;
        }
    } else {
        rv_utils_clear_watchpoint(wp_num);
    }
#endif // __XTENSA__
    return ESP_OK;
}
#endif // SOC_CPU_WATCHPOINTS_NUM > 0

/* ------------------------------------------------------ Misc ---------------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

#if __XTENSA__ && XCHAL_HAVE_S32C1I && CONFIG_SPIRAM
static DRAM_ATTR uint32_t external_ram_cas_lock = 0;
#endif

bool esp_cpu_compare_and_set(volatile uint32_t *addr, uint32_t compare_value, uint32_t new_value)
{
#if __XTENSA__
    bool ret;
#if XCHAL_HAVE_S32C1I && CONFIG_SPIRAM
    // Check if the target address is in external RAM
    if ((uint32_t)addr >= SOC_EXTRAM_DATA_LOW && (uint32_t)addr < SOC_EXTRAM_DATA_HIGH) {
        /* The target address is in external RAM, thus the native CAS instruction cannot be used. Instead, we achieve
           atomicity by disabling interrupts and then acquiring an external RAM CAS lock. */
        uint32_t intr_level;
        __asm__ __volatile__ ("rsil %0, " XTSTR(XCHAL_EXCM_LEVEL) "\n"
                              : "=r"(intr_level));
        if (!xt_utils_compare_and_set(&external_ram_cas_lock, 0, 1)) {
            // External RAM CAS lock already taken. Exit
            ret = false;
            goto exit;
        }
        // Now we compare and set the target address
        ret = (*addr == compare_value);
        if (ret) {
            *addr = new_value;
        }
        // Release the external RAM CAS lock
        external_ram_cas_lock = 0;
exit:
        // Restore the previous interrupt level
        __asm__ __volatile__ ("memw \n"
                              "wsr %0, ps\n"
                              :: "r"(intr_level));
    } else
#endif // XCHAL_HAVE_S32C1I && CONFIG_SPIRAM
    {
        // The target address is in internal RAM. Use the CPU's native CAS instruction
        ret = xt_utils_compare_and_set(addr, compare_value, new_value);
    }
    return ret;
#else // __riscv
    return rv_utils_compare_and_set(addr, compare_value, new_value);
#endif
}
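
/*
 Usage sketch (illustrative): esp_cpu_compare_and_set() as the primitive of a simple
 spinlock. `s_lock` is a hypothetical variable, not part of this file.

     static volatile uint32_t s_lock = 0;
     while (!esp_cpu_compare_and_set(&s_lock, 0, 1)) {
         // spin: another core (or context) holds the lock
     }
     // ... critical section ...
     s_lock = 0; // release
*/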