/*
 * SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#pragma once

#include "sdkconfig.h"
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>
#include "esp_cpu.h"

#if __XTENSA__
#include "xtensa/xtruntime.h"
#include "xt_utils.h"
#else
#include "riscv/rv_utils.h"
#endif

#ifdef __cplusplus
extern "C" {
#endif

#ifdef CONFIG_SPIRAM_WORKAROUND_NEED_VOLATILE_SPINLOCK
#define NEED_VOLATILE_MUX volatile
#else
#define NEED_VOLATILE_MUX
#endif

#define SPINLOCK_FREE          0xB33FFFFF
#define SPINLOCK_WAIT_FOREVER  (-1)
#define SPINLOCK_NO_WAIT       0
#define SPINLOCK_INITIALIZER   {.owner = SPINLOCK_FREE, .count = 0}

#define SPINLOCK_OWNER_ID_0 0xCDCD /* Use these values to avoid 0 being a valid lock owner, same as CORE_ID_REGVAL_PRO on Xtensa */
#define SPINLOCK_OWNER_ID_1 0xABAB /* Same as CORE_ID_REGVAL_APP on Xtensa */

#define CORE_ID_REGVAL_XOR_SWAP (0xCDCD ^ 0xABAB)
#define SPINLOCK_OWNER_ID_XOR_SWAP CORE_ID_REGVAL_XOR_SWAP
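
/*
 * Illustrative note: XOR-ing one core's owner ID with CORE_ID_REGVAL_XOR_SWAP
 * yields the other core's ID, since 0xCDCD ^ (0xCDCD ^ 0xABAB) = 0xABAB and
 * 0xABAB ^ (0xCDCD ^ 0xABAB) = 0xCDCD. spinlock_acquire() below uses this to
 * derive other_core_owner_id without branching on the core ID.
 */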

typedef struct {
    NEED_VOLATILE_MUX uint32_t owner;   // Current owner: SPINLOCK_FREE, or the owner ID of the core holding the lock
    NEED_VOLATILE_MUX uint32_t count;   // Number of recursive acquisitions by the current owner
} spinlock_t;

/**
 * @brief Initialize a lock to its default state - unlocked
 * @param lock - spinlock object to initialize
 */
static inline void __attribute__((always_inline)) spinlock_initialize(spinlock_t *lock)
{
    assert(lock);
#if !CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE
    lock->owner = SPINLOCK_FREE;
    lock->count = 0;
#endif
}
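
/*
 * Example (illustrative sketch, hypothetical variable names): a spinlock can
 * be created either with the static initializer or at runtime.
 *
 *     static spinlock_t s_lock = SPINLOCK_INITIALIZER;  // static initialization
 *
 *     spinlock_t lock;
 *     spinlock_initialize(&lock);                       // runtime initialization
 */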

/**
 * @brief Top level spinlock acquire function, spins until the lock is acquired
 *
 * This function will:
 * - Save current interrupt state, then disable interrupts
 * - Spin until lock is acquired or until timeout occurs
 * - Restore interrupt state
 *
 * @note Spinlocks alone do not constitute true critical sections (as this
 * function reenables interrupts once the spinlock is acquired). For critical
 * sections, use the interface provided by the operating system.
 * @param lock - target spinlock object
 * @param timeout - cycles to wait; passing SPINLOCK_WAIT_FOREVER blocks indefinitely,
 *                  while SPINLOCK_NO_WAIT attempts to take the lock only once
 * @return true if the lock was acquired, false if the timeout expired first
 */
static inline bool __attribute__((always_inline)) spinlock_acquire(spinlock_t *lock, int32_t timeout)
{
#if !CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE && !BOOTLOADER_BUILD
    uint32_t irq_status;
    uint32_t core_owner_id, other_core_owner_id;
    bool lock_set;
    esp_cpu_cycle_count_t start_count;

    assert(lock);
#if __XTENSA__
    irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);

    // Note: The core IDs are the full 32 bit (CORE_ID_REGVAL_PRO/CORE_ID_REGVAL_APP) values
    core_owner_id = xt_utils_get_raw_core_id();
#else //__riscv
    irq_status = rv_utils_set_intlevel_regval(RVHAL_EXCM_LEVEL_CLIC);
    core_owner_id = rv_utils_get_core_id() == 0 ? SPINLOCK_OWNER_ID_0 : SPINLOCK_OWNER_ID_1;
#endif
    other_core_owner_id = CORE_ID_REGVAL_XOR_SWAP ^ core_owner_id;

    /* lock->owner should be one of SPINLOCK_FREE, CORE_ID_REGVAL_PRO,
     * or CORE_ID_REGVAL_APP:
     * - If SPINLOCK_FREE, we want to atomically set it to 'core_owner_id'.
     * - If it is "our" core_owner_id, we can drop through immediately.
     * - If it is "other_core_owner_id", we spin here.
     */

    // The caller is already the owner of the lock. Simply increment the nesting count
    if (lock->owner == core_owner_id) {
        assert(lock->count > 0 && lock->count < 0xFF); // Bad count value implies memory corruption
        lock->count++;
#if __XTENSA__
        XTOS_RESTORE_INTLEVEL(irq_status);
#else
        rv_utils_restore_intlevel_regval(irq_status);
#endif
        return true;
    }

    /* First attempt to take the lock.
     *
     * Note: We do the first attempt separately (instead of putting it into the loop below) in order to avoid a
     * call to esp_cpu_get_cycle_count(). Doing the first attempt separately makes acquiring a free lock quicker,
     * which is the case for the majority of spinlock_acquire() calls (as spinlocks are free most of the time
     * since they aren't meant to be held for long).
     */
    lock_set = esp_cpu_compare_and_set(&lock->owner, SPINLOCK_FREE, core_owner_id);
    if (lock_set || timeout == SPINLOCK_NO_WAIT) {
        // We've successfully taken the lock, or we are not retrying
        goto exit;
    }

    // First attempt to take the lock has failed. Retry until the lock is taken, or until we timeout.
    start_count = esp_cpu_get_cycle_count();
    do {
        lock_set = esp_cpu_compare_and_set(&lock->owner, SPINLOCK_FREE, core_owner_id);
        if (lock_set) {
            break;
        }
        // Keep looping if we are waiting forever, or check if we have timed out
    } while ((timeout == SPINLOCK_WAIT_FOREVER) || (esp_cpu_get_cycle_count() - start_count) <= timeout);

exit:
    if (lock_set) {
        assert(lock->owner == core_owner_id);
        assert(lock->count == 0); // This is the first time the lock is set, so count should still be 0
        lock->count++; // Finally, we increment the lock count
    } else { // We timed out waiting for lock
        assert(lock->owner == SPINLOCK_FREE || lock->owner == other_core_owner_id);
        assert(lock->count < 0xFF); // Bad count value implies memory corruption
    }

#if __XTENSA__
    XTOS_RESTORE_INTLEVEL(irq_status);
#else
    rv_utils_restore_intlevel_regval(irq_status);
#endif
    return lock_set;

#else // !CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE && !BOOTLOADER_BUILD
    return true;
#endif
}
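
/*
 * Example (illustrative sketch, hypothetical names): acquiring a spinlock with
 * and without a timeout. spinlock_acquire() returns false only if the timeout
 * expires before the lock is taken.
 *
 *     static spinlock_t s_lock = SPINLOCK_INITIALIZER;
 *
 *     // Block until the lock is ours
 *     spinlock_acquire(&s_lock, SPINLOCK_WAIT_FOREVER);
 *
 *     // Or, try for roughly 1000 CPU cycles and handle failure
 *     if (!spinlock_acquire(&s_lock, 1000)) {
 *         // Timed out: the lock is still held by the other core
 *     }
 */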

/**
 * @brief Top level spinlock unlock function, unlocks a previously locked spinlock
 *
 * This function will:
 * - Save current interrupt state, then disable interrupts
 * - Release the spinlock
 * - Restore interrupt state
 *
 * @note Spinlocks alone do not constitute true critical sections (as this
 * function reenables interrupts once the spinlock is released). For critical
 * sections, use the interface provided by the operating system.
 * @param lock - target spinlock object (must have been acquired by the calling core)
 */
static inline void __attribute__((always_inline)) spinlock_release(spinlock_t *lock)
{
#if !CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE && !BOOTLOADER_BUILD
    uint32_t irq_status;
    uint32_t core_owner_id;

    assert(lock);
#if __XTENSA__
    irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);

    core_owner_id = xt_utils_get_raw_core_id();
#else
    irq_status = rv_utils_set_intlevel_regval(RVHAL_EXCM_LEVEL_CLIC);
    core_owner_id = rv_utils_get_core_id() == 0 ? SPINLOCK_OWNER_ID_0 : SPINLOCK_OWNER_ID_1;
#endif
    assert(core_owner_id == lock->owner); // This is a lock that we didn't acquire, or the lock is corrupt
    lock->count--;

    if (!lock->count) { // If this is the last recursive release of the lock, mark the lock as free
        lock->owner = SPINLOCK_FREE;
    } else {
        assert(lock->count < 0x100); // Indicates memory corruption
    }

#if __XTENSA__
    XTOS_RESTORE_INTLEVEL(irq_status);
#else
    rv_utils_restore_intlevel_regval(irq_status);
#endif //#if __XTENSA__
#endif //#if !CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE && !BOOTLOADER_BUILD
}
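
/*
 * Example (illustrative sketch, hypothetical names): acquires and releases must
 * be balanced. The same core may take the lock recursively; it only becomes
 * free again once every acquire has been matched by a release.
 *
 *     spinlock_acquire(&s_lock, SPINLOCK_WAIT_FOREVER); // count: 0 -> 1
 *     spinlock_acquire(&s_lock, SPINLOCK_WAIT_FOREVER); // same owner, count: 1 -> 2
 *     spinlock_release(&s_lock);                        // count: 2 -> 1, still held
 *     spinlock_release(&s_lock);                        // count: 1 -> 0, lock is free
 */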

#ifdef __cplusplus
}
#endif