/*
 * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#pragma once

#include <stdint.h>
#include <stdbool.h>
#include "sdkconfig.h"
#include "soc/cpu.h"
#include "hal/cpu_hal.h"
#include "soc/compare_set.h"

#if __XTENSA__
#include "xtensa/xtruntime.h"
#endif

#ifdef __cplusplus
extern "C" {
#endif

#ifdef CONFIG_SPIRAM_WORKAROUND_NEED_VOLATILE_SPINLOCK
#define NEED_VOLATILE_MUX volatile
#else
#define NEED_VOLATILE_MUX
#endif

#define SPINLOCK_FREE 0xB33FFFFF
#define SPINLOCK_WAIT_FOREVER (-1)
#define SPINLOCK_NO_WAIT 0
#define SPINLOCK_INITIALIZER {.owner = SPINLOCK_FREE,.count = 0}
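/* XOR-ing one core's PRID core ID value with this constant yields the other
 * core's value (the constant equals CORE_ID_REGVAL_PRO ^ CORE_ID_REGVAL_APP);
 * spinlock_acquire() below uses it to derive 'other_core_id' from 'core_id'. */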
#define CORE_ID_REGVAL_XOR_SWAP (0xCDCD ^ 0xABAB)

typedef struct {
    NEED_VOLATILE_MUX uint32_t owner;
    NEED_VOLATILE_MUX uint32_t count;
}spinlock_t;
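
/*
 * Example (illustrative only, not part of the API): a spinlock can be put in the
 * unlocked state statically with SPINLOCK_INITIALIZER, or at run time with
 * spinlock_initialize() declared below. 's_example_lock' is a hypothetical name.
 *
 *     static spinlock_t s_example_lock = SPINLOCK_INITIALIZER;
 */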

/**
 * @brief Initialize a lock to its default state - unlocked
 * @param lock - spinlock object to initialize
 */
static inline void __attribute__((always_inline)) spinlock_initialize(spinlock_t *lock)
{
    assert(lock);
#if !CONFIG_FREERTOS_UNICORE
    lock->owner = SPINLOCK_FREE;
    lock->count = 0;
#endif
}
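
/*
 * Example (illustrative only): initializing a lock at run time instead of using
 * SPINLOCK_INITIALIZER. 's_late_lock' and 'my_module_init' are hypothetical names.
 *
 *     static spinlock_t s_late_lock;
 *
 *     static void my_module_init(void)
 *     {
 *         spinlock_initialize(&s_late_lock);
 *     }
 */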

/**
 * @brief Top level spinlock acquire function, spins until the lock is acquired or the timeout expires
 *
 * This function will:
 * - Save current interrupt state, then disable interrupts
 * - Spin until lock is acquired or until timeout occurs
 * - Restore interrupt state
 *
 * @note Spinlocks alone do not constitute true critical sections (as this
 * function re-enables interrupts once the spinlock is acquired). For critical
 * sections, use the interface provided by the operating system.
 * @param lock - target spinlock object
 * @param timeout - cycles to wait; passing SPINLOCK_WAIT_FOREVER blocks indefinitely
 * @return true if the lock was acquired, false if the timeout expired first
 */
static inline bool __attribute__((always_inline)) spinlock_acquire(spinlock_t *lock, int32_t timeout)
{
#if !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
    uint32_t result;
    uint32_t irq_status;
    uint32_t ccount_start;
    uint32_t core_id, other_core_id;

    assert(lock);
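    /* Raise the interrupt level to XCHAL_EXCM_LEVEL so the acquire sequence is
     * not pre-empted on this core; the previous level is saved in irq_status
     * and restored before this function returns. */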
    irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);

    if (timeout != SPINLOCK_WAIT_FOREVER) {
        RSR(CCOUNT, ccount_start);
    }

    /* Spin until this core owns the lock */
    RSR(PRID, core_id);

    /* Note: core_id holds the full 32-bit core ID (CORE_ID_REGVAL_PRO/CORE_ID_REGVAL_APP) */

    other_core_id = CORE_ID_REGVAL_XOR_SWAP ^ core_id;
    do {

        /* lock->owner should be one of SPINLOCK_FREE, CORE_ID_REGVAL_PRO,
         * CORE_ID_REGVAL_APP:
         *  - If SPINLOCK_FREE, we want to atomically set to 'core_id'.
         *  - If "our" core_id, we can drop through immediately.
         *  - If "other_core_id", we spin here.
         */
        result = core_id;
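
        /* compare_and_set_native()/compare_and_set_extram() (from soc/compare_set.h)
         * atomically store 'result' (this core's ID) into lock->owner if the owner
         * is currently SPINLOCK_FREE, and leave the previous owner value in 'result'.
         * The extram variant is used when the lock object itself resides in external RAM. */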
#if defined(CONFIG_ESP32_SPIRAM_SUPPORT)
        if (esp_ptr_external_ram(lock)) {
            compare_and_set_extram(&lock->owner, SPINLOCK_FREE, &result);
        } else {
#endif
        compare_and_set_native(&lock->owner, SPINLOCK_FREE, &result);
#if defined(CONFIG_ESP32_SPIRAM_SUPPORT)
        }
#endif
        if (result != other_core_id) {
            break;
        }

        if (timeout != SPINLOCK_WAIT_FOREVER) {
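            /* The cycle-count arithmetic below is unsigned, so the elapsed-cycles
             * check remains correct even if CCOUNT wraps around between the start
             * and current reads. */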
            uint32_t ccount_now;
            ccount_now = cpu_hal_get_cycle_count();
            if (ccount_now - ccount_start > (unsigned)timeout) {
                XTOS_RESTORE_INTLEVEL(irq_status);
                return false;
            }
        }
    } while (1);

    /* any other value implies memory corruption or uninitialized mux */
    assert(result == core_id || result == SPINLOCK_FREE);
    assert((result == SPINLOCK_FREE) == (lock->count == 0)); /* we're first to lock iff count is zero */
    assert(lock->count < 0xFF); /* Bad count value implies memory corruption */

    lock->count++;
    XTOS_RESTORE_INTLEVEL(irq_status);
    return true;

#else // !CONFIG_FREERTOS_UNICORE
    return true;
#endif
}
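
/*
 * Example (illustrative only): attempting to take a lock with a bounded wait.
 * The timeout is measured in CPU cycles; SPINLOCK_WAIT_FOREVER spins until the
 * lock is obtained, and SPINLOCK_NO_WAIT effectively makes a single attempt.
 * 's_example_lock' and the cycle budget are hypothetical.
 *
 *     if (spinlock_acquire(&s_example_lock, 10000)) {
 *         // ... briefly touch the shared state ...
 *         spinlock_release(&s_example_lock);
 *     } else {
 *         // timed out, the lock was not taken
 *     }
 */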

/**
 * @brief Top level spinlock unlock function, unlocks a previously locked spinlock
 *
 * This function will:
 * - Save current interrupt state, then disable interrupts
 * - Release the spinlock
 * - Restore interrupt state
 *
 * @note Spinlocks alone do not constitute true critical sections (as this
 * function re-enables interrupts once the spinlock is released). For critical
 * sections, use the interface provided by the operating system.
 * @param lock - target spinlock object, which must be currently held by the calling core
 */
static inline void __attribute__((always_inline)) spinlock_release(spinlock_t *lock)
{
#if !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
    uint32_t irq_status;
    uint32_t core_id;

    assert(lock);
    irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);

    RSR(PRID, core_id);
    assert(core_id == lock->owner); // This is a mutex we didn't lock, or it's corrupt
    lock->count--;

    if (!lock->count) {
        lock->owner = SPINLOCK_FREE;
    } else {
        assert(lock->count < 0x100); // Indicates memory corruption
    }

    XTOS_RESTORE_INTLEVEL(irq_status);
#endif
}
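
/*
 * Example (illustrative only): a minimal sketch of guarding shared data with a
 * spinlock. Acquire/release may be nested on the same core (the lock counts
 * recursive acquisitions); as noted above, interrupts are re-enabled once the
 * lock is held, so use the RTOS critical section API when interrupt protection
 * is also required. 's_counter_lock', 's_counter' and 'increment_counter' are
 * hypothetical names.
 *
 *     static spinlock_t s_counter_lock = SPINLOCK_INITIALIZER;
 *     static uint32_t s_counter;
 *
 *     static void increment_counter(void)
 *     {
 *         spinlock_acquire(&s_counter_lock, SPINLOCK_WAIT_FOREVER);
 *         s_counter++;
 *         spinlock_release(&s_counter_lock);
 *     }
 */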
#ifdef __cplusplus
}
#endif