Mirror of https://github.com/espressif/esp-idf.git (synced 2024-10-05 20:47:46 -04:00)
Merge branch 'bugfix/spinlock_performance' into 'master'

freertos: Reduce CPU spinlock (portmux) overhead

See merge request !497

Commit d531b56d93
@@ -538,6 +538,12 @@ UT_001_06:
    - ESP32_IDF
    - UT_T1_SPIMODE

UT_001_07:
  <<: *unit_test_template
  tags:
    - ESP32_IDF
    - UT_T1_1

IT_001_01:
  <<: *test_template
  tags:
@@ -22,6 +22,7 @@

// TODO: get actual clock from PLL config
#define ESP_APPTRACE_CPUTICKS2US(_t_) ((_t_)/(XT_CLOCK_FREQ/1000000))
#define ESP_APPTRACE_US2CPUTICKS(_t_) ((_t_)*(XT_CLOCK_FREQ/1000000))

esp_err_t esp_apptrace_tmo_check(esp_apptrace_tmo_t *tmo)
{
@@ -45,81 +46,38 @@ esp_apptrace_tmo_check(esp_apptrace_tmo_t *tmo)

esp_err_t esp_apptrace_lock_take(esp_apptrace_lock_t *lock, esp_apptrace_tmo_t *tmo)
{
    uint32_t res;
#if CONFIG_SYSVIEW_ENABLE
    uint32_t recCnt;
#endif
    int res;

    while (1) {
        res = (xPortGetCoreID() << portMUX_VAL_SHIFT) | portMUX_MAGIC_VAL;
        // first disable IRQs on this CPU, this will prevent current task from been
        // preempted by higher prio tasks, otherwise deadlock can happen:
        // when lower prio task took mux and then preempted by higher prio one which also tries to
        // get mux with INFINITE timeout
        unsigned int irq_stat = portENTER_CRITICAL_NESTED();
        // Now try to lock mux
        uxPortCompareSet(&lock->portmux.mux, portMUX_FREE_VAL, &res);
        if (res == portMUX_FREE_VAL) {
            // do not enable IRQs, we will held them disabled until mux is unlocked
            // we do not need to flush cache region for mux->irq_stat because it is used
            // to hold and restore IRQ state only for CPU which took mux, other CPUs will not use this value
            lock->irq_stat = irq_stat;
            break;
        // do not overwrite lock->int_state before we actually acquired the mux
        unsigned int_state = portENTER_CRITICAL_NESTED();
        // FIXME: if mux is busy it is not good idea to loop during the whole tmo with disabled IRQs.
        // So we check mux state using zero tmo, restore IRQs and let others tasks/IRQs to run on this CPU
        // while we are doing our own tmo check.
        bool success = vPortCPUAcquireMutexTimeout(&lock->mux, 0);
        if (success) {
            lock->int_state = int_state;
            return ESP_OK;
        }
#if CONFIG_SYSVIEW_ENABLE
        else if (((res & portMUX_VAL_MASK) >> portMUX_VAL_SHIFT) == xPortGetCoreID()) {
            recCnt = (res & portMUX_CNT_MASK) >> portMUX_CNT_SHIFT;
            recCnt++;
            // ets_printf("Recursive lock: recCnt=%d\n", recCnt);
            lock->portmux.mux = portMUX_MAGIC_VAL | (recCnt << portMUX_CNT_SHIFT) | (xPortGetCoreID() << portMUX_VAL_SHIFT);
        portEXIT_CRITICAL_NESTED(int_state);
        // we can be preempted from this place till the next call (above) to portENTER_CRITICAL_NESTED()
        res = esp_apptrace_tmo_check(tmo);
        if (res != ESP_OK) {
            break;
        }
#endif
        // if mux is locked by other task/ISR enable IRQs and let other guys work
        portEXIT_CRITICAL_NESTED(irq_stat);

        int err = esp_apptrace_tmo_check(tmo);
        if (err != ESP_OK) {
            return err;
        }
    }

    return ESP_OK;
    return res;
}

esp_err_t esp_apptrace_lock_give(esp_apptrace_lock_t *lock)
{
    esp_err_t ret = ESP_OK;
    uint32_t res = 0;
    unsigned int irq_stat;
#if CONFIG_SYSVIEW_ENABLE
    uint32_t recCnt;
#endif
    res = portMUX_FREE_VAL;

    // first of all save a copy of IRQ status for this locker because uxPortCompareSet will unlock mux and tasks/ISRs
    // from other core can overwrite mux->irq_stat
    irq_stat = lock->irq_stat;
    uxPortCompareSet(&lock->portmux.mux, (xPortGetCoreID() << portMUX_VAL_SHIFT) | portMUX_MAGIC_VAL, &res);

    if ( ((res & portMUX_VAL_MASK) >> portMUX_VAL_SHIFT) == xPortGetCoreID() ) {
#if CONFIG_SYSVIEW_ENABLE
        //Lock is valid, we can return safely. Just need to check if it's a recursive lock; if so we need to decrease the refcount.
        if ( ((res & portMUX_CNT_MASK) >> portMUX_CNT_SHIFT) != 0) {
            //We locked this, but the reccount isn't zero. Decrease refcount and continue.
            recCnt = (res & portMUX_CNT_MASK) >> portMUX_CNT_SHIFT;
            recCnt--;
            lock->portmux.mux = portMUX_MAGIC_VAL | (recCnt << portMUX_CNT_SHIFT) | (xPortGetCoreID() << portMUX_VAL_SHIFT);
        }
#endif
    } else if ( res == portMUX_FREE_VAL ) {
        ret = ESP_FAIL; // should never get here
    } else {
        ret = ESP_FAIL; // should never get here
    }
    // restore local interrupts
    portEXIT_CRITICAL_NESTED(irq_stat);
    return ret;
    // save lock's irq state value for this CPU
    unsigned int_state = lock->int_state;
    // after call to the following func we can not be sure that lock->int_state
    // is not overwritten by other CPU who has acquired the mux just after we released it. See esp_apptrace_lock_take().
    vPortCPUReleaseMutex(&lock->mux);
    portEXIT_CRITICAL_NESTED(int_state);
    return ESP_OK;
}
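For orientation, a minimal sketch of how the reworked lock is meant to be used by tracing code, based only on the functions visible in this change (esp_apptrace_lock_init/take/give); the esp_apptrace_tmo_t setup helper shown here is an assumption and is not part of this diff:

    /* Sketch only: esp_apptrace_tmo_init() is an assumed helper, not shown in this change. */
    static esp_apptrace_lock_t s_example_lock;   /* esp_apptrace_lock_init(&s_example_lock) once at startup */

    static void example_trace_section(void)
    {
        esp_apptrace_tmo_t tmo;
        esp_apptrace_tmo_init(&tmo, 1000 /* us */);                 /* assumed helper */
        if (esp_apptrace_lock_take(&s_example_lock, &tmo) == ESP_OK) {
            /* ... touch shared tracing state; local IRQs stay disabled while the lock is held ... */
            esp_apptrace_lock_give(&s_example_lock);
        }
    }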

///////////////////////////////////////////////////////////////////////////////

@@ -59,8 +59,8 @@ static inline uint32_t esp_apptrace_tmo_remaining_us(esp_apptrace_tmo_t *tmo)

/** Tracing module synchronization lock */
typedef struct {
    volatile unsigned int irq_stat; ///< local (on 1 CPU) IRQ state
    portMUX_TYPE portmux; ///< mux for synchronization
    portMUX_TYPE mux;
    unsigned int_state;
} esp_apptrace_lock_t;

/**
@@ -70,8 +70,8 @@ typedef struct {
 */
static inline void esp_apptrace_lock_init(esp_apptrace_lock_t *lock)
{
    lock->portmux.mux = portMUX_FREE_VAL;
    lock->irq_stat = 0;
    vPortCPUInitializeMutex(&lock->mux);
    lock->int_state = 0;
}

/**
@@ -163,4 +163,4 @@ uint32_t esp_apptrace_rb_read_size_get(esp_apptrace_rb_t *rb);
 */
uint32_t esp_apptrace_rb_write_size_get(esp_apptrace_rb_t *rb);

#endif //ESP_APP_TRACE_UTIL_H_
#endif //ESP_APP_TRACE_UTIL_H_
@@ -115,7 +115,7 @@ static timer_group_t s_ts_timer_group;
// everything is fine, so for multi-core env we have to wait on underlying lock forever
#define SEGGER_LOCK_WAIT_TMO ESP_APPTRACE_TMO_INFINITE

static esp_apptrace_lock_t s_sys_view_lock = {.irq_stat = 0, .portmux = portMUX_INITIALIZER_UNLOCKED};
static esp_apptrace_lock_t s_sys_view_lock = {.mux = portMUX_INITIALIZER_UNLOCKED, .int_state = 0};

static const char * const s_isr_names[] = {
    [0] = "WIFI_MAC",
@@ -1 +1 @@
Subproject commit 64b0ff4199614a8a5066fe3a05d446acb52575e6
Subproject commit fc2118052ec3863e228840d15df58bc66ff0ce57
@@ -978,13 +978,14 @@ typedef struct xSTATIC_QUEUE
        uint8_t ucDummy9;
    #endif

    struct {
        volatile uint32_t ucDummy10;
    #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
        void *pvDummy8;
        UBaseType_t uxDummy11;
    #endif
    } sDummy12;
    struct {
        volatile uint32_t ucDummy10;
        uint32_t ucDummy11;
    #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
        void *pvDummy8;
        UBaseType_t uxDummy12;
    #endif
    } sDummy1;

} StaticQueue_t;
typedef StaticQueue_t StaticSemaphore_t;
@@ -199,7 +199,7 @@ BaseType_t xPortInIsrContext();
/* Multi-core: get current core ID */
static inline uint32_t IRAM_ATTR xPortGetCoreID() {
    int id;
    asm volatile(
    asm (
        "rsr.prid %0\n"
        " extui %0,%0,13,1"
        :"=r"(id));
@@ -127,7 +127,8 @@ typedef unsigned portBASE_TYPE UBaseType_t;

typedef struct {
    volatile uint32_t mux;
    uint32_t owner;
    uint32_t count;
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
    const char *lastLockedFn;
    int lastLockedLine;
@@ -143,24 +144,23 @@ typedef struct {
 * The magic number in the top 16 bits is there so we can detect uninitialized and corrupted muxes.
 */

#define portMUX_MAGIC_VAL 0xB33F0000
#define portMUX_FREE_VAL 0xB33FFFFF
#define portMUX_MAGIC_MASK 0xFFFF0000
#define portMUX_MAGIC_SHIFT 16
#define portMUX_CNT_MASK 0x0000FF00
#define portMUX_CNT_SHIFT 8
#define portMUX_VAL_MASK 0x000000FF
#define portMUX_VAL_SHIFT 0

/* Special constants for vPortCPUAcquireMutexTimeout() */
#define portMUX_NO_TIMEOUT (-1)  /* When passed for 'timeout_cycles', spin forever if necessary */
#define portMUX_TRY_LOCK 0       /* Try to acquire the spinlock a single time only */

//Keep this in sync with the portMUX_TYPE struct definition please.
#ifndef CONFIG_FREERTOS_PORTMUX_DEBUG
#define portMUX_INITIALIZER_UNLOCKED { \
        .mux = portMUX_MAGIC_VAL|portMUX_FREE_VAL \
#define portMUX_INITIALIZER_UNLOCKED { \
        .owner = portMUX_FREE_VAL, \
        .count = 0, \
    }
#else
#define portMUX_INITIALIZER_UNLOCKED { \
        .mux = portMUX_MAGIC_VAL|portMUX_FREE_VAL, \
        .lastLockedFn = "(never locked)", \
#define portMUX_INITIALIZER_UNLOCKED { \
        .owner = portMUX_FREE_VAL, \
        .count = 0, \
        .lastLockedFn = "(never locked)", \
        .lastLockedLine = -1 \
    }
#endif
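As a small self-check of the new representation, the following sketch (illustrative only, not part of this change) verifies what portMUX_INITIALIZER_UNLOCKED produces with the owner/count fields defined above:

    #include <assert.h>
    #include "freertos/FreeRTOS.h"   /* pulls in portmacro.h */

    static portMUX_TYPE example_mux = portMUX_INITIALIZER_UNLOCKED;

    static void example_check_unlocked(void)
    {
        assert(example_mux.owner == portMUX_FREE_VAL);   /* no core owns the spinlock */
        assert(example_mux.count == 0);                  /* recursion count starts at zero */
    }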

@@ -173,8 +173,7 @@ typedef struct {
#define portASSERT_IF_IN_ISR() vPortAssertIfInISR()
void vPortAssertIfInISR();


#define portCRITICAL_NESTING_IN_TCB 1
#define portCRITICAL_NESTING_IN_TCB 1

/*
Modifications to portENTER_CRITICAL:
@@ -203,7 +202,10 @@ behaviour; please keep this in mind if you need any compatibility with other Fre
void vPortCPUInitializeMutex(portMUX_TYPE *mux);
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
void vPortCPUAcquireMutex(portMUX_TYPE *mux, const char *function, int line);
bool vPortCPUAcquireMutexTimeout(portMUX_TYPE *mux, int timeout_cycles, const char *function, int line);
portBASE_TYPE vPortCPUReleaseMutex(portMUX_TYPE *mux, const char *function, int line);


void vTaskEnterCritical( portMUX_TYPE *mux, const char *function, int line );
void vTaskExitCritical( portMUX_TYPE *mux, const char *function, int line );
#define portENTER_CRITICAL(mux) vTaskEnterCritical(mux, __FUNCTION__, __LINE__)
@@ -214,7 +216,18 @@ void vTaskExitCritical( portMUX_TYPE *mux, const char *function, int line );
void vTaskExitCritical( portMUX_TYPE *mux );
void vTaskEnterCritical( portMUX_TYPE *mux );
void vPortCPUAcquireMutex(portMUX_TYPE *mux);
portBASE_TYPE vPortCPUReleaseMutex(portMUX_TYPE *mux);

/** @brief Acquire a portmux spinlock with a timeout
 *
 * @param mux Pointer to portmux to acquire.
 * @param timeout_cycles Timeout to spin, in CPU cycles. Pass portMUX_NO_TIMEOUT to wait forever,
 * portMUX_TRY_LOCK to try a single time to acquire the lock.
 *
 * @return true if mutex is successfully acquired, false on timeout.
 */
bool vPortCPUAcquireMutexTimeout(portMUX_TYPE *mux, int timeout_cycles);
void vPortCPUReleaseMutex(portMUX_TYPE *mux);

#define portENTER_CRITICAL(mux) vTaskEnterCritical(mux)
#define portEXIT_CRITICAL(mux) vTaskExitCritical(mux)
#define portENTER_CRITICAL_ISR(mux) vTaskEnterCritical(mux)
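A brief usage sketch of the declarations above, assuming task context and the usual FreeRTOS includes (illustrative only). Note that, unlike portENTER_CRITICAL, the raw timeout call does not keep local interrupts disabled while the lock is held, which suits polling-style callers such as the app_trace code earlier in this change:

    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    static portMUX_TYPE s_example_mux = portMUX_INITIALIZER_UNLOCKED;
    static volatile int s_example_shared;

    static void example_update_shared(void)
    {
        /* Conventional critical section: spins (forever if needed) and disables local IRQs. */
        portENTER_CRITICAL(&s_example_mux);
        s_example_shared++;
        portEXIT_CRITICAL(&s_example_mux);

        /* Non-blocking attempt: give up immediately if the other core holds the lock. */
        if (vPortCPUAcquireMutexTimeout(&s_example_mux, portMUX_TRY_LOCK)) {
            s_example_shared++;
            vPortCPUReleaseMutex(&s_example_mux);
        }
    }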

@@ -243,9 +256,8 @@ static inline unsigned portENTER_CRITICAL_NESTED() { unsigned state = XTOS_SET_I
 * ESP32, though. (Would show up directly if it did because the magic wouldn't match.)
 */
static inline void uxPortCompareSet(volatile uint32_t *addr, uint32_t compare, uint32_t *set) {
    __asm__ __volatile__(
    __asm__ __volatile__ (
        "WSR %2,SCOMPARE1 \n"
        "ISYNC \n"
        "S32C1I %0, %1, 0 \n"
        :"=r"(*set)
        :"r"(addr), "r"(compare), "0"(*set)

@@ -314,11 +314,10 @@ STRUCT_END(XtSolFrame)

/*
Macro to get the current core ID. Only uses the reg given as an argument.
Reading PRID on the ESP108 architecture gives us 0xCDCD on the PRO processor
and 0xABAB on the APP CPU. We distinguish between the two by simply checking
bit 1: it's 1 on the APP and 0 on the PRO processor.
Reading PRID on the ESP32 gives us 0xCDCD on the PRO processor (0)
and 0xABAB on the APP CPU (1). We distinguish between the two by simply checking
bit 13: it's 1 on the APP and 0 on the PRO processor.
*/

#ifdef __ASSEMBLER__
    .macro getcoreid reg
    rsr.prid \reg
@@ -326,7 +325,8 @@ STRUCT_END(XtSolFrame)
    .endm
#endif

#define CORE_ID_PRO 0xCDCD
#define CORE_ID_APP 0xABAB
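These full PRID values are what the reworked spinlock stores in its owner field (see portmux_impl.h below). As a small illustration of how one core derives the other core's ID with the XOR-swap constant, a sketch that assumes the RSR() macro from soc/cpu.h:

    #include <stdint.h>
    #include "soc/cpu.h"   /* RSR() */

    /* Illustrative only: read this core's PRID and compute the other core's ID. */
    static inline uint32_t example_other_core_id(void)
    {
        uint32_t my_id;
        RSR(PRID, my_id);                            /* 0xCDCD on PRO, 0xABAB on APP */
        return my_id ^ (CORE_ID_PRO ^ CORE_ID_APP);  /* same XOR swap as portmux_impl.h */
    }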

/*
-------------------------------------------------------------------------------
@@ -97,6 +97,7 @@
#include "xtensa_rtos.h"

#include "rom/ets_sys.h"
#include "soc/cpu.h"

#include "FreeRTOS.h"
#include "task.h"
@@ -302,122 +303,63 @@ void vPortCPUInitializeMutex(portMUX_TYPE *mux) {
    mux->lastLockedFn="(never locked)";
    mux->lastLockedLine=-1;
#endif
    mux->mux=portMUX_FREE_VAL;
    mux->owner=portMUX_FREE_VAL;
    mux->count=0;
}

#include "portmux_impl.h"

/*
 * For kernel use: Acquire a per-CPU mux. Spinlocks, so don't hold on to these muxes for too long.
 */
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
void vPortCPUAcquireMutex(portMUX_TYPE *mux, const char *fnName, int line) {
    unsigned int irqStatus = portENTER_CRITICAL_NESTED();
    vPortCPUAcquireMutexIntsDisabled(mux, portMUX_NO_TIMEOUT, fnName, line);
    portEXIT_CRITICAL_NESTED(irqStatus);
}

bool vPortCPUAcquireMutexTimeout(portMUX_TYPE *mux, uint32_t timeout_cycles, const char *fnName, int line) {
    unsigned int irqStatus = portENTER_CRITICAL_NESTED();
    bool result = vPortCPUAcquireMutexIntsDisabled(mux, timeout_cycles, fnName, line);
    portEXIT_CRITICAL_NESTED(irqStatus);
    return result;
}

#else
void vPortCPUAcquireMutex(portMUX_TYPE *mux) {
#endif
#if !CONFIG_FREERTOS_UNICORE
    uint32_t res;
    uint32_t recCnt;
    unsigned int irqStatus;
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
    uint32_t cnt=(1<<16);
    if ( (mux->mux & portMUX_MAGIC_MASK) != portMUX_MAGIC_VAL ) {
        ets_printf("ERROR: vPortCPUAcquireMutex: mux %p is uninitialized (0x%X)! Called from %s line %d.\n", mux, mux->mux, fnName, line);
        mux->mux=portMUX_FREE_VAL;
    }
    unsigned int irqStatus = portENTER_CRITICAL_NESTED();
    vPortCPUAcquireMutexIntsDisabled(mux, portMUX_NO_TIMEOUT);
    portEXIT_CRITICAL_NESTED(irqStatus);
}

bool vPortCPUAcquireMutexTimeout(portMUX_TYPE *mux, int timeout_cycles) {
    unsigned int irqStatus = portENTER_CRITICAL_NESTED();
    bool result = vPortCPUAcquireMutexIntsDisabled(mux, timeout_cycles);
    portEXIT_CRITICAL_NESTED(irqStatus);
    return result;
}
#endif

    irqStatus=portENTER_CRITICAL_NESTED();
    do {
        //Lock mux if it's currently unlocked
        res=(xPortGetCoreID()<<portMUX_VAL_SHIFT)|portMUX_MAGIC_VAL;
        uxPortCompareSet(&mux->mux, portMUX_FREE_VAL, &res);
        //If it wasn't free and we're the owner of the lock, we are locking recursively.
        if ( (res != portMUX_FREE_VAL) && (((res&portMUX_VAL_MASK)>>portMUX_VAL_SHIFT) == xPortGetCoreID()) ) {
            //Mux was already locked by us. Just bump the recurse count by one.
            recCnt=(res&portMUX_CNT_MASK)>>portMUX_CNT_SHIFT;
            recCnt++;
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG_RECURSIVE
            ets_printf("Recursive lock: recCnt=%d last non-recursive lock %s line %d, curr %s line %d\n", recCnt, mux->lastLockedFn, mux->lastLockedLine, fnName, line);
#endif
            mux->mux=portMUX_MAGIC_VAL|(recCnt<<portMUX_CNT_SHIFT)|(xPortGetCoreID()<<portMUX_VAL_SHIFT);
            break;
        }
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
        cnt--;
        if (cnt==0) {
            ets_printf("Timeout on mux! last non-recursive lock %s line %d, curr %s line %d\n", mux->lastLockedFn, mux->lastLockedLine, fnName, line);
            ets_printf("Mux value %X\n", mux->mux);
        }
#endif
    } while (res!=portMUX_FREE_VAL);
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
    if (res==portMUX_FREE_VAL) { //initial lock
        mux->lastLockedFn=fnName;
        mux->lastLockedLine=line;
    }
#endif
    portEXIT_CRITICAL_NESTED(irqStatus);
#endif
}

/*
 * For kernel use: Release a per-CPU mux. Returns true if everything is OK, false if mux
 * was already unlocked or is locked by a different core.
 * For kernel use: Release a per-CPU mux
 *
 * Mux must be already locked by this core
 */
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
portBASE_TYPE vPortCPUReleaseMutex(portMUX_TYPE *mux, const char *fnName, int line) {
#else
portBASE_TYPE vPortCPUReleaseMutex(portMUX_TYPE *mux) {
#endif
#if !CONFIG_FREERTOS_UNICORE
    uint32_t res=0;
    uint32_t recCnt;
    unsigned int irqStatus;
    portBASE_TYPE ret=pdTRUE;
    // ets_printf("Unlock %p\n", mux);
    irqStatus=portENTER_CRITICAL_NESTED();
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
    const char *lastLockedFn=mux->lastLockedFn;
    int lastLockedLine=mux->lastLockedLine;
    mux->lastLockedFn=fnName;
    mux->lastLockedLine=line;
    if ( (mux->mux & portMUX_MAGIC_MASK) != portMUX_MAGIC_VAL ) ets_printf("ERROR: vPortCPUReleaseMutex: mux %p is uninitialized (0x%X)!\n", mux, mux->mux);
#endif
    //Unlock mux if it's currently locked with a recurse count of 0
    res=portMUX_FREE_VAL;
    uxPortCompareSet(&mux->mux, (xPortGetCoreID()<<portMUX_VAL_SHIFT)|portMUX_MAGIC_VAL, &res);

    if ( ((res&portMUX_VAL_MASK)>>portMUX_VAL_SHIFT) == xPortGetCoreID() ) {
        //Lock is valid, we can return safely. Just need to check if it's a recursive lock; if so we need to decrease the refcount.
        if ( ((res&portMUX_CNT_MASK)>>portMUX_CNT_SHIFT)!=0) {
            //We locked this, but the reccount isn't zero. Decrease refcount and continue.
            recCnt=(res&portMUX_CNT_MASK)>>portMUX_CNT_SHIFT;
            recCnt--;
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG_RECURSIVE
            ets_printf("Recursive unlock: recCnt=%d last locked %s line %d, curr %s line %d\n", recCnt, lastLockedFn, lastLockedLine, fnName, line);
#endif
            mux->mux=portMUX_MAGIC_VAL|(recCnt<<portMUX_CNT_SHIFT)|(xPortGetCoreID()<<portMUX_VAL_SHIFT);
        }
    } else if ( res == portMUX_FREE_VAL ) {
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
        ets_printf("ERROR: vPortCPUReleaseMutex: mux %p was already unlocked!\n", mux);
        ets_printf("Last non-recursive unlock %s line %d, curr unlock %s line %d\n", lastLockedFn, lastLockedLine, fnName, line);
#endif
        ret=pdFALSE;
    } else {
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
        ets_printf("ERROR: vPortCPUReleaseMutex: mux %p wasn't locked by this core (%d) but by core %d (ret=%x, mux=%x).\n", mux, xPortGetCoreID(), ((res&portMUX_VAL_MASK)>>portMUX_VAL_SHIFT), res, mux->mux);
        ets_printf("Last non-recursive lock %s line %d\n", lastLockedFn, lastLockedLine);
        ets_printf("Called by %s line %d\n", fnName, line);
#endif
        ret=pdFALSE;
    }
void vPortCPUReleaseMutex(portMUX_TYPE *mux, const char *fnName, int line) {
    unsigned int irqStatus = portENTER_CRITICAL_NESTED();
    vPortCPUReleaseMutexIntsDisabled(mux, fnName, line);
    portEXIT_CRITICAL_NESTED(irqStatus);
    return ret;
#else //!CONFIG_FREERTOS_UNICORE
    return 0;
#endif
}
#else
void vPortCPUReleaseMutex(portMUX_TYPE *mux) {
    unsigned int irqStatus = portENTER_CRITICAL_NESTED();
    vPortCPUReleaseMutexIntsDisabled(mux);
    portEXIT_CRITICAL_NESTED(irqStatus);
}
#endif

#if CONFIG_FREERTOS_BREAK_ON_SCHEDULER_START_JTAG
void vPortFirstTaskHook(TaskFunction_t function) {
@@ -425,7 +367,6 @@ void vPortFirstTaskHook(TaskFunction_t function) {
}
#endif


void vPortSetStackWatchpoint( void* pxStackStart ) {
    //Set watchpoint 1 to watch the last 32 bytes of the stack.
    //Unfortunately, the Xtensa watchpoints can't set a watchpoint on a random [base - base+n] region because
components/freertos/portmux_impl.h (new file, 169 lines)
@@ -0,0 +1,169 @@
/*
 Copyright (C) 2016-2017 Espressif Shanghai PTE LTD
 Copyright (C) 2015 Real Time Engineers Ltd.

 All rights reserved

 FreeRTOS is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License (version 2) as published by the
 Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.

 ***************************************************************************
 >>! NOTE: The modification to the GPL is included to allow you to !<<
 >>! distribute a combined work that includes FreeRTOS without being !<<
 >>! obliged to provide the source code for proprietary components !<<
 >>! outside of the FreeRTOS kernel. !<<
 ***************************************************************************

 FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE. Full license text is available on the following
 link: http://www.freertos.org/a00114.html
*/

/* This header exists for performance reasons, in order to inline the
   implementation of vPortCPUAcquireMutexIntsDisabled and
   vPortCPUReleaseMutexIntsDisabled into the
   vTaskEnterCritical/vTaskExitCritical functions in task.c as well as the
   vPortCPUAcquireMutex/vPortCPUReleaseMutex implementations.

   Normally this kind of performance hack is over the top, but
   vTaskEnterCritical/vTaskExitCritical is called a great
   deal by FreeRTOS internals.

   It should be #included by freertos port.c or tasks.c, in esp-idf.
*/
#include "soc/cpu.h"

/* XOR one core ID with this value to get the other core ID */
#define CORE_ID_XOR_SWAP (CORE_ID_PRO ^ CORE_ID_APP)

static inline bool __attribute__((always_inline))
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
vPortCPUAcquireMutexIntsDisabled(portMUX_TYPE *mux, int timeout_cycles, const char *fnName, int line) {
#else
vPortCPUAcquireMutexIntsDisabled(portMUX_TYPE *mux, int timeout_cycles) {
#endif
#if !CONFIG_FREERTOS_UNICORE
    uint32_t res;
    portBASE_TYPE coreID, otherCoreID;
    uint32_t ccount_start;
    bool set_timeout = timeout_cycles > portMUX_NO_TIMEOUT;
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
    if (!set_timeout) {
        timeout_cycles = 10000; // Always set a timeout in debug mode
        set_timeout = true;
    }
#endif
    if (set_timeout) { // Timeout
        RSR(CCOUNT, ccount_start);
    }

#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
    uint32_t owner = mux->owner;
    if (owner != portMUX_FREE_VAL && owner != CORE_ID_PRO && owner != CORE_ID_APP) {
        ets_printf("ERROR: vPortCPUAcquireMutex: mux %p is uninitialized (0x%X)! Called from %s line %d.\n", mux, owner, fnName, line);
        mux->owner=portMUX_FREE_VAL;
    }
#endif

    /* Spin until we own the core */

    RSR(PRID, coreID);
    /* Note: coreID is the full 32 bit core ID (CORE_ID_PRO/CORE_ID_APP),
       not the 0/1 value returned by xPortGetCoreID()
    */
    otherCoreID = CORE_ID_XOR_SWAP ^ coreID;
    do {
        /* mux->owner should be one of portMUX_FREE_VAL, CORE_ID_PRO,
           CORE_ID_APP:

           - If portMUX_FREE_VAL, we want to atomically set to 'coreID'.
           - If "our" coreID, we can drop through immediately.
           - If "otherCoreID", we spin here.
        */
        res = coreID;
        uxPortCompareSet(&mux->owner, portMUX_FREE_VAL, &res);

        if (res != otherCoreID) {
            break; // mux->owner is "our" coreID
        }

        if (set_timeout) {
            uint32_t ccount_now;
            RSR(CCOUNT, ccount_now);
            if (ccount_now - ccount_start > (unsigned)timeout_cycles) {
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
                ets_printf("Timeout on mux! last non-recursive lock %s line %d, curr %s line %d\n", mux->lastLockedFn, mux->lastLockedLine, fnName, line);
                ets_printf("Owner 0x%x count %d\n", mux->owner, mux->count);
#endif
                return false;
            }
        }
    } while (1);

    assert(res == coreID || res == portMUX_FREE_VAL); /* any other value implies memory corruption or uninitialized mux */
    assert((res == portMUX_FREE_VAL) == (mux->count == 0)); /* we're first to lock iff count is zero */
    assert(mux->count < 0xFF); /* Bad count value implies memory corruption */

    /* now we own it, we can increment the refcount */
    mux->count++;

#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
    if (res==portMUX_FREE_VAL) { //initial lock
        mux->lastLockedFn=fnName;
        mux->lastLockedLine=line;
    } else {
        ets_printf("Recursive lock: count=%d last non-recursive lock %s line %d, curr %s line %d\n", mux->count-1,
                   mux->lastLockedFn, mux->lastLockedLine, fnName, line);
    }
#endif /* CONFIG_FREERTOS_PORTMUX_DEBUG */
#endif /* CONFIG_FREERTOS_UNICORE */
    return true;
}
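One detail worth noting in the timeout handling above: CCOUNT is a free-running 32-bit cycle counter, so elapsed time is computed with unsigned wrap-around arithmetic. A reduced sketch of that pattern (try_lock_once() is a hypothetical stand-in for the compare-and-set attempt):

    #include <stdbool.h>
    #include <stdint.h>
    #include "soc/cpu.h"   /* RSR(), CCOUNT */

    static bool example_spin_with_timeout(int timeout_cycles, bool (*try_lock_once)(void))
    {
        uint32_t cc_start, cc_now;
        RSR(CCOUNT, cc_start);
        do {
            if (try_lock_once()) {       /* hypothetical single acquisition attempt */
                return true;
            }
            RSR(CCOUNT, cc_now);
            /* Unsigned subtraction stays correct even if CCOUNT wraps between samples. */
        } while (cc_now - cc_start <= (uint32_t)timeout_cycles);
        return false;
    }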

#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
static inline void vPortCPUReleaseMutexIntsDisabled(portMUX_TYPE *mux, const char *fnName, int line) {
#else
static inline void vPortCPUReleaseMutexIntsDisabled(portMUX_TYPE *mux) {
#endif
#if !CONFIG_FREERTOS_UNICORE
    portBASE_TYPE coreID;
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
    const char *lastLockedFn=mux->lastLockedFn;
    int lastLockedLine=mux->lastLockedLine;
    mux->lastLockedFn=fnName;
    mux->lastLockedLine=line;
    uint32_t owner = mux->owner;
    if (owner != portMUX_FREE_VAL && owner != CORE_ID_PRO && owner != CORE_ID_APP) {
        ets_printf("ERROR: vPortCPUReleaseMutex: mux %p is invalid (0x%x)!\n", mux, mux->owner);
    }
#endif

#if CONFIG_FREERTOS_PORTMUX_DEBUG || !defined(NDEBUG)
    RSR(PRID, coreID);
#endif

#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
    if (coreID != mux->owner) {
        ets_printf("ERROR: vPortCPUReleaseMutex: mux %p was already unlocked!\n", mux);
        ets_printf("Last non-recursive unlock %s line %d, curr unlock %s line %d\n", lastLockedFn, lastLockedLine, fnName, line);
    }
#endif

    assert(coreID == mux->owner); // This is a mutex we didn't lock, or it's corrupt
    assert(mux->count > 0); // Indicates memory corruption
    assert(mux->count < 0x100); // Indicates memory corruption

    mux->count--;
    if(mux->count == 0) {
        mux->owner = portMUX_FREE_VAL;
    }
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG_RECURSIVE
    else {
        ets_printf("Recursive unlock: count=%d last locked %s line %d, curr %s line %d\n", mux->count, lastLockedFn, lastLockedLine, fnName, line);
    }
#endif
#endif //!CONFIG_FREERTOS_UNICORE
}

@@ -4103,6 +4103,7 @@ For ESP32 FreeRTOS, vTaskEnterCritical implements both portENTER_CRITICAL and po

#if ( portCRITICAL_NESTING_IN_TCB == 1 )

#include "portmux_impl.h"

#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
void vTaskEnterCritical( portMUX_TYPE *mux, const char *function, int line )
@@ -4111,7 +4112,8 @@ For ESP32 FreeRTOS, vTaskEnterCritical implements both portENTER_CRITICAL and po
#endif
{
    BaseType_t oldInterruptLevel=0;
    if( xSchedulerRunning != pdFALSE )
    BaseType_t schedulerRunning = xSchedulerRunning;
    if( schedulerRunning != pdFALSE )
    {
        //Interrupts may already be disabled (because we're doing this recursively) but we can't get the interrupt level after
        //vPortCPUAquireMutex, because it also may mess with interrupts. Get it here first, then later figure out if we're nesting
@@ -4119,26 +4121,27 @@ For ESP32 FreeRTOS, vTaskEnterCritical implements both portENTER_CRITICAL and po
        oldInterruptLevel=portENTER_CRITICAL_NESTED();
    }
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
    vPortCPUAcquireMutex( mux, function, line );
    vPortCPUAcquireMutexIntsDisabled( mux, portMUX_NO_TIMEOUT, function, line );
#else
    vPortCPUAcquireMutex( mux );
    vPortCPUAcquireMutexIntsDisabled( mux, portMUX_NO_TIMEOUT );
#endif

    if( xSchedulerRunning != pdFALSE )
    if( schedulerRunning != pdFALSE )
    {
        ( pxCurrentTCB[ xPortGetCoreID() ]->uxCriticalNesting )++;

        if( xSchedulerRunning != pdFALSE && pxCurrentTCB[ xPortGetCoreID() ]->uxCriticalNesting == 1 )
        TCB_t *tcb = pxCurrentTCB[xPortGetCoreID()];
        BaseType_t newNesting = tcb->uxCriticalNesting + 1;
        tcb->uxCriticalNesting = newNesting;
        if( newNesting == 1 )
        {
            //This is the first time we get called. Save original interrupt level.
            pxCurrentTCB[ xPortGetCoreID() ]->uxOldInterruptState=oldInterruptLevel;
            tcb->uxOldInterruptState = oldInterruptLevel;
        }

        /* Original FreeRTOS comment, saved for reference:
        This is not the interrupt safe version of the enter critical
        function so assert() if it is being called from an interrupt
        context. Only API functions that end in "FromISR" can be used in an
        interrupt. Only assert if the critical nesting count is 1 to
        interrupt. Only assert if the critical nesting count is 1 to
        protect against recursive calls if the assert function also uses a
        critical section. */

@@ -4149,7 +4152,7 @@ For ESP32 FreeRTOS, vTaskEnterCritical implements both portENTER_CRITICAL and po
        both from ISR as well as non-ISR code, thus we re-organized
        vTaskEnterCritical to also work in ISRs. */
#if 0
        if( pxCurrentTCB[ xPortGetCoreID() ]->uxCriticalNesting == 1 )
        if( newNesting == 1 )
        {
            portASSERT_IF_IN_ISR();
        }
@@ -4178,19 +4181,22 @@ For ESP32 FreeRTOS, vTaskExitCritical implements both portEXIT_CRITICAL and port
#endif
{
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
    vPortCPUReleaseMutex( mux, function, line );
    vPortCPUReleaseMutexIntsDisabled( mux, function, line );
#else
    vPortCPUReleaseMutex( mux );
    vPortCPUReleaseMutexIntsDisabled( mux );
#endif
    if( xSchedulerRunning != pdFALSE )
    {
        if( pxCurrentTCB[ xPortGetCoreID() ]->uxCriticalNesting > 0U )
        TCB_t *tcb = pxCurrentTCB[xPortGetCoreID()];
        BaseType_t nesting = tcb->uxCriticalNesting;
        if( nesting > 0U )
        {
            ( pxCurrentTCB[ xPortGetCoreID() ]->uxCriticalNesting )--;
            nesting--;
            tcb->uxCriticalNesting = nesting;

            if( pxCurrentTCB[ xPortGetCoreID() ]->uxCriticalNesting == 0U )
            if( nesting == 0U )
            {
                portEXIT_CRITICAL_NESTED(pxCurrentTCB[ xPortGetCoreID() ]->uxOldInterruptState);
                portEXIT_CRITICAL_NESTED(tcb->uxOldInterruptState);
            }
            else
            {
components/freertos/test/test_spinlocks.c (new file, 133 lines)
@@ -0,0 +1,133 @@
/*
 Combined unit tests & benchmarking for spinlock "portMUX" functionality
*/

#include <esp_types.h>
#include <stdio.h>
#include "rom/ets_sys.h"

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "freertos/queue.h"
#include "freertos/xtensa_api.h"
#include "unity.h"
#include "soc/uart_reg.h"
#include "soc/dport_reg.h"
#include "soc/io_mux_reg.h"
#include "soc/cpu.h"

#define REPEAT_OPS 10000

static uint32_t start, end;

#define BENCHMARK_START() do { \
        RSR(CCOUNT, start); \
    } while(0)

#define BENCHMARK_END(OPERATION) do { \
        RSR(CCOUNT, end); \
        printf("%s took %d cycles/op (%d cycles for %d ops)\n", \
               OPERATION, (end - start)/REPEAT_OPS, \
               (end - start), REPEAT_OPS); \
    } while(0)
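The macros above time a block with the CCOUNT cycle counter and use the usual do { ... } while(0) wrapper so they expand safely as single statements. A hedged example of reusing them for an arbitrary operation (the nop loop is just a stand-in baseline, not part of this test file):

    static void example_benchmark_baseline(void)
    {
        BENCHMARK_START();
        for (int i = 0; i < REPEAT_OPS; i++) {
            __asm__ __volatile__("nop");     /* stand-in operation to measure */
        }
        BENCHMARK_END("nop loop baseline");
    }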

TEST_CASE("portMUX spinlocks (no contention)", "[freertos]")
{
    portMUX_TYPE mux = portMUX_INITIALIZER_UNLOCKED;
    BENCHMARK_START();

    for (int i = 0; i < REPEAT_OPS; i++) {
        portENTER_CRITICAL(&mux);
        portEXIT_CRITICAL(&mux);
    }
    BENCHMARK_END("no contention lock");
}

TEST_CASE("portMUX recursive locks (no contention)", "[freertos]")
{
    portMUX_TYPE mux = portMUX_INITIALIZER_UNLOCKED;
    BENCHMARK_START();

    const int RECURSE_COUNT = 25;

    for (int i = 0; i < REPEAT_OPS / RECURSE_COUNT; i++) {
        for (int j = 0; j < RECURSE_COUNT; j++) {
            portENTER_CRITICAL(&mux);
        }
        for (int j = 0; j < RECURSE_COUNT; j++) {
            portEXIT_CRITICAL(&mux);
        }
    }
    BENCHMARK_END("no contention recursive");
}

static volatile int shared_value;
static portMUX_TYPE shared_mux;
static xSemaphoreHandle done_sem;

static void task_shared_value_increment(void *ignore)
{
    for (int i = 0; i < REPEAT_OPS; i++) {
        portENTER_CRITICAL(&shared_mux);
        shared_value++;
        portEXIT_CRITICAL(&shared_mux);
    }
    xSemaphoreGive(done_sem);
    vTaskDelete(NULL);
}

TEST_CASE("portMUX cross-core locking", "[freertos]")
{
    done_sem = xSemaphoreCreateCounting(2, 0);
    vPortCPUInitializeMutex(&shared_mux);
    shared_value = 0;

    BENCHMARK_START();

    xTaskCreatePinnedToCore(task_shared_value_increment, "INC0", 2048, NULL, UNITY_FREERTOS_PRIORITY + 1, NULL, UNITY_FREERTOS_CPU ? 0 : 1);
    xTaskCreatePinnedToCore(task_shared_value_increment, "INC1", 2048, NULL, UNITY_FREERTOS_PRIORITY + 1, NULL, UNITY_FREERTOS_CPU);

    for(int i = 0; i < 2; i++) {
        if(!xSemaphoreTake(done_sem, 10000/portTICK_PERIOD_MS)) {
            TEST_FAIL_MESSAGE("done_sem not released by test task");
        }
    }

    BENCHMARK_END("cross-core incrementing");
    vSemaphoreDelete(done_sem);

    TEST_ASSERT_EQUAL_INT(REPEAT_OPS * 2, shared_value);
}

TEST_CASE("portMUX high contention", "[freertos]")
{
    const int TOTAL_TASKS = 8; /* half on each core */
    done_sem = xSemaphoreCreateCounting(TOTAL_TASKS, 0);
    vPortCPUInitializeMutex(&shared_mux);
    shared_value = 0;

    BENCHMARK_START();

    for (int i = 0; i < TOTAL_TASKS / 2; i++) {
        /* as each task has a higher priority than previous, expect
           them to preempt the earlier created task, at least on the
           other core (this core has the unity task, until that
           blocks)... */
        xTaskCreatePinnedToCore(task_shared_value_increment, "INC0", 2048, NULL, tskIDLE_PRIORITY + 1 + i, NULL, UNITY_FREERTOS_CPU ? 0 : 1);
        xTaskCreatePinnedToCore(task_shared_value_increment, "INC1", 2048, NULL, tskIDLE_PRIORITY + 1 + i, NULL, UNITY_FREERTOS_CPU);
    }

    for(int i = 0; i < TOTAL_TASKS; i++) {
        if(!xSemaphoreTake(done_sem, 10000/portTICK_PERIOD_MS)) {
            TEST_FAIL_MESSAGE("done_sem not released by test task");
        }
    }

    BENCHMARK_END("cross-core high contention");
    vSemaphoreDelete(done_sem);

    TEST_ASSERT_EQUAL_INT(REPEAT_OPS * TOTAL_TASKS, shared_value);
}

@@ -49,6 +49,7 @@ TEST_CASE("Handle pending context switch while scheduler disabled", "[freertos]"
    isr_count = 0;
    isr_semaphore = xSemaphoreCreateMutex();
    TaskHandle_t counter_task;
    intr_handle_t isr_handle = NULL;

    xTaskCreatePinnedToCore(counter_task_fn, "counter", 2048,
                            NULL, UNITY_FREERTOS_PRIORITY + 1,
@@ -69,7 +70,7 @@ TEST_CASE("Handle pending context switch while scheduler disabled", "[freertos]"
    timer_set_counter_value(TIMER_GROUP_0, TIMER_0, 0);
    timer_set_alarm_value(TIMER_GROUP_0, TIMER_0, 1000);
    timer_enable_intr(TIMER_GROUP_0, TIMER_0);
    timer_isr_register(TIMER_GROUP_0, TIMER_0, timer_group0_isr, NULL, 0, NULL);
    timer_isr_register(TIMER_GROUP_0, TIMER_0, timer_group0_isr, NULL, 0, &isr_handle);
    timer_start(TIMER_GROUP_0, TIMER_0);

    vTaskDelay(5);
@@ -101,6 +102,9 @@ TEST_CASE("Handle pending context switch while scheduler disabled", "[freertos]"
        TEST_ASSERT_NOT_EQUAL(task_count, no_sched_task);
    }

    esp_intr_free(isr_handle);
    timer_disable_intr(TIMER_GROUP_0, TIMER_0);

    vTaskDelete(counter_task);
    vSemaphoreDelete(isr_semaphore);
}