freertos-smp: Task Deletion Coproc context save area cleanup

This commit adds clean-up of the FPU and other co-processor context save area in
the task TCB for the FreeRTOS SMP kernel.
Author: Sudeep Mohanty
Date: 2022-03-25 19:03:37 +05:30
parent 56914d3c81
commit 44ee07ef23
5 changed files with 131 additions and 23 deletions


@@ -299,13 +299,6 @@ extern uint32_t port_switch_flag[];
#else
#define configCHECK_MUTEX_GIVEN_BY_OWNER 0
#endif
#ifndef __ASSEMBLER__
#if CONFIG_FREERTOS_ENABLE_STATIC_TASK_CLEAN_UP
extern void vPortCleanUpTCB ( void *pxTCB );
#define portCLEAN_UP_TCB( pxTCB ) vPortCleanUpTCB( pxTCB )
#endif
#endif
#endif //0
// -------------------- Compatibility ----------------------
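Note: this hunk drops the old, Kconfig-gated wiring in which portCLEAN_UP_TCB() was only mapped when CONFIG_FREERTOS_ENABLE_STATIC_TASK_CLEAN_UP was enabled and the application supplied vPortCleanUpTCB() itself; with this commit the port provides the hook unconditionally (see the portmacro.h hunk below). A minimal sketch of an application-provided hook under that old scheme (the body is illustrative only):

/* Sketch of an application-defined clean-up hook under the old, Kconfig-gated scheme.
 * Illustrative only; the body depends on what the application stores per task. */
void vPortCleanUpTCB( void *pxTCB )
{
    /* e.g. free buffers the application attached to the task (such as memory
     * referenced from thread local storage pointers) before the TCB is reclaimed. */
}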


@@ -126,6 +126,10 @@ static inline void __attribute__((always_inline)) vPortYieldFromISR( void );
static inline BaseType_t __attribute__((always_inline)) xPortGetCoreID( void );
// ----------------------- TCB Cleanup --------------------------
void vPortCleanUpTCB ( void *pxTCB );
/* ----------------------------------------- FreeRTOS SMP Porting Interface --------------------------------------------
* - Contains all the mappings of the macros required by FreeRTOS SMP
* - Must come after forward declare as some porting macros map to declared functions
@@ -215,6 +219,10 @@ extern void vTaskExitCritical( void );
#define portALT_GET_RUN_TIME_COUNTER_VALUE(x) ({x = (uint32_t)esp_timer_get_time();})
#endif
// ------------------- TCB Cleanup ----------------------
#define portCLEAN_UP_TCB( pxTCB ) vPortCleanUpTCB( pxTCB )
/* --------------------------------------------- Inline Implementations ------------------------------------------------
* - Implementation of inline functions of the forward declares
* - Should come after forward declare and FreeRTOS Porting interface, as implementation may use both.
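With the mapping above, the kernel's task-deletion path now lands in the port's vPortCleanUpTCB(). For context, FreeRTOS calls portCLEAN_UP_TCB() from prvDeleteTCB() just before a deleted task's memory is reclaimed; roughly (a simplified sketch of the kernel side, not part of this diff):

/* Simplified sketch of the kernel side (tasks.c); not part of this change. */
static void prvDeleteTCB( TCB_t * pxTCB )
{
    /* Let the port clean up any port-specific areas hanging off the TCB
     * (here: the co-processor save area) before the memory is freed. */
    portCLEAN_UP_TCB( pxTCB );

    /* ... free the task's stack and TCB memory ... */
}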


@@ -554,6 +554,31 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
return sp;
}
// -------------------- Co-Processor -----------------------
#if XCHAL_CP_NUM > 0
void _xt_coproc_release(volatile void *coproc_sa_base, BaseType_t xCoreID);
void vPortCleanUpCoprocArea( void * pxTCB )
{
StackType_t * coproc_area;
BaseType_t xCoreID;
/* Calculate the coproc save area in the stack from the TCB base */
coproc_area = ( StackType_t * ) ( ( uint32_t ) ( pxTCB + offset_pxEndOfStack ));
coproc_area = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) coproc_area ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) );
coproc_area = ( StackType_t * ) ( ( ( uint32_t ) coproc_area - XT_CP_SIZE ) & ~0xf );
/* Extract core ID from the affinity mask */
xCoreID = __builtin_ffs( * ( UBaseType_t * ) ( pxTCB + offset_uxCoreAffinityMask ) );
assert( xCoreID >= 1 );
xCoreID -= 1;
/* If task has live floating point registers somewhere, release them */
_xt_coproc_release( coproc_area, xCoreID );
}
#endif /* XCHAL_CP_NUM > 0 */
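A note on the core-ID extraction above: a task only has a live co-processor context if it has used a co-processor, and such tasks are pinned to a single core, so uxCoreAffinityMask has exactly one bit set. __builtin_ffs() returns one plus the index of the least significant set bit (0 for an empty mask), hence the assert and the subtraction. A hypothetical helper mirroring that logic (illustrative only):

#include <assert.h>

/* Hypothetical helper mirroring the core-ID extraction above; illustrative only. */
static BaseType_t prvCoreIDFromAffinityMask( UBaseType_t uxCoreAffinityMask )
{
    BaseType_t xCoreID = __builtin_ffs( uxCoreAffinityMask ); /* 1 + index of the lowest set bit, 0 if the mask is empty */
    assert( xCoreID >= 1 );                                   /* a pinned task's mask is never empty */
    return xCoreID - 1;                                       /* e.g. mask (1 << 1) -> ffs() == 2 -> core ID 1 */
}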
// -------------------- Tick Handler -----------------------
extern void esp_vApplicationIdleHook(void);
@@ -626,3 +651,17 @@ void vApplicationMinimalIdleHook( void )
esp_vApplicationIdleHook(); //Run IDF style hooks
}
#endif // CONFIG_FREERTOS_USE_MINIMAL_IDLE_HOOK
/*
* Hook function called during prvDeleteTCB() to clean up any
* user-defined static memory areas in the TCB.
* Currently, this hook is used by the port to clean up the
* co-processor save area on targets that support co-processors.
*/
void vPortCleanUpTCB ( void *pxTCB )
{
#if XCHAL_CP_NUM > 0
/* Cleanup coproc save area */
vPortCleanUpCoprocArea( pxTCB );
#endif /* XCHAL_CP_NUM > 0 */
}
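The case that exercises this hook (and motivates the spinlock added in the assembly below) is cross-core deletion: the SMP kernel lets code on one core delete a task whose co-processor state is owned on another core. A usage sketch, assuming the FreeRTOS SMP affinity API (configUSE_CORE_AFFINITY) and an illustrative task function:

/* Illustrative scenario: a task is deleted from a different core than the one
 * holding its co-processor state. */
TaskHandle_t xFpuTask;
xTaskCreate( vFpuTaskFunction, "fpu_task", 4096, NULL, 5, &xFpuTask );
vTaskCoreAffinitySet( xFpuTask, ( 1 << 1 ) );   /* pin to core 1; once it touches the FPU it
                                                 * becomes an owner entry in core 1's save-area array */
/* ... later, possibly from a task running on core 0 ... */
vTaskDelete( xFpuTask );    /* prvDeleteTCB() -> portCLEAN_UP_TCB() -> vPortCleanUpTCB()
                             * -> vPortCleanUpCoprocArea() -> _xt_coproc_release() */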


@@ -402,12 +402,13 @@ won't have an entry here because they get pinned as soon as they use a coprocessor.
Entry Conditions:
A2 = Pointer to base of co-processor state save area.
A3 = Core ID of the pinned task
Exit conditions:
None.
Obeys ABI conventions per prototype:
void _xt_coproc_release(void * coproc_sa_base)
void _xt_coproc_release(void * coproc_sa_base, BaseType_t xCoreID)
*******************************************************************************/
@@ -420,25 +421,43 @@ Obeys ABI conventions per prototype:
.align 4
_xt_coproc_release:
ENTRY0 /* a2 = base of save area */
/* a3 = core ID */
getcoreid a5
movi a3, XCHAL_CP_MAX << 2
mull a5, a5, a3
movi a3, _xt_coproc_owner_sa /* a3 = base of owner array */
add a3, a3, a5
rsil a7, XCHAL_EXCM_LEVEL /* lock interrupts */
addi a4, a3, XCHAL_CP_MAX << 2 /* a4 = top+1 of owner array */
movi a5, 0 /* a5 = 0 (unowned) */
/* Acquire the spinlock before proceeding with the routine.
* Refer to _xt_coproc_exc for details on the purpose of
* the _xt_coproc_owner_sa_lock lock and its intended use.
*/
.L_spinlock_loop:
mov a8, a3 /* Save a copy of the core ID in a8 */
movi a10, _xt_coproc_owner_sa_lock /* a10 = base address of lock variable */
addx4 a10, a8, a10 /* Use core ID in a8 to calculate the offset to the lock variable for the core */
movi a11, 0 /* a11 = 0 */
wsr a11, scompare1 /* scompare1 = a11 :- Expect the spinlock to be free (value = 0) */
movi a11, 1 /* a11 = 1 :- Write 1 to take the spinlock */
s32c1i a11, a10, 0 /* if (lock == scompare1) {tmp = lock; lock = a11; a11 = tmp} else {a11 = lock} */
bnez a11, .L_spinlock_loop /* if (a11 != 0) {loop} :- Keep spinning until the spinlock is available */
rsil a6, XCHAL_EXCM_LEVEL /* lock interrupts */
movi a4, XCHAL_CP_MAX << 2
mull a3, a3, a4
movi a4, _xt_coproc_owner_sa /* a4 = base of owner array */
add a4, a4, a3
1: l32i a7, a3, 0 /* a7 = owner at a3 */
bne a2, a7, 2f /* if (coproc_sa_base == owner) */
s32i a5, a3, 0 /* owner = unowned */
2: addi a3, a3, 1<<2 /* a3 = next entry in owner array */
bltu a3, a4, 1b /* repeat until end of array */
addi a5, a4, XCHAL_CP_MAX << 2 /* a5 = top+1 of owner array */
movi a6, 0 /* a6 = 0 (unowned) */
3: wsr a6, PS /* restore interrupts */
1: l32i a8, a4, 0 /* a8 = owner at a4 */
bne a2, a8, 2f /* if (coproc_sa_base == owner) */
s32i a3, a4, 0 /* owner = unowned */
2: addi a4, a4, 1<<2 /* a4 = next entry in owner array */
bltu a4, a5, 1b /* repeat until end of array */
3: wsr a7, PS /* restore interrupts */
/* Release spinlock */
movi a11, 0 /* a11 = 0 */
s32ri a11, a10, 0 /* a10 = base address of lock variable. Write 0 to release the lock */
RET0
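For readers less familiar with the Xtensa S32C1I/SCOMPARE1 conditional store, the acquire/release sequence above behaves roughly like the following C compare-and-swap loop over a per-core lock word (a sketch only, not part of the commit):

/* Rough C equivalent of the per-core spinlock used above; sketch only. */
#include <stdbool.h>
#include <stdint.h>

#ifndef portNUM_PROCESSORS
#define portNUM_PROCESSORS 2    /* assumption for this sketch: dual-core target */
#endif

static volatile uint32_t owner_sa_lock[ portNUM_PROCESSORS ];   /* 0 = free, 1 = taken */

static inline void spinlock_take( uint32_t core_id )
{
    uint32_t expected;
    do {
        expected = 0;   /* like wsr scompare1: expect the lock to be free */
    } while( !__atomic_compare_exchange_n( &owner_sa_lock[ core_id ], &expected, 1,
                                           false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED ) );
}

static inline void spinlock_give( uint32_t core_id )
{
    __atomic_store_n( &owner_sa_lock[ core_id ], 0, __ATOMIC_RELEASE );   /* like s32ri of 0 */
}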


@@ -854,8 +854,24 @@ _xt_coproc_mask:
_xt_coproc_owner_sa:
.space (XCHAL_CP_MAX * portNUM_PROCESSORS) << 2
.section .iram1,"ax"
/* Spinlock per core for accessing _xt_coproc_owner_sa array
*
* 0 = Spinlock available
* 1 = Spinlock taken
*
* The lock provides mutual exclusion for accessing the _xt_coproc_owner_sa array.
* Because the FreeRTOS SMP kernel allows cross-core task deletion, the array can be
* modified concurrently by the _xt_coproc_exc and _xt_coproc_release routines, so
* without the lock the same entry in the owner save-area array could be written
* from two cores at the same time.
*/
.global _xt_coproc_owner_sa_lock
.type _xt_coproc_owner_sa_lock,@object
.align 16 /* minimize crossing cache boundaries */
_xt_coproc_owner_sa_lock:
.space (portNUM_PROCESSORS) << 2
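The reservation above amounts to one 32-bit lock word per core, aligned to 16 bytes; a roughly equivalent C-level view (illustration only):

/* Roughly equivalent C view of the storage reserved above; illustration only. */
volatile uint32_t _xt_coproc_owner_sa_lock[ portNUM_PROCESSORS ] __attribute__(( aligned( 16 ) ));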
.section .iram1,"ax"
.align 4
.L_goto_invalid:
@@ -906,6 +922,23 @@ _xt_coproc_exc:
s32i a4, sp, XT_STK_A4
s32i a15, sp, XT_STK_A15
/* Acquire the spinlock before proceeding with the exception handler.
* (Refer to _xt_coproc_release for the routine that competes for this lock.)
*
* [refactor-todo]: The spinlock acquire/release sequence can be
* refactored into a macro later if the need arises to use it
* in more than one place in the port assembly files.
*/
.L_spinlock_loop:
movi a2, _xt_coproc_owner_sa_lock /* a2 = base address of lock variable */
getcoreid a0 /* get the core ID in a0 to calculate the offset of the lock variable */
addx4 a2, a0, a2 /* a2 = address of desired lock variable */
movi a0, 0 /* a0 = 0 */
wsr a0, scompare1 /* scompare1 = a0 :- Expect the spinlock to be free (value = 0) */
movi a0, 1 /* a0 = 1 :- Write 1 to take the spinlock */
s32c1i a0, a2, 0 /* if (lock == scompare1) {tmp = lock; lock = a0; a0 = tmp} else {a0 = lock} */
bnez a0, .L_spinlock_loop /* if (a0 != 0) {loop} :- Keep spinning until the spinlock is available */
/* Get co-processor state save area of new owner thread. */
call0 XT_RTOS_CP_STATE /* a15 = new owner's save area */
#if CONFIG_FREERTOS_FPU_IN_ISR
@@ -1034,6 +1067,14 @@ locking.
/* Restore interruptee's saved registers. */
/* Can omit rsync for wsr.CPENABLE here because _xt_user_exit does it. */
.L_xt_coproc_done:
/* Release spinlock */
movi a2, _xt_coproc_owner_sa_lock /* a2 = base address of the lock variable */
getcoreid a0 /* a0 = core ID to calculate the offset of the lock variable */
addx4 a2, a0, a2 /* a2 = address of the lock variable */
movi a0, 0 /* a0 = 0 */
s32ri a0, a2, 0 /* Write 0 to the lock variable at a2 to release the lock */
l32i a15, sp, XT_STK_A15
l32i a5, sp, XT_STK_A5
l32i a4, sp, XT_STK_A4
@@ -1061,6 +1102,14 @@ locking.
/* Co-processor exception occurred outside a thread (not supported). */
.L_xt_coproc_invalid:
/* Release spinlock */
movi a2, _xt_coproc_owner_sa_lock /* a2 = base address of the lock variable */
getcoreid a0 /* a0 = core ID to calculate the offset of the lock variable */
addx4 a2, a0, a2 /* a2 = address of the lock variable */
movi a0, 0 /* a0 = 0 */
s32ri a0, a2, 0 /* Write 0 to the lock variable at a2 to release the lock */
movi a0,PANIC_RSN_COPROCEXCEPTION
wsr a0,EXCCAUSE
call0 _xt_panic /* not in a thread (invalid) */