/*
 * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "sdkconfig.h"
#include "portmacro.h"
#include "freertos/FreeRTOSConfig.h"
#include "soc/soc_caps.h"

#if CONFIG_ESP_SYSTEM_HW_STACK_GUARD
#include "esp_private/hw_stack_guard.h"
#endif
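
/* The symbols below are defined in C: the port_* counters/flags and the ISR
 * stack pointers in the RISC-V port layer, pxCurrentTCB and vTaskSwitchContext
 * in the FreeRTOS kernel, and the esp_hw_stack_guard_* routines in the
 * hardware stack guard component. */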
    .global port_uxInterruptNesting
    .global port_xSchedulerRunning
    .global xIsrStackTop
    .global pxCurrentTCB
    .global vTaskSwitchContext
    .global xPortSwitchFlag
#if CONFIG_ESP_SYSTEM_HW_STACK_GUARD
    .global xIsrStackBottom
    .global port_offset_pxStack
    .global port_offset_pxEndOfStack
    .global esp_hw_stack_guard_monitor_stop
    .global esp_hw_stack_guard_monitor_start
    .global esp_hw_stack_guard_set_bounds
#endif /* CONFIG_ESP_SYSTEM_HW_STACK_GUARD */

    .section .text

/**
 * This function makes the RTOS aware that an ISR is being entered. It saves
 * the current task's stack pointer into its TCB (via pxCurrentTCB) and then
 * loads the ISR stack into sp.
 * TODO: ISR nesting code improvements ?
 * In the routines below, registers a0-a5 are preferred so that the assembler
 * can emit compressed (16-bit) instructions.
 */
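/* A rough C-like sketch of the logic below (illustrative only; coreID is 0 on
 * single-core builds):
 *
 *     if (!port_xSchedulerRunning[coreID]) return;
 *     if (port_uxInterruptNesting[coreID]++ != 0) return;
 *     pxCurrentTCB[coreID]->pxTopOfStack = sp;   // save sp in the TCB
 *     sp = xIsrStackTop[coreID];                 // switch to the ISR stack
 */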
    .global rtos_int_enter
    .type rtos_int_enter, @function
rtos_int_enter:
#if ( configNUM_CORES > 1 )
    csrr    a5, mhartid                     /* a5 = coreID */
    slli    a5, a5, 2                       /* a5 = coreID * 4 */
    la      a0, port_xSchedulerRunning      /* a0 = &port_xSchedulerRunning */
    add     a0, a0, a5                      /* a0 = &port_xSchedulerRunning[coreID] */
    lw      a0, (a0)                        /* a0 = port_xSchedulerRunning[coreID] */
#else
    lw      a0, port_xSchedulerRunning      /* a0 = port_xSchedulerRunning */
#endif /* ( configNUM_CORES > 1 ) */
    beqz    a0, rtos_int_enter_end          /* if (port_xSchedulerRunning[coreID] == 0) jump to rtos_int_enter_end */
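
    /* Note: on multi-core builds, a5 keeps holding coreID * 4 from here on and
     * is reused below to index the remaining per-core arrays. */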

    /* Increment the ISR nesting count */
    la      a0, port_uxInterruptNesting     /* a0 = &port_uxInterruptNesting */
#if ( configNUM_CORES > 1 )
    add     a0, a0, a5                      /* a0 = &port_uxInterruptNesting[coreID] // a5 already contains coreID * 4 */
#endif /* ( configNUM_CORES > 1 ) */
    lw      a1, 0(a0)                       /* a1 = port_uxInterruptNesting[coreID] */
    addi    a2, a1, 1                       /* a2 = a1 + 1 */
    sw      a2, 0(a0)                       /* port_uxInterruptNesting[coreID] = a2 */

    /* If we reached here from another low-priority ISR, i.e., port_uxInterruptNesting[coreID] > 0,
     * the task's stack pointer was already saved, so skip saving it to the TCB */
    bnez    a1, rtos_int_enter_end          /* if (port_uxInterruptNesting[coreID] > 0) jump to rtos_int_enter_end */

#if CONFIG_ESP_SYSTEM_HW_STACK_GUARD
    /* esp_hw_stack_guard_monitor_stop(); pass the scratch registers */
    ESP_HW_STACK_GUARD_MONITOR_STOP_CUR_CORE a0 a1
#endif /* CONFIG_ESP_SYSTEM_HW_STACK_GUARD */
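
    /* The ESP_HW_STACK_GUARD_* names used here and below are assembly helper
     * macros provided by esp_private/hw_stack_guard.h (included above); the
     * registers passed to them are used as scratch and may be clobbered. */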

    /* Save the current sp in pxCurrentTCB[coreID] and load the ISR stack on to sp */
#if ( configNUM_CORES > 1 )
    la      a0, pxCurrentTCB                /* a0 = &pxCurrentTCB */
    add     a0, a0, a5                      /* a0 = &pxCurrentTCB[coreID] // a5 already contains coreID * 4 */
    lw      a0, (a0)                        /* a0 = pxCurrentTCB[coreID] */
    sw      sp, 0(a0)                       /* pxCurrentTCB[coreID]->pxTopOfStack = sp */
    la      a0, xIsrStackTop                /* a0 = &xIsrStackTop */
    add     a0, a0, a5                      /* a0 = &xIsrStackTop[coreID] // a5 already contains coreID * 4 */
    lw      sp, (a0)                        /* sp = xIsrStackTop[coreID] */
#else
    lw      a0, pxCurrentTCB                /* a0 = pxCurrentTCB */
    sw      sp, 0(a0)                       /* pxCurrentTCB->pxTopOfStack = sp */
    lw      sp, xIsrStackTop                /* sp = xIsrStackTop */
#endif /* ( configNUM_CORES > 1 ) */
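
    /* This relies on pxTopOfStack being the first member of the FreeRTOS TCB:
     * the task's saved stack pointer lives at offset 0 of the TCB. */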

#if CONFIG_ESP_SYSTEM_HW_STACK_GUARD
    /* Prepare the parameters for esp_hw_stack_guard_set_bounds(xIsrStackBottom, xIsrStackTop); */
#if ( configNUM_CORES > 1 )
    /* Load the ISR stack bottom for the current core and set the new bounds */
    la      a0, xIsrStackBottom
    add     a0, a0, a5                      /* a0 = &xIsrStackBottom[coreID] */
    lw      a0, (a0)                        /* a0 = xIsrStackBottom[coreID] */
#else
    lw      a0, xIsrStackBottom
#endif /* ( configNUM_CORES > 1 ) */
    mv      a1, sp
    /* esp_hw_stack_guard_set_bounds(xIsrStackBottom[coreID], xIsrStackTop[coreID]); */
    ESP_HW_STACK_GUARD_SET_BOUNDS_CUR_CORE a2
    ESP_HW_STACK_GUARD_MONITOR_START_CUR_CORE a0 a1
#endif /* CONFIG_ESP_SYSTEM_HW_STACK_GUARD */

rtos_int_enter_end:
    ret
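
/* Note: rtos_int_enter and rtos_int_exit are expected to be called from the
 * port's interrupt dispatch code, after the interrupted context has been saved
 * and before it is restored, respectively. */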

/**
 * Decrement the ISR nesting count and, if a context switch is pending,
 * schedule the next task. Finally, restore the stack pointer of the task
 * to run next.
 */
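/* A rough C-like sketch of the logic below (illustrative only; hardware
 * stack-guard handling omitted):
 *
 *     if (!port_xSchedulerRunning[coreID]) return;
 *     if (port_uxInterruptNesting[coreID] != 0 &&
 *         --port_uxInterruptNesting[coreID] != 0) return;  // still nested
 *     if (xPortSwitchFlag[coreID]) {
 *         vTaskSwitchContext();
 *         xPortSwitchFlag[coreID] = 0;
 *     }
 *     sp = pxCurrentTCB[coreID]->pxTopOfStack;  // resume on the task stack
 */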
    .global rtos_int_exit
    .type rtos_int_exit, @function
rtos_int_exit:
#if ( configNUM_CORES > 1 )
    csrr    a1, mhartid                     /* a1 = coreID */
    slli    a1, a1, 2                       /* a1 = coreID * 4 */
    la      a0, port_xSchedulerRunning      /* a0 = &port_xSchedulerRunning */
    add     a0, a0, a1                      /* a0 = &port_xSchedulerRunning[coreID] */
    lw      a0, (a0)                        /* a0 = port_xSchedulerRunning[coreID] */
#else
    lw      a0, port_xSchedulerRunning      /* a0 = port_xSchedulerRunning */
#endif /* ( configNUM_CORES > 1 ) */
    beqz    a0, rtos_int_exit_end           /* if (port_xSchedulerRunning[coreID] == 0) jump to rtos_int_exit_end */

    /* Update the interrupt nesting counter */
    la      a0, port_uxInterruptNesting     /* a0 = &port_uxInterruptNesting */
#if ( configNUM_CORES > 1 )
    add     a0, a0, a1                      /* a0 = &port_uxInterruptNesting[coreID] // a1 already contains coreID * 4 */
#endif /* ( configNUM_CORES > 1 ) */
    lw      a2, 0(a0)                       /* a2 = port_uxInterruptNesting[coreID] */

    /* Already zero, protect against underflow */
    beqz    a2, isr_skip_decrement          /* if (port_uxInterruptNesting[coreID] == 0) jump to isr_skip_decrement */
    addi    a2, a2, -1                      /* a2 = a2 - 1 */
    sw      a2, 0(a0)                       /* port_uxInterruptNesting[coreID] = a2 */

    /* Still nested inside an outer ISR, skip the section below and exit */
    bnez    a2, rtos_int_exit_end

isr_skip_decrement:
    /* If the CPU reached this label, a2 (uxInterruptNesting) is 0 for sure */

    /* Schedule the next task if a yield is pending */
    la      a0, xPortSwitchFlag             /* a0 = &xPortSwitchFlag */
#if ( configNUM_CORES > 1 )
    add     a0, a0, a1                      /* a0 = &xPortSwitchFlag[coreID] // a1 already contains coreID * 4 */
#endif /* ( configNUM_CORES > 1 ) */
    lw      a2, 0(a0)                       /* a2 = xPortSwitchFlag[coreID] */
    beqz    a2, no_switch                   /* if (xPortSwitchFlag[coreID] == 0) jump to no_switch */

    /* Preserve the return address and schedule the next task. To avoid
     * allocating stack space, keep ra in s0: s0 is callee-saved, so
     * vTaskSwitchContext will preserve it, and this caller does not
     * otherwise use it. */
    mv      s0, ra
    call    vTaskSwitchContext
    mv      ra, s0

    /* Clear the switch pending flag */
    la      a0, xPortSwitchFlag             /* a0 = &xPortSwitchFlag */
#if ( configNUM_CORES > 1 )
    /* The C routine vTaskSwitchContext may clobber the temporary registers,
     * so recompute the per-core offset */
    csrr    a1, mhartid                     /* a1 = coreID */
    slli    a1, a1, 2                       /* a1 = coreID * 4 */
    add     a0, a0, a1                      /* a0 = &xPortSwitchFlag[coreID]; */
#endif /* ( configNUM_CORES > 1 ) */
    sw      zero, 0(a0)                     /* xPortSwitchFlag[coreID] = 0; */

no_switch:

#if CONFIG_ESP_SYSTEM_HW_STACK_GUARD
    /* esp_hw_stack_guard_monitor_stop(); pass the scratch registers */
    ESP_HW_STACK_GUARD_MONITOR_STOP_CUR_CORE a0 a1
#endif /* CONFIG_ESP_SYSTEM_HW_STACK_GUARD */

#if ( configNUM_CORES > 1 )
    /* Recover the stack of the next task and prepare to exit */
    csrr    a1, mhartid                     /* a1 = coreID */
    slli    a1, a1, 2                       /* a1 = coreID * 4 */
    la      a0, pxCurrentTCB                /* a0 = &pxCurrentTCB */
    add     a0, a0, a1                      /* a0 = &pxCurrentTCB[coreID] */
    lw      a0, 0(a0)                       /* a0 = pxCurrentTCB[coreID] */
    lw      sp, 0(a0)                       /* sp = pxCurrentTCB[coreID]->pxTopOfStack, i.e. the task's saved sp */
#else
    /* Recover the stack of the next task */
    lw      a0, pxCurrentTCB                /* a0 = pxCurrentTCB */
    lw      sp, 0(a0)                       /* sp = pxCurrentTCB->pxTopOfStack */
#endif /* ( configNUM_CORES > 1 ) */

#if CONFIG_ESP_SYSTEM_HW_STACK_GUARD
    /* esp_hw_stack_guard_set_bounds(pxCurrentTCB[0]->pxStack,
     *                               pxCurrentTCB[0]->pxEndOfStack);
     */
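    /* PORT_OFFSET_PX_STACK and PORT_OFFSET_PX_END_OF_STACK are assumed here to
     * be the byte offsets of the pxStack and pxEndOfStack TCB members, as
     * exported to assembly by the port layer. */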
    lw      a1, PORT_OFFSET_PX_END_OF_STACK(a0)
    lw      a0, PORT_OFFSET_PX_STACK(a0)
    ESP_HW_STACK_GUARD_SET_BOUNDS_CUR_CORE a2
    /* esp_hw_stack_guard_monitor_start(); */
    ESP_HW_STACK_GUARD_MONITOR_START_CUR_CORE a0 a1
#endif /* CONFIG_ESP_SYSTEM_HW_STACK_GUARD */

rtos_int_exit_end:
    ret