/*
 * Test backported deletion behavior by creating tasks of various affinities and
 * checking whether the task memory is freed immediately under the correct conditions.
 *
 * vTaskDelete() results in the immediate freeing of task memory and the
 * immediate execution of deletion callbacks for tasks which are not running,
 * provided they are not pinned to the other core (due to FPU cleanup
 * requirements).
 *
 * If this condition is not met, freeing of task memory and execution of
 * deletion callbacks will instead be carried out later by the Idle Task.
 */
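/*
 * A minimal sketch of the rule above (illustration only, not part of the
 * test; some_task_fn is a placeholder for any task function):
 *
 *     TaskHandle_t handle;
 *     xTaskCreatePinnedToCore(some_task_fn, "tsk", 4096, NULL, 5, &handle, xPortGetCoreID());
 *     vTaskDelete(handle);   // not running & pinned to this core: freed immediately
 *
 * A task that calls vTaskDelete(NULL) on itself is still running, so its
 * memory is instead reclaimed later by the Idle Task.
 */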
#include <stdio.h>
#include <stdlib.h>     // rand(), srand()
#include <stdbool.h>
#include <assert.h>

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "esp_heap_caps.h"

#include "unity.h"
#include "test_utils.h"

#include "esp_rom_sys.h"

#define NO_OF_TSKS  3
#define DELAY_TICKS 2
/* Caps of all memory which is allocated from when a task is created */
#define HEAP_CAPS   (portTcbMemoryCaps | portStackMemoryCaps)

#define DELAY_US_ITERATIONS 1000

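/* ------------------- Helper tasks used by the test cases below ------------------- */
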
static void tsk_self_del(void *param)
{
    vTaskDelete(NULL);  //Deleting self means deleting currently running task
}

static void tsk_extern_del(void *param)
{
    vTaskDelay(portMAX_DELAY);  //Await external deletion
}

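// Busy-waits for 'param' microseconds, then deletes itself. Used to sweep the
// instant of self-deletion across a tick in the no-affinity test below.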
static void tsk_self_del_us_delay(void *param)
{
    uint32_t delay = (uint32_t)param;
    esp_rom_delay_us(delay);
    vTaskDelete(NULL);
}

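/*
 * Runs three scenarios in sequence: self-deleting pinned tasks (cleaned up by
 * the Idle Task), externally deleted non-running tasks (memory freed
 * immediately), and self-deleting unpinned tasks raced against the other
 * core's Idle Task.
 */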
TEST_CASE("FreeRTOS Delete Tasks", "[freertos]")
|
|
{
|
|
/* -------------- Test vTaskDelete() on currently running tasks ----------------*/
|
|
uint32_t before_count = uxTaskGetNumberOfTasks();
|
|
uint32_t before_heap = heap_caps_get_free_size(HEAP_CAPS);
|
|
for(int i = 0; i < portNUM_PROCESSORS; i++){
|
|
for(int j = 0; j < NO_OF_TSKS; j++){
|
|
TEST_ASSERT_EQUAL(pdTRUE, xTaskCreatePinnedToCore(tsk_self_del, "tsk_self", 1024, NULL, configMAX_PRIORITIES - 1, NULL, i));
|
|
}
|
|
}
|
|
vTaskDelay(DELAY_TICKS); //Minimal delay to see if Idle task cleans up all tasks awaiting deletion in a single tick
|
|
TEST_ASSERT_EQUAL(before_count, uxTaskGetNumberOfTasks());
|
|
TEST_ASSERT_EQUAL(before_heap, heap_caps_get_free_size(HEAP_CAPS));
|
|
|
|
    /* ------------- Test vTaskDelete() on not currently running tasks ------------ */
    TaskHandle_t handles[NO_OF_TSKS];
    before_heap = heap_caps_get_free_size(HEAP_CAPS);
    //Create tasks pinned to the same core that will not run during task deletion
    for(int j = 0; j < NO_OF_TSKS; j++){
        TEST_ASSERT_EQUAL(pdTRUE, xTaskCreatePinnedToCore(tsk_extern_del, "tsk_extern", 4096, NULL, configMAX_PRIORITIES - 1, &handles[j], xPortGetCoreID()));
    }
    TEST_ASSERT_NOT_EQUAL(before_heap, heap_caps_get_free_size(HEAP_CAPS));  //Check tasks have been created
    //Delete the tasks, memory should be freed immediately
    for(int j = 0; j < NO_OF_TSKS; j++){
        vTaskDelete(handles[j]);
    }
    TEST_ASSERT_EQUAL(before_heap, heap_caps_get_free_size(HEAP_CAPS));

    /* Test that a self-deleting task with no affinity is not cleaned up by the
       idle task of the other core before it has context switched out. Sweep the
       busy-wait so self-deletion lands at varying points within the tick. */
    for(int i = 0; i < DELAY_US_ITERATIONS; i += 10){
        vTaskDelay(1);  //Sync to next tick interrupt
        xTaskCreatePinnedToCore(tsk_self_del_us_delay, "delay", 1024, (void *)i, UNITY_FREERTOS_PRIORITY - 1, NULL, tskNO_AFFINITY);
        esp_rom_delay_us(10);   //Busy wait to ensure no affinity task runs on opposite core
    }
}

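/* Fixture shared between the blocking tasks below: each task takes and gives
   the semaphore, and asserts that it is never scheduled again once 'deleted'
   has been set by the test runner. */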
typedef struct {
    SemaphoreHandle_t sem;
    volatile bool deleted; // Check the deleted task doesn't keep running after being deleted
} tsk_blocks_param_t;

/* Task blocks as often as possible
   (two or more of these can share the same semaphore and "juggle" it around)
*/
static void tsk_blocks_frequently(void *param)
{
    tsk_blocks_param_t *p = (tsk_blocks_param_t *)param;
    SemaphoreHandle_t sem = p->sem;
    srand(xTaskGetTickCount() ^ (int)xTaskGetCurrentTaskHandle()); // per-task seed so the delay patterns differ
    while (1) {
        assert(!p->deleted);
        esp_rom_delay_us(rand() % 10);
        assert(!p->deleted);
        xSemaphoreTake(sem, portMAX_DELAY);
        assert(!p->deleted);
        esp_rom_delay_us(rand() % 10);
        assert(!p->deleted);
        xSemaphoreGive(sem);
    }
}

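/*
 * Repeatedly creates tasks that juggle a shared mutex (one pinned to each
 * core, plus one unpinned), deletes them while they may be blocked on it,
 * and checks that the deleted tasks stop running and no memory is leaked.
 */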
TEST_CASE("FreeRTOS Delete Blocked Tasks", "[freertos]")
|
|
{
|
|
TaskHandle_t blocking_tasks[portNUM_PROCESSORS + 1]; // one per CPU, plus one unpinned task
|
|
tsk_blocks_param_t params[portNUM_PROCESSORS + 1] = { 0 };
|
|
|
|
unsigned before = heap_caps_get_free_size(MALLOC_CAP_8BIT);
|
|
printf("Free memory at start %u\n", before);
|
|
|
|
    /* Any bugs will depend on the relative timing of destroying the tasks, so create & delete many times.

       Stop early if it looks like some resources have not been properly cleaned up.

       (1000 iterations take about 9 seconds on a dual core ESP32)
    */
    for(unsigned iter = 0; iter < 1000; iter++) {
        // Create everything
        SemaphoreHandle_t sem = xSemaphoreCreateMutex();
        for(unsigned i = 0; i < portNUM_PROCESSORS + 1; i++) {
            params[i].deleted = false;
            params[i].sem = sem;

            TEST_ASSERT_EQUAL(pdTRUE,
                              xTaskCreatePinnedToCore(tsk_blocks_frequently, "tsk_block", 4096, &params[i],
                                                      UNITY_FREERTOS_PRIORITY - 1, &blocking_tasks[i],
                                                      i < portNUM_PROCESSORS ? i : tskNO_AFFINITY)); // last task is unpinned
        }

        vTaskDelay(5); // Let the tasks juggle the mutex for a bit

        for(unsigned i = 0; i < portNUM_PROCESSORS + 1; i++) {
            vTaskDelete(blocking_tasks[i]);
            params[i].deleted = true; // Set only after deletion; the task's asserts catch it running again
        }
        vTaskDelay(4); // Yield to the idle task for cleanup

        vSemaphoreDelete(sem);

        // Check we haven't leaked resources yet, allowing a small (256 byte)
        // tolerance for heap bookkeeping variation between iterations
        TEST_ASSERT_GREATER_OR_EQUAL(before - 256, heap_caps_get_free_size(MALLOC_CAP_8BIT));
    }
}