/*
 * SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 Unit tests for FreeRTOS preemption
*/

|
#include <esp_types.h>
#include <inttypes.h>
#include <stdio.h>

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "freertos/queue.h"
#include "unity.h"
#include "hal/cpu_hal.h"
#include "test_utils.h"
#include "sdkconfig.h"

/* Set true by the test runner task to release the sender task from its
   busy-wait. volatile: written by one task, polled by another. */
static volatile bool trigger;
/* Set true by the sender task immediately after it samples its cycle count,
   before it pushes the sample into the queue; the test asserts it later. */
static volatile bool flag;

/* Upper bound (in CPU cycles) that a yield triggered from a lower priority
   task is allowed to take before the test fails. The SMP FreeRTOS build
   currently needs a larger budget. */
#ifndef CONFIG_FREERTOS_SMP
#define MAX_YIELD_COUNT 10000
#else
//TODO: IDF-5081
#define MAX_YIELD_COUNT 17000
#endif // CONFIG_FREERTOS_SMP

/* Task:
|
|
|
|
- Waits for 'trigger' variable to be set
|
|
|
|
- Reads the cycle count on this CPU
|
|
|
|
- Pushes it into a queue supplied as a param
|
|
|
|
- Busy-waits until the main task terminates it
|
|
|
|
*/
|
|
|
|
static void task_send_to_queue(void *param)
|
2017-03-15 23:07:50 -04:00
|
|
|
{
|
|
|
|
QueueHandle_t queue = (QueueHandle_t) param;
|
|
|
|
uint32_t ccount;
|
2017-04-02 21:59:30 -04:00
|
|
|
|
|
|
|
while(!trigger) {}
|
|
|
|
|
2020-11-05 23:03:21 -05:00
|
|
|
ccount = cpu_hal_get_cycle_count();
|
2017-03-15 23:07:50 -04:00
|
|
|
flag = true;
|
|
|
|
xQueueSendToBack(queue, &ccount, 0);
|
|
|
|
/* This is to ensure that higher priority task
|
|
|
|
won't wake anyhow, due to this task terminating.
|
2017-04-02 21:59:30 -04:00
|
|
|
|
|
|
|
The task runs until terminated by the main task.
|
2017-03-15 23:07:50 -04:00
|
|
|
*/
|
2017-04-02 21:59:30 -04:00
|
|
|
while(1) {}
|
2017-03-15 23:07:50 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Measures how many cycles it takes for a lower priority task pinned to the
   same CPU to wake this (higher priority) task via a queue send, and checks
   the latency stays under MAX_YIELD_COUNT. */
TEST_CASE("Yield from lower priority task, same CPU", "[freertos]")
{
    /* Do this 3 times, mostly for the benchmark value - the first
       run includes a cache miss so uses more cycles than it should. */
    for (int i = 0; i < 3; i++) {
        TaskHandle_t sender_task;
        QueueHandle_t queue = xQueueCreate(1, sizeof(uint32_t));
        flag = false;
        trigger = false;

        /* "yield" task sits on our CPU, lower priority to us */
        xTaskCreatePinnedToCore(task_send_to_queue, "YIELD", 2048, (void *)queue, UNITY_FREERTOS_PRIORITY - 1, &sender_task, UNITY_FREERTOS_CPU);

        vTaskDelay(1); /* make sure everything is set up */
        trigger = true;

        uint32_t yield_ccount, now_ccount, delta;
        /* The queue send should preempt us back almost immediately */
        TEST_ASSERT( xQueueReceive(queue, &yield_ccount, 100 / portTICK_PERIOD_MS) );
        now_ccount = cpu_hal_get_cycle_count();
        TEST_ASSERT( flag );

        /* Both counts come from the same CPU, so the difference is valid */
        delta = now_ccount - yield_ccount;
        /* PRIu32 keeps the format specifier correct for uint32_t regardless
           of how the target toolchain defines it ("%u" is only right where
           uint32_t == unsigned int). */
        printf("Yielding from lower priority task took %" PRIu32 " cycles\n", delta);
        TEST_ASSERT(delta < MAX_YIELD_COUNT);

        vTaskDelete(sender_task);
        vQueueDelete(queue);
    }
}

#if (portNUM_PROCESSORS == 2) && !CONFIG_FREERTOS_PLACE_FUNCTIONS_INTO_FLASH
/* Measures the latency of being woken by a queue send issued from a lower
   priority task pinned to the other CPU, and checks it stays under
   MAX_YIELD_COUNT. Cycle counts are per-CPU, so the delta is computed
   against this CPU's count taken at trigger time, not the sender's. */
TEST_CASE("Yield from lower priority task, other CPU", "[freertos]")
{
    uint32_t trigger_ccount, yield_ccount, now_ccount, delta;

    /* Do this 3 times, mostly for the benchmark value - the first
       run includes a cache miss so uses more cycles than it should. */
    for (int i = 0; i < 3; i++) {
        TaskHandle_t sender_task;
        QueueHandle_t queue = xQueueCreate(1, sizeof(uint32_t));
        trigger = false;
        flag = false;

        /* "send_to_queue" task sits on the other CPU, lower priority to us */
        xTaskCreatePinnedToCore(task_send_to_queue, "YIELD", 2048, (void *)queue, UNITY_FREERTOS_PRIORITY - 1,
                                &sender_task, !UNITY_FREERTOS_CPU);

        vTaskDelay(2); /* make sure everything is set up */
        trigger = true;
        trigger_ccount = cpu_hal_get_cycle_count();

        // yield_ccount is not useful in this test as it's the other core's CCOUNT
        // so we use trigger_ccount instead
        TEST_ASSERT( xQueueReceive(queue, &yield_ccount, 100 / portTICK_PERIOD_MS) );
        now_ccount = cpu_hal_get_cycle_count();
        TEST_ASSERT( flag );

        delta = now_ccount - trigger_ccount;
        /* PRIu32 keeps the format specifier correct for uint32_t regardless
           of how the target toolchain defines it ("%u" is only right where
           uint32_t == unsigned int). */
        printf("Yielding from task on other core took %" PRIu32 " cycles\n", delta);
        TEST_ASSERT(delta < MAX_YIELD_COUNT);

        vQueueDelete(queue);
        vTaskDelete(sender_task);
    }
}
#endif