/*
 * SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include "heap_private.h"
#include <assert.h>
#include <string.h>
#include <sys/lock.h>

#include "esp_log.h"
#include "multi_heap.h"
#include "multi_heap_platform.h"
#include "esp_heap_caps_init.h"
#include "heap_memory_layout.h"

#include "esp_private/startup_internal.h"

static const char *TAG = "heap_init";

/* Linked-list of registered heaps */
struct registered_heap_ll registered_heaps;
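
/* Run heap_caps_init() as part of the CORE system startup stage; BIT(0)
   selects the core it runs on and 100 is its startup priority. */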
ESP_SYSTEM_INIT_FN(init_heap, CORE, BIT(0), 100)
{
    heap_caps_init();
    return ESP_OK;
}
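
/* Register one memory region with the multi_heap allocator; sets region->heap
   to the new heap's handle, or to NULL if registration fails. */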
static void register_heap(heap_t *region)
{
    size_t heap_size = region->end - region->start;
    assert(heap_size <= HEAP_SIZE_MAX);
    region->heap = multi_heap_register((void *)region->start, heap_size);
    if (region->heap != NULL) {
        ESP_EARLY_LOGD(TAG, "New heap initialised at %p", region->heap);
    }
}
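
/* Register any heaps that heap_caps_init() skipped because they still held the
   startup ("nonos") stack; called once that stack is no longer in use. */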
void heap_caps_enable_nonos_stack_heaps(void)
{
    heap_t *heap;
    SLIST_FOREACH(heap, &registered_heaps, next) {
        // Assume any not-yet-registered heap is
        // a nonos-stack heap
        if (heap->heap == NULL) {
            register_heap(heap);
            if (heap->heap != NULL) {
                multi_heap_set_lock(heap->heap, &heap->heap_mux);
            }
        }
    }
}

/* Initialize the heap allocator to use all of the memory not
   used by static data or reserved for other purposes
 */
void heap_caps_init(void)
{
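    /* When CONFIG_HEAP_TLSF_USE_ROM_IMPL is set, the allocator implementation
       lives in ROM and needs to be initialised before any heap is registered. */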
#ifdef CONFIG_HEAP_TLSF_USE_ROM_IMPL
    extern void multi_heap_in_rom_init(void);
    multi_heap_in_rom_init();
#endif

    /* Get the array of regions that we can use for heaps
       (with reserved memory removed already.)
     */
    size_t num_regions = soc_get_available_memory_region_max_count();
    soc_memory_region_t regions[num_regions];
    num_regions = soc_get_available_memory_regions(regions);

    // The following loop determines the final number of heaps: start with one
    // heap per region, then subtract one every time two regions are coalesced.
    size_t num_heaps = num_regions;

    // The heap allocator will treat every region given to it as separate. In order to
    // get bigger ranges of contiguous memory, it's useful to coalesce adjacent regions
    // that have the same type.
    for (size_t i = 1; i < num_regions; i++) {
        soc_memory_region_t *a = &regions[i - 1];
        soc_memory_region_t *b = &regions[i];
        if (b->start == (intptr_t)(a->start + a->size) && b->type == a->type && b->startup_stack == a->startup_stack) {
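            // Merge 'a' into 'b': grow 'b' backwards over 'a', then mark 'a'
            // as invalid (type -1) so it is skipped when creating heaps below.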
            a->type = -1;
            b->start = a->start;
            b->size += a->size;

            // Decrement the number of heaps, since two regions
            // have just been coalesced into one.
            num_heaps--;
        }
    }

    /* Start by allocating the registered heap data on the stack.

       Once we have a heap to copy it to, we will copy it to a heap buffer.
    */
    heap_t temp_heaps[num_heaps];
    size_t heap_idx = 0;

    ESP_EARLY_LOGI(TAG, "Initializing. RAM available for dynamic allocation:");
    for (size_t i = 0; i < num_regions; i++) {
        soc_memory_region_t *region = &regions[i];
        const soc_memory_type_desc_t *type = &soc_memory_types[region->type];
        heap_t *heap = &temp_heaps[heap_idx];
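        // Skip regions that were coalesced into a neighbour above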
        if (region->type == -1) {
            continue;
        }
        heap_idx++;
        assert(heap_idx <= num_heaps);

        memcpy(heap->caps, type->caps, sizeof(heap->caps));
        heap->start = region->start;
        heap->end = region->start + region->size;
        MULTI_HEAP_LOCK_INIT(&heap->heap_mux);
        if (region->startup_stack) {
            /* Will be registered when OS scheduler starts */
            heap->heap = NULL;
        } else {
            register_heap(heap);
        }
        SLIST_NEXT(heap, next) = NULL;

        ESP_EARLY_LOGI(TAG, "At %08X len %08X (%d KiB): %s",
                       region->start, region->size, region->size / 1024, type->name);
    }

    assert(heap_idx == num_heaps);

    /* Allocate the permanent heap data that we'll use as a linked list at runtime.

       Allocate this part of data contiguously, even though it's a linked list... */
    assert(SLIST_EMPTY(&registered_heaps));

    heap_t *heaps_array = NULL;
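    // heap_caps_malloc() cannot be used yet (registered_heaps is still empty),
    // so allocate the array directly from one of the newly created heaps.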
    for (size_t i = 0; i < num_heaps; i++) {
        if (heap_caps_match(&temp_heaps[i], MALLOC_CAP_8BIT|MALLOC_CAP_INTERNAL)) {
            /* use the first DRAM heap which can fit the data */
            heaps_array = multi_heap_malloc(temp_heaps[i].heap, sizeof(heap_t) * num_heaps);
            if (heaps_array != NULL) {
                break;
            }
        }
    }
    assert(heaps_array != NULL); /* if NULL, there's not enough free startup heap space */

    memcpy(heaps_array, temp_heaps, sizeof(heap_t)*num_heaps);

    /* Iterate over the heaps, setting their locks and adding them to the linked list. */
    for (size_t i = 0; i < num_heaps; i++) {
        if (heaps_array[i].heap != NULL) {
            multi_heap_set_lock(heaps_array[i].heap, &heaps_array[i].heap_mux);
        }
        if (i == 0) {
            SLIST_INSERT_HEAD(&registered_heaps, &heaps_array[0], next);
        } else {
            SLIST_INSERT_AFTER(&heaps_array[i-1], &heaps_array[i], next);
        }
    }
}
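
/* Add the memory in [start, end) to the heap allocator at runtime, deriving
   the region's capabilities from the SoC memory layout entry that contains
   'start'.

   A minimal usage sketch; the buffer and its size here are hypothetical, not
   part of this file:

       // Hand a no-longer-needed static buffer back to the allocator:
       // esp_err_t err = heap_caps_add_region((intptr_t)buf,
       //                                      (intptr_t)buf + BUF_SIZE);
*/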
esp_err_t heap_caps_add_region(intptr_t start, intptr_t end)
{
    if (start == 0) {
        return ESP_ERR_INVALID_ARG;
    }

    for (size_t i = 0; i < soc_memory_region_count; i++) {
        const soc_memory_region_t *region = &soc_memory_regions[i];
        // Test the requested 'start' only, as 'end' may lie in a different
        // region entry; assume 'end' has the same caps.
        if (region->start <= start && (intptr_t)(region->start + region->size) > start) {
            const uint32_t *caps = soc_memory_types[region->type].caps;
            return heap_caps_add_region_with_caps(caps, start, end);
        }
    }

    return ESP_ERR_NOT_FOUND;
}

/* This API is used for internal test purposes and hence is not marked as static */
bool heap_caps_check_add_region_allowed(intptr_t heap_start, intptr_t heap_end, intptr_t start, intptr_t end)
{
    /*
     * We assume that in any region, the "start" must be strictly less than the "end".
     * Notably, the 3rd scenario is allowed. For example, allocate memory from the heap,
     * then change its capability and call this function to create a new region for a
     * special application.
     * The 'start == heap_start && end == heap_end' scenario (6 below) is incorrect
     * because the same region cannot be added twice. In fact, registering the same
     * memory region as a heap twice would cause corruption and then an exception at
     * runtime.
     *
     * the existing heap region                 s(tart)                e(nd)
     *                                          |----------------------|
     *
     * 1.add region (e1<s)        |-----|                                         correct: bool condition_1 = end < heap_start;
     *
     * 2.add region (s2<s && e2>s)      |-----------------|                       wrong:   bool condition_2 = start < heap_start && end > heap_start;
     *                                  |---------------------------------|       wrong
     *
     * 3.add region (s3>=s && e3<e)                 |---------------|             correct: bool condition_3 = start >= heap_start && end < heap_end;
     *                                                |--------------|            correct
     *
     * 4.add region (s4<e && e4>e)                         |------------------------|  wrong: bool condition_4 = start < heap_end && end > heap_end;
     *                                                          |---------------------| wrong
     *
     * 5.add region (s5>=e)                                               |----|  correct: bool condition_5 = start >= heap_end;
     *
     * 6.add region (s6==s && e6==e)            |----------------------|          wrong:   bool condition_6 = start == heap_start && end == heap_end;
     */

    bool condition_2 = start < heap_start && end > heap_start; // if true then region not allowed
    bool condition_4 = start < heap_end && end > heap_end;     // if true then region not allowed
    bool condition_6 = start == heap_start && end == heap_end; // if true then region not allowed
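
    // Scenarios 1, 3 and 5 are the allowed ones, so only the overlapping or
    // duplicate scenarios (2, 4 and 6) need to be tested for explicitly.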
    return !(condition_2 || condition_4 || condition_6);
}
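
/* Like heap_caps_add_region(), but the caller supplies the capability array
   explicitly instead of it being looked up in the SoC memory layout. */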
esp_err_t heap_caps_add_region_with_caps(const uint32_t caps[], intptr_t start, intptr_t end)
{
    esp_err_t err = ESP_FAIL;
    if (caps == NULL || start == 0 || end == 0 || end <= start) {
        return ESP_ERR_INVALID_ARG;
    }

    // Check if the new region overlaps the start and/or end of an existing
    // region. If so, the region is invalid (or was perhaps added twice).
    heap_t *heap;
    SLIST_FOREACH(heap, &registered_heaps, next) {
        if (!heap_caps_check_add_region_allowed(heap->start, heap->end, start, end)) {
            ESP_EARLY_LOGD(TAG, "invalid overlap detected with existing heap region");
            return ESP_FAIL;
        }
    }

    heap_t *p_new = heap_caps_malloc(sizeof(heap_t), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
    if (p_new == NULL) {
        err = ESP_ERR_NO_MEM;
        goto done;
    }
    memcpy(p_new->caps, caps, sizeof(p_new->caps));
    p_new->start = start;
    p_new->end = end;
    MULTI_HEAP_LOCK_INIT(&p_new->heap_mux);
    p_new->heap = multi_heap_register((void *)start, end - start);
    SLIST_NEXT(p_new, next) = NULL;
    if (p_new->heap == NULL) {
        err = ESP_ERR_INVALID_SIZE;
        goto done;
    }
    multi_heap_set_lock(p_new->heap, &p_new->heap_mux);

    /* This insertion is atomic with respect to registered_heaps, so
       we don't need to worry about thread safety for readers,
       only for writers. */
    static multi_heap_lock_t registered_heaps_write_lock = MULTI_HEAP_LOCK_STATIC_INITIALIZER;
    MULTI_HEAP_LOCK(&registered_heaps_write_lock);
    SLIST_INSERT_HEAD(&registered_heaps, p_new, next);
    MULTI_HEAP_UNLOCK(&registered_heaps_write_lock);

    err = ESP_OK;

done:
    if (err != ESP_OK) {
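        // free(NULL) is a no-op, so this is safe even when allocation failed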
        free(p_new);
    }
    return err;
}