2021-11-02 01:36:18 -04:00
|
|
|
/*
|
2024-02-14 07:03:09 -05:00
|
|
|
* SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
|
2021-11-02 01:36:18 -04:00
|
|
|
*
|
|
|
|
* SPDX-License-Identifier: Apache-2.0
|
|
|
|
*/
|
2017-05-03 04:03:28 -04:00
|
|
|
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <assert.h>
#include <stdio.h>
#include <sys/param.h>
#include "esp_attr.h"
#include "esp_heap_caps.h"
#include "multi_heap.h"
#include "esp_log.h"
#include "heap_private.h"
#include "esp_system.h"
#if CONFIG_HEAP_TLSF_USE_ROM_IMPL
#include "esp_rom_multi_heap.h"
#endif
|
|
|
|
|
2023-03-08 04:48:17 -05:00
|
|
|
#ifdef CONFIG_HEAP_USE_HOOKS
// Invoke a trace hook if one is registered. Wrapped in do/while(0) so the
// macro behaves as a single statement: the previous bare-brace form broke
// `if (...) CALL_HOOK(...); else ...` (dangling-else / stray-semicolon hazard).
#define CALL_HOOK(hook, ...) do { \
        if (hook != NULL) { \
            hook(__VA_ARGS__); \
        } \
    } while (0)
#else
// Hooks disabled: expand to a single empty statement so call sites still
// require (and consume) a trailing semicolon in every syntactic position.
#define CALL_HOOK(hook, ...) do {} while (0)
#endif
|
2023-02-21 01:37:59 -05:00
|
|
|
|
2022-06-08 02:33:11 -04:00
|
|
|
/* Forward declaration for base function, put in IRAM.
|
|
|
|
* These functions don't check for errors after trying to allocate memory. */
|
|
|
|
static void *heap_caps_realloc_base( void *ptr, size_t size, uint32_t caps );
|
|
|
|
static void *heap_caps_calloc_base( size_t n, size_t size, uint32_t caps );
|
|
|
|
static void *heap_caps_malloc_base( size_t size, uint32_t caps );
|
2021-11-02 01:36:18 -04:00
|
|
|
|
2017-05-03 04:03:28 -04:00
|
|
|
/*
|
|
|
|
This file, combined with a region allocator that supports multiple heaps, solves the problem that the ESP32 has RAM
|
|
|
|
that's slightly heterogeneous. Some RAM can be byte-accessed, some allows only 32-bit accesses, some can execute memory,
|
|
|
|
some can be remapped by the MMU to only be accessed by a certain PID etc. In order to allow the most flexible memory
|
|
|
|
allocation possible, this code makes it possible to request memory that has certain capabilities. The code will then use
|
|
|
|
its knowledge of how the memory is configured along with a priority scheme to allocate that memory in the most sane way
|
|
|
|
possible. This should optimize the amount of RAM accessible to the code without hardwiring addresses.
|
|
|
|
*/
|
|
|
|
|
2020-04-27 16:09:15 -04:00
|
|
|
static esp_alloc_failed_hook_t alloc_failed_callback;
|
2023-01-30 05:47:18 -05:00
|
|
|
|
|
|
|
#ifdef CONFIG_HEAP_ABORT_WHEN_ALLOCATION_FAILS
|
2023-04-03 09:16:55 -04:00
|
|
|
// Render `n` as exactly 8 lowercase hex digits into buf (no NUL terminator).
HEAP_IRAM_ATTR static void hex_to_str(char buf[8], uint32_t n)
{
    static const char k_hex_digits[] = "0123456789abcdef";
    for (int pos = 0; pos < 8; pos++) {
        // Most significant nibble first: shift by 28, 24, ..., 0.
        buf[pos] = k_hex_digits[(n >> (28 - pos * 4)) & 0xF];
    }
}
|
2023-04-03 09:16:55 -04:00
|
|
|
// Build the abort message "Mem alloc fail. size 0x........ caps 0x........"
// into dest (48 bytes including the NUL copied from the template literal).
// Uses no heap/stdio so it is safe to run while allocation is failing.
HEAP_IRAM_ATTR static void fmt_abort_str(char dest[48], size_t size, uint32_t caps)
{
    // Copy the full template (47 chars + NUL), then overwrite the two
    // placeholder fields in place.
    memcpy(dest, "Mem alloc fail. size 0x00000000 caps 0x00000000", 48);

    char hex_size[8];
    char hex_caps[8];
    hex_to_str(hex_size, size);
    hex_to_str(hex_caps, caps);

    memcpy(&dest[23], hex_size, 8);   // after "size 0x"
    memcpy(&dest[39], hex_caps, 8);   // after "caps 0x"
}
|
|
|
|
#endif
|
|
|
|
|
2017-05-03 04:03:28 -04:00
|
|
|
/*
This takes a memory chunk in a region that can be addressed as both DRAM as well as IRAM. It will convert it to
IRAM in such a way that it can be later freed. It assumes both the address as well as the length to be word-aligned.
It returns a region that's 1 word smaller than the region given because it stores the original Dram address there.
*/
HEAP_IRAM_ATTR static void *dram_alloc_to_iram_addr(void *addr, size_t len)
{
    uintptr_t dstart = (uintptr_t)addr; //First word
    // Last word of the chunk; only read inside asserts, hence "unused".
    uintptr_t dend __attribute__((unused)) = dstart + len - 4; //Last word
    // Both ends must lie inside the DRAM view of the D/IRAM region...
    assert(esp_ptr_in_diram_dram((void *)dstart));
    assert(esp_ptr_in_diram_dram((void *)dend));
    // ...and be word-aligned, since we store/return word pointers.
    assert((dstart & 3) == 0);
    assert((dend & 3) == 0);
#if SOC_DIRAM_INVERTED // We want the word before the result to hold the DRAM address
    // Inverted mapping: the DRAM end corresponds to the IRAM start.
    uint32_t *iptr = esp_ptr_diram_dram_to_iram((void *)dend);
#else
    uint32_t *iptr = esp_ptr_diram_dram_to_iram((void *)dstart);
#endif
    // Stash the original DRAM start one word before the returned pointer so
    // heap_caps_free()/realloc can recover the real allocation address.
    *iptr = dstart;
    return iptr + 1;
}
|
|
|
|
|
2023-04-03 09:16:55 -04:00
|
|
|
HEAP_IRAM_ATTR NOINLINE_ATTR static void heap_caps_alloc_failed(size_t requested_size, uint32_t caps, const char *function_name)
|
2020-04-27 16:09:15 -04:00
|
|
|
{
|
|
|
|
if (alloc_failed_callback) {
|
2022-11-14 02:21:33 -05:00
|
|
|
alloc_failed_callback(requested_size, caps, function_name);
|
2020-04-27 16:09:15 -04:00
|
|
|
}
|
|
|
|
|
2022-08-07 10:21:22 -04:00
|
|
|
#ifdef CONFIG_HEAP_ABORT_WHEN_ALLOCATION_FAILS
|
2023-01-30 05:47:18 -05:00
|
|
|
char buf[48];
|
|
|
|
fmt_abort_str(buf, requested_size, caps);
|
|
|
|
esp_system_abort(buf);
|
2022-08-07 10:21:22 -04:00
|
|
|
#endif
|
2020-04-27 16:09:15 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Install a hook to be called whenever an allocation attempt fails.
 * Returns ESP_ERR_INVALID_ARG for a NULL callback, ESP_OK otherwise.
 */
esp_err_t heap_caps_register_failed_alloc_callback(esp_alloc_failed_hook_t callback)
{
    esp_err_t result = ESP_ERR_INVALID_ARG;

    if (callback != NULL) {
        alloc_failed_callback = callback;
        result = ESP_OK;
    }

    return result;
}
|
|
|
|
|
2017-07-19 03:10:33 -04:00
|
|
|
bool heap_caps_match(const heap_t *heap, uint32_t caps)
|
|
|
|
{
|
|
|
|
return heap->heap != NULL && ((get_all_caps(heap) & caps) == caps);
|
|
|
|
}
|
|
|
|
|
2021-11-02 01:36:18 -04:00
|
|
|
|
2017-05-03 04:03:28 -04:00
|
|
|
/*
|
2021-11-02 01:36:18 -04:00
|
|
|
This function should not be called directly as it does not
|
|
|
|
check for failure / call heap_caps_alloc_failed()
|
2017-05-03 04:03:28 -04:00
|
|
|
*/
|
2023-04-03 09:16:55 -04:00
|
|
|
/*
 * Core allocation routine: walk the capability priority levels from most to
 * least desirable and, at each level, every registered heap; the first heap
 * whose combined caps cover the whole request is asked for memory.
 * Returns NULL on failure without invoking the failure callback.
 */
HEAP_IRAM_ATTR static void *heap_caps_malloc_base( size_t size, uint32_t caps)
{
    void *ret = NULL;

    // remove block owner size to HEAP_SIZE_MAX rather than adding the block owner size
    // to size to prevent overflows.
    if (size == 0 || size > MULTI_HEAP_REMOVE_BLOCK_OWNER_SIZE(HEAP_SIZE_MAX) ) {
        // Avoids int overflow when adding small numbers to size, or
        // calculating 'end' from start+size, by limiting 'size' to the possible range
        return NULL;
    }

    if (caps & MALLOC_CAP_EXEC) {
        //MALLOC_CAP_EXEC forces an alloc from IRAM. There is a region which has both this as well as the following
        //caps, but the following caps are not possible for IRAM. Thus, the combination is impossible and we return
        //NULL directly, even although our heap capabilities (based on soc_memory_tags & soc_memory_regions) would
        //indicate there is a tag for this.
        if ((caps & MALLOC_CAP_8BIT) || (caps & MALLOC_CAP_DMA)) {
            return NULL;
        }
        caps |= MALLOC_CAP_32BIT; // IRAM is 32-bit accessible RAM
    }

    if (caps & MALLOC_CAP_32BIT) {
        /* 32-bit accessible RAM should allocated in 4 byte aligned sizes
         * (Future versions of ESP-IDF should possibly fail if an invalid size is requested)
         */
        size = (size + 3) & (~3); // int overflow checked above
    }

    for (int prio = 0; prio < SOC_MEMORY_TYPE_NO_PRIOS; prio++) {
        //Iterate over heaps and check capabilities at this priority
        heap_t *heap;
        SLIST_FOREACH(heap, &registered_heaps, next) {
            if (heap->heap == NULL) {
                continue;
            }
            if ((heap->caps[prio] & caps) != 0) {
                //Heap has at least one of the caps requested. If caps has other bits set that this prio
                //doesn't cover, see if they're available in other prios.
                if ((get_all_caps(heap) & caps) == caps) {
                    //This heap can satisfy all the requested capabilities. See if we can grab some memory using it.
                    // If MALLOC_CAP_EXEC is requested but the DRAM and IRAM are on the same addresses (like on esp32c6)
                    // proceed as for a default allocation.
                    if ((caps & MALLOC_CAP_EXEC) && !esp_dram_match_iram() && esp_ptr_in_diram_dram((void *)heap->start)) {
                        //This is special, insofar that what we're going to get back is a DRAM address. If so,
                        //we need to 'invert' it (lowest address in DRAM == highest address in IRAM and vice-versa) and
                        //add a pointer to the DRAM equivalent before the address we're going to return.
                        // Extra 4 bytes hold the original DRAM address (see dram_alloc_to_iram_addr).
                        ret = multi_heap_malloc(heap->heap, MULTI_HEAP_ADD_BLOCK_OWNER_SIZE(size) + 4); // int overflow checked above
                        if (ret != NULL) {
                            MULTI_HEAP_SET_BLOCK_OWNER(ret);
                            ret = MULTI_HEAP_ADD_BLOCK_OWNER_OFFSET(ret);
                            uint32_t *iptr = dram_alloc_to_iram_addr(ret, size + 4); // int overflow checked above
                            CALL_HOOK(esp_heap_trace_alloc_hook, iptr, size, caps);
                            return iptr;
                        }
                    } else {
                        //Just try to alloc, nothing special.
                        ret = multi_heap_malloc(heap->heap, MULTI_HEAP_ADD_BLOCK_OWNER_SIZE(size));
                        if (ret != NULL) {
                            MULTI_HEAP_SET_BLOCK_OWNER(ret);
                            ret = MULTI_HEAP_ADD_BLOCK_OWNER_OFFSET(ret);
                            CALL_HOOK(esp_heap_trace_alloc_hook, ret, size, caps);
                            return ret;
                        }
                    }
                }
            }
        }
    }

    //Nothing usable found.
    return NULL;
}
|
|
|
|
|
2017-09-22 04:02:39 -04:00
|
|
|
|
2021-11-02 01:36:18 -04:00
|
|
|
/*
|
|
|
|
Routine to allocate a bit of memory with certain capabilities. caps is a bitfield of MALLOC_CAP_* bits.
|
|
|
|
*/
|
2023-04-03 09:16:55 -04:00
|
|
|
HEAP_IRAM_ATTR void *heap_caps_malloc( size_t size, uint32_t caps)
{
    void *result = heap_caps_malloc_base(size, caps);

    // A NULL result for a non-zero request is a genuine failure; report it.
    if (result == NULL && size > 0) {
        heap_caps_alloc_failed(size, caps, __func__);
    }

    return result;
}
|
|
|
|
|
|
|
|
|
2017-09-22 04:02:39 -04:00
|
|
|
// Sentinel for malloc_alwaysinternal_limit: external allocations disabled.
#define MALLOC_DISABLE_EXTERNAL_ALLOCS -1
//Dual-use: -1 (=MALLOC_DISABLE_EXTERNAL_ALLOCS) disables allocations in external memory, >=0 sets the limit for allocations preferring internal memory.
// Set via heap_caps_malloc_extmem_enable(); read by the *_default() wrappers.
static int malloc_alwaysinternal_limit=MALLOC_DISABLE_EXTERNAL_ALLOCS;
|
|
|
|
|
|
|
|
void heap_caps_malloc_extmem_enable(size_t limit)
|
|
|
|
{
|
|
|
|
malloc_alwaysinternal_limit=limit;
|
|
|
|
}
|
|
|
|
|
2017-09-05 05:29:57 -04:00
|
|
|
/*
|
|
|
|
Default memory allocation implementation. Should return standard 8-bit memory. malloc() essentially resolves to this function.
|
|
|
|
*/
|
2023-04-03 09:16:55 -04:00
|
|
|
HEAP_IRAM_ATTR void *heap_caps_malloc_default( size_t size )
|
2017-09-05 05:29:57 -04:00
|
|
|
{
|
2017-09-22 04:02:39 -04:00
|
|
|
if (malloc_alwaysinternal_limit==MALLOC_DISABLE_EXTERNAL_ALLOCS) {
|
|
|
|
return heap_caps_malloc( size, MALLOC_CAP_DEFAULT | MALLOC_CAP_INTERNAL);
|
|
|
|
} else {
|
2021-11-02 01:36:18 -04:00
|
|
|
|
|
|
|
// use heap_caps_malloc_base() since we'll
|
|
|
|
// check for allocation failure ourselves
|
|
|
|
|
2017-09-22 04:02:39 -04:00
|
|
|
void *r;
|
2020-11-16 23:48:35 -05:00
|
|
|
if (size <= (size_t)malloc_alwaysinternal_limit) {
|
2021-11-02 01:36:18 -04:00
|
|
|
r=heap_caps_malloc_base( size, MALLOC_CAP_DEFAULT | MALLOC_CAP_INTERNAL );
|
2017-09-22 04:02:39 -04:00
|
|
|
} else {
|
2021-11-02 01:36:18 -04:00
|
|
|
r=heap_caps_malloc_base( size, MALLOC_CAP_DEFAULT | MALLOC_CAP_SPIRAM );
|
2017-09-22 04:02:39 -04:00
|
|
|
}
|
2022-08-07 10:21:22 -04:00
|
|
|
if (r==NULL && size > 0) {
|
2017-09-22 04:02:39 -04:00
|
|
|
//try again while being less picky
|
2021-11-02 01:36:18 -04:00
|
|
|
r=heap_caps_malloc_base( size, MALLOC_CAP_DEFAULT );
|
2017-09-22 04:02:39 -04:00
|
|
|
}
|
2021-11-02 01:36:18 -04:00
|
|
|
|
|
|
|
// allocation failure?
|
2022-08-07 10:21:22 -04:00
|
|
|
if (r==NULL && size > 0){
|
2021-11-02 01:36:18 -04:00
|
|
|
heap_caps_alloc_failed(size, MALLOC_CAP_DEFAULT, __func__);
|
|
|
|
}
|
|
|
|
|
2017-09-22 04:02:39 -04:00
|
|
|
return r;
|
|
|
|
}
|
2017-09-05 05:29:57 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
Same for realloc()
|
|
|
|
Note: keep the logic in here the same as in heap_caps_malloc_default (or merge the two as soon as this gets more complex...)
|
|
|
|
*/
|
2023-04-03 09:16:55 -04:00
|
|
|
HEAP_IRAM_ATTR void *heap_caps_realloc_default( void *ptr, size_t size )
|
2017-09-05 05:29:57 -04:00
|
|
|
{
|
2017-09-22 04:02:39 -04:00
|
|
|
if (malloc_alwaysinternal_limit==MALLOC_DISABLE_EXTERNAL_ALLOCS) {
|
|
|
|
return heap_caps_realloc( ptr, size, MALLOC_CAP_DEFAULT | MALLOC_CAP_INTERNAL );
|
|
|
|
} else {
|
2021-11-02 01:36:18 -04:00
|
|
|
|
|
|
|
// We use heap_caps_realloc_base() since we'll
|
|
|
|
// handle allocation failure ourselves
|
|
|
|
|
2017-09-22 04:02:39 -04:00
|
|
|
void *r;
|
2020-11-16 23:48:35 -05:00
|
|
|
if (size <= (size_t)malloc_alwaysinternal_limit) {
|
2021-11-02 01:36:18 -04:00
|
|
|
r=heap_caps_realloc_base( ptr, size, MALLOC_CAP_DEFAULT | MALLOC_CAP_INTERNAL);
|
2017-09-22 04:02:39 -04:00
|
|
|
} else {
|
2021-11-02 01:36:18 -04:00
|
|
|
r=heap_caps_realloc_base( ptr, size, MALLOC_CAP_DEFAULT | MALLOC_CAP_SPIRAM);
|
2017-09-22 04:02:39 -04:00
|
|
|
}
|
2021-11-02 01:36:18 -04:00
|
|
|
|
2017-09-22 04:02:39 -04:00
|
|
|
if (r==NULL && size>0) {
|
|
|
|
//We needed to allocate memory, but we didn't. Try again while being less picky.
|
2021-11-02 01:36:18 -04:00
|
|
|
r=heap_caps_realloc_base( ptr, size, MALLOC_CAP_DEFAULT);
|
|
|
|
}
|
|
|
|
|
|
|
|
// allocation failure?
|
|
|
|
if (r==NULL && size>0){
|
|
|
|
heap_caps_alloc_failed(size, MALLOC_CAP_DEFAULT, __func__);
|
2017-09-22 04:02:39 -04:00
|
|
|
}
|
|
|
|
return r;
|
|
|
|
}
|
2017-09-05 05:29:57 -04:00
|
|
|
}
|
|
|
|
|
2017-09-30 03:28:41 -04:00
|
|
|
/*
|
|
|
|
Memory allocation as preference in decreasing order.
|
|
|
|
*/
|
2023-04-03 09:16:55 -04:00
|
|
|
HEAP_IRAM_ATTR void *heap_caps_malloc_prefer( size_t size, size_t num, ... )
|
2017-09-30 03:28:41 -04:00
|
|
|
{
|
|
|
|
va_list argp;
|
|
|
|
va_start( argp, num );
|
|
|
|
void *r = NULL;
|
2022-06-08 02:33:11 -04:00
|
|
|
uint32_t caps = MALLOC_CAP_DEFAULT;
|
2017-09-30 03:28:41 -04:00
|
|
|
while (num--) {
|
2022-06-08 02:33:11 -04:00
|
|
|
caps = va_arg( argp, uint32_t );
|
|
|
|
r = heap_caps_malloc_base( size, caps );
|
2022-08-08 03:39:25 -04:00
|
|
|
if (r != NULL || size == 0) {
|
2017-09-30 03:28:41 -04:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2023-02-21 01:37:59 -05:00
|
|
|
|
2022-08-07 10:21:22 -04:00
|
|
|
if (r == NULL && size > 0){
|
2022-06-08 02:33:11 -04:00
|
|
|
heap_caps_alloc_failed(size, caps, __func__);
|
|
|
|
}
|
2017-09-30 03:28:41 -04:00
|
|
|
va_end( argp );
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
Memory reallocation as preference in decreasing order.
|
|
|
|
*/
|
2023-04-03 09:16:55 -04:00
|
|
|
HEAP_IRAM_ATTR void *heap_caps_realloc_prefer( void *ptr, size_t size, size_t num, ... )
|
2017-09-30 03:28:41 -04:00
|
|
|
{
|
|
|
|
va_list argp;
|
|
|
|
va_start( argp, num );
|
|
|
|
void *r = NULL;
|
2022-06-08 02:33:11 -04:00
|
|
|
uint32_t caps = MALLOC_CAP_DEFAULT;
|
2017-09-30 03:28:41 -04:00
|
|
|
while (num--) {
|
2022-06-08 02:33:11 -04:00
|
|
|
caps = va_arg( argp, uint32_t );
|
|
|
|
r = heap_caps_realloc_base( ptr, size, caps );
|
2017-09-30 03:28:41 -04:00
|
|
|
if (r != NULL || size == 0) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2023-02-21 01:37:59 -05:00
|
|
|
|
2022-08-07 10:21:22 -04:00
|
|
|
if (r == NULL && size > 0){
|
2022-06-08 02:33:11 -04:00
|
|
|
heap_caps_alloc_failed(size, caps, __func__);
|
|
|
|
}
|
2017-09-30 03:28:41 -04:00
|
|
|
va_end( argp );
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
Memory callocation as preference in decreasing order.
|
|
|
|
*/
|
2023-04-03 09:16:55 -04:00
|
|
|
HEAP_IRAM_ATTR void *heap_caps_calloc_prefer( size_t n, size_t size, size_t num, ... )
|
2017-09-30 03:28:41 -04:00
|
|
|
{
|
|
|
|
va_list argp;
|
|
|
|
va_start( argp, num );
|
|
|
|
void *r = NULL;
|
2022-06-08 02:33:11 -04:00
|
|
|
uint32_t caps = MALLOC_CAP_DEFAULT;
|
2017-09-30 03:28:41 -04:00
|
|
|
while (num--) {
|
2022-06-08 02:33:11 -04:00
|
|
|
caps = va_arg( argp, uint32_t );
|
|
|
|
r = heap_caps_calloc_base( n, size, caps );
|
2022-08-07 10:21:22 -04:00
|
|
|
if (r != NULL || size == 0){
|
2022-06-08 02:33:11 -04:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2023-02-21 01:37:59 -05:00
|
|
|
|
2022-08-07 10:21:22 -04:00
|
|
|
if (r == NULL && size > 0){
|
2022-06-08 02:33:11 -04:00
|
|
|
heap_caps_alloc_failed(size, caps, __func__);
|
2017-09-30 03:28:41 -04:00
|
|
|
}
|
|
|
|
va_end( argp );
|
|
|
|
return r;
|
|
|
|
}
|
2017-09-05 05:29:57 -04:00
|
|
|
|
2017-05-03 04:03:28 -04:00
|
|
|
/* Find the heap which belongs to ptr, or return NULL if it's
|
|
|
|
not in any heap.
|
|
|
|
|
|
|
|
(This confirms if ptr is inside the heap's region, doesn't confirm if 'ptr'
|
|
|
|
is an allocated block or is some other random address inside the heap.)
|
|
|
|
*/
|
2023-04-03 09:16:55 -04:00
|
|
|
HEAP_IRAM_ATTR static heap_t *find_containing_heap(void *ptr )
|
2017-05-03 04:03:28 -04:00
|
|
|
{
|
|
|
|
intptr_t p = (intptr_t)ptr;
|
2017-08-28 03:12:29 -04:00
|
|
|
heap_t *heap;
|
|
|
|
SLIST_FOREACH(heap, ®istered_heaps, next) {
|
2017-07-19 03:10:33 -04:00
|
|
|
if (heap->heap != NULL && p >= heap->start && p < heap->end) {
|
2017-05-03 04:03:28 -04:00
|
|
|
return heap;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2023-04-03 09:16:55 -04:00
|
|
|
/*
 * Free memory previously returned by any heap_caps allocation function.
 * NULL is accepted as a no-op. Handles the D/IRAM special case where the
 * pointer handed to the caller is an IRAM alias of a DRAM allocation.
 */
HEAP_IRAM_ATTR void heap_caps_free( void *ptr)
{
    if (ptr == NULL) {
        return;
    }

    if (esp_ptr_in_diram_iram(ptr)) {
        //Memory allocated here is actually allocated in the DRAM alias region and
        //cannot be de-allocated as usual. dram_alloc_to_iram_addr stores a pointer to
        //the equivalent DRAM address, though; free that.
        uint32_t *dramAddrPtr = (uint32_t *)ptr;
        ptr = (void *)dramAddrPtr[-1];
    }
    // Step back over the block-owner bytes stored just before the user
    // pointer to recover the address the underlying heap actually returned.
    void *block_owner_ptr = MULTI_HEAP_REMOVE_BLOCK_OWNER_OFFSET(ptr);
    heap_t *heap = find_containing_heap(block_owner_ptr);
    assert(heap != NULL && "free() target pointer is outside heap areas");
    multi_heap_free(heap->heap, block_owner_ptr);

    // Trace hook receives the (possibly translated) user-visible pointer.
    CALL_HOOK(esp_heap_trace_free_hook, ptr);
}
|
|
|
|
|
2021-11-02 01:36:18 -04:00
|
|
|
/*
|
|
|
|
This function should not be called directly as it does not
|
|
|
|
check for failure / call heap_caps_alloc_failed()
|
|
|
|
*/
|
2023-04-03 09:16:55 -04:00
|
|
|
/*
 * Core realloc implementation shared by the public realloc wrappers.
 * Semantics: ptr == NULL behaves as malloc; size == 0 frees ptr and returns
 * NULL. Tries an in-place resize within the owning heap first, then falls
 * back to allocate/copy/free in any heap satisfying `caps`. Does not invoke
 * the alloc-failed callback (callers do).
 */
HEAP_IRAM_ATTR static void *heap_caps_realloc_base( void *ptr, size_t size, uint32_t caps)
{
    bool ptr_in_diram_case = false;
    heap_t *heap = NULL;
    void *dram_ptr = NULL;

    if (ptr == NULL) {
        return heap_caps_malloc_base(size, caps);
    }

    if (size == 0) {
        heap_caps_free(ptr);
        return NULL;
    }

    // remove block owner size to HEAP_SIZE_MAX rather than adding the block owner size
    // to size to prevent overflows.
    if (size > MULTI_HEAP_REMOVE_BLOCK_OWNER_SIZE(HEAP_SIZE_MAX)) {
        return NULL;
    }

    //The pointer to memory may be aliased, we need to
    //recover the corresponding address before to manage a new allocation:
    if(esp_ptr_in_diram_iram((void *)ptr)) {
        // The original DRAM address was stored one word before the IRAM alias
        // by dram_alloc_to_iram_addr().
        uint32_t *dram_addr = (uint32_t *)ptr;
        dram_ptr = (void *)dram_addr[-1];
        dram_ptr = MULTI_HEAP_REMOVE_BLOCK_OWNER_OFFSET(dram_ptr);

        heap = find_containing_heap(dram_ptr);
        assert(heap != NULL && "realloc() pointer is outside heap areas");

        //with pointers that reside on diram space, we avoid using
        //the realloc implementation due to address translation issues,
        //instead force a malloc/copy/free
        ptr_in_diram_case = true;

    } else {
        heap = find_containing_heap(ptr);
        assert(heap != NULL && "realloc() pointer is outside heap areas");
    }

    // shift ptr by block owner offset. Since the ptr returned to the user
    // does not include the block owner bytes (that are located at the
    // beginning of the allocated memory) we have to add them back before
    // processing the realloc.
    ptr = MULTI_HEAP_REMOVE_BLOCK_OWNER_OFFSET(ptr);

    // are the existing heap's capabilities compatible with the
    // requested ones?
    bool compatible_caps = (caps & get_all_caps(heap)) == caps;

    if (compatible_caps && !ptr_in_diram_case) {
        // try to reallocate this memory within the same heap
        // (which will resize the block if it can)
        void *r = multi_heap_realloc(heap->heap, ptr, MULTI_HEAP_ADD_BLOCK_OWNER_SIZE(size));
        if (r != NULL) {
            MULTI_HEAP_SET_BLOCK_OWNER(r);
            r = MULTI_HEAP_ADD_BLOCK_OWNER_OFFSET(r);
            CALL_HOOK(esp_heap_trace_alloc_hook, r, size, caps);
            return r;
        }
    }

    // if we couldn't do that, try to see if we can reallocate
    // in a different heap with requested capabilities.
    void *new_p = heap_caps_malloc_base(size, caps);
    if (new_p != NULL) {
        size_t old_size = 0;

        //If we're dealing with aliased ptr, information regarding its containing
        //heap can only be obtained with translated address.
        if(ptr_in_diram_case) {
            old_size = multi_heap_get_allocated_size(heap->heap, dram_ptr);
        } else {
            old_size = multi_heap_get_allocated_size(heap->heap, ptr);
        }

        assert(old_size > 0);
        // do not copy the block owner bytes
        memcpy(new_p, MULTI_HEAP_ADD_BLOCK_OWNER_OFFSET(ptr), MIN(size, old_size));
        // add the block owner bytes to ptr since they are removed in heap_caps_free
        heap_caps_free(MULTI_HEAP_ADD_BLOCK_OWNER_OFFSET(ptr));
        return new_p;
    }

    // Neither in-place resize nor relocation succeeded; original block is
    // left untouched and still valid.
    return NULL;
}
|
|
|
|
|
2023-04-03 09:16:55 -04:00
|
|
|
// Public realloc with capabilities: delegates to heap_caps_realloc_base()
// and reports failure for non-zero requests via the failure path.
HEAP_IRAM_ATTR void *heap_caps_realloc( void *ptr, size_t size, uint32_t caps)
{
    void *result = heap_caps_realloc_base(ptr, size, caps);

    if (result == NULL && size > 0) {
        heap_caps_alloc_failed(size, caps, __func__);
    }

    return result;
}
|
|
|
|
|
2022-06-08 02:33:11 -04:00
|
|
|
/*
|
|
|
|
This function should not be called directly as it does not
|
|
|
|
check for failure / call heap_caps_alloc_failed()
|
|
|
|
*/
|
2023-04-03 09:16:55 -04:00
|
|
|
HEAP_IRAM_ATTR static void *heap_caps_calloc_base( size_t n, size_t size, uint32_t caps)
{
    size_t total_bytes;

    // Reject element-count/size combinations whose product overflows size_t.
    if (__builtin_mul_overflow(n, size, &total_bytes)) {
        return NULL;
    }

    void *mem = heap_caps_malloc_base(total_bytes, caps);
    if (mem != NULL) {
        // calloc semantics: zero the whole allocation.
        memset(mem, 0, total_bytes);
    }
    return mem;
}
|
|
|
|
|
2023-04-03 09:16:55 -04:00
|
|
|
/*
 * Allocate zero-initialized memory for `n` elements of `size` bytes with the
 * given capabilities. Returns NULL on failure (including n*size overflow).
 *
 * Fix: the failure report previously passed `n * size` computed with a raw,
 * wrapping multiplication, so an overflowing request was reported with a
 * bogus (wrapped) size; it also fired for n == 0 with size > 0, which is a
 * legitimate zero-byte request, not a failure.
 */
HEAP_IRAM_ATTR void *heap_caps_calloc( size_t n, size_t size, uint32_t caps)
{
    void *ptr = heap_caps_calloc_base(n, size, caps);

    if (ptr == NULL && n > 0 && size > 0) {
        // Compute the requested byte count without wrapping; report SIZE_MAX
        // when n * size overflows (the request was unsatisfiable anyway).
        size_t size_bytes;
        if (__builtin_mul_overflow(n, size, &size_bytes)) {
            size_bytes = SIZE_MAX;
        }
        heap_caps_alloc_failed(size_bytes, caps, __func__);
    }

    return ptr;
}
|
|
|
|
|
2019-10-21 02:55:58 -04:00
|
|
|
size_t heap_caps_get_total_size(uint32_t caps)
|
|
|
|
{
|
|
|
|
size_t total_size = 0;
|
|
|
|
heap_t *heap;
|
|
|
|
SLIST_FOREACH(heap, ®istered_heaps, next) {
|
|
|
|
if (heap_caps_match(heap, caps)) {
|
|
|
|
total_size += (heap->end - heap->start);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return total_size;
|
|
|
|
}
|
|
|
|
|
2017-05-03 04:03:28 -04:00
|
|
|
size_t heap_caps_get_free_size( uint32_t caps )
|
|
|
|
{
|
|
|
|
size_t ret = 0;
|
2017-08-28 03:12:29 -04:00
|
|
|
heap_t *heap;
|
|
|
|
SLIST_FOREACH(heap, ®istered_heaps, next) {
|
2017-07-19 03:10:33 -04:00
|
|
|
if (heap_caps_match(heap, caps)) {
|
2017-05-03 04:03:28 -04:00
|
|
|
ret += multi_heap_free_size(heap->heap);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t heap_caps_get_minimum_free_size( uint32_t caps )
|
|
|
|
{
|
|
|
|
size_t ret = 0;
|
2017-08-28 03:12:29 -04:00
|
|
|
heap_t *heap;
|
|
|
|
SLIST_FOREACH(heap, ®istered_heaps, next) {
|
2017-07-19 03:10:33 -04:00
|
|
|
if (heap_caps_match(heap, caps)) {
|
2017-05-03 04:03:28 -04:00
|
|
|
ret += multi_heap_minimum_free_size(heap->heap);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t heap_caps_get_largest_free_block( uint32_t caps )
|
|
|
|
{
|
|
|
|
multi_heap_info_t info;
|
|
|
|
heap_caps_get_info(&info, caps);
|
|
|
|
return info.largest_free_block;
|
|
|
|
}
|
|
|
|
|
2023-12-01 00:55:32 -05:00
|
|
|
// Book-keeping for heap_caps_monitor_local_minimum_free_size_start()/_stop():
// a per-heap snapshot of minimum_free_bytes taken when monitoring starts,
// restored when it stops.
static struct {
    size_t *values; // Array of minimum_free_bytes used to keep the different values when starting monitoring
    size_t counter; // Keep count of registered heap when monitoring to prevent any added heap to create an out of bound access on values
    multi_heap_lock_t mux; // protect access to min_free_bytes_monitoring fields in start/stop monitoring functions
} min_free_bytes_monitoring = {NULL, 0, MULTI_HEAP_LOCK_STATIC_INITIALIZER};
|
|
|
|
|
|
|
|
/*
 * Start (or re-arm) local minimum-free-size monitoring.
 * On the first call, allocates one snapshot slot per registered heap and
 * fills the slots with all-ones (memset 0xFF, i.e. SIZE_MAX). Every call
 * then resets each heap's minimum_free_bytes watermark, retaining the
 * lowest value seen so far in the snapshot so _stop() can restore it.
 */
esp_err_t heap_caps_monitor_local_minimum_free_size_start(void)
{
    // update minimum_free_bytes on all affected heap, and store the "old value"
    // as a snapshot of the heaps minimum_free_bytes state.
    heap_t *heap = NULL;
    MULTI_HEAP_LOCK(&min_free_bytes_monitoring.mux);
    if (min_free_bytes_monitoring.values == NULL) {
        // First activation: count heaps registered right now; heaps added
        // later are deliberately not monitored (avoids out-of-bound access).
        SLIST_FOREACH(heap, &registered_heaps, next) {
            min_free_bytes_monitoring.counter++;
        }
        min_free_bytes_monitoring.values = heap_caps_malloc(sizeof(size_t) * min_free_bytes_monitoring.counter, MALLOC_CAP_DEFAULT);
        assert(min_free_bytes_monitoring.values != NULL && "not enough memory to store min_free_bytes value");
        memset(min_free_bytes_monitoring.values, 0xFF, sizeof(size_t) * min_free_bytes_monitoring.counter);
    }

    heap = SLIST_FIRST(&registered_heaps);
    for (size_t counter = 0; counter < min_free_bytes_monitoring.counter; counter++) {
        size_t old_minimum = multi_heap_reset_minimum_free_bytes(heap->heap);

        // Keep the smallest watermark observed across repeated start calls.
        if (min_free_bytes_monitoring.values[counter] > old_minimum) {
            min_free_bytes_monitoring.values[counter] = old_minimum;
        }

        heap = SLIST_NEXT(heap, next);
    }
    MULTI_HEAP_UNLOCK(&min_free_bytes_monitoring.mux);

    return ESP_OK;
}
|
|
|
|
|
|
|
|
/*
 * Stop local minimum-free-size monitoring: restore each heap's
 * minimum_free_bytes from the snapshot taken by _start(), then release the
 * snapshot storage. Returns ESP_FAIL if monitoring was never started.
 */
esp_err_t heap_caps_monitor_local_minimum_free_size_stop(void)
{
    if (min_free_bytes_monitoring.values == NULL) {
        return ESP_FAIL;
    }

    MULTI_HEAP_LOCK(&min_free_bytes_monitoring.mux);
    heap_t *heap = SLIST_FIRST(&registered_heaps);
    for (size_t counter = 0; counter < min_free_bytes_monitoring.counter; counter++) {
        multi_heap_restore_minimum_free_bytes(heap->heap, min_free_bytes_monitoring.values[counter]);

        heap = SLIST_NEXT(heap, next);
    }

    // Reset the monitoring state so a later _start() re-snapshots from scratch.
    heap_caps_free(min_free_bytes_monitoring.values);
    min_free_bytes_monitoring.values = NULL;
    min_free_bytes_monitoring.counter = 0;
    MULTI_HEAP_UNLOCK(&min_free_bytes_monitoring.mux);

    return ESP_OK;
}
|
|
|
|
|
|
|
|
|
2017-05-03 04:03:28 -04:00
|
|
|
/*
 * Aggregate allocator statistics for every registered heap matching `caps`
 * into `*info`. Block-owner bookkeeping bytes are subtracted so the numbers
 * reflect memory actually usable by callers.
 */
void heap_caps_get_info( multi_heap_info_t *info, uint32_t caps )
{
    memset(info, 0, sizeof(multi_heap_info_t));

    heap_t *heap;
    SLIST_FOREACH(heap, &registered_heaps, next) {
        if (heap_caps_match(heap, caps)) {
            multi_heap_info_t hinfo;
            multi_heap_get_info(heap->heap, &hinfo);

            info->total_free_bytes += hinfo.total_free_bytes - MULTI_HEAP_BLOCK_OWNER_SIZE();
            info->total_allocated_bytes += (hinfo.total_allocated_bytes -
                                           hinfo.allocated_blocks * MULTI_HEAP_BLOCK_OWNER_SIZE());
            info->largest_free_block = MAX(info->largest_free_block,
                                           hinfo.largest_free_block);
            // NOTE(review): this subtraction executes once per matching heap,
            // so with a non-zero block-owner size largest_free_block is reduced
            // on every iteration rather than once at the end — confirm intended.
            info->largest_free_block -= info->largest_free_block ? MULTI_HEAP_BLOCK_OWNER_SIZE() : 0;
            info->minimum_free_bytes += hinfo.minimum_free_bytes - MULTI_HEAP_BLOCK_OWNER_SIZE();
            info->allocated_blocks += hinfo.allocated_blocks;
            info->free_blocks += hinfo.free_blocks;
            info->total_blocks += hinfo.total_blocks;
        }
    }
}
|
|
|
|
|
|
|
|
void heap_caps_print_heap_info( uint32_t caps )
|
|
|
|
{
|
|
|
|
multi_heap_info_t info;
|
2023-03-12 16:45:11 -04:00
|
|
|
printf("Heap summary for capabilities 0x%08"PRIX32":\n", caps);
|
2017-08-28 03:12:29 -04:00
|
|
|
heap_t *heap;
|
|
|
|
SLIST_FOREACH(heap, ®istered_heaps, next) {
|
2017-07-19 03:10:33 -04:00
|
|
|
if (heap_caps_match(heap, caps)) {
|
2017-05-03 04:03:28 -04:00
|
|
|
multi_heap_get_info(heap->heap, &info);
|
|
|
|
|
|
|
|
printf(" At 0x%08x len %d free %d allocated %d min_free %d\n",
|
|
|
|
heap->start, heap->end - heap->start, info.total_free_bytes, info.total_allocated_bytes, info.minimum_free_bytes);
|
|
|
|
printf(" largest_free_block %d alloc_blocks %d free_blocks %d total_blocks %d\n",
|
|
|
|
info.largest_free_block, info.allocated_blocks,
|
|
|
|
info.free_blocks, info.total_blocks);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
printf(" Totals:\n");
|
|
|
|
heap_caps_get_info(&info, caps);
|
|
|
|
|
|
|
|
printf(" free %d allocated %d min_free %d largest_free_block %d\n", info.total_free_bytes, info.total_allocated_bytes, info.minimum_free_bytes, info.largest_free_block);
|
|
|
|
}
|
|
|
|
|
2017-05-10 03:17:52 -04:00
|
|
|
bool heap_caps_check_integrity(uint32_t caps, bool print_errors)
|
|
|
|
{
|
|
|
|
bool all_heaps = caps & MALLOC_CAP_INVALID;
|
|
|
|
bool valid = true;
|
|
|
|
|
|
|
|
heap_t *heap;
|
|
|
|
SLIST_FOREACH(heap, ®istered_heaps, next) {
|
|
|
|
if (heap->heap != NULL
|
|
|
|
&& (all_heaps || (get_all_caps(heap) & caps) == caps)) {
|
|
|
|
valid = multi_heap_check(heap->heap, print_errors) && valid;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return valid;
|
|
|
|
}
|
2017-10-01 23:31:20 -04:00
|
|
|
|
|
|
|
bool heap_caps_check_integrity_all(bool print_errors)
|
|
|
|
{
|
|
|
|
return heap_caps_check_integrity(MALLOC_CAP_INVALID, print_errors);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool heap_caps_check_integrity_addr(intptr_t addr, bool print_errors)
|
|
|
|
{
|
|
|
|
heap_t *heap = find_containing_heap((void *)addr);
|
|
|
|
if (heap == NULL) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return multi_heap_check(heap->heap, print_errors);
|
|
|
|
}
|
2017-10-18 04:25:17 -04:00
|
|
|
|
|
|
|
void heap_caps_dump(uint32_t caps)
|
|
|
|
{
|
|
|
|
bool all_heaps = caps & MALLOC_CAP_INVALID;
|
|
|
|
heap_t *heap;
|
|
|
|
SLIST_FOREACH(heap, ®istered_heaps, next) {
|
|
|
|
if (heap->heap != NULL
|
|
|
|
&& (all_heaps || (get_all_caps(heap) & caps) == caps)) {
|
|
|
|
multi_heap_dump(heap->heap);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-07-16 05:33:30 -04:00
|
|
|
/* Dump the block structure of every registered heap (convenience wrapper). */
void heap_caps_dump_all(void)
{
    // MALLOC_CAP_INVALID selects all heaps in heap_caps_dump()
    heap_caps_dump(MALLOC_CAP_INVALID);
}
|
2019-10-16 04:30:42 -04:00
|
|
|
|
|
|
|
size_t heap_caps_get_allocated_size( void *ptr )
|
|
|
|
{
|
2023-10-27 09:06:58 -04:00
|
|
|
// add the block owner bytes back to ptr before handing over
|
|
|
|
// to multi heap layer.
|
|
|
|
ptr = MULTI_HEAP_REMOVE_BLOCK_OWNER_OFFSET(ptr);
|
2019-10-16 04:30:42 -04:00
|
|
|
heap_t *heap = find_containing_heap(ptr);
|
2022-04-21 02:46:06 -04:00
|
|
|
assert(heap);
|
2019-10-16 04:30:42 -04:00
|
|
|
size_t size = multi_heap_get_allocated_size(heap->heap, ptr);
|
2023-10-04 08:42:00 -04:00
|
|
|
return MULTI_HEAP_REMOVE_BLOCK_OWNER_SIZE(size);
|
2019-10-16 04:30:42 -04:00
|
|
|
}
|
2019-11-12 23:49:57 -05:00
|
|
|
|
2023-10-06 16:51:26 -04:00
|
|
|
static HEAP_IRAM_ATTR void *heap_caps_aligned_alloc_base(size_t alignment, size_t size, uint32_t caps)
|
2019-11-12 23:49:57 -05:00
|
|
|
{
|
|
|
|
for (int prio = 0; prio < SOC_MEMORY_TYPE_NO_PRIOS; prio++) {
|
|
|
|
//Iterate over heaps and check capabilities at this priority
|
|
|
|
heap_t *heap;
|
|
|
|
SLIST_FOREACH(heap, ®istered_heaps, next) {
|
|
|
|
if (heap->heap == NULL) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if ((heap->caps[prio] & caps) != 0) {
|
|
|
|
//Heap has at least one of the caps requested. If caps has other bits set that this prio
|
|
|
|
//doesn't cover, see if they're available in other prios.
|
|
|
|
if ((get_all_caps(heap) & caps) == caps) {
|
2023-10-27 09:06:58 -04:00
|
|
|
// Just try to alloc, nothing special. Provide the size of the block owner
|
|
|
|
// as an offset to prevent a miscalculation of the alignment.
|
|
|
|
void *ret = multi_heap_aligned_alloc_offs(heap->heap, MULTI_HEAP_ADD_BLOCK_OWNER_SIZE(size), alignment, MULTI_HEAP_BLOCK_OWNER_SIZE());
|
2023-02-21 01:37:59 -05:00
|
|
|
if (ret != NULL) {
|
2023-10-04 08:42:00 -04:00
|
|
|
MULTI_HEAP_SET_BLOCK_OWNER(ret);
|
|
|
|
ret = MULTI_HEAP_ADD_BLOCK_OWNER_OFFSET(ret);
|
2023-02-21 01:37:59 -05:00
|
|
|
CALL_HOOK(esp_heap_trace_alloc_hook, ret, size, caps);
|
|
|
|
return ret;
|
2019-11-12 23:49:57 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-04-27 16:09:15 -04:00
|
|
|
|
2023-02-21 01:37:59 -05:00
|
|
|
//Nothing usable found.
|
|
|
|
return NULL;
|
2019-11-12 23:49:57 -05:00
|
|
|
}
|
|
|
|
|
2023-10-06 16:51:26 -04:00
|
|
|
/*
 Validate the arguments of an aligned allocation request.
 Returns ESP_OK when the request is well-formed, ESP_FAIL otherwise.
 Only the oversized-request case reports a failure via
 heap_caps_alloc_failed(); a zero size or an invalid alignment
 fails silently.
*/
static HEAP_IRAM_ATTR esp_err_t heap_caps_aligned_check_args(size_t alignment, size_t size, uint32_t caps, const char *funcname)
{
    // Alignment must be a non-zero power of two, and size must be non-zero.
    if (alignment == 0 || (alignment & (alignment - 1)) != 0 || size == 0) {
        return ESP_FAIL;
    }

    // Bound 'size' so adding the block-owner overhead (and computing
    // start+size elsewhere) cannot overflow an integer.
    if (MULTI_HEAP_ADD_BLOCK_OWNER_SIZE(size) > HEAP_SIZE_MAX) {
        heap_caps_alloc_failed(size, caps, funcname);
        return ESP_FAIL;
    }

    return ESP_OK;
}
|
|
|
|
|
|
|
|
HEAP_IRAM_ATTR void *heap_caps_aligned_alloc_default(size_t alignment, size_t size)
|
|
|
|
{
|
|
|
|
void *ret = NULL;
|
|
|
|
|
|
|
|
if (malloc_alwaysinternal_limit == MALLOC_DISABLE_EXTERNAL_ALLOCS) {
|
|
|
|
return heap_caps_aligned_alloc(alignment, size, MALLOC_CAP_DEFAULT | MALLOC_CAP_INTERNAL);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (heap_caps_aligned_check_args(alignment, size, MALLOC_CAP_DEFAULT, __func__) != ESP_OK) {
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (size <= (size_t)malloc_alwaysinternal_limit) {
|
|
|
|
ret = heap_caps_aligned_alloc_base(alignment, size, MALLOC_CAP_DEFAULT | MALLOC_CAP_INTERNAL);
|
|
|
|
} else {
|
|
|
|
ret = heap_caps_aligned_alloc_base(alignment, size, MALLOC_CAP_DEFAULT | MALLOC_CAP_SPIRAM);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ret != NULL) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = heap_caps_aligned_alloc_base(alignment, size, MALLOC_CAP_DEFAULT);
|
|
|
|
|
|
|
|
if (ret == NULL) {
|
|
|
|
heap_caps_alloc_failed(size, MALLOC_CAP_DEFAULT, __func__);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
HEAP_IRAM_ATTR void *heap_caps_aligned_alloc(size_t alignment, size_t size, uint32_t caps)
|
|
|
|
{
|
|
|
|
void *ret = NULL;
|
|
|
|
|
|
|
|
if (heap_caps_aligned_check_args(alignment, size, caps, __func__) != ESP_OK) {
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = heap_caps_aligned_alloc_base(alignment, size, caps);
|
|
|
|
|
|
|
|
if (ret == NULL) {
|
|
|
|
heap_caps_alloc_failed(size, caps, __func__);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2023-04-03 09:16:55 -04:00
|
|
|
/*
 * Free memory obtained from heap_caps_aligned_alloc(). Kept as a separate
 * entry point for API compatibility; it simply forwards to heap_caps_free().
 */
HEAP_IRAM_ATTR void heap_caps_aligned_free(void *ptr)
{
    heap_caps_free(ptr);
}
|
|
|
|
|
2019-12-20 10:48:35 -05:00
|
|
|
/*
 Allocate a zero-initialized, aligned array of 'n' elements of 'size' bytes
 each from heaps matching 'caps'. Returns NULL when n*size overflows size_t
 or when the allocation fails.
*/
void *heap_caps_aligned_calloc(size_t alignment, size_t n, size_t size, uint32_t caps)
{
    size_t total;
    // Reject requests whose total byte count does not fit in size_t.
    if (__builtin_mul_overflow(n, size, &total)) {
        return NULL;
    }

    void *mem = heap_caps_aligned_alloc(alignment, total, caps);
    if (mem != NULL) {
        memset(mem, 0, total);
    }

    return mem;
}
|
2024-02-14 07:03:09 -05:00
|
|
|
|
|
|
|
/**
 * Bundle passed (as the opaque user_data pointer) from heap_caps_walk()
 * down to the per-block heap_caps_walker() trampoline.
 *
 * NOTE: initialized positionally in heap_caps_walk(); keep the field
 * declaration order in sync with that initializer.
 */
typedef struct walker_data {
    void *opaque_ptr;               // caller-supplied context, forwarded untouched to cb_func
    heap_caps_walker_cb_t cb_func;  // user callback invoked once per heap block
    heap_t *heap;                   // registered heap currently being walked
} walker_data_t;
|
|
|
|
|
2024-02-27 06:32:15 -05:00
|
|
|
__attribute__((noinline)) static bool heap_caps_walker(void* block_ptr, size_t block_size, int block_used, void *user_data)
|
2024-02-14 07:03:09 -05:00
|
|
|
{
|
|
|
|
walker_data_t *walker_data = (walker_data_t*)user_data;
|
|
|
|
|
|
|
|
walker_heap_into_t heap_info = {
|
|
|
|
(intptr_t)walker_data->heap->start,
|
|
|
|
(intptr_t)walker_data->heap->end
|
|
|
|
};
|
|
|
|
walker_block_info_t block_info = {
|
|
|
|
block_ptr,
|
|
|
|
block_size,
|
|
|
|
(bool)block_used
|
|
|
|
};
|
|
|
|
|
2024-02-27 06:32:15 -05:00
|
|
|
return walker_data->cb_func(heap_info, block_info, walker_data->opaque_ptr);
|
2024-02-14 07:03:09 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 Invoke 'walker_func' on every block of every registered heap whose
 capability set includes all bits in 'caps' (MALLOC_CAP_INVALID selects
 every heap). 'user_data' is passed through to the callback unchanged.
*/
void heap_caps_walk(uint32_t caps, heap_caps_walker_cb_t walker_func, void *user_data)
{
    assert(walker_func != NULL);

    const bool walk_all = (caps & MALLOC_CAP_INVALID) != 0;
    heap_t *it;
    SLIST_FOREACH(it, &registered_heaps, next) {
        if (it->heap == NULL) {
            continue;
        }
        if (walk_all || (get_all_caps(it) & caps) == caps) {
            // Positional init: opaque context, callback, current heap.
            walker_data_t data = {user_data, walker_func, it};
            multi_heap_walk(it->heap, heap_caps_walker, &data);
        }
    }
}
|
|
|
|
|
|
|
|
/* Walk every block of every registered heap (convenience wrapper). */
void heap_caps_walk_all(heap_caps_walker_cb_t walker_func, void *user_data)
{
    // MALLOC_CAP_INVALID selects all heaps in heap_caps_walk()
    heap_caps_walk(MALLOC_CAP_INVALID, walker_func, user_data);
}
|