feat(heap): Dissociate heap poisoning from task tracking

Previously, enabling CONFIG_HEAP_TASK_TRACKING required some form of
heap poisoning to be enabled as well (!HEAP_POISONING_DISABLED).
Since the two features are not actually related, this commit decouples
them by removing MULTI_HEAP_BLOCK_OWNER from poison_head_t in
multi_heap_poisoning.c and handling block ownership in heap_caps.c
instead.

Note that handling task tracking in multi_heap.c would also require
updating the ROM implementation of multi_heap.c. For this reason, the
task tracking feature is handled in heap_caps.c.
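
In practice, the owner is now recorded by heap_caps.c itself: each allocation asks the underlying multi_heap allocator for sizeof(TaskHandle_t) extra bytes, writes the current task handle at the start of the block, and returns the address just past it to the caller (see the MULTI_HEAP_*_BLOCK_OWNER_* macros in multi_heap_platform.h below). The following standalone sketch is an illustration only; it uses plain malloc/free and a void * owner in place of the multi_heap allocator and the FreeRTOS task handle, but performs the same pointer arithmetic as those macros:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for FreeRTOS' TaskHandle_t and xTaskGetCurrentTaskHandle(). */
typedef void *owner_t;
static int dummy_task;
static owner_t current_task(void) { return &dummy_task; }

/* Same idea as MULTI_HEAP_ADD_BLOCK_OWNER_SIZE / MULTI_HEAP_SET_BLOCK_OWNER /
 * MULTI_HEAP_ADD_BLOCK_OWNER_OFFSET: request sizeof(owner_t) extra bytes,
 * store the owner at the start of the block and hand the caller the address
 * just past it. */
static void *tracked_malloc(size_t size)
{
    owner_t *block = malloc(size + sizeof(owner_t));
    if (block == NULL) {
        return NULL;
    }
    *block = current_task();   /* MULTI_HEAP_SET_BLOCK_OWNER(block)        */
    return block + 1;          /* MULTI_HEAP_ADD_BLOCK_OWNER_OFFSET(block) */
}

/* Equivalent of MULTI_HEAP_GET_BLOCK_OWNER applied to
 * MULTI_HEAP_REMOVE_BLOCK_OWNER_OFFSET(ptr). */
static owner_t tracked_owner(void *ptr)
{
    return *((owner_t *)ptr - 1);
}

/* heap_caps_free() likewise passes MULTI_HEAP_REMOVE_BLOCK_OWNER_OFFSET(ptr)
 * back to the underlying allocator. */
static void tracked_free(void *ptr)
{
    free((owner_t *)ptr - 1);
}

int main(void)
{
    void *p = tracked_malloc(32);
    printf("owner of p: %p\n", tracked_owner(p));
    tracked_free(p);
    return 0;
}

On the query side, heap_caps_get_allocated_size() subtracts the owner field again via MULTI_HEAP_REMOVE_BLOCK_OWNER_SIZE(), so the size reported to the caller does not include it, and with CONFIG_HEAP_TASK_TRACKING disabled all of these macros compile to no-ops. As the updated test sdkconfig at the end of the diff shows, CONFIG_HEAP_TASK_TRACKING=y can now be set without enabling any heap poisoning option.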
Guillaume Souchere 2023-10-04 14:42:00 +02:00
parent fac7cb4b78
commit 7492c862af
9 changed files with 34 additions and 41 deletions

@@ -63,7 +63,6 @@ menu "Heap memory debugging"
     config HEAP_TASK_TRACKING
         bool "Enable heap task tracking"
-        depends on !HEAP_POISONING_DISABLED
         help
             Enables tracking the task responsible for each heap allocation.

@@ -122,7 +122,7 @@ HEAP_IRAM_ATTR static void *heap_caps_malloc_base( size_t size, uint32_t caps)
 {
     void *ret = NULL;
 
-    if (size == 0 || size > HEAP_SIZE_MAX ) {
+    if (size == 0 || MULTI_HEAP_ADD_BLOCK_OWNER_SIZE(size) > HEAP_SIZE_MAX ) {
         // Avoids int overflow when adding small numbers to size, or
         // calculating 'end' from start+size, by limiting 'size' to the possible range
         return NULL;
@@ -164,17 +164,20 @@ HEAP_IRAM_ATTR static void *heap_caps_malloc_base( size_t size, uint32_t caps)
                 //This is special, insofar that what we're going to get back is a DRAM address. If so,
                 //we need to 'invert' it (lowest address in DRAM == highest address in IRAM and vice-versa) and
                 //add a pointer to the DRAM equivalent before the address we're going to return.
-                ret = multi_heap_malloc(heap->heap, size + 4); // int overflow checked above
+                ret = multi_heap_malloc(heap->heap, MULTI_HEAP_ADD_BLOCK_OWNER_SIZE(size) + 4); // int overflow checked above
                 if (ret != NULL) {
+                    MULTI_HEAP_SET_BLOCK_OWNER(ret);
+                    ret = MULTI_HEAP_ADD_BLOCK_OWNER_OFFSET(ret);
                     uint32_t *iptr = dram_alloc_to_iram_addr(ret, size + 4); // int overflow checked above
                     CALL_HOOK(esp_heap_trace_alloc_hook, iptr, size, caps);
                     return iptr;
                 }
             } else {
                 //Just try to alloc, nothing special.
-                ret = multi_heap_malloc(heap->heap, size);
+                ret = multi_heap_malloc(heap->heap, MULTI_HEAP_ADD_BLOCK_OWNER_SIZE(size));
                 if (ret != NULL) {
+                    MULTI_HEAP_SET_BLOCK_OWNER(ret);
+                    ret = MULTI_HEAP_ADD_BLOCK_OWNER_OFFSET(ret);
                     CALL_HOOK(esp_heap_trace_alloc_hook, ret, size, caps);
                     return ret;
                 }
@@ -382,10 +385,10 @@ HEAP_IRAM_ATTR void heap_caps_free( void *ptr)
         uint32_t *dramAddrPtr = (uint32_t *)ptr;
         ptr = (void *)dramAddrPtr[-1];
     }
-    heap_t *heap = find_containing_heap(ptr);
+    void *block_owner_ptr = MULTI_HEAP_REMOVE_BLOCK_OWNER_OFFSET(ptr);
+    heap_t *heap = find_containing_heap(block_owner_ptr);
     assert(heap != NULL && "free() target pointer is outside heap areas");
-    multi_heap_free(heap->heap, ptr);
+    multi_heap_free(heap->heap, block_owner_ptr);
 
     CALL_HOOK(esp_heap_trace_free_hook, ptr);
 }
@@ -409,7 +412,7 @@ HEAP_IRAM_ATTR static void *heap_caps_realloc_base( void *ptr, size_t size, uint
         return NULL;
     }
 
-    if (size > HEAP_SIZE_MAX) {
+    if (MULTI_HEAP_ADD_BLOCK_OWNER_SIZE(size) > HEAP_SIZE_MAX) {
         return NULL;
     }
@@ -439,8 +442,10 @@ HEAP_IRAM_ATTR static void *heap_caps_realloc_base( void *ptr, size_t size, uint
     if (compatible_caps && !ptr_in_diram_case) {
         // try to reallocate this memory within the same heap
         // (which will resize the block if it can)
-        void *r = multi_heap_realloc(heap->heap, ptr, size);
+        void *r = multi_heap_realloc(heap->heap, ptr, MULTI_HEAP_ADD_BLOCK_OWNER_SIZE(size));
         if (r != NULL) {
+            MULTI_HEAP_SET_BLOCK_OWNER(r);
+            r = MULTI_HEAP_ADD_BLOCK_OWNER_OFFSET(r);
             CALL_HOOK(esp_heap_trace_alloc_hook, r, size, caps);
             return r;
         }
@@ -652,7 +657,7 @@ size_t heap_caps_get_allocated_size( void *ptr )
     heap_t *heap = find_containing_heap(ptr);
     assert(heap);
     size_t size = multi_heap_get_allocated_size(heap->heap, ptr);
-    return size;
+    return MULTI_HEAP_REMOVE_BLOCK_OWNER_SIZE(size);
 }
 
 HEAP_IRAM_ATTR void *heap_caps_aligned_alloc(size_t alignment, size_t size, uint32_t caps)
@@ -672,7 +677,7 @@ HEAP_IRAM_ATTR void *heap_caps_aligned_alloc(size_t alignment, size_t size, uint
         return NULL;
     }
 
-    if (size > HEAP_SIZE_MAX) {
+    if (MULTI_HEAP_ADD_BLOCK_OWNER_SIZE(size) > HEAP_SIZE_MAX) {
         // Avoids int overflow when adding small numbers to size, or
         // calculating 'end' from start+size, by limiting 'size' to the possible range
         heap_caps_alloc_failed(size, caps, __func__);
@@ -692,8 +697,10 @@ HEAP_IRAM_ATTR void *heap_caps_aligned_alloc(size_t alignment, size_t size, uint
         //doesn't cover, see if they're available in other prios.
         if ((get_all_caps(heap) & caps) == caps) {
             //Just try to alloc, nothing special.
-            ret = multi_heap_aligned_alloc(heap->heap, size, alignment);
+            ret = multi_heap_aligned_alloc(heap->heap, MULTI_HEAP_ADD_BLOCK_OWNER_SIZE(size), alignment);
             if (ret != NULL) {
+                MULTI_HEAP_SET_BLOCK_OWNER(ret);
+                ret = MULTI_HEAP_ADD_BLOCK_OWNER_OFFSET(ret);
                 CALL_HOOK(esp_heap_trace_alloc_hook, ret, size, caps);
                 return ret;
             }

@@ -68,8 +68,7 @@ size_t heap_caps_get_per_task_info(heap_task_info_params_t *params)
             }
             void *p = multi_heap_get_block_address(b); // Safe, only arithmetic
             size_t bsize = multi_heap_get_allocated_size(heap, p); // Validates
-            TaskHandle_t btask = (TaskHandle_t)multi_heap_get_block_owner(b);
+            TaskHandle_t btask = MULTI_HEAP_GET_BLOCK_OWNER(p);
             // Accumulate per-task allocation totals.
             if (params->totals) {
                 size_t i;

@@ -45,7 +45,6 @@ entries:
    multi_heap_poisoning:multi_heap_aligned_free (noflash)
    multi_heap_poisoning:multi_heap_realloc (noflash)
    multi_heap_poisoning:multi_heap_get_block_address (noflash)
-   multi_heap_poisoning:multi_heap_get_block_owner (noflash)
    multi_heap_poisoning:multi_heap_get_allocated_size (noflash)
    multi_heap_poisoning:multi_heap_internal_check_block_poisoning (noflash)
    multi_heap_poisoning:multi_heap_internal_poison_fill_region (noflash)

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -60,12 +60,6 @@ size_t multi_heap_minimum_free_size(multi_heap_handle_t heap)
 void *multi_heap_get_block_address(multi_heap_block_handle_t block)
         __attribute__((alias("multi_heap_get_block_address_impl")));
 
-void *multi_heap_get_block_owner(multi_heap_block_handle_t block)
-{
-    return NULL;
-}
-
 #endif
 
 #define ALIGN(X) ((X) & ~(sizeof(void *)-1))

@@ -87,6 +87,3 @@ bool multi_heap_is_free(const multi_heap_block_handle_t block);
 
 /* Get the data address of a heap block */
 void *multi_heap_get_block_address(multi_heap_block_handle_t block);
-
-/* Get the owner identification for a heap block */
-void *multi_heap_get_block_owner(multi_heap_block_handle_t block);

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -66,14 +66,20 @@ inline static void multi_heap_assert(bool condition, const char *format, int lin
 #ifdef CONFIG_HEAP_TASK_TRACKING
 #include <freertos/task.h>
-#define MULTI_HEAP_BLOCK_OWNER TaskHandle_t task;
-#define MULTI_HEAP_SET_BLOCK_OWNER(HEAD) (HEAD)->task = xTaskGetCurrentTaskHandle()
-#define MULTI_HEAP_GET_BLOCK_OWNER(HEAD) ((HEAD)->task)
+#define MULTI_HEAP_SET_BLOCK_OWNER(HEAD) *((TaskHandle_t*)HEAD) = xTaskGetCurrentTaskHandle()
+#define MULTI_HEAP_GET_BLOCK_OWNER(HEAD) *((TaskHandle_t*)HEAD)
+#define MULTI_HEAP_ADD_BLOCK_OWNER_OFFSET(HEAD) ((TaskHandle_t*)(HEAD) + 1)
+#define MULTI_HEAP_REMOVE_BLOCK_OWNER_OFFSET(HEAD) ((TaskHandle_t*)(HEAD) - 1)
+#define MULTI_HEAP_ADD_BLOCK_OWNER_SIZE(SIZE) ((SIZE) + sizeof(TaskHandle_t))
+#define MULTI_HEAP_REMOVE_BLOCK_OWNER_SIZE(SIZE) ((SIZE) - sizeof(TaskHandle_t))
 #else
-#define MULTI_HEAP_BLOCK_OWNER
 #define MULTI_HEAP_SET_BLOCK_OWNER(HEAD)
 #define MULTI_HEAP_GET_BLOCK_OWNER(HEAD) (NULL)
-#endif
+#define MULTI_HEAP_ADD_BLOCK_OWNER_OFFSET(HEAD) (HEAD)
+#define MULTI_HEAP_REMOVE_BLOCK_OWNER_OFFSET(HEAD) (HEAD)
+#define MULTI_HEAP_ADD_BLOCK_OWNER_SIZE(SIZE) (SIZE)
+#define MULTI_HEAP_REMOVE_BLOCK_OWNER_SIZE(SIZE) (SIZE)
+#endif // CONFIG_HEAP_TASK_TRACKING
 
 #else // MULTI_HEAP_FREERTOS

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -51,7 +51,6 @@
 typedef struct {
     uint32_t head_canary;
-    MULTI_HEAP_BLOCK_OWNER
     size_t alloc_size;
 } poison_head_t;
@@ -72,7 +71,6 @@ __attribute__((noinline)) static uint8_t *poison_allocated_region(poison_head_t
     poison_tail_t *tail = (poison_tail_t *)(data + alloc_size);
     head->alloc_size = alloc_size;
     head->head_canary = HEAD_CANARY_PATTERN;
-    MULTI_HEAP_SET_BLOCK_OWNER(head);
 
     uint32_t tail_canary = TAIL_CANARY_PATTERN;
     if ((intptr_t)tail % sizeof(void *) == 0) {
@@ -351,11 +349,6 @@ void *multi_heap_get_block_address(multi_heap_block_handle_t block)
     return head + sizeof(poison_head_t);
 }
 
-void *multi_heap_get_block_owner(multi_heap_block_handle_t block)
-{
-    return MULTI_HEAP_GET_BLOCK_OWNER((poison_head_t*)multi_heap_get_block_address_impl(block));
-}
-
 multi_heap_handle_t multi_heap_register(void *start, size_t size)
 {
 #ifdef SLOW

@@ -1,2 +1 @@
-CONFIG_HEAP_POISONING_LIGHT=y
 CONFIG_HEAP_TASK_TRACKING=y