/*
 * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <stdint.h>
#include <stdlib.h>
#include <stdbool.h>
#include <assert.h>
#include <string.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/param.h>
#include <multi_heap.h>
#include "multi_heap_internal.h"

/* Note: Keep platform-specific parts in this header, this source
   file should depend on libc only */
#include "multi_heap_platform.h"

/* Defines compile-time configuration macros */
#include "multi_heap_config.h"

#if CONFIG_HEAP_TLSF_USE_ROM_IMPL
/* Header containing the declarations of tlsf_poison_fill_pfunc_set()
 * and tlsf_poison_check_pfunc_set(), used to register callbacks that
 * fill and check memory regions with given patterns in the heap
 * component.
 */
#include "esp_rom_tlsf.h"
#endif

#ifdef MULTI_HEAP_POISONING

/* Alias MULTI_HEAP_POISONING_SLOW to SLOW for better readability */
#ifdef SLOW
#error "external header has defined SLOW"
#endif
#ifdef MULTI_HEAP_POISONING_SLOW
#define SLOW 1
#endif

#define MALLOC_FILL_PATTERN 0xce
#define FREE_FILL_PATTERN 0xfe

#define HEAD_CANARY_PATTERN 0xABBA1234
#define TAIL_CANARY_PATTERN 0xBAAD5678

#define ALIGN_UP(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
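/* e.g. ALIGN_UP(13, 4) == 16 and ALIGN_UP(16, 4) == 16; 'align' must be a
   power of two for the mask trick to work */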

typedef struct {
    uint32_t head_canary;
    size_t alloc_size;
} poison_head_t;

typedef struct {
    uint32_t tail_canary;
} poison_tail_t;

#define POISON_OVERHEAD (sizeof(poison_head_t) + sizeof(poison_tail_t))
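
/* Resulting layout of a poisoned allocation (the tail may land on an unaligned
   address, hence the memcpy paths below); on a typical 32-bit target with no
   struct padding, POISON_OVERHEAD is 12 bytes (8-byte head + 4-byte tail):

   [ poison_head_t ][ 'alloc_size' bytes of caller data ][ poison_tail_t ]
                     ^-- pointer returned to the caller
*/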

/* Given a "poisoned" region with pre-data header 'head', and actual data size 'alloc_size', fill in the head and tail
   region checks.

   Returns the pointer to the actual usable data buffer (ie after 'head')
*/
__attribute__((noinline)) static uint8_t *poison_allocated_region(poison_head_t *head, size_t alloc_size)
{
    uint8_t *data = (uint8_t *)(&head[1]); /* start of data ie 'real' allocated buffer */
    poison_tail_t *tail = (poison_tail_t *)(data + alloc_size);
    head->alloc_size = alloc_size;
    head->head_canary = HEAD_CANARY_PATTERN;

    uint32_t tail_canary = TAIL_CANARY_PATTERN;
    if ((intptr_t)tail % sizeof(void *) == 0) {
        tail->tail_canary = tail_canary;
    } else {
        /* unaligned tail_canary */
        memcpy(&tail->tail_canary, &tail_canary, sizeof(uint32_t));
    }

    return data;
}

/* Given a pointer to some allocated data, check the head & tail poison structures (before & after it) that were
   previously injected by poison_allocated_region().

   Returns a pointer to the poison header structure, or NULL if the poison structures are corrupt.
*/
__attribute__((noinline)) static poison_head_t *verify_allocated_region(void *data, bool print_errors)
{
    poison_head_t *head = (poison_head_t *)((intptr_t)data - sizeof(poison_head_t));
    poison_tail_t *tail = (poison_tail_t *)((intptr_t)data + head->alloc_size);

    /* check if the beginning of the data was overwritten */
    if (head->head_canary != HEAD_CANARY_PATTERN) {
        if (print_errors) {
            MULTI_HEAP_STDERR_PRINTF("CORRUPT HEAP: Bad head at %p. Expected 0x%08x got 0x%08x\n", &head->head_canary,
                                     HEAD_CANARY_PATTERN, head->head_canary);
        }
        return NULL;
    }

    /* check if the end of the data was overrun */
    uint32_t canary;
    if ((intptr_t)tail % sizeof(void *) == 0) {
        canary = tail->tail_canary;
    } else {
        /* tail is unaligned */
        memcpy(&canary, &tail->tail_canary, sizeof(canary));
    }
    if (canary != TAIL_CANARY_PATTERN) {
        if (print_errors) {
            MULTI_HEAP_STDERR_PRINTF("CORRUPT HEAP: Bad tail at %p. Expected 0x%08x got 0x%08x\n", &tail->tail_canary,
                                     TAIL_CANARY_PATTERN, canary);
        }
        return NULL;
    }

    return head;
}

#ifdef SLOW
/* Scan a region that should be filled with a known fill byte and verify it.

   if expect_free is true, expect FREE_FILL_PATTERN otherwise MALLOC_FILL_PATTERN.

   if swap_pattern is true, swap patterns in the buffer (ie replace MALLOC_FILL_PATTERN with FREE_FILL_PATTERN, and vice versa.)

   Returns true if verification checks out.

   This function has the noclone attribute to prevent the compiler from creating a clone in flash with expect_free
   optimized away (as this function is called only with expect_free == true throughout the component).
*/
__attribute__((noinline)) NOCLONE_ATTR
static bool verify_fill_pattern(void *data, size_t size, const bool print_errors, const bool expect_free, bool swap_pattern)
{
    const uint32_t FREE_FILL_WORD = (FREE_FILL_PATTERN << 24) | (FREE_FILL_PATTERN << 16) | (FREE_FILL_PATTERN << 8) | FREE_FILL_PATTERN;
    const uint32_t MALLOC_FILL_WORD = (MALLOC_FILL_PATTERN << 24) | (MALLOC_FILL_PATTERN << 16) | (MALLOC_FILL_PATTERN << 8) | MALLOC_FILL_PATTERN;
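    /* e.g. with FREE_FILL_PATTERN == 0xfe, FREE_FILL_WORD is 0xfefefefe: replicating
       the byte into a word lets the loop below compare 4 bytes at a time */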

    const uint32_t EXPECT_WORD = expect_free ? FREE_FILL_WORD : MALLOC_FILL_WORD;
    const uint32_t REPLACE_WORD = expect_free ? MALLOC_FILL_WORD : FREE_FILL_WORD;
    bool valid = true;

    /* Use 4-byte operations as much as possible */
    if ((intptr_t)data % 4 == 0) {
        uint32_t *p = data;
        while (size >= 4) {
            if (*p != EXPECT_WORD) {
                if (print_errors) {
                    MULTI_HEAP_STDERR_PRINTF("CORRUPT HEAP: Invalid data at %p. Expected 0x%08x got 0x%08x\n", p, EXPECT_WORD, *p);
                }
                valid = false;
#ifndef NDEBUG
                /* If an assertion is going to fail as soon as we're done verifying the pattern, leave the rest of the
                   buffer contents as-is for better post-mortem analysis
                */
                swap_pattern = false;
#endif
            }
            if (swap_pattern) {
                *p = REPLACE_WORD;
            }
            p++;
            size -= 4;
        }
        data = p;
    }

    uint8_t *p = data;
    for (size_t i = 0; i < size; i++) {
        if (p[i] != (uint8_t)EXPECT_WORD) {
            if (print_errors) {
                MULTI_HEAP_STDERR_PRINTF("CORRUPT HEAP: Invalid data at %p. Expected 0x%02x got 0x%02x\n", &p[i], (uint8_t)EXPECT_WORD, p[i]);
            }
            valid = false;
#ifndef NDEBUG
            swap_pattern = false; // same as above
#endif
        }
        if (swap_pattern) {
            p[i] = (uint8_t)REPLACE_WORD;
        }
    }
    return valid;
}
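
/* verify_fill_pattern() is used two ways in this file: the allocation paths call it
   with (expect_free == true, swap_pattern == true) to convert the free-fill pattern
   into the allocated pattern, while multi_heap_internal_check_block_poisoning() calls
   it with swap_pattern == false to check free blocks without modifying them. */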

/*!
 * @brief Definition of the weak function declared in the TLSF repository.
 * Calling this function ensures that the header of an absorbed
 * block is filled with the correct pattern in case of comprehensive
 * heap poisoning.
 *
 * @param start: pointer to the start of the memory region to fill
 * @param size: size of the memory region to fill
 * @param is_free: indicates whether the region should be filled with the
 * after-free pattern or the after-allocation pattern.
 */
void block_absorb_post_hook(void *start, size_t size, bool is_free)
{
    multi_heap_internal_poison_fill_region(start, size, is_free);
}
#endif

void *multi_heap_aligned_alloc(multi_heap_handle_t heap, size_t size, size_t alignment)
{
    if (!size) {
        return NULL;
    }

    if (size > SIZE_MAX - POISON_OVERHEAD) {
        return NULL;
    }

    multi_heap_internal_lock(heap);
    poison_head_t *head = multi_heap_aligned_alloc_impl_offs(heap, size + POISON_OVERHEAD,
                                                             alignment, sizeof(poison_head_t));
    uint8_t *data = NULL;
    if (head != NULL) {
        data = poison_allocated_region(head, size);
#ifdef SLOW
        /* check everything we got back is FREE_FILL_PATTERN & swap for MALLOC_FILL_PATTERN */
        bool ret = verify_fill_pattern(data, size, true, true, true);
        assert( ret );
#endif
    } else {
        multi_heap_internal_unlock(heap);
        return NULL;
    }

    multi_heap_internal_unlock(heap);

    return data;
}

void *multi_heap_malloc(multi_heap_handle_t heap, size_t size)
{
    if (!size) {
        return NULL;
    }

    if (size > SIZE_MAX - POISON_OVERHEAD) {
        return NULL;
    }

    multi_heap_internal_lock(heap);
    poison_head_t *head = multi_heap_malloc_impl(heap, size + POISON_OVERHEAD);
    uint8_t *data = NULL;
    if (head != NULL) {
        data = poison_allocated_region(head, size);
#ifdef SLOW
        /* check everything we got back is FREE_FILL_PATTERN & swap for MALLOC_FILL_PATTERN */
        bool ret = verify_fill_pattern(data, size, true, true, true);
        assert( ret );
#endif
    }

    multi_heap_internal_unlock(heap);
    return data;
}
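
/* Illustrative flow (sketch, not additional API): multi_heap_malloc(heap, 16)
   requests 16 + POISON_OVERHEAD bytes from the impl, poisons the block, and
   returns a pointer just past the poison_head_t; multi_heap_free() rewinds that
   pointer by sizeof(poison_head_t) before handing the block back to the impl. */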

/* This function has the noclone attribute to prevent the compiler from optimizing out the
 * check for p == NULL and creating a clone function placed in flash. */
NOCLONE_ATTR void multi_heap_free(multi_heap_handle_t heap, void *p)
{
    if (p == NULL) {
        return;
    }
    multi_heap_internal_lock(heap);

    poison_head_t *head = verify_allocated_region(p, true);
    assert(head != NULL);

#ifdef SLOW
    /* replace everything with FREE_FILL_PATTERN, including the poison head/tail */
    memset(head, FREE_FILL_PATTERN,
           head->alloc_size + POISON_OVERHEAD);
#endif
    multi_heap_free_impl(heap, head);

    multi_heap_internal_unlock(heap);
}

void multi_heap_aligned_free(multi_heap_handle_t heap, void *p)
{
    multi_heap_free(heap, p);
}

void *multi_heap_realloc(multi_heap_handle_t heap, void *p, size_t size)
{
    poison_head_t *head = NULL;
    poison_head_t *new_head;
    void *result = NULL;

    if (size > SIZE_MAX - POISON_OVERHEAD) {
        return NULL;
    }
    if (p == NULL) {
        return multi_heap_malloc(heap, size);
    }
    if (size == 0) {
        multi_heap_free(heap, p);
        return NULL;
    }

    /* p != NULL, size != 0 */
    head = verify_allocated_region(p, true);
    assert(head != NULL);

    multi_heap_internal_lock(heap);

#ifndef SLOW
    new_head = multi_heap_realloc_impl(heap, head, size + POISON_OVERHEAD);
    if (new_head != NULL) {
        /* For "fast" poisoning we only overwrite the head/tail of the new block, so it's
           safe to re-poison here even if realloc resized the allocation in place.
        */
        result = poison_allocated_region(new_head, size);
    }
#else // SLOW
    /* When slow poisoning is enabled, it becomes very fiddly to correctly fill memory on realloc
       (the buffer may be moved (including to an address overlapping the old buffer), grown, or shrunk in
       place.)

       For now we just malloc a new buffer, copy, and free. :|

       Note: If this ever changes, the multi_heap defrag realloc test should be enabled.
    */
    size_t orig_alloc_size = head->alloc_size;

    new_head = multi_heap_malloc_impl(heap, size + POISON_OVERHEAD);
    if (new_head != NULL) {
        result = poison_allocated_region(new_head, size);
        memcpy(result, p, MIN(size, orig_alloc_size));
        multi_heap_free(heap, p);
    }
#endif

    multi_heap_internal_unlock(heap);

    return result;
}
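
/* Behavioural note: in SLOW mode a successful realloc always returns a freshly
   poisoned buffer at a new address (malloc + copy + free), while in fast mode
   the impl may resize in place and only the canaries are rewritten. */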

void *multi_heap_get_block_address(multi_heap_block_handle_t block)
{
    char *head = multi_heap_get_block_address_impl(block);
    return head + sizeof(poison_head_t);
}

multi_heap_handle_t multi_heap_register(void *start, size_t size)
{
#ifdef SLOW
    if (start != NULL) {
        memset(start, FREE_FILL_PATTERN, size);
    }
#endif
#if CONFIG_HEAP_TLSF_USE_ROM_IMPL
    tlsf_poison_fill_pfunc_set(multi_heap_internal_poison_fill_region);
    tlsf_poison_check_pfunc_set(multi_heap_internal_check_block_poisoning);
#endif // CONFIG_HEAP_TLSF_USE_ROM_IMPL
    return multi_heap_register_impl(start, size);
}
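
/* Saturating subtraction: report 0 rather than underflowing when the size is
   not larger than POISON_OVERHEAD. */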
static inline __attribute__((always_inline)) void subtract_poison_overhead(size_t *arg) {
    if (*arg > POISON_OVERHEAD) {
        *arg -= POISON_OVERHEAD;
    } else {
        *arg = 0;
    }
}

size_t multi_heap_get_allocated_size(multi_heap_handle_t heap, void *p)
{
    poison_head_t *head = verify_allocated_region(p, true);
    assert(head != NULL);
    size_t result = multi_heap_get_allocated_size_impl(heap, head);
    subtract_poison_overhead(&result);
    return result;
}

void multi_heap_get_info(multi_heap_handle_t heap, multi_heap_info_t *info)
{
    multi_heap_get_info_impl(heap, info);
    /* don't count the heap poison head & tail overhead in the allocated bytes size */
    info->total_allocated_bytes -= info->allocated_blocks * POISON_OVERHEAD;
    /* trim largest_free_block to account for poison overhead */
    subtract_poison_overhead(&info->largest_free_block);
    /* similarly, trim total_free_bytes so there's no suggestion that
       a block this big may be available. */
    subtract_poison_overhead(&info->total_free_bytes);
    subtract_poison_overhead(&info->minimum_free_bytes);
}

size_t multi_heap_free_size(multi_heap_handle_t heap)
{
    size_t r = multi_heap_free_size_impl(heap);
    subtract_poison_overhead(&r);
    return r;
}

size_t multi_heap_minimum_free_size(multi_heap_handle_t heap)
{
    size_t r = multi_heap_minimum_free_size_impl(heap);
    subtract_poison_overhead(&r);
    return r;
}

/* Internal hooks used by multi_heap to manage poisoning, while keeping some modularity */

bool multi_heap_internal_check_block_poisoning(void *start, size_t size, bool is_free, bool print_errors)
{
    if (is_free) {
#ifdef SLOW
        return verify_fill_pattern(start, size, print_errors, true, false);
#else
        return true; /* can only verify free blocks in SLOW mode */
#endif
    } else {
        void *data = (void *)((intptr_t)start + sizeof(poison_head_t));
        poison_head_t *head = verify_allocated_region(data, print_errors);
        if (head != NULL && head->alloc_size > size - POISON_OVERHEAD) {
            /* block can be bigger than alloc_size, for reasons of alignment & fragmentation,
               but block can never be smaller than head->alloc_size... */
            if (print_errors) {
                MULTI_HEAP_STDERR_PRINTF("CORRUPT HEAP: Size at %p expected <=0x%08x got 0x%08x\n", &head->alloc_size,
                                         size - POISON_OVERHEAD, head->alloc_size);
            }
            return false;
        }
        return head != NULL;
    }
}

void multi_heap_internal_poison_fill_region(void *start, size_t size, bool is_free)
{
    memset(start, is_free ? FREE_FILL_PATTERN : MALLOC_FILL_PATTERN, size);
}

#else // !MULTI_HEAP_POISONING

#ifdef MULTI_HEAP_POISONING_SLOW
#error "MULTI_HEAP_POISONING_SLOW requires MULTI_HEAP_POISONING"
#endif

#endif // MULTI_HEAP_POISONING