heap: Add trace hash map config

- place all added functions and variables related to the hash map in RAM
only when the config is enabled.

- add the number of hash map entries as a Kconfig value and remove the hash map init function.
This prevents the user from allocating the hash map in flash and passing the pointer to the
init function (the heap trace manipulates the hash map from functions placed in IRAM).

- add the max linear search value to Kconfig so that users can configure it.

- protect access to the static variable "tracing"

- remove an unnecessary field from heap_trace_hashmap_entry_t
Author: Guillaume Souchere, 2023-03-01 12:42:14 +01:00
Parent: b699033ab3
Commit: bdfc348ab3
3 changed files with 152 additions and 149 deletions
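With the hash map init function gone, the map is a static array sized at build time, and tracing is driven entirely through the existing standalone API. A minimal usage sketch under these assumptions (the record count, buffer name and app_main call site are illustrative, not part of this commit):

#include "esp_err.h"
#include "esp_heap_trace.h"

#define NUM_RECORDS 100 // illustrative size; the buffer must live in internal RAM
static heap_trace_record_t trace_records[NUM_RECORDS];

void app_main(void)
{
    // Register the record buffer once. No hash map buffer is passed any more:
    // with CONFIG_HEAP_TRACE_HASH_MAP=y the map is built into the heap component.
    ESP_ERROR_CHECK(heap_trace_init_standalone(trace_records, NUM_RECORDS));

    ESP_ERROR_CHECK(heap_trace_start(HEAP_TRACE_LEAKS));
    // ... run the code under test ...
    ESP_ERROR_CHECK(heap_trace_stop());

    heap_trace_dump(); // with the hash map enabled, also prints hit/miss stats
}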

components/heap/Kconfig

@@ -70,6 +70,41 @@ menu "Heap memory debugging"
This function depends on heap poisoning being enabled and adds four more bytes of overhead for each block
allocated.
config HEAP_TRACE_HASH_MAP
bool "Use hash map mechanism to access heap trace records"
depends on HEAP_TRACING_STANDALONE
default n
help
Enable this flag to use a hash map to speed up the handling of heap trace records.
Keeping this as "n" in your project will save RAM and heap memory but will lower
the performance of the heap trace in adding, retrieving and removing trace records.
Setting this to "y" will use more RAM and heap memory, but adding, retrieving and
removing trace records will be faster.
config HEAP_TRACE_HASH_MAP_SIZE
int "The number of entries in the hash map"
depends on HEAP_TRACE_HASH_MAP
range 10 10000
default 100
help
Defines the number of entries in the heap trace hash map. The larger this number,
the more memory the hash map occupies and the lower the probability of a miss when
handling heap trace records.
config HEAP_TRACE_HASH_MAP_MAX_LINEAR
int "The number of iterations when searching for an entry in the hash map."
depends on HEAP_TRACE_HASH_MAP
range 1 HEAP_TRACE_HASH_MAP_SIZE
default 10
help
Defines the maximum number of iterations when searching for an entry in the hash map.
The closer this number is to the number of entries in the hash map, the lower the
chance of a miss, but the slower the lookup. The lower this number, the higher the
miss probability, but the faster the hash map operates.
config HEAP_ABORT_WHEN_ALLOCATION_FAILS
bool "Abort if memory allocation fails"
default n
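For reference, a hedged sdkconfig fragment enabling the new options; the values below are illustrative, and the map size should comfortably exceed the number of trace records:

CONFIG_HEAP_TRACING_STANDALONE=y
CONFIG_HEAP_TRACE_HASH_MAP=y
CONFIG_HEAP_TRACE_HASH_MAP_SIZE=512
CONFIG_HEAP_TRACE_HASH_MAP_MAX_LINEAR=32

With SIZE=512 and MAX_LINEAR=32, a lookup probes at most 32 consecutive slots starting from the hashed index before declaring a miss and falling back to the reverse list search.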

components/heap/heap_trace_standalone.c

@@ -5,6 +5,7 @@
*/
#include <string.h>
#include <sdkconfig.h>
#include <inttypes.h>
#define HEAP_TRACE_SRCFILE /* don't warn on inclusion here */
#include "esp_heap_trace.h"
@@ -56,15 +57,6 @@ typedef struct {
bool has_overflowed;
} records_t;
typedef struct {
/* Buffer used for hashmap entries */
heap_trace_hashmap_entry_t *buffer;
/* length of 'buffer' */
size_t count;
} hashmap_t;
// Forward Defines
static void heap_trace_dump_base(bool internal_ram, bool psram);
static void record_deep_copy(heap_trace_record_t *r_dest, const heap_trace_record_t *r_src);
@@ -73,30 +65,82 @@ static void list_remove(heap_trace_record_t *r_remove);
static heap_trace_record_t* list_add(const heap_trace_record_t *r_append);
static heap_trace_record_t* list_pop_unused(void);
static heap_trace_record_t* list_find_address_reverse(void *p);
static void map_add(const heap_trace_record_t *r_add);
static void map_remove(void *p);
static heap_trace_record_t* map_find(void *p);
/* The actual records. */
static records_t records;
/* The hashmap */
static hashmap_t map;
/* Actual number of allocations logged */
static size_t total_allocations;
/* Actual number of frees logged */
static size_t total_frees;
/* track hits and misses */
static size_t total_hashmap_hits;
static size_t total_hashmap_miss;
/* Used to speed up heap_trace_get */
static heap_trace_record_t* r_get;
static size_t r_get_idx;
#if CONFIG_HEAP_TRACE_HASH_MAP
typedef struct heap_trace_hashmap_entry_t {
heap_trace_record_t* record; // associated record
} heap_trace_hashmap_entry_t;
static heap_trace_hashmap_entry_t hash_map[(size_t)CONFIG_HEAP_TRACE_HASH_MAP_SIZE]; // Buffer used for hashmap entries
static size_t total_hashmap_hits;
static size_t total_hashmap_miss;
static size_t hash_idx(void* p)
{
static const uint32_t fnv_prime = 16777619UL; // 2^24 + 2^8 + 0x93 (32-bit FNV prime)
return ((uint32_t)p * fnv_prime) % (uint32_t)CONFIG_HEAP_TRACE_HASH_MAP_SIZE;
}
static void map_add(const heap_trace_record_t *r_add)
{
size_t idx = hash_idx(r_add->address);
// linear search: find empty location
for(size_t i = 0; i < CONFIG_HEAP_TRACE_HASH_MAP_MAX_LINEAR; i++) {
size_t n = (i + idx) % (size_t)CONFIG_HEAP_TRACE_HASH_MAP_SIZE;
if (hash_map[n].record == NULL) {
hash_map[n].record = (heap_trace_record_t*) r_add;
break;
}
}
}
static void map_remove(void *p)
{
size_t idx = hash_idx(p);
// linear search: find matching address
for(size_t i = 0; i < CONFIG_HEAP_TRACE_HASH_MAP_MAX_LINEAR; i++) {
size_t n = (i + idx) % (size_t)CONFIG_HEAP_TRACE_HASH_MAP_SIZE;
if (hash_map[n].record != NULL && hash_map[n].record->address == p) {
hash_map[n].record = NULL;
break;
}
}
}
static heap_trace_record_t* map_find(void *p)
{
size_t idx = hash_idx(p);
// linear search: find matching address
for(size_t i = 0; i < CONFIG_HEAP_TRACE_HASH_MAP_MAX_LINEAR; i++) {
size_t n = (i + idx) % (size_t)CONFIG_HEAP_TRACE_HASH_MAP_SIZE;
if (hash_map[n].record != NULL && hash_map[n].record->address == p) {
total_hashmap_hits++;
return hash_map[n].record;
}
}
total_hashmap_miss++;
return NULL;
}
#endif // CONFIG_HEAP_TRACE_HASH_MAP
esp_err_t heap_trace_init_standalone(heap_trace_record_t *record_buffer, size_t num_records)
{
if (tracing) {
@@ -113,16 +157,12 @@ esp_err_t heap_trace_init_standalone(heap_trace_record_t *record_buffer, size_t num_records)
return ESP_OK;
}
esp_err_t heap_trace_set_hashmap(heap_trace_hashmap_entry_t *entries_buffer, size_t num_entries)
static esp_err_t set_tracing(bool enable)
{
if (tracing) {
if (tracing == enable) {
return ESP_ERR_INVALID_STATE;
}
map.buffer = entries_buffer;
map.count = num_entries;
tracing = enable;
return ESP_OK;
}
@@ -134,14 +174,17 @@ esp_err_t heap_trace_start(heap_trace_mode_t mode_param)
portENTER_CRITICAL(&trace_mux);
tracing = false;
set_tracing(false);
mode = mode_param;
// clear buffers
memset(records.buffer, 0, sizeof(heap_trace_record_t) * records.capacity);
if (map.buffer) {
memset(map.buffer, 0, sizeof(heap_trace_hashmap_entry_t) * map.count);
}
#if CONFIG_HEAP_TRACE_HASH_MAP
memset(hash_map, 0, sizeof(hash_map));
total_hashmap_hits = 0;
total_hashmap_miss = 0;
#endif // CONFIG_HEAP_TRACE_HASH_MAP
records.count = 0;
records.has_overflowed = false;
@@ -149,31 +192,27 @@ esp_err_t heap_trace_start(heap_trace_mode_t mode_param)
total_allocations = 0;
total_frees = 0;
total_hashmap_hits = 0;
total_hashmap_miss = 0;
heap_trace_resume();
const esp_err_t ret_val = set_tracing(true);
portEXIT_CRITICAL(&trace_mux);
return ESP_OK;
}
static esp_err_t set_tracing(bool enable)
{
if (tracing == enable) {
return ESP_ERR_INVALID_STATE;
}
tracing = enable;
return ESP_OK;
return ret_val;
}
esp_err_t heap_trace_stop(void)
{
return set_tracing(false);
portENTER_CRITICAL(&trace_mux);
const esp_err_t ret_val = set_tracing(false);
portEXIT_CRITICAL(&trace_mux);
return ret_val;
}
esp_err_t heap_trace_resume(void)
{
return set_tracing(true);
portENTER_CRITICAL(&trace_mux);
const esp_err_t ret_val = set_tracing(true);
portEXIT_CRITICAL(&trace_mux);
return ret_val;
}
size_t heap_trace_get_count(void)
@@ -244,8 +283,10 @@ esp_err_t heap_trace_summary(heap_trace_summary_t *summary)
summary->capacity = records.capacity;
summary->high_water_mark = records.high_water_mark;
summary->has_overflowed = records.has_overflowed;
#if CONFIG_HEAP_TRACE_HASH_MAP
summary->total_hashmap_hits = total_hashmap_hits;
summary->total_hashmap_miss = total_hashmap_miss;
#endif // CONFIG_HEAP_TRACE_HASH_MAP
portEXIT_CRITICAL(&trace_mux);
return ESP_OK;
@@ -267,7 +308,7 @@ static void heap_trace_dump_base(bool internal_ram, bool psram)
size_t delta_allocs = 0;
size_t start_count = records.count;
esp_rom_printf("====== Heap Trace: %u records (%u capacity) ======\n",
esp_rom_printf("====== Heap Trace: %"PRIu32" records (%"PRIu32" capacity) ======\n",
records.count, records.capacity);
// Iterate through the linked list
@@ -325,21 +366,23 @@ static void heap_trace_dump_base(bool internal_ram, bool psram)
if (mode == HEAP_TRACE_ALL) {
esp_rom_printf("Mode: Heap Trace All\n");
esp_rom_printf("%u bytes alive in trace (%u/%u allocations)\n",
esp_rom_printf("%"PRIu32" bytes alive in trace (%"PRIu32"/%"PRIu32" allocations)\n",
delta_size, delta_allocs, heap_trace_get_count());
} else {
esp_rom_printf("Mode: Heap Trace Leaks\n");
esp_rom_printf("%u bytes 'leaked' in trace (%u allocations)\n", delta_size, delta_allocs);
esp_rom_printf("%"PRIu32" bytes 'leaked' in trace (%"PRIu32" allocations)\n", delta_size, delta_allocs);
}
esp_rom_printf("records: %u (%u capacity, %u high water mark)\n",
esp_rom_printf("records: %"PRIu32" (%"PRIu32" capacity, %"PRIu32" high water mark)\n",
records.count, records.capacity, records.high_water_mark);
esp_rom_printf("hashmap: %u capacity (%u hits, %u misses)\n",
map.count, total_hashmap_hits, total_hashmap_miss);
#if CONFIG_HEAP_TRACE_HASH_MAP
esp_rom_printf("hashmap: %"PRIu32" capacity (%"PRIu32" hits, %"PRIu32" misses)\n",
(size_t)CONFIG_HEAP_TRACE_HASH_MAP_SIZE, total_hashmap_hits, total_hashmap_miss);
#endif // CONFIG_HEAP_TRACE_HASH_MAP
esp_rom_printf("total allocations: %u\n", total_allocations);
esp_rom_printf("total frees: %u\n", total_frees);
esp_rom_printf("total allocations: %"PRIu32"\n", total_allocations);
esp_rom_printf("total frees: %"PRIu32"\n", total_frees);
if (start_count != records.count) { // only a problem if trace isn't stopped before dumping
esp_rom_printf("(NB: New entries were traced while dumping, so trace dump may have duplicate entries.)\n");
@@ -372,16 +415,20 @@ static IRAM_ATTR void record_allocation(const heap_trace_record_t *r_allocation)
heap_trace_record_t *r_first = TAILQ_FIRST(&records.list);
list_remove(r_first);
#if CONFIG_HEAP_TRACE_HASH_MAP
map_remove(r_first->address);
}
// push onto end of list
heap_trace_record_t *r_dest = list_add(r_allocation);
// add to hashmap
if (r_dest) {
map_add(r_dest);
}
#else
}
// push onto end of list
list_add(r_allocation);
#endif // CONFIG_HEAP_TRACE_HASH_MAP
total_allocations++;
}
@@ -404,11 +451,19 @@ static IRAM_ATTR void record_free(void *p, void **callers)
}
portENTER_CRITICAL(&trace_mux);
// return directly if records.count == 0; when the hash map is used,
// this prevents the hash map from returning an item that is no longer
// in the records list.
if (records.count == 0) {
portEXIT_CRITICAL(&trace_mux);
return;
}
if (tracing) {
total_frees++;
#if CONFIG_HEAP_TRACE_HASH_MAP
// check the hashmap
heap_trace_record_t *r_found = map_find(p);
@@ -416,7 +471,9 @@ static IRAM_ATTR void record_free(void *p, void **callers)
if(!r_found) {
r_found = list_find_address_reverse(p);
}
#else
heap_trace_record_t *r_found = list_find_address_reverse(p);
#endif // CONFIG_HEAP_TRACE_HASH_MAP
// search backwards for the allocation record matching this free
if (r_found) {
@@ -430,7 +487,9 @@ static IRAM_ATTR void record_free(void *p, void **callers)
// Leak trace mode, once an allocation is freed
// we remove it from the list & hashmap
list_remove(r_found);
#if CONFIG_HEAP_TRACE_HASH_MAP
map_remove(p);
#endif // CONFIG_HEAP_TRACE_HASH_MAP
}
}
}
@@ -541,10 +600,6 @@ static IRAM_ATTR heap_trace_record_t* list_add(const heap_trace_record_t *r_append)
// search records.list backwards for the allocation record matching this address
static IRAM_ATTR heap_trace_record_t* list_find_address_reverse(void *p)
{
if (records.count == 0) {
return NULL;
}
heap_trace_record_t *r_found = NULL;
// Perf: We search backwards because new allocations are appended
@@ -560,74 +615,6 @@ static IRAM_ATTR heap_trace_record_t* list_find_address_reverse(void *p)
return r_found;
}
#define MAXLINEAR 100
static size_t hash_idx(void* p)
{
const uint64_t prime = 11020851777194292899ULL;
uint32_t n = (uint32_t) p;
return (n * prime) % map.count;
}
static void map_add(const heap_trace_record_t *r_add)
{
if (map.buffer == NULL || map.count == 0) {
return;
}
size_t idx = hash_idx(r_add->address);
// linear search: find empty location
for(size_t i = 0; i < MAXLINEAR; i++) {
size_t n = (i + idx) % map.count;
if (map.buffer[n].address == NULL) {
map.buffer[n].address = r_add->address;
map.buffer[n].record = (heap_trace_record_t*) r_add;
break;
}
}
}
static void map_remove(void *p)
{
if (map.buffer == NULL || map.count == 0) {
return;
}
size_t idx = hash_idx(p);
// linear search: find matching address
for(size_t i = 0; i < MAXLINEAR; i++) {
size_t n = (i + idx) % map.count;
if (map.buffer[n].address == p) {
map.buffer[n].address = NULL;
map.buffer[n].record = NULL;
break;
}
}
}
static heap_trace_record_t* map_find(void *p)
{
if (map.buffer == NULL || map.count == 0) {
return NULL;
}
size_t idx = hash_idx(p);
// linear search: find matching address
for(size_t i = 0; i < MAXLINEAR; i++) {
size_t n = (i + idx) % map.count;
if (map.buffer[n].address == p) {
total_hashmap_hits++;
return map.buffer[n].record;
}
}
total_hashmap_miss++;
return NULL;
}
#include "heap_trace.inc"
#endif // CONFIG_HEAP_TRACING_STANDALONE
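Because heap_trace_summary_t now exposes the hit/miss counters only when the feature is compiled in (see the header diff below), callers should guard their reads the same way. A short sketch; the function name and printf formatting are ours, not part of the commit:

#include <stdio.h>
#include "esp_heap_trace.h"

static void print_trace_summary(void)
{
    heap_trace_summary_t summary;
    if (heap_trace_summary(&summary) == ESP_OK) {
        printf("records: %zu/%zu (high water mark %zu)\n",
               summary.count, summary.capacity, summary.high_water_mark);
#if CONFIG_HEAP_TRACE_HASH_MAP
        // These fields only exist when the hash map is compiled in.
        printf("hashmap: %zu hits, %zu misses\n",
               summary.total_hashmap_hits, summary.total_hashmap_miss);
#endif
    }
}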

components/heap/include/esp_heap_trace.h

@@ -41,11 +41,6 @@ typedef struct heap_trace_record_t {
#endif // CONFIG_HEAP_TRACING_STANDALONE
} heap_trace_record_t;
typedef struct heap_trace_hashmap_entry_t {
void* address; ///< ptr returned by malloc/calloc/realloc
heap_trace_record_t* record; ///< associated record
} heap_trace_hashmap_entry_t;
/**
* @brief Stores information about the result of a heap trace.
*/
@@ -57,8 +52,10 @@ typedef struct {
size_t capacity; ///< The capacity of the internal buffer
size_t high_water_mark; ///< The maximum value that 'count' got to
size_t has_overflowed; ///< True if the internal buffer overflowed at some point
#if CONFIG_HEAP_TRACE_HASH_MAP
size_t total_hashmap_hits; ///< If hashmap is used, the total number of hits
size_t total_hashmap_miss; ///< If hashmap is used, the total number of misses (possibly due to overflow)
#endif
} heap_trace_summary_t;
/**
@@ -78,22 +75,6 @@ typedef struct {
*/
esp_err_t heap_trace_init_standalone(heap_trace_record_t *record_buffer, size_t num_records);
/**
* @brief Provide a hashmap to greatly improve the performance of standalone heap trace leaks mode.
*
* This function must be called before heap_trace_start.
*
* @param entries_buffer Provide a buffer to use for heap trace hashmap.
* Note: External RAM is allowed, but it prevents recording allocations made from ISR's.
* @param num_entries Size of the entries_buffer. Should be greater than num_records, preferably 2-4x as large.
* @return
* - ESP_ERR_NOT_SUPPORTED Project was compiled without heap tracing enabled in menuconfig.
* - ESP_ERR_INVALID_STATE Heap tracing is currently in progress.
* - ESP_OK Heap tracing initialised successfully.
*/
esp_err_t heap_trace_set_hashmap(heap_trace_hashmap_entry_t *entries_buffer, size_t num_entries);
/**
* @brief Initialise heap tracing in host-based mode.
*