heap: update the calculation of fl index max and use bitfield in control_t

The calculation of fl_index_max is changed so that it is always the smallest value that covers the size of the registered memory.

The control_construct() function now checks for the minimum required size as the control structure parameters are calculated.

There is no longer a minimum configuration for fl_index_max, so the tlsf_config enum is stripped down to remove unnecessary compile-time values.

The tlsf_size() function now fails if no tlsf pointer is passed as a parameter, since there is no longer a way to calculate a default tlsf size.

Bitfields are now used in control_t where possible, which reduces the size of the structure from 56 bytes to 36 bytes.
Author: Guillaume Souchere, 2022-10-13 10:02:29 +02:00 (committed by BOT)
Commit 48b0000e22, parent 9f6b549dea
5 changed files with 97 additions and 64 deletions


@ -142,7 +142,7 @@ static inline __attribute__((__always_inline__)) void mapping_insert(control_t *
{
/* Store small blocks in first list. */
fl = 0;
sl = tlsf_cast(int, size) >> 2;
sl = tlsf_cast(int, size) / (control->small_block_size / control->sl_index_count);
}
else
{
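For illustration only (not part of this commit's diff): a standalone sketch of the new small-block mapping above, using assumed per-pool parameter values for a pool under 16 kB. Note that small_block_size / sl_index_count equals ALIGN_SIZE (4), the same divisor the old hard-coded `size >> 2` used, but it is now taken from the control structure instead of compile-time constants.

#include <assert.h>

int main(void)
{
    /* Assumed values for a pool under 16 kB (sl_index_count_log2 = 3) */
    unsigned small_block_size = 32;
    unsigned sl_index_count   = 8;
    unsigned size             = 20;   /* a "small" block: size < small_block_size */

    /* Same expression as in mapping_insert() above */
    unsigned sl = size / (small_block_size / sl_index_count);
    assert(sl == 5);                  /* 20 / 4 = 5, within the 8 second-level lists */
    return 0;
}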
@ -459,16 +459,19 @@ static inline __attribute__((__always_inline__)) void* block_prepare_used(contro
}
/* Clear structure and point all empty lists at the null block. */
static void control_construct(control_t* control, size_t bytes)
static control_t* control_construct(control_t* control, size_t bytes)
{
int i, j;
// Check that the requested size can at least hold the control_t. This will allow us
// to fill in the fields of control_t necessary to determine the final size of
// the metadata overhead, and to check that the requested size can hold
// this data plus at least one block of minimum size.
if (bytes < sizeof(control_t))
{
return NULL;
}
control->block_null.next_free = &control->block_null;
control->block_null.prev_free = &control->block_null;
/* find the closest ^2 for first layer */
i = (bytes - 1) / (16 * 1024);
control->fl_index_max = FL_INDEX_MAX_MIN + sizeof(i) * 8 - __builtin_clz(i);
/* Find the closest power of two for first layer */
control->fl_index_max = 32 - __builtin_clz(bytes);
/* adapt second layer to the pool */
if (bytes <= 16 * 1024) control->sl_index_count_log2 = 3;
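For illustration only (not part of this commit's diff): a minimal standalone sketch of the new first-level index calculation, assuming a 32-bit unsigned int. 32 - __builtin_clz(bytes) is the bit width of bytes, so 1 << fl_index_max always covers the registered size.

#include <assert.h>
#include <stddef.h>

/* Hypothetical standalone helper mirroring the expression used in
 * control_construct() above (assumes a 32-bit unsigned int). */
static unsigned fl_index_max_for(size_t bytes)
{
    return 32 - __builtin_clz((unsigned)bytes);
}

int main(void)
{
    assert(fl_index_max_for(30 * 1024)  == 15);  /* 1 << 15 = 32768  >= 30720  */
    assert(fl_index_max_for(100 * 1024) == 17);  /* 1 << 17 = 131072 >= 102400 */
    return 0;
}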
@ -479,11 +482,26 @@ static void control_construct(control_t* control, size_t bytes)
control->sl_index_count = 1 << control->sl_index_count_log2;
control->fl_index_count = control->fl_index_max - control->fl_index_shift + 1;
control->small_block_size = 1 << control->fl_index_shift;
// the total size of the metadata overhead is the size of the control_t
// added to the size of the sl_bitmaps and the size of blocks
control->size = sizeof(control_t) + (sizeof(*control->sl_bitmap) * control->fl_index_count) +
(sizeof(*control->blocks) * (control->fl_index_count * control->sl_index_count));
// check that the requested size can hold the whole control structure and
// at least one small block
if (bytes < control->size + block_size_min)
{
return NULL;
}
control->block_null.next_free = &control->block_null;
control->block_null.prev_free = &control->block_null;
control->fl_bitmap = 0;
control->sl_bitmap = align_ptr(control + 1, sizeof(*control->sl_bitmap));
control->blocks = align_ptr(control->sl_bitmap + control->fl_index_count, sizeof(*control->blocks));
control->size = (void*) (control->blocks + control->sl_index_count * control->fl_index_count) - (void*) control;
/* SL_INDEX_COUNT must be <= number of bits in sl_bitmap's storage type. */
tlsf_assert(sizeof(unsigned int) * CHAR_BIT >= control->sl_index_count && "CHAR_BIT less than sl_index_count");
@ -491,14 +509,16 @@ static void control_construct(control_t* control, size_t bytes)
/* Ensure we've properly tuned our sizes. */
tlsf_assert(ALIGN_SIZE == control->small_block_size / control->sl_index_count && "ALIGN_SIZE does not match");
for (i = 0; i < control->fl_index_count; ++i)
for (int i = 0; i < control->fl_index_count; ++i)
{
control->sl_bitmap[i] = 0;
for (j = 0; j < control->sl_index_count; ++j)
for (int j = 0; j < control->sl_index_count; ++j)
{
control->blocks[i*control->sl_index_count + j] = &control->block_null;
}
}
return control;
}
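For illustration only (not part of the diff): a rough estimate of the metadata overhead that the new early check compares against. The helper name is hypothetical, sizeof(control_t) is taken as 36 bytes per the commit message, and 4-byte bitmap words and block pointers are assumed (32-bit target).

#include <stddef.h>

/* Hypothetical helper: rough equivalent of the control->size computation above. */
static size_t estimated_control_size(size_t fl_index_count, size_t sl_index_count)
{
    const size_t control_struct = 36;                                  /* sizeof(control_t) with bitfields (per commit message) */
    const size_t sl_bitmaps     = 4 * fl_index_count;                  /* one 32-bit bitmap per first-level list */
    const size_t block_lists    = 4 * fl_index_count * sl_index_count; /* one block_header_t* per (fl, sl) pair */
    return control_struct + sl_bitmaps + block_lists;
}
/* control_construct() now returns NULL when bytes < this size + block_size_min. */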
/*
@ -630,13 +650,13 @@ int tlsf_check_pool(pool_t pool)
size_t tlsf_fit_size(tlsf_t tlsf, size_t size)
{
/* because it's GoodFit, allocable size is one range lower */
if (size)
if (size && tlsf != NULL)
{
size_t sl_interval;
control_t* control = tlsf_cast(control_t*, tlsf);
sl_interval = (1 << ((sizeof(size_t) * 8 - 1) - __builtin_clz(size))) / control->sl_index_count;
return size & ~(sl_interval - 1);
}
sl_interval = (1 << (32 - __builtin_clz(size) - 1)) / control->sl_index_count;
return size & ~(sl_interval - 1);
}
return 0;
}
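For illustration only (not part of the diff): a standalone sketch of the rounding performed by tlsf_fit_size(), assuming sl_index_count = 32; the real value comes from the pool's control structure.

#include <assert.h>

int main(void)
{
    unsigned sl_index_count = 32;   /* assumed second-level subdivision count */
    unsigned size = 1000;

    /* Same rounding as tlsf_fit_size(): align the size down to the
     * second-level interval of its power-of-two range. */
    unsigned sl_interval = (1u << (32 - __builtin_clz(size) - 1)) / sl_index_count; /* 512 / 32 = 16 */
    unsigned fit = size & ~(sl_interval - 1);
    assert(fit == 992);             /* 1000 rounded down to the 16-byte interval */
    return 0;
}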
@ -648,16 +668,12 @@ size_t tlsf_fit_size(tlsf_t tlsf, size_t size)
*/
size_t tlsf_size(tlsf_t tlsf)
{
if (tlsf)
if (tlsf == NULL)
{
control_t* control = tlsf_cast(control_t*, tlsf);
return control->size;
return 0;
}
/* no tlsf, we'll just return a min size */
return sizeof(control_t) +
sizeof(int) * SL_INDEX_COUNT_MIN +
sizeof(block_header_t*) * SL_INDEX_COUNT_MIN * FL_INDEX_COUNT_MIN;
control_t* control = tlsf_cast(control_t*, tlsf);
return control->size;
}
size_t tlsf_align_size(void)
@ -672,6 +688,10 @@ size_t tlsf_block_size_min(void)
size_t tlsf_block_size_max(tlsf_t tlsf)
{
if (tlsf == NULL)
{
return 0;
}
control_t* control = tlsf_cast(control_t*, tlsf);
return tlsf_cast(size_t, 1) << control->fl_index_max;
}
@ -765,20 +785,24 @@ tlsf_t tlsf_create(void* mem, size_t max_bytes)
#if _DEBUG
if (test_ffs_fls())
{
return 0;
return NULL;
}
#endif
if (mem == NULL)
{
return NULL;
}
if (((tlsfptr_t)mem % ALIGN_SIZE) != 0)
{
printf("tlsf_create: Memory must be aligned to %u bytes.\n",
(unsigned int)ALIGN_SIZE);
return 0;
return NULL;
}
control_construct(tlsf_cast(control_t*, mem), max_bytes);
return tlsf_cast(tlsf_t, mem);
control_t* control_ptr = control_construct(tlsf_cast(control_t*, mem), max_bytes);
return tlsf_cast(tlsf_t, control_ptr);
}
pool_t tlsf_get_pool(tlsf_t tlsf)
@ -789,7 +813,10 @@ pool_t tlsf_get_pool(tlsf_t tlsf)
tlsf_t tlsf_create_with_pool(void* mem, size_t pool_bytes, size_t max_bytes)
{
tlsf_t tlsf = tlsf_create(mem, max_bytes ? max_bytes : pool_bytes);
tlsf_add_pool(tlsf, (char*)mem + tlsf_size(tlsf), pool_bytes - tlsf_size(tlsf));
if (tlsf != NULL)
{
tlsf_add_pool(tlsf, (char*)mem + tlsf_size(tlsf), pool_bytes - tlsf_size(tlsf));
}
return tlsf;
}
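For illustration only (not part of the diff): a hypothetical usage sketch of tlsf_create_with_pool(); the buffer name and size are made up. With the NULL check added above, a region too small for the control structure now fails cleanly instead of having tlsf_add_pool() called on a bad handle.

#include "tlsf.h"

/* Hypothetical buffer; 4-byte aligned as required by tlsf_create() */
static unsigned char heap_region[8 * 1024] __attribute__((aligned(4)));

void example_create(void)   /* hypothetical function */
{
    /* max_bytes == 0 falls back to pool_bytes (see tlsf_create_with_pool above) */
    tlsf_t tlsf = tlsf_create_with_pool(heap_region, sizeof(heap_region), 0);
    if (tlsf == NULL)
    {
        /* region too small for the control structure, or misaligned */
        return;
    }
    void* p = tlsf_malloc(tlsf, 64);
    tlsf_free(tlsf, p);
}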
@ -957,6 +984,12 @@ void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size)
const size_t combined = cursize + block_size(next) + block_header_overhead;
const size_t adjust = adjust_request_size(tlsf, size, ALIGN_SIZE);
// if adjust is equal to 0, the size is too big
if (adjust == 0)
{
return p;
}
tlsf_assert(!block_is_free(block) && "block already marked as free");
/*


@ -78,13 +78,25 @@ typedef struct control_t
/* Empty lists point at this block to indicate they are free. */
block_header_t block_null;
/* Local parameter for the pool */
unsigned int fl_index_count;
unsigned int fl_index_shift;
unsigned int fl_index_max;
unsigned int sl_index_count;
unsigned int sl_index_count_log2;
unsigned int small_block_size;
/* Local parameters for the pool. Given the maximum
 * value of each field, all the following parameters
 * can fit in 4 bytes when using bitfields
 */
unsigned int fl_index_count : 5; // 5 cumulated bits
unsigned int fl_index_shift : 3; // 8 cumulated bits
unsigned int fl_index_max : 6; // 14 cumulated bits
unsigned int sl_index_count : 6; // 20 cumulated bits
/* log2 of number of linear subdivisions of block sizes. Larger
** values require more memory in the control structure. Values of
** 4 or 5 are typical.
*/
unsigned int sl_index_count_log2 : 3; // 23 cumulated bits
unsigned int small_block_size : 8; // 31 cumulated bits
/* Size of the metadata (size of control block,
 * sl_bitmap and blocks)
 */
size_t size;
/* Bitmaps for free lists. */
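For illustration only (not part of the diff): a standalone sketch of the bitfield packing, using hypothetical struct names. The six fields total 31 bits, so on typical ABIs they share a single 4-byte unsigned int unit instead of six full ints (24 bytes), which accounts for the 20-byte reduction of control_t from 56 to 36 bytes.

#include <stdio.h>

struct params_plain {      /* hypothetical: fields as full unsigned ints */
    unsigned int fl_index_count, fl_index_shift, fl_index_max;
    unsigned int sl_index_count, sl_index_count_log2, small_block_size;
};

struct params_packed {     /* hypothetical: same fields as bitfields (31 bits total) */
    unsigned int fl_index_count : 5;
    unsigned int fl_index_shift : 3;
    unsigned int fl_index_max : 6;
    unsigned int sl_index_count : 6;
    unsigned int sl_index_count_log2 : 3;
    unsigned int small_block_size : 8;
};

int main(void)
{
    /* typically prints "24 vs 4" */
    printf("%zu vs %zu\n", sizeof(struct params_plain), sizeof(struct params_packed));
    return 0;
}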
@ -128,6 +140,15 @@ size_t tlsf_block_size_min(void);
size_t tlsf_block_size_max(tlsf_t tlsf);
size_t tlsf_pool_overhead(void);
size_t tlsf_alloc_overhead(void);
/**
* @brief Return the allocable size based on the size passed
* as a parameter
*
* @param tlsf Pointer to the tlsf structure
* @param size The allocation size
* @return size_t The updated allocation size
*/
size_t tlsf_fit_size(tlsf_t tlsf, size_t size);
/* Debugging. */


@ -63,11 +63,8 @@
** A free block must be large enough to store its header minus the size of
** the prev_phys_block field, and no larger than the number of addressable
** bits for FL_INDEX.
** The block_size_max macro returns the maximum block size for the minimum pool;
** use tlsf_block_size_max for a value specific to the pool
*/
#define block_size_min (sizeof(block_header_t) - sizeof(block_header_t*))
#define block_size_max (tlsf_cast(size_t, 1) << FL_INDEX_MAX_MIN)
/*
** block_header_t member functions.


@ -39,28 +39,7 @@
enum tlsf_config
{
/* log2 of number of linear subdivisions of block sizes. Larger
** values require more memory in the control structure. Values of
** 4 or 5 are typical, 3 is for very small pools.
*/
SL_INDEX_COUNT_LOG2_MIN = 3,
/* All allocation sizes and addresses are aligned to 4 bytes. */
ALIGN_SIZE_LOG2 = 2,
ALIGN_SIZE = (1 << ALIGN_SIZE_LOG2),
/*
** We support allocations of sizes up to (1 << FL_INDEX_MAX) bits.
** However, because we linearly subdivide the second-level lists, and
** our minimum size granularity is 4 bytes, it doesn't make sense to
** create first-level lists for sizes smaller than SL_INDEX_COUNT * 4,
** or (1 << (SL_INDEX_COUNT_LOG2 + 2)) bytes, as there we will be
** trying to split size ranges into more slots than we have available.
** Instead, we calculate the minimum threshold size, and place all
** blocks below that size into the 0th first-level list.
** Values below are the absolute minimum to accept a pool addition
*/
FL_INDEX_MAX_MIN = 14, // For a less than 16kB pool
SL_INDEX_COUNT_MIN = (1 << SL_INDEX_COUNT_LOG2_MIN),
FL_INDEX_COUNT_MIN = (FL_INDEX_MAX_MIN - (SL_INDEX_COUNT_LOG2_MIN + ALIGN_SIZE_LOG2) + 1),
};


@ -122,7 +122,7 @@ size_t multi_heap_get_allocated_size_impl(multi_heap_handle_t heap, void *p)
multi_heap_handle_t multi_heap_register_impl(void *start_ptr, size_t size)
{
assert(start_ptr);
if(size < (tlsf_size(NULL) + tlsf_block_size_min() + sizeof(heap_t))) {
if(size < (sizeof(heap_t))) {
//Region too small to be a heap.
return NULL;
}
@ -130,7 +130,10 @@ multi_heap_handle_t multi_heap_register_impl(void *start_ptr, size_t size)
heap_t *result = (heap_t *)start_ptr;
size -= sizeof(heap_t);
result->heap_data = tlsf_create_with_pool(start_ptr + sizeof(heap_t), size, 0);
/* Do not specify any maximum size for the allocations so that the default configuration is used */
const size_t max_bytes = 0;
result->heap_data = tlsf_create_with_pool(start_ptr + sizeof(heap_t), size, max_bytes);
if(!result->heap_data) {
return NULL;
}
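For illustration only (not part of the diff): a hypothetical registration of a small region through the public multi_heap_register() wrapper, which ends up in multi_heap_register_impl() shown above; the buffer name and size are made up. The up-front check now only requires room for heap_t, and whether the remaining bytes can hold the TLSF control structure plus one minimal block is decided inside control_construct().

#include "multi_heap.h"

/* Hypothetical region; real heaps are registered by the ESP-IDF heap init code. */
static unsigned char region[4 * 1024] __attribute__((aligned(4)));

void example_register(void)   /* hypothetical function */
{
    multi_heap_handle_t heap = multi_heap_register(region, sizeof(region));
    if (heap == NULL)
    {
        /* region could not hold heap_t, or tlsf_create_with_pool() failed */
    }
}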
@ -380,6 +383,6 @@ void multi_heap_get_info_impl(multi_heap_handle_t heap, multi_heap_info_t *info)
info->total_allocated_bytes = (heap->pool_size - tlsf_size(heap->heap_data)) - heap->free_bytes - overhead;
info->minimum_free_bytes = heap->minimum_free_bytes;
info->total_free_bytes = heap->free_bytes;
info->largest_free_block = tlsf_fit_size(heap->heap_data, info->largest_free_block);
info->largest_free_block = tlsf_fit_size(heap->heap_data, info->largest_free_block);
multi_heap_internal_unlock(heap);
}