/*
** Two Level Segregated Fit memory allocator, version 3.1.
** Written by Matthew Conte
**	http://tlsf.baisoku.org
**
** Based on the original documentation by Miguel Masmano:
**	http://www.gii.upv.es/tlsf/main/docs
**
** This implementation was written to the specification
** of the document, therefore no GPL restrictions apply.
**
** Copyright (c) 2006-2016, Matthew Conte
** All rights reserved.
**
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions are met:
**     * Redistributions of source code must retain the above copyright
**       notice, this list of conditions and the following disclaimer.
**     * Redistributions in binary form must reproduce the above copyright
**       notice, this list of conditions and the following disclaimer in the
**       documentation and/or other materials provided with the distribution.
**     * Neither the name of the copyright holder nor the
**       names of its contributors may be used to endorse or promote products
**       derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
** WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
** DISCLAIMED. IN NO EVENT SHALL MATTHEW CONTE BE LIABLE FOR ANY
** DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
** (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
** LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
** ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
/*
** Data structures and associated constants.
*/
/*
** Since block sizes are always at least a multiple of 4, the two least
** significant bits of the size field are used to store the block status:
** - bit 0: whether block is busy or free
** - bit 1: whether previous block is busy or free
*/
#define block_header_free_bit (1 << 0)
#define block_header_prev_free_bit (1 << 1)

/*
** The size of the block header exposed to used blocks is the size field.
** The prev_phys_block field is stored *inside* the previous free block.
*/
#define block_header_overhead (sizeof(size_t))

/* User data starts directly after the size field in a used block. */
#define block_start_offset (offsetof(block_header_t, size) + sizeof(size_t))

/*
** A free block must be large enough to store its header minus the size of
** the prev_phys_block field, and no larger than the number of addressable
** bits for FL_INDEX.
** The block_size_max macro returns the maximum block for the minimum pool;
** use tlsf_block_size_max for a value specific to the pool.
*/
#define block_size_min (sizeof(block_header_t) - sizeof(block_header_t*))
#define block_size_max (tlsf_cast(size_t, 1) << FL_INDEX_MAX_MIN)

/*
** block_header_t member functions.
*/
static inline __attribute__((__always_inline__)) size_t block_size(const block_header_t* block)
|
|
{
|
|
return block->size & ~(block_header_free_bit | block_header_prev_free_bit);
|
|
}
|
|
|
|
/* Store a new size in the block header, preserving the two status bits. */
static inline __attribute__((__always_inline__)) void block_set_size(block_header_t* block, size_t size)
{
    /* Keep the free/prev-free flags from the previous value of the field. */
    const size_t oldsize = block->size;
    block->size = size | (oldsize & (block_header_free_bit | block_header_prev_free_bit));
}

/* A block with size 0 marks the end of the pool (sentinel block). */
static inline __attribute__((__always_inline__)) int block_is_last(const block_header_t* block)
{
    return block_size(block) == 0;
}

static inline __attribute__((__always_inline__)) int block_is_free(const block_header_t* block)
|
|
{
|
|
return tlsf_cast(int, block->size & block_header_free_bit);
|
|
}
|
|
|
|
/* Mark the block as free (set bit 0 of the size field). */
static inline __attribute__((__always_inline__)) void block_set_free(block_header_t* block)
{
    block->size |= block_header_free_bit;
}

/* Mark the block as used (clear bit 0 of the size field). */
static inline __attribute__((__always_inline__)) void block_set_used(block_header_t* block)
{
    block->size &= ~block_header_free_bit;
}

static inline __attribute__((__always_inline__)) int block_is_prev_free(const block_header_t* block)
|
|
{
|
|
return tlsf_cast(int, block->size & block_header_prev_free_bit);
|
|
}
|
|
|
|
/* Record that the physically previous block is free (set bit 1). */
static inline __attribute__((__always_inline__)) void block_set_prev_free(block_header_t* block)
{
    block->size |= block_header_prev_free_bit;
}

/* Record that the physically previous block is used (clear bit 1). */
static inline __attribute__((__always_inline__)) void block_set_prev_used(block_header_t* block)
{
    block->size &= ~block_header_prev_free_bit;
}

static inline __attribute__((__always_inline__)) block_header_t* block_from_ptr(const void* ptr)
|
|
{
|
|
return tlsf_cast(block_header_t*,
|
|
tlsf_cast(unsigned char*, ptr) - block_start_offset);
|
|
}
|
|
|
|
static inline __attribute__((__always_inline__)) void* block_to_ptr(const block_header_t* block)
|
|
{
|
|
return tlsf_cast(void*,
|
|
tlsf_cast(unsigned char*, block) + block_start_offset);
|
|
}
|
|
|
|
/* Return location of next block after block of given size. */
|
|
static inline __attribute__((__always_inline__)) block_header_t* offset_to_block(const void* ptr, size_t size)
|
|
{
|
|
return tlsf_cast(block_header_t*, tlsf_cast(tlsfptr_t, ptr) + size);
|
|
}
|
|
|
|
/* Return location of previous block. */
|
|
static inline __attribute__((__always_inline__)) block_header_t* block_prev(const block_header_t* block)
|
|
{
|
|
return block->prev_phys_block;
|
|
}
|
|
|
|
/* Return location of next existing block. */
|
|
static inline __attribute__((__always_inline__)) block_header_t* block_next(const block_header_t* block)
|
|
{
|
|
block_header_t* next = offset_to_block(block_to_ptr(block),
|
|
block_size(block) - block_header_overhead);
|
|
return next;
|
|
}
|
|
|
|
/* Link a new block with its physical neighbor, return the neighbor. */
|
|
static inline __attribute__((__always_inline__)) block_header_t* block_link_next(block_header_t* block)
|
|
{
|
|
block_header_t* next = block_next(block);
|
|
next->prev_phys_block = block;
|
|
return next;
|
|
}
|
|
|
|
/* Mark a block free: set its free bit and the prev-free bit of its
** physical successor, keeping the two views of the boundary consistent. */
static inline __attribute__((__always_inline__)) void block_mark_as_free(block_header_t* block)
{
    /* Link the block to the next block, first. */
    block_header_t* next = block_link_next(block);
    block_set_prev_free(next);
    block_set_free(block);
}

/* Mark a block used: clear its free bit and the prev-free bit of its
** physical successor. */
static inline __attribute__((__always_inline__)) void block_mark_as_used(block_header_t* block)
{
    block_header_t* next = block_next(block);
    block_set_prev_used(next);
    block_set_used(block);
}
