Mirror of https://github.com/espressif/esp-idf.git (synced 2024-10-05 20:47:46 -04:00)
Merge branch 'bugfix/himem_error_checks' into 'master'
himem: reverse error check logic in himem

Closes IDFCI-857 and IDFCI-859

See merge request espressif/esp-idf!15009
This commit is contained in:
commit ba3b34583a
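
For context: ESP_RETURN_ON_FALSE (from esp_check.h) returns from the calling function when its condition is false, so the condition must state what is required for execution to continue. Before this fix, every condition was inverted; in esp_himem_init(), for instance, s_ram_descriptor != NULL is false on the very first call, so initialization always bailed out with "already initialized". A simplified paraphrase of the macro for reference (a hedged sketch, not the exact IDF definition):

    // Hedged sketch: paraphrase of ESP_RETURN_ON_FALSE from esp_check.h.
    // When the condition is false, log an error and return err_code.
    // In a void function such as esp_himem_init(), err_code is left
    // empty, so the body expands to a bare 'return;'.
    #define ESP_RETURN_ON_FALSE(a, err_code, log_tag, format, ...) do {   \
            if (!(a)) {                                                   \
                ESP_LOGE(log_tag, "%s(%d): " format,                      \
                         __FUNCTION__, __LINE__, ##__VA_ARGS__);          \
                return err_code;                                          \
            }                                                             \
        } while (0)
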
@@ -133,10 +133,10 @@ void __attribute__((constructor)) esp_himem_init(void)
     if (SPIRAM_BANKSWITCH_RESERVE == 0) return;
     int maxram=esp_spiram_get_size();
     //catch double init
-    ESP_RETURN_ON_FALSE(s_ram_descriptor != NULL, , TAG, "already initialized"); //Looks weird; last arg is empty so it expands to 'return ;'
-    ESP_RETURN_ON_FALSE(s_range_descriptor != NULL, , TAG, "already initialized");
+    ESP_RETURN_ON_FALSE(s_ram_descriptor == NULL, , TAG, "already initialized"); //Looks weird; last arg is empty so it expands to 'return ;'
+    ESP_RETURN_ON_FALSE(s_range_descriptor == NULL, , TAG, "already initialized");
     //need to have some reserved banks
-    ESP_RETURN_ON_FALSE(SPIRAM_BANKSWITCH_RESERVE == 0, , TAG, "No banks reserved for himem");
+    ESP_RETURN_ON_FALSE(SPIRAM_BANKSWITCH_RESERVE != 0, , TAG, "No banks reserved for himem");
     //Start and end of physical reserved memory. Note it starts slightly under
     //the 4MiB mark as the reserved banks can't have an unity mapping to be used by malloc
     //anymore; we treat them as himem instead.
@@ -218,7 +218,7 @@ esp_err_t esp_himem_free(esp_himem_handle_t handle)
     //Check if any of the blocks is still mapped; fail if this is the case.
     for (int i = 0; i < handle->block_ct; i++) {
         assert(ramblock_idx_valid(handle->block[i]));
-        ESP_RETURN_ON_FALSE(s_ram_descriptor[handle->block[i]].is_mapped, ESP_ERR_INVALID_ARG, TAG, "block in range still mapped");
+        ESP_RETURN_ON_FALSE(!s_ram_descriptor[handle->block[i]].is_mapped, ESP_ERR_INVALID_ARG, TAG, "block in range still mapped");
     }
     //Mark blocks as free
     portENTER_CRITICAL(&spinlock);
@@ -236,8 +236,8 @@ esp_err_t esp_himem_free(esp_himem_handle_t handle)
 
 esp_err_t esp_himem_alloc_map_range(size_t size, esp_himem_rangehandle_t *handle_out)
 {
-    ESP_RETURN_ON_FALSE(s_ram_descriptor == NULL, ESP_ERR_INVALID_STATE, TAG, "Himem not available!");
-    ESP_RETURN_ON_FALSE(size % CACHE_BLOCKSIZE != 0, ESP_ERR_INVALID_SIZE, TAG, "requested size not aligned to blocksize");
+    ESP_RETURN_ON_FALSE(s_ram_descriptor != NULL, ESP_ERR_INVALID_STATE, TAG, "Himem not available!");
+    ESP_RETURN_ON_FALSE(size % CACHE_BLOCKSIZE == 0, ESP_ERR_INVALID_SIZE, TAG, "requested size not aligned to blocksize");
     int blocks = size / CACHE_BLOCKSIZE;
     esp_himem_rangedata_t *r = calloc(sizeof(esp_himem_rangedata_t), 1);
     if (!r) {
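
With the guards un-inverted, esp_himem_alloc_map_range() now requires (rather than forbids) an initialized driver and a block-aligned size. A hedged caller sketch, assuming the public ESP_HIMEM_BLKSZ constant from esp32/himem.h mirrors the internal CACHE_BLOCKSIZE:

    #include "esp32/himem.h"

    esp_himem_rangehandle_t range;
    // A whole number of blocks passes the '% CACHE_BLOCKSIZE == 0' check;
    // an unaligned size now returns ESP_ERR_INVALID_SIZE instead of passing.
    esp_err_t err = esp_himem_alloc_map_range(4 * ESP_HIMEM_BLKSZ, &range);
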
@@ -279,7 +279,7 @@ esp_err_t esp_himem_free_map_range(esp_himem_rangehandle_t handle)
     for (int i = 0; i < handle->block_ct; i++) {
         assert(rangeblock_idx_valid(handle->block_start + i));
         assert(s_range_descriptor[i + handle->block_start].is_alloced == 1); //should be, if handle is valid
-        ESP_RETURN_ON_FALSE(s_range_descriptor[i + handle->block_start].is_mapped, ESP_ERR_INVALID_ARG, TAG, "memory still mapped to range");
+        ESP_RETURN_ON_FALSE(!s_range_descriptor[i + handle->block_start].is_mapped, ESP_ERR_INVALID_ARG, TAG, "memory still mapped to range");
     }
     //We should be good to free this. Mark blocks as free.
     portENTER_CRITICAL(&spinlock);
@@ -297,19 +297,19 @@ esp_err_t esp_himem_map(esp_himem_handle_t handle, esp_himem_rangehandle_t range
     int ram_block = ram_offset / CACHE_BLOCKSIZE;
     int range_block = range_offset / CACHE_BLOCKSIZE;
     int blockcount = len / CACHE_BLOCKSIZE;
-    ESP_RETURN_ON_FALSE(s_ram_descriptor == NULL, ESP_ERR_INVALID_STATE, TAG, "Himem not available!");
+    ESP_RETURN_ON_FALSE(s_ram_descriptor != NULL, ESP_ERR_INVALID_STATE, TAG, "Himem not available!");
     //Offsets and length must be block-aligned
-    ESP_RETURN_ON_FALSE(ram_offset % CACHE_BLOCKSIZE != 0, ESP_ERR_INVALID_ARG, TAG, "ram offset not aligned to blocksize");
-    ESP_RETURN_ON_FALSE(range_offset % CACHE_BLOCKSIZE != 0, ESP_ERR_INVALID_ARG, TAG, "range not aligned to blocksize");
-    ESP_RETURN_ON_FALSE(len % CACHE_BLOCKSIZE != 0, ESP_ERR_INVALID_ARG, TAG, "length not aligned to blocksize");
+    ESP_RETURN_ON_FALSE(ram_offset % CACHE_BLOCKSIZE == 0, ESP_ERR_INVALID_ARG, TAG, "ram offset not aligned to blocksize");
+    ESP_RETURN_ON_FALSE(range_offset % CACHE_BLOCKSIZE == 0, ESP_ERR_INVALID_ARG, TAG, "range not aligned to blocksize");
+    ESP_RETURN_ON_FALSE(len % CACHE_BLOCKSIZE == 0, ESP_ERR_INVALID_ARG, TAG, "length not aligned to blocksize");
     //ram and range should be within allocated range
-    ESP_RETURN_ON_FALSE(ram_block + blockcount > handle->block_ct, ESP_ERR_INVALID_SIZE, TAG, "args not in range of phys ram handle");
-    ESP_RETURN_ON_FALSE(range_block + blockcount > range->block_ct, ESP_ERR_INVALID_SIZE, TAG, "args not in range of range handle");
+    ESP_RETURN_ON_FALSE(ram_block + blockcount <= handle->block_ct, ESP_ERR_INVALID_SIZE, TAG, "args not in range of phys ram handle");
+    ESP_RETURN_ON_FALSE(range_block + blockcount <= range->block_ct, ESP_ERR_INVALID_SIZE, TAG, "args not in range of range handle");
 
     //Check if ram blocks aren't already mapped, and if memory range is unmapped
     for (int i = 0; i < blockcount; i++) {
-        ESP_RETURN_ON_FALSE(s_ram_descriptor[handle->block[i + ram_block]].is_mapped, ESP_ERR_INVALID_STATE, TAG, "ram already mapped");
-        ESP_RETURN_ON_FALSE(s_range_descriptor[range->block_start + i + range_block].is_mapped, ESP_ERR_INVALID_STATE, TAG, "range already mapped");
+        ESP_RETURN_ON_FALSE(!s_ram_descriptor[handle->block[i + ram_block]].is_mapped, ESP_ERR_INVALID_STATE, TAG, "ram already mapped");
+        ESP_RETURN_ON_FALSE(!s_range_descriptor[range->block_start + i + range_block].is_mapped, ESP_ERR_INVALID_STATE, TAG, "range already mapped");
     }
 
     //Map and mark as mapped
@@ -338,9 +338,9 @@ esp_err_t esp_himem_unmap(esp_himem_rangehandle_t range, void *ptr, size_t len)
     int range_offset = (uint32_t)ptr - VIRT_HIMEM_RANGE_START;
     int range_block = (range_offset / CACHE_BLOCKSIZE) - range->block_start;
     int blockcount = len / CACHE_BLOCKSIZE;
-    ESP_RETURN_ON_FALSE(range_offset % CACHE_BLOCKSIZE != 0, ESP_ERR_INVALID_ARG, TAG, "range offset not block-aligned");
-    ESP_RETURN_ON_FALSE(len % CACHE_BLOCKSIZE != 0, ESP_ERR_INVALID_ARG, TAG, "map length not block-aligned");
-    ESP_RETURN_ON_FALSE(range_block + blockcount > range->block_ct, ESP_ERR_INVALID_ARG, TAG, "range out of bounds for handle");
+    ESP_RETURN_ON_FALSE(range_offset % CACHE_BLOCKSIZE == 0, ESP_ERR_INVALID_ARG, TAG, "range offset not block-aligned");
+    ESP_RETURN_ON_FALSE(len % CACHE_BLOCKSIZE == 0, ESP_ERR_INVALID_ARG, TAG, "map length not block-aligned");
+    ESP_RETURN_ON_FALSE(range_block + blockcount <= range->block_ct, ESP_ERR_INVALID_ARG, TAG, "range out of bounds for handle");
 
     portENTER_CRITICAL(&spinlock);
     for (int i = 0; i < blockcount; i++) {
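
Taken together, the corrected checks encode the intended call contract: map only unmapped blocks, unmap before freeing, and keep every offset and length block-aligned. A minimal end-to-end sketch under those rules; esp_himem_alloc() and ESP_HIMEM_BLKSZ are not part of this diff and are assumed from the same esp32/himem.h API:

    #include <string.h>
    #include "esp32/himem.h"
    #include "esp_check.h"

    static const char *TAG = "himem_demo"; // hypothetical demo tag

    esp_err_t himem_demo(void)
    {
        esp_himem_handle_t mem;
        esp_himem_rangehandle_t range;
        void *ptr;
        size_t len = 2 * ESP_HIMEM_BLKSZ; // block-aligned, as the guards require

        // Physical himem blocks, plus a virtual window to view them through.
        ESP_RETURN_ON_ERROR(esp_himem_alloc(len, &mem), TAG, "alloc failed");
        ESP_RETURN_ON_ERROR(esp_himem_alloc_map_range(len, &range), TAG, "range alloc failed");

        // Map: offsets and length must be block-aligned, fit inside both
        // handles, and target blocks that are not already mapped.
        ESP_RETURN_ON_ERROR(esp_himem_map(mem, range, 0, 0, len, 0, &ptr), TAG, "map failed");
        memset(ptr, 0xAA, len);

        // Unmap first: esp_himem_free()/esp_himem_free_map_range() now
        // correctly fail with ESP_ERR_INVALID_ARG while anything is mapped.
        ESP_RETURN_ON_ERROR(esp_himem_unmap(range, ptr, len), TAG, "unmap failed");
        ESP_RETURN_ON_ERROR(esp_himem_free_map_range(range), TAG, "range free failed");
        ESP_RETURN_ON_ERROR(esp_himem_free(mem), TAG, "free failed");
        return ESP_OK;
    }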