fix(build): fix calloc warnings

Alexey Lapshin 2024-05-10 12:09:16 +04:00
parent 47212eaa3a
commit 4c87af6359
8 changed files with 30 additions and 30 deletions
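
For context, the C library prototype is void *calloc(size_t nmemb, size_t size): the element count comes first, the element size second. Newer GCC versions warn when a sizeof expression is passed in the count position (presumably the -Wcalloc-transposed-args diagnostic added in GCC 14, though the commit message does not name the warning), so the hunks below swap the arguments rather than change how much memory is allocated. A minimal sketch of the before/after pattern, using a made-up struct name:

    #include <stdlib.h>

    typedef struct { int a; int b; } item_t;   /* hypothetical type for illustration */

    int main(void)
    {
        /* Old style: size first, count second. Allocates the same memory,
         * but the sizeof expression sits in the element-count slot. */
        item_t *before = calloc(sizeof(item_t), 1);

        /* Fixed style: count first, size second, matching the prototype
         * void *calloc(size_t nmemb, size_t size); */
        item_t *after = calloc(1, sizeof(item_t));

        free(before);
        free(after);
        return 0;
    }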

View File

@@ -157,7 +157,7 @@ esp_err_t esp_ota_begin(const esp_partition_t *partition, size_t image_size, esp
}
}
-new_entry = (ota_ops_entry_t *) calloc(sizeof(ota_ops_entry_t), 1);
+new_entry = (ota_ops_entry_t *) calloc(1, sizeof(ota_ops_entry_t));
if (new_entry == NULL) {
return ESP_ERR_NO_MEM;
}

View File

@@ -1,5 +1,5 @@
/*
-* SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
+* SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -33,7 +33,7 @@
* This lock is designed to solve the conflicts between SPI devices (used in tasks) and
* the background operations (ISR or cache access).
*
-* There are N (device/task) + 1 (BG) acquiring processer candidates that may touch the bus.
+* There are N (device/task) + 1 (BG) acquiring processor candidates that may touch the bus.
*
* The core of the lock is a `status` atomic variable, which is always available. No intermediate
* status is allowed. The atomic operations (mainly `atomic_fetch_and`, `atomic_fetch_or`)
@@ -49,7 +49,7 @@
* state of devices. Either one of REQ or PENDING being active indicates the device has pending BG
* requests. Reason of having two bits instead of one is in the appendix below.
*
-* Acquiring processer means the current processor (task or ISR) allowed to touch the critical
+* Acquiring processor means the current processor (task or ISR) allowed to touch the critical
* resources, or the SPI bus.
*
* States of the lock:
@@ -168,7 +168,7 @@ typedef struct spi_bus_lock_t spi_bus_lock_t;
* This flag is weak, will not prevent acquiring of devices. But will help the BG to be re-enabled again after the bus is release.
*/
-// get the bit mask wher bit [high-1, low] are all 1'b1 s.
+// get the bit mask where bit [high-1, low] are all 1'b1 s.
#define BIT1_MASK(high, low) ((UINT32_MAX << (high)) ^ (UINT32_MAX << (low)))
#define LOCK_BIT(mask) ((mask) << LOCK_SHIFT)
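
As a side note on the BIT1_MASK macro shown just above, a worked expansion with arbitrary example values (not taken from the driver) confirms the "bits [high-1, low] are all 1" comment:

    /* BIT1_MASK(4, 1)
     *   = (UINT32_MAX << 4) ^ (UINT32_MAX << 1)
     *   = 0xFFFFFFF0        ^ 0xFFFFFFFE
     *   = 0x0000000E          -> bits 3..1 set, all other bits clear
     */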
@@ -238,7 +238,7 @@ struct spi_bus_lock_dev_t {
* acquire_end_core():
* uint32_t status = lock_status_clear(lock, dev_handle->mask & LOCK_MASK);
*
-* Becuase this is the first `spi_hdl_1`, so after this , lock_bits == 0`b0. status == 0
+* Because this is the first `spi_hdl_1`, so after this , lock_bits == 0`b0. status == 0
*
* 2. spi_hdl_2:
* acquire_core:
@@ -254,7 +254,7 @@ struct spi_bus_lock_dev_t {
*
* 5. spi_hdl_1:
* acquire_end_core:
-* status is 0, so it cleas the lock->acquiring_dev
+* status is 0, so it clears the lock->acquiring_dev
*
* 6. spi_hdl_2:
* spi_device_polling_end:
@@ -482,7 +482,7 @@ SPI_BUS_LOCK_ISR_ATTR static inline void update_pend_core(spi_bus_lock_t *lock,
}
// Clear the PEND bit (not REQ bit!) of a device, return the suggestion whether we can try to quit the ISR.
-// Lost the acquiring processor immediately when the BG bits for active device are inactive, indiciating by the return value.
+// Lost the acquiring processor immediately when the BG bits for active device are inactive, indicating by the return value.
// Can be called only when ISR is acting as the acquiring processor.
SPI_BUS_LOCK_ISR_ATTR static inline bool clear_pend_core(spi_bus_lock_dev_t *dev_handle)
{
@@ -585,7 +585,7 @@ SPI_BUS_LOCK_ISR_ATTR static inline esp_err_t dev_wait(spi_bus_lock_dev_t *dev_h
******************************************************************************/
esp_err_t spi_bus_init_lock(spi_bus_lock_handle_t *out_lock, const spi_bus_lock_config_t *config)
{
-spi_bus_lock_t* lock = (spi_bus_lock_t*)calloc(sizeof(spi_bus_lock_t), 1);
+spi_bus_lock_t* lock = (spi_bus_lock_t*)calloc(1, sizeof(spi_bus_lock_t));
if (lock == NULL) {
return ESP_ERR_NO_MEM;
}
@@ -644,7 +644,7 @@ esp_err_t spi_bus_lock_register_dev(spi_bus_lock_handle_t lock, spi_bus_lock_dev
return ESP_ERR_NOT_SUPPORTED;
}
-spi_bus_lock_dev_t* dev_lock = (spi_bus_lock_dev_t*)heap_caps_calloc(sizeof(spi_bus_lock_dev_t), 1, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
+spi_bus_lock_dev_t* dev_lock = (spi_bus_lock_dev_t*)heap_caps_calloc(1, sizeof(spi_bus_lock_dev_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
if (dev_lock == NULL) {
return ESP_ERR_NO_MEM;
}
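
The same count-then-size order applies to ESP-IDF's capability-aware allocator used in the last hunk, void *heap_caps_calloc(size_t n, size_t size, uint32_t caps). A minimal sketch, with a hypothetical struct standing in for spi_bus_lock_dev_t:

    #include <stdint.h>
    #include "esp_heap_caps.h"

    typedef struct { uint32_t mask; } demo_dev_t;   /* hypothetical type */

    static demo_dev_t *demo_dev_alloc(void)
    {
        /* Count first, size second, then the capability flags, mirroring the
         * standard calloc() argument order that this commit enforces. */
        demo_dev_t *dev = heap_caps_calloc(1, sizeof(demo_dev_t),
                                           MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
        return dev;   /* NULL -> caller returns ESP_ERR_NO_MEM, as in the driver */
    }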

View File

@@ -132,7 +132,7 @@ static esp_err_t load_partitions(void)
#endif
// allocate new linked list item and populate it with data from partition table
-partition_list_item_t *item = (partition_list_item_t *) calloc(sizeof(partition_list_item_t), 1);
+partition_list_item_t *item = (partition_list_item_t *) calloc(1, sizeof(partition_list_item_t));
if (item == NULL) {
err = ESP_ERR_NO_MEM;
break;
@@ -406,7 +406,7 @@ esp_err_t esp_partition_register_external(esp_flash_t *flash_chip, size_t offset
return err;
}
-partition_list_item_t *item = (partition_list_item_t *) calloc(sizeof(partition_list_item_t), 1);
+partition_list_item_t *item = (partition_list_item_t *) calloc(1, sizeof(partition_list_item_t));
if (item == NULL) {
return ESP_ERR_NO_MEM;
}
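
A side benefit visible in these hunks: calloc zero-fills the allocation, so a freshly created list item starts with its pointer fields cleared (on ESP-IDF's targets, as on essentially all modern platforms, all-bits-zero reads as NULL). A small sketch with a hypothetical node type, not the real partition_list_item_t:

    #include <stdlib.h>

    typedef struct node_demo {
        struct node_demo *next;   /* hypothetical stand-in */
        int payload;
    } node_demo_t;

    static node_demo_t *node_demo_new(void)
    {
        /* One zero-initialized element: item->next is already NULL,
         * so the node can be linked without an explicit memset. */
        node_demo_t *item = calloc(1, sizeof(node_demo_t));
        return item;   /* NULL on allocation failure */
    }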

View File

@@ -820,7 +820,7 @@ void esp_phy_load_cal_and_init(void)
#endif
esp_phy_calibration_data_t* cal_data =
-(esp_phy_calibration_data_t*) calloc(sizeof(esp_phy_calibration_data_t), 1);
+(esp_phy_calibration_data_t*) calloc(1, sizeof(esp_phy_calibration_data_t));
if (cal_data == NULL) {
ESP_LOGE(TAG, "failed to allocate memory for RF calibration data");
abort();

View File

@@ -1,5 +1,5 @@
/*
-* SPDX-FileCopyrightText: 2018-2023 Espressif Systems (Shanghai) CO LTD
+* SPDX-FileCopyrightText: 2018-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -28,7 +28,7 @@ on B. Note that this goes for every 32-byte cache line: this implies that if a p
A, the write to Y may show up before the write to X does.
It gets even worse when both A and B are written: theoretically, a write to a 32-byte cache line in A can be entirely
-undone because of a write to a different addres in B that happens to be in the same 32-byte cache line.
+undone because of a write to a different address in B that happens to be in the same 32-byte cache line.
Because of these reasons, we do not allow double mappings at all. This, however, has other implications that make
supporting ranges not really useful. Because the lack of double mappings, applications will need to do their own
@@ -153,8 +153,8 @@ void __attribute__((constructor)) esp_himem_init(void)
int paddr_end = maxram;
s_ramblockcnt = ((paddr_end - paddr_start) / CACHE_BLOCKSIZE);
//Allocate data structures
-s_ram_descriptor = calloc(sizeof(ramblock_t), s_ramblockcnt);
-s_range_descriptor = calloc(sizeof(rangeblock_t), SPIRAM_BANKSWITCH_RESERVE);
+s_ram_descriptor = calloc(s_ramblockcnt, sizeof(ramblock_t));
+s_range_descriptor = calloc(SPIRAM_BANKSWITCH_RESERVE, sizeof(rangeblock_t));
if (s_ram_descriptor == NULL || s_range_descriptor == NULL) {
ESP_EARLY_LOGE(TAG, "Cannot allocate memory for meta info. Not initializing!");
free(s_ram_descriptor);
@@ -195,11 +195,11 @@ esp_err_t esp_himem_alloc(size_t size, esp_himem_handle_t *handle_out)
return ESP_ERR_INVALID_SIZE;
}
int blocks = size / CACHE_BLOCKSIZE;
-esp_himem_ramdata_t *r = calloc(sizeof(esp_himem_ramdata_t), 1);
+esp_himem_ramdata_t *r = calloc(1, sizeof(esp_himem_ramdata_t));
if (!r) {
goto nomem;
}
-r->block = calloc(sizeof(uint16_t), blocks);
+r->block = calloc(blocks, sizeof(uint16_t));
if (!r->block) {
goto nomem;
}
@@ -245,7 +245,7 @@ esp_err_t esp_himem_alloc_map_range(size_t size, esp_himem_rangehandle_t *handle
ESP_RETURN_ON_FALSE(s_ram_descriptor != NULL, ESP_ERR_INVALID_STATE, TAG, "Himem not available!");
ESP_RETURN_ON_FALSE(size % CACHE_BLOCKSIZE == 0, ESP_ERR_INVALID_SIZE, TAG, "requested size not aligned to blocksize");
int blocks = size / CACHE_BLOCKSIZE;
-esp_himem_rangedata_t *r = calloc(sizeof(esp_himem_rangedata_t), 1);
+esp_himem_rangedata_t *r = calloc(1, sizeof(esp_himem_rangedata_t));
if (!r) {
return ESP_ERR_NO_MEM;
}
@@ -338,7 +338,7 @@ esp_err_t esp_himem_map(esp_himem_handle_t handle, esp_himem_rangehandle_t range
esp_err_t esp_himem_unmap(esp_himem_rangehandle_t range, void *ptr, size_t len)
{
//Note: doesn't actually unmap, just clears cache and marks blocks as unmapped.
-//Future optimization: could actually lazy-unmap here: essentially, do nothing and only clear the cache when we re-use
+//Future optimization: could actually lazy-unmap here: essentially, do nothing and only clear the cache when we reuse
//the block for a different physical address.
int range_offset = (uint32_t)ptr - VIRT_HIMEM_RANGE_START;
int range_block = (range_offset / CACHE_BLOCKSIZE) - range->block_start;
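
Unlike the calloc(1, sizeof(T)) cases, the himem hunks above pass a real element count, which is where the nmemb/size split matters: calloc allocates an array of nmemb objects and, on conforming implementations, returns NULL instead of wrapping silently when nmemb * size overflows. A small sketch with an arbitrary block count and a stand-in type:

    #include <stdint.h>
    #include <stdlib.h>

    typedef struct { uint16_t flags; } block_demo_t;   /* stand-in for ramblock_t */

    int main(void)
    {
        size_t blockcnt = 128;   /* arbitrary example count */

        /* Count first, size second: an array of blockcnt zeroed blocks. */
        block_demo_t *blocks = calloc(blockcnt, sizeof(block_demo_t));
        if (blocks == NULL) {
            return 1;
        }

        /* A deliberately oversized request: the multiplication would overflow,
         * so calloc is expected to fail rather than return a short buffer. */
        void *huge = calloc(SIZE_MAX / 2, 4);
        if (huge != NULL) {
            free(huge);   /* not expected on a conforming implementation */
        }

        free(blocks);
        return 0;
    }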

View File

@@ -292,7 +292,7 @@ esp_err_t esp_vfs_fat_sdmmc_mount(const char* base_path,
s_saved_ctx_id = 0;
}
-ctx = calloc(sizeof(vfs_fat_sd_ctx_t), 1);
+ctx = calloc(1, sizeof(vfs_fat_sd_ctx_t));
if (!ctx) {
CHECK_EXECUTE_RESULT(ESP_ERR_NO_MEM, "no mem");
}
@@ -390,7 +390,7 @@ esp_err_t esp_vfs_fat_sdspi_mount(const char* base_path,
s_saved_ctx_id = 0;
}
-ctx = calloc(sizeof(vfs_fat_sd_ctx_t), 1);
+ctx = calloc(1, sizeof(vfs_fat_sd_ctx_t));
if (!ctx) {
CHECK_EXECUTE_RESULT(ESP_ERR_NO_MEM, "no mem");
}

View File

@@ -163,7 +163,7 @@ esp_err_t esp_vfs_fat_spiflash_mount_rw_wl(const char* base_path,
goto fail;
}
-ctx = calloc(sizeof(vfs_fat_spiflash_ctx_t), 1);
+ctx = calloc(1, sizeof(vfs_fat_spiflash_ctx_t));
ESP_GOTO_ON_FALSE(ctx, ESP_ERR_NO_MEM, fail, TAG, "no mem");
ctx->partition = data_partition;
ctx->by_label = (partition_label != NULL);

View File

@@ -1,5 +1,5 @@
/*
-* SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
+* SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -204,7 +204,7 @@ static esp_err_t esp_spiffs_init(const esp_vfs_spiffs_conf_t* conf)
return ESP_ERR_INVALID_ARG;
}
-esp_spiffs_t * efs = calloc(sizeof(esp_spiffs_t), 1);
+esp_spiffs_t * efs = calloc(1, sizeof(esp_spiffs_t));
if (efs == NULL) {
ESP_LOGE(TAG, "esp_spiffs could not be malloced");
return ESP_ERR_NO_MEM;
@@ -229,7 +229,7 @@ static esp_err_t esp_spiffs_init(const esp_vfs_spiffs_conf_t* conf)
}
efs->fds_sz = conf->max_files * sizeof(spiffs_fd);
-efs->fds = calloc(efs->fds_sz, 1);
+efs->fds = calloc(1, efs->fds_sz);
if (efs->fds == NULL) {
ESP_LOGE(TAG, "fd buffer could not be allocated");
esp_spiffs_free(&efs);
@@ -239,7 +239,7 @@ static esp_err_t esp_spiffs_init(const esp_vfs_spiffs_conf_t* conf)
#if SPIFFS_CACHE
efs->cache_sz = sizeof(spiffs_cache) + conf->max_files * (sizeof(spiffs_cache_page)
+ efs->cfg.log_page_size);
-efs->cache = calloc(efs->cache_sz, 1);
+efs->cache = calloc(1, efs->cache_sz);
if (efs->cache == NULL) {
ESP_LOGE(TAG, "cache buffer could not be allocated");
esp_spiffs_free(&efs);
@@ -248,14 +248,14 @@ static esp_err_t esp_spiffs_init(const esp_vfs_spiffs_conf_t* conf)
#endif
const uint32_t work_sz = efs->cfg.log_page_size * 2;
-efs->work = calloc(work_sz, 1);
+efs->work = calloc(1, work_sz);
if (efs->work == NULL) {
ESP_LOGE(TAG, "work buffer could not be allocated");
esp_spiffs_free(&efs);
return ESP_ERR_NO_MEM;
}
-efs->fs = calloc(sizeof(spiffs), 1);
+efs->fs = calloc(1, sizeof(spiffs));
if (efs->fs == NULL) {
ESP_LOGE(TAG, "spiffs could not be allocated");
esp_spiffs_free(&efs);