Mirror of https://github.com/espressif/esp-idf.git (synced 2024-10-05 20:47:46 -04:00)
core system: Fix compilation warnings when assertions are disabled
Adds a CI config for hello world that builds with assertions disabled, to catch future regressions.
This commit is contained in:
parent
ac776657c9
commit
d6f4d99d93
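Context for the diff below (editorial note, not part of the commit): when assertions are compiled out via NDEBUG (as with CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_DISABLE), a variable whose only reader is an assert() becomes set-but-unused and trips -Wunused-variable / -Wunused-but-set-variable. The commit silences this with two idioms, shown in the minimal C sketch below; do_work() and the two idiom_* functions are hypothetical stand-ins for illustration, not IDF APIs.

#include <assert.h>
#include <stdbool.h>

/* Hypothetical helper standing in for any call whose status is only checked by assert(). */
static int do_work(void)
{
    return 0;
}

void idiom_void_cast(void)
{
    int ret = do_work();
    assert(ret == 0);
    (void)ret;  /* counts as a use, so the variable is not "set but unused" when NDEBUG removes the assert */
}

void idiom_unused_attribute(void)
{
    /* GCC/Clang: the unused attribute tells the compiler the variable may legitimately go unread */
    bool ok __attribute__((unused));
    ok = (do_work() == 0);
    assert(ok);
}

Several other call sites in the diff are instead switched to ESP_ERROR_CHECK(), so the result is consumed by the macro rather than by a bare assert().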
@@ -39,6 +39,7 @@ void bootloader_sha256_data(bootloader_sha256_handle_t handle, const void *data,
 mbedtls_sha256_context *ctx = (mbedtls_sha256_context *)handle;
 int ret = mbedtls_sha256_update_ret(ctx, data, data_len);
 assert(ret == 0);
+(void)ret;
 }

 void bootloader_sha256_finish(bootloader_sha256_handle_t handle, uint8_t *digest)
@@ -48,6 +49,7 @@ void bootloader_sha256_finish(bootloader_sha256_handle_t handle, uint8_t *digest
 if (digest != NULL) {
 int ret = mbedtls_sha256_finish_ret(ctx, digest);
 assert(ret == 0);
+(void)ret;
 }
 mbedtls_sha256_free(ctx);
 free(handle);
@@ -599,3 +599,13 @@ if(CONFIG_BT_ENABLED)
 target_link_libraries(${COMPONENT_LIB} PUBLIC btdm_app btbb)
 endif()
 endif()
+
+if(CONFIG_BT_NIMBLE_MESH)
+set_source_files_properties("host/nimble/nimble/nimble/host/mesh/src/net.c"
+PROPERTIES COMPILE_FLAGS -Wno-type-limits)
+endif()
+
+if(CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_DISABLE AND CONFIG_BT_NIMBLE_ENABLED)
+# some variables in NimBLE are only used by asserts
+target_compile_options(${COMPONENT_LIB} PRIVATE -Wno-unused-but-set-variable -Wno-unused-variable)
+endif()
@@ -144,6 +144,7 @@ bool hash_map_set(hash_map_t *hash_map, const void *key, void *data)
 // Calls hash_map callback to delete the hash_map_entry.
 bool rc = list_remove(hash_bucket_list, hash_map_entry);
 assert(rc == true);
+(void)rc;
 } else {
 hash_map->hash_size++;
 }
@@ -1309,7 +1309,8 @@ esp_err_t esp_bt_controller_init(esp_bt_controller_config_t *cfg)
 btdm_lpclk_sel = BTDM_LPCLK_SEL_XTAL; // set default value
 #endif

-bool select_src_ret, set_div_ret;
+bool select_src_ret __attribute__((unused));
+bool set_div_ret __attribute__((unused));
 if (btdm_lpclk_sel == BTDM_LPCLK_SEL_XTAL) {
 select_src_ret = btdm_lpclk_select_src(BTDM_LPCLK_SEL_XTAL);
 set_div_ret = btdm_lpclk_set_div(rtc_clk_xtal_freq_get() * 2 - 1);
@@ -1050,7 +1050,8 @@ esp_err_t esp_bt_controller_init(esp_bt_controller_config_t *cfg)
 s_lp_cntl.lpclk_sel = BTDM_LPCLK_SEL_XTAL; // set default value
 #endif

-bool select_src_ret, set_div_ret;
+bool select_src_ret __attribute__((unused));
+bool set_div_ret __attribute__((unused));
 if (s_lp_cntl.lpclk_sel == BTDM_LPCLK_SEL_XTAL) {
 select_src_ret = btdm_lpclk_select_src(BTDM_LPCLK_SEL_XTAL);
 set_div_ret = btdm_lpclk_set_div(rtc_clk_xtal_freq_get() * 2);
@@ -988,8 +988,8 @@ esp_err_t esp_bt_controller_init(esp_bt_controller_config_t *cfg)
 cfg->sleep_clock = ESP_BT_SLEEP_CLOCK_FPGA_32K;
 ESP_LOGW(BTDM_LOG_TAG, "%s sleep clock overwrite on FPGA", __func__);
 #endif
-bool select_src_ret = false;
-bool set_div_ret = false;
+bool select_src_ret __attribute__((unused));
+bool set_div_ret __attribute__((unused));
 if (cfg->sleep_clock == ESP_BT_SLEEP_CLOCK_MAIN_XTAL) {
 select_src_ret = btdm_lpclk_select_src(BTDM_LPCLK_SEL_XTAL);
 set_div_ret = btdm_lpclk_set_div(rtc_clk_xtal_freq_get() * 2);
@@ -227,19 +227,19 @@ static uint8_t *read_command_complete_header(
 uint8_t *stream = response->data + response->offset;

 // Read the event header
-uint8_t event_code;
-uint8_t parameter_length;
+uint8_t event_code __attribute__((unused));
+uint8_t parameter_length __attribute__((unused));
 STREAM_TO_UINT8(event_code, stream);
 STREAM_TO_UINT8(parameter_length, stream);

-const size_t parameter_bytes_we_read_here = 4;
+const size_t parameter_bytes_we_read_here __attribute__((unused)) = 4;

 // Check the event header values against what we expect
 assert(event_code == HCI_COMMAND_COMPLETE_EVT);
 assert(parameter_length >= (parameter_bytes_we_read_here + minimum_bytes_after));

 // Read the command complete header
-command_opcode_t opcode;
+command_opcode_t opcode __attribute__((unused));
 uint8_t status;
 STREAM_SKIP_UINT8(stream); // skip the number of hci command packets field
 STREAM_TO_UINT16(opcode, stream);
@@ -141,7 +141,7 @@ static void reassemble_and_dispatch(BT_HDR *packet)
 uint8_t *stream = packet->data + packet->offset;
 uint16_t handle;
 uint16_t l2cap_length;
-uint16_t acl_length;
+uint16_t acl_length __attribute__((unused));

 STREAM_TO_UINT16(handle, stream);
 STREAM_TO_UINT16(acl_length, stream);
@@ -82,6 +82,7 @@ static void wait_for_guard_obj(guard_t* g)
 do {
 auto result = xSemaphoreGive(s_static_init_mutex);
 assert(result);
+static_cast<void>(result);
 /* Task may be preempted here, but this isn't a problem,
 * as the semaphore will be given exactly the s_static_init_waiting_count
 * number of times; eventually the current task will execute next statement,
@@ -140,6 +141,7 @@ extern "C" int __cxa_guard_acquire(__guard* pg)
 */
 auto result = xSemaphoreTake(s_static_init_mutex, portMAX_DELAY);
 assert(result);
+static_cast<void>(result);
 if (g->pending) {
 /* Another task is doing initialization at the moment; wait until it calls
 * __cxa_guard_release or __cxa_guard_abort
@@ -168,6 +170,7 @@ extern "C" int __cxa_guard_acquire(__guard* pg)
 if (scheduler_started) {
 auto result = xSemaphoreGive(s_static_init_mutex);
 assert(result);
+static_cast<void>(result);
 }
 return ret;
 }
@@ -179,6 +182,7 @@ extern "C" void __cxa_guard_release(__guard* pg)
 if (scheduler_started) {
 auto result = xSemaphoreTake(s_static_init_mutex, portMAX_DELAY);
 assert(result);
+static_cast<void>(result);
 }
 assert(g->pending && "tried to release a guard which wasn't acquired");
 g->pending = 0;
@@ -189,6 +193,7 @@ extern "C" void __cxa_guard_release(__guard* pg)
 signal_waiting_tasks();
 auto result = xSemaphoreGive(s_static_init_mutex);
 assert(result);
+static_cast<void>(result);
 }
 }

@@ -199,6 +204,7 @@ extern "C" void __cxa_guard_abort(__guard* pg)
 if (scheduler_started) {
 auto result = xSemaphoreTake(s_static_init_mutex, portMAX_DELAY);
 assert(result);
+static_cast<void>(result);
 }
 assert(!g->ready && "tried to abort a guard which is ready");
 assert(g->pending && "tried to release a guard which is not acquired");
@@ -208,6 +214,7 @@ extern "C" void __cxa_guard_abort(__guard* pg)
 signal_waiting_tasks();
 auto result = xSemaphoreGive(s_static_init_mutex);
 assert(result);
+static_cast<void>(result);
 }
 }

@@ -375,6 +375,7 @@ void sdio_slave_deinit(void)
 }
 esp_err_t ret = esp_intr_free(context.intr_handle);
 assert(ret==ESP_OK);
+(void)ret;
 context.intr_handle = NULL;
 deinit_context();
 }
@@ -532,7 +533,7 @@ static void sdio_intr_send(void* arg)

 uint32_t returned_cnt;
 if (sdio_slave_hal_send_eof_happened(context.hal)) {
-portBASE_TYPE ret = pdTRUE;
+portBASE_TYPE ret __attribute__((unused));

 esp_err_t err;
 while (1) {
@@ -549,7 +550,7 @@ static void sdio_intr_send(void* arg)
 }
 //get_next_finished_arg returns the total amount of returned descs.
 for(size_t i = 0; i < returned_cnt; i++) {
-portBASE_TYPE ret = xSemaphoreGiveFromISR(context.remain_cnt, &yield);
+ret = xSemaphoreGiveFromISR(context.remain_cnt, &yield);
 assert(ret == pdTRUE);
 }
 }
@@ -603,16 +604,17 @@ esp_err_t sdio_slave_transmit(uint8_t* addr, size_t len)
 static esp_err_t send_flush_data(void)
 {
 esp_err_t err;
+portBASE_TYPE ret __attribute__((unused));

 while (1) {
 void *finished_arg;
 uint32_t return_cnt = 0;
 err = sdio_slave_hal_send_flush_next_buffer(context.hal, &finished_arg, &return_cnt);
 if (err == ESP_OK) {
-portBASE_TYPE ret = xQueueSend(context.ret_queue, &finished_arg, portMAX_DELAY);
+ret = xQueueSend(context.ret_queue, &finished_arg, portMAX_DELAY);
 assert(ret == pdTRUE);
 for (size_t i = 0; i < return_cnt; i++) {
-portBASE_TYPE ret = xSemaphoreGive(context.remain_cnt);
+ret = xSemaphoreGive(context.remain_cnt);
 assert(ret == pdTRUE);
 }
 } else {
@@ -915,6 +915,7 @@ void SPI_MASTER_ISR_ATTR spi_device_release_bus(spi_device_t *dev)
 host->device_acquiring_lock = NULL;
 esp_err_t ret = spi_bus_lock_acquire_end(dev->dev_lock);
 assert(ret == ESP_OK);
+(void) ret;
 }

 esp_err_t SPI_MASTER_ISR_ATTR spi_device_polling_start(spi_device_handle_t handle, spi_transaction_t *trans_desc, TickType_t ticks_to_wait)
@@ -380,7 +380,7 @@ static IRAM_ATTR void spi_slave_hd_intr_append(void *arg)
 spi_slave_hd_callback_config_t *callback = &host->callback;
 spi_slave_hd_hal_context_t *hal = &host->hal;
 BaseType_t awoken = pdFALSE;
-BaseType_t ret;
+BaseType_t ret __attribute__((unused));

 bool tx_done = false;
 bool rx_done = false;
@@ -410,6 +410,7 @@ esp_err_t twai_driver_install(const twai_general_config_t *g_config, const twai_
 periph_module_enable(PERIPH_TWAI_MODULE); //Enable APB CLK to TWAI peripheral
 bool init = twai_hal_init(&twai_context);
 assert(init);
+(void)init;
 twai_hal_configure(&twai_context, t_config, f_config, DRIVER_DEFAULT_INTERRUPTS, g_config->clkout_divider);
 TWAI_EXIT_CRITICAL();

@@ -536,6 +537,7 @@ esp_err_t twai_transmit(const twai_message_t *message, TickType_t ticks_to_wait)
 //Manually start a transmission
 int res = xQueueReceive(p_twai_obj->tx_queue, &tx_frame, 0);
 assert(res == pdTRUE);
+(void)res;
 twai_hal_set_tx_buffer_and_transmit(&twai_context, &tx_frame);
 p_twai_obj->tx_msg_count++;
 ret = ESP_OK;
@@ -42,8 +42,7 @@ uint16_t esp_efuse_rtc_calib_get_init_code(int version, int atten)
 assert(init_code_size == 10);

 uint32_t init_code = 0;
-esp_err_t err = esp_efuse_read_field_blob(init_code_efuse, &init_code, init_code_size);
-assert(err == ESP_OK);
+ESP_ERROR_CHECK(esp_efuse_read_field_blob(init_code_efuse, &init_code, init_code_size));
 return init_code + 1000; // version 1 logic
 }

@@ -71,12 +70,10 @@ esp_err_t esp_efuse_rtc_calib_get_cal_voltage(int version, int atten, uint32_t*
 calib_vol_expected_mv = 1370;
 }

-int cal_vol_size = esp_efuse_get_field_size(cal_vol_efuse);
-assert(cal_vol_size == 10);
+assert(cal_vol_efuse[0]->bit_count == 10);

 uint32_t cal_vol = 0;
-esp_err_t err = esp_efuse_read_field_blob(cal_vol_efuse, &cal_vol, cal_vol_size) & 0x3FF;
-assert(err == ESP_OK);
+ESP_ERROR_CHECK(esp_efuse_read_field_blob(cal_vol_efuse, &cal_vol, cal_vol_efuse[0]->bit_count));

 *out_digi = 2000 + ((cal_vol & BIT(9))? -(cal_vol & ~BIT9): cal_vol);
 *out_vol_mv = calib_vol_expected_mv;
@@ -94,6 +91,7 @@ float esp_efuse_rtc_calib_get_cal_temp(int version)
 uint32_t cal_temp = 0;
 esp_err_t err = esp_efuse_read_field_blob(cal_temp_efuse, &cal_temp, cal_temp_size);
 assert(err == ESP_OK);
+(void)err;
 // BIT(8) stands for sign: 1: negtive, 0: positive
 return ((cal_temp & BIT(8)) != 0)? -(uint8_t)cal_temp: (uint8_t)cal_temp;
 }
@@ -228,8 +228,7 @@ esp_err_t esp_efuse_utility_write_reg(esp_efuse_block_t efuse_block, unsigned in
 uint32_t esp_efuse_utility_read_reg(esp_efuse_block_t blk, unsigned int num_reg)
 {
 assert(blk >= 0 && blk < EFUSE_BLK_MAX);
-unsigned int max_num_reg = (range_read_addr_blocks[blk].end - range_read_addr_blocks[blk].start) / sizeof(uint32_t);
-assert(num_reg <= max_num_reg);
+assert(num_reg <= (range_read_addr_blocks[blk].end - range_read_addr_blocks[blk].start) / sizeof(uint32_t));
 uint32_t value;
 #ifdef CONFIG_EFUSE_VIRTUAL
 value = virt_blocks[blk][num_reg];
@@ -245,8 +244,8 @@ uint32_t esp_efuse_utility_read_reg(esp_efuse_block_t blk, unsigned int num_reg)
 static void write_reg(esp_efuse_block_t blk, unsigned int num_reg, uint32_t value)
 {
 assert(blk >= 0 && blk < EFUSE_BLK_MAX);
-unsigned int max_num_reg = (range_read_addr_blocks[blk].end - range_read_addr_blocks[blk].start) / sizeof(uint32_t);
-assert(num_reg <= max_num_reg);
+assert(num_reg <= (range_read_addr_blocks[blk].end - range_read_addr_blocks[blk].start) / sizeof(uint32_t));

 uint32_t addr_wr_reg = range_write_addr_blocks[blk].start + num_reg * 4;
 uint32_t reg_to_write = REG_READ(addr_wr_reg) | value;
 // The register can be written in parts so we combine the new value with the one already available.
@@ -84,7 +84,7 @@ void esp_crosscore_int_init(void) {
 portENTER_CRITICAL(&reason_spinlock);
 reason[xPortGetCoreID()]=0;
 portEXIT_CRITICAL(&reason_spinlock);
-esp_err_t err;
+esp_err_t err __attribute__((unused));
 if (xPortGetCoreID()==0) {
 err = esp_intr_alloc(ETS_FROM_CPU_INTR0_SOURCE, ESP_INTR_FLAG_IRAM, esp_crosscore_isr, (void*)&reason[0], NULL);
 } else {
@@ -173,6 +173,7 @@ void esp_dport_access_int_init(void)
 #ifndef CONFIG_FREERTOS_UNICORE
 portBASE_TYPE res = xTaskCreatePinnedToCore(&dport_access_init_core, "dport", configMINIMAL_STACK_SIZE, NULL, 5, NULL, xPortGetCoreID());
 assert(res == pdTRUE);
+(void)res;
 #endif
 }

@@ -110,7 +110,7 @@ static inline int rangeblock_idx_valid(int rangeblock_idx)

 static void set_bank(int virt_bank, int phys_bank, int ct)
 {
-int r;
+int r __attribute__((unused));
 r = cache_sram_mmu_set( 0, 0, SOC_EXTRAM_DATA_LOW + CACHE_BLOCKSIZE * virt_bank, phys_bank * CACHE_BLOCKSIZE, 32, ct );
 assert(r == 0);
 r = cache_sram_mmu_set( 1, 0, SOC_EXTRAM_DATA_LOW + CACHE_BLOCKSIZE * virt_bank, phys_bank * CACHE_BLOCKSIZE, 32, ct );
@@ -66,8 +66,7 @@ void esp_crosscore_int_init(void)
 portENTER_CRITICAL(&reason_spinlock);
 reason[cpu_hal_get_core_id()] = 0;
 portEXIT_CRITICAL(&reason_spinlock);
-esp_err_t err = esp_intr_alloc(ETS_FROM_CPU_INTR0_SOURCE, ESP_INTR_FLAG_IRAM, esp_crosscore_isr, (void *)&reason[0], NULL);
-assert(err == ESP_OK);
+ESP_ERROR_CHECK(esp_intr_alloc(ETS_FROM_CPU_INTR0_SOURCE, ESP_INTR_FLAG_IRAM, esp_crosscore_isr, (void *)&reason[0], NULL));
 }

 static void IRAM_ATTR esp_crosscore_int_send(int core_id, uint32_t reason_mask)
@@ -73,13 +73,11 @@ void esp_crosscore_int_init(void)
 portENTER_CRITICAL(&reason_spinlock);
 reason[cpu_hal_get_core_id()] = 0;
 portEXIT_CRITICAL(&reason_spinlock);
-esp_err_t err;
 if (cpu_hal_get_core_id() == 0) {
-err = esp_intr_alloc(ETS_FROM_CPU_INTR0_SOURCE, ESP_INTR_FLAG_IRAM, esp_crosscore_isr, (void *)&reason[0], NULL);
+ESP_ERROR_CHECK(esp_intr_alloc(ETS_FROM_CPU_INTR0_SOURCE, ESP_INTR_FLAG_IRAM, esp_crosscore_isr, (void *)&reason[0], NULL));
 } else {
-err = esp_intr_alloc(ETS_FROM_CPU_INTR1_SOURCE, ESP_INTR_FLAG_IRAM, esp_crosscore_isr, (void *)&reason[1], NULL);
+ESP_ERROR_CHECK(esp_intr_alloc(ETS_FROM_CPU_INTR1_SOURCE, ESP_INTR_FLAG_IRAM, esp_crosscore_isr, (void *)&reason[1], NULL));
 }
-assert(err == ESP_OK);
 }

 static void IRAM_ATTR esp_crosscore_int_send(int core_id, uint32_t reason_mask)
@@ -177,7 +177,7 @@ esp_adc_cal_value_t esp_adc_cal_characterize(adc_unit_t adc_num,
 uint32_t default_vref,
 esp_adc_cal_characteristics_t *chars)
 {
-bool res;
+bool res __attribute__((unused));
 adc_calib_parsed_info efuse_parsed_data = {0};
 // Check parameters
 assert((adc_num == ADC_UNIT_1) || (adc_num == ADC_UNIT_2));
@@ -193,8 +193,7 @@ static void set_ocode_by_efuse(int calib_version)
 assert(calib_version == 1);
 // use efuse ocode.
 uint32_t ocode;
-esp_err_t err = esp_efuse_read_field_blob(ESP_EFUSE_OCODE, &ocode, 8);
-assert(err == ESP_OK);
+ESP_ERROR_CHECK(esp_efuse_read_field_blob(ESP_EFUSE_OCODE, &ocode, 8));
 REGI2C_WRITE_MASK(I2C_ULP, I2C_ULP_EXT_CODE, ocode);
 REGI2C_WRITE_MASK(I2C_ULP, I2C_ULP_IR_FORCE_CODE, 1);
 }
@@ -95,6 +95,7 @@ static void esp_ipc_init(void)
 portBASE_TYPE res = xTaskCreatePinnedToCore(ipc_task, task_name, CONFIG_ESP_IPC_TASK_STACK_SIZE, (void*) i,
 configMAX_PRIORITIES - 1, &s_ipc_task_handle[i], i);
 assert(res == pdTRUE);
+(void)res;
 }
 }

@@ -288,7 +288,7 @@ esp_err_t esp_pm_configure(const void* vconfig)

 portENTER_CRITICAL(&s_switch_lock);

-bool res = false;
+bool res __attribute__((unused));
 res = rtc_clk_cpu_freq_mhz_to_config(max_freq_mhz, &s_cpu_freq_by_mode[PM_MODE_CPU_MAX]);
 assert(res);
 res = rtc_clk_cpu_freq_mhz_to_config(apb_max_freq, &s_cpu_freq_by_mode[PM_MODE_APB_MAX]);
@@ -192,7 +192,9 @@ static void select_rtc_slow_clk(slow_clk_sel_t slow_clk)
 esp_rom_uart_tx_wait_idle(CONFIG_ESP_CONSOLE_UART_NUM);
 }

-rtc_clk_cpu_freq_set_config(&new_config);
+if (res) {
+rtc_clk_cpu_freq_set_config(&new_config);
+}

 // Re calculate the ccount to make time calculation correct.
 cpu_hal_set_cycle_count( (uint64_t)cpu_hal_get_cycle_count() * new_freq_mhz / old_freq_mhz );
@@ -133,7 +133,9 @@ static const char *TAG = "clk";
 // when switching APB frequency
 esp_rom_uart_tx_wait_idle(CONFIG_ESP_CONSOLE_UART_NUM);

-rtc_clk_cpu_freq_set_config(&new_config);
+if (res) {
+rtc_clk_cpu_freq_set_config(&new_config);
+}

 // Re calculate the ccount to make time calculation correct.
 cpu_hal_set_cycle_count( (uint64_t)cpu_hal_get_cycle_count() * new_freq_mhz / old_freq_mhz );
@@ -136,7 +136,9 @@ static void select_rtc_slow_clk(slow_clk_sel_t slow_clk);
 esp_rom_uart_tx_wait_idle(CONFIG_ESP_CONSOLE_UART_NUM);
 }

-rtc_clk_cpu_freq_set_config(&new_config);
+if (res) {
+rtc_clk_cpu_freq_set_config(&new_config);
+}

 // Re calculate the ccount to make time calculation correct.
 cpu_hal_set_cycle_count( (uint64_t)cpu_hal_get_cycle_count() * new_freq_mhz / old_freq_mhz );
@@ -131,7 +131,9 @@ static void select_rtc_slow_clk(slow_clk_sel_t slow_clk);
 esp_rom_uart_tx_wait_idle(CONFIG_ESP_CONSOLE_UART_NUM);
 }

-rtc_clk_cpu_freq_set_config(&new_config);
+if (res) {
+rtc_clk_cpu_freq_set_config(&new_config);
+}

 // Re calculate the ccount to make time calculation correct.
 cpu_hal_set_cycle_count( (uint64_t)cpu_hal_get_cycle_count() * new_freq_mhz / old_freq_mhz );
@@ -927,7 +927,7 @@ touch_pad_t esp_sleep_get_touchpad_wakeup_status(void)
 touch_pad_t pad_num;
 esp_err_t ret = touch_pad_get_wakeup_status(&pad_num); //TODO 723diff commit id:fda9ada1b
 assert(ret == ESP_OK && "wakeup reason is RTC_TOUCH_TRIG_EN but SENS_TOUCH_MEAS_EN is zero");
-return pad_num;
+return (ret == ESP_OK) ? pad_num : TOUCH_PAD_MAX;
 }

 #endif // SOC_TOUCH_SENSOR_NUM > 0
@@ -1097,7 +1097,7 @@ esp_err_t esp_deep_sleep_enable_gpio_wakeup(uint64_t gpio_pin_mask, esp_deepslee
 return ESP_ERR_INVALID_ARG;
 }
 err = gpio_deep_sleep_wakeup_enable(gpio_idx, intr_type);

 s_config.gpio_wakeup_mask |= BIT(gpio_idx);
 if (mode == ESP_GPIO_WAKEUP_GPIO_HIGH) {
 s_config.gpio_trigger_mode |= (mode << gpio_idx);
@@ -284,7 +284,7 @@ static void do_core_init(void)
 esp_secure_boot_init_checks();
 #endif

-esp_err_t err;
+esp_err_t err __attribute__((unused));

 #if CONFIG_SECURE_DISABLE_ROM_DL_MODE
 err = esp_efuse_disable_rom_download_mode();
@@ -323,6 +323,7 @@ static void do_core_init(void)
 esp_flash_app_init();
 esp_err_t flash_ret = esp_flash_init_default_chip();
 assert(flash_ret == ESP_OK);
+(void)flash_ret;
 }

 static void do_secondary_init(void)
@@ -83,6 +83,7 @@ void esp_startup_start_app_common(void)
 ESP_TASK_MAIN_STACK, NULL,
 ESP_TASK_MAIN_PRIO, NULL, 0);
 assert(res == pdTRUE);
+(void)res;
 }

 static void main_task(void* args)
@@ -91,14 +91,15 @@
 #include <stdlib.h> /* for abort() */
 #include "esp32c3/rom/ets_sys.h"

-#if defined(CONFIG_FREERTOS_ASSERT_DISABLE)
-#define configASSERT(a) /* assertions disabled */
-#elif defined(CONFIG_FREERTOS_ASSERT_FAIL_PRINT_CONTINUE)
+// If CONFIG_FREERTOS_ASSERT_DISABLE is set then configASSERT is defined empty later in FreeRTOS.h and the macro
+// configASSERT_DEFINED remains unset (meaning some warnings are avoided)
+
+#if defined(CONFIG_FREERTOS_ASSERT_FAIL_PRINT_CONTINUE)
 #define configASSERT(a) if (unlikely(!(a))) { \
 esp_rom_printf("%s:%d (%s)- assert failed!\n", __FILE__, __LINE__, \
 __FUNCTION__); \
 }
-#else /* CONFIG_FREERTOS_ASSERT_FAIL_ABORT */
+#elif defined(CONFIG_FREERTOS_ASSERT_FAIL_ABORT)
 #define configASSERT(a) if (unlikely(!(a))) { \
 esp_rom_printf("%s:%d (%s)- assert failed!\n", __FILE__, __LINE__, \
 __FUNCTION__); \
@@ -148,8 +148,7 @@ void vPortExitCritical(void)
 void vPortSetupTimer(void)
 {
 /* set system timer interrupt vector */
-esp_err_t err = esp_intr_alloc(ETS_SYSTIMER_TARGET0_EDGE_INTR_SOURCE, ESP_INTR_FLAG_IRAM, vPortSysTickHandler, NULL, NULL);
-assert(err == ESP_OK);
+ESP_ERROR_CHECK(esp_intr_alloc(ETS_SYSTIMER_TARGET0_EDGE_INTR_SOURCE, ESP_INTR_FLAG_IRAM, vPortSysTickHandler, NULL, NULL));

 /* configure the timer */
 systimer_hal_init();
@@ -131,14 +131,15 @@ int xt_clock_freq(void) __attribute__((deprecated));
 #include "esp32c3/rom/ets_sys.h"
 #endif

-#if defined(CONFIG_FREERTOS_ASSERT_DISABLE)
-#define configASSERT(a) /* assertions disabled */
-#elif defined(CONFIG_FREERTOS_ASSERT_FAIL_PRINT_CONTINUE)
-#define configASSERT(a) if (unlikely(!(a))) { \
+// If CONFIG_FREERTOS_ASSERT_DISABLE is set then configASSERT is defined empty later in FreeRTOS.h and the macro
+// configASSERT_DEFINED remains unset (meaning some warnings are avoided)
+
+#if defined(CONFIG_FREERTOS_ASSERT_FAIL_PRINT_CONTINUE)
+#define configASSERT(a) if (unlikely(!(a))) { \
 esp_rom_printf("%s:%d (%s)- assert failed!\n", __FILE__, __LINE__, \
 __FUNCTION__); \
 }
-#else /* CONFIG_FREERTOS_ASSERT_FAIL_ABORT */
+#elif defined(CONFIG_FREERTOS_ASSERT_FAIL_ABORT)
 #define configASSERT(a) if (unlikely(!(a))) { \
 esp_rom_printf("%s:%d (%s)- assert failed!\n", __FILE__, __LINE__, \
 __FUNCTION__); \
@@ -1935,6 +1935,7 @@ static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode
 taskEXIT_CRITICAL(&xTaskQueueMutex);

 configASSERT( suspended == 0 );
+(void)suspended;
 portYIELD_WITHIN_API();
 }
 else
@@ -141,8 +141,10 @@ void usbh_hal_init(usbh_hal_context_t *hal)
 {
 //Check if a peripheral is alive by reading the core ID registers
 usbh_dev_t *dev = &USBH;
+#ifndef NDEBUG
 uint32_t core_id = usb_ll_get_controller_core_id(dev);
 assert(core_id == CORE_REG_GSNPSID);
+#endif
 //Initialize HAL context
 memset(hal, 0, sizeof(usbh_hal_context_t));
 hal->dev = dev;
@@ -178,7 +178,7 @@ void sdio_slave_hal_hw_init(sdio_slave_context_t *hal)
 static esp_err_t init_send_queue(sdio_slave_context_t *hal)
 {
 esp_err_t ret;
-esp_err_t rcv_res;
+esp_err_t rcv_res __attribute((unused));
 sdio_ringbuf_t *buf = &(hal->send_desc_queue);

 //initialize pointers
|
||||
IRAM_ATTR static void *dram_alloc_to_iram_addr(void *addr, size_t len)
|
||||
{
|
||||
uintptr_t dstart = (uintptr_t)addr; //First word
|
||||
uintptr_t dend = dstart + len - 4; //Last word
|
||||
uintptr_t dend __attribute__((unused)) = dstart + len - 4; //Last word
|
||||
assert(esp_ptr_in_diram_dram((void *)dstart));
|
||||
assert(esp_ptr_in_diram_dram((void *)dend));
|
||||
assert((dstart & 3) == 0);
|
||||
|
@ -173,3 +173,8 @@ set_source_files_properties(
|
||||
PROPERTIES COMPILE_FLAGS
|
||||
-DRANDOMBYTES_DEFAULT_IMPLEMENTATION
|
||||
)
|
||||
|
||||
if(CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_DISABLE)
|
||||
# some libsodium variables are only used for asserts
|
||||
target_compile_options(${COMPONENT_LIB} PRIVATE -Wno-unused-but-set-variable)
|
||||
endif()
|
||||
|
@@ -82,6 +82,7 @@ sys_mutex_lock(sys_mutex_t *pxMutex)
 BaseType_t ret = xSemaphoreTake(*pxMutex, portMAX_DELAY);

 LWIP_ASSERT("failed to take the mutex", ret == pdTRUE);
+(void)ret;
 }

 /**
@@ -95,6 +96,7 @@ sys_mutex_unlock(sys_mutex_t *pxMutex)
 BaseType_t ret = xSemaphoreGive(*pxMutex);

 LWIP_ASSERT("failed to give the mutex", ret == pdTRUE);
+(void)ret;
 }

 /**
@@ -134,6 +136,7 @@ sys_sem_new(sys_sem_t *sem, u8_t count)
 if (count == 1) {
 BaseType_t ret = xSemaphoreGive(*sem);
 LWIP_ASSERT("sys_sem_new: initial give failed", ret == pdTRUE);
+(void)ret;
 }

 return ERR_OK;
@@ -151,6 +154,7 @@ sys_sem_signal(sys_sem_t *sem)
 /* queue full is OK, this is a signal only... */
 LWIP_ASSERT("sys_sem_signal: sane return value",
 (ret == pdTRUE) || (ret == errQUEUE_FULL));
+(void)ret;
 }

 /*-----------------------------------------------------------------------------------*/
@@ -247,6 +251,7 @@ sys_mbox_post(sys_mbox_t *mbox, void *msg)
 {
 BaseType_t ret = xQueueSendToBack((*mbox)->os_mbox, &msg, portMAX_DELAY);
 LWIP_ASSERT("mbox post failed", ret == pdTRUE);
+(void)ret;
 }

 /**
@@ -386,6 +391,8 @@ sys_mbox_free(sys_mbox_t *mbox)
 vQueueDelete((*mbox)->os_mbox);
 free(*mbox);
 *mbox = NULL;
+
+(void)msgs_waiting;
 }

 /**
@@ -45,7 +45,7 @@ void esp_sha(esp_sha_type sha_type, const unsigned char *input, size_t ilen, uns
 #endif
 } ctx;

-int ret;
+int ret __attribute__((unused));
 assert(input != NULL && output != NULL);

 #if SOC_SHA_SUPPORT_SHA1
@@ -455,10 +455,9 @@ esp_err_t Storage::readMultiPageBlob(uint8_t nsIndex, const char* key, void* dat

 uint8_t chunkCount = item.blobIndex.chunkCount;
 VerOffset chunkStart = item.blobIndex.chunkStart;
-size_t readSize = item.blobIndex.dataSize;
 size_t offset = 0;

-assert(dataSize == readSize);
+assert(dataSize == item.blobIndex.dataSize);

 /* Now read corresponding chunks */
 for (uint8_t chunkNum = 0; chunkNum < chunkCount; chunkNum++) {
@@ -1,2 +1,7 @@
 idf_component_register(SRCS "protobuf-c/protobuf-c/protobuf-c.c"
 INCLUDE_DIRS protobuf-c)
+
+if(CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_DISABLE)
+# some variables are only used by asserts
+target_compile_options(${COMPONENT_LIB} PRIVATE -Wno-unused-but-set-variable)
+endif()
@@ -152,8 +152,8 @@ void IRAM_ATTR spi_flash_disable_interrupts_caches_and_other_cpu(void)
 // Signal to the spi_flash_op_block_task on the other CPU that we need it to
 // disable cache there and block other tasks from executing.
 s_flash_op_can_start = false;
-esp_err_t ret = esp_ipc_call(other_cpuid, &spi_flash_op_block_func, (void *) other_cpuid);
-assert(ret == ESP_OK);
+ESP_ERROR_CHECK(esp_ipc_call(other_cpuid, &spi_flash_op_block_func, (void *) other_cpuid));

 while (!s_flash_op_can_start) {
 // Busy loop and wait for spi_flash_op_block_func to disable cache
 // on the other CPU
@@ -177,6 +177,7 @@ void wpa_sta_connect(uint8_t *bssid)
 wpa_config_profile();
 ret = wpa_config_bss(bssid);
 WPA_ASSERT(ret == 0);
+(void)ret;
 }

 int wpa_parse_wpa_ie_wrapper(const u8 *wpa_ie, size_t wpa_ie_len, wifi_wpa_ie_t *data)