fix(system): fixed warnings related to unused var if asserts disabled
commit 0d140f38ea
parent ff8265b6b3
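The pattern applied throughout this commit: a variable whose only consumer is assert() (or configASSERT()) becomes unused when assertions are compiled out with NDEBUG, and -Werror builds then fail on -Wunused-variable or -Wunused-but-set-variable. Marking the variable __attribute__((unused)) keeps the call and its side effects while silencing the warning. A minimal before/after sketch; do_work() is a hypothetical stand-in for calls such as cache_flash_mmu_set() or esp_cache_get_alignment() in the hunks below:

/* Illustrative only, not part of the diff. */
#include <assert.h>

extern int do_work(void);

void before(void)
{
    int err = do_work();      /* with NDEBUG, 'err' is set but never read -> warning */
    assert(err == 0);
}

void after(void)
{
    // Return value unused if asserts are disabled
    int err __attribute__((unused)) = do_work();   /* warning suppressed, call still made */
    assert(err == 0);
}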
Kconfig (4 changed lines)
@@ -389,11 +389,11 @@ mainmenu "Espressif IoT Development Framework Configuration"
 
 endchoice # assertions
 
 config COMPILER_ASSERT_NDEBUG_EVALUATE
-    bool "Enable to evaluate the expression inside assert(X) when NDEBUG is set"
+    bool "Enable the evaluation of the expression inside assert(X) when NDEBUG is set"
     default y
     help
         When NDEBUG is set, assert(X) will not cause code to trigger an assertion.
-        With this option set assert(X) will still evaluate the expression X, though
+        With this option set, assert(X) will still evaluate the expression X, though
         the result will never cause an assertion. This means that if X is a function
         then the function will be called.
 
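For context, the help text above describes the option in macro terms; a simplified sketch of the two possible NDEBUG definitions (an illustration, not the exact ESP-IDF newlib implementation) would be:

#ifdef NDEBUG
  #ifdef CONFIG_COMPILER_ASSERT_NDEBUG_EVALUATE
    /* X is still evaluated, so side effects (e.g. a function call) happen,
     * but the result can never trigger an abort. */
    #define assert(X) ((void)(X))
  #else
    /* X is not evaluated at all. */
    #define assert(X) ((void)0)
  #endif
#endif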
@@ -323,7 +323,8 @@ static esp_err_t bootloader_flash_read_allow_decrypt(size_t src_addr, void *dest
         ESP_EARLY_LOGD(TAG, "mmu set block paddr=0x%08" PRIx32 " (was 0x%08" PRIx32 ")", map_at, current_read_mapping);
 #if CONFIG_IDF_TARGET_ESP32
         //Should never fail if we only map a SPI_FLASH_MMU_PAGE_SIZE to the vaddr starting from FLASH_READ_VADDR
-        int e = cache_flash_mmu_set(0, 0, FLASH_READ_VADDR, map_at, 64, 1);
+        // Return value unused if asserts are disabled
+        int e __attribute__((unused)) = cache_flash_mmu_set(0, 0, FLASH_READ_VADDR, map_at, 64, 1);
         assert(e == 0);
 #else
         uint32_t actual_mapped_len = 0;
@@ -112,7 +112,8 @@ esp_err_t esp_dma_capable_malloc(size_t size, const esp_dma_mem_info_t *dma_mem_
         heap_caps &= ~MALLOC_CAP_DMA;
     }
 
-    esp_err_t ret = esp_cache_get_alignment(cache_flags, &cache_alignment_bytes);
+    // Return value unused if asserts are disabled
+    esp_err_t __attribute((unused)) ret = esp_cache_get_alignment(cache_flags, &cache_alignment_bytes);
     assert(ret == ESP_OK);
 
     //Get the least common multiple of two alignment
@@ -198,7 +199,8 @@ bool esp_dma_is_buffer_alignment_satisfied(const void *ptr, size_t size, esp_dma
     if (esp_ptr_external_ram(ptr)) {
         cache_flags |= MALLOC_CAP_SPIRAM;
     }
-    esp_err_t ret = esp_cache_get_alignment(cache_flags, &cache_alignment_bytes);
+    // Return value unused if asserts are disabled
+    esp_err_t __attribute__((unused)) ret = esp_cache_get_alignment(cache_flags, &cache_alignment_bytes);
     assert(ret == ESP_OK);
 
     //Get the least common multiple of two alignment
@@ -69,13 +69,15 @@ static inline void __attribute__((always_inline)) spinlock_initialize(spinlock_t
  * function reenables interrupts once the spinlock is acquired). For critical
  * sections, use the interface provided by the operating system.
  * @param lock - target spinlock object
- * @param timeout - cycles to wait, passing SPINLOCK_WAIT_FOREVER blocs indefinitely
+ * @param timeout - cycles to wait, passing SPINLOCK_WAIT_FOREVER blocks indefinitely
  */
 static inline bool __attribute__((always_inline)) spinlock_acquire(spinlock_t *lock, int32_t timeout)
 {
 #if !CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE && !BOOTLOADER_BUILD
     uint32_t irq_status;
-    uint32_t core_owner_id, other_core_owner_id;
+    uint32_t core_owner_id;
+    // Unused if asserts are disabled
+    uint32_t __attribute__((unused)) other_core_owner_id;
     bool lock_set;
     esp_cpu_cycle_count_t start_count;
 
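For reference, a minimal usage sketch of the API documented above (the include path and the static lock are assumptions for illustration, not part of this diff):

#include <assert.h>
#include <stdbool.h>
#include "spinlock.h"

static spinlock_t s_lock;

void example(void)
{
    spinlock_initialize(&s_lock);
    // SPINLOCK_WAIT_FOREVER blocks until the lock is acquired, so the result is only checked by assert()
    bool acquired __attribute__((unused)) = spinlock_acquire(&s_lock, SPINLOCK_WAIT_FOREVER);
    assert(acquired);
    spinlock_release(&s_lock);
}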
@@ -173,7 +175,8 @@ static inline void __attribute__((always_inline)) spinlock_release(spinlock_t *l
 {
 #if !CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE && !BOOTLOADER_BUILD
     uint32_t irq_status;
-    uint32_t core_owner_id;
+    // Return value unused if asserts are disabled
+    uint32_t __attribute__((unused)) core_owner_id;
 
     assert(lock);
 #if __XTENSA__
@@ -50,7 +50,8 @@ static void s_c2m_ops(uint32_t vaddr, size_t size)
     } else
 #endif
     {
-        bool valid = false;
+        // Value unused if asserts are disabled
+        bool __attribute__((unused)) valid = false;
         esp_os_enter_critical_safe(&s_spinlock);
         valid = cache_hal_writeback_addr(vaddr, size);
         esp_os_exit_critical_safe(&s_spinlock);
@@ -931,7 +931,9 @@ void esp_pm_impl_init(void)
         uart_ll_set_sclk(UART_LL_GET_HW(CONFIG_ESP_CONSOLE_UART_NUM), (soc_module_clk_t)clk_source);
     }
     uint32_t sclk_freq;
-    esp_err_t err = esp_clk_tree_src_get_freq_hz((soc_module_clk_t)clk_source, ESP_CLK_TREE_SRC_FREQ_PRECISION_CACHED, &sclk_freq);
+
+    // Return value unused if asserts are disabled
+    esp_err_t __attribute__((unused)) err = esp_clk_tree_src_get_freq_hz((soc_module_clk_t)clk_source, ESP_CLK_TREE_SRC_FREQ_PRECISION_CACHED, &sclk_freq);
     assert(err == ESP_OK);
     HP_UART_SRC_CLK_ATOMIC() {
         uart_ll_set_baudrate(UART_LL_GET_HW(CONFIG_ESP_CONSOLE_UART_NUM), CONFIG_ESP_CONSOLE_UART_BAUDRATE, sclk_freq);
@@ -1397,7 +1397,8 @@ err:
 
 void vRingbufferDeleteWithCaps(RingbufHandle_t xRingbuffer)
 {
-    BaseType_t xResult;
+    // Return value unused if asserts are disabled
+    BaseType_t __attribute__((unused)) xResult;
     StaticRingbuffer_t *pxStaticRingbuffer = NULL;
     uint8_t *pucRingbufferStorage = NULL;
 
@@ -26,7 +26,8 @@ static int esp_dbg_stubs_advertise_table(void *stub_table_addr)
 void esp_dbg_stubs_ll_init(void *stub_table_addr)
 {
     // notify host about control block address
-    int res = esp_dbg_stubs_advertise_table(stub_table_addr);
+    // Value unused if asserts are disabled
+    int __attribute__((unused)) res = esp_dbg_stubs_advertise_table(stub_table_addr);
     assert(res == 0 && "Failed to send debug stubs table address to host!");
     ESP_LOGV(TAG, "%s stubs %p", __func__, stub_table_addr);
 }
@@ -370,7 +370,7 @@ static UBaseType_t get_task_affinity(const TaskHandle_t xTask)
 void task_wdt_timeout_abort(bool current_core)
 {
     TaskSnapshot_t snapshot = { 0 };
-    BaseType_t ret = pdTRUE;
+    BaseType_t __attribute__((unused)) ret = pdTRUE;
 
     ESP_EARLY_LOGE(TAG, "Aborting.");
     esp_reset_reason_set_hint(ESP_RST_TASK_WDT);
@@ -115,6 +115,7 @@ static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits,
          * event group structure. */
         volatile size_t xSize = sizeof( StaticEventGroup_t );
         configASSERT( xSize == sizeof( EventGroup_t ) );
+        ( void ) xSize; /* Prevent unused variable warning when configASSERT() is not used. */
     } /*lint !e529 xSize is referenced if configASSERT() is defined. */
 #endif /* configASSERT_DEFINED */
 
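The FreeRTOS sources above use a plain ( void ) cast rather than the GCC-specific attribute; the cast is portable and achieves the same suppression. A self-contained sketch of the idiom, with configASSERT() stubbed via assert() purely for illustration:

#include <assert.h>
#include <stddef.h>

/* Stand-in for FreeRTOS configASSERT(); in a real build it may expand to nothing at all. */
#define configASSERT( x ) assert( x )

void check_static_sizes( void )
{
    volatile size_t xSize = sizeof( size_t );   /* value only consumed by the assert */
    configASSERT( xSize == sizeof( size_t ) );
    ( void ) xSize; /* keeps the compiler quiet when configASSERT() compiles away */
}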
@@ -455,6 +455,7 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
          * message buffer structure. */
         volatile size_t xSize = sizeof( StaticStreamBuffer_t );
         configASSERT( xSize == sizeof( StreamBuffer_t ) );
+        ( void ) xSize; /* Prevent unused variable warning when configASSERT() is not used. */
     } /*lint !e529 xSize is referenced is configASSERT() is defined. */
 #endif /* configASSERT_DEFINED */
 
@@ -1420,6 +1421,7 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
          * result in confusion as to what is actually being observed. */
         const BaseType_t xWriteValue = 0x55;
         configASSERT( memset( pucBuffer, ( int ) xWriteValue, xBufferSizeBytes ) == pucBuffer );
+        (void)xWriteValue;
     } /*lint !e529 !e438 xWriteValue is only used if configASSERT() is defined. */
 #endif
 
@@ -164,7 +164,8 @@ err:
         portYIELD_WITHIN_API();
     }
 
-    BaseType_t xResult;
+    // Return value unused if asserts are disabled
+    BaseType_t __attribute__((unused)) xResult;
     StaticTask_t * pxTaskBuffer;
     StackType_t * puxStackBuffer;
 
@@ -187,7 +188,8 @@ err:
      * from another task's context. */
     configASSERT( eRunning != eTaskGetState( xTaskToDelete ) );
 
-    BaseType_t xResult;
+    // Return value unused if asserts are disabled
+    BaseType_t __attribute__((unused)) xResult;
     StaticTask_t * pxTaskBuffer;
     StackType_t * puxStackBuffer;
 
@@ -260,7 +262,8 @@ err:
 
 void vQueueDeleteWithCaps( QueueHandle_t xQueue )
 {
-    BaseType_t xResult;
+    // Return value unused if asserts are disabled
+    BaseType_t __attribute__((unused)) xResult;
     StaticQueue_t * pxQueueBuffer;
     uint8_t * pucQueueStorageBuffer;
 
@@ -332,7 +335,8 @@ err:
 
 void vSemaphoreDeleteWithCaps( SemaphoreHandle_t xSemaphore )
 {
-    BaseType_t xResult;
+    // Return value unused if asserts are disabled
+    BaseType_t __attribute__((unused)) xResult;
     StaticSemaphore_t * pxSemaphoreBuffer;
 
     /* Retrieve the buffer used to create the semaphore before deleting it
@@ -404,7 +408,8 @@ err:
 void vStreamBufferGenericDeleteWithCaps( StreamBufferHandle_t xStreamBuffer,
                                          BaseType_t xIsMessageBuffer )
 {
-    BaseType_t xResult;
+    // Return value unused if asserts are disabled
+    BaseType_t __attribute__((unused)) xResult;
     StaticStreamBuffer_t * pxStaticStreamBuffer;
     uint8_t * pucStreamBufferStorageArea;
 
@@ -59,7 +59,8 @@
 
 void vEventGroupDeleteWithCaps( EventGroupHandle_t xEventGroup )
 {
-    BaseType_t xResult;
+    // Return value unused if asserts are disabled
+    BaseType_t __attribute__((unused)) xResult;
     StaticEventGroup_t * pxEventGroupBuffer;
 
     /* Retrieve the buffer used to create the event group before deleting it