diff --git a/components/cxx/CMakeLists.txt b/components/cxx/CMakeLists.txt index a6e61100fc..228414599c 100644 --- a/components/cxx/CMakeLists.txt +++ b/components/cxx/CMakeLists.txt @@ -12,7 +12,6 @@ idf_component_register(SRCS "cxx_exception_stubs.cpp" if(NOT CONFIG_CXX_EXCEPTIONS) set(WRAP_FUNCTIONS - _Unwind_SetEnableExceptionFdeSorting __register_frame_info_bases __register_frame_info __register_frame diff --git a/components/cxx/cxx_exception_stubs.cpp b/components/cxx/cxx_exception_stubs.cpp index 168d9dae2c..827d40dd81 100644 --- a/components/cxx/cxx_exception_stubs.cpp +++ b/components/cxx/cxx_exception_stubs.cpp @@ -35,11 +35,6 @@ static T abort_return() } // unwind-dw2-fde.o -extern "C" void __wrap__Unwind_SetEnableExceptionFdeSorting(unsigned char enable) -{ - abort(); -} - extern "C" void __wrap___register_frame_info_bases(const void *begin, struct object *ob, void *tbase, void *dbase) { abort(); diff --git a/components/cxx/cxx_init.cpp b/components/cxx/cxx_init.cpp index 1819e5b7b2..bfff600583 100644 --- a/components/cxx/cxx_init.cpp +++ b/components/cxx/cxx_init.cpp @@ -12,18 +12,6 @@ namespace { const char *TAG = "C++ init"; } -#ifdef CONFIG_COMPILER_CXX_EXCEPTIONS -// workaround for C++ exception large memory allocation -extern "C" void _Unwind_SetEnableExceptionFdeSorting(unsigned char enable); - -ESP_SYSTEM_INIT_FN(init_cxx_exceptions, SECONDARY, BIT(0), 205) -{ - ESP_EARLY_LOGD(TAG, "Setting C++ exception workarounds."); - _Unwind_SetEnableExceptionFdeSorting(0); - return ESP_OK; -} -#endif // CONFIG_COMPILER_CXX_EXCEPTIONS - /** * This function overwrites the same function of libsupc++ (part of libstdc++). * Consequently, libsupc++ will then follow our configured exception emergency pool size. 
diff --git a/components/esp_system/system_init_fn.txt b/components/esp_system/system_init_fn.txt index a4f99518b0..b841acd726 100644 --- a/components/esp_system/system_init_fn.txt +++ b/components/esp_system/system_init_fn.txt @@ -90,7 +90,6 @@ SECONDARY: 140: init_dbg_stubs in components/app_trace/debug_stubs.c on BIT(0) SECONDARY: 201: init_pm in components/esp_system/startup_funcs.c on BIT(0) SECONDARY: 203: init_apb_dma in components/esp_system/startup_funcs.c on BIT(0) SECONDARY: 204: init_coexist in components/esp_system/startup_funcs.c on BIT(0) -SECONDARY: 205: init_cxx_exceptions in components/cxx/cxx_init.cpp on BIT(0) # usb_console needs to create an esp_timer at startup. # This can be done only after esp_timer initialization (esp_timer_init_os). diff --git a/components/newlib/CMakeLists.txt b/components/newlib/CMakeLists.txt index 1027332cb3..df29489bad 100644 --- a/components/newlib/CMakeLists.txt +++ b/components/newlib/CMakeLists.txt @@ -4,6 +4,11 @@ if(${target} STREQUAL "linux") return() # This component is not supported by the POSIX/Linux simulator endif() +set(include_dirs "platform_include") +if(CMAKE_C_COMPILER_ID MATCHES "Clang") # TODO LLVM-330 + list(APPEND include_dirs "clang_include") +endif() + if(BOOTLOADER_BUILD) # Bootloader builds need the platform_include directory (for assert.h), but nothing else idf_component_register(INCLUDE_DIRS platform_include) @@ -29,7 +34,10 @@ set(srcs "realpath.c" "scandir.c" ) -set(include_dirs platform_include) + +if(CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND) + list(APPEND srcs "port/xtensa/stdatomic_s32c1i.c") +endif() if(CONFIG_SPIRAM_CACHE_WORKAROUND) set(ldfragments "esp32-spiram-rom-functions-c.lf") @@ -49,6 +57,11 @@ target_link_libraries(${COMPONENT_LIB} INTERFACE c m ${CONFIG_COMPILER_RT_LIB_NA set_source_files_properties(heap.c PROPERTIES COMPILE_FLAGS -fno-builtin) +if(CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND) + set_source_files_properties("port/xtensa/stdatomic_s32c1i.c" + PROPERTIES 
COMPILE_FLAGS "-mno-disable-hardware-atomics") +endif() + # Forces the linker to include heap, syscall, pthread, assert, and retargetable locks from this component, # instead of the implementations provided by newlib. list(APPEND EXTRA_LINK_FLAGS "-u newlib_include_heap_impl") diff --git a/components/newlib/Kconfig b/components/newlib/Kconfig index 43f28bc953..84dddf9e43 100644 --- a/components/newlib/Kconfig +++ b/components/newlib/Kconfig @@ -113,3 +113,7 @@ menu "Newlib" endchoice endmenu # Newlib + +config STDATOMIC_S32C1I_SPIRAM_WORKAROUND + bool + default SPIRAM && (IDF_TARGET_ESP32 || IDF_TARGET_ESP32S3) && !IDF_TOOLCHAIN_CLANG # TODO IDF-9032 diff --git a/components/newlib/platform_include/sys/dirent.h b/components/newlib/clang_include/sys/dirent.h similarity index 100% rename from components/newlib/platform_include/sys/dirent.h rename to components/newlib/clang_include/sys/dirent.h diff --git a/components/newlib/newlib.lf b/components/newlib/newlib.lf index d92c04fb0f..19f0992d3a 100644 --- a/components/newlib/newlib.lf +++ b/components/newlib/newlib.lf @@ -5,3 +5,5 @@ entries: abort (noflash) assert (noflash) stdatomic (noflash) + if STDATOMIC_S32C1I_SPIRAM_WORKAROUND = y: + stdatomic_s32c1i (noflash) diff --git a/components/newlib/port/xtensa/stdatomic_s32c1i.c b/components/newlib/port/xtensa/stdatomic_s32c1i.c new file mode 100644 index 0000000000..f1affcdfc2 --- /dev/null +++ b/components/newlib/port/xtensa/stdatomic_s32c1i.c @@ -0,0 +1,79 @@ +/* + * SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD + * + * SPDX-License-Identifier: Apache-2.0 + */ +#include +#include +#include "esp_stdatomic.h" + +#undef SYNC_OP_FUNCTIONS +#undef _ATOMIC_OP_FUNCTION +#undef ATOMIC_LOAD +#undef ATOMIC_CMP_EXCHANGE +#undef ATOMIC_STORE +#undef ATOMIC_EXCHANGE +#undef SYNC_BOOL_CMP_EXCHANGE +#undef SYNC_VAL_CMP_EXCHANGE +#undef SYNC_LOCK_TEST_AND_SET +#undef SYNC_LOCK_RELEASE + +#define SYNC_OP_FUNCTIONS(n, type, name) + +#define _ATOMIC_OP_FUNCTION(n, 
type, name_1, name_2, ret_var, operation, inverse) \ +type __atomic_s32c1i_ ##name_1 ##_ ##name_2 ##_ ##n (volatile void* ptr, type value, int memorder) \ +{ \ + return __atomic_ ##name_1 ##_ ##name_2 ##_ ##n (ptr, value, memorder); \ +} + +#define ATOMIC_LOAD(n, type) \ +type __atomic_s32c1i_load_ ## n (const volatile void* ptr, int memorder) \ +{ \ + return __atomic_load_ ## n (ptr, memorder); \ +} + +#define ATOMIC_CMP_EXCHANGE(n, type) \ +bool __atomic_s32c1i_compare_exchange_ ## n (volatile void* ptr, void* expected, type desired, bool weak, int success, int failure) \ +{ \ + return __atomic_compare_exchange_ ## n (ptr, expected, desired, weak, success, failure); \ +} + +#define ATOMIC_STORE(n, type) \ +void __atomic_s32c1i_store_ ## n (volatile void * ptr, type value, int memorder) \ +{ \ + __atomic_store_ ## n (ptr, value, memorder); \ +} + +#define ATOMIC_EXCHANGE(n, type) \ +type __atomic_s32c1i_exchange_ ## n (volatile void* ptr, type value, int memorder) \ +{ \ + return __atomic_exchange_ ## n (ptr, value, memorder); \ +} + +#define SYNC_BOOL_CMP_EXCHANGE(n, type) \ +bool __sync_s32c1i_bool_compare_and_swap_ ## n (volatile void* ptr, type expected, type desired) \ +{ \ + return __sync_bool_compare_and_swap_ ## n (ptr, expected, desired); \ +} + +#define SYNC_VAL_CMP_EXCHANGE(n, type) \ +type __sync_s32c1i_val_compare_and_swap_ ## n (volatile void* ptr, type expected, type desired) \ +{ \ + return __sync_val_compare_and_swap_ ## n (ptr, expected, desired); \ +} + +#define SYNC_LOCK_TEST_AND_SET(n, type) \ +type __sync_s32c1i_lock_test_and_set_ ## n (volatile void* ptr, type value) \ +{ \ + return __sync_lock_test_and_set_ ## n (ptr, value); \ +} + +#define SYNC_LOCK_RELEASE(n, type) \ +void __sync_s32c1i_lock_release_ ## n (volatile void* ptr) \ +{ \ + __sync_lock_release_ ## n (ptr); \ +} + +ATOMIC_FUNCTIONS(1, unsigned char) +ATOMIC_FUNCTIONS(2, short unsigned int) +ATOMIC_FUNCTIONS(4, unsigned int) diff --git 
a/components/newlib/priv_include/esp_stdatomic.h b/components/newlib/priv_include/esp_stdatomic.h new file mode 100644 index 0000000000..a988075a6f --- /dev/null +++ b/components/newlib/priv_include/esp_stdatomic.h @@ -0,0 +1,296 @@ +/* + * SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD + * + * SPDX-License-Identifier: Apache-2.0 + */ +#include "freertos/FreeRTOS.h" +#include "soc/soc_caps.h" +#include "sdkconfig.h" + +#ifdef __XTENSA__ +#include "xtensa/config/core-isa.h" + +#ifndef XCHAL_HAVE_S32C1I +#error "XCHAL_HAVE_S32C1I not defined, include correct header!" +#endif // XCHAL_HAVE_S32C1I + +#ifndef CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND +#define CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND 0 +#endif + +#define HAS_ATOMICS_32 ((XCHAL_HAVE_S32C1I == 1) && !CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND) + +// no 64-bit atomics on Xtensa +#define HAS_ATOMICS_64 0 +#else // RISCV +// GCC toolchain will define this pre-processor if "A" extension is supported +#ifndef __riscv_atomic +#define __riscv_atomic 0 +#endif + +#define HAS_ATOMICS_32 (__riscv_atomic == 1) +#define HAS_ATOMICS_64 ((__riscv_atomic == 1) && (__riscv_xlen == 64)) +#endif // (__XTENSA__, __riscv) + +#if SOC_CPU_CORES_NUM == 1 +// Single core SoC: atomics can be implemented using portSET_INTERRUPT_MASK_FROM_ISR +// and portCLEAR_INTERRUPT_MASK_FROM_ISR, which disables and enables interrupts. 
+#if CONFIG_FREERTOS_SMP +#define _ATOMIC_ENTER_CRITICAL() unsigned int state = portDISABLE_INTERRUPTS(); +#define _ATOMIC_EXIT_CRITICAL() portRESTORE_INTERRUPTS(state) +#else // CONFIG_FREERTOS_SMP +#define _ATOMIC_ENTER_CRITICAL() unsigned int state = portSET_INTERRUPT_MASK_FROM_ISR() +#define _ATOMIC_EXIT_CRITICAL() portCLEAR_INTERRUPT_MASK_FROM_ISR(state) +#endif // CONFIG_FREERTOS_SMP +#else // SOC_CPU_CORES_NUM + +#define _ATOMIC_ENTER_CRITICAL() portENTER_CRITICAL_SAFE(&s_atomic_lock); +#define _ATOMIC_EXIT_CRITICAL() portEXIT_CRITICAL_SAFE(&s_atomic_lock); + +#endif // SOC_CPU_CORES_NUM + +#if CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND + +#define _ATOMIC_IF_NOT_EXT_RAM() \ + if (!((uintptr_t)ptr >= SOC_EXTRAM_DATA_LOW && (uintptr_t) ptr < SOC_EXTRAM_DATA_HIGH)) + +#define _ATOMIC_HW_STUB_OP_FUNCTION(n, type, name_1, name_2) \ + _ATOMIC_IF_NOT_EXT_RAM() { \ + type __atomic_s32c1i_ ##name_1 ##_ ##name_2 ##_ ##n (volatile void* ptr, type value, int memorder); \ + return __atomic_s32c1i_ ##name_1 ##_ ##name_2 ##_ ##n (ptr, value, memorder); \ + } + +#define _ATOMIC_HW_STUB_EXCHANGE(n, type) \ + _ATOMIC_IF_NOT_EXT_RAM() { \ + type __atomic_s32c1i_exchange_ ## n (volatile void* ptr, type value, int memorder); \ + return __atomic_s32c1i_exchange_ ## n (ptr, value, memorder); \ + } + +#define _ATOMIC_HW_STUB_STORE(n, type) \ + _ATOMIC_IF_NOT_EXT_RAM() { \ + void __atomic_s32c1i_store_ ## n (volatile void * ptr, type value, int memorder); \ + __atomic_s32c1i_store_ ## n (ptr, value, memorder); \ + return; \ + } + +#define _ATOMIC_HW_STUB_CMP_EXCHANGE(n, type) \ + _ATOMIC_IF_NOT_EXT_RAM() { \ + bool __atomic_s32c1i_compare_exchange_ ## n (volatile void* ptr, void* expected, type desired, bool weak, int success, int failure); \ + return __atomic_s32c1i_compare_exchange_ ## n (ptr, expected, desired, weak, success, failure); \ + } + +#define _ATOMIC_HW_STUB_LOAD(n, type) \ + _ATOMIC_IF_NOT_EXT_RAM() { \ + type __atomic_s32c1i_load_ ## n (const volatile void* ptr, int 
memorder); \ + return __atomic_s32c1i_load_ ## n (ptr,memorder); \ + } + +#define _ATOMIC_HW_STUB_SYNC_BOOL_CMP_EXCHANGE(n, type) \ + _ATOMIC_IF_NOT_EXT_RAM() { \ + bool __sync_s32c1i_bool_compare_and_swap_ ## n (volatile void* ptr, type expected, type desired); \ + return __sync_s32c1i_bool_compare_and_swap_ ## n (ptr, expected, desired); \ + } + +#define _ATOMIC_HW_STUB_SYNC_VAL_CMP_EXCHANGE(n, type) \ + _ATOMIC_IF_NOT_EXT_RAM() { \ + type __sync_s32c1i_val_compare_and_swap_ ## n (volatile void* ptr, type expected, type desired); \ + return __sync_s32c1i_val_compare_and_swap_ ## n (ptr, expected, desired); \ + } + +#define _ATOMIC_HW_STUB_SYNC_LOCK_TEST_AND_SET(n, type) \ + _ATOMIC_IF_NOT_EXT_RAM() { \ + type __sync_s32c1i_lock_test_and_set_ ## n (volatile void* ptr, type value); \ + return __sync_s32c1i_lock_test_and_set_ ## n (ptr, value); \ + } + +#define _ATOMIC_HW_STUB_SYNC_LOCK_RELEASE(n, type) \ + _ATOMIC_IF_NOT_EXT_RAM() { \ + void __sync_s32c1i_lock_release_ ## n (volatile void* ptr); \ + __sync_s32c1i_lock_release_ ## n (ptr); \ + return; \ + } + +#else // CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND + +#define _ATOMIC_HW_STUB_OP_FUNCTION(n, type, name_1, name_2) +#define _ATOMIC_HW_STUB_EXCHANGE(n, type) +#define _ATOMIC_HW_STUB_STORE(n, type) +#define _ATOMIC_HW_STUB_CMP_EXCHANGE(n, type) +#define _ATOMIC_HW_STUB_LOAD(n, type) +#define _ATOMIC_HW_STUB_SYNC_BOOL_CMP_EXCHANGE(n, type) +#define _ATOMIC_HW_STUB_SYNC_VAL_CMP_EXCHANGE(n, type) +#define _ATOMIC_HW_STUB_SYNC_LOCK_TEST_AND_SET(n, type) +#define _ATOMIC_HW_STUB_SYNC_LOCK_RELEASE(n, type) + +#endif // CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND + +#ifdef __clang__ +// Clang doesn't allow to define "__sync_*" atomics. The workaround is to define function with name "__sync_*_builtin", +// which implements "__sync_*" atomic functionality and use asm directive to set the value of symbol "__sync_*" to the name +// of defined function. 
+ +#define CLANG_ATOMIC_SUFFIX(name_) name_ ## _builtin +#define CLANG_DECLARE_ALIAS(name_) \ +__asm__(".type " # name_ ", @function\n" \ + ".global " #name_ "\n" \ + ".equ " #name_ ", " #name_ "_builtin"); + +#else // __clang__ + +#define CLANG_ATOMIC_SUFFIX(name_) name_ +#define CLANG_DECLARE_ALIAS(name_) + +#endif // __clang__ + +#define ATOMIC_OP_FUNCTIONS(n, type, name, operation, inverse) \ + _ATOMIC_OP_FUNCTION(n, type, fetch, name, old, operation, inverse) \ + _ATOMIC_OP_FUNCTION(n, type, name, fetch, new, operation, inverse) + +#define _ATOMIC_OP_FUNCTION(n, type, name_1, name_2, ret_var, operation, inverse) \ +type __atomic_ ##name_1 ##_ ##name_2 ##_ ##n (volatile void* ptr, type value, int memorder) \ +{ \ + type old, new; \ + _ATOMIC_HW_STUB_OP_FUNCTION(n, type, name_1, name_2); \ + _ATOMIC_ENTER_CRITICAL(); \ + old = (*(volatile type*)ptr); \ + new = inverse(old operation value); \ + *(volatile type*)ptr = new; \ + _ATOMIC_EXIT_CRITICAL(); \ + return ret_var; \ +} + +#define ATOMIC_LOAD(n, type) \ +type __atomic_load_ ## n (const volatile void* ptr, int memorder) \ +{ \ + type old; \ + _ATOMIC_HW_STUB_LOAD(n, type); \ + _ATOMIC_ENTER_CRITICAL(); \ + old = *(const volatile type*)ptr; \ + _ATOMIC_EXIT_CRITICAL(); \ + return old; \ +} + +#define ATOMIC_CMP_EXCHANGE(n, type) \ +bool __atomic_compare_exchange_ ## n (volatile void* ptr, void* expected, type desired, bool weak, int success, int failure) \ +{ \ + bool ret = false; \ + _ATOMIC_HW_STUB_CMP_EXCHANGE(n, type); \ + _ATOMIC_ENTER_CRITICAL(); \ + if (*(volatile type*)ptr == *(type*)expected) { \ + ret = true; \ + *(volatile type*)ptr = desired; \ + } else { \ + *(type*)expected = *(volatile type*)ptr; \ + } \ + _ATOMIC_EXIT_CRITICAL(); \ + return ret; \ +} + +#define ATOMIC_STORE(n, type) \ +void __atomic_store_ ## n (volatile void * ptr, type value, int memorder) \ +{ \ + _ATOMIC_HW_STUB_STORE(n, type); \ + _ATOMIC_ENTER_CRITICAL(); \ + *(volatile type*)ptr = value; \ + _ATOMIC_EXIT_CRITICAL(); \ +} 
+ +#define ATOMIC_EXCHANGE(n, type) \ +type __atomic_exchange_ ## n (volatile void* ptr, type value, int memorder) \ +{ \ + type old; \ + _ATOMIC_HW_STUB_EXCHANGE(n, type); \ + _ATOMIC_ENTER_CRITICAL(); \ + old = *(volatile type*)ptr; \ + *(volatile type*)ptr = value; \ + _ATOMIC_EXIT_CRITICAL(); \ + return old; \ +} + +#define SYNC_OP_FUNCTIONS(n, type, name) \ + _SYNC_OP_FUNCTION(n, type, fetch, name) \ + _SYNC_OP_FUNCTION(n, type, name, fetch) + +#define _SYNC_OP_FUNCTION(n, type, name_1, name_2) \ +type CLANG_ATOMIC_SUFFIX(__sync_ ##name_1 ##_and_ ##name_2 ##_ ##n) (volatile void* ptr, type value) \ +{ \ + return __atomic_ ##name_1 ##_ ##name_2 ##_ ##n (ptr, value, __ATOMIC_SEQ_CST); \ +} \ +CLANG_DECLARE_ALIAS( __sync_##name_1 ##_and_ ##name_2 ##_ ##n ) + +#define SYNC_BOOL_CMP_EXCHANGE(n, type) \ +bool CLANG_ATOMIC_SUFFIX(__sync_bool_compare_and_swap_ ## n) (volatile void* ptr, type expected, type desired) \ +{ \ + bool ret = false; \ + _ATOMIC_HW_STUB_SYNC_BOOL_CMP_EXCHANGE(n, type); \ + _ATOMIC_ENTER_CRITICAL(); \ + if (*(volatile type*)ptr == expected) { \ + *(volatile type*)ptr = desired; \ + ret = true; \ + } \ + _ATOMIC_EXIT_CRITICAL(); \ + return ret; \ +} \ +CLANG_DECLARE_ALIAS( __sync_bool_compare_and_swap_ ## n ) + +#define SYNC_VAL_CMP_EXCHANGE(n, type) \ +type CLANG_ATOMIC_SUFFIX(__sync_val_compare_and_swap_ ## n) (volatile void* ptr, type expected, type desired) \ +{ \ + type old; \ + _ATOMIC_HW_STUB_SYNC_VAL_CMP_EXCHANGE(n, type); \ + _ATOMIC_ENTER_CRITICAL(); \ + old = *(volatile type*)ptr; \ + if (old == expected) { \ + *(volatile type*)ptr = desired; \ + } \ + _ATOMIC_EXIT_CRITICAL(); \ + return old; \ +} \ +CLANG_DECLARE_ALIAS( __sync_val_compare_and_swap_ ## n ) + +#define SYNC_LOCK_TEST_AND_SET(n, type) \ +type CLANG_ATOMIC_SUFFIX(__sync_lock_test_and_set_ ## n) (volatile void* ptr, type value) \ +{ \ + type old; \ + _ATOMIC_HW_STUB_SYNC_LOCK_TEST_AND_SET(n, type); \ + _ATOMIC_ENTER_CRITICAL(); \ + old = *(volatile type*)ptr; \ + 
*(volatile type*)ptr = value; \ + _ATOMIC_EXIT_CRITICAL(); \ + return old; \ +} \ +CLANG_DECLARE_ALIAS( __sync_lock_test_and_set_ ## n ) + +#define SYNC_LOCK_RELEASE(n, type) \ +void CLANG_ATOMIC_SUFFIX(__sync_lock_release_ ## n) (volatile void* ptr) \ +{ \ + _ATOMIC_HW_STUB_SYNC_LOCK_RELEASE(n, type); \ + _ATOMIC_ENTER_CRITICAL(); \ + *(volatile type*)ptr = 0; \ + _ATOMIC_EXIT_CRITICAL(); \ +} \ +CLANG_DECLARE_ALIAS( __sync_lock_release_ ## n ) + +#define ATOMIC_FUNCTIONS(n, type) \ + ATOMIC_EXCHANGE(n, type) \ + ATOMIC_CMP_EXCHANGE(n, type) \ + ATOMIC_OP_FUNCTIONS(n, type, add, +, ) \ + ATOMIC_OP_FUNCTIONS(n, type, sub, -, ) \ + ATOMIC_OP_FUNCTIONS(n, type, and, &, ) \ + ATOMIC_OP_FUNCTIONS(n, type, or, |, ) \ + ATOMIC_OP_FUNCTIONS(n, type, xor, ^, ) \ + ATOMIC_OP_FUNCTIONS(n, type, nand, &, ~) \ +/* LLVM has not implemented native atomic load/stores for riscv targets without the Atomic extension. LLVM thread: https://reviews.llvm.org/D47553. \ + * Even though GCC does transform them, these libcalls need to be available for the case where a LLVM based project links against IDF. 
*/ \ + ATOMIC_LOAD(n, type) \ + ATOMIC_STORE(n, type) \ + SYNC_OP_FUNCTIONS(n, type, add) \ + SYNC_OP_FUNCTIONS(n, type, sub) \ + SYNC_OP_FUNCTIONS(n, type, and) \ + SYNC_OP_FUNCTIONS(n, type, or) \ + SYNC_OP_FUNCTIONS(n, type, xor) \ + SYNC_OP_FUNCTIONS(n, type, nand) \ + SYNC_BOOL_CMP_EXCHANGE(n, type) \ + SYNC_VAL_CMP_EXCHANGE(n, type) \ + SYNC_LOCK_TEST_AND_SET(n, type) \ + SYNC_LOCK_RELEASE(n, type) diff --git a/components/newlib/project_include.cmake b/components/newlib/project_include.cmake new file mode 100644 index 0000000000..6292237846 --- /dev/null +++ b/components/newlib/project_include.cmake @@ -0,0 +1,3 @@ +if(CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND) + idf_build_set_property(COMPILE_OPTIONS "-mdisable-hardware-atomics" APPEND) +endif() diff --git a/components/newlib/stdatomic.c b/components/newlib/stdatomic.c index 103a3dc2ba..f3d4ee2ebb 100644 --- a/components/newlib/stdatomic.c +++ b/components/newlib/stdatomic.c @@ -6,296 +6,21 @@ //replacement for gcc built-in functions -#include "sdkconfig.h" #include #include #include -#include "soc/soc_caps.h" +#include "esp_stdatomic.h" #include "freertos/FreeRTOS.h" +#include "sdkconfig.h" -#ifdef __XTENSA__ -#include "xtensa/config/core-isa.h" - -#ifndef XCHAL_HAVE_S32C1I -#error "XCHAL_HAVE_S32C1I not defined, include correct header!" -#endif - -#define HAS_ATOMICS_32 (XCHAL_HAVE_S32C1I == 1) -// no 64-bit atomics on Xtensa -#define HAS_ATOMICS_64 0 - -#else // RISCV - -// GCC toolchain will define this pre-processor if "A" extension is supported -#ifndef __riscv_atomic -#define __riscv_atomic 0 -#endif - -#define HAS_ATOMICS_32 (__riscv_atomic == 1) -#define HAS_ATOMICS_64 ((__riscv_atomic == 1) && (__riscv_xlen == 64)) -#endif // (__XTENSA__, __riscv) - -#if SOC_CPU_CORES_NUM == 1 - -// Single core SoC: atomics can be implemented using portSET_INTERRUPT_MASK_FROM_ISR -// and portCLEAR_INTERRUPT_MASK_FROM_ISR, which disables and enables interrupts. 
-#if CONFIG_FREERTOS_SMP -#define _ATOMIC_ENTER_CRITICAL() ({ \ - unsigned state = portDISABLE_INTERRUPTS(); \ - state; \ -}) - -#define _ATOMIC_EXIT_CRITICAL(state) do { \ - portRESTORE_INTERRUPTS(state); \ - } while (0) -#else // CONFIG_FREERTOS_SMP -#define _ATOMIC_ENTER_CRITICAL() ({ \ - unsigned state = portSET_INTERRUPT_MASK_FROM_ISR(); \ - state; \ -}) - -#define _ATOMIC_EXIT_CRITICAL(state) do { \ - portCLEAR_INTERRUPT_MASK_FROM_ISR(state); \ - } while (0) -#endif -#else // SOC_CPU_CORES_NUM - +#if SOC_CPU_CORES_NUM > 1 +#if !CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND _Static_assert(HAS_ATOMICS_32, "32-bit atomics should be supported if SOC_CPU_CORES_NUM > 1"); +#endif // CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND // Only need to implement 64-bit atomics here. Use a single global portMUX_TYPE spinlock // to emulate the atomics. static portMUX_TYPE s_atomic_lock = portMUX_INITIALIZER_UNLOCKED; - -// Return value is not used but kept for compatibility with the single-core version above. -#define _ATOMIC_ENTER_CRITICAL() ({ \ - portENTER_CRITICAL_SAFE(&s_atomic_lock); \ - 0; \ -}) - -#define _ATOMIC_EXIT_CRITICAL(state) do { \ - (void) (state); \ - portEXIT_CRITICAL_SAFE(&s_atomic_lock); \ -} while(0) - -#endif // SOC_CPU_CORES_NUM - -#ifdef __clang__ -// Clang doesn't allow to define "__sync_*" atomics. The workaround is to define function with name "__sync_*_builtin", -// which implements "__sync_*" atomic functionality and use asm directive to set the value of symbol "__sync_*" to the name -// of defined function. 
- -#define CLANG_ATOMIC_SUFFIX(name_) name_ ## _builtin -#define CLANG_DECLARE_ALIAS(name_) \ -__asm__(".type " # name_ ", @function\n" \ - ".global " #name_ "\n" \ - ".equ " #name_ ", " #name_ "_builtin"); - -#else // __clang__ - -#define CLANG_ATOMIC_SUFFIX(name_) name_ -#define CLANG_DECLARE_ALIAS(name_) - -#endif // __clang__ - -#define ATOMIC_LOAD(n, type) type __atomic_load_ ## n (const volatile void* mem, int memorder) \ -{ \ - unsigned state = _ATOMIC_ENTER_CRITICAL(); \ - type ret = *(const volatile type*)mem; \ - _ATOMIC_EXIT_CRITICAL(state); \ - return ret; \ -} - -#define ATOMIC_STORE(n, type) void __atomic_store_ ## n (volatile void * mem, type val, int memorder) \ -{ \ - unsigned state = _ATOMIC_ENTER_CRITICAL(); \ - *(volatile type *)mem = val; \ - _ATOMIC_EXIT_CRITICAL(state); \ -} - -#define ATOMIC_EXCHANGE(n, type) type __atomic_exchange_ ## n (volatile void* mem, type val, int memorder) \ -{ \ - unsigned state = _ATOMIC_ENTER_CRITICAL(); \ - type ret = *(volatile type*)mem; \ - *(volatile type*)mem = val; \ - _ATOMIC_EXIT_CRITICAL(state); \ - return ret; \ -} - -#define CMP_EXCHANGE(n, type) bool __atomic_compare_exchange_ ## n (volatile void* mem, void* expect, type desired, bool weak, int success, int failure) \ -{ \ - bool ret = false; \ - unsigned state = _ATOMIC_ENTER_CRITICAL(); \ - if (*(volatile type*)mem == *(type*)expect) { \ - ret = true; \ - *(volatile type*)mem = desired; \ - } else { \ - *(type*)expect = *(volatile type*)mem; \ - } \ - _ATOMIC_EXIT_CRITICAL(state); \ - return ret; \ -} - -#define FETCH_ADD(n, type) type __atomic_fetch_add_ ## n (volatile void* ptr, type value, int memorder) \ -{ \ - unsigned state = _ATOMIC_ENTER_CRITICAL(); \ - type ret = *(volatile type*)ptr; \ - *(volatile type*)ptr = *(volatile type*)ptr + value; \ - _ATOMIC_EXIT_CRITICAL(state); \ - return ret; \ -} - -#define ADD_FETCH(n, type) type __atomic_add_fetch_ ## n (volatile void* ptr, type value, int memorder) \ -{ \ - unsigned state = 
_ATOMIC_ENTER_CRITICAL(); \ - type ret = *(volatile type*)ptr + value; \ - *(volatile type*)ptr = ret; \ - _ATOMIC_EXIT_CRITICAL(state); \ - return ret; \ -} - -#define FETCH_SUB(n, type) type __atomic_fetch_sub_ ## n (volatile void* ptr, type value, int memorder) \ -{ \ - unsigned state = _ATOMIC_ENTER_CRITICAL(); \ - type ret = *(volatile type*)ptr; \ - *(volatile type*)ptr = *(volatile type*)ptr - value; \ - _ATOMIC_EXIT_CRITICAL(state); \ - return ret; \ -} - -#define SUB_FETCH(n, type) type __atomic_sub_fetch_ ## n (volatile void* ptr, type value, int memorder) \ -{ \ - unsigned state = _ATOMIC_ENTER_CRITICAL(); \ - type ret = *(volatile type*)ptr - value; \ - *(volatile type*)ptr = ret; \ - _ATOMIC_EXIT_CRITICAL(state); \ - return ret; \ -} - -#define FETCH_AND(n, type) type __atomic_fetch_and_ ## n (volatile void* ptr, type value, int memorder) \ -{ \ - unsigned state = _ATOMIC_ENTER_CRITICAL(); \ - type ret = *(volatile type*)ptr; \ - *(volatile type*)ptr = *(volatile type*)ptr & value; \ - _ATOMIC_EXIT_CRITICAL(state); \ - return ret; \ -} - -#define AND_FETCH(n, type) type __atomic_and_fetch_ ## n (volatile void* ptr, type value, int memorder) \ -{ \ - unsigned state = _ATOMIC_ENTER_CRITICAL(); \ - type ret = *(volatile type*)ptr & value; \ - *(volatile type*)ptr = ret; \ - _ATOMIC_EXIT_CRITICAL(state); \ - return ret; \ -} - -#define FETCH_OR(n, type) type __atomic_fetch_or_ ## n (volatile void* ptr, type value, int memorder) \ -{ \ - unsigned state = _ATOMIC_ENTER_CRITICAL(); \ - type ret = *(volatile type*)ptr; \ - *(volatile type*)ptr = *(volatile type*)ptr | value; \ - _ATOMIC_EXIT_CRITICAL(state); \ - return ret; \ -} - -#define OR_FETCH(n, type) type __atomic_or_fetch_ ## n (volatile void* ptr, type value, int memorder) \ -{ \ - unsigned state = _ATOMIC_ENTER_CRITICAL(); \ - type ret = *(volatile type*)ptr | value; \ - *(volatile type*)ptr = ret; \ - _ATOMIC_EXIT_CRITICAL(state); \ - return ret; \ -} - -#define FETCH_XOR(n, type) type 
__atomic_fetch_xor_ ## n (volatile void* ptr, type value, int memorder) \ -{ \ - unsigned state = _ATOMIC_ENTER_CRITICAL(); \ - type ret = *(volatile type*)ptr; \ - *(volatile type*)ptr = *(volatile type*)ptr ^ value; \ - _ATOMIC_EXIT_CRITICAL(state); \ - return ret; \ -} - -#define XOR_FETCH(n, type) type __atomic_xor_fetch_ ## n (volatile void* ptr, type value, int memorder) \ -{ \ - unsigned state = _ATOMIC_ENTER_CRITICAL(); \ - type ret = *(volatile type*)ptr ^ value; \ - *(volatile type*)ptr = ret; \ - _ATOMIC_EXIT_CRITICAL(state); \ - return ret; \ -} - -#define FETCH_NAND(n, type) type __atomic_fetch_nand_ ## n (volatile void* ptr, type value, int memorder) \ -{ \ - unsigned state = _ATOMIC_ENTER_CRITICAL(); \ - type ret = *(volatile type*)ptr; \ - *(volatile type*)ptr = ~(*(volatile type*)ptr & value); \ - _ATOMIC_EXIT_CRITICAL(state); \ - return ret; \ -} - -#define NAND_FETCH(n, type) type __atomic_nand_fetch_ ## n (volatile void* ptr, type value, int memorder) \ -{ \ - unsigned state = _ATOMIC_ENTER_CRITICAL(); \ - type ret = ~(*(volatile type*)ptr & value); \ - *(volatile type*)ptr = ret; \ - _ATOMIC_EXIT_CRITICAL(state); \ - return ret; \ -} - -#define SYNC_FETCH_OP(op, n, type) type CLANG_ATOMIC_SUFFIX(__sync_fetch_and_ ## op ##_ ## n) (volatile void* ptr, type value) \ -{ \ - return __atomic_fetch_ ## op ##_ ## n (ptr, value, __ATOMIC_SEQ_CST); \ -} \ -CLANG_DECLARE_ALIAS( __sync_fetch_and_ ## op ##_ ## n ) - -#define SYNC_OP_FETCH(op, n, type) type CLANG_ATOMIC_SUFFIX(__sync_ ## op ##_and_fetch_ ## n) (volatile void* ptr, type value) \ -{ \ - return __atomic_ ## op ##_fetch_ ## n (ptr, value, __ATOMIC_SEQ_CST); \ -} \ -CLANG_DECLARE_ALIAS( __sync_ ## op ##_and_fetch_ ## n ) - -#define SYNC_BOOL_CMP_EXCHANGE(n, type) bool CLANG_ATOMIC_SUFFIX(__sync_bool_compare_and_swap_ ## n) (volatile void* ptr, type oldval, type newval) \ -{ \ - bool ret = false; \ - unsigned state = _ATOMIC_ENTER_CRITICAL(); \ - if (*(volatile type*)ptr == oldval) { \ - 
*(volatile type*)ptr = newval; \ - ret = true; \ - } \ - _ATOMIC_EXIT_CRITICAL(state); \ - return ret; \ -} \ -CLANG_DECLARE_ALIAS( __sync_bool_compare_and_swap_ ## n ) - -#define SYNC_VAL_CMP_EXCHANGE(n, type) type CLANG_ATOMIC_SUFFIX(__sync_val_compare_and_swap_ ## n) (volatile void* ptr, type oldval, type newval) \ -{ \ - unsigned state = _ATOMIC_ENTER_CRITICAL(); \ - type ret = *(volatile type*)ptr; \ - if (*(volatile type*)ptr == oldval) { \ - *(volatile type*)ptr = newval; \ - } \ - _ATOMIC_EXIT_CRITICAL(state); \ - return ret; \ -} \ -CLANG_DECLARE_ALIAS( __sync_val_compare_and_swap_ ## n ) - -#define SYNC_LOCK_TEST_AND_SET(n, type) type CLANG_ATOMIC_SUFFIX(__sync_lock_test_and_set_ ## n) (volatile void* ptr, type val) \ -{ \ - unsigned state = _ATOMIC_ENTER_CRITICAL(); \ - type ret = *(volatile type*)ptr; \ - *(volatile type*)ptr = val; \ - _ATOMIC_EXIT_CRITICAL(state); \ - return ret; \ -} \ -CLANG_DECLARE_ALIAS( __sync_lock_test_and_set_ ## n ) - -#define SYNC_LOCK_RELEASE(n, type) void CLANG_ATOMIC_SUFFIX(__sync_lock_release_ ## n) (volatile void* ptr) \ -{ \ - unsigned state = _ATOMIC_ENTER_CRITICAL(); \ - *(volatile type*)ptr = 0; \ - _ATOMIC_EXIT_CRITICAL(state); \ -} \ -CLANG_DECLARE_ALIAS( __sync_lock_release_ ## n ) +#endif #if !HAS_ATOMICS_32 @@ -303,134 +28,9 @@ _Static_assert(sizeof(unsigned char) == 1, "atomics require a 1-byte type"); _Static_assert(sizeof(short unsigned int) == 2, "atomics require a 2-bytes type"); _Static_assert(sizeof(unsigned int) == 4, "atomics require a 4-bytes type"); -ATOMIC_EXCHANGE(1, unsigned char) -ATOMIC_EXCHANGE(2, short unsigned int) -ATOMIC_EXCHANGE(4, unsigned int) - -CMP_EXCHANGE(1, unsigned char) -CMP_EXCHANGE(2, short unsigned int) -CMP_EXCHANGE(4, unsigned int) - -FETCH_ADD(1, unsigned char) -FETCH_ADD(2, short unsigned int) -FETCH_ADD(4, unsigned int) - -ADD_FETCH(1, unsigned char) -ADD_FETCH(2, short unsigned int) -ADD_FETCH(4, unsigned int) - -FETCH_SUB(1, unsigned char) -FETCH_SUB(2, short unsigned 
int) -FETCH_SUB(4, unsigned int) - -SUB_FETCH(1, unsigned char) -SUB_FETCH(2, short unsigned int) -SUB_FETCH(4, unsigned int) - -FETCH_AND(1, unsigned char) -FETCH_AND(2, short unsigned int) -FETCH_AND(4, unsigned int) - -AND_FETCH(1, unsigned char) -AND_FETCH(2, short unsigned int) -AND_FETCH(4, unsigned int) - -FETCH_OR(1, unsigned char) -FETCH_OR(2, short unsigned int) -FETCH_OR(4, unsigned int) - -OR_FETCH(1, unsigned char) -OR_FETCH(2, short unsigned int) -OR_FETCH(4, unsigned int) - -FETCH_XOR(1, unsigned char) -FETCH_XOR(2, short unsigned int) -FETCH_XOR(4, unsigned int) - -XOR_FETCH(1, unsigned char) -XOR_FETCH(2, short unsigned int) -XOR_FETCH(4, unsigned int) - -FETCH_NAND(1, unsigned char) -FETCH_NAND(2, short unsigned int) -FETCH_NAND(4, unsigned int) - -NAND_FETCH(1, unsigned char) -NAND_FETCH(2, short unsigned int) -NAND_FETCH(4, unsigned int) - -SYNC_FETCH_OP(add, 1, unsigned char) -SYNC_FETCH_OP(add, 2, short unsigned int) -SYNC_FETCH_OP(add, 4, unsigned int) - -SYNC_OP_FETCH(add, 1, unsigned char) -SYNC_OP_FETCH(add, 2, short unsigned int) -SYNC_OP_FETCH(add, 4, unsigned int) - -SYNC_FETCH_OP(sub, 1, unsigned char) -SYNC_FETCH_OP(sub, 2, short unsigned int) -SYNC_FETCH_OP(sub, 4, unsigned int) - -SYNC_OP_FETCH(sub, 1, unsigned char) -SYNC_OP_FETCH(sub, 2, short unsigned int) -SYNC_OP_FETCH(sub, 4, unsigned int) - -SYNC_FETCH_OP( and, 1, unsigned char) -SYNC_FETCH_OP( and, 2, short unsigned int) -SYNC_FETCH_OP( and, 4, unsigned int) - -SYNC_OP_FETCH( and, 1, unsigned char) -SYNC_OP_FETCH( and, 2, short unsigned int) -SYNC_OP_FETCH( and, 4, unsigned int) - -SYNC_FETCH_OP( or, 1, unsigned char) -SYNC_FETCH_OP( or, 2, short unsigned int) -SYNC_FETCH_OP( or, 4, unsigned int) - -SYNC_OP_FETCH( or, 1, unsigned char) -SYNC_OP_FETCH( or, 2, short unsigned int) -SYNC_OP_FETCH( or, 4, unsigned int) - -SYNC_FETCH_OP(xor, 1, unsigned char) -SYNC_FETCH_OP(xor, 2, short unsigned int) -SYNC_FETCH_OP(xor, 4, unsigned int) - -SYNC_OP_FETCH(xor, 1, unsigned char) 
-SYNC_OP_FETCH(xor, 2, short unsigned int) -SYNC_OP_FETCH(xor, 4, unsigned int) - -SYNC_FETCH_OP(nand, 1, unsigned char) -SYNC_FETCH_OP(nand, 2, short unsigned int) -SYNC_FETCH_OP(nand, 4, unsigned int) - -SYNC_OP_FETCH(nand, 1, unsigned char) -SYNC_OP_FETCH(nand, 2, short unsigned int) -SYNC_OP_FETCH(nand, 4, unsigned int) - -SYNC_BOOL_CMP_EXCHANGE(1, unsigned char) -SYNC_BOOL_CMP_EXCHANGE(2, short unsigned int) -SYNC_BOOL_CMP_EXCHANGE(4, unsigned int) - -SYNC_VAL_CMP_EXCHANGE(1, unsigned char) -SYNC_VAL_CMP_EXCHANGE(2, short unsigned int) -SYNC_VAL_CMP_EXCHANGE(4, unsigned int) - -SYNC_LOCK_TEST_AND_SET(1, unsigned char) -SYNC_LOCK_TEST_AND_SET(2, short unsigned int) -SYNC_LOCK_TEST_AND_SET(4, unsigned int) - -SYNC_LOCK_RELEASE(1, unsigned char) -SYNC_LOCK_RELEASE(2, short unsigned int) -SYNC_LOCK_RELEASE(4, unsigned int) - -// LLVM has not implemented native atomic load/stores for riscv targets without the Atomic extension. LLVM thread: https://reviews.llvm.org/D47553. -// Even though GCC does transform them, these libcalls need to be available for the case where a LLVM based project links against IDF. 
-ATOMIC_LOAD(1, unsigned char) -ATOMIC_LOAD(2, short unsigned int) -ATOMIC_LOAD(4, unsigned int) -ATOMIC_STORE(1, unsigned char) -ATOMIC_STORE(2, short unsigned int) -ATOMIC_STORE(4, unsigned int) +ATOMIC_FUNCTIONS(1, unsigned char) +ATOMIC_FUNCTIONS(2, short unsigned int) +ATOMIC_FUNCTIONS(4, unsigned int) #elif __riscv_atomic == 1 @@ -450,102 +50,62 @@ CLANG_DECLARE_ALIAS(__atomic_is_lock_free) #if !HAS_ATOMICS_64 +#if CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND +#undef _ATOMIC_HW_STUB_OP_FUNCTION +#undef _ATOMIC_HW_STUB_EXCHANGE +#undef _ATOMIC_HW_STUB_STORE +#undef _ATOMIC_HW_STUB_CMP_EXCHANGE +#undef _ATOMIC_HW_STUB_LOAD +#undef _ATOMIC_HW_STUB_SYNC_BOOL_CMP_EXCHANGE +#undef _ATOMIC_HW_STUB_SYNC_VAL_CMP_EXCHANGE +#undef _ATOMIC_HW_STUB_SYNC_LOCK_TEST_AND_SET +#undef _ATOMIC_HW_STUB_SYNC_LOCK_RELEASE + +#define _ATOMIC_HW_STUB_OP_FUNCTION(n, type, name_1, name_2) +#define _ATOMIC_HW_STUB_EXCHANGE(n, type) +#define _ATOMIC_HW_STUB_STORE(n, type) +#define _ATOMIC_HW_STUB_CMP_EXCHANGE(n, type) +#define _ATOMIC_HW_STUB_LOAD(n, type) +#define _ATOMIC_HW_STUB_SYNC_BOOL_CMP_EXCHANGE(n, type) +#define _ATOMIC_HW_STUB_SYNC_VAL_CMP_EXCHANGE(n, type) +#define _ATOMIC_HW_STUB_SYNC_LOCK_TEST_AND_SET(n, type) +#define _ATOMIC_HW_STUB_SYNC_LOCK_RELEASE(n, type) +#endif // CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND + _Static_assert(sizeof(long long unsigned int) == 8, "atomics require a 8-bytes type"); -ATOMIC_EXCHANGE(8, long long unsigned int) - -CMP_EXCHANGE(8, long long unsigned int) - -FETCH_ADD(8, long long unsigned int) - -FETCH_SUB(8, long long unsigned int) - -FETCH_AND(8, long long unsigned int) - -FETCH_OR(8, long long unsigned int) - -FETCH_XOR(8, long long unsigned int) - -FETCH_NAND(8, long long unsigned int) - -ADD_FETCH(8, long long unsigned int) - -SUB_FETCH(8, long long unsigned int) - -AND_FETCH(8, long long unsigned int) - -OR_FETCH(8, long long unsigned int) - -XOR_FETCH(8, long long unsigned int) - -NAND_FETCH(8, long long unsigned int) - -SYNC_FETCH_OP(add, 
8, long long unsigned int) - -SYNC_FETCH_OP(sub, 8, long long unsigned int) - -SYNC_FETCH_OP( and, 8, long long unsigned int) - -SYNC_FETCH_OP( or, 8, long long unsigned int) - -SYNC_FETCH_OP(xor, 8, long long unsigned int) - -SYNC_FETCH_OP(nand, 8, long long unsigned int) - -SYNC_OP_FETCH(add, 8, long long unsigned int) - -SYNC_OP_FETCH(sub, 8, long long unsigned int) - -SYNC_OP_FETCH( and, 8, long long unsigned int) - -SYNC_OP_FETCH( or, 8, long long unsigned int) - -SYNC_OP_FETCH(xor, 8, long long unsigned int) - -SYNC_OP_FETCH(nand, 8, long long unsigned int) - -SYNC_BOOL_CMP_EXCHANGE(8, long long unsigned int) - -SYNC_VAL_CMP_EXCHANGE(8, long long unsigned int) - -SYNC_LOCK_TEST_AND_SET(8, long long unsigned int) -SYNC_LOCK_RELEASE(8, long long unsigned int) - -// LLVM has not implemented native atomic load/stores for riscv targets without the Atomic extension. LLVM thread: https://reviews.llvm.org/D47553. -// Even though GCC does transform them, these libcalls need to be available for the case where a LLVM based project links against IDF. 
-ATOMIC_LOAD(8, long long unsigned int) -ATOMIC_STORE(8, long long unsigned int) +ATOMIC_FUNCTIONS(8, long long unsigned int) #endif // !HAS_ATOMICS_64 // Clang generates calls to the __atomic_load/__atomic_store functions for object size more then 4 bytes void CLANG_ATOMIC_SUFFIX(__atomic_load)(size_t size, const volatile void *src, void *dest, int model) { - unsigned state = _ATOMIC_ENTER_CRITICAL(); + _ATOMIC_ENTER_CRITICAL(); memcpy(dest, (const void *)src, size); - _ATOMIC_EXIT_CRITICAL(state); + _ATOMIC_EXIT_CRITICAL(); } CLANG_DECLARE_ALIAS(__atomic_load) void CLANG_ATOMIC_SUFFIX(__atomic_store)(size_t size, volatile void *dest, void *src, int model) { - unsigned state = _ATOMIC_ENTER_CRITICAL(); + _ATOMIC_ENTER_CRITICAL(); memcpy((void *)dest, (const void *)src, size); - _ATOMIC_EXIT_CRITICAL(state); + _ATOMIC_EXIT_CRITICAL(); } CLANG_DECLARE_ALIAS(__atomic_store) bool CLANG_ATOMIC_SUFFIX(__atomic_compare_exchange)(size_t size, volatile void *ptr, void *expected, void *desired, int success_memorder, int failure_memorder) { bool ret = false; - unsigned state = _ATOMIC_ENTER_CRITICAL(); + _ATOMIC_ENTER_CRITICAL(); if (!memcmp((void *)ptr, expected, size)) { memcpy((void *)ptr, (const void *)desired, size); ret = true; } else { memcpy((void *)expected, (const void *)ptr, size); } - _ATOMIC_EXIT_CRITICAL(state); + _ATOMIC_EXIT_CRITICAL(); return ret; } CLANG_DECLARE_ALIAS(__atomic_compare_exchange) diff --git a/components/newlib/test_apps/newlib/main/test_misc.c b/components/newlib/test_apps/newlib/main/test_misc.c index a79fdf252b..bc7c076ebc 100644 --- a/components/newlib/test_apps/newlib/main/test_misc.c +++ b/components/newlib/test_apps/newlib/main/test_misc.c @@ -12,8 +12,11 @@ #include #include #include +#ifdef __clang__ // TODO LLVM-330 +#include +#else #include -#include "sys/dirent.h" +#endif #include "unity.h" #include "esp_heap_caps.h" #include "esp_vfs.h" diff --git a/components/newlib/test_apps/newlib/main/test_stdatomic.c 
b/components/newlib/test_apps/newlib/main/test_stdatomic.c index 2f642f27d3..56d7db55a0 100644 --- a/components/newlib/test_apps/newlib/main/test_stdatomic.c +++ b/components/newlib/test_apps/newlib/main/test_stdatomic.c @@ -12,219 +12,233 @@ #include #include #include "esp_pthread.h" +#include "esp_attr.h" #include "freertos/portmacro.h" #include "unity.h" +#include "esp_heap_caps.h" +#include "sdkconfig.h" + +#define MALLOC_CAP_ATOMIC MALLOC_CAP_DEFAULT /* non-static to prevent optimization */ -atomic_ullong g_atomic64; -atomic_uint g_atomic32; -atomic_ushort g_atomic16; -atomic_uchar g_atomic8; +atomic_ullong *g_atomic64; +atomic_uint *g_atomic32; +atomic_ushort *g_atomic16; +atomic_uchar *g_atomic8; TEST_CASE("stdatomic - test_64bit_atomics", "[newlib_stdatomic]") { unsigned long long x64 = 0; - g_atomic64 = 0; // calls atomic_store + g_atomic64 = heap_caps_calloc(sizeof(*g_atomic64), 1, MALLOC_CAP_DEFAULT); - x64 += atomic_fetch_or(&g_atomic64, 0x1111111111111111ULL); - x64 += atomic_fetch_xor(&g_atomic64, 0x3333333333333333ULL); - x64 += atomic_fetch_and(&g_atomic64, 0xf0f0f0f0f0f0f0f0ULL); - x64 += atomic_fetch_sub(&g_atomic64, 0x0f0f0f0f0f0f0f0fULL); - x64 += atomic_fetch_add(&g_atomic64, 0x2222222222222222ULL); + x64 += atomic_fetch_or(g_atomic64, 0x1111111111111111ULL); + x64 += atomic_fetch_xor(g_atomic64, 0x3333333333333333ULL); + x64 += atomic_fetch_and(g_atomic64, 0xf0f0f0f0f0f0f0f0ULL); + x64 += atomic_fetch_sub(g_atomic64, 0x0f0f0f0f0f0f0f0fULL); + x64 += atomic_fetch_add(g_atomic64, 0x2222222222222222ULL); #ifndef __clang__ - x64 += __atomic_fetch_nand_8(&g_atomic64, 0xAAAAAAAAAAAAAAAAULL, 0); + x64 += __atomic_fetch_nand_8(g_atomic64, 0xAAAAAAAAAAAAAAAAULL, 0); TEST_ASSERT_EQUAL_HEX64(0x9797979797979797ULL, x64); - TEST_ASSERT_EQUAL_HEX64(0xDDDDDDDDDDDDDDDDULL, g_atomic64); // calls atomic_load + TEST_ASSERT_EQUAL_HEX64(0xDDDDDDDDDDDDDDDDULL, *g_atomic64); // calls atomic_load #else TEST_ASSERT_EQUAL_HEX64(0x6464646464646464ULL, x64); - 
TEST_ASSERT_EQUAL_HEX64(0x3333333333333333ULL, g_atomic64); // calls atomic_load + TEST_ASSERT_EQUAL_HEX64(0x3333333333333333ULL, *g_atomic64); // calls atomic_load #endif + free(g_atomic64); } TEST_CASE("stdatomic - test_32bit_atomics", "[newlib_stdatomic]") { unsigned int x32 = 0; - g_atomic32 = 0; + g_atomic32 = heap_caps_calloc(sizeof(*g_atomic32), 1, MALLOC_CAP_DEFAULT); - x32 += atomic_fetch_or(&g_atomic32, 0x11111111U); - x32 += atomic_fetch_xor(&g_atomic32, 0x33333333U); - x32 += atomic_fetch_and(&g_atomic32, 0xf0f0f0f0U); - x32 += atomic_fetch_sub(&g_atomic32, 0x0f0f0f0fU); - x32 += atomic_fetch_add(&g_atomic32, 0x22222222U); + x32 += atomic_fetch_or(g_atomic32, 0x11111111U); + x32 += atomic_fetch_xor(g_atomic32, 0x33333333U); + x32 += atomic_fetch_and(g_atomic32, 0xf0f0f0f0U); + x32 += atomic_fetch_sub(g_atomic32, 0x0f0f0f0fU); + x32 += atomic_fetch_add(g_atomic32, 0x22222222U); #ifndef __clang__ - x32 += __atomic_fetch_nand_4(&g_atomic32, 0xAAAAAAAAU, 0); + x32 += __atomic_fetch_nand_4(g_atomic32, 0xAAAAAAAAU, 0); TEST_ASSERT_EQUAL_HEX32(0x97979797U, x32); - TEST_ASSERT_EQUAL_HEX32(0xDDDDDDDDU, g_atomic32); + TEST_ASSERT_EQUAL_HEX32(0xDDDDDDDDU, *g_atomic32); #else TEST_ASSERT_EQUAL_HEX32(0x64646464U, x32); - TEST_ASSERT_EQUAL_HEX32(0x33333333U, g_atomic32); // calls atomic_load + TEST_ASSERT_EQUAL_HEX32(0x33333333U, *g_atomic32); // calls atomic_load #endif + free(g_atomic32); } TEST_CASE("stdatomic - test_16bit_atomics", "[newlib_stdatomic]") { unsigned int x16 = 0; - g_atomic16 = 0; + g_atomic16 = heap_caps_calloc(sizeof(*g_atomic16), 1, MALLOC_CAP_DEFAULT); - x16 += atomic_fetch_or(&g_atomic16, 0x1111); - x16 += atomic_fetch_xor(&g_atomic16, 0x3333); - x16 += atomic_fetch_and(&g_atomic16, 0xf0f0); - x16 += atomic_fetch_sub(&g_atomic16, 0x0f0f); - x16 += atomic_fetch_add(&g_atomic16, 0x2222); + x16 += atomic_fetch_or(g_atomic16, 0x1111); + x16 += atomic_fetch_xor(g_atomic16, 0x3333); + x16 += atomic_fetch_and(g_atomic16, 0xf0f0); + x16 += 
atomic_fetch_sub(g_atomic16, 0x0f0f); + x16 += atomic_fetch_add(g_atomic16, 0x2222); #ifndef __clang__ - x16 += __atomic_fetch_nand_2(&g_atomic16, 0xAAAA, 0); + x16 += __atomic_fetch_nand_2(g_atomic16, 0xAAAA, 0); TEST_ASSERT_EQUAL_HEX16(0x9797, x16); - TEST_ASSERT_EQUAL_HEX16(0xDDDD, g_atomic16); + TEST_ASSERT_EQUAL_HEX16(0xDDDD, *g_atomic16); #else TEST_ASSERT_EQUAL_HEX16(0x6464, x16); - TEST_ASSERT_EQUAL_HEX16(0x3333, g_atomic16); // calls atomic_load + TEST_ASSERT_EQUAL_HEX16(0x3333, *g_atomic16); // calls atomic_load #endif + free(g_atomic16); } TEST_CASE("stdatomic - test_8bit_atomics", "[newlib_stdatomic]") { unsigned int x8 = 0; - g_atomic8 = 0; + g_atomic8 = heap_caps_calloc(sizeof(*g_atomic8), 1, MALLOC_CAP_DEFAULT); - x8 += atomic_fetch_or(&g_atomic8, 0x11); - x8 += atomic_fetch_xor(&g_atomic8, 0x33); - x8 += atomic_fetch_and(&g_atomic8, 0xf0); - x8 += atomic_fetch_sub(&g_atomic8, 0x0f); - x8 += atomic_fetch_add(&g_atomic8, 0x22); + x8 += atomic_fetch_or(g_atomic8, 0x11); + x8 += atomic_fetch_xor(g_atomic8, 0x33); + x8 += atomic_fetch_and(g_atomic8, 0xf0); + x8 += atomic_fetch_sub(g_atomic8, 0x0f); + x8 += atomic_fetch_add(g_atomic8, 0x22); #ifndef __clang__ - x8 += __atomic_fetch_nand_1(&g_atomic8, 0xAA, 0); + x8 += __atomic_fetch_nand_1(g_atomic8, 0xAA, 0); TEST_ASSERT_EQUAL_HEX8(0x97, x8); - TEST_ASSERT_EQUAL_HEX8(0xDD, g_atomic8); + TEST_ASSERT_EQUAL_HEX8(0xDD, *g_atomic8); #else TEST_ASSERT_EQUAL_HEX8(0x64, x8); - TEST_ASSERT_EQUAL_HEX8(0x33, g_atomic8); // calls atomic_load + TEST_ASSERT_EQUAL_HEX8(0x33, *g_atomic8); // calls atomic_load #endif + free(g_atomic8); } #ifndef __clang__ TEST_CASE("stdatomic - test_64bit_atomics", "[newlib_stdatomic]") { unsigned long long x64 = 0; - g_atomic64 = 0; // calls atomic_store + g_atomic64 = heap_caps_calloc(sizeof(*g_atomic64), 1, MALLOC_CAP_DEFAULT); - x64 += __atomic_or_fetch_8(&g_atomic64, 0x1111111111111111ULL, 0); - x64 += __atomic_xor_fetch_8(&g_atomic64, 0x3333333333333333ULL, 0); - x64 += 
__atomic_and_fetch_8(&g_atomic64, 0xf0f0f0f0f0f0f0f0ULL, 0); - x64 += __atomic_sub_fetch_8(&g_atomic64, 0x0f0f0f0f0f0f0f0fULL, 0); - x64 += __atomic_add_fetch_8(&g_atomic64, 0x2222222222222222ULL, 0); - x64 += __atomic_nand_fetch_8(&g_atomic64, 0xAAAAAAAAAAAAAAAAULL, 0); + x64 += __atomic_or_fetch_8(g_atomic64, 0x1111111111111111ULL, 0); + x64 += __atomic_xor_fetch_8(g_atomic64, 0x3333333333333333ULL, 0); + x64 += __atomic_and_fetch_8(g_atomic64, 0xf0f0f0f0f0f0f0f0ULL, 0); + x64 += __atomic_sub_fetch_8(g_atomic64, 0x0f0f0f0f0f0f0f0fULL, 0); + x64 += __atomic_add_fetch_8(g_atomic64, 0x2222222222222222ULL, 0); + x64 += __atomic_nand_fetch_8(g_atomic64, 0xAAAAAAAAAAAAAAAAULL, 0); TEST_ASSERT_EQUAL_HEX64(0x7575757575757574ULL, x64); - TEST_ASSERT_EQUAL_HEX64(0xDDDDDDDDDDDDDDDDULL, g_atomic64); // calls atomic_load + TEST_ASSERT_EQUAL_HEX64(0xDDDDDDDDDDDDDDDDULL, *g_atomic64); // calls atomic_load + free(g_atomic64); } TEST_CASE("stdatomic - test_32bit_atomics", "[newlib_stdatomic]") { unsigned int x32 = 0; - g_atomic32 = 0; + g_atomic32 = heap_caps_calloc(sizeof(*g_atomic32), 1, MALLOC_CAP_DEFAULT); - x32 += __atomic_or_fetch_4(&g_atomic32, 0x11111111U, 0); - x32 += __atomic_xor_fetch_4(&g_atomic32, 0x33333333U, 0); - x32 += __atomic_and_fetch_4(&g_atomic32, 0xf0f0f0f0U, 0); - x32 += __atomic_sub_fetch_4(&g_atomic32, 0x0f0f0f0fU, 0); - x32 += __atomic_add_fetch_4(&g_atomic32, 0x22222222U, 0); - x32 += __atomic_nand_fetch_4(&g_atomic32, 0xAAAAAAAAU, 0); + x32 += __atomic_or_fetch_4(g_atomic32, 0x11111111U, 0); + x32 += __atomic_xor_fetch_4(g_atomic32, 0x33333333U, 0); + x32 += __atomic_and_fetch_4(g_atomic32, 0xf0f0f0f0U, 0); + x32 += __atomic_sub_fetch_4(g_atomic32, 0x0f0f0f0fU, 0); + x32 += __atomic_add_fetch_4(g_atomic32, 0x22222222U, 0); + x32 += __atomic_nand_fetch_4(g_atomic32, 0xAAAAAAAAU, 0); TEST_ASSERT_EQUAL_HEX32(0x75757574U, x32); - TEST_ASSERT_EQUAL_HEX32(0xDDDDDDDDU, g_atomic32); + TEST_ASSERT_EQUAL_HEX32(0xDDDDDDDDU, *g_atomic32); + free(g_atomic32); } 
TEST_CASE("stdatomic - test_16bit_atomics", "[newlib_stdatomic]") { unsigned int x16 = 0; - g_atomic16 = 0; + g_atomic16 = heap_caps_calloc(sizeof(*g_atomic16), 1, MALLOC_CAP_DEFAULT); - x16 += __atomic_or_fetch_2(&g_atomic16, 0x1111, 0); - x16 += __atomic_xor_fetch_2(&g_atomic16, 0x3333, 0); - x16 += __atomic_and_fetch_2(&g_atomic16, 0xf0f0, 0); - x16 += __atomic_sub_fetch_2(&g_atomic16, 0x0f0f, 0); - x16 += __atomic_add_fetch_2(&g_atomic16, 0x2222, 0); - x16 += __atomic_nand_fetch_2(&g_atomic16, 0xAAAA, 0); + x16 += __atomic_or_fetch_2(g_atomic16, 0x1111, 0); + x16 += __atomic_xor_fetch_2(g_atomic16, 0x3333, 0); + x16 += __atomic_and_fetch_2(g_atomic16, 0xf0f0, 0); + x16 += __atomic_sub_fetch_2(g_atomic16, 0x0f0f, 0); + x16 += __atomic_add_fetch_2(g_atomic16, 0x2222, 0); + x16 += __atomic_nand_fetch_2(g_atomic16, 0xAAAA, 0); TEST_ASSERT_EQUAL_HEX16(0x7574, x16); - TEST_ASSERT_EQUAL_HEX16(0xDDDD, g_atomic16); + TEST_ASSERT_EQUAL_HEX16(0xDDDD, *g_atomic16); + free(g_atomic16); } TEST_CASE("stdatomic - test_8bit_atomics", "[newlib_stdatomic]") { unsigned int x8 = 0; - g_atomic8 = 0; + g_atomic8 = heap_caps_calloc(sizeof(*g_atomic8), 1, MALLOC_CAP_DEFAULT); - x8 += __atomic_or_fetch_1(&g_atomic8, 0x11, 0); - x8 += __atomic_xor_fetch_1(&g_atomic8, 0x33, 0); - x8 += __atomic_and_fetch_1(&g_atomic8, 0xf0, 0); - x8 += __atomic_sub_fetch_1(&g_atomic8, 0x0f, 0); - x8 += __atomic_add_fetch_1(&g_atomic8, 0x22, 0); - x8 += __atomic_nand_fetch_1(&g_atomic8, 0xAA, 0); + x8 += __atomic_or_fetch_1(g_atomic8, 0x11, 0); + x8 += __atomic_xor_fetch_1(g_atomic8, 0x33, 0); + x8 += __atomic_and_fetch_1(g_atomic8, 0xf0, 0); + x8 += __atomic_sub_fetch_1(g_atomic8, 0x0f, 0); + x8 += __atomic_add_fetch_1(g_atomic8, 0x22, 0); + x8 += __atomic_nand_fetch_1(g_atomic8, 0xAA, 0); TEST_ASSERT_EQUAL_HEX8(0x74, x8); - TEST_ASSERT_EQUAL_HEX8(0xDD, g_atomic8); + TEST_ASSERT_EQUAL_HEX8(0xDD, *g_atomic8); + free(g_atomic8); } #endif // #ifndef __clang__ -#define TEST_EXCLUSION(n) TEST_CASE("stdatomic - 
test_" #n "bit_exclusion", "[newlib_stdatomic]") \ -{ \ - g_atomic ## n = 0; \ - pthread_t thread1; \ - pthread_t thread2; \ - esp_pthread_cfg_t cfg = esp_pthread_get_default_config(); \ - cfg.pin_to_core = (xPortGetCoreID() + 1) % CONFIG_FREERTOS_NUMBER_OF_CORES; \ - esp_pthread_set_cfg(&cfg); \ - pthread_create(&thread1, NULL, exclusion_task_ ## n, (void*) 1); \ - cfg.pin_to_core = xPortGetCoreID(); \ - esp_pthread_set_cfg(&cfg); \ - pthread_create(&thread2, NULL, exclusion_task_ ## n, (void*) 0); \ - pthread_join(thread1, NULL); \ - pthread_join(thread2, NULL); \ - TEST_ASSERT_EQUAL(0, g_atomic ## n); \ +#define TEST_EXCLUSION(n, POSTFIX) TEST_CASE("stdatomic - test_" #n #POSTFIX "bit_exclusion", "[newlib_stdatomic]") \ +{ \ + g_atomic ## n = heap_caps_calloc(sizeof(*g_atomic ## n), 1, MALLOC_CAP_ATOMIC); \ + pthread_t thread1; \ + pthread_t thread2; \ + esp_pthread_cfg_t cfg = esp_pthread_get_default_config(); \ + cfg.pin_to_core = (xPortGetCoreID() + 1) % portNUM_PROCESSORS; \ + esp_pthread_set_cfg(&cfg); \ + pthread_create(&thread1, NULL, exclusion_task_ ##n ##POSTFIX, (void*) 1); \ + cfg.pin_to_core = xPortGetCoreID(); \ + esp_pthread_set_cfg(&cfg); \ + pthread_create(&thread2, NULL, exclusion_task_ ##n ##POSTFIX, (void*) 0); \ + pthread_join(thread1, NULL); \ + pthread_join(thread2, NULL); \ + TEST_ASSERT_EQUAL(0, (*g_atomic ## n)); \ + free(g_atomic ## n); \ } -#define TEST_EXCLUSION_TASK(n) static void* exclusion_task_ ## n(void *varg) \ -{ \ - int arg = (int) varg; \ - for (int i = 0; i < 1000000; ++i) { \ - if (arg == 0) { \ - atomic_fetch_add(&g_atomic ## n, 1ULL); \ - } else { \ - atomic_fetch_sub(&g_atomic ## n, 1ULL); \ - } \ - } \ - return NULL; \ +#define TEST_EXCLUSION_TASK(n, POSTFIX) static void* exclusion_task_ ##n ##POSTFIX(void *varg) \ +{ \ + int arg = (int) varg; \ + for (int i = 0; i < 1000000; ++i) { \ + if (arg == 0) { \ + atomic_fetch_add(g_atomic ## n, 1ULL); \ + } else { \ + atomic_fetch_sub(g_atomic ## n, 1ULL); \ + } \ + } \ + 
return NULL; \ } -TEST_EXCLUSION_TASK(64) -TEST_EXCLUSION(64) +TEST_EXCLUSION_TASK(64, _default_mem) +TEST_EXCLUSION(64, _default_mem) -TEST_EXCLUSION_TASK(32) -TEST_EXCLUSION(32) +TEST_EXCLUSION_TASK(32, _default_mem) +TEST_EXCLUSION(32, _default_mem) -TEST_EXCLUSION_TASK(16) -TEST_EXCLUSION(16) +TEST_EXCLUSION_TASK(16, _default_mem) +TEST_EXCLUSION(16, _default_mem) -TEST_EXCLUSION_TASK(8) -TEST_EXCLUSION(8) +TEST_EXCLUSION_TASK(8, _default_mem) +TEST_EXCLUSION(8, _default_mem) #define ITER_COUNT 20000 #define TEST_RACE_OPERATION(ASSERT_SUFFIX, NAME, LHSTYPE, PRE, POST, INIT, FINAL) \ \ -static _Atomic LHSTYPE var_##NAME; \ +static _Atomic LHSTYPE *var_##NAME; \ \ static void *test_thread_##NAME (void *arg) \ { \ for (int i = 0; i < ITER_COUNT; i++) \ { \ - PRE var_##NAME POST; \ + PRE (*var_##NAME) POST; \ } \ return NULL; \ } \ @@ -233,9 +247,10 @@ TEST_CASE("stdatomic - test_" #NAME, "[newlib_stdatomic]") { \ pthread_t thread_id1; \ pthread_t thread_id2; \ - var_##NAME = (INIT); \ + var_##NAME = heap_caps_calloc(sizeof(*var_##NAME), 1, MALLOC_CAP_ATOMIC); \ + *var_##NAME = (INIT); \ esp_pthread_cfg_t cfg = esp_pthread_get_default_config(); \ - cfg.pin_to_core = (xPortGetCoreID() + 1) % CONFIG_FREERTOS_NUMBER_OF_CORES; \ + cfg.pin_to_core = (xPortGetCoreID() + 1) % CONFIG_FREERTOS_NUMBER_OF_CORES; \ esp_pthread_set_cfg(&cfg); \ pthread_create (&thread_id1, NULL, test_thread_##NAME, NULL); \ cfg.pin_to_core = xPortGetCoreID(); \ @@ -243,7 +258,8 @@ TEST_CASE("stdatomic - test_" #NAME, "[newlib_stdatomic]") pthread_create (&thread_id2, NULL, test_thread_##NAME, NULL); \ pthread_join (thread_id1, NULL); \ pthread_join (thread_id2, NULL); \ - TEST_ASSERT_EQUAL##ASSERT_SUFFIX((FINAL), var_##NAME); \ + TEST_ASSERT_EQUAL##ASSERT_SUFFIX((FINAL), (*var_##NAME)); \ + free(var_##NAME); \ } // Note that the assert at the end is doing an excat bitwise comparison. 
@@ -251,13 +267,13 @@ TEST_CASE("stdatomic - test_" #NAME, "[newlib_stdatomic]") // no corresponding Unity assert macro for long double. USE THIS WITH CARE! #define TEST_RACE_OPERATION_LONG_DOUBLE(NAME, LHSTYPE, PRE, POST, INIT, FINAL) \ \ -static _Atomic LHSTYPE var_##NAME; \ +static _Atomic LHSTYPE *var_##NAME; \ \ static void *test_thread_##NAME (void *arg) \ { \ for (int i = 0; i < ITER_COUNT; i++) \ { \ - PRE var_##NAME POST; \ + PRE (*var_##NAME) POST; \ } \ return NULL; \ } \ @@ -266,10 +282,11 @@ TEST_CASE("stdatomic - test_" #NAME, "[newlib_stdatomic]") \ { \ pthread_t thread_id1; \ pthread_t thread_id2; \ - var_##NAME = (INIT); \ + var_##NAME = heap_caps_calloc(sizeof(*var_##NAME), 1, MALLOC_CAP_ATOMIC); \ + *var_##NAME = (INIT); \ const LHSTYPE EXPECTED = (FINAL); \ esp_pthread_cfg_t cfg = esp_pthread_get_default_config(); \ - cfg.pin_to_core = (xPortGetCoreID() + 1) % CONFIG_FREERTOS_NUMBER_OF_CORES; \ + cfg.pin_to_core = (xPortGetCoreID() + 1) % CONFIG_FREERTOS_NUMBER_OF_CORES; \ esp_pthread_set_cfg(&cfg); \ pthread_create (&thread_id1, NULL, test_thread_##NAME, NULL); \ cfg.pin_to_core = xPortGetCoreID(); \ @@ -277,7 +294,8 @@ TEST_CASE("stdatomic - test_" #NAME, "[newlib_stdatomic]") \ pthread_create (&thread_id2, NULL, test_thread_##NAME, NULL); \ pthread_join (thread_id1, NULL); \ pthread_join (thread_id2, NULL); \ - TEST_ASSERT(EXPECTED == var_##NAME); \ + TEST_ASSERT(EXPECTED == (*var_##NAME)); \ + free(var_##NAME); \ } TEST_RACE_OPERATION(, uint8_add, uint8_t,, += 1, 0, (uint8_t)(2 * ITER_COUNT)) @@ -352,3 +370,93 @@ TEST_RACE_OPERATION_LONG_DOUBLE(long_double_preinc, long double, ++,, 0, (2 * IT TEST_RACE_OPERATION_LONG_DOUBLE(complex_long_double_sub, _Complex long double,, -= 1, 0, -(2 * ITER_COUNT)) TEST_RACE_OPERATION_LONG_DOUBLE(long_double_postdec, long double,, --, 0, -(2 * ITER_COUNT)) TEST_RACE_OPERATION_LONG_DOUBLE(long_double_predec, long double, --,, 0, -(2 * ITER_COUNT)) + +#if CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND +#undef 
MALLOC_CAP_ATOMIC +#define MALLOC_CAP_ATOMIC MALLOC_CAP_SPIRAM + +TEST_EXCLUSION_TASK(64, _ext_mem) +TEST_EXCLUSION(64, _ext_mem) + +TEST_EXCLUSION_TASK(32, _ext_mem) +TEST_EXCLUSION(32, _ext_mem) + +TEST_EXCLUSION_TASK(16, _ext_mem) +TEST_EXCLUSION(16, _ext_mem) + +TEST_EXCLUSION_TASK(8, _ext_mem) +TEST_EXCLUSION(8, _ext_mem) + +TEST_RACE_OPERATION(, uint8_add_ext, uint8_t,, += 1, 0, (uint8_t)(2 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint8_add_3_ext, uint8_t,, += 3, 0, (uint8_t)(6 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint8_postinc_ext, uint8_t,, ++, 0, (uint8_t)(2 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint8_preinc_ext, uint8_t, ++,, 0, (uint8_t)(2 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint8_sub_ext, uint8_t,, -= 1, 0, (uint8_t) - (2 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint8_sub_3_ext, uint8_t,, -= 3, 0, (uint8_t) - (6 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint8_postdec_ext, uint8_t,, --, 0, (uint8_t) - (2 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint8_predec_ext, uint8_t, --,, 0, (uint8_t) - (2 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint8_mul_ext, uint8_t,, *= 3, 1, (uint8_t) 0x1) + +TEST_RACE_OPERATION(, uint16_add_ext, uint16_t,, += 1, 0, (uint16_t)(2 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint16_add_3_ext, uint16_t,, += 3, 0, (uint16_t)(6 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint16_postinc_ext, uint16_t,, ++, 0, (uint16_t)(2 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint16_preinc_ext, uint16_t, ++,, 0, (uint16_t)(2 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint16_sub_ext, uint16_t,, -= 1, 0, (uint16_t) - (2 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint16_sub_3_ext, uint16_t,, -= 3, 0, (uint16_t) - (6 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint16_postdec_ext, uint16_t,, --, 0, (uint16_t) - (2 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint16_predec_ext, uint16_t, --,, 0, (uint16_t) - (2 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint16_mul_ext, uint16_t,, *= 3, 1, (uint16_t) 0x6D01) + +TEST_RACE_OPERATION(, uint32_add_ext, uint32_t,, += 1, 0, (uint32_t)(2 * ITER_COUNT)) 
+TEST_RACE_OPERATION(, uint32_add_3_ext, uint32_t,, += 3, 0, (uint32_t)(6 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint32_postinc_ext, uint32_t,, ++, 0, (uint32_t)(2 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint32_preinc_ext, uint32_t, ++,, 0, (uint32_t)(2 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint32_sub_ext, uint32_t,, -= 1, 0, (uint32_t) - (2 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint32_sub_3_ext, uint32_t,, -= 3, 0, (uint32_t) - (6 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint32_postdec_ext, uint32_t,, --, 0, (uint32_t) - (2 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint32_predec_ext, uint32_t, --,, 0, (uint32_t) - (2 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint32_mul_ext, uint32_t,, *= 3, 1, (uint32_t) 0xC1E36D01U) + +TEST_RACE_OPERATION(, uint64_add_ext, uint64_t,, += 1, 0, (uint64_t)(2 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint64_add_3_ext, uint64_t,, += 3, 0, (uint64_t)(6 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint64_add_neg_ext, uint64_t,, += 1, -10000, (uint64_t)(2 * ITER_COUNT - 10000)) +TEST_RACE_OPERATION(, uint64_postinc_ext, uint64_t,, ++, 0, (uint64_t)(2 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint64_postinc_neg_ext, uint64_t,, ++, -10000, (uint64_t)(2 * ITER_COUNT - 10000)) +TEST_RACE_OPERATION(, uint64_preinc_ext, uint64_t, ++,, 0, (uint64_t)(2 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint64_preinc_neg_ext, uint64_t, ++,, -10000, (uint64_t)(2 * ITER_COUNT - 10000)) +TEST_RACE_OPERATION(, uint64_sub_ext, uint64_t,, -= 1, 0, (uint64_t) - (2 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint64_sub_3_ext, uint64_t,, -= 3, 0, (uint64_t) - (6 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint64_sub_neg_ext, uint64_t,, -= 1, 10000, (uint64_t)((-2 * ITER_COUNT) + 10000)) +TEST_RACE_OPERATION(, uint64_postdec_ext, uint64_t,, --, 0, (uint64_t) - (2 * ITER_COUNT)) +TEST_RACE_OPERATION(, uint64_postdec_neg_ext, uint64_t,, --, 10000, (uint64_t)((-2 * ITER_COUNT) + 10000)) +TEST_RACE_OPERATION(, uint64_predec_ext, uint64_t, --,, 0, (uint64_t) - (2 * ITER_COUNT)) +TEST_RACE_OPERATION(, 
uint64_predec_neg_ext, uint64_t, --,, 10000, (uint64_t)((-2 * ITER_COUNT) + 10000)) +TEST_RACE_OPERATION(, uint64_mul_ext, uint64_t,, *= 3, 1, (uint64_t) 0x988EE974C1E36D01ULL) + +TEST_RACE_OPERATION(_FLOAT, float_add_ext, float,, += 1, 0, (2 * ITER_COUNT)) +TEST_RACE_OPERATION(_FLOAT, complex_float_add_ext, _Complex float,, += 1, 0, (2 * ITER_COUNT)) +TEST_RACE_OPERATION(_FLOAT, float_postinc_ext, float,, ++, 0, (2 * ITER_COUNT)) +TEST_RACE_OPERATION(_FLOAT, float_preinc_ext, float, ++,, 0, (2 * ITER_COUNT)) +TEST_RACE_OPERATION(_FLOAT, float_sub_ext, float,, -= 1, 0, -(2 * ITER_COUNT)) +TEST_RACE_OPERATION(_FLOAT, complex_float_sub_ext, _Complex float,, -= 1, 0, -(2 * ITER_COUNT)) +TEST_RACE_OPERATION(_FLOAT, float_postdec_ext, float,, --, 0, -(2 * ITER_COUNT)) +TEST_RACE_OPERATION(_FLOAT, float_predec_ext, float, --,, 0, -(2 * ITER_COUNT)) + +TEST_RACE_OPERATION(_DOUBLE, double_add_ext, double,, += 1, 0, (2 * ITER_COUNT)) +TEST_RACE_OPERATION(_DOUBLE, complex_double_add_ext, _Complex double,, += 1, 0, (2 * ITER_COUNT)) +TEST_RACE_OPERATION(_DOUBLE, double_postinc_ext, double,, ++, 0, (2 * ITER_COUNT)) +TEST_RACE_OPERATION(_DOUBLE, double_preinc_ext, double, ++,, 0, (2 * ITER_COUNT)) +TEST_RACE_OPERATION(_DOUBLE, double_sub_ext, double,, -= 1, 0, -(2 * ITER_COUNT)) +TEST_RACE_OPERATION(_DOUBLE, complex_double_sub_ext, _Complex double,, -= 1, 0, -(2 * ITER_COUNT)) +TEST_RACE_OPERATION(_DOUBLE, double_postdec_ext, double,, --, 0, -(2 * ITER_COUNT)) +TEST_RACE_OPERATION(_DOUBLE, double_predec_ext, double, --,, 0, -(2 * ITER_COUNT)) + +TEST_RACE_OPERATION_LONG_DOUBLE(long_double_add_ext, long double,, += 1, 0, (2 * ITER_COUNT)) +TEST_RACE_OPERATION_LONG_DOUBLE(complex_long_double_add_ext, _Complex long double,, += 1, 0, (2 * ITER_COUNT)) +TEST_RACE_OPERATION_LONG_DOUBLE(long_double_postinc_ext, long double,, ++, 0, (2 * ITER_COUNT)) +TEST_RACE_OPERATION_LONG_DOUBLE(long_double_sub_ext, long double,, -= 1, 0, -(2 * ITER_COUNT)) 
+TEST_RACE_OPERATION_LONG_DOUBLE(long_double_preinc_ext, long double, ++,, 0, (2 * ITER_COUNT)) +TEST_RACE_OPERATION_LONG_DOUBLE(complex_long_double_sub_ext, _Complex long double,, -= 1, 0, -(2 * ITER_COUNT)) +TEST_RACE_OPERATION_LONG_DOUBLE(long_double_postdec_ext, long double,, --, 0, -(2 * ITER_COUNT)) +TEST_RACE_OPERATION_LONG_DOUBLE(long_double_predec_ext, long double, --,, 0, -(2 * ITER_COUNT)) +#endif // CONFIG_STDATOMIC_S32C1I_SPIRAM_WORKAROUND diff --git a/components/vfs/include/esp_vfs.h b/components/vfs/include/esp_vfs.h index f7cca42e71..3fc2131b1b 100644 --- a/components/vfs/include/esp_vfs.h +++ b/components/vfs/include/esp_vfs.h @@ -21,7 +21,11 @@ #include #include #include +#ifdef __clang__ // TODO LLVM-330 #include <sys/dirent.h> +#else +#include <dirent.h> +#endif #include #include "sdkconfig.h" diff --git a/components/vfs/test_apps/main/test_vfs_paths.c b/components/vfs/test_apps/main/test_vfs_paths.c index ed9ce374c7..48a41089e2 100644 --- a/components/vfs/test_apps/main/test_vfs_paths.c +++ b/components/vfs/test_apps/main/test_vfs_paths.c @@ -10,7 +10,11 @@ #include #include #include +#ifdef __clang__ // TODO LLVM-330 #include <sys/dirent.h> +#else +#include <dirent.h> +#endif #include "esp_vfs.h" #include "unity.h" #include "esp_log.h" diff --git a/docs/en/migration-guides/release-5.x/5.3/gcc.rst b/docs/en/migration-guides/release-5.x/5.3/gcc.rst new file mode 100644 index 0000000000..a700db1d69 --- /dev/null +++ b/docs/en/migration-guides/release-5.x/5.3/gcc.rst @@ -0,0 +1,40 @@ +GCC +*** + +:link_to_translation:`zh_CN:[中文]` + +Common Porting Problems and Fixes +================================= + +``sys/dirent.h`` No Longer Includes Function Prototypes +------------------------------------------------------- + +Issue +^^^^^^ + +Compilation errors may occur in code that previously worked with the old toolchain. For example: + +.. code-block:: c + + #include <sys/dirent.h> + /* .... */ + DIR* dir = opendir("test_dir"); + /* .... 
*/ + /** + * Compile error: + * test.c: In function 'test_opendir': + * test.c:100:16: error: implicit declaration of function 'opendir' [-Werror=implicit-function-declaration] + * 100 | DIR* dir = opendir(path); + * | ^~~~~~~ + */ + +Solution +^^^^^^^^^ + +To resolve this issue, the correct header must be included. Refactor the code like this: + +.. code-block:: c + + #include <dirent.h> + /* .... */ + DIR* dir = opendir("test_dir"); diff --git a/docs/en/migration-guides/release-5.x/5.3/index.rst b/docs/en/migration-guides/release-5.x/5.3/index.rst index 06f5ab107b..6ee094665a 100644 --- a/docs/en/migration-guides/release-5.x/5.3/index.rst +++ b/docs/en/migration-guides/release-5.x/5.3/index.rst @@ -7,6 +7,7 @@ Migration from 5.2 to 5.3 :maxdepth: 1 bluetooth-low-energy + gcc peripherals security storage diff --git a/docs/zh_CN/migration-guides/release-5.x/5.3/gcc.rst b/docs/zh_CN/migration-guides/release-5.x/5.3/gcc.rst new file mode 100644 index 0000000000..d2312c21ef --- /dev/null +++ b/docs/zh_CN/migration-guides/release-5.x/5.3/gcc.rst @@ -0,0 +1,4 @@ +GCC +*** + +.. 
include:: ../../../../en/migration-guides/release-5.x/5.3/gcc.rst diff --git a/docs/zh_CN/migration-guides/release-5.x/5.3/index.rst b/docs/zh_CN/migration-guides/release-5.x/5.3/index.rst index daae13829c..3a60f2cc1d 100644 --- a/docs/zh_CN/migration-guides/release-5.x/5.3/index.rst +++ b/docs/zh_CN/migration-guides/release-5.x/5.3/index.rst @@ -7,6 +7,7 @@ :maxdepth: 1 bluetooth-low-energy + gcc peripherals security storage diff --git a/tools/cmake/toolchain-esp32p4.cmake b/tools/cmake/toolchain-esp32p4.cmake index c768ed0c75..d4b32308f2 100644 --- a/tools/cmake/toolchain-esp32p4.cmake +++ b/tools/cmake/toolchain-esp32p4.cmake @@ -7,14 +7,14 @@ set(CMAKE_CXX_COMPILER riscv32-esp-elf-g++) set(CMAKE_ASM_COMPILER riscv32-esp-elf-gcc) set(_CMAKE_TOOLCHAIN_PREFIX riscv32-esp-elf-) -remove_duplicated_flags("-march=rv32imafc_zicsr_zifencei -mabi=ilp32f ${CMAKE_C_FLAGS}" UNIQ_CMAKE_C_FLAGS) +remove_duplicated_flags("-march=rv32imafc_zicsr_zifencei_xesppie -mabi=ilp32f ${CMAKE_C_FLAGS}" UNIQ_CMAKE_C_FLAGS) set(CMAKE_C_FLAGS "${UNIQ_CMAKE_C_FLAGS}" CACHE STRING "C Compiler Base Flags" FORCE) -remove_duplicated_flags("-march=rv32imafc_zicsr_zifencei -mabi=ilp32f ${CMAKE_CXX_FLAGS}" UNIQ_CMAKE_CXX_FLAGS) +remove_duplicated_flags("-march=rv32imafc_zicsr_zifencei_xesppie -mabi=ilp32f ${CMAKE_CXX_FLAGS}" UNIQ_CMAKE_CXX_FLAGS) set(CMAKE_CXX_FLAGS "${UNIQ_CMAKE_CXX_FLAGS}" CACHE STRING "C++ Compiler Base Flags" FORCE) -remove_duplicated_flags("-march=rv32imafc_zicsr_zifencei -mabi=ilp32f ${CMAKE_ASM_FLAGS}" UNIQ_CMAKE_ASM_FLAGS) +remove_duplicated_flags("-march=rv32imafc_zicsr_zifencei_xesppie -mabi=ilp32f ${CMAKE_ASM_FLAGS}" UNIQ_CMAKE_ASM_FLAGS) set(CMAKE_ASM_FLAGS "${UNIQ_CMAKE_CXX_FLAGS}" CACHE STRING "Asm Compiler Base Flags" FORCE) -remove_duplicated_flags("-nostartfiles -march=rv32imafc_zicsr_zifencei -mabi=ilp32f --specs=nosys.specs \ +remove_duplicated_flags("-nostartfiles -march=rv32imafc_zicsr_zifencei_xesppie -mabi=ilp32f --specs=nosys.specs \ 
${CMAKE_EXE_LINKER_FLAGS}" UNIQ_CMAKE_SAFE_EXE_LINKER_FLAGS) set(CMAKE_EXE_LINKER_FLAGS "${UNIQ_CMAKE_SAFE_EXE_LINKER_FLAGS}" CACHE STRING "Linker Base Flags" FORCE) diff --git a/tools/idf_py_actions/hints.yml b/tools/idf_py_actions/hints.yml index 9cb3dd3845..c47560a2be 100644 --- a/tools/idf_py_actions/hints.yml +++ b/tools/idf_py_actions/hints.yml @@ -422,3 +422,7 @@ re: "-Werror=(xor-used-as-pow|enum-int-mismatch|self-move|dangling-reference)" hint: "The warning(s) '{}' may appear after compiler update above GCC-13\nTo suppress these warnings use 'idf.py menuconfig' to enable configure option 'Compiler options' -> 'Disable new warnings introduced in GCC 13'\nPlease note that this is not a permanent solution, and this option will be removed in a future update of the ESP-IDF.\nIt is strongly recommended to fix all warnings, as they may indicate potential issues!" match_to_output: True + +- + re: "implicit declaration of function '(opendir|readdir|telldir|seekdir|rewinddir|closedir|readdir_r|scandir|alphasort)'" + hint: "Please include <dirent.h> (not <sys/dirent.h>)" diff --git a/tools/tools.json b/tools/tools.json index 71696181ab..83953bd148 100644 --- a/tools/tools.json +++ b/tools/tools.json @@ -180,51 +180,51 @@ "versions": [ { "linux-amd64": { - "sha256": "bae7da23ea8516fb7e42640f4420c4dd1ebfd64189a14fc330d73e173b3a038b", - "size": 112588084, - "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20230928/xtensa-esp-elf-13.2.0_20230928-x86_64-linux-gnu.tar.xz" + "sha256": "4e43e56cd533a39c6b0ccc8b30320b19ce66b0b17e646b53fa84c9bf956b2c83", + "size": 112254280, + "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20240305/xtensa-esp-elf-13.2.0_20240305-x86_64-linux-gnu.tar.xz" }, "linux-arm64": { - "sha256": "faa4755bedafb1c10feaeef01c610803ee9ace088b26d7db90a5ee0816c20f9e", - "size": 104257688, - "url": 
"https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20230928/xtensa-esp-elf-13.2.0_20230928-aarch64-linux-gnu.tar.xz" + "sha256": "06bc30be9d824fa8da507dff228085563baa7f6251e42a14deae0ca0e93ec2eb", + "size": 103677608, + "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20240305/xtensa-esp-elf-13.2.0_20240305-aarch64-linux-gnu.tar.xz" }, "linux-armel": { - "sha256": "38702870453b8d226fbc348ae2288f02cbc6317a3afa89982da6a6ef6866e05a", - "size": 99702488, - "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20230928/xtensa-esp-elf-13.2.0_20230928-arm-linux-gnueabi.tar.xz" + "sha256": "f0ecab5ae0a63abf4e43b1f3873d89181d1772748f028653f5e81264fb451e61", + "size": 106290920, + "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20240305/xtensa-esp-elf-13.2.0_20240305-arm-linux-gnueabi.tar.xz" }, "linux-armhf": { - "sha256": "aeb872fe0f7f342ed1a42e02dad15e1fa255aec852e88bb8ff2725380ddde501", - "size": 104316996, - "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20230928/xtensa-esp-elf-13.2.0_20230928-arm-linux-gnueabihf.tar.xz" + "sha256": "15ed342e9d5c647dce8c688a4796bf8b0b9e44283f9ebe99e11aba63cc3d85b2", + "size": 102905548, + "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20240305/xtensa-esp-elf-13.2.0_20240305-arm-linux-gnueabihf.tar.xz" }, "linux-i686": { - "sha256": "fc25701749f365af5f270221e0e8439ce7fcc26eeac145a91cfe02f3100de2d6", - "size": 113231244, - "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20230928/xtensa-esp-elf-13.2.0_20230928-i586-linux-gnu.tar.xz" + "sha256": "73fe99abc7d7a33eeb13473902e7025f0b41626891cb358a4dc9bf02b2b53931", + "size": 117286888, + "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20240305/xtensa-esp-elf-13.2.0_20240305-i586-linux-gnu.tar.xz" }, "macos": { - "sha256": 
"b9b7a6d1dc4ea065bf6763fa904729e1c808d6dfbf1dfabf12852e2929251ee9", - "size": 115211408, - "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20230928/xtensa-esp-elf-13.2.0_20230928-x86_64-apple-darwin.tar.xz" + "sha256": "5bf2b5ececdf92169e5a084d2485b8d0d60480ce130a3035dc407f01e4e7820d", + "size": 115090676, + "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20240305/xtensa-esp-elf-13.2.0_20240305-x86_64-apple-darwin.tar.xz" }, "macos-arm64": { - "sha256": "687243e5cbefb7cf05603effbdd6fde5769f94daff7e519f5bbe61f43c4c0ef6", - "size": 100098880, - "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20230928/xtensa-esp-elf-13.2.0_20230928-aarch64-apple-darwin.tar.xz" + "sha256": "e2bf7886bb39ad6558e1f46160fae887705f903ea8b77cd28bbf77093d3ca286", + "size": 100350656, + "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20240305/xtensa-esp-elf-13.2.0_20240305-aarch64-apple-darwin.tar.xz" }, - "name": "esp-13.2.0_20230928", + "name": "esp-13.2.0_20240305", "status": "recommended", "win32": { - "sha256": "7a2822ef554be175bbe5c67c2010a6dd29aec6221bdb5ed8970f164e2744714a", - "size": 266511200, - "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20230928/xtensa-esp-elf-13.2.0_20230928-i686-w64-mingw32.zip" + "sha256": "79ea0dbd314012f199fc9a9bbbcc4c11473ea87f81be4c1b4c60328d3d73b9f8", + "size": 266666180, + "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20240305/xtensa-esp-elf-13.2.0_20240305-i686-w64-mingw32.zip" }, "win64": { - "sha256": "80e3271b7c9b64694ba8494b90054da2efce328f7d4e5f5f625d08808372fa64", - "size": 270164567, - "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20230928/xtensa-esp-elf-13.2.0_20230928-x86_64-w64-mingw32.zip" + "sha256": "a80879c35b7f82ce80332ef0b68b0c7d245bafd9c98a35c45965850f40faf5ba", + "size": 270417276, + "url": 
"https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20240305/xtensa-esp-elf-13.2.0_20240305-x86_64-w64-mingw32.zip" } } ] @@ -328,51 +328,51 @@ "versions": [ { "linux-amd64": { - "sha256": "782feefe354500c5f968e8c91959651be3bdbbd7ae8a17affcee2b1bffcaad89", - "size": 143575940, - "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20230928/riscv32-esp-elf-13.2.0_20230928-x86_64-linux-gnu.tar.xz" + "sha256": "2bd71171ddb801e59c85ecbea3b89d6f707627d6c11e501cae43ca7c0db73eda", + "size": 145977452, + "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20240305/riscv32-esp-elf-13.2.0_20240305-x86_64-linux-gnu.tar.xz" }, "linux-arm64": { - "sha256": "6ee4b30dff18bdea9ada79399c0c81ba82b6ed99a565746a7d5040c7e62566b3", - "size": 142577236, - "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20230928/riscv32-esp-elf-13.2.0_20230928-aarch64-linux-gnu.tar.xz" + "sha256": "806ccd08333a96ae73507625a1762f7ac7a8c82f193602cafb835c4d7f5678ab", + "size": 144233996, + "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20240305/riscv32-esp-elf-13.2.0_20240305-aarch64-linux-gnu.tar.xz" }, "linux-armel": { - "sha256": "3231ca04ea4f53dc602ae1cc728151a16c5d424063ac69542b8bf6cde10e7755", - "size": 135201840, - "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20230928/riscv32-esp-elf-13.2.0_20230928-arm-linux-gnueabi.tar.xz" + "sha256": "312f404e86dde7d22f5c4b7216ea386dbf8d5f93dea50f689471cedc2e457f91", + "size": 136753128, + "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20240305/riscv32-esp-elf-13.2.0_20240305-arm-linux-gnueabi.tar.xz" }, "linux-armhf": { - "sha256": "eb43ac9dcad8fe79bdf4b8d29cf4751d41cbcb1fadd831f2779a84f4fb1c5ca0", - "size": 143656008, - "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20230928/riscv32-esp-elf-13.2.0_20230928-arm-linux-gnueabihf.tar.xz" 
+ "sha256": "a546224d8dc33c6a00a35b5856261232ce9218953e2ee8bcacdcc899d0c19591", + "size": 145140184, + "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20240305/riscv32-esp-elf-13.2.0_20240305-arm-linux-gnueabihf.tar.xz" }, "linux-i686": { - "sha256": "51421bd181392472fee8242d53dfa6305a67b21e1073f0f9f69d215987da9684", - "size": 151339344, - "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20230928/riscv32-esp-elf-13.2.0_20230928-i586-linux-gnu.tar.xz" + "sha256": "09d0ee10e1e617a93f6597c279bf9388b6384790a45b1d87451a40d1ff4e5f71", + "size": 156611372, + "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20240305/riscv32-esp-elf-13.2.0_20240305-i586-linux-gnu.tar.xz" }, "macos": { - "sha256": "ce40c75a1ae0e4b986daeeff321aaa7b57f74eb4bcfd011f1252fd6932bbb90f", - "size": 153157496, - "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20230928/riscv32-esp-elf-13.2.0_20230928-x86_64-apple-darwin.tar.xz" + "sha256": "dfb4a2f46c66a9246a25e3c34b19a91c7a3f33a44721cd61ec01d442d5344193", + "size": 152864248, + "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20240305/riscv32-esp-elf-13.2.0_20240305-x86_64-apple-darwin.tar.xz" }, "macos-arm64": { - "sha256": "c2f989370c101ae3f890aa71e6f57064f068f7c4a1d9f26445894c83f919624f", - "size": 135811812, - "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20230928/riscv32-esp-elf-13.2.0_20230928-aarch64-apple-darwin.tar.xz" + "sha256": "1e48833974a8e9ad2a0ac287ad244b825392d623edaf269bd66f4d8a215a0ef8", + "size": 136622828, + "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20240305/riscv32-esp-elf-13.2.0_20240305-aarch64-apple-darwin.tar.xz" }, - "name": "esp-13.2.0_20230928", + "name": "esp-13.2.0_20240305", "status": "recommended", "win32": { - "sha256": "37737463826486c9c11e74a140b1b50195dc868e547c8ee557950c811741197c", - "size": 
362812332, - "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20230928/riscv32-esp-elf-13.2.0_20230928-i686-w64-mingw32.zip" + "sha256": "61492d38a0ceaae7b4784820810f9717454a0b4413a9f20ced595122eae3111f", + "size": 362677865, + "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20240305/riscv32-esp-elf-13.2.0_20240305-i686-w64-mingw32.zip" }, "win64": { - "sha256": "1300a54505dc964fa9104482737152e669f4d880efc1d54057378d9e6910ae1e", - "size": 366053112, - "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20230928/riscv32-esp-elf-13.2.0_20230928-x86_64-w64-mingw32.zip" + "sha256": "e1e63f1926b9c643bc1de72e30cc79fc2079ad169546669e55836efbcc559d11", + "size": 366029146, + "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-13.2.0_20240305/riscv32-esp-elf-13.2.0_20240305-x86_64-w64-mingw32.zip" } } ]