newlib: add _RETARGETABLE_LOCKING support

This adds support for the retargetable locking implementation in
newlib 3. This feature will be enabled in the future toolchain builds.
With the present version of the toolchain, this code doesn't get used.

When _RETARGETABLE_LOCKING gets enabled, newlib locking implementation
will be modified as follows:

- Legacy ESP-specific _lock_xxx functions are preserved. This is done
  because ROM copies of newlib in ESP32 and ESP32-S2 rely on these
  functions through the function pointer table. Also there is some
  code in IDF which still uses these locking functions.

- New __retarget_lock_xxx functions are introduced. Newlib expects
  these functions to be provided by the system. These functions work
  pretty much the same way as the ESP-specific _lock_xxx functions,
  except one major difference: _lock_acquire receives the lock pointer
  by value, and as such doesn't support lazy initialization.

- Static locks used by newlib are now explicitly initialized at
  startup. Since it is unlikely that these static locks are used at
  the same time, all compatible locks are set to point to the same
  mutex. This saves a bit of RAM. Note that there are still many locks
  not initialized statically, in particular those inside FILE
  structures.
This commit is contained in:
Ivan Grokhotkov 2020-06-10 18:03:32 +00:00
parent b1c4107275
commit b7b9ea4361
10 changed files with 368 additions and 9 deletions

View File

@ -17,3 +17,7 @@ __sf_fake_stderr = 0x3ff96458;
__sf_fake_stdin = 0x3ff96498;
__sf_fake_stdout = 0x3ff96478;
__wctomb = 0x3ff96540;
__sfp_lock = 0x3ffae0ac;
__sinit_lock = 0x3ffae0a8;
__env_lock_object = 0x3ffae0b8;
__tz_lock_object = 0x3ffae080;

View File

@ -13,3 +13,5 @@ _PathLocale = 0x3ffffd80;
__sf_fake_stderr = 0x3ffaf08c;
__sf_fake_stdin = 0x3ffaf0cc;
__sf_fake_stdout = 0x3ffaf0ac;
__sfp_recursive_mutex = 0x3ffffd88;
__sinit_recursive_mutex = 0x3ffffd84;

View File

@ -15,3 +15,5 @@ _PathLocale = 0x3fcefcd0;
__sf_fake_stderr = 0x3ff0c524;
__sf_fake_stdin = 0x3ff0c564;
__sf_fake_stdout = 0x3ff0c544;
__sinit_recursive_mutex = 0x3fcefcd4;
__sfp_recursive_mutex = 0x3fcefcd8;

View File

@ -199,6 +199,7 @@ static void do_core_init(void)
fail initializing it properly. */
heap_caps_init();
esp_setup_syscall_table();
esp_newlib_locks_init();
esp_newlib_time_init();
if (g_spiram_ok) {

View File

@ -30,9 +30,8 @@ target_link_libraries(${COMPONENT_LIB} INTERFACE c m gcc "$<TARGET_FILE:${newlib
set_source_files_properties(heap.c PROPERTIES COMPILE_FLAGS -fno-builtin)
# Forces the linker to include locks, heap, and syscalls from this component,
# Forces the linker to include heap, syscalls, and pthread from this component,
# instead of the implementations provided by newlib.
set(EXTRA_LINK_FLAGS "-u newlib_include_locks_impl")
list(APPEND EXTRA_LINK_FLAGS "-u newlib_include_heap_impl")
list(APPEND EXTRA_LINK_FLAGS "-u newlib_include_syscalls_impl")
list(APPEND EXTRA_LINK_FLAGS "-u newlib_include_pthread_impl")

View File

@ -15,9 +15,8 @@ endif
COMPONENT_PRIV_INCLUDEDIRS := priv_include
COMPONENT_SRCDIRS := . port
# Forces the linker to include locks, heap, and syscalls from this component,
# Forces the linker to include heap, syscalls, and pthread from this component,
# instead of the implementations provided by newlib.
COMPONENT_ADD_LDFLAGS += -u newlib_include_locks_impl
COMPONENT_ADD_LDFLAGS += -u newlib_include_heap_impl
COMPONENT_ADD_LDFLAGS += -u newlib_include_syscalls_impl

View File

@ -21,7 +21,6 @@
#include "freertos/semphr.h"
#include "freertos/task.h"
#include "freertos/portable.h"
#include "esp_heap_caps.h"
/* Notes on our newlib lock implementation:
*
@ -119,7 +118,6 @@ void _lock_close_recursive(_lock_t *lock) __attribute__((alias("_lock_close")));
*/
static int IRAM_ATTR lock_acquire_generic(_lock_t *lock, uint32_t delay, uint8_t mutex_type) {
xSemaphoreHandle h = (xSemaphoreHandle)(*lock);
if (!h) {
if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) {
return 0; /* locking is a no-op before scheduler is up, so this "succeeds" */
@ -212,9 +210,208 @@ void IRAM_ATTR _lock_release_recursive(_lock_t *lock) {
lock_release_generic(lock, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}
/* No-op function, used to force linking this file,
instead of the dummy locks implementation from newlib.
#ifdef _RETARGETABLE_LOCKING
/* To ease the transition to newlib 3.3.0, this part is kept under an ifdef.
* After the toolchain with newlib 3.3.0 is released and merged, the ifdefs
* can be removed.
*
* Also the retargetable locking functions still rely on the previous
* implementation. Once support for !_RETARGETABLE_LOCKING is removed,
* the code can be simplified, removing support for lazy initialization of
* locks. At the same time, IDF code which relies on _lock_acquire/_lock_release
* will have to be updated to not depend on lazy initialization.
*
* Explanation of the different lock types:
*
* Newlib 2.2.0 and 3.0.0:
* _lock_t is defined as int, stores SemaphoreHandle_t.
*
* Newlib 3.3.0:
* struct __lock is (or contains) StaticSemaphore_t
* _LOCK_T is a pointer to struct __lock, equivalent to SemaphoreHandle_t.
* It has the same meaning as _lock_t in the previous implementation.
*
*/
void newlib_include_locks_impl(void)
/* This ensures the platform-specific definition in lock.h is correct.
* We use "greater or equal" since the size of StaticSemaphore_t may
* vary by 2 words, depending on whether configUSE_TRACE_FACILITY is enabled.
*/
_Static_assert(sizeof(struct __lock) >= sizeof(StaticSemaphore_t),
"Incorrect size of struct __lock");
/* FreeRTOS configuration check */
_Static_assert(configSUPPORT_STATIC_ALLOCATION,
"FreeRTOS should be configured with static allocation support");
/* These 2 locks are used instead of 9 distinct newlib static locks,
* as most of the locks are required for lesser-used features, so
* the chance of performance degradation due to lock contention is low.
*/
static StaticSemaphore_t s_common_mutex;
static StaticSemaphore_t s_common_recursive_mutex;
#ifdef CONFIG_IDF_TARGET_ESP32C3
/* C3 ROM is built without Newlib static lock symbols exported, and
* with an extra level of _LOCK_T indirection in mind.
* The following is a workaround for this:
* - on startup, we call esp_rom_newlib_init_common_mutexes to set
* the two mutex pointers to magic values.
* - if in __retarget_lock_acquire*, we check if the argument dereferences
* to the magic value. If yes, we lock the correct mutex defined in the app,
* instead.
* Casts from &StaticSemaphore_t to _LOCK_T are okay because _LOCK_T
* (which is SemaphoreHandle_t) is a pointer to the corresponding
* StaticSemaphore_t structure. This is ensured by asserts below.
*/
#define ROM_NEEDS_MUTEX_OVERRIDE
#endif // CONFIG_IDF_TARGET_ESP32C3
#ifdef ROM_NEEDS_MUTEX_OVERRIDE
#define ROM_MUTEX_MAGIC 0xbb10c433
/* This is a macro, since we are overwriting the argument */
#define MAYBE_OVERRIDE_LOCK(_lock, _lock_to_use_instead) \
if (*(int*)_lock == ROM_MUTEX_MAGIC) { \
(_lock) = (_LOCK_T) (_lock_to_use_instead); \
}
#else // ROM_NEEDS_MUTEX_OVERRIDE
#define MAYBE_OVERRIDE_LOCK(_lock, _lock_to_use_instead)
#endif // ROM_NEEDS_MUTEX_OVERRIDE
void IRAM_ATTR __retarget_lock_init(_LOCK_T *lock)
{
*lock = NULL; /* In case lock's memory is uninitialized */
lock_init_generic(lock, queueQUEUE_TYPE_MUTEX);
}
void IRAM_ATTR __retarget_lock_init_recursive(_LOCK_T *lock)
{
*lock = NULL; /* In case lock's memory is uninitialized */
lock_init_generic(lock, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}
void IRAM_ATTR __retarget_lock_close(_LOCK_T lock)
{
_lock_close(&lock);
}
void IRAM_ATTR __retarget_lock_close_recursive(_LOCK_T lock)
{
_lock_close_recursive(&lock);
}
/* Separate function, to prevent generating multiple assert strings */
static void IRAM_ATTR check_lock_nonzero(_LOCK_T lock)
{
assert(lock != NULL && "Uninitialized lock used");
}
void IRAM_ATTR __retarget_lock_acquire(_LOCK_T lock)
{
check_lock_nonzero(lock);
MAYBE_OVERRIDE_LOCK(lock, &s_common_mutex);
_lock_acquire(&lock);
}
void IRAM_ATTR __retarget_lock_acquire_recursive(_LOCK_T lock)
{
check_lock_nonzero(lock);
MAYBE_OVERRIDE_LOCK(lock, &s_common_recursive_mutex);
_lock_acquire_recursive(&lock);
}
int IRAM_ATTR __retarget_lock_try_acquire(_LOCK_T lock)
{
check_lock_nonzero(lock);
MAYBE_OVERRIDE_LOCK(lock, &s_common_mutex);
return _lock_try_acquire(&lock);
}
int IRAM_ATTR __retarget_lock_try_acquire_recursive(_LOCK_T lock)
{
check_lock_nonzero(lock);
MAYBE_OVERRIDE_LOCK(lock, &s_common_recursive_mutex);
return _lock_try_acquire_recursive(&lock);
}
void IRAM_ATTR __retarget_lock_release(_LOCK_T lock)
{
check_lock_nonzero(lock);
_lock_release(&lock);
}
void IRAM_ATTR __retarget_lock_release_recursive(_LOCK_T lock)
{
check_lock_nonzero(lock);
_lock_release_recursive(&lock);
}
/* When _RETARGETABLE_LOCKING is enabled, newlib expects the following locks to be provided: */
extern StaticSemaphore_t __attribute__((alias("s_common_recursive_mutex"))) __lock___sinit_recursive_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_recursive_mutex"))) __lock___malloc_recursive_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_recursive_mutex"))) __lock___env_recursive_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_recursive_mutex"))) __lock___sfp_recursive_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_recursive_mutex"))) __lock___atexit_recursive_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_mutex"))) __lock___at_quick_exit_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_mutex"))) __lock___tz_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_mutex"))) __lock___dd_hash_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_mutex"))) __lock___arc4random_mutex;
void esp_newlib_locks_init(void)
{
/* Initialize the two mutexes used for the locks above.
* Asserts below check our assumption that SemaphoreHandle_t will always
* point to the corresponding StaticSemaphore_t structure.
*/
SemaphoreHandle_t handle;
handle = xSemaphoreCreateMutexStatic(&s_common_mutex);
assert(handle == (SemaphoreHandle_t) &s_common_mutex);
handle = xSemaphoreCreateRecursiveMutexStatic(&s_common_recursive_mutex);
assert(handle == (SemaphoreHandle_t) &s_common_recursive_mutex);
(void) handle;
/* Chip ROMs are built with older versions of newlib, and rely on different lock variables.
* Initialize these locks to the same values.
*/
#ifdef CONFIG_IDF_TARGET_ESP32
/* Newlib 2.2.0 is used in ROM, the following lock symbols are defined: */
extern _lock_t __sfp_lock;
__sfp_lock = (_lock_t) &s_common_recursive_mutex;
extern _lock_t __sinit_lock;
__sinit_lock = (_lock_t) &s_common_recursive_mutex;
extern _lock_t __env_lock_object;
__env_lock_object = (_lock_t) &s_common_mutex;
extern _lock_t __tz_lock_object;
__tz_lock_object = (_lock_t) &s_common_recursive_mutex;
#elif defined(CONFIG_IDF_TARGET_ESP32S2) || defined(CONFIG_IDF_TARGET_ESP32S3)
/* Newlib 3.0.0 is used in ROM, the following lock symbols are defined: */
extern _lock_t __sinit_recursive_mutex;
__sinit_recursive_mutex = (_lock_t) &s_common_recursive_mutex;
extern _lock_t __sfp_recursive_mutex;
__sfp_recursive_mutex = (_lock_t) &s_common_recursive_mutex;
#elif defined(CONFIG_IDF_TARGET_ESP32C3)
/* Newlib 3.3.0 is used in ROM, built with _RETARGETABLE_LOCKING.
* No access to lock variables for the purpose of ECO forward compatibility,
* however we have an API to initialize lock variables used in the ROM.
*/
extern void esp_rom_newlib_init_common_mutexes(_LOCK_T, _LOCK_T);
/* See notes about ROM_NEEDS_MUTEX_OVERRIDE above */
int magic_val = ROM_MUTEX_MAGIC;
_LOCK_T magic_mutex = (_LOCK_T) &magic_val;
esp_rom_newlib_init_common_mutexes(magic_mutex, magic_mutex);
#else // other target
#error Unsupported target
#endif
}
#else // _RETARGETABLE_LOCKING
void esp_newlib_locks_init(void)
{
}
#endif // _RETARGETABLE_LOCKING

View File

@ -53,4 +53,9 @@ void esp_set_time_from_rtc(void);
*/
void esp_sync_counters_rtc_and_frc(void);
/**
* Initialize newlib static locks
*/
void esp_newlib_locks_init(void);
#endif //__ESP_NEWLIB_H__

View File

@ -0,0 +1,41 @@
#pragma once
#include_next <sys/lock.h>
#ifdef _RETARGETABLE_LOCKING
/* Actual platfrom-specific definition of struct __lock.
* The size here should be sufficient for a FreeRTOS mutex.
* This is checked by a static assertion in locks.c
*
* Note 1: this might need to be made dependent on whether FreeRTOS
* is included in the build.
*
* Note 2: the size is made sufficient for the case when
* configUSE_TRACE_FACILITY is enabled. If it is disabled,
* this definition wastes 8 bytes.
*/
struct __lock {
int reserved[23];
};
/* Compatibility definitions for the legacy ESP-specific locking implementation.
* These used to be provided by libc/sys/xtensa/sys/lock.h in newlib.
* Newer versions of newlib don't have this ESP-specific lock.h header, and are
* built with _RETARGETABLE_LOCKING enabled, instead.
*/
typedef _LOCK_T _lock_t;
void _lock_init(_lock_t *plock);
void _lock_init_recursive(_lock_t *plock);
void _lock_close(_lock_t *plock);
void _lock_close_recursive(_lock_t *plock);
void _lock_acquire(_lock_t *plock);
void _lock_acquire_recursive(_lock_t *plock);
int _lock_try_acquire(_lock_t *plock);
int _lock_try_acquire_recursive(_lock_t *plock);
void _lock_release(_lock_t *plock);
void _lock_release_recursive(_lock_t *plock);
#endif // _RETARGETABLE_LOCKING

View File

@ -0,0 +1,109 @@
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <sys/lock.h>
#include "unity.h"
#include "test_utils.h"
#include "sdkconfig.h"
#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#if defined(_RETARGETABLE_LOCKING)
static void locking_task(void* arg)
{
_LOCK_T lock = (_LOCK_T) arg;
__lock_acquire(lock);
__lock_release(lock);
vTaskSuspend(NULL);
}
static void recursive_locking_task(void* arg)
{
_LOCK_T lock = (_LOCK_T) arg;
__lock_acquire_recursive(lock);
__lock_release_recursive(lock);
vTaskSuspend(NULL);
}
static void test_inner_normal(_LOCK_T lock)
{
/* Acquire the lock */
__lock_acquire(lock);
/* Create another task to try acquire same lock */
TaskHandle_t task_hdl;
TEST_ASSERT(xTaskCreate(&locking_task, "locking_task", 2048, lock, UNITY_FREERTOS_PRIORITY, &task_hdl));
vTaskDelay(2);
/* It should get blocked */
TEST_ASSERT_EQUAL(eBlocked, eTaskGetState(task_hdl));
/* Once we release the lock, the task should succeed and suspend itself */
__lock_release(lock);
vTaskDelay(2);
TEST_ASSERT_EQUAL(eSuspended, eTaskGetState(task_hdl));
vTaskDelete(task_hdl);
/* Can not recursively acquire the lock from same task */
TEST_ASSERT_EQUAL(0, __lock_try_acquire(lock));
TEST_ASSERT_EQUAL(-1, __lock_try_acquire(lock));
__lock_release(lock);
}
static void test_inner_recursive(_LOCK_T lock)
{
/* Acquire the lock */
__lock_acquire_recursive(lock);
/* Create another task to try acquire same lock */
TaskHandle_t task_hdl;
TEST_ASSERT(xTaskCreate(&recursive_locking_task, "locking_task", 2048, lock, UNITY_FREERTOS_PRIORITY, &task_hdl));
vTaskDelay(2);
/* It should get blocked */
TEST_ASSERT_EQUAL(eBlocked, eTaskGetState(task_hdl));
/* Once we release the lock, the task should succeed and suspend itself */
__lock_release_recursive(lock);
vTaskDelay(2);
TEST_ASSERT_EQUAL(eSuspended, eTaskGetState(task_hdl));
vTaskDelete(task_hdl);
/* Try recursively acquiring the lock */
TEST_ASSERT_EQUAL(0, __lock_try_acquire_recursive(lock));
TEST_ASSERT_EQUAL(0, __lock_try_acquire_recursive(lock));
__lock_release_recursive(lock);
__lock_release_recursive(lock);
}
TEST_CASE("Retargetable static locks", "[newlib_locks]")
{
StaticSemaphore_t semaphore;
_LOCK_T lock = (_LOCK_T) xSemaphoreCreateMutexStatic(&semaphore);
test_inner_normal(lock);
}
TEST_CASE("Retargetable static recursive locks", "[newlib_locks]")
{
StaticSemaphore_t semaphore;
_LOCK_T lock = (_LOCK_T) xSemaphoreCreateRecursiveMutexStatic(&semaphore);
test_inner_recursive(lock);
}
TEST_CASE("Retargetable dynamic locks", "[newlib_locks]")
{
_LOCK_T lock;
__lock_init(lock);
test_inner_normal(lock);
__lock_close(lock);
}
TEST_CASE("Retargetable dynamic recursive locks", "[newlib_locks]")
{
_LOCK_T lock;
__lock_init_recursive(lock);
test_inner_recursive(lock);
__lock_close_recursive(lock);
}
#endif // _RETARGETABLE_LOCKING