newlib: revert back from spinlocks to using newlib locks for time.h
Spinlocks from spinlock.h do not disable the scheduler and therefore cannot safely be used directly as a locking mechanism: a task holding the lock can be pre-empted, and the newly running task is then free to take the same spinlock and access whatever it was protecting. A task holding a spinlock could also migrate to a different core, which would trip asserts, since the current implementation assumes that the core that takes the lock is also the core that releases it.

Closes https://github.com/espressif/esp-idf/issues/5762
commit 6fb996b1ac
parent 8a9dc46b14
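In rough outline, the pattern the diff switches to (a sketch only, not code from this commit; the names below are made up for illustration): newlib's _lock_t from <sys/lock.h> is backed in ESP-IDF by a FreeRTOS mutex, so a task blocking in _lock_acquire() waits while the scheduler keeps running, and the lock is released by the same task that took it even if that task was pre-empted or migrated to the other core in between. As with s_boot_time_lock in the diff, a zero-initialized _lock_t is enough; the underlying mutex is created lazily on first use.

/*
 * Sketch only (not part of the commit): the locking pattern the diff adopts.
 * _lock_acquire()/_lock_release() block on a FreeRTOS mutex instead of
 * busy-waiting, so pre-emption or core migration while holding the lock
 * does not corrupt the protected state.
 */
#include <stdint.h>
#include <sys/lock.h>

static _lock_t s_example_lock;      /* zero-initialized; mutex created lazily on first acquire */
static uint64_t s_example_counter;  /* hypothetical shared state, for illustration only */

uint64_t example_locked_increment(void)
{
    _lock_acquire(&s_example_lock);        /* blocks the calling task; scheduler keeps running */
    uint64_t value = ++s_example_counter;  /* 64-bit update is not atomic, hence the lock */
    _lock_release(&s_example_lock);
    return value;
}

Note that in the first two hunks below, the weak esp_system_get_time() simply drops its lock instead of replacing it.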
@@ -15,9 +15,10 @@
 #include "esp_system.h"
 #include "esp_attr.h"

-#include "soc/spinlock.h"
 #include "soc/rtc.h"

 #include "freertos/FreeRTOS.h"

 #include "sdkconfig.h"

 #if CONFIG_IDF_TARGET_ESP32
@@ -33,10 +34,7 @@
 int64_t IRAM_ATTR __attribute__((weak)) esp_system_get_time(void)
 {
     int64_t t = 0;
-    static spinlock_t s_time_lock = SPINLOCK_INITIALIZER;
-    spinlock_acquire(&s_time_lock, SPINLOCK_WAIT_FOREVER);
-    t = (esp_rtc_get_time_us() - g_startup_time);
-    spinlock_release(&s_time_lock);
+    t = (esp_rtc_get_time_us() - g_startup_time);
     return t;
 }
@@ -14,10 +14,10 @@
 #include <stdint.h>
 #include <time.h>
 #include <sys/time.h>
+#include <sys/lock.h>

 #include "esp_system.h"

-#include "soc/spinlock.h"
 #include "soc/rtc.h"
 #include "esp_rom_sys.h"
@@ -61,7 +61,7 @@ uint64_t s_microseconds_offset;
 static uint64_t s_boot_time; // when RTC is used to persist time, two RTC_STORE registers are used to store boot time instead
 #endif

-static spinlock_t s_time_lock = SPINLOCK_INITIALIZER;
+static _lock_t s_boot_time_lock;

 #if defined( WITH_FRC ) || defined( WITH_RTC )
 uint64_t esp_time_impl_get_time_since_boot(void)
@@ -75,9 +75,7 @@ uint64_t esp_time_impl_get_time_since_boot(void)
     microseconds = esp_system_get_time();
 #endif // WITH_RTC
 #elif defined(WITH_RTC)
-    spinlock_acquire(&s_time_lock, SPINLOCK_WAIT_FOREVER);
     microseconds = esp_rtc_get_time_us();
-    spinlock_release(&s_time_lock);
 #endif // WITH_FRC
     return microseconds;
 }
@@ -88,9 +86,7 @@ uint64_t esp_time_impl_get_time(void)
 #if defined( WITH_FRC )
     microseconds = esp_system_get_time();
 #elif defined( WITH_RTC )
-    spinlock_acquire(&s_time_lock, SPINLOCK_WAIT_FOREVER);
     microseconds = esp_rtc_get_time_us();
-    spinlock_release(&s_time_lock);
 #endif // WITH_FRC
     return microseconds;
 }
@@ -100,14 +96,14 @@ uint64_t esp_time_impl_get_time(void)

 void esp_time_impl_set_boot_time(uint64_t time_us)
 {
-    spinlock_acquire(&s_time_lock, SPINLOCK_WAIT_FOREVER);
+    _lock_acquire(&s_boot_time_lock);
 #ifdef WITH_RTC
     REG_WRITE(RTC_BOOT_TIME_LOW_REG, (uint32_t) (time_us & 0xffffffff));
     REG_WRITE(RTC_BOOT_TIME_HIGH_REG, (uint32_t) (time_us >> 32));
 #else
     s_boot_time = time_us;
 #endif
-    spinlock_release(&s_time_lock);
+    _lock_release(&s_boot_time_lock);
 }

 uint64_t esp_clk_rtc_time(void)
@@ -122,13 +118,13 @@ uint64_t esp_clk_rtc_time(void)
 uint64_t esp_time_impl_get_boot_time(void)
 {
     uint64_t result;
-    spinlock_acquire(&s_time_lock, SPINLOCK_WAIT_FOREVER);
+    _lock_acquire(&s_boot_time_lock);
 #ifdef WITH_RTC
     result = ((uint64_t) REG_READ(RTC_BOOT_TIME_LOW_REG)) + (((uint64_t) REG_READ(RTC_BOOT_TIME_HIGH_REG)) << 32);
 #else
     result = s_boot_time;
 #endif
-    spinlock_release(&s_time_lock);
+    _lock_release(&s_boot_time_lock);
     return result;
 }
@@ -21,6 +21,7 @@
 #include <sys/reent.h>
 #include <sys/time.h>
 #include <sys/times.h>
+#include <sys/lock.h>

 #include "esp_system.h"
 #include "esp_attr.h"
@@ -30,7 +31,6 @@

 #include "esp_private/system_internal.h"

-#include "soc/spinlock.h"
 #include "soc/rtc.h"

 #include "esp_time_impl.h"
@@ -53,7 +53,7 @@ static uint64_t s_adjtime_start_us;
 // is how many microseconds total to slew
 static int64_t s_adjtime_total_correction_us;

-static spinlock_t s_time_lock = SPINLOCK_INITIALIZER;
+static _lock_t s_time_lock;

 // This function gradually changes boot_time to the correction value and immediately updates it.
 static uint64_t adjust_boot_time(void)
@@ -100,21 +100,21 @@ static uint64_t adjust_boot_time(void)
 // Get the adjusted boot time.
 static uint64_t get_adjusted_boot_time(void)
 {
-    spinlock_acquire(&s_time_lock, SPINLOCK_WAIT_FOREVER);
+    _lock_acquire(&s_time_lock);
     uint64_t adjust_time = adjust_boot_time();
-    spinlock_release(&s_time_lock);
+    _lock_release(&s_time_lock);
     return adjust_time;
 }

 // Applying the accumulated correction to base_time and stopping the smooth time adjustment.
 static void adjtime_corr_stop (void)
 {
-    spinlock_acquire(&s_time_lock, SPINLOCK_WAIT_FOREVER);
+    _lock_acquire(&s_time_lock);
     if (s_adjtime_start_us != 0){
         adjust_boot_time();
         s_adjtime_start_us = 0;
     }
-    spinlock_release(&s_time_lock);
+    _lock_release(&s_time_lock);
 }
 #endif
@@ -122,7 +122,7 @@ int adjtime(const struct timeval *delta, struct timeval *outdelta)
 {
 #if IMPL_NEWLIB_TIME_FUNCS
     if(outdelta != NULL){
-        spinlock_acquire(&s_time_lock, SPINLOCK_WAIT_FOREVER);
+        _lock_acquire(&s_time_lock);
         adjust_boot_time();
         if (s_adjtime_start_us != 0) {
             outdelta->tv_sec = s_adjtime_total_correction_us / 1000000L;
@@ -131,7 +131,7 @@ int adjtime(const struct timeval *delta, struct timeval *outdelta)
             outdelta->tv_sec = 0;
             outdelta->tv_usec = 0;
         }
-        spinlock_release(&s_time_lock);
+        _lock_release(&s_time_lock);
     }
     if(delta != NULL){
         int64_t sec = delta->tv_sec;
@@ -144,12 +144,12 @@ int adjtime(const struct timeval *delta, struct timeval *outdelta)
         * and the delta of the second call is not NULL, the earlier tuning is stopped,
         * but the already completed part of the adjustment is not canceled.
         */
-        spinlock_acquire(&s_time_lock, SPINLOCK_WAIT_FOREVER);
+        _lock_acquire(&s_time_lock);
        // If correction is already in progress (s_adjtime_start_time_us != 0), then apply accumulated corrections.
        adjust_boot_time();
        s_adjtime_start_us = esp_time_impl_get_time_since_boot();
        s_adjtime_total_correction_us = sec * 1000000L + usec;
-        spinlock_release(&s_time_lock);
+        _lock_release(&s_time_lock);
     }
     return 0;
 #else