/*
 * SPDX-FileCopyrightText: 2017-2022 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <errno.h>
#include <pthread.h>
#include <string.h>
#include <stdlib.h>   // calloc() / free() used below
#include "esp_err.h"
#include "esp_attr.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "freertos/list.h"
#include "pthread_internal.h"

#include <sys/queue.h>
#include <sys/time.h>

#include "esp_log.h"

const static char *TAG = "esp_pthread";

typedef struct esp_pthread_cond_waiter {
    SemaphoreHandle_t wait_sem;                 ///< task specific semaphore to wait on
    TAILQ_ENTRY(esp_pthread_cond_waiter) link;  ///< stash on the list of semaphores to be notified
} esp_pthread_cond_waiter_t;

typedef struct esp_pthread_cond {
    _lock_t lock;                                       ///< lock that protects the list of semaphores
    TAILQ_HEAD(, esp_pthread_cond_waiter) waiter_list;  ///< head of the list of semaphores
} esp_pthread_cond_t;
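
/* Implementation note: a condition variable here is a lock-protected FIFO list of
 * waiters. Each waiter blocks on its own semaphore; pthread_cond_signal() gives the
 * semaphore of the waiter at the head of the list, while pthread_cond_broadcast()
 * gives every waiter's semaphore (see the functions below). */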
// Check the condition variable; if it still holds the static initializer value,
// initialize it lazily (double-checked under pthread_lazy_init_lock).
static int s_check_and_init_if_static(pthread_cond_t *cv)
{
    int res = 0;

    if (cv == NULL || *cv == (pthread_cond_t) 0) {
        return EINVAL;
    }

    if (*cv == PTHREAD_COND_INITIALIZER) {
        portENTER_CRITICAL(&pthread_lazy_init_lock);
        if (*cv == PTHREAD_COND_INITIALIZER) {
            res = pthread_cond_init(cv, NULL);
        }
        portEXIT_CRITICAL(&pthread_lazy_init_lock);
    }

    return res;
}

int pthread_cond_signal(pthread_cond_t *cv)
{
    int res = s_check_and_init_if_static(cv);
    if (res) {
        return res;
    }

    esp_pthread_cond_t *cond = (esp_pthread_cond_t *) *cv;

    _lock_acquire_recursive(&cond->lock);
    esp_pthread_cond_waiter_t *entry;
    entry = TAILQ_FIRST(&cond->waiter_list);
    if (entry) {
        xSemaphoreGive(entry->wait_sem);
    }
    _lock_release_recursive(&cond->lock);

    return 0;
}

int pthread_cond_broadcast(pthread_cond_t *cv)
{
    int res = s_check_and_init_if_static(cv);
    if (res) {
        return res;
    }

    esp_pthread_cond_t *cond = (esp_pthread_cond_t *) *cv;

    _lock_acquire_recursive(&cond->lock);
    esp_pthread_cond_waiter_t *entry;
    TAILQ_FOREACH(entry, &cond->waiter_list, link) {
        xSemaphoreGive(entry->wait_sem);
    }
    _lock_release_recursive(&cond->lock);

    return 0;
}

int pthread_cond_wait(pthread_cond_t *cv, pthread_mutex_t *mut)
{
    return pthread_cond_timedwait(cv, mut, NULL);
}

int pthread_cond_timedwait(pthread_cond_t *cv, pthread_mutex_t *mut, const struct timespec *to)
{
    int ret;
    TickType_t timeout_ticks;

    int res = s_check_and_init_if_static(cv);
    if (res) {
        return res;
    }

    esp_pthread_cond_t *cond = (esp_pthread_cond_t *) *cv;

    if (to == NULL) {
        timeout_ticks = portMAX_DELAY;
    } else {
        struct timeval abs_time, cur_time, diff_time;
        long timeout_msec;

        gettimeofday(&cur_time, NULL);

        abs_time.tv_sec = to->tv_sec;
        // Round up nanoseconds to the next microsecond
        abs_time.tv_usec = (to->tv_nsec + 1000 - 1) / 1000;

        if (timercmp(&abs_time, &cur_time, <)) {
            /* As per the pthread spec, if the time has already
             * passed, no sleep is required.
             */
            timeout_msec = 0;
        } else {
            timersub(&abs_time, &cur_time, &diff_time);
            // Round up timeout microseconds to the next millisecond
            timeout_msec = (diff_time.tv_sec * 1000) +
                           ((diff_time.tv_usec + 1000 - 1) / 1000);
        }

        if (timeout_msec <= 0) {
            return ETIMEDOUT;
        }

        // Round up milliseconds to the next tick
        timeout_ticks = (timeout_msec + portTICK_PERIOD_MS - 1) / portTICK_PERIOD_MS;

        /* We have to add 1 more tick of delay

           The reason for this is that vTaskDelay(1) will sleep until the start of the next tick,
           which can be any amount of time up to one tick period. So if we don't add one more tick,
           we're likely to timeout a small time (< 1 tick period) before the requested timeout.
           If we add 1 tick then we will timeout a small time (< 1 tick period) after the
           requested timeout.
         */
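        /* Worked example (illustrative, assuming the default portTICK_PERIOD_MS of 10 ms):
           a remaining timeout of 25 ms rounds up to (25 + 9) / 10 = 3 ticks; the extra tick
           added below makes it 4. Depending on where in the current tick period the wait
           starts, the timeout then fires 30-40 ms later, i.e. never before the requested
           25 ms. */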
        timeout_ticks += 1;
    }

    esp_pthread_cond_waiter_t w;
    // StaticSemaphore_t is around 80 bytes of stack
    StaticSemaphore_t sem_buffer;
    // Create semaphore: first take will block
    w.wait_sem = xSemaphoreCreateCountingStatic(1, 0, &sem_buffer);
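    /* Note: the waiter entry and its semaphore live on this task's stack and use the
       static (no-heap) FreeRTOS semaphore API, so waiting never allocates memory; the
       entry is unlinked from the waiter list again before this function returns. */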

    _lock_acquire_recursive(&cond->lock);
    TAILQ_INSERT_TAIL(&cond->waiter_list, &w, link);
    _lock_release_recursive(&cond->lock);
    pthread_mutex_unlock(mut);

    if (xSemaphoreTake(w.wait_sem, timeout_ticks) == pdTRUE) {
        ret = 0;
    } else {
        ret = ETIMEDOUT;
    }

    _lock_acquire_recursive(&cond->lock);
    TAILQ_REMOVE(&cond->waiter_list, &w, link);
    _lock_release_recursive(&cond->lock);
    vSemaphoreDelete(w.wait_sem);

    pthread_mutex_lock(mut);
    return ret;
}
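
/* Usage sketch (illustrative only; `lock`, `cond` and `ready` are hypothetical names,
   not part of this file): as with any pthread condition variable, callers hold the
   mutex and re-check their predicate in a loop, because a broadcast can wake several
   waiters and the predicate may no longer hold by the time a given waiter runs:

       pthread_mutex_lock(&lock);
       while (!ready) {
           pthread_cond_wait(&cond, &lock);   // or pthread_cond_timedwait(&cond, &lock, &abstime)
       }
       // ... use the state protected by `lock` ...
       pthread_mutex_unlock(&lock);
 */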

// The following pthread_condattr_* function definitions are placed here to enable builds of code
// that references these functions but does not actively use them.

int pthread_condattr_init(pthread_condattr_t *attr)
{
    ESP_LOGW(TAG, "%s not yet implemented (%p)", __FUNCTION__, attr);
    return ENOSYS;
}

int pthread_condattr_destroy(pthread_condattr_t *attr)
{
    ESP_LOGW(TAG, "%s not yet implemented (%p)", __FUNCTION__, attr);
    return ENOSYS;
}

int pthread_condattr_getpshared(const pthread_condattr_t *restrict attr, int *restrict pshared)
{
    ESP_LOGW(TAG, "%s not yet implemented (%p)", __FUNCTION__, attr);
    return ENOSYS;
}

int pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
{
    ESP_LOGW(TAG, "%s not yet implemented (%p)", __FUNCTION__, attr);
    return ENOSYS;
}

int pthread_condattr_getclock(const pthread_condattr_t *restrict attr, clockid_t *restrict clock_id)
{
    ESP_LOGW(TAG, "%s not yet implemented (%p)", __FUNCTION__, attr);
    return ENOSYS;
}

int pthread_condattr_setclock(pthread_condattr_t *attr, clockid_t clock_id)
{
    ESP_LOGW(TAG, "%s: not yet supported!", __func__);
    return 0; // kept as 0 (not ENOSYS) to match the newlib implementation this was moved from
}

int pthread_cond_init(pthread_cond_t *cv, const pthread_condattr_t *att)
{
    (void) att; /* Unused argument as of now */

    if (cv == NULL) {
        return EINVAL;
    }

    esp_pthread_cond_t *cond = (esp_pthread_cond_t *) calloc(1, sizeof(esp_pthread_cond_t));
    if (cond == NULL) {
        return ENOMEM;
    }

    _lock_init_recursive(&cond->lock);
    TAILQ_INIT(&cond->waiter_list);

    *cv = (pthread_cond_t) cond;
    return 0;
}

int pthread_cond_destroy(pthread_cond_t *cv)
{
    int ret = 0;

    if (cv == NULL || *cv == (pthread_cond_t) 0) {
        return EINVAL;
    }
    if (*cv == PTHREAD_COND_INITIALIZER) {
        return 0; // never initialized
    }

    esp_pthread_cond_t *cond = (esp_pthread_cond_t *) *cv;
    if (!cond) {
        return EINVAL;
    }

    _lock_acquire_recursive(&cond->lock);
    if (!TAILQ_EMPTY(&cond->waiter_list)) {
        ret = EBUSY;
    }
    _lock_release_recursive(&cond->lock);

    if (ret == 0) {
        *cv = (pthread_cond_t) 0;
        _lock_close_recursive(&cond->lock);
        free(cond);
    }

    return ret;
}

/* Hook function to force linking this file */
void pthread_include_pthread_cond_var_impl(void)
{
}