2017-03-21 23:07:37 -04:00
|
|
|
// Copyright 2017 Espressif Systems (Shanghai) PTE LTD
|
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
//
|
|
|
|
#include "freertos/FreeRTOS.h"
|
|
|
|
#include "freertos/task.h"
|
|
|
|
#include "esp_app_trace_util.h"
|
2019-06-05 22:57:29 -04:00
|
|
|
#include "sdkconfig.h"
|
2020-12-21 12:17:42 -05:00
|
|
|
|
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
///////////////////////////////// Locks /////////////////////////////////////
|
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
|
|
|
|
#if ESP_APPTRACE_PRINT_LOCK
|
|
|
|
static esp_apptrace_lock_t s_log_lock = {.irq_stat = 0, .portmux = portMUX_INITIALIZER_UNLOCKED};
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/**
 * Acquire the global log lock used to serialize apptrace log output.
 *
 * Waits forever (infinite timeout) when print locking is compiled in;
 * otherwise a no-op that reports success.
 *
 * @return result of esp_apptrace_lock_take(), or 0 when
 *         ESP_APPTRACE_PRINT_LOCK is disabled.
 */
int esp_apptrace_log_lock(void)
{
#if ESP_APPTRACE_PRINT_LOCK
    esp_apptrace_tmo_t tmo;

    // Block indefinitely until the shared log lock is ours.
    esp_apptrace_tmo_init(&tmo, ESP_APPTRACE_TMO_INFINITE);
    return esp_apptrace_lock_take(&s_log_lock, &tmo);
#else
    return 0;
#endif
}
|
|
|
|
|
|
|
|
/**
 * Release the global log lock taken by esp_apptrace_log_lock().
 *
 * No-op when ESP_APPTRACE_PRINT_LOCK is disabled; the lock-give result is
 * intentionally ignored.
 */
void esp_apptrace_log_unlock(void)
{
#if ESP_APPTRACE_PRINT_LOCK
    (void)esp_apptrace_lock_give(&s_log_lock);
#endif
}
|
2017-03-21 23:07:37 -04:00
|
|
|
|
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
2017-07-24 12:57:44 -04:00
|
|
|
///////////////////////////////// TIMEOUT /////////////////////////////////////
|
2017-03-21 23:07:37 -04:00
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
|
|
|
|
/**
 * Check whether the given timeout has expired, updating its elapsed time.
 *
 * @param tmo  Timeout descriptor; tmo->tmo == -1 encodes "infinite".
 * @return ESP_OK while time remains (or for infinite timeouts),
 *         ESP_ERR_TIMEOUT once elapsed time reaches the limit.
 */
esp_err_t esp_apptrace_tmo_check(esp_apptrace_tmo_t *tmo)
{
    // An infinite timeout can never expire.
    if (tmo->tmo == (int64_t)-1) {
        return ESP_OK;
    }
    // Refresh elapsed time and compare against the deadline.
    tmo->elapsed = esp_timer_get_time() - tmo->start;
    return (tmo->elapsed >= tmo->tmo) ? ESP_ERR_TIMEOUT : ESP_OK;
}
|
|
|
|
|
2017-07-24 12:57:44 -04:00
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
///////////////////////////////// LOCK ////////////////////////////////////////
|
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
|
|
|
|
/**
 * Acquire the apptrace cross-CPU lock within the given timeout.
 *
 * Repeatedly probes the underlying port mux with a zero timeout so that
 * interrupts are never kept disabled for the whole caller timeout: on each
 * failed probe IRQs are restored and the caller-supplied timeout is checked.
 *
 * @param lock  Lock to acquire; on success lock->int_state stores this CPU's
 *              pre-acquisition IRQ state for esp_apptrace_lock_give().
 * @param tmo   Timeout descriptor checked via esp_apptrace_tmo_check().
 * @return ESP_OK on acquisition, or the timeout error from
 *         esp_apptrace_tmo_check().
 */
esp_err_t esp_apptrace_lock_take(esp_apptrace_lock_t *lock, esp_apptrace_tmo_t *tmo)
{
    for (;;) {
        // Disable IRQs on this CPU first; lock->int_state must not be
        // overwritten until the mux is actually ours.
        unsigned irq_state = portENTER_CRITICAL_NESTED();
        // FIXME: if mux is busy it is not good idea to loop during the whole
        // tmo with disabled IRQs. So we check mux state using zero tmo,
        // restore IRQs and let other tasks/IRQs run on this CPU while we do
        // our own tmo check.
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
        bool acquired = vPortCPUAcquireMutexTimeout(&lock->mux, 0, __FUNCTION__, __LINE__);
#else
        bool acquired = vPortCPUAcquireMutexTimeout(&lock->mux, 0);
#endif
        if (acquired) {
            // Mux is ours: record the IRQ state to restore at release time.
            lock->int_state = irq_state;
            return ESP_OK;
        }
        portEXIT_CRITICAL_NESTED(irq_state);
        // We can be preempted from this point until the next call (above) to
        // portENTER_CRITICAL_NESTED().
        int err = esp_apptrace_tmo_check(tmo);
        if (err != ESP_OK) {
            return err;
        }
    }
}
|
|
|
|
|
|
|
|
/**
 * Release the apptrace cross-CPU lock taken by esp_apptrace_lock_take()
 * and restore this CPU's IRQ state.
 *
 * @param lock  Lock to release.
 * @return Always ESP_OK.
 */
esp_err_t esp_apptrace_lock_give(esp_apptrace_lock_t *lock)
{
    // Snapshot the saved IRQ state for this CPU BEFORE dropping the mux:
    // once the mux is released, another CPU may acquire it and overwrite
    // lock->int_state. See esp_apptrace_lock_take().
    unsigned irq_state = lock->int_state;

#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
    vPortCPUReleaseMutex(&lock->mux, __FUNCTION__, __LINE__);
#else
    vPortCPUReleaseMutex(&lock->mux);
#endif
    // Restore the IRQ state captured at acquisition time.
    portEXIT_CRITICAL_NESTED(irq_state);
    return ESP_OK;
}
|
|
|
|
|
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
////////////////////////////// RING BUFFER ////////////////////////////////////
|
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
|
|
|
|
/**
 * Reserve 'size' contiguous bytes in the ring buffer for writing.
 *
 * Returns a pointer to the reserved region and advances the write index,
 * or NULL when the request cannot be satisfied contiguously. One byte is
 * always kept free (see the `- 1` checks) so that wr == rd unambiguously
 * means "empty". When the request does not fit before the physical end of
 * the buffer, the write index wraps to 0 early and 'cur_size' records the
 * temporarily shortened end for the reader to honor.
 *
 * NOTE(review): no locking/atomicity here — presumably the caller
 * serializes producer/consumer access; confirm against call sites.
 */
uint8_t *esp_apptrace_rb_produce(esp_apptrace_rb_t *rb, uint32_t size)
{
    uint8_t *ptr = rb->data + rb->wr;
    // check for available space
    if (rb->rd <= rb->wr) {
        // |?R......W??|  free space is [wr..size) plus [0..rd)
        if (rb->wr + size >= rb->size) {
            // request does not fit before the physical buffer end
            if (rb->rd == 0) {
                return NULL; // cannot wrap wr
            }
            if (rb->wr + size == rb->size) {
                // chunk ends exactly at the buffer end: just wrap wr to 0
                rb->wr = 0;
            } else {
                // check if we can wrap wr earlier to get space for requested size
                if (size > rb->rd - 1) {
                    return NULL; // cannot wrap wr
                }
                // shrink buffer a bit, full size will be restored at rd wrapping
                rb->cur_size = rb->wr;
                rb->wr = 0;
                ptr = rb->data;
                if (rb->rd == rb->cur_size) {
                    // reader already sits at the shrunk end: wrap it too
                    rb->rd = 0;
                    if (rb->cur_size < rb->size) {
                        rb->cur_size = rb->size;
                    }
                }
                rb->wr += size;
            }
        } else {
            rb->wr += size;
        }
    } else {
        // |?W......R??|  free space is [wr..rd), minus the one guard byte
        if (size > rb->rd - rb->wr - 1) {
            return NULL;
        }
        rb->wr += size;
    }
    return ptr;
}
|
|
|
|
|
|
|
|
/**
 * Consume 'size' contiguous bytes from the ring buffer.
 *
 * Returns a pointer to the consumed region and advances the read index,
 * or NULL when fewer than 'size' contiguous bytes are available. When the
 * read index reaches the (possibly shrunk) end 'cur_size', it wraps to 0
 * and the full buffer size is restored (see esp_apptrace_rb_produce()).
 *
 * @param rb    Ring buffer.
 * @param size  Number of bytes to consume.
 * @return Pointer to the data, or NULL if not enough is available.
 */
uint8_t *esp_apptrace_rb_consume(esp_apptrace_rb_t *rb, uint32_t size)
{
    uint8_t *chunk = rb->data + rb->rd;

    if (rb->rd <= rb->wr) {
        // |?R......W??|  readable data lies in [rd..wr)
        if (rb->rd + size > rb->wr) {
            return NULL;
        }
        rb->rd += size;
        return chunk;
    }

    // |?W......R??|  readable data lies in [rd..cur_size)
    uint32_t new_rd = rb->rd + size;
    if (new_rd > rb->cur_size) {
        return NULL;
    }
    if (new_rd == rb->cur_size) {
        // consumed up to the shrunk end: restore full size usage and wrap rd
        if (rb->cur_size < rb->size) {
            rb->cur_size = rb->size;
        }
        rb->rd = 0;
    } else {
        rb->rd = new_rd;
    }
    return chunk;
}
|
|
|
|
|
|
|
|
/**
 * Number of bytes available for a single contiguous read.
 *
 * Data runs up to wr in the normal case, or up to the current (possibly
 * shrunk) end 'cur_size' when the writer has wrapped ahead of the reader.
 *
 * @param rb  Ring buffer.
 * @return Contiguously readable byte count starting at rd.
 */
uint32_t esp_apptrace_rb_read_size_get(esp_apptrace_rb_t *rb)
{
    return (rb->rd <= rb->wr) ? (rb->wr - rb->rd)        // |?R......W??|
                              : (rb->cur_size - rb->rd); // |?W......R??|
}
|
2017-07-24 12:57:44 -04:00
|
|
|
|
|
|
|
/**
 * Number of bytes available for a single contiguous write.
 *
 * One byte is always kept free so the write index can never catch up with
 * the read index (wr == rd must keep meaning "empty").
 *
 * @param rb  Ring buffer.
 * @return Contiguously writable byte count starting at wr.
 */
uint32_t esp_apptrace_rb_write_size_get(esp_apptrace_rb_t *rb)
{
    uint32_t avail;

    if (rb->rd > rb->wr) {
        // |?W......R??|  free gap between wr and rd, minus the guard byte
        avail = rb->rd - rb->wr - 1;
    } else {
        // |?R......W??|  contiguous room up to the physical end of the buffer
        avail = rb->size - rb->wr;
        if (avail != 0 && rb->rd == 0) {
            // rd at position 0: reserve the guard byte at the buffer end
            avail--;
        }
    }
    return avail;
}
|