/*
 * SPDX-FileCopyrightText: 2018-2022 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <stdbool.h>
#include <inttypes.h>  // for PRIu32/PRIi32 used in the profiling dump formats

#include "esp_log.h"
#include "esp_event.h"
#include "esp_event_internal.h"
#include "esp_event_private.h"

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
#include "esp_timer.h"
#endif

/* ---------------------------- Definitions --------------------------------- */

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
// LOOP @<address, name> rx:<received events no.> dr:<dropped events no.>
#define LOOP_DUMP_FORMAT              "LOOP @%p,%s rx:%" PRIu32 " dr:%" PRIu32 "\n"
// handler @<address> ev:<base, id> inv:<times invoked> time:<runtime>
#define HANDLER_DUMP_FORMAT           "    HANDLER @%p ev:%s,%s inv:%" PRIu32 " time:%lld us\n"

#define PRINT_DUMP_INFO(dst, sz, ...) do { \
        int cb = snprintf(dst, sz, __VA_ARGS__); \
        dst += cb; \
        sz -= cb; \
    } while (0);
#endif

/* ------------------------- Static Variables ------------------------------- */

static const char* TAG = "event";
static const char* esp_event_any_base = "any";

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
static SLIST_HEAD(esp_event_loop_instance_list_t, esp_event_loop_instance) s_event_loops =
        SLIST_HEAD_INITIALIZER(s_event_loops);

static portMUX_TYPE s_event_loops_spinlock = portMUX_INITIALIZER_UNLOCKED;
#endif

/* ------------------------- Static Functions ------------------------------- */

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
static int esp_event_dump_prepare(void)
{
    esp_event_loop_instance_t* loop_it;
    esp_event_loop_node_t* loop_node_it;
    esp_event_base_node_t* base_node_it;
    esp_event_id_node_t* id_node_it;
    esp_event_handler_node_t* handler_it;

    // Count the number of items to be printed. This is needed to compute how much memory to reserve.
    int loops = 0, handlers = 0;

    portENTER_CRITICAL(&s_event_loops_spinlock);

    SLIST_FOREACH(loop_it, &s_event_loops, next) {
        SLIST_FOREACH(loop_node_it, &(loop_it->loop_nodes), next) {
            SLIST_FOREACH(handler_it, &(loop_node_it->handlers), next) {
                handlers++;
            }

            SLIST_FOREACH(base_node_it, &(loop_node_it->base_nodes), next) {
                SLIST_FOREACH(handler_it, &(base_node_it->handlers), next) {
                    handlers++;
                }

                SLIST_FOREACH(id_node_it, &(base_node_it->id_nodes), next) {
                    SLIST_FOREACH(handler_it, &(id_node_it->handlers), next) {
                        handlers++;
                    }
                }
            }
        }

        loops++;
    }

    portEXIT_CRITICAL(&s_event_loops_spinlock);

    // Reserve slightly more memory than computed
    int allowance = 3;

    int size = (((loops + allowance) * (sizeof(LOOP_DUMP_FORMAT) + 10 + 20 + 2 * 11)) +
                ((handlers + allowance) * (sizeof(HANDLER_DUMP_FORMAT) + 10 + 2 * 20 + 11 + 20)));

    return size;
}
#endif

static void esp_event_loop_run_task(void* args)
{
    esp_err_t err;
    esp_event_loop_handle_t event_loop = (esp_event_loop_handle_t) args;

    ESP_LOGD(TAG, "running task for loop %p", event_loop);

    while (1) {
        err = esp_event_loop_run(event_loop, portMAX_DELAY);
        if (err != ESP_OK) {
            break;
        }
    }

    ESP_LOGE(TAG, "suspended task for loop %p", event_loop);
    vTaskSuspend(NULL);
}

static void handler_execute(esp_event_loop_instance_t* loop, esp_event_handler_node_t* handler, esp_event_post_instance_t post)
{
    ESP_LOGD(TAG, "running post %s:%d with handler %p and context %p on loop %p", post.base, post.id, handler->handler_ctx->handler, &handler->handler_ctx, loop);

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    int64_t start, diff;
    start = esp_timer_get_time();
#endif

    // Execute the handler
#if CONFIG_ESP_EVENT_POST_FROM_ISR
    void* data_ptr = NULL;

    if (post.data_set) {
        if (post.data_allocated) {
            data_ptr = post.data.ptr;
        } else {
            data_ptr = &post.data.val;
        }
    }

    (*(handler->handler_ctx->handler))(handler->handler_ctx->arg, post.base, post.id, data_ptr);
#else
    (*(handler->handler_ctx->handler))(handler->handler_ctx->arg, post.base, post.id, post.data);
#endif

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    diff = esp_timer_get_time() - start;

    xSemaphoreTake(loop->profiling_mutex, portMAX_DELAY);

    // At this point the handler may already have been unregistered
    // (this happens in the "handler instance can unregister itself" test case).
    // To prevent memory corruption, check that the handler node is still in the list
    // before updating its profiling counters.
    esp_event_loop_node_t* loop_node;
    esp_event_handler_node_t* handler_node;

    SLIST_FOREACH(loop_node, &(loop->loop_nodes), next) {
        SLIST_FOREACH(handler_node, &(loop_node->handlers), next) {
            if (handler_node == handler) {
                handler->invoked++;
                handler->time += diff;
            }
        }
    }

    xSemaphoreGive(loop->profiling_mutex);
#endif
}

static esp_err_t handler_instances_add(esp_event_handler_nodes_t* handlers, esp_event_handler_t event_handler, void* event_handler_arg, esp_event_handler_instance_context_t** handler_ctx, bool legacy)
{
    esp_event_handler_node_t* handler_instance = calloc(1, sizeof(*handler_instance));

    if (!handler_instance) {
        return ESP_ERR_NO_MEM;
    }

    esp_event_handler_instance_context_t* context = calloc(1, sizeof(*context));

    if (!context) {
        free(handler_instance);
        return ESP_ERR_NO_MEM;
    }

    context->handler = event_handler;
    context->arg = event_handler_arg;
    handler_instance->handler_ctx = context;

    if (SLIST_EMPTY(handlers)) {
        SLIST_INSERT_HEAD(handlers, handler_instance, next);
    } else {
        esp_event_handler_node_t *it = NULL, *last = NULL;

        SLIST_FOREACH(it, handlers, next) {
            if (legacy) {
                if (event_handler == it->handler_ctx->handler) {
                    it->handler_ctx->arg = event_handler_arg;
                    ESP_LOGW(TAG, "handler already registered, overwriting");
                    free(handler_instance);
                    free(context);
                    return ESP_OK;
                }
            }
            last = it;
        }

        SLIST_INSERT_AFTER(last, handler_instance, next);
    }

    // If the caller didn't provide the handler instance context, don't set it.
    // It will be removed once the event loop is deleted.
    if (handler_ctx) {
        *handler_ctx = context;
    }

    return ESP_OK;
}

static esp_err_t base_node_add_handler(esp_event_base_node_t* base_node,
                                       int32_t id,
                                       esp_event_handler_t event_handler,
                                       void* event_handler_arg,
                                       esp_event_handler_instance_context_t** handler_ctx,
                                       bool legacy)
{
    if (id == ESP_EVENT_ANY_ID) {
        return handler_instances_add(&(base_node->handlers), event_handler, event_handler_arg, handler_ctx, legacy);
    } else {
        esp_err_t err = ESP_OK;
        esp_event_id_node_t *it = NULL, *id_node = NULL, *last_id_node = NULL;

        SLIST_FOREACH(it, &(base_node->id_nodes), next) {
            if (it->id == id) {
                id_node = it;
            }
            last_id_node = it;
        }

        if (!last_id_node || !id_node) {
            id_node = (esp_event_id_node_t*) calloc(1, sizeof(*id_node));

            if (!id_node) {
                ESP_LOGE(TAG, "alloc for new id node failed");
                return ESP_ERR_NO_MEM;
            }

            id_node->id = id;

            SLIST_INIT(&(id_node->handlers));

            err = handler_instances_add(&(id_node->handlers), event_handler, event_handler_arg, handler_ctx, legacy);

            if (err == ESP_OK) {
                if (!last_id_node) {
                    SLIST_INSERT_HEAD(&(base_node->id_nodes), id_node, next);
                } else {
                    SLIST_INSERT_AFTER(last_id_node, id_node, next);
                }
            } else {
                free(id_node);
            }

            return err;
        } else {
            return handler_instances_add(&(id_node->handlers), event_handler, event_handler_arg, handler_ctx, legacy);
        }
    }
}

static esp_err_t loop_node_add_handler(esp_event_loop_node_t* loop_node,
                                       esp_event_base_t base,
                                       int32_t id,
                                       esp_event_handler_t event_handler,
                                       void* event_handler_arg,
                                       esp_event_handler_instance_context_t** handler_ctx,
                                       bool legacy)
{
    if (base == esp_event_any_base && id == ESP_EVENT_ANY_ID) {
        return handler_instances_add(&(loop_node->handlers), event_handler, event_handler_arg, handler_ctx, legacy);
    } else {
        esp_err_t err = ESP_OK;
        esp_event_base_node_t *it = NULL, *base_node = NULL, *last_base_node = NULL;

        SLIST_FOREACH(it, &(loop_node->base_nodes), next) {
            if (it->base == base) {
                base_node = it;
            }
            last_base_node = it;
        }

        if (!last_base_node ||
                !base_node ||
                (base_node && !SLIST_EMPTY(&(base_node->id_nodes)) && id == ESP_EVENT_ANY_ID) ||
                (last_base_node && last_base_node->base != base && !SLIST_EMPTY(&(last_base_node->id_nodes)) && id == ESP_EVENT_ANY_ID)) {
            base_node = (esp_event_base_node_t*) calloc(1, sizeof(*base_node));

            if (!base_node) {
                ESP_LOGE(TAG, "alloc mem for new base node failed");
                return ESP_ERR_NO_MEM;
            }

            base_node->base = base;

            SLIST_INIT(&(base_node->handlers));
            SLIST_INIT(&(base_node->id_nodes));

            err = base_node_add_handler(base_node, id, event_handler, event_handler_arg, handler_ctx, legacy);

            if (err == ESP_OK) {
                if (!last_base_node) {
                    SLIST_INSERT_HEAD(&(loop_node->base_nodes), base_node, next);
                } else {
                    SLIST_INSERT_AFTER(last_base_node, base_node, next);
                }
            } else {
                free(base_node);
            }

            return err;
        } else {
            return base_node_add_handler(base_node, id, event_handler, event_handler_arg, handler_ctx, legacy);
        }
    }
}

static esp_err_t handler_instances_remove(esp_event_handler_nodes_t* handlers, esp_event_handler_instance_context_t* handler_ctx, bool legacy)
{
    esp_event_handler_node_t *it, *temp;

    SLIST_FOREACH_SAFE(it, handlers, next, temp) {
        if (legacy) {
            if (it->handler_ctx->handler == handler_ctx->handler) {
                SLIST_REMOVE(handlers, it, esp_event_handler_node, next);
                free(it->handler_ctx);
                free(it);
                return ESP_OK;
            }
        } else {
            if (it->handler_ctx == handler_ctx) {
                SLIST_REMOVE(handlers, it, esp_event_handler_node, next);
                free(it->handler_ctx);
                free(it);
                return ESP_OK;
            }
        }
    }

    return ESP_ERR_NOT_FOUND;
}

static esp_err_t base_node_remove_handler(esp_event_base_node_t* base_node, int32_t id, esp_event_handler_instance_context_t* handler_ctx, bool legacy)
{
    if (id == ESP_EVENT_ANY_ID) {
        return handler_instances_remove(&(base_node->handlers), handler_ctx, legacy);
    } else {
        esp_event_id_node_t *it, *temp;
        SLIST_FOREACH_SAFE(it, &(base_node->id_nodes), next, temp) {
            if (it->id == id) {
                esp_err_t res = handler_instances_remove(&(it->handlers), handler_ctx, legacy);

                if (res == ESP_OK) {
                    if (SLIST_EMPTY(&(it->handlers))) {
                        SLIST_REMOVE(&(base_node->id_nodes), it, esp_event_id_node, next);
                        free(it);
                        return ESP_OK;
                    }
                }
            }
        }
    }

    return ESP_ERR_NOT_FOUND;
}

static esp_err_t loop_node_remove_handler(esp_event_loop_node_t* loop_node, esp_event_base_t base, int32_t id, esp_event_handler_instance_context_t* handler_ctx, bool legacy)
{
    if (base == esp_event_any_base && id == ESP_EVENT_ANY_ID) {
        return handler_instances_remove(&(loop_node->handlers), handler_ctx, legacy);
    } else {
        esp_event_base_node_t *it, *temp;
        SLIST_FOREACH_SAFE(it, &(loop_node->base_nodes), next, temp) {
            if (it->base == base) {
                esp_err_t res = base_node_remove_handler(it, id, handler_ctx, legacy);

                if (res == ESP_OK) {
                    if (SLIST_EMPTY(&(it->handlers)) && SLIST_EMPTY(&(it->id_nodes))) {
                        SLIST_REMOVE(&(loop_node->base_nodes), it, esp_event_base_node, next);
                        free(it);
                        return ESP_OK;
                    }
                }
            }
        }
    }

    return ESP_ERR_NOT_FOUND;
}

static void handler_instances_remove_all(esp_event_handler_nodes_t* handlers)
{
    esp_event_handler_node_t *it, *temp;
    SLIST_FOREACH_SAFE(it, handlers, next, temp) {
        SLIST_REMOVE(handlers, it, esp_event_handler_node, next);
        free(it->handler_ctx);
        free(it);
    }
}

static void base_node_remove_all_handler(esp_event_base_node_t* base_node)
{
    handler_instances_remove_all(&(base_node->handlers));

    esp_event_id_node_t *it, *temp;
    SLIST_FOREACH_SAFE(it, &(base_node->id_nodes), next, temp) {
        handler_instances_remove_all(&(it->handlers));
        SLIST_REMOVE(&(base_node->id_nodes), it, esp_event_id_node, next);
        free(it);
    }
}

static void loop_node_remove_all_handler(esp_event_loop_node_t* loop_node)
{
    handler_instances_remove_all(&(loop_node->handlers));

    esp_event_base_node_t *it, *temp;
    SLIST_FOREACH_SAFE(it, &(loop_node->base_nodes), next, temp) {
        base_node_remove_all_handler(it);
        SLIST_REMOVE(&(loop_node->base_nodes), it, esp_event_base_node, next);
        free(it);
    }
}

static void inline __attribute__((always_inline)) post_instance_delete(esp_event_post_instance_t* post)
{
#if CONFIG_ESP_EVENT_POST_FROM_ISR
    if (post->data_allocated && post->data.ptr) {
        free(post->data.ptr);
    }
#else
    if (post->data) {
        free(post->data);
    }
#endif
    memset(post, 0, sizeof(*post));
}

/* ---------------------------- Public API --------------------------------- */

esp_err_t esp_event_loop_create(const esp_event_loop_args_t* event_loop_args, esp_event_loop_handle_t* event_loop)
{
    if (event_loop_args == NULL) {
        ESP_LOGE(TAG, "event_loop_args was NULL");
        return ESP_ERR_INVALID_ARG;
    }

    if (event_loop == NULL) {
        ESP_LOGE(TAG, "event_loop was NULL");
        return ESP_ERR_INVALID_ARG;
    }

    esp_event_loop_instance_t* loop;
    esp_err_t err = ESP_ERR_NO_MEM; // most likely error

    loop = calloc(1, sizeof(*loop));
    if (loop == NULL) {
        ESP_LOGE(TAG, "alloc for event loop failed");
        return err;
    }

    loop->queue = xQueueCreate(event_loop_args->queue_size, sizeof(esp_event_post_instance_t));
    if (loop->queue == NULL) {
        ESP_LOGE(TAG, "create event loop queue failed");
        goto on_err;
    }

    loop->mutex = xSemaphoreCreateRecursiveMutex();
    if (loop->mutex == NULL) {
        ESP_LOGE(TAG, "create event loop mutex failed");
        goto on_err;
    }

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    loop->profiling_mutex = xSemaphoreCreateMutex();
    if (loop->profiling_mutex == NULL) {
        ESP_LOGE(TAG, "create event loop profiling mutex failed");
        goto on_err;
    }
#endif

    SLIST_INIT(&(loop->loop_nodes));

    // Create the loop task if requested
    if (event_loop_args->task_name != NULL) {
        BaseType_t task_created = xTaskCreatePinnedToCore(esp_event_loop_run_task, event_loop_args->task_name,
                                                          event_loop_args->task_stack_size, (void*) loop,
                                                          event_loop_args->task_priority, &(loop->task), event_loop_args->task_core_id);

        if (task_created != pdPASS) {
            ESP_LOGE(TAG, "create task for loop failed");
            err = ESP_FAIL;
            goto on_err;
        }

        loop->name = event_loop_args->task_name;

        ESP_LOGD(TAG, "created task for loop %p", loop);
    } else {
        loop->name = "";
        loop->task = NULL;
    }

    loop->running_task = NULL;

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    portENTER_CRITICAL(&s_event_loops_spinlock);
    SLIST_INSERT_HEAD(&s_event_loops, loop, next);
    portEXIT_CRITICAL(&s_event_loops_spinlock);
#endif

    *event_loop = (esp_event_loop_handle_t) loop;

    ESP_LOGD(TAG, "created event loop %p", loop);

    return ESP_OK;

on_err:
    if (loop->queue != NULL) {
        vQueueDelete(loop->queue);
    }

    if (loop->mutex != NULL) {
        vSemaphoreDelete(loop->mutex);
    }

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    if (loop->profiling_mutex != NULL) {
        vSemaphoreDelete(loop->profiling_mutex);
    }
#endif

    free(loop);

    return err;
}

// On event lookup performance: The library implements the event list as a linked list, which results in O(n)
// lookup time. A test comparing this implementation against the O(lg n) performance of rbtrees
// (https://github.com/freebsd/freebsd/blob/master/sys/sys/tree.h)
// indicates that the difference is not substantial, especially considering the additional
// pointers per node that rbtrees require. Code for the rbtree implementation of the event loop library is archived
// in feature/esp_event_loop_library_rbtrees if needed.

esp_err_t esp_event_loop_run(esp_event_loop_handle_t event_loop, TickType_t ticks_to_run)
{
    assert(event_loop);

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;
    esp_event_post_instance_t post;
    TickType_t marker = xTaskGetTickCount();
    TickType_t end = 0;

#if (configUSE_16_BIT_TICKS == 1)
    int32_t remaining_ticks = ticks_to_run;
#else
    int64_t remaining_ticks = ticks_to_run;
#endif

    while (xQueueReceive(loop->queue, &post, ticks_to_run) == pdTRUE) {
        // The event has already been unqueued, so ensure it gets executed.
        xSemaphoreTakeRecursive(loop->mutex, portMAX_DELAY);

        loop->running_task = xTaskGetCurrentTaskHandle();

        bool exec = false;

        esp_event_handler_node_t *handler, *temp_handler;
        esp_event_loop_node_t *loop_node, *temp_node;
        esp_event_base_node_t *base_node, *temp_base;
        esp_event_id_node_t *id_node, *temp_id_node;

        SLIST_FOREACH_SAFE(loop_node, &(loop->loop_nodes), next, temp_node) {
            // Execute loop level handlers
            SLIST_FOREACH_SAFE(handler, &(loop_node->handlers), next, temp_handler) {
                handler_execute(loop, handler, post);
                exec |= true;
            }

            SLIST_FOREACH_SAFE(base_node, &(loop_node->base_nodes), next, temp_base) {
                if (base_node->base == post.base) {
                    // Execute base level handlers
                    SLIST_FOREACH_SAFE(handler, &(base_node->handlers), next, temp_handler) {
                        handler_execute(loop, handler, post);
                        exec |= true;
                    }

                    SLIST_FOREACH_SAFE(id_node, &(base_node->id_nodes), next, temp_id_node) {
                        if (id_node->id == post.id) {
                            // Execute id level handlers
                            SLIST_FOREACH_SAFE(handler, &(id_node->handlers), next, temp_handler) {
                                handler_execute(loop, handler, post);
                                exec |= true;
                            }

                            // Skip to next base node
                            break;
                        }
                    }
                }
            }
        }

        esp_event_base_t base = post.base;
        int32_t id = post.id;

        post_instance_delete(&post);

        if (ticks_to_run != portMAX_DELAY) {
            end = xTaskGetTickCount();
            remaining_ticks -= end - marker;
            // If the ticks to run expired, return to the caller
            if (remaining_ticks <= 0) {
                xSemaphoreGiveRecursive(loop->mutex);
                break;
            } else {
                marker = end;
            }
        }

        loop->running_task = NULL;

        xSemaphoreGiveRecursive(loop->mutex);

        if (!exec) {
            // No handlers were registered, not even loop/base level handlers
            ESP_LOGD(TAG, "no handlers have been registered for event %s:%d posted to loop %p", base, id, event_loop);
        }
    }

    return ESP_OK;
}
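
/*
 * Usage sketch (illustrative only): dispatching events from the caller's task when the
 * loop was created without a dedicated task (task_name == NULL). The 100 ms budget per
 * round and the app_is_running flag are assumed placeholders.
 *
 *     while (app_is_running) {
 *         esp_event_loop_run(loop_handle, pdMS_TO_TICKS(100));
 *         // do other work between dispatch rounds
 *     }
 */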

esp_err_t esp_event_loop_delete(esp_event_loop_handle_t event_loop)
{
    assert(event_loop);

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;
    SemaphoreHandle_t loop_mutex = loop->mutex;
#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    SemaphoreHandle_t loop_profiling_mutex = loop->profiling_mutex;
#endif

    xSemaphoreTakeRecursive(loop->mutex, portMAX_DELAY);

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    xSemaphoreTake(loop->profiling_mutex, portMAX_DELAY);
    portENTER_CRITICAL(&s_event_loops_spinlock);
    SLIST_REMOVE(&s_event_loops, loop, esp_event_loop_instance, next);
    portEXIT_CRITICAL(&s_event_loops_spinlock);
#endif

    // Delete the task if it was created
    if (loop->task != NULL) {
        vTaskDelete(loop->task);
    }

    // Remove all registered events and handlers in the loop
    esp_event_loop_node_t *it, *temp;
    SLIST_FOREACH_SAFE(it, &(loop->loop_nodes), next, temp) {
        loop_node_remove_all_handler(it);
        SLIST_REMOVE(&(loop->loop_nodes), it, esp_event_loop_node, next);
        free(it);
    }

    // Drop existing posts on the queue
    esp_event_post_instance_t post;
    while (xQueueReceive(loop->queue, &post, 0) == pdTRUE) {
        post_instance_delete(&post);
    }

    // Cleanup loop
    vQueueDelete(loop->queue);
    free(loop);

    // Free loop mutex before deleting
    xSemaphoreGiveRecursive(loop_mutex);
#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    xSemaphoreGive(loop_profiling_mutex);
    vSemaphoreDelete(loop_profiling_mutex);
#endif
    vSemaphoreDelete(loop_mutex);

    ESP_LOGD(TAG, "deleted loop %p", (void*) event_loop);

    return ESP_OK;
}

esp_err_t esp_event_handler_register_with_internal(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
                                                   int32_t event_id, esp_event_handler_t event_handler, void* event_handler_arg,
                                                   esp_event_handler_instance_context_t** handler_ctx_arg, bool legacy)
{
    assert(event_loop);
    assert(event_handler);

    if (event_base == ESP_EVENT_ANY_BASE && event_id != ESP_EVENT_ANY_ID) {
        ESP_LOGE(TAG, "registering to any event base with specific id unsupported");
        return ESP_ERR_INVALID_ARG;
    }

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;

    if (event_base == ESP_EVENT_ANY_BASE) {
        event_base = esp_event_any_base;
    }

    esp_err_t err = ESP_OK;

    xSemaphoreTakeRecursive(loop->mutex, portMAX_DELAY);

    esp_event_loop_node_t *loop_node = NULL, *last_loop_node = NULL;

    SLIST_FOREACH(loop_node, &(loop->loop_nodes), next) {
        last_loop_node = loop_node;
    }

    bool is_loop_level_handler = (event_base == esp_event_any_base) && (event_id == ESP_EVENT_ANY_ID);

    if (!last_loop_node ||
            (last_loop_node && !SLIST_EMPTY(&(last_loop_node->base_nodes)) && is_loop_level_handler)) {
        loop_node = (esp_event_loop_node_t*) calloc(1, sizeof(*loop_node));

        if (!loop_node) {
            ESP_LOGE(TAG, "alloc for new loop node failed");
            err = ESP_ERR_NO_MEM;
            goto on_err;
        }

        SLIST_INIT(&(loop_node->handlers));
        SLIST_INIT(&(loop_node->base_nodes));

        err = loop_node_add_handler(loop_node, event_base, event_id, event_handler, event_handler_arg, handler_ctx_arg, legacy);

        if (err == ESP_OK) {
            if (!last_loop_node) {
                SLIST_INSERT_HEAD(&(loop->loop_nodes), loop_node, next);
            } else {
                SLIST_INSERT_AFTER(last_loop_node, loop_node, next);
            }
        } else {
            free(loop_node);
        }
    } else {
        err = loop_node_add_handler(last_loop_node, event_base, event_id, event_handler, event_handler_arg, handler_ctx_arg, legacy);
    }

on_err:
    xSemaphoreGiveRecursive(loop->mutex);
    return err;
}

esp_err_t esp_event_handler_register_with(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
                                          int32_t event_id, esp_event_handler_t event_handler, void* event_handler_arg)
{
    return esp_event_handler_register_with_internal(event_loop, event_base, event_id, event_handler, event_handler_arg, NULL, true);
}

esp_err_t esp_event_handler_instance_register_with(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
                                                   int32_t event_id, esp_event_handler_t event_handler, void* event_handler_arg,
                                                   esp_event_handler_instance_t* handler_ctx_arg)
{
    return esp_event_handler_register_with_internal(event_loop, event_base, event_id, event_handler, event_handler_arg, (esp_event_handler_instance_context_t**) handler_ctx_arg, false);
}
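
/*
 * Usage sketch (illustrative only): registering a handler for one event id on a user loop.
 * MY_EVENT_BASE, MY_EVENT_ID and my_handler are placeholders defined by the application;
 * the handler must match esp_event_handler_t.
 *
 *     static void my_handler(void* arg, esp_event_base_t base, int32_t id, void* data)
 *     {
 *         // react to the event
 *     }
 *
 *     ESP_ERROR_CHECK(esp_event_handler_register_with(loop_handle, MY_EVENT_BASE,
 *                                                     MY_EVENT_ID, my_handler, NULL));
 */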

esp_err_t esp_event_handler_unregister_with_internal(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
                                                     int32_t event_id, esp_event_handler_instance_context_t* handler_ctx, bool legacy)
{
    assert(event_loop);
    assert(handler_ctx);

    if (event_base == ESP_EVENT_ANY_BASE && event_id != ESP_EVENT_ANY_ID) {
        ESP_LOGE(TAG, "unregistering to any event base with specific id unsupported");
        return ESP_FAIL;
    }

    if (event_base == ESP_EVENT_ANY_BASE) {
        event_base = esp_event_any_base;
    }

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;

    xSemaphoreTakeRecursive(loop->mutex, portMAX_DELAY);

    esp_event_loop_node_t *it, *temp;

    SLIST_FOREACH_SAFE(it, &(loop->loop_nodes), next, temp) {
        esp_err_t res = loop_node_remove_handler(it, event_base, event_id, handler_ctx, legacy);

        if (res == ESP_OK && SLIST_EMPTY(&(it->base_nodes)) && SLIST_EMPTY(&(it->handlers))) {
            SLIST_REMOVE(&(loop->loop_nodes), it, esp_event_loop_node, next);
            free(it);
            break;
        }
    }

    xSemaphoreGiveRecursive(loop->mutex);

    return ESP_OK;
}

esp_err_t esp_event_handler_unregister_with(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
                                            int32_t event_id, esp_event_handler_t event_handler)
{
    esp_event_handler_instance_context_t local_handler_ctx;
    local_handler_ctx.handler = event_handler;
    local_handler_ctx.arg = NULL;

    return esp_event_handler_unregister_with_internal(event_loop, event_base, event_id, &local_handler_ctx, true);
}

esp_err_t esp_event_handler_instance_unregister_with(esp_event_loop_handle_t event_loop, esp_event_base_t event_base,
                                                     int32_t event_id, esp_event_handler_instance_t handler_ctx_arg)
{
    if (!handler_ctx_arg) {
        return ESP_ERR_INVALID_ARG;
    }

    return esp_event_handler_unregister_with_internal(event_loop, event_base, event_id, (esp_event_handler_instance_context_t*) handler_ctx_arg, false);
}
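
/*
 * Usage sketch (illustrative only): registering a handler instance and later unregistering
 * it through the returned context, which avoids the legacy "same function pointer" lookup.
 * MY_EVENT_BASE, MY_EVENT_ID and my_handler are placeholders.
 *
 *     esp_event_handler_instance_t instance;
 *     ESP_ERROR_CHECK(esp_event_handler_instance_register_with(loop_handle, MY_EVENT_BASE,
 *                                                              MY_EVENT_ID, my_handler, NULL, &instance));
 *     // ... later ...
 *     ESP_ERROR_CHECK(esp_event_handler_instance_unregister_with(loop_handle, MY_EVENT_BASE,
 *                                                                MY_EVENT_ID, instance));
 */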

esp_err_t esp_event_post_to(esp_event_loop_handle_t event_loop, esp_event_base_t event_base, int32_t event_id,
                            const void* event_data, size_t event_data_size, TickType_t ticks_to_wait)
{
    assert(event_loop);

    if (event_base == ESP_EVENT_ANY_BASE || event_id == ESP_EVENT_ANY_ID) {
        return ESP_ERR_INVALID_ARG;
    }

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;

    esp_event_post_instance_t post;
    memset((void*)(&post), 0, sizeof(post));

    if (event_data != NULL && event_data_size != 0) {
        // Make persistent copy of event data on heap.
        void* event_data_copy = calloc(1, event_data_size);

        if (event_data_copy == NULL) {
            return ESP_ERR_NO_MEM;
        }

        memcpy(event_data_copy, event_data, event_data_size);
#if CONFIG_ESP_EVENT_POST_FROM_ISR
        post.data.ptr = event_data_copy;
        post.data_allocated = true;
        post.data_set = true;
#else
        post.data = event_data_copy;
#endif
    }
    post.base = event_base;
    post.id = event_id;

    BaseType_t result = pdFALSE;

    // Find the task that currently executes the loop. It is safe to query loop->task since it is
    // not mutated since loop creation. ENSURE THIS REMAINS TRUE.
    if (loop->task == NULL) {
        // The loop has no dedicated task. Find out what task is currently running it.
        result = xSemaphoreTakeRecursive(loop->mutex, ticks_to_wait);

        if (result == pdTRUE) {
            if (loop->running_task != xTaskGetCurrentTaskHandle()) {
                xSemaphoreGiveRecursive(loop->mutex);
                result = xQueueSendToBack(loop->queue, &post, ticks_to_wait);
            } else {
                xSemaphoreGiveRecursive(loop->mutex);
                result = xQueueSendToBack(loop->queue, &post, 0);
            }
        }
    } else {
        // The loop has a dedicated task.
        if (loop->task != xTaskGetCurrentTaskHandle()) {
            result = xQueueSendToBack(loop->queue, &post, ticks_to_wait);
        } else {
            result = xQueueSendToBack(loop->queue, &post, 0);
        }
    }

    if (result != pdTRUE) {
        post_instance_delete(&post);

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
        atomic_fetch_add(&loop->events_dropped, 1);
#endif
        return ESP_ERR_TIMEOUT;
    }

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    atomic_fetch_add(&loop->events_recieved, 1);
#endif

    return ESP_OK;
}
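
/*
 * Usage sketch (illustrative only): posting an event with a small payload. The data is
 * copied to the heap by esp_event_post_to(), so passing a stack variable is safe.
 * my_payload_t, MY_EVENT_BASE and MY_EVENT_ID are placeholders; the 100 ms timeout is an
 * assumed example value.
 *
 *     my_payload_t payload = { .value = 42 };
 *     ESP_ERROR_CHECK(esp_event_post_to(loop_handle, MY_EVENT_BASE, MY_EVENT_ID,
 *                                       &payload, sizeof(payload), pdMS_TO_TICKS(100)));
 */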

#if CONFIG_ESP_EVENT_POST_FROM_ISR
esp_err_t esp_event_isr_post_to(esp_event_loop_handle_t event_loop, esp_event_base_t event_base, int32_t event_id,
                                const void* event_data, size_t event_data_size, BaseType_t* task_unblocked)
{
    assert(event_loop);

    if (event_base == ESP_EVENT_ANY_BASE || event_id == ESP_EVENT_ANY_ID) {
        return ESP_ERR_INVALID_ARG;
    }

    esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;

    esp_event_post_instance_t post;
    memset((void*)(&post), 0, sizeof(post));

    if (event_data_size > sizeof(post.data.val)) {
        return ESP_ERR_INVALID_ARG;
    }

    if (event_data != NULL && event_data_size != 0) {
        memcpy((void*)(&(post.data.val)), event_data, event_data_size);
        post.data_allocated = false;
        post.data_set = true;
    }
    post.base = event_base;
    post.id = event_id;

    BaseType_t result = pdFALSE;

    // Post the event from an ISR.
    result = xQueueSendToBackFromISR(loop->queue, &post, task_unblocked);

    if (result != pdTRUE) {
        post_instance_delete(&post);

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
        atomic_fetch_add(&loop->events_dropped, 1);
#endif
        return ESP_FAIL;
    }

#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    atomic_fetch_add(&loop->events_recieved, 1);
#endif

    return ESP_OK;
}
#endif
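
/*
 * Usage sketch (illustrative only): posting from an ISR. Only payloads that fit into the
 * queue item (sizeof(post.data.val)) are accepted; larger data must be posted from a task.
 * MY_EVENT_BASE and MY_EVENT_ID are placeholders.
 *
 *     BaseType_t task_unblocked = pdFALSE;
 *     esp_event_isr_post_to(loop_handle, MY_EVENT_BASE, MY_EVENT_ID, NULL, 0, &task_unblocked);
 *     if (task_unblocked == pdTRUE) {
 *         portYIELD_FROM_ISR();
 *     }
 */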

esp_err_t esp_event_dump(FILE* file)
{
#ifdef CONFIG_ESP_EVENT_LOOP_PROFILING
    assert(file);

    esp_event_loop_instance_t* loop_it;
    esp_event_loop_node_t* loop_node_it;
    esp_event_base_node_t* base_node_it;
    esp_event_id_node_t* id_node_it;
    esp_event_handler_node_t* handler_it;

    // Allocate memory for printing
    int sz = esp_event_dump_prepare();
    char* buf = calloc(sz, sizeof(char));
    char* dst = buf;

    char id_str_buf[20];

    // Print info to buffer
    portENTER_CRITICAL(&s_event_loops_spinlock);

    SLIST_FOREACH(loop_it, &s_event_loops, next) {
        uint32_t events_recieved, events_dropped;

        events_recieved = atomic_load(&loop_it->events_recieved);
        events_dropped = atomic_load(&loop_it->events_dropped);

        PRINT_DUMP_INFO(dst, sz, LOOP_DUMP_FORMAT, loop_it, loop_it->task != NULL ? loop_it->name : "none",
                        events_recieved, events_dropped);

        int sz_bak = sz;

        SLIST_FOREACH(loop_node_it, &(loop_it->loop_nodes), next) {
            SLIST_FOREACH(handler_it, &(loop_node_it->handlers), next) {
                PRINT_DUMP_INFO(dst, sz, HANDLER_DUMP_FORMAT, handler_it->handler_ctx->handler, "ESP_EVENT_ANY_BASE",
                                "ESP_EVENT_ANY_ID", handler_it->invoked, handler_it->time);
            }

            SLIST_FOREACH(base_node_it, &(loop_node_it->base_nodes), next) {
                SLIST_FOREACH(handler_it, &(base_node_it->handlers), next) {
                    PRINT_DUMP_INFO(dst, sz, HANDLER_DUMP_FORMAT, handler_it->handler_ctx->handler, base_node_it->base,
                                    "ESP_EVENT_ANY_ID", handler_it->invoked, handler_it->time);
                }

                SLIST_FOREACH(id_node_it, &(base_node_it->id_nodes), next) {
                    SLIST_FOREACH(handler_it, &(id_node_it->handlers), next) {
                        memset(id_str_buf, 0, sizeof(id_str_buf));
                        snprintf(id_str_buf, sizeof(id_str_buf), "%" PRIi32, id_node_it->id);

                        PRINT_DUMP_INFO(dst, sz, HANDLER_DUMP_FORMAT, handler_it->handler_ctx->handler, base_node_it->base,
                                        id_str_buf, handler_it->invoked, handler_it->time);
                    }
                }
            }
        }

        // No handlers registered for this loop
        if (sz == sz_bak) {
            PRINT_DUMP_INFO(dst, sz, "    NO HANDLERS REGISTERED\n");
        }
    }

    portEXIT_CRITICAL(&s_event_loops_spinlock);

    // Print the contents of the buffer to the file
    fprintf(file, "%s", buf);

    // Free the allocated buffer
    free(buf);
#endif
    return ESP_OK;
}
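
/*
 * Usage sketch (illustrative only): with CONFIG_ESP_EVENT_LOOP_PROFILING enabled, the
 * collected statistics can be written to any FILE stream, e.g.
 *
 *     ESP_ERROR_CHECK(esp_event_dump(stdout));
 */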