/*
 * SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
2016-12-14 20:45:40 -05:00
# include <string.h>
2022-04-26 09:48:07 -04:00
# include <sys/param.h>
2016-12-14 20:45:40 -05:00
# include "esp_types.h"
# include "esp_attr.h"
2019-03-26 04:30:43 -04:00
# include "esp_intr_alloc.h"
2016-12-14 20:45:40 -05:00
# include "esp_log.h"
# include "esp_err.h"
2021-07-08 01:18:27 -04:00
# include "esp_check.h"
2016-12-14 20:45:40 -05:00
# include "malloc.h"
# include "freertos/FreeRTOS.h"
# include "freertos/semphr.h"
# include "freertos/ringbuf.h"
2022-07-12 07:04:26 -04:00
# include "esp_private/critical_section.h"
2019-04-17 08:19:44 -04:00
# include "hal/uart_hal.h"
2021-03-15 22:55:05 -04:00
# include "hal/gpio_hal.h"
2022-07-27 22:47:13 -04:00
# include "hal/clk_tree_ll.h"
2019-05-13 06:02:45 -04:00
# include "soc/uart_periph.h"
2016-12-14 20:45:40 -05:00
# include "driver/uart.h"
# include "driver/gpio.h"
2018-05-03 04:41:10 -04:00
# include "driver/uart_select.h"
2021-10-25 05:13:46 -04:00
# include "esp_private/periph_ctrl.h"
2023-04-23 03:49:59 -04:00
# include "esp_clk_tree.h"
2019-06-05 22:57:29 -04:00
# include "sdkconfig.h"
2020-06-19 00:00:58 -04:00
# include "esp_rom_gpio.h"
2022-04-29 00:42:44 -04:00
# include "clk_ctrl_os.h"
2019-08-26 05:14:15 -04:00
# ifdef CONFIG_UART_ISR_IN_IRAM
2021-05-19 23:12:40 -04:00
# define UART_ISR_ATTR IRAM_ATTR
# define UART_MALLOC_CAPS (MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT)
2019-08-26 05:14:15 -04:00
# else
# define UART_ISR_ATTR
2021-05-19 23:12:40 -04:00
# define UART_MALLOC_CAPS MALLOC_CAP_DEFAULT
2019-08-26 05:14:15 -04:00
# endif
2019-06-13 23:01:30 -04:00
2019-04-17 08:19:44 -04:00
// Characters sent/recognized for software (XON/XOFF) flow control
#define XOFF (0x13)
#define XON (0x11)

// Tag used in log and ESP_RETURN_ON_* error messages
static const char *UART_TAG = "uart";

#define UART_EMPTY_THRESH_DEFAULT      (10)   // Default TX-FIFO-empty interrupt threshold
#define UART_FULL_THRESH_DEFAULT       (120)  // Default RX-FIFO-full interrupt threshold
#define UART_TOUT_THRESH_DEFAULT       (10)   // Default RX timeout threshold
#define UART_CLKDIV_FRAG_BIT_WIDTH     (3)    // Bit width of the fractional part of the baud clock divider
#define UART_TX_IDLE_NUM_DEFAULT       (0)    // Default idle interval inserted after TX
#define UART_PATTERN_DET_QLEN_DEFAULT  (10)   // Default length of the pattern-position queue
#define UART_MIN_WAKEUP_THRESH         (UART_LL_MIN_WAKEUP_THRESH)  // Minimum light-sleep wakeup threshold
2017-08-25 09:04:13 -04:00
2022-01-11 22:03:38 -05:00
#if SOC_UART_SUPPORT_WAKEUP_INT
// Interrupts enabled by default when the driver is installed.
// Bug fix: UART_INTR_WAKEUP was previously OR'ed OUTSIDE the outer
// parentheses, so the macro expanded unparenthesized and broke operator
// precedence when used inside a larger expression
// (e.g. `status & UART_INTR_CONFIG_FLAG`).
#define UART_INTR_CONFIG_FLAG ((UART_INTR_RXFIFO_FULL) \
                             | (UART_INTR_RXFIFO_TOUT) \
                             | (UART_INTR_RXFIFO_OVF) \
                             | (UART_INTR_BRK_DET) \
                             | (UART_INTR_PARITY_ERR) \
                             | (UART_INTR_WAKEUP))
#else
#define UART_INTR_CONFIG_FLAG ((UART_INTR_RXFIFO_FULL) \
                             | (UART_INTR_RXFIFO_TOUT) \
                             | (UART_INTR_RXFIFO_OVF) \
                             | (UART_INTR_BRK_DET) \
                             | (UART_INTR_PARITY_ERR))
#endif
2019-11-18 08:36:40 -05:00
2022-04-26 09:48:07 -04:00
2022-07-12 07:04:26 -04:00
# define UART_ENTER_CRITICAL_SAFE(spinlock) esp_os_enter_critical_safe(spinlock)
# define UART_EXIT_CRITICAL_SAFE(spinlock) esp_os_exit_critical_safe(spinlock)
# define UART_ENTER_CRITICAL_ISR(spinlock) esp_os_enter_critical_isr(spinlock)
# define UART_EXIT_CRITICAL_ISR(spinlock) esp_os_exit_critical_isr(spinlock)
# define UART_ENTER_CRITICAL(spinlock) esp_os_enter_critical(spinlock)
# define UART_EXIT_CRITICAL(spinlock) esp_os_exit_critical(spinlock)
2016-12-14 20:45:40 -05:00
2019-04-17 08:19:44 -04:00
2018-04-11 02:56:00 -04:00
// Check actual UART mode set
#define UART_IS_MODE_SET(uart_number, mode) ((p_uart_obj[uart_number]->uart_mode == mode))

// Static initializer for one uart_context_t entry: binds the HAL handle to the
// port's register block, initializes the spinlock, marks hardware disabled.
#define UART_CONTEX_INIT_DEF(uart_num) {\
    .hal.dev = UART_LL_GET_HW(uart_num),\
    INIT_CRIT_SECTION_LOCK_IN_STRUCT(spinlock)\
    .hw_enabled = false,\
}
2016-12-14 20:45:40 -05:00
/**
 * @brief Layout of one item pushed into the TX ring buffer.
 */
typedef struct {
    uart_event_type_t type;   /*!< UART TX data type */
    struct {
        int brk_len;          /*!< Break signal length to send after the data */
        size_t size;          /*!< Number of payload bytes in `data` */
        uint8_t data[];       /*!< Payload; C99 flexible array member (was the non-standard `data[0]`) */
    } tx_data;
} uart_tx_data_t;
2017-08-25 09:04:13 -04:00
// Fixed-capacity ring buffer of pattern-detection positions within RX data.
typedef struct {
    int wr;     // write index
    int rd;     // read index
    int len;    // capacity (number of slots); set by uart_pattern_queue_reset()
    int *data;  // backing array; NULL until allocated by uart_pattern_queue_reset()
} uart_pat_rb_t;
2016-12-14 20:45:40 -05:00
// Per-port driver state, referenced through p_uart_obj[]. NULL until the
// driver is installed (allocation site is outside this chunk).
typedef struct {
    uart_port_t uart_num;                   /*!< UART port number*/
    int event_queue_size;                   /*!< UART event queue size*/
    intr_handle_t intr_handle;              /*!< UART interrupt handle*/
    uart_mode_t uart_mode;                  /*!< UART controller actual mode set by uart_set_mode() */
    bool coll_det_flg;                      /*!< UART collision detection flag */
    bool rx_always_timeout_flg;             /*!< UART always detect rx timeout flag */
    int rx_buffered_len;                    /*!< UART cached data length */
    int rx_buf_size;                        /*!< RX ring buffer size */
    bool rx_buffer_full_flg;                /*!< RX ring buffer full flag. */
    uint8_t rx_data_buf[SOC_UART_FIFO_LEN]; /*!< Data buffer to stash FIFO data*/
    uint8_t rx_stash_len;                   /*!< stashed data length.(When using flow control, after reading out FIFO data, if we fail to push to buffer, we can just stash them.) */
    uint32_t rx_int_usr_mask;               /*!< RX interrupt status. Valid at any time, regardless of RX buffer status. */
    uart_pat_rb_t rx_pattern_pos;           /*!< Ring buffer of detected pattern positions in RX data */
    int tx_buf_size;                        /*!< TX ring buffer size */
    bool tx_waiting_fifo;                   /*!< this flag indicates that some task is waiting for FIFO empty interrupt, used to send all data without any data buffer*/
    uint8_t *tx_ptr;                        /*!< TX data pointer to push to FIFO in TX buffer mode*/
    uart_tx_data_t *tx_head;                /*!< TX data pointer to head of the current buffer in TX ring buffer*/
    uint32_t tx_len_tot;                    /*!< Total length of current item in ring buffer*/
    uint32_t tx_len_cur;                    /*!< Length within the current item — NOTE(review): inferred from name; confirm against the TX ISR (not in this chunk) */
    uint8_t tx_brk_flg;                     /*!< Flag to indicate to send a break signal in the end of the item sending procedure */
    uint8_t tx_brk_len;                     /*!< TX break signal cycle length/number */
    uint8_t tx_waiting_brk;                 /*!< Flag to indicate that TX FIFO is ready to send break signal after FIFO is empty, do not push data into TX FIFO right now.*/
    uart_select_notif_callback_t uart_select_notif_callback; /*!< Notification about select() events */
    QueueHandle_t event_queue;              /*!< UART event queue handler*/
    RingbufHandle_t rx_ring_buf;            /*!< RX ring buffer handler*/
    RingbufHandle_t tx_ring_buf;            /*!< TX ring buffer handler*/
    SemaphoreHandle_t rx_mux;               /*!< UART RX data mutex*/
    SemaphoreHandle_t tx_mux;               /*!< UART TX mutex*/
    SemaphoreHandle_t tx_fifo_sem;          /*!< UART TX FIFO semaphore*/
    SemaphoreHandle_t tx_done_sem;          /*!< UART TX done semaphore*/
    SemaphoreHandle_t tx_brk_sem;           /*!< UART TX send break done semaphore*/
#if CONFIG_UART_ISR_IN_IRAM
    // Backing storage pointers for the statically-created queue/ring-buffer/
    // semaphore objects — presumably kept so they can be freed on uninstall
    // and placed per UART_MALLOC_CAPS; confirm against install/uninstall code.
    void *event_queue_storage;
    void *event_queue_struct;
    void *rx_ring_buf_storage;
    void *rx_ring_buf_struct;
    void *tx_ring_buf_storage;
    void *tx_ring_buf_struct;
    void *rx_mux_struct;
    void *tx_mux_struct;
    void *tx_fifo_sem_struct;
    void *tx_done_sem_struct;
    void *tx_brk_sem_struct;
#endif
} uart_obj_t;
2019-04-17 08:19:44 -04:00
// Per-port hardware context, usable even when no driver object is installed.
typedef struct {
    uart_hal_context_t hal;                         /*!< UART hal context*/
    DECLARE_CRIT_SECTION_LOCK_IN_STRUCT(spinlock)   // Guards HAL/register accesses for this port
    bool hw_enabled;                                // True once the peripheral module clock is enabled
} uart_context_t;
2016-12-14 20:45:40 -05:00
// Per-port driver objects; NULL for ports whose driver is not installed.
static uart_obj_t *p_uart_obj[UART_NUM_MAX] = {0};

// Per-port hardware contexts (HAL handle + spinlock), statically initialized.
static uart_context_t uart_context[UART_NUM_MAX] = {
    UART_CONTEX_INIT_DEF(UART_NUM_0),
    UART_CONTEX_INIT_DEF(UART_NUM_1),
#if UART_NUM_MAX > 2
    UART_CONTEX_INIT_DEF(UART_NUM_2),
#endif
};

// Lock shared by all ports for select()-notification callback accesses.
static portMUX_TYPE uart_selectlock = portMUX_INITIALIZER_UNLOCKED;
2016-12-14 20:45:40 -05:00
2019-04-17 08:19:44 -04:00
// Enable the UART peripheral module clock once per port (idempotent, guarded
// by hw_enabled). The console UART is never reset here so that boot/console
// output is not disturbed.
static void uart_module_enable(uart_port_t uart_num)
{
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    if (uart_context[uart_num].hw_enabled != true) {
        periph_module_enable(uart_periph_signal[uart_num].module);
        if (uart_num != CONFIG_ESP_CONSOLE_UART_NUM) {
            // Workaround for ESP32C3/S3: enable core reset before enabling uart module clock to prevent uart output
            // garbage value.
#if SOC_UART_REQUIRE_CORE_RESET
            uart_hal_set_reset_core(&(uart_context[uart_num].hal), true);
            periph_module_reset(uart_periph_signal[uart_num].module);
            uart_hal_set_reset_core(&(uart_context[uart_num].hal), false);
#else
            periph_module_reset(uart_periph_signal[uart_num].module);
#endif
        }
        uart_context[uart_num].hw_enabled = true;
    }
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
}
// Disable the UART peripheral module clock, unless the port serves as the
// system console (the console must keep running).
static void uart_module_disable(uart_port_t uart_num)
{
    uart_context_t *ctx = &uart_context[uart_num];
    UART_ENTER_CRITICAL(&ctx->spinlock);
    if (ctx->hw_enabled) {
        if (uart_num != CONFIG_ESP_CONSOLE_UART_NUM) {
            periph_module_disable(uart_periph_signal[uart_num].module);
        }
        ctx->hw_enabled = false;
    }
    UART_EXIT_CRITICAL(&ctx->spinlock);
}
2023-01-31 02:09:24 -05:00
// Query the frequency (Hz) of the given UART source clock via the clock tree.
esp_err_t uart_get_sclk_freq(uart_sclk_t sclk, uint32_t *out_freq_hz)
{
    soc_module_clk_t clk = (soc_module_clk_t)sclk;
    return esp_clk_tree_src_get_freq_hz(clk, ESP_CLK_TREE_SRC_FREQ_PRECISION_CACHED, out_freq_hz);
}
2016-12-14 20:45:40 -05:00
// Set the number of data bits per UART frame.
esp_err_t uart_set_word_length(uart_port_t uart_num, uart_word_length_t data_bit)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((data_bit < UART_DATA_BITS_MAX), ESP_FAIL, UART_TAG, "data bit error");
    uart_context_t *ctx = &uart_context[uart_num];
    UART_ENTER_CRITICAL(&ctx->spinlock);
    uart_hal_set_data_bit_num(&ctx->hal, data_bit);
    UART_EXIT_CRITICAL(&ctx->spinlock);
    return ESP_OK;
}
2021-05-19 08:32:55 -04:00
// Read back the configured number of data bits.
esp_err_t uart_get_word_length(uart_port_t uart_num, uart_word_length_t *data_bit)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    // Consistency fix: read under the port spinlock like the other getters
    // (uart_get_stop_bits(), uart_get_parity()) to avoid racing a concurrent
    // reconfiguration.
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    uart_hal_get_data_bit_num(&(uart_context[uart_num].hal), data_bit);
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
    return ESP_OK;
}
// Configure the number of stop bits per UART frame.
esp_err_t uart_set_stop_bits(uart_port_t uart_num, uart_stop_bits_t stop_bit)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((stop_bit < UART_STOP_BITS_MAX), ESP_FAIL, UART_TAG, "stop bit error");
    uart_context_t *ctx = &uart_context[uart_num];
    UART_ENTER_CRITICAL(&ctx->spinlock);
    uart_hal_set_stop_bits(&ctx->hal, stop_bit);
    UART_EXIT_CRITICAL(&ctx->spinlock);
    return ESP_OK;
}
2021-05-19 08:32:55 -04:00
// Read back the configured number of stop bits.
esp_err_t uart_get_stop_bits(uart_port_t uart_num, uart_stop_bits_t *stop_bit)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    uart_context_t *ctx = &uart_context[uart_num];
    UART_ENTER_CRITICAL(&ctx->spinlock);
    uart_hal_get_stop_bits(&ctx->hal, stop_bit);
    UART_EXIT_CRITICAL(&ctx->spinlock);
    return ESP_OK;
}
// Configure the UART parity mode.
esp_err_t uart_set_parity(uart_port_t uart_num, uart_parity_t parity_mode)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    uart_context_t *ctx = &uart_context[uart_num];
    UART_ENTER_CRITICAL(&ctx->spinlock);
    uart_hal_set_parity(&ctx->hal, parity_mode);
    UART_EXIT_CRITICAL(&ctx->spinlock);
    return ESP_OK;
}
2021-05-19 08:32:55 -04:00
// Read back the configured parity mode.
esp_err_t uart_get_parity(uart_port_t uart_num, uart_parity_t *parity_mode)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    uart_context_t *ctx = &uart_context[uart_num];
    UART_ENTER_CRITICAL(&ctx->spinlock);
    uart_hal_get_parity(&ctx->hal, parity_mode);
    UART_EXIT_CRITICAL(&ctx->spinlock);
    return ESP_OK;
}
// Program the baud rate, deriving the divider from the currently selected
// source clock's frequency.
esp_err_t uart_set_baudrate(uart_port_t uart_num, uint32_t baud_rate)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    uart_context_t *ctx = &uart_context[uart_num];
    uart_sclk_t src_clk;
    uart_hal_get_sclk(&ctx->hal, &src_clk);
    uint32_t sclk_freq;
    ESP_RETURN_ON_ERROR(uart_get_sclk_freq(src_clk, &sclk_freq), UART_TAG, "Invalid src_clk");
    UART_ENTER_CRITICAL(&ctx->spinlock);
    uart_hal_set_baudrate(&ctx->hal, baud_rate, sclk_freq);
    UART_EXIT_CRITICAL(&ctx->spinlock);
    return ESP_OK;
}
2019-07-01 06:00:10 -04:00
// Compute the currently configured baud rate from the divider and the source
// clock's frequency.
esp_err_t uart_get_baudrate(uart_port_t uart_num, uint32_t *baudrate)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    uart_context_t *ctx = &uart_context[uart_num];
    uart_sclk_t src_clk;
    uart_hal_get_sclk(&ctx->hal, &src_clk);
    uint32_t sclk_freq;
    ESP_RETURN_ON_ERROR(uart_get_sclk_freq(src_clk, &sclk_freq), UART_TAG, "Invalid src_clk");
    UART_ENTER_CRITICAL(&ctx->spinlock);
    uart_hal_get_baudrate(&ctx->hal, baudrate, sclk_freq);
    UART_EXIT_CRITICAL(&ctx->spinlock);
    return ESP_OK;
}
// Invert the UART signal lines selected by `inverse_mask`.
esp_err_t uart_set_line_inverse(uart_port_t uart_num, uint32_t inverse_mask)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    uart_context_t *ctx = &uart_context[uart_num];
    UART_ENTER_CRITICAL(&ctx->spinlock);
    uart_hal_inverse_signal(&ctx->hal, inverse_mask);
    UART_EXIT_CRITICAL(&ctx->spinlock);
    return ESP_OK;
}
2017-08-01 11:02:44 -04:00
// Enable or disable software (XON/XOFF) flow control with the given RX FIFO
// thresholds.
esp_err_t uart_set_sw_flow_ctrl(uart_port_t uart_num, bool enable, uint8_t rx_thresh_xon, uint8_t rx_thresh_xoff)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((rx_thresh_xon < SOC_UART_FIFO_LEN), ESP_FAIL, UART_TAG, "rx flow xon thresh error");
    ESP_RETURN_ON_FALSE((rx_thresh_xoff < SOC_UART_FIFO_LEN), ESP_FAIL, UART_TAG, "rx flow xoff thresh error");
    uart_sw_flowctrl_t cfg = {
        .xon_char  = XON,
        .xoff_char = XOFF,
        .xon_thrd  = rx_thresh_xon,
        .xoff_thrd = rx_thresh_xoff,
    };
    uart_context_t *ctx = &uart_context[uart_num];
    UART_ENTER_CRITICAL(&ctx->spinlock);
    uart_hal_set_sw_flow_ctrl(&ctx->hal, &cfg, enable);
    UART_EXIT_CRITICAL(&ctx->spinlock);
    return ESP_OK;
}
2016-12-14 20:45:40 -05:00
// Configure hardware (RTS/CTS) flow control and the RX threshold at which RTS
// is deasserted.
esp_err_t uart_set_hw_flow_ctrl(uart_port_t uart_num, uart_hw_flowcontrol_t flow_ctrl, uint8_t rx_thresh)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((rx_thresh < SOC_UART_FIFO_LEN), ESP_FAIL, UART_TAG, "rx flow thresh error");
    ESP_RETURN_ON_FALSE((flow_ctrl < UART_HW_FLOWCTRL_MAX), ESP_FAIL, UART_TAG, "hw_flowctrl mode error");
    uart_context_t *ctx = &uart_context[uart_num];
    UART_ENTER_CRITICAL(&ctx->spinlock);
    uart_hal_set_hw_flow_ctrl(&ctx->hal, flow_ctrl, rx_thresh);
    UART_EXIT_CRITICAL(&ctx->spinlock);
    return ESP_OK;
}
2021-05-19 08:32:55 -04:00
// Read back the configured hardware flow-control mode.
esp_err_t uart_get_hw_flow_ctrl(uart_port_t uart_num, uart_hw_flowcontrol_t *flow_ctrl)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    uart_context_t *ctx = &uart_context[uart_num];
    UART_ENTER_CRITICAL(&ctx->spinlock);
    uart_hal_get_hw_flow_ctrl(&ctx->hal, flow_ctrl);
    UART_EXIT_CRITICAL(&ctx->spinlock);
    return ESP_OK;
}
2019-08-26 05:14:15 -04:00
// Clear the interrupt status bits selected by `clr_mask` (ISR-safe).
esp_err_t UART_ISR_ATTR uart_clear_intr_status(uart_port_t uart_num, uint32_t clr_mask)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    uart_hal_context_t *hal = &(uart_context[uart_num].hal);
    uart_hal_clr_intsts_mask(hal, clr_mask);
    return ESP_OK;
}
// Enable the UART interrupts selected by `enable_mask`, clearing any pending
// status first, and record the user's choice in rx_int_usr_mask.
esp_err_t uart_enable_intr_mask(uart_port_t uart_num, uint32_t enable_mask)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    // Robustness fix: rx_int_usr_mask lives in the driver object; previously
    // this was dereferenced unconditionally and crashed when called before the
    // driver was installed.
    ESP_RETURN_ON_FALSE((p_uart_obj[uart_num]), ESP_ERR_INVALID_STATE, UART_TAG, "uart driver error");
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    /* Keep track of the interrupt toggling. In fact, without such variable,
     * once the RX buffer is full and the RX interrupts disabled, it is
     * impossible what was the previous state (enabled/disabled) of these
     * interrupt masks. Thus, this will be very particularly handy when
     * emptying a filled RX buffer. */
    p_uart_obj[uart_num]->rx_int_usr_mask |= enable_mask;
    uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), enable_mask);
    uart_hal_ena_intr_mask(&(uart_context[uart_num].hal), enable_mask);
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
    return ESP_OK;
}
2021-12-07 23:15:02 -05:00
/**
 * @brief Function re-enabling the given interrupts (mask) if and only if
 * they have not been disabled by the user.
 *
 * @param uart_num UART number to perform the operation on
 * @param enable_mask Interrupts (flags) to be re-enabled
 *
 * @return ESP_OK in success, ESP_FAIL if uart_num is incorrect
 */
static esp_err_t uart_reenable_intr_mask(uart_port_t uart_num, uint32_t enable_mask)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    /* Mask will only contain the interrupt flags that needs to be re-enabled
     * AND which have NOT been explicitly disabled by the user. */
    uint32_t mask = p_uart_obj[uart_num]->rx_int_usr_mask & enable_mask;
    uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), mask);
    uart_hal_ena_intr_mask(&(uart_context[uart_num].hal), mask);
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
    return ESP_OK;
}
2016-12-14 20:45:40 -05:00
// Disable the UART interrupts selected by `disable_mask` and remember the
// user's choice so the driver will not silently re-enable them later
// (see uart_reenable_intr_mask()).
esp_err_t uart_disable_intr_mask(uart_port_t uart_num, uint32_t disable_mask)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    // Robustness fix: rx_int_usr_mask lives in the driver object; previously
    // this was dereferenced unconditionally and crashed when called before the
    // driver was installed.
    ESP_RETURN_ON_FALSE((p_uart_obj[uart_num]), ESP_ERR_INVALID_STATE, UART_TAG, "uart driver error");
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    p_uart_obj[uart_num]->rx_int_usr_mask &= ~disable_mask;
    uart_hal_disable_intr_mask(&(uart_context[uart_num].hal), disable_mask);
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
    return ESP_OK;
}
2017-08-25 09:04:13 -04:00
// Release the pattern-position queue's backing array. The pointer is detached
// inside the critical section; the free() itself happens outside it.
static esp_err_t uart_pattern_link_free(uart_port_t uart_num)
{
    int *to_free = NULL;
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    uart_pat_rb_t *rb = &p_uart_obj[uart_num]->rx_pattern_pos;
    if (rb->data != NULL) {
        to_free = rb->data;
        rb->data = NULL;
        rb->wr = 0;
        rb->rd = 0;
    }
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
    free(to_free);  // free(NULL) is a no-op
    return ESP_OK;
}
2019-08-26 05:14:15 -04:00
// Push a detected pattern position into the ring queue (called from ISR
// context). Returns ESP_FAIL and drops the position when the queue is full.
static esp_err_t UART_ISR_ATTR uart_pattern_enqueue(uart_port_t uart_num, int pos)
{
    uart_pat_rb_t *rb = &p_uart_obj[uart_num]->rx_pattern_pos;
    int next_wr = rb->wr + 1;
    if (next_wr >= rb->len) {
        next_wr = 0;
    }
    if (next_wr == rb->rd) {
        // Queue full: one slot is always kept empty to distinguish full/empty.
#ifndef CONFIG_UART_ISR_IN_IRAM //Only log if ISR is not in IRAM
        ESP_EARLY_LOGW(UART_TAG, "Fail to enqueue pattern position, pattern queue is full.");
#endif
        return ESP_FAIL;
    }
    rb->data[rb->wr] = pos;
    rb->wr = next_wr;
    return ESP_OK;
}
// Pop the oldest pattern position from the queue. Returns
// ESP_ERR_INVALID_STATE when the queue was never allocated, ESP_FAIL when it
// is empty, ESP_OK otherwise.
static esp_err_t uart_pattern_dequeue(uart_port_t uart_num)
{
    uart_pat_rb_t *rb = &p_uart_obj[uart_num]->rx_pattern_pos;
    if (rb->data == NULL) {
        return ESP_ERR_INVALID_STATE;
    }
    esp_err_t ret = ESP_OK;
    if (rb->rd == rb->wr) {
        ret = ESP_FAIL;         // nothing queued
    } else {
        rb->rd++;
    }
    if (rb->rd >= rb->len) {    // wrap the read index
        rb->rd = 0;
    }
    return ret;
}
// Shift every queued pattern position down by `diff_len` — presumably called
// after `diff_len` bytes were consumed from the RX buffer (caller not in this
// chunk; confirm). Entries that go negative point into already-consumed data,
// so the queue's read index is advanced past them.
static esp_err_t uart_pattern_queue_update(uart_port_t uart_num, int diff_len)
{
    uart_pat_rb_t *p_pos = &p_uart_obj[uart_num]->rx_pattern_pos;
    int rd = p_pos->rd;
    while (rd != p_pos->wr) {
        p_pos->data[rd] -= diff_len;
        int rd_rec = rd;    // remember the slot just adjusted
        rd++;
        if (rd >= p_pos->len) {
            rd = 0;
        }
        if (p_pos->data[rd_rec] < 0) {
            // Stale position: drop it by moving the read index beyond it.
            p_pos->rd = rd;
        }
    }
    return ESP_OK;
}
// Return and remove the oldest detected pattern position, or -1 if none.
int uart_pattern_pop_pos(uart_port_t uart_num)
{
    ESP_RETURN_ON_FALSE((p_uart_obj[uart_num]), (-1), UART_TAG, "uart driver error");
    int pos = -1;
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    uart_pat_rb_t *rb = &p_uart_obj[uart_num]->rx_pattern_pos;
    if (rb->rd != rb->wr) {
        pos = rb->data[rb->rd];
        uart_pattern_dequeue(uart_num);
    }
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
    return pos;
}
2018-04-03 00:34:16 -04:00
// Peek at the oldest detected pattern position without removing it, or -1 if
// none is queued.
int uart_pattern_get_pos(uart_port_t uart_num)
{
    ESP_RETURN_ON_FALSE((p_uart_obj[uart_num]), (-1), UART_TAG, "uart driver error");
    int pos = -1;
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    uart_pat_rb_t *rb = &p_uart_obj[uart_num]->rx_pattern_pos;
    if (rb->rd != rb->wr) {
        pos = rb->data[rb->rd];
    }
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
    return pos;
}
2017-08-25 09:04:13 -04:00
// (Re)allocate the pattern-position queue with `queue_length` slots. Any
// previously queued positions are discarded.
esp_err_t uart_pattern_queue_reset(uart_port_t uart_num, int queue_length)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((p_uart_obj[uart_num]), ESP_ERR_INVALID_STATE, UART_TAG, "uart driver error");
    // Robustness fix: reject non-positive lengths; previously a 0/negative
    // length fed malloc() directly.
    ESP_RETURN_ON_FALSE((queue_length > 0), ESP_FAIL, UART_TAG, "queue_length error");

    // calloc() checks the count*size multiplication for overflow and
    // zero-initializes the array (malloc() did neither).
    int *pdata = (int *)calloc(queue_length, sizeof(int));
    if (pdata == NULL) {
        return ESP_ERR_NO_MEM;
    }
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    int *ptmp = p_uart_obj[uart_num]->rx_pattern_pos.data;  // old array, freed outside the lock
    p_uart_obj[uart_num]->rx_pattern_pos.data = pdata;
    p_uart_obj[uart_num]->rx_pattern_pos.len = queue_length;
    p_uart_obj[uart_num]->rx_pattern_pos.rd = 0;
    p_uart_obj[uart_num]->rx_pattern_pos.wr = 0;
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
    free(ptmp);
    return ESP_OK;
}
2019-07-01 06:00:10 -04:00
// Enable detection of `pattern_chr` repeated `chr_num` times, then the
// CMD_CHAR_DET interrupt. On ESP32 the chr_tout/post_idle/pre_idle values are
// scaled by the current baud divisor (APB cycles per bit); on other targets
// they are passed to the hardware unchanged.
esp_err_t uart_enable_pattern_det_baud_intr(uart_port_t uart_num, char pattern_chr, uint8_t chr_num, int chr_tout, int post_idle, int pre_idle)
{
    ESP_RETURN_ON_FALSE(uart_num < UART_NUM_MAX, ESP_FAIL, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE(chr_tout >= 0 && chr_tout <= UART_RX_GAP_TOUT_V, ESP_FAIL, UART_TAG, "uart pattern set error\n");
    ESP_RETURN_ON_FALSE(post_idle >= 0 && post_idle <= UART_POST_IDLE_NUM_V, ESP_FAIL, UART_TAG, "uart pattern set error\n");
    ESP_RETURN_ON_FALSE(pre_idle >= 0 && pre_idle <= UART_PRE_IDLE_NUM_V, ESP_FAIL, UART_TAG, "uart pattern set error\n");
    uart_at_cmd_t at_cmd = {0};
    at_cmd.cmd_char = pattern_chr;
    at_cmd.char_num = chr_num;
#if CONFIG_IDF_TARGET_ESP32
    // Scale the timing parameters from bit-times to APB clock cycles.
    uint32_t apb_clk_freq = 0;
    uint32_t uart_baud = 0;
    uint32_t uart_div = 0;
    uart_get_baudrate(uart_num, &uart_baud);
    esp_clk_tree_src_get_freq_hz((soc_module_clk_t)UART_SCLK_APB, ESP_CLK_TREE_SRC_FREQ_PRECISION_EXACT, &apb_clk_freq);
    // NOTE(review): assumes uart_baud != 0 — division by zero otherwise; confirm upstream guarantees.
    uart_div = apb_clk_freq / uart_baud;
    at_cmd.gap_tout = chr_tout * uart_div;
    at_cmd.pre_idle = pre_idle * uart_div;
    at_cmd.post_idle = post_idle * uart_div;
#else
    at_cmd.gap_tout = chr_tout;
    at_cmd.pre_idle = pre_idle;
    at_cmd.post_idle = post_idle;
#endif
    uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_CMD_CHAR_DET);
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    uart_hal_set_at_cmd_char(&(uart_context[uart_num].hal), &at_cmd);
    uart_hal_ena_intr_mask(&(uart_context[uart_num].hal), UART_INTR_CMD_CHAR_DET);
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
    return ESP_OK;
}
2019-04-17 08:19:44 -04:00
2016-12-18 23:52:10 -05:00
// Turn off pattern detection (the CMD_CHAR_DET interrupt).
esp_err_t uart_disable_pattern_det_intr(uart_port_t uart_num)
{
    const uint32_t mask = UART_INTR_CMD_CHAR_DET;
    return uart_disable_intr_mask(uart_num, mask);
}
2016-12-14 20:45:40 -05:00
// Enable the RX-full and RX-timeout interrupts.
esp_err_t uart_enable_rx_intr(uart_port_t uart_num)
{
    const uint32_t rx_mask = UART_INTR_RXFIFO_FULL | UART_INTR_RXFIFO_TOUT;
    return uart_enable_intr_mask(uart_num, rx_mask);
}
// Disable the RX-full and RX-timeout interrupts.
esp_err_t uart_disable_rx_intr(uart_port_t uart_num)
{
    const uint32_t rx_mask = UART_INTR_RXFIFO_FULL | UART_INTR_RXFIFO_TOUT;
    return uart_disable_intr_mask(uart_num, rx_mask);
}
// Disable the TX-FIFO-empty interrupt.
esp_err_t uart_disable_tx_intr(uart_port_t uart_num)
{
    const uint32_t tx_mask = UART_INTR_TXFIFO_EMPTY;
    return uart_disable_intr_mask(uart_num, tx_mask);
}
// Enable (enable != 0) or disable (enable == 0) the TX-FIFO-empty interrupt,
// with `thresh` as the FIFO level below which it fires.
esp_err_t uart_enable_tx_intr(uart_port_t uart_num, int enable, int thresh)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((thresh < SOC_UART_FIFO_LEN), ESP_FAIL, UART_TAG, "empty intr threshold error");
    // Bug fix: the `enable` parameter was previously ignored and the interrupt
    // was unconditionally enabled. Honor enable == 0 as a request to disable.
    if (enable == 0) {
        return uart_disable_intr_mask(uart_num, UART_INTR_TXFIFO_EMPTY);
    }
    uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_TXFIFO_EMPTY);
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    uart_hal_set_txfifo_empty_thr(&(uart_context[uart_num].hal), thresh);
    uart_hal_ena_intr_mask(&(uart_context[uart_num].hal), UART_INTR_TXFIFO_EMPTY);
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
    return ESP_OK;
}
2021-07-09 02:20:33 -04:00
// Try to route the signal at `idx` through the dedicated IO_MUX function.
// Succeeds only when `io_num` is exactly this UART's default pin for that
// signal AND the pin has an IO_MUX function; returns false otherwise so the
// caller can fall back to the GPIO matrix.
static bool uart_try_set_iomux_pin(uart_port_t uart_num, int io_num, uint32_t idx)
{
    const uart_periph_sig_t *upin = &uart_periph_signal[uart_num].pins[idx];

    // In theory iomux_func == -1 implies default_gpio == -1, but test both to
    // be safe (mirrors the original defensive check).
    bool usable = (upin->iomux_func != -1) && (upin->default_gpio != -1) && (upin->default_gpio == io_num);
    if (!usable) {
        return false;
    }
    // Select the IO_MUX function for this pad.
    gpio_iomux_out(io_num, upin->iomux_func, false);
    // Input signals must also be redirected so the GPIO matrix is bypassed.
    if (upin->input) {
        gpio_iomux_in(io_num, upin->signal);
    }
    return true;
}
2016-12-14 20:45:40 -05:00
//internal signal can be output to multiple GPIO pads
//only one GPIO pad can connect with input signal
/**
 * @brief Assign the TX/RX/RTS/CTS signals of a UART port to GPIO pads.
 *
 * A negative io_num leaves the corresponding signal untouched. Each pin is
 * first routed directly through the IO MUX when possible
 * (uart_try_set_iomux_pin()); otherwise it is connected via the GPIO matrix.
 *
 * @param uart_num   UART port number (0 <= uart_num < UART_NUM_MAX).
 * @param tx_io_num  GPIO for TX (must be output-capable), or negative to skip.
 * @param rx_io_num  GPIO for RX, or negative to skip.
 * @param rts_io_num GPIO for RTS (must be output-capable), or negative to skip.
 * @param cts_io_num GPIO for CTS, or negative to skip.
 * @return ESP_OK on success, ESP_FAIL on invalid arguments.
 */
esp_err_t uart_set_pin(uart_port_t uart_num, int tx_io_num, int rx_io_num, int rts_io_num, int cts_io_num)
{
    // Single range check replaces the original duplicated pair of
    // (uart_num >= 0) and (uart_num < UART_NUM_MAX) tests, which both
    // reported the same "uart_num error".
    ESP_RETURN_ON_FALSE((uart_num >= 0 && uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((tx_io_num < 0 || (GPIO_IS_VALID_OUTPUT_GPIO(tx_io_num))), ESP_FAIL, UART_TAG, "tx_io_num error");
    ESP_RETURN_ON_FALSE((rx_io_num < 0 || (GPIO_IS_VALID_GPIO(rx_io_num))), ESP_FAIL, UART_TAG, "rx_io_num error");
    ESP_RETURN_ON_FALSE((rts_io_num < 0 || (GPIO_IS_VALID_OUTPUT_GPIO(rts_io_num))), ESP_FAIL, UART_TAG, "rts_io_num error");
    ESP_RETURN_ON_FALSE((cts_io_num < 0 || (GPIO_IS_VALID_GPIO(cts_io_num))), ESP_FAIL, UART_TAG, "cts_io_num error");

    /* In the following statements, if the io_num is negative, no need to configure anything. */
    if (tx_io_num >= 0 && !uart_try_set_iomux_pin(uart_num, tx_io_num, SOC_UART_TX_PIN_IDX)) {
        gpio_hal_iomux_func_sel(GPIO_PIN_MUX_REG[tx_io_num], PIN_FUNC_GPIO);
        // Drive TX high (idle level) before connecting, to avoid a glitch on the line.
        gpio_set_level(tx_io_num, 1);
        esp_rom_gpio_connect_out_signal(tx_io_num, UART_PERIPH_SIGNAL(uart_num, SOC_UART_TX_PIN_IDX), 0, 0);
    }
    if (rx_io_num >= 0 && !uart_try_set_iomux_pin(uart_num, rx_io_num, SOC_UART_RX_PIN_IDX)) {
        gpio_hal_iomux_func_sel(GPIO_PIN_MUX_REG[rx_io_num], PIN_FUNC_GPIO);
        // Pull-up keeps the line at idle level when nothing drives it.
        gpio_set_pull_mode(rx_io_num, GPIO_PULLUP_ONLY);
        gpio_set_direction(rx_io_num, GPIO_MODE_INPUT);
        esp_rom_gpio_connect_in_signal(rx_io_num, UART_PERIPH_SIGNAL(uart_num, SOC_UART_RX_PIN_IDX), 0);
    }
    if (rts_io_num >= 0 && !uart_try_set_iomux_pin(uart_num, rts_io_num, SOC_UART_RTS_PIN_IDX)) {
        gpio_hal_iomux_func_sel(GPIO_PIN_MUX_REG[rts_io_num], PIN_FUNC_GPIO);
        gpio_set_direction(rts_io_num, GPIO_MODE_OUTPUT);
        esp_rom_gpio_connect_out_signal(rts_io_num, UART_PERIPH_SIGNAL(uart_num, SOC_UART_RTS_PIN_IDX), 0, 0);
    }
    if (cts_io_num >= 0 && !uart_try_set_iomux_pin(uart_num, cts_io_num, SOC_UART_CTS_PIN_IDX)) {
        gpio_hal_iomux_func_sel(GPIO_PIN_MUX_REG[cts_io_num], PIN_FUNC_GPIO);
        gpio_set_pull_mode(cts_io_num, GPIO_PULLUP_ONLY);
        gpio_set_direction(cts_io_num, GPIO_MODE_INPUT);
        esp_rom_gpio_connect_in_signal(cts_io_num, UART_PERIPH_SIGNAL(uart_num, SOC_UART_CTS_PIN_IDX), 0);
    }

    return ESP_OK;
}
/**
 * @brief Manually drive the RTS output of a UART port.
 *
 * Manual RTS control is only legal while hardware flow control is disabled;
 * otherwise the hardware owns the pin and this call is rejected.
 *
 * @param uart_num UART port number (must be < UART_NUM_MAX).
 * @param level    RTS level to drive.
 * @return ESP_OK on success, ESP_FAIL if the port is invalid or hardware
 *         flow control is currently enabled.
 */
esp_err_t uart_set_rts(uart_port_t uart_num, int level)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((!uart_hal_is_hw_rts_en(&(uart_context[uart_num].hal))), ESP_FAIL, UART_TAG, "disable hw flowctrl before using sw control");

    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    uart_hal_set_rts(&(uart_context[uart_num].hal), level);
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));

    return ESP_OK;
}
/**
 * @brief Drive the DTR modem signal of a UART port.
 *
 * @param uart_num UART port number (must be < UART_NUM_MAX).
 * @param level    DTR level to drive.
 * @return ESP_OK on success, ESP_FAIL on invalid port number.
 */
esp_err_t uart_set_dtr(uart_port_t uart_num, int level)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");

    // Register access is guarded against concurrent configuration.
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    uart_hal_set_dtr(&(uart_context[uart_num].hal), level);
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));

    return ESP_OK;
}
2017-10-18 00:04:59 -04:00
/**
 * @brief Configure the idle interval inserted after a TX transfer.
 *
 * @param uart_num UART port number (must be < UART_NUM_MAX).
 * @param idle_num Idle interval in bit times; must not exceed UART_TX_IDLE_NUM_V.
 * @return ESP_OK on success, ESP_FAIL on parameter error.
 */
esp_err_t uart_set_tx_idle_num(uart_port_t uart_num, uint16_t idle_num)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((idle_num <= UART_TX_IDLE_NUM_V), ESP_FAIL, UART_TAG, "uart idle num error");

    // Register access is guarded against concurrent configuration.
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    uart_hal_set_tx_idle_num(&(uart_context[uart_num].hal), idle_num);
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));

    return ESP_OK;
}
2016-12-14 20:45:40 -05:00
/**
 * @brief Configure baud rate, framing, and flow control of a UART port.
 *
 * Enables the UART module clock, resolves and (for the RTC source) enables
 * the source clock, then programs baud rate, parity, data/stop bits, TX idle
 * time, and hardware flow control inside one critical section. Both FIFOs
 * are reset afterwards.
 *
 * @param uart_num    UART port number (must be < UART_NUM_MAX).
 * @param uart_config Configuration to apply; must be non-NULL.
 * @return ESP_OK on success, ESP_FAIL on parameter error, or the error from
 *         uart_get_sclk_freq() if the selected clock source is invalid.
 */
esp_err_t uart_param_config(uart_port_t uart_num, const uart_config_t *uart_config)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((uart_config), ESP_FAIL, UART_TAG, "param null");
    ESP_RETURN_ON_FALSE((uart_config->rx_flow_ctrl_thresh < SOC_UART_FIFO_LEN), ESP_FAIL, UART_TAG, "rx flow thresh error");
    ESP_RETURN_ON_FALSE((uart_config->flow_ctrl < UART_HW_FLOWCTRL_MAX), ESP_FAIL, UART_TAG, "hw_flowctrl mode error");
    ESP_RETURN_ON_FALSE((uart_config->data_bits < UART_DATA_BITS_MAX), ESP_FAIL, UART_TAG, "data bit error");
    uart_module_enable(uart_num);
    uart_sclk_t clk_src = (uart_config->source_clk) ? uart_config->source_clk : UART_SCLK_DEFAULT; // if no specifying the clock source (soc_module_clk_t starts from 1), then just use the default clock
#if SOC_UART_SUPPORT_RTC_CLK
    if (clk_src == UART_SCLK_RTC) {
        // The RTC 8M clock must be explicitly powered up before it can be used
        // as a UART source clock.
        periph_rtc_dig_clk8m_enable();
    }
#endif
    uint32_t sclk_freq;
    ESP_RETURN_ON_ERROR(uart_get_sclk_freq(clk_src, &sclk_freq), UART_TAG, "Invalid src_clk");

    // Program all framing/flow-control parameters atomically so a concurrent
    // caller never observes a half-configured port.
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    uart_hal_init(&(uart_context[uart_num].hal), uart_num);
    uart_hal_set_sclk(&(uart_context[uart_num].hal), clk_src);
    uart_hal_set_baudrate(&(uart_context[uart_num].hal), uart_config->baud_rate, sclk_freq);
    uart_hal_set_parity(&(uart_context[uart_num].hal), uart_config->parity);
    uart_hal_set_data_bit_num(&(uart_context[uart_num].hal), uart_config->data_bits);
    uart_hal_set_stop_bits(&(uart_context[uart_num].hal), uart_config->stop_bits);
    uart_hal_set_tx_idle_num(&(uart_context[uart_num].hal), UART_TX_IDLE_NUM_DEFAULT);
    uart_hal_set_hw_flow_ctrl(&(uart_context[uart_num].hal), uart_config->flow_ctrl, uart_config->rx_flow_ctrl_thresh);
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
    // Discard anything left in the FIFOs from before reconfiguration.
    uart_hal_rxfifo_rst(&(uart_context[uart_num].hal));
    uart_hal_txfifo_rst(&(uart_context[uart_num].hal));
    return ESP_OK;
}
/**
 * @brief Program the UART interrupt thresholds and enable mask.
 *
 * All pending interrupt status is cleared first; the RX timeout, RX FIFO
 * full, and TX FIFO empty thresholds are only programmed when the matching
 * bit is present in `intr_enable_mask`.
 *
 * @param uart_num  UART port number (must be < UART_NUM_MAX).
 * @param intr_conf Interrupt configuration; must be non-NULL.
 * @return ESP_OK on success, ESP_FAIL on parameter error.
 */
esp_err_t uart_intr_config(uart_port_t uart_num, const uart_intr_config_t *intr_conf)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((intr_conf), ESP_FAIL, UART_TAG, "param null");

    uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_LL_INTR_MASK);
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    const uint32_t enable_mask = intr_conf->intr_enable_mask;
    // A timeout of zero disables the RX timeout counter in hardware, so the
    // original if/else collapses to a single conditional-expression call.
    uart_hal_set_rx_timeout(&(uart_context[uart_num].hal),
                            (enable_mask & UART_INTR_RXFIFO_TOUT) ? intr_conf->rx_timeout_thresh : 0);
    if (enable_mask & UART_INTR_RXFIFO_FULL) {
        uart_hal_set_rxfifo_full_thr(&(uart_context[uart_num].hal), intr_conf->rxfifo_full_thresh);
    }
    if (enable_mask & UART_INTR_TXFIFO_EMPTY) {
        uart_hal_set_txfifo_empty_thr(&(uart_context[uart_num].hal), intr_conf->txfifo_empty_intr_thresh);
    }
    uart_hal_ena_intr_mask(&(uart_context[uart_num].hal), enable_mask);
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
    return ESP_OK;
}
2021-05-19 08:32:55 -04:00
/**
 * @brief Scan `buf` backwards for a run of `pat_num` consecutive `pat_chr` bytes.
 *
 * The scan starts at index `length` (inclusive) and walks down towards 0.
 *
 * @param buf     Byte buffer to scan.
 * @param length  Highest index to examine (caller passes data_len - 1).
 * @param pat_chr Pattern character to match.
 * @param pat_num Required number of consecutive matches.
 * @return Index of the first (lowest) byte of the matched run, or -1 when no
 *         run of `pat_num` characters exists in the scanned range.
 */
static int UART_ISR_ATTR uart_find_pattern_from_last(uint8_t *buf, int length, uint8_t pat_chr, uint8_t pat_num)
{
    int run = 0;
    int pos = length;
    for (; pos >= 0; pos--) {
        // Extend the current run on a match, otherwise restart it.
        run = (buf[pos] == pat_chr) ? (run + 1) : 0;
        if (run >= pat_num) {
            break;
        }
    }
    // pos holds the run start on success, or -1 after an exhausted scan.
    return pos;
}
2022-04-26 09:48:07 -04:00
/**
 * @brief Copy up to `len` bytes from `pbuf` into the TX FIFO.
 *
 * For RS485 half-duplex ports, RTS is asserted (driven to 0) and the TX_DONE
 * interrupt is re-armed before the write, so the end of the transmission can
 * be detected and the bus turned around.
 *
 * @param uart_num UART port number.
 * @param pbuf     Source bytes.
 * @param len      Maximum number of bytes to copy.
 * @return Number of bytes actually written to the FIFO.
 */
static uint32_t UART_ISR_ATTR uart_enable_tx_write_fifo(uart_port_t uart_num, const uint8_t *pbuf, uint32_t len)
{
    uint32_t written = 0;
    UART_ENTER_CRITICAL_SAFE(&(uart_context[uart_num].spinlock));
    if (UART_IS_MODE_SET(uart_num, UART_MODE_RS485_HALF_DUPLEX)) {
        uart_hal_set_rts(&(uart_context[uart_num].hal), 0);
        // If any new things are written to fifo, then we can always clear the previous TX_DONE interrupt bit (if it was set)
        // Old TX_DONE bit might reset the RTS, leading new tx transmission failure for rs485 mode
        uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_TX_DONE);
        uart_hal_ena_intr_mask(&(uart_context[uart_num].hal), UART_INTR_TX_DONE);
    }
    uart_hal_write_txfifo(&(uart_context[uart_num].hal), pbuf, len, &written);
    UART_EXIT_CRITICAL_SAFE(&(uart_context[uart_num].spinlock));
    return written;
}
2016-12-14 20:45:40 -05:00
/**
 * @brief Default UART interrupt handler for the driver.
 *
 * Loops while any interrupt status bit is set and dispatches each source:
 * TX FIFO empty (refill from TX ring buffer / semaphore), RX FIFO full /
 * timeout / AT-pattern detect (drain RX FIFO into RX ring buffer), FIFO
 * overflow, break, frame/parity errors, TX break completion, RS485 errors,
 * TX done, and (where supported) wakeup. Events are posted to the driver's
 * event queue when one is installed. Defers the context switch to a single
 * portYIELD_FROM_ISR() at the end, accumulated via `need_yield`.
 *
 * @param param Pointer to the uart_obj_t of the port that interrupted.
 */
static void UART_ISR_ATTR uart_rx_intr_handler_default(void *param)
{
    uart_obj_t *p_uart = (uart_obj_t *)param;
    uint8_t uart_num = p_uart->uart_num;
    int rx_fifo_len = 0;
    uint32_t uart_intr_status = 0;
    uart_event_t uart_event;
    portBASE_TYPE HPTaskAwoken = 0;
    bool need_yield = false;
    // Set when a pattern was detected while the RX buffer was full; replayed
    // on the next RX interrupt once data can be drained again.
    static uint8_t pat_flg = 0;
    BaseType_t sent = pdFALSE;
    while (1) {
        // The `continue statement` may cause the interrupt to loop infinitely
        // we exit the interrupt here
        uart_intr_status = uart_hal_get_intsts_mask(&(uart_context[uart_num].hal));
        //Exit form while loop
        if (uart_intr_status == 0) {
            break;
        }
        uart_event.type = UART_EVENT_MAX;
        if (uart_intr_status & UART_INTR_TXFIFO_EMPTY) {
            // Disable first, then clear: avoids immediately re-entering on the
            // still-empty FIFO while we decide whether there is more to send.
            UART_ENTER_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
            uart_hal_disable_intr_mask(&(uart_context[uart_num].hal), UART_INTR_TXFIFO_EMPTY);
            UART_EXIT_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
            uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_TXFIFO_EMPTY);
            if (p_uart->tx_waiting_brk) {
                continue;
            }
            //TX semaphore will only be used when tx_buf_size is zero.
            if (p_uart->tx_waiting_fifo == true && p_uart->tx_buf_size == 0) {
                p_uart->tx_waiting_fifo = false;
                xSemaphoreGiveFromISR(p_uart->tx_fifo_sem, &HPTaskAwoken);
                need_yield |= (HPTaskAwoken == pdTRUE);
            } else {
                //We don't use TX ring buffer, because the size is zero.
                if (p_uart->tx_buf_size == 0) {
                    continue;
                }
                bool en_tx_flg = false;
                uint32_t tx_fifo_rem = uart_hal_get_txfifo_len(&(uart_context[uart_num].hal));
                //We need to put a loop here, in case all the buffer items are very short.
                //That would cause a watch_dog reset because empty interrupt happens so often.
                //Although this is a loop in ISR, this loop will execute at most 128 turns.
                while (tx_fifo_rem) {
                    if (p_uart->tx_len_tot == 0 || p_uart->tx_ptr == NULL || p_uart->tx_len_cur == 0) {
                        size_t size;
                        p_uart->tx_head = (uart_tx_data_t *)xRingbufferReceiveFromISR(p_uart->tx_ring_buf, &size);
                        if (p_uart->tx_head) {
                            //The first item is the data description
                            //Get the first item to get the data information
                            if (p_uart->tx_len_tot == 0) {
                                p_uart->tx_ptr = NULL;
                                p_uart->tx_len_tot = p_uart->tx_head->tx_data.size;
                                if (p_uart->tx_head->type == UART_DATA_BREAK) {
                                    p_uart->tx_brk_flg = 1;
                                    p_uart->tx_brk_len = p_uart->tx_head->tx_data.brk_len;
                                }
                                //We have saved the data description from the 1st item, return buffer.
                                vRingbufferReturnItemFromISR(p_uart->tx_ring_buf, p_uart->tx_head, &HPTaskAwoken);
                                need_yield |= (HPTaskAwoken == pdTRUE);
                            } else if (p_uart->tx_ptr == NULL) {
                                //Update the TX item pointer, we will need this to return item to buffer.
                                p_uart->tx_ptr = (uint8_t *)p_uart->tx_head;
                                en_tx_flg = true;
                                p_uart->tx_len_cur = size;
                            }
                        } else {
                            //Can not get data from ring buffer, return;
                            break;
                        }
                    }
                    if (p_uart->tx_len_tot > 0 && p_uart->tx_ptr && p_uart->tx_len_cur > 0) {
                        // To fill the TX FIFO.
                        uint32_t send_len = uart_enable_tx_write_fifo(uart_num, (const uint8_t *)p_uart->tx_ptr,
                                                                      MIN(p_uart->tx_len_cur, tx_fifo_rem));
                        p_uart->tx_ptr += send_len;
                        p_uart->tx_len_tot -= send_len;
                        p_uart->tx_len_cur -= send_len;
                        tx_fifo_rem -= send_len;
                        if (p_uart->tx_len_cur == 0) {
                            //Return item to ring buffer.
                            vRingbufferReturnItemFromISR(p_uart->tx_ring_buf, p_uart->tx_head, &HPTaskAwoken);
                            need_yield |= (HPTaskAwoken == pdTRUE);
                            p_uart->tx_head = NULL;
                            p_uart->tx_ptr = NULL;
                            //Sending item done, now we need to send break if there is a record.
                            //Set TX break signal after FIFO is empty
                            if (p_uart->tx_len_tot == 0 && p_uart->tx_brk_flg == 1) {
                                uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_TX_BRK_DONE);
                                UART_ENTER_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
                                uart_hal_tx_break(&(uart_context[uart_num].hal), p_uart->tx_brk_len);
                                uart_hal_ena_intr_mask(&(uart_context[uart_num].hal), UART_INTR_TX_BRK_DONE);
                                UART_EXIT_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
                                p_uart->tx_waiting_brk = 1;
                                //do not enable TX empty interrupt
                                en_tx_flg = false;
                            } else {
                                //enable TX empty interrupt
                                en_tx_flg = true;
                            }
                            // Notify select() waiters that the port became writable.
                            UART_ENTER_CRITICAL_ISR(&uart_selectlock);
                            if (p_uart->uart_select_notif_callback) {
                                p_uart->uart_select_notif_callback(uart_num, UART_SELECT_WRITE_NOTIF, &HPTaskAwoken);
                                need_yield |= (HPTaskAwoken == pdTRUE);
                            }
                            UART_EXIT_CRITICAL_ISR(&uart_selectlock);
                        } else {
                            //enable TX empty interrupt
                            en_tx_flg = true;
                        }
                    }
                }
                if (en_tx_flg) {
                    uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_TXFIFO_EMPTY);
                    UART_ENTER_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
                    uart_hal_ena_intr_mask(&(uart_context[uart_num].hal), UART_INTR_TXFIFO_EMPTY);
                    UART_EXIT_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
                }
            }
        } else if ((uart_intr_status & UART_INTR_RXFIFO_TOUT)
                   || (uart_intr_status & UART_INTR_RXFIFO_FULL)
                   || (uart_intr_status & UART_INTR_CMD_CHAR_DET)
                  ) {
            // Replay a pattern detection that was postponed while the RX
            // buffer was full (see pat_flg above).
            if (pat_flg == 1) {
                uart_intr_status |= UART_INTR_CMD_CHAR_DET;
                pat_flg = 0;
            }
            if (p_uart->rx_buffer_full_flg == false) {
                rx_fifo_len = uart_hal_get_rxfifo_len(&(uart_context[uart_num].hal));
                if ((p_uart_obj[uart_num]->rx_always_timeout_flg) && !(uart_intr_status & UART_INTR_RXFIFO_TOUT)) {
                    rx_fifo_len--; // leave one byte in the fifo in order to trigger uart_intr_rxfifo_tout
                }
                uart_hal_read_rxfifo(&(uart_context[uart_num].hal), p_uart->rx_data_buf, &rx_fifo_len);
                uint8_t pat_chr = 0;
                uint8_t pat_num = 0;
                int pat_idx = -1;
                uart_hal_get_at_cmd_char(&(uart_context[uart_num].hal), &pat_chr, &pat_num);
                //Get the buffer from the FIFO
                if (uart_intr_status & UART_INTR_CMD_CHAR_DET) {
                    uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_CMD_CHAR_DET);
                    uart_event.type = UART_PATTERN_DET;
                    uart_event.size = rx_fifo_len;
                    pat_idx = uart_find_pattern_from_last(p_uart->rx_data_buf, rx_fifo_len - 1, pat_chr, pat_num);
                } else {
                    //After Copying the Data From FIFO, Clear intr_status
                    uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_RXFIFO_TOUT | UART_INTR_RXFIFO_FULL);
                    uart_event.type = UART_DATA;
                    uart_event.size = rx_fifo_len;
                    uart_event.timeout_flag = (uart_intr_status & UART_INTR_RXFIFO_TOUT) ? true : false;
                    // Notify select() waiters that the port became readable.
                    UART_ENTER_CRITICAL_ISR(&uart_selectlock);
                    if (p_uart->uart_select_notif_callback) {
                        p_uart->uart_select_notif_callback(uart_num, UART_SELECT_READ_NOTIF, &HPTaskAwoken);
                        need_yield |= (HPTaskAwoken == pdTRUE);
                    }
                    UART_EXIT_CRITICAL_ISR(&uart_selectlock);
                }
                p_uart->rx_stash_len = rx_fifo_len;
                //If we fail to push data to ring buffer, we will have to stash the data, and send next time.
                //Mainly for applications that uses flow control or small ring buffer.
                sent = xRingbufferSendFromISR(p_uart->rx_ring_buf, p_uart->rx_data_buf, p_uart->rx_stash_len, &HPTaskAwoken);
                need_yield |= (HPTaskAwoken == pdTRUE);
                if (sent == pdFALSE) {
                    // Ring buffer full: mask RX interrupts until a reader frees space.
                    p_uart->rx_buffer_full_flg = true;
                    UART_ENTER_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
                    uart_hal_disable_intr_mask(&(uart_context[uart_num].hal), UART_INTR_RXFIFO_TOUT | UART_INTR_RXFIFO_FULL);
                    UART_EXIT_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
                    if (uart_event.type == UART_PATTERN_DET) {
                        UART_ENTER_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
                        if (rx_fifo_len < pat_num) {
                            //some of the characters are read out in last interrupt
                            uart_pattern_enqueue(uart_num, p_uart->rx_buffered_len - (pat_num - rx_fifo_len));
                        } else {
                            uart_pattern_enqueue(uart_num,
                                                 pat_idx <= -1 ?
                                                 //can not find the pattern in buffer,
                                                 p_uart->rx_buffered_len + p_uart->rx_stash_len :
                                                 // find the pattern in buffer
                                                 p_uart->rx_buffered_len + pat_idx);
                        }
                        UART_EXIT_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
                        // NOTE(review): xQueueSendFromISR() is called on
                        // p_uart->event_queue before the NULL check below —
                        // verify event_queue cannot be NULL on this path.
                        sent = xQueueSendFromISR(p_uart->event_queue, (void *)&uart_event, &HPTaskAwoken);
                        need_yield |= (HPTaskAwoken == pdTRUE);
                        if ((p_uart->event_queue != NULL) && (sent == pdFALSE)) {
#ifndef CONFIG_UART_ISR_IN_IRAM //Only log if ISR is not in IRAM
                            ESP_EARLY_LOGV(UART_TAG, "UART event queue full");
#endif
                        }
                    }
                    uart_event.type = UART_BUFFER_FULL;
                } else {
                    UART_ENTER_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
                    if (uart_intr_status & UART_INTR_CMD_CHAR_DET) {
                        if (rx_fifo_len < pat_num) {
                            //some of the characters are read out in last interrupt
                            uart_pattern_enqueue(uart_num, p_uart->rx_buffered_len - (pat_num - rx_fifo_len));
                        } else if (pat_idx >= 0) {
                            // find the pattern in stash buffer.
                            uart_pattern_enqueue(uart_num, p_uart->rx_buffered_len + pat_idx);
                        }
                    }
                    p_uart->rx_buffered_len += p_uart->rx_stash_len;
                    UART_EXIT_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
                }
            } else {
                // Buffer already marked full: keep RX interrupts masked and
                // remember a pattern detection for later via pat_flg.
                UART_ENTER_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
                uart_hal_disable_intr_mask(&(uart_context[uart_num].hal), UART_INTR_RXFIFO_FULL | UART_INTR_RXFIFO_TOUT);
                UART_EXIT_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
                uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_RXFIFO_FULL | UART_INTR_RXFIFO_TOUT);
                if (uart_intr_status & UART_INTR_CMD_CHAR_DET) {
                    uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_CMD_CHAR_DET);
                    uart_event.type = UART_PATTERN_DET;
                    uart_event.size = rx_fifo_len;
                    pat_flg = 1;
                }
            }
        } else if (uart_intr_status & UART_INTR_RXFIFO_OVF) {
            // When fifo overflows, we reset the fifo.
            UART_ENTER_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
            uart_hal_rxfifo_rst(&(uart_context[uart_num].hal));
            UART_EXIT_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
            UART_ENTER_CRITICAL_ISR(&uart_selectlock);
            if (p_uart->uart_select_notif_callback) {
                p_uart->uart_select_notif_callback(uart_num, UART_SELECT_ERROR_NOTIF, &HPTaskAwoken);
                need_yield |= (HPTaskAwoken == pdTRUE);
            }
            UART_EXIT_CRITICAL_ISR(&uart_selectlock);
            uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_RXFIFO_OVF);
            uart_event.type = UART_FIFO_OVF;
        } else if (uart_intr_status & UART_INTR_BRK_DET) {
            uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_BRK_DET);
            uart_event.type = UART_BREAK;
        } else if (uart_intr_status & UART_INTR_FRAM_ERR) {
            UART_ENTER_CRITICAL_ISR(&uart_selectlock);
            if (p_uart->uart_select_notif_callback) {
                p_uart->uart_select_notif_callback(uart_num, UART_SELECT_ERROR_NOTIF, &HPTaskAwoken);
                need_yield |= (HPTaskAwoken == pdTRUE);
            }
            UART_EXIT_CRITICAL_ISR(&uart_selectlock);
            uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_FRAM_ERR);
            uart_event.type = UART_FRAME_ERR;
        } else if (uart_intr_status & UART_INTR_PARITY_ERR) {
            UART_ENTER_CRITICAL_ISR(&uart_selectlock);
            if (p_uart->uart_select_notif_callback) {
                p_uart->uart_select_notif_callback(uart_num, UART_SELECT_ERROR_NOTIF, &HPTaskAwoken);
                need_yield |= (HPTaskAwoken == pdTRUE);
            }
            UART_EXIT_CRITICAL_ISR(&uart_selectlock);
            uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_PARITY_ERR);
            uart_event.type = UART_PARITY_ERR;
        } else if (uart_intr_status & UART_INTR_TX_BRK_DONE) {
            // Break finished: release the break condition and, if a break was
            // queued from the TX path, resume FIFO refill via TXFIFO_EMPTY.
            UART_ENTER_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
            uart_hal_tx_break(&(uart_context[uart_num].hal), 0);
            uart_hal_disable_intr_mask(&(uart_context[uart_num].hal), UART_INTR_TX_BRK_DONE);
            if (p_uart->tx_brk_flg == 1) {
                uart_hal_ena_intr_mask(&(uart_context[uart_num].hal), UART_INTR_TXFIFO_EMPTY);
            }
            UART_EXIT_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
            uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_TX_BRK_DONE);
            if (p_uart->tx_brk_flg == 1) {
                p_uart->tx_brk_flg = 0;
                p_uart->tx_waiting_brk = 0;
            } else {
                xSemaphoreGiveFromISR(p_uart->tx_brk_sem, &HPTaskAwoken);
                need_yield |= (HPTaskAwoken == pdTRUE);
            }
        } else if (uart_intr_status & UART_INTR_TX_BRK_IDLE) {
            UART_ENTER_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
            uart_hal_disable_intr_mask(&(uart_context[uart_num].hal), UART_INTR_TX_BRK_IDLE);
            UART_EXIT_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
            uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_TX_BRK_IDLE);
        } else if (uart_intr_status & UART_INTR_CMD_CHAR_DET) {
            uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_CMD_CHAR_DET);
            uart_event.type = UART_PATTERN_DET;
        } else if ((uart_intr_status & UART_INTR_RS485_PARITY_ERR)
                   || (uart_intr_status & UART_INTR_RS485_FRM_ERR)
                   || (uart_intr_status & UART_INTR_RS485_CLASH)) {
            // RS485 collision or frame error interrupt triggered
            UART_ENTER_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
            uart_hal_rxfifo_rst(&(uart_context[uart_num].hal));
            // Set collision detection flag
            p_uart_obj[uart_num]->coll_det_flg = true;
            UART_EXIT_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
            uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_RS485_CLASH | UART_INTR_RS485_FRM_ERR | UART_INTR_RS485_PARITY_ERR);
            uart_event.type = UART_EVENT_MAX;
        } else if (uart_intr_status & UART_INTR_TX_DONE) {
            if (UART_IS_MODE_SET(uart_num, UART_MODE_RS485_HALF_DUPLEX) && uart_hal_is_tx_idle(&(uart_context[uart_num].hal)) != true) {
                // The TX_DONE interrupt is triggered but transmit is active
                // then postpone interrupt processing for next interrupt
                uart_event.type = UART_EVENT_MAX;
            } else {
                // Workaround for RS485: If the RS485 half duplex mode is active
                // and transmitter is in idle state then reset received buffer and reset RTS pin
                // skip this behavior for other UART modes
                uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_TX_DONE);
                UART_ENTER_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
                uart_hal_disable_intr_mask(&(uart_context[uart_num].hal), UART_INTR_TX_DONE);
                if (UART_IS_MODE_SET(uart_num, UART_MODE_RS485_HALF_DUPLEX)) {
                    uart_hal_rxfifo_rst(&(uart_context[uart_num].hal));
                    uart_hal_set_rts(&(uart_context[uart_num].hal), 1);
                }
                UART_EXIT_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
                xSemaphoreGiveFromISR(p_uart_obj[uart_num]->tx_done_sem, &HPTaskAwoken);
                need_yield |= (HPTaskAwoken == pdTRUE);
            }
        }
#if SOC_UART_SUPPORT_WAKEUP_INT
        else if (uart_intr_status & UART_INTR_WAKEUP) {
            uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_WAKEUP);
            uart_event.type = UART_WAKEUP;
        }
#endif
        else {
            uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), uart_intr_status); /*simply clear all other intr status*/
            uart_event.type = UART_EVENT_MAX;
        }
        if (uart_event.type != UART_EVENT_MAX && p_uart->event_queue) {
            sent = xQueueSendFromISR(p_uart->event_queue, (void *)&uart_event, &HPTaskAwoken);
            need_yield |= (HPTaskAwoken == pdTRUE);
            if (sent == pdFALSE) {
#ifndef CONFIG_UART_ISR_IN_IRAM //Only log if ISR is not in IRAM
                ESP_EARLY_LOGV(UART_TAG, "UART event queue full");
#endif
            }
        }
    }
    if (need_yield) {
        portYIELD_FROM_ISR();
    }
}
/**************************************************************/
/**
 * @brief Block until the UART TX path is idle (all queued data shifted out on
 *        the wire), or until `ticks_to_wait` expires.
 *
 * @param uart_num      UART port number; must be < UART_NUM_MAX with the driver installed.
 * @param ticks_to_wait Total timeout in RTOS ticks, covering both the tx_mux
 *                      acquisition and the wait for TX completion.
 *
 * @return ESP_OK when TX is idle, ESP_ERR_TIMEOUT on timeout, ESP_FAIL on argument error.
 */
esp_err_t uart_wait_tx_done(uart_port_t uart_num, TickType_t ticks_to_wait)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((p_uart_obj[uart_num]), ESP_FAIL, UART_TAG, "uart driver error");
    BaseType_t res;
    TickType_t ticks_start = xTaskGetTickCount();
    // Take tx_mux so no other task can start a new transmission while we wait
    res = xSemaphoreTake(p_uart_obj[uart_num]->tx_mux, (TickType_t)ticks_to_wait);
    if (res == pdFALSE) {
        return ESP_ERR_TIMEOUT;
    }
    // Check the enable status of TX_DONE: If already enabled, then let the isr handle the status bit;
    // If not enabled, then make sure to clear the status bit before enabling the TX_DONE interrupt bit
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    bool is_rs485_mode = UART_IS_MODE_SET(uart_num, UART_MODE_RS485_HALF_DUPLEX);
    bool disabled = !(uart_hal_get_intr_ena_status(&(uart_context[uart_num].hal)) & UART_INTR_TX_DONE);
    // For RS485 mode, TX_DONE interrupt is enabled for every tx transmission, so there shouldn't be a case of
    // interrupt not enabled but raw bit is set.
    assert(!(is_rs485_mode &&
             disabled &&
             uart_hal_get_intraw_mask(&(uart_context[uart_num].hal)) & UART_INTR_TX_DONE));
    // If decided to register for the TX_DONE event, then we should clear any possible old tx transmission status.
    // The clear operation of RS485 mode should only be handled in isr or when writing to tx fifo.
    if (disabled && !is_rs485_mode) {
        uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_TX_DONE);
    }
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));

    // Drain any stale "give" left on the semaphore by a previous transmission
    xSemaphoreTake(p_uart_obj[uart_num]->tx_done_sem, 0);

    // FSM status register update comes later than TX_DONE interrupt raw bit raise
    // The maximum time takes for FSM status register to update is (6 APB clock cycles + 3 UART core clock cycles)
    // Therefore, to avoid the situation of TX_DONE bit being cleared but FSM didn't be recognized as IDLE (which
    // would lead to timeout), a delay of 2us is added in between.
    esp_rom_delay_us(2);

    if (uart_hal_is_tx_idle(&(uart_context[uart_num].hal))) {
        // Nothing in flight: return success without arming the interrupt
        xSemaphoreGive(p_uart_obj[uart_num]->tx_mux);
        return ESP_OK;
    }

    // Arm TX_DONE; the ISR gives tx_done_sem once the transmission completes
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    uart_hal_ena_intr_mask(&(uart_context[uart_num].hal), UART_INTR_TX_DONE);
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));

    // Deduct the time already spent acquiring tx_mux from the remaining timeout
    TickType_t ticks_end = xTaskGetTickCount();
    if (ticks_end - ticks_start > ticks_to_wait) {
        ticks_to_wait = 0;
    } else {
        ticks_to_wait = ticks_to_wait - (ticks_end - ticks_start);
    }
    //take 2nd tx_done_sem, wait given from ISR
    res = xSemaphoreTake(p_uart_obj[uart_num]->tx_done_sem, (TickType_t)ticks_to_wait);
    if (res == pdFALSE) {
        // The TX_DONE interrupt will be disabled in ISR
        xSemaphoreGive(p_uart_obj[uart_num]->tx_mux);
        return ESP_ERR_TIMEOUT;
    }
    xSemaphoreGive(p_uart_obj[uart_num]->tx_mux);
    return ESP_OK;
}
2021-05-19 08:32:55 -04:00
/**
 * @brief Best-effort write: push as much of `buffer` into the hardware TX FIFO
 *        as fits right now, without waiting for FIFO space.
 *
 * @param uart_num UART port number.
 * @param buffer   Data to transmit.
 * @param len      Number of bytes available in `buffer`.
 *
 * @return Number of bytes actually queued into the FIFO, or -1 on parameter error.
 */
int uart_tx_chars(uart_port_t uart_num, const char *buffer, uint32_t len)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), (-1), UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((p_uart_obj[uart_num]), (-1), UART_TAG, "uart driver error");
    ESP_RETURN_ON_FALSE(buffer, (-1), UART_TAG, "buffer null");
    if (len == 0) {
        return 0;
    }
    // Serialize against other writers, fill the FIFO once, and report what fit.
    xSemaphoreTake(p_uart_obj[uart_num]->tx_mux, (TickType_t)portMAX_DELAY);
    const int written = (int)uart_enable_tx_write_fifo(uart_num, (const uint8_t *)buffer, len);
    xSemaphoreGive(p_uart_obj[uart_num]->tx_mux);
    return written;
}
2021-05-19 08:32:55 -04:00
/**
 * @brief Common TX path behind uart_write_bytes()/uart_write_bytes_with_break().
 *
 * When a TX ring buffer is installed, a descriptor event plus the payload is
 * copied into the ring buffer and the TX interrupt drains it asynchronously.
 * Otherwise the call blocks, feeding the hardware FIFO directly until every
 * byte has been written.
 *
 * @param uart_num UART port number (validated by the public wrappers).
 * @param src      Data to send.
 * @param size     Number of bytes in `src`.
 * @param brk_en   When true, transmit a break signal after the data.
 * @param brk_len  Break duration (only meaningful when `brk_en` is true).
 *
 * @return The original `size` (everything is always queued/sent); 0 if size == 0.
 */
static int uart_tx_all(uart_port_t uart_num, const char *src, size_t size, bool brk_en, int brk_len)
{
    if (size == 0) {
        return 0;
    }
    size_t original_size = size;

    //lock for uart_tx
    xSemaphoreTake(p_uart_obj[uart_num]->tx_mux, (TickType_t)portMAX_DELAY);
    p_uart_obj[uart_num]->coll_det_flg = false;
    if (p_uart_obj[uart_num]->tx_buf_size > 0) {
        // Buffered path: enqueue one descriptor event, then the payload in chunks.
        size_t max_size = xRingbufferGetMaxItemSize(p_uart_obj[uart_num]->tx_ring_buf);
        int offset = 0;
        uart_tx_data_t evt;
        evt.tx_data.size = size;
        evt.tx_data.brk_len = brk_len;
        if (brk_en) {
            evt.type = UART_DATA_BREAK;
        } else {
            evt.type = UART_DATA;
        }
        xRingbufferSend(p_uart_obj[uart_num]->tx_ring_buf, (void *)&evt, sizeof(uart_tx_data_t), portMAX_DELAY);
        while (size > 0) {
            // Cap each chunk at half the ring buffer's maximum item size
            size_t send_size = size > max_size / 2 ? max_size / 2 : size;
            xRingbufferSend(p_uart_obj[uart_num]->tx_ring_buf, (void *)(src + offset), send_size, portMAX_DELAY);
            size -= send_size;
            offset += send_size;
            // Kick the TX-empty interrupt so the ISR starts draining the buffer
            uart_enable_tx_intr(uart_num, 1, UART_EMPTY_THRESH_DEFAULT);
        }
    } else {
        // Unbuffered path: write straight into the hardware FIFO, blocking on
        // tx_fifo_sem (given back by the ISR) whenever the FIFO fills up.
        while (size) {
            //semaphore for tx_fifo available
            if (pdTRUE == xSemaphoreTake(p_uart_obj[uart_num]->tx_fifo_sem, (TickType_t)portMAX_DELAY)) {
                uint32_t sent = uart_enable_tx_write_fifo(uart_num, (const uint8_t *)src, size);
                if (sent < size) {
                    // FIFO full: arm the TX-empty interrupt so the ISR releases us later
                    p_uart_obj[uart_num]->tx_waiting_fifo = true;
                    uart_enable_tx_intr(uart_num, 1, UART_EMPTY_THRESH_DEFAULT);
                }
                size -= sent;
                src += sent;
            }
        }
        if (brk_en) {
            // Issue the break directly and block until the ISR signals completion
            uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_TX_BRK_DONE);
            UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
            uart_hal_tx_break(&(uart_context[uart_num].hal), brk_len);
            uart_hal_ena_intr_mask(&(uart_context[uart_num].hal), UART_INTR_TX_BRK_DONE);
            UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
            xSemaphoreTake(p_uart_obj[uart_num]->tx_brk_sem, (TickType_t)portMAX_DELAY);
        }
        xSemaphoreGive(p_uart_obj[uart_num]->tx_fifo_sem);
    }
    xSemaphoreGive(p_uart_obj[uart_num]->tx_mux);
    return original_size;
}
2021-05-19 08:32:55 -04:00
/**
 * @brief Queue `size` bytes for transmission on `uart_num`, blocking until the
 *        whole buffer has been accepted by the TX path.
 *
 * @return Number of bytes queued, or -1 on parameter error.
 */
int uart_write_bytes(uart_port_t uart_num, const void *src, size_t size)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), (-1), UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((p_uart_obj[uart_num] != NULL), (-1), UART_TAG, "uart driver error");
    ESP_RETURN_ON_FALSE(src, (-1), UART_TAG, "buffer null");
    // Plain data write: no break signal (brk_en = 0, brk_len = 0)
    return uart_tx_all(uart_num, src, size, 0, 0);
}
2021-05-19 08:32:55 -04:00
/**
 * @brief Queue `size` bytes for transmission and append a break signal of
 *        `brk_len` bit times once the data has been sent.
 *
 * @return Number of bytes queued, or -1 on parameter error.
 */
int uart_write_bytes_with_break(uart_port_t uart_num, const void *src, size_t size, int brk_len)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), (-1), UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((p_uart_obj[uart_num]), (-1), UART_TAG, "uart driver error");
    ESP_RETURN_ON_FALSE((size > 0), (-1), UART_TAG, "uart size error");
    ESP_RETURN_ON_FALSE((src), (-1), UART_TAG, "uart data null");
    ESP_RETURN_ON_FALSE((brk_len > 0 && brk_len < 256), (-1), UART_TAG, "break_num error");
    // Same path as uart_write_bytes(), with the break flag raised
    return uart_tx_all(uart_num, src, size, 1, brk_len);
}
2018-10-19 02:51:28 -04:00
/**
 * @brief If reception stalled because the RX ring buffer was full, try to move
 *        the stashed FIFO data into the ring buffer and re-enable RX interrupts.
 *
 * @return true when the stashed data was flushed into the ring buffer,
 *         false when nothing was pending or the buffer is still full.
 */
static bool uart_check_buf_full(uart_port_t uart_num)
{
    if (!p_uart_obj[uart_num]->rx_buffer_full_flg) {
        // No stashed data pending
        return false;
    }
    BaseType_t sent = xRingbufferSend(p_uart_obj[uart_num]->rx_ring_buf,
                                      p_uart_obj[uart_num]->rx_data_buf,
                                      p_uart_obj[uart_num]->rx_stash_len, 0);
    if (sent != pdTRUE) {
        // Ring buffer still has no room; keep the stash for the next attempt
        return false;
    }
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    p_uart_obj[uart_num]->rx_buffered_len += p_uart_obj[uart_num]->rx_stash_len;
    p_uart_obj[uart_num]->rx_buffer_full_flg = false;
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
    /* Only re-activate UART_INTR_RXFIFO_TOUT or UART_INTR_RXFIFO_FULL
     * interrupts if they were NOT explicitly disabled by the user. */
    uart_reenable_intr_mask(p_uart_obj[uart_num]->uart_num, UART_INTR_RXFIFO_TOUT | UART_INTR_RXFIFO_FULL);
    return true;
}
2021-05-19 08:32:55 -04:00
/**
 * @brief Read up to `length` bytes from the RX ring buffer into `buf`.
 *
 * Blocks up to `ticks_to_wait` ticks for the rx mutex and then for each
 * ring-buffer receive; returns as soon as `length` bytes have been copied or a
 * receive times out with nothing available.
 *
 * @return Number of bytes copied into `buf`, or -1 on parameter error / mutex timeout.
 */
int uart_read_bytes(uart_port_t uart_num, void *buf, uint32_t length, TickType_t ticks_to_wait)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), (-1), UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((buf), (-1), UART_TAG, "uart data null");
    ESP_RETURN_ON_FALSE((p_uart_obj[uart_num]), (-1), UART_TAG, "uart driver error");
    uint8_t *data = NULL;
    size_t size = 0;
    size_t copy_len = 0;
    if (xSemaphoreTake(p_uart_obj[uart_num]->rx_mux, (TickType_t)ticks_to_wait) != pdTRUE) {
        return -1;
    }
    while (length) {
        // Receive at most `length` bytes; `size` reports how many were returned
        data = (uint8_t *)xRingbufferReceiveUpTo(p_uart_obj[uart_num]->rx_ring_buf, &size, (TickType_t)ticks_to_wait, length);
        if (!data) {
            // When using dual cores, `rx_buffer_full_flg` may read and write on different cores at same time,
            // which may lose synchronization. So we also need to call `uart_check_buf_full` once when ringbuffer is empty
            // to solve the possible asynchronous issues.
            if (uart_check_buf_full(uart_num)) {
                // This condition will never be true if `uart_read_bytes`
                // and `uart_rx_intr_handler_default` are scheduled on the same core.
                continue;
            } else {
                // Timeout while not fetched all requested length
                break;
            }
        }
        memcpy((uint8_t *)buf + copy_len, data, size);
        UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
        p_uart_obj[uart_num]->rx_buffered_len -= size;
        uart_pattern_queue_update(uart_num, size);
        UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
        copy_len += size;
        length -= size;
        vRingbufferReturnItem(p_uart_obj[uart_num]->rx_ring_buf, data);
        // Returning the item may have freed room for stashed FIFO data
        uart_check_buf_full(uart_num);
    }

    xSemaphoreGive(p_uart_obj[uart_num]->rx_mux);
    return copy_len;
}
2021-05-19 08:32:55 -04:00
/**
 * @brief Get the number of RX bytes currently buffered by the driver.
 *
 * @param uart_num UART port number.
 * @param size     Output: buffered byte count. Must not be NULL.
 *
 * @return ESP_OK on success, ESP_ERR_INVALID_ARG if `size` is NULL,
 *         ESP_FAIL on other parameter errors.
 */
esp_err_t uart_get_buffered_data_len(uart_port_t uart_num, size_t *size)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((p_uart_obj[uart_num]), ESP_FAIL, UART_TAG, "uart driver error");
    // Fix: reject a NULL output pointer instead of dereferencing it,
    // consistent with uart_get_tx_buffer_free_size()
    ESP_RETURN_ON_FALSE((size != NULL), ESP_ERR_INVALID_ARG, UART_TAG, "arg pointer is NULL");
    // rx_buffered_len is updated from the ISR, so read it under the port spinlock
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    *size = p_uart_obj[uart_num]->rx_buffered_len;
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
    return ESP_OK;
}
2022-06-29 05:56:38 -04:00
/**
 * @brief Report how many free bytes remain in the TX ring buffer.
 *
 * @param uart_num UART port number.
 * @param size     Output: free space in bytes. Must not be NULL.
 *
 * @return ESP_OK on success, ESP_ERR_INVALID_ARG on parameter error.
 */
esp_err_t uart_get_tx_buffer_free_size(uart_port_t uart_num, size_t *size)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_ERR_INVALID_ARG, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((p_uart_obj[uart_num]), ESP_ERR_INVALID_ARG, UART_TAG, "uart driver error");
    ESP_RETURN_ON_FALSE((size != NULL), ESP_ERR_INVALID_ARG, UART_TAG, "arg pointer is NULL");
    const uart_obj_t *obj = p_uart_obj[uart_num];
    // Free space = total TX buffer capacity minus bytes currently queued
    *size = obj->tx_buf_size - obj->tx_len_tot;
    return ESP_OK;
}
2017-08-25 09:04:13 -04:00
// uart_flush() is kept as a backward-compatible alias of uart_flush_input()
esp_err_t uart_flush(uart_port_t uart_num) __attribute__((alias("uart_flush_input")));

/**
 * @brief Discard everything in the RX ring buffer (and any stashed FIFO data),
 *        then reset the hardware RX FIFO.
 *
 * RX interrupts are masked while draining and conditionally re-enabled at the end.
 *
 * @return ESP_OK on success, ESP_FAIL on parameter error.
 */
esp_err_t uart_flush_input(uart_port_t uart_num)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((p_uart_obj[uart_num]), ESP_FAIL, UART_TAG, "uart driver error");
    uart_obj_t *p_uart = p_uart_obj[uart_num];
    uint8_t *data;
    size_t size;
    //rx sem protect the ring buffer read related functions
    xSemaphoreTake(p_uart->rx_mux, (TickType_t)portMAX_DELAY);
    // Stop the ISR from refilling the ring buffer while we drain it
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    uart_hal_disable_intr_mask(&(uart_context[uart_num].hal), UART_INTR_RXFIFO_FULL | UART_INTR_RXFIFO_TOUT);
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
    while (true) {
        data = (uint8_t *)xRingbufferReceive(p_uart->rx_ring_buf, &size, (TickType_t)0);
        if (data == NULL) {
            // Ring buffer empty: bookkeeping should show zero buffered bytes now
            bool error = false;
            UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
            if (p_uart_obj[uart_num]->rx_buffered_len != 0) {
                p_uart_obj[uart_num]->rx_buffered_len = 0;
                error = true;
            }
            // We also need to clear the `rx_buffer_full_flg` here.
            p_uart_obj[uart_num]->rx_buffer_full_flg = false;
            UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
            if (error) {
                // this must be called outside the critical section
                ESP_LOGE(UART_TAG, "rx_buffered_len error");
            }
            break;
        }
        UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
        p_uart_obj[uart_num]->rx_buffered_len -= size;
        uart_pattern_queue_update(uart_num, size);
        UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
        vRingbufferReturnItem(p_uart->rx_ring_buf, data);
        // If the ISR had stashed data because the buffer was full, push it in
        // now so it gets discarded on the next loop iteration as well
        if (p_uart_obj[uart_num]->rx_buffer_full_flg) {
            BaseType_t res = xRingbufferSend(p_uart_obj[uart_num]->rx_ring_buf, p_uart_obj[uart_num]->rx_data_buf, p_uart_obj[uart_num]->rx_stash_len, 0);
            if (res == pdTRUE) {
                UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
                p_uart_obj[uart_num]->rx_buffered_len += p_uart_obj[uart_num]->rx_stash_len;
                p_uart_obj[uart_num]->rx_buffer_full_flg = false;
                UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
            }
        }
    }
    // Drop whatever is still sitting in the hardware FIFO
    uart_hal_rxfifo_rst(&(uart_context[uart_num].hal));
    /* Only re-enable UART_INTR_RXFIFO_TOUT or UART_INTR_RXFIFO_FULL if they
     * were explicitly enabled by the user. */
    uart_reenable_intr_mask(uart_num, UART_INTR_RXFIFO_TOUT | UART_INTR_RXFIFO_FULL);
    xSemaphoreGive(p_uart->rx_mux);
    return ESP_OK;
}
2021-05-19 23:12:40 -04:00
/**
 * @brief Release every RTOS object and heap allocation owned by a driver object,
 *        then free the object itself. NULL members are skipped safely.
 */
static void uart_free_driver_obj(uart_obj_t *obj)
{
    // FreeRTOS delete functions must not receive NULL handles, so each handle
    // is checked before deletion. Deletion order is not significant here.
    if (obj->rx_ring_buf) {
        vRingbufferDelete(obj->rx_ring_buf);
    }
    if (obj->tx_ring_buf) {
        vRingbufferDelete(obj->tx_ring_buf);
    }
    if (obj->event_queue) {
        vQueueDelete(obj->event_queue);
    }
    if (obj->rx_mux) {
        vSemaphoreDelete(obj->rx_mux);
    }
    if (obj->tx_mux) {
        vSemaphoreDelete(obj->tx_mux);
    }
    if (obj->tx_brk_sem) {
        vSemaphoreDelete(obj->tx_brk_sem);
    }
    if (obj->tx_done_sem) {
        vSemaphoreDelete(obj->tx_done_sem);
    }
    if (obj->tx_fifo_sem) {
        vSemaphoreDelete(obj->tx_fifo_sem);
    }
#if CONFIG_UART_ISR_IN_IRAM
    // Statically-created RTOS objects used caller-owned storage: release it.
    // free(NULL) is a no-op, so no guards are needed.
    free(obj->event_queue_storage);
    free(obj->event_queue_struct);
    free(obj->tx_ring_buf_storage);
    free(obj->tx_ring_buf_struct);
    free(obj->rx_ring_buf_storage);
    free(obj->rx_ring_buf_struct);
    free(obj->rx_mux_struct);
    free(obj->tx_mux_struct);
    free(obj->tx_brk_sem_struct);
    free(obj->tx_done_sem_struct);
    free(obj->tx_fifo_sem_struct);
#endif
    free(obj);
}
/**
 * @brief Allocate and construct a UART driver object with its event queue,
 *        TX/RX ring buffers, and synchronization primitives.
 *
 * With CONFIG_UART_ISR_IN_IRAM the RTOS objects are created statically from
 * internal-RAM storage (so the ISR can run while flash cache is disabled);
 * otherwise they are created dynamically.
 *
 * @param event_queue_size Event queue depth; 0 means no event queue.
 * @param tx_buffer_size   TX ring buffer size in bytes; 0 means no TX buffer.
 * @param rx_buffer_size   RX ring buffer size in bytes (required, > 0).
 *
 * @return Fully-constructed object, or NULL on any allocation failure
 *         (partial allocations are released via uart_free_driver_obj()).
 */
static uart_obj_t *uart_alloc_driver_obj(int event_queue_size, int tx_buffer_size, int rx_buffer_size)
{
    uart_obj_t *uart_obj = heap_caps_calloc(1, sizeof(uart_obj_t), UART_MALLOC_CAPS);
    if (!uart_obj) {
        return NULL;
    }
#if CONFIG_UART_ISR_IN_IRAM
    // Allocate backing storage first, then create the RTOS objects statically
    if (event_queue_size > 0) {
        uart_obj->event_queue_storage = heap_caps_calloc(event_queue_size, sizeof(uart_event_t), UART_MALLOC_CAPS);
        uart_obj->event_queue_struct = heap_caps_calloc(1, sizeof(StaticQueue_t), UART_MALLOC_CAPS);
        if (!uart_obj->event_queue_storage || !uart_obj->event_queue_struct) {
            goto err;
        }
    }
    if (tx_buffer_size > 0) {
        uart_obj->tx_ring_buf_storage = heap_caps_calloc(1, tx_buffer_size, UART_MALLOC_CAPS);
        uart_obj->tx_ring_buf_struct = heap_caps_calloc(1, sizeof(StaticRingbuffer_t), UART_MALLOC_CAPS);
        if (!uart_obj->tx_ring_buf_storage || !uart_obj->tx_ring_buf_struct) {
            goto err;
        }
    }
    uart_obj->rx_ring_buf_storage = heap_caps_calloc(1, rx_buffer_size, UART_MALLOC_CAPS);
    uart_obj->rx_ring_buf_struct = heap_caps_calloc(1, sizeof(StaticRingbuffer_t), UART_MALLOC_CAPS);
    uart_obj->rx_mux_struct = heap_caps_calloc(1, sizeof(StaticSemaphore_t), UART_MALLOC_CAPS);
    uart_obj->tx_mux_struct = heap_caps_calloc(1, sizeof(StaticSemaphore_t), UART_MALLOC_CAPS);
    uart_obj->tx_brk_sem_struct = heap_caps_calloc(1, sizeof(StaticSemaphore_t), UART_MALLOC_CAPS);
    uart_obj->tx_done_sem_struct = heap_caps_calloc(1, sizeof(StaticSemaphore_t), UART_MALLOC_CAPS);
    uart_obj->tx_fifo_sem_struct = heap_caps_calloc(1, sizeof(StaticSemaphore_t), UART_MALLOC_CAPS);
    if (!uart_obj->rx_ring_buf_storage || !uart_obj->rx_ring_buf_struct || !uart_obj->rx_mux_struct ||
            !uart_obj->tx_mux_struct || !uart_obj->tx_brk_sem_struct || !uart_obj->tx_done_sem_struct ||
            !uart_obj->tx_fifo_sem_struct) {
        goto err;
    }
    if (event_queue_size > 0) {
        uart_obj->event_queue = xQueueCreateStatic(event_queue_size, sizeof(uart_event_t),
                                                   uart_obj->event_queue_storage, uart_obj->event_queue_struct);
        if (!uart_obj->event_queue) {
            goto err;
        }
    }
    if (tx_buffer_size > 0) {
        uart_obj->tx_ring_buf = xRingbufferCreateStatic(tx_buffer_size, RINGBUF_TYPE_NOSPLIT,
                                                        uart_obj->tx_ring_buf_storage, uart_obj->tx_ring_buf_struct);
        if (!uart_obj->tx_ring_buf) {
            goto err;
        }
    }
    uart_obj->rx_ring_buf = xRingbufferCreateStatic(rx_buffer_size, RINGBUF_TYPE_BYTEBUF,
                                                    uart_obj->rx_ring_buf_storage, uart_obj->rx_ring_buf_struct);
    uart_obj->rx_mux = xSemaphoreCreateMutexStatic(uart_obj->rx_mux_struct);
    uart_obj->tx_mux = xSemaphoreCreateMutexStatic(uart_obj->tx_mux_struct);
    uart_obj->tx_brk_sem = xSemaphoreCreateBinaryStatic(uart_obj->tx_brk_sem_struct);
    uart_obj->tx_done_sem = xSemaphoreCreateBinaryStatic(uart_obj->tx_done_sem_struct);
    uart_obj->tx_fifo_sem = xSemaphoreCreateBinaryStatic(uart_obj->tx_fifo_sem_struct);
    if (!uart_obj->rx_ring_buf || !uart_obj->rx_mux || !uart_obj->tx_mux || !uart_obj->tx_brk_sem ||
            !uart_obj->tx_done_sem || !uart_obj->tx_fifo_sem) {
        goto err;
    }
#else
    // Dynamic creation: the RTOS objects allocate their own storage
    if (event_queue_size > 0) {
        uart_obj->event_queue = xQueueCreate(event_queue_size, sizeof(uart_event_t));
        if (!uart_obj->event_queue) {
            goto err;
        }
    }
    if (tx_buffer_size > 0) {
        uart_obj->tx_ring_buf = xRingbufferCreate(tx_buffer_size, RINGBUF_TYPE_NOSPLIT);
        if (!uart_obj->tx_ring_buf) {
            goto err;
        }
    }
    uart_obj->rx_ring_buf = xRingbufferCreate(rx_buffer_size, RINGBUF_TYPE_BYTEBUF);
    uart_obj->tx_mux = xSemaphoreCreateMutex();
    uart_obj->rx_mux = xSemaphoreCreateMutex();
    uart_obj->tx_brk_sem = xSemaphoreCreateBinary();
    uart_obj->tx_done_sem = xSemaphoreCreateBinary();
    uart_obj->tx_fifo_sem = xSemaphoreCreateBinary();
    if (!uart_obj->rx_ring_buf || !uart_obj->rx_mux || !uart_obj->tx_mux || !uart_obj->tx_brk_sem ||
            !uart_obj->tx_done_sem || !uart_obj->tx_fifo_sem) {
        goto err;
    }
#endif
    return uart_obj;
err:
    // Roll back partial construction; uart_free_driver_obj() handles NULL members
    uart_free_driver_obj(uart_obj);
    return NULL;
}
/**
 * @brief Install the UART driver: allocate the driver object, enable the UART
 *        module, hook the RX interrupt handler, and apply default interrupt
 *        thresholds.
 *
 * @param uart_num         UART port number.
 * @param rx_buffer_size   RX ring buffer size; must exceed the hardware FIFO length.
 * @param tx_buffer_size   TX ring buffer size; 0 for unbuffered (blocking) TX,
 *                         otherwise must exceed the hardware FIFO length.
 * @param event_queue_size Event queue depth; 0 disables the event queue.
 * @param uart_queue       Optional output: handle of the created event queue.
 * @param intr_alloc_flags ESP_INTR_FLAG_* flags; the IRAM flag is forced to
 *                         match CONFIG_UART_ISR_IN_IRAM.
 *
 * @return ESP_OK on success; ESP_FAIL / esp_intr_alloc error code on failure
 *         (the driver is fully removed again on a failed install).
 */
esp_err_t uart_driver_install(uart_port_t uart_num, int rx_buffer_size, int tx_buffer_size, int event_queue_size, QueueHandle_t *uart_queue, int intr_alloc_flags)
{
    esp_err_t ret;
#ifdef CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
    ESP_RETURN_ON_FALSE((uart_num != CONFIG_ESP_CONSOLE_UART_NUM), ESP_FAIL, UART_TAG, "UART used by GDB-stubs! Please disable GDB in menuconfig.");
#endif // CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((rx_buffer_size > SOC_UART_FIFO_LEN), ESP_FAIL, UART_TAG, "uart rx buffer length error");
    ESP_RETURN_ON_FALSE((tx_buffer_size > SOC_UART_FIFO_LEN) || (tx_buffer_size == 0), ESP_FAIL, UART_TAG, "uart tx buffer length error");
#if CONFIG_UART_ISR_IN_IRAM
    // ISR lives in IRAM: the interrupt must also be IRAM-safe
    if ((intr_alloc_flags & ESP_INTR_FLAG_IRAM) == 0) {
        ESP_LOGI(UART_TAG, "ESP_INTR_FLAG_IRAM flag not set while CONFIG_UART_ISR_IN_IRAM is enabled, flag updated");
        intr_alloc_flags |= ESP_INTR_FLAG_IRAM;
    }
#else
    // ISR in flash: an IRAM interrupt would crash when cache is disabled
    if ((intr_alloc_flags & ESP_INTR_FLAG_IRAM) != 0) {
        ESP_LOGW(UART_TAG, "ESP_INTR_FLAG_IRAM flag is set while CONFIG_UART_ISR_IN_IRAM is not enabled, flag updated");
        intr_alloc_flags &= ~ESP_INTR_FLAG_IRAM;
    }
#endif

    if (p_uart_obj[uart_num] == NULL) {
        p_uart_obj[uart_num] = uart_alloc_driver_obj(event_queue_size, tx_buffer_size, rx_buffer_size);
        if (p_uart_obj[uart_num] == NULL) {
            ESP_LOGE(UART_TAG, "UART driver malloc error");
            return ESP_FAIL;
        }
        // Initialize driver state to a clean, idle configuration
        p_uart_obj[uart_num]->uart_num = uart_num;
        p_uart_obj[uart_num]->uart_mode = UART_MODE_UART;
        p_uart_obj[uart_num]->coll_det_flg = false;
        p_uart_obj[uart_num]->rx_always_timeout_flg = false;
        p_uart_obj[uart_num]->event_queue_size = event_queue_size;
        p_uart_obj[uart_num]->tx_ptr = NULL;
        p_uart_obj[uart_num]->tx_head = NULL;
        p_uart_obj[uart_num]->tx_len_tot = 0;
        p_uart_obj[uart_num]->tx_brk_flg = 0;
        p_uart_obj[uart_num]->tx_brk_len = 0;
        p_uart_obj[uart_num]->tx_waiting_brk = 0;
        p_uart_obj[uart_num]->rx_buffered_len = 0;
        p_uart_obj[uart_num]->rx_buffer_full_flg = false;
        p_uart_obj[uart_num]->tx_waiting_fifo = false;
        // By default the user is considered to have both RX interrupts enabled
        p_uart_obj[uart_num]->rx_int_usr_mask = UART_INTR_RXFIFO_FULL | UART_INTR_RXFIFO_TOUT;
        p_uart_obj[uart_num]->tx_buf_size = tx_buffer_size;
        p_uart_obj[uart_num]->uart_select_notif_callback = NULL;
        // tx_fifo_sem starts "given": the FIFO is initially available for writing
        xSemaphoreGive(p_uart_obj[uart_num]->tx_fifo_sem);
        uart_pattern_queue_reset(uart_num, UART_PATTERN_DET_QLEN_DEFAULT);
        if (uart_queue) {
            *uart_queue = p_uart_obj[uart_num]->event_queue;
            ESP_LOGI(UART_TAG, "queue free spaces: %d", uxQueueSpacesAvailable(p_uart_obj[uart_num]->event_queue));
        }
    } else {
        ESP_LOGE(UART_TAG, "UART driver already installed");
        return ESP_FAIL;
    }

    // Default interrupt configuration applied after the handler is attached
    uart_intr_config_t uart_intr = {
        .intr_enable_mask = UART_INTR_CONFIG_FLAG,
        .rxfifo_full_thresh = UART_FULL_THRESH_DEFAULT,
        .rx_timeout_thresh = UART_TOUT_THRESH_DEFAULT,
        .txfifo_empty_intr_thresh = UART_EMPTY_THRESH_DEFAULT,
    };
    uart_module_enable(uart_num);
    // Start from a quiescent interrupt state before attaching the handler
    uart_hal_disable_intr_mask(&(uart_context[uart_num].hal), UART_LL_INTR_MASK);
    uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_LL_INTR_MASK);
    ret = esp_intr_alloc(uart_periph_signal[uart_num].irq, intr_alloc_flags,
                         uart_rx_intr_handler_default, p_uart_obj[uart_num],
                         &p_uart_obj[uart_num]->intr_handle);
    ESP_GOTO_ON_ERROR(ret, err, UART_TAG, "Could not allocate an interrupt for UART");
    ret = uart_intr_config(uart_num, &uart_intr);
    ESP_GOTO_ON_ERROR(ret, err, UART_TAG, "Could not configure the interrupt for UART");
    return ret;
err:
    // Undo the partial install so a retry starts from scratch
    uart_driver_delete(uart_num);
    return ret;
}
//Make sure no other tasks are still using UART before you call this function
/**
 * @brief Uninstall the UART driver: free the interrupt, disable TX/RX
 *        interrupts, release pattern-detection state and the driver object,
 *        then power down the module (and the RTC clock source if it was used).
 *
 * @return ESP_OK always (also when the driver was not installed).
 */
esp_err_t uart_driver_delete(uart_port_t uart_num)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    if (p_uart_obj[uart_num] == NULL) {
        // Idempotent: deleting an uninstalled driver is not an error
        ESP_LOGI(UART_TAG, "ALREADY NULL");
        return ESP_OK;
    }
    // Detach the ISR first so no interrupt fires while state is torn down
    esp_intr_free(p_uart_obj[uart_num]->intr_handle);
    uart_disable_rx_intr(uart_num);
    uart_disable_tx_intr(uart_num);
    uart_pattern_link_free(uart_num);
    uart_free_driver_obj(p_uart_obj[uart_num]);
    p_uart_obj[uart_num] = NULL;

#if SOC_UART_SUPPORT_RTC_CLK
    // If this port was clocked from the RTC 8M source, drop our reference to it
    uart_sclk_t sclk = 0;
    uart_hal_get_sclk(&(uart_context[uart_num].hal), &sclk);
    if (sclk == UART_SCLK_RTC) {
        periph_rtc_dig_clk8m_disable();
    }
#endif
    uart_module_disable(uart_num);
    return ESP_OK;
}
2018-05-03 04:41:10 -04:00
2020-01-14 07:48:36 -05:00
/**
 * @brief Check whether the driver has been installed on the given port.
 * @return true when `uart_num` is valid and a driver object exists.
 */
bool uart_is_driver_installed(uart_port_t uart_num)
{
    if (uart_num >= UART_NUM_MAX) {
        return false;
    }
    return p_uart_obj[uart_num] != NULL;
}
2018-05-03 04:41:10 -04:00
/**
 * @brief Register (or clear, with NULL) the callback used to notify the VFS
 *        select() implementation of UART events. No-op on invalid port or
 *        uninstalled driver.
 */
void uart_set_select_notif_callback(uart_port_t uart_num, uart_select_notif_callback_t uart_select_notif_callback)
{
    if (uart_num >= UART_NUM_MAX || p_uart_obj[uart_num] == NULL) {
        return;
    }
    p_uart_obj[uart_num]->uart_select_notif_callback = uart_select_notif_callback;
}
2019-07-16 05:33:30 -04:00
/**
 * @brief Expose the spinlock guarding the select() notification state.
 * @return Pointer to the file-scope `uart_selectlock` portMUX.
 */
portMUX_TYPE *uart_get_selectlock(void)
{
    return &uart_selectlock;
}
2019-06-13 23:01:30 -04:00
2018-04-11 02:56:00 -04:00
// Set UART mode
/**
 * @brief Select the operating mode (plain UART, RS485 variants, IrDA, ...).
 *
 * RS485 modes require hardware RTS flow control to be disabled beforehand.
 * UART_MODE_RS485_COLLISION_DETECT additionally enables the RX and RS485
 * error interrupts so collisions can be detected while transmitting.
 *
 * @return ESP_OK on success; ESP_ERR_INVALID_ARG / ESP_ERR_INVALID_STATE on error.
 */
esp_err_t uart_set_mode(uart_port_t uart_num, uart_mode_t mode)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_ERR_INVALID_ARG, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((p_uart_obj[uart_num]), ESP_ERR_INVALID_STATE, UART_TAG, "uart driver error");
    if ((mode == UART_MODE_RS485_COLLISION_DETECT) || (mode == UART_MODE_RS485_APP_CTRL)
            || (mode == UART_MODE_RS485_HALF_DUPLEX)) {
        // RS485 shares the line direction control; hardware RTS must be off
        ESP_RETURN_ON_FALSE((!uart_hal_is_hw_rts_en(&(uart_context[uart_num].hal))), ESP_ERR_INVALID_ARG, UART_TAG,
                            "disable hw flowctrl before using RS485 mode");
    }
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    uart_hal_set_mode(&(uart_context[uart_num].hal), mode);
    if (mode == UART_MODE_RS485_COLLISION_DETECT) {
        // This mode allows read while transmitting that allows collision detection
        p_uart_obj[uart_num]->coll_det_flg = false;
        // Enable collision detection interrupts
        uart_hal_ena_intr_mask(&(uart_context[uart_num].hal), UART_INTR_RXFIFO_TOUT
                               | UART_INTR_RXFIFO_FULL
                               | UART_INTR_RS485_CLASH
                               | UART_INTR_RS485_FRM_ERR
                               | UART_INTR_RS485_PARITY_ERR);
    }
    p_uart_obj[uart_num]->uart_mode = mode;
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
    return ESP_OK;
}
2019-11-18 08:36:40 -05:00
/**
 * @brief Set the RX-FIFO-full interrupt threshold (bytes in FIFO that trigger
 *        the interrupt). Applied only while the RXFIFO_FULL interrupt is enabled.
 *
 * @return ESP_OK on success; ESP_ERR_INVALID_ARG / ESP_ERR_INVALID_STATE on error.
 */
esp_err_t uart_set_rx_full_threshold(uart_port_t uart_num, int threshold)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_ERR_INVALID_ARG, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((threshold < UART_RXFIFO_FULL_THRHD_V) && (threshold > 0), ESP_ERR_INVALID_ARG, UART_TAG,
                        "rx fifo full threshold value error");
    if (p_uart_obj[uart_num] == NULL) {
        ESP_LOGE(UART_TAG, "call uart_driver_install API first");
        return ESP_ERR_INVALID_STATE;
    }
    uart_hal_context_t *hal = &(uart_context[uart_num].hal);
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    const bool full_intr_enabled = (uart_hal_get_intr_ena_status(hal) & UART_INTR_RXFIFO_FULL) != 0;
    if (full_intr_enabled) {
        uart_hal_set_rxfifo_full_thr(hal, threshold);
    }
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
    return ESP_OK;
}
/**
 * @brief Set the TX-FIFO-empty interrupt threshold. Applied only while the
 *        TXFIFO_EMPTY interrupt is enabled.
 *
 * @return ESP_OK on success; ESP_ERR_INVALID_ARG / ESP_ERR_INVALID_STATE on error.
 */
esp_err_t uart_set_tx_empty_threshold(uart_port_t uart_num, int threshold)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_ERR_INVALID_ARG, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((threshold < UART_TXFIFO_EMPTY_THRHD_V) && (threshold > 0), ESP_ERR_INVALID_ARG, UART_TAG,
                        "tx fifo empty threshold value error");
    if (p_uart_obj[uart_num] == NULL) {
        ESP_LOGE(UART_TAG, "call uart_driver_install API first");
        return ESP_ERR_INVALID_STATE;
    }
    uart_hal_context_t *hal = &(uart_context[uart_num].hal);
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    const bool empty_intr_enabled = (uart_hal_get_intr_ena_status(hal) & UART_INTR_TXFIFO_EMPTY) != 0;
    if (empty_intr_enabled) {
        uart_hal_set_txfifo_empty_thr(hal, threshold);
    }
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
    return ESP_OK;
}
2019-06-05 22:57:29 -04:00
/**
 * @brief Configure the RX timeout (TOUT) threshold, bounded by the
 *        hardware-reported maximum for this port.
 *
 * @return ESP_OK on success, ESP_ERR_INVALID_ARG on out-of-range input.
 */
esp_err_t uart_set_rx_timeout(uart_port_t uart_num, const uint8_t tout_thresh)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_ERR_INVALID_ARG, UART_TAG, "uart_num error");
    uart_hal_context_t *hal = &(uart_context[uart_num].hal);
    // get maximum timeout threshold
    const uint16_t tout_max_thresh = uart_hal_get_max_rx_timeout_thrd(hal);
    if (tout_thresh > tout_max_thresh) {
        ESP_LOGE(UART_TAG, "tout_thresh = %d > maximum value = %d", tout_thresh, tout_max_thresh);
        return ESP_ERR_INVALID_ARG;
    }
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    uart_hal_set_rx_timeout(hal, tout_thresh);
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
    return ESP_OK;
}
2021-05-19 08:32:55 -04:00
/**
 * @brief Read the RS485 collision-detection flag recorded by the driver ISR.
 *
 * Only meaningful when the port is in UART_MODE_RS485_HALF_DUPLEX or
 * UART_MODE_RS485_COLLISION_DETECT mode.
 *
 * @param uart_num       UART port number (must be < UART_NUM_MAX).
 * @param collision_flag [out] Set to true if a collision was detected.
 *
 * @return
 *  - ESP_OK on success
 *  - ESP_ERR_INVALID_ARG if uart_num/collision_flag is invalid or the mode is wrong
 *  - ESP_FAIL if the driver is not installed
 */
esp_err_t uart_get_collision_flag(uart_port_t uart_num, bool *collision_flag)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_ERR_INVALID_ARG, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((p_uart_obj[uart_num]), ESP_FAIL, UART_TAG, "uart driver error");
    ESP_RETURN_ON_FALSE((collision_flag != NULL), ESP_ERR_INVALID_ARG, UART_TAG, "wrong parameter pointer");
    ESP_RETURN_ON_FALSE((UART_IS_MODE_SET(uart_num, UART_MODE_RS485_HALF_DUPLEX) || UART_IS_MODE_SET(uart_num, UART_MODE_RS485_COLLISION_DETECT)),
                        ESP_ERR_INVALID_ARG, UART_TAG, "wrong mode");
    // The flag itself is latched by the driver (coll_det_flg); this is a
    // plain read, no hardware access needed here.
    *collision_flag = p_uart_obj[uart_num]->coll_det_flg;
    return ESP_OK;
}

/**
 * @brief Set the light-sleep wakeup threshold (RX edge count) for a UART port.
 *
 * @param uart_num         UART port number (must be < UART_NUM_MAX).
 * @param wakeup_threshold Threshold in [UART_MIN_WAKEUP_THRESH, UART_ACTIVE_THRESHOLD_V].
 *
 * @return
 *  - ESP_OK on success
 *  - ESP_ERR_INVALID_ARG if uart_num or wakeup_threshold is out of range
 */
esp_err_t uart_set_wakeup_threshold(uart_port_t uart_num, int wakeup_threshold)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_ERR_INVALID_ARG, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((wakeup_threshold <= UART_ACTIVE_THRESHOLD_V && wakeup_threshold >= UART_MIN_WAKEUP_THRESH), ESP_ERR_INVALID_ARG, UART_TAG,
                        "wakeup_threshold out of bounds");
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    uart_hal_set_wakeup_thrd(&(uart_context[uart_num].hal), wakeup_threshold);
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
    return ESP_OK;
}

/**
 * @brief Read back the light-sleep wakeup threshold of a UART port.
 *
 * @param uart_num             UART port number (must be < UART_NUM_MAX).
 * @param out_wakeup_threshold [out] Receives the current threshold.
 *
 * @return
 *  - ESP_OK on success
 *  - ESP_ERR_INVALID_ARG if uart_num is out of range or the pointer is NULL
 */
esp_err_t uart_get_wakeup_threshold(uart_port_t uart_num, int *out_wakeup_threshold)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_ERR_INVALID_ARG, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((out_wakeup_threshold != NULL), ESP_ERR_INVALID_ARG, UART_TAG, "argument is NULL");
    // NOTE(review): the int* is reinterpreted as uint32_t* for the HAL call —
    // relies on int being 32-bit on this platform; confirm HAL signature.
    uart_hal_get_wakeup_thrd(&(uart_context[uart_num].hal), (uint32_t *)out_wakeup_threshold);
    return ESP_OK;
}

/**
 * @brief Busy-wait until the UART transmitter is completely idle.
 *
 * Blocks the calling task by polling the TX state machine; no interrupts or
 * FreeRTOS blocking primitives are used, so this also works before the driver
 * is installed. There is no timeout: if the transmitter never goes idle
 * (e.g. flow control stalls TX), this spins forever.
 *
 * @param uart_num UART port number (must be < UART_NUM_MAX).
 *
 * @return
 *  - ESP_OK once the transmitter is idle
 *  - ESP_ERR_INVALID_ARG if uart_num is out of range
 */
esp_err_t uart_wait_tx_idle_polling(uart_port_t uart_num)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_ERR_INVALID_ARG, UART_TAG, "uart_num error");
    while (!uart_hal_is_tx_idle(&(uart_context[uart_num].hal))) {
        // spin until both the TX FIFO and the shift register drain
    }
    return ESP_OK;
}

/**
 * @brief Enable or disable internal TX->RX loopback for a UART port.
 *
 * @param uart_num     UART port number (must be < UART_NUM_MAX).
 * @param loop_back_en true to route TX output back into RX, false for normal operation.
 *
 * @return
 *  - ESP_OK on success
 *  - ESP_ERR_INVALID_ARG if uart_num is out of range
 */
esp_err_t uart_set_loop_back(uart_port_t uart_num, bool loop_back_en)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_ERR_INVALID_ARG, UART_TAG, "uart_num error");
    uart_hal_set_loop_back(&(uart_context[uart_num].hal), loop_back_en);
    return ESP_OK;
}

/**
 * @brief Control the driver's "always report RX timeout" software workaround.
 *
 * The flag is honored only when a non-zero hardware RX timeout threshold is
 * currently configured; with a zero threshold the flag is forced to false.
 *
 * @param uart_num          UART port number.
 * @param always_rx_timeout Desired flag value.
 *
 * NOTE(review): unlike the sibling setters, this function validates neither
 * uart_num nor that the driver is installed; p_uart_obj[uart_num] is
 * dereferenced unconditionally — callers must install the driver first.
 */
void uart_set_always_rx_timeout(uart_port_t uart_num, bool always_rx_timeout)
{
    uint16_t rx_tout = uart_hal_get_rx_tout_thr(&(uart_context[uart_num].hal));
    // Only meaningful when an RX timeout is actually armed in hardware.
    if (rx_tout) {
        p_uart_obj[uart_num]->rx_always_timeout_flg = always_rx_timeout;
    } else {
        p_uart_obj[uart_num]->rx_always_timeout_flg = false;
    }
}