/*
 * SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdlib.h>
#include <string.h>
#include <sys/lock.h>
#include <sys/cdefs.h>
#include "esp_compiler.h"
#include "esp_intr_alloc.h"
#include "esp_log.h"
#include "esp_check.h"
#include "driver/gpio.h"
#include "esp_private/periph_ctrl.h"
#include "esp_private/gpio.h"
#include "driver/rmt_types_legacy.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "freertos/ringbuf.h"
#include "soc/soc_memory_layout.h"
#include "soc/rmt_periph.h"
#include "soc/rmt_struct.h"
#include "esp_clk_tree.h"
#include "hal/rmt_hal.h"
#include "hal/rmt_ll.h"
#include "hal/gpio_hal.h"
#include "esp_rom_gpio.h"

#define RMT_CHANNEL_ERROR_STR "RMT CHANNEL ERR"
#define RMT_ADDR_ERROR_STR "RMT ADDRESS ERR"
#define RMT_MEM_CNT_ERROR_STR "RMT MEM BLOCK NUM ERR"
#define RMT_CARRIER_ERROR_STR "RMT CARRIER LEVEL ERR"
#define RMT_MEM_OWNER_ERROR_STR "RMT MEM OWNER_ERR"
#define RMT_BASECLK_ERROR_STR "RMT BASECLK ERR"
#define RMT_WR_MEM_OVF_ERROR_STR "RMT WR MEM OVERFLOW"
#define RMT_GPIO_ERROR_STR "RMT GPIO ERROR"
#define RMT_MODE_ERROR_STR "RMT MODE ERROR"
#define RMT_CLK_DIV_ERROR_STR "RMT CLK DIV ERR"
#define RMT_DRIVER_ERROR_STR "RMT DRIVER ERR"
#define RMT_DRIVER_LENGTH_ERROR_STR "RMT PARAM LEN ERROR"
#define RMT_PSRAM_BUFFER_WARN_STR "Using buffer allocated from psram"
#define RMT_TRANSLATOR_NULL_STR "RMT translator is null"
#define RMT_TRANSLATOR_UNINIT_STR "RMT translator not init"
#define RMT_PARAM_ERR_STR "RMT param error"

static const char *TAG = "rmt(legacy)";

// Spinlock for protecting concurrent register-level access only
#define RMT_ENTER_CRITICAL() portENTER_CRITICAL_SAFE(&(rmt_contex.rmt_spinlock))
#define RMT_EXIT_CRITICAL()  portEXIT_CRITICAL_SAFE(&(rmt_contex.rmt_spinlock))

#define RMT_RX_CHANNEL_ENCODING_START (SOC_RMT_CHANNELS_PER_GROUP - SOC_RMT_TX_CANDIDATES_PER_GROUP)
#define RMT_TX_CHANNEL_ENCODING_END   (SOC_RMT_TX_CANDIDATES_PER_GROUP - 1)

#define RMT_IS_RX_CHANNEL(channel) ((channel) >= RMT_RX_CHANNEL_ENCODING_START)
#define RMT_IS_TX_CHANNEL(channel) ((channel) <= RMT_TX_CHANNEL_ENCODING_END)
#define RMT_DECODE_RX_CHANNEL(encode_chan) ((encode_chan) - RMT_RX_CHANNEL_ENCODING_START)
#define RMT_ENCODE_RX_CHANNEL(decode_chan) ((decode_chan) + RMT_RX_CHANNEL_ENCODING_START)
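/*
 * Worked example of the encoding above (illustrative numbers: a target with
 * SOC_RMT_CHANNELS_PER_GROUP = 8 and SOC_RMT_TX_CANDIDATES_PER_GROUP = 4):
 * RX encoding starts at 8 - 4 = 4, so rmt_channel_t values 4..7 are RX
 * channels and decode to hardware RX channels 0..3, while 0..3 remain TX.
 */
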
#if SOC_PERIPH_CLK_CTRL_SHARED
#define RMT_CLOCK_SRC_ATOMIC() PERIPH_RCC_ATOMIC()
#else
#define RMT_CLOCK_SRC_ATOMIC()
#endif

#if !SOC_RCC_IS_INDEPENDENT
#define RMT_RCC_ATOMIC() PERIPH_RCC_ATOMIC()
#else
#define RMT_RCC_ATOMIC()
#endif

typedef struct {
    rmt_hal_context_t hal;
    _lock_t rmt_driver_isr_lock;
    portMUX_TYPE rmt_spinlock; // Mutex lock for protecting concurrent register/unregister of RMT channels' ISR
    rmt_isr_handle_t rmt_driver_intr_handle;
    rmt_tx_end_callback_t rmt_tx_end_callback; // Event called when transmission is ended
    uint8_t rmt_driver_channels; // Bitmask of installed drivers' channels, used to protect concurrent register/unregister of RMT channels' ISR
    bool rmt_module_enabled;
    uint32_t synchro_channel_mask; // Bitmap of channels already added in the synchronous group
} rmt_contex_t;

typedef struct {
    size_t tx_offset;
    size_t tx_len_rem;
    size_t tx_sub_len;
    bool translator;
    bool wait_done; // mark whether to wait for tx done
    bool loop_autostop; // mark whether loop auto-stop is enabled
    rmt_channel_t channel;
    const rmt_item32_t *tx_data;
    SemaphoreHandle_t tx_sem;
#if CONFIG_SPIRAM_USE_MALLOC
    int intr_alloc_flags;
    StaticSemaphore_t tx_sem_buffer;
#endif
    rmt_item32_t *tx_buf;
    RingbufHandle_t rx_buf;
#if SOC_RMT_SUPPORT_RX_PINGPONG
    rmt_item32_t *rx_item_buf;
    uint32_t rx_item_buf_size;
    uint32_t rx_item_len;
    int rx_item_start_idx;
#endif
    sample_to_rmt_t sample_to_rmt;
    void *tx_context;
    size_t sample_size_remain;
    const uint8_t *sample_cur;
} rmt_obj_t;

static rmt_contex_t rmt_contex = {
    .hal.regs = &RMT,
    .rmt_spinlock = portMUX_INITIALIZER_UNLOCKED,
    .rmt_driver_intr_handle = NULL,
    .rmt_tx_end_callback = {
        .function = NULL,
    },
    .rmt_driver_channels = 0,
    .rmt_module_enabled = false,
    .synchro_channel_mask = 0
};

static rmt_obj_t *p_rmt_obj[RMT_CHANNEL_MAX] = {0};

#if SOC_RMT_CHANNEL_CLK_INDEPENDENT
static uint32_t s_rmt_source_clock_hz[RMT_CHANNEL_MAX];
#else
static uint32_t s_rmt_source_clock_hz;
#endif

// RMTMEM address is declared in <target>.peripherals.ld
extern rmt_mem_t RMTMEM;

// Enable the RMT module
static void rmt_module_enable(void)
{
    RMT_ENTER_CRITICAL();
    if (rmt_contex.rmt_module_enabled == false) {
        RMT_RCC_ATOMIC() {
            rmt_ll_enable_bus_clock(0, true);
            rmt_ll_reset_register(0);
        }
        rmt_ll_mem_power_by_pmu(rmt_contex.hal.regs);
        rmt_contex.rmt_module_enabled = true;
    }
    RMT_EXIT_CRITICAL();
}

// Disable the RMT module
static void rmt_module_disable(void)
{
    RMT_ENTER_CRITICAL();
    if (rmt_contex.rmt_module_enabled == true) {
        rmt_ll_mem_force_power_off(rmt_contex.hal.regs);
        RMT_RCC_ATOMIC() {
            rmt_ll_enable_bus_clock(0, false);
        }
        rmt_contex.rmt_module_enabled = false;
    }
    RMT_EXIT_CRITICAL();
}

esp_err_t rmt_set_clk_div(rmt_channel_t channel, uint8_t div_cnt)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    if (RMT_IS_RX_CHANNEL(channel)) {
        rmt_ll_rx_set_channel_clock_div(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), div_cnt);
    } else {
        rmt_ll_tx_set_channel_clock_div(rmt_contex.hal.regs, channel, div_cnt);
    }
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}
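/*
 * Usage sketch (application side; assumes an 80 MHz source clock, which is
 * not guaranteed on every target):
 *
 *     rmt_set_clk_div(RMT_CHANNEL_0, 80); // 80 MHz / 80 = 1 MHz, i.e. 1 us per tick
 */
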
esp_err_t rmt_get_clk_div(rmt_channel_t channel, uint8_t *div_cnt)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(div_cnt, ESP_ERR_INVALID_ARG, TAG, RMT_ADDR_ERROR_STR);
    RMT_ENTER_CRITICAL();
    if (RMT_IS_RX_CHANNEL(channel)) {
        *div_cnt = (uint8_t)rmt_ll_rx_get_channel_clock_div(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel));
    } else {
        *div_cnt = (uint8_t)rmt_ll_tx_get_channel_clock_div(rmt_contex.hal.regs, channel);
    }
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_set_rx_idle_thresh(rmt_channel_t channel, uint16_t thresh)
{
    ESP_RETURN_ON_FALSE(RMT_IS_RX_CHANNEL(channel) && channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    rmt_ll_rx_set_idle_thres(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), thresh);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}
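/*
 * Example (hypothetical timing): with the 1 us tick from the sketch above,
 * rmt_set_rx_idle_thresh(channel, 12000) makes the receiver finish a frame
 * once the input level stays unchanged for longer than 12 ms.
 */
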
esp_err_t rmt_get_rx_idle_thresh(rmt_channel_t channel, uint16_t *thresh)
{
    ESP_RETURN_ON_FALSE(RMT_IS_RX_CHANNEL(channel) && channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(thresh, ESP_ERR_INVALID_ARG, TAG, RMT_ADDR_ERROR_STR);
    RMT_ENTER_CRITICAL();
    *thresh = (uint16_t)rmt_ll_rx_get_idle_thres(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel));
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_set_mem_block_num(rmt_channel_t channel, uint8_t rmt_mem_num)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(rmt_mem_num <= RMT_CHANNEL_MAX - channel, ESP_ERR_INVALID_ARG, TAG, RMT_MEM_CNT_ERROR_STR);
    RMT_ENTER_CRITICAL();
    if (RMT_IS_RX_CHANNEL(channel)) {
        rmt_ll_rx_set_mem_blocks(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), rmt_mem_num);
    } else {
        rmt_ll_tx_set_mem_blocks(rmt_contex.hal.regs, channel, rmt_mem_num);
    }
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_get_mem_block_num(rmt_channel_t channel, uint8_t *rmt_mem_num)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(rmt_mem_num, ESP_ERR_INVALID_ARG, TAG, RMT_ADDR_ERROR_STR);
    RMT_ENTER_CRITICAL();
    if (RMT_IS_RX_CHANNEL(channel)) {
        *rmt_mem_num = (uint8_t)rmt_ll_rx_get_mem_blocks(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel));
    } else {
        *rmt_mem_num = (uint8_t)rmt_ll_tx_get_mem_blocks(rmt_contex.hal.regs, channel);
    }
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_set_tx_carrier(rmt_channel_t channel, bool carrier_en, uint16_t high_level, uint16_t low_level,
                             rmt_carrier_level_t carrier_level)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(carrier_level < RMT_CARRIER_LEVEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CARRIER_ERROR_STR);
    RMT_ENTER_CRITICAL();
    rmt_ll_tx_set_carrier_high_low_ticks(rmt_contex.hal.regs, channel, high_level, low_level);
    rmt_ll_tx_set_carrier_level(rmt_contex.hal.regs, channel, carrier_level);
    rmt_ll_tx_enable_carrier_modulation(rmt_contex.hal.regs, channel, carrier_en);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}
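/*
 * Example (hypothetical values; the high/low durations are assumed to count
 * in source-clock ticks, matching the carrier arithmetic used by
 * rmt_internal_config() below): with an 80 MHz source clock, a ~38 kHz
 * carrier at ~50% duty is about 80000000 / 38000 ≈ 2105 ticks per period:
 *
 *     rmt_set_tx_carrier(RMT_CHANNEL_0, true, 1052, 1053, RMT_CARRIER_LEVEL_HIGH);
 */
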
esp_err_t rmt_set_mem_pd(rmt_channel_t channel, bool pd_en)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    if (pd_en) {
        rmt_ll_mem_force_power_off(rmt_contex.hal.regs);
    } else {
        rmt_ll_mem_power_by_pmu(rmt_contex.hal.regs);
    }
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_get_mem_pd(rmt_channel_t channel, bool *pd_en)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    *pd_en = rmt_ll_is_mem_force_powered_down(rmt_contex.hal.regs);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_tx_start(rmt_channel_t channel, bool tx_idx_rst)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    if (tx_idx_rst) {
        rmt_ll_tx_reset_pointer(rmt_contex.hal.regs, channel);
    }
    rmt_ll_clear_interrupt_status(rmt_contex.hal.regs, RMT_LL_EVENT_TX_DONE(channel));
    // enable tx end interrupt in non-loop mode
    if (!rmt_ll_tx_is_loop_enabled(rmt_contex.hal.regs, channel)) {
        rmt_ll_enable_interrupt(rmt_contex.hal.regs, RMT_LL_EVENT_TX_DONE(channel), true);
    } else {
#if SOC_RMT_SUPPORT_TX_LOOP_COUNT
        rmt_ll_tx_reset_loop_count(rmt_contex.hal.regs, channel);
        rmt_ll_tx_enable_loop_count(rmt_contex.hal.regs, channel, true);
        rmt_ll_clear_interrupt_status(rmt_contex.hal.regs, RMT_LL_EVENT_TX_LOOP_END(channel));
        rmt_ll_enable_interrupt(rmt_contex.hal.regs, RMT_LL_EVENT_TX_LOOP_END(channel), true);
#endif
    }
    rmt_ll_tx_start(rmt_contex.hal.regs, channel);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_tx_stop(rmt_channel_t channel)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
#if SOC_RMT_SUPPORT_TX_ASYNC_STOP
    rmt_ll_tx_stop(rmt_contex.hal.regs, channel);
#else
    // write ending marker to stop the TX channel
    RMTMEM.chan[channel].data32[0].val = 0;
#endif
    rmt_ll_tx_reset_pointer(rmt_contex.hal.regs, channel);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

#if SOC_RMT_SUPPORT_RX_PINGPONG
esp_err_t rmt_set_rx_thr_intr_en(rmt_channel_t channel, bool en, uint16_t evt_thresh)
{
    ESP_RETURN_ON_FALSE(RMT_IS_RX_CHANNEL(channel) && channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    if (en) {
        uint32_t item_block_len = rmt_ll_rx_get_mem_blocks(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel)) * RMT_MEM_ITEM_NUM;
        ESP_RETURN_ON_FALSE(evt_thresh <= item_block_len, ESP_ERR_INVALID_ARG, TAG, "RMT EVT THRESH ERR");
        RMT_ENTER_CRITICAL();
        rmt_ll_rx_set_limit(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), evt_thresh);
        rmt_ll_enable_interrupt(rmt_contex.hal.regs, RMT_LL_EVENT_RX_THRES(RMT_DECODE_RX_CHANNEL(channel)), true);
        RMT_EXIT_CRITICAL();
    } else {
        RMT_ENTER_CRITICAL();
        rmt_ll_enable_interrupt(rmt_contex.hal.regs, RMT_LL_EVENT_RX_THRES(RMT_DECODE_RX_CHANNEL(channel)), false);
        RMT_EXIT_CRITICAL();
    }
    return ESP_OK;
}
#endif

esp_err_t rmt_rx_start(rmt_channel_t channel, bool rx_idx_rst)
{
    ESP_RETURN_ON_FALSE(RMT_IS_RX_CHANNEL(channel) && channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    rmt_ll_rx_enable(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), false);
    if (rx_idx_rst) {
        rmt_ll_rx_reset_pointer(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel));
    }
    rmt_ll_clear_interrupt_status(rmt_contex.hal.regs, RMT_LL_EVENT_RX_DONE(RMT_DECODE_RX_CHANNEL(channel)));
    rmt_ll_enable_interrupt(rmt_contex.hal.regs, RMT_LL_EVENT_RX_DONE(RMT_DECODE_RX_CHANNEL(channel)), true);

#if SOC_RMT_SUPPORT_RX_PINGPONG
    const uint32_t item_block_len = rmt_ll_rx_get_mem_blocks(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel)) * RMT_MEM_ITEM_NUM;
    p_rmt_obj[channel]->rx_item_start_idx = 0;
    p_rmt_obj[channel]->rx_item_len = 0;
    rmt_set_rx_thr_intr_en(channel, true, item_block_len / 2);
#endif
    rmt_ll_rx_enable(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), true);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_rx_stop(rmt_channel_t channel)
{
    ESP_RETURN_ON_FALSE(RMT_IS_RX_CHANNEL(channel) && channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    rmt_ll_enable_interrupt(rmt_contex.hal.regs, RMT_LL_EVENT_RX_DONE(RMT_DECODE_RX_CHANNEL(channel)), false);
    rmt_ll_rx_enable(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), false);
    rmt_ll_rx_reset_pointer(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel));
#if SOC_RMT_SUPPORT_RX_PINGPONG
    rmt_ll_enable_interrupt(rmt_contex.hal.regs, RMT_LL_EVENT_RX_THRES(RMT_DECODE_RX_CHANNEL(channel)), false);
#endif
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_tx_memory_reset(rmt_channel_t channel)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    rmt_ll_tx_reset_pointer(rmt_contex.hal.regs, channel);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_rx_memory_reset(rmt_channel_t channel)
{
    ESP_RETURN_ON_FALSE(RMT_IS_RX_CHANNEL(channel) && channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    rmt_ll_rx_reset_pointer(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel));
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_set_memory_owner(rmt_channel_t channel, rmt_mem_owner_t owner)
{
    ESP_RETURN_ON_FALSE(RMT_IS_RX_CHANNEL(channel) && channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(owner < RMT_MEM_OWNER_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_MEM_OWNER_ERROR_STR);
    RMT_ENTER_CRITICAL();
    rmt_ll_rx_set_mem_owner(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), owner);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_get_memory_owner(rmt_channel_t channel, rmt_mem_owner_t *owner)
{
    ESP_RETURN_ON_FALSE(RMT_IS_RX_CHANNEL(channel) && channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(owner, ESP_ERR_INVALID_ARG, TAG, RMT_MEM_OWNER_ERROR_STR);
    RMT_ENTER_CRITICAL();
    *owner = (rmt_mem_owner_t)rmt_ll_rx_get_mem_owner(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel));
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_set_tx_loop_mode(rmt_channel_t channel, bool loop_en)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    rmt_ll_tx_enable_loop(rmt_contex.hal.regs, channel, loop_en);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_get_tx_loop_mode(rmt_channel_t channel, bool *loop_en)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    *loop_en = rmt_ll_tx_is_loop_enabled(rmt_contex.hal.regs, channel);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_set_rx_filter(rmt_channel_t channel, bool rx_filter_en, uint8_t thresh)
{
    ESP_RETURN_ON_FALSE(RMT_IS_RX_CHANNEL(channel) && channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    rmt_ll_rx_enable_filter(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), rx_filter_en);
    rmt_ll_rx_set_filter_thres(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), thresh);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_set_source_clk(rmt_channel_t channel, rmt_source_clk_t base_clk)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    // `rmt_clock_source_t` and `rmt_source_clk_t` are binary compatible, as the underlying enum entries come from the same `soc_module_clk_t`
    RMT_CLOCK_SRC_ATOMIC() {
        rmt_ll_set_group_clock_src(rmt_contex.hal.regs, channel, (rmt_clock_source_t)base_clk, 1, 0, 0);
    }
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_get_source_clk(rmt_channel_t channel, rmt_source_clk_t *src_clk)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    // `rmt_clock_source_t` and `rmt_source_clk_t` are binary compatible, as the underlying enum entries come from the same `soc_module_clk_t`
    *src_clk = (rmt_source_clk_t)rmt_ll_get_group_clock_src(rmt_contex.hal.regs, channel);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_set_idle_level(rmt_channel_t channel, bool idle_out_en, rmt_idle_level_t level)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(level < RMT_IDLE_LEVEL_MAX, ESP_ERR_INVALID_ARG, TAG, "RMT IDLE LEVEL ERR");
    RMT_ENTER_CRITICAL();
    rmt_ll_tx_fix_idle_level(rmt_contex.hal.regs, channel, level, idle_out_en);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_get_idle_level(rmt_channel_t channel, bool *idle_out_en, rmt_idle_level_t *level)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    *idle_out_en = rmt_ll_tx_is_idle_enabled(rmt_contex.hal.regs, channel);
    *level = rmt_ll_tx_get_idle_level(rmt_contex.hal.regs, channel);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_get_status(rmt_channel_t channel, uint32_t *status)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    if (RMT_IS_RX_CHANNEL(channel)) {
        *status = rmt_ll_rx_get_status_word(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel));
    } else {
        *status = rmt_ll_tx_get_status_word(rmt_contex.hal.regs, channel);
    }
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_set_rx_intr_en(rmt_channel_t channel, bool en)
{
    ESP_RETURN_ON_FALSE(RMT_IS_RX_CHANNEL(channel) && channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    rmt_ll_enable_interrupt(rmt_contex.hal.regs, RMT_LL_EVENT_RX_DONE(RMT_DECODE_RX_CHANNEL(channel)), en);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_set_err_intr_en(rmt_channel_t channel, bool en)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    if (RMT_IS_RX_CHANNEL(channel)) {
        rmt_ll_enable_interrupt(rmt_contex.hal.regs, RMT_LL_EVENT_RX_ERROR(RMT_DECODE_RX_CHANNEL(channel)), en);
    } else {
        rmt_ll_enable_interrupt(rmt_contex.hal.regs, RMT_LL_EVENT_TX_ERROR(channel), en);
    }
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_set_tx_intr_en(rmt_channel_t channel, bool en)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    rmt_ll_enable_interrupt(rmt_contex.hal.regs, RMT_LL_EVENT_TX_DONE(channel), en);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_set_tx_thr_intr_en(rmt_channel_t channel, bool en, uint16_t evt_thresh)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    if (en) {
        uint32_t item_block_len = rmt_ll_tx_get_mem_blocks(rmt_contex.hal.regs, channel) * RMT_MEM_ITEM_NUM;
        ESP_RETURN_ON_FALSE(evt_thresh <= item_block_len, ESP_ERR_INVALID_ARG, TAG, "RMT EVT THRESH ERR");
        RMT_ENTER_CRITICAL();
        rmt_ll_tx_set_limit(rmt_contex.hal.regs, channel, evt_thresh);
        rmt_ll_enable_interrupt(rmt_contex.hal.regs, RMT_LL_EVENT_TX_THRES(channel), true);
        RMT_EXIT_CRITICAL();
    } else {
        RMT_ENTER_CRITICAL();
        rmt_ll_enable_interrupt(rmt_contex.hal.regs, RMT_LL_EVENT_TX_THRES(channel), false);
        RMT_EXIT_CRITICAL();
    }
    return ESP_OK;
}

esp_err_t rmt_set_gpio(rmt_channel_t channel, rmt_mode_t mode, gpio_num_t gpio_num, bool invert_signal)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(mode < RMT_MODE_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_MODE_ERROR_STR);
    ESP_RETURN_ON_FALSE(((GPIO_IS_VALID_GPIO(gpio_num) && (mode == RMT_MODE_RX)) ||
                         (GPIO_IS_VALID_OUTPUT_GPIO(gpio_num) && (mode == RMT_MODE_TX))), ESP_ERR_INVALID_ARG, TAG, RMT_GPIO_ERROR_STR);

    gpio_func_sel(gpio_num, PIN_FUNC_GPIO);
    if (mode == RMT_MODE_TX) {
        ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
        gpio_set_direction(gpio_num, GPIO_MODE_OUTPUT);
        esp_rom_gpio_connect_out_signal(gpio_num, rmt_periph_signals.groups[0].channels[channel].tx_sig, invert_signal, 0);
    } else {
        ESP_RETURN_ON_FALSE(RMT_IS_RX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
        gpio_set_direction(gpio_num, GPIO_MODE_INPUT);
        esp_rom_gpio_connect_in_signal(gpio_num, rmt_periph_signals.groups[0].channels[channel].rx_sig, invert_signal);
    }
    return ESP_OK;
}
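/*
 * Usage sketch (hypothetical pin/channel pairing): route TX channel 0 to
 * GPIO18 without inverting the signal:
 *
 *     rmt_set_gpio(RMT_CHANNEL_0, RMT_MODE_TX, GPIO_NUM_18, false);
 */
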
static bool rmt_is_channel_number_valid(rmt_channel_t channel, uint8_t mode)
{
    // RX mode
    if (mode == RMT_MODE_RX) {
        return RMT_IS_RX_CHANNEL(channel) && (channel < RMT_CHANNEL_MAX);
    }
    // TX mode
    return (channel >= 0) && RMT_IS_TX_CHANNEL(channel);
}

static esp_err_t rmt_internal_config(rmt_dev_t *dev, const rmt_config_t *rmt_param)
{
    uint8_t mode = rmt_param->rmt_mode;
    uint8_t channel = rmt_param->channel;
    uint8_t gpio_num = rmt_param->gpio_num;
    uint8_t mem_cnt = rmt_param->mem_block_num;
    uint8_t clk_div = rmt_param->clk_div;
    uint32_t carrier_freq_hz = rmt_param->tx_config.carrier_freq_hz;
    bool carrier_en = rmt_param->tx_config.carrier_en;
    uint32_t rmt_source_clk_hz;
    rmt_clock_source_t clk_src = RMT_BASECLK_DEFAULT;

    ESP_RETURN_ON_FALSE(rmt_is_channel_number_valid(channel, mode), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(mem_cnt + channel <= SOC_RMT_CHANNELS_PER_GROUP && mem_cnt > 0, ESP_ERR_INVALID_ARG, TAG, RMT_MEM_CNT_ERROR_STR);
    ESP_RETURN_ON_FALSE(clk_div > 0, ESP_ERR_INVALID_ARG, TAG, RMT_CLK_DIV_ERROR_STR);

    if (mode == RMT_MODE_TX) {
        ESP_RETURN_ON_FALSE(!carrier_en || carrier_freq_hz > 0, ESP_ERR_INVALID_ARG, TAG, "RMT carrier frequency can't be zero");
    }

    RMT_ENTER_CRITICAL();
    rmt_ll_enable_mem_access_nonfifo(dev, true);

    if (rmt_param->flags & RMT_CHANNEL_FLAGS_AWARE_DFS) {
#if SOC_RMT_SUPPORT_XTAL
        // clock src: XTAL_CLK
        clk_src = RMT_BASECLK_XTAL;
#elif SOC_RMT_SUPPORT_REF_TICK
        // clock src: REF_CLK
        clk_src = RMT_BASECLK_REF;
#else
#error "No clock source is aware of DFS"
#endif
    }
    esp_clk_tree_src_get_freq_hz((soc_module_clk_t)clk_src, ESP_CLK_TREE_SRC_FREQ_PRECISION_CACHED, &rmt_source_clk_hz);
    RMT_CLOCK_SRC_ATOMIC() {
        rmt_ll_set_group_clock_src(dev, channel, clk_src, 1, 0, 0);
        rmt_ll_enable_group_clock(dev, true);
    }
    RMT_EXIT_CRITICAL();

#if SOC_RMT_CHANNEL_CLK_INDEPENDENT
    s_rmt_source_clock_hz[channel] = rmt_source_clk_hz;
#else
    if (s_rmt_source_clock_hz && rmt_source_clk_hz != s_rmt_source_clock_hz) {
        ESP_LOGW(TAG, "RMT clock source has been configured to %"PRIu32" by other channel, now reconfigure it to %"PRIu32, s_rmt_source_clock_hz, rmt_source_clk_hz);
    }
    s_rmt_source_clock_hz = rmt_source_clk_hz;
#endif
    ESP_LOGD(TAG, "rmt_source_clk_hz: %"PRIu32, rmt_source_clk_hz);

    if (mode == RMT_MODE_TX) {
        uint16_t carrier_duty_percent = rmt_param->tx_config.carrier_duty_percent;
        uint8_t carrier_level = rmt_param->tx_config.carrier_level;
        uint8_t idle_level = rmt_param->tx_config.idle_level;

        RMT_ENTER_CRITICAL();
        rmt_ll_tx_set_channel_clock_div(dev, channel, clk_div);
        rmt_ll_tx_set_mem_blocks(dev, channel, mem_cnt);
        rmt_ll_tx_reset_pointer(dev, channel);
        rmt_ll_tx_enable_loop(dev, channel, rmt_param->tx_config.loop_en);
#if SOC_RMT_SUPPORT_TX_LOOP_COUNT
        if (rmt_param->tx_config.loop_en) {
            rmt_ll_tx_set_loop_count(dev, channel, rmt_param->tx_config.loop_count);
        }
#endif
        /* always enable tx ping-pong */
        rmt_ll_tx_enable_wrap(dev, channel, true);
        /* Set idle level */
        rmt_ll_tx_fix_idle_level(dev, channel, idle_level, rmt_param->tx_config.idle_output_en);
        /* Set carrier */
        rmt_ll_tx_enable_carrier_modulation(dev, channel, carrier_en);
        if (carrier_en) {
            uint32_t duty_div, duty_h, duty_l;
            duty_div = rmt_source_clk_hz / carrier_freq_hz;
            duty_h = duty_div * carrier_duty_percent / 100;
            duty_l = duty_div - duty_h;
            rmt_ll_tx_set_carrier_level(dev, channel, carrier_level);
            rmt_ll_tx_set_carrier_high_low_ticks(dev, channel, duty_h, duty_l);
        } else {
            rmt_ll_tx_set_carrier_level(dev, channel, 0);
        }
        RMT_EXIT_CRITICAL();

        ESP_LOGD(TAG, "Rmt Tx Channel %u|Gpio %u|Sclk_Hz %"PRIu32"|Div %u|Carrier_Hz %"PRIu32"|Duty %u",
                 channel, gpio_num, rmt_source_clk_hz, clk_div, carrier_freq_hz, carrier_duty_percent);
    } else if (RMT_MODE_RX == mode) {
        uint8_t filter_cnt = rmt_param->rx_config.filter_ticks_thresh;
        uint16_t threshold = rmt_param->rx_config.idle_threshold;

        RMT_ENTER_CRITICAL();
        rmt_ll_rx_set_channel_clock_div(dev, RMT_DECODE_RX_CHANNEL(channel), clk_div);
        rmt_ll_rx_set_mem_blocks(dev, RMT_DECODE_RX_CHANNEL(channel), mem_cnt);
        rmt_ll_rx_reset_pointer(dev, RMT_DECODE_RX_CHANNEL(channel));
        rmt_ll_rx_set_mem_owner(dev, RMT_DECODE_RX_CHANNEL(channel), RMT_LL_MEM_OWNER_HW);
        /* Set idle threshold */
        rmt_ll_rx_set_idle_thres(dev, RMT_DECODE_RX_CHANNEL(channel), threshold);
        /* Set RX filter */
        rmt_ll_rx_set_filter_thres(dev, RMT_DECODE_RX_CHANNEL(channel), filter_cnt);
        rmt_ll_rx_enable_filter(dev, RMT_DECODE_RX_CHANNEL(channel), rmt_param->rx_config.filter_en);

#if SOC_RMT_SUPPORT_RX_PINGPONG
        /* always enable rx ping-pong */
        rmt_ll_rx_enable_wrap(dev, RMT_DECODE_RX_CHANNEL(channel), true);
#endif

#if SOC_RMT_SUPPORT_RX_DEMODULATION
        rmt_ll_rx_enable_carrier_demodulation(dev, RMT_DECODE_RX_CHANNEL(channel), rmt_param->rx_config.rm_carrier);
        if (rmt_param->rx_config.rm_carrier) {
            uint32_t duty_total = rmt_source_clk_hz / rmt_ll_rx_get_channel_clock_div(dev, RMT_DECODE_RX_CHANNEL(channel)) / rmt_param->rx_config.carrier_freq_hz;
            uint32_t duty_high = duty_total * rmt_param->rx_config.carrier_duty_percent / 100;
            // there could be residual error in timing the carrier pulse, so double the theoretical ticks to be safe
            rmt_ll_rx_set_carrier_high_low_ticks(dev, RMT_DECODE_RX_CHANNEL(channel), duty_high * 2, (duty_total - duty_high) * 2);
            rmt_ll_rx_set_carrier_level(dev, RMT_DECODE_RX_CHANNEL(channel), rmt_param->rx_config.carrier_level);
        }
#endif
        RMT_EXIT_CRITICAL();

        ESP_LOGD(TAG, "Rmt Rx Channel %u|Gpio %u|Sclk_Hz %"PRIu32"|Div %u|Threshold %u|Filter %u",
                 channel, gpio_num, rmt_source_clk_hz, clk_div, threshold, filter_cnt);
    }

    return ESP_OK;
}
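/*
 * Worked example of the TX carrier arithmetic above (illustrative numbers):
 * with rmt_source_clk_hz = 80 MHz, carrier_freq_hz = 38 kHz and
 * carrier_duty_percent = 50, duty_div = 80000000 / 38000 = 2105,
 * duty_h = 2105 * 50 / 100 = 1052 and duty_l = 2105 - 1052 = 1053 ticks,
 * i.e. a ~38 kHz carrier at roughly 50% duty.
 */
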
esp_err_t rmt_config(const rmt_config_t *rmt_param)
{
    rmt_module_enable();

    ESP_RETURN_ON_ERROR(rmt_set_gpio(rmt_param->channel, rmt_param->rmt_mode, rmt_param->gpio_num, rmt_param->flags & RMT_CHANNEL_FLAGS_INVERT_SIG), TAG, "set gpio for RMT driver failed");
    ESP_RETURN_ON_ERROR(rmt_internal_config(&RMT, rmt_param), TAG, "initialize RMT driver failed");

    return ESP_OK;
}
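/*
 * Typical call sequence (application sketch; the RMT_DEFAULT_CONFIG_TX helper
 * comes from the legacy header, and the GPIO/channel choices are assumptions):
 *
 *     rmt_config_t config = RMT_DEFAULT_CONFIG_TX(GPIO_NUM_18, RMT_CHANNEL_0);
 *     config.clk_div = 80; // 1 us per tick with an 80 MHz source clock
 *     ESP_ERROR_CHECK(rmt_config(&config));
 *     ESP_ERROR_CHECK(rmt_driver_install(config.channel, 0, 0));
 */
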
static void IRAM_ATTR rmt_fill_memory(rmt_channel_t channel, const rmt_item32_t *item,
                                      uint16_t item_num, uint16_t mem_offset)
{
    uint32_t *from = (uint32_t *)item;
    volatile uint32_t *to = (volatile uint32_t *)&RMTMEM.chan[channel].data32[0].val;
    to += mem_offset;
    while (item_num--) {
        *to++ = *from++;
    }
}

esp_err_t rmt_fill_tx_items(rmt_channel_t channel, const rmt_item32_t *item, uint16_t item_num, uint16_t mem_offset)
{
    // return an error code here, not 0 (ESP_OK), when the channel is not a TX channel
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(item, ESP_ERR_INVALID_ARG, TAG, RMT_ADDR_ERROR_STR);
    ESP_RETURN_ON_FALSE(item_num > 0, ESP_ERR_INVALID_ARG, TAG, RMT_DRIVER_LENGTH_ERROR_STR);

    uint8_t mem_cnt = rmt_ll_tx_get_mem_blocks(rmt_contex.hal.regs, channel);
    ESP_RETURN_ON_FALSE(mem_cnt * RMT_MEM_ITEM_NUM >= item_num, ESP_ERR_INVALID_ARG, TAG, RMT_WR_MEM_OVF_ERROR_STR);
    rmt_fill_memory(channel, item, item_num, mem_offset);
    return ESP_OK;
}
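/*
 * Usage sketch (hypothetical frame; tick durations are assumptions): write
 * two items at offset 0 of the channel's block memory and start sending:
 *
 *     rmt_item32_t items[2] = {
 *         {{{ 350, 1, 900, 0 }}}, // 350 ticks high, then 900 ticks low
 *         {{{ 0, 1, 0, 0 }}},     // zero-duration terminator ends the frame
 *     };
 *     rmt_fill_tx_items(RMT_CHANNEL_0, items, 2, 0);
 *     rmt_tx_start(RMT_CHANNEL_0, true);
 */
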
esp_err_t rmt_isr_register(void (*fn)(void *), void *arg, int intr_alloc_flags, rmt_isr_handle_t *handle)
{
    ESP_RETURN_ON_FALSE(fn, ESP_ERR_INVALID_ARG, TAG, RMT_ADDR_ERROR_STR);
    ESP_RETURN_ON_FALSE(rmt_contex.rmt_driver_channels == 0, ESP_FAIL, TAG, "RMT driver installed, can not install generic ISR handler");

    return esp_intr_alloc(rmt_periph_signals.groups[0].irq, intr_alloc_flags, fn, arg, handle);
}

esp_err_t rmt_isr_deregister(rmt_isr_handle_t handle)
{
    return esp_intr_free(handle);
}

static void IRAM_ATTR rmt_driver_isr_default(void *arg)
{
    uint32_t status = 0;
    rmt_item32_t *addr = NULL;
    uint8_t channel = 0;
    rmt_hal_context_t *hal = (rmt_hal_context_t *)arg;
    BaseType_t HPTaskAwoken = pdFALSE;

    // Tx end interrupt
    status = rmt_ll_get_tx_end_interrupt_status(hal->regs);
    while (status) {
        channel = __builtin_ffs(status) - 1;
        status &= ~(1 << channel);
        rmt_obj_t *p_rmt = p_rmt_obj[channel];
        if (p_rmt) {
            xSemaphoreGiveFromISR(p_rmt->tx_sem, &HPTaskAwoken);
            rmt_ll_tx_reset_pointer(rmt_contex.hal.regs, channel);
            p_rmt->tx_data = NULL;
            p_rmt->tx_len_rem = 0;
            p_rmt->tx_offset = 0;
            p_rmt->tx_sub_len = 0;
            p_rmt->sample_cur = NULL;
            p_rmt->translator = false;
            if (rmt_contex.rmt_tx_end_callback.function) {
                rmt_contex.rmt_tx_end_callback.function(channel, rmt_contex.rmt_tx_end_callback.arg);
            }
        }
        rmt_ll_clear_interrupt_status(hal->regs, RMT_LL_EVENT_TX_DONE(channel));
    }
    // Tx thres interrupt
    status = rmt_ll_get_tx_thres_interrupt_status(hal->regs);
    while (status) {
        channel = __builtin_ffs(status) - 1;
        status &= ~(1 << channel);
        rmt_obj_t *p_rmt = p_rmt_obj[channel];
        if (p_rmt) {
            if (p_rmt->translator) {
                if (p_rmt->sample_size_remain > 0) {
                    size_t translated_size = 0;
                    p_rmt->sample_to_rmt((void *)p_rmt->sample_cur,
                                         p_rmt->tx_buf,
                                         p_rmt->sample_size_remain,
                                         p_rmt->tx_sub_len,
                                         &translated_size,
                                         &p_rmt->tx_len_rem);
                    p_rmt->sample_size_remain -= translated_size;
                    p_rmt->sample_cur += translated_size;
                    p_rmt->tx_data = p_rmt->tx_buf;
                } else {
                    p_rmt->sample_cur = NULL;
                    p_rmt->translator = false;
                }
            }
            const rmt_item32_t *pdata = p_rmt->tx_data;
            size_t len_rem = p_rmt->tx_len_rem;
            rmt_idle_level_t idle_level = rmt_ll_tx_get_idle_level(hal->regs, channel);
            rmt_item32_t stop_data = (rmt_item32_t) {
                .level0 = idle_level,
                .duration0 = 0,
            };
            if (len_rem >= p_rmt->tx_sub_len) {
                rmt_fill_memory(channel, pdata, p_rmt->tx_sub_len, p_rmt->tx_offset);
                p_rmt->tx_data += p_rmt->tx_sub_len;
                p_rmt->tx_len_rem -= p_rmt->tx_sub_len;
            } else if (len_rem == 0) {
                rmt_fill_memory(channel, &stop_data, 1, p_rmt->tx_offset);
            } else {
                rmt_fill_memory(channel, pdata, len_rem, p_rmt->tx_offset);
                rmt_fill_memory(channel, &stop_data, 1, p_rmt->tx_offset + len_rem);
                p_rmt->tx_data += len_rem;
                p_rmt->tx_len_rem -= len_rem;
            }
            if (p_rmt->tx_offset == 0) {
                p_rmt->tx_offset = p_rmt->tx_sub_len;
            } else {
                p_rmt->tx_offset = 0;
            }
        }
        rmt_ll_clear_interrupt_status(hal->regs, RMT_LL_EVENT_TX_THRES(channel));
    }
    // Rx end interrupt
    status = rmt_ll_get_rx_end_interrupt_status(hal->regs);
    while (status) {
        channel = __builtin_ffs(status) - 1;
        status &= ~(1 << channel);
        rmt_obj_t *p_rmt = p_rmt_obj[RMT_ENCODE_RX_CHANNEL(channel)];
        if (p_rmt) {
            rmt_ll_rx_enable(rmt_contex.hal.regs, channel, false);
            int item_len = rmt_ll_rx_get_memory_writer_offset(rmt_contex.hal.regs, channel);
            rmt_ll_rx_set_mem_owner(rmt_contex.hal.regs, channel, RMT_LL_MEM_OWNER_SW);
            if (p_rmt->rx_buf) {
                addr = (rmt_item32_t *)RMTMEM.chan[RMT_ENCODE_RX_CHANNEL(channel)].data32;
#if SOC_RMT_SUPPORT_RX_PINGPONG
                if (item_len > p_rmt->rx_item_start_idx) {
                    item_len = item_len - p_rmt->rx_item_start_idx;
                }
                memcpy((void *)(p_rmt->rx_item_buf + p_rmt->rx_item_len), (void *)(addr + p_rmt->rx_item_start_idx), item_len * 4);
                p_rmt->rx_item_len += item_len;
                BaseType_t res = xRingbufferSendFromISR(p_rmt->rx_buf, (void *)(p_rmt->rx_item_buf), p_rmt->rx_item_len * 4, &HPTaskAwoken);
#else
                BaseType_t res = xRingbufferSendFromISR(p_rmt->rx_buf, (void *)addr, item_len * 4, &HPTaskAwoken);
#endif
                if (res == pdFALSE) {
                    ESP_DRAM_LOGE(TAG, "RMT RX BUFFER FULL");
                }
            } else {
                ESP_DRAM_LOGE(TAG, "RMT RX BUFFER ERROR");
            }

#if SOC_RMT_SUPPORT_RX_PINGPONG
            p_rmt->rx_item_start_idx = 0;
            p_rmt->rx_item_len = 0;
            memset((void *)p_rmt->rx_item_buf, 0, p_rmt->rx_item_buf_size);
#endif
            rmt_ll_rx_reset_pointer(rmt_contex.hal.regs, channel);
            rmt_ll_rx_set_mem_owner(rmt_contex.hal.regs, channel, RMT_LL_MEM_OWNER_HW);
            rmt_ll_rx_enable(rmt_contex.hal.regs, channel, true);
        }
        rmt_ll_clear_interrupt_status(hal->regs, RMT_LL_EVENT_RX_DONE(channel));
    }
#if SOC_RMT_SUPPORT_RX_PINGPONG
    // Rx thres interrupt
    status = rmt_ll_get_rx_thres_interrupt_status(hal->regs);
    while (status) {
        channel = __builtin_ffs(status) - 1;
        status &= ~(1 << channel);
        rmt_obj_t *p_rmt = p_rmt_obj[RMT_ENCODE_RX_CHANNEL(channel)];
        int mem_item_size = rmt_ll_rx_get_mem_blocks(rmt_contex.hal.regs, channel) * RMT_MEM_ITEM_NUM;
        int rx_thres_lim = rmt_ll_rx_get_limit(rmt_contex.hal.regs, channel);
        int item_len = (p_rmt->rx_item_start_idx == 0) ? rx_thres_lim : (mem_item_size - rx_thres_lim);
        if ((p_rmt->rx_item_len + item_len) < (p_rmt->rx_item_buf_size / 4)) {
            rmt_ll_rx_set_mem_owner(rmt_contex.hal.regs, channel, RMT_LL_MEM_OWNER_SW);
            memcpy((void *)(p_rmt->rx_item_buf + p_rmt->rx_item_len), (void *)(RMTMEM.chan[RMT_ENCODE_RX_CHANNEL(channel)].data32 + p_rmt->rx_item_start_idx), item_len * 4);
            rmt_ll_rx_set_mem_owner(rmt_contex.hal.regs, channel, RMT_LL_MEM_OWNER_HW);
            p_rmt->rx_item_len += item_len;
            p_rmt->rx_item_start_idx += item_len;
            if (p_rmt->rx_item_start_idx >= mem_item_size) {
                p_rmt->rx_item_start_idx = 0;
            }
        } else {
            // log the actual buffer capacity; sizeof() of the buffer pointer would be meaningless here
            ESP_DRAM_LOGE(TAG, "---RX buffer too small: %d", (int)p_rmt->rx_item_buf_size);
        }
        rmt_ll_clear_interrupt_status(hal->regs, RMT_LL_EVENT_RX_THRES(channel));
    }
#endif
#if SOC_RMT_SUPPORT_TX_LOOP_COUNT
    // loop count interrupt
    status = rmt_ll_get_tx_loop_interrupt_status(hal->regs);
    while (status) {
        channel = __builtin_ffs(status) - 1;
        status &= ~(1 << channel);
        rmt_obj_t *p_rmt = p_rmt_obj[channel];
        if (p_rmt) {
            if (p_rmt->loop_autostop) {
#ifndef SOC_RMT_SUPPORT_TX_LOOP_AUTO_STOP
                // hardware can't stop the output automatically, so the driver stops it here
                // (the loop may already have overshot by several microseconds)
                rmt_ll_tx_stop(rmt_contex.hal.regs, channel);
                rmt_ll_tx_reset_pointer(rmt_contex.hal.regs, channel);
#endif
            }
            xSemaphoreGiveFromISR(p_rmt->tx_sem, &HPTaskAwoken);
            if (rmt_contex.rmt_tx_end_callback.function) {
                rmt_contex.rmt_tx_end_callback.function(channel, rmt_contex.rmt_tx_end_callback.arg);
            }
        }
        rmt_ll_clear_interrupt_status(hal->regs, RMT_LL_EVENT_TX_LOOP_END(channel));
    }
#endif
    // RX Err interrupt
    status = rmt_ll_get_rx_err_interrupt_status(hal->regs);
    while (status) {
        channel = __builtin_ffs(status) - 1;
        status &= ~(1 << channel);
        rmt_obj_t *p_rmt = p_rmt_obj[RMT_ENCODE_RX_CHANNEL(channel)];
        if (p_rmt) {
            // Reset the receiver's write/read addresses to prevent endless err interrupts.
            rmt_ll_rx_reset_pointer(rmt_contex.hal.regs, channel);
            ESP_DRAM_LOGD(TAG, "RMT RX channel %d error", channel);
            ESP_DRAM_LOGD(TAG, "status: 0x%08x", rmt_ll_rx_get_status_word(rmt_contex.hal.regs, channel));
        }
        rmt_ll_clear_interrupt_status(hal->regs, RMT_LL_EVENT_RX_ERROR(channel));
    }
    // TX Err interrupt
    status = rmt_ll_get_tx_err_interrupt_status(hal->regs);
    while (status) {
        channel = __builtin_ffs(status) - 1;
        status &= ~(1 << channel);
        rmt_obj_t *p_rmt = p_rmt_obj[channel];
        if (p_rmt) {
            // Reset the transmitter's write/read addresses to prevent endless err interrupts.
            rmt_ll_tx_reset_pointer(rmt_contex.hal.regs, channel);
            ESP_DRAM_LOGD(TAG, "RMT TX channel %d error", channel);
            ESP_DRAM_LOGD(TAG, "status: 0x%08x", rmt_ll_tx_get_status_word(rmt_contex.hal.regs, channel));
        }
        rmt_ll_clear_interrupt_status(hal->regs, RMT_LL_EVENT_TX_ERROR(channel));
    }

    if (HPTaskAwoken == pdTRUE) {
        portYIELD_FROM_ISR();
    }
}
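/*
 * Design note on the refill scheme above: the channel's block memory is used
 * as two halves of tx_sub_len items each. Every threshold interrupt refills
 * the half the hardware has just finished reading (tx_offset alternates
 * between 0 and tx_sub_len), so frames longer than the block stream through
 * it, and a zero-duration stop item marks the end of the data.
 */
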
esp_err_t rmt_driver_uninstall(rmt_channel_t channel)
{
    esp_err_t err = ESP_OK;
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    // calling this uninstall function multiple times on the same channel is allowed
    if (p_rmt_obj[channel] == NULL) {
        return ESP_OK;
    }
    // Avoid blocking here (when the interrupt is disabled and TX done is not being waited on)
    if (p_rmt_obj[channel]->wait_done) {
        xSemaphoreTake(p_rmt_obj[channel]->tx_sem, portMAX_DELAY);
    }

    RMT_ENTER_CRITICAL();
    // check the channel's working mode
    if (p_rmt_obj[channel]->rx_buf) {
        rmt_ll_enable_interrupt(rmt_contex.hal.regs, RMT_LL_EVENT_RX_MASK(RMT_DECODE_RX_CHANNEL(channel)) | RMT_LL_EVENT_RX_ERROR(RMT_DECODE_RX_CHANNEL(channel)), false);
    } else {
        rmt_ll_enable_interrupt(rmt_contex.hal.regs, RMT_LL_EVENT_TX_MASK(channel) | RMT_LL_EVENT_TX_ERROR(channel), false);
    }
    RMT_EXIT_CRITICAL();

    _lock_acquire_recursive(&(rmt_contex.rmt_driver_isr_lock));
    rmt_contex.rmt_driver_channels &= ~BIT(channel);
    if (rmt_contex.rmt_driver_channels == 0 && rmt_contex.rmt_driver_intr_handle) {
        rmt_module_disable();
        // all channels have the driver disabled
        err = rmt_isr_deregister(rmt_contex.rmt_driver_intr_handle);
        rmt_contex.rmt_driver_intr_handle = NULL;
    }
    _lock_release_recursive(&(rmt_contex.rmt_driver_isr_lock));

    if (p_rmt_obj[channel]->tx_sem) {
        vSemaphoreDelete(p_rmt_obj[channel]->tx_sem);
        p_rmt_obj[channel]->tx_sem = NULL;
    }
    if (p_rmt_obj[channel]->rx_buf) {
        vRingbufferDelete(p_rmt_obj[channel]->rx_buf);
        p_rmt_obj[channel]->rx_buf = NULL;
    }
    if (p_rmt_obj[channel]->tx_buf) {
        free(p_rmt_obj[channel]->tx_buf);
        p_rmt_obj[channel]->tx_buf = NULL;
    }
    if (p_rmt_obj[channel]->sample_to_rmt) {
        p_rmt_obj[channel]->sample_to_rmt = NULL;
    }
#if SOC_RMT_SUPPORT_RX_PINGPONG
    if (p_rmt_obj[channel]->rx_item_buf) {
        free(p_rmt_obj[channel]->rx_item_buf);
        p_rmt_obj[channel]->rx_item_buf = NULL;
        p_rmt_obj[channel]->rx_item_buf_size = 0;
    }
#endif
    free(p_rmt_obj[channel]);
    p_rmt_obj[channel] = NULL;
    return err;
}


esp_err_t rmt_driver_install(rmt_channel_t channel, size_t rx_buf_size, int intr_alloc_flags)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    esp_err_t err = ESP_OK;
    if (p_rmt_obj[channel]) {
        ESP_LOGD(TAG, "RMT driver already installed");
        return ESP_ERR_INVALID_STATE;
    }

#if CONFIG_RINGBUF_PLACE_ISR_FUNCTIONS_INTO_FLASH
    if (intr_alloc_flags & ESP_INTR_FLAG_IRAM) {
        ESP_LOGE(TAG, "ringbuf ISR functions in flash, but used in IRAM interrupt");
        return ESP_ERR_INVALID_ARG;
    }
#endif

#if !CONFIG_SPIRAM_USE_MALLOC
    p_rmt_obj[channel] = calloc(1, sizeof(rmt_obj_t));
#else
    if (!(intr_alloc_flags & ESP_INTR_FLAG_IRAM)) {
        p_rmt_obj[channel] = calloc(1, sizeof(rmt_obj_t));
    } else {
        p_rmt_obj[channel] = heap_caps_calloc(1, sizeof(rmt_obj_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
    }
#endif

    if (p_rmt_obj[channel] == NULL) {
        ESP_LOGE(TAG, "RMT driver malloc error");
        return ESP_ERR_NO_MEM;
    }
    p_rmt_obj[channel]->tx_len_rem = 0;
    p_rmt_obj[channel]->tx_data = NULL;
    p_rmt_obj[channel]->channel = channel;
    p_rmt_obj[channel]->tx_offset = 0;
    p_rmt_obj[channel]->tx_sub_len = 0;
    p_rmt_obj[channel]->wait_done = false;
    p_rmt_obj[channel]->loop_autostop = false;
    p_rmt_obj[channel]->translator = false;
    p_rmt_obj[channel]->sample_to_rmt = NULL;
    if (p_rmt_obj[channel]->tx_sem == NULL) {
#if !CONFIG_SPIRAM_USE_MALLOC
        p_rmt_obj[channel]->tx_sem = xSemaphoreCreateBinary();
#else
        p_rmt_obj[channel]->intr_alloc_flags = intr_alloc_flags;
        if (!(intr_alloc_flags & ESP_INTR_FLAG_IRAM)) {
            p_rmt_obj[channel]->tx_sem = xSemaphoreCreateBinary();
        } else {
            p_rmt_obj[channel]->tx_sem = xSemaphoreCreateBinaryStatic(&p_rmt_obj[channel]->tx_sem_buffer);
        }
#endif
        xSemaphoreGive(p_rmt_obj[channel]->tx_sem);
    }
    if (p_rmt_obj[channel]->rx_buf == NULL && rx_buf_size > 0) {
        p_rmt_obj[channel]->rx_buf = xRingbufferCreate(rx_buf_size, RINGBUF_TYPE_NOSPLIT);
    }

#if SOC_RMT_SUPPORT_RX_PINGPONG
    if (p_rmt_obj[channel]->rx_item_buf == NULL && rx_buf_size > 0) {
#if !CONFIG_SPIRAM_USE_MALLOC
        p_rmt_obj[channel]->rx_item_buf = calloc(1, rx_buf_size);
#else
        if (!(p_rmt_obj[channel]->intr_alloc_flags & ESP_INTR_FLAG_IRAM)) {
            p_rmt_obj[channel]->rx_item_buf = calloc(1, rx_buf_size);
        } else {
            p_rmt_obj[channel]->rx_item_buf = heap_caps_calloc(1, rx_buf_size, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
        }
#endif
        if (p_rmt_obj[channel]->rx_item_buf == NULL) {
            ESP_LOGE(TAG, "RMT malloc fail");
            return ESP_FAIL;
        }
        p_rmt_obj[channel]->rx_item_buf_size = rx_buf_size;
    }
#endif

    _lock_acquire_recursive(&(rmt_contex.rmt_driver_isr_lock));

    if (rmt_contex.rmt_driver_channels == 0) {
        // first RMT channel using the driver
        err = rmt_isr_register(rmt_driver_isr_default, &rmt_contex.hal, intr_alloc_flags, &(rmt_contex.rmt_driver_intr_handle));
    }
    if (err == ESP_OK) {
        rmt_contex.rmt_driver_channels |= BIT(channel);
    }
    _lock_release_recursive(&(rmt_contex.rmt_driver_isr_lock));

    rmt_module_enable();

    if (RMT_IS_RX_CHANNEL(channel)) {
        rmt_hal_rx_channel_reset(&rmt_contex.hal, RMT_DECODE_RX_CHANNEL(channel));
    } else {
        rmt_hal_tx_channel_reset(&rmt_contex.hal, channel);
    }

    return err;
}
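
/*
 * Usage sketch (illustrative, not part of the driver): installing the driver
 * on a TX channel and tearing it down again. GPIO_NUM_18 and RMT_CHANNEL_0
 * are arbitrary example values.
 *
 *     rmt_config_t tx_cfg = RMT_DEFAULT_CONFIG_TX(GPIO_NUM_18, RMT_CHANNEL_0);
 *     ESP_ERROR_CHECK(rmt_config(&tx_cfg));
 *     ESP_ERROR_CHECK(rmt_driver_install(tx_cfg.channel, 0, 0)); // no RX ring buffer for a TX channel
 *     // ... transmit ...
 *     ESP_ERROR_CHECK(rmt_driver_uninstall(tx_cfg.channel));
 */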

esp_err_t rmt_write_items(rmt_channel_t channel, const rmt_item32_t *rmt_item, int item_num, bool wait_tx_done)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(p_rmt_obj[channel], ESP_FAIL, TAG, RMT_DRIVER_ERROR_STR);
    ESP_RETURN_ON_FALSE(rmt_item, ESP_FAIL, TAG, RMT_ADDR_ERROR_STR);
    ESP_RETURN_ON_FALSE(item_num > 0, ESP_ERR_INVALID_ARG, TAG, RMT_DRIVER_LENGTH_ERROR_STR);
    uint32_t mem_blocks = rmt_ll_tx_get_mem_blocks(rmt_contex.hal.regs, channel);
    ESP_RETURN_ON_FALSE(mem_blocks + channel <= SOC_RMT_CHANNELS_PER_GROUP, ESP_ERR_INVALID_STATE, TAG, RMT_MEM_CNT_ERROR_STR);
#if CONFIG_SPIRAM_USE_MALLOC
    if (p_rmt_obj[channel]->intr_alloc_flags & ESP_INTR_FLAG_IRAM) {
        if (!esp_ptr_internal(rmt_item)) {
            ESP_LOGE(TAG, RMT_PSRAM_BUFFER_WARN_STR);
            return ESP_ERR_INVALID_ARG;
        }
    }
#endif
    rmt_obj_t *p_rmt = p_rmt_obj[channel];
    int item_block_len = mem_blocks * RMT_MEM_ITEM_NUM;
    int item_sub_len = mem_blocks * RMT_MEM_ITEM_NUM / 2;
    int len_rem = item_num;
    xSemaphoreTake(p_rmt->tx_sem, portMAX_DELAY);
    // fill the memory block first
    if (item_num >= item_block_len) {
        rmt_fill_memory(channel, rmt_item, item_block_len, 0);
        len_rem -= item_block_len;
        rmt_set_tx_loop_mode(channel, false);
        rmt_set_tx_thr_intr_en(channel, 1, item_sub_len);
        p_rmt->tx_data = rmt_item + item_block_len;
        p_rmt->tx_len_rem = len_rem;
        p_rmt->tx_offset = 0;
        p_rmt->tx_sub_len = item_sub_len;
    } else {
        rmt_fill_memory(channel, rmt_item, len_rem, 0);
        rmt_idle_level_t idle_level = rmt_ll_tx_get_idle_level(rmt_contex.hal.regs, channel);
        rmt_item32_t stop_data = (rmt_item32_t) {
            .level0 = idle_level,
            .duration0 = 0,
        };
        rmt_fill_memory(channel, &stop_data, 1, len_rem);
        p_rmt->tx_len_rem = 0;
    }
    rmt_tx_start(channel, true);
    p_rmt->wait_done = wait_tx_done;
    if (wait_tx_done) {
        // wait for the loop to finish
        if (rmt_ll_tx_is_loop_enabled(rmt_contex.hal.regs, channel)) {
#if SOC_RMT_SUPPORT_TX_LOOP_COUNT
            xSemaphoreTake(p_rmt->tx_sem, portMAX_DELAY);
            xSemaphoreGive(p_rmt->tx_sem);
#endif
        } else {
            // wait for tx to end
            xSemaphoreTake(p_rmt->tx_sem, portMAX_DELAY);
            xSemaphoreGive(p_rmt->tx_sem);
        }
    }
    return ESP_OK;
}
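
/*
 * Usage sketch (illustrative): sending a short pattern and blocking until it
 * has been fully transmitted. The durations are arbitrary example tick counts.
 *
 *     rmt_item32_t items[2] = {
 *         { .duration0 = 400, .level0 = 1, .duration1 = 850, .level1 = 0 },
 *         { .duration0 = 800, .level0 = 1, .duration1 = 450, .level1 = 0 },
 *     };
 *     ESP_ERROR_CHECK(rmt_write_items(RMT_CHANNEL_0, items, 2, true));
 */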

esp_err_t rmt_wait_tx_done(rmt_channel_t channel, TickType_t wait_time)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(p_rmt_obj[channel], ESP_FAIL, TAG, RMT_DRIVER_ERROR_STR);
    if (xSemaphoreTake(p_rmt_obj[channel]->tx_sem, wait_time) == pdTRUE) {
        p_rmt_obj[channel]->wait_done = false;
        xSemaphoreGive(p_rmt_obj[channel]->tx_sem);
        return ESP_OK;
    } else {
        if (wait_time != 0) {
            // Don't emit an error message if just polling.
            ESP_LOGE(TAG, "Timeout on wait_tx_done");
        }
        return ESP_ERR_TIMEOUT;
    }
}
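
/*
 * Usage sketch (illustrative): starting a transmission without blocking, doing
 * other work, then synchronizing on completion. `items`/`item_num` stand for a
 * pattern prepared by the caller.
 *
 *     ESP_ERROR_CHECK(rmt_write_items(RMT_CHANNEL_0, items, item_num, false));
 *     // ... do other work while the transmission runs ...
 *     ESP_ERROR_CHECK(rmt_wait_tx_done(RMT_CHANNEL_0, portMAX_DELAY));
 */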

esp_err_t rmt_get_ringbuf_handle(rmt_channel_t channel, RingbufHandle_t *buf_handle)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(p_rmt_obj[channel], ESP_FAIL, TAG, RMT_DRIVER_ERROR_STR);
    ESP_RETURN_ON_FALSE(buf_handle, ESP_ERR_INVALID_ARG, TAG, RMT_ADDR_ERROR_STR);
    *buf_handle = p_rmt_obj[channel]->rx_buf;
    return ESP_OK;
}
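
/*
 * Usage sketch (illustrative): the typical RX flow, reading items out of the
 * ring buffer that the driver fills from its ISR. RMT_CHANNEL_4 is an
 * arbitrary example; it must be a channel configured and installed for RX.
 *
 *     RingbufHandle_t rb = NULL;
 *     ESP_ERROR_CHECK(rmt_get_ringbuf_handle(RMT_CHANNEL_4, &rb));
 *     ESP_ERROR_CHECK(rmt_rx_start(RMT_CHANNEL_4, true));
 *     size_t rx_size = 0;
 *     rmt_item32_t *items = (rmt_item32_t *)xRingbufferReceive(rb, &rx_size, pdMS_TO_TICKS(100));
 *     if (items) {
 *         size_t num_items = rx_size / sizeof(rmt_item32_t);
 *         // ... decode num_items items ...
 *         vRingbufferReturnItem(rb, (void *)items);
 *     }
 */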

rmt_tx_end_callback_t rmt_register_tx_end_callback(rmt_tx_end_fn_t function, void *arg)
{
    rmt_tx_end_callback_t previous = rmt_contex.rmt_tx_end_callback;
    rmt_contex.rmt_tx_end_callback.function = function;
    rmt_contex.rmt_tx_end_callback.arg = arg;
    return previous;
}
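
/*
 * Usage sketch (illustrative): getting a notification from the ISR whenever a
 * transmission finishes. The callback runs in interrupt context, so it must be
 * short, ISR-safe, and (if the interrupt is allocated with ESP_INTR_FLAG_IRAM)
 * placed in IRAM.
 *
 *     static void IRAM_ATTR on_tx_end(rmt_channel_t channel, void *arg)
 *     {
 *         // e.g. give a semaphore with the FromISR API; no blocking calls here
 *     }
 *
 *     rmt_register_tx_end_callback(on_tx_end, NULL);
 */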

esp_err_t rmt_translator_init(rmt_channel_t channel, sample_to_rmt_t fn)
{
    ESP_RETURN_ON_FALSE(fn, ESP_ERR_INVALID_ARG, TAG, RMT_TRANSLATOR_NULL_STR);
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(p_rmt_obj[channel], ESP_FAIL, TAG, RMT_DRIVER_ERROR_STR);
    uint32_t mem_blocks = rmt_ll_tx_get_mem_blocks(rmt_contex.hal.regs, channel);
    ESP_RETURN_ON_FALSE(mem_blocks + channel <= SOC_RMT_CHANNELS_PER_GROUP, ESP_ERR_INVALID_STATE, TAG, RMT_MEM_CNT_ERROR_STR);
    const uint32_t block_size = mem_blocks * RMT_MEM_ITEM_NUM * sizeof(rmt_item32_t);
    if (p_rmt_obj[channel]->tx_buf == NULL) {
#if !CONFIG_SPIRAM_USE_MALLOC
        p_rmt_obj[channel]->tx_buf = (rmt_item32_t *)calloc(1, block_size);
#else
        if (p_rmt_obj[channel]->intr_alloc_flags & ESP_INTR_FLAG_IRAM) {
            p_rmt_obj[channel]->tx_buf = (rmt_item32_t *)heap_caps_calloc(1, block_size, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
        } else {
            p_rmt_obj[channel]->tx_buf = (rmt_item32_t *)calloc(1, block_size);
        }
#endif
        if (p_rmt_obj[channel]->tx_buf == NULL) {
            ESP_LOGE(TAG, "RMT translator buffer create fail");
            return ESP_FAIL;
        }
    }
    p_rmt_obj[channel]->sample_to_rmt = fn;
    p_rmt_obj[channel]->tx_context = NULL;
    p_rmt_obj[channel]->sample_size_remain = 0;
    p_rmt_obj[channel]->sample_cur = NULL;
    ESP_LOGD(TAG, "RMT translator init done");
    return ESP_OK;
}
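
/*
 * Usage sketch (illustrative, not part of the driver): a translator that turns
 * each input byte into one RMT item. The timing values and the name
 * `u8_to_rmt` are arbitrary examples; a real translator must fill at most
 * `wanted_num` items and report how much input it consumed.
 *
 *     static void IRAM_ATTR u8_to_rmt(const void *src, rmt_item32_t *dest, size_t src_size,
 *                                     size_t wanted_num, size_t *translated_size, size_t *item_num)
 *     {
 *         const uint8_t *bytes = (const uint8_t *)src;
 *         size_t in = 0;
 *         size_t out = 0;
 *         while (in < src_size && out < wanted_num) {
 *             dest[out] = (rmt_item32_t) {
 *                 .duration0 = bytes[in] + 1, .level0 = 1, // high pulse scaled by the input byte
 *                 .duration1 = 100, .level1 = 0,           // fixed low period
 *             };
 *             in++;
 *             out++;
 *         }
 *         *translated_size = in;  // bytes consumed from src
 *         *item_num = out;        // items produced in dest
 *     }
 *
 *     ESP_ERROR_CHECK(rmt_translator_init(RMT_CHANNEL_0, u8_to_rmt));
 */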

esp_err_t rmt_translator_set_context(rmt_channel_t channel, void *context)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(p_rmt_obj[channel], ESP_FAIL, TAG, RMT_DRIVER_ERROR_STR);

    p_rmt_obj[channel]->tx_context = context;
    return ESP_OK;
}

esp_err_t rmt_translator_get_context(const size_t *item_num, void **context)
{
    ESP_RETURN_ON_FALSE(item_num && context, ESP_ERR_INVALID_ARG, TAG, "invalid arguments");
    // the address of tx_len_rem is passed directly to the translator callback,
    // so the enclosing object can be recovered from that pointer
    rmt_obj_t *obj = __containerof(item_num, rmt_obj_t, tx_len_rem);
    *context = obj->tx_context;
    return ESP_OK;
}
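
/*
 * Usage sketch (illustrative): the driver passes the address of its internal
 * `tx_len_rem` as the `item_num` argument of the translator, so a translator
 * can recover the per-channel context set with rmt_translator_set_context().
 * `my_ctx_t` is a hypothetical user type.
 *
 *     static void IRAM_ATTR my_translator(const void *src, rmt_item32_t *dest, size_t src_size,
 *                                         size_t wanted_num, size_t *translated_size, size_t *item_num)
 *     {
 *         my_ctx_t *ctx = NULL;
 *         rmt_translator_get_context(item_num, (void **)&ctx);
 *         // ... use ctx while filling dest ...
 *     }
 *
 *     // after rmt_translator_init(channel, my_translator):
 *     static my_ctx_t s_ctx;
 *     ESP_ERROR_CHECK(rmt_translator_set_context(RMT_CHANNEL_0, &s_ctx));
 */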

esp_err_t rmt_write_sample(rmt_channel_t channel, const uint8_t *src, size_t src_size, bool wait_tx_done)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(p_rmt_obj[channel], ESP_FAIL, TAG, RMT_DRIVER_ERROR_STR);
    ESP_RETURN_ON_FALSE(p_rmt_obj[channel]->sample_to_rmt, ESP_FAIL, TAG, RMT_TRANSLATOR_UNINIT_STR);
    uint32_t mem_blocks = rmt_ll_tx_get_mem_blocks(rmt_contex.hal.regs, channel);
    ESP_RETURN_ON_FALSE(mem_blocks + channel <= SOC_RMT_CHANNELS_PER_GROUP, ESP_ERR_INVALID_STATE, TAG, RMT_MEM_CNT_ERROR_STR);
#if CONFIG_SPIRAM_USE_MALLOC
    if (p_rmt_obj[channel]->intr_alloc_flags & ESP_INTR_FLAG_IRAM) {
        if (!esp_ptr_internal(src)) {
            ESP_LOGE(TAG, RMT_PSRAM_BUFFER_WARN_STR);
            return ESP_ERR_INVALID_ARG;
        }
    }
#endif
    size_t translated_size = 0;
    rmt_obj_t *p_rmt = p_rmt_obj[channel];
    const uint32_t item_block_len = mem_blocks * RMT_MEM_ITEM_NUM;
    const uint32_t item_sub_len = item_block_len / 2;
    xSemaphoreTake(p_rmt->tx_sem, portMAX_DELAY);
    p_rmt->sample_to_rmt((void *)src, p_rmt->tx_buf, src_size, item_block_len, &translated_size, &p_rmt->tx_len_rem);
    p_rmt->sample_size_remain = src_size - translated_size;
    p_rmt->sample_cur = src + translated_size;
    rmt_fill_memory(channel, p_rmt->tx_buf, p_rmt->tx_len_rem, 0);
    if (p_rmt->tx_len_rem == item_block_len) {
        rmt_set_tx_thr_intr_en(channel, 1, item_sub_len);
        p_rmt->tx_data = p_rmt->tx_buf;
        p_rmt->tx_offset = 0;
        p_rmt->tx_sub_len = item_sub_len;
        p_rmt->translator = true;
    } else {
        rmt_idle_level_t idle_level = rmt_ll_tx_get_idle_level(rmt_contex.hal.regs, channel);
        rmt_item32_t stop_data = (rmt_item32_t) {
            .level0 = idle_level,
            .duration0 = 0,
        };
        rmt_fill_memory(channel, &stop_data, 1, p_rmt->tx_len_rem);
        p_rmt->tx_len_rem = 0;
        p_rmt->sample_cur = NULL;
        p_rmt->translator = false;
    }
    rmt_tx_start(channel, true);
    p_rmt->wait_done = wait_tx_done;
    if (wait_tx_done) {
        xSemaphoreTake(p_rmt->tx_sem, portMAX_DELAY);
        xSemaphoreGive(p_rmt->tx_sem);
    }
    return ESP_OK;
}
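
/*
 * Usage sketch (illustrative): with a translator installed (see
 * rmt_translator_init() above), raw sample bytes can be sent directly and are
 * converted to RMT items on the fly, refilled chunk by chunk from the ISR.
 *
 *     static const uint8_t payload[] = {0x12, 0x34, 0x56};
 *     ESP_ERROR_CHECK(rmt_write_sample(RMT_CHANNEL_0, payload, sizeof(payload), true));
 */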

esp_err_t rmt_get_channel_status(rmt_channel_status_result_t *channel_status)
{
    ESP_RETURN_ON_FALSE(channel_status, ESP_ERR_INVALID_ARG, TAG, RMT_PARAM_ERR_STR);
    for (int i = 0; i < RMT_CHANNEL_MAX; i++) {
        channel_status->status[i] = RMT_CHANNEL_UNINIT;
        if (p_rmt_obj[i]) {
            if (p_rmt_obj[i]->tx_sem) {
                if (xSemaphoreTake(p_rmt_obj[i]->tx_sem, (TickType_t)0) == pdTRUE) {
                    channel_status->status[i] = RMT_CHANNEL_IDLE;
                    xSemaphoreGive(p_rmt_obj[i]->tx_sem);
                } else {
                    channel_status->status[i] = RMT_CHANNEL_BUSY;
                }
            }
        }
    }
    return ESP_OK;
}
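
/*
 * Usage sketch (illustrative): polling which channels are installed and idle.
 *
 *     rmt_channel_status_result_t result;
 *     ESP_ERROR_CHECK(rmt_get_channel_status(&result));
 *     for (int i = 0; i < RMT_CHANNEL_MAX; i++) {
 *         if (result.status[i] == RMT_CHANNEL_IDLE) {
 *             // channel i has the driver installed and is not transmitting
 *         }
 *     }
 */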

esp_err_t rmt_get_counter_clock(rmt_channel_t channel, uint32_t *clock_hz)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(clock_hz, ESP_ERR_INVALID_ARG, TAG, "parameter clock_hz can't be null");
    RMT_ENTER_CRITICAL();
    uint32_t rmt_source_clk_hz = 0;
#if SOC_RMT_CHANNEL_CLK_INDEPENDENT
    rmt_source_clk_hz = s_rmt_source_clock_hz[channel];
#else
    rmt_source_clk_hz = s_rmt_source_clock_hz;
#endif
    if (RMT_IS_RX_CHANNEL(channel)) {
        *clock_hz = rmt_source_clk_hz / rmt_ll_rx_get_channel_clock_div(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel));
    } else {
        *clock_hz = rmt_source_clk_hz / rmt_ll_tx_get_channel_clock_div(rmt_contex.hal.regs, channel);
    }
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}
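
/*
 * Usage sketch (illustrative): deriving the tick rate from the counter clock,
 * e.g. for converting microseconds into item durations. With a typical 80 MHz
 * source and a divider of 80 this yields one tick per microsecond.
 *
 *     uint32_t counter_clk_hz = 0;
 *     ESP_ERROR_CHECK(rmt_get_counter_clock(RMT_CHANNEL_0, &counter_clk_hz));
 *     uint32_t ticks_per_us = counter_clk_hz / 1000000;
 */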

#if SOC_RMT_SUPPORT_TX_SYNCHRO
esp_err_t rmt_add_channel_to_group(rmt_channel_t channel)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    rmt_ll_tx_enable_sync(rmt_contex.hal.regs, true);
    rmt_contex.synchro_channel_mask |= (1 << channel);
    rmt_ll_tx_sync_group_add_channels(rmt_contex.hal.regs, 1 << channel);
    rmt_ll_tx_reset_channels_clock_div(rmt_contex.hal.regs, rmt_contex.synchro_channel_mask);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_remove_channel_from_group(rmt_channel_t channel)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    rmt_contex.synchro_channel_mask &= ~(1 << channel);
    rmt_ll_tx_sync_group_remove_channels(rmt_contex.hal.regs, 1 << channel);
    if (rmt_contex.synchro_channel_mask == 0) {
        rmt_ll_tx_enable_sync(rmt_contex.hal.regs, false);
    }
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}
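
/*
 * Usage sketch (illustrative): starting two TX channels in lockstep by placing
 * them in the same sync group; their clock dividers are reset together so the
 * outputs stay aligned.
 *
 *     ESP_ERROR_CHECK(rmt_add_channel_to_group(RMT_CHANNEL_0));
 *     ESP_ERROR_CHECK(rmt_add_channel_to_group(RMT_CHANNEL_1));
 *     // ... start transmissions on both channels ...
 *     ESP_ERROR_CHECK(rmt_remove_channel_from_group(RMT_CHANNEL_0));
 *     ESP_ERROR_CHECK(rmt_remove_channel_from_group(RMT_CHANNEL_1));
 */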
#endif

#if SOC_RMT_SUPPORT_TX_LOOP_COUNT
esp_err_t rmt_set_tx_loop_count(rmt_channel_t channel, uint32_t count)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(count <= RMT_LL_MAX_LOOP_COUNT_PER_BATCH, ESP_ERR_INVALID_ARG, TAG, "Invalid count value");
    RMT_ENTER_CRITICAL();
    rmt_ll_tx_set_loop_count(rmt_contex.hal.regs, channel, count);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_enable_tx_loop_autostop(rmt_channel_t channel, bool en)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    p_rmt_obj[channel]->loop_autostop = en;
#if SOC_RMT_SUPPORT_TX_LOOP_AUTO_STOP
    RMT_ENTER_CRITICAL();
    rmt_ll_tx_enable_loop_autostop(rmt_contex.hal.regs, channel, en);
    RMT_EXIT_CRITICAL();
#endif
    return ESP_OK;
}
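
/*
 * Usage sketch (illustrative): repeating the pattern currently in RMT memory a
 * fixed number of times and stopping automatically once the count is reached.
 * `items`/`item_num` stand for a pattern prepared by the caller.
 *
 *     ESP_ERROR_CHECK(rmt_set_tx_loop_mode(RMT_CHANNEL_0, true));
 *     ESP_ERROR_CHECK(rmt_set_tx_loop_count(RMT_CHANNEL_0, 10));
 *     ESP_ERROR_CHECK(rmt_enable_tx_loop_autostop(RMT_CHANNEL_0, true));
 *     ESP_ERROR_CHECK(rmt_write_items(RMT_CHANNEL_0, items, item_num, true));
 */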
#endif

/**
 * @brief Called during startup to check that this legacy RMT driver is not used together with the new driver
 */
__attribute__((constructor))
static void check_rmt_legacy_driver_conflict(void)
{
    // The symbol is declared weak here; the new RMT driver provides the real implementation.
    // If the new RMT driver is not linked in, `rmt_acquire_group_handle()` resolves to NULL at runtime.
    extern __attribute__((weak)) void *rmt_acquire_group_handle(int group_id);
    if ((void *)rmt_acquire_group_handle != NULL) {
        ESP_EARLY_LOGE(TAG, "CONFLICT! driver_ng is not allowed to be used with the legacy driver");
        abort();
    }
    ESP_EARLY_LOGW(TAG, "legacy driver is deprecated, please migrate to `driver/rmt_tx.h` and/or `driver/rmt_rx.h`");
}