Mirror of https://github.com/espressif/esp-idf.git
Merge branch 'feat/rmt_dma_burst_size' into 'master'

feat(rmt): enable DMA burst transfer

See merge request espressif/esp-idf!31127
Commit: 0f6004e06f
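For orientation, the burst transfer is switched on at the GDMA layer rather than inside the RMT peripheral itself: the driver now configures the DMA channel with gdma_config_transfer() and then asks the channel for the buffer alignment that burst mode requires. A minimal sketch of that sequence, assuming an RX direction and the same 32-byte burst size used in the diff below (error strings and the surrounding driver state are simplified):

    #include "esp_check.h"
    #include "esp_private/gdma.h"

    // Sketch: bring up an AHB GDMA channel with burst transfers enabled and
    // query the internal-memory alignment that DMA buffers must now satisfy.
    static esp_err_t example_enable_dma_burst(gdma_channel_handle_t *ret_chan, size_t *ret_alignment)
    {
        gdma_channel_alloc_config_t alloc_cfg = {
            .direction = GDMA_CHANNEL_DIRECTION_RX,
        };
        ESP_RETURN_ON_ERROR(gdma_new_ahb_channel(&alloc_cfg, ret_chan), "example", "alloc DMA channel failed");

        gdma_transfer_config_t transfer_cfg = {
            .access_ext_mem = false,    // keep the buffers in internal SRAM
            .max_data_burst_size = 32,  // enable burst transfer, capped at 32 bytes
        };
        ESP_RETURN_ON_ERROR(gdma_config_transfer(*ret_chan, &transfer_cfg), "example", "config DMA transfer failed");

        // with burst enabled, the DMA dictates the buffer alignment; don't hard-code it
        gdma_get_alignment_constraints(*ret_chan, ret_alignment, NULL);
        return ESP_OK;
    }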
@@ -139,11 +139,11 @@ static void rmt_module_enable(void)
 {
     RMT_ENTER_CRITICAL();
     if (rmt_contex.rmt_module_enabled == false) {
-        rmt_ll_mem_power_by_pmu(rmt_contex.hal.regs);
         RMT_RCC_ATOMIC() {
             rmt_ll_enable_bus_clock(0, true);
             rmt_ll_reset_register(0);
         }
+        rmt_ll_mem_power_by_pmu(rmt_contex.hal.regs);
         rmt_contex.rmt_module_enabled = true;
     }
     RMT_EXIT_CRITICAL();
@@ -154,10 +154,10 @@ static void rmt_module_disable(void)
 {
     RMT_ENTER_CRITICAL();
     if (rmt_contex.rmt_module_enabled == true) {
+        rmt_ll_mem_force_power_off(rmt_contex.hal.regs);
         RMT_RCC_ATOMIC() {
             rmt_ll_enable_bus_clock(0, false);
         }
-        rmt_ll_mem_force_power_off(rmt_contex.hal.regs);
         rmt_contex.rmt_module_enabled = false;
     }
     RMT_EXIT_CRITICAL();
@@ -53,9 +53,6 @@ extern "C" {

 #define RMT_ALLOW_INTR_PRIORITY_MASK ESP_INTR_FLAG_LOWMED

-// DMA buffer size must align to `rmt_symbol_word_t`
-#define RMT_DMA_DESC_BUF_MAX_SIZE (DMA_DESCRIPTOR_BUFFER_MAX_SIZE & ~(sizeof(rmt_symbol_word_t) - 1))
-
 #define RMT_DMA_NODES_PING_PONG 2 // two nodes ping-pong
 #define RMT_PM_LOCK_NAME_LEN_MAX 16
 #define RMT_GROUP_INTR_PRIORITY_UNINITIALIZED (-1)
@@ -205,6 +202,7 @@ struct rmt_rx_channel_t {
     void *user_data;                     // user context
     rmt_rx_trans_desc_t trans_desc;      // transaction description
     size_t num_dma_nodes;                // number of DMA nodes, determined by how big the memory block that user configures
+    size_t dma_int_mem_alignment;        // DMA buffer alignment (both in size and address) for internal RX memory
     rmt_dma_descriptor_t *dma_nodes;     // DMA link nodes
     rmt_dma_descriptor_t *dma_nodes_nc;  // DMA descriptor nodes accessed in non-cached way
 };
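The new dma_int_mem_alignment field recorded here is what rmt_receive() later checks the caller's buffer against. As a hedged illustration (hypothetical helper, not part of the driver), the check boils down to a power-of-two mask test on both the address and the length:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    // Sketch: a buffer is acceptable for DMA reception only if its address and
    // size are both multiples of the required alignment (assumed power of two).
    static bool example_buffer_is_aligned(const void *buf, size_t len, size_t alignment)
    {
        uintptr_t mask = alignment - 1;
        return (((uintptr_t)buf & mask) == 0) && ((len & mask) == 0);
    }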
@@ -59,12 +59,13 @@ static esp_err_t rmt_rx_init_dma_link(rmt_rx_channel_t *rx_channel, const rmt_rx
         .direction = GDMA_CHANNEL_DIRECTION_RX,
     };
     ESP_RETURN_ON_ERROR(gdma_new_ahb_channel(&dma_chan_config, &rx_channel->base.dma_chan), TAG, "allocate RX DMA channel failed");
-
-    // circular DMA descriptor
-    for (int i = 0; i < rx_channel->num_dma_nodes; i++) {
-        rx_channel->dma_nodes_nc[i].next = &rx_channel->dma_nodes[i + 1];
-    }
-    rx_channel->dma_nodes_nc[rx_channel->num_dma_nodes - 1].next = &rx_channel->dma_nodes[0];
+    gdma_transfer_config_t transfer_cfg = {
+        .access_ext_mem = false, // [IDF-8997]: PSRAM is not supported yet
+        .max_data_burst_size = 32,
+    };
+    ESP_RETURN_ON_ERROR(gdma_config_transfer(rx_channel->base.dma_chan, &transfer_cfg), TAG, "config DMA transfer failed");
+    // get the alignment requirement from DMA
+    gdma_get_alignment_constraints(rx_channel->base.dma_chan, &rx_channel->dma_int_mem_alignment, NULL);

     // register event callbacks
     gdma_rx_event_callbacks_t cbs = {
@@ -72,6 +73,12 @@ static esp_err_t rmt_rx_init_dma_link(rmt_rx_channel_t *rx_channel, const rmt_rx
     };
     // register the DMA callbacks may fail if the interrupt service can not be installed successfully
     ESP_RETURN_ON_ERROR(gdma_register_rx_event_callbacks(rx_channel->base.dma_chan, &cbs, rx_channel), TAG, "register DMA callbacks failed");
+
+    // circular DMA descriptor
+    for (int i = 0; i < rx_channel->num_dma_nodes - 1; i++) {
+        rx_channel->dma_nodes_nc[i].next = &rx_channel->dma_nodes[i + 1];
+    }
+    rx_channel->dma_nodes_nc[rx_channel->num_dma_nodes - 1].next = &rx_channel->dma_nodes[0];
     return ESP_OK;
 }
 #endif // SOC_RMT_SUPPORT_DMA
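The relocated loop above still builds the same structure: a ring of descriptors that the RX DMA can walk indefinitely. A standalone sketch of that linking pattern, with a hypothetical node type standing in for rmt_dma_descriptor_t:

    #include <stddef.h>

    typedef struct example_node_t example_node_t;
    struct example_node_t {
        example_node_t *next;   // stand-in for the descriptor's `next` pointer
        // a real descriptor also carries a buffer pointer, length, and owner bits
    };

    // Link num_nodes descriptors into a circular list: 0 -> 1 -> ... -> N-1 -> 0
    static void example_link_ring(example_node_t *nodes, size_t num_nodes)
    {
        for (size_t i = 0; i < num_nodes - 1; i++) {
            nodes[i].next = &nodes[i + 1];
        }
        nodes[num_nodes - 1].next = &nodes[0];
    }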
@@ -199,32 +206,32 @@ esp_err_t rmt_new_rx_channel(const rmt_rx_channel_config_t *config, rmt_channel_
     ESP_GOTO_ON_FALSE(rx_channel, ESP_ERR_NO_MEM, err, TAG, "no mem for rx channel");
     // gpio is not configured yet
     rx_channel->base.gpio_num = -1;

 #if SOC_RMT_SUPPORT_DMA
     // create DMA descriptor
     size_t num_dma_nodes = 0;
     if (config->flags.with_dma) {
-        mem_caps |= MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA;
-        num_dma_nodes = config->mem_block_symbols * sizeof(rmt_symbol_word_t) / RMT_DMA_DESC_BUF_MAX_SIZE + 1;
-        num_dma_nodes = MAX(2, num_dma_nodes); // at least 2 DMA nodes for ping-pong
         // DMA descriptors must be placed in internal SRAM
-        uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
-        // the alignment should meet both the DMA and cache requirement
-        size_t alignment = MAX(data_cache_line_size, RMT_DMA_DESC_ALIGN);
-        size_t dma_nodes_size = ALIGN_UP(num_dma_nodes * sizeof(rmt_dma_descriptor_t), alignment);
-        rmt_dma_descriptor_t *dma_nodes = heap_caps_aligned_calloc(alignment, 1, dma_nodes_size, mem_caps);
+        mem_caps |= MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA;
+        num_dma_nodes = config->mem_block_symbols * sizeof(rmt_symbol_word_t) / DMA_DESCRIPTOR_BUFFER_MAX_SIZE + 1;
+        num_dma_nodes = MAX(2, num_dma_nodes); // at least 2 DMA nodes for ping-pong
+        rmt_dma_descriptor_t *dma_nodes = heap_caps_aligned_calloc(RMT_DMA_DESC_ALIGN, num_dma_nodes, sizeof(rmt_dma_descriptor_t), mem_caps);
         ESP_GOTO_ON_FALSE(dma_nodes, ESP_ERR_NO_MEM, err, TAG, "no mem for rx channel DMA nodes");
         rx_channel->dma_nodes = dma_nodes;
         // do memory sync only when the data cache exists
+        uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
         if (data_cache_line_size) {
-            // write back and then invalidate the cached dma_nodes, we will skip the cache (by non-cacheable address) when access the dma_nodes
-            // even the cache auto-write back happens, there's no risk the dma_nodes will be overwritten
-            ESP_GOTO_ON_ERROR(esp_cache_msync(dma_nodes, dma_nodes_size,
-                                              ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE),
+            // write back and then invalidate the cached dma_nodes, because later the DMA nodes are accessed by non-cacheable address
+            ESP_GOTO_ON_ERROR(esp_cache_msync(dma_nodes, num_dma_nodes * sizeof(rmt_dma_descriptor_t),
+                                              ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE | ESP_CACHE_MSYNC_FLAG_UNALIGNED),
                               err, TAG, "cache sync failed");
         }
         // we will use the non-cached address to manipulate the DMA descriptor, for simplicity
         rx_channel->dma_nodes_nc = (rmt_dma_descriptor_t *)RMT_GET_NON_CACHE_ADDR(dma_nodes);
     }
     rx_channel->num_dma_nodes = num_dma_nodes;
 #endif // SOC_RMT_SUPPORT_DMA

     // register the channel to group
     ESP_GOTO_ON_ERROR(rmt_rx_register_to_group(rx_channel, config), err, TAG, "register channel failed");
     rmt_group_t *group = rx_channel->base.group;
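The node-count arithmetic in this hunk is worth spelling out. A hedged sketch with the symbol size fixed at 4 bytes (the size of rmt_symbol_word_t) and the per-descriptor payload limit assumed to be the usual 4095-byte DMA_DESCRIPTOR_BUFFER_MAX_SIZE:

    #include <stddef.h>

    #define EXAMPLE_MAX(a, b)          (((a) > (b)) ? (a) : (b))
    #define EXAMPLE_SYMBOL_SIZE        4      // sizeof(rmt_symbol_word_t): one RMT symbol is 32 bits
    #define EXAMPLE_DESC_BUF_MAX_SIZE  4095   // assumed DMA_DESCRIPTOR_BUFFER_MAX_SIZE on current chips

    // Sketch of the node-count rule used above: enough descriptors to cover the
    // requested symbols, but never fewer than two so ping-pong reception works.
    static size_t example_num_dma_nodes(size_t mem_block_symbols)
    {
        size_t nodes = mem_block_symbols * EXAMPLE_SYMBOL_SIZE / EXAMPLE_DESC_BUF_MAX_SIZE + 1;
        return EXAMPLE_MAX(2, nodes);
    }
    // e.g. 1024 symbols -> 4096 bytes -> 4096 / 4095 + 1 = 2 descriptors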
@@ -377,25 +384,24 @@ esp_err_t rmt_receive(rmt_channel_handle_t channel, void *buffer, size_t buffer_
     ESP_RETURN_ON_FALSE_ISR(!config->flags.en_partial_rx, ESP_ERR_NOT_SUPPORTED, TAG, "partial receive not supported");
 #endif
     rmt_rx_channel_t *rx_chan = __containerof(channel, rmt_rx_channel_t, base);
-    size_t per_dma_block_size = 0;
-    size_t last_dma_block_size = 0;
-    uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
+    size_t mem_alignment = sizeof(rmt_symbol_word_t);

 #if SOC_RMT_SUPPORT_DMA
     if (channel->dma_chan) {
-        // Currently we assume the user buffer is allocated from internal RAM, PSRAM is not supported yet.
-        ESP_RETURN_ON_FALSE_ISR(esp_ptr_internal(buffer), ESP_ERR_INVALID_ARG, TAG, "user buffer not allocated from internal RAM");
-        // DMA doesn't have alignment requirement for SRAM buffer if the burst mode is not enabled,
-        // but we need to make sure the buffer is aligned to cache line size
-        uint32_t align_mask = data_cache_line_size ? (data_cache_line_size - 1) : 0;
-        ESP_RETURN_ON_FALSE_ISR(((uintptr_t)buffer & align_mask) == 0, ESP_ERR_INVALID_ARG, TAG, "buffer address not aligned");
-        ESP_RETURN_ON_FALSE_ISR((buffer_size & align_mask) == 0, ESP_ERR_INVALID_ARG, TAG, "buffer size not aligned");
-        ESP_RETURN_ON_FALSE_ISR(buffer_size <= rx_chan->num_dma_nodes * RMT_DMA_DESC_BUF_MAX_SIZE,
-                                ESP_ERR_INVALID_ARG, TAG, "buffer size exceeds DMA capacity");
-        per_dma_block_size = buffer_size / rx_chan->num_dma_nodes;
-        per_dma_block_size = ALIGN_DOWN(per_dma_block_size, sizeof(rmt_symbol_word_t));
-        last_dma_block_size = buffer_size - per_dma_block_size * (rx_chan->num_dma_nodes - 1);
-        ESP_RETURN_ON_FALSE_ISR(last_dma_block_size <= RMT_DMA_DESC_BUF_MAX_SIZE, ESP_ERR_INVALID_ARG, TAG, "buffer size exceeds DMA capacity");
+        // append the alignment requirement from the DMA
+        mem_alignment = MAX(mem_alignment, rx_chan->dma_int_mem_alignment);
+        // [IDF-8997]: Currently we assume the user buffer is allocated from internal RAM, PSRAM is not supported yet.
+        ESP_RETURN_ON_FALSE_ISR(esp_ptr_internal(buffer), ESP_ERR_INVALID_ARG, TAG, "user buffer not in the internal RAM");
+        size_t max_buf_sz_per_dma_node = ALIGN_DOWN(DMA_DESCRIPTOR_BUFFER_MAX_SIZE, mem_alignment);
+        ESP_RETURN_ON_FALSE_ISR(buffer_size <= rx_chan->num_dma_nodes * max_buf_sz_per_dma_node,
+                                ESP_ERR_INVALID_ARG, TAG, "buffer size exceeds DMA capacity: %zu", rx_chan->num_dma_nodes * max_buf_sz_per_dma_node);
     }
 #endif // SOC_RMT_SUPPORT_DMA

+    // check buffer alignment
+    uint32_t align_check_mask = mem_alignment - 1;
+    ESP_RETURN_ON_FALSE_ISR((((uintptr_t)buffer & align_check_mask) == 0) && ((buffer_size & align_check_mask) == 0), ESP_ERR_INVALID_ARG,
+                            TAG, "buffer address or size are not %zu bytes aligned", mem_alignment);

     rmt_group_t *group = channel->group;
     rmt_hal_context_t *hal = &group->hal;
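With the compile-time RMT_DMA_DESC_BUF_MAX_SIZE cap gone, the receive path now derives the per-node capacity from the run-time alignment. A hedged numeric sketch of the capacity check above (the 4095-byte descriptor limit and the 64-byte alignment are illustrative assumptions):

    #include <stddef.h>

    #define EXAMPLE_ALIGN_DOWN(num, align)  ((num) & ~((align) - 1))

    // Sketch: the largest buffer rmt_receive() will accept is
    // num_dma_nodes * ALIGN_DOWN(max descriptor payload, required alignment).
    static size_t example_rx_dma_capacity(size_t num_dma_nodes, size_t mem_alignment)
    {
        size_t desc_buf_max = 4095;  // assumed DMA_DESCRIPTOR_BUFFER_MAX_SIZE
        size_t max_buf_sz_per_dma_node = EXAMPLE_ALIGN_DOWN(desc_buf_max, mem_alignment);
        return num_dma_nodes * max_buf_sz_per_dma_node;
    }
    // e.g. 2 nodes with a 64-byte alignment -> 2 * 4032 = 8064 bytes of RX buffer at most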
@@ -421,17 +427,23 @@ esp_err_t rmt_receive(rmt_channel_handle_t channel, void *buffer, size_t buffer_
     t->dma_desc_index = 0;
     t->flags.en_partial_rx = config->flags.en_partial_rx;

-    if (channel->dma_chan) {
 #if SOC_RMT_SUPPORT_DMA
+    if (channel->dma_chan) {
         // invalidate the user buffer, in case cache auto-write back happens and breaks the data just written by the DMA
-        if (data_cache_line_size) {
+        uint32_t int_mem_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
+        if (int_mem_cache_line_size) {
             // this function will also check the alignment of the buffer and size, against the cache line size
             ESP_RETURN_ON_ERROR_ISR(esp_cache_msync(buffer, buffer_size, ESP_CACHE_MSYNC_FLAG_DIR_M2C), TAG, "cache sync failed");
         }
+        // we will mount the buffer to multiple DMA nodes, in a balanced way
+        size_t per_dma_block_size = buffer_size / rx_chan->num_dma_nodes;
+        per_dma_block_size = ALIGN_DOWN(per_dma_block_size, mem_alignment);
+        size_t last_dma_block_size = buffer_size - per_dma_block_size * (rx_chan->num_dma_nodes - 1);
         rmt_rx_mount_dma_buffer(rx_chan, buffer, buffer_size, per_dma_block_size, last_dma_block_size);
         gdma_reset(channel->dma_chan);
         gdma_start(channel->dma_chan, (intptr_t)rx_chan->dma_nodes); // note, we must use the cached descriptor address to start the DMA
-#endif
     }
+#endif

     rx_chan->mem_off = 0;
     portENTER_CRITICAL_SAFE(&channel->spinlock);
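The balanced mounting introduced here spreads the user buffer evenly across all descriptors instead of filling the first ones to their maximum. A small arithmetic sketch of that split:

    #include <stddef.h>

    #define EXAMPLE_ALIGN_DOWN(num, align)  ((num) & ~((align) - 1))

    // Sketch: divide buffer_size across num_nodes descriptors; every node gets the
    // same aligned share except the last one, which absorbs the remainder.
    static void example_split_rx_buffer(size_t buffer_size, size_t num_nodes, size_t alignment,
                                        size_t *per_block, size_t *last_block)
    {
        *per_block = EXAMPLE_ALIGN_DOWN(buffer_size / num_nodes, alignment);
        *last_block = buffer_size - (*per_block) * (num_nodes - 1);
    }
    // e.g. 1000 bytes over 3 nodes with 4-byte alignment -> 332, 332, 336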
@@ -50,26 +50,55 @@ static bool rmt_dma_tx_eof_cb(gdma_channel_handle_t dma_chan, gdma_event_data_t

 static esp_err_t rmt_tx_init_dma_link(rmt_tx_channel_t *tx_channel, const rmt_tx_channel_config_t *config)
 {
-    // For simplicity, the encoder will access the dma_mem_base in a non-cached way
-    // and we allocate the dma_mem_base from the internal SRAM for performance
-    uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
-    // the alignment should meet both the DMA and cache requirement
-    size_t alignment = MAX(data_cache_line_size, sizeof(rmt_symbol_word_t));
-    size_t dma_mem_base_size = ALIGN_UP(config->mem_block_symbols * sizeof(rmt_symbol_word_t), alignment);
-    rmt_symbol_word_t *dma_mem_base = heap_caps_aligned_calloc(alignment, 1, dma_mem_base_size,
+    gdma_channel_alloc_config_t dma_chan_config = {
+        .direction = GDMA_CHANNEL_DIRECTION_TX,
+    };
+    ESP_RETURN_ON_ERROR(gdma_new_ahb_channel(&dma_chan_config, &tx_channel->base.dma_chan), TAG, "allocate TX DMA channel failed");
+    gdma_strategy_config_t gdma_strategy_conf = {
+        .auto_update_desc = true,
+        .owner_check = true,
+    };
+    gdma_apply_strategy(tx_channel->base.dma_chan, &gdma_strategy_conf);
+    gdma_transfer_config_t transfer_cfg = {
+        .access_ext_mem = false, // for performance, we don't use external memory as the DMA buffer
+        .max_data_burst_size = 32,
+    };
+    ESP_RETURN_ON_ERROR(gdma_config_transfer(tx_channel->base.dma_chan, &transfer_cfg), TAG, "config DMA transfer failed");
+    gdma_tx_event_callbacks_t cbs = {
+        .on_trans_eof = rmt_dma_tx_eof_cb,
+    };
+    // register the DMA callbacks may fail if the interrupt service can not be installed successfully
+    ESP_RETURN_ON_ERROR(gdma_register_tx_event_callbacks(tx_channel->base.dma_chan, &cbs, tx_channel), TAG, "register DMA callbacks failed");
+
+    size_t int_alignment = 0;
+    // get the alignment requirement from DMA
+    gdma_get_alignment_constraints(tx_channel->base.dma_chan, &int_alignment, NULL);
+    // apply RMT hardware alignment requirement
+    int_alignment = MAX(int_alignment, sizeof(rmt_symbol_word_t));
+    // the memory returned by `heap_caps_aligned_calloc` also meets the cache alignment requirement (both address and size)
+    rmt_symbol_word_t *dma_mem_base = heap_caps_aligned_calloc(int_alignment, sizeof(rmt_symbol_word_t), config->mem_block_symbols,
                                                                RMT_MEM_ALLOC_CAPS | MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);
     ESP_RETURN_ON_FALSE(dma_mem_base, ESP_ERR_NO_MEM, TAG, "no mem for tx DMA buffer");
     tx_channel->dma_mem_base = dma_mem_base;
-    // do memory sync only when the data cache exists
+    uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
+    // do memory sync if the dma buffer is cached
     if (data_cache_line_size) {
-        // write back and then invalidate the cache, we will skip the cache (by non-cacheable address) when access the dma_mem_base
-        // even the cache auto-write back happens, there's no risk the dma_mem_base will be overwritten
-        ESP_RETURN_ON_ERROR(esp_cache_msync(dma_mem_base, dma_mem_base_size,
-                                            ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE),
+        // write back and then invalidate the cache, because later RMT encoder accesses the dma_mem_base by non-cacheable address
+        ESP_RETURN_ON_ERROR(esp_cache_msync(dma_mem_base, sizeof(rmt_symbol_word_t) * config->mem_block_symbols,
+                                            ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_UNALIGNED | ESP_CACHE_MSYNC_FLAG_INVALIDATE),
                             TAG, "cache sync failed");
     }
-    // we use the non-cached address to manipulate this DMA buffer
+    // For simplicity, encoder will use the non-cached address to read/write the DMA buffer
     tx_channel->dma_mem_base_nc = (rmt_symbol_word_t *)RMT_GET_NON_CACHE_ADDR(dma_mem_base);
+    // the DMA buffer size should be aligned to the DMA requirement
+    size_t mount_size_per_node = ALIGN_DOWN(config->mem_block_symbols * sizeof(rmt_symbol_word_t) / RMT_DMA_NODES_PING_PONG, int_alignment);
+    // check the upper and lower bound of mount_size_per_node
+    ESP_RETURN_ON_FALSE(mount_size_per_node >= sizeof(rmt_symbol_word_t), ESP_ERR_INVALID_ARG,
+                        TAG, "mem_block_symbols is too small");
+    ESP_RETURN_ON_FALSE(mount_size_per_node <= DMA_DESCRIPTOR_BUFFER_MAX_SIZE, ESP_ERR_INVALID_ARG,
+                        TAG, "mem_block_symbols can't exceed %zu", DMA_DESCRIPTOR_BUFFER_MAX_SIZE * RMT_DMA_NODES_PING_PONG / sizeof(rmt_symbol_word_t));
+
+    tx_channel->ping_pong_symbols = mount_size_per_node / sizeof(rmt_symbol_word_t);
     for (int i = 0; i < RMT_DMA_NODES_PING_PONG; i++) {
         // each descriptor shares half of the DMA buffer
         tx_channel->dma_nodes_nc[i].buffer = dma_mem_base + tx_channel->ping_pong_symbols * i;
@@ -80,20 +109,6 @@ static esp_err_t rmt_tx_init_dma_link(rmt_tx_channel_t *tx_channel, const rmt_tx
         tx_channel->dma_nodes_nc[i].dw0.suc_eof = 1;
     }

-    gdma_channel_alloc_config_t dma_chan_config = {
-        .direction = GDMA_CHANNEL_DIRECTION_TX,
-    };
-    ESP_RETURN_ON_ERROR(gdma_new_ahb_channel(&dma_chan_config, &tx_channel->base.dma_chan), TAG, "allocate TX DMA channel failed");
-    gdma_strategy_config_t gdma_strategy_conf = {
-        .auto_update_desc = true,
-        .owner_check = true,
-    };
-    gdma_apply_strategy(tx_channel->base.dma_chan, &gdma_strategy_conf);
-    gdma_tx_event_callbacks_t cbs = {
-        .on_trans_eof = rmt_dma_tx_eof_cb,
-    };
-    // register the DMA callbacks may fail if the interrupt service can not be installed successfully
-    ESP_RETURN_ON_ERROR(gdma_register_tx_event_callbacks(tx_channel->base.dma_chan, &cbs, tx_channel), TAG, "register DMA callbacks failed");
     return ESP_OK;
 }
 #endif // SOC_RMT_SUPPORT_DMA
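The TX path keeps its two-descriptor ping-pong scheme, but the half-buffer size is now trimmed to the DMA alignment obtained above. A hedged sketch of that layout with plain uint32_t standing in for rmt_symbol_word_t:

    #include <stddef.h>
    #include <stdint.h>

    #define EXAMPLE_ALIGN_DOWN(num, align)  ((num) & ~((align) - 1))
    #define EXAMPLE_NODES_PING_PONG         2

    // Sketch: split a TX staging buffer of `total_symbols` 32-bit RMT symbols into two
    // equal halves, each half mounted on one DMA descriptor (ping-pong).
    static void example_mount_ping_pong(uint32_t *dma_mem_base, size_t total_symbols, size_t int_alignment,
                                        uint32_t *node_buf[EXAMPLE_NODES_PING_PONG], size_t *symbols_per_node)
    {
        size_t mount_size = EXAMPLE_ALIGN_DOWN(total_symbols * sizeof(uint32_t) / EXAMPLE_NODES_PING_PONG, int_alignment);
        *symbols_per_node = mount_size / sizeof(uint32_t);
        for (int i = 0; i < EXAMPLE_NODES_PING_PONG; i++) {
            node_buf[i] = dma_mem_base + (*symbols_per_node) * i;   // each node owns half of the buffer
        }
    }
    // e.g. 128 symbols with 32-byte alignment -> 256-byte halves -> 64 symbols per node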
@@ -110,7 +125,6 @@ static esp_err_t rmt_tx_register_to_group(rmt_tx_channel_t *tx_channel, const rm
         mem_block_num = 1;
         // Only the last channel has the DMA capability
         channel_scan_start = RMT_TX_CHANNEL_OFFSET_IN_GROUP + SOC_RMT_TX_CANDIDATES_PER_GROUP - 1;
-        tx_channel->ping_pong_symbols = config->mem_block_symbols / 2;
     } else {
         // one channel can occupy multiple memory blocks
         mem_block_num = config->mem_block_symbols / SOC_RMT_MEM_WORDS_PER_CHANNEL;
@@ -247,13 +261,7 @@ esp_err_t rmt_new_tx_channel(const rmt_tx_channel_config_t *config, rmt_channel_
     ESP_RETURN_ON_FALSE(GPIO_IS_VALID_OUTPUT_GPIO(config->gpio_num), ESP_ERR_INVALID_ARG, TAG, "invalid GPIO number %d", config->gpio_num);
     ESP_RETURN_ON_FALSE((config->mem_block_symbols & 0x01) == 0 && config->mem_block_symbols >= SOC_RMT_MEM_WORDS_PER_CHANNEL,
                         ESP_ERR_INVALID_ARG, TAG, "mem_block_symbols must be even and at least %d", SOC_RMT_MEM_WORDS_PER_CHANNEL);

-#if SOC_RMT_SUPPORT_DMA
-    // we only support 2 nodes ping-pong, if the configured memory block size needs more than two DMA descriptors, should treat it as invalid
-    ESP_RETURN_ON_FALSE(config->mem_block_symbols <= RMT_DMA_DESC_BUF_MAX_SIZE * RMT_DMA_NODES_PING_PONG / sizeof(rmt_symbol_word_t),
-                        ESP_ERR_INVALID_ARG, TAG, "mem_block_symbols can't exceed %d",
-                        RMT_DMA_DESC_BUF_MAX_SIZE * RMT_DMA_NODES_PING_PONG / sizeof(rmt_symbol_word_t));
-#else
+#if !SOC_RMT_SUPPORT_DMA
     ESP_RETURN_ON_FALSE(config->flags.with_dma == 0, ESP_ERR_NOT_SUPPORTED, TAG, "DMA not supported");
 #endif
@@ -269,19 +277,16 @@ esp_err_t rmt_new_tx_channel(const rmt_tx_channel_config_t *config, rmt_channel_
     tx_channel->base.gpio_num = -1;
     // create DMA descriptors
     if (config->flags.with_dma) {
-        mem_caps |= MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA;
         // DMA descriptors must be placed in internal SRAM
-        uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
-        // the alignment should meet both the DMA and cache requirement
-        size_t alignment = MAX(data_cache_line_size, RMT_DMA_DESC_ALIGN);
-        size_t dma_nodes_mem_size = ALIGN_UP(RMT_DMA_NODES_PING_PONG * sizeof(rmt_dma_descriptor_t), alignment);
-        rmt_dma_descriptor_t *dma_nodes = heap_caps_aligned_calloc(alignment, 1, dma_nodes_mem_size, mem_caps);
+        mem_caps |= MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA;
+        rmt_dma_descriptor_t *dma_nodes = heap_caps_aligned_calloc(RMT_DMA_DESC_ALIGN, RMT_DMA_NODES_PING_PONG, sizeof(rmt_dma_descriptor_t), mem_caps);
         ESP_GOTO_ON_FALSE(dma_nodes, ESP_ERR_NO_MEM, err, TAG, "no mem for tx DMA nodes");
         tx_channel->dma_nodes = dma_nodes;
-        // write back and then invalidate the cached dma_nodes, we will skip the cache (by non-cacheable address) when access the dma_nodes
+        // write back and then invalidate the cached dma_nodes, because later the DMA nodes are accessed by non-cacheable address
+        uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
         if (data_cache_line_size) {
-            ESP_GOTO_ON_ERROR(esp_cache_msync(dma_nodes, dma_nodes_mem_size,
-                                              ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE),
+            ESP_GOTO_ON_ERROR(esp_cache_msync(dma_nodes, RMT_DMA_NODES_PING_PONG * sizeof(rmt_dma_descriptor_t),
+                                              ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE | ESP_CACHE_MSYNC_FLAG_UNALIGNED),
                               err, TAG, "cache sync failed");
         }
         // we will use the non-cached address to manipulate the DMA descriptor, for simplicity
@@ -181,6 +181,10 @@ static void test_rmt_rx_nec_carrier(size_t mem_block_symbols, bool with_dma, rmt
     TEST_ASSERT_EQUAL(34, test_user_data.received_symbol_num);

     TEST_ESP_OK(rmt_tx_wait_all_done(tx_channel, -1));
+
+    // test rmt receive with unaligned buffer
+    TEST_ESP_ERR(ESP_ERR_INVALID_ARG, rmt_receive(rx_channel, remote_codes, 13, &receive_config));
+
     printf("disable tx and rx channels\r\n");
     TEST_ESP_OK(rmt_disable(tx_channel));
     TEST_ESP_OK(rmt_disable(rx_channel));
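The new negative test passes a 13-byte buffer, which can no longer satisfy the alignment rule enforced by rmt_receive(). For the positive case, a hedged usage sketch that allocates the receive buffer so both its address and size meet an assumed 64-byte DMA/cache alignment (the real requirement comes from the channel configuration):

    #include "driver/rmt_rx.h"
    #include "esp_heap_caps.h"

    // Sketch: hand rmt_receive() a buffer whose address and size are both
    // multiples of an assumed 64-byte alignment, allocated from internal RAM.
    static esp_err_t example_receive_aligned(rmt_channel_handle_t rx_channel,
                                             const rmt_receive_config_t *receive_config)
    {
        size_t buf_size = 256;  // multiple of the assumed alignment
        void *buf = heap_caps_aligned_calloc(64, 1, buf_size, MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA);
        if (!buf) {
            return ESP_ERR_NO_MEM;
        }
        // the buffer must stay valid until the on_recv_done callback fires;
        // freeing it is intentionally omitted in this sketch
        return rmt_receive(rx_channel, buf, buf_size, receive_config);
    }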