diff --git a/components/esp_hw_support/CMakeLists.txt b/components/esp_hw_support/CMakeLists.txt index c77991945e..cf8e590e94 100644 --- a/components/esp_hw_support/CMakeLists.txt +++ b/components/esp_hw_support/CMakeLists.txt @@ -82,6 +82,10 @@ if(NOT BOOTLOADER_BUILD) list(APPEND srcs "dma/gdma_etm.c") endif() + if(CONFIG_SOC_DW_GDMA_SUPPORTED) + list(APPEND srcs "dma/dw_gdma.c") + endif() + if(CONFIG_SOC_SYSTIMER_SUPPORTED) list(APPEND srcs "port/${target}/systimer.c") endif() diff --git a/components/esp_hw_support/Kconfig b/components/esp_hw_support/Kconfig index aa379e8edc..efd8729152 100644 --- a/components/esp_hw_support/Kconfig +++ b/components/esp_hw_support/Kconfig @@ -232,31 +232,7 @@ menu "Hardware Settings" Note that, this option only controls the ETM related driver log, won't affect other drivers. endmenu # ETM Configuration - menu "GDMA Configuration" - depends on SOC_GDMA_SUPPORTED - config GDMA_CTRL_FUNC_IN_IRAM - bool "Place GDMA control functions into IRAM" - default n - help - Place GDMA control functions (like start/stop/append/reset) into IRAM, - so that these functions can be IRAM-safe and able to be called in the other IRAM interrupt context. - Enabling this option can improve driver performance as well. - - config GDMA_ISR_IRAM_SAFE - bool "GDMA ISR IRAM-Safe" - default n - help - This will ensure the GDMA interrupt handler is IRAM-Safe, allow to avoid flash - cache misses, and also be able to run whilst the cache is disabled. - (e.g. SPI Flash write). - - config GDMA_ENABLE_DEBUG_LOG - bool "Enable debug log" - default n - help - Wether to enable the debug log message for GDMA driver. - Note that, this option only controls the GDMA driver log, won't affect other drivers. 
- endmenu # GDMA Configuration + rsource "./dma/Kconfig.dma" menu "Main XTAL Config" choice XTAL_FREQ_SEL diff --git a/components/esp_hw_support/README.md b/components/esp_hw_support/README.md index 20892026c4..5a95217589 100644 --- a/components/esp_hw_support/README.md +++ b/components/esp_hw_support/README.md @@ -75,3 +75,13 @@ classDiagram class gptimer_etm_task_t { } ``` + +## DMA Service + +With the increasing demand, the hardware design of DMA is changing along the way. At first, each peripheral had a dedicated DMA controller. Later, a centralized DMA controller is introduced, which is called `GDMA` in the software. + +There may be multiple GDMA instances on a chip, some are attached to the AHB bus and some are attached to the AXI bus. But their functionalities are almost the same. + +Some high-performance peripherals, such as MIPI, require DMA to provide more functions, such as hardware handshake mechanism, address growth mode, out-of-order transmission and so on. Therefore, a new DMA controller, called `DW_GDMA`, was born. The prefix *DW* is taken from *DesignWare*. + +Please note that the specific DMA controller to be used for peripherals is determined by the specific chip. It is possible that, on chip A, SPI works with AHB GDMA, while on chip B, SPI works with AXI GDMA. diff --git a/components/esp_hw_support/dma/Kconfig.dma b/components/esp_hw_support/dma/Kconfig.dma new file mode 100644 index 0000000000..6ba2246ef3 --- /dev/null +++ b/components/esp_hw_support/dma/Kconfig.dma @@ -0,0 +1,57 @@ +menu "GDMA Configurations" + depends on SOC_GDMA_SUPPORTED + config GDMA_CTRL_FUNC_IN_IRAM + bool "Place GDMA control functions in IRAM" + default n + help + Place GDMA control functions (like start/stop/append/reset) into IRAM, + so that these functions can be IRAM-safe and able to be called in the other IRAM interrupt context. 
+ + config GDMA_ISR_IRAM_SAFE + bool "GDMA ISR IRAM-Safe" + default n + help + This will ensure the GDMA interrupt handler is IRAM-Safe, allow to avoid flash + cache misses, and also be able to run whilst the cache is disabled. + (e.g. SPI Flash write). + + config GDMA_ENABLE_DEBUG_LOG + bool "Enable debug log" + default n + help + Whether to enable the debug log message for GDMA driver. + Note that, this option only controls the GDMA driver log, won't affect other drivers. +endmenu # GDMA Configurations + +menu "DW_GDMA Configurations" + depends on SOC_DW_GDMA_SUPPORTED + + config DW_GDMA_CTRL_FUNC_IN_IRAM + bool + default n + help + Place DW_GDMA control functions (e.g. dw_gdma_channel_continue) into IRAM, + so that these functions can be IRAM-safe and able to be called in the other IRAM interrupt context. + + config DW_GDMA_SETTER_FUNC_IN_IRAM + bool + default n + help + Place DW_GDMA setter functions (e.g. dw_gdma_channel_set_block_markers) into IRAM, + so that these functions can be IRAM-safe and able to be called in the other IRAM interrupt context. + + config DW_GDMA_ISR_IRAM_SAFE + bool + default n + help + This will ensure the DW_GDMA interrupt handler is IRAM-Safe, allow to avoid flash + cache misses, and also be able to run whilst the cache is disabled. + (e.g. SPI Flash write). + + config DW_GDMA_ENABLE_DEBUG_LOG + bool "Enable debug log" + default n + help + Whether to enable the debug log message for DW_GDMA driver. + Note that, this option only controls the DW_GDMA driver log, won't affect other drivers. 
+endmenu # DW_GDMA Configurations diff --git a/components/esp_hw_support/dma/dw_gdma.c b/components/esp_hw_support/dma/dw_gdma.c new file mode 100644 index 0000000000..fc69c75d84 --- /dev/null +++ b/components/esp_hw_support/dma/dw_gdma.c @@ -0,0 +1,672 @@ +/* + * SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include <stdlib.h> +#include <string.h> +#include <stdint.h> +#include <stdbool.h> +#include <sys/lock.h> +#include "sdkconfig.h" +#if CONFIG_DW_GDMA_ENABLE_DEBUG_LOG +// The local log level must be defined before including esp_log.h +// Set the maximum log level for this source file +#define LOG_LOCAL_LEVEL ESP_LOG_DEBUG +#endif +#include "freertos/FreeRTOS.h" +#include "freertos/task.h" +#include "soc/soc_caps.h" +#include "soc/interrupts.h" +#include "esp_log.h" +#include "esp_check.h" +#include "esp_intr_alloc.h" +#include "esp_memory_utils.h" +#include "esp_private/periph_ctrl.h" +#include "esp_private/dw_gdma.h" +#include "hal/dw_gdma_hal.h" +#include "hal/dw_gdma_ll.h" +#include "hal/cache_hal.h" +#include "hal/cache_ll.h" + +static const char *TAG = "dw-gdma"; + +#if !SOC_RCC_IS_INDEPENDENT +// Reset and Clock Control registers are mixing with other peripherals, so we need to use a critical section +#define DW_GDMA_RCC_ATOMIC() PERIPH_RCC_ATOMIC() +#else +#define DW_GDMA_RCC_ATOMIC() +#endif + +#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE +#define DW_GDMA_GET_NON_CACHE_ADDR(addr) ((addr) ? 
CACHE_LL_L2MEM_NON_CACHE_ADDR(addr) : 0) +#else +#define DW_GDMA_GET_NON_CACHE_ADDR(addr) (addr) +#endif + +#if CONFIG_DW_GDMA_ISR_IRAM_SAFE || CONFIG_DW_GDMA_CTRL_FUNC_IN_IRAM || CONFIG_DW_GDMA_SETTER_FUNC_IN_IRAM +#define DW_GDMA_MEM_ALLOC_CAPS (MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT) +#else +#define DW_GDMA_MEM_ALLOC_CAPS MALLOC_CAP_DEFAULT +#endif + +#if CONFIG_DW_GDMA_ISR_IRAM_SAFE +#define DW_GDMA_INTR_ALLOC_FLAGS (ESP_INTR_FLAG_IRAM) +#else +#define DW_GDMA_INTR_ALLOC_FLAGS 0 +#endif + +#define DW_GDMA_ALLOW_INTR_PRIORITY_MASK ESP_INTR_FLAG_LOWMED + +typedef struct dw_gdma_group_t dw_gdma_group_t; +typedef struct dw_gdma_channel_t dw_gdma_channel_t; + +typedef struct dw_gdma_link_list_t { + uint32_t num_items; // number of items in the link list + dw_gdma_link_list_item_t *items; // pointer to the link list items + dw_gdma_link_list_item_t *items_nc; // pointer to the link list items, non-cached +} dw_gdma_link_list_t; + +typedef struct { + _lock_t mutex; // platform level mutex lock + dw_gdma_group_t *groups[DW_GDMA_LL_GROUPS]; // array of DMA group instances + int group_ref_counts[DW_GDMA_LL_GROUPS]; // reference count used to protect group install/uninstall +} dw_gdma_platform_t; + +struct dw_gdma_group_t { + int group_id; // Group ID, index from 0 + dw_gdma_hal_context_t hal; // HAL instance is at group level + int intr_priority; // all channels in the same group should share the same interrupt priority + portMUX_TYPE spinlock; // group level spinlock, protect group level stuffs, e.g. 
hal object, pair handle slots and reference count of each pair + dw_gdma_channel_t *channels[DW_GDMA_LL_CHANNELS_PER_GROUP]; // handles of DMA channels +}; + +struct dw_gdma_channel_t { + int chan_id; // channel ID, index from 0 + intr_handle_t intr; // per-channel interrupt handle + portMUX_TYPE spinlock; // channel level spinlock + dw_gdma_group_t *group; // pointer to the group which the channel belongs to + void *user_data; // user registered DMA event data + dw_gdma_event_callbacks_t cbs; // Event callbacks + dw_gdma_block_transfer_type_t src_transfer_type; // transfer type for source + dw_gdma_block_transfer_type_t dst_transfer_type; // transfer type for destination +}; + +// dw_gdma driver platform +static dw_gdma_platform_t s_platform; + +static dw_gdma_group_t *dw_gdma_acquire_group_handle(int group_id) +{ + bool new_group = false; + dw_gdma_group_t *group = NULL; + + // prevent install dw_gdma group concurrently + _lock_acquire(&s_platform.mutex); + if (!s_platform.groups[group_id]) { + // The group is handle is not created yet + group = heap_caps_calloc(1, sizeof(dw_gdma_group_t), DW_GDMA_MEM_ALLOC_CAPS); + if (group) { + new_group = true; + s_platform.groups[group_id] = group; + // enable APB to access DMA registers + DW_GDMA_RCC_ATOMIC() { + dw_gdma_ll_enable_bus_clock(group_id, true); + dw_gdma_ll_reset_register(group_id); + } + // initialize the HAL context + dw_gdma_hal_config_t hal_config = {}; + dw_gdma_hal_init(&group->hal, &hal_config); + } + } else { + // the group is installed, we just retrieve it and increase the reference count + group = s_platform.groups[group_id]; + } + if (group) { + // someone acquired the group handle means we have a new object that refer to this group + s_platform.group_ref_counts[group_id]++; + } + _lock_release(&s_platform.mutex); + + if (new_group) { + portMUX_INITIALIZE(&group->spinlock); + group->group_id = group_id; + group->intr_priority = -1; // interrupt priority not assigned yet + ESP_LOGD(TAG, "new group 
(%d) at %p", group_id, group); + } + + return group; +} + +static void dw_gdma_release_group_handle(dw_gdma_group_t *group) +{ + int group_id = group->group_id; + bool del_group = false; + + _lock_acquire(&s_platform.mutex); + s_platform.group_ref_counts[group_id]--; + if (s_platform.group_ref_counts[group_id] == 0) { + del_group = true; + // the group now is not used by any channel, unregister it from the platform + s_platform.groups[group_id] = NULL; + // deinitialize the HAL context + dw_gdma_hal_deinit(&group->hal); + DW_GDMA_RCC_ATOMIC() { + dw_gdma_ll_enable_bus_clock(group_id, false); + } + } + _lock_release(&s_platform.mutex); + + if (del_group) { + free(group); + ESP_LOGD(TAG, "delete group (%d)", group_id); + } +} + +static esp_err_t channel_register_to_group(dw_gdma_channel_t *chan) +{ + dw_gdma_group_t *group = NULL; + int chan_id = -1; + for (int i = 0; i < DW_GDMA_LL_GROUPS; i++) { + group = dw_gdma_acquire_group_handle(i); + ESP_RETURN_ON_FALSE(group, ESP_ERR_NO_MEM, TAG, "no mem for group(%d)", i); + // loop to search free channel in the group + portENTER_CRITICAL(&group->spinlock); + for (int j = 0; j < DW_GDMA_LL_CHANNELS_PER_GROUP; j++) { + if (group->channels[j] == NULL) { + group->channels[j] = chan; + chan_id = j; + break; + } + } + portEXIT_CRITICAL(&group->spinlock); + if (chan_id < 0) { + dw_gdma_release_group_handle(group); + } else { + chan->group = group; + chan->chan_id = chan_id; + break; + } + } + ESP_RETURN_ON_FALSE(chan_id >= 0, ESP_ERR_NOT_FOUND, TAG, "no free channels"); + return ESP_OK; +} + +static void channel_unregister_from_group(dw_gdma_channel_t *chan) +{ + dw_gdma_group_t *group = chan->group; + int chan_id = chan->chan_id; + portENTER_CRITICAL(&group->spinlock); + group->channels[chan_id] = NULL; + portEXIT_CRITICAL(&group->spinlock); + // channel has a reference on group, release it now + dw_gdma_release_group_handle(group); +} + +static esp_err_t channel_destroy(dw_gdma_channel_t *chan) +{ + if (chan->group) { + 
channel_unregister_from_group(chan); + } + free(chan); + return ESP_OK; +} + +esp_err_t dw_gdma_new_channel(const dw_gdma_channel_alloc_config_t *config, dw_gdma_channel_handle_t *ret_chan) +{ +#if CONFIG_DW_GDMA_ENABLE_DEBUG_LOG + esp_log_level_set(TAG, ESP_LOG_DEBUG); +#endif + esp_err_t ret = ESP_OK; + dw_gdma_channel_t *chan = NULL; + ESP_RETURN_ON_FALSE(config && ret_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument"); + ESP_RETURN_ON_FALSE(config->src.num_outstanding_requests >= 1 && config->src.num_outstanding_requests <= DW_GDMA_LL_MAX_OUTSTANDING_REQUESTS, + ESP_ERR_INVALID_ARG, TAG, "invalid num_outstanding_requests"); + ESP_RETURN_ON_FALSE(config->dst.num_outstanding_requests >= 1 && config->dst.num_outstanding_requests <= DW_GDMA_LL_MAX_OUTSTANDING_REQUESTS, + ESP_ERR_INVALID_ARG, TAG, "invalid num_outstanding_request"); + ESP_RETURN_ON_FALSE(config->chan_priority >= 0 && config->chan_priority < DW_GDMA_LL_CHANNELS_PER_GROUP, + ESP_ERR_INVALID_ARG, TAG, "invalid channel priority"); + if (config->intr_priority) { + ESP_RETURN_ON_FALSE(1 << (config->intr_priority) & DW_GDMA_ALLOW_INTR_PRIORITY_MASK, ESP_ERR_INVALID_ARG, + TAG, "invalid interrupt priority:%d", config->intr_priority); + } + + chan = heap_caps_calloc(1, sizeof(dw_gdma_channel_t), DW_GDMA_MEM_ALLOC_CAPS); + ESP_RETURN_ON_FALSE(chan, ESP_ERR_NO_MEM, TAG, "no mem for channel"); + // register channel to the group + ESP_GOTO_ON_ERROR(channel_register_to_group(chan), err, TAG, "register to group failed"); + dw_gdma_group_t *group = chan->group; + dw_gdma_hal_context_t *hal = &group->hal; + int group_id = group->group_id; + int chan_id = chan->chan_id; + + // all channels in the same group should use the same interrupt priority + bool intr_priority_conflict = false; + portENTER_CRITICAL(&group->spinlock); + if (group->intr_priority == -1) { + group->intr_priority = config->intr_priority; + } else if (config->intr_priority != 0) { + intr_priority_conflict = (group->intr_priority != 
config->intr_priority); + } + portEXIT_CRITICAL(&group->spinlock); + ESP_GOTO_ON_FALSE(!intr_priority_conflict, ESP_ERR_INVALID_STATE, err, TAG, "intr_priority conflict, already is %d but attempt to %d", group->intr_priority, config->intr_priority); + + // basic initialization + portMUX_INITIALIZE(&chan->spinlock); + chan->src_transfer_type = config->src.block_transfer_type; + chan->dst_transfer_type = config->dst.block_transfer_type; + // set transfer flow type + dw_gdma_ll_channel_set_trans_flow(hal->dev, chan_id, config->src.role, config->dst.role, config->flow_controller); + // set the transfer type for source and destination + dw_gdma_ll_channel_set_src_multi_block_type(hal->dev, chan_id, config->src.block_transfer_type); + dw_gdma_ll_channel_set_dst_multi_block_type(hal->dev, chan_id, config->dst.block_transfer_type); + // set handshake interface + dw_gdma_ll_channel_set_src_handshake_interface(hal->dev, chan_id, config->src.handshake_type); + dw_gdma_ll_channel_set_dst_handshake_interface(hal->dev, chan_id, config->dst.handshake_type); + // set handshake peripheral + if (config->src.role != DW_GDMA_ROLE_MEM) { + dw_gdma_ll_channel_set_src_handshake_periph(hal->dev, chan_id, config->src.role); + } + if (config->dst.role != DW_GDMA_ROLE_MEM) { + dw_gdma_ll_channel_set_dst_handshake_periph(hal->dev, chan_id, config->dst.role); + } + // set channel priority + dw_gdma_ll_channel_set_priority(hal->dev, chan_id, config->chan_priority); + // set the outstanding request number + dw_gdma_ll_channel_set_src_outstanding_limit(hal->dev, chan_id, config->src.num_outstanding_requests); + dw_gdma_ll_channel_set_dst_outstanding_limit(hal->dev, chan_id, config->dst.num_outstanding_requests); + // set the status fetch address + dw_gdma_ll_channel_set_src_periph_status_addr(hal->dev, chan_id, config->src.status_fetch_addr); + dw_gdma_ll_channel_set_dst_periph_status_addr(hal->dev, chan_id, config->dst.status_fetch_addr); + + // enable all channel events (notes, they can't 
trigger an interrupt until `dw_gdma_ll_channel_enable_intr_propagation` is called) + dw_gdma_ll_channel_enable_intr_generation(hal->dev, chan_id, UINT32_MAX, true); + + ESP_LOGD(TAG, "new channel (%d,%d) at %p", group_id, chan_id, chan); + *ret_chan = chan; + return ESP_OK; +err: + if (chan) { + channel_destroy(chan); + } + return ret; +} + +esp_err_t dw_gdma_del_channel(dw_gdma_channel_handle_t chan) +{ + ESP_RETURN_ON_FALSE(chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument"); + dw_gdma_group_t *group = chan->group; + int group_id = group->group_id; + int chan_id = chan->chan_id; + ESP_LOGD(TAG, "del channel (%d,%d)", group_id, chan_id); + // recycle memory resource + ESP_RETURN_ON_ERROR(channel_destroy(chan), TAG, "destroy channel failed"); + return ESP_OK; +} + +esp_err_t dw_gdma_channel_enable_ctrl(dw_gdma_channel_handle_t chan, bool en_or_dis) +{ + ESP_RETURN_ON_FALSE(chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument"); + dw_gdma_hal_context_t *hal = &chan->group->hal; + int chan_id = chan->chan_id; + // the atomic is ensured by the hardware, so no lock is needed here + dw_gdma_ll_channel_enable(hal->dev, chan_id, en_or_dis); + return ESP_OK; +} + +esp_err_t dw_gdma_channel_suspend_ctrl(dw_gdma_channel_handle_t chan, bool enter_or_exit) +{ + ESP_RETURN_ON_FALSE(chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument"); + dw_gdma_hal_context_t *hal = &chan->group->hal; + int chan_id = chan->chan_id; + // the atomic is ensured by the hardware, so no lock is needed here + dw_gdma_ll_channel_suspend(hal->dev, chan_id, enter_or_exit); + return ESP_OK; +} + +esp_err_t dw_gdma_channel_abort(dw_gdma_channel_handle_t chan) +{ + ESP_RETURN_ON_FALSE(chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument"); + dw_gdma_hal_context_t *hal = &chan->group->hal; + int chan_id = chan->chan_id; + // the atomic is ensured by the hardware, so no lock is needed here + dw_gdma_ll_channel_abort(hal->dev, chan_id); + return ESP_OK; +} + +esp_err_t dw_gdma_channel_lock(dw_gdma_channel_handle_t 
chan, dw_gdma_lock_level_t level) +{ + ESP_RETURN_ON_FALSE(chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument"); + dw_gdma_hal_context_t *hal = &chan->group->hal; + int chan_id = chan->chan_id; + + // the lock control bit is located in a cfg register, with other configuration bits + portENTER_CRITICAL(&chan->spinlock); + dw_gdma_ll_channel_lock(hal->dev, chan_id, level); + portEXIT_CRITICAL(&chan->spinlock); + return ESP_OK; +} + +esp_err_t dw_gdma_channel_unlock(dw_gdma_channel_handle_t chan) +{ + ESP_RETURN_ON_FALSE(chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument"); + dw_gdma_hal_context_t *hal = &chan->group->hal; + int chan_id = chan->chan_id; + + // the lock control bit is located in a cfg register, with other configuration bits + portENTER_CRITICAL(&chan->spinlock); + dw_gdma_ll_channel_unlock(hal->dev, chan_id); + portEXIT_CRITICAL(&chan->spinlock); + return ESP_OK; +} + +esp_err_t dw_gdma_channel_continue(dw_gdma_channel_handle_t chan) +{ + ESP_RETURN_ON_FALSE(chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument"); + dw_gdma_hal_context_t *hal = &chan->group->hal; + int chan_id = chan->chan_id; + // the atomic is ensured by the hardware, so no lock is needed here + dw_gdma_ll_channel_resume_multi_block_transfer(hal->dev, chan_id); + return ESP_OK; +} + +esp_err_t dw_gdma_new_link_list(const dw_gdma_link_list_config_t *config, dw_gdma_link_list_handle_t *ret_list) +{ + esp_err_t ret = ESP_OK; + ESP_RETURN_ON_FALSE(config && ret_list, ESP_ERR_INVALID_ARG, TAG, "invalid argument"); + dw_gdma_link_list_item_t *items = NULL; + dw_gdma_link_list_t *list = NULL; + uint32_t num_items = config->num_items; + list = heap_caps_calloc(1, sizeof(dw_gdma_link_list_t), DW_GDMA_MEM_ALLOC_CAPS); + ESP_GOTO_ON_FALSE(list, ESP_ERR_NO_MEM, err, TAG, "no mem for link list"); + // the link list item has a strict alignment requirement, so we allocate it separately + items = heap_caps_aligned_calloc(DW_GDMA_LL_LINK_LIST_ALIGNMENT, num_items, + sizeof(dw_gdma_link_list_item_t), 
MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA); + ESP_GOTO_ON_FALSE(items, ESP_ERR_NO_MEM, err, TAG, "no mem for link list items"); + list->num_items = num_items; + list->items = items; + list->items_nc = (dw_gdma_link_list_item_t *)DW_GDMA_GET_NON_CACHE_ADDR(items); + + // set up the link list + for (size_t i = 0; i < num_items; i++) { + dw_gdma_ll_lli_set_next_item_addr(list->items_nc + i, (uint32_t)(list->items + i + 1)); + // set master port for the link list + dw_gdma_ll_lli_set_link_list_master_port(list->items_nc + i, DW_GDMA_LL_MASTER_PORT_MEMORY); + } + switch (config->link_type) { + case DW_GDMA_LINKED_LIST_TYPE_CIRCULAR: + dw_gdma_ll_lli_set_next_item_addr(list->items_nc + num_items - 1, (uint32_t)(list->items)); + break; + case DW_GDMA_LINKED_LIST_TYPE_SINGLY: + dw_gdma_ll_lli_set_next_item_addr(list->items_nc + num_items - 1, 0); + break; + } + + ESP_LOGD(TAG, "new link list @%p, items @%p", list, items); + *ret_list = list; + return ESP_OK; +err: + if (list) { + free(list); + } + if (items) { + free(items); + } + return ret; +} + +esp_err_t dw_gdma_del_link_list(dw_gdma_link_list_handle_t list) +{ + ESP_RETURN_ON_FALSE(list, ESP_ERR_INVALID_ARG, TAG, "invalid argument"); + ESP_LOGD(TAG, "del link list at %p", list); + free(list->items); + free(list); + return ESP_OK; +} + +esp_err_t dw_gdma_channel_use_link_list(dw_gdma_channel_handle_t chan, dw_gdma_link_list_handle_t list) +{ + ESP_RETURN_ON_FALSE(chan && list, ESP_ERR_INVALID_ARG, TAG, "invalid argument"); + ESP_RETURN_ON_FALSE(chan->src_transfer_type == DW_GDMA_BLOCK_TRANSFER_LIST || + chan->dst_transfer_type == DW_GDMA_BLOCK_TRANSFER_LIST, + ESP_ERR_INVALID_STATE, TAG, "invalid transfer type"); + dw_gdma_hal_context_t *hal = &chan->group->hal; + int chan_id = chan->chan_id; + + // set master port for the link list + dw_gdma_ll_channel_set_link_list_master_port(hal->dev, chan_id, DW_GDMA_LL_MASTER_PORT_MEMORY); + // set the link list head address + dw_gdma_ll_channel_set_link_list_head_addr(hal->dev, chan_id, 
(uint32_t)(list->items)); + return ESP_OK; +} + +dw_gdma_lli_handle_t dw_gdma_link_list_get_item(dw_gdma_link_list_handle_t list, int item_index) +{ + ESP_RETURN_ON_FALSE_ISR(list, NULL, TAG, "invalid argument"); + ESP_RETURN_ON_FALSE_ISR(item_index < list->num_items, NULL, TAG, "invalid item index"); + dw_gdma_link_list_item_t *lli = list->items_nc + item_index; + return lli; +} + +esp_err_t dw_gdma_channel_config_transfer(dw_gdma_channel_handle_t chan, const dw_gdma_block_transfer_config_t *config) +{ + ESP_RETURN_ON_FALSE(chan && config, ESP_ERR_INVALID_ARG, TAG, "invalid argument"); + ESP_RETURN_ON_FALSE(chan->src_transfer_type != DW_GDMA_BLOCK_TRANSFER_LIST && + chan->dst_transfer_type != DW_GDMA_BLOCK_TRANSFER_LIST, + ESP_ERR_INVALID_STATE, TAG, "invalid transfer type"); + dw_gdma_hal_context_t *hal = &chan->group->hal; + int chan_id = chan->chan_id; + + // set memory address + dw_gdma_ll_channel_set_src_addr(hal->dev, chan_id, config->src.addr); + dw_gdma_ll_channel_set_dst_addr(hal->dev, chan_id, config->dst.addr); + // transfer size + dw_gdma_ll_channel_set_trans_block_size(hal->dev, chan_id, config->size); + // [Ctrl0] register + // set master port for the source and destination target + dw_gdma_ll_channel_set_src_master_port(hal->dev, chan_id, config->src.addr); + dw_gdma_ll_channel_set_dst_master_port(hal->dev, chan_id, config->dst.addr); + // transfer width + dw_gdma_ll_channel_set_src_trans_width(hal->dev, chan_id, config->src.width); + dw_gdma_ll_channel_set_dst_trans_width(hal->dev, chan_id, config->dst.width); + // set burst items + dw_gdma_ll_channel_set_src_burst_items(hal->dev, chan_id, config->src.burst_items); + dw_gdma_ll_channel_set_dst_burst_items(hal->dev, chan_id, config->dst.burst_items); + // set burst mode + dw_gdma_ll_channel_set_src_burst_mode(hal->dev, chan_id, config->src.burst_mode); + dw_gdma_ll_channel_set_dst_burst_mode(hal->dev, chan_id, config->dst.burst_mode); + // [Ctrl1] register + // set burst length + 
dw_gdma_ll_channel_set_src_burst_len(hal->dev, chan_id, config->src.burst_len); + dw_gdma_ll_channel_set_dst_burst_len(hal->dev, chan_id, config->dst.burst_len); + // whether to enable the peripheral status write back + dw_gdma_ll_channel_enable_src_periph_status_write_back(hal->dev, chan_id, config->src.flags.en_status_write_back); + dw_gdma_ll_channel_enable_dst_periph_status_write_back(hal->dev, chan_id, config->dst.flags.en_status_write_back); + + return ESP_OK; +} + +esp_err_t dw_gdma_channel_set_block_markers(dw_gdma_channel_handle_t chan, dw_gdma_block_markers_t markers) +{ + ESP_RETURN_ON_FALSE_ISR(chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument"); + ESP_RETURN_ON_FALSE_ISR(chan->src_transfer_type != DW_GDMA_BLOCK_TRANSFER_LIST && + chan->dst_transfer_type != DW_GDMA_BLOCK_TRANSFER_LIST, + ESP_ERR_INVALID_STATE, TAG, "invalid transfer type"); + dw_gdma_hal_context_t *hal = &chan->group->hal; + int chan_id = chan->chan_id; + + // [Ctrl1] register + // set the block markers + dw_gdma_ll_channel_set_block_markers(hal->dev, chan_id, markers.en_trans_done_intr, markers.is_last, markers.is_valid); + + return ESP_OK; +} + +esp_err_t dw_gdma_lli_config_transfer(dw_gdma_lli_handle_t lli, dw_gdma_block_transfer_config_t *config) +{ + ESP_RETURN_ON_FALSE(lli && config, ESP_ERR_INVALID_ARG, TAG, "invalid argument"); + + // set memory address + dw_gdma_ll_lli_set_src_addr(lli, config->src.addr); + dw_gdma_ll_lli_set_dst_addr(lli, config->dst.addr); + // transfer size + dw_gdma_ll_lli_set_trans_block_size(lli, config->size); + // [Ctrl0] register + // set master port for the source and destination target + dw_gdma_ll_lli_set_src_master_port(lli, config->src.addr); + dw_gdma_ll_lli_set_dst_master_port(lli, config->dst.addr); + // transfer width + dw_gdma_ll_lli_set_src_trans_width(lli, config->src.width); + dw_gdma_ll_lli_set_dst_trans_width(lli, config->dst.width); + // set burst items + dw_gdma_ll_lli_set_src_burst_items(lli, config->src.burst_items); + 
dw_gdma_ll_lli_set_dst_burst_items(lli, config->dst.burst_items); + // set burst mode + dw_gdma_ll_lli_set_src_burst_mode(lli, config->src.burst_mode); + dw_gdma_ll_lli_set_dst_burst_mode(lli, config->dst.burst_mode); + // [Ctrl1] register + // set burst length + dw_gdma_ll_lli_set_src_burst_len(lli, config->src.burst_len); + dw_gdma_ll_lli_set_dst_burst_len(lli, config->dst.burst_len); + // whether to enable the peripheral status write back + dw_gdma_ll_lli_enable_src_periph_status_write_back(lli, config->src.flags.en_status_write_back); + dw_gdma_ll_lli_enable_dst_periph_status_write_back(lli, config->dst.flags.en_status_write_back); + + return ESP_OK; +} + +esp_err_t dw_gdma_lli_set_block_markers(dw_gdma_lli_handle_t lli, dw_gdma_block_markers_t markers) +{ + ESP_RETURN_ON_FALSE_ISR(lli, ESP_ERR_INVALID_ARG, TAG, "invalid argument"); + + // [Ctrl1] register + // set the block markers + dw_gdma_ll_lli_set_block_markers(lli, markers.en_trans_done_intr, markers.is_last, markers.is_valid); + + return ESP_OK; +} + +void dw_gdma_channel_default_isr(void *args) +{ + dw_gdma_channel_t *chan = (dw_gdma_channel_t *)args; + dw_gdma_group_t *group = chan->group; + dw_gdma_hal_context_t *hal = &group->hal; + int chan_id = chan->chan_id; + bool need_yield = false; + // clear pending interrupt event + uint32_t intr_status = dw_gdma_ll_channel_get_intr_status(hal->dev, chan_id); + dw_gdma_ll_channel_clear_intr(hal->dev, chan_id, intr_status); + + // call user callbacks + if (intr_status & DW_GDMA_LL_CHANNEL_EVENT_SHADOWREG_OR_LLI_INVALID_ERR) { + if (chan->cbs.on_invalid_block) { + intptr_t invalid_lli_addr = dw_gdma_ll_channel_get_current_link_list_item_addr(hal->dev, chan_id); + dw_gdma_break_event_data_t edata = { + .invalid_lli = (dw_gdma_lli_handle_t)DW_GDMA_GET_NON_CACHE_ADDR(invalid_lli_addr), + }; + if (chan->cbs.on_invalid_block(chan, &edata, chan->user_data)) { + need_yield = true; + } + } + } + + if (intr_status & DW_GDMA_LL_CHANNEL_EVENT_BLOCK_TFR_DONE) { + if 
(chan->cbs.on_block_trans_done) { + dw_gdma_trans_done_event_data_t edata = {}; + if (chan->cbs.on_block_trans_done(chan, &edata, chan->user_data)) { + need_yield = true; + } + } + } + + if (intr_status & DW_GDMA_LL_CHANNEL_EVENT_DMA_TFR_DONE) { + if (chan->cbs.on_full_trans_done) { + dw_gdma_trans_done_event_data_t edata = {}; + if (chan->cbs.on_full_trans_done(chan, &edata, chan->user_data)) { + need_yield = true; + } + } + } + + if (need_yield) { + portYIELD_FROM_ISR(); + } +} + +static esp_err_t dw_gdma_install_channel_interrupt(dw_gdma_channel_t *chan) +{ + esp_err_t ret = ESP_OK; + dw_gdma_group_t *group = chan->group; + dw_gdma_hal_context_t *hal = &group->hal; + int chan_id = chan->chan_id; + // clear pending events + dw_gdma_ll_channel_enable_intr_propagation(hal->dev, chan_id, UINT32_MAX, false); + dw_gdma_ll_channel_clear_intr(hal->dev, chan_id, UINT32_MAX); + + // pre-alloc a interrupt handle, with handler disabled + // DW_GDMA multiple channels share the same interrupt source, so we use a shared interrupt handle + intr_handle_t intr = NULL; + int isr_flags = DW_GDMA_INTR_ALLOC_FLAGS | ESP_INTR_FLAG_SHARED; + if (group->intr_priority) { + isr_flags |= 1 << (group->intr_priority); + } else { + isr_flags |= DW_GDMA_ALLOW_INTR_PRIORITY_MASK; + } + ret = esp_intr_alloc_intrstatus(ETS_DW_GDMA_INTR_SOURCE, isr_flags, + (uint32_t)dw_gdma_ll_get_intr_status_reg(hal->dev), DW_GDMA_LL_CHANNEL_EVENT_MASK(chan_id), + dw_gdma_channel_default_isr, chan, &intr); + ESP_RETURN_ON_ERROR(ret, TAG, "alloc interrupt failed"); + + ESP_LOGD(TAG, "install interrupt service for channel (%d,%d)", group->group_id, chan_id); + chan->intr = intr; + return ESP_OK; +} + +esp_err_t dw_gdma_channel_register_event_callbacks(dw_gdma_channel_handle_t chan, dw_gdma_event_callbacks_t *cbs, void *user_data) +{ + ESP_RETURN_ON_FALSE(chan && cbs, ESP_ERR_INVALID_ARG, TAG, "invalid argument"); + dw_gdma_group_t *group = chan->group; + dw_gdma_hal_context_t *hal = &group->hal; + int chan_id = 
chan->chan_id; + +#if CONFIG_DW_GDMA_ISR_IRAM_SAFE + if (cbs->on_block_trans_done) { + ESP_RETURN_ON_FALSE(esp_ptr_in_iram(cbs->on_block_trans_done), ESP_ERR_INVALID_ARG, + TAG, "on_block_trans_done not in IRAM"); + } + if (cbs->on_full_trans_done) { + ESP_RETURN_ON_FALSE(esp_ptr_in_iram(cbs->on_full_trans_done), ESP_ERR_INVALID_ARG, + TAG, "on_full_trans_done not in IRAM"); + } + if (cbs->on_invalid_block) { + ESP_RETURN_ON_FALSE(esp_ptr_in_iram(cbs->on_invalid_block), ESP_ERR_INVALID_ARG, + TAG, "on_invalid_block not in IRAM"); + } + if (user_data) { + ESP_RETURN_ON_FALSE(esp_ptr_internal(user_data), ESP_ERR_INVALID_ARG, + TAG, "user context not in internal RAM"); + } +#endif // CONFIG_DW_GDMA_ISR_IRAM_SAFE + + // lazy install interrupt service + if (!chan->intr) { + ESP_RETURN_ON_ERROR(dw_gdma_install_channel_interrupt(chan), TAG, "install interrupt service failed"); + } + + // enable the event to be able to trigger an interrupt + dw_gdma_ll_channel_enable_intr_propagation(hal->dev, chan_id, DW_GDMA_LL_CHANNEL_EVENT_BLOCK_TFR_DONE, cbs->on_block_trans_done != NULL); + dw_gdma_ll_channel_enable_intr_propagation(hal->dev, chan_id, DW_GDMA_LL_CHANNEL_EVENT_DMA_TFR_DONE, cbs->on_full_trans_done != NULL); + dw_gdma_ll_channel_enable_intr_propagation(hal->dev, chan_id, DW_GDMA_LL_CHANNEL_EVENT_SHADOWREG_OR_LLI_INVALID_ERR, cbs->on_invalid_block != NULL); + + chan->user_data = user_data; + memcpy(&chan->cbs, cbs, sizeof(dw_gdma_event_callbacks_t)); + + return ESP_OK; +} + +esp_err_t dw_gdma_channel_get_id(dw_gdma_channel_handle_t chan, int *channel_id) +{ + ESP_RETURN_ON_FALSE(chan && channel_id, ESP_ERR_INVALID_ARG, TAG, "invalid argument"); + *channel_id = chan->chan_id; + return ESP_OK; +} diff --git a/components/esp_hw_support/dma/linker.lf b/components/esp_hw_support/dma/linker.lf index 176d6b1021..7dd8eeb8a1 100644 --- a/components/esp_hw_support/dma/linker.lf +++ b/components/esp_hw_support/dma/linker.lf @@ -67,3 +67,18 @@ entries: gdma_hal_axi: 
gdma_axi_hal_stop (noflash) gdma_hal_axi: gdma_axi_hal_append (noflash) gdma_hal_axi: gdma_axi_hal_reset (noflash) + +[mapping:dw_gdma_driver] +archive: libesp_hw_support.a +entries: + # performance optimization, always put the DW_GDMA default interrupt handler in IRAM + if SOC_DW_GDMA_SUPPORTED = y: + dw_gdma: dw_gdma_channel_default_isr (noflash) + + # put DW_GDMA control functions in IRAM + if DW_GDMA_CTRL_FUNC_IN_IRAM = y: + dw_gdma: dw_gdma_channel_continue (noflash) + + if DW_GDMA_SETTER_FUNC_IN_IRAM = y: + dw_gdma: dw_gdma_channel_set_block_markers (noflash) + dw_gdma: dw_gdma_lli_set_block_markers (noflash) diff --git a/components/esp_hw_support/include/esp_private/dw_gdma.h b/components/esp_hw_support/include/esp_private/dw_gdma.h new file mode 100644 index 0000000000..14968015b5 --- /dev/null +++ b/components/esp_hw_support/include/esp_private/dw_gdma.h @@ -0,0 +1,405 @@ +/* + * SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once + +#include +#include "esp_err.h" +#include "hal/dw_gdma_types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief Type of DW_GDMA channel handle + */ +typedef struct dw_gdma_channel_t *dw_gdma_channel_handle_t; + +/** + * @brief Type of DW_GDMA link list handle + */ +typedef struct dw_gdma_link_list_t *dw_gdma_link_list_handle_t; + +/** + * @brief Type of DW_GDMA link list item handle + */ +typedef struct dw_gdma_link_list_item_t *dw_gdma_lli_handle_t; + +/** + * @brief A group of channel's static configurations + * + * @note By static, we mean these channel end configurations shouldn't be changed after the DMA channel is created. 
+ */ +typedef struct { + dw_gdma_block_transfer_type_t block_transfer_type; /*!< Block transfer type */ + dw_gdma_role_t role; /*!< Role of the DMA channel end */ + dw_gdma_handshake_type_t handshake_type; /*!< Handshake type */ + uint8_t num_outstanding_requests; /*!< Number of R/W requests that the AXI master can issue to the slave before receiving a response. + Suggest value range: [1,16] */ + uint32_t status_fetch_addr; /*!< Address where to fetch the status of the peripheral */ +} dw_gdma_channel_static_config_t; + +/** + * @brief Configurations for allocating a DMA channel + */ +typedef struct { + dw_gdma_channel_static_config_t src; /*!< source end static configuration */ + dw_gdma_channel_static_config_t dst; /*!< destination end static configuration */ + dw_gdma_flow_controller_t flow_controller; /*!< Transfer flow controller */ + int chan_priority; /*!< DMA channel priority */ + int intr_priority; /*!< DMA interrupt priority, + if set to 0, the driver will try to allocate an interrupt with a relative low priority (1,2,3) */ +} dw_gdma_channel_alloc_config_t; + +/** + * @brief Create a DMA channel + * + * @param[in] config Channel allocation configuration + * @param[out] ret_chan Returned channel handle + * @return + * - ESP_OK: Create DMA channel successfully + * - ESP_ERR_INVALID_ARG: Create DMA channel failed because of invalid argument + * - ESP_ERR_NO_MEM: Create DMA channel failed because out of memory + * - ESP_FAIL: Create DMA channel failed because of other error + */ +esp_err_t dw_gdma_new_channel(const dw_gdma_channel_alloc_config_t *config, dw_gdma_channel_handle_t *ret_chan); + +/** + * @brief Delete DMA channel + * + * @param[in] chan DMA channel handle, allocated by `dw_gdma_new_channel` + * @return + * - ESP_OK: Delete DMA channel successfully + * - ESP_ERR_INVALID_ARG: Delete DMA channel failed because of invalid argument + * - ESP_FAIL: Delete DMA channel failed because of other error + */ +esp_err_t 
dw_gdma_del_channel(dw_gdma_channel_handle_t chan); + +/** + * @brief Get the DMA channel ID + * + * @note This API breaks the encapsulation of DW_GDMA Channel Object. + * With the returned channel ID, you can even bypass all other driver API and access Low Level API directly. + * + * @param[in] chan DMA channel handle, allocated by `dw_gdma_new_channel` + * @param[out] channel_id Returned channel ID + * @return + * - ESP_OK: Get DW_GDMA channel ID successfully + * - ESP_ERR_INVALID_ARG: Get DW_GDMA channel ID failed because of invalid argument + * - ESP_FAIL: Get DW_GDMA channel ID failed because of other error + */ +esp_err_t dw_gdma_channel_get_id(dw_gdma_channel_handle_t chan, int *channel_id); + +/** + * @brief A group of channel's dynamic configurations + * + * @note By dynamic, we mean these channel end configurations can be changed in each transfer. + */ +typedef struct { + uint32_t addr; /*!< Memory address */ + dw_gdma_transfer_width_t width; /*!< Transfer width */ + dw_gdma_burst_mode_t burst_mode; /*!< Burst mode */ + dw_gdma_burst_items_t burst_items; /*!< Number of data items that are contained in one burst transaction */ + uint8_t burst_len; /*!< Burst transaction length, if set to 0, the hardware will apply a possible value as burst length */ + struct { + uint32_t en_status_write_back: 1; /*!< Enable peripheral status write back */ + } flags; +} dw_gdma_channel_dynamic_config_t; + +/** + * @brief Channel block transfer configurations + */ +typedef struct { + dw_gdma_channel_dynamic_config_t src; /*!< source configuration */ + dw_gdma_channel_dynamic_config_t dst; /*!< destination configuration */ + size_t size; /*!< Transfer size */ +} dw_gdma_block_transfer_config_t; + +/** + * @brief Configure transfer parameters for a DMA channel + * + * @note This is an "all-in-one" function for setting up the block transfer. + * @note This function can't work with Link-List transfer type. For Link-List transfer, please use `dw_gdma_lli_config_transfer` instead. 
+ * + * @param[in] chan DMA channel handle, allocated by `dw_gdma_new_channel` + * @param[in] config Block transfer configurations + * @return + * - ESP_OK: Configure DMA channel block transfer successfully + * - ESP_ERR_INVALID_ARG: Configure DMA channel block transfer failed because of invalid argument + * - ESP_ERR_INVALID_STATE: Configure DMA channel block transfer failed because the channel has Link-List transfer type + * - ESP_FAIL: Configure DMA channel block transfer failed because of other error + */ +esp_err_t dw_gdma_channel_config_transfer(dw_gdma_channel_handle_t chan, const dw_gdma_block_transfer_config_t *config); + +/** + * @brief Enable or disable a DMA channel + * + * @note Before enabling a channel, you need to setup the channel transfer by either `dw_gdma_channel_config_transfer` or `dw_gdma_lli_config_transfer` + * @note When a DMA channel is disabled, the DMA engine will stop working. You need to reconfigure the channel before enabling it again. + * @note After all block transfers are completed, the DMA channel will be disabled automatically. + * + * @param[in] chan DMA channel handle, allocated by `dw_gdma_new_channel` + * @param[in] en_or_dis True to enable, false to disable the DMA channel + * @return + * - ESP_OK: Enable or disable DMA channel successfully + * - ESP_ERR_INVALID_ARG: Enable or disable DMA channel failed because of invalid argument + * - ESP_FAIL: Enable or disable DMA channel failed because of other error + */ +esp_err_t dw_gdma_channel_enable_ctrl(dw_gdma_channel_handle_t chan, bool en_or_dis); + +/** + * @brief Suspend or resume a DMA channel + * + * @note When a DMA channel is suspended, the DMA engine will stop working gracefully and the channel's status will be saved. + * @note The channel will exit the suspend state automatically if it is disabled. 
+ * + * @param[in] chan DMA channel handle, allocated by `dw_gdma_new_channel` + * @param[in] enter_or_exit True to suspend, false to resume the DMA channel + * @return + * - ESP_OK: Suspend or resume DMA channel successfully + * - ESP_ERR_INVALID_ARG: Suspend or resume DMA channel failed because of invalid argument + * - ESP_FAIL: Suspend or resume DMA channel failed because of other error + */ +esp_err_t dw_gdma_channel_suspend_ctrl(dw_gdma_channel_handle_t chan, bool enter_or_exit); + +/** + * @brief Abort the DMA channel + * + * @note If the channel is aborted, it will be disabled immediately, which may cause AXI bus protocol violation. + * @note This function is recommended to only be used when the channel hangs. Recommend to try `dw_gdma_channel_enable_ctrl` first, then opt for aborting. + * + * @param[in] chan DMA channel handle, allocated by `dw_gdma_new_channel` + * @return + * - ESP_OK: Abort DMA channel successfully + * - ESP_ERR_INVALID_ARG: Abort DMA channel failed because of invalid argument + * - ESP_FAIL: Abort DMA channel failed because of other error + */ +esp_err_t dw_gdma_channel_abort(dw_gdma_channel_handle_t chan); + +/** + * @brief Lock the DMA channel at specific transfer level + * + * @note When a DMA channel is locked, no other channels are granted control of the master bus for the duration specified by the lock level. + * @note Only lock the channel if you want exclusive access to the master bus. + * @note Channel locking feature is only for M2M transfer. 
+ * + * @param[in] chan DMA channel handle, allocated by `dw_gdma_new_channel` + * @param[in] level Transfer level + * @return + * - ESP_OK: Lock DMA channel successfully + * - ESP_ERR_INVALID_ARG: Lock DMA channel failed because of invalid argument + * - ESP_FAIL: Lock DMA channel failed because of other error + */ +esp_err_t dw_gdma_channel_lock(dw_gdma_channel_handle_t chan, dw_gdma_lock_level_t level); + +/** + * @brief Unlock the DMA channel + * + * @param[in] chan DMA channel handle, allocated by `dw_gdma_new_channel` + * @return + * - ESP_OK: Unlock DMA channel successfully + * - ESP_ERR_INVALID_ARG: Unlock DMA channel failed because of invalid argument + * - ESP_FAIL: Unlock DMA channel failed because of other error + */ +esp_err_t dw_gdma_channel_unlock(dw_gdma_channel_handle_t chan); + +/** + * @brief Continue the temporarily stopped DMA transfer because of invalid block + * + * @note You should only call this API when the block becomes valid again, + * by calling `dw_gdma_lli_set_block_markers`/`dw_gdma_channel_set_block_markers` with `is_valid` set to true. + * + * @param[in] chan DMA channel handle, allocated by `dw_gdma_new_channel` + * @return + * - ESP_OK: Continue DMA transfer successfully + * - ESP_ERR_INVALID_ARG: Continue DMA transfer failed because of invalid argument + * - ESP_FAIL: Continue DMA transfer failed because of other error + */ +esp_err_t dw_gdma_channel_continue(dw_gdma_channel_handle_t chan); + +/** + * @brief Type of DW_GDMA trans done event data + */ +typedef struct { +} dw_gdma_trans_done_event_data_t; + +/** + * @brief Type of DW_GDMA trans_done event callback + * @param chan GDMA channel handle, created from `dw_gdma_new_channel` + * @param event_data GDMA event data + * @param user_data User registered data from `dw_gdma_channel_register_event_callbacks` + * + * @return Whether a task switch is needed after the callback function returns, + * this is usually due to the callback wakes up some high priority task. 
+ */ +typedef bool (*dw_gdma_trans_done_event_callback_t)(dw_gdma_channel_handle_t chan, const dw_gdma_trans_done_event_data_t *event_data, void *user_data); + +/** + * @brief Type of DW_GDMA break event data + */ +typedef struct { + dw_gdma_lli_handle_t invalid_lli; /*!< Invalid link list item */ +} dw_gdma_break_event_data_t; + +/** + * @brief Type of DW_GDMA break event callback + * @param chan GDMA channel handle, created from `dw_gdma_new_channel` + * @param event_data GDMA event data + * @param user_data User registered data from `dw_gdma_channel_register_event_callbacks` + * + * @return Whether a task switch is needed after the callback function returns, + * this is usually due to the callback wakes up some high priority task. + */ +typedef bool (*dw_gdma_break_event_callback_t)(dw_gdma_channel_handle_t chan, const dw_gdma_break_event_data_t *event_data, void *user_data); + +/** + * @brief Group of supported DW_GDMA callbacks + * @note The callbacks are all running under ISR environment + */ +typedef struct { + dw_gdma_trans_done_event_callback_t on_block_trans_done; /*!< Invoked when a block transfer is completed */ + dw_gdma_trans_done_event_callback_t on_full_trans_done; /*!< Invoked when all block transfers are completed */ + dw_gdma_break_event_callback_t on_invalid_block; /*!< Invoked when an invalid block is detected */ +} dw_gdma_event_callbacks_t; + +/** + * @brief Set DW_GDMA event callbacks for a channel + * @note This API will lazy install the DW_GDMA interrupt service + * + * @param[in] chan DW_GDMA channel handle, allocated by `dw_gdma_new_channel` + * @param[in] cbs Group of callback functions + * @param[in] user_data User data, which will be passed to callback functions directly + * @return + * - ESP_OK: Set event callbacks successfully + * - ESP_ERR_INVALID_ARG: Set event callbacks failed because of invalid argument + * - ESP_FAIL: Set event callbacks failed because of other error + */ +esp_err_t 
dw_gdma_channel_register_event_callbacks(dw_gdma_channel_handle_t chan, dw_gdma_event_callbacks_t *cbs, void *user_data); + +/** + * @brief DMA link list type + */ +typedef enum { + DW_GDMA_LINKED_LIST_TYPE_SINGLY, /*!< Singly linked list */ + DW_GDMA_LINKED_LIST_TYPE_CIRCULAR, /*!< Circular linked list */ +} dw_gdma_link_list_type_t; + +/** + * @brief DMA link list configurations + */ +typedef struct { + uint32_t num_items; //!< Number of link list items + dw_gdma_link_list_type_t link_type; //!< Link list type +} dw_gdma_link_list_config_t; + +/** + * @brief Create a DMA link list + * + * @param[in] config Link list configurations + * @param[out] ret_list Returned link list handle + * @return + * - ESP_OK: Create DMA link list successfully + * - ESP_ERR_INVALID_ARG: Create DMA link list failed because of invalid argument + * - ESP_ERR_NO_MEM: Create DMA link list failed because out of memory + * - ESP_FAIL: Create DMA link list failed because of other error + */ +esp_err_t dw_gdma_new_link_list(const dw_gdma_link_list_config_t *config, dw_gdma_link_list_handle_t *ret_list); + +/** + * @brief Delete a DMA link list + * + * @param[in] list Link list handle, allocated by `dw_gdma_new_link_list` + * @return + * - ESP_OK: Delete DMA link list successfully + * - ESP_ERR_INVALID_ARG: Delete DMA link list failed because of invalid argument + * - ESP_FAIL: Delete DMA link list failed because of other error + */ +esp_err_t dw_gdma_del_link_list(dw_gdma_link_list_handle_t list); + +/** + * @brief Apply a link list to a DMA channel + * + * @note This function can only work with Link-List transfer type. 
+ * + * @param[in] chan DMA channel handle, allocated by `dw_gdma_new_channel` + * @param[in] list Link list handle, allocated by `dw_gdma_new_link_list` + * @return + * - ESP_OK: Apply link list to DMA channel successfully + * - ESP_ERR_INVALID_ARG: Apply link list to DMA channel failed because of invalid argument + * - ESP_ERR_INVALID_STATE: Apply link list to DMA channel failed because the channel is not with Link-List transfer type + * - ESP_FAIL: Apply link list to DMA channel failed because of other error + */ +esp_err_t dw_gdma_channel_use_link_list(dw_gdma_channel_handle_t chan, dw_gdma_link_list_handle_t list); + +/** + * @brief A helper function to return an item from a given link list, by index + * + * @param[in] list Link list handle, allocated by `dw_gdma_new_link_list` + * @param[in] item_index Index of the item + * @return + * - NULL: Invalid argument + * - Others: Link list item handle + */ +dw_gdma_lli_handle_t dw_gdma_link_list_get_item(dw_gdma_link_list_handle_t list, int item_index); + +/** + * @brief Configure transfer parameters for a DMA link list item + * + * @note This is an "all-in-one" function for setting up the link list item. + * @note This function can only work with Link-List transfer type. For other transfer types, please use `dw_gdma_channel_config_transfer` instead. 
+ * + * @param[in] lli Link list item + * @param[in] config Block transfer configurations + * @return + * - ESP_OK: Configure link list item block transfer successfully + * - ESP_ERR_INVALID_ARG: Configure link list item block transfer failed because of invalid argument + * - ESP_FAIL: Configure link list item block transfer failed because of other error + */ +esp_err_t dw_gdma_lli_config_transfer(dw_gdma_lli_handle_t lli, dw_gdma_block_transfer_config_t *config); + +/** + * @brief Markers of a DW_GDMA block + */ +typedef struct { + uint32_t is_last: 1; /*!< Set if this block is the last one */ + uint32_t is_valid: 1; /*!< Set if this block is valid */ + uint32_t en_trans_done_intr: 1; /*!< Set if to enable the transfer done interrupt for this block */ +} dw_gdma_block_markers_t; + +/** + * @brief Set block markers for a DMA channel + * + * @note This function doesn't work for Link-List transfer type. For Link-List transfer, please use `dw_gdma_lli_set_block_markers` instead. + * @note Setting the markers should always be the last step of configuring a block transfer, before enabling/continuing the channel. + * + * @param[in] chan DMA channel handle, allocated by `dw_gdma_new_channel` + * @param[in] markers Block markers + * @return + * - ESP_OK: Set block markers successfully + * - ESP_ERR_INVALID_ARG: Set block markers failed because of invalid argument + * - ESP_ERR_INVALID_STATE: Set block markers failed because the channel has Link-List transfer type + * - ESP_FAIL: Set block markers failed because of other error + */ +esp_err_t dw_gdma_channel_set_block_markers(dw_gdma_channel_handle_t chan, dw_gdma_block_markers_t markers); + +/** + * @brief Set block markers for a DMA link list item + * + * @note Setting the markers should always be the last step of configuring a block transfer, before enabling/continuing the channel. 
+ * + * @param[in] lli Link list item + * @param[in] markers Block markers + * @return + * - ESP_OK: Set block markers successfully + * - ESP_ERR_INVALID_ARG: Set block markers failed because of invalid argument + * - ESP_FAIL: Set block markers failed because of other error + */ +esp_err_t dw_gdma_lli_set_block_markers(dw_gdma_lli_handle_t lli, dw_gdma_block_markers_t markers); + +#ifdef __cplusplus +} +#endif diff --git a/components/esp_hw_support/test_apps/dma/main/CMakeLists.txt b/components/esp_hw_support/test_apps/dma/main/CMakeLists.txt index 54b2398209..b923e9882f 100644 --- a/components/esp_hw_support/test_apps/dma/main/CMakeLists.txt +++ b/components/esp_hw_support/test_apps/dma/main/CMakeLists.txt @@ -8,6 +8,10 @@ if(CONFIG_SOC_GDMA_SUPPORTED) list(APPEND srcs "test_gdma.c") endif() +if(CONFIG_SOC_DW_GDMA_SUPPORTED) + list(APPEND srcs "test_dw_gdma.c") +endif() + # In order for the cases defined by `TEST_CASE` to be linked into the final elf, # the component can be registered as WHOLE_ARCHIVE idf_component_register(SRCS ${srcs} diff --git a/components/esp_hw_support/test_apps/dma/main/test_dw_gdma.c b/components/esp_hw_support/test_apps/dma/main/test_dw_gdma.c new file mode 100644 index 0000000000..3ffe52c112 --- /dev/null +++ b/components/esp_hw_support/test_apps/dma/main/test_dw_gdma.c @@ -0,0 +1,517 @@ +/* + * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD + * + * SPDX-License-Identifier: Apache-2.0 + */ +#include +#include +#include "sdkconfig.h" +#include "freertos/FreeRTOS.h" +#include "freertos/task.h" +#include "unity.h" +#include "esp_private/dw_gdma.h" +#include "hal/dw_gdma_ll.h" +#include "esp_cache.h" + +TEST_CASE("DW_GDMA channel allocation", "[DW_GDMA]") +{ + printf("install DMA channels exhaustively\r\n"); + dw_gdma_channel_static_config_t static_config = { + .block_transfer_type = DW_GDMA_BLOCK_TRANSFER_CONTIGUOUS, + .role = DW_GDMA_ROLE_MEM, + .num_outstanding_requests = 1, + }; + dw_gdma_channel_alloc_config_t 
alloc_config = { + .src = static_config, + .dst = static_config, + }; + dw_gdma_channel_handle_t chans[DW_GDMA_LL_GROUPS][DW_GDMA_LL_CHANNELS_PER_GROUP]; + for (int i = 0; i < DW_GDMA_LL_GROUPS; i++) { + for (int j = 0; j < DW_GDMA_LL_CHANNELS_PER_GROUP; j++) { + TEST_ESP_OK(dw_gdma_new_channel(&alloc_config, &chans[i][j])); + } + } + TEST_ESP_ERR(ESP_ERR_NOT_FOUND, dw_gdma_new_channel(&alloc_config, &chans[0][0])); + + printf("delete DMA channels\r\n"); + for (int i = 0; i < DW_GDMA_LL_GROUPS; i++) { + for (int j = 0; j < DW_GDMA_LL_CHANNELS_PER_GROUP; j++) { + TEST_ESP_OK(dw_gdma_del_channel(chans[i][j])); + } + } +} + +static bool test_dw_gdma_conti_mode_trans_done_cb(dw_gdma_channel_handle_t chan, const dw_gdma_trans_done_event_data_t *event_data, void *user_data) +{ + BaseType_t task_woken = pdFALSE; + SemaphoreHandle_t done_sem = (SemaphoreHandle_t)user_data; + xSemaphoreGiveFromISR(done_sem, &task_woken); + return task_woken == pdTRUE; +} + +TEST_CASE("DW_GDMA M2M Test: Contiguous Mode", "[DW_GDMA]") +{ + SemaphoreHandle_t done_sem = xSemaphoreCreateBinary(); + TEST_ASSERT_NOT_NULL(done_sem); + + printf("prepare the source and destination buffers\r\n"); + uint8_t *src_buf = heap_caps_aligned_calloc(64, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT); + uint8_t *dst_buf = heap_caps_aligned_calloc(64, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT); + TEST_ASSERT_NOT_NULL(src_buf); + TEST_ASSERT_NOT_NULL(dst_buf); + for (int i = 0; i < 256; i++) { + src_buf[i] = i; + } +#if CONFIG_IDF_TARGET_ESP32P4 + // do write-back for the source data because it's in the cache + TEST_ESP_OK(esp_cache_msync((void *)src_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_C2M)); +#endif + + printf("allocate a channel for memory copy\r\n"); + dw_gdma_channel_static_config_t static_config = { + .block_transfer_type = DW_GDMA_BLOCK_TRANSFER_CONTIGUOUS, + .role = DW_GDMA_ROLE_MEM, + .num_outstanding_requests = 1, + }; + dw_gdma_channel_alloc_config_t alloc_config = 
{ + .src = static_config, + .dst = static_config, + .flow_controller = DW_GDMA_FLOW_CTRL_SELF, // DMA as the flow controller + .chan_priority = 1, + }; + dw_gdma_channel_handle_t m2m_chan = NULL; + TEST_ESP_OK(dw_gdma_new_channel(&alloc_config, &m2m_chan)); + + printf("register event handler\r\n"); + dw_gdma_event_callbacks_t cbs = { + .on_full_trans_done = test_dw_gdma_conti_mode_trans_done_cb, + }; + TEST_ESP_OK(dw_gdma_channel_register_event_callbacks(m2m_chan, &cbs, done_sem)); + + printf("set up memory copy transaction\r\n"); + dw_gdma_block_transfer_config_t transfer_config = { + .src = { + .addr = (uint32_t)src_buf, + .burst_mode = DW_GDMA_BURST_MODE_INCREMENT, + .width = DW_GDMA_TRANS_WIDTH_8, + .burst_items = 4, + .burst_len = 0, + }, + .dst = { + .addr = (uint32_t)dst_buf, + .burst_mode = DW_GDMA_BURST_MODE_INCREMENT, + .width = DW_GDMA_TRANS_WIDTH_8, + .burst_items = 4, + .burst_len = 0, + }, + .size = 256, + }; + TEST_ESP_OK(dw_gdma_channel_config_transfer(m2m_chan, &transfer_config)); + + printf("start the DMA engine\r\n"); + TEST_ESP_OK(dw_gdma_channel_enable_ctrl(m2m_chan, true)); + + TEST_ASSERT_EQUAL(pdTRUE, xSemaphoreTake(done_sem, pdMS_TO_TICKS(100))); + // DMA should stop after the first block transfer is done + TEST_ASSERT_EQUAL(pdFALSE, xSemaphoreTake(done_sem, pdMS_TO_TICKS(100))); + + printf("check the memory copy result\r\n"); +#if CONFIG_IDF_TARGET_ESP32P4 + // the destination data are not reflected to the cache, so do an invalidate to ask the cache load new data + TEST_ESP_OK(esp_cache_msync((void *)dst_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_M2C)); +#endif + for (int i = 0; i < 256; i++) { + TEST_ASSERT_EQUAL_UINT8(i, dst_buf[i]); + } + + TEST_ESP_OK(dw_gdma_del_channel(m2m_chan)); + free(src_buf); + free(dst_buf); + vSemaphoreDelete(done_sem); +} + +static bool test_dw_gdma_reload_mode_block_done_cb(dw_gdma_channel_handle_t chan, const dw_gdma_trans_done_event_data_t *event_data, void *user_data) +{ + BaseType_t task_woken = pdFALSE; + 
SemaphoreHandle_t done_sem = (SemaphoreHandle_t)user_data; + xSemaphoreGiveFromISR(done_sem, &task_woken); + return task_woken == pdTRUE; +} + +TEST_CASE("DW_GDMA M2M Test: Reload Mode", "[DW_GDMA]") +{ + SemaphoreHandle_t done_sem = xSemaphoreCreateBinary(); + TEST_ASSERT_NOT_NULL(done_sem); + + printf("prepare the source and destination buffers\r\n"); + uint8_t *src_buf = heap_caps_aligned_calloc(64, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT); + uint8_t *dst_buf = heap_caps_aligned_calloc(64, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT); + TEST_ASSERT_NOT_NULL(src_buf); + TEST_ASSERT_NOT_NULL(dst_buf); + for (int i = 0; i < 256; i++) { + src_buf[i] = i; + } +#if CONFIG_IDF_TARGET_ESP32P4 + // do write-back for the source data because it's in the cache + TEST_ESP_OK(esp_cache_msync((void *)src_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_C2M)); +#endif + + printf("allocate a channel for memory copy\r\n"); + dw_gdma_channel_static_config_t static_config = { + .block_transfer_type = DW_GDMA_BLOCK_TRANSFER_RELOAD, + .role = DW_GDMA_ROLE_MEM, + .num_outstanding_requests = 1, + }; + dw_gdma_channel_alloc_config_t alloc_config = { + .src = static_config, + .dst = static_config, + .flow_controller = DW_GDMA_FLOW_CTRL_SELF, // DMA as the flow controller + .chan_priority = 1, + }; + dw_gdma_channel_handle_t m2m_chan = NULL; + TEST_ESP_OK(dw_gdma_new_channel(&alloc_config, &m2m_chan)); + + printf("register event handler\r\n"); + dw_gdma_event_callbacks_t cbs = { + .on_block_trans_done = test_dw_gdma_reload_mode_block_done_cb, + }; + TEST_ESP_OK(dw_gdma_channel_register_event_callbacks(m2m_chan, &cbs, done_sem)); + + printf("set up memory copy transaction\r\n"); + dw_gdma_block_transfer_config_t transfer_config = { + .src = { + .addr = (uint32_t)src_buf, + .burst_mode = DW_GDMA_BURST_MODE_INCREMENT, + .width = DW_GDMA_TRANS_WIDTH_8, + .burst_items = 4, + .burst_len = 0, + }, + .dst = { + .addr = (uint32_t)dst_buf, + .burst_mode = 
DW_GDMA_BURST_MODE_INCREMENT, + .width = DW_GDMA_TRANS_WIDTH_8, + .burst_items = 4, + .burst_len = 0, + }, + .size = 256, + }; + TEST_ESP_OK(dw_gdma_channel_config_transfer(m2m_chan, &transfer_config)); + + dw_gdma_block_markers_t markers = { + .en_trans_done_intr = true, // enable block trans done interrupt + }; + TEST_ESP_OK(dw_gdma_channel_set_block_markers(m2m_chan, markers)); + + printf("start the DMA engine\r\n"); + TEST_ESP_OK(dw_gdma_channel_enable_ctrl(m2m_chan, true)); + + // because of the auto-reload, we can keep receiving the block trans done event + TEST_ASSERT_EQUAL(pdTRUE, xSemaphoreTake(done_sem, pdMS_TO_TICKS(100))); + TEST_ASSERT_EQUAL(pdTRUE, xSemaphoreTake(done_sem, pdMS_TO_TICKS(100))); + TEST_ASSERT_EQUAL(pdTRUE, xSemaphoreTake(done_sem, pdMS_TO_TICKS(100))); + + printf("check the memory copy result\r\n"); +#if CONFIG_IDF_TARGET_ESP32P4 + // the destination data are not reflected to the cache, so do an invalidate to ask the cache load new data + TEST_ESP_OK(esp_cache_msync((void *)dst_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_M2C)); +#endif + for (int i = 0; i < 256; i++) { + TEST_ASSERT_EQUAL_UINT8(i, dst_buf[i]); + } + + // stop the DMA channel + TEST_ESP_OK(dw_gdma_channel_enable_ctrl(m2m_chan, false)); + TEST_ESP_OK(dw_gdma_del_channel(m2m_chan)); + free(src_buf); + free(dst_buf); + vSemaphoreDelete(done_sem); +} + +typedef struct { + SemaphoreHandle_t done_sem; + uint8_t count; +} test_gdma_shadow_mode_user_data_t; + +static bool test_dw_gdma_shadow_mode_block_invalid_cb(dw_gdma_channel_handle_t chan, const dw_gdma_break_event_data_t *event_data, void *user_data) +{ + BaseType_t task_woken = pdFALSE; + test_gdma_shadow_mode_user_data_t *udata = (test_gdma_shadow_mode_user_data_t *)user_data; + udata->count++; + + dw_gdma_block_markers_t markers = { + .is_last = true, // mark the block as the last one + .is_valid = true, // mark the block as valid so that the DMA can continue the transfer + }; + dw_gdma_channel_set_block_markers(chan, markers); 
+ // after the block is marked as valid again, tell the DMA to continue the transfer + dw_gdma_channel_continue(chan); + return task_woken == pdTRUE; +} + +static bool test_dw_gdma_shadow_mode_trans_done_cb(dw_gdma_channel_handle_t chan, const dw_gdma_trans_done_event_data_t *event_data, void *user_data) +{ + BaseType_t task_woken = pdFALSE; + test_gdma_shadow_mode_user_data_t *udata = (test_gdma_shadow_mode_user_data_t *)user_data; + SemaphoreHandle_t done_sem = udata->done_sem; + xSemaphoreGiveFromISR(done_sem, &task_woken); + return task_woken == pdTRUE; +} + +TEST_CASE("DW_GDMA M2M Test: Shadow Mode", "[DW_GDMA]") +{ + SemaphoreHandle_t done_sem = xSemaphoreCreateBinary(); + TEST_ASSERT_NOT_NULL(done_sem); + + printf("prepare the source and destination buffers\r\n"); + uint8_t *src_buf = heap_caps_aligned_calloc(64, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT); + uint8_t *dst_buf = heap_caps_aligned_calloc(64, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT); + TEST_ASSERT_NOT_NULL(src_buf); + TEST_ASSERT_NOT_NULL(dst_buf); + for (int i = 0; i < 256; i++) { + src_buf[i] = i; + } +#if CONFIG_IDF_TARGET_ESP32P4 + // do write-back for the source data because it's in the cache + TEST_ESP_OK(esp_cache_msync((void *)src_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_C2M)); +#endif + + printf("allocate a channel for memory copy\r\n"); + dw_gdma_channel_static_config_t static_config = { + .block_transfer_type = DW_GDMA_BLOCK_TRANSFER_SHADOW, + .role = DW_GDMA_ROLE_MEM, + .num_outstanding_requests = 1, + }; + dw_gdma_channel_alloc_config_t alloc_config = { + .src = static_config, + .dst = static_config, + .flow_controller = DW_GDMA_FLOW_CTRL_SELF, // DMA as the flow controller + .chan_priority = 1, + }; + dw_gdma_channel_handle_t m2m_chan = NULL; + TEST_ESP_OK(dw_gdma_new_channel(&alloc_config, &m2m_chan)); + + printf("set up memory copy transaction\r\n"); + dw_gdma_block_transfer_config_t transfer_config = { + .src = { + .addr = (uint32_t)src_buf, 
+ .burst_mode = DW_GDMA_BURST_MODE_INCREMENT, + .width = DW_GDMA_TRANS_WIDTH_8, + .burst_items = 4, + .burst_len = 0, + }, + .dst = { + .addr = (uint32_t)dst_buf, + .burst_mode = DW_GDMA_BURST_MODE_INCREMENT, + .width = DW_GDMA_TRANS_WIDTH_8, + .burst_items = 4, + .burst_len = 0, + }, + .size = 256, + }; + TEST_ESP_OK(dw_gdma_channel_config_transfer(m2m_chan, &transfer_config)); + dw_gdma_block_markers_t markers = { + .is_valid = true, // mark the block as valid so that the DMA can start the transfer + }; + TEST_ESP_OK(dw_gdma_channel_set_block_markers(m2m_chan, markers)); + + printf("register event handler\r\n"); + dw_gdma_event_callbacks_t cbs = { + .on_invalid_block = test_dw_gdma_shadow_mode_block_invalid_cb, + .on_full_trans_done = test_dw_gdma_shadow_mode_trans_done_cb, + }; + test_gdma_shadow_mode_user_data_t user_data = { + .done_sem = done_sem, + .count = 0, + }; + TEST_ESP_OK(dw_gdma_channel_register_event_callbacks(m2m_chan, &cbs, &user_data)); + + printf("start the DMA engine\r\n"); + TEST_ESP_OK(dw_gdma_channel_enable_ctrl(m2m_chan, true)); + + TEST_ASSERT_EQUAL(pdTRUE, xSemaphoreTake(done_sem, pdMS_TO_TICKS(1000))); + // should only go into the block invalid callback for once + TEST_ASSERT_EQUAL_UINT8(1, user_data.count); + + printf("check the memory copy result\r\n"); +#if CONFIG_IDF_TARGET_ESP32P4 + // the destination data are not reflected to the cache, so do an invalidate to ask the cache load new data + TEST_ESP_OK(esp_cache_msync((void *)dst_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_M2C)); +#endif + for (int i = 0; i < 256; i++) { + TEST_ASSERT_EQUAL_UINT8(i, dst_buf[i]); + } + + TEST_ESP_OK(dw_gdma_del_channel(m2m_chan)); + free(src_buf); + free(dst_buf); + vSemaphoreDelete(done_sem); +} + +typedef struct { + SemaphoreHandle_t done_sem; + void *dst_buffer_addr; + size_t dst_buffer_size; + uint8_t count; +} test_gdma_list_mode_user_data_t; + +static bool test_dw_gdma_list_mode_trans_done_cb(dw_gdma_channel_handle_t chan, const 
dw_gdma_trans_done_event_data_t *event_data, void *user_data) +{ + BaseType_t task_woken = pdFALSE; + test_gdma_list_mode_user_data_t *udata = (test_gdma_list_mode_user_data_t *)user_data; + SemaphoreHandle_t done_sem = udata->done_sem; + xSemaphoreGiveFromISR(done_sem, &task_woken); + return task_woken == pdTRUE; +} + +static bool test_dw_gdma_list_mode_invalid_block_cb(dw_gdma_channel_handle_t chan, const dw_gdma_break_event_data_t *event_data, void *user_data) +{ + test_gdma_list_mode_user_data_t *udata = (test_gdma_list_mode_user_data_t *)user_data; + dw_gdma_lli_handle_t lli = event_data->invalid_lli; + udata->count++; + // clear the destination buffer + memset(udata->dst_buffer_addr, 0, udata->dst_buffer_size); + dw_gdma_block_markers_t markers = { + .is_last = true, // mark the next block as the last one + .is_valid = true, // mark the block as valid so that the DMA can continue the transfer + }; + dw_gdma_lli_set_block_markers(lli, markers); + // after the item is marked as valid again, tell the DMA to continue the transfer + dw_gdma_channel_continue(chan); + return false; +} + +TEST_CASE("DW_GDMA M2M Test: Link-List Mode", "[DW_GDMA]") +{ + SemaphoreHandle_t done_sem = xSemaphoreCreateBinary(); + TEST_ASSERT_NOT_NULL(done_sem); + + printf("prepare the source and destination buffers\r\n"); + uint8_t *src_buf = heap_caps_aligned_calloc(64, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT); + uint8_t *dst_buf = heap_caps_aligned_calloc(64, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT); + TEST_ASSERT_NOT_NULL(src_buf); + TEST_ASSERT_NOT_NULL(dst_buf); + for (int i = 0; i < 256; i++) { + src_buf[i] = i; + } +#if CONFIG_IDF_TARGET_ESP32P4 + // do write-back for the source data because it's in the cache + TEST_ESP_OK(esp_cache_msync((void *)src_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_C2M)); +#endif + + printf("allocate a channel for memory copy\r\n"); + dw_gdma_channel_static_config_t static_config = { + .block_transfer_type = 
DW_GDMA_BLOCK_TRANSFER_LIST, + .role = DW_GDMA_ROLE_MEM, + .num_outstanding_requests = 1, + }; + dw_gdma_channel_alloc_config_t alloc_config = { + .src = static_config, + .dst = static_config, + .flow_controller = DW_GDMA_FLOW_CTRL_SELF, // DMA as the flow controller + .chan_priority = 1, + }; + dw_gdma_channel_handle_t m2m_chan = NULL; + TEST_ESP_OK(dw_gdma_new_channel(&alloc_config, &m2m_chan)); + + printf("create singly DMA link list\r\n"); + dw_gdma_link_list_config_t link_list_config = { + .num_items = 2, + .link_type = DW_GDMA_LINKED_LIST_TYPE_SINGLY, + }; + dw_gdma_link_list_handle_t link_list = NULL; + TEST_ESP_OK(dw_gdma_new_link_list(&link_list_config, &link_list)); + + printf("set up memory copy transaction\r\n"); + dw_gdma_block_transfer_config_t transfer_config = { + .src = { + .addr = (uint32_t)src_buf, + .burst_mode = DW_GDMA_BURST_MODE_INCREMENT, + .width = DW_GDMA_TRANS_WIDTH_8, + .burst_items = 4, + .burst_len = 0, + }, + .dst = { + .addr = (uint32_t)dst_buf, + .burst_mode = DW_GDMA_BURST_MODE_INCREMENT, + .width = DW_GDMA_TRANS_WIDTH_8, + .burst_items = 4, + .burst_len = 0, + }, + .size = 128, + }; + dw_gdma_block_markers_t markers = { + .is_valid = true, // mark the block as valid so that the DMA can start the transfer + }; + TEST_ESP_OK(dw_gdma_lli_config_transfer(dw_gdma_link_list_get_item(link_list, 0), &transfer_config)); + TEST_ESP_OK(dw_gdma_lli_set_block_markers(dw_gdma_link_list_get_item(link_list, 0), markers)); + transfer_config.src.addr = (uint32_t)(src_buf + 128); + transfer_config.dst.addr = (uint32_t)(dst_buf + 128); + markers.is_last = true; + TEST_ESP_OK(dw_gdma_lli_config_transfer(dw_gdma_link_list_get_item(link_list, 1), &transfer_config)); + TEST_ESP_OK(dw_gdma_lli_set_block_markers(dw_gdma_link_list_get_item(link_list, 1), markers)); + + printf("register event handler\r\n"); + dw_gdma_event_callbacks_t cbs = { + .on_full_trans_done = test_dw_gdma_list_mode_trans_done_cb, + .on_invalid_block = 
test_dw_gdma_list_mode_invalid_block_cb, + }; + test_gdma_list_mode_user_data_t user_data = { + .done_sem = done_sem, + .count = 0, + .dst_buffer_addr = dst_buf, + .dst_buffer_size = 256, + }; + TEST_ESP_OK(dw_gdma_channel_register_event_callbacks(m2m_chan, &cbs, &user_data)); + + printf("use the link list\r\n"); + TEST_ESP_OK(dw_gdma_channel_use_link_list(m2m_chan, link_list)); + printf("start the DMA engine\r\n"); + TEST_ESP_OK(dw_gdma_channel_enable_ctrl(m2m_chan, true)); + + TEST_ASSERT_EQUAL(pdTRUE, xSemaphoreTake(done_sem, pdMS_TO_TICKS(1000))); + + printf("check the memory copy result\r\n"); +#if CONFIG_IDF_TARGET_ESP32P4 + // the destination data are not reflected to the cache, so do an invalidate to ask the cache load new data + TEST_ESP_OK(esp_cache_msync((void *)dst_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_M2C)); +#endif + for (int i = 0; i < 256; i++) { + TEST_ASSERT_EQUAL_UINT8(i, dst_buf[i]); + } + + // delete the singly link list, and create a circular link list instead + TEST_ESP_OK(dw_gdma_del_link_list(link_list)); + printf("create circular DMA link list\r\n"); + link_list_config.link_type = DW_GDMA_LINKED_LIST_TYPE_CIRCULAR; + link_list_config.num_items = 1; + TEST_ESP_OK(dw_gdma_new_link_list(&link_list_config, &link_list)); + // set the transfer parameters for the link list item + transfer_config.size = 256; + transfer_config.src.addr = (uint32_t)src_buf; + transfer_config.dst.addr = (uint32_t)dst_buf; + TEST_ESP_OK(dw_gdma_lli_config_transfer(dw_gdma_link_list_get_item(link_list, 0), &transfer_config)); + markers.is_valid = true; + markers.is_last = false; + TEST_ESP_OK(dw_gdma_lli_set_block_markers(dw_gdma_link_list_get_item(link_list, 0), markers)); + + printf("use the link list\r\n"); + TEST_ESP_OK(dw_gdma_channel_use_link_list(m2m_chan, link_list)); + TEST_ESP_OK(dw_gdma_channel_enable_ctrl(m2m_chan, true)); + + TEST_ASSERT_EQUAL(pdTRUE, xSemaphoreTake(done_sem, pdMS_TO_TICKS(1000))); + + printf("check the memory copy result\r\n"); +#if 
CONFIG_IDF_TARGET_ESP32P4 + // the destination data are not reflected to the cache, so do an invalidate to ask the cache load new data + TEST_ESP_OK(esp_cache_msync((void *)dst_buf, 256, ESP_CACHE_MSYNC_FLAG_DIR_M2C)); +#endif + for (int i = 0; i < 256; i++) { + TEST_ASSERT_EQUAL_UINT8(i, dst_buf[i]); + } + + TEST_ESP_OK(dw_gdma_del_link_list(link_list)); + TEST_ESP_OK(dw_gdma_del_channel(m2m_chan)); + free(src_buf); + free(dst_buf); + vSemaphoreDelete(done_sem); +} diff --git a/components/hal/dw_gdma_hal.c b/components/hal/dw_gdma_hal.c index b1e576f34d..1549b0dde3 100644 --- a/components/hal/dw_gdma_hal.c +++ b/components/hal/dw_gdma_hal.c @@ -12,4 +12,14 @@ void dw_gdma_hal_init(dw_gdma_hal_context_t *hal, const dw_gdma_hal_config_t *config) { hal->dev = DW_GDMA_LL_GET_HW(); + dw_gdma_ll_reset(hal->dev); + dw_gdma_ll_enable_controller(hal->dev, true); + dw_gdma_ll_enable_intr_global(hal->dev, true); +} + +void dw_gdma_hal_deinit(dw_gdma_hal_context_t *hal) +{ + dw_gdma_ll_enable_intr_global(hal->dev, false); + dw_gdma_ll_enable_controller(hal->dev, false); + hal->dev = NULL; } diff --git a/components/hal/esp32p4/include/hal/dw_gdma_ll.h b/components/hal/esp32p4/include/hal/dw_gdma_ll.h index b710340d1a..11400503a6 100644 --- a/components/hal/esp32p4/include/hal/dw_gdma_ll.h +++ b/components/hal/esp32p4/include/hal/dw_gdma_ll.h @@ -8,16 +8,26 @@ #include #include -#include "hal/assert.h" +#include "esp_assert.h" +#include "hal/misc.h" +#include "hal/dw_gdma_types.h" #include "soc/dw_gdma_struct.h" #include "soc/hp_sys_clkrst_struct.h" +#include "soc/reg_base.h" #define DW_GDMA_LL_GET_HW() (&DW_GDMA) +#define DW_GDMA_LL_GROUPS 1 // there's one DW-GDMA instance connected to the AXI bus +#define DW_GDMA_LL_CHANNELS_PER_GROUP 4 // there are 4 independent channels in the DW-GDMA + #define DW_GDMA_LL_MASTER_PORT_MIPI_DSI 0 // DW_GDMA master 0 can access DSI bridge #define DW_GDMA_LL_MASTER_PORT_MIPI_CSI 0 // DW_GDMA master 0 can access CSI bridge #define 
DW_GDMA_LL_MASTER_PORT_MEMORY 1 // DW_GDMA master 1 can only access L2MEM & ROM & MSPI Flash/PSRAM +#define DW_GDMA_LL_MAX_OUTSTANDING_REQUESTS 16 // maximum number of outstanding requests + +#define DW_GDMA_LL_LINK_LIST_ALIGNMENT 64 // link list item must be aligned to 64 bytes + // Common event bitmap #define DW_GDMA_LL_COMMON_EVENT_SLVIF_DEC_ERR (0x1 << 0) #define DW_GDMA_LL_COMMON_EVENT_SLVIF_WR2RO_ERR (0x1 << 1) @@ -58,71 +68,13 @@ #define DW_GDMA_LL_CHANNEL_EVENT_DISABLED (0x1 << 30) #define DW_GDMA_LL_CHANNEL_EVENT_ABORTED (0x1 << 31) +#define DW_GDMA_LL_CHANNEL_EVENT_MASK(chan) (1 << (chan)) +#define DW_GDMA_LL_COMMON_EVENT_MASK (1 << 16) + #ifdef __cplusplus extern "C" { #endif -/** - * @brief DW_GDMA transfer width - */ -typedef enum { - DW_GDMA_LL_TRANS_WIDTH_8, /*!< Data transfer width: 8 bits */ - DW_GDMA_LL_TRANS_WIDTH_16, /*!< Data transfer width: 16 bits */ - DW_GDMA_LL_TRANS_WIDTH_32, /*!< Data transfer width: 32 bits */ - DW_GDMA_LL_TRANS_WIDTH_64, /*!< Data transfer width: 64 bits */ - DW_GDMA_LL_TRANS_WIDTH_128, /*!< Data transfer width: 128 bits */ - DW_GDMA_LL_TRANS_WIDTH_256, /*!< Data transfer width: 256 bits */ - DW_GDMA_LL_TRANS_WIDTH_512, /*!< Data transfer width: 512 bits */ -} dw_gdma_ll_transfer_width_t; - -/** - * @brief DW_GDMA burst items - */ -typedef enum { - DW_GDMA_LL_BURST_ITEMS_1, /*!< 1 data items in the burst transaction */ - DW_GDMA_LL_BURST_ITEMS_4, /*!< 4 data items in the burst transaction */ - DW_GDMA_LL_BURST_ITEMS_8, /*!< 8 data items in the burst transaction */ - DW_GDMA_LL_BURST_ITEMS_16, /*!< 16 data items in the burst transaction */ - DW_GDMA_LL_BURST_ITEMS_32, /*!< 32 data items in the burst transaction */ - DW_GDMA_LL_BURST_ITEMS_64, /*!< 64 data items in the burst transaction */ - DW_GDMA_LL_BURST_ITEMS_128, /*!< 128 data items in the burst transaction */ - DW_GDMA_LL_BURST_ITEMS_256, /*!< 256 data items in the burst transaction */ - DW_GDMA_LL_BURST_ITEMS_512, /*!< 512 data items in the burst transaction */ - 
DW_GDMA_LL_BURST_ITEMS_1024, /*!< 1024 data items in the burst transaction */ -} dw_gdma_ll_burst_items_t; - -/** - * @brief Multi block transfer type - */ -typedef enum { - DW_GDMA_LL_MULTI_BLOCK_CONTIGUOUS, /*!< Contiguous */ - DW_GDMA_LL_MULTI_BLOCK_RELOAD, /*!< Reload */ - DW_GDMA_LL_MULTI_BLOCK_SHADOW_REG, /*!< Shadow register */ - DW_GDMA_LL_MULTI_BLOCK_LINK_LIST, /*!< Link list */ -} dw_gdma_ll_multi_block_type_t; - -/** - * @brief Transfer type and flow control - */ -typedef enum { - DW_GDMA_LL_FLOW_M2M_DMAC, /*!< Flow: memory to memory, controller: DMA engine */ - DW_GDMA_LL_FLOW_M2P_DMAC, /*!< Flow: memory to peripheral, controller: DMA engine */ - DW_GDMA_LL_FLOW_P2M_DMAC, /*!< Flow: peripheral to memory, controller: DMA engine */ - DW_GDMA_LL_FLOW_P2P_DMAC, /*!< Flow: peripheral to peripheral, controller: DMA engine */ - DW_GDMA_LL_FLOW_P2M_SRC, /*!< Flow: peripheral to memory, controller: source peripheral */ - DW_GDMA_LL_FLOW_P2P_SRC, /*!< Flow: peripheral to peripheral, controller: source peripheral */ - DW_GDMA_LL_FLOW_M2P_DST, /*!< Flow: memory to peripheral, controller: destination peripheral */ - DW_GDMA_LL_FLOW_P2P_DST, /*!< Flow: peripheral to peripheral, controller: destination peripheral */ -} dw_gdma_ll_trans_flow_t; - -/** - * @brief Handshake interface - */ -typedef enum { - DW_GDMA_LL_HANDSHAKE_HW, /*!< Transaction requests are initiated by hardware */ - DW_GDMA_LL_HANDSHAKE_SW, /*!< Transaction requests are initiated by software */ -} dw_gdma_ll_handshake_interface_t; - /** * @brief Handshake number for different peripherals */ @@ -132,14 +84,6 @@ typedef enum { DW_GDMA_LL_HW_HANDSHAKE_PERIPH_ISP, /*!< Handshake peripheral is ISP */ } dw_gdma_ll_hw_handshake_periph_t; -/** - * @brief Channel lock level - */ -typedef enum { - DW_GDMA_LL_LOCK_LEVEL_FULL_TRANS, /*!< Lock over complete DMA transfer */ - DW_GDMA_LL_LOCK_LEVEL_BLOCK_TRANS, /*!< Lock over DMA block transfer */ -} dw_gdma_ll_lock_level_t; - /** * @brief Enable the bus clock for 
the DMA module */ @@ -205,14 +149,14 @@ static inline void dw_gdma_ll_enable_intr_global(dw_gdma_dev_t *dev, bool en) } /** - * @brief Check if the common register interrupt is active + * @brief Get the address of the interrupt status register * * @param dev Pointer to the DW_GDMA registers - * @return True: common register interrupt is active, False: common register interrupt is inactive + * @return Address of the interrupt status register */ -static inline bool dw_gdma_ll_is_common_intr_active(dw_gdma_dev_t *dev) +static inline volatile void *dw_gdma_ll_get_intr_status_reg(dw_gdma_dev_t *dev) { - return dev->int_st0.commonreg_intstat; + return &dev->int_st0; } /** @@ -312,7 +256,8 @@ static inline void dw_gdma_ll_channel_enable_intr_propagation(dw_gdma_dev_t *dev * @param channel Channel number * @return Mask of the channel interrupt status */ -static inline uint32_t dw_gdma_ll_channel_get_inr_status(dw_gdma_dev_t *dev, uint8_t channel) +__attribute__((always_inline)) +static inline uint32_t dw_gdma_ll_channel_get_intr_status(dw_gdma_dev_t *dev, uint8_t channel) { return dev->ch[channel].int_st0.val; } @@ -324,6 +269,7 @@ static inline uint32_t dw_gdma_ll_channel_get_inr_status(dw_gdma_dev_t *dev, uin * @param channel Channel number * @param mask Mask of the interrupt to clear */ +__attribute__((always_inline)) static inline void dw_gdma_ll_channel_clear_intr(dw_gdma_dev_t *dev, uint8_t channel, uint32_t mask) { dev->ch[channel].int_clr0.val = mask; @@ -380,18 +326,6 @@ static inline void dw_gdma_ll_channel_abort(dw_gdma_dev_t *dev, uint8_t channel) dev->chen1.val = 0x101 << channel; } -/** - * @brief Check if the DMA channel interrupt is active - * - * @param dev Pointer to the DW_GDMA registers - * @param channel Channel number - * @return True: channel interrupt is active, False: channel interrupt is inactive - */ -static inline bool dw_gdma_ll_channel_is_interrupt_active(dw_gdma_dev_t *dev, uint8_t channel) -{ - return dev->int_st0.val & (1 << channel); -} - 
/** * @brief Set the source address of the DMA transfer * @@ -399,6 +333,7 @@ static inline bool dw_gdma_ll_channel_is_interrupt_active(dw_gdma_dev_t *dev, ui * @param channel Channel number * @param src_addr Source address */ +__attribute__((always_inline)) static inline void dw_gdma_ll_channel_set_src_addr(dw_gdma_dev_t *dev, uint8_t channel, uint32_t src_addr) { dev->ch[channel].sar0.sar0 = src_addr; @@ -411,6 +346,7 @@ static inline void dw_gdma_ll_channel_set_src_addr(dw_gdma_dev_t *dev, uint8_t c * @param channel Channel number * @param dst_addr Destination address */ +__attribute__((always_inline)) static inline void dw_gdma_ll_channel_set_dst_addr(dw_gdma_dev_t *dev, uint8_t channel, uint32_t dst_addr) { dev->ch[channel].dar0.dar0 = dst_addr; @@ -419,71 +355,80 @@ static inline void dw_gdma_ll_channel_set_dst_addr(dw_gdma_dev_t *dev, uint8_t c /** * @brief Set the number of data to be transferred * - * @note data_transfer_width * item_amount determins the total bytes in one block transfer. + * @note "transfer width" * "transfer block size" = the total bytes in one block transfer * * @param dev Pointer to the DW_GDMA registers * @param channel Channel number - * @param item_numbers Number of transfer items + * @param sz Number of transfer items */ -static inline void dw_gdma_ll_channel_set_trans_amount(dw_gdma_dev_t *dev, uint8_t channel, uint32_t item_numbers) +__attribute__((always_inline)) +static inline void dw_gdma_ll_channel_set_trans_block_size(dw_gdma_dev_t *dev, uint8_t channel, uint32_t sz) { - dev->ch[channel].block_ts0.block_ts = item_numbers - 1; + dev->ch[channel].block_ts0.block_ts = sz - 1; } /** - * @brief Set the source master port - * - * @note The choice of master port depends on the location of the source data. 
+ * @brief Set the source master port based on the memory address * * @param dev Pointer to the DW_GDMA registers * @param channel Channel number - * @param port Source master port + * @param mem_addr Memory address */ -static inline void dw_gdma_ll_channel_set_src_master_port(dw_gdma_dev_t *dev, uint8_t channel, uint32_t port) +__attribute__((always_inline)) +static inline void dw_gdma_ll_channel_set_src_master_port(dw_gdma_dev_t *dev, uint8_t channel, intptr_t mem_addr) { - dev->ch[channel].ctl0.sms = port; + if (mem_addr == MIPI_CSI_MEM_BASE) { + dev->ch[channel].ctl0.sms = DW_GDMA_LL_MASTER_PORT_MIPI_CSI; + } else { + dev->ch[channel].ctl0.sms = DW_GDMA_LL_MASTER_PORT_MEMORY; + } } /** - * @brief Set the destination master port - * - * @note The choice of master port depends on the location of the destination data. + * @brief Set the destination master port based on the memory address * * @param dev Pointer to the DW_GDMA registers * @param channel Channel number - * @param port Destination master port + * @param mem_addr Memory address */ -static inline void dw_gdma_ll_channel_set_dst_master_port(dw_gdma_dev_t *dev, uint8_t channel, uint32_t port) +__attribute__((always_inline)) +static inline void dw_gdma_ll_channel_set_dst_master_port(dw_gdma_dev_t *dev, uint8_t channel, intptr_t mem_addr) { - dev->ch[channel].ctl0.dms = port; + if (mem_addr == MIPI_DSI_MEM_BASE) { + dev->ch[channel].ctl0.dms = DW_GDMA_LL_MASTER_PORT_MIPI_DSI; + } else { + dev->ch[channel].ctl0.dms = DW_GDMA_LL_MASTER_PORT_MEMORY; + } } /** - * @brief Enable the source address increment + * @brief Enable the source address burst mode * * @note Increase the source address by the data width after each transfer * * @param dev Pointer to the DW_GDMA registers * @param channel Channel number - * @param en True to enable, false to disable + * @param mode Address burst mode */ -static inline void dw_gdma_ll_channel_enable_src_addr_increment(dw_gdma_dev_t *dev, uint8_t channel, bool en) 
+__attribute__((always_inline)) +static inline void dw_gdma_ll_channel_set_src_burst_mode(dw_gdma_dev_t *dev, uint8_t channel, dw_gdma_burst_mode_t mode) { - dev->ch[channel].ctl0.sinc = !en; + dev->ch[channel].ctl0.sinc = mode; } /** - * @brief Enable the destination address increment + * @brief Enable the destination address burst mode * * @note Increase the destination address by the data width after each transfer * * @param dev Pointer to the DW_GDMA registers * @param channel Channel number - * @param en True to enable, false to disable + * @param mode Address burst mode */ -static inline void dw_gdma_ll_channel_enable_dst_addr_increment(dw_gdma_dev_t *dev, uint8_t channel, bool en) +__attribute__((always_inline)) +static inline void dw_gdma_ll_channel_set_dst_burst_mode(dw_gdma_dev_t *dev, uint8_t channel, dw_gdma_burst_mode_t mode) { - dev->ch[channel].ctl0.dinc = !en; + dev->ch[channel].ctl0.dinc = mode; } /** @@ -493,7 +438,8 @@ static inline void dw_gdma_ll_channel_enable_dst_addr_increment(dw_gdma_dev_t *d * @param channel Channel number * @param width Transfer width */ -static inline void dw_gdma_ll_channel_set_src_trans_width(dw_gdma_dev_t *dev, uint8_t channel, dw_gdma_ll_transfer_width_t width) +__attribute__((always_inline)) +static inline void dw_gdma_ll_channel_set_src_trans_width(dw_gdma_dev_t *dev, uint8_t channel, dw_gdma_transfer_width_t width) { dev->ch[channel].ctl0.src_tr_width = width; } @@ -505,7 +451,8 @@ static inline void dw_gdma_ll_channel_set_src_trans_width(dw_gdma_dev_t *dev, ui * @param channel Channel number * @param width Transfer width */ -static inline void dw_gdma_ll_channel_set_dst_trans_width(dw_gdma_dev_t *dev, uint8_t channel, dw_gdma_ll_transfer_width_t width) +__attribute__((always_inline)) +static inline void dw_gdma_ll_channel_set_dst_trans_width(dw_gdma_dev_t *dev, uint8_t channel, dw_gdma_transfer_width_t width) { dev->ch[channel].ctl0.dst_tr_width = width; } @@ -517,7 +464,8 @@ static inline void 
dw_gdma_ll_channel_set_dst_trans_width(dw_gdma_dev_t *dev, ui * @param channel Channel number * @param items Number of data items */ -static inline void dw_gdma_ll_channel_set_src_burst_items(dw_gdma_dev_t *dev, uint8_t channel, dw_gdma_ll_burst_items_t items) +__attribute__((always_inline)) +static inline void dw_gdma_ll_channel_set_src_burst_items(dw_gdma_dev_t *dev, uint8_t channel, dw_gdma_burst_items_t items) { dev->ch[channel].ctl0.src_msize = items; } @@ -529,7 +477,8 @@ static inline void dw_gdma_ll_channel_set_src_burst_items(dw_gdma_dev_t *dev, ui * @param channel Channel number * @param items Number of data items */ -static inline void dw_gdma_ll_channel_set_dst_burst_items(dw_gdma_dev_t *dev, uint8_t channel, dw_gdma_ll_burst_items_t items) +__attribute__((always_inline)) +static inline void dw_gdma_ll_channel_set_dst_burst_items(dw_gdma_dev_t *dev, uint8_t channel, dw_gdma_burst_items_t items) { dev->ch[channel].ctl0.dst_msize = items; } @@ -543,10 +492,11 @@ static inline void dw_gdma_ll_channel_set_dst_burst_items(dw_gdma_dev_t *dev, ui * @param channel Channel number * @param len Burst length */ -static inline void dw_gdma_ll_channel_set_src_burst_len(dw_gdma_dev_t *dev, uint8_t channel, uint32_t len) +__attribute__((always_inline)) +static inline void dw_gdma_ll_channel_set_src_burst_len(dw_gdma_dev_t *dev, uint8_t channel, uint8_t len) { - dev->ch[channel].ctl1.arlen_en = 1; - dev->ch[channel].ctl1.arlen = len - 1; + dev->ch[channel].ctl1.arlen_en = len > 0; + HAL_FORCE_MODIFY_U32_REG_FIELD(dev->ch[channel].ctl1, arlen, len); } /** @@ -556,22 +506,59 @@ static inline void dw_gdma_ll_channel_set_src_burst_len(dw_gdma_dev_t *dev, uint * @param channel Channel number * @param len Burst length */ -static inline void dw_gdma_ll_channel_set_dst_burst_len(dw_gdma_dev_t *dev, uint8_t channel, uint32_t len) +__attribute__((always_inline)) +static inline void dw_gdma_ll_channel_set_dst_burst_len(dw_gdma_dev_t *dev, uint8_t channel, uint8_t len) { - 
dev->ch[channel].ctl1.awlen_en = 1; - dev->ch[channel].ctl1.awlen = len - 1; + dev->ch[channel].ctl1.awlen_en = len > 0; + HAL_FORCE_MODIFY_U32_REG_FIELD(dev->ch[channel].ctl1, awlen, len); } /** - * @brief Enable to generate an interrupt when the block transfer is done + * @brief Set block markers + * + * @note This is only valid for `DW_GDMA_BLOCK_TRANSFER_SHADOW` transfer type * * @param dev Pointer to the DW_GDMA registers * @param channel Channel number - * @param en True to enable, false to disable + * @param en_intr True to generate an interrupt when the block transfer is done, false to disable + * @param is_last True to mark the block transfer as the last one + * @param is_valid True to mark the block transfer as valid */ -static inline void dw_gdma_ll_channel_enable_intr_block_trans_done(dw_gdma_dev_t *dev, uint8_t channel, bool en) +__attribute__((always_inline)) +static inline void dw_gdma_ll_channel_set_block_markers(dw_gdma_dev_t *dev, uint8_t channel, bool en_intr, bool is_last, bool is_valid) { - dev->ch[channel].ctl1.ioc_blktfr = en; + dmac_chn_ctl1_reg_t ctrl1; + ctrl1.val = dev->ch[channel].ctl1.val; + ctrl1.shadowreg_or_lli_last = is_last; + ctrl1.ioc_blktfr = en_intr; + ctrl1.shadowreg_or_lli_valid = is_valid; + dev->ch[channel].ctl1.val = ctrl1.val; +} + +/** + * @brief Whether to enable the status write back for the source peripheral + * + * @param dev Pointer to the DW_GDMA registers + * @param channel Channel number + * @param en True to enable write back, false to disable + */ +__attribute__((always_inline)) +static inline void dw_gdma_ll_channel_enable_src_periph_status_write_back(dw_gdma_dev_t *dev, uint8_t channel, bool en) +{ + dev->ch[channel].ctl1.src_stat_en = en; +} + +/** + * @brief Whether to enable the status write back for the destination peripheral + * + * @param dev Pointer to the DW_GDMA registers + * @param channel Channel number + * @param en True to enable write back, false to disable + */ +__attribute__((always_inline)) 
+static inline void dw_gdma_ll_channel_enable_dst_periph_status_write_back(dw_gdma_dev_t *dev, uint8_t channel, bool en) +{ + dev->ch[channel].ctl1.dst_stat_en = en; } /** @@ -581,7 +568,7 @@ static inline void dw_gdma_ll_channel_enable_intr_block_trans_done(dw_gdma_dev_t * @param channel Channel number * @param type Multi block transfer type */ -static inline void dw_gdma_ll_channel_set_src_multi_block_type(dw_gdma_dev_t *dev, uint8_t channel, dw_gdma_ll_multi_block_type_t type) +static inline void dw_gdma_ll_channel_set_src_multi_block_type(dw_gdma_dev_t *dev, uint8_t channel, dw_gdma_block_transfer_type_t type) { dev->ch[channel].cfg0.src_multblk_type = type; } @@ -593,49 +580,70 @@ static inline void dw_gdma_ll_channel_set_src_multi_block_type(dw_gdma_dev_t *de * @param channel Channel number * @param type Multi block transfer type */ -static inline void dw_gdma_ll_channel_set_dst_multi_block_type(dw_gdma_dev_t *dev, uint8_t channel, dw_gdma_ll_multi_block_type_t type) +static inline void dw_gdma_ll_channel_set_dst_multi_block_type(dw_gdma_dev_t *dev, uint8_t channel, dw_gdma_block_transfer_type_t type) { dev->ch[channel].cfg0.dst_multblk_type = type; } /** - * @brief Set the unique ID for the source peripheral - * - * @note This ID is related to Out-of-order transaction - * - * @param dev Pointer to the DW_GDMA registers - * @param channel Channel number - * @param uid Unique ID + * @brief Transfer type and flow control */ -static inline void dw_gdma_ll_channel_set_src_uid(dw_gdma_dev_t *dev, uint8_t channel, uint32_t uid) -{ - dev->ch[channel].cfg0.rd_uid = uid; -} +typedef enum { + DW_GDMA_LL_FLOW_M2M_DMAC, /*!< Flow: memory to memory, controller: DMA engine */ + DW_GDMA_LL_FLOW_M2P_DMAC, /*!< Flow: memory to peripheral, controller: DMA engine */ + DW_GDMA_LL_FLOW_P2M_DMAC, /*!< Flow: peripheral to memory, controller: DMA engine */ + DW_GDMA_LL_FLOW_P2P_DMAC, /*!< Flow: peripheral to peripheral, controller: DMA engine */ + DW_GDMA_LL_FLOW_P2M_SRC, /*!< Flow: 
peripheral to memory, controller: source peripheral */ + DW_GDMA_LL_FLOW_P2P_SRC, /*!< Flow: peripheral to peripheral, controller: source peripheral */ + DW_GDMA_LL_FLOW_M2P_DST, /*!< Flow: memory to peripheral, controller: destination peripheral */ + DW_GDMA_LL_FLOW_P2P_DST, /*!< Flow: peripheral to peripheral, controller: destination peripheral */ +} dw_gdma_ll_trans_flow_t; + +#define _MAKE_GDMA_FLOW_CTRL_CODE(src, dst, con) ((src) << 3 | (dst) << 2 | (con)) /** - * @brief Set the unique ID for the destination peripheral - * - * @note This ID is related to Out-of-order transaction + * @brief Set transfer flow controller * * @param dev Pointer to the DW_GDMA registers * @param channel Channel number - * @param uid Unique ID + * @param src_role Source target role + * @param dst_role Destination target role + * @param controller Flow controller */ -static inline void dw_gdma_ll_channel_set_dst_uid(dw_gdma_dev_t *dev, uint8_t channel, uint32_t uid) +static inline void dw_gdma_ll_channel_set_trans_flow(dw_gdma_dev_t *dev, uint8_t channel, dw_gdma_role_t src_role, dw_gdma_role_t dst_role, dw_gdma_flow_controller_t controller) { - dev->ch[channel].cfg0.wr_uid = uid; -} - -/** - * @brief Set transfer type and flow control - * - * @param dev Pointer to the DW_GDMA registers - * @param channel Channel number - * @param flow Transfer flow control - */ -static inline void dw_gdma_ll_channel_set_trans_flow(dw_gdma_dev_t *dev, uint8_t channel, dw_gdma_ll_trans_flow_t flow) -{ - dev->ch[channel].cfg1.tt_fc = flow; + bool src_is_perih = (src_role != DW_GDMA_ROLE_MEM); + bool dst_is_perih = (dst_role != DW_GDMA_ROLE_MEM); + uint32_t fc_code = _MAKE_GDMA_FLOW_CTRL_CODE(src_is_perih, dst_is_perih, controller); + switch (fc_code) { + case _MAKE_GDMA_FLOW_CTRL_CODE(0, 0, DW_GDMA_FLOW_CTRL_SELF): + dev->ch[channel].cfg1.tt_fc = DW_GDMA_LL_FLOW_M2M_DMAC; + break; + case _MAKE_GDMA_FLOW_CTRL_CODE(0, 1, DW_GDMA_FLOW_CTRL_SELF): + dev->ch[channel].cfg1.tt_fc = DW_GDMA_LL_FLOW_M2P_DMAC; + 
break; + case _MAKE_GDMA_FLOW_CTRL_CODE(1, 0, DW_GDMA_FLOW_CTRL_SELF): + dev->ch[channel].cfg1.tt_fc = DW_GDMA_LL_FLOW_P2M_DMAC; + break; + case _MAKE_GDMA_FLOW_CTRL_CODE(1, 1, DW_GDMA_FLOW_CTRL_SELF): + dev->ch[channel].cfg1.tt_fc = DW_GDMA_LL_FLOW_P2P_DMAC; + break; + case _MAKE_GDMA_FLOW_CTRL_CODE(1, 0, DW_GDMA_FLOW_CTRL_SRC): + dev->ch[channel].cfg1.tt_fc = DW_GDMA_LL_FLOW_P2M_SRC; + break; + case _MAKE_GDMA_FLOW_CTRL_CODE(1, 1, DW_GDMA_FLOW_CTRL_SRC): + dev->ch[channel].cfg1.tt_fc = DW_GDMA_LL_FLOW_P2P_SRC; + break; + case _MAKE_GDMA_FLOW_CTRL_CODE(0, 1, DW_GDMA_FLOW_CTRL_DST): + dev->ch[channel].cfg1.tt_fc = DW_GDMA_LL_FLOW_M2P_DST; + break; + case _MAKE_GDMA_FLOW_CTRL_CODE(1, 1, DW_GDMA_FLOW_CTRL_DST): + dev->ch[channel].cfg1.tt_fc = DW_GDMA_LL_FLOW_P2P_DST; + break; + default: + abort(); + break; + } } /** @@ -647,7 +655,7 @@ static inline void dw_gdma_ll_channel_set_trans_flow(dw_gdma_dev_t *dev, uint8_t * @param channel Channel number * @param hs Handshaking interface */ -static inline void dw_gdma_ll_channel_set_src_handshake_interface(dw_gdma_dev_t *dev, uint8_t channel, dw_gdma_ll_handshake_interface_t hs) +static inline void dw_gdma_ll_channel_set_src_handshake_interface(dw_gdma_dev_t *dev, uint8_t channel, dw_gdma_handshake_type_t hs) { dev->ch[channel].cfg1.hs_sel_src = hs; } @@ -661,7 +669,7 @@ static inline void dw_gdma_ll_channel_set_src_handshake_interface(dw_gdma_dev_t * @param channel Channel number * @param hs Handshaking interface */ -static inline void dw_gdma_ll_channel_set_dst_handshake_interface(dw_gdma_dev_t *dev, uint8_t channel, dw_gdma_ll_handshake_interface_t hs) +static inline void dw_gdma_ll_channel_set_dst_handshake_interface(dw_gdma_dev_t *dev, uint8_t channel, dw_gdma_handshake_type_t hs) { dev->ch[channel].cfg1.hs_sel_dst = hs; } @@ -675,9 +683,22 @@ static inline void dw_gdma_ll_channel_set_dst_handshake_interface(dw_gdma_dev_t * @param channel Channel number * @param periph Peripheral ID */ -static inline void 
dw_gdma_ll_channel_set_src_handshake_periph(dw_gdma_dev_t *dev, uint8_t channel, dw_gdma_ll_hw_handshake_periph_t periph) +static inline void dw_gdma_ll_channel_set_src_handshake_periph(dw_gdma_dev_t *dev, uint8_t channel, dw_gdma_role_t periph) { - dev->ch[channel].cfg1.src_per = periph; + switch (periph) { + case DW_GDMA_ROLE_PERIPH_DSI: + dev->ch[channel].cfg1.src_per = DW_GDMA_LL_HW_HANDSHAKE_PERIPH_DSI; + break; + case DW_GDMA_ROLE_PERIPH_CSI: + dev->ch[channel].cfg1.src_per = DW_GDMA_LL_HW_HANDSHAKE_PERIPH_CSI; + break; + case DW_GDMA_ROLE_PERIPH_ISP: + dev->ch[channel].cfg1.src_per = DW_GDMA_LL_HW_HANDSHAKE_PERIPH_ISP; + break; + default: + abort(); + break; + } } /** @@ -689,9 +710,22 @@ static inline void dw_gdma_ll_channel_set_src_handshake_periph(dw_gdma_dev_t *de * @param channel Channel number * @param periph Peripheral ID */ -static inline void dw_gdma_ll_channel_set_dst_handshake_periph(dw_gdma_dev_t *dev, uint8_t channel, dw_gdma_ll_hw_handshake_periph_t periph) +static inline void dw_gdma_ll_channel_set_dst_handshake_periph(dw_gdma_dev_t *dev, uint8_t channel, dw_gdma_role_t periph) { - dev->ch[channel].cfg1.dst_per = periph; + switch (periph) { + case DW_GDMA_ROLE_PERIPH_DSI: + dev->ch[channel].cfg1.dst_per = DW_GDMA_LL_HW_HANDSHAKE_PERIPH_DSI; + break; + case DW_GDMA_ROLE_PERIPH_CSI: + dev->ch[channel].cfg1.dst_per = DW_GDMA_LL_HW_HANDSHAKE_PERIPH_CSI; + break; + case DW_GDMA_ROLE_PERIPH_ISP: + dev->ch[channel].cfg1.dst_per = DW_GDMA_LL_HW_HANDSHAKE_PERIPH_ISP; + break; + default: + abort(); + break; + } } /** @@ -715,7 +749,7 @@ static inline void dw_gdma_ll_channel_set_priority(dw_gdma_dev_t *dev, uint8_t c * @param channel Channel number * @param lock_level At which level the lock is applied */ -static inline void dw_gdma_ll_channel_lock(dw_gdma_dev_t *dev, uint8_t channel, dw_gdma_ll_lock_level_t lock_level) +static inline void dw_gdma_ll_channel_lock(dw_gdma_dev_t *dev, uint8_t channel, dw_gdma_lock_level_t lock_level) { 
dev->ch[channel].cfg1.lock_ch_l = lock_level; dev->ch[channel].cfg1.lock_ch = 1; @@ -764,11 +798,27 @@ static inline void dw_gdma_ll_channel_set_dst_outstanding_limit(dw_gdma_dev_t *d * * @param dev Pointer to the DW_GDMA registers * @param channel Channel number - * @param addr Address of the first link list item, it must be aligned 64 + * @param addr Address of the first link list item, it must be aligned 64 bytes */ static inline void dw_gdma_ll_channel_set_link_list_head_addr(dw_gdma_dev_t *dev, uint8_t channel, uint32_t addr) { dev->ch[channel].llp0.loc0 = addr >> 6; + dev->ch[channel].llp1.val = 0; +} + +/** + * @brief Get the current link list item address + * + * @note When the DMA detects an invalid block, this function can tell which link list item is invalid + * + * @param dev Pointer to the DW_GDMA registers + * @param channel Channel number + * @return Address of the current link list item + */ +__attribute__((always_inline)) +static inline intptr_t dw_gdma_ll_channel_get_current_link_list_item_addr(dw_gdma_dev_t *dev, uint8_t channel) +{ + return (intptr_t)dev->ch[channel].llp0.loc0 << 6; } /** @@ -784,20 +834,34 @@ static inline void dw_gdma_ll_channel_set_link_list_master_port(dw_gdma_dev_t *d } /** - * @brief Get the total number of data that transferred for the previous block transfer. + * @brief Get the total number of data that got transferred * - * @note for normal transfer, this value is the same as the value of `dw_gdma_ll_channel_set_trans_amount` + * @note for normal transfer, this value is the same as the value of `dw_gdma_ll_channel_set_trans_block_size` * @note if any error occurs, the transfer might be terminated early, this function returns actual data transferred without error. 
* * @param dev Pointer to the DW_GDMA registers * @param channel Channel number - * @return Total number of data that transferred for the previous block transfer + * @return Total number of data that got transferred */ static inline uint32_t dw_gdma_ll_channel_get_trans_amount(dw_gdma_dev_t *dev, uint8_t channel) { return dev->ch[channel].status0.cmpltd_blk_tfr_size; } +/** + * @brief Get the total number of data left in the channel FIFO after completing the current block transfer + * + * @note for normal transfer completion without errors, this function should always return 0 + * + * @param dev Pointer to the DW_GDMA registers + * @param channel Channel number + * @return Total number of data left in the channel FIFO + */ +static inline uint32_t dw_gdma_ll_channel_get_fifo_remain(dw_gdma_dev_t *dev, uint8_t channel) +{ + return dev->ch[channel].status1.data_left_in_fifo; +} + /** * @brief Resume the multi-block transfer * @@ -806,6 +870,7 @@ static inline uint32_t dw_gdma_ll_channel_get_trans_amount(dw_gdma_dev_t *dev, u * @param dev Pointer to the DW_GDMA registers * @param channel Channel number */ +__attribute__((always_inline)) static inline void dw_gdma_ll_channel_resume_multi_block_transfer(dw_gdma_dev_t *dev, uint8_t channel) { // this register is write-only, we can't do read-modify-write @@ -813,29 +878,367 @@ static inline void dw_gdma_ll_channel_resume_multi_block_transfer(dw_gdma_dev_t } /** - * @brief Set the address to fetch the source status of the DMA channel + * @brief Set the address where to fetch the status of the source peripheral + * + * @note Status of the source peripheral can be read by `dw_gdma_ll_channel_get_src_periph_status` * * @param dev Pointer to the DW_GDMA registers * @param channel Channel number - * @param addr Address to fetch the source status of the DMA channel + * @param addr Address to fetch the status */ -static inline void dw_gdma_ll_channel_set_src_status_fetch_addr(dw_gdma_dev_t *dev, uint8_t channel, uint32_t addr) 
+static inline void dw_gdma_ll_channel_set_src_periph_status_addr(dw_gdma_dev_t *dev, uint8_t channel, uint32_t addr) { dev->ch[channel].sstatar0.sstatar0 = addr; } /** - * @brief Set the address to fetch the destination status of the DMA channel + * @brief Set the address where to fetch the status of the destination peripheral + * + * @note Status of the destination peripheral can be read by `dw_gdma_ll_channel_get_dst_periph_status` * * @param dev Pointer to the DW_GDMA registers * @param channel Channel number - * @param addr Address to fetch the destination status of the DMA channel + * @param addr Address to fetch the status */ -static inline void dw_gdma_ll_channel_set_dst_status_fetch_addr(dw_gdma_dev_t *dev, uint8_t channel, uint32_t addr) +static inline void dw_gdma_ll_channel_set_dst_periph_status_addr(dw_gdma_dev_t *dev, uint8_t channel, uint32_t addr) { dev->ch[channel].dstatar0.dstatar0 = addr; } +/** + * @brief Get the status of the source peripheral + * + * @param dev Pointer to the DW_GDMA registers + * @param channel Channel number + * @return Status of the source peripheral + */ +static inline uint32_t dw_gdma_ll_channel_get_src_periph_status(dw_gdma_dev_t *dev, uint8_t channel) +{ + return dev->ch[channel].sstat0.val; +} + +/** + * @brief Get the status of the destination peripheral + * + * @param dev Pointer to the DW_GDMA registers + * @param channel Channel number + * @return Status of the destination peripheral + */ +static inline uint32_t dw_gdma_ll_channel_get_dst_periph_status(dw_gdma_dev_t *dev, uint8_t channel) +{ + return dev->ch[channel].dstat0.val; +} + +/** + * @brief Type of DW-DMA link list item + */ +typedef struct dw_gdma_link_list_item_t { + dmac_chn_sar0_reg_t sar_lo; /*!< Source address low 32 bits */ + dmac_chn_sar1_reg_t sar_hi; /*!< Source address high 32 bits */ + dmac_chn_dar0_reg_t dar_lo; /*!< Destination address low 32 bits */ + dmac_chn_dar1_reg_t dar_hi; /*!< Destination address high 32 bits */ + 
dmac_chn_block_ts0_reg_t block_ts_lo; /*!< Block transfer size, specify the number of data items to be transferred in a block */ + uint32_t reserved_14; + dmac_chn_llp0_reg_t llp_lo; /*!< Pointer to the next link list item low 32 bits. Set to zero to indicate the end of the list */ + dmac_chn_llp1_reg_t llp_hi; /*!< Pointer to the next link list item high 32 bits. Set to zero to indicate the end of the list */ + dmac_chn_ctl0_reg_t ctrl_lo; /*!< Control word low 32 bits */ + dmac_chn_ctl1_reg_t ctrl_hi; /*!< Control word high 32 bits */ + dmac_chn_sstat0_reg_t sstat; /*!< Status of the source peripheral */ + dmac_chn_dstat0_reg_t dstat; /*!< Status of the destination peripheral */ + dmac_chn_status0_reg_t status_lo; /*!< Channel status low 32 bits */ + dmac_chn_status1_reg_t status_hi; /*!< Channel status high 32 bits */ + uint32_t reserved_38; + uint32_t reserved_3c; +} dw_gdma_link_list_item_t __attribute__((aligned(DW_GDMA_LL_LINK_LIST_ALIGNMENT))); + +ESP_STATIC_ASSERT(sizeof(dw_gdma_link_list_item_t) == DW_GDMA_LL_LINK_LIST_ALIGNMENT, "Invalid size of dw_gdma_link_list_item_t structure"); + +/** + * @brief Set the transfer width of the source data + * + * @param lli Link list item + * @param width Transfer width + */ +__attribute__((always_inline)) +static inline void dw_gdma_ll_lli_set_src_trans_width(dw_gdma_link_list_item_t *lli, dw_gdma_transfer_width_t width) +{ + lli->ctrl_lo.src_tr_width = width; +} + +/** + * @brief Set the transfer width of the destination data + * + * @param lli Link list item + * @param width Transfer width + */ +__attribute__((always_inline)) +static inline void dw_gdma_ll_lli_set_dst_trans_width(dw_gdma_link_list_item_t *lli, dw_gdma_transfer_width_t width) +{ + lli->ctrl_lo.dst_tr_width = width; +} + +/** + * @brief Set the source master port based on the memory address + * + * @param lli Link list item + * @param mem_addr Memory address + */ +__attribute__((always_inline)) +static inline void 
dw_gdma_ll_lli_set_src_master_port(dw_gdma_link_list_item_t *lli, intptr_t mem_addr) +{ + if (mem_addr == MIPI_CSI_MEM_BASE) { + lli->ctrl_lo.sms = DW_GDMA_LL_MASTER_PORT_MIPI_CSI; + } else { + lli->ctrl_lo.sms = DW_GDMA_LL_MASTER_PORT_MEMORY; + } +} + +/** + * @brief Set the destination master port based on the memory address + * + * @param lli Link list item + * @param mem_addr Memory address + */ +__attribute__((always_inline)) +static inline void dw_gdma_ll_lli_set_dst_master_port(dw_gdma_link_list_item_t *lli, intptr_t mem_addr) +{ + if (mem_addr == MIPI_DSI_MEM_BASE) { + lli->ctrl_lo.dms = DW_GDMA_LL_MASTER_PORT_MIPI_DSI; + } else { + lli->ctrl_lo.dms = DW_GDMA_LL_MASTER_PORT_MEMORY; + } +} + +/** + * @brief Set the source address of the DMA transfer + * + * @param lli Link list item + * @param src_addr Source address + */ +__attribute__((always_inline)) +static inline void dw_gdma_ll_lli_set_src_addr(dw_gdma_link_list_item_t *lli, uint32_t src_addr) +{ + lli->sar_lo.sar0 = src_addr; +} + +/** + * @brief Set the destination address of the DMA transfer + * + * @param lli Link list item + * @param dst_addr Destination address + */ +__attribute__((always_inline)) +static inline void dw_gdma_ll_lli_set_dst_addr(dw_gdma_link_list_item_t *lli, uint32_t dst_addr) +{ + lli->dar_lo.dar0 = dst_addr; +} + +/** + * @brief Set the number of data to be transferred + * + * @note "transfer width" * "transfer size" = the total bytes in one block transfer + * + * @param lli Link list item + * @param sz Number of transfer items + */ +__attribute__((always_inline)) +static inline void dw_gdma_ll_lli_set_trans_block_size(dw_gdma_link_list_item_t *lli, uint32_t sz) +{ + lli->block_ts_lo.block_ts = sz - 1; +} + +/** + * @brief Enable the source address burst mode + * + * @note Increase the source address by the data width after each transfer + * + * @param lli Link list item + * @param mode Address burst mode + */ +__attribute__((always_inline)) +static inline void 
dw_gdma_ll_lli_set_src_burst_mode(dw_gdma_link_list_item_t *lli, dw_gdma_burst_mode_t mode) +{ + lli->ctrl_lo.sinc = mode; +} + +/** + * @brief Enable the destination address burst mode + * + * @note Increase the destination address by the data width after each transfer + * + * @param lli Link list item + * @param mode Address burst mode + */ +__attribute__((always_inline)) +static inline void dw_gdma_ll_lli_set_dst_burst_mode(dw_gdma_link_list_item_t *lli, dw_gdma_burst_mode_t mode) +{ + lli->ctrl_lo.dinc = mode; +} + +/** + * @brief Set the number of data items that can be transferred in a single burst transaction for the source master port + * + * @param lli Link list item + * @param items Number of data items + */ +__attribute__((always_inline)) +static inline void dw_gdma_ll_lli_set_src_burst_items(dw_gdma_link_list_item_t *lli, dw_gdma_burst_items_t items) +{ + lli->ctrl_lo.src_msize = items; +} + +/** + * @brief Set the number of data items that can be transferred in a single burst transaction for the destination master port + * + * @param lli Link list item + * @param items Number of data items + */ +__attribute__((always_inline)) +static inline void dw_gdma_ll_lli_set_dst_burst_items(dw_gdma_link_list_item_t *lli, dw_gdma_burst_items_t items) +{ + lli->ctrl_lo.dst_msize = items; +} + +/** + * @brief Set the source burst length + * + * @note This controls how many times the DMA controller will ask for data from the source device in a single burst transaction. 
+ * + * @param lli Link list item + * @param len Burst length + */ +__attribute__((always_inline)) +static inline void dw_gdma_ll_lli_set_src_burst_len(dw_gdma_link_list_item_t *lli, uint8_t len) +{ + lli->ctrl_hi.arlen_en = len > 0; + lli->ctrl_hi.arlen = len; +} + +/** + * @brief Set the destination burst length + * + * @param lli Link list item + * @param len Burst length + */ +__attribute__((always_inline)) +static inline void dw_gdma_ll_lli_set_dst_burst_len(dw_gdma_link_list_item_t *lli, uint8_t len) +{ + lli->ctrl_hi.awlen_en = len > 0; + lli->ctrl_hi.awlen = len; +} + +/** + * @brief Set block markers + * + * @note This is only valid for `DW_GDMA_BLOCK_TRANSFER_SHADOW` transfer type + * + * @param lli Link list item + * @param en_intr True to generate an interrupt when the block transfer is done, false to disable + * @param is_last True to mark the block transfer as the last one + * @param is_valid True to mark the block transfer as valid + */ +__attribute__((always_inline)) +static inline void dw_gdma_ll_lli_set_block_markers(dw_gdma_link_list_item_t *lli, bool en_intr, bool is_last, bool is_valid) +{ + lli->ctrl_hi.ioc_blktfr = en_intr; + lli->ctrl_hi.shadowreg_or_lli_last = is_last; + lli->ctrl_hi.shadowreg_or_lli_valid = is_valid; +} + +/** + * @brief Whether to enable the status write back for the source peripheral + * + * @param lli Link list item + * @param en True to enable write back, false to disable + */ +__attribute__((always_inline)) +static inline void dw_gdma_ll_lli_enable_src_periph_status_write_back(dw_gdma_link_list_item_t *lli, bool en) +{ + lli->ctrl_hi.src_stat_en = en; +} + +/** + * @brief Whether to enable the status write back for the destination peripheral + * + * @param lli Link list item + * @param en True to enable write back, false to disable + */ +__attribute__((always_inline)) +static inline void dw_gdma_ll_lli_enable_dst_periph_status_write_back(dw_gdma_link_list_item_t *lli, bool en) +{ + lli->ctrl_hi.dst_stat_en = en; +} + 
+/** + * @brief Get the status of the source peripheral + * + * @param lli Link list item + * @return Status of the source peripheral + */ +static inline uint32_t dw_gdma_ll_lli_get_src_periph_status(dw_gdma_link_list_item_t *lli) +{ + return lli->sstat.val; +} + +/** + * @brief Get the status of the destination peripheral + * + * @param lli Link list item + * @return Status of the destination peripheral + */ +static inline uint32_t dw_gdma_ll_lli_get_dst_periph_status(dw_gdma_link_list_item_t *lli) +{ + return lli->dstat.val; +} + +/** + * @brief Set the master port of the memory which holds the link list + * + * @param lli Link list item + * @param port Master port + */ +static inline void dw_gdma_ll_lli_set_link_list_master_port(dw_gdma_link_list_item_t *lli, uint32_t port) +{ + lli->llp_lo.lms = port; +} + +/** + * @brief Set the address of the next link list item + * + * @param lli Link list item + * @param addr Address of the next link list item, it must be aligned 64 bytes + */ +static inline void dw_gdma_ll_lli_set_next_item_addr(dw_gdma_link_list_item_t *lli, uint32_t addr) +{ + lli->llp_lo.loc0 = addr >> 6; + lli->llp_hi.val = 0; +} + +/** + * @brief Get the total number of data that got transferred + * + * @note for normal transfer, this value is the same as the value of `dw_gdma_ll_lli_set_trans_block_size` + * @note if any error occurs, the transfer might be terminated early, this function returns actual data transferred without error. 
+ * + * @param lli Link list item + * @return Total number of data that got transferred + */ +static inline uint32_t dw_gdma_ll_lli_get_trans_amount(dw_gdma_link_list_item_t *lli) +{ + return lli->status_lo.cmpltd_blk_tfr_size; +} + +/** + * @brief Get the total number of data left in the channel FIFO after completing the current block transfer + * + * @param lli Link list item + * @return Total number of data left in the channel FIFO + */ +static inline uint32_t dw_gdma_ll_lli_get_fifo_remain(dw_gdma_link_list_item_t *lli) +{ + return lli->status_hi.data_left_in_fifo; +} + #ifdef __cplusplus } #endif diff --git a/components/hal/include/hal/dw_gdma_hal.h b/components/hal/include/hal/dw_gdma_hal.h index fa030c46ea..71c44d825b 100644 --- a/components/hal/include/hal/dw_gdma_hal.h +++ b/components/hal/include/hal/dw_gdma_hal.h @@ -41,6 +41,13 @@ typedef struct { */ void dw_gdma_hal_init(dw_gdma_hal_context_t *hal, const dw_gdma_hal_config_t *config); +/** + * @brief DW_GDMA HAL driver deinitialization + * + * @param hal Pointer to the HAL driver context + */ +void dw_gdma_hal_deinit(dw_gdma_hal_context_t *hal); + #ifdef __cplusplus } #endif diff --git a/components/hal/include/hal/dw_gdma_types.h b/components/hal/include/hal/dw_gdma_types.h new file mode 100644 index 0000000000..1a1bea8a58 --- /dev/null +++ b/components/hal/include/hal/dw_gdma_types.h @@ -0,0 +1,99 @@ +/* + * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief DMA block transfer type + */ +typedef enum { + DW_GDMA_BLOCK_TRANSFER_CONTIGUOUS, /*!< Contiguous buffer address */ + DW_GDMA_BLOCK_TRANSFER_RELOAD, /*!< Reload transfer configuration automatically */ + DW_GDMA_BLOCK_TRANSFER_SHADOW, /*!< Shadow register */ + DW_GDMA_BLOCK_TRANSFER_LIST, /*!< Link list */ +} dw_gdma_block_transfer_type_t; + +/** + * @brief Flow control type + */ +typedef enum { + 
DW_GDMA_FLOW_CTRL_SELF, /*!< Flow controller is the DMA engine itself */ + DW_GDMA_FLOW_CTRL_SRC, /*!< Flow controller is the source peripheral */ + DW_GDMA_FLOW_CTRL_DST, /*!< Flow controller is the destination peripheral */ +} dw_gdma_flow_controller_t; + +/** + * @brief Handshake interface type + */ +typedef enum { + DW_GDMA_HANDSHAKE_HW, /*!< Transaction requests are initiated by hardware */ + DW_GDMA_HANDSHAKE_SW, /*!< Transaction requests are initiated by software */ +} dw_gdma_handshake_type_t; + +/** + * @brief Role of the DMA source/destination + */ +typedef enum { + DW_GDMA_ROLE_MEM, /*!< Target is a plain memory which is accessible by the DMA */ + DW_GDMA_ROLE_PERIPH_DSI, /*!< Target is FIFO memory of peripheral: DSI */ + DW_GDMA_ROLE_PERIPH_CSI, /*!< Target is FIFO memory of peripheral: CSI */ + DW_GDMA_ROLE_PERIPH_ISP, /*!< Target is FIFO memory of peripheral: ISP */ +} dw_gdma_role_t; + +/** + * @brief Channel lock level + */ +typedef enum { + DW_GDMA_LOCK_LEVEL_FULL_TRANS, /*!< Lock over complete DMA transfer */ + DW_GDMA_LOCK_LEVEL_BLOCK_TRANS, /*!< Lock over DMA block transfer */ +} dw_gdma_lock_level_t; + +/** + * @brief DW_GDMA transfer width + */ +typedef enum { + DW_GDMA_TRANS_WIDTH_8, /*!< Data transfer width: 8 bits */ + DW_GDMA_TRANS_WIDTH_16, /*!< Data transfer width: 16 bits */ + DW_GDMA_TRANS_WIDTH_32, /*!< Data transfer width: 32 bits */ + DW_GDMA_TRANS_WIDTH_64, /*!< Data transfer width: 64 bits */ + DW_GDMA_TRANS_WIDTH_128, /*!< Data transfer width: 128 bits */ + DW_GDMA_TRANS_WIDTH_256, /*!< Data transfer width: 256 bits */ + DW_GDMA_TRANS_WIDTH_512, /*!< Data transfer width: 512 bits */ +} dw_gdma_transfer_width_t; + +/** + * @brief DW_GDMA burst mode + */ +typedef enum { + DW_GDMA_BURST_MODE_INCREMENT, /*!< The address is increased after each transfer */ + DW_GDMA_BURST_MODE_FIXED, /*!< The address remains the same after each transfer */ +} dw_gdma_burst_mode_t; + +/** + * @brief Number of data items that are contained in one burst 
transaction + * + * @note One item's bit width is set by `dw_gdma_transfer_width_t` + */ +typedef enum { + DW_GDMA_BURST_ITEMS_1, /*!< 1 data items in a burst transaction */ + DW_GDMA_BURST_ITEMS_4, /*!< 4 data items in a burst transaction */ + DW_GDMA_BURST_ITEMS_8, /*!< 8 data items in a burst transaction */ + DW_GDMA_BURST_ITEMS_16, /*!< 16 data items in a burst transaction */ + DW_GDMA_BURST_ITEMS_32, /*!< 32 data items in a burst transaction */ + DW_GDMA_BURST_ITEMS_64, /*!< 64 data items in a burst transaction */ + DW_GDMA_BURST_ITEMS_128, /*!< 128 data items in a burst transaction */ + DW_GDMA_BURST_ITEMS_256, /*!< 256 data items in a burst transaction */ + DW_GDMA_BURST_ITEMS_512, /*!< 512 data items in a burst transaction */ + DW_GDMA_BURST_ITEMS_1024, /*!< 1024 data items in a burst transaction */ +} dw_gdma_burst_items_t; + +#ifdef __cplusplus +} +#endif diff --git a/components/soc/esp32p4/include/soc/Kconfig.soc_caps.in b/components/soc/esp32p4/include/soc/Kconfig.soc_caps.in index e29d060b09..8318d17b45 100644 --- a/components/soc/esp32p4/include/soc/Kconfig.soc_caps.in +++ b/components/soc/esp32p4/include/soc/Kconfig.soc_caps.in @@ -23,6 +23,10 @@ config SOC_AXI_GDMA_SUPPORTED bool default y +config SOC_DW_GDMA_SUPPORTED + bool + default y + config SOC_GPTIMER_SUPPORTED bool default y diff --git a/components/soc/esp32p4/include/soc/reg_base.h b/components/soc/esp32p4/include/soc/reg_base.h index 341bb518f9..19b8fb2d80 100644 --- a/components/soc/esp32p4/include/soc/reg_base.h +++ b/components/soc/esp32p4/include/soc/reg_base.h @@ -156,6 +156,12 @@ #define DR_REG_HP2LP_PERI_PMS_BASE (DR_REG_LPPERIPH_BASE + 0xE800) #define DR_REG_LP_TSENSOR_BASE (DR_REG_LPPERIPH_BASE + 0xF000) +/** + * @brief: Special memory address + */ +#define MIPI_CSI_MEM_BASE 0x50104000 +#define MIPI_DSI_MEM_BASE 0x50105000 + /** * This are module helper MACROs for quick module reference * including some module(renamed) address diff --git 
a/components/soc/esp32p4/include/soc/soc_caps.h b/components/soc/esp32p4/include/soc/soc_caps.h index 3eeb2f8c68..cd2d070a15 100644 --- a/components/soc/esp32p4/include/soc/soc_caps.h +++ b/components/soc/esp32p4/include/soc/soc_caps.h @@ -24,6 +24,7 @@ #define SOC_GDMA_SUPPORTED 1 #define SOC_AHB_GDMA_SUPPORTED 1 #define SOC_AXI_GDMA_SUPPORTED 1 +#define SOC_DW_GDMA_SUPPORTED 1 #define SOC_GPTIMER_SUPPORTED 1 #define SOC_PCNT_SUPPORTED 1 // #define SOC_LCDCAM_SUPPORTED 1 // TODO: IDF-7465