feat(esp_gdma): add hal interface for common operations

The GDMA driver will be adapted to more DMA peripherals in the future.
This commit extracts a minimal interface for the common operations into the HAL layer.
This commit is contained in:
morris 2023-06-21 19:00:59 +08:00
parent 4dcab6c2ed
commit 56a376c696
51 changed files with 1040 additions and 423 deletions
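
In outline, the extraction works like this: the driver layer (gdma.c) now calls bus-agnostic gdma_hal_xxx() wrappers, and each bus backend (AHB-DMA v1/v2, AXI-DMA) installs its own implementation into the HAL context through its hal_init callback (gdma_ahb_hal_init / gdma_axi_hal_init). The sketch below only illustrates that dispatch pattern; the struct layout and field names are assumptions, not the exact IDF definitions.

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: field and parameter names are illustrative assumptions. */
typedef struct gdma_hal_context_t gdma_hal_context_t;

typedef struct {
    int group_id;                     /* which GDMA group this HAL context serves */
} gdma_hal_config_t;

struct gdma_hal_context_t {
    void *dev;                        /* bus-specific register block */
    /* operations filled in by the backend's hal_init, e.g. gdma_ahb_hal_init() */
    void (*start_with_desc)(gdma_hal_context_t *hal, int chan_id, int dir, intptr_t desc_base);
    void (*stop)(gdma_hal_context_t *hal, int chan_id, int dir);
    void (*enable_intr)(gdma_hal_context_t *hal, int chan_id, int dir, uint32_t mask, bool enable);
};

/* Bus-agnostic wrappers used by gdma.c: they simply dispatch through the context. */
static inline void gdma_hal_start_with_desc(gdma_hal_context_t *hal, int chan_id, int dir, intptr_t desc_base)
{
    hal->start_with_desc(hal, chan_id, dir, desc_base);
}

static inline void gdma_hal_stop(gdma_hal_context_t *hal, int chan_id, int dir)
{
    hal->stop(hal, chan_id, dir);
}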

View File

@ -6,33 +6,21 @@
#include "unity.h"
#include "unity_test_runner.h"
#include "unity_test_utils.h"
#include "esp_heap_caps.h"
// Some resources are lazy allocated in pulse_cnt driver, the threshold is left for that case
#define TEST_MEMORY_LEAK_THRESHOLD (-300)
static size_t before_free_8bit;
static size_t before_free_32bit;
static void check_leak(size_t before_free, size_t after_free, const char *type)
{
ssize_t delta = after_free - before_free;
printf("MALLOC_CAP_%s: Before %u bytes free, After %u bytes free (delta %d)\n", type, before_free, after_free, delta);
TEST_ASSERT_MESSAGE(delta >= TEST_MEMORY_LEAK_THRESHOLD, "memory leak");
}
#define TEST_MEMORY_LEAK_THRESHOLD (400)
void setUp(void)
{
before_free_8bit = heap_caps_get_free_size(MALLOC_CAP_8BIT);
before_free_32bit = heap_caps_get_free_size(MALLOC_CAP_32BIT);
unity_utils_record_free_mem();
}
void tearDown(void)
{
size_t after_free_8bit = heap_caps_get_free_size(MALLOC_CAP_8BIT);
size_t after_free_32bit = heap_caps_get_free_size(MALLOC_CAP_32BIT);
check_leak(before_free_8bit, after_free_8bit, "8BIT");
check_leak(before_free_32bit, after_free_32bit, "32BIT");
esp_reent_cleanup(); //clean up some of the newlib's lazy allocations
unity_utils_evaluate_leaks_direct(TEST_MEMORY_LEAK_THRESHOLD);
}
void app_main(void)

View File

@ -8,7 +8,7 @@
#include "unity_test_utils.h"
#include "esp_heap_caps.h"
#define TEST_MEMORY_LEAK_THRESHOLD (150)
#define TEST_MEMORY_LEAK_THRESHOLD (200)
static size_t before_free_8bit;
static size_t before_free_32bit;

View File

@ -8,7 +8,7 @@
#include "unity_test_utils.h"
#include "esp_heap_caps.h"
#define TEST_MEMORY_LEAK_THRESHOLD (120)
#define TEST_MEMORY_LEAK_THRESHOLD (200)
static size_t before_free_8bit;
static size_t before_free_32bit;

View File

@ -137,7 +137,7 @@ idf_component_register(SRCS ${srcs}
PRIV_INCLUDE_DIRS port/include include/esp_private
REQUIRES ${requires}
PRIV_REQUIRES "${priv_requires}"
LDFRAGMENTS linker.lf)
LDFRAGMENTS linker.lf dma/linker.lf)
idf_build_get_property(target IDF_TARGET)
add_subdirectory(port/${target})

View File

@ -8,8 +8,6 @@
#include "soc/periph_defs.h"
#include "soc/soc_memory_layout.h"
#include "soc/soc_caps.h"
#include "hal/gdma_ll.h"
#include "hal/gdma_hal.h"
#include "esp_private/periph_ctrl.h"
#include "esp_log.h"
#include "esp_attr.h"

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -40,12 +40,12 @@ static const char *TAG = "gdma";
*/
typedef struct gdma_platform_t {
portMUX_TYPE spinlock; // platform level spinlock
gdma_group_t *groups[SOC_GDMA_GROUPS]; // array of GDMA group instances
int group_ref_counts[SOC_GDMA_GROUPS]; // reference count used to protect group install/uninstall
portMUX_TYPE spinlock; // platform level spinlock
gdma_group_t *groups[SOC_GDMA_NUM_GROUPS_MAX]; // array of GDMA group instances
int group_ref_counts[SOC_GDMA_NUM_GROUPS_MAX]; // reference count used to protect group install/uninstall
} gdma_platform_t;
static gdma_group_t *gdma_acquire_group_handle(int group_id);
static gdma_group_t *gdma_acquire_group_handle(int group_id, void (*hal_init)(gdma_hal_context_t *hal, const gdma_hal_config_t *config));
static gdma_pair_t *gdma_acquire_pair_handle(gdma_group_t *group, int pair_id);
static void gdma_release_group_handle(gdma_group_t *group);
static void gdma_release_pair_handle(gdma_pair_t *pair);
@ -57,10 +57,17 @@ static esp_err_t gdma_install_tx_interrupt(gdma_tx_channel_t *tx_chan);
// gdma driver platform
static gdma_platform_t s_platform = {
.spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED,
.groups = {} // groups will be lazy installed
};
esp_err_t gdma_new_channel(const gdma_channel_alloc_config_t *config, gdma_channel_handle_t *ret_chan)
typedef struct {
int bus_id;
int start_group_id;
int end_group_id;
int pairs_per_group;
void (*hal_init)(gdma_hal_context_t *hal, const gdma_hal_config_t *config);
} gdma_channel_search_info_t;
static esp_err_t do_allocate_gdma_channel(const gdma_channel_search_info_t *search_info, const gdma_channel_alloc_config_t *config, gdma_channel_handle_t *ret_chan)
{
esp_err_t ret = ESP_OK;
gdma_tx_channel_t *alloc_tx_channel = NULL;
@ -68,7 +75,7 @@ esp_err_t gdma_new_channel(const gdma_channel_alloc_config_t *config, gdma_chann
int search_code = 0;
gdma_pair_t *pair = NULL;
gdma_group_t *group = NULL;
ESP_GOTO_ON_FALSE(config && ret_chan, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
ESP_RETURN_ON_FALSE(config && ret_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
if (config->flags.reserve_sibling) {
search_code = SEARCH_REQUEST_RX_CHANNEL | SEARCH_REQUEST_TX_CHANNEL; // search for a pair of channels
@ -94,10 +101,15 @@ esp_err_t gdma_new_channel(const gdma_channel_alloc_config_t *config, gdma_chann
goto search_done; // skip the search path below if the user has specified a sibling channel
}
for (int i = 0; i < SOC_GDMA_GROUPS && search_code; i++) { // loop to search group
group = gdma_acquire_group_handle(i);
int start_group_id = search_info->start_group_id;
int end_group_id = search_info->end_group_id;
int pairs_per_group = search_info->pairs_per_group;
for (int i = start_group_id; i < end_group_id && search_code; i++) { // loop to search group
group = gdma_acquire_group_handle(i, search_info->hal_init);
group->bus_id = search_info->bus_id;
ESP_GOTO_ON_FALSE(group, ESP_ERR_NO_MEM, err, TAG, "no mem for group(%d)", i);
for (int j = 0; j < SOC_GDMA_PAIRS_PER_GROUP && search_code; j++) { // loop to search pair
for (int j = 0; j < pairs_per_group && search_code; j++) { // loop to search pair
pair = gdma_acquire_pair_handle(group, j);
ESP_GOTO_ON_FALSE(pair, ESP_ERR_NO_MEM, err, TAG, "no mem for pair(%d,%d)", i, j);
portENTER_CRITICAL(&pair->spinlock);
@ -160,15 +172,49 @@ err:
return ret;
}
#if SOC_AHB_GDMA_SUPPORTED
esp_err_t gdma_new_ahb_channel(const gdma_channel_alloc_config_t *config, gdma_channel_handle_t *ret_chan)
{
gdma_channel_search_info_t search_info = {
.bus_id = SOC_GDMA_BUS_AHB,
.start_group_id = GDMA_LL_AHB_GROUP_START_ID,
.end_group_id = GDMA_LL_AHB_GROUP_START_ID + GDMA_LL_AHB_NUM_GROUPS,
.pairs_per_group = GDMA_LL_AHB_PAIRS_PER_GROUP,
.hal_init = gdma_ahb_hal_init,
};
return do_allocate_gdma_channel(&search_info, config, ret_chan);
}
esp_err_t gdma_new_channel(const gdma_channel_alloc_config_t *config, gdma_channel_handle_t *ret_chan)
__attribute__((alias("gdma_new_ahb_channel")));
#endif // SOC_AHB_GDMA_SUPPORTED
#if SOC_AXI_GDMA_SUPPORTED
esp_err_t gdma_new_axi_channel(const gdma_channel_alloc_config_t *config, gdma_channel_handle_t *ret_chan)
{
gdma_channel_search_info_t search_info = {
.bus_id = SOC_GDMA_BUS_AXI,
.start_group_id = GDMA_LL_AXI_GROUP_START_ID,
.end_group_id = GDMA_LL_AXI_GROUP_START_ID + GDMA_LL_AXI_NUM_GROUPS,
.pairs_per_group = GDMA_LL_AXI_PAIRS_PER_GROUP,
.hal_init = gdma_axi_hal_init,
};
return do_allocate_gdma_channel(&search_info, config, ret_chan);
}
#endif // SOC_AXI_GDMA_SUPPORTED
esp_err_t gdma_del_channel(gdma_channel_handle_t dma_chan)
{
esp_err_t ret = ESP_OK;
ESP_GOTO_ON_FALSE(dma_chan, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
ESP_RETURN_ON_FALSE(dma_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
gdma_pair_t *pair = dma_chan->pair;
gdma_group_t *group = pair->group;
gdma_hal_context_t *hal = &group->hal;
ret = dma_chan->del(dma_chan); // call `gdma_del_tx_channel` or `gdma_del_rx_channel`
// reset the channel priority to default
gdma_hal_set_priority(hal, pair->pair_id, dma_chan->direction, 0);
err:
return ret;
// call `gdma_del_tx_channel` or `gdma_del_rx_channel` under the hood
return dma_chan->del(dma_chan);
}
esp_err_t gdma_get_channel_id(gdma_channel_handle_t dma_chan, int *channel_id)
@ -184,13 +230,17 @@ err:
esp_err_t gdma_connect(gdma_channel_handle_t dma_chan, gdma_trigger_t trig_periph)
{
gdma_pair_t *pair = NULL;
gdma_group_t *group = NULL;
ESP_RETURN_ON_FALSE(dma_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
ESP_RETURN_ON_FALSE(dma_chan->periph_id == GDMA_INVALID_PERIPH_TRIG, ESP_ERR_INVALID_STATE, TAG, "channel is already in use by peripheral: %d", dma_chan->periph_id);
pair = dma_chan->pair;
group = pair->group;
gdma_pair_t *pair = dma_chan->pair;
gdma_group_t *group = pair->group;
gdma_hal_context_t *hal = &group->hal;
bool periph_conflict = false;
//
if (trig_periph.bus_id != SOC_GDMA_BUS_ANY) {
ESP_RETURN_ON_FALSE(trig_periph.bus_id == group->bus_id, ESP_ERR_INVALID_ARG, TAG,
"peripheral and DMA system bus mismatch");
}
if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX) {
if (trig_periph.instance_id >= 0) {
@ -202,10 +252,6 @@ esp_err_t gdma_connect(gdma_channel_handle_t dma_chan, gdma_trigger_t trig_perip
}
portEXIT_CRITICAL(&group->spinlock);
}
if (!periph_conflict) {
gdma_ll_tx_reset_channel(group->hal.dev, pair->pair_id); // reset channel
gdma_ll_tx_connect_to_periph(group->hal.dev, pair->pair_id, trig_periph.periph, trig_periph.instance_id);
}
} else {
if (trig_periph.instance_id >= 0) {
portENTER_CRITICAL(&group->spinlock);
@ -216,26 +262,22 @@ esp_err_t gdma_connect(gdma_channel_handle_t dma_chan, gdma_trigger_t trig_perip
}
portEXIT_CRITICAL(&group->spinlock);
}
if (!periph_conflict) {
gdma_ll_rx_reset_channel(group->hal.dev, pair->pair_id); // reset channel
gdma_ll_rx_connect_to_periph(group->hal.dev, pair->pair_id, trig_periph.periph, trig_periph.instance_id);
}
}
ESP_RETURN_ON_FALSE(!periph_conflict, ESP_ERR_INVALID_STATE, TAG, "peripheral %d is already used by another channel", trig_periph.instance_id);
gdma_hal_connect_peri(hal, pair->pair_id, dma_chan->direction, trig_periph.periph, trig_periph.instance_id);
dma_chan->periph_id = trig_periph.instance_id;
return ESP_OK;
}
esp_err_t gdma_disconnect(gdma_channel_handle_t dma_chan)
{
gdma_pair_t *pair = NULL;
gdma_group_t *group = NULL;
ESP_RETURN_ON_FALSE(dma_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
ESP_RETURN_ON_FALSE(dma_chan->periph_id != GDMA_INVALID_PERIPH_TRIG, ESP_ERR_INVALID_STATE, TAG, "no peripheral is connected to the channel");
pair = dma_chan->pair;
group = pair->group;
gdma_pair_t *pair = dma_chan->pair;
gdma_group_t *group = pair->group;
gdma_hal_context_t *hal = &group->hal;
int save_periph_id = dma_chan->periph_id;
if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX) {
@ -244,29 +286,26 @@ esp_err_t gdma_disconnect(gdma_channel_handle_t dma_chan)
group->tx_periph_in_use_mask &= ~(1 << save_periph_id);
portEXIT_CRITICAL(&group->spinlock);
}
gdma_ll_tx_disconnect_from_periph(group->hal.dev, pair->pair_id);
} else {
if (save_periph_id >= 0) {
portENTER_CRITICAL(&group->spinlock);
group->rx_periph_in_use_mask &= ~(1 << save_periph_id);
portEXIT_CRITICAL(&group->spinlock);
}
gdma_ll_rx_disconnect_from_periph(group->hal.dev, pair->pair_id);
}
gdma_hal_disconnect_peri(hal, pair->pair_id, dma_chan->direction);
dma_chan->periph_id = GDMA_INVALID_PERIPH_TRIG;
return ESP_OK;
}
esp_err_t gdma_get_free_m2m_trig_id_mask(gdma_channel_handle_t dma_chan, uint32_t *mask)
{
gdma_pair_t *pair = NULL;
gdma_group_t *group = NULL;
ESP_RETURN_ON_FALSE(dma_chan && mask, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
uint32_t free_mask = GDMA_LL_M2M_FREE_PERIPH_ID_MASK;
pair = dma_chan->pair;
group = pair->group;
gdma_pair_t *pair = dma_chan->pair;
gdma_group_t *group = pair->group;
uint32_t free_mask = group->hal.priv_data->m2m_free_periph_mask;
portENTER_CRITICAL(&group->spinlock);
free_mask &= ~(group->tx_periph_in_use_mask);
@ -279,206 +318,166 @@ esp_err_t gdma_get_free_m2m_trig_id_mask(gdma_channel_handle_t dma_chan, uint32_
esp_err_t gdma_set_transfer_ability(gdma_channel_handle_t dma_chan, const gdma_transfer_ability_t *ability)
{
esp_err_t ret = ESP_OK;
gdma_pair_t *pair = NULL;
gdma_group_t *group = NULL;
bool en_burst = true;
ESP_GOTO_ON_FALSE(dma_chan, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
pair = dma_chan->pair;
group = pair->group;
ESP_RETURN_ON_FALSE(dma_chan && ability, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
gdma_pair_t *pair = dma_chan->pair;
gdma_group_t *group = pair->group;
gdma_hal_context_t *hal = &group->hal;
size_t sram_alignment = ability->sram_trans_align;
size_t psram_alignment = ability->psram_trans_align;
// alignment should be 2^n
ESP_GOTO_ON_FALSE((sram_alignment & (sram_alignment - 1)) == 0, ESP_ERR_INVALID_ARG, err, TAG, "invalid sram alignment: %zu", sram_alignment);
ESP_RETURN_ON_FALSE((sram_alignment & (sram_alignment - 1)) == 0, ESP_ERR_INVALID_ARG,
TAG, "invalid sram alignment: %zu", sram_alignment);
#if SOC_GDMA_SUPPORT_PSRAM
uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_TYPE_DATA);
int block_size_index = 0;
switch (psram_alignment) {
case 64: // 64 Bytes alignment
block_size_index = GDMA_LL_EXT_MEM_BK_SIZE_64B;
break;
case 32: // 32 Bytes alignment
block_size_index = GDMA_LL_EXT_MEM_BK_SIZE_32B;
break;
case 16: // 16 Bytes alignment
block_size_index = GDMA_LL_EXT_MEM_BK_SIZE_16B;
break;
case 0: // no alignment is requirement
block_size_index = GDMA_LL_EXT_MEM_BK_SIZE_16B;
psram_alignment = data_cache_line_size; // fall back to use the same size of the psram data cache line size
break;
default:
ESP_GOTO_ON_FALSE(false, ESP_ERR_INVALID_ARG, err, TAG, "invalid psram alignment: %zu", psram_alignment);
break;
if (psram_alignment == 0) {
// fall back to use the same size of the psram data cache line size
psram_alignment = data_cache_line_size;
}
if (psram_alignment > data_cache_line_size) {
ESP_RETURN_ON_FALSE(((psram_alignment % data_cache_line_size) == 0), ESP_ERR_INVALID_ARG,
TAG, "psram_alignment(%d) should be multiple of the data_cache_line_size(%d)",
psram_alignment, data_cache_line_size);
}
ESP_GOTO_ON_FALSE(((psram_alignment % data_cache_line_size) == 0), ESP_ERR_INVALID_ARG, err, TAG, "psram alignment (%d)B should be multiple of the data cache line size (%d)B", psram_alignment, data_cache_line_size);
#endif // #if SOC_GDMA_SUPPORT_PSRAM
if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX) {
// TX channel can always enable burst mode, no matter data alignment
gdma_ll_tx_enable_data_burst(group->hal.dev, pair->pair_id, true);
gdma_ll_tx_enable_descriptor_burst(group->hal.dev, pair->pair_id, true);
#if SOC_GDMA_SUPPORT_PSRAM
gdma_ll_tx_set_block_size_psram(group->hal.dev, pair->pair_id, block_size_index);
#endif // #if SOC_GDMA_SUPPORT_PSRAM
} else {
// if the DMA can't access the PSRAM, this HAL function is a no-op
gdma_hal_set_ext_mem_align(hal, pair->pair_id, dma_chan->direction, psram_alignment);
// TX channel can always enable burst mode, regardless of data alignment
bool en_burst = true;
if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_RX) {
// RX channel burst mode depends on specific data alignment
en_burst = sram_alignment >= 4;
gdma_ll_rx_enable_data_burst(group->hal.dev, pair->pair_id, en_burst);
gdma_ll_rx_enable_descriptor_burst(group->hal.dev, pair->pair_id, en_burst);
#if SOC_GDMA_SUPPORT_PSRAM
gdma_ll_rx_set_block_size_psram(group->hal.dev, pair->pair_id, block_size_index);
#endif // #if SOC_GDMA_SUPPORT_PSRAM
}
gdma_hal_enable_burst(hal, pair->pair_id, dma_chan->direction, en_burst, en_burst);
dma_chan->sram_alignment = sram_alignment;
dma_chan->psram_alignment = psram_alignment;
ESP_LOGD(TAG, "%s channel (%d,%d), (%u:%u) bytes aligned, burst %s", dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX ? "tx" : "rx",
group->group_id, pair->pair_id, sram_alignment, psram_alignment, en_burst ? "enabled" : "disabled");
err:
return ret;
return ESP_OK;
}
esp_err_t gdma_apply_strategy(gdma_channel_handle_t dma_chan, const gdma_strategy_config_t *config)
{
esp_err_t ret = ESP_OK;
gdma_pair_t *pair = NULL;
gdma_group_t *group = NULL;
ESP_GOTO_ON_FALSE(dma_chan, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
pair = dma_chan->pair;
group = pair->group;
ESP_RETURN_ON_FALSE(dma_chan && config, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
gdma_pair_t *pair = dma_chan->pair;
gdma_group_t *group = pair->group;
gdma_hal_context_t *hal = &group->hal;
if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX) {
gdma_ll_tx_enable_owner_check(group->hal.dev, pair->pair_id, config->owner_check);
gdma_ll_tx_enable_auto_write_back(group->hal.dev, pair->pair_id, config->auto_update_desc);
} else {
gdma_ll_rx_enable_owner_check(group->hal.dev, pair->pair_id, config->owner_check);
}
gdma_hal_set_strategy(hal, pair->pair_id, dma_chan->direction, config->owner_check, config->auto_update_desc);
err:
return ret;
return ESP_OK;
}
esp_err_t gdma_set_priority(gdma_channel_handle_t dma_chan, uint32_t priority)
{
gdma_pair_t *pair = NULL;
gdma_group_t *group = NULL;
ESP_RETURN_ON_FALSE(dma_chan && priority <= GDMA_LL_CHANNEL_MAX_PRIORITY, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
pair = dma_chan->pair;
group = pair->group;
gdma_pair_t *pair = dma_chan->pair;
gdma_group_t *group = pair->group;
gdma_hal_context_t *hal = &group->hal;
if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX) {
gdma_ll_tx_set_priority(group->hal.dev, pair->pair_id, priority);
} else {
gdma_ll_rx_set_priority(group->hal.dev, pair->pair_id, priority);
}
gdma_hal_set_priority(hal, pair->pair_id, dma_chan->direction, priority);
return ESP_OK;
}
esp_err_t gdma_register_tx_event_callbacks(gdma_channel_handle_t dma_chan, gdma_tx_event_callbacks_t *cbs, void *user_data)
{
esp_err_t ret = ESP_OK;
gdma_pair_t *pair = NULL;
gdma_group_t *group = NULL;
ESP_GOTO_ON_FALSE(dma_chan && cbs && dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
pair = dma_chan->pair;
group = pair->group;
ESP_RETURN_ON_FALSE(dma_chan && cbs && dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
gdma_pair_t *pair = dma_chan->pair;
gdma_group_t *group = pair->group;
gdma_hal_context_t *hal = &group->hal;
gdma_tx_channel_t *tx_chan = __containerof(dma_chan, gdma_tx_channel_t, base);
#if CONFIG_GDMA_ISR_IRAM_SAFE
if (cbs->on_trans_eof) {
ESP_GOTO_ON_FALSE(esp_ptr_in_iram(cbs->on_trans_eof), ESP_ERR_INVALID_ARG, err, TAG, "on_trans_eof not in IRAM");
ESP_RETURN_ON_FALSE(esp_ptr_in_iram(cbs->on_trans_eof), ESP_ERR_INVALID_ARG,
TAG, "on_trans_eof not in IRAM");
}
if (cbs->on_descr_err) {
ESP_GOTO_ON_FALSE(esp_ptr_in_iram(cbs->on_descr_err), ESP_ERR_INVALID_ARG, err, TAG, "on_descr_err not in IRAM");
ESP_RETURN_ON_FALSE(esp_ptr_in_iram(cbs->on_descr_err), ESP_ERR_INVALID_ARG,
TAG, "on_descr_err not in IRAM");
}
if (user_data) {
ESP_GOTO_ON_FALSE(esp_ptr_internal(user_data), ESP_ERR_INVALID_ARG, err, TAG, "user context not in internal RAM");
ESP_RETURN_ON_FALSE(esp_ptr_internal(user_data), ESP_ERR_INVALID_ARG,
TAG, "user context not in internal RAM");
}
#endif // CONFIG_GDMA_ISR_IRAM_SAFE
// lazy install interrupt service
ESP_GOTO_ON_ERROR(gdma_install_tx_interrupt(tx_chan), err, TAG, "install interrupt service failed");
ESP_RETURN_ON_ERROR(gdma_install_tx_interrupt(tx_chan), TAG, "install interrupt service failed");
// enable/disable GDMA interrupt events for TX channel
portENTER_CRITICAL(&pair->spinlock);
gdma_ll_tx_enable_interrupt(group->hal.dev, pair->pair_id, GDMA_LL_EVENT_TX_EOF, cbs->on_trans_eof != NULL);
gdma_ll_tx_enable_interrupt(group->hal.dev, pair->pair_id, GDMA_LL_EVENT_TX_DESC_ERROR, cbs->on_descr_err != NULL);
gdma_hal_enable_intr(hal, pair->pair_id, GDMA_CHANNEL_DIRECTION_TX, GDMA_LL_EVENT_TX_EOF, cbs->on_trans_eof != NULL);
gdma_hal_enable_intr(hal, pair->pair_id, GDMA_CHANNEL_DIRECTION_TX, GDMA_LL_EVENT_TX_DESC_ERROR, cbs->on_descr_err != NULL);
portEXIT_CRITICAL(&pair->spinlock);
memcpy(&tx_chan->cbs, cbs, sizeof(gdma_tx_event_callbacks_t));
tx_chan->user_data = user_data;
ESP_GOTO_ON_ERROR(esp_intr_enable(dma_chan->intr), err, TAG, "enable interrupt failed");
ESP_RETURN_ON_ERROR(esp_intr_enable(dma_chan->intr), TAG, "enable interrupt failed");
err:
return ret;
return ESP_OK;
}
esp_err_t gdma_register_rx_event_callbacks(gdma_channel_handle_t dma_chan, gdma_rx_event_callbacks_t *cbs, void *user_data)
{
esp_err_t ret = ESP_OK;
gdma_pair_t *pair = NULL;
gdma_group_t *group = NULL;
ESP_GOTO_ON_FALSE(dma_chan && cbs && dma_chan->direction == GDMA_CHANNEL_DIRECTION_RX, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
pair = dma_chan->pair;
group = pair->group;
ESP_RETURN_ON_FALSE(dma_chan && cbs && dma_chan->direction == GDMA_CHANNEL_DIRECTION_RX, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
gdma_pair_t *pair = dma_chan->pair;
gdma_group_t *group = pair->group;
gdma_hal_context_t *hal = &group->hal;
gdma_rx_channel_t *rx_chan = __containerof(dma_chan, gdma_rx_channel_t, base);
#if CONFIG_GDMA_ISR_IRAM_SAFE
if (cbs->on_recv_eof) {
ESP_GOTO_ON_FALSE(esp_ptr_in_iram(cbs->on_recv_eof), ESP_ERR_INVALID_ARG, err, TAG, "on_recv_eof not in IRAM");
ESP_RETURN_ON_FALSE(esp_ptr_in_iram(cbs->on_recv_eof), ESP_ERR_INVALID_ARG,
TAG, "on_recv_eof not in IRAM");
}
if (cbs->on_descr_err) {
ESP_GOTO_ON_FALSE(esp_ptr_in_iram(cbs->on_descr_err), ESP_ERR_INVALID_ARG, err, TAG, "on_descr_err not in IRAM");
ESP_RETURN_ON_FALSE(esp_ptr_in_iram(cbs->on_descr_err), ESP_ERR_INVALID_ARG,
TAG, "on_descr_err not in IRAM");
}
if (cbs->on_recv_done) {
ESP_GOTO_ON_FALSE(esp_ptr_in_iram(cbs->on_recv_done), ESP_ERR_INVALID_ARG, err, TAG, "on_recv_done not in IRAM");
ESP_RETURN_ON_FALSE(esp_ptr_in_iram(cbs->on_recv_done), ESP_ERR_INVALID_ARG,
TAG, "on_recv_done not in IRAM");
}
if (user_data) {
ESP_GOTO_ON_FALSE(esp_ptr_internal(user_data), ESP_ERR_INVALID_ARG, err, TAG, "user context not in internal RAM");
ESP_RETURN_ON_FALSE(esp_ptr_internal(user_data), ESP_ERR_INVALID_ARG,
TAG, "user context not in internal RAM");
}
#endif // CONFIG_GDMA_ISR_IRAM_SAFE
// lazy install interrupt service
ESP_GOTO_ON_ERROR(gdma_install_rx_interrupt(rx_chan), err, TAG, "install interrupt service failed");
ESP_RETURN_ON_ERROR(gdma_install_rx_interrupt(rx_chan), TAG, "install interrupt service failed");
// enable/disable GDMA interrupt events for RX channel
portENTER_CRITICAL(&pair->spinlock);
gdma_ll_rx_enable_interrupt(group->hal.dev, pair->pair_id, GDMA_LL_EVENT_RX_SUC_EOF, cbs->on_recv_eof != NULL);
gdma_ll_rx_enable_interrupt(group->hal.dev, pair->pair_id, GDMA_LL_EVENT_RX_DESC_ERROR, cbs->on_descr_err != NULL);
gdma_ll_rx_enable_interrupt(group->hal.dev, pair->pair_id, GDMA_LL_EVENT_RX_DONE, cbs->on_recv_done != NULL);
gdma_hal_enable_intr(hal, pair->pair_id, GDMA_CHANNEL_DIRECTION_RX, GDMA_LL_EVENT_RX_SUC_EOF, cbs->on_recv_eof != NULL);
gdma_hal_enable_intr(hal, pair->pair_id, GDMA_CHANNEL_DIRECTION_RX, GDMA_LL_EVENT_RX_DESC_ERROR, cbs->on_descr_err != NULL);
gdma_hal_enable_intr(hal, pair->pair_id, GDMA_CHANNEL_DIRECTION_RX, GDMA_LL_EVENT_RX_DONE, cbs->on_recv_done != NULL);
portEXIT_CRITICAL(&pair->spinlock);
memcpy(&rx_chan->cbs, cbs, sizeof(gdma_rx_event_callbacks_t));
rx_chan->user_data = user_data;
ESP_GOTO_ON_ERROR(esp_intr_enable(dma_chan->intr), err, TAG, "enable interrupt failed");
ESP_RETURN_ON_ERROR(esp_intr_enable(dma_chan->intr), TAG, "enable interrupt failed");
err:
return ret;
return ESP_OK;
}
esp_err_t gdma_start(gdma_channel_handle_t dma_chan, intptr_t desc_base_addr)
{
gdma_pair_t *pair = NULL;
gdma_group_t *group = NULL;
ESP_RETURN_ON_FALSE_ISR(dma_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
ESP_RETURN_ON_FALSE_ISR(dma_chan->flags.start_stop_by_etm == false, ESP_ERR_INVALID_STATE, TAG, "channel is controlled by ETM");
pair = dma_chan->pair;
group = pair->group;
gdma_pair_t *pair = dma_chan->pair;
gdma_group_t *group = pair->group;
gdma_hal_context_t *hal = &group->hal;
portENTER_CRITICAL_SAFE(&dma_chan->spinlock);
if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_RX) {
gdma_ll_rx_set_desc_addr(group->hal.dev, pair->pair_id, desc_base_addr);
gdma_ll_rx_start(group->hal.dev, pair->pair_id);
} else {
gdma_ll_tx_set_desc_addr(group->hal.dev, pair->pair_id, desc_base_addr);
gdma_ll_tx_start(group->hal.dev, pair->pair_id);
}
gdma_hal_start_with_desc(hal, pair->pair_id, dma_chan->direction, desc_base_addr);
portEXIT_CRITICAL_SAFE(&dma_chan->spinlock);
return ESP_OK;
@ -486,19 +485,14 @@ esp_err_t gdma_start(gdma_channel_handle_t dma_chan, intptr_t desc_base_addr)
esp_err_t gdma_stop(gdma_channel_handle_t dma_chan)
{
gdma_pair_t *pair = NULL;
gdma_group_t *group = NULL;
ESP_RETURN_ON_FALSE_ISR(dma_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
ESP_RETURN_ON_FALSE_ISR(dma_chan->flags.start_stop_by_etm == false, ESP_ERR_INVALID_STATE, TAG, "channel is controlled by ETM");
pair = dma_chan->pair;
group = pair->group;
gdma_pair_t *pair = dma_chan->pair;
gdma_group_t *group = pair->group;
gdma_hal_context_t *hal = &group->hal;
portENTER_CRITICAL_SAFE(&dma_chan->spinlock);
if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_RX) {
gdma_ll_rx_stop(group->hal.dev, pair->pair_id);
} else {
gdma_ll_tx_stop(group->hal.dev, pair->pair_id);
}
gdma_hal_stop(hal, pair->pair_id, dma_chan->direction);
portEXIT_CRITICAL_SAFE(&dma_chan->spinlock);
return ESP_OK;
@ -506,44 +500,30 @@ esp_err_t gdma_stop(gdma_channel_handle_t dma_chan)
esp_err_t gdma_append(gdma_channel_handle_t dma_chan)
{
esp_err_t ret = ESP_OK;
gdma_pair_t *pair = NULL;
gdma_group_t *group = NULL;
ESP_GOTO_ON_FALSE_ISR(dma_chan, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
pair = dma_chan->pair;
group = pair->group;
ESP_RETURN_ON_FALSE_ISR(dma_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
gdma_pair_t *pair = dma_chan->pair;
gdma_group_t *group = pair->group;
gdma_hal_context_t *hal = &group->hal;
portENTER_CRITICAL_SAFE(&dma_chan->spinlock);
if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_RX) {
gdma_ll_rx_restart(group->hal.dev, pair->pair_id);
} else {
gdma_ll_tx_restart(group->hal.dev, pair->pair_id);
}
gdma_hal_append(hal, pair->pair_id, dma_chan->direction);
portEXIT_CRITICAL_SAFE(&dma_chan->spinlock);
err:
return ret;
return ESP_OK;
}
esp_err_t gdma_reset(gdma_channel_handle_t dma_chan)
{
esp_err_t ret = ESP_OK;
gdma_pair_t *pair = NULL;
gdma_group_t *group = NULL;
ESP_GOTO_ON_FALSE_ISR(dma_chan, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
pair = dma_chan->pair;
group = pair->group;
ESP_RETURN_ON_FALSE_ISR(dma_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
gdma_pair_t *pair = dma_chan->pair;
gdma_group_t *group = pair->group;
gdma_hal_context_t *hal = &group->hal;
portENTER_CRITICAL_SAFE(&dma_chan->spinlock);
if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_RX) {
gdma_ll_rx_reset_channel(group->hal.dev, pair->pair_id);
} else {
gdma_ll_tx_reset_channel(group->hal.dev, pair->pair_id);
}
gdma_hal_reset(hal, pair->pair_id, dma_chan->direction);
portEXIT_CRITICAL_SAFE(&dma_chan->spinlock);
err:
return ret;
return ESP_OK;
}
static void gdma_release_group_handle(gdma_group_t *group)
@ -556,19 +536,20 @@ static void gdma_release_group_handle(gdma_group_t *group)
if (s_platform.group_ref_counts[group_id] == 0) {
assert(s_platform.groups[group_id]);
do_deinitialize = true;
s_platform.groups[group_id] = NULL; // deregister from platfrom
gdma_ll_enable_clock(group->hal.dev, false);
periph_module_disable(gdma_periph_signals.groups[group_id].module);
// deregister from the platform
s_platform.groups[group_id] = NULL;
}
portEXIT_CRITICAL(&s_platform.spinlock);
if (do_deinitialize) {
gdma_hal_deinit(&group->hal);
periph_module_disable(gdma_periph_signals.groups[group_id].module);
free(group);
ESP_LOGD(TAG, "del group %d", group_id);
}
}
static gdma_group_t *gdma_acquire_group_handle(int group_id)
static gdma_group_t *gdma_acquire_group_handle(int group_id, void (*hal_init)(gdma_hal_context_t *hal, const gdma_hal_config_t *config))
{
bool new_group = false;
gdma_group_t *group = NULL;
@ -576,16 +557,12 @@ static gdma_group_t *gdma_acquire_group_handle(int group_id)
if (!pre_alloc_group) {
goto out;
}
portENTER_CRITICAL(&s_platform.spinlock);
if (!s_platform.groups[group_id]) {
new_group = true;
group = pre_alloc_group;
s_platform.groups[group_id] = group; // register to platform
group->group_id = group_id;
group->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
periph_module_enable(gdma_periph_signals.groups[group_id].module); // enable APB to access GDMA registers
gdma_hal_init(&group->hal, group_id); // initialize HAL context
gdma_ll_enable_clock(group->hal.dev, true); // enable gdma clock
} else {
group = s_platform.groups[group_id];
}
@ -594,7 +571,15 @@ static gdma_group_t *gdma_acquire_group_handle(int group_id)
portEXIT_CRITICAL(&s_platform.spinlock);
if (new_group) {
ESP_LOGD(TAG, "new group (%d) at %p", group->group_id, group);
group->group_id = group_id;
group->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
// enable APB to access GDMA registers
periph_module_enable(gdma_periph_signals.groups[group_id].module);
gdma_hal_config_t config = {
.group_id = group_id,
};
hal_init(&group->hal, &config);
ESP_LOGD(TAG, "new group (%d) at %p", group_id, group);
} else {
free(pre_alloc_group);
}
@ -632,14 +617,13 @@ static gdma_pair_t *gdma_acquire_pair_handle(gdma_group_t *group, int pair_id)
if (!pre_alloc_pair) {
goto out;
}
portENTER_CRITICAL(&group->spinlock);
if (!group->pairs[pair_id]) {
new_pair = true;
pair = pre_alloc_pair;
group->pairs[pair_id] = pair; // register to group
pair->group = group;
pair->pair_id = pair_id;
pair->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
// register the pair to the group
group->pairs[pair_id] = pair;
} else {
pair = group->pairs[pair_id];
}
@ -648,10 +632,16 @@ static gdma_pair_t *gdma_acquire_pair_handle(gdma_group_t *group, int pair_id)
portEXIT_CRITICAL(&group->spinlock);
if (new_pair) {
pair->group = group;
pair->pair_id = pair_id;
pair->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
portENTER_CRITICAL(&s_platform.spinlock);
s_platform.group_ref_counts[group->group_id]++; // pair obtains a reference to group
// pair obtains a reference to group, so increase it
s_platform.group_ref_counts[group->group_id]++;
portEXIT_CRITICAL(&s_platform.spinlock);
ESP_LOGD(TAG, "new pair (%d,%d) at %p", group->group_id, pair->pair_id, pair);
ESP_LOGD(TAG, "new pair (%d,%d) at %p", group->group_id, pair_id, pair);
} else {
free(pre_alloc_pair);
}
@ -663,6 +653,7 @@ static esp_err_t gdma_del_tx_channel(gdma_channel_t *dma_channel)
{
gdma_pair_t *pair = dma_channel->pair;
gdma_group_t *group = pair->group;
gdma_hal_context_t *hal = &group->hal;
int pair_id = pair->pair_id;
int group_id = group->group_id;
gdma_tx_channel_t *tx_chan = __containerof(dma_channel, gdma_tx_channel_t, base);
@ -674,14 +665,12 @@ static esp_err_t gdma_del_tx_channel(gdma_channel_t *dma_channel)
if (dma_channel->intr) {
esp_intr_free(dma_channel->intr);
portENTER_CRITICAL(&pair->spinlock);
gdma_ll_tx_enable_interrupt(group->hal.dev, pair_id, UINT32_MAX, false); // disable all interupt events
gdma_ll_tx_clear_interrupt_status(group->hal.dev, pair_id, UINT32_MAX); // clear all pending events
gdma_hal_enable_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_TX, UINT32_MAX, false); // disable all interrupt events
gdma_hal_clear_intr(hal, pair->pair_id, GDMA_CHANNEL_DIRECTION_TX, UINT32_MAX); // clear all pending events
portEXIT_CRITICAL(&pair->spinlock);
ESP_LOGD(TAG, "uninstall interrupt service for tx channel (%d,%d)", group_id, pair_id);
}
gdma_ll_tx_set_priority(group->hal.dev, pair_id, 0); // reset the priority to 0 (lowest)
free(tx_chan);
ESP_LOGD(TAG, "del tx channel (%d,%d)", group_id, pair_id);
// channel has a reference on pair, release it now
@ -693,6 +682,7 @@ static esp_err_t gdma_del_rx_channel(gdma_channel_t *dma_channel)
{
gdma_pair_t *pair = dma_channel->pair;
gdma_group_t *group = pair->group;
gdma_hal_context_t *hal = &group->hal;
int pair_id = pair->pair_id;
int group_id = group->group_id;
gdma_rx_channel_t *rx_chan = __containerof(dma_channel, gdma_rx_channel_t, base);
@ -704,32 +694,32 @@ static esp_err_t gdma_del_rx_channel(gdma_channel_t *dma_channel)
if (dma_channel->intr) {
esp_intr_free(dma_channel->intr);
portENTER_CRITICAL(&pair->spinlock);
gdma_ll_rx_enable_interrupt(group->hal.dev, pair_id, UINT32_MAX, false); // disable all interupt events
gdma_ll_rx_clear_interrupt_status(group->hal.dev, pair_id, UINT32_MAX); // clear all pending events
gdma_hal_enable_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_RX, UINT32_MAX, false); // disable all interrupt events
gdma_hal_clear_intr(hal, pair->pair_id, GDMA_CHANNEL_DIRECTION_RX, UINT32_MAX); // clear all pending events
portEXIT_CRITICAL(&pair->spinlock);
ESP_LOGD(TAG, "uninstall interrupt service for rx channel (%d,%d)", group_id, pair_id);
}
gdma_ll_rx_set_priority(group->hal.dev, pair_id, 0); // reset the priority to 0 (lowest)
free(rx_chan);
ESP_LOGD(TAG, "del rx channel (%d,%d)", group_id, pair_id);
gdma_release_pair_handle(pair);
return ESP_OK;
}
static void IRAM_ATTR gdma_default_rx_isr(void *args)
void gdma_default_rx_isr(void *args)
{
gdma_rx_channel_t *rx_chan = (gdma_rx_channel_t *)args;
gdma_pair_t *pair = rx_chan->base.pair;
gdma_group_t *group = pair->group;
gdma_hal_context_t *hal = &group->hal;
int pair_id = pair->pair_id;
bool need_yield = false;
// clear pending interrupt event
uint32_t intr_status = gdma_ll_rx_get_interrupt_status(group->hal.dev, pair->pair_id);
gdma_ll_rx_clear_interrupt_status(group->hal.dev, pair->pair_id, intr_status);
uint32_t intr_status = gdma_hal_read_intr_status(hal, pair_id, GDMA_CHANNEL_DIRECTION_RX);
gdma_hal_clear_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_RX, intr_status);
if ((intr_status & GDMA_LL_EVENT_RX_SUC_EOF) && rx_chan->cbs.on_recv_eof) {
uint32_t eof_addr = gdma_ll_rx_get_success_eof_desc_addr(group->hal.dev, pair->pair_id);
uint32_t eof_addr = gdma_hal_get_eof_desc_addr(hal, pair_id, GDMA_CHANNEL_DIRECTION_RX);
gdma_event_data_t edata = {
.rx_eof_desc_addr = eof_addr
};
@ -755,18 +745,20 @@ static void IRAM_ATTR gdma_default_rx_isr(void *args)
}
}
static void IRAM_ATTR gdma_default_tx_isr(void *args)
void gdma_default_tx_isr(void *args)
{
gdma_tx_channel_t *tx_chan = (gdma_tx_channel_t *)args;
gdma_pair_t *pair = tx_chan->base.pair;
gdma_group_t *group = pair->group;
gdma_hal_context_t *hal = &group->hal;
int pair_id = pair->pair_id;
bool need_yield = false;
// clear pending interrupt event
uint32_t intr_status = gdma_ll_tx_get_interrupt_status(group->hal.dev, pair->pair_id);
gdma_ll_tx_clear_interrupt_status(group->hal.dev, pair->pair_id, intr_status);
uint32_t intr_status = gdma_hal_read_intr_status(hal, pair_id, GDMA_CHANNEL_DIRECTION_TX);
gdma_hal_clear_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_TX, intr_status);
if ((intr_status & GDMA_LL_EVENT_TX_EOF) && tx_chan->cbs.on_trans_eof) {
uint32_t eof_addr = gdma_ll_tx_get_eof_desc_addr(group->hal.dev, pair->pair_id);
uint32_t eof_addr = gdma_hal_get_eof_desc_addr(hal, pair_id, GDMA_CHANNEL_DIRECTION_TX);
gdma_event_data_t edata = {
.tx_eof_desc_addr = eof_addr
};
@ -785,23 +777,25 @@ static esp_err_t gdma_install_rx_interrupt(gdma_rx_channel_t *rx_chan)
esp_err_t ret = ESP_OK;
gdma_pair_t *pair = rx_chan->base.pair;
gdma_group_t *group = pair->group;
gdma_hal_context_t *hal = &group->hal;
int pair_id = pair->pair_id;
// pre-alloc an interrupt handle, with handler disabled
int isr_flags = GDMA_INTR_ALLOC_FLAGS;
#if SOC_GDMA_TX_RX_SHARE_INTERRUPT
#if GDMA_LL_AHB_TX_RX_SHARE_INTERRUPT
isr_flags |= ESP_INTR_FLAG_SHARED | ESP_INTR_FLAG_LOWMED;
#endif
intr_handle_t intr = NULL;
ret = esp_intr_alloc_intrstatus(gdma_periph_signals.groups[group->group_id].pairs[pair->pair_id].rx_irq_id, isr_flags,
(uint32_t)gdma_ll_rx_get_interrupt_status_reg(group->hal.dev, pair->pair_id), GDMA_LL_RX_EVENT_MASK,
ret = esp_intr_alloc_intrstatus(gdma_periph_signals.groups[group->group_id].pairs[pair_id].rx_irq_id, isr_flags,
gdma_hal_get_intr_status_reg(hal, pair_id, GDMA_CHANNEL_DIRECTION_RX), GDMA_LL_RX_EVENT_MASK,
gdma_default_rx_isr, rx_chan, &intr);
ESP_GOTO_ON_ERROR(ret, err, TAG, "alloc interrupt failed");
rx_chan->base.intr = intr;
portENTER_CRITICAL(&pair->spinlock);
gdma_ll_rx_enable_interrupt(group->hal.dev, pair->pair_id, UINT32_MAX, false); // disable all interupt events
gdma_ll_rx_clear_interrupt_status(group->hal.dev, pair->pair_id, UINT32_MAX); // clear all pending events
gdma_hal_enable_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_RX, UINT32_MAX, false); // disable all interrupt events
gdma_hal_clear_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_RX, UINT32_MAX); // clear all pending events
portEXIT_CRITICAL(&pair->spinlock);
ESP_LOGD(TAG, "install interrupt service for rx channel (%d,%d)", group->group_id, pair->pair_id);
ESP_LOGD(TAG, "install interrupt service for rx channel (%d,%d)", group->group_id, pair_id);
err:
return ret;
@ -812,23 +806,25 @@ static esp_err_t gdma_install_tx_interrupt(gdma_tx_channel_t *tx_chan)
esp_err_t ret = ESP_OK;
gdma_pair_t *pair = tx_chan->base.pair;
gdma_group_t *group = pair->group;
gdma_hal_context_t *hal = &group->hal;
int pair_id = pair->pair_id;
// pre-alloc an interrupt handle, with handler disabled
int isr_flags = GDMA_INTR_ALLOC_FLAGS;
#if SOC_GDMA_TX_RX_SHARE_INTERRUPT
#if GDMA_LL_AHB_TX_RX_SHARE_INTERRUPT
isr_flags |= ESP_INTR_FLAG_SHARED | ESP_INTR_FLAG_LOWMED;
#endif
intr_handle_t intr = NULL;
ret = esp_intr_alloc_intrstatus(gdma_periph_signals.groups[group->group_id].pairs[pair->pair_id].tx_irq_id, isr_flags,
(uint32_t)gdma_ll_tx_get_interrupt_status_reg(group->hal.dev, pair->pair_id), GDMA_LL_TX_EVENT_MASK,
ret = esp_intr_alloc_intrstatus(gdma_periph_signals.groups[group->group_id].pairs[pair_id].tx_irq_id, isr_flags,
gdma_hal_get_intr_status_reg(hal, pair_id, GDMA_CHANNEL_DIRECTION_TX), GDMA_LL_TX_EVENT_MASK,
gdma_default_tx_isr, tx_chan, &intr);
ESP_GOTO_ON_ERROR(ret, err, TAG, "alloc interrupt failed");
tx_chan->base.intr = intr;
portENTER_CRITICAL(&pair->spinlock);
gdma_ll_tx_enable_interrupt(group->hal.dev, pair->pair_id, UINT32_MAX, false); // disable all interupt events
gdma_ll_tx_clear_interrupt_status(group->hal.dev, pair->pair_id, UINT32_MAX); // clear all pending events
gdma_hal_enable_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_TX, UINT32_MAX, false); // disable all interrupt events
gdma_hal_clear_intr(hal, pair_id, GDMA_CHANNEL_DIRECTION_TX, UINT32_MAX); // clear all pending events
portEXIT_CRITICAL(&pair->spinlock);
ESP_LOGD(TAG, "install interrupt service for tx channel (%d,%d)", group->group_id, pair->pair_id);
ESP_LOGD(TAG, "install interrupt service for tx channel (%d,%d)", group->group_id, pair_id);
err:
return ret;

View File

@ -15,6 +15,8 @@
#include "soc/soc_caps.h"
#include "hal/gdma_hal.h"
#include "hal/gdma_ll.h"
#include "hal/gdma_hal_ahb.h"
#include "hal/gdma_hal_axi.h"
#include "soc/gdma_periph.h"
#include "esp_private/gdma.h"
@ -40,13 +42,14 @@ typedef struct gdma_tx_channel_t gdma_tx_channel_t;
typedef struct gdma_rx_channel_t gdma_rx_channel_t;
typedef struct gdma_group_t {
int group_id; // Group ID, index from 0
int group_id; // Group ID, index from 0
int bus_id; // which system bus the GDMA instance is attached to
gdma_hal_context_t hal; // HAL instance is at group level
portMUX_TYPE spinlock; // group level spinlock
uint32_t tx_periph_in_use_mask; // each bit indicates which peripheral (TX direction) has been occupied
uint32_t rx_periph_in_use_mask; // each bit indicates which peripheral (RX direction) has been occupied
gdma_pair_t *pairs[SOC_GDMA_PAIRS_PER_GROUP]; // handles of GDMA pairs
int pair_ref_counts[SOC_GDMA_PAIRS_PER_GROUP]; // reference count used to protect pair install/uninstall
gdma_pair_t *pairs[SOC_GDMA_PAIRS_PER_GROUP_MAX]; // handles of GDMA pairs
int pair_ref_counts[SOC_GDMA_PAIRS_PER_GROUP_MAX]; // reference count used to protect pair install/uninstall
} gdma_group_t;
struct gdma_pair_t {

View File

@ -0,0 +1,69 @@
[mapping:gdma_driver]
archive: libesp_hw_support.a
entries:
# performance optimization, always put the DMA default interrupt handler in IRAM
if SOC_GDMA_SUPPORTED = y:
gdma: gdma_default_tx_isr (noflash)
gdma: gdma_default_rx_isr (noflash)
# put GDMA control functions in IRAM
if GDMA_CTRL_FUNC_IN_IRAM = y:
gdma: gdma_start (noflash)
gdma: gdma_stop (noflash)
gdma: gdma_append (noflash)
gdma: gdma_reset (noflash)
[mapping:gdma_hal]
archive: libhal.a
entries:
# performance optimization, always put the DMA default interrupt handler in IRAM
if SOC_GDMA_SUPPORTED = y:
gdma_hal_top: gdma_hal_clear_intr (noflash)
gdma_hal_top: gdma_hal_read_intr_status (noflash)
gdma_hal_top: gdma_hal_get_eof_desc_addr (noflash)
# GDMA implementation layer for AHB-DMA version 1
if SOC_AHB_GDMA_VERSION = 1:
gdma_hal_ahb_v1: gdma_ahb_hal_clear_intr (noflash)
gdma_hal_ahb_v1: gdma_ahb_hal_read_intr_status (noflash)
gdma_hal_ahb_v1: gdma_ahb_hal_get_eof_desc_addr (noflash)
# GDMA implementation layer for AHB-DMA version 2
if SOC_AHB_GDMA_VERSION = 2:
gdma_hal_ahb_v2: gdma_ahb_hal_clear_intr (noflash)
gdma_hal_ahb_v2: gdma_ahb_hal_read_intr_status (noflash)
gdma_hal_ahb_v2: gdma_ahb_hal_get_eof_desc_addr (noflash)
# GDMA implementation layer for AXI-DMA
if SOC_AXI_GDMA_SUPPORTED = y:
gdma_hal_axi: gdma_axi_hal_clear_intr (noflash)
gdma_hal_axi: gdma_axi_hal_read_intr_status (noflash)
gdma_hal_axi: gdma_axi_hal_get_eof_desc_addr (noflash)
# put GDMA control HAL functions in IRAM
if GDMA_CTRL_FUNC_IN_IRAM = y:
gdma_hal_top: gdma_hal_start_with_desc (noflash)
gdma_hal_top: gdma_hal_stop (noflash)
gdma_hal_top: gdma_hal_append (noflash)
gdma_hal_top: gdma_hal_reset (noflash)
# GDMA implementation layer for AHB-DMA version 1
if SOC_AHB_GDMA_VERSION = 1:
gdma_hal_ahb_v1: gdma_ahb_hal_start_with_desc (noflash)
gdma_hal_ahb_v1: gdma_ahb_hal_stop (noflash)
gdma_hal_ahb_v1: gdma_ahb_hal_append (noflash)
gdma_hal_ahb_v1: gdma_ahb_hal_reset (noflash)
# GDMA implementation layer for AHB-DMA version 2
if SOC_AHB_GDMA_VERSION = 2:
gdma_hal_ahb_v2: gdma_ahb_hal_start_with_desc (noflash)
gdma_hal_ahb_v2: gdma_ahb_hal_stop (noflash)
gdma_hal_ahb_v2: gdma_ahb_hal_append (noflash)
gdma_hal_ahb_v2: gdma_ahb_hal_reset (noflash)
# GDMA implementation layer for AXI-DMA
if SOC_AXI_GDMA_SUPPORTED = y:
gdma_hal_axi: gdma_axi_hal_start_with_desc (noflash)
gdma_hal_axi: gdma_axi_hal_stop (noflash)
gdma_hal_axi: gdma_axi_hal_append (noflash)
gdma_hal_axi: gdma_axi_hal_reset (noflash)

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -98,6 +98,7 @@ typedef struct {
typedef struct {
gdma_trigger_peripheral_t periph; /*!< Target peripheral which will trigger DMA operations */
int instance_id; /*!< Peripheral instance ID. Supported IDs are listed in `soc/gdma_channel.h`, e.g. SOC_GDMA_TRIG_PERIPH_UHCI0 */
int bus_id; /*!< Which system bus the DMA should be attached to */
} gdma_trigger_t;
/**
@ -107,7 +108,7 @@ typedef struct {
*
*/
#define GDMA_MAKE_TRIGGER(peri, id) \
(gdma_trigger_t) { .periph = peri, .instance_id = SOC_##peri##id }
(gdma_trigger_t) { .periph = peri, .instance_id = SOC_##peri##id, .bus_id = SOC_##peri##id##_BUS }
/**
* @brief A collection of strategy item that each GDMA channel could apply
@ -118,20 +119,39 @@ typedef struct {
bool auto_update_desc; /*!< If set / clear, DMA channel enables / disables hardware to update descriptor automatically (TX channel only) */
} gdma_strategy_config_t;
/** @cond */
esp_err_t gdma_new_channel(const gdma_channel_alloc_config_t *config, gdma_channel_handle_t *ret_chan);
/** @endcond */
/**
* @brief Create GDMA channel
* @brief Create AHB-GDMA channel
* @note This API won't install interrupt service for the allocated channel.
* If interrupt service is needed, user has to register GDMA event callback by `gdma_register_tx_event_callbacks` or `gdma_register_rx_event_callbacks`.
*
* @param[in] config Pointer to a collection of configurations for allocating GDMA channel
* @param[out] ret_chan Returnned channel handle
* @param[out] ret_chan Returned channel handle
* @return
* - ESP_OK: Create DMA channel successfully
* - ESP_ERR_INVALID_ARG: Create DMA channel failed because of invalid argument
* - ESP_ERR_NO_MEM: Create DMA channel failed because out of memory
* - ESP_FAIL: Create DMA channel failed because of other error
*/
esp_err_t gdma_new_channel(const gdma_channel_alloc_config_t *config, gdma_channel_handle_t *ret_chan);
esp_err_t gdma_new_ahb_channel(const gdma_channel_alloc_config_t *config, gdma_channel_handle_t *ret_chan);
/**
* @brief Create AXI-GDMA channel
* @note This API won't install interrupt service for the allocated channel.
* If interrupt service is needed, user has to register GDMA event callback by `gdma_register_tx_event_callbacks` or `gdma_register_rx_event_callbacks`.
*
* @param[in] config Pointer to a collection of configurations for allocating GDMA channel
* @param[out] ret_chan Returned channel handle
* @return
* - ESP_OK: Create DMA channel successfully
* - ESP_ERR_INVALID_ARG: Create DMA channel failed because of invalid argument
* - ESP_ERR_NO_MEM: Create DMA channel failed because out of memory
* - ESP_FAIL: Create DMA channel failed because of other error
*/
esp_err_t gdma_new_axi_channel(const gdma_channel_alloc_config_t *config, gdma_channel_handle_t *ret_chan);
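
For reference, a minimal usage sketch of the bus-specific allocation API declared above (error handling trimmed; the trigger peripheral, instance ID and alignment values are illustrative assumptions, not requirements):

#include "esp_err.h"
#include "esp_private/gdma.h"

static void example_alloc_ahb_tx_channel(void)
{
    // allocate a TX channel on the AHB GDMA instance
    gdma_channel_alloc_config_t alloc_cfg = {
        .direction = GDMA_CHANNEL_DIRECTION_TX,
    };
    gdma_channel_handle_t tx_chan = NULL;
    ESP_ERROR_CHECK(gdma_new_ahb_channel(&alloc_cfg, &tx_chan));

    // connect the channel to a trigger peripheral (memory-to-memory here, for illustration)
    ESP_ERROR_CHECK(gdma_connect(tx_chan, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_M2M, 0)));

    // describe transfer alignment so the driver can pick suitable burst/block settings
    gdma_transfer_ability_t ability = {
        .sram_trans_align = 4,
        .psram_trans_align = 0, // 0: fall back to the data cache line size
    };
    ESP_ERROR_CHECK(gdma_set_transfer_ability(tx_chan, &ability));
}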
/**
* @brief Connect GDMA channel to trigger peripheral

View File

@ -31,11 +31,6 @@ entries:
if PERIPH_CTRL_FUNC_IN_IRAM = y && ESP_WIFI_ENABLED = y:
periph_ctrl: wifi_module_enable (noflash)
periph_ctrl: wifi_module_disable (noflash)
if GDMA_CTRL_FUNC_IN_IRAM = y:
gdma: gdma_start (noflash)
gdma: gdma_stop (noflash)
gdma: gdma_append (noflash)
gdma: gdma_reset (noflash)
if SOC_SYSTIMER_SUPPORTED = y:
systimer (noflash)
if APP_BUILD_TYPE_PURE_RAM_APP = n:

View File

@ -47,7 +47,7 @@ static void async_memcpy_setup_testbench(memcpy_testbench_context_t *test_contex
uint8_t *dst_buf = NULL;
uint8_t *from_addr = NULL;
uint8_t *to_addr = NULL;
#if CONFIG_SPIRAM && SOC_GDMA_SUPPORT_PSRAM
#if CONFIG_SPIRAM && SOC_AHB_GDMA_SUPPORT_PSRAM
if (test_context->src_in_psram) {
src_buf = heap_caps_malloc(buffer_size, MALLOC_CAP_SPIRAM);
} else {
@ -249,7 +249,7 @@ static void memcpy_performance_test(uint32_t buffer_size)
IDF_LOG_PERFORMANCE("CPU_COPY", "%.2f MB/s, dir: SRAM->SRAM, size: %zu Bytes", throughput, test_context.buffer_size);
async_memcpy_verify_and_clear_testbench(test_context.seed, test_context.buffer_size, test_context.src_buf, test_context.dst_buf, test_context.from_addr, test_context.to_addr);
#if CONFIG_SPIRAM && SOC_GDMA_SUPPORT_PSRAM
#if CONFIG_SPIRAM && SOC_AHB_GDMA_SUPPORT_PSRAM
// 2. PSRAM->PSRAM
test_context.src_in_psram = true;
test_context.dst_in_psram = true;

View File

@ -1,47 +1,48 @@
/*
* SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2021-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "unity.h"
#include "esp_private/gdma.h"
#include "soc/soc_caps.h"
#include "hal/gdma_ll.h"
TEST_CASE("GDMA channel allocation", "[gdma]")
TEST_CASE("AHB GDMA channel allocation", "[gdma]")
{
gdma_channel_alloc_config_t channel_config = {};
gdma_channel_handle_t tx_channels[SOC_GDMA_PAIRS_PER_GROUP] = {};
gdma_channel_handle_t rx_channels[SOC_GDMA_PAIRS_PER_GROUP] = {};
gdma_channel_handle_t tx_channels[GDMA_LL_AHB_PAIRS_PER_GROUP] = {};
gdma_channel_handle_t rx_channels[GDMA_LL_AHB_PAIRS_PER_GROUP] = {};
channel_config.direction = GDMA_CHANNEL_DIRECTION_TX;
gdma_tx_event_callbacks_t tx_cbs = {};
gdma_rx_event_callbacks_t rx_cbs = {};
// install TX channels
for (int i = 0; i < SOC_GDMA_PAIRS_PER_GROUP; i++) {
for (int i = 0; i < GDMA_LL_AHB_PAIRS_PER_GROUP; i++) {
TEST_ESP_OK(gdma_new_channel(&channel_config, &tx_channels[i]));
TEST_ESP_OK(gdma_register_tx_event_callbacks(tx_channels[i], &tx_cbs, NULL));
};
TEST_ASSERT_EQUAL(ESP_ERR_NOT_FOUND, gdma_new_channel(&channel_config, &tx_channels[0]));
// Free interrupts before installing RX interrupts to ensure enough free interrupts
for (int i = 0; i < SOC_GDMA_PAIRS_PER_GROUP; i++) {
for (int i = 0; i < GDMA_LL_AHB_PAIRS_PER_GROUP; i++) {
TEST_ESP_OK(gdma_del_channel(tx_channels[i]));
}
// install RX channels
channel_config.direction = GDMA_CHANNEL_DIRECTION_RX;
for (int i = 0; i < SOC_GDMA_PAIRS_PER_GROUP; i++) {
for (int i = 0; i < GDMA_LL_AHB_PAIRS_PER_GROUP; i++) {
TEST_ESP_OK(gdma_new_channel(&channel_config, &rx_channels[i]));
TEST_ESP_OK(gdma_register_rx_event_callbacks(rx_channels[i], &rx_cbs, NULL));
}
TEST_ASSERT_EQUAL(ESP_ERR_NOT_FOUND, gdma_new_channel(&channel_config, &rx_channels[0]));
for (int i = 0; i < SOC_GDMA_PAIRS_PER_GROUP; i++) {
for (int i = 0; i < GDMA_LL_AHB_PAIRS_PER_GROUP; i++) {
TEST_ESP_OK(gdma_del_channel(rx_channels[i]));
}
// install single and paired TX/RX channels
#if SOC_GDMA_PAIRS_PER_GROUP >= 2
#if GDMA_LL_AHB_PAIRS_PER_GROUP >= 2
// single tx channel
channel_config.direction = GDMA_CHANNEL_DIRECTION_TX;
TEST_ESP_OK(gdma_new_channel(&channel_config, &tx_channels[0]));

View File

@ -85,7 +85,19 @@ if(NOT BOOTLOADER_BUILD)
endif()
if(CONFIG_SOC_GDMA_SUPPORTED)
list(APPEND srcs "gdma_hal.c")
list(APPEND srcs "gdma_hal_top.c")
endif()
if(CONFIG_SOC_AHB_GDMA_VERSION EQUAL 1)
list(APPEND srcs "gdma_hal_ahb_v1.c")
endif()
if(CONFIG_SOC_AHB_GDMA_VERSION EQUAL 2)
list(APPEND srcs "gdma_hal_ahb_v2.c")
endif()
if(CONFIG_SOC_AXI_GDMA_SUPPORTED)
list(APPEND srcs "gdma_hal_axi.c")
endif()
if(CONFIG_SOC_I2S_SUPPORTED)

View File

@ -27,7 +27,7 @@
/*---------------------------------------------------------------
Define all ADC DMA required operations here
---------------------------------------------------------------*/
#if SOC_GDMA_SUPPORTED
#if SOC_AHB_GDMA_VERSION == 1
#define adc_dma_ll_rx_clear_intr(dev, chan, mask) gdma_ll_rx_clear_interrupt_status(dev, chan, mask)
#define adc_dma_ll_rx_enable_intr(dev, chan, mask) gdma_ll_rx_enable_interrupt(dev, chan, mask, true)
#define adc_dma_ll_rx_disable_intr(dev, chan, mask) gdma_ll_rx_enable_interrupt(dev, chan, mask, false)

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -41,11 +41,16 @@ extern "C" {
#define GDMA_LL_EVENT_RX_SUC_EOF (1<<1)
#define GDMA_LL_EVENT_RX_DONE (1<<0)
#define GDMA_LL_AHB_GROUP_START_ID 0 // AHB GDMA group ID starts from 0
#define GDMA_LL_AHB_NUM_GROUPS 1 // Number of AHB GDMA groups
#define GDMA_LL_AHB_PAIRS_PER_GROUP 1 // Number of GDMA pairs in each AHB group
#define GDMA_LL_AHB_TX_RX_SHARE_INTERRUPT 1 // TX and RX channel in the same pair will share the same interrupt source number
///////////////////////////////////// Common /////////////////////////////////////////
/**
* @brief Enable DMA clock gating
* @brief Force enable register clock
*/
static inline void gdma_ll_enable_clock(gdma_dev_t *dev, bool enable)
static inline void gdma_ll_force_enable_reg_clock(gdma_dev_t *dev, bool enable)
{
dev->misc_conf.clk_en = enable;
}

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -41,11 +41,16 @@ extern "C" {
#define GDMA_LL_EVENT_RX_SUC_EOF (1<<1)
#define GDMA_LL_EVENT_RX_DONE (1<<0)
#define GDMA_LL_AHB_GROUP_START_ID 0 // AHB GDMA group ID starts from 0
#define GDMA_LL_AHB_NUM_GROUPS 1 // Number of AHB GDMA groups
#define GDMA_LL_AHB_PAIRS_PER_GROUP 3 // Number of GDMA pairs in each AHB group
#define GDMA_LL_AHB_TX_RX_SHARE_INTERRUPT 1 // TX and RX channel in the same pair will share the same interrupt source number
///////////////////////////////////// Common /////////////////////////////////////////
/**
* @brief Enable DMA clock gating
* @brief Force enable register clock
*/
static inline void gdma_ll_enable_clock(gdma_dev_t *dev, bool enable)
static inline void gdma_ll_force_enable_reg_clock(gdma_dev_t *dev, bool enable)
{
dev->misc_conf.clk_en = enable;
}

View File

@ -42,6 +42,10 @@ extern "C" {
#define GDMA_LL_EVENT_RX_SUC_EOF (1<<1)
#define GDMA_LL_EVENT_RX_DONE (1<<0)
#define GDMA_LL_AHB_GROUP_START_ID 0 // AHB GDMA group ID starts from 0
#define GDMA_LL_AHB_NUM_GROUPS 1 // Number of AHB GDMA groups
#define GDMA_LL_AHB_PAIRS_PER_GROUP 3 // Number of GDMA pairs in each AHB group
#define GDMA_LL_TX_ETM_EVENT_TABLE(group, chan, event) \
(uint32_t[1][3][GDMA_ETM_EVENT_MAX]){{{ \
[GDMA_ETM_EVENT_EOF] = GDMA_EVT_OUT_EOF_CH0, \
@ -88,9 +92,9 @@ extern "C" {
///////////////////////////////////// Common /////////////////////////////////////////
/**
* @brief Enable DMA clock gating
* @brief Force enable register clock
*/
static inline void gdma_ll_enable_clock(gdma_dev_t *dev, bool enable)
static inline void gdma_ll_force_enable_reg_clock(gdma_dev_t *dev, bool enable)
{
dev->misc_conf.clk_en = enable;
}
@ -102,7 +106,7 @@ static inline void gdma_ll_enable_clock(gdma_dev_t *dev, bool enable)
__attribute__((always_inline))
static inline uint32_t gdma_ll_rx_get_interrupt_status(gdma_dev_t *dev, uint32_t channel)
{
return dev->in_intr[channel].st.val & GDMA_LL_RX_EVENT_MASK;
return dev->in_intr[channel].st.val;
}
/**
@ -111,9 +115,9 @@ static inline uint32_t gdma_ll_rx_get_interrupt_status(gdma_dev_t *dev, uint32_t
static inline void gdma_ll_rx_enable_interrupt(gdma_dev_t *dev, uint32_t channel, uint32_t mask, bool enable)
{
if (enable) {
dev->in_intr[channel].ena.val |= (mask & GDMA_LL_RX_EVENT_MASK);
dev->in_intr[channel].ena.val |= mask;
} else {
dev->in_intr[channel].ena.val &= ~(mask & GDMA_LL_RX_EVENT_MASK);
dev->in_intr[channel].ena.val &= ~mask;
}
}
@ -123,7 +127,7 @@ static inline void gdma_ll_rx_enable_interrupt(gdma_dev_t *dev, uint32_t channel
__attribute__((always_inline))
static inline void gdma_ll_rx_clear_interrupt_status(gdma_dev_t *dev, uint32_t channel, uint32_t mask)
{
dev->in_intr[channel].clr.val = (mask & GDMA_LL_RX_EVENT_MASK);
dev->in_intr[channel].clr.val = mask;
}
/**
@ -326,7 +330,7 @@ static inline void gdma_ll_rx_enable_etm_task(gdma_dev_t *dev, uint32_t channel,
__attribute__((always_inline))
static inline uint32_t gdma_ll_tx_get_interrupt_status(gdma_dev_t *dev, uint32_t channel)
{
return dev->out_intr[channel].st.val & GDMA_LL_TX_EVENT_MASK;
return dev->out_intr[channel].st.val;
}
/**
@ -335,9 +339,9 @@ static inline uint32_t gdma_ll_tx_get_interrupt_status(gdma_dev_t *dev, uint32_t
static inline void gdma_ll_tx_enable_interrupt(gdma_dev_t *dev, uint32_t channel, uint32_t mask, bool enable)
{
if (enable) {
dev->out_intr[channel].ena.val |= (mask & GDMA_LL_TX_EVENT_MASK);
dev->out_intr[channel].ena.val |= mask;
} else {
dev->out_intr[channel].ena.val &= ~(mask & GDMA_LL_TX_EVENT_MASK);
dev->out_intr[channel].ena.val &= ~mask;
}
}
@ -347,7 +351,7 @@ static inline void gdma_ll_tx_enable_interrupt(gdma_dev_t *dev, uint32_t channel
__attribute__((always_inline))
static inline void gdma_ll_tx_clear_interrupt_status(gdma_dev_t *dev, uint32_t channel, uint32_t mask)
{
dev->out_intr[channel].clr.val = (mask & GDMA_LL_TX_EVENT_MASK);
dev->out_intr[channel].clr.val = mask;
}
/**

View File

@ -42,6 +42,10 @@ extern "C" {
#define GDMA_LL_EVENT_RX_SUC_EOF (1<<1)
#define GDMA_LL_EVENT_RX_DONE (1<<0)
#define GDMA_LL_AHB_GROUP_START_ID 0 // AHB GDMA group ID starts from 0
#define GDMA_LL_AHB_NUM_GROUPS 1 // Number of AHB GDMA groups
#define GDMA_LL_AHB_PAIRS_PER_GROUP 3 // Number of GDMA pairs in each AHB group
#define GDMA_LL_TX_ETM_EVENT_TABLE(group, chan, event) \
(uint32_t[1][3][GDMA_ETM_EVENT_MAX]){{{ \
[GDMA_ETM_EVENT_EOF] = GDMA_EVT_OUT_EOF_CH0, \
@ -88,9 +92,9 @@ extern "C" {
///////////////////////////////////// Common /////////////////////////////////////////
/**
* @brief Enable DMA clock gating
* @brief Force enable register clock
*/
static inline void gdma_ll_enable_clock(gdma_dev_t *dev, bool enable)
static inline void gdma_ll_force_enable_reg_clock(gdma_dev_t *dev, bool enable)
{
dev->misc_conf.clk_en = enable;
}
@ -102,7 +106,7 @@ static inline void gdma_ll_enable_clock(gdma_dev_t *dev, bool enable)
__attribute__((always_inline))
static inline uint32_t gdma_ll_rx_get_interrupt_status(gdma_dev_t *dev, uint32_t channel)
{
return dev->in_intr[channel].st.val & GDMA_LL_RX_EVENT_MASK;
return dev->in_intr[channel].st.val;
}
/**
@ -111,9 +115,9 @@ static inline uint32_t gdma_ll_rx_get_interrupt_status(gdma_dev_t *dev, uint32_t
static inline void gdma_ll_rx_enable_interrupt(gdma_dev_t *dev, uint32_t channel, uint32_t mask, bool enable)
{
if (enable) {
dev->in_intr[channel].ena.val |= (mask & GDMA_LL_RX_EVENT_MASK);
dev->in_intr[channel].ena.val |= mask;
} else {
dev->in_intr[channel].ena.val &= ~(mask & GDMA_LL_RX_EVENT_MASK);
dev->in_intr[channel].ena.val &= ~mask;
}
}
@ -123,7 +127,7 @@ static inline void gdma_ll_rx_enable_interrupt(gdma_dev_t *dev, uint32_t channel
__attribute__((always_inline))
static inline void gdma_ll_rx_clear_interrupt_status(gdma_dev_t *dev, uint32_t channel, uint32_t mask)
{
dev->in_intr[channel].clr.val = (mask & GDMA_LL_RX_EVENT_MASK);
dev->in_intr[channel].clr.val = mask;
}
/**
@ -326,7 +330,7 @@ static inline void gdma_ll_rx_enable_etm_task(gdma_dev_t *dev, uint32_t channel,
__attribute__((always_inline))
static inline uint32_t gdma_ll_tx_get_interrupt_status(gdma_dev_t *dev, uint32_t channel)
{
return dev->out_intr[channel].st.val & GDMA_LL_TX_EVENT_MASK;
return dev->out_intr[channel].st.val;
}
/**
@ -335,9 +339,9 @@ static inline uint32_t gdma_ll_tx_get_interrupt_status(gdma_dev_t *dev, uint32_t
static inline void gdma_ll_tx_enable_interrupt(gdma_dev_t *dev, uint32_t channel, uint32_t mask, bool enable)
{
if (enable) {
dev->out_intr[channel].ena.val |= (mask & GDMA_LL_TX_EVENT_MASK);
dev->out_intr[channel].ena.val |= mask;
} else {
dev->out_intr[channel].ena.val &= ~(mask & GDMA_LL_TX_EVENT_MASK);
dev->out_intr[channel].ena.val &= ~mask;
}
}
@ -347,7 +351,7 @@ static inline void gdma_ll_tx_enable_interrupt(gdma_dev_t *dev, uint32_t channel
__attribute__((always_inline))
static inline void gdma_ll_tx_clear_interrupt_status(gdma_dev_t *dev, uint32_t channel, uint32_t mask)
{
dev->out_intr[channel].clr.val = (mask & GDMA_LL_TX_EVENT_MASK);
dev->out_intr[channel].clr.val = mask;
}
/**

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -8,6 +8,7 @@
#include <stddef.h> /* For NULL declaration */
#include <stdint.h>
#include <stdbool.h>
#include "hal/assert.h"
#include "hal/gdma_types.h"
#include "soc/gdma_struct.h"
#include "soc/gdma_reg.h"
@ -47,18 +48,22 @@ extern "C" {
#define GDMA_LL_EVENT_RX_SUC_EOF (1<<1)
#define GDMA_LL_EVENT_RX_DONE (1<<0)
#define GDMA_LL_L2FIFO_BASE_SIZE (16) // Basic size of GDMA Level 2 FIFO
#define GDMA_LL_L2FIFO_BASE_SIZE 16 // Basic size of GDMA Level 2 FIFO
/* Memory block size value supported by channel */
#define GDMA_LL_EXT_MEM_BK_SIZE_16B (0)
#define GDMA_LL_EXT_MEM_BK_SIZE_32B (1)
#define GDMA_LL_EXT_MEM_BK_SIZE_64B (2)
#define GDMA_LL_EXT_MEM_BK_SIZE_16B 0
#define GDMA_LL_EXT_MEM_BK_SIZE_32B 1
#define GDMA_LL_EXT_MEM_BK_SIZE_64B 2
#define GDMA_LL_AHB_GROUP_START_ID 0 // AHB GDMA group ID starts from 0
#define GDMA_LL_AHB_NUM_GROUPS 1 // Number of AHB GDMA groups
#define GDMA_LL_AHB_PAIRS_PER_GROUP 5 // Number of GDMA pairs in each AHB group
///////////////////////////////////// Common /////////////////////////////////////////
/**
* @brief Enable DMA clock gating
* @brief Force enable register clock
*/
static inline void gdma_ll_enable_clock(gdma_dev_t *dev, bool enable)
static inline void gdma_ll_force_enable_reg_clock(gdma_dev_t *dev, bool enable)
{
dev->misc_conf.clk_en = enable;
}
@ -137,12 +142,28 @@ static inline void gdma_ll_rx_reset_channel(gdma_dev_t *dev, uint32_t channel)
}
/**
* @brief Set DMA RX channel memory block size
* @param size_index Supported value: GDMA_LL_EXT_MEM_BK_SIZE_16B/32B/64B
* @brief Set DMA RX channel memory block size based on the alignment requirement
* @param align Supported value: 16/32/64
*/
static inline void gdma_ll_rx_set_block_size_psram(gdma_dev_t *dev, uint32_t channel, uint32_t size_index)
static inline void gdma_ll_rx_set_ext_mem_block_size(gdma_dev_t *dev, uint32_t channel, uint8_t align)
{
dev->channel[channel].in.conf1.in_ext_mem_bk_size = size_index;
uint32_t block_size = 0;
switch (align) {
case 64: // 64 Bytes alignment
block_size = GDMA_LL_EXT_MEM_BK_SIZE_64B;
break;
case 32: // 32 Bytes alignment
block_size = GDMA_LL_EXT_MEM_BK_SIZE_32B;
break;
case 16: // 16 Bytes alignment
block_size = GDMA_LL_EXT_MEM_BK_SIZE_16B;
break;
default:
HAL_ASSERT(false);
break;
}
dev->channel[channel].in.conf1.in_ext_mem_bk_size = block_size;
}
/**
@ -401,12 +422,28 @@ static inline void gdma_ll_tx_reset_channel(gdma_dev_t *dev, uint32_t channel)
}
/**
* @brief Set DMA TX channel memory block size
* @param size_index Supported value: GDMA_LL_EXT_MEM_BK_SIZE_16B/32B/64B
* @brief Set DMA TX channel memory block size based on the alignment requirement
* @param align Supported value: 16/32/64
*/
static inline void gdma_ll_tx_set_block_size_psram(gdma_dev_t *dev, uint32_t channel, uint32_t size_index)
static inline void gdma_ll_tx_set_ext_mem_block_size(gdma_dev_t *dev, uint32_t channel, uint8_t align)
{
dev->channel[channel].out.conf1.out_ext_mem_bk_size = size_index;
uint32_t block_size = 0;
switch (align) {
case 64: // 64 Bytes alignment
block_size = GDMA_LL_EXT_MEM_BK_SIZE_64B;
break;
case 32: // 32 Bytes alignment
block_size = GDMA_LL_EXT_MEM_BK_SIZE_32B;
break;
case 16: // 16 Bytes alignment
block_size = GDMA_LL_EXT_MEM_BK_SIZE_16B;
break;
default:
HAL_ASSERT(false);
break;
}
dev->channel[channel].out.conf1.out_ext_mem_bk_size = block_size;
}
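The new gdma_ll_rx/tx_set_ext_mem_block_size helpers translate a byte alignment into the hardware block-size code. A minimal usage sketch, assuming channel index 0 and a device handle obtained via GDMA_LL_GET_HW(0) (both illustrative, not part of this commit):
#include "hal/gdma_ll.h"
// Sketch only: pick the PSRAM block size from the desired transfer alignment.
// Only 16/32/64-byte alignments are accepted; anything else trips HAL_ASSERT.
static void example_config_psram_alignment(void)
{
    gdma_dev_t *dev = GDMA_LL_GET_HW(0); // illustrative: group 0
    gdma_ll_rx_set_ext_mem_block_size(dev, 0, 32);
    gdma_ll_tx_set_ext_mem_block_size(dev, 0, 32);
}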
/**

View File

@ -1,13 +0,0 @@
/*
* SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "hal/gdma_hal.h"
#include "hal/gdma_ll.h"
void gdma_hal_init(gdma_hal_context_t *hal, int group_id)
{
hal->dev = GDMA_LL_GET_HW(group_id);
}

View File

@ -0,0 +1,181 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "soc/soc_caps.h"
#include "hal/assert.h"
#include "hal/gdma_hal_ahb.h"
#include "hal/gdma_ll.h"
static gdma_hal_priv_data_t gdma_ahb_hal_priv_data = {
.m2m_free_periph_mask = GDMA_LL_M2M_FREE_PERIPH_ID_MASK,
};
void gdma_ahb_hal_start_with_desc(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, intptr_t desc_base_addr)
{
if (dir == GDMA_CHANNEL_DIRECTION_RX) {
gdma_ll_rx_set_desc_addr(hal->dev, chan_id, desc_base_addr);
gdma_ll_rx_start(hal->dev, chan_id);
} else {
gdma_ll_tx_set_desc_addr(hal->dev, chan_id, desc_base_addr);
gdma_ll_tx_start(hal->dev, chan_id);
}
}
void gdma_ahb_hal_stop(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{
if (dir == GDMA_CHANNEL_DIRECTION_RX) {
gdma_ll_rx_stop(hal->dev, chan_id);
} else {
gdma_ll_tx_stop(hal->dev, chan_id);
}
}
void gdma_ahb_hal_append(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{
if (dir == GDMA_CHANNEL_DIRECTION_RX) {
gdma_ll_rx_restart(hal->dev, chan_id);
} else {
gdma_ll_tx_restart(hal->dev, chan_id);
}
}
void gdma_ahb_hal_reset(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{
if (dir == GDMA_CHANNEL_DIRECTION_RX) {
gdma_ll_rx_reset_channel(hal->dev, chan_id);
} else {
gdma_ll_tx_reset_channel(hal->dev, chan_id);
}
}
void gdma_ahb_hal_set_priority(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t priority)
{
if (dir == GDMA_CHANNEL_DIRECTION_RX) {
gdma_ll_rx_set_priority(hal->dev, chan_id, priority);
} else {
gdma_ll_tx_set_priority(hal->dev, chan_id, priority);
}
}
void gdma_ahb_hal_connect_peri(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, gdma_trigger_peripheral_t periph, int periph_sub_id)
{
if (dir == GDMA_CHANNEL_DIRECTION_RX) {
gdma_ll_rx_reset_channel(hal->dev, chan_id); // reset channel
gdma_ll_rx_connect_to_periph(hal->dev, chan_id, periph, periph_sub_id);
} else {
gdma_ll_tx_reset_channel(hal->dev, chan_id); // reset channel
gdma_ll_tx_connect_to_periph(hal->dev, chan_id, periph, periph_sub_id);
}
}
void gdma_ahb_hal_disconnect_peri(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{
if (dir == GDMA_CHANNEL_DIRECTION_RX) {
gdma_ll_rx_disconnect_from_periph(hal->dev, chan_id);
} else {
gdma_ll_tx_disconnect_from_periph(hal->dev, chan_id);
}
}
void gdma_ahb_hal_enable_burst(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_data_burst, bool en_desc_burst)
{
if (dir == GDMA_CHANNEL_DIRECTION_RX) {
gdma_ll_rx_enable_data_burst(hal->dev, chan_id, en_data_burst);
gdma_ll_rx_enable_descriptor_burst(hal->dev, chan_id, en_desc_burst);
} else {
gdma_ll_tx_enable_data_burst(hal->dev, chan_id, en_data_burst);
gdma_ll_tx_enable_descriptor_burst(hal->dev, chan_id, en_desc_burst);
}
}
#if SOC_AHB_GDMA_SUPPORT_PSRAM
void gdma_ahb_hal_set_ext_mem_align(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint8_t align)
{
if (dir == GDMA_CHANNEL_DIRECTION_RX) {
gdma_ll_rx_set_ext_mem_block_size(hal->dev, chan_id, align);
} else {
gdma_ll_tx_set_ext_mem_block_size(hal->dev, chan_id, align);
}
}
#endif
void gdma_ahb_hal_set_strategy(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_owner_check, bool en_desc_write_back)
{
if (dir == GDMA_CHANNEL_DIRECTION_RX) {
gdma_ll_rx_enable_owner_check(hal->dev, chan_id, en_owner_check);
} else {
gdma_ll_tx_enable_owner_check(hal->dev, chan_id, en_owner_check);
gdma_ll_tx_enable_auto_write_back(hal->dev, chan_id, en_desc_write_back);
}
}
void gdma_ahb_hal_enable_intr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask, bool en_or_dis)
{
if (dir == GDMA_CHANNEL_DIRECTION_RX) {
gdma_ll_rx_enable_interrupt(hal->dev, chan_id, intr_event_mask, en_or_dis);
} else {
gdma_ll_tx_enable_interrupt(hal->dev, chan_id, intr_event_mask, en_or_dis);
}
}
void gdma_ahb_hal_clear_intr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask)
{
if (dir == GDMA_CHANNEL_DIRECTION_RX) {
gdma_ll_rx_clear_interrupt_status(hal->dev, chan_id, intr_event_mask);
} else {
gdma_ll_tx_clear_interrupt_status(hal->dev, chan_id, intr_event_mask);
}
}
uint32_t gdma_ahb_hal_read_intr_status(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{
if (dir == GDMA_CHANNEL_DIRECTION_RX) {
return gdma_ll_rx_get_interrupt_status(hal->dev, chan_id);
} else {
return gdma_ll_tx_get_interrupt_status(hal->dev, chan_id);
}
}
uint32_t gdma_ahb_hal_get_intr_status_reg(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{
if (dir == GDMA_CHANNEL_DIRECTION_RX) {
return (uint32_t)gdma_ll_rx_get_interrupt_status_reg(hal->dev, chan_id);
} else {
return (uint32_t)gdma_ll_tx_get_interrupt_status_reg(hal->dev, chan_id);
}
}
uint32_t gdma_ahb_hal_get_eof_desc_addr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{
if (dir == GDMA_CHANNEL_DIRECTION_RX) {
return gdma_ll_rx_get_success_eof_desc_addr(hal->dev, chan_id);
} else {
return gdma_ll_tx_get_eof_desc_addr(hal->dev, chan_id);
}
}
void gdma_ahb_hal_init(gdma_hal_context_t *hal, const gdma_hal_config_t *config)
{
hal->dev = GDMA_LL_GET_HW(config->group_id - GDMA_LL_AHB_GROUP_START_ID);
hal->start_with_desc = gdma_ahb_hal_start_with_desc;
hal->stop = gdma_ahb_hal_stop;
hal->append = gdma_ahb_hal_append;
hal->reset = gdma_ahb_hal_reset;
hal->set_priority = gdma_ahb_hal_set_priority;
hal->connect_peri = gdma_ahb_hal_connect_peri;
hal->disconnect_peri = gdma_ahb_hal_disconnect_peri;
hal->enable_burst = gdma_ahb_hal_enable_burst;
hal->set_strategy = gdma_ahb_hal_set_strategy;
hal->enable_intr = gdma_ahb_hal_enable_intr;
hal->clear_intr = gdma_ahb_hal_clear_intr;
hal->read_intr_status = gdma_ahb_hal_read_intr_status;
hal->get_intr_status_reg = gdma_ahb_hal_get_intr_status_reg;
hal->get_eof_desc_addr = gdma_ahb_hal_get_eof_desc_addr;
#if SOC_AHB_GDMA_SUPPORT_PSRAM
hal->set_ext_mem_align = gdma_ahb_hal_set_ext_mem_align;
#endif // SOC_AHB_GDMA_SUPPORT_PSRAM
hal->priv_data = &gdma_ahb_hal_priv_data;
}
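As a rough usage sketch (not part of this commit), the driver layer is expected to call gdma_ahb_hal_init once per group and then operate the channel purely through the generic gdma_hal_* wrappers; the channel index, trigger peripheral, and descriptor address below are illustrative assumptions:
#include "hal/gdma_hal.h"
#include "hal/gdma_hal_ahb.h"
static void example_start_m2m_tx(intptr_t first_desc_addr)
{
    gdma_hal_context_t hal = {0};
    gdma_hal_config_t config = {
        .group_id = 0, // AHB group IDs start from GDMA_LL_AHB_GROUP_START_ID
    };
    gdma_ahb_hal_init(&hal, &config); // installs the AHB callbacks into the dispatch table
    gdma_hal_connect_peri(&hal, 0, GDMA_CHANNEL_DIRECTION_TX, GDMA_TRIG_PERIPH_M2M, 0); // 0 as an illustrative sub ID
    gdma_hal_start_with_desc(&hal, 0, GDMA_CHANNEL_DIRECTION_TX, first_desc_addr);
}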

View File

@ -0,0 +1,91 @@
/*
* SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdlib.h>
#include "hal/assert.h"
#include "hal/gdma_hal.h"
void gdma_hal_deinit(gdma_hal_context_t *hal)
{
hal->generic_dev = NULL;
}
void gdma_hal_start_with_desc(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, intptr_t desc_base_addr)
{
hal->start_with_desc(hal, chan_id, dir, desc_base_addr);
}
void gdma_hal_stop(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{
hal->stop(hal, chan_id, dir);
}
void gdma_hal_append(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{
hal->append(hal, chan_id, dir);
}
void gdma_hal_reset(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{
hal->reset(hal, chan_id, dir);
}
void gdma_hal_set_priority(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t priority)
{
hal->set_priority(hal, chan_id, dir, priority);
}
void gdma_hal_connect_peri(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, gdma_trigger_peripheral_t periph, int periph_sub_id)
{
hal->connect_peri(hal, chan_id, dir, periph, periph_sub_id);
}
void gdma_hal_disconnect_peri(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{
hal->disconnect_peri(hal, chan_id, dir);
}
void gdma_hal_enable_burst(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_data_burst, bool en_desc_burst)
{
hal->enable_burst(hal, chan_id, dir, en_data_burst, en_desc_burst);
}
void gdma_hal_set_ext_mem_align(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint8_t align)
{
if (hal->set_ext_mem_align) {
hal->set_ext_mem_align(hal, chan_id, dir, align);
}
}
void gdma_hal_set_strategy(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_owner_check, bool en_desc_write_back)
{
hal->set_strategy(hal, chan_id, dir, en_owner_check, en_desc_write_back);
}
void gdma_hal_enable_intr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask, bool en_or_dis)
{
hal->enable_intr(hal, chan_id, dir, intr_event_mask, en_or_dis);
}
void gdma_hal_clear_intr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask)
{
hal->clear_intr(hal, chan_id, dir, intr_event_mask);
}
uint32_t gdma_hal_read_intr_status(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{
return hal->read_intr_status(hal, chan_id, dir);
}
uint32_t gdma_hal_get_intr_status_reg(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{
return hal->get_intr_status_reg(hal, chan_id, dir);
}
uint32_t gdma_hal_get_eof_desc_addr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{
return hal->get_eof_desc_addr(hal, chan_id, dir);
}

View File

@ -1,34 +1,118 @@
/*
* SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/*******************************************************************************
* NOTICE
* The HAL is not public api, don't use in application code.
* See readme.md in soc/README.md
******************************************************************************/
#pragma once
#include <stdbool.h>
#include <stdint.h>
#include "soc/soc_caps.h"
#include "hal/gdma_types.h"
// TODO: don't expose the SOC header files; a dedicated typedef could wrap the register dev pointer
#if SOC_AHB_GDMA_VERSION == 1
#include "soc/gdma_struct.h"
#endif
#if SOC_AHB_GDMA_VERSION == 2
#include "soc/ahb_dma_struct.h"
#endif
#if SOC_AXI_GDMA_SUPPORTED
#include "soc/axi_dma_struct.h"
#endif
#ifdef __cplusplus
extern "C" {
#endif
#include "soc/soc_caps.h"
#if SOC_GDMA_SUPPORTED
#include "soc/gdma_struct.h"
/// forward declaration of the HAL context
typedef struct gdma_hal_context_t gdma_hal_context_t;
/**
* @brief GDMA HAL configuration
*/
typedef struct {
gdma_dev_t *dev;
} gdma_hal_context_t;
int group_id; /*!< GDMA group ID */
} gdma_hal_config_t;
void gdma_hal_init(gdma_hal_context_t *hal, int group_id);
/**
* @brief GDMA HAL private data
*/
typedef struct {
// The bitmap of peripheral IDs usable by M2M differs between AXI DMA and AHB DMA, so each backend keeps its own copy
uint32_t m2m_free_periph_mask;
// TODO: more private data can be added here, e.g. the interrupt event mask of interest
// For now, AXI DMA and AHB DMA share the same interrupt mask, so it is not stored here
// If they ever become incompatible, each backend should keep its own copy as private data
} gdma_hal_priv_data_t;
/**
* @brief HAL context definition
*/
struct gdma_hal_context_t {
/// The underlying hardware register block differs between backends
union {
#if SOC_AHB_GDMA_VERSION == 1
gdma_dev_t *dev;
#endif
#if SOC_AHB_GDMA_VERSION == 2
ahb_dma_dev_t *ahb_dma_dev;
#endif
#if SOC_AXI_GDMA_SUPPORTED
axi_dma_dev_t *axi_dma_dev;
#endif
void *generic_dev;
};
gdma_hal_priv_data_t *priv_data; /// private data for the HAL
void (*start_with_desc)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, intptr_t desc_base_addr); /// Start the channel from the given descriptor base address
void (*stop)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir); /// stop the channel
void (*append)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir); /// Append a descriptor to the channel
void (*reset)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir); /// Reset the channel
void (*set_priority)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t priority); /// Set the channel priority
void (*connect_peri)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, gdma_trigger_peripheral_t periph, int periph_sub_id); /// Connect the channel to a peripheral
void (*disconnect_peri)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir); /// Disconnect the channel from a peripheral
void (*enable_burst)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_data_burst, bool en_desc_burst); /// Enable burst mode
void (*set_ext_mem_align)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint8_t align); /// Set the alignment of the external memory
void (*set_strategy)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_owner_check, bool en_desc_write_back); /// Set miscellaneous strategies for the channel behaviour
uint32_t (*get_intr_status_reg)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir); /// Get the interrupt status register address
void (*enable_intr)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask, bool en_or_dis); /// Enable the channel interrupt
void (*clear_intr)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask); /// Clear the channel interrupt
uint32_t (*read_intr_status)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir); /// Read the channel interrupt status
uint32_t (*get_eof_desc_addr)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir); /// Get the address of the descriptor with EOF flag set
};
void gdma_hal_deinit(gdma_hal_context_t *hal);
void gdma_hal_start_with_desc(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, intptr_t desc_base_addr);
void gdma_hal_stop(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
void gdma_hal_append(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
void gdma_hal_reset(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
void gdma_hal_set_priority(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t priority);
void gdma_hal_connect_peri(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, gdma_trigger_peripheral_t periph, int periph_sub_id);
void gdma_hal_disconnect_peri(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
void gdma_hal_enable_burst(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_data_burst, bool en_desc_burst);
void gdma_hal_set_ext_mem_align(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint8_t align);
void gdma_hal_set_strategy(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_owner_check, bool en_desc_write_back);
void gdma_hal_enable_intr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask, bool en_or_dis);
void gdma_hal_clear_intr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask);
uint32_t gdma_hal_get_intr_status_reg(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
uint32_t gdma_hal_read_intr_status(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
uint32_t gdma_hal_get_eof_desc_addr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
#ifdef __cplusplus
}
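Because every operation dispatches through the context's function pointers, adding another backend only requires a new init routine that fills the table. A purely hypothetical sketch (the gdma_axi_hal_* names below are not part of this commit):
#include "hal/gdma_hal.h"
// Hypothetical backend-specific implementation of one callback
static void gdma_axi_hal_start_with_desc(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, intptr_t desc_base_addr)
{
    (void)hal; (void)chan_id; (void)dir; (void)desc_base_addr;
    // program the backend's registers here
}
void gdma_axi_hal_init(gdma_hal_context_t *hal, const gdma_hal_config_t *config)
{
    (void)config;
    hal->start_with_desc = gdma_axi_hal_start_with_desc;
    // ... install the remaining mandatory callbacks here.
    // Optional callbacks such as set_ext_mem_align may stay NULL:
    // gdma_hal_set_ext_mem_align() simply skips the call in that case.
}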

View File

@ -0,0 +1,49 @@
/*
* SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include "hal/gdma_hal.h"
#ifdef __cplusplus
extern "C" {
#endif
void gdma_ahb_hal_start_with_desc(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, intptr_t desc_base_addr);
void gdma_ahb_hal_stop(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
void gdma_ahb_hal_append(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
void gdma_ahb_hal_reset(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
void gdma_ahb_hal_set_priority(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t priority);
void gdma_ahb_hal_connect_peri(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, gdma_trigger_peripheral_t periph, int periph_sub_id);
void gdma_ahb_hal_disconnect_peri(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
void gdma_ahb_hal_enable_burst(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_data_burst, bool en_desc_burst);
void gdma_ahb_hal_set_ext_mem_align(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint8_t align);
void gdma_ahb_hal_set_strategy(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_owner_check, bool en_desc_write_back);
void gdma_ahb_hal_enable_intr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask, bool en_or_dis);
void gdma_ahb_hal_clear_intr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask);
uint32_t gdma_ahb_hal_read_intr_status(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
uint32_t gdma_ahb_hal_get_intr_status_reg(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
uint32_t gdma_ahb_hal_get_eof_desc_addr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
void gdma_ahb_hal_init(gdma_hal_context_t *hal, const gdma_hal_config_t *config);
#ifdef __cplusplus
}
#endif

View File

@ -13,7 +13,6 @@ extern "C" {
/**
* @brief Enumeration of peripherals which have the DMA capability
* @note Some peripherals might not be available on certain chips, please refer to `soc_caps.h` for details.
*
*/
typedef enum {
GDMA_TRIG_PERIPH_M2M, /*!< GDMA trigger peripheral: M2M */
@ -32,7 +31,6 @@ typedef enum {
/**
* @brief Enumeration of GDMA channel direction
*
*/
typedef enum {
GDMA_CHANNEL_DIRECTION_TX, /*!< GDMA channel direction: TX */

View File

@ -13,7 +13,7 @@
#include "soc/clk_tree_defs.h"
//This GDMA related part will be introduced by GDMA dedicated APIs in the future. Here we temporarily use macros.
#if SOC_GDMA_SUPPORTED
#if SOC_AHB_GDMA_VERSION == 1
#include "soc/gdma_struct.h"
#include "hal/gdma_ll.h"

View File

@ -12,7 +12,7 @@
#include "soc/soc_caps.h"
//This GDMA related part will be introduced by GDMA dedicated APIs in the future. Here we temporarily use macros.
#if SOC_GDMA_SUPPORTED
#if SOC_AHB_GDMA_VERSION == 1
#include "soc/gdma_struct.h"
#include "hal/gdma_ll.h"

View File

@ -3,7 +3,7 @@
#include "soc/soc_caps.h"
//This GDMA related part will be introduced by GDMA dedicated APIs in the future. Here we temporarily use macros.
#if SOC_GDMA_SUPPORTED
#if SOC_AHB_GDMA_VERSION == 1
#include "soc/gdma_struct.h"
#include "hal/gdma_ll.h"

View File

@ -3,7 +3,7 @@
#include "soc/soc_caps.h"
//This GDMA related part will be introduced by GDMA dedicated APIs in the future. Here we temporarily use macros.
#if SOC_GDMA_SUPPORTED
#if SOC_AHB_GDMA_VERSION == 1
#include "soc/gdma_struct.h"
#include "hal/gdma_ll.h"

View File

@ -18,7 +18,7 @@
#include "hal/assert.h"
//This GDMA related part will be introduced by GDMA dedicated APIs in the future. Here we temporarily use macros.
#if SOC_GDMA_SUPPORTED
#if SOC_AHB_GDMA_VERSION == 1
#include "soc/gdma_struct.h"
#include "hal/gdma_ll.h"
#define spi_dma_ll_tx_restart(dev, chan) gdma_ll_tx_restart(&GDMA, chan)

View File

@ -1,19 +1,10 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
* SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "esp_aes_dma_priv.h"
#include "hal/gdma_ll.h"
#include "esp_crypto_shared_gdma.h"
esp_err_t esp_aes_dma_start(const lldesc_t *input, const lldesc_t *output)

View File

@ -60,6 +60,7 @@ static esp_err_t crypto_shared_gdma_init(void)
gdma_transfer_ability_t transfer_ability = {
.sram_trans_align = 1,
.psram_trans_align = 16,
};

View File

@ -229,7 +229,7 @@ TEST_CASE("mbedtls SHA512 clone", "[mbedtls]")
TEST_ASSERT_EQUAL_MEMORY_MESSAGE(sha512_thousand_bs, sha512, 64, "SHA512 cloned calculation");
}
TEST_CASE("mbedtls SHA384 clone", "[mbedtls][")
TEST_CASE("mbedtls SHA384 clone", "[mbedtls]")
{
mbedtls_sha512_context ctx;
mbedtls_sha512_context clone;

View File

@ -18,7 +18,7 @@
#include "ccomp_timer.h"
#include "test_mbedtls_utils.h"
TEST_CASE("mbedtls SHA performance", "[aes]")
TEST_CASE("mbedtls SHA performance", "[mbedtls]")
{
const unsigned CALLS = 256;
const unsigned CALL_SZ = 16 * 1024;

View File

@ -19,6 +19,10 @@ config SOC_GDMA_SUPPORTED
bool
default y
config SOC_AHB_GDMA_SUPPORTED
bool
default y
config SOC_GPTIMER_SUPPORTED
bool
default y
@ -215,17 +219,17 @@ config SOC_ECC_SUPPORT_POINT_VERIFY_QUIRK
bool
default y
config SOC_GDMA_GROUPS
config SOC_AHB_GDMA_VERSION
int
default 1
config SOC_GDMA_PAIRS_PER_GROUP
config SOC_GDMA_NUM_GROUPS_MAX
int
default 1
config SOC_GDMA_TX_RX_SHARE_INTERRUPT
bool
default y
config SOC_GDMA_PAIRS_PER_GROUP_MAX
int
default 1
config SOC_GPIO_PORT
int

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -12,3 +12,13 @@
#define SOC_GDMA_TRIG_PERIPH_UHCI0 (2)
#define SOC_GDMA_TRIG_PERIPH_SHA0 (7)
#define SOC_GDMA_TRIG_PERIPH_ADC0 (8)
// The system bus that the peripheral's DMA connection is mounted on
#define SOC_GDMA_BUS_ANY (-1)
#define SOC_GDMA_BUS_AHB (0)
#define SOC_GDMA_TRIG_PERIPH_M2M0_BUS SOC_GDMA_BUS_ANY
#define SOC_GDMA_TRIG_PERIPH_SPI2_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_UHCI0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_SHA0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_ADC0_BUS SOC_GDMA_BUS_AHB
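These *_BUS macros describe which system bus each peripheral's DMA connection sits on; SOC_GDMA_BUS_ANY means the connection is not tied to a particular bus. A hypothetical helper (not part of this commit) showing how they could be consumed:
#include <stdbool.h>
// Sketch: decide whether an AHB GDMA channel can reach a given trigger peripheral
static inline bool example_periph_reachable_via_ahb(int periph_bus_id)
{
    return (periph_bus_id == SOC_GDMA_BUS_ANY) || (periph_bus_id == SOC_GDMA_BUS_AHB);
}
// e.g. example_periph_reachable_via_ahb(SOC_GDMA_TRIG_PERIPH_SHA0_BUS) evaluates to true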

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2021-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -29,6 +29,7 @@
#define SOC_DEDICATED_GPIO_SUPPORTED 1
#define SOC_UART_SUPPORTED 1
#define SOC_GDMA_SUPPORTED 1
#define SOC_AHB_GDMA_SUPPORTED 1
#define SOC_GPTIMER_SUPPORTED 1
#define SOC_BT_SUPPORTED 1
#define SOC_WIFI_SUPPORTED 1
@ -104,9 +105,9 @@
#define SOC_ECC_SUPPORT_POINT_VERIFY_QUIRK 1 // C2 ECC peripheral has a bug in ECC point verification, if value of K is zero the verification fails
/*-------------------------- GDMA CAPS -------------------------------------*/
#define SOC_GDMA_GROUPS (1U) // Number of GDMA groups
#define SOC_GDMA_PAIRS_PER_GROUP (1U) // Number of GDMA pairs in each group
#define SOC_GDMA_TX_RX_SHARE_INTERRUPT (1) // TX and RX channel in the same pair will share the same interrupt source number
#define SOC_AHB_GDMA_VERSION 1U
#define SOC_GDMA_NUM_GROUPS_MAX 1U
#define SOC_GDMA_PAIRS_PER_GROUP_MAX 1U
/*-------------------------- GPIO CAPS ---------------------------------------*/
// ESP32-C2 has 1 GPIO peripheral
@ -187,7 +188,6 @@
/* The SHA engine is able to resume hashing from a user */
#define SOC_SHA_SUPPORT_RESUME (1)
/* Supported HW algorithms */
#define SOC_SHA_SUPPORT_SHA1 (1)
#define SOC_SHA_SUPPORT_SHA224 (1)

View File

@ -19,6 +19,10 @@ config SOC_GDMA_SUPPORTED
bool
default y
config SOC_AHB_GDMA_SUPPORTED
bool
default y
config SOC_GPTIMER_SUPPORTED
bool
default y
@ -307,18 +311,18 @@ config SOC_DS_KEY_CHECK_MAX_WAIT_US
int
default 1100
config SOC_GDMA_GROUPS
config SOC_AHB_GDMA_VERSION
int
default 1
config SOC_GDMA_PAIRS_PER_GROUP
config SOC_GDMA_NUM_GROUPS_MAX
int
default 1
config SOC_GDMA_PAIRS_PER_GROUP_MAX
int
default 3
config SOC_GDMA_TX_RX_SHARE_INTERRUPT
bool
default y
config SOC_GPIO_PORT
int
default 1

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -14,3 +14,15 @@
#define SOC_GDMA_TRIG_PERIPH_AES0 (6)
#define SOC_GDMA_TRIG_PERIPH_SHA0 (7)
#define SOC_GDMA_TRIG_PERIPH_ADC0 (8)
// The system bus that the peripheral's DMA connection is mounted on
#define SOC_GDMA_BUS_ANY (-1)
#define SOC_GDMA_BUS_AHB (0)
#define SOC_GDMA_TRIG_PERIPH_M2M0_BUS SOC_GDMA_BUS_ANY
#define SOC_GDMA_TRIG_PERIPH_SPI2_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_UHCI0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_I2S0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_AES0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_SHA0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_ADC0_BUS SOC_GDMA_BUS_AHB

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2021-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -29,6 +29,7 @@
#define SOC_DEDICATED_GPIO_SUPPORTED 1
#define SOC_UART_SUPPORTED 1
#define SOC_GDMA_SUPPORTED 1
#define SOC_AHB_GDMA_SUPPORTED 1
#define SOC_GPTIMER_SUPPORTED 1
#define SOC_TWAI_SUPPORTED 1
#define SOC_BT_SUPPORTED 1
@ -142,9 +143,9 @@
#define SOC_DS_KEY_CHECK_MAX_WAIT_US (1100)
/*-------------------------- GDMA CAPS -------------------------------------*/
#define SOC_GDMA_GROUPS (1U) // Number of GDMA groups
#define SOC_GDMA_PAIRS_PER_GROUP (3) // Number of GDMA pairs in each group
#define SOC_GDMA_TX_RX_SHARE_INTERRUPT (1) // TX and RX channel in the same pair will share the same interrupt source number
#define SOC_AHB_GDMA_VERSION 1U
#define SOC_GDMA_NUM_GROUPS_MAX 1U
#define SOC_GDMA_PAIRS_PER_GROUP_MAX 3
/*-------------------------- GPIO CAPS ---------------------------------------*/
// ESP32-C3 has 1 GPIO peripheral
@ -426,6 +427,6 @@
/*---------------------------------- Bluetooth CAPS ----------------------------------*/
#define SOC_BLE_SUPPORTED (1) /*!< Support Bluetooth Low Energy hardware */
#define SOC_BLE_MESH_SUPPORTED (1) /*!< Support BLE MESH */
#define SOC_BLE_50_SUPPORTED (1) /*!< Support Bluetooth 5.0 */
#define SOC_BLE_50_SUPPORTED (1) /*!< Support Bluetooth 5.0 */
#define SOC_BLE_DEVICE_PRIVACY_SUPPORTED (1) /*!< Support BLE device privacy mode */
#define SOC_BLUFI_SUPPORTED (1) /*!< Support BLUFI */

View File

@ -19,6 +19,10 @@ config SOC_GDMA_SUPPORTED
bool
default y
config SOC_AHB_GDMA_SUPPORTED
bool
default y
config SOC_GPTIMER_SUPPORTED
bool
default y
@ -375,11 +379,15 @@ config SOC_DS_KEY_CHECK_MAX_WAIT_US
int
default 1100
config SOC_GDMA_GROUPS
config SOC_AHB_GDMA_VERSION
int
default 1
config SOC_GDMA_PAIRS_PER_GROUP
config SOC_GDMA_NUM_GROUPS_MAX
int
default 1
config SOC_GDMA_PAIRS_PER_GROUP_MAX
int
default 3

View File

@ -15,3 +15,16 @@
#define SOC_GDMA_TRIG_PERIPH_SHA0 (7)
#define SOC_GDMA_TRIG_PERIPH_ADC0 (8)
#define SOC_GDMA_TRIG_PERIPH_PARLIO0 (9)
// The system bus that the peripheral's DMA connection is mounted on
#define SOC_GDMA_BUS_ANY (-1)
#define SOC_GDMA_BUS_AHB (0)
#define SOC_GDMA_TRIG_PERIPH_M2M0_BUS SOC_GDMA_BUS_ANY
#define SOC_GDMA_TRIG_PERIPH_SPI2_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_UHCI0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_I2S0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_AES0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_SHA0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_ADC0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_PARLIO0_BUS SOC_GDMA_BUS_AHB

View File

@ -29,6 +29,7 @@
#define SOC_DEDICATED_GPIO_SUPPORTED 1
#define SOC_UART_SUPPORTED 1
#define SOC_GDMA_SUPPORTED 1
#define SOC_AHB_GDMA_SUPPORTED 1
#define SOC_GPTIMER_SUPPORTED 1
#define SOC_PCNT_SUPPORTED 1
#define SOC_MCPWM_SUPPORTED 1
@ -161,9 +162,10 @@
#define SOC_DS_KEY_CHECK_MAX_WAIT_US (1100)
/*-------------------------- GDMA CAPS -------------------------------------*/
#define SOC_GDMA_GROUPS (1U) // Number of GDMA groups
#define SOC_GDMA_PAIRS_PER_GROUP (3) // Number of GDMA pairs in each group
#define SOC_GDMA_SUPPORT_ETM (1) // Support ETM submodule
#define SOC_AHB_GDMA_VERSION 1U
#define SOC_GDMA_NUM_GROUPS_MAX 1U
#define SOC_GDMA_PAIRS_PER_GROUP_MAX 3
#define SOC_GDMA_SUPPORT_ETM 1 // Support ETM submodule
/*-------------------------- ETM CAPS --------------------------------------*/
#define SOC_ETM_GROUPS 1U // Number of ETM groups

View File

@ -23,6 +23,10 @@ config SOC_GDMA_SUPPORTED
bool
default y
config SOC_AHB_GDMA_SUPPORTED
bool
default y
config SOC_ASYNC_MEMCPY_SUPPORTED
bool
default y
@ -367,11 +371,15 @@ config SOC_DS_KEY_CHECK_MAX_WAIT_US
int
default 1100
config SOC_GDMA_GROUPS
config SOC_AHB_GDMA_VERSION
int
default 1
config SOC_GDMA_PAIRS_PER_GROUP
config SOC_GDMA_NUM_GROUPS_MAX
int
default 1
config SOC_GDMA_PAIRS_PER_GROUP_MAX
int
default 3

View File

@ -15,3 +15,16 @@
#define SOC_GDMA_TRIG_PERIPH_SHA0 (7)
#define SOC_GDMA_TRIG_PERIPH_ADC0 (8)
#define SOC_GDMA_TRIG_PERIPH_PARLIO0 (9)
// The system bus that the peripheral's DMA connection is mounted on
#define SOC_GDMA_BUS_ANY (-1)
#define SOC_GDMA_BUS_AHB (0)
#define SOC_GDMA_TRIG_PERIPH_M2M0_BUS SOC_GDMA_BUS_ANY
#define SOC_GDMA_TRIG_PERIPH_SPI2_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_UHCI0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_I2S0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_AES0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_SHA0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_ADC0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_PARLIO0_BUS SOC_GDMA_BUS_AHB

View File

@ -30,6 +30,7 @@
#define SOC_DEDICATED_GPIO_SUPPORTED 1
#define SOC_UART_SUPPORTED 1
#define SOC_GDMA_SUPPORTED 1
#define SOC_AHB_GDMA_SUPPORTED 1
#define SOC_ASYNC_MEMCPY_SUPPORTED 1
#define SOC_PCNT_SUPPORTED 1
#define SOC_MCPWM_SUPPORTED 1
@ -161,9 +162,10 @@
#define SOC_DS_KEY_CHECK_MAX_WAIT_US (1100)
/*-------------------------- GDMA CAPS -------------------------------------*/
#define SOC_GDMA_GROUPS (1U) // Number of GDMA groups
#define SOC_GDMA_PAIRS_PER_GROUP (3) // Number of GDMA pairs in each group
#define SOC_GDMA_SUPPORT_ETM (1) // Support ETM submodule
#define SOC_AHB_GDMA_VERSION 1U
#define SOC_GDMA_NUM_GROUPS_MAX 1U
#define SOC_GDMA_PAIRS_PER_GROUP_MAX 3
#define SOC_GDMA_SUPPORT_ETM 1 // Support ETM submodule
/*-------------------------- ETM CAPS --------------------------------------*/
#define SOC_ETM_GROUPS 1U // Number of ETM groups

View File

@ -47,6 +47,10 @@ config SOC_GDMA_SUPPORTED
bool
default y
config SOC_AHB_GDMA_SUPPORTED
bool
default y
config SOC_GPTIMER_SUPPORTED
bool
default y
@ -363,15 +367,19 @@ config SOC_DS_KEY_CHECK_MAX_WAIT_US
int
default 1100
config SOC_GDMA_GROUPS
bool
default y
config SOC_AHB_GDMA_VERSION
int
default 1
config SOC_GDMA_PAIRS_PER_GROUP
config SOC_GDMA_NUM_GROUPS_MAX
int
default 1
config SOC_GDMA_PAIRS_PER_GROUP_MAX
int
default 5
config SOC_GDMA_SUPPORT_PSRAM
config SOC_AHB_GDMA_SUPPORT_PSRAM
bool
default y

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -19,3 +19,20 @@
#define SOC_GDMA_TRIG_PERIPH_SHA0 (7)
#define SOC_GDMA_TRIG_PERIPH_ADC0 (8)
#define SOC_GDMA_TRIG_PERIPH_RMT0 (9)
// The system bus that the peripheral's DMA connection is mounted on
#define SOC_GDMA_BUS_ANY (-1)
#define SOC_GDMA_BUS_AHB (0)
#define SOC_GDMA_TRIG_PERIPH_M2M0_BUS SOC_GDMA_BUS_ANY
#define SOC_GDMA_TRIG_PERIPH_SPI2_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_SPI3_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_UHCI0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_I2S0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_I2S1_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_LCD0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_CAM0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_AES0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_SHA0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_ADC0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_RMT0_BUS SOC_GDMA_BUS_AHB

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2019-2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2019-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -31,6 +31,7 @@
#define SOC_WIFI_SUPPORTED 1
#define SOC_TWAI_SUPPORTED 1
#define SOC_GDMA_SUPPORTED 1
#define SOC_AHB_GDMA_SUPPORTED 1
#define SOC_GPTIMER_SUPPORTED 1
#define SOC_LCDCAM_SUPPORTED 1
#define SOC_MCPWM_SUPPORTED 1
@ -145,9 +146,10 @@
#define SOC_DS_KEY_CHECK_MAX_WAIT_US (1100)
/*-------------------------- GDMA CAPS ---------------------------------------*/
#define SOC_GDMA_GROUPS (1) // Number of GDMA groups
#define SOC_GDMA_PAIRS_PER_GROUP (5) // Number of GDMA pairs in each group
#define SOC_GDMA_SUPPORT_PSRAM (1) // GDMA can access external PSRAM
#define SOC_AHB_GDMA_VERSION 1U
#define SOC_GDMA_NUM_GROUPS_MAX 1U
#define SOC_GDMA_PAIRS_PER_GROUP_MAX 5
#define SOC_AHB_GDMA_SUPPORT_PSRAM 1
/*-------------------------- GPIO CAPS ---------------------------------------*/
// ESP32-S3 has 1 GPIO peripheral
@ -168,7 +170,6 @@
// digital I/O pad powered by VDD3P3_CPU or VDD_SPI(GPIO_NUM_26~GPIO_NUM_48)
#define SOC_GPIO_VALID_DIGITAL_IO_PAD_MASK 0x0001FFFFFC000000ULL
/*-------------------------- Dedicated GPIO CAPS -----------------------------*/
#define SOC_DEDIC_GPIO_OUT_CHANNELS_NUM (8) /*!< 8 outward channels on each CPU core */
#define SOC_DEDIC_GPIO_IN_CHANNELS_NUM (8) /*!< 8 inward channels on each CPU core */
@ -335,7 +336,7 @@
#define SOC_TIMER_GROUP_TOTAL_TIMERS (4)
/*-------------------------- TOUCH SENSOR CAPS -------------------------------*/
#define SOC_TOUCH_VERSION_2 (1) // Hardware version of touch sensor
#define SOC_TOUCH_VERSION_2 (1) // Hardware version of touch sensor
#define SOC_TOUCH_SENSOR_NUM (15) /*! 15 Touch channels */
#define SOC_TOUCH_PROXIMITY_CHANNEL_NUM (3) /* Supported touch proximity channel number. */
#define SOC_TOUCH_PROXIMITY_MEAS_DONE_SUPPORTED (1) /* Support touch proximity channel measure-done interrupt type. */
@ -367,7 +368,6 @@
/*-------------------------- USB CAPS ----------------------------------------*/
#define SOC_USB_PERIPH_NUM 1
/*--------------------------- SHA CAPS ---------------------------------------*/
/* Max amount of bytes in a single DMA operation is 4095,
for SHA this means that the biggest safe amount of bytes is
@ -392,7 +392,6 @@
#define SOC_SHA_SUPPORT_SHA512_256 (1)
#define SOC_SHA_SUPPORT_SHA512_T (1)
/*--------------------------- MPI CAPS ---------------------------------------*/
#define SOC_MPI_MEM_BLOCKS_NUM (4)
#define SOC_MPI_OPERATIONS_NUM (3)
@ -400,7 +399,6 @@
/*--------------------------- RSA CAPS ---------------------------------------*/
#define SOC_RSA_MAX_BIT_LEN (4096)
/*-------------------------- AES CAPS -----------------------------------------*/
#define SOC_AES_SUPPORT_DMA (1)
@ -410,7 +408,6 @@
#define SOC_AES_SUPPORT_AES_128 (1)
#define SOC_AES_SUPPORT_AES_256 (1)
/*-------------------------- Power Management CAPS ---------------------------*/
#define SOC_PM_SUPPORT_EXT0_WAKEUP (1)
#define SOC_PM_SUPPORT_EXT1_WAKEUP (1)

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -20,8 +20,8 @@ typedef struct {
struct {
const int rx_irq_id;
const int tx_irq_id;
} pairs[SOC_GDMA_PAIRS_PER_GROUP];
} groups[SOC_GDMA_GROUPS];
} pairs[SOC_GDMA_PAIRS_PER_GROUP_MAX];
} groups[SOC_GDMA_NUM_GROUPS_MAX];
} gdma_signal_conn_t;
extern const gdma_signal_conn_t gdma_periph_signals;
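For reference, a small sketch (the helper name is an assumption; it only uses declarations visible in this header) of how the renamed *_MAX capability macros bound iteration over the signal table:
#include "soc/gdma_periph.h"
#include "soc/soc_caps.h"
// Sketch: look up the RX interrupt source of a group/pair, bounds-checked
// against the *_MAX capability macros.
static int example_rx_irq_id(unsigned group, unsigned pair)
{
    if (group < SOC_GDMA_NUM_GROUPS_MAX && pair < SOC_GDMA_PAIRS_PER_GROUP_MAX) {
        return gdma_periph_signals.groups[group].pairs[pair].rx_irq_id;
    }
    return -1;
}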

View File

@ -635,7 +635,6 @@ components/mbedtls/esp_crt_bundle/test_gen_crt_bundle/test_gen_crt_bundle.py
components/mbedtls/port/aes/block/esp_aes.c
components/mbedtls/port/aes/dma/esp_aes.c
components/mbedtls/port/aes/dma/esp_aes_crypto_dma_impl.c
components/mbedtls/port/aes/dma/esp_aes_gdma_impl.c
components/mbedtls/port/aes/dma/include/esp_aes_dma_priv.h
components/mbedtls/port/aes/esp_aes_xts.c
components/mbedtls/port/include/aes/esp_aes.h