Merge branch 'feature/esp32c5mp_gdma_support_v5.3' into 'release/v5.3'

feat(gdma): add GDMA support for ESP32C5 MP (v5.3)

See merge request espressif/esp-idf!30897
morris 2024-06-12 17:26:16 +08:00
commit 54f30cc94b
20 changed files with 1683 additions and 796 deletions

View File

@@ -16,7 +16,7 @@
 #include "hal/apm_hal.h"
 #endif
-#if CONFIG_IDF_TARGET_ESP32C5_BETA3_VERSION // TODO: IDF-8615 Remove the workaround when APM supported on C5!
+#if CONFIG_IDF_TARGET_ESP32C5 // TODO: IDF-8615 Remove the workaround when APM supported on C5!
 #include "soc/hp_apm_reg.h"
 #include "soc/lp_apm_reg.h"
 #include "soc/lp_apm0_reg.h"
@@ -36,7 +36,7 @@ void bootloader_init_mem(void)
     apm_hal_apm_ctrl_filter_enable_all(false);
 #endif
-#if CONFIG_IDF_TARGET_ESP32C5_BETA3_VERSION // TODO: IDF-8615 Remove the workaround when APM supported on C5!
+#if CONFIG_IDF_TARGET_ESP32C5 // TODO: IDF-8615 Remove the workaround when APM supported on C5!
     // disable apm filter
     REG_WRITE(LP_APM_FUNC_CTRL_REG, 0);
     REG_WRITE(LP_APM0_FUNC_CTRL_REG, 0);

View File

@@ -19,6 +19,8 @@
 #include "hal/cache_ll.h"
 #include "hal/cache_hal.h"
 #include "esp_cache.h"
+#include "esp_memory_utils.h"
+#include "soc/soc_caps.h"
 
 TEST_CASE("GDMA channel allocation", "[GDMA]")
 {
@@ -183,7 +185,7 @@ static void test_gdma_m2m_mode(gdma_channel_handle_t tx_chan, gdma_channel_handl
     size_t sram_alignment = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
     size_t alignment = MAX(sram_alignment, 8);
     uint8_t *src_buf = heap_caps_aligned_calloc(alignment, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
-    uint8_t *dst_buf = heap_caps_aligned_calloc(alignment, 1, 256, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
+    uint8_t *dst_buf = heap_caps_aligned_calloc(alignment, 1, 384, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
     TEST_ASSERT_NOT_NULL(src_buf);
     TEST_ASSERT_NOT_NULL(dst_buf);
     dma_descriptor_align8_t *tx_descs = (dma_descriptor_align8_t *) src_buf;
@@ -199,6 +201,19 @@ static void test_gdma_m2m_mode(gdma_channel_handle_t tx_chan, gdma_channel_handl
         // do write-back for the source data because it's in the cache
         TEST_ESP_OK(esp_cache_msync((void *)src_data, 128, ESP_CACHE_MSYNC_FLAG_DIR_C2M));
     }
+
+#if SOC_DMA_CAN_ACCESS_FLASH
+    const char *src_string = "GDMA can fetch data from MSPI Flash";
+    size_t src_string_len = strlen(src_string);
+    TEST_ASSERT_TRUE(esp_ptr_in_drom(src_string));
+    // Only src_string_len bytes are going to be copied, so set the character right after that
+    // range to 0xFF, letting us verify that the copied length is correct
+    dst_data[128 + src_string_len] = 0xFF;
+    if (sram_alignment) {
+        // do write-back for the dst data because it's in the cache
+        TEST_ESP_OK(esp_cache_msync((void *)dst_data, 256, ESP_CACHE_MSYNC_FLAG_DIR_C2M));
+    }
+#endif
 
 #ifdef CACHE_LL_L2MEM_NON_CACHE_ADDR
     dma_descriptor_align8_t *tx_descs_nc = (dma_descriptor_align8_t *)(CACHE_LL_L2MEM_NON_CACHE_ADDR(tx_descs));
@@ -219,11 +234,23 @@ static void test_gdma_m2m_mode(gdma_channel_handle_t tx_chan, gdma_channel_handl
     tx_descs_nc[1].dw0.size = 64;
     tx_descs_nc[1].dw0.length = 64;
     tx_descs_nc[1].dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
+#if !SOC_DMA_CAN_ACCESS_FLASH
     tx_descs_nc[1].dw0.suc_eof = 1;
     tx_descs_nc[1].next = NULL;
+#else
+    tx_descs_nc[1].dw0.suc_eof = 0;
+    tx_descs_nc[1].next = &tx_descs[2];
+
+    tx_descs_nc[2].buffer = (void *)src_string;
+    tx_descs_nc[2].dw0.size = src_string_len + 1; // +1 for '\0'
+    tx_descs_nc[2].dw0.length = src_string_len;
+    tx_descs_nc[2].dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
+    tx_descs_nc[2].dw0.suc_eof = 1;
+    tx_descs_nc[2].next = NULL;
+#endif
 
     rx_descs_nc->buffer = dst_data;
-    rx_descs_nc->dw0.size = 128;
+    rx_descs_nc->dw0.size = 256;
     rx_descs_nc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
     rx_descs_nc->dw0.suc_eof = 1;
     rx_descs_nc->next = NULL;
@@ -235,7 +262,7 @@ static void test_gdma_m2m_mode(gdma_channel_handle_t tx_chan, gdma_channel_handl
     if (sram_alignment) {
         // the destination data are not reflected to the cache, so do an invalidate to ask the cache to load new data
-        TEST_ESP_OK(esp_cache_msync((void *)dst_data, 128, ESP_CACHE_MSYNC_FLAG_DIR_M2C));
+        TEST_ESP_OK(esp_cache_msync((void *)dst_data, 256, ESP_CACHE_MSYNC_FLAG_DIR_M2C));
     }
 
     // check the DMA descriptor write-back feature
@@ -245,6 +272,11 @@ static void test_gdma_m2m_mode(gdma_channel_handle_t tx_chan, gdma_channel_handl
     for (int i = 0; i < 128; i++) {
         TEST_ASSERT_EQUAL(i, dst_data[i]);
     }
+#if SOC_DMA_CAN_ACCESS_FLASH
+    TEST_ASSERT_TRUE(dst_data[128 + src_string_len] == 0xFF);
+    dst_data[128 + src_string_len] = '\0';
+    TEST_ASSERT_TRUE(strcmp(src_string, (const char *)((uint32_t)dst_data + 128)) == 0);
+#endif
     free((void *)src_buf);
     free((void *)dst_buf);
     vSemaphoreDelete(done_sem);
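
The test leans on the new SOC_DMA_CAN_ACCESS_FLASH capability macro: on chips where it is set, a flash-resident (DROM) buffer is a legal DMA source. A minimal sketch of the same check at the application level, assuming only the soc_caps.h macro and the esp_memory_utils.h helpers the test itself includes (the can_dma_read_from() wrapper is invented for illustration):

#include <stdbool.h>
#include "soc/soc_caps.h"
#include "esp_memory_utils.h"

static bool can_dma_read_from(const void *src)
{
#if SOC_DMA_CAN_ACCESS_FLASH
    // e.g. ESP32-C5 MP: AHB GDMA can fetch from internal RAM or from MSPI flash (DROM)
    return esp_ptr_in_dram(src) || esp_ptr_in_drom(src);
#else
    // elsewhere the source buffer must live in internal, DMA-capable RAM
    return esp_ptr_in_dram(src);
#endif
}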

View File

@@ -7,6 +7,12 @@ set(includes "platform_port/include")
 # because of the "include_next" directive used by the efuse_hal.h
 if(NOT ${target} STREQUAL "linux")
     list(APPEND includes "${target}/include")
+
+    if(CONFIG_IDF_TARGET_ESP32C5_BETA3_VERSION)
+        list(APPEND includes "${target}/beta3/include")
+    elseif(CONFIG_IDF_TARGET_ESP32C5_MP_VERSION)
+        list(APPEND includes "${target}/mp/include")
+    endif()
 endif()
 
 list(APPEND includes "include")

View File

@@ -0,0 +1,630 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stddef.h> /* Required for NULL constant */
#include <stdint.h>
#include <stdbool.h>
#include "soc/soc_caps.h"
#include "hal/gdma_types.h"
#include "soc/gdma_struct.h"
#include "soc/gdma_reg.h"
#include "soc/soc_etm_source.h"
#include "soc/pcr_struct.h"
#include "soc/retention_periph_defs.h"
#ifdef __cplusplus
extern "C" {
#endif
#define GDMA_CH_RETENTION_GET_MODULE_ID(group_id, pair_id) (SLEEP_RETENTION_MODULE_GDMA_CH0 << (SOC_GDMA_PAIRS_PER_GROUP_MAX * group_id) << pair_id)
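// Illustrative expansion (not part of the committed header): since x << a << b equals x << (a + b),
// GDMA_CH_RETENTION_GET_MODULE_ID(0, 2) is SLEEP_RETENTION_MODULE_GDMA_CH0 << 2, i.e. each
// (group, pair) combination gets its own retention module ID slot above GDMA_CH0.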
#define GDMA_LL_GET_HW(id) (((id) == 0) ? (&GDMA) : NULL)
#define GDMA_LL_CHANNEL_MAX_PRIORITY 5 // supported priority levels: [0,5]
#define GDMA_LL_RX_EVENT_MASK (0x7F)
#define GDMA_LL_TX_EVENT_MASK (0x3F)
// any "dummy" peripheral ID can be used for M2M mode
#define GDMA_LL_M2M_FREE_PERIPH_ID_MASK (0xFC32)
#define GDMA_LL_INVALID_PERIPH_ID (0x3F)
#define GDMA_LL_EVENT_TX_FIFO_UDF (1<<5)
#define GDMA_LL_EVENT_TX_FIFO_OVF (1<<4)
#define GDMA_LL_EVENT_RX_FIFO_UDF (1<<6)
#define GDMA_LL_EVENT_RX_FIFO_OVF (1<<5)
#define GDMA_LL_EVENT_TX_TOTAL_EOF (1<<3)
#define GDMA_LL_EVENT_RX_DESC_EMPTY (1<<4)
#define GDMA_LL_EVENT_TX_DESC_ERROR (1<<2)
#define GDMA_LL_EVENT_RX_DESC_ERROR (1<<3)
#define GDMA_LL_EVENT_TX_EOF (1<<1)
#define GDMA_LL_EVENT_TX_DONE (1<<0)
#define GDMA_LL_EVENT_RX_ERR_EOF (1<<2)
#define GDMA_LL_EVENT_RX_SUC_EOF (1<<1)
#define GDMA_LL_EVENT_RX_DONE (1<<0)
#define GDMA_LL_AHB_GROUP_START_ID 0 // AHB GDMA group ID starts from 0
#define GDMA_LL_AHB_NUM_GROUPS 1 // Number of AHB GDMA groups
#define GDMA_LL_AHB_PAIRS_PER_GROUP 3 // Number of GDMA pairs in each AHB group
#define GDMA_LL_TX_ETM_EVENT_TABLE(group, chan, event) \
(uint32_t[1][3][GDMA_ETM_EVENT_MAX]){{{ \
[GDMA_ETM_EVENT_EOF] = GDMA_EVT_OUT_EOF_CH0, \
}, \
{ \
[GDMA_ETM_EVENT_EOF] = GDMA_EVT_OUT_EOF_CH1, \
}, \
{ \
[GDMA_ETM_EVENT_EOF] = GDMA_EVT_OUT_EOF_CH2, \
}}}[group][chan][event]
#define GDMA_LL_RX_ETM_EVENT_TABLE(group, chan, event) \
(uint32_t[1][3][GDMA_ETM_EVENT_MAX]){{{ \
[GDMA_ETM_EVENT_EOF] = GDMA_EVT_IN_SUC_EOF_CH0, \
}, \
{ \
[GDMA_ETM_EVENT_EOF] = GDMA_EVT_IN_SUC_EOF_CH1, \
}, \
{ \
[GDMA_ETM_EVENT_EOF] = GDMA_EVT_IN_SUC_EOF_CH2, \
}}}[group][chan][event]
#define GDMA_LL_TX_ETM_TASK_TABLE(group, chan, task) \
(uint32_t[1][3][GDMA_ETM_TASK_MAX]){{{ \
[GDMA_ETM_TASK_START] = GDMA_TASK_OUT_START_CH0, \
}, \
{ \
[GDMA_ETM_TASK_START] = GDMA_TASK_OUT_START_CH1, \
}, \
{ \
[GDMA_ETM_TASK_START] = GDMA_TASK_OUT_START_CH2, \
}}}[group][chan][task]
#define GDMA_LL_RX_ETM_TASK_TABLE(group, chan, task) \
(uint32_t[1][3][GDMA_ETM_TASK_MAX]){{{ \
[GDMA_ETM_TASK_START] = GDMA_TASK_IN_START_CH0, \
}, \
{ \
[GDMA_ETM_TASK_START] = GDMA_TASK_IN_START_CH1, \
}, \
{ \
[GDMA_ETM_TASK_START] = GDMA_TASK_IN_START_CH2, \
}}}[group][chan][task]
// Workaround for C5-beta3 only: its channels cannot be vectorized into an array in gdma_struct.h
#define GDMA_LL_CHANNEL_GET_REG_ADDR(dev, ch) ((volatile gdma_chn_reg_t*[]){&dev->channel0, &dev->channel1, &dev->channel2}[(ch)])
#define GDMA_LL_AHB_DESC_ALIGNMENT 4
///////////////////////////////////// Common /////////////////////////////////////////
/**
* @brief Enable the bus clock for the DMA module
*/
static inline void gdma_ll_enable_bus_clock(int group_id, bool enable)
{
(void)group_id;
PCR.gdma_conf.gdma_clk_en = enable;
}
/**
* @brief Reset the DMA module
*/
static inline void gdma_ll_reset_register(int group_id)
{
(void)group_id;
PCR.gdma_conf.gdma_rst_en = 1;
PCR.gdma_conf.gdma_rst_en = 0;
}
/**
* @brief Force enable register clock
*/
static inline void gdma_ll_force_enable_reg_clock(gdma_dev_t *dev, bool enable)
{
dev->misc_conf.clk_en = enable;
}
///////////////////////////////////// RX /////////////////////////////////////////
/**
* @brief Get DMA RX channel interrupt status word
*/
__attribute__((always_inline))
static inline uint32_t gdma_ll_rx_get_interrupt_status(gdma_dev_t *dev, uint32_t channel, bool raw)
{
if (raw) {
return dev->in_intr[channel].raw.val;
} else {
return dev->in_intr[channel].st.val;
}
}
/**
* @brief Enable DMA RX channel interrupt
*/
static inline void gdma_ll_rx_enable_interrupt(gdma_dev_t *dev, uint32_t channel, uint32_t mask, bool enable)
{
if (enable) {
dev->in_intr[channel].ena.val |= mask;
} else {
dev->in_intr[channel].ena.val &= ~mask;
}
}
/**
* @brief Clear DMA RX channel interrupt
*/
__attribute__((always_inline))
static inline void gdma_ll_rx_clear_interrupt_status(gdma_dev_t *dev, uint32_t channel, uint32_t mask)
{
dev->in_intr[channel].clr.val = mask;
}
/**
* @brief Get DMA RX channel interrupt status register address
*/
static inline volatile void *gdma_ll_rx_get_interrupt_status_reg(gdma_dev_t *dev, uint32_t channel)
{
return (volatile void *)(&dev->in_intr[channel].st);
}
/**
* @brief Enable DMA RX channel to check the owner bit in the descriptor, disabled by default
*/
static inline void gdma_ll_rx_enable_owner_check(gdma_dev_t *dev, uint32_t channel, bool enable)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
ch->in.in_conf1.in_check_owner = enable;
}
/**
* @brief Enable DMA RX channel burst reading data, disabled by default
*/
static inline void gdma_ll_rx_enable_data_burst(gdma_dev_t *dev, uint32_t channel, bool enable)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
ch->in.in_conf0.in_data_burst_en = enable;
}
/**
* @brief Enable DMA RX channel burst reading descriptor link, disabled by default
*/
static inline void gdma_ll_rx_enable_descriptor_burst(gdma_dev_t *dev, uint32_t channel, bool enable)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
ch->in.in_conf0.indscr_burst_en = enable;
}
/**
* @brief Reset DMA RX channel FSM and FIFO pointer
*/
__attribute__((always_inline))
static inline void gdma_ll_rx_reset_channel(gdma_dev_t *dev, uint32_t channel)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
ch->in.in_conf0.in_rst = 1;
ch->in.in_conf0.in_rst = 0;
}
/**
* @brief Check if DMA RX FIFO is full
* @param fifo_level only supports level 1
*/
static inline bool gdma_ll_rx_is_fifo_full(gdma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
return ch->in.infifo_status.val & 0x01;
}
/**
* @brief Check if DMA RX FIFO is empty
* @param fifo_level only supports level 1
*/
static inline bool gdma_ll_rx_is_fifo_empty(gdma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
return ch->in.infifo_status.val & 0x02;
}
/**
* @brief Get number of bytes in RX FIFO
* @param fifo_level only supports level 1
*/
static inline uint32_t gdma_ll_rx_get_fifo_bytes(gdma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
return ch->in.infifo_status.infifo_cnt;
}
/**
* @brief Pop data from DMA RX FIFO
*/
static inline uint32_t gdma_ll_rx_pop_data(gdma_dev_t *dev, uint32_t channel)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
ch->in.in_pop.infifo_pop = 1;
return ch->in.in_pop.infifo_rdata;
}
/**
* @brief Set the descriptor link base address for RX channel
*/
__attribute__((always_inline))
static inline void gdma_ll_rx_set_desc_addr(gdma_dev_t *dev, uint32_t channel, uint32_t addr)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
ch->in.in_link.inlink_addr = addr;
}
/**
* @brief Start dealing with RX descriptors
*/
__attribute__((always_inline))
static inline void gdma_ll_rx_start(gdma_dev_t *dev, uint32_t channel)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
ch->in.in_link.inlink_start = 1;
}
/**
* @brief Stop dealing with RX descriptors
*/
__attribute__((always_inline))
static inline void gdma_ll_rx_stop(gdma_dev_t *dev, uint32_t channel)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
ch->in.in_link.inlink_stop = 1;
}
/**
* @brief Restart a new inlink right after the last descriptor
*/
__attribute__((always_inline))
static inline void gdma_ll_rx_restart(gdma_dev_t *dev, uint32_t channel)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
ch->in.in_link.inlink_restart = 1;
}
/**
* @brief Enable DMA RX channel to return the address of the current descriptor when an error is received
*/
static inline void gdma_ll_rx_enable_auto_return(gdma_dev_t *dev, uint32_t channel, bool enable)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
ch->in.in_link.inlink_auto_ret = enable;
}
/**
* @brief Check if DMA RX descriptor FSM is in IDLE state
*/
static inline bool gdma_ll_rx_is_desc_fsm_idle(gdma_dev_t *dev, uint32_t channel)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
return ch->in.in_link.inlink_park;
}
/**
* @brief Get RX success EOF descriptor's address
*/
__attribute__((always_inline))
static inline uint32_t gdma_ll_rx_get_success_eof_desc_addr(gdma_dev_t *dev, uint32_t channel)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
return ch->in.in_suc_eof_des_addr.val;
}
/**
* @brief Get RX error EOF descriptor's address
*/
__attribute__((always_inline))
static inline uint32_t gdma_ll_rx_get_error_eof_desc_addr(gdma_dev_t *dev, uint32_t channel)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
return ch->in.in_err_eof_des_addr.val;
}
/**
* @brief Get the pre-fetched RX descriptor's address
*/
__attribute__((always_inline))
static inline uint32_t gdma_ll_rx_get_prefetched_desc_addr(gdma_dev_t *dev, uint32_t channel)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
return ch->in.in_dscr.val;
}
/**
* @brief Set priority for DMA RX channel
*/
static inline void gdma_ll_rx_set_priority(gdma_dev_t *dev, uint32_t channel, uint32_t prio)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
ch->in.in_pri.rx_pri = prio;
}
/**
* @brief Connect DMA RX channel to a given peripheral
*/
static inline void gdma_ll_rx_connect_to_periph(gdma_dev_t *dev, uint32_t channel, gdma_trigger_peripheral_t periph, int periph_id)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
ch->in.in_peri_sel.peri_in_sel = periph_id;
ch->in.in_conf0.mem_trans_en = (periph == GDMA_TRIG_PERIPH_M2M);
}
/**
* @brief Disconnect DMA RX channel from peripheral
*/
static inline void gdma_ll_rx_disconnect_from_periph(gdma_dev_t *dev, uint32_t channel)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
ch->in.in_peri_sel.peri_in_sel = GDMA_LL_INVALID_PERIPH_ID;
ch->in.in_conf0.mem_trans_en = false;
}
/**
* @brief Whether to enable the ETM subsystem for RX channel
*
* @note When ETM_EN is 1, only ETM tasks can be used to configure the transfer direction and enable the channel.
*/
static inline void gdma_ll_rx_enable_etm_task(gdma_dev_t *dev, uint32_t channel, bool enable)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
ch->in.in_conf0.in_etm_en = enable;
}
///////////////////////////////////// TX /////////////////////////////////////////
/**
* @brief Get DMA TX channel interrupt status word
*/
__attribute__((always_inline))
static inline uint32_t gdma_ll_tx_get_interrupt_status(gdma_dev_t *dev, uint32_t channel, bool raw)
{
if (raw) {
return dev->out_intr[channel].raw.val;
} else {
return dev->out_intr[channel].st.val;
}
}
/**
* @brief Enable DMA TX channel interrupt
*/
static inline void gdma_ll_tx_enable_interrupt(gdma_dev_t *dev, uint32_t channel, uint32_t mask, bool enable)
{
if (enable) {
dev->out_intr[channel].ena.val |= mask;
} else {
dev->out_intr[channel].ena.val &= ~mask;
}
}
/**
* @brief Clear DMA TX channel interrupt
*/
__attribute__((always_inline))
static inline void gdma_ll_tx_clear_interrupt_status(gdma_dev_t *dev, uint32_t channel, uint32_t mask)
{
dev->out_intr[channel].clr.val = mask;
}
/**
* @brief Get DMA TX channel interrupt status register address
*/
static inline volatile void *gdma_ll_tx_get_interrupt_status_reg(gdma_dev_t *dev, uint32_t channel)
{
return (volatile void *)(&dev->out_intr[channel].st);
}
/**
* @brief Enable DMA TX channel to check the owner bit in the descriptor, disabled by default
*/
static inline void gdma_ll_tx_enable_owner_check(gdma_dev_t *dev, uint32_t channel, bool enable)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
ch->out.out_conf1.out_check_owner = enable;
}
/**
* @brief Enable DMA TX channel burst sending data, disabled by default
*/
static inline void gdma_ll_tx_enable_data_burst(gdma_dev_t *dev, uint32_t channel, bool enable)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
ch->out.out_conf0.out_data_burst_en = enable;
}
/**
* @brief Enable DMA TX channel burst reading descriptor link, disabled by default
*/
static inline void gdma_ll_tx_enable_descriptor_burst(gdma_dev_t *dev, uint32_t channel, bool enable)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
ch->out.out_conf0.outdscr_burst_en = enable;
}
/**
* @brief Set TX channel EOF mode
*/
static inline void gdma_ll_tx_set_eof_mode(gdma_dev_t *dev, uint32_t channel, uint32_t mode)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
ch->out.out_conf0.out_eof_mode = mode;
}
/**
* @brief Enable the DMA TX channel to automatically write results back to the descriptor after all data has been sent out, disabled by default
*/
static inline void gdma_ll_tx_enable_auto_write_back(gdma_dev_t *dev, uint32_t channel, bool enable)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
ch->out.out_conf0.out_auto_wrback = enable;
}
/**
* @brief Reset DMA TX channel FSM and FIFO pointer
*/
__attribute__((always_inline))
static inline void gdma_ll_tx_reset_channel(gdma_dev_t *dev, uint32_t channel)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
ch->out.out_conf0.out_rst = 1;
ch->out.out_conf0.out_rst = 0;
}
/**
* @brief Check if DMA TX FIFO is full
* @param fifo_level only supports level 1
*/
static inline bool gdma_ll_tx_is_fifo_full(gdma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
return ch->out.outfifo_status.val & 0x01;
}
/**
* @brief Check if DMA TX FIFO is empty
* @param fifo_level only supports level 1
*/
static inline bool gdma_ll_tx_is_fifo_empty(gdma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
return ch->out.outfifo_status.val & 0x02;
}
/**
* @brief Get number of bytes in TX FIFO
* @param fifo_level only supports level 1
*/
static inline uint32_t gdma_ll_tx_get_fifo_bytes(gdma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
return ch->out.outfifo_status.outfifo_cnt;
}
/**
* @brief Push data into DMA TX FIFO
*/
static inline void gdma_ll_tx_push_data(gdma_dev_t *dev, uint32_t channel, uint32_t data)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
ch->out.out_push.outfifo_wdata = data;
ch->out.out_push.outfifo_push = 1;
}
/**
* @brief Set the descriptor link base address for TX channel
*/
__attribute__((always_inline))
static inline void gdma_ll_tx_set_desc_addr(gdma_dev_t *dev, uint32_t channel, uint32_t addr)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
ch->out.out_link.outlink_addr = addr;
}
/**
* @brief Start dealing with TX descriptors
*/
__attribute__((always_inline))
static inline void gdma_ll_tx_start(gdma_dev_t *dev, uint32_t channel)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
ch->out.out_link.outlink_start = 1;
}
/**
* @brief Stop dealing with TX descriptors
*/
__attribute__((always_inline))
static inline void gdma_ll_tx_stop(gdma_dev_t *dev, uint32_t channel)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
ch->out.out_link.outlink_stop = 1;
}
/**
* @brief Restart a new outlink right after the last descriptor
*/
__attribute__((always_inline))
static inline void gdma_ll_tx_restart(gdma_dev_t *dev, uint32_t channel)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
ch->out.out_link.outlink_restart = 1;
}
/**
* @brief Check if DMA TX descriptor FSM is in IDLE state
*/
static inline bool gdma_ll_tx_is_desc_fsm_idle(gdma_dev_t *dev, uint32_t channel)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
return ch->out.out_link.outlink_park;
}
/**
* @brief Get TX EOF descriptor's address
*/
__attribute__((always_inline))
static inline uint32_t gdma_ll_tx_get_eof_desc_addr(gdma_dev_t *dev, uint32_t channel)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
return ch->out.out_eof_des_addr.val;
}
/**
* @brief Get the pre-fetched TX descriptor's address
*/
__attribute__((always_inline))
static inline uint32_t gdma_ll_tx_get_prefetched_desc_addr(gdma_dev_t *dev, uint32_t channel)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
return ch->out.out_dscr.val;
}
/**
* @brief Set priority for DMA TX channel
*/
static inline void gdma_ll_tx_set_priority(gdma_dev_t *dev, uint32_t channel, uint32_t prio)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
ch->out.out_pri.tx_pri = prio;
}
/**
* @brief Connect DMA TX channel to a given peripheral
*/
static inline void gdma_ll_tx_connect_to_periph(gdma_dev_t *dev, uint32_t channel, gdma_trigger_peripheral_t periph, int periph_id)
{
(void)periph;
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
ch->out.out_peri_sel.peri_out_sel = periph_id;
}
/**
* @brief Disconnect DMA TX channel from peripheral
*/
static inline void gdma_ll_tx_disconnect_from_periph(gdma_dev_t *dev, uint32_t channel)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
ch->out.out_peri_sel.peri_out_sel = GDMA_LL_INVALID_PERIPH_ID;
}
/**
* @brief Whether to enable the ETM subsystem for TX channel
*
* @note When ETM_EN is 1, only ETM tasks can be used to configure the transfer direction and enable the channel.
*/
static inline void gdma_ll_tx_enable_etm_task(gdma_dev_t *dev, uint32_t channel, bool enable)
{
volatile gdma_chn_reg_t *ch = (volatile gdma_chn_reg_t *)GDMA_LL_CHANNEL_GET_REG_ADDR(dev, channel);
ch->out.out_conf0.out_etm_en = enable;
}
#ifdef __cplusplus
}
#endif
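
GDMA_LL_M2M_FREE_PERIPH_ID_MASK above encodes, one bit per peripheral ID, which IDs may serve as the "dummy" trigger for memory-to-memory transfers. A hedged sketch of how a driver could pick one (the helper is invented for illustration; only the bit-per-ID reading of the mask is assumed):

#include <stdint.h>

static inline int gdma_pick_free_m2m_periph_id(uint32_t free_id_mask)
{
    // lowest set bit = lowest peripheral ID usable for M2M; -1 if the mask is empty
    return free_id_mask ? __builtin_ctz(free_id_mask) : -1;
}

// e.g. gdma_pick_free_m2m_periph_id(GDMA_LL_M2M_FREE_PERIPH_ID_MASK) yields 1 for the 0xFC32 mask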

View File

@@ -5,629 +5,9 @@
  */
 #pragma once
-#include <stddef.h> /* Required for NULL constant */
-#include <stdint.h>
-#include <stdbool.h>
-#include "soc/soc_caps.h"
-#if SOC_GDMA_SUPPORTED // TODO: [ESP32C5] IDF-8710
-#include "hal/gdma_types.h"
-#include "soc/gdma_struct.h"
-#include "soc/gdma_reg.h"
-#include "soc/soc_etm_source.h"
-#include "soc/pcr_struct.h"
-#include "soc/retention_periph_defs.h"
-#ifdef __cplusplus
-extern "C" {
-#endif
+#include "sdkconfig.h"
+#if CONFIG_IDF_TARGET_ESP32C5_BETA3_VERSION
+#include "hal/gdma_beta3_ll.h"
+#else
+#include "hal/ahb_dma_ll.h"
 #endif
[... remaining removed lines omitted: the old low-level inline API deleted here was moved essentially verbatim into the new gdma_beta3_ll.h shown above ...]
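
With this dispatch in place, call sites keep including the generic hal/gdma_ll.h and the preprocessor selects the beta3 or MP implementation from sdkconfig. A minimal consumer sketch, using only the two common functions that appear with identical signatures in both per-version headers of this diff:

#include "hal/gdma_ll.h" // resolves to gdma_beta3_ll.h or ahb_dma_ll.h per the target version

static void dma_bring_up(void)
{
    gdma_ll_enable_bus_clock(0, true); // group 0: enable the bus clock...
    gdma_ll_reset_register(0);         // ...and reset the module registers
}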

View File

@@ -0,0 +1,619 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stddef.h> /* Required for NULL constant */
#include <stdint.h>
#include <stdbool.h>
#include "soc/soc_caps.h"
#include "hal/gdma_types.h"
#include "soc/ahb_dma_struct.h"
#include "soc/ahb_dma_reg.h"
#include "soc/soc_etm_source.h"
#include "soc/pcr_struct.h"
#include "soc/retention_periph_defs.h"
#ifdef __cplusplus
extern "C" {
#endif
#define GDMA_CH_RETENTION_GET_MODULE_ID(group_id, pair_id) (SLEEP_RETENTION_MODULE_GDMA_CH0 << (SOC_GDMA_PAIRS_PER_GROUP_MAX * group_id) << pair_id)
#define AHB_DMA_LL_GET_HW(id) (((id) == 0) ? (&AHB_DMA) : NULL)
#define GDMA_LL_CHANNEL_MAX_PRIORITY 5 // supported priority levels: [0,5]
#define GDMA_LL_RX_EVENT_MASK (0x7F)
#define GDMA_LL_TX_EVENT_MASK (0x3F)
// any "dummy" peripheral ID can be used for M2M mode
#define AHB_DMA_LL_M2M_FREE_PERIPH_ID_MASK (0xFC31)
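// Editorial note, not in the committed header: this mask is 0xFC31 where the beta3 header uses
// 0xFC32; on MP silicon peripheral ID 0 is free for M2M use and ID 1 is reserved, the reverse of beta3.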
#define AHB_DMA_LL_INVALID_PERIPH_ID (0x3F)
#define GDMA_LL_EVENT_TX_FIFO_UDF (1<<5)
#define GDMA_LL_EVENT_TX_FIFO_OVF (1<<4)
#define GDMA_LL_EVENT_RX_FIFO_UDF (1<<6)
#define GDMA_LL_EVENT_RX_FIFO_OVF (1<<5)
#define GDMA_LL_EVENT_TX_TOTAL_EOF (1<<3)
#define GDMA_LL_EVENT_RX_DESC_EMPTY (1<<4)
#define GDMA_LL_EVENT_TX_DESC_ERROR (1<<2)
#define GDMA_LL_EVENT_RX_DESC_ERROR (1<<3)
#define GDMA_LL_EVENT_TX_EOF (1<<1)
#define GDMA_LL_EVENT_TX_DONE (1<<0)
#define GDMA_LL_EVENT_RX_ERR_EOF (1<<2)
#define GDMA_LL_EVENT_RX_SUC_EOF (1<<1)
#define GDMA_LL_EVENT_RX_DONE (1<<0)
#define GDMA_LL_AHB_GROUP_START_ID 0 // AHB GDMA group ID starts from 0
#define GDMA_LL_AHB_NUM_GROUPS 1 // Number of AHB GDMA groups
#define GDMA_LL_AHB_PAIRS_PER_GROUP 3 // Number of GDMA pairs in each AHB group
#define GDMA_LL_TX_ETM_EVENT_TABLE(group, chan, event) \
(uint32_t[1][3][GDMA_ETM_EVENT_MAX]){{{ \
[GDMA_ETM_EVENT_EOF] = GDMA_EVT_OUT_EOF_CH0, \
}, \
{ \
[GDMA_ETM_EVENT_EOF] = GDMA_EVT_OUT_EOF_CH1, \
}, \
{ \
[GDMA_ETM_EVENT_EOF] = GDMA_EVT_OUT_EOF_CH2, \
}}}[group][chan][event]
#define GDMA_LL_RX_ETM_EVENT_TABLE(group, chan, event) \
(uint32_t[1][3][GDMA_ETM_EVENT_MAX]){{{ \
[GDMA_ETM_EVENT_EOF] = GDMA_EVT_IN_SUC_EOF_CH0, \
}, \
{ \
[GDMA_ETM_EVENT_EOF] = GDMA_EVT_IN_SUC_EOF_CH1, \
}, \
{ \
[GDMA_ETM_EVENT_EOF] = GDMA_EVT_IN_SUC_EOF_CH2, \
}}}[group][chan][event]
#define GDMA_LL_TX_ETM_TASK_TABLE(group, chan, task) \
(uint32_t[1][3][GDMA_ETM_TASK_MAX]){{{ \
[GDMA_ETM_TASK_START] = GDMA_TASK_OUT_START_CH0, \
}, \
{ \
[GDMA_ETM_TASK_START] = GDMA_TASK_OUT_START_CH1, \
}, \
{ \
[GDMA_ETM_TASK_START] = GDMA_TASK_OUT_START_CH2, \
}}}[group][chan][task]
#define GDMA_LL_RX_ETM_TASK_TABLE(group, chan, task) \
(uint32_t[1][3][GDMA_ETM_TASK_MAX]){{{ \
[GDMA_ETM_TASK_START] = GDMA_TASK_IN_START_CH0, \
}, \
{ \
[GDMA_ETM_TASK_START] = GDMA_TASK_IN_START_CH1, \
}, \
{ \
[GDMA_ETM_TASK_START] = GDMA_TASK_IN_START_CH2, \
}}}[group][chan][task]
#define GDMA_LL_AHB_DESC_ALIGNMENT 4
///////////////////////////////////// Common /////////////////////////////////////////
/**
* @brief Enable the bus clock for the DMA module
*/
static inline void gdma_ll_enable_bus_clock(int group_id, bool enable)
{
(void)group_id;
PCR.gdma_conf.gdma_clk_en = enable;
}
/**
* @brief Reset the DMA module
*/
static inline void gdma_ll_reset_register(int group_id)
{
(void)group_id;
PCR.gdma_conf.gdma_rst_en = 1;
PCR.gdma_conf.gdma_rst_en = 0;
}
/**
* @brief Force enable register clock
*/
static inline void ahb_dma_ll_force_enable_reg_clock(ahb_dma_dev_t *dev, bool enable)
{
dev->misc_conf.clk_en = enable;
}
/**
* @brief Disable priority arbitration
*
* @param dev DMA register base address
* @param dis True to disable, false to enable
*/
static inline void ahb_dma_ll_disable_prio_arb(ahb_dma_dev_t *dev, bool dis)
{
dev->misc_conf.arb_pri_dis = dis;
}
/**
* @brief Reset DMA FSM
*
* @param dev DMA register base address
*/
static inline void ahb_dma_ll_reset_fsm(ahb_dma_dev_t *dev)
{
dev->misc_conf.ahbm_rst_inter = 1;
dev->misc_conf.ahbm_rst_inter = 0;
}
/**
* @brief Preset valid memory range for AHB-DMA
*
* @param dev DMA register base address
*/
static inline void ahb_dma_ll_set_default_memory_range(ahb_dma_dev_t *dev)
{
// AHB-DMA can access L2MEM, L2ROM, MSPI Flash, MSPI PSRAM
dev->intr_mem_start_addr.val = 0x40800000;
dev->intr_mem_end_addr.val = 0x44000000;
}
///////////////////////////////////// RX /////////////////////////////////////////
/**
* @brief Get DMA RX channel interrupt status word
*/
__attribute__((always_inline))
static inline uint32_t ahb_dma_ll_rx_get_interrupt_status(ahb_dma_dev_t *dev, uint32_t channel, bool raw)
{
if (raw) {
return dev->in_intr[channel].raw.val;
} else {
return dev->in_intr[channel].st.val;
}
}
/**
* @brief Enable DMA RX channel interrupt
*/
static inline void ahb_dma_ll_rx_enable_interrupt(ahb_dma_dev_t *dev, uint32_t channel, uint32_t mask, bool enable)
{
if (enable) {
dev->in_intr[channel].ena.val |= mask;
} else {
dev->in_intr[channel].ena.val &= ~mask;
}
}
/**
* @brief Clear DMA RX channel interrupt
*/
__attribute__((always_inline))
static inline void ahb_dma_ll_rx_clear_interrupt_status(ahb_dma_dev_t *dev, uint32_t channel, uint32_t mask)
{
dev->in_intr[channel].clr.val = mask;
}
/**
* @brief Get DMA RX channel interrupt status register address
*/
static inline volatile void *ahb_dma_ll_rx_get_interrupt_status_reg(ahb_dma_dev_t *dev, uint32_t channel)
{
return (volatile void *)(&dev->in_intr[channel].st);
}
/**
* @brief Enable DMA RX channel to check the owner bit in the descriptor, disabled by default
*/
static inline void ahb_dma_ll_rx_enable_owner_check(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
{
dev->channel[channel].in.in_conf1.in_check_owner_chn = enable;
}
/**
* @brief Enable DMA RX channel burst reading data, disabled by default
*/
static inline void ahb_dma_ll_rx_enable_data_burst(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
{
// dev->channel[channel].in.in_conf0.in_data_burst_mode_sel_chn = enable; // single/incr4/incr8/incr16
}
/**
* @brief Enable DMA RX channel burst reading descriptor link, disabled by default
*/
static inline void ahb_dma_ll_rx_enable_descriptor_burst(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
{
dev->channel[channel].in.in_conf0.indscr_burst_en_chn = enable;
}
/**
* @brief Reset DMA RX channel FSM and FIFO pointer
*/
__attribute__((always_inline))
static inline void ahb_dma_ll_rx_reset_channel(ahb_dma_dev_t *dev, uint32_t channel)
{
dev->channel[channel].in.in_conf0.in_rst_chn = 1;
dev->channel[channel].in.in_conf0.in_rst_chn = 0;
}
/**
* @brief Check if DMA RX FIFO is full
* @param fifo_level only supports level 1
*/
static inline bool ahb_dma_ll_rx_is_fifo_full(ahb_dma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
return dev->channel[channel].in.infifo_status.val & 0x01;
}
/**
* @brief Check if DMA RX FIFO is empty
* @param fifo_level only supports level 1
*/
static inline bool ahb_dma_ll_rx_is_fifo_empty(ahb_dma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
return dev->channel[channel].in.infifo_status.val & 0x02;
}
/**
* @brief Get number of bytes in RX FIFO
* @param fifo_level only supports level 1
*/
static inline uint32_t ahb_dma_ll_rx_get_fifo_bytes(ahb_dma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
return dev->channel[channel].in.infifo_status.infifo_cnt_chn;
}
/**
* @brief Pop data from DMA RX FIFO
*/
static inline uint32_t ahb_dma_ll_rx_pop_data(ahb_dma_dev_t *dev, uint32_t channel)
{
dev->channel[channel].in.in_pop.infifo_pop_chn = 1;
return dev->channel[channel].in.in_pop.infifo_rdata_chn;
}
/**
* @brief Set the descriptor link base address for RX channel
*/
__attribute__((always_inline))
static inline void ahb_dma_ll_rx_set_desc_addr(ahb_dma_dev_t *dev, uint32_t channel, uint32_t addr)
{
dev->in_link_addr[channel].inlink_addr_chn = addr;
}
/**
* @brief Start dealing with RX descriptors
*/
__attribute__((always_inline))
static inline void ahb_dma_ll_rx_start(ahb_dma_dev_t *dev, uint32_t channel)
{
dev->channel[channel].in.in_link.inlink_start_chn = 1;
}
/**
* @brief Stop dealing with RX descriptors
*/
__attribute__((always_inline))
static inline void ahb_dma_ll_rx_stop(ahb_dma_dev_t *dev, uint32_t channel)
{
dev->channel[channel].in.in_link.inlink_stop_chn = 1;
}
/**
* @brief Restart a new inlink right after the last descriptor
*/
__attribute__((always_inline))
static inline void ahb_dma_ll_rx_restart(ahb_dma_dev_t *dev, uint32_t channel)
{
dev->channel[channel].in.in_link.inlink_restart_chn = 1;
}
/**
* @brief Enable DMA RX channel to return the address of the current descriptor when an error is received
*/
static inline void ahb_dma_ll_rx_enable_auto_return(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
{
dev->channel[channel].in.in_link.inlink_auto_ret_chn = enable;
}
/**
* @brief Check if DMA RX descriptor FSM is in IDLE state
*/
static inline bool ahb_dma_ll_rx_is_desc_fsm_idle(ahb_dma_dev_t *dev, uint32_t channel)
{
return dev->channel[channel].in.in_link.inlink_park_chn;
}
/**
* @brief Get RX success EOF descriptor's address
*/
__attribute__((always_inline))
static inline uint32_t ahb_dma_ll_rx_get_success_eof_desc_addr(ahb_dma_dev_t *dev, uint32_t channel)
{
return dev->channel[channel].in.in_suc_eof_des_addr.val;
}
/**
* @brief Get RX error EOF descriptor's address
*/
__attribute__((always_inline))
static inline uint32_t ahb_dma_ll_rx_get_error_eof_desc_addr(ahb_dma_dev_t *dev, uint32_t channel)
{
return dev->channel[channel].in.in_err_eof_des_addr.val;
}
/**
* @brief Get the pre-fetched RX descriptor's address
*/
__attribute__((always_inline))
static inline uint32_t ahb_dma_ll_rx_get_prefetched_desc_addr(ahb_dma_dev_t *dev, uint32_t channel)
{
return dev->channel[channel].in.in_dscr.val;
}
/**
* @brief Set priority for DMA RX channel
*/
static inline void ahb_dma_ll_rx_set_priority(ahb_dma_dev_t *dev, uint32_t channel, uint32_t prio)
{
dev->channel[channel].in.in_pri.rx_pri_chn = prio;
}
/**
* @brief Connect DMA RX channel to a given peripheral
*/
static inline void ahb_dma_ll_rx_connect_to_periph(ahb_dma_dev_t *dev, uint32_t channel, gdma_trigger_peripheral_t periph, int periph_id)
{
dev->channel[channel].in.in_peri_sel.peri_in_sel_chn = periph_id;
dev->channel[channel].in.in_conf0.mem_trans_en_chn = (periph == GDMA_TRIG_PERIPH_M2M);
}
/**
* @brief Disconnect DMA RX channel from peripheral
*/
static inline void ahb_dma_ll_rx_disconnect_from_periph(ahb_dma_dev_t *dev, uint32_t channel)
{
dev->channel[channel].in.in_peri_sel.peri_in_sel_chn = AHB_DMA_LL_INVALID_PERIPH_ID;
dev->channel[channel].in.in_conf0.mem_trans_en_chn = false;
}
/**
* @brief Whether to enable the ETM subsystem for RX channel
*
* @note When ETM_EN is 1, only ETM tasks can be used to configure the transfer direction and enable the channel.
*/
static inline void ahb_dma_ll_rx_enable_etm_task(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
{
dev->channel[channel].in.in_conf0.in_etm_en_chn = enable;
}
///////////////////////////////////// TX /////////////////////////////////////////
/**
* @brief Get DMA TX channel interrupt status word
*/
__attribute__((always_inline))
static inline uint32_t ahb_dma_ll_tx_get_interrupt_status(ahb_dma_dev_t *dev, uint32_t channel, bool raw)
{
if (raw) {
return dev->out_intr[channel].raw.val;
} else {
return dev->out_intr[channel].st.val;
}
}
/**
* @brief Enable DMA TX channel interrupt
*/
static inline void ahb_dma_ll_tx_enable_interrupt(ahb_dma_dev_t *dev, uint32_t channel, uint32_t mask, bool enable)
{
if (enable) {
dev->out_intr[channel].ena.val |= mask;
} else {
dev->out_intr[channel].ena.val &= ~mask;
}
}
/**
* @brief Clear DMA TX channel interrupt
*/
__attribute__((always_inline))
static inline void ahb_dma_ll_tx_clear_interrupt_status(ahb_dma_dev_t *dev, uint32_t channel, uint32_t mask)
{
dev->out_intr[channel].clr.val = mask;
}
/**
* @brief Get DMA TX channel interrupt status register address
*/
static inline volatile void *ahb_dma_ll_tx_get_interrupt_status_reg(ahb_dma_dev_t *dev, uint32_t channel)
{
return (volatile void *)(&dev->out_intr[channel].st);
}
/**
* @brief Enable DMA TX channel to check the owner bit in the descriptor, disabled by default
*/
static inline void ahb_dma_ll_tx_enable_owner_check(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
{
dev->channel[channel].out.out_conf1.out_check_owner_chn = enable;
}
/**
* @brief Enable DMA TX channel burst sending data, disabled by default
*/
static inline void ahb_dma_ll_tx_enable_data_burst(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
{
// dev->channel[channel].out.out_conf0.out_data_burst_mode_sel_chn = enable;
}
/**
* @brief Enable DMA TX channel burst reading descriptor link, disabled by default
*/
static inline void ahb_dma_ll_tx_enable_descriptor_burst(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
{
dev->channel[channel].out.out_conf0.outdscr_burst_en_chn = enable;
}
/**
* @brief Set TX channel EOF mode
*/
static inline void ahb_dma_ll_tx_set_eof_mode(ahb_dma_dev_t *dev, uint32_t channel, uint32_t mode)
{
dev->channel[channel].out.out_conf0.out_eof_mode_chn = mode;
}
/**
* @brief Enable the DMA TX channel to automatically write results back to the descriptor after all data has been sent out, disabled by default
*/
static inline void ahb_dma_ll_tx_enable_auto_write_back(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
{
dev->channel[channel].out.out_conf0.out_auto_wrback_chn = enable;
}
/**
* @brief Reset DMA TX channel FSM and FIFO pointer
*/
__attribute__((always_inline))
static inline void ahb_dma_ll_tx_reset_channel(ahb_dma_dev_t *dev, uint32_t channel)
{
dev->channel[channel].out.out_conf0.out_rst_chn = 1;
dev->channel[channel].out.out_conf0.out_rst_chn = 0;
}
/**
* @brief Check if DMA TX FIFO is full
* @param fifo_level only supports level 1
*/
static inline bool ahb_dma_ll_tx_is_fifo_full(ahb_dma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
return dev->channel[channel].out.outfifo_status.val & 0x01;
}
/**
* @brief Check if DMA TX FIFO is empty
* @param fifo_level only supports level 1
*/
static inline bool ahb_dma_ll_tx_is_fifo_empty(ahb_dma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
return dev->channel[channel].out.outfifo_status.val & 0x02;
}
/**
* @brief Get number of bytes in TX FIFO
* @param fifo_level only supports level 1
*/
static inline uint32_t ahb_dma_ll_tx_get_fifo_bytes(ahb_dma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
return dev->channel[channel].out.outfifo_status.outfifo_cnt_chn;
}
/**
* @brief Push data into DMA TX FIFO
*/
static inline void ahb_dma_ll_tx_push_data(ahb_dma_dev_t *dev, uint32_t channel, uint32_t data)
{
dev->channel[channel].out.out_push.outfifo_wdata_chn = data;
dev->channel[channel].out.out_push.outfifo_push_chn = 1;
}
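/*
 * Usage sketch (illustrative): manually feed one word into the TX FIFO,
 * guarding on the level-1 full flag from the helper above.
 *
 *     if (!ahb_dma_ll_tx_is_fifo_full(dev, chan, 1)) {
 *         ahb_dma_ll_tx_push_data(dev, chan, word);
 *     }
 */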
/**
* @brief Set the descriptor link base address for TX channel
*/
__attribute__((always_inline))
static inline void ahb_dma_ll_tx_set_desc_addr(ahb_dma_dev_t *dev, uint32_t channel, uint32_t addr)
{
dev->out_link_addr[channel].outlink_addr_chn = addr;
}
/**
* @brief Start dealing with TX descriptors
*/
__attribute__((always_inline))
static inline void ahb_dma_ll_tx_start(ahb_dma_dev_t *dev, uint32_t channel)
{
dev->channel[channel].out.out_link.outlink_start_chn = 1;
}
/**
* @brief Stop dealing with TX descriptors
*/
__attribute__((always_inline))
static inline void ahb_dma_ll_tx_stop(ahb_dma_dev_t *dev, uint32_t channel)
{
dev->channel[channel].out.out_link.outlink_stop_chn = 1;
}
/**
* @brief Restart a new outlink right after the last descriptor
*/
__attribute__((always_inline))
static inline void ahb_dma_ll_tx_restart(ahb_dma_dev_t *dev, uint32_t channel)
{
dev->channel[channel].out.out_link.outlink_restart_chn = 1;
}
/**
* @brief Check if DMA TX descriptor FSM is in IDLE state
*/
static inline bool ahb_dma_ll_tx_is_desc_fsm_idle(ahb_dma_dev_t *dev, uint32_t channel)
{
return dev->channel[channel].out.out_link.outlink_park_chn;
}
/**
* @brief Get TX EOF descriptor's address
*/
__attribute__((always_inline))
static inline uint32_t ahb_dma_ll_tx_get_eof_desc_addr(ahb_dma_dev_t *dev, uint32_t channel)
{
return dev->channel[channel].out.out_eof_des_addr.val;
}
/**
* @brief Get the pre-fetched TX descriptor's address
*/
__attribute__((always_inline))
static inline uint32_t ahb_dma_ll_tx_get_prefetched_desc_addr(ahb_dma_dev_t *dev, uint32_t channel)
{
return dev->channel[channel].out.out_dscr.val;
}
/**
* @brief Set priority for DMA TX channel
*/
static inline void ahb_dma_ll_tx_set_priority(ahb_dma_dev_t *dev, uint32_t channel, uint32_t prio)
{
dev->channel[channel].out.out_pri.tx_pri_chn = prio;
}
/**
* @brief Connect DMA TX channel to a given peripheral
*/
static inline void ahb_dma_ll_tx_connect_to_periph(ahb_dma_dev_t *dev, uint32_t channel, gdma_trigger_peripheral_t periph, int periph_id)
{
(void)periph;
dev->channel[channel].out.out_peri_sel.peri_out_sel_chn = periph_id;
}
/**
* @brief Disconnect DMA TX channel from peripheral
*/
static inline void ahb_dma_ll_tx_disconnect_from_periph(ahb_dma_dev_t *dev, uint32_t channel)
{
dev->channel[channel].out.out_peri_sel.peri_out_sel_chn = AHB_DMA_LL_INVALID_PERIPH_ID;
}
/**
* @brief Whether to enable the ETM subsystem for TX channel
*
* @note When ETM_EN is 1, only ETM tasks can be used to configure the transfer direction and enable the channel.
*/
static inline void ahb_dma_ll_tx_enable_etm_task(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
{
dev->channel[channel].out.out_conf0.out_etm_en_chn = enable;
}
#ifdef __cplusplus
}
#endif
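Taken together, the helpers above are enough to hand-start a transfer at the LL layer. A minimal memory-to-memory sketch, assuming `dev` points at the AHB-DMA device instance and `tx_desc_addr`/`rx_desc_addr` are the addresses of already-populated descriptor lists (all three names are placeholders, and error/EOF interrupt handling is omitted):

static void ahb_dma_ll_m2m_kickoff(ahb_dma_dev_t *dev, uint32_t chan,
                                   uint32_t tx_desc_addr, uint32_t rx_desc_addr)
{
    // M2M mode is enabled on the RX side via mem_trans_en; the peripheral id is a placeholder here
    ahb_dma_ll_rx_connect_to_periph(dev, chan, GDMA_TRIG_PERIPH_M2M, 0);
    ahb_dma_ll_tx_connect_to_periph(dev, chan, GDMA_TRIG_PERIPH_M2M, 0);
    ahb_dma_ll_rx_set_desc_addr(dev, chan, rx_desc_addr);
    ahb_dma_ll_tx_set_desc_addr(dev, chan, tx_desc_addr);
    ahb_dma_ll_rx_start(dev, chan);  // start RX first so inbound data has a descriptor to land in
    ahb_dma_ll_tx_start(dev, chan);
}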

View File

@@ -530,8 +530,8 @@ static inline void ahb_dma_ll_tx_enable_etm_task(ahb_dma_dev_t *dev, uint32_t ch
  */
 static inline void ahb_dma_ll_tx_crc_clear(ahb_dma_dev_t *dev, uint32_t channel)
 {
-    dev->out_crc[channel].crc_clear.out_crc_clear_chn_reg = 1;
-    dev->out_crc[channel].crc_clear.out_crc_clear_chn_reg = 0;
+    dev->out_crc_arb[channel].crc_clear.out_crc_clear_chn_reg = 1;
+    dev->out_crc_arb[channel].crc_clear.out_crc_clear_chn_reg = 0;
 }
 /**
@@ -540,7 +540,7 @@ static inline void ahb_dma_ll_tx_crc_clear(ahb_dma_dev_t *dev, uint32_t channel)
 static inline void ahb_dma_ll_tx_crc_set_width(ahb_dma_dev_t *dev, uint32_t channel, uint32_t width)
 {
     HAL_ASSERT(width <= 32);
-    dev->out_crc[channel].crc_width.tx_crc_width_chn = (width - 1) / 8;
+    dev->out_crc_arb[channel].crc_width.tx_crc_width_chn = (width - 1) / 8;
 }
 /**
@@ -548,7 +548,7 @@ static inline void ahb_dma_ll_tx_crc_set_width(ahb_dma_dev_t *dev, uint32_t chan
  */
 static inline void ahb_dma_ll_tx_crc_set_init_value(ahb_dma_dev_t *dev, uint32_t channel, uint32_t value)
 {
-    dev->out_crc[channel].crc_init_data.out_crc_init_data_chn = value;
+    dev->out_crc_arb[channel].crc_init_data.out_crc_init_data_chn = value;
 }
 /**
@@ -556,7 +556,7 @@ static inline void ahb_dma_ll_tx_crc_set_init_value(ahb_dma_dev_t *dev, uint32_t
  */
 static inline uint32_t ahb_dma_ll_tx_crc_get_result(ahb_dma_dev_t *dev, uint32_t channel)
 {
-    return dev->out_crc[channel].crc_final_result.out_crc_final_result_chn;
+    return dev->out_crc_arb[channel].crc_final_result.out_crc_final_result_chn;
 }
 /**
@@ -564,8 +564,8 @@ static inline uint32_t ahb_dma_ll_tx_crc_get_result(ahb_dma_dev_t *dev, uint32_t
  */
 static inline void ahb_dma_ll_tx_crc_latch_config(ahb_dma_dev_t *dev, uint32_t channel)
 {
-    dev->out_crc[channel].crc_width.tx_crc_latch_flag_chn = 1;
-    dev->out_crc[channel].crc_width.tx_crc_latch_flag_chn = 0;
+    dev->out_crc_arb[channel].crc_width.tx_crc_latch_flag_chn = 1;
+    dev->out_crc_arb[channel].crc_width.tx_crc_latch_flag_chn = 0;
 }
 /**
@@ -574,14 +574,14 @@ static inline void ahb_dma_ll_tx_crc_latch_config(ahb_dma_dev_t *dev, uint32_t c
 static inline void ahb_dma_ll_tx_crc_set_lfsr_data_mask(ahb_dma_dev_t *dev, uint32_t channel, uint32_t crc_bit,
                                                         uint32_t lfsr_mask, uint32_t data_mask, bool reverse_data_mask)
 {
-    dev->out_crc[channel].crc_en_addr.tx_crc_en_addr_chn = crc_bit;
-    dev->out_crc[channel].crc_en_wr_data.tx_crc_en_wr_data_chn = lfsr_mask;
-    dev->out_crc[channel].crc_data_en_addr.tx_crc_data_en_addr_chn = crc_bit;
+    dev->out_crc_arb[channel].crc_en_addr.tx_crc_en_addr_chn = crc_bit;
+    dev->out_crc_arb[channel].crc_en_wr_data.tx_crc_en_wr_data_chn = lfsr_mask;
+    dev->out_crc_arb[channel].crc_data_en_addr.tx_crc_data_en_addr_chn = crc_bit;
     if (reverse_data_mask) {
         // "& 0xff" because the hardware only support 8-bit data
         data_mask = hal_utils_bitwise_reverse8(data_mask & 0xFF);
     }
-    HAL_FORCE_MODIFY_U32_REG_FIELD(dev->out_crc[channel].crc_data_en_wr_data, tx_crc_data_en_wr_data_chn, data_mask);
+    HAL_FORCE_MODIFY_U32_REG_FIELD(dev->out_crc_arb[channel].crc_data_en_wr_data, tx_crc_data_en_wr_data_chn, data_mask);
 }
 ///////////////////////////////////// CRC-RX /////////////////////////////////////////
@@ -591,8 +591,8 @@ static inline void ahb_dma_ll_tx_crc_set_lfsr_data_mask(ahb_dma_dev_t *dev, uint
  */
 static inline void ahb_dma_ll_rx_crc_clear(ahb_dma_dev_t *dev, uint32_t channel)
 {
-    dev->in_crc[channel].crc_clear.in_crc_clear_chn_reg = 1;
-    dev->in_crc[channel].crc_clear.in_crc_clear_chn_reg = 0;
+    dev->in_crc_arb[channel].crc_clear.in_crc_clear_chn_reg = 1;
+    dev->in_crc_arb[channel].crc_clear.in_crc_clear_chn_reg = 0;
 }
 /**
@@ -601,7 +601,7 @@ static inline void ahb_dma_ll_rx_crc_clear(ahb_dma_dev_t *dev, uint32_t channel)
 static inline void ahb_dma_ll_rx_crc_set_width(ahb_dma_dev_t *dev, uint32_t channel, uint32_t width)
 {
     HAL_ASSERT(width <= 32);
-    dev->in_crc[channel].crc_width.rx_crc_width_chn = (width - 1) / 8;
+    dev->in_crc_arb[channel].crc_width.rx_crc_width_chn = (width - 1) / 8;
 }
 /**
@@ -609,7 +609,7 @@ static inline void ahb_dma_ll_rx_crc_set_width(ahb_dma_dev_t *dev, uint32_t chan
  */
 static inline void ahb_dma_ll_rx_crc_set_init_value(ahb_dma_dev_t *dev, uint32_t channel, uint32_t value)
 {
-    dev->in_crc[channel].crc_init_data.in_crc_init_data_chn = value;
+    dev->in_crc_arb[channel].crc_init_data.in_crc_init_data_chn = value;
 }
 /**
@@ -617,7 +617,7 @@ static inline void ahb_dma_ll_rx_crc_set_init_value(ahb_dma_dev_t *dev, uint32_t
  */
 static inline uint32_t ahb_dma_ll_rx_crc_get_result(ahb_dma_dev_t *dev, uint32_t channel)
 {
-    return dev->in_crc[channel].crc_final_result.in_crc_final_result_chn;
+    return dev->in_crc_arb[channel].crc_final_result.in_crc_final_result_chn;
 }
 /**
@@ -625,8 +625,8 @@ static inline uint32_t ahb_dma_ll_rx_crc_get_result(ahb_dma_dev_t *dev, uint32_t
  */
 static inline void ahb_dma_ll_rx_crc_latch_config(ahb_dma_dev_t *dev, uint32_t channel)
 {
-    dev->in_crc[channel].crc_width.rx_crc_latch_flag_chn = 1;
-    dev->in_crc[channel].crc_width.rx_crc_latch_flag_chn = 0;
+    dev->in_crc_arb[channel].crc_width.rx_crc_latch_flag_chn = 1;
+    dev->in_crc_arb[channel].crc_width.rx_crc_latch_flag_chn = 0;
 }
 /**
@@ -635,14 +635,14 @@ static inline void ahb_dma_ll_rx_crc_latch_config(ahb_dma_dev_t *dev, uint32_t c
 static inline void ahb_dma_ll_rx_crc_set_lfsr_data_mask(ahb_dma_dev_t *dev, uint32_t channel, uint32_t crc_bit,
                                                         uint32_t lfsr_mask, uint32_t data_mask, bool reverse_data_mask)
 {
-    dev->in_crc[channel].crc_en_addr.rx_crc_en_addr_chn = crc_bit;
-    dev->in_crc[channel].crc_en_wr_data.rx_crc_en_wr_data_chn = lfsr_mask;
-    dev->in_crc[channel].crc_data_en_addr.rx_crc_data_en_addr_chn = crc_bit;
+    dev->in_crc_arb[channel].crc_en_addr.rx_crc_en_addr_chn = crc_bit;
+    dev->in_crc_arb[channel].crc_en_wr_data.rx_crc_en_wr_data_chn = lfsr_mask;
+    dev->in_crc_arb[channel].crc_data_en_addr.rx_crc_data_en_addr_chn = crc_bit;
     if (reverse_data_mask) {
         // "& 0xff" because the hardware only support 8-bit data
         data_mask = hal_utils_bitwise_reverse8(data_mask & 0xFF);
     }
-    HAL_FORCE_MODIFY_U32_REG_FIELD(dev->in_crc[channel].crc_data_en_wr_data, rx_crc_data_en_wr_data_chn, data_mask);
+    HAL_FORCE_MODIFY_U32_REG_FIELD(dev->in_crc_arb[channel].crc_data_en_wr_data, rx_crc_data_en_wr_data_chn, data_mask);
 }
 #ifdef __cplusplus
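The rename from out_crc/in_crc to out_crc_arb/in_crc_arb does not change the call sequence; a sketch of driving the TX CRC engine with these helpers (the per-bit LFSR/data mask setup depends on the chosen polynomial and is elided; `dev`/`chan` are placeholders):

ahb_dma_ll_tx_crc_clear(dev, chan);                  // reset any previous CRC state
ahb_dma_ll_tx_crc_set_width(dev, chan, 16);          // e.g. a 16-bit CRC
ahb_dma_ll_tx_crc_set_init_value(dev, chan, 0xFFFF);
// ... one ahb_dma_ll_tx_crc_set_lfsr_data_mask() call per CRC bit, per the polynomial ...
ahb_dma_ll_tx_crc_latch_config(dev, chan);           // commit the configuration
// ... run the transfer, then:
uint32_t crc = ahb_dma_ll_tx_crc_get_result(dev, chan);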

View File

@@ -111,7 +111,7 @@ static const regdma_entries_config_t gdma_g0p2_regs_retention[] = {
                 .owner = ENTRY(0) | ENTRY(2) },
 };
-const gdma_chx_reg_ctx_link_t gdma_chx_regs_retention[SOC_GDMA_PAIRS_PER_GROUP_MAX][SOC_GDMA_PAIRS_PER_GROUP_MAX] = {
+const gdma_chx_reg_ctx_link_t gdma_chx_regs_retention[SOC_GDMA_NUM_GROUPS_MAX][SOC_GDMA_PAIRS_PER_GROUP_MAX] = {
     [0] = {
         [0] = {gdma_g0p0_regs_retention, ARRAY_SIZE(gdma_g0p0_regs_retention)},
         [1] = {gdma_g0p1_regs_retention, ARRAY_SIZE(gdma_g0p1_regs_retention)},

View File

@@ -0,0 +1,143 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "soc/gdma_periph.h"
#include "soc/ahb_dma_reg.h"
const gdma_signal_conn_t gdma_periph_signals = {
.groups = {
[0] = {
.module = PERIPH_GDMA_MODULE,
.pairs = {
[0] = {
.rx_irq_id = ETS_DMA_IN_CH0_INTR_SOURCE,
.tx_irq_id = ETS_DMA_OUT_CH0_INTR_SOURCE,
},
[1] = {
.rx_irq_id = ETS_DMA_IN_CH1_INTR_SOURCE,
.tx_irq_id = ETS_DMA_OUT_CH1_INTR_SOURCE,
},
[2] = {
.rx_irq_id = ETS_DMA_IN_CH2_INTR_SOURCE,
.tx_irq_id = ETS_DMA_OUT_CH2_INTR_SOURCE,
}
}
}
}
};
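/*
 * Illustrative lookup (not part of this file): drivers pick the interrupt
 * source for a channel out of this table, e.g.
 *     int rx_irq = gdma_periph_signals.groups[0].pairs[chan].rx_irq_id;
 */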
#if SOC_GDMA_SUPPORT_SLEEP_RETENTION
/* GDMA Channel (Group0, Pair0) Registers Context
Include: GDMA_MISC_CONF_REG
GDMA_IN_INT_ENA_CH0_REG / GDMA_OUT_INT_ENA_CH0_REG / GDMA_IN_PERI_SEL_CH0_REG / GDMA_OUT_PERI_SEL_CH0_REG
GDMA_IN_CONF0_CH0_REG / GDMA_IN_CONF1_CH0_REG / GDMA_IN_LINK_CH0_REG / GDMA_IN_PRI_CH0_REG
GDMA_OUT_CONF0_CH0_REG / GDMA_OUT_CONF1_CH0_REG / GDMA_OUT_LINK_CH0_REG / GDMA_OUT_PRI_CH0_REG
AHB_DMA_TX_CH_ARB_WEIGH_CH0_REG / AHB_DMA_TX_ARB_WEIGH_OPT_DIR_CH0_REG
AHB_DMA_RX_CH_ARB_WEIGH_CH0_REG / AHB_DMA_RX_ARB_WEIGH_OPT_DIR_CH0_REG
AHB_DMA_IN_LINK_ADDR_CH0_REG / AHB_DMA_OUT_LINK_ADDR_CH0_REG
AHB_DMA_INTR_MEM_START_ADDR_REG / AHB_DMA_INTR_MEM_END_ADDR_REG
AHB_DMA_ARB_TIMEOUT_TX_REG / AHB_DMA_ARB_TIMEOUT_RX_REG
AHB_DMA_WEIGHT_EN_TX_REG / AHB_DMA_WEIGHT_EN_RX_REG
*/
#define G0P0_RETENTION_REGS_CNT_0 13
#define G0P0_RETENTION_MAP_BASE_0 (REG_AHB_DMA_BASE + 0x8)
#define G0P0_RETENTION_REGS_CNT_1 12
#define G0P0_RETENTION_MAP_BASE_1 (REG_AHB_DMA_BASE + 0x2dc)
static const uint32_t g0p0_regs_map0[4] = {0x4c801001, 0x604c0060, 0x0, 0x0};
static const uint32_t g0p0_regs_map1[4] = {0xc0000003, 0xfc900000, 0x0, 0x0};
static const regdma_entries_config_t gdma_g0p0_regs_retention[] = {
[0] = { .config = REGDMA_LINK_ADDR_MAP_INIT(REGDMA_AHB_DMA_LINK(0x00), \
G0P0_RETENTION_MAP_BASE_0, G0P0_RETENTION_MAP_BASE_0, \
G0P0_RETENTION_REGS_CNT_0, 0, 0, \
g0p0_regs_map0[0], g0p0_regs_map0[1], \
g0p0_regs_map0[2], g0p0_regs_map0[3]), \
.owner = ENTRY(0) | ENTRY(2) }, \
[1] = { .config = REGDMA_LINK_ADDR_MAP_INIT(REGDMA_AHB_DMA_LINK(0x00), \
G0P0_RETENTION_MAP_BASE_1, G0P0_RETENTION_MAP_BASE_1, \
G0P0_RETENTION_REGS_CNT_1, 0, 0, \
g0p0_regs_map1[0], g0p0_regs_map1[1], \
g0p0_regs_map1[2], g0p0_regs_map1[3]), \
.owner = ENTRY(0) | ENTRY(2) },
};
/* GDMA Channel (Group0, Pair1) Registers Context
Include: GDMA_MISC_CONF_REG
GDMA_IN_INT_ENA_CH1_REG / GDMA_OUT_INT_ENA_CH1_REG / GDMA_IN_PERI_SEL_CH1_REG / GDMA_OUT_PERI_SEL_CH1_REG
GDMA_IN_CONF0_CH1_REG / GDMA_IN_CONF1_CH1_REG / GDMA_IN_LINK_CH1_REG / GDMA_IN_PRI_CH1_REG
GDMA_OUT_CONF0_CH1_REG / GDMA_OUT_CONF1_CH1_REG / GDMA_OUT_LINK_CH1_REG / GDMA_OUT_PRI_CH1_REG
AHB_DMA_TX_CH_ARB_WEIGH_CH1_REG / AHB_DMA_TX_ARB_WEIGH_OPT_DIR_CH1_REG
AHB_DMA_RX_CH_ARB_WEIGH_CH1_REG / AHB_DMA_RX_ARB_WEIGH_OPT_DIR_CH1_REG
AHB_DMA_IN_LINK_ADDR_CH1_REG / AHB_DMA_OUT_LINK_ADDR_CH1_REG
AHB_DMA_INTR_MEM_START_ADDR_REG / AHB_DMA_INTR_MEM_END_ADDR_REG
AHB_DMA_ARB_TIMEOUT_TX_REG / AHB_DMA_ARB_TIMEOUT_RX_REG
AHB_DMA_WEIGHT_EN_TX_REG / AHB_DMA_WEIGHT_EN_RX_REG
*/
#define G0P1_RETENTION_REGS_CNT_0 13
#define G0P1_RETENTION_MAP_BASE_0 (REG_AHB_DMA_BASE + 0x18)
#define G0P1_RETENTION_REGS_CNT_1 12
#define G0P1_RETENTION_MAP_BASE_1 (REG_AHB_DMA_BASE + 0x304)
static const uint32_t g0p1_regs_map0[4] = {0x81001, 0x0, 0xc00604c0, 0x604};
static const uint32_t g0p1_regs_map1[4] = {0xc0000003, 0x3f4800, 0x0, 0x0};
static const regdma_entries_config_t gdma_g0p1_regs_retention[] = {
[0] = { .config = REGDMA_LINK_ADDR_MAP_INIT(REGDMA_AHB_DMA_LINK(0x00), \
G0P1_RETENTION_MAP_BASE_0, G0P1_RETENTION_MAP_BASE_0, \
G0P1_RETENTION_REGS_CNT_0, 0, 0, \
g0p1_regs_map0[0], g0p1_regs_map0[1], \
g0p1_regs_map0[2], g0p1_regs_map0[3]), \
.owner = ENTRY(0) | ENTRY(2) },
[1] = { .config = REGDMA_LINK_ADDR_MAP_INIT(REGDMA_AHB_DMA_LINK(0x00), \
G0P1_RETENTION_MAP_BASE_1, G0P1_RETENTION_MAP_BASE_1, \
G0P1_RETENTION_REGS_CNT_1, 0, 0, \
g0p1_regs_map1[0], g0p1_regs_map1[1], \
g0p1_regs_map1[2], g0p1_regs_map1[3]), \
.owner = ENTRY(0) | ENTRY(2) },
};
/* GDMA Channel (Group0, Pair2) Registers Context
Include: GDMA_MISC_CONF_REG
GDMA_IN_INT_ENA_CH2_REG / GDMA_OUT_INT_ENA_CH2_REG
GDMA_IN_PERI_SEL_CH2_REG / GDMA_OUT_PERI_SEL_CH2_REG
GDMA_IN_CONF0_CH2_REG / GDMA_IN_CONF1_CH2_REG / GDMA_IN_LINK_CH2_REG / GDMA_IN_PRI_CH2_REG
GDMA_OUT_CONF0_CH2_REG / GDMA_OUT_CONF1_CH2_REG / GDMA_OUT_LINK_CH2_REG / GDMA_OUT_PRI_CH2_REG
AHB_DMA_TX_CH_ARB_WEIGH_CH2_REG / AHB_DMA_TX_ARB_WEIGH_OPT_DIR_CH2_REG
AHB_DMA_RX_CH_ARB_WEIGH_CH2_REG / AHB_DMA_RX_ARB_WEIGH_OPT_DIR_CH2_REG
AHB_DMA_IN_LINK_ADDR_CH2_REG / AHB_DMA_OUT_LINK_ADDR_CH2_REG
AHB_DMA_INTR_MEM_START_ADDR_REG / AHB_DMA_INTR_MEM_END_ADDR_REG
AHB_DMA_ARB_TIMEOUT_TX_REG / AHB_DMA_ARB_TIMEOUT_RX_REG
AHB_DMA_WEIGHT_EN_TX_REG / AHB_DMA_WEIGHT_EN_RX_REG
*/
#define G0P2_RETENTION_REGS_CNT_0 3
#define G0P2_RETENTION_MAP_BASE_0 (REG_AHB_DMA_BASE + 0x28)
#define G0P2_RETENTION_REGS_CNT_1 22
#define G0P2_RETENTION_MAP_BASE_1 (REG_AHB_DMA_BASE + 0x1f0)
static const uint32_t g0p2_regs_map0[4] = {0x9001, 0x0, 0x0, 0x0};
static const uint32_t g0p2_regs_map1[4] = {0x13001813, 0x18, 0x18000, 0x7f26000};
static const regdma_entries_config_t gdma_g0p2_regs_retention[] = {
[0] = { .config = REGDMA_LINK_ADDR_MAP_INIT(REGDMA_AHB_DMA_LINK(0x00), \
G0P2_RETENTION_MAP_BASE_0, G0P2_RETENTION_MAP_BASE_0, \
G0P2_RETENTION_REGS_CNT_0, 0, 0, \
g0p2_regs_map0[0], g0p2_regs_map0[1], \
g0p2_regs_map0[2], g0p2_regs_map0[3]), \
.owner = ENTRY(0) | ENTRY(2) },
[1] = { .config = REGDMA_LINK_ADDR_MAP_INIT(REGDMA_AHB_DMA_LINK(0x00), \
G0P2_RETENTION_MAP_BASE_1, G0P2_RETENTION_MAP_BASE_1, \
G0P2_RETENTION_REGS_CNT_1, 0, 0, \
g0p2_regs_map1[0], g0p2_regs_map1[1], \
g0p2_regs_map1[2], g0p2_regs_map1[3]), \
.owner = ENTRY(0) | ENTRY(2) },
};
const gdma_chx_reg_ctx_link_t gdma_chx_regs_retention[SOC_GDMA_NUM_GROUPS_MAX][SOC_GDMA_PAIRS_PER_GROUP_MAX] = {
[0] = {
[0] = {gdma_g0p0_regs_retention, ARRAY_SIZE(gdma_g0p0_regs_retention)},
[1] = {gdma_g0p1_regs_retention, ARRAY_SIZE(gdma_g0p1_regs_retention)},
[2] = {gdma_g0p2_regs_retention, ARRAY_SIZE(gdma_g0p2_regs_retention)}
}
};
#endif
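When registering a pair for sleep retention, the driver walks this table; a rough sketch, assuming the struct's pointer member is named `link_list` as on other targets, with a hypothetical `register_retention_entries()` backend:

for (int pair = 0; pair < SOC_GDMA_PAIRS_PER_GROUP_MAX; pair++) {
    const gdma_chx_reg_ctx_link_t *ctx = &gdma_chx_regs_retention[0][pair];
    register_retention_entries(ctx->link_list, ctx->link_num); // hypothetical backend call
}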

View File

@@ -7,10 +7,22 @@ config SOC_UART_SUPPORTED
     bool
     default y
+config SOC_GDMA_SUPPORTED
+    bool
+    default y
+config SOC_AHB_GDMA_SUPPORTED
+    bool
+    default y
 config SOC_GPTIMER_SUPPORTED
     bool
     default y
+config SOC_ASYNC_MEMCPY_SUPPORTED
+    bool
+    default y
 config SOC_SUPPORTS_SECURE_DL_MODE
     bool
     default y
@@ -111,6 +123,22 @@ config SOC_CPU_IDRAM_SPLIT_USING_PMP
     bool
     default y
+config SOC_DMA_CAN_ACCESS_FLASH
+    bool
+    default y
+config SOC_AHB_GDMA_VERSION
+    int
+    default 2
+config SOC_GDMA_NUM_GROUPS_MAX
+    int
+    default 1
+config SOC_GDMA_PAIRS_PER_GROUP_MAX
+    int
+    default 3
 config SOC_GPIO_PORT
     int
     default 1

View File

@@ -1132,139 +1132,84 @@ typedef union {
     uint32_t val;
 } ahb_dma_out_peri_sel_chn_reg_t;
+typedef struct {
+    volatile ahb_dma_in_int_raw_chn_reg_t raw;
+    volatile ahb_dma_in_int_st_chn_reg_t st;
+    volatile ahb_dma_in_int_ena_chn_reg_t ena;
+    volatile ahb_dma_in_int_clr_chn_reg_t clr;
+} ahb_dma_in_int_chn_reg_t;
+typedef struct {
+    volatile ahb_dma_out_int_raw_chn_reg_t raw;
+    volatile ahb_dma_out_int_st_chn_reg_t st;
+    volatile ahb_dma_out_int_ena_chn_reg_t ena;
+    volatile ahb_dma_out_int_clr_chn_reg_t clr;
+} ahb_dma_out_int_chn_reg_t;
+typedef struct {
+    volatile ahb_dma_in_conf0_chn_reg_t in_conf0;
+    volatile ahb_dma_in_conf1_chn_reg_t in_conf1;
+    volatile ahb_dma_infifo_status_chn_reg_t infifo_status;
+    volatile ahb_dma_in_pop_chn_reg_t in_pop;
+    volatile ahb_dma_in_link_chn_reg_t in_link;
+    volatile ahb_dma_in_state_chn_reg_t in_state;
+    volatile ahb_dma_in_suc_eof_des_addr_chn_reg_t in_suc_eof_des_addr;
+    volatile ahb_dma_in_err_eof_des_addr_chn_reg_t in_err_eof_des_addr;
+    volatile ahb_dma_in_dscr_chn_reg_t in_dscr;
+    volatile ahb_dma_in_dscr_bf0_chn_reg_t in_dscr_bf0;
+    volatile ahb_dma_in_dscr_bf1_chn_reg_t in_dscr_bf1;
+    volatile ahb_dma_in_pri_chn_reg_t in_pri;
+    volatile ahb_dma_in_peri_sel_chn_reg_t in_peri_sel;
+} ahb_dma_in_chn_reg_t;
+typedef struct {
+    volatile ahb_dma_out_conf0_chn_reg_t out_conf0;
+    volatile ahb_dma_out_conf1_chn_reg_t out_conf1;
+    volatile ahb_dma_outfifo_status_chn_reg_t outfifo_status;
+    volatile ahb_dma_out_push_chn_reg_t out_push;
+    volatile ahb_dma_out_link_chn_reg_t out_link;
+    volatile ahb_dma_out_state_chn_reg_t out_state;
+    volatile ahb_dma_out_eof_des_addr_chn_reg_t out_eof_des_addr;
+    volatile ahb_dma_out_eof_bfr_des_addr_chn_reg_t out_eof_bfr_des_addr;
+    volatile ahb_dma_out_dscr_chn_reg_t out_dscr;
+    volatile ahb_dma_out_dscr_bf0_chn_reg_t out_dscr_bf0;
+    volatile ahb_dma_out_dscr_bf1_chn_reg_t out_dscr_bf1;
+    volatile ahb_dma_out_pri_chn_reg_t out_pri;
+    volatile ahb_dma_out_peri_sel_chn_reg_t out_peri_sel;
+} ahb_dma_out_chn_reg_t;
+typedef struct {
+    volatile ahb_dma_in_chn_reg_t in;
+    uint32_t reserved_in[11];
+    volatile ahb_dma_out_chn_reg_t out;
+    uint32_t reserved_out[11];
+} ahb_dma_chn_reg_t;
+typedef struct {
+    uint32_t reserved[8];
+    ahb_dma_rx_ch_arb_weigh_chn_reg_t ch_arb_weigh;
+    ahb_dma_rx_arb_weigh_opt_dir_chn_reg_t arb_weigh_opt;
+} ahb_dma_in_crc_arb_chn_reg_t;
+typedef struct {
+    uint32_t reserved[8];
+    ahb_dma_tx_ch_arb_weigh_chn_reg_t ch_arb_weigh;
+    ahb_dma_tx_arb_weigh_opt_dir_chn_reg_t arb_weigh_opt;
+} ahb_dma_out_crc_arb_chn_reg_t;
 typedef struct {
-    volatile ahb_dma_in_int_raw_chn_reg_t in_int_raw_ch0;
-    volatile ahb_dma_in_int_st_chn_reg_t in_int_st_ch0;
-    volatile ahb_dma_in_int_ena_chn_reg_t in_int_ena_ch0;
-    volatile ahb_dma_in_int_clr_chn_reg_t in_int_clr_ch0;
-    volatile ahb_dma_in_int_raw_chn_reg_t in_int_raw_ch1;
-    volatile ahb_dma_in_int_st_chn_reg_t in_int_st_ch1;
-    volatile ahb_dma_in_int_ena_chn_reg_t in_int_ena_ch1;
-    volatile ahb_dma_in_int_clr_chn_reg_t in_int_clr_ch1;
-    volatile ahb_dma_in_int_raw_chn_reg_t in_int_raw_ch2;
-    volatile ahb_dma_in_int_st_chn_reg_t in_int_st_ch2;
-    volatile ahb_dma_in_int_ena_chn_reg_t in_int_ena_ch2;
-    volatile ahb_dma_in_int_clr_chn_reg_t in_int_clr_ch2;
-    volatile ahb_dma_out_int_raw_chn_reg_t out_int_raw_ch0;
-    volatile ahb_dma_out_int_st_chn_reg_t out_int_st_ch0;
-    volatile ahb_dma_out_int_ena_chn_reg_t out_int_ena_ch0;
-    volatile ahb_dma_out_int_clr_chn_reg_t out_int_clr_ch0;
-    volatile ahb_dma_out_int_raw_chn_reg_t out_int_raw_ch1;
-    volatile ahb_dma_out_int_st_chn_reg_t out_int_st_ch1;
-    volatile ahb_dma_out_int_ena_chn_reg_t out_int_ena_ch1;
-    volatile ahb_dma_out_int_clr_chn_reg_t out_int_clr_ch1;
-    volatile ahb_dma_out_int_raw_chn_reg_t out_int_raw_ch2;
-    volatile ahb_dma_out_int_st_chn_reg_t out_int_st_ch2;
-    volatile ahb_dma_out_int_ena_chn_reg_t out_int_ena_ch2;
-    volatile ahb_dma_out_int_clr_chn_reg_t out_int_clr_ch2;
+    volatile ahb_dma_in_int_chn_reg_t in_intr[3];
+    volatile ahb_dma_out_int_chn_reg_t out_intr[3];
     volatile ahb_dma_ahb_test_reg_t ahb_test;
     volatile ahb_dma_misc_conf_reg_t misc_conf;
     volatile ahb_dma_date_reg_t date;
     uint32_t reserved_06c;
-    volatile ahb_dma_in_conf0_chn_reg_t in_conf0_ch0;
-    volatile ahb_dma_in_conf1_chn_reg_t in_conf1_ch0;
-    volatile ahb_dma_infifo_status_chn_reg_t infifo_status_ch0;
-    volatile ahb_dma_in_pop_chn_reg_t in_pop_ch0;
-    volatile ahb_dma_in_link_chn_reg_t in_link_ch0;
-    volatile ahb_dma_in_state_chn_reg_t in_state_ch0;
-    volatile ahb_dma_in_suc_eof_des_addr_chn_reg_t in_suc_eof_des_addr_ch0;
-    volatile ahb_dma_in_err_eof_des_addr_chn_reg_t in_err_eof_des_addr_ch0;
-    volatile ahb_dma_in_dscr_chn_reg_t in_dscr_ch0;
-    volatile ahb_dma_in_dscr_bf0_chn_reg_t in_dscr_bf0_ch0;
-    volatile ahb_dma_in_dscr_bf1_chn_reg_t in_dscr_bf1_ch0;
-    volatile ahb_dma_in_pri_chn_reg_t in_pri_ch0;
-    volatile ahb_dma_in_peri_sel_chn_reg_t in_peri_sel_ch0;
-    uint32_t reserved_0a4[11];
-    volatile ahb_dma_out_conf0_ch0_reg_t out_conf0_ch0;
-    volatile ahb_dma_out_conf1_chn_reg_t out_conf1_ch0;
-    volatile ahb_dma_outfifo_status_chn_reg_t outfifo_status_ch0;
-    volatile ahb_dma_out_push_chn_reg_t out_push_ch0;
-    volatile ahb_dma_out_link_chn_reg_t out_link_ch0;
-    volatile ahb_dma_out_state_chn_reg_t out_state_ch0;
-    volatile ahb_dma_out_eof_des_addr_chn_reg_t out_eof_des_addr_ch0;
-    volatile ahb_dma_out_eof_bfr_des_addr_chn_reg_t out_eof_bfr_des_addr_ch0;
-    volatile ahb_dma_out_dscr_chn_reg_t out_dscr_ch0;
-    volatile ahb_dma_out_dscr_bf0_chn_reg_t out_dscr_bf0_ch0;
-    volatile ahb_dma_out_dscr_bf1_chn_reg_t out_dscr_bf1_ch0;
-    volatile ahb_dma_out_pri_chn_reg_t out_pri_ch0;
-    volatile ahb_dma_out_peri_sel_chn_reg_t out_peri_sel_ch0;
-    uint32_t reserved_104[11];
-    volatile ahb_dma_in_conf0_chn_reg_t in_conf0_ch1;
-    volatile ahb_dma_in_conf1_chn_reg_t in_conf1_ch1;
-    volatile ahb_dma_infifo_status_chn_reg_t infifo_status_ch1;
-    volatile ahb_dma_in_pop_chn_reg_t in_pop_ch1;
-    volatile ahb_dma_in_link_chn_reg_t in_link_ch1;
-    volatile ahb_dma_in_state_chn_reg_t in_state_ch1;
-    volatile ahb_dma_in_suc_eof_des_addr_chn_reg_t in_suc_eof_des_addr_ch1;
-    volatile ahb_dma_in_err_eof_des_addr_chn_reg_t in_err_eof_des_addr_ch1;
-    volatile ahb_dma_in_dscr_chn_reg_t in_dscr_ch1;
-    volatile ahb_dma_in_dscr_bf0_chn_reg_t in_dscr_bf0_ch1;
-    volatile ahb_dma_in_dscr_bf1_chn_reg_t in_dscr_bf1_ch1;
-    volatile ahb_dma_in_pri_chn_reg_t in_pri_ch1;
-    volatile ahb_dma_in_peri_sel_chn_reg_t in_peri_sel_ch1;
-    uint32_t reserved_164[11];
-    volatile ahb_dma_out_conf0_chn_reg_t out_conf0_ch1;
-    volatile ahb_dma_out_conf1_chn_reg_t out_conf1_ch1;
-    volatile ahb_dma_outfifo_status_chn_reg_t outfifo_status_ch1;
-    volatile ahb_dma_out_push_chn_reg_t out_push_ch1;
-    volatile ahb_dma_out_link_chn_reg_t out_link_ch1;
-    volatile ahb_dma_out_state_chn_reg_t out_state_ch1;
-    volatile ahb_dma_out_eof_des_addr_chn_reg_t out_eof_des_addr_ch1;
-    volatile ahb_dma_out_eof_bfr_des_addr_chn_reg_t out_eof_bfr_des_addr_ch1;
-    volatile ahb_dma_out_dscr_chn_reg_t out_dscr_ch1;
-    volatile ahb_dma_out_dscr_bf0_chn_reg_t out_dscr_bf0_ch1;
-    volatile ahb_dma_out_dscr_bf1_chn_reg_t out_dscr_bf1_ch1;
-    volatile ahb_dma_out_pri_chn_reg_t out_pri_ch1;
-    volatile ahb_dma_out_peri_sel_chn_reg_t out_peri_sel_ch1;
-    uint32_t reserved_1c4[11];
-    volatile ahb_dma_in_conf0_chn_reg_t in_conf0_ch2;
-    volatile ahb_dma_in_conf1_chn_reg_t in_conf1_ch2;
-    volatile ahb_dma_infifo_status_chn_reg_t infifo_status_ch2;
-    volatile ahb_dma_in_pop_chn_reg_t in_pop_ch2;
-    volatile ahb_dma_in_link_chn_reg_t in_link_ch2;
-    volatile ahb_dma_in_state_chn_reg_t in_state_ch2;
-    volatile ahb_dma_in_suc_eof_des_addr_chn_reg_t in_suc_eof_des_addr_ch2;
-    volatile ahb_dma_in_err_eof_des_addr_chn_reg_t in_err_eof_des_addr_ch2;
-    volatile ahb_dma_in_dscr_chn_reg_t in_dscr_ch2;
-    volatile ahb_dma_in_dscr_bf0_chn_reg_t in_dscr_bf0_ch2;
-    volatile ahb_dma_in_dscr_bf1_chn_reg_t in_dscr_bf1_ch2;
-    volatile ahb_dma_in_pri_chn_reg_t in_pri_ch2;
-    volatile ahb_dma_in_peri_sel_chn_reg_t in_peri_sel_ch2;
-    uint32_t reserved_224[11];
-    volatile ahb_dma_out_conf0_chn_reg_t out_conf0_ch2;
-    volatile ahb_dma_out_conf1_chn_reg_t out_conf1_ch2;
-    volatile ahb_dma_outfifo_status_chn_reg_t outfifo_status_ch2;
-    volatile ahb_dma_out_push_chn_reg_t out_push_ch2;
-    volatile ahb_dma_out_link_chn_reg_t out_link_ch2;
-    volatile ahb_dma_out_state_chn_reg_t out_state_ch2;
-    volatile ahb_dma_out_eof_des_addr_chn_reg_t out_eof_des_addr_ch2;
-    volatile ahb_dma_out_eof_bfr_des_addr_chn_reg_t out_eof_bfr_des_addr_ch2;
-    volatile ahb_dma_out_dscr_chn_reg_t out_dscr_ch2;
-    volatile ahb_dma_out_dscr_bf0_chn_reg_t out_dscr_bf0_ch2;
-    volatile ahb_dma_out_dscr_bf1_chn_reg_t out_dscr_bf1_ch2;
-    volatile ahb_dma_out_pri_chn_reg_t out_pri_ch2;
-    volatile ahb_dma_out_peri_sel_chn_reg_t out_peri_sel_ch2;
-    uint32_t reserved_284[22];
-    volatile ahb_dma_tx_ch_arb_weigh_chn_reg_t tx_ch_arb_weigh_ch0;
-    volatile ahb_dma_tx_arb_weigh_opt_dir_chn_reg_t tx_arb_weigh_opt_dir_ch0;
-    uint32_t reserved_2e4[8];
-    volatile ahb_dma_tx_ch_arb_weigh_chn_reg_t tx_ch_arb_weigh_ch1;
-    volatile ahb_dma_tx_arb_weigh_opt_dir_chn_reg_t tx_arb_weigh_opt_dir_ch1;
-    uint32_t reserved_30c[8];
-    volatile ahb_dma_tx_ch_arb_weigh_chn_reg_t tx_ch_arb_weigh_ch2;
-    volatile ahb_dma_tx_arb_weigh_opt_dir_chn_reg_t tx_arb_weigh_opt_dir_ch2;
-    uint32_t reserved_334[8];
-    volatile ahb_dma_rx_ch_arb_weigh_chn_reg_t rx_ch_arb_weigh_ch0;
-    volatile ahb_dma_rx_arb_weigh_opt_dir_chn_reg_t rx_arb_weigh_opt_dir_ch0;
-    uint32_t reserved_35c[8];
-    volatile ahb_dma_rx_ch_arb_weigh_chn_reg_t rx_ch_arb_weigh_ch1;
-    volatile ahb_dma_rx_arb_weigh_opt_dir_chn_reg_t rx_arb_weigh_opt_dir_ch1;
-    uint32_t reserved_384[8];
-    volatile ahb_dma_rx_ch_arb_weigh_chn_reg_t rx_ch_arb_weigh_ch2;
-    volatile ahb_dma_rx_arb_weigh_opt_dir_chn_reg_t rx_arb_weigh_opt_dir_ch2;
-    volatile ahb_dma_in_link_addr_chn_reg_t in_link_addr_chn[3];
-    volatile ahb_dma_out_link_addr_chn_reg_t out_link_addr_chn[3];
+    volatile ahb_dma_chn_reg_t channel[3];
+    uint32_t reserved_2b0[3];
+    volatile ahb_dma_out_crc_arb_chn_reg_t out_crc_arb[3];
+    volatile ahb_dma_in_crc_arb_chn_reg_t in_crc_arb[3];
+    volatile ahb_dma_in_link_addr_chn_reg_t in_link_addr[3];
+    volatile ahb_dma_out_link_addr_chn_reg_t out_link_addr[3];
     volatile ahb_dma_intr_mem_start_addr_reg_t intr_mem_start_addr;
     volatile ahb_dma_intr_mem_end_addr_reg_t intr_mem_end_addr;
     volatile ahb_dma_arb_timeout_tx_reg_t arb_timeout_tx;
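For callers, the restructuring mostly trades per-channel name suffixes for array indexing; for example (sketch):

// before: dev->in_link_ch2.inlink_start_chn = 1;
// after:  channels are an array of in/out register blocks
dev->channel[2].in.in_link.inlink_start_chn = 1;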

View File

@@ -0,0 +1,30 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
// The following macros have the format SOC_[periph][instance_id] so that they work with `GDMA_MAKE_TRIGGER`
#define SOC_GDMA_TRIG_PERIPH_M2M0 (-1)
#define SOC_GDMA_TRIG_PERIPH_SPI2 (1)
#define SOC_GDMA_TRIG_PERIPH_UHCI0 (2)
#define SOC_GDMA_TRIG_PERIPH_I2S0 (3)
#define SOC_GDMA_TRIG_PERIPH_AES0 (6)
#define SOC_GDMA_TRIG_PERIPH_SHA0 (7)
#define SOC_GDMA_TRIG_PERIPH_ADC0 (8)
#define SOC_GDMA_TRIG_PERIPH_PARLIO0 (9)
// The system bus on which the DMA instance of the peripheral connection is mounted
#define SOC_GDMA_BUS_ANY (-1)
#define SOC_GDMA_BUS_AHB (0)
#define SOC_GDMA_TRIG_PERIPH_M2M0_BUS SOC_GDMA_BUS_ANY
#define SOC_GDMA_TRIG_PERIPH_SPI2_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_UHCI0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_I2S0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_AES0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_SHA0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_ADC0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_PARLIO0_BUS SOC_GDMA_BUS_AHB
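/*
 * Illustrative use via the public driver API: GDMA_MAKE_TRIGGER pastes these
 * tokens together to build a gdma_trigger_t, e.g.
 *     gdma_connect(tx_chan, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_SPI, 2));
 */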

View File

@@ -63,6 +63,7 @@
 #define DR_REG_PCR_BASE 0x60096000
 #define DR_REG_TEE_BASE 0x60098000
 #define DR_REG_HP_APM_BASE 0x60099000
+#define DR_REG_LP_APM0_BASE 0x60099800
 #define DR_REG_MISC_BASE 0x6009F000
 /**

View File

@@ -0,0 +1,68 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdint.h>
#include "esp_bit_defs.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef enum periph_retention_module {
SLEEP_RETENTION_MODULE_MIN = 0,
/* clock module, which includes system and modem */
SLEEP_RETENTION_MODULE_CLOCK_SYSTEM = 1,
SLEEP_RETENTION_MODULE_CLOCK_MODEM = 2,
/* modem module, which includes WiFi, BLE and 802.15.4 */
SLEEP_RETENTION_MODULE_WIFI_MAC = 10,
SLEEP_RETENTION_MODULE_WIFI_BB = 11,
SLEEP_RETENTION_MODULE_BLE_MAC = 12,
SLEEP_RETENTION_MODULE_BT_BB = 13,
SLEEP_RETENTION_MODULE_802154_MAC = 14,
/* digital peripheral module, which includes Interrupt Matrix, HP_SYSTEM,
* TEE, APM, UART, Timer Group, IOMUX, SPIMEM, SysTimer, etc. */
SLEEP_RETENTION_MODULE_SYS_PERIPH = 16,
SLEEP_RETENTION_MODULE_ADC = 17,
SLEEP_RETENTION_MODULE_GDMA_CH0 = 24,
SLEEP_RETENTION_MODULE_GDMA_CH1 = 25,
SLEEP_RETENTION_MODULE_GDMA_CH2 = 26,
SLEEP_RETENTION_MODULE_MAX = 31
} periph_retention_module_t;
typedef enum periph_retention_module_bitmap {
/* clock module, which includes system and modem */
SLEEP_RETENTION_MODULE_BM_CLOCK_SYSTEM = BIT(SLEEP_RETENTION_MODULE_CLOCK_SYSTEM),
SLEEP_RETENTION_MODULE_BM_CLOCK_MODEM = BIT(SLEEP_RETENTION_MODULE_CLOCK_MODEM),
/* modem module, which includes WiFi, BLE and 802.15.4 */
SLEEP_RETENTION_MODULE_BM_WIFI_MAC = BIT(SLEEP_RETENTION_MODULE_WIFI_MAC),
SLEEP_RETENTION_MODULE_BM_WIFI_BB = BIT(SLEEP_RETENTION_MODULE_WIFI_BB),
SLEEP_RETENTION_MODULE_BM_BLE_MAC = BIT(SLEEP_RETENTION_MODULE_BLE_MAC),
SLEEP_RETENTION_MODULE_BM_BT_BB = BIT(SLEEP_RETENTION_MODULE_BT_BB),
SLEEP_RETENTION_MODULE_BM_802154_MAC = BIT(SLEEP_RETENTION_MODULE_802154_MAC),
/* digital peripheral module, which includes Interrupt Matrix, HP_SYSTEM,
* TEE, APM, UART, Timer Group, IOMUX, SPIMEM, SysTimer, etc. */
SLEEP_RETENTION_MODULE_BM_SYS_PERIPH = BIT(SLEEP_RETENTION_MODULE_SYS_PERIPH),
SLEEP_RETENTION_MODULE_BM_ADC = BIT(SLEEP_RETENTION_MODULE_ADC),
SLEEP_RETENTION_MODULE_BM_GDMA_CH0 = BIT(SLEEP_RETENTION_MODULE_GDMA_CH0),
SLEEP_RETENTION_MODULE_BM_GDMA_CH1 = BIT(SLEEP_RETENTION_MODULE_GDMA_CH1),
SLEEP_RETENTION_MODULE_BM_GDMA_CH2 = BIT(SLEEP_RETENTION_MODULE_GDMA_CH2),
SLEEP_RETENTION_MODULE_BM_ALL = (uint32_t)-1
} periph_retention_module_bitmap_t;
#ifdef __cplusplus
}
#endif
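A small sketch of how the two enums pair up (BIT() comes from esp_bit_defs.h, included above):

static inline bool module_in_bitmap(periph_retention_module_t module, uint32_t bitmap)
{
    return (bitmap & BIT(module)) != 0;
}
// module_in_bitmap(SLEEP_RETENTION_MODULE_GDMA_CH0, SLEEP_RETENTION_MODULE_BM_GDMA_CH0) -> true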

View File

@@ -20,15 +20,15 @@
 // #define SOC_ADC_SUPPORTED 1 // TODO: [ESP32C5] IDF-8701
 // #define SOC_DEDICATED_GPIO_SUPPORTED 1 // TODO: [ESP32C5] IDF-8725
 #define SOC_UART_SUPPORTED 1 // TODO: [ESP32C5] IDF-8722
-// #define SOC_GDMA_SUPPORTED 1 // TODO: [ESP32C5] IDF-8710
-// #define SOC_AHB_GDMA_SUPPORTED 1 // TODO: [ESP32C5] IDF-8710
+#define SOC_GDMA_SUPPORTED 1
+#define SOC_AHB_GDMA_SUPPORTED 1
 #define SOC_GPTIMER_SUPPORTED 1
 // #define SOC_PCNT_SUPPORTED 1 // TODO: [ESP32C5] IDF-8683
 // #define SOC_MCPWM_SUPPORTED 1 // TODO: [ESP32C5] IDF-8709
 // #define SOC_TWAI_SUPPORTED 1 // TODO: [ESP32C5] IDF-8691
 // #define SOC_ETM_SUPPORTED 1 // TODO: [ESP32C5] IDF-8693
 // #define SOC_PARLIO_SUPPORTED 1 // TODO: [ESP32C5] IDF-8685, IDF-8686
-// #define SOC_ASYNC_MEMCPY_SUPPORTED 1 // TODO: [ESP32C5] IDF-8716
+#define SOC_ASYNC_MEMCPY_SUPPORTED 1
 // #define SOC_USB_SERIAL_JTAG_SUPPORTED 1 // TODO: [ESP32C5] IDF-8721
 // #define SOC_TEMP_SENSOR_SUPPORTED 1 // TODO: [ESP32C5] IDF-8727
 // #define SOC_WIFI_SUPPORTED 1 // TODO: [ESP32C5] IDF-8851
@@ -55,7 +55,7 @@
 #define SOC_FLASH_ENC_SUPPORTED 1 // TODO: [ESP32C5] IDF-8622
 // #define SOC_SECURE_BOOT_SUPPORTED 1 // TODO: [ESP32C5] IDF-8623
 // #define SOC_BOD_SUPPORTED 1 // TODO: [ESP32C5] IDF-8647
-// #define SOC_APM_SUPPORTED 1 // TODO: [ESP32C5] IDF-8614
+// #define SOC_APM_SUPPORTED 1 // TODO: [ESP32C5] IDF-8614, IDF-8615
 // #define SOC_PMU_SUPPORTED 1 // TODO: [ESP32C5] IDF-8667
 // #define SOC_PAU_SUPPORTED 1 // TODO: [ESP32C5] IDF-8638
 // #define SOC_LP_TIMER_SUPPORTED 1 // TODO: [ESP32C5] IDF-8636
@@ -164,11 +164,15 @@
    See TRM DS chapter for more details */
 // #define SOC_DS_KEY_CHECK_MAX_WAIT_US (1100)
+/*-------------------------- DMA Common CAPS ----------------------------------------*/
+#define SOC_DMA_CAN_ACCESS_FLASH 1 /*!< DMA can access Flash memory */
 /*-------------------------- GDMA CAPS -------------------------------------*/
-// #define SOC_AHB_GDMA_VERSION 1U
-// #define SOC_GDMA_NUM_GROUPS_MAX 1U
-// #define SOC_GDMA_PAIRS_PER_GROUP_MAX 3
-// #define SOC_GDMA_SUPPORT_ETM 1 // Support ETM submodule
+#define SOC_AHB_GDMA_VERSION 2
+#define SOC_GDMA_NUM_GROUPS_MAX 1U
+#define SOC_GDMA_PAIRS_PER_GROUP_MAX 3
+// #define SOC_GDMA_SUPPORT_ETM 1 // Support ETM submodule TODO: IDF-9224
+// #define SOC_GDMA_SUPPORT_SLEEP_RETENTION 1 // TODO: IDF-9225
 /*-------------------------- ETM CAPS --------------------------------------*/
 // #define SOC_ETM_GROUPS 1U // Number of ETM groups
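Code that needs the new hardware can key off these caps directly; a typical guard (sketch):

#if SOC_AHB_GDMA_SUPPORTED && (SOC_AHB_GDMA_VERSION == 2)
// AHB-DMA v2 specific path (e.g. the renamed out_crc_arb/in_crc_arb register blocks)
#endif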

View File

@@ -48,6 +48,7 @@ PROVIDE ( HP_SYSTEM = 0x60095000 );
 PROVIDE ( PCR = 0x60096000 );
 PROVIDE ( TEE = 0x60098000 );
 PROVIDE ( HP_APM = 0x60099000 );
+PROVIDE ( LP_APM0 = 0x60099800 );
 PROVIDE ( MISC = 0x6009F000 );
 /* TODO: [ESP32C5] IDF-8845 Check the address */

View File

@@ -92,7 +92,7 @@ static const regdma_entries_config_t gdma_g0p2_regs_retention[] = {
                 .owner = ENTRY(0) | ENTRY(2) },
 };
-const gdma_chx_reg_ctx_link_t gdma_chx_regs_retention[SOC_GDMA_PAIRS_PER_GROUP_MAX][SOC_GDMA_PAIRS_PER_GROUP_MAX] = {
+const gdma_chx_reg_ctx_link_t gdma_chx_regs_retention[SOC_GDMA_NUM_GROUPS_MAX][SOC_GDMA_PAIRS_PER_GROUP_MAX] = {
     [0] = {
         [0] = {gdma_g0p0_regs_retention, ARRAY_SIZE(gdma_g0p0_regs_retention)},
         [1] = {gdma_g0p1_regs_retention, ARRAY_SIZE(gdma_g0p1_regs_retention)},

View File

@@ -92,7 +92,7 @@ static const regdma_entries_config_t gdma_g0p2_regs_retention[] = {
                 .owner = ENTRY(0) | ENTRY(2) },
 };
-const gdma_chx_reg_ctx_link_t gdma_chx_regs_retention[SOC_GDMA_PAIRS_PER_GROUP_MAX][SOC_GDMA_PAIRS_PER_GROUP_MAX] = {
+const gdma_chx_reg_ctx_link_t gdma_chx_regs_retention[SOC_GDMA_NUM_GROUPS_MAX][SOC_GDMA_PAIRS_PER_GROUP_MAX] = {
     [0] = {
         [0] = {gdma_g0p0_regs_retention, ARRAY_SIZE(gdma_g0p0_regs_retention)},
         [1] = {gdma_g0p1_regs_retention, ARRAY_SIZE(gdma_g0p1_regs_retention)},

View File

@@ -1,5 +1,5 @@
 /**
- * SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -1357,7 +1357,7 @@ typedef struct {
     ahb_dma_rx_crc_data_en_addr_chn_reg_t crc_data_en_addr;
     ahb_dma_rx_ch_arb_weigh_chn_reg_t ch_arb_weigh;
     ahb_dma_rx_arb_weigh_opt_dir_chn_reg_t arb_weigh_opt;
-} ahb_dma_in_crc_chn_reg_t;
+} ahb_dma_in_crc_arb_chn_reg_t;
 typedef struct {
     ahb_dma_out_crc_init_data_chn_reg_t crc_init_data;
@@ -1370,7 +1370,7 @@ typedef struct {
     ahb_dma_tx_crc_data_en_addr_chn_reg_t crc_data_en_addr;
     ahb_dma_tx_ch_arb_weigh_chn_reg_t ch_arb_weigh;
     ahb_dma_tx_arb_weigh_opt_dir_chn_reg_t arb_weigh_opt;
-} ahb_dma_out_crc_chn_reg_t;
+} ahb_dma_out_crc_arb_chn_reg_t;
 typedef struct {
     volatile ahb_dma_in_chn_reg_t in;
@@ -1388,8 +1388,8 @@ typedef struct {
     uint32_t reserved_06c;
     volatile ahb_dma_chn_reg_t channel[3];
     uint32_t reserved_2b0[3];
-    volatile ahb_dma_out_crc_chn_reg_t out_crc[3];
-    volatile ahb_dma_in_crc_chn_reg_t in_crc[3];
+    volatile ahb_dma_out_crc_arb_chn_reg_t out_crc_arb[3];
+    volatile ahb_dma_in_crc_arb_chn_reg_t in_crc_arb[3];
     volatile ahb_dma_in_link_addr_chn_reg_t in_link_addr[3];
     volatile ahb_dma_out_link_addr_chn_reg_t out_link_addr[3];
     volatile ahb_dma_intr_mem_start_addr_reg_t intr_mem_start_addr;

View File

@@ -36,7 +36,7 @@ typedef struct {
     uint32_t link_num;
 } gdma_chx_reg_ctx_link_t;
-extern const gdma_chx_reg_ctx_link_t gdma_chx_regs_retention[SOC_GDMA_PAIRS_PER_GROUP_MAX][SOC_GDMA_PAIRS_PER_GROUP_MAX];
+extern const gdma_chx_reg_ctx_link_t gdma_chx_regs_retention[SOC_GDMA_NUM_GROUPS_MAX][SOC_GDMA_PAIRS_PER_GROUP_MAX];
 #endif
 #endif