// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <stdint.h>
#include <stdbool.h>
#include "soc/soc_caps.h"
#include "soc/gdma_struct.h"
#include "soc/gdma_reg.h"

#ifdef __cplusplus
extern "C" {
#endif

#define GDMA_LL_GET_HW(id) (((id) == 0) ? (&GDMA) : NULL)

#define GDMA_LL_RX_EVENT_MASK (0x3FF)
#define GDMA_LL_TX_EVENT_MASK (0xFF)

#define GDMA_LL_EVENT_TX_L3_FIFO_UDF (1<<7)
#define GDMA_LL_EVENT_TX_L3_FIFO_OVF (1<<6)
#define GDMA_LL_EVENT_TX_L1_FIFO_UDF (1<<5)
#define GDMA_LL_EVENT_TX_L1_FIFO_OVF (1<<4)
#define GDMA_LL_EVENT_TX_TOTAL_EOF (1<<3)
#define GDMA_LL_EVENT_TX_DESC_ERROR (1<<2)
#define GDMA_LL_EVENT_TX_EOF (1<<1)
#define GDMA_LL_EVENT_TX_DONE (1<<0)

#define GDMA_LL_EVENT_RX_L3_FIFO_UDF (1<<9)
#define GDMA_LL_EVENT_RX_L3_FIFO_OVF (1<<8)
#define GDMA_LL_EVENT_RX_L1_FIFO_UDF (1<<7)
#define GDMA_LL_EVENT_RX_L1_FIFO_OVF (1<<6)
#define GDMA_LL_EVENT_RX_WATER_MARK (1<<5)
#define GDMA_LL_EVENT_RX_DESC_EMPTY (1<<4)
#define GDMA_LL_EVENT_RX_DESC_ERROR (1<<3)
#define GDMA_LL_EVENT_RX_ERR_EOF (1<<2)
#define GDMA_LL_EVENT_RX_SUC_EOF (1<<1)
#define GDMA_LL_EVENT_RX_DONE (1<<0)

///////////////////////////////////// Common /////////////////////////////////////////
/**
 * @brief Enable DMA channel M2M mode (TX channel n forwards data to RX channel n), disabled by default
 */
static inline void gdma_ll_enable_m2m_mode(gdma_dev_t *dev, uint32_t channel, bool enable)
{
    dev->in[channel].conf0.mem_trans_en = enable;
    if (enable) {
        // To enable M2M mode, the TX channel has to select the same peripheral as the RX channel,
        // and the selection has to be a valid value (0 is used here)
        dev->in[channel].peri_sel.sel = 0;
        dev->out[channel].peri_sel.sel = 0;
    }
}
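
/*
 * Illustrative M2M sketch (not part of the LL API): a minimal memory-to-memory transfer
 * on channel 0 of the single instance returned by GDMA_LL_GET_HW(0). `tx_link` and
 * `rx_link` are assumed to be DMA descriptor lists prepared by the caller; the
 * descriptor layout itself is out of scope for this header.
 *
 *     gdma_dev_t *dev = GDMA_LL_GET_HW(0);
 *     gdma_ll_tx_reset_channel(dev, 0);
 *     gdma_ll_rx_reset_channel(dev, 0);
 *     gdma_ll_enable_m2m_mode(dev, 0, true);
 *     gdma_ll_rx_set_desc_addr(dev, 0, (uint32_t)rx_link);
 *     gdma_ll_tx_set_desc_addr(dev, 0, (uint32_t)tx_link);
 *     gdma_ll_rx_start(dev, 0);   // start the receiving side first
 *     gdma_ll_tx_start(dev, 0);   // outgoing data is looped back into the RX channel
 */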

/**
 * @brief Enable DMA clock gating
 */
static inline void gdma_ll_enable_clock(gdma_dev_t *dev, bool enable)
{
    dev->misc_conf.clk_en = enable;
}

///////////////////////////////////// RX /////////////////////////////////////////
/**
 * @brief Get DMA RX channel interrupt status word
 */
static inline uint32_t gdma_ll_rx_get_interrupt_status(gdma_dev_t *dev, uint32_t channel)
{
    return dev->in[channel].int_st.val;
}

/**
 * @brief Enable DMA RX channel interrupt
 */
static inline void gdma_ll_rx_enable_interrupt(gdma_dev_t *dev, uint32_t channel, uint32_t mask, bool enable)
{
    if (enable) {
        dev->in[channel].int_ena.val |= mask;
    } else {
        dev->in[channel].int_ena.val &= ~mask;
    }
}

/**
 * @brief Clear DMA RX channel interrupt
 */
static inline void gdma_ll_rx_clear_interrupt_status(gdma_dev_t *dev, uint32_t channel, uint32_t mask)
{
    dev->in[channel].int_clr.val = mask;
}

/**
 * @brief Get DMA RX channel interrupt status register address
 */
static inline volatile void *gdma_ll_rx_get_interrupt_status_reg(gdma_dev_t *dev, uint32_t channel)
{
    return (volatile void *)(&dev->in[channel].int_st);
}
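
/*
 * Illustrative interrupt-handling sketch (assumes `dev` is GDMA_LL_GET_HW(0) and
 * channel 0 is already configured; interrupt allocation and ISR registration are
 * handled by upper layers, not by this LL header):
 *
 *     gdma_ll_rx_enable_interrupt(dev, 0, GDMA_LL_EVENT_RX_SUC_EOF, true);
 *     // ... typically inside the ISR:
 *     uint32_t status = gdma_ll_rx_get_interrupt_status(dev, 0);
 *     if (status & GDMA_LL_EVENT_RX_SUC_EOF) {
 *         uint32_t eof_desc = gdma_ll_rx_get_success_eof_desc_addr(dev, 0);
 *         // process the descriptor chain that ends at `eof_desc` ...
 *     }
 *     gdma_ll_rx_clear_interrupt_status(dev, 0, status);
 */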

/**
 * @brief Enable DMA RX channel to check the owner bit in the descriptor, disabled by default
 */
static inline void gdma_ll_rx_enable_owner_check(gdma_dev_t *dev, uint32_t channel, bool enable)
{
    dev->in[channel].conf1.in_check_owner = enable;
}

/**
 * @brief Enable DMA RX channel burst reading data, disabled by default
 */
static inline void gdma_ll_rx_enable_data_burst(gdma_dev_t *dev, uint32_t channel, bool enable)
{
    dev->in[channel].conf0.in_data_burst_en = enable;
}

/**
 * @brief Enable DMA RX channel burst reading descriptor link, disabled by default
 */
static inline void gdma_ll_rx_enable_descriptor_burst(gdma_dev_t *dev, uint32_t channel, bool enable)
{
    dev->in[channel].conf0.indscr_burst_en = enable;
}

/**
 * @brief Reset DMA RX channel FSM and FIFO pointer
 */
static inline void gdma_ll_rx_reset_channel(gdma_dev_t *dev, uint32_t channel)
{
    dev->in[channel].conf0.in_rst = 1;
    dev->in[channel].conf0.in_rst = 0;
}

/**
 * @brief Set DMA RX channel memory block size
 * @param size_index Supported values: GDMA_IN_EXT_MEM_BK_SIZE_16B, GDMA_IN_EXT_MEM_BK_SIZE_32B
 */
static inline void gdma_ll_rx_set_block_size_psram(gdma_dev_t *dev, uint32_t channel, uint32_t size_index)
{
    dev->in[channel].conf1.in_ext_mem_bk_size = size_index;
}

/**
 * @brief Set the water mark for RX channel, default value is 12
 */
static inline void gdma_ll_rx_set_water_mark(gdma_dev_t *dev, uint32_t channel, uint32_t water_mark)
{
    dev->in[channel].conf1.dma_infifo_full_thrs = water_mark;
}

/**
 * @brief Check if DMA RX FIFO is full
 * @param fifo_level (1,2,3) <=> (L1, L2, L3)
 */
static inline bool gdma_ll_rx_is_fifo_full(gdma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
    // the full flags of the L1/L2/L3 FIFOs sit at the even bit positions of the status word
    return dev->in[channel].infifo_status.val & (1 << 2 * (fifo_level - 1));
}

/**
 * @brief Check if DMA RX FIFO is empty
 * @param fifo_level (1,2,3) <=> (L1, L2, L3)
 */
static inline bool gdma_ll_rx_is_fifo_empty(gdma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
    // the empty flags sit at the odd bit positions, right above the corresponding full flags
    return dev->in[channel].infifo_status.val & (1 << (2 * (fifo_level - 1) + 1));
}

/**
 * @brief Get number of bytes in RX FIFO (L1, L2, L3)
 * @param fifo_level (1,2,3) <=> (L1, L2, L3)
 */
static inline uint32_t gdma_ll_rx_get_fifo_bytes(gdma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
    switch (fifo_level) {
    case 1:
        return dev->in[channel].infifo_status.infifo_cnt_l1;
    case 2:
        return dev->in[channel].infifo_status.infifo_cnt_l2;
    case 3:
        return dev->in[channel].infifo_status.infifo_cnt_l3;
    default:
        return 0;
    }
}

/**
 * @brief Pop data from DMA RX FIFO
 */
static inline uint32_t gdma_ll_rx_pop_data(gdma_dev_t *dev, uint32_t channel)
{
    dev->in[channel].pop.infifo_pop = 1;
    return dev->in[channel].pop.infifo_rdata;
}
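
/*
 * Illustrative sketch of draining the RX L1 FIFO by software (assumes `dev` and
 * channel 0 as in the earlier sketches, and that the channel is not actively
 * feeding a descriptor list at the same time):
 *
 *     while (!gdma_ll_rx_is_fifo_empty(dev, 0, 1)) {
 *         uint32_t word = gdma_ll_rx_pop_data(dev, 0);
 *         // consume `word` ...
 *     }
 */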

/**
 * @brief Set the descriptor link base address for RX channel
 */
static inline void gdma_ll_rx_set_desc_addr(gdma_dev_t *dev, uint32_t channel, uint32_t addr)
{
    dev->in[channel].link.addr = addr;
}

/**
 * @brief Start dealing with RX descriptors
 */
static inline void gdma_ll_rx_start(gdma_dev_t *dev, uint32_t channel)
{
    dev->in[channel].link.start = 1;
}

/**
 * @brief Stop dealing with RX descriptors
 */
static inline void gdma_ll_rx_stop(gdma_dev_t *dev, uint32_t channel)
{
    dev->in[channel].link.stop = 1;
}

/**
 * @brief Restart a new inlink right after the last descriptor
 */
static inline void gdma_ll_rx_restart(gdma_dev_t *dev, uint32_t channel)
{
    dev->in[channel].link.restart = 1;
}

/**
 * @brief Enable DMA RX channel to return the address of the current descriptor when it receives an error
 */
static inline void gdma_ll_rx_enable_auto_return(gdma_dev_t *dev, uint32_t channel, bool enable)
{
    dev->in[channel].link.auto_ret = enable;
}

/**
 * @brief Check if DMA RX FSM is in IDLE state
 */
static inline bool gdma_ll_rx_is_fsm_idle(gdma_dev_t *dev, uint32_t channel)
{
    return dev->in[channel].link.park;
}

/**
 * @brief Get RX success EOF descriptor's address
 */
static inline uint32_t gdma_ll_rx_get_success_eof_desc_addr(gdma_dev_t *dev, uint32_t channel)
{
    return dev->in[channel].suc_eof_des_addr;
}

/**
 * @brief Get RX error EOF descriptor's address
 */
static inline uint32_t gdma_ll_rx_get_error_eof_desc_addr(gdma_dev_t *dev, uint32_t channel)
{
    return dev->in[channel].err_eof_des_addr;
}

/**
 * @brief Get current RX descriptor's address
 */
static inline uint32_t gdma_ll_rx_get_current_desc_addr(gdma_dev_t *dev, uint32_t channel)
{
    return dev->in[channel].dscr;
}

/**
 * @brief Set weight for DMA RX channel
 */
static inline void gdma_ll_rx_set_weight(gdma_dev_t *dev, uint32_t channel, uint32_t weight)
{
    dev->in[channel].wight.rx_weight = weight;
}

/**
 * @brief Set priority for DMA RX channel
 */
static inline void gdma_ll_rx_set_priority(gdma_dev_t *dev, uint32_t channel, uint32_t prio)
{
    dev->in[channel].pri.rx_pri = prio;
}

/**
 * @brief Connect DMA RX channel to a given peripheral
 */
static inline void gdma_ll_rx_connect_to_periph(gdma_dev_t *dev, uint32_t channel, int periph_id)
{
    dev->in[channel].peri_sel.sel = periph_id;
}

/**
 * @brief Extend the L2 FIFO size for RX channel
 * @note By default, the L2 FIFO size is SOC_GDMA_L2_FIFO_BASE_SIZE bytes. It is suggested to extend it to twice the block size when accessing PSRAM.
 * @note `size_in_bytes` should be aligned to 8 and be larger than SOC_GDMA_L2_FIFO_BASE_SIZE
 */
static inline void gdma_ll_rx_extend_l2_fifo_size_to(gdma_dev_t *dev, uint32_t channel, uint32_t size_in_bytes)
{
    if (size_in_bytes > SOC_GDMA_L2_FIFO_BASE_SIZE) {
        dev->in[channel].sram_size.in_size = (size_in_bytes - SOC_GDMA_L2_FIFO_BASE_SIZE) / 8;
    }
}
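
/*
 * Illustrative PSRAM configuration sketch for an RX channel, following the notes above:
 * pick a 32-byte external-memory block size and extend the L2 FIFO to twice that block
 * size (64 bytes, which is 8-byte aligned and assumed to be larger than
 * SOC_GDMA_L2_FIFO_BASE_SIZE on this target):
 *
 *     gdma_ll_rx_set_block_size_psram(dev, 0, GDMA_IN_EXT_MEM_BK_SIZE_32B);
 *     gdma_ll_rx_extend_l2_fifo_size_to(dev, 0, 64);
 */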

///////////////////////////////////// TX /////////////////////////////////////////
/**
 * @brief Get DMA TX channel interrupt status word
 */
static inline uint32_t gdma_ll_tx_get_interrupt_status(gdma_dev_t *dev, uint32_t channel)
{
    return dev->out[channel].int_st.val;
}

/**
 * @brief Enable DMA TX channel interrupt
 */
static inline void gdma_ll_tx_enable_interrupt(gdma_dev_t *dev, uint32_t channel, uint32_t mask, bool enable)
{
    if (enable) {
        dev->out[channel].int_ena.val |= mask;
    } else {
        dev->out[channel].int_ena.val &= ~mask;
    }
}

/**
 * @brief Clear DMA TX channel interrupt
 */
static inline void gdma_ll_tx_clear_interrupt_status(gdma_dev_t *dev, uint32_t channel, uint32_t mask)
{
    dev->out[channel].int_clr.val = mask;
}

/**
 * @brief Get DMA TX channel interrupt status register address
 */
static inline volatile void *gdma_ll_tx_get_interrupt_status_reg(gdma_dev_t *dev, uint32_t channel)
{
    return (volatile void *)(&dev->out[channel].int_st);
}

/**
 * @brief Enable DMA TX channel to check the owner bit in the descriptor, disabled by default
 */
static inline void gdma_ll_tx_enable_owner_check(gdma_dev_t *dev, uint32_t channel, bool enable)
{
    dev->out[channel].conf1.out_check_owner = enable;
}

/**
 * @brief Enable DMA TX channel burst sending data, disabled by default
 */
static inline void gdma_ll_tx_enable_data_burst(gdma_dev_t *dev, uint32_t channel, bool enable)
{
    dev->out[channel].conf0.out_data_burst_en = enable;
}

/**
 * @brief Enable DMA TX channel burst reading descriptor link, disabled by default
 */
static inline void gdma_ll_tx_enable_descriptor_burst(gdma_dev_t *dev, uint32_t channel, bool enable)
{
    dev->out[channel].conf0.outdscr_burst_en = enable;
}

/**
 * @brief Set TX channel EOF mode
 */
static inline void gdma_ll_tx_set_eof_mode(gdma_dev_t *dev, uint32_t channel, uint32_t mode)
{
    dev->out[channel].conf0.out_eof_mode = mode;
}

/**
 * @brief Enable DMA TX channel to automatically write results back to the descriptor after all data has been sent out, disabled by default
 */
static inline void gdma_ll_tx_enable_auto_write_back(gdma_dev_t *dev, uint32_t channel, bool enable)
{
    dev->out[channel].conf0.out_auto_wrback = enable;
}

/**
 * @brief Reset DMA TX channel FSM and FIFO pointer
 */
static inline void gdma_ll_tx_reset_channel(gdma_dev_t *dev, uint32_t channel)
{
    dev->out[channel].conf0.out_rst = 1;
    dev->out[channel].conf0.out_rst = 0;
}

/**
 * @brief Set DMA TX channel memory block size
 * @param size_index Supported values: GDMA_OUT_EXT_MEM_BK_SIZE_16B, GDMA_OUT_EXT_MEM_BK_SIZE_32B
 */
static inline void gdma_ll_tx_set_block_size_psram(gdma_dev_t *dev, uint32_t channel, uint32_t size_index)
{
    dev->out[channel].conf1.out_ext_mem_bk_size = size_index;
}

/**
 * @brief Check if DMA TX FIFO is full
 * @param fifo_level (1,2,3) <=> (L1, L2, L3)
 */
static inline bool gdma_ll_tx_is_fifo_full(gdma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
    // the full flags of the L1/L2/L3 FIFOs sit at the even bit positions of the status word
    return dev->out[channel].outfifo_status.val & (1 << 2 * (fifo_level - 1));
}

/**
 * @brief Check if DMA TX FIFO is empty
 * @param fifo_level (1,2,3) <=> (L1, L2, L3)
 */
static inline bool gdma_ll_tx_is_fifo_empty(gdma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
    // the empty flags sit at the odd bit positions, right above the corresponding full flags
    return dev->out[channel].outfifo_status.val & (1 << (2 * (fifo_level - 1) + 1));
}

/**
 * @brief Get number of bytes in TX FIFO (L1, L2, L3)
 * @param fifo_level (1,2,3) <=> (L1, L2, L3)
 */
static inline uint32_t gdma_ll_tx_get_fifo_bytes(gdma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
    switch (fifo_level) {
    case 1:
        return dev->out[channel].outfifo_status.outfifo_cnt_l1;
    case 2:
        return dev->out[channel].outfifo_status.outfifo_cnt_l2;
    case 3:
        return dev->out[channel].outfifo_status.outfifo_cnt_l3;
    default:
        return 0;
    }
}

/**
 * @brief Push data into DMA TX FIFO
 */
static inline void gdma_ll_tx_push_data(gdma_dev_t *dev, uint32_t channel, uint32_t data)
{
    dev->out[channel].push.outfifo_wdata = data;
    dev->out[channel].push.outfifo_push = 1;
}

/**
 * @brief Set the descriptor link base address for TX channel
 */
static inline void gdma_ll_tx_set_desc_addr(gdma_dev_t *dev, uint32_t channel, uint32_t addr)
{
    dev->out[channel].link.addr = addr;
}

/**
 * @brief Start dealing with TX descriptors
 */
static inline void gdma_ll_tx_start(gdma_dev_t *dev, uint32_t channel)
{
    dev->out[channel].link.start = 1;
}
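
/*
 * Illustrative blocking-transmit sketch (assumes `dev`, channel 0, and a valid
 * descriptor list `tx_link` as in the earlier sketches, with the channel already
 * connected to a peripheral or placed in M2M mode; a real driver would react to the
 * interrupt status in an ISR instead of polling):
 *
 *     gdma_ll_tx_set_desc_addr(dev, 0, (uint32_t)tx_link);
 *     gdma_ll_tx_start(dev, 0);
 *     while (!(gdma_ll_tx_get_interrupt_status(dev, 0) & GDMA_LL_EVENT_TX_EOF)) {
 *         // busy-wait until the EOF descriptor has been processed
 *     }
 *     gdma_ll_tx_clear_interrupt_status(dev, 0, GDMA_LL_EVENT_TX_EOF);
 */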

/**
 * @brief Stop dealing with TX descriptors
 */
static inline void gdma_ll_tx_stop(gdma_dev_t *dev, uint32_t channel)
{
    dev->out[channel].link.stop = 1;
}

/**
 * @brief Restart a new outlink right after the last descriptor
 */
static inline void gdma_ll_tx_restart(gdma_dev_t *dev, uint32_t channel)
{
    dev->out[channel].link.restart = 1;
}

/**
 * @brief Check if DMA TX FSM is in IDLE state
 */
static inline bool gdma_ll_tx_is_fsm_idle(gdma_dev_t *dev, uint32_t channel)
{
    return dev->out[channel].link.park;
}

/**
 * @brief Get TX EOF descriptor's address
 */
static inline uint32_t gdma_ll_tx_get_eof_desc_addr(gdma_dev_t *dev, uint32_t channel)
{
    return dev->out[channel].eof_des_addr;
}

/**
 * @brief Get current TX descriptor's address
 */
static inline uint32_t gdma_ll_tx_get_current_desc_addr(gdma_dev_t *dev, uint32_t channel)
{
    return dev->out[channel].dscr;
}

/**
 * @brief Set weight for DMA TX channel
 */
static inline void gdma_ll_tx_set_weight(gdma_dev_t *dev, uint32_t channel, uint32_t weight)
{
    dev->out[channel].wight.tx_weight = weight;
}

/**
 * @brief Set priority for DMA TX channel
 */
static inline void gdma_ll_tx_set_priority(gdma_dev_t *dev, uint32_t channel, uint32_t prio)
{
    dev->out[channel].pri.tx_pri = prio;
}

/**
 * @brief Connect DMA TX channel to a given peripheral
 */
static inline void gdma_ll_tx_connect_to_periph(gdma_dev_t *dev, uint32_t channel, int periph_id)
{
    dev->out[channel].peri_sel.sel = periph_id;
}

/**
 * @brief Extend the L2 FIFO size for TX channel
 * @note By default, the L2 FIFO size is SOC_GDMA_L2_FIFO_BASE_SIZE bytes. It is suggested to extend it to twice the block size when accessing PSRAM.
 * @note `size_in_bytes` should be aligned to 8 and be larger than SOC_GDMA_L2_FIFO_BASE_SIZE
 */
static inline void gdma_ll_tx_extend_fifo_size_to(gdma_dev_t *dev, uint32_t channel, uint32_t size_in_bytes)
{
    if (size_in_bytes > SOC_GDMA_L2_FIFO_BASE_SIZE) {
        dev->out[channel].sram_size.out_size = (size_in_bytes - SOC_GDMA_L2_FIFO_BASE_SIZE) / 8;
    }
}
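
/*
 * Illustrative TX-side PSRAM configuration, mirroring the RX example: a 16-byte block
 * size paired with an L2 FIFO extended to twice that size (32 bytes, 8-byte aligned and
 * assumed to be larger than SOC_GDMA_L2_FIFO_BASE_SIZE on this target):
 *
 *     gdma_ll_tx_set_block_size_psram(dev, 0, GDMA_OUT_EXT_MEM_BK_SIZE_16B);
 *     gdma_ll_tx_extend_fifo_size_to(dev, 0, 32);
 */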

#ifdef __cplusplus
}
#endif