feat(esp_hw_support/dma2d): Add 2D-DMA support on ESP32P4

Song Ruo Jing 2023-08-21 17:04:55 +08:00
parent a4268b88c5
commit 08eada7301
42 changed files with 4333 additions and 254 deletions

components/esp_hw_support/CMakeLists.txt

@@ -97,6 +97,10 @@ if(NOT BOOTLOADER_BUILD)
        list(APPEND srcs "dma/dw_gdma.c")
    endif()
    if(CONFIG_SOC_DMA2D_SUPPORTED)
        list(APPEND srcs "dma/dma2d.c")
    endif()
    if(CONFIG_SOC_SYSTIMER_SUPPORTED)
        list(APPEND srcs "port/${target}/systimer.c")
    endif()

components/esp_hw_support/Kconfig

@@ -348,7 +348,6 @@ menu "Hardware Settings"
            clock support isn't done yet. So with this option,
            we use xtal on FPGA as the clock source.
    # Invisible bringup bypass options for esp_hw_support component
    config ESP_BRINGUP_BYPASS_RANDOM_SETTING
        bool
        default y if !SOC_RNG_SUPPORTED

components/esp_hw_support/dma/Kconfig

@@ -55,3 +55,23 @@ menu "DW_GDMA Configurations"
            Whether to enable the debug log message for the DW_GDMA driver.
            Note that this option only controls the DW_GDMA driver log; it won't affect other drivers.
endmenu # DW_GDMA Configurations
menu "2D-DMA Configurations"
depends on SOC_DMA2D_SUPPORTED
config DMA2D_OPERATION_FUNC_IN_IRAM
bool "Place 2D-DMA operation functions into IRAM"
default n
help
Place 2D-DMA all operation functions, including control functions (e.g. start/stop/append/reset) and setter
functions (e.g. connect/strategy/callback registration) into IRAM, so that these functions can be IRAM-safe
and able to be called in the other IRAM interrupt context. It also helps optimizing the performance.
config DMA2D_ISR_IRAM_SAFE
bool "2D-DMA ISR IRAM-Safe"
default n
help
This will ensure the 2D-DMA interrupt handler is IRAM-Safe, allow to avoid flash
cache misses, and also be able to run whilst the cache is disabled.
(e.g. SPI Flash write).
endmenu # 2D-DMA Configurations

components/esp_hw_support/dma/dma2d.c

@@ -0,0 +1,979 @@
/*
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdlib.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include "esp_check.h"
#include "freertos/portmacro.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "esp_intr_alloc.h"
#include "esp_heap_caps.h"
#include "esp_memory_utils.h"
#include "esp_attr.h"
#include "esp_log.h"
#include "esp_private/periph_ctrl.h"
#include "dma2d_priv.h"
#include "esp_private/dma2d.h"
#include "hal/dma2d_hal.h"
#include "hal/dma2d_ll.h"
#include "soc/dma2d_channel.h"
#include "soc/dma2d_periph.h"
#include "soc/soc_caps.h"
#include "esp_bit_defs.h"
/**
* The 2D-DMA driver is designed with a pool & client model + queue design pattern.
*
 * Pools represent the groups of the 2D-DMA module, which contain the limited resources: the channels.
 * Clients represent the upper modules that consume the 2D-DMA channels, such as JPEG and PPA.
*
* Each pool has a queue to store the 2D-DMA transactions that are waiting to be processed.
*
 * The upper modules should register themselves as clients to a 2D-DMA pool, and then push their 2D-DMA
 * transactions into the pool queue. The driver continuously looks for the desired resources in the pool to
 * complete the transactions.
*/
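/*
 * A minimal usage sketch of this model from a client's perspective (illustrative only;
 * `my_on_job_picked` and `my_ctx` are hypothetical names, the real clients are upper
 * drivers such as JPEG and PPA):
 *
 *     dma2d_pool_config_t pool_cfg = { .pool_id = 0 };
 *     dma2d_pool_handle_t pool;
 *     ESP_ERROR_CHECK(dma2d_acquire_pool(&pool_cfg, &pool));
 *
 *     dma2d_trans_t *trans = heap_caps_calloc(1, SIZEOF_DMA2D_TRANS_T, MALLOC_CAP_INTERNAL);
 *     dma2d_trans_config_t trans_cfg = {
 *         .tx_channel_num = 1,
 *         .rx_channel_num = 1,
 *         .channel_flags = DMA2D_CHANNEL_FUNCTION_FLAG_SIBLING,
 *         .on_job_picked = my_on_job_picked,   // configures the acquired channels and starts them
 *         .user_config = &my_ctx,
 *     };
 *     ESP_ERROR_CHECK(dma2d_enqueue(pool, &trans_cfg, trans));
 *     // ...once the transaction has fully ended (e.g. the RX EOF callback fired):
 *     // free(trans); dma2d_release_pool(pool);
 */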
static const char *TAG = "dma2d";
typedef struct dma2d_platform_t {
_lock_t mutex; // platform level mutex lock to protect the dma2d_acquire_pool/dma2d_release_pool process
dma2d_group_t *groups[SOC_DMA2D_GROUPS]; // array of 2D-DMA group instances
int group_ref_counts[SOC_DMA2D_GROUPS]; // reference count used to protect group install/uninstall
} dma2d_platform_t;
// 2D-DMA driver platform
static dma2d_platform_t s_platform = {
.groups = {},
};
// extern 2D-DMA channel reserved mask variables to be ORed in the constructors
uint32_t dma2d_tx_channel_reserved_mask[SOC_DMA2D_GROUPS] = { [0 ... SOC_DMA2D_GROUPS - 1] = 0 };
uint32_t dma2d_rx_channel_reserved_mask[SOC_DMA2D_GROUPS] = { [0 ... SOC_DMA2D_GROUPS - 1] = 0 };
// The maximum number of channels required by a 2D-DMA transaction (a PPA Blend operation requires 2 TX + 1 RX)
#define DMA2D_MAX_CHANNEL_NUM_PER_TRANSACTION 3
/* This static function is not thread-safe; its caller should hold the group's spinlock */
static bool acquire_free_channels_for_trans(dma2d_group_t *dma2d_group, const dma2d_trans_config_t *trans_desc, dma2d_trans_channel_info_t *channel_handle_array)
{
bool found = true;
uint32_t idx = 0;
uint32_t bundled_tx_channel_mask = 0;
if (trans_desc->tx_channel_num > 0) {
uint32_t tx_free_channel_mask;
if (!trans_desc->specified_tx_channel_mask) {
tx_free_channel_mask = dma2d_group->tx_channel_free_mask;
tx_free_channel_mask &= (((trans_desc->channel_flags & DMA2D_CHANNEL_FUNCTION_FLAG_TX_REORDER) ? DMA2D_LL_TX_CHANNEL_SUPPORT_RO_MASK : UINT32_MAX) &
((trans_desc->channel_flags & DMA2D_CHANNEL_FUNCTION_FLAG_TX_CSC) ? DMA2D_LL_TX_CHANNEL_SUPPORT_CSC_MASK : UINT32_MAX));
tx_free_channel_mask &= ~dma2d_group->tx_channel_reserved_mask;
if (trans_desc->channel_flags & DMA2D_CHANNEL_FUNCTION_FLAG_SIBLING) {
uint32_t rx_channel_candidate = dma2d_group->rx_channel_free_mask &
((trans_desc->channel_flags & DMA2D_CHANNEL_FUNCTION_FLAG_RX_REORDER) ? DMA2D_LL_RX_CHANNEL_SUPPORT_RO_MASK : UINT32_MAX) &
((trans_desc->channel_flags & DMA2D_CHANNEL_FUNCTION_FLAG_RX_CSC) ? DMA2D_LL_RX_CHANNEL_SUPPORT_CSC_MASK : UINT32_MAX) &
~dma2d_group->rx_channel_reserved_mask;
tx_free_channel_mask &= rx_channel_candidate;
}
// As long as __builtin_popcount(tx_free_channel_mask) >= trans_desc->tx_channel_num, it can meet the criteria of "found"
} else {
tx_free_channel_mask = trans_desc->specified_tx_channel_mask & dma2d_group->tx_channel_free_mask;
// tx_free_channel_mask need to be exactly equal to trans_desc->specified_tx_channel_mask to meet the criteria of "found"
}
for (int i = 0; i < trans_desc->tx_channel_num; i++) {
if (tx_free_channel_mask) {
int channel_id = 31 - __builtin_clz(tx_free_channel_mask); // channel 0 has the most features, acquire other channels first if possible
tx_free_channel_mask &= ~(1 << channel_id);
dma2d_group->tx_channel_free_mask &= ~(1 << channel_id);
bundled_tx_channel_mask |= (1 << channel_id);
// Record channel status
memset(&dma2d_group->tx_chans[channel_id]->base.status, 0, sizeof(dma2d_group->tx_chans[channel_id]->base.status));
dma2d_group->tx_chans[channel_id]->base.status.periph_sel_id = -1;
if (trans_desc->channel_flags & DMA2D_CHANNEL_FUNCTION_FLAG_TX_REORDER) {
dma2d_group->tx_chans[channel_id]->base.status.reorder_en = true;
} else if (trans_desc->channel_flags & DMA2D_CHANNEL_FUNCTION_FLAG_SIBLING) {
dma2d_group->tx_chans[channel_id]->base.status.has_sibling = true;
}
channel_handle_array[idx].chan = &dma2d_group->tx_chans[channel_id]->base;
channel_handle_array[idx].dir = DMA2D_CHANNEL_DIRECTION_TX;
idx++;
} else {
found = false;
goto revert;
}
}
}
if (trans_desc->rx_channel_num > 0) {
uint32_t rx_free_channel_mask;
if (trans_desc->specified_rx_channel_mask) {
rx_free_channel_mask = trans_desc->specified_rx_channel_mask & dma2d_group->rx_channel_free_mask;
// rx_free_channel_mask need to be exactly equal to trans_desc->specified_rx_channel_mask to meet the criteria of "found"
} else if (trans_desc->channel_flags & DMA2D_CHANNEL_FUNCTION_FLAG_SIBLING) {
// rx channel has already been determined
rx_free_channel_mask = bundled_tx_channel_mask;
} else {
rx_free_channel_mask = dma2d_group->rx_channel_free_mask;
rx_free_channel_mask &= (((trans_desc->channel_flags & DMA2D_CHANNEL_FUNCTION_FLAG_RX_REORDER) ? DMA2D_LL_RX_CHANNEL_SUPPORT_RO_MASK : UINT32_MAX) &
((trans_desc->channel_flags & DMA2D_CHANNEL_FUNCTION_FLAG_RX_CSC) ? DMA2D_LL_RX_CHANNEL_SUPPORT_CSC_MASK : UINT32_MAX));
rx_free_channel_mask &= ~dma2d_group->rx_channel_reserved_mask;
// As long as __builtin_popcount(rx_free_channel_mask) >= trans_desc->rx_channel_num, it can meet the criteria of "found"
}
// At most one RX channel is required, so no for loop is needed
if (rx_free_channel_mask) {
int channel_id = 31 - __builtin_clz(rx_free_channel_mask); // channel 0 has full features, acquire other channels first if possible
rx_free_channel_mask &= ~(1 << channel_id);
dma2d_group->rx_channel_free_mask &= ~(1 << channel_id);
// Record channel status
memset(&dma2d_group->rx_chans[channel_id]->base.status, 0, sizeof(dma2d_group->rx_chans[channel_id]->base.status));
dma2d_group->rx_chans[channel_id]->base.status.periph_sel_id = -1;
if (trans_desc->channel_flags & DMA2D_CHANNEL_FUNCTION_FLAG_RX_REORDER) {
dma2d_group->rx_chans[channel_id]->base.status.reorder_en = true;
} else if (trans_desc->channel_flags & DMA2D_CHANNEL_FUNCTION_FLAG_SIBLING) {
dma2d_group->rx_chans[channel_id]->base.status.has_sibling = true;
}
channel_handle_array[idx].chan = &dma2d_group->rx_chans[channel_id]->base;
channel_handle_array[idx].dir = DMA2D_CHANNEL_DIRECTION_RX;
idx++;
// Record its bundled TX channels, to be freed in the isr
dma2d_rx_channel_t *rx_chan = dma2d_group->rx_chans[channel_id];
portENTER_CRITICAL_SAFE(&rx_chan->base.spinlock);
rx_chan->bundled_tx_channel_mask = bundled_tx_channel_mask;
portEXIT_CRITICAL_SAFE(&rx_chan->base.spinlock);
} else {
found = false;
goto revert;
}
}
revert:
if (!found) {
for (int i = 0; i < idx; i++) {
int free_channel_mask = (1 << channel_handle_array[i].chan->channel_id);
if (channel_handle_array[i].dir == DMA2D_CHANNEL_DIRECTION_TX) {
dma2d_group->tx_channel_free_mask |= free_channel_mask;
} else {
dma2d_group->rx_channel_free_mask |= free_channel_mask;
}
}
}
return found;
}
/* This function frees up the RX channel and its bundled TX channels, then checks whether there is a next transaction to be picked up */
static bool free_up_channels(dma2d_group_t *group, dma2d_rx_channel_t *rx_chan)
{
bool need_yield = false;
uint32_t channel_id = rx_chan->base.channel_id;
// 1. Clean up channels
uint32_t bundled_tx_channel_mask = rx_chan->bundled_tx_channel_mask;
// Disable RX channel interrupt
portENTER_CRITICAL_SAFE(&rx_chan->base.spinlock);
dma2d_ll_rx_enable_interrupt(group->hal.dev, channel_id, UINT32_MAX, false);
// Reset RX channel event related pointers and flags
rx_chan->on_recv_eof = NULL;
rx_chan->on_desc_done = NULL;
// Disconnect RX channel from the peripheral
dma2d_ll_rx_disconnect_from_periph(group->hal.dev, channel_id);
// Clear the pointer that points to the finished transaction
rx_chan->base.status.transaction = NULL;
portEXIT_CRITICAL_SAFE(&rx_chan->base.spinlock);
// For every bundled TX channel:
while (rx_chan->bundled_tx_channel_mask) {
uint32_t nbit = __builtin_ffs(rx_chan->bundled_tx_channel_mask) - 1;
rx_chan->bundled_tx_channel_mask &= ~(1 << nbit);
dma2d_tx_channel_t *tx_chan = group->tx_chans[nbit];
// Disable TX channel interrupt
portENTER_CRITICAL_SAFE(&tx_chan->base.spinlock);
dma2d_ll_tx_enable_interrupt(group->hal.dev, nbit, UINT32_MAX, false);
// Reset TX channel event related pointers
tx_chan->on_desc_done = NULL;
// Disconnect TX channel from the peripheral
dma2d_ll_tx_disconnect_from_periph(group->hal.dev, nbit);
// Clear the pointer that points to the finished transaction
tx_chan->base.status.transaction = NULL;
portEXIT_CRITICAL_SAFE(&tx_chan->base.spinlock);
}
// Channel functionality flags will be reset and assigned new values inside `acquire_free_channels_for_trans`
// Channel reset will always be done at `dma2d_connect` (i.e. when the channel is selected for a new transaction)
// 2. Check if next pending transaction in the tailq can start
bool channels_found = false;
const dma2d_trans_config_t *next_trans = NULL;
dma2d_trans_channel_info_t channel_handle_array[DMA2D_MAX_CHANNEL_NUM_PER_TRANSACTION];
portENTER_CRITICAL_SAFE(&group->spinlock);
group->tx_channel_free_mask |= bundled_tx_channel_mask;
group->rx_channel_free_mask |= (1 << channel_id);
int rx_periph_sel_id = group->rx_chans[channel_id]->base.status.periph_sel_id;
if (rx_periph_sel_id != -1 && ((1 << rx_periph_sel_id) & DMA2D_LL_CHANNEL_PERIPH_M2M_FREE_ID_MASK)) {
group->periph_m2m_free_id_mask |= (1 << rx_periph_sel_id); // release m2m periph_sel_id
}
dma2d_trans_t *next_trans_elm = TAILQ_FIRST(&group->pending_trans_tailq);
if (next_trans_elm) {
// There is a pending transaction
next_trans = next_trans_elm->desc;
channels_found = acquire_free_channels_for_trans(group, next_trans, channel_handle_array);
}
if (channels_found) {
TAILQ_REMOVE(&group->pending_trans_tailq, next_trans_elm, entry);
}
portEXIT_CRITICAL_SAFE(&group->spinlock);
if (channels_found) {
// If the transaction can be processed, let consumer handle the transaction
uint32_t total_channel_num = next_trans->tx_channel_num + next_trans->rx_channel_num;
// Store the acquired rx_chan into trans_elm (dma2d_trans_t) in case the upper driver later needs it to call `dma2d_force_end`
// The upper driver controls the life cycle of trans_elm
for (int i = 0; i < total_channel_num; i++) {
if (channel_handle_array[i].dir == DMA2D_CHANNEL_DIRECTION_RX) {
next_trans_elm->rx_chan = channel_handle_array[i].chan;
}
// Also save the transaction pointer
channel_handle_array[i].chan->status.transaction = next_trans_elm;
}
need_yield |= next_trans->on_job_picked(total_channel_num, channel_handle_array, next_trans->user_config);
}
return need_yield;
}
static NOINLINE_ATTR bool _dma2d_default_tx_isr(dma2d_group_t *group, int channel_id)
{
bool need_yield = false;
dma2d_tx_channel_t *tx_chan = group->tx_chans[channel_id];
dma2d_event_data_t edata = {
.transaction = tx_chan->base.status.transaction,
};
// Clear pending interrupt event
uint32_t intr_status = dma2d_ll_tx_get_interrupt_status(group->hal.dev, channel_id);
dma2d_ll_tx_clear_interrupt_status(group->hal.dev, channel_id, intr_status);
// Handle callback
if (intr_status & DMA2D_LL_EVENT_TX_DONE) {
if (tx_chan->on_desc_done) {
need_yield |= tx_chan->on_desc_done(&tx_chan->base, &edata, tx_chan->user_data);
}
}
return need_yield;
}
static NOINLINE_ATTR bool _dma2d_default_rx_isr(dma2d_group_t *group, int channel_id)
{
bool need_yield = false;
dma2d_rx_channel_t *rx_chan = group->rx_chans[channel_id];
dma2d_event_data_t edata = {
.transaction = rx_chan->base.status.transaction,
};
// Clear pending interrupt event
uint32_t intr_status = dma2d_ll_rx_get_interrupt_status(group->hal.dev, channel_id);
dma2d_ll_rx_clear_interrupt_status(group->hal.dev, channel_id, intr_status);
// Save RX channel EOF callback pointers temporarily, could be overwritten by new ones
dma2d_event_callback_t on_recv_eof = rx_chan->on_recv_eof;
void *user_data = rx_chan->user_data;
uint32_t suc_eof_desc_addr = dma2d_ll_rx_get_success_eof_desc_addr(group->hal.dev, channel_id);
// It is guaranteed in hardware that if SUC_EOF/ERR_EOF interrupt is raised, it will always be raised together with
// RX_DONE interrupt at the same time.
// When RX_DONE triggers, it may indicate the transaction is partially done; call the `on_desc_done` callback,
// allowing 2D-DMA channel operations on the currently acquired channels. The channel may continue running afterwards.
if (intr_status & DMA2D_LL_EVENT_RX_DONE) {
if (rx_chan->on_desc_done) {
need_yield |= rx_chan->on_desc_done(&rx_chan->base, &edata, user_data);
}
}
// If the last transaction completes (regardless of success), free the channels
if ((intr_status & DMA2D_LL_EVENT_RX_SUC_EOF) ||
(intr_status & DMA2D_LL_EVENT_RX_ERR_EOF) ||
(intr_status & DMA2D_LL_EVENT_RX_DESC_ERROR)) {
assert(dma2d_ll_rx_is_fsm_idle(group->hal.dev, channel_id));
need_yield |= free_up_channels(group, rx_chan);
}
// Handle last transaction's end callbacks (at this point, last transaction's channels are completely freed,
// therefore, we don't pass in channel handle to the callbacks anymore)
if (intr_status & DMA2D_LL_EVENT_RX_SUC_EOF) {
if (on_recv_eof) {
edata.rx_eof_desc_addr = suc_eof_desc_addr;
need_yield |= on_recv_eof(NULL, &edata, user_data);
}
}
return need_yield;
}
static void dma2d_default_isr(void *args)
{
dma2d_channel_t *chan = (dma2d_channel_t *)args;
dma2d_group_t *group = chan->group;
bool need_yield = false;
if (chan->direction == DMA2D_CHANNEL_DIRECTION_TX) {
need_yield |= _dma2d_default_tx_isr(group, chan->channel_id);
} else {
// When an RX channel interrupt triggers, we need to check whether any interrupt of its bundled TX channels
// has been triggered but not yet processed. If so, handle the TX interrupts first.
uint32_t bundled_tx_channel_mask = group->rx_chans[chan->channel_id]->bundled_tx_channel_mask;
while (bundled_tx_channel_mask) {
uint32_t chan_id = __builtin_ffs(bundled_tx_channel_mask) - 1;
bundled_tx_channel_mask &= ~(1 << chan_id);
need_yield |= _dma2d_default_tx_isr(group, chan_id);
}
need_yield |= _dma2d_default_rx_isr(group, chan->channel_id);
}
if (need_yield) {
portYIELD_FROM_ISR();
}
}
esp_err_t dma2d_acquire_pool(const dma2d_pool_config_t *config, dma2d_pool_handle_t *ret_pool)
{
esp_err_t ret = ESP_OK;
ESP_RETURN_ON_FALSE(config && ret_pool, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
ESP_RETURN_ON_FALSE(config->pool_id < SOC_DMA2D_GROUPS, ESP_ERR_INVALID_ARG, TAG, "invalid pool_id");
if (config->intr_priority) {
ESP_RETURN_ON_FALSE(1 << (config->intr_priority) & ESP_INTR_FLAG_LOWMED, ESP_ERR_INVALID_ARG, TAG,
"invalid interrupt priority: %" PRIu32, config->intr_priority);
}
int group_id = config->pool_id; // A pool is referring to a module group in hardware
_lock_acquire(&s_platform.mutex);
if (!s_platform.groups[group_id]) {
dma2d_group_t *pre_alloc_group = heap_caps_calloc(1, sizeof(dma2d_group_t), DMA2D_MEM_ALLOC_CAPS);
dma2d_tx_channel_t *pre_alloc_tx_channels = heap_caps_calloc(SOC_DMA2D_TX_CHANNELS_PER_GROUP, sizeof(dma2d_tx_channel_t), DMA2D_MEM_ALLOC_CAPS);
dma2d_rx_channel_t *pre_alloc_rx_channels = heap_caps_calloc(SOC_DMA2D_RX_CHANNELS_PER_GROUP, sizeof(dma2d_rx_channel_t), DMA2D_MEM_ALLOC_CAPS);
if (pre_alloc_group && pre_alloc_tx_channels && pre_alloc_rx_channels) {
pre_alloc_group->group_id = group_id;
pre_alloc_group->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
TAILQ_INIT(&pre_alloc_group->pending_trans_tailq);
pre_alloc_group->tx_channel_free_mask = (1 << SOC_DMA2D_TX_CHANNELS_PER_GROUP) - 1;
pre_alloc_group->rx_channel_free_mask = (1 << SOC_DMA2D_RX_CHANNELS_PER_GROUP) - 1;
pre_alloc_group->tx_channel_reserved_mask = dma2d_tx_channel_reserved_mask[group_id];
pre_alloc_group->rx_channel_reserved_mask = dma2d_rx_channel_reserved_mask[group_id];
pre_alloc_group->periph_m2m_free_id_mask = DMA2D_LL_CHANNEL_PERIPH_M2M_FREE_ID_MASK;
pre_alloc_group->intr_priority = -1;
for (int i = 0; i < SOC_DMA2D_TX_CHANNELS_PER_GROUP; i++) {
pre_alloc_group->tx_chans[i] = &pre_alloc_tx_channels[i];
dma2d_tx_channel_t *tx_chan = pre_alloc_group->tx_chans[i];
tx_chan->base.group = pre_alloc_group;
tx_chan->base.channel_id = i;
tx_chan->base.direction = DMA2D_CHANNEL_DIRECTION_TX;
tx_chan->base.spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
}
for (int i = 0; i < SOC_DMA2D_RX_CHANNELS_PER_GROUP; i++) {
pre_alloc_group->rx_chans[i] = &pre_alloc_rx_channels[i];
dma2d_rx_channel_t *rx_chan = pre_alloc_group->rx_chans[i];
rx_chan->base.group = pre_alloc_group;
rx_chan->base.channel_id = i;
rx_chan->base.direction = DMA2D_CHANNEL_DIRECTION_RX;
rx_chan->base.spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
}
s_platform.groups[group_id] = pre_alloc_group; // register to platform
// Enable bus clock for the 2D-DMA registers
PERIPH_RCC_ATOMIC() {
dma2d_ll_enable_bus_clock(group_id, true);
dma2d_ll_reset_register(group_id);
}
dma2d_hal_init(&pre_alloc_group->hal, group_id); // initialize HAL context
// Enable 2D-DMA module clock
dma2d_ll_hw_enable(s_platform.groups[group_id]->hal.dev, true);
// Configure 2D-DMA accessible memory range
dma2d_ll_set_accessible_mem_range(s_platform.groups[group_id]->hal.dev);
} else {
ret = ESP_ERR_NO_MEM;
free(pre_alloc_tx_channels);
free(pre_alloc_rx_channels);
free(pre_alloc_group);
}
}
// Tracks the number of consumers of 2D-DMA module (clients of the pool)
if (s_platform.groups[group_id]) {
s_platform.group_ref_counts[group_id]++;
}
// Guard against the allocation failure above before dereferencing the group
ESP_GOTO_ON_FALSE(s_platform.groups[group_id], ESP_ERR_NO_MEM, wrap_up, TAG, "no mem for 2D-DMA group");
// Allocate interrupts
// First figure out the interrupt priority
bool intr_priority_conflict = false;
if (s_platform.groups[group_id]->intr_priority == -1) {
s_platform.groups[group_id]->intr_priority = config->intr_priority;
} else if (config->intr_priority != 0) {
intr_priority_conflict = (s_platform.groups[group_id]->intr_priority != config->intr_priority);
}
ESP_GOTO_ON_FALSE(!intr_priority_conflict, ESP_ERR_INVALID_ARG, wrap_up, TAG, "intr_priority conflict, already is %d but attempt to %" PRIu32, s_platform.groups[group_id]->intr_priority, config->intr_priority);
uint32_t intr_flags = DMA2D_INTR_ALLOC_FLAGS;
if (s_platform.groups[group_id]->intr_priority) {
intr_flags |= (1 << s_platform.groups[group_id]->intr_priority);
} else {
intr_flags |= ESP_INTR_FLAG_LOWMED;
}
// Allocate TX and RX interrupts
if (s_platform.groups[group_id]) {
for (int i = 0; i < SOC_DMA2D_RX_CHANNELS_PER_GROUP; i++) {
dma2d_rx_channel_t *rx_chan = s_platform.groups[group_id]->rx_chans[i];
if (rx_chan->base.intr == NULL) {
ret = esp_intr_alloc_intrstatus(dma2d_periph_signals.groups[group_id].rx_irq_id[i],
intr_flags,
(uint32_t)dma2d_ll_rx_get_interrupt_status_reg(s_platform.groups[group_id]->hal.dev, i),
DMA2D_LL_RX_EVENT_MASK, dma2d_default_isr, &rx_chan->base, &rx_chan->base.intr);
if (ret != ESP_OK) {
ret = ESP_FAIL;
ESP_LOGE(TAG, "alloc interrupt failed on rx channel (%d, %d)", group_id, i);
goto wrap_up;
}
}
}
for (int i = 0; i < SOC_DMA2D_TX_CHANNELS_PER_GROUP; i++) {
dma2d_tx_channel_t *tx_chan = s_platform.groups[group_id]->tx_chans[i];
if (tx_chan->base.intr == NULL) {
ret = esp_intr_alloc_intrstatus(dma2d_periph_signals.groups[group_id].tx_irq_id[i],
intr_flags,
(uint32_t)dma2d_ll_tx_get_interrupt_status_reg(s_platform.groups[group_id]->hal.dev, i),
DMA2D_LL_TX_EVENT_MASK, dma2d_default_isr, &tx_chan->base, &tx_chan->base.intr);
if (ret != ESP_OK) {
ret = ESP_FAIL;
ESP_LOGE(TAG, "alloc interrupt failed on tx channel (%d, %d)", group_id, i);
goto wrap_up;
}
}
}
}
wrap_up:
_lock_release(&s_platform.mutex);
if (ret != ESP_OK && s_platform.groups[group_id]) {
dma2d_release_pool(s_platform.groups[group_id]);
}
*ret_pool = s_platform.groups[group_id];
return ret;
}
esp_err_t dma2d_release_pool(dma2d_pool_handle_t dma2d_pool)
{
esp_err_t ret = ESP_OK;
ESP_RETURN_ON_FALSE(dma2d_pool, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
dma2d_group_t *dma2d_group = dma2d_pool;
bool do_deinitialize = false;
int group_id = dma2d_group->group_id;
_lock_acquire(&s_platform.mutex);
// Remove a client from the 2D-DMA pool
s_platform.group_ref_counts[group_id]--;
// If the pool has no client, then release pool resources
if (s_platform.group_ref_counts[group_id] == 0) {
assert(s_platform.groups[group_id]);
do_deinitialize = true;
// There must be no transaction pending (this should be handled by upper (consumer) driver)
// Transaction tailq should be empty at this moment
if (!TAILQ_EMPTY(&dma2d_group->pending_trans_tailq)) {
ret = ESP_ERR_NOT_ALLOWED;
ESP_LOGE(TAG, "Still pending transaction in the pool");
s_platform.group_ref_counts[group_id]++;
goto err;
}
s_platform.groups[group_id] = NULL; // deregister from platform
// Disable 2D-DMA module clock
dma2d_ll_hw_enable(dma2d_group->hal.dev, false);
// Disable the bus clock for the 2D-DMA registers
PERIPH_RCC_ATOMIC() {
dma2d_ll_enable_bus_clock(group_id, false);
}
}
if (do_deinitialize) {
for (int i = 0; i < SOC_DMA2D_RX_CHANNELS_PER_GROUP; i++) {
if (dma2d_group->rx_chans[i]->base.intr) {
esp_intr_free(dma2d_group->rx_chans[i]->base.intr);
}
}
for (int i = 0; i < SOC_DMA2D_TX_CHANNELS_PER_GROUP; i++) {
if (dma2d_group->tx_chans[i]->base.intr) {
esp_intr_free(dma2d_group->tx_chans[i]->base.intr);
}
}
free(*(dma2d_group->tx_chans));
free(*(dma2d_group->rx_chans));
free(dma2d_group);
s_platform.groups[group_id] = NULL;
}
err:
_lock_release(&s_platform.mutex);
return ret;
}
esp_err_t dma2d_connect(dma2d_channel_handle_t dma2d_chan, const dma2d_trigger_t *trig_periph)
{
esp_err_t ret = ESP_OK;
ESP_GOTO_ON_FALSE_ISR(dma2d_chan && trig_periph, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
dma2d_group_t *group = dma2d_chan->group;
int channel_id = dma2d_chan->channel_id;
// Find periph_sel_id for the channel
int peri_sel_id = trig_periph->periph_sel_id;
portENTER_CRITICAL_SAFE(&group->spinlock);
if (trig_periph->periph == DMA2D_TRIG_PERIPH_M2M) {
// Assign peri_sel_id to one of {4, 5, 6, 7}
assert(dma2d_chan->status.has_sibling);
// First find out the peri_sel_id of its sibling channel
int sibling_periph_sel_id = -1;
if (dma2d_chan->direction == DMA2D_CHANNEL_DIRECTION_TX) {
sibling_periph_sel_id = group->rx_chans[channel_id]->base.status.periph_sel_id;
} else {
sibling_periph_sel_id = group->tx_chans[channel_id]->base.status.periph_sel_id;
}
if (peri_sel_id == -1) {
// Unspecified periph_sel_id, decide by the driver
if (sibling_periph_sel_id != -1 && ((1 << sibling_periph_sel_id) & DMA2D_LL_CHANNEL_PERIPH_M2M_FREE_ID_MASK)) {
peri_sel_id = sibling_periph_sel_id;
} else {
peri_sel_id = __builtin_ctz(group->periph_m2m_free_id_mask);
}
} else {
// Check whether specified periph_sel_id is valid
if (sibling_periph_sel_id != -1) {
if (sibling_periph_sel_id != peri_sel_id) {
peri_sel_id = -1; // Conflict id with its sibling channel
}
} else {
if (!((1 << peri_sel_id) & group->periph_m2m_free_id_mask & DMA2D_LL_CHANNEL_PERIPH_M2M_FREE_ID_MASK)) {
peri_sel_id = -1; // Occupied or invalid m2m peri_sel_id
}
}
}
}
if (peri_sel_id >= 0) {
dma2d_chan->status.periph_sel_id = peri_sel_id;
group->periph_m2m_free_id_mask &= ~(1 << peri_sel_id); // acquire m2m periph_sel_id
}
portEXIT_CRITICAL_SAFE(&group->spinlock);
ESP_GOTO_ON_FALSE_ISR(peri_sel_id >= 0, ESP_ERR_INVALID_ARG, err, TAG, "invalid periph_sel_id");
portENTER_CRITICAL_SAFE(&dma2d_chan->spinlock);
if (dma2d_chan->direction == DMA2D_CHANNEL_DIRECTION_TX) {
dma2d_ll_tx_stop(group->hal.dev, channel_id);
dma2d_hal_tx_reset_channel(&group->hal, channel_id);
dma2d_ll_tx_connect_to_periph(group->hal.dev, channel_id, trig_periph->periph, peri_sel_id);
// Configure reorder functionality
dma2d_ll_tx_enable_reorder(group->hal.dev, channel_id, dma2d_chan->status.reorder_en);
// Assume whether dscr_port should be enabled can be directly derived from trig_periph
dma2d_ll_tx_enable_dscr_port(group->hal.dev, channel_id, trig_periph->periph == DMA2D_TRIG_PERIPH_PPA_SR);
// Reset to certain settings
dma2d_ll_tx_enable_owner_check(group->hal.dev, channel_id, false);
dma2d_ll_tx_enable_auto_write_back(group->hal.dev, channel_id, false);
dma2d_ll_tx_enable_eof_mode(group->hal.dev, channel_id, true);
dma2d_ll_tx_enable_descriptor_burst(group->hal.dev, channel_id, false);
dma2d_ll_tx_set_data_burst_length(group->hal.dev, channel_id, DMA2D_DATA_BURST_LENGTH_128);
dma2d_ll_tx_enable_page_bound_wrap(group->hal.dev, channel_id, true);
dma2d_ll_tx_set_macro_block_size(group->hal.dev, channel_id, DMA2D_MACRO_BLOCK_SIZE_NONE);
if ((1 << channel_id) & DMA2D_LL_TX_CHANNEL_SUPPORT_CSC_MASK) {
dma2d_ll_tx_configure_color_space_conv(group->hal.dev, channel_id, DMA2D_CSC_TX_NONE);
}
// Disable and clear all interrupt events
dma2d_ll_tx_enable_interrupt(group->hal.dev, channel_id, UINT32_MAX, false); // disable all interrupt events
dma2d_ll_tx_clear_interrupt_status(group->hal.dev, channel_id, UINT32_MAX); // clear all pending events
} else {
dma2d_ll_rx_stop(group->hal.dev, channel_id);
dma2d_hal_rx_reset_channel(&group->hal, channel_id);
dma2d_ll_rx_connect_to_periph(group->hal.dev, channel_id, trig_periph->periph, peri_sel_id);
// Configure reorder functionality
dma2d_ll_rx_enable_reorder(group->hal.dev, channel_id, dma2d_chan->status.reorder_en);
// Assume whether dscr_port should be enabled can be directly derived from trig_periph
dma2d_ll_rx_enable_dscr_port(group->hal.dev, channel_id, trig_periph->periph == DMA2D_TRIG_PERIPH_PPA_SR);
// Reset to certain settings
dma2d_ll_rx_enable_owner_check(group->hal.dev, channel_id, false);
dma2d_ll_rx_enable_descriptor_burst(group->hal.dev, channel_id, false);
dma2d_ll_rx_set_data_burst_length(group->hal.dev, channel_id, DMA2D_DATA_BURST_LENGTH_128);
dma2d_ll_rx_enable_page_bound_wrap(group->hal.dev, channel_id, true);
dma2d_ll_rx_set_macro_block_size(group->hal.dev, channel_id, DMA2D_MACRO_BLOCK_SIZE_NONE);
if ((1 << channel_id) & DMA2D_LL_RX_CHANNEL_SUPPORT_CSC_MASK) {
dma2d_ll_rx_configure_color_space_conv(group->hal.dev, channel_id, DMA2D_CSC_RX_NONE);
}
// Disable and clear all interrupt events
dma2d_ll_rx_enable_interrupt(group->hal.dev, channel_id, UINT32_MAX, false); // disable all interrupt events
dma2d_ll_rx_clear_interrupt_status(group->hal.dev, channel_id, UINT32_MAX); // clear all pending events
}
portEXIT_CRITICAL_SAFE(&dma2d_chan->spinlock);
err:
return ret;
}
esp_err_t dma2d_register_tx_event_callbacks(dma2d_channel_handle_t dma2d_chan, dma2d_tx_event_callbacks_t *cbs, void *user_data)
{
esp_err_t ret = ESP_OK;
ESP_GOTO_ON_FALSE_ISR(dma2d_chan && dma2d_chan->direction == DMA2D_CHANNEL_DIRECTION_TX && cbs, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
ESP_GOTO_ON_FALSE_ISR(dma2d_chan->intr, ESP_ERR_INVALID_STATE, err, TAG, "tx channel intr not allocated");
dma2d_group_t *group = dma2d_chan->group;
dma2d_tx_channel_t *tx_chan = __containerof(dma2d_chan, dma2d_tx_channel_t, base);
#if CONFIG_DMA2D_ISR_IRAM_SAFE
if (cbs->on_desc_done) {
ESP_GOTO_ON_FALSE_ISR(esp_ptr_in_iram(cbs->on_desc_done),
ESP_ERR_INVALID_ARG, err, TAG, "on_desc_done not in IRAM");
}
if (user_data) {
ESP_GOTO_ON_FALSE_ISR(esp_ptr_internal(user_data),
ESP_ERR_INVALID_ARG, err, TAG, "user context not in internal RAM");
}
#endif
// Enable/Disable 2D-DMA interrupt events for the TX channel
uint32_t mask = 0;
portENTER_CRITICAL_SAFE(&tx_chan->base.spinlock);
if (cbs->on_desc_done) {
tx_chan->on_desc_done = cbs->on_desc_done;
mask |= DMA2D_LL_EVENT_TX_DONE;
}
tx_chan->user_data = user_data;
dma2d_ll_tx_enable_interrupt(group->hal.dev, tx_chan->base.channel_id, mask, true);
portEXIT_CRITICAL_SAFE(&tx_chan->base.spinlock);
err:
return ret;
}
esp_err_t dma2d_register_rx_event_callbacks(dma2d_channel_handle_t dma2d_chan, dma2d_rx_event_callbacks_t *cbs, void *user_data)
{
esp_err_t ret = ESP_OK;
ESP_GOTO_ON_FALSE_ISR(dma2d_chan && dma2d_chan->direction == DMA2D_CHANNEL_DIRECTION_RX && cbs, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
dma2d_group_t *group = dma2d_chan->group;
dma2d_rx_channel_t *rx_chan = __containerof(dma2d_chan, dma2d_rx_channel_t, base);
#if CONFIG_DMA2D_ISR_IRAM_SAFE
if (cbs->on_recv_eof) {
ESP_GOTO_ON_FALSE_ISR(esp_ptr_in_iram(cbs->on_recv_eof),
ESP_ERR_INVALID_ARG, err, TAG, "on_recv_eof not in IRAM");
}
if (cbs->on_desc_done) {
ESP_GOTO_ON_FALSE_ISR(esp_ptr_in_iram(cbs->on_desc_done),
ESP_ERR_INVALID_ARG, err, TAG, "on_desc_done not in IRAM");
}
if (user_data) {
ESP_GOTO_ON_FALSE_ISR(esp_ptr_internal(user_data),
ESP_ERR_INVALID_ARG, err, TAG, "user context not in internal RAM");
}
#endif
// Enable/Disable 2D-DMA interrupt events for the RX channel
uint32_t mask = 0;
portENTER_CRITICAL_SAFE(&rx_chan->base.spinlock);
if (cbs->on_recv_eof) {
rx_chan->on_recv_eof = cbs->on_recv_eof;
mask |= DMA2D_LL_EVENT_RX_SUC_EOF;
}
if (cbs->on_desc_done) {
rx_chan->on_desc_done = cbs->on_desc_done;
mask |= DMA2D_LL_EVENT_RX_DONE;
}
rx_chan->user_data = user_data;
dma2d_ll_rx_enable_interrupt(group->hal.dev, rx_chan->base.channel_id, mask, true);
portEXIT_CRITICAL_SAFE(&rx_chan->base.spinlock);
err:
return ret;
}
esp_err_t dma2d_set_desc_addr(dma2d_channel_handle_t dma2d_chan, intptr_t desc_base_addr)
{
esp_err_t ret = ESP_OK;
ESP_GOTO_ON_FALSE_ISR(dma2d_chan && desc_base_addr, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
// The 2D-DMA descriptor address requires 8-byte alignment and must not be in TCM (the TCM restriction is an IDF limitation)
ESP_GOTO_ON_FALSE_ISR((desc_base_addr & 0x7) == 0 && !esp_ptr_in_tcm((void *)desc_base_addr), ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
dma2d_group_t *group = dma2d_chan->group;
int channel_id = dma2d_chan->channel_id;
if (dma2d_chan->direction == DMA2D_CHANNEL_DIRECTION_TX) {
dma2d_ll_tx_set_desc_addr(group->hal.dev, channel_id, desc_base_addr);
} else {
dma2d_ll_rx_set_desc_addr(group->hal.dev, channel_id, desc_base_addr);
}
err:
return ret;
}
esp_err_t dma2d_start(dma2d_channel_handle_t dma2d_chan)
{
esp_err_t ret = ESP_OK;
ESP_GOTO_ON_FALSE_ISR(dma2d_chan, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
dma2d_group_t *group = dma2d_chan->group;
int channel_id = dma2d_chan->channel_id;
if (dma2d_chan->direction == DMA2D_CHANNEL_DIRECTION_RX) {
// The dma2d driver relies on entering the ISR to free the channels, so even if callbacks are not necessary
// in some cases, a minimum set of interrupt events must be enabled to trigger the ISR
dma2d_ll_rx_enable_interrupt(group->hal.dev, channel_id, DMA2D_RX_DEFAULT_INTR_FLAG, true);
}
if (dma2d_chan->direction == DMA2D_CHANNEL_DIRECTION_TX) {
assert(dma2d_ll_tx_is_fsm_idle(group->hal.dev, channel_id));
dma2d_ll_tx_start(group->hal.dev, channel_id);
} else {
assert(dma2d_ll_rx_is_fsm_idle(group->hal.dev, channel_id));
dma2d_ll_rx_start(group->hal.dev, channel_id);
}
err:
return ret;
}
esp_err_t dma2d_stop(dma2d_channel_handle_t dma2d_chan)
{
ESP_RETURN_ON_FALSE_ISR(dma2d_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
dma2d_group_t *group = dma2d_chan->group;
int channel_id = dma2d_chan->channel_id;
if (dma2d_chan->direction == DMA2D_CHANNEL_DIRECTION_TX) {
dma2d_ll_tx_stop(group->hal.dev, channel_id);
} else {
dma2d_ll_rx_stop(group->hal.dev, channel_id);
}
return ESP_OK;
}
esp_err_t dma2d_append(dma2d_channel_handle_t dma2d_chan)
{
ESP_RETURN_ON_FALSE_ISR(dma2d_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
dma2d_group_t *group = dma2d_chan->group;
int channel_id = dma2d_chan->channel_id;
if (dma2d_chan->direction == DMA2D_CHANNEL_DIRECTION_TX) {
dma2d_ll_tx_restart(group->hal.dev, channel_id);
} else {
dma2d_ll_rx_restart(group->hal.dev, channel_id);
}
return ESP_OK;
}
esp_err_t dma2d_reset(dma2d_channel_handle_t dma2d_chan)
{
ESP_RETURN_ON_FALSE_ISR(dma2d_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
dma2d_group_t *group = dma2d_chan->group;
int channel_id = dma2d_chan->channel_id;
portENTER_CRITICAL_SAFE(&dma2d_chan->spinlock);
if (dma2d_chan->direction == DMA2D_CHANNEL_DIRECTION_TX) {
dma2d_hal_tx_reset_channel(&group->hal, channel_id);
} else {
dma2d_hal_rx_reset_channel(&group->hal, channel_id);
}
portEXIT_CRITICAL_SAFE(&dma2d_chan->spinlock);
return ESP_OK;
}
esp_err_t dma2d_apply_strategy(dma2d_channel_handle_t dma2d_chan, const dma2d_strategy_config_t *config)
{
esp_err_t ret = ESP_OK;
ESP_GOTO_ON_FALSE_ISR(dma2d_chan && config, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
dma2d_group_t *group = dma2d_chan->group;
int channel_id = dma2d_chan->channel_id;
if (dma2d_chan->direction == DMA2D_CHANNEL_DIRECTION_TX) {
dma2d_ll_tx_enable_owner_check(group->hal.dev, channel_id, config->owner_check);
dma2d_ll_tx_enable_auto_write_back(group->hal.dev, channel_id, config->auto_update_desc);
dma2d_ll_tx_enable_eof_mode(group->hal.dev, channel_id, config->eof_till_data_popped);
} else {
dma2d_ll_rx_enable_owner_check(group->hal.dev, channel_id, config->owner_check);
// RX channels do not have control over auto_write_back (always auto_write_back) and eof_mode
}
err:
return ret;
}
esp_err_t dma2d_set_transfer_ability(dma2d_channel_handle_t dma2d_chan, const dma2d_transfer_ability_t *ability)
{
esp_err_t ret = ESP_OK;
ESP_GOTO_ON_FALSE_ISR(dma2d_chan && ability, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
ESP_GOTO_ON_FALSE_ISR(ability->data_burst_length < DMA2D_DATA_BURST_LENGTH_INVALID, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
ESP_GOTO_ON_FALSE_ISR(ability->mb_size < DMA2D_MACRO_BLOCK_SIZE_INVALID, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
dma2d_group_t *group = dma2d_chan->group;
int channel_id = dma2d_chan->channel_id;
if (dma2d_chan->direction == DMA2D_CHANNEL_DIRECTION_TX) {
dma2d_ll_tx_enable_descriptor_burst(group->hal.dev, channel_id, ability->desc_burst_en);
dma2d_ll_tx_set_data_burst_length(group->hal.dev, channel_id, ability->data_burst_length);
dma2d_ll_tx_enable_page_bound_wrap(group->hal.dev, channel_id, ability->data_burst_length != 1);
dma2d_ll_tx_set_macro_block_size(group->hal.dev, channel_id, ability->mb_size);
} else {
dma2d_ll_rx_enable_descriptor_burst(group->hal.dev, channel_id, ability->desc_burst_en);
dma2d_ll_rx_set_data_burst_length(group->hal.dev, channel_id, ability->data_burst_length);
dma2d_ll_rx_enable_page_bound_wrap(group->hal.dev, channel_id, ability->data_burst_length != 1);
dma2d_ll_rx_set_macro_block_size(group->hal.dev, channel_id, ability->mb_size);
}
err:
return ret;
}
esp_err_t dma2d_configure_color_space_conversion(dma2d_channel_handle_t dma2d_chan, const dma2d_csc_config_t *config)
{
esp_err_t ret = ESP_OK;
ESP_GOTO_ON_FALSE_ISR(dma2d_chan && config, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
dma2d_group_t *group = dma2d_chan->group;
int channel_id = dma2d_chan->channel_id;
if (dma2d_chan->direction == DMA2D_CHANNEL_DIRECTION_TX) {
ESP_GOTO_ON_FALSE_ISR((1 << channel_id) & DMA2D_LL_TX_CHANNEL_SUPPORT_CSC_MASK, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
ESP_GOTO_ON_FALSE_ISR(config->tx_csc_option < DMA2D_CSC_TX_INVALID, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
ESP_GOTO_ON_FALSE_ISR(config->post_scramble == 0, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
ESP_GOTO_ON_FALSE_ISR(config->pre_scramble == DMA2D_SCRAMBLE_ORDER_BYTE2_1_0 || (config->pre_scramble != DMA2D_SCRAMBLE_ORDER_BYTE2_1_0 && config->tx_csc_option != DMA2D_CSC_TX_NONE),
ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
dma2d_ll_tx_configure_color_space_conv(group->hal.dev, channel_id, config->tx_csc_option);
dma2d_ll_tx_set_csc_pre_scramble(group->hal.dev, channel_id, config->pre_scramble);
} else {
ESP_GOTO_ON_FALSE_ISR((1 << channel_id) & DMA2D_LL_RX_CHANNEL_SUPPORT_CSC_MASK, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
ESP_GOTO_ON_FALSE_ISR(config->rx_csc_option < DMA2D_CSC_RX_INVALID, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
ESP_GOTO_ON_FALSE_ISR((config->pre_scramble == DMA2D_SCRAMBLE_ORDER_BYTE2_1_0 && config->post_scramble == DMA2D_SCRAMBLE_ORDER_BYTE2_1_0) ||
((config->pre_scramble != DMA2D_SCRAMBLE_ORDER_BYTE2_1_0 || config->post_scramble != DMA2D_SCRAMBLE_ORDER_BYTE2_1_0) && config->rx_csc_option != DMA2D_CSC_RX_NONE),
ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
dma2d_ll_rx_configure_color_space_conv(group->hal.dev, channel_id, config->rx_csc_option);
dma2d_ll_rx_set_csc_pre_scramble(group->hal.dev, channel_id, config->pre_scramble);
dma2d_ll_rx_set_csc_post_scramble(group->hal.dev, channel_id, config->post_scramble);
}
err:
return ret;
}
esp_err_t dma2d_enqueue(dma2d_pool_handle_t dma2d_pool, const dma2d_trans_config_t *trans_desc, dma2d_trans_t *trans_placeholder)
{
esp_err_t ret = ESP_OK;
ESP_GOTO_ON_FALSE_ISR(dma2d_pool && trans_desc && trans_placeholder, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
ESP_GOTO_ON_FALSE_ISR(trans_desc->rx_channel_num <= 1, ESP_ERR_INVALID_ARG, err, TAG, "one trans at most has one rx channel");
uint32_t total_channel_num = trans_desc->tx_channel_num + trans_desc->rx_channel_num;
ESP_GOTO_ON_FALSE_ISR(total_channel_num <= DMA2D_MAX_CHANNEL_NUM_PER_TRANSACTION, ESP_ERR_INVALID_ARG, err, TAG, "too many channels acquiring for a trans");
dma2d_group_t *dma2d_group = dma2d_pool;
if (trans_desc->specified_tx_channel_mask || trans_desc->specified_rx_channel_mask) {
ESP_GOTO_ON_FALSE_ISR(
(trans_desc->specified_tx_channel_mask ? (trans_desc->specified_tx_channel_mask & dma2d_group->tx_channel_reserved_mask) : 1 ) &&
(trans_desc->specified_rx_channel_mask ? (trans_desc->specified_rx_channel_mask & dma2d_group->rx_channel_reserved_mask) : 1 ),
ESP_ERR_INVALID_ARG, err, TAG, "specified channel(s) not reserved");
ESP_GOTO_ON_FALSE_ISR(
(__builtin_popcount(trans_desc->specified_tx_channel_mask) == trans_desc->tx_channel_num) &&
(__builtin_popcount(trans_desc->specified_rx_channel_mask) == trans_desc->rx_channel_num) &&
(!trans_desc->tx_channel_num ? 1 : (trans_desc->specified_tx_channel_mask & ((trans_desc->channel_flags & DMA2D_CHANNEL_FUNCTION_FLAG_TX_REORDER) ? DMA2D_LL_TX_CHANNEL_SUPPORT_RO_MASK : UINT32_MAX) & ((trans_desc->channel_flags & DMA2D_CHANNEL_FUNCTION_FLAG_TX_CSC) ? DMA2D_LL_TX_CHANNEL_SUPPORT_CSC_MASK : UINT32_MAX))) &&
(!trans_desc->rx_channel_num ? 1 : (trans_desc->specified_rx_channel_mask & ((trans_desc->channel_flags & DMA2D_CHANNEL_FUNCTION_FLAG_RX_REORDER) ? DMA2D_LL_RX_CHANNEL_SUPPORT_RO_MASK : UINT32_MAX) & ((trans_desc->channel_flags & DMA2D_CHANNEL_FUNCTION_FLAG_RX_CSC) ? DMA2D_LL_RX_CHANNEL_SUPPORT_CSC_MASK : UINT32_MAX))) &&
((trans_desc->channel_flags & DMA2D_CHANNEL_FUNCTION_FLAG_SIBLING) ? (trans_desc->specified_tx_channel_mask == trans_desc->specified_rx_channel_mask) : 1),
ESP_ERR_INVALID_ARG, err, TAG, "specified channels cannot meet function requirements");
}
#if CONFIG_DMA2D_ISR_IRAM_SAFE
ESP_GOTO_ON_FALSE_ISR(trans_desc->on_job_picked && esp_ptr_in_iram(trans_desc->on_job_picked),
ESP_ERR_INVALID_ARG, err, TAG, "on_job_picked not in IRAM");
ESP_GOTO_ON_FALSE_ISR(trans_desc->user_config && esp_ptr_internal(trans_desc->user_config),
ESP_ERR_INVALID_ARG, err, TAG, "user context not in internal RAM");
#endif
trans_placeholder->desc = trans_desc;
dma2d_trans_channel_info_t channel_handle_array[DMA2D_MAX_CHANNEL_NUM_PER_TRANSACTION];
portENTER_CRITICAL_SAFE(&dma2d_group->spinlock);
bool enqueue = !acquire_free_channels_for_trans(dma2d_group, trans_desc, channel_handle_array);
if (enqueue) {
if (!trans_desc->specified_tx_channel_mask && !trans_desc->specified_rx_channel_mask) {
TAILQ_INSERT_TAIL(&dma2d_group->pending_trans_tailq, trans_placeholder, entry);
} else {
TAILQ_INSERT_HEAD(&dma2d_group->pending_trans_tailq, trans_placeholder, entry);
}
}
portEXIT_CRITICAL_SAFE(&dma2d_group->spinlock);
if (!enqueue) {
// Free channels available, start transaction immediately
// Store the acquired rx_chan into trans_placeholder (dma2d_trans_t) in case the upper driver later needs it to call `dma2d_force_end`
// The upper driver controls the life cycle of trans_placeholder
for (int i = 0; i < total_channel_num; i++) {
if (channel_handle_array[i].dir == DMA2D_CHANNEL_DIRECTION_RX) {
trans_placeholder->rx_chan = channel_handle_array[i].chan;
}
// Also save the transaction pointer
channel_handle_array[i].chan->status.transaction = trans_placeholder;
}
trans_desc->on_job_picked(total_channel_num, channel_handle_array, trans_desc->user_config);
}
err:
return ret;
}
esp_err_t dma2d_force_end(dma2d_trans_t *trans, bool *need_yield)
{
ESP_RETURN_ON_FALSE_ISR(trans && trans->rx_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
assert(trans->rx_chan->direction == DMA2D_CHANNEL_DIRECTION_RX);
dma2d_group_t *group = trans->rx_chan->group;
bool in_flight = false;
// We judge whether the transaction is in-flight by checking whether the RX channel it uses is in-use or free
portENTER_CRITICAL_SAFE(&group->spinlock);
if (!(group->rx_channel_free_mask & (1 << trans->rx_chan->channel_id))) {
in_flight = true;
dma2d_ll_rx_enable_interrupt(group->hal.dev, trans->rx_chan->channel_id, UINT32_MAX, false);
assert(!dma2d_ll_rx_is_fsm_idle(group->hal.dev, trans->rx_chan->channel_id));
}
portEXIT_CRITICAL_SAFE(&group->spinlock);
ESP_RETURN_ON_FALSE_ISR(in_flight, ESP_ERR_INVALID_STATE, TAG, "transaction not in-flight");
dma2d_rx_channel_t *rx_chan = group->rx_chans[trans->rx_chan->channel_id];
// Stop the RX channel and its bundled TX channels first
dma2d_stop(&rx_chan->base);
uint32_t tx_chans = rx_chan->bundled_tx_channel_mask;
for (int i = 0; i < SOC_DMA2D_TX_CHANNELS_PER_GROUP; i++) {
if (tx_chans & (1 << i)) {
dma2d_stop(&group->tx_chans[i]->base);
}
}
// Then release channels
*need_yield = free_up_channels(group, rx_chan);
return ESP_OK;
}
size_t dma2d_get_trans_elm_size(void)
{
return sizeof(dma2d_trans_t);
}

components/esp_hw_support/dma/dma2d_priv.h

@@ -0,0 +1,94 @@
/*
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdint.h>
#include <sys/queue.h>
#include "sdkconfig.h"
#include "freertos/FreeRTOS.h"
#include "esp_intr_alloc.h"
#include "esp_heap_caps.h"
#include "soc/soc_caps.h"
#include "hal/dma2d_hal.h"
#include "hal/dma2d_ll.h"
#include "esp_private/dma2d.h"
#ifdef __cplusplus
extern "C" {
#endif
#if CONFIG_DMA2D_OPERATION_FUNC_IN_IRAM || CONFIG_DMA2D_ISR_IRAM_SAFE
#define DMA2D_MEM_ALLOC_CAPS (MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT)
#else
#define DMA2D_MEM_ALLOC_CAPS MALLOC_CAP_DEFAULT
#endif
#if CONFIG_DMA2D_ISR_IRAM_SAFE
#define DMA2D_INTR_ALLOC_FLAGS (ESP_INTR_FLAG_SHARED | ESP_INTR_FLAG_IRAM)
#else
#define DMA2D_INTR_ALLOC_FLAGS ESP_INTR_FLAG_SHARED
#endif
#define DMA2D_RX_DEFAULT_INTR_FLAG (DMA2D_LL_EVENT_RX_SUC_EOF | DMA2D_LL_EVENT_RX_ERR_EOF | DMA2D_LL_EVENT_RX_DESC_ERROR)
typedef struct dma2d_channel_t dma2d_channel_t;
typedef struct dma2d_tx_channel_t dma2d_tx_channel_t;
typedef struct dma2d_rx_channel_t dma2d_rx_channel_t;
typedef struct dma2d_group_t dma2d_group_t;
struct dma2d_trans_s {
TAILQ_ENTRY(dma2d_trans_s) entry; // Link entry
const dma2d_trans_config_t *desc; // Pointer to the structure containing all configuration items of a transaction
dma2d_channel_handle_t rx_chan; // Pointer to the RX channel handle that will be used to do the transaction
};
struct dma2d_group_t {
int group_id; // Group ID, index from 0
dma2d_hal_context_t hal; // HAL instance is at group level
portMUX_TYPE spinlock; // Group level spinlock
TAILQ_HEAD(pending_trans, dma2d_trans_s) pending_trans_tailq; // Link head of pending 2D-DMA transactions
uint8_t tx_channel_free_mask; // Bit mask indicating the free TX channels at the moment
uint8_t rx_channel_free_mask; // Bit mask indicating the free RX channels at the moment
uint8_t tx_channel_reserved_mask; // Bit mask indicating the reserved TX channels
uint8_t rx_channel_reserved_mask; // Bit mask indicating the reserved RX channels
uint32_t periph_m2m_free_id_mask; // Bit mask indicating the available M2M peripheral selection IDs at the moment
dma2d_tx_channel_t *tx_chans[SOC_DMA2D_TX_CHANNELS_PER_GROUP]; // Handles of 2D-DMA TX channels
dma2d_rx_channel_t *rx_chans[SOC_DMA2D_RX_CHANNELS_PER_GROUP]; // Handles of 2D-DMA RX channels
int intr_priority; // All channels in the same group should share the same interrupt priority
};
struct dma2d_channel_t {
dma2d_group_t *group; // Which group the channel belongs to
int channel_id; // Channel ID
dma2d_channel_direction_t direction; // Channel direction, TX or RX
intr_handle_t intr; // Per-channel interrupt handle
portMUX_TYPE spinlock; // Channel level spinlock
struct {
dma2d_trans_t *transaction; // Pointer to the 2D-DMA transaction context that is currently being processed on the channel
uint32_t reorder_en : 1; // This flag indicates the channel will enable reorder functionality
uint32_t has_sibling : 1; // This flag indicates its sibling channel is also in-use
int periph_sel_id : (DMA2D_LL_CHANNEL_PERIPH_SEL_BIT_WIDTH + 1); // This is used to record the periph_sel_id of each channel
} status;
};
struct dma2d_tx_channel_t {
dma2d_channel_t base; // 2D-DMA channel base class
void *user_data; // User registered DMA event data
dma2d_event_callback_t on_desc_done; // TX DONE event callback
};
struct dma2d_rx_channel_t {
dma2d_channel_t base; // 2D-DMA channel base class
void *user_data; // User registered DMA event data
dma2d_event_callback_t on_recv_eof; // RX EOF event callback
dma2d_event_callback_t on_desc_done; // RX DONE event callback
uint32_t bundled_tx_channel_mask; // Bit mask indicating the TX channels together with the RX channel to do the transaction
};
#ifdef __cplusplus
}
#endif

components/esp_hw_support/include/esp_private/dma2d.h

@@ -0,0 +1,419 @@
/*
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
// DO NOT USE THESE APIS IN ANY APPLICATIONS
// DMA2D driver is not public for end users, but for ESP-IDF developers.
#pragma once
#include <stdbool.h>
#include "esp_err.h"
#include "hal/dma2d_types.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Type of 2D-DMA pool handle
*/
typedef struct dma2d_group_t *dma2d_pool_handle_t;
/**
* @brief Type of 2D-DMA transaction context (queue element)
*/
typedef struct dma2d_trans_s dma2d_trans_t;
/**
* @brief Helper macro to get the size for struct `dma2d_trans_t`
*/
#define SIZEOF_DMA2D_TRANS_T dma2d_get_trans_elm_size()
/**
* @brief Get the size for struct `dma2d_trans_t`
*
* @return size_t Size of struct `dma2d_trans_t`
*/
size_t dma2d_get_trans_elm_size(void);
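/*
 * For example (a sketch; the allocation strategy is entirely up to the upper driver), the
 * opaque transaction placeholder can be allocated without knowing the struct layout:
 *
 *     dma2d_trans_t *trans = heap_caps_calloc(1, SIZEOF_DMA2D_TRANS_T, MALLOC_CAP_INTERNAL);
 */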
/**
 * @brief A collection of configuration items used for allocating a 2D-DMA pool
*/
typedef struct {
uint32_t pool_id; /*!< The ID number of the 2D-DMA pool to allocate */
    uint32_t intr_priority; /*!< 2D-DMA interrupt priority,
                                 if set to 0, the driver will try to allocate an interrupt with a relatively low priority (1, 2, or 3) */
} dma2d_pool_config_t;
/**
* @brief Acquire a 2D-DMA pool
*
* @param[in] config Pointer to a collection of configurations for the 2D-DMA pool
* @param[out] ret_pool Returned pool handle
* @return
* - ESP_OK: Acquire the 2D-DMA pool successfully
* - ESP_ERR_INVALID_ARG: Acquire the 2D-DMA pool failed because of invalid argument
* - ESP_ERR_NO_MEM: Acquire the 2D-DMA pool failed because out of memory
* - ESP_FAIL: Acquire the 2D-DMA pool failed because of other error
*/
esp_err_t dma2d_acquire_pool(const dma2d_pool_config_t *config, dma2d_pool_handle_t *ret_pool);
/**
* @brief Release a 2D-DMA pool
*
 * @warning The upper driver should make sure there is no pending transaction (i.e. enqueued by the driver,
 * but not yet processed) before calling this function.
*
* @param[in] dma2d_pool 2D-DMA pool handle, allocated by `dma2d_acquire_pool`
* @return
* - ESP_OK: Release the 2D-DMA pool successfully
* - ESP_ERR_INVALID_ARG: Release the 2D-DMA pool failed because of invalid argument
 * - ESP_ERR_NOT_ALLOWED: Release the 2D-DMA pool failed because there are pending transactions in the pool,
 * so the pool cannot be destroyed
*/
esp_err_t dma2d_release_pool(dma2d_pool_handle_t dma2d_pool);
/**
* @brief Type of 2D-DMA channel handle
*/
typedef struct dma2d_channel_t *dma2d_channel_handle_t;
/**
* @brief Struct to save the necessary information of a 2D-DMA channel for upper drivers to configure the channels
*/
typedef struct {
dma2d_channel_direction_t dir; /*!< Direction of the DMA channel */
dma2d_channel_handle_t chan; /*!< Handle of the DMA channel */
} dma2d_trans_channel_info_t;
/**
 * @brief Callback function to start a 2D-DMA transaction. This callback is called when all the channels
 * necessary for the transaction have been acquired.
*
 * Inside this function, the upper driver usually configures the DMA channels, and then calls `dma2d_start` to start the transaction.
*
 * @note This function could run in the current thread, in another thread, or in ISR context (it needs to follow ISR rules!)
*
* @param[in] num_chans Number of DMA channels acquired for the transaction
* @param[in] dma2d_chans List of the channels acquired
* @param[in] user_config User registered data (usually dma channel configurations) from `dma2d_trans_config_t::user_config` which is passed into `dma2d_enqueue`
*
 * @return Whether a task switch is needed after the callback function returns,
 * this is usually because the callback wakes up a high-priority task.
*/
typedef bool (*dma2d_trans_callback_t)(uint32_t num_chans, const dma2d_trans_channel_info_t *dma2d_chans, void *user_config);
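/*
 * A minimal sketch of such a callback for an M2M transaction acquired with the SIBLING flag
 * (illustrative; `my_ctx_t`, `my_on_recv_eof` and the descriptor fields are assumed helpers of
 * the upper driver, not part of this API):
 *
 *     static bool my_on_job_picked(uint32_t num_chans, const dma2d_trans_channel_info_t *dma2d_chans, void *user_config)
 *     {
 *         my_ctx_t *ctx = (my_ctx_t *)user_config;
 *         dma2d_channel_handle_t tx_chan = NULL, rx_chan = NULL;
 *         for (uint32_t i = 0; i < num_chans; i++) {
 *             if (dma2d_chans[i].dir == DMA2D_CHANNEL_DIRECTION_TX) {
 *                 tx_chan = dma2d_chans[i].chan;
 *             } else {
 *                 rx_chan = dma2d_chans[i].chan;
 *             }
 *         }
 *         dma2d_trigger_t trig = { .periph = DMA2D_TRIG_PERIPH_M2M, .periph_sel_id = -1 };
 *         dma2d_connect(tx_chan, &trig);   // connect first: it also resets the channel
 *         dma2d_connect(rx_chan, &trig);
 *         dma2d_rx_event_callbacks_t cbs = { .on_recv_eof = my_on_recv_eof };
 *         dma2d_register_rx_event_callbacks(rx_chan, &cbs, ctx);
 *         dma2d_set_desc_addr(tx_chan, (intptr_t)ctx->tx_desc);
 *         dma2d_set_desc_addr(rx_chan, (intptr_t)ctx->rx_desc);
 *         dma2d_start(rx_chan);            // start RX before TX so the receiver is ready
 *         dma2d_start(tx_chan);
 *         return false;                    // no high priority task was woken here
 *     }
 */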
/**
* @brief 2D-DMA channel special function flags
*
* These flags are supposed to be used to specify the 2D-DMA channel requirements for a transaction.
*/
#define DMA2D_CHANNEL_FUNCTION_FLAG_TX_REORDER (1 << 0) /*!< TX channel that has reorder functionality */
#define DMA2D_CHANNEL_FUNCTION_FLAG_RX_REORDER (1 << 1) /*!< RX channel that has reorder functionality */
#define DMA2D_CHANNEL_FUNCTION_FLAG_TX_CSC (1 << 2) /*!< TX channel that has color space conversion functionality */
#define DMA2D_CHANNEL_FUNCTION_FLAG_RX_CSC (1 << 3) /*!< RX channel that has color space conversion functionality */
#define DMA2D_CHANNEL_FUNCTION_FLAG_SIBLING (1 << 4) /*!< TX and RX channel with same channel ID */
/**
* @brief A collection of configuration items for a 2D-DMA transaction
*/
typedef struct {
uint32_t tx_channel_num; /*!< Number of 2D-DMA TX channels required */
uint32_t rx_channel_num; /*!< Number of 2D-DMA RX channels required */
// Special function requirements for channels
uint32_t channel_flags; /*!< Bitwise OR of `DMA2D_CHANNEL_FUNCTION_FLAG_*` flags indicating the required functions on the channels for the transaction */
// Specified to use reserved channels
uint32_t specified_tx_channel_mask; /*!< Bit mask of the specific TX channels to be used, the specified TX channels should have been reserved */
uint32_t specified_rx_channel_mask; /*!< Bit mask of the specific RX channels to be used, the specified RX channels should have been reserved */
dma2d_trans_callback_t on_job_picked; /*!< Callback function to be called when all necessary channels to do the transaction have been acquired */
void *user_config; /*!< User registered data to be passed into `on_job_picked` callback */
} dma2d_trans_config_t;
/**
* @brief Enqueue a 2D-DMA transaction to be picked up by a certain 2D-DMA pool
*
* @param[in] dma2d_pool 2D-DMA pool handle, allocated by `dma2d_acquire_pool`
 * @param[in] trans_desc Pointer to a collection of configurations for a transaction
 *                       The context must stay valid at least until the `on_job_picked` callback function is called.
 * @param[in] trans_placeholder Address of the memory for storing this transaction context
 *                       The caller must allocate a placeholder for storing the 2D-DMA transaction, and pass it into the function.
 *                       The size of the placeholder can be obtained from the `SIZEOF_DMA2D_TRANS_T` macro.
 *                       Freeing the placeholder is also the upper driver's responsibility.
 *                       It can be freed when the `on_job_picked` callback function is called, or any time later.
* @return
* - ESP_OK: Enqueue the 2D-DMA transaction successfully
* - ESP_ERR_INVALID_ARG: Enqueue the 2D-DMA transaction failed because of invalid argument
*/
esp_err_t dma2d_enqueue(dma2d_pool_handle_t dma2d_pool, const dma2d_trans_config_t *trans_desc, dma2d_trans_t *trans_placeholder);
/**
* @brief Force end an in-flight 2D-DMA transaction
*
 * This API is useful when the error was caused by the DMA consumer (such as JPEG). Such an error can only be detected
 * by the consumer module, and the error info will only be propagated to the consumer driver. The 2D-DMA channels
 * involved in transferring the data have no way to be informed about the error at their upstream, so they will keep
 * waiting for the data.
*
 * Therefore, when the consumer driver is doing the error handling, it is required to call this API to end the ongoing
 * transaction and release the acquired TX and RX channels. It will stop and free the TX and RX channels that are bundled
 * together to process the transaction.
*
* @param[in] trans Pointer to the 2D-DMA transaction context
* @param[out] need_yield Pointer to a status flag to record whether a task switch is needed if this API is being called in an ISR
* @return
* - ESP_OK: Force end an in-flight transaction successfully
* - ESP_ERR_INVALID_ARG: Force end failed because of invalid argument
* - ESP_ERR_INVALID_STATE: Force end failed because the transaction is not yet in-flight
*/
esp_err_t dma2d_force_end(dma2d_trans_t *trans, bool *need_yield);
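/*
 * For example (a sketch), inside the consumer driver's error-handling path, where `trans` is the
 * placeholder that was previously passed to `dma2d_enqueue`:
 *
 *     bool need_yield = false;
 *     if (dma2d_force_end(trans, &need_yield) == ESP_OK && need_yield) {
 *         portYIELD_FROM_ISR();  // only when called from an ISR
 *     }
 */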
/*********************************************** DMA CHANNEL OPERATIONS ***********************************************/
/**
* @brief Type of 2D-DMA engine trigger
*/
typedef struct {
dma2d_trigger_peripheral_t periph; /*!< Target peripheral which will trigger DMA operations */
int periph_sel_id; /*!< Peripheral selection ID. Supported IDs are listed in `soc/dma2d_channel.h` */
} dma2d_trigger_t;
/**
* @brief Connect 2D-DMA channel to trigger peripheral, and configure all other channel settings to a certain state (the channel will be reset first)
*
 * Usually only to be called in the `on_job_picked` callback, and it should be the first step inside the
 * callback, since it resets the channel's other configurations to a default state.
*
* @param[in] dma2d_chan 2D-DMA channel handle, get from the `on_job_picked` callback input argument `dma2d_chans`
* @param[in] trig_periph 2D-DMA trigger peripheral
* @return
* - ESP_OK: Connect 2D-DMA channel successfully
* - ESP_ERR_INVALID_ARG: Connect 2D-DMA channel failed because of invalid argument
*/
esp_err_t dma2d_connect(dma2d_channel_handle_t dma2d_chan, const dma2d_trigger_t *trig_periph);
/**
* @brief A collection of strategy items that each 2D-DMA channel could apply
*/
typedef struct {
bool owner_check; /*!< If set / clear, DMA channel enables / disables checking owner validity */
bool auto_update_desc; /*!< If set / clear, DMA channel enables / disables hardware to update descriptor automatically (TX channel only) */
    bool eof_till_data_popped;  /*!< If set, the EOF flag is generated only after the data that needs to be transferred has been popped from the DMA FIFO (TX channel only) */
} dma2d_strategy_config_t;
/**
* @brief Apply channel strategy for 2D-DMA channel
*
* Usually only to be called in `on_job_picked` callback.
*
* @param[in] dma2d_chan 2D-DMA channel handle, get from the `on_job_picked` callback input argument `dma2d_chans`
* @param[in] config Configuration of 2D-DMA channel strategy
* @return
* - ESP_OK: Apply channel strategy successfully
* - ESP_ERR_INVALID_ARG: Apply channel strategy failed because of invalid argument
*/
esp_err_t dma2d_apply_strategy(dma2d_channel_handle_t dma2d_chan, const dma2d_strategy_config_t *config);
/**
* @brief A collection of transfer ability items that each 2D-DMA channel could apply to improve transfer efficiency
*
* @note The 2D-DMA driver has no knowledge about the DMA buffer (address and size) used by upper layer.
* So it's the responsibility of the **upper layer** to take care of the buffer address and size.
 *       Usually the RX buffer requires at least 4-byte alignment, to avoid the DMA-write-to-PSRAM process
 *       overwriting other data, or the buffer's own data being overwritten.
*/
typedef struct {
bool desc_burst_en; /*!< If set / clear, DMA channel enables / disables burst reading descriptor link */
dma2d_data_burst_length_t data_burst_length; /*!< Configure the DMA channel burst reading data length */
dma2d_macro_block_size_t mb_size; /*!< Configure the DMA channel macro block size (only useful in DMA2D_DESCRIPTOR_BLOCK_RW_MODE_MULTIPLE mode) */
} dma2d_transfer_ability_t;
/**
* @brief Configure 2D-DMA channel transfer ability for transfer efficiency
*
* Usually only to be called in `on_job_picked` callback.
*
* @param[in] dma2d_chan 2D-DMA channel handle, get from the `on_job_picked` callback input argument `dma2d_chans`
* @param[in] ability Configuration of 2D-DMA channel transfer ability
* @return
* - ESP_OK: Set channel transfer ability successfully
* - ESP_ERR_INVALID_ARG: Set channel transfer ability failed because of invalid argument
*/
esp_err_t dma2d_set_transfer_ability(dma2d_channel_handle_t dma2d_chan, const dma2d_transfer_ability_t *ability);
/**
* @brief A collection of color space conversion (CSC) items that each 2D-DMA channel could apply
*/
typedef struct {
union {
dma2d_csc_tx_option_t tx_csc_option; /*!< TX direction (into DMA) pixel format conversion option */
dma2d_csc_rx_option_t rx_csc_option; /*!< RX direction (out from DMA) pixel format conversion option */
};
dma2d_scramble_order_t pre_scramble; /*!< DMA channel data scramble order before color conversion */
dma2d_scramble_order_t post_scramble; /*!< DMA channel data scramble order after color conversion (only available for RX channels) */
} dma2d_csc_config_t;
/**
* @brief Configure color space conversion setting for 2D-DMA channel
*
* Usually only to be called in `on_job_picked` callback.
*
* @param[in] dma2d_chan 2D-DMA channel handle, get from the `on_job_picked` callback input argument `dma2d_chans`
* @param[in] config Configuration of 2D-DMA channel color space conversion
* @return
* - ESP_OK: Configure DMA color space conversion successfully
* - ESP_ERR_INVALID_ARG: Configure DMA color space conversion failed because of invalid argument
*/
esp_err_t dma2d_configure_color_space_conversion(dma2d_channel_handle_t dma2d_chan, const dma2d_csc_config_t *config);
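/*
 * A condensed sketch of the typical channel setup inside `on_job_picked` (illustration only),
 * chaining `dma2d_connect`, `dma2d_apply_strategy`, `dma2d_set_transfer_ability` and
 * `dma2d_configure_color_space_conversion`. The concrete values are examples, not requirements.
 *
 *     dma2d_trigger_t trig = {
 *         .periph = DMA2D_TRIG_PERIPH_M2M,
 *         .periph_sel_id = SOC_DMA2D_TRIG_PERIPH_M2M_TX,
 *     };
 *     dma2d_connect(tx_chan, &trig);           // first step: also resets the channel
 *
 *     dma2d_strategy_config_t strategy = {
 *         .owner_check = true,
 *     };
 *     dma2d_apply_strategy(tx_chan, &strategy);
 *
 *     dma2d_transfer_ability_t ability = {
 *         .desc_burst_en = true,
 *         .data_burst_length = DMA2D_DATA_BURST_LENGTH_64,
 *         .mb_size = DMA2D_MACRO_BLOCK_SIZE_NONE,
 *     };
 *     dma2d_set_transfer_ability(tx_chan, &ability);
 *
 *     dma2d_csc_config_t csc = {
 *         .tx_csc_option = DMA2D_CSC_TX_RGB565_TO_RGB888,
 *         .pre_scramble = DMA2D_SCRAMBLE_ORDER_BYTE2_1_0,
 *     };
 *     dma2d_configure_color_space_conversion(tx_chan, &csc);
 */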
/**
* @brief Type of 2D-DMA event data
*/
typedef struct {
union {
intptr_t rx_eof_desc_addr; /*!< EOF descriptor address of RX channel */
intptr_t tx_eof_desc_addr; /*!< EOF descriptor address of TX channel */
};
dma2d_trans_t *transaction; /*!< Pointer to the transaction context processed */
} dma2d_event_data_t;
/**
* @brief Type of 2D-DMA event callback
*
 * @param dma2d_chan 2D-DMA channel handle. Depending on the callback event, NULL may be passed to this parameter.
* @param event_data 2D-DMA event data
* @param user_data User registered data from `dma2d_register_tx/rx_event_callbacks`
*
* @return Whether a task switch is needed after the callback function returns,
* this is usually due to the callback wakes up some high priority task.
*/
typedef bool (*dma2d_event_callback_t)(dma2d_channel_handle_t dma2d_chan, dma2d_event_data_t *event_data, void *user_data);
/**
* @brief Group of supported 2D-DMA TX callbacks
* @note The callbacks are all running under ISR environment
*/
typedef struct {
dma2d_event_callback_t on_desc_done; /*!< Invoked when TX engine completes processing all data in a descriptor.
                                             When the TX_DONE interrupt gets triggered but not EOF, the transaction is considered
                                             to be still in the middle (partially done); you are allowed to configure the
                                             2D-DMA channel hardware/descriptor in this callback, and let the channels
                                             start running again */
} dma2d_tx_event_callbacks_t;
/**
* @brief Group of supported 2D-DMA RX callbacks
* @note The callbacks are all running under ISR environment
*
 * Users should be clear about the responsibility of each callback when writing the callback functions,
 * for example, where to free the transaction memory.
*/
typedef struct {
dma2d_event_callback_t on_recv_eof; /*!< Invoked when RX engine meets EOF descriptor.
                                             Note that in this callback, the RX channel handle will not be given.
                                             This is because at the moment the `on_recv_eof` callback is called, the channels
                                             have been returned to the pool and may already have been used to start another new transaction */
dma2d_event_callback_t on_desc_done; /*!< Invoked when RX engine completes processing all data in a descriptor.
                                             When the RX_DONE interrupt gets triggered but not EOF, the transaction is considered
                                             to be still in the middle (partially done); you are allowed to configure the
                                             2D-DMA channel hardware/descriptor in this callback, and let the channels
                                             start running again */
} dma2d_rx_event_callbacks_t;
/**
* @brief Set 2D-DMA event callbacks for TX channel
*
* Usually only to be called in `on_job_picked` callback.
*
* @param[in] dma2d_chan 2D-DMA TX channel handle, get from the `on_job_picked` callback input argument `dma2d_chans`
* @param[in] cbs Group of callback functions
* @param[in] user_data User data, which will be passed to callback functions directly
* @return
* - ESP_OK: Set event callbacks successfully
* - ESP_ERR_INVALID_ARG: Set event callbacks failed because of invalid argument
*/
esp_err_t dma2d_register_tx_event_callbacks(dma2d_channel_handle_t dma2d_chan, dma2d_tx_event_callbacks_t *cbs, void *user_data);
/**
* @brief Set 2D-DMA event callbacks for RX channel
*
* Usually only to be called in `on_job_picked` callback.
*
* @param[in] dma2d_chan 2D-DMA RX channel handle, get from the `on_job_picked` callback input argument `dma2d_chans`
* @param[in] cbs Group of callback functions
* @param[in] user_data User data, which will be passed to callback functions directly
* @return
* - ESP_OK: Set event callbacks successfully
* - ESP_ERR_INVALID_ARG: Set event callbacks failed because of invalid argument
*/
esp_err_t dma2d_register_rx_event_callbacks(dma2d_channel_handle_t dma2d_chan, dma2d_rx_event_callbacks_t *cbs, void *user_data);
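/*
 * A minimal registration sketch (illustration only), mirroring the pattern used by the test
 * utilities in this commit: the RX EOF callback gives a semaphore so a task can block on
 * completion. `done_sem` is an assumed FreeRTOS semaphore handle.
 *
 *     static bool IRAM_ATTR on_recv_eof_cb(dma2d_channel_handle_t chan,
 *                                          dma2d_event_data_t *event_data, void *user_data)
 *     {
 *         BaseType_t hp_task_woken = pdFALSE;
 *         xSemaphoreGiveFromISR((SemaphoreHandle_t)user_data, &hp_task_woken);
 *         return hp_task_woken == pdTRUE;
 *     }
 *
 *     dma2d_rx_event_callbacks_t cbs = {
 *         .on_recv_eof = on_recv_eof_cb,
 *     };
 *     dma2d_register_rx_event_callbacks(rx_chan, &cbs, (void *)done_sem);
 */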
/**
* @brief Set descriptor address for 2D-DMA channel
*
* Usually only to be called in `on_job_picked` callback.
*
* @param[in] dma2d_chan 2D-DMA channel handle, get from the `on_job_picked` callback input argument `dma2d_chans`
* @param[in] desc_base_addr Base address of descriptors
* @return
* - ESP_OK: Set 2D-DMA descriptor addr successfully
* - ESP_ERR_INVALID_ARG: Set 2D-DMA descriptor addr failed because of invalid argument
*/
esp_err_t dma2d_set_desc_addr(dma2d_channel_handle_t dma2d_chan, intptr_t desc_base_addr);
/**
* @brief Start engine for 2D-DMA channel
*
* Usually only to be called in `on_job_picked` callback.
*
* @param[in] dma2d_chan 2D-DMA channel handle, get from the `on_job_picked` callback input argument `dma2d_chans`
* @return
* - ESP_OK: Start 2D-DMA engine successfully
* - ESP_ERR_INVALID_ARG: Start 2D-DMA engine failed because of invalid argument
*/
esp_err_t dma2d_start(dma2d_channel_handle_t dma2d_chan);
/**
* @brief Stop engine for 2D-DMA channel
*
* Usually to be called in ISR context.
*
* @param[in] dma2d_chan 2D-DMA channel handle
* @return
* - ESP_OK: Stop 2D-DMA engine successfully
* - ESP_ERR_INVALID_ARG: Stop 2D-DMA engine failed because of invalid argument
*/
esp_err_t dma2d_stop(dma2d_channel_handle_t dma2d_chan);
/**
 * @brief Make the 2D-DMA engine aware of the newly appended descriptors
*
* Usually to be called in ISR context.
*
* @param[in] dma2d_chan 2D-DMA channel handle
* @return
* - ESP_OK: Send append command to 2D-DMA engine successfully
* - ESP_ERR_INVALID_ARG: Send append command to 2D-DMA engine failed because of invalid argument
*/
esp_err_t dma2d_append(dma2d_channel_handle_t dma2d_chan);
/**
* @brief Reset engine for 2D-DMA channel
*
* Usually to be called in ISR context.
*
* @param[in] dma2d_chan 2D-DMA channel handle
* @return
* - ESP_OK: Reset 2D-DMA engine successfully
* - ESP_ERR_INVALID_ARG: Reset 2D-DMA engine failed because of invalid argument
*/
esp_err_t dma2d_reset(dma2d_channel_handle_t dma2d_chan);
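/*
 * A hedged sketch of the ISR-context restart sequence (illustration only): from an
 * `on_desc_done` callback, halt the engine, point it at the next prepared descriptor list
 * and let it run again. `next_desc_addr` is an assumed, already cache-synced descriptor address.
 *
 *     dma2d_stop(chan);
 *     dma2d_reset(chan);
 *     dma2d_set_desc_addr(chan, next_desc_addr);
 *     dma2d_start(chan);
 *
 * Alternatively, if new descriptors were linked to the tail of a still-running list, a single
 * `dma2d_append(chan)` call is enough for the engine to pick them up.
 */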
#ifdef __cplusplus
}
#endif


@ -82,3 +82,37 @@ entries:
if DW_GDMA_SETTER_FUNC_IN_IRAM = y: if DW_GDMA_SETTER_FUNC_IN_IRAM = y:
dw_gdma: dw_gdma_channel_set_block_markers (noflash) dw_gdma: dw_gdma_channel_set_block_markers (noflash)
dw_gdma: dw_gdma_lli_set_block_markers (noflash) dw_gdma: dw_gdma_lli_set_block_markers (noflash)
[mapping:dma2d_driver]
archive: libesp_hw_support.a
entries:
# performance optimization, always put the 2D-DMA default interrupt handler in IRAM
if SOC_DMA2D_SUPPORTED = y:
dma2d: acquire_free_channels_for_trans (noflash)
dma2d: free_up_channels (noflash)
dma2d: _dma2d_default_tx_isr (noflash)
dma2d: _dma2d_default_rx_isr (noflash)
dma2d: dma2d_default_isr (noflash)
# put 2D-DMA operation functions in IRAM
if DMA2D_OPERATION_FUNC_IN_IRAM = y:
dma2d: dma2d_connect (noflash)
dma2d: dma2d_register_tx_event_callbacks (noflash)
dma2d: dma2d_register_rx_event_callbacks (noflash)
dma2d: dma2d_set_desc_addr (noflash)
dma2d: dma2d_start (noflash)
dma2d: dma2d_stop (noflash)
dma2d: dma2d_append (noflash)
dma2d: dma2d_reset (noflash)
dma2d: dma2d_force_end (noflash)
dma2d: dma2d_apply_strategy (noflash)
dma2d: dma2d_set_transfer_ability (noflash)
dma2d: dma2d_configure_color_space_conversion (noflash)
dma2d: dma2d_enqueue (noflash)
[mapping:dma2d_hal]
archive: libhal.a
entries:
if DMA2D_ISR_IRAM_SAFE = y || DMA2D_OPERATION_FUNC_IN_IRAM = y:
dma2d_hal: dma2d_hal_tx_reset_channel (noflash)
dma2d_hal: dma2d_hal_rx_reset_channel (noflash)


@ -8,6 +8,12 @@ components/esp_hw_support/test_apps/dma:
depends_filepatterns: depends_filepatterns:
- components/esp_hw_support/dma/**/* - components/esp_hw_support/dma/**/*
components/esp_hw_support/test_apps/dma2d:
disable:
- if: SOC_DMA2D_SUPPORTED != 1
depends_filepatterns:
- components/esp_hw_support/dma/**/*
components/esp_hw_support/test_apps/esp_hw_support_unity_tests: components/esp_hw_support/test_apps/esp_hw_support_unity_tests:
disable: disable:
- if: SOC_GPSPI_SUPPORTED != 1 - if: SOC_GPSPI_SUPPORTED != 1


@ -0,0 +1,10 @@
# This is the project CMakeLists.txt file for the test subproject
cmake_minimum_required(VERSION 3.16)
include($ENV{IDF_PATH}/tools/cmake/project.cmake)
# "Trim" the build. Include the minimal set of components, main, and anything it depends on. We also depend on esp_psram
# as we set CONFIG_SPIRAM_... options.
set(COMPONENTS main esp_psram)
project(dma2d_test)


@ -0,0 +1,2 @@
| Supported Targets | ESP32-P4 |
| ----------------- | -------- |


@ -0,0 +1,10 @@
set(srcs "test_app_main.c"
"test_dma2d.c"
"dma2d_test_utils.c")
# In order for the cases defined by `TEST_CASE` to be linked into the final elf,
# the component can be registered as WHOLE_ARCHIVE
idf_component_register(SRCS ${srcs}
INCLUDE_DIRS "."
PRIV_REQUIRES unity esp_mm
WHOLE_ARCHIVE)


@ -0,0 +1,162 @@
/*
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <string.h>
#include "unity.h"
#include "dma2d_test_utils.h"
#include "esp_private/dma2d.h"
#include "soc/dma2d_channel.h"
#include "esp_heap_caps.h"
#include "esp_memory_utils.h"
#include "esp_check.h"
#include "esp_err.h"
#include "esp_attr.h"
#include "esp_log.h"
__attribute__((unused)) static const char *TAG = "dma2d_m2m";
#if CONFIG_DMA2D_OPERATION_FUNC_IN_IRAM || CONFIG_DMA2D_ISR_IRAM_SAFE
#define DMA2D_M2M_MEM_ALLOC_CAPS (MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT)
#else
#define DMA2D_M2M_MEM_ALLOC_CAPS MALLOC_CAP_DEFAULT
#endif
#if CONFIG_DMA2D_ISR_IRAM_SAFE
#define DMA2D_M2M_ATTR IRAM_ATTR
#else
#define DMA2D_M2M_ATTR
#endif
typedef dma2d_m2m_trans_config_t dma2d_m2m_trans_desc_t;
typedef struct {
dma2d_m2m_trans_desc_t m2m_trans_desc;
dma2d_trans_config_t dma_chan_desc;
    uint32_t dma_trans_placeholder_head;    /* Head of the memory for storing the 2D-DMA transaction element */
} dma2d_m2m_transaction_t;
static dma2d_pool_handle_t dma2d_pool_handle;
esp_err_t dma2d_m2m_init(void)
{
dma2d_pool_config_t dma2d_pool_config = {
.pool_id = 0,
};
return dma2d_acquire_pool(&dma2d_pool_config, &dma2d_pool_handle);
}
esp_err_t dma2d_m2m_deinit(void)
{
return dma2d_release_pool(dma2d_pool_handle);
}
static bool DMA2D_M2M_ATTR dma2d_m2m_transaction_done_cb(dma2d_channel_handle_t dma2d_chan, dma2d_event_data_t *event_data, void *user_data)
{
bool need_yield = false;
dma2d_m2m_transaction_t *trans_config = (dma2d_m2m_transaction_t *)user_data;
dma2d_m2m_trans_desc_t *m2m_trans_desc = &trans_config->m2m_trans_desc;
if (m2m_trans_desc->trans_eof_cb) {
need_yield |= m2m_trans_desc->trans_eof_cb(m2m_trans_desc->user_data);
}
free(trans_config);
return need_yield;
}
static bool DMA2D_M2M_ATTR dma2d_m2m_transaction_on_picked(uint32_t channel_num, const dma2d_trans_channel_info_t *dma2d_chans, void *user_config)
{
assert(channel_num == 2 && dma2d_chans && user_config);
dma2d_m2m_transaction_t *trans_config = (dma2d_m2m_transaction_t *)user_config;
dma2d_m2m_trans_desc_t *m2m_trans_desc = &trans_config->m2m_trans_desc;
// Get the required 2D-DMA channel handles
uint32_t dma_tx_chan_idx = 0;
uint32_t dma_rx_chan_idx = 1;
if (dma2d_chans[0].dir == DMA2D_CHANNEL_DIRECTION_RX) {
dma_tx_chan_idx = 1;
dma_rx_chan_idx = 0;
}
dma2d_channel_handle_t dma_tx_chan = dma2d_chans[dma_tx_chan_idx].chan;
dma2d_channel_handle_t dma_rx_chan = dma2d_chans[dma_rx_chan_idx].chan;
dma2d_trigger_t trig_periph = {
.periph = DMA2D_TRIG_PERIPH_M2M,
.periph_sel_id = SOC_DMA2D_TRIG_PERIPH_M2M_TX,
};
dma2d_connect(dma_tx_chan, &trig_periph);
trig_periph.periph_sel_id = SOC_DMA2D_TRIG_PERIPH_M2M_RX;
dma2d_connect(dma_rx_chan, &trig_periph);
if (m2m_trans_desc->transfer_ability_config) {
dma2d_set_transfer_ability(dma_tx_chan, m2m_trans_desc->transfer_ability_config);
dma2d_set_transfer_ability(dma_rx_chan, m2m_trans_desc->transfer_ability_config);
}
if (m2m_trans_desc->tx_strategy_config) {
dma2d_apply_strategy(dma_tx_chan, m2m_trans_desc->tx_strategy_config);
}
if (m2m_trans_desc->rx_strategy_config) {
dma2d_apply_strategy(dma_rx_chan, m2m_trans_desc->rx_strategy_config);
}
if (m2m_trans_desc->tx_csc_config) {
dma2d_configure_color_space_conversion(dma_tx_chan, m2m_trans_desc->tx_csc_config);
}
if (m2m_trans_desc->rx_csc_config) {
dma2d_configure_color_space_conversion(dma_rx_chan, m2m_trans_desc->rx_csc_config);
}
dma2d_rx_event_callbacks_t dma_cbs = {
.on_recv_eof = dma2d_m2m_transaction_done_cb,
};
dma2d_register_rx_event_callbacks(dma_rx_chan, &dma_cbs, (void *)trans_config);
dma2d_set_desc_addr(dma_tx_chan, m2m_trans_desc->tx_desc_base_addr);
dma2d_set_desc_addr(dma_rx_chan, m2m_trans_desc->rx_desc_base_addr);
dma2d_start(dma_tx_chan);
dma2d_start(dma_rx_chan);
// No need to yield
return false;
}
esp_err_t dma2d_m2m(const dma2d_m2m_trans_config_t *trans_config)
{
#if CONFIG_DMA2D_ISR_IRAM_SAFE
if (trans_config->trans_eof_cb) {
ESP_RETURN_ON_FALSE(esp_ptr_in_iram(trans_config->trans_eof_cb),
ESP_ERR_INVALID_ARG, TAG, "trans_eof_cb not in IRAM");
}
if (trans_config->user_data) {
ESP_RETURN_ON_FALSE(esp_ptr_internal(trans_config->user_data),
ESP_ERR_INVALID_ARG, TAG, "user context not in internal RAM");
}
#endif
dma2d_m2m_transaction_t *dma2d_m2m_trans = (dma2d_m2m_transaction_t *)heap_caps_calloc(1, sizeof(dma2d_m2m_transaction_t) + SIZEOF_DMA2D_TRANS_T, DMA2D_M2M_MEM_ALLOC_CAPS);
TEST_ASSERT_NOT_NULL(dma2d_m2m_trans);
dma2d_m2m_trans->dma_chan_desc.tx_channel_num = 1;
dma2d_m2m_trans->dma_chan_desc.rx_channel_num = 1;
dma2d_m2m_trans->dma_chan_desc.channel_flags = DMA2D_CHANNEL_FUNCTION_FLAG_SIBLING;
dma2d_m2m_trans->dma_chan_desc.channel_flags |= (trans_config->tx_csc_config == NULL) ? 0 : DMA2D_CHANNEL_FUNCTION_FLAG_TX_CSC;
dma2d_m2m_trans->dma_chan_desc.channel_flags |= (trans_config->rx_csc_config == NULL) ? 0 : DMA2D_CHANNEL_FUNCTION_FLAG_RX_CSC;
dma2d_m2m_trans->dma_chan_desc.specified_tx_channel_mask = 0;
dma2d_m2m_trans->dma_chan_desc.specified_rx_channel_mask = 0;
memcpy(&dma2d_m2m_trans->m2m_trans_desc, trans_config, sizeof(dma2d_m2m_trans_config_t));
dma2d_m2m_trans->dma_chan_desc.user_config = (void *)dma2d_m2m_trans;
dma2d_m2m_trans->dma_chan_desc.on_job_picked = dma2d_m2m_transaction_on_picked;
esp_err_t ret = dma2d_enqueue(dma2d_pool_handle, &dma2d_m2m_trans->dma_chan_desc, (dma2d_trans_t *)&dma2d_m2m_trans->dma_trans_placeholder_head);
if (ret != ESP_OK) {
free(dma2d_m2m_trans);
}
return ret;
}


@ -0,0 +1,63 @@
/*
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include "esp_private/dma2d.h"
#include "esp_err.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Initialize the 2D-DMA module to perform memcopy operations
*/
esp_err_t dma2d_m2m_init(void);
/**
* @brief De-initialize the 2D-DMA module
*/
esp_err_t dma2d_m2m_deinit(void);
/**
* @brief Callback function when a memcopy operation is done
*
* @param user_data User registered data from `dma2d_m2m_trans_config_t`
*
* @return Whether a task switch is needed after the callback function returns,
* this is usually due to the callback wakes up some high priority task.
*/
typedef bool (*dma2d_m2m_trans_eof_callback_t)(void *user_data);
/**
 * @brief A collection of configuration items for performing a memcopy operation with 2D-DMA
*/
typedef struct {
intptr_t tx_desc_base_addr; /*!< 2D-DMA TX descriptor address */
intptr_t rx_desc_base_addr; /*!< 2D-DMA RX descriptor address */
dma2d_m2m_trans_eof_callback_t trans_eof_cb; /*!< Callback function to be called when the memcopy operation is done */
void *user_data; /*!< User registered data to be passed into `trans_eof_cb` callback */
dma2d_transfer_ability_t *transfer_ability_config; /*!< Pointer to a collection of 2D-DMA transfer ability configuration */
dma2d_strategy_config_t *tx_strategy_config; /*!< Pointer to a collection of 2D-DMA TX strategy configuration */
dma2d_strategy_config_t *rx_strategy_config; /*!< Pointer to a collection of 2D-DMA RX strategy configuration */
dma2d_csc_config_t *tx_csc_config; /*!< Pointer to a collection of 2D-DMA TX color space conversion configuration */
dma2d_csc_config_t *rx_csc_config; /*!< Pointer to a collection of 2D-DMA RX color space conversion configuration */
} dma2d_m2m_trans_config_t;
/**
* @brief Do a memcopy operation with 2D-DMA module
*
* @param trans_config Pointer to a collection of configurations for the memcopy operation
* @return
* - ESP_OK: Enqueue the transaction to 2D-DMA pool successfully
* - ESP_ERR_INVALID_ARG: Enqueue the transaction to 2D-DMA pool failed because of invalid argument
*/
esp_err_t dma2d_m2m(const dma2d_m2m_trans_config_t *trans_config);
#ifdef __cplusplus
}
#endif


@ -0,0 +1,36 @@
/*
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "unity.h"
#include "unity_test_runner.h"
#include "esp_heap_caps.h"
#include "unity_test_utils.h"
// Some resources are lazily allocated in the driver; the threshold accounts for that
#define TEST_MEMORY_LEAK_THRESHOLD (200)
void setUp(void)
{
unity_utils_record_free_mem();
}
void tearDown(void)
{
esp_reent_cleanup(); //clean up some of the newlib's lazy allocations
unity_utils_evaluate_leaks_direct(TEST_MEMORY_LEAK_THRESHOLD);
}
void app_main(void)
{
printf(" ______ ___ ___ ______ _____ ______ \n");
printf("(______) (___)_(___) (______) _(_____) (______) \n");
printf("(_) (_) (_) (_) (_) (_)____(_) (_) _(_) (_) (_) \n");
printf("(_) (_)(_) (_) (_) (________) _(_) (_) (_)\n");
printf("(_)___(_) (_) (_) (_) (_) (_)___ (_)___(_) \n");
printf("(______) (_) (_) (_) (_) (_______) (______) \n");
unity_run_menu();
}


@ -0,0 +1,677 @@
/*
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <string.h>
#include <stdlib.h>
#include <sys/param.h>
#include "unity.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "esp_private/dma2d.h"
#include "dma2d_test_utils.h"
#include "hal/dma2d_types.h"
#include "hal/color_types.h"
#include "esp_heap_caps.h"
#include "esp_cache.h"
// All tests will perform `M2M_TRANS_TIMES` memcpy transactions, utilizing all available 2D-DMA channels.
// This tests the hardware capability of multiple 2D-DMA transactions running together, and the driver capability of
// transactions being sent to a queue, waiting for free channels to become available, and being picked to start the
// real hardware operation.
#define M2M_TRANS_TIMES (8)
// Descriptor and buffer addresses and sizes should be aligned to 64 bytes (the cache line size alignment restriction) to be safely used by the CPU
static dma2d_descriptor_t *tx_dsc[M2M_TRANS_TIMES];
static dma2d_descriptor_t *rx_dsc[M2M_TRANS_TIMES];
static dma2d_m2m_trans_config_t m2m_trans_config[M2M_TRANS_TIMES];
static void dma2d_link_dscr_init(uint32_t *head, uint32_t *next, void *buf_ptr,
uint32_t ha, uint32_t va, uint32_t hb, uint32_t vb,
uint32_t eof, uint32_t en_2d, uint32_t pbyte, uint32_t mod,
uint32_t bias_x, uint32_t bias_y)
{
dma2d_descriptor_t *dma2d = (dma2d_descriptor_t *)head;
memset(dma2d, 0, sizeof(dma2d_descriptor_t));
dma2d->owner = DMA2D_DESCRIPTOR_BUFFER_OWNER_DMA;
dma2d->suc_eof = eof;
dma2d->dma2d_en = en_2d;
dma2d->err_eof = 0;
dma2d->hb_length = hb;
dma2d->vb_size = vb;
dma2d->pbyte = pbyte;
dma2d->ha_length = ha;
dma2d->va_size = va;
dma2d->mode = mod;
dma2d->y = bias_y;
dma2d->x = bias_x;
dma2d->buffer = buf_ptr;
dma2d->next = (dma2d_descriptor_t *)next;
}
static bool IRAM_ATTR dma2d_m2m_suc_eof_event_cb(void *user_data)
{
BaseType_t xHigherPriorityTaskWoken = pdFALSE;
SemaphoreHandle_t sem = (SemaphoreHandle_t)user_data;
xSemaphoreGiveFromISR(sem, &xHigherPriorityTaskWoken);
return (xHigherPriorityTaskWoken == pdTRUE);
}
TEST_CASE("DMA2D_M2M_1D_basic", "[DMA2D]")
{
// Test a 16KB data block pure memcopy
const uint32_t data_size = 16 * 1024; // unit: byte
memset(m2m_trans_config, 0, M2M_TRANS_TIMES * sizeof(dma2d_m2m_trans_config_t));
TEST_ESP_OK(dma2d_m2m_init());
dma2d_descriptor_t *tx_link_buffer = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, M2M_TRANS_TIMES, 64, MALLOC_CAP_DEFAULT);
dma2d_descriptor_t *rx_link_buffer = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, M2M_TRANS_TIMES, 64, MALLOC_CAP_DEFAULT);
TEST_ASSERT_NOT_NULL(tx_link_buffer);
TEST_ASSERT_NOT_NULL(rx_link_buffer);
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
tx_dsc[i] = (dma2d_descriptor_t *)((uint32_t)tx_link_buffer + 64 * i);
rx_dsc[i] = (dma2d_descriptor_t *)((uint32_t)rx_link_buffer + 64 * i);
}
uint8_t *prtx;
uint8_t *prrx;
uint8_t *tx_buf = heap_caps_aligned_calloc(64, data_size * M2M_TRANS_TIMES, sizeof(uint8_t), MALLOC_CAP_SPIRAM);
uint8_t *rx_buf = heap_caps_aligned_calloc(64, data_size * M2M_TRANS_TIMES, sizeof(uint8_t), MALLOC_CAP_SPIRAM);
TEST_ASSERT_NOT_NULL(tx_buf);
TEST_ASSERT_NOT_NULL(rx_buf);
dma2d_transfer_ability_t transfer_ability_config = {
.data_burst_length = DMA2D_DATA_BURST_LENGTH_64,
.desc_burst_en = true,
.mb_size = DMA2D_MACRO_BLOCK_SIZE_NONE,
};
SemaphoreHandle_t counting_sem = xSemaphoreCreateCounting(M2M_TRANS_TIMES, 0);
// Preparation
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
// Buffer data preparation
prtx = tx_buf + i * data_size;
prrx = rx_buf + i * data_size;
for (int idx = 0; idx < data_size; idx++) {
prtx[idx] = (i + idx + 0x45) & 0xFF;
prrx[idx] = 0;
}
// Writeback and invalidate the TX and RX buffers
esp_cache_msync((void *)prtx, data_size, ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE);
esp_cache_msync((void *)prrx, data_size, ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE);
// DMA description preparation
dma2d_link_dscr_init((uint32_t *)tx_dsc[i], NULL, (void *)prtx,
data_size >> 14, data_size >> 14,
data_size & 0x3FFF, data_size & 0x3FFF,
1, 0, DMA2D_DESCRIPTOR_PBYTE_1B0_PER_PIXEL,
DMA2D_DESCRIPTOR_BLOCK_RW_MODE_SINGLE, 0, 0);
dma2d_link_dscr_init((uint32_t *)rx_dsc[i], NULL, (void *)prrx,
0, data_size >> 14,
0, data_size & 0x3FFF,
0, 0, DMA2D_DESCRIPTOR_PBYTE_1B0_PER_PIXEL,
DMA2D_DESCRIPTOR_BLOCK_RW_MODE_SINGLE, 0, 0);
// Writeback the DMA descriptors
esp_cache_msync((void *)tx_dsc[i], 64, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
esp_cache_msync((void *)rx_dsc[i], 64, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
// Construct dma2d_m2m_trans_config_t structure
m2m_trans_config[i].tx_desc_base_addr = (intptr_t)tx_dsc[i];
m2m_trans_config[i].rx_desc_base_addr = (intptr_t)rx_dsc[i];
m2m_trans_config[i].trans_eof_cb = dma2d_m2m_suc_eof_event_cb;
m2m_trans_config[i].user_data = (void *)counting_sem;
m2m_trans_config[i].transfer_ability_config = &transfer_ability_config;
}
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
printf("trans %d\n", i);
TEST_ESP_OK(dma2d_m2m(&m2m_trans_config[i]));
}
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
xSemaphoreTake(counting_sem, portMAX_DELAY);
printf("trans %d done\n", i);
}
printf("All transactions done!\n");
// Check result
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
prtx = tx_buf + i * data_size;
prrx = rx_buf + i * data_size;
for (int idx = 0; idx < data_size; idx++) {
TEST_ASSERT_EQUAL(prtx[idx], prrx[idx]);
TEST_ASSERT_EQUAL(prtx[idx], (i + idx + 0x45) & 0xFF);
}
}
free(tx_link_buffer);
free(rx_link_buffer);
free(tx_buf);
free(rx_buf);
vSemaphoreDelete(counting_sem);
TEST_ESP_OK(dma2d_m2m_deinit());
}
static void rgb565_to_rgb888(uint16_t rgb565, void *__r, void *__g, void *__b)
{
uint8_t *r = (uint8_t *)__r;
uint8_t *g = (uint8_t *)__g;
uint8_t *b = (uint8_t *)__b;
uint32_t _rgb565 = rgb565;
uint8_t _b = (_rgb565>>8) & 0xF8;
uint8_t _g = (_rgb565>>3) & 0xFC;
uint8_t _r = (_rgb565<<3) & 0xF8;
// *r = (_r & 0x08) ? (_r | 0x1) : (_r);
// *g = (_g & 0x04) ? (_g | 0x1) : (_g);
// *b = (_b & 0x08) ? (_b | 0x1) : (_b);
*r = _r | ( (_r >>3) & 0x7);
*g = _g | ( (_g >>2) & 0x3);
*b = _b | ( (_b >>3) & 0x7);
}
static int rgb565_to_rgb888_and_cmp(void *_rgb565, void *__rgb888, int pix)
{
uint16_t *rgb565 = (uint16_t *)_rgb565;
uint8_t *_rgb888 = (uint8_t *)__rgb888;
uint8_t _r,_g,_b;
for (int i = 0; i < pix; i++) {
rgb565_to_rgb888(rgb565[i], &_r, &_g, &_b);
if (_r != _rgb888[0] || _g != _rgb888[1] || _b != _rgb888[2]) {
printf("idx %d - conv fail, %x:%x:%x, rgb565:%x, _rgb888:%x:%x:%x\r\n",
i, _r, _g, _b, rgb565[i], _rgb888[0], _rgb888[1] ,_rgb888[2]);
return -1;
}
_rgb888 += 3;
}
return 0;
}
TEST_CASE("DMA2D_M2M_1D_RGB565_to_RGB888", "[DMA2D]")
{
// Test a 4K pixel 1D buffer (original pixel in RGB565 format, convert to RGB888 format)
const uint32_t item_size = 1024 * 4;
memset(m2m_trans_config, 0, M2M_TRANS_TIMES * sizeof(dma2d_m2m_trans_config_t));
TEST_ESP_OK(dma2d_m2m_init());
dma2d_descriptor_t *tx_link_buffer = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, M2M_TRANS_TIMES, 64, MALLOC_CAP_DEFAULT);
dma2d_descriptor_t *rx_link_buffer = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, M2M_TRANS_TIMES, 64, MALLOC_CAP_DEFAULT);
TEST_ASSERT_NOT_NULL(tx_link_buffer);
TEST_ASSERT_NOT_NULL(rx_link_buffer);
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
tx_dsc[i] = (dma2d_descriptor_t *)((uint32_t)tx_link_buffer + 64 * i);
rx_dsc[i] = (dma2d_descriptor_t *)((uint32_t)rx_link_buffer + 64 * i);
}
uint8_t *prtx;
uint8_t *prrx;
uint8_t *tx_buf = heap_caps_aligned_calloc(64, item_size * 2 * M2M_TRANS_TIMES, sizeof(uint8_t), MALLOC_CAP_SPIRAM);
uint8_t *rx_buf = heap_caps_aligned_calloc(64, item_size * 3 * M2M_TRANS_TIMES, sizeof(uint8_t), MALLOC_CAP_SPIRAM);
TEST_ASSERT_NOT_NULL(tx_buf);
TEST_ASSERT_NOT_NULL(rx_buf);
SemaphoreHandle_t counting_sem = xSemaphoreCreateCounting(M2M_TRANS_TIMES, 0);
dma2d_transfer_ability_t transfer_ability_config = {
.data_burst_length = DMA2D_DATA_BURST_LENGTH_128,
.desc_burst_en = true,
.mb_size = DMA2D_MACRO_BLOCK_SIZE_NONE,
};
dma2d_csc_config_t m2m_dma2d_tx_csc = {
.tx_csc_option = DMA2D_CSC_TX_RGB565_TO_RGB888,
.pre_scramble = DMA2D_SCRAMBLE_ORDER_BYTE2_1_0,
};
// Preparation
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
// Buffer data preparation
prtx = tx_buf + i * item_size * 2;
prrx = rx_buf + i * item_size * 3;
for (int idx = 0; idx < item_size; idx++) {
uint32_t r = (idx * 32 / item_size) & 0x1F;
uint32_t g = (idx * 64 / item_size) & 0x3F;
uint32_t b = (idx * 32 / item_size) & 0x1F;
            // Pack the RGB565 pixel into two bytes (bitwise OR)
            prtx[idx * 2] = (r << 3) | (g >> 3);
            prtx[idx * 2 + 1] = (g << 5) | b;
prrx[idx * 3] = 0;
prrx[idx * 3 + 1] = 0;
prrx[idx * 3 + 2] = 0;
}
// Writeback and invalidate the TX and RX buffers
esp_cache_msync((void *)prtx, item_size * 2, ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE);
esp_cache_msync((void *)prrx, item_size * 3, ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE);
// DMA description preparation
dma2d_link_dscr_init((uint32_t *)tx_dsc[i], NULL, (void *)prtx,
(item_size * 2) >> 14, (item_size * 2) >> 14,
(item_size * 2) & 0x3FFF, (item_size * 2) & 0x3FFF,
1, 0, DMA2D_DESCRIPTOR_PBYTE_1B0_PER_PIXEL,
DMA2D_DESCRIPTOR_BLOCK_RW_MODE_SINGLE, 0, 0);
dma2d_link_dscr_init((uint32_t *)rx_dsc[i], NULL, (void *)prrx,
0, (item_size * 3) >> 14,
0, (item_size * 3) & 0x3FFF,
0, 0, DMA2D_DESCRIPTOR_PBYTE_1B0_PER_PIXEL,
DMA2D_DESCRIPTOR_BLOCK_RW_MODE_SINGLE, 0, 0);
// Writeback the DMA descriptors
esp_cache_msync((void *)tx_dsc[i], 64, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
esp_cache_msync((void *)rx_dsc[i], 64, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
// Construct dma2d_m2m_trans_config_t structure
m2m_trans_config[i].tx_desc_base_addr = (intptr_t)tx_dsc[i];
m2m_trans_config[i].rx_desc_base_addr = (intptr_t)rx_dsc[i];
m2m_trans_config[i].trans_eof_cb = dma2d_m2m_suc_eof_event_cb;
m2m_trans_config[i].user_data = (void *)counting_sem;
m2m_trans_config[i].transfer_ability_config = &transfer_ability_config;
m2m_trans_config[i].tx_csc_config = &m2m_dma2d_tx_csc;
}
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
printf("trans %d\n", i);
TEST_ESP_OK(dma2d_m2m(&m2m_trans_config[i]));
}
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
xSemaphoreTake(counting_sem, portMAX_DELAY);
printf("trans %d done\n", i);
}
printf("All transactions done!\n");
// Check result
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
prtx = tx_buf + i * item_size * 2;
prrx = rx_buf + i * item_size * 3;
TEST_ASSERT_EQUAL(0, rgb565_to_rgb888_and_cmp(prtx, prrx, item_size));
}
free(tx_link_buffer);
free(rx_link_buffer);
free(tx_buf);
free(rx_buf);
vSemaphoreDelete(counting_sem);
TEST_ESP_OK(dma2d_m2m_deinit());
}
TEST_CASE("DMA2D_M2M_2D_basic", "[DMA2D]")
{
// Test a 128 x 128 pixel data block (one byte per pixel - assume A8)
const color_space_pixel_format_t pixel_format = {
.color_space = COLOR_SPACE_ALPHA,
.pixel_format = COLOR_PIXEL_A8,
};
const uint32_t stripe_size = 128; // unit: bytes
memset(m2m_trans_config, 0, M2M_TRANS_TIMES * sizeof(dma2d_m2m_trans_config_t));
TEST_ESP_OK(dma2d_m2m_init());
dma2d_descriptor_t *tx_link_buffer = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, M2M_TRANS_TIMES, 64, MALLOC_CAP_DEFAULT);
dma2d_descriptor_t *rx_link_buffer = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, M2M_TRANS_TIMES, 64, MALLOC_CAP_DEFAULT);
TEST_ASSERT_NOT_NULL(tx_link_buffer);
TEST_ASSERT_NOT_NULL(rx_link_buffer);
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
tx_dsc[i] = (dma2d_descriptor_t *)((uint32_t)tx_link_buffer + 64 * i);
rx_dsc[i] = (dma2d_descriptor_t *)((uint32_t)rx_link_buffer + 64 * i);
}
uint8_t *prtx;
uint8_t *prrx;
uint8_t *tx_buf = heap_caps_aligned_calloc(64, stripe_size * stripe_size * M2M_TRANS_TIMES, sizeof(uint8_t), MALLOC_CAP_SPIRAM);
uint8_t *rx_buf = heap_caps_aligned_calloc(64, stripe_size * stripe_size * M2M_TRANS_TIMES, sizeof(uint8_t), MALLOC_CAP_SPIRAM);
TEST_ASSERT_NOT_NULL(tx_buf);
TEST_ASSERT_NOT_NULL(rx_buf);
SemaphoreHandle_t counting_sem = xSemaphoreCreateCounting(M2M_TRANS_TIMES, 0);
dma2d_transfer_ability_t transfer_ability_config = {
.data_burst_length = DMA2D_DATA_BURST_LENGTH_128,
.desc_burst_en = true,
.mb_size = DMA2D_MACRO_BLOCK_SIZE_NONE,
};
// Preparation
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
// Buffer data preparation
prtx = tx_buf + i * stripe_size * stripe_size;
prrx = rx_buf + i * stripe_size * stripe_size;
for (int idx = 0; idx < stripe_size * stripe_size; idx++) {
prtx[idx] = (i + idx + 0x45) & 0xFF;
prrx[idx] = 0;
}
// Writeback and invalidate the TX and RX buffers
esp_cache_msync((void *)prtx, stripe_size * stripe_size, ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE);
esp_cache_msync((void *)prrx, stripe_size * stripe_size, ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE);
// DMA description preparation
dma2d_link_dscr_init((uint32_t *)tx_dsc[i], NULL, (void *)prtx,
stripe_size, stripe_size,
stripe_size, stripe_size,
1, 1, dma2d_desc_pixel_format_to_pbyte_value(pixel_format), // 1 bytes/pixel
DMA2D_DESCRIPTOR_BLOCK_RW_MODE_SINGLE, 0, 0);
dma2d_link_dscr_init((uint32_t *)rx_dsc[i], NULL, (void *)prrx,
stripe_size, stripe_size,
stripe_size, stripe_size,
0, 1, dma2d_desc_pixel_format_to_pbyte_value(pixel_format), // 1 bytes/pixel
DMA2D_DESCRIPTOR_BLOCK_RW_MODE_SINGLE, 0, 0);
// Writeback the DMA descriptors
esp_cache_msync((void *)tx_dsc[i], 64, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
esp_cache_msync((void *)rx_dsc[i], 64, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
// Construct dma2d_m2m_trans_config_t structure
m2m_trans_config[i].tx_desc_base_addr = (intptr_t)tx_dsc[i];
m2m_trans_config[i].rx_desc_base_addr = (intptr_t)rx_dsc[i];
m2m_trans_config[i].trans_eof_cb = dma2d_m2m_suc_eof_event_cb;
m2m_trans_config[i].user_data = (void *)counting_sem;
m2m_trans_config[i].transfer_ability_config = &transfer_ability_config;
}
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
printf("trans %d\n", i);
TEST_ESP_OK(dma2d_m2m(&m2m_trans_config[i]));
}
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
xSemaphoreTake(counting_sem, portMAX_DELAY);
printf("trans %d done\n", i);
}
printf("All transactions done!\n");
// Check result
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
prtx = tx_buf + i * stripe_size * stripe_size;
prrx = rx_buf + i * stripe_size * stripe_size;
for (int idx = 0; idx < stripe_size * stripe_size; idx++) {
TEST_ASSERT_EQUAL(prtx[idx], prrx[idx]);
TEST_ASSERT_EQUAL(prtx[idx], (i + idx + 0x45) & 0xFF);
}
}
free(tx_link_buffer);
free(rx_link_buffer);
free(tx_buf);
free(rx_buf);
vSemaphoreDelete(counting_sem);
TEST_ESP_OK(dma2d_m2m_deinit());
}
static void rgb888_to_rgb565(uint8_t r, uint8_t g, uint8_t b, uint16_t *rgb565)
{
uint16_t _rgb565 = (b >> 3);
_rgb565 = (_rgb565 << 6) | (g >>2);
_rgb565 = (_rgb565 << 5) | (r >>3);
*rgb565 = _rgb565;
}
static int rgb888_to_rgb565_and_cmp(void *__rgb888, void *__rgb565, int pix)
{
uint8_t *_rgb888 = (uint8_t *)__rgb888;
uint16_t *rgb565 = (uint16_t *)__rgb565;
uint16_t _rgb565 = 0;
uint8_t *rgb888 = _rgb888;
for (int i = 0; i < pix; i++) {
rgb888_to_rgb565(rgb888[0], rgb888[1], rgb888[2], &_rgb565);
if (_rgb565 != rgb565[0]) {
printf("conv fail, r:%x, g:%x, b:%x, rgb565:%x, _rgb565:%x\r\n",
rgb888[0], rgb888[1], rgb888[2], rgb565[0], _rgb565);
return -1;
}
rgb888 += 3;
rgb565++;
}
return 0;
}
TEST_CASE("DMA2D_M2M_2D_RGB888_to_RGB565", "[DMA2D]")
{
// Test a 64 x 64 pixel data block (original pixel in RGB888 format, convert to RGB565 format)
const color_space_pixel_format_t in_pixel_format = {
.color_space = COLOR_SPACE_RGB,
.pixel_format = COLOR_PIXEL_RGB888,
};
const color_space_pixel_format_t out_pixel_format = {
.color_space = COLOR_SPACE_RGB,
.pixel_format = COLOR_PIXEL_RGB565,
};
const uint32_t stripe_pixel_size = 64; // unit: pixel
memset(m2m_trans_config, 0, M2M_TRANS_TIMES * sizeof(dma2d_m2m_trans_config_t));
TEST_ESP_OK(dma2d_m2m_init());
dma2d_descriptor_t *tx_link_buffer = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, M2M_TRANS_TIMES, 64, MALLOC_CAP_DEFAULT);
dma2d_descriptor_t *rx_link_buffer = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, M2M_TRANS_TIMES, 64, MALLOC_CAP_DEFAULT);
TEST_ASSERT_NOT_NULL(tx_link_buffer);
TEST_ASSERT_NOT_NULL(rx_link_buffer);
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
tx_dsc[i] = (dma2d_descriptor_t *)((uint32_t)tx_link_buffer + 64 * i);
rx_dsc[i] = (dma2d_descriptor_t *)((uint32_t)rx_link_buffer + 64 * i);
}
uint8_t *prtx;
uint8_t *prrx;
uint8_t *tx_buf = heap_caps_aligned_calloc(64, stripe_pixel_size * stripe_pixel_size * 3 * M2M_TRANS_TIMES, sizeof(uint8_t), MALLOC_CAP_SPIRAM);
uint8_t *rx_buf = heap_caps_aligned_calloc(64, stripe_pixel_size * stripe_pixel_size * 2 * M2M_TRANS_TIMES, sizeof(uint8_t), MALLOC_CAP_SPIRAM);
TEST_ASSERT_NOT_NULL(tx_buf);
TEST_ASSERT_NOT_NULL(rx_buf);
SemaphoreHandle_t counting_sem = xSemaphoreCreateCounting(M2M_TRANS_TIMES, 0);
dma2d_transfer_ability_t transfer_ability_config = {
.data_burst_length = DMA2D_DATA_BURST_LENGTH_16,
.desc_burst_en = true,
.mb_size = DMA2D_MACRO_BLOCK_SIZE_NONE,
};
dma2d_csc_config_t m2m_dma2d_tx_csc = {
.tx_csc_option = DMA2D_CSC_TX_RGB888_TO_RGB565,
.pre_scramble = DMA2D_SCRAMBLE_ORDER_BYTE2_1_0,
};
// Preparation
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
// Buffer data preparation
prtx = tx_buf + i * stripe_pixel_size * stripe_pixel_size * 3;
prrx = rx_buf + i * stripe_pixel_size * stripe_pixel_size * 2;
for (int idx = 0; idx < stripe_pixel_size * stripe_pixel_size; idx++) {
uint32_t r = (i + idx + 0x5A) & 0xFF;
uint32_t g = (i + idx + 0x4C) & 0xFF;
uint32_t b = (i + idx + 0x9E) & 0xFF;
prtx[idx * 3] = r;
prtx[idx * 3 + 1] = g;
prtx[idx * 3 + 2] = b;
prrx[idx * 2] = 0;
prrx[idx * 2 + 1] = 0;
}
// Writeback and invalidate the TX and RX buffers
esp_cache_msync((void *)prtx, stripe_pixel_size * stripe_pixel_size * 3, ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE);
esp_cache_msync((void *)prrx, stripe_pixel_size * stripe_pixel_size * 2, ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE);
// DMA description preparation
dma2d_link_dscr_init((uint32_t *)tx_dsc[i], NULL, (void *)prtx,
stripe_pixel_size, stripe_pixel_size,
stripe_pixel_size, stripe_pixel_size,
1, 1, dma2d_desc_pixel_format_to_pbyte_value(in_pixel_format),
DMA2D_DESCRIPTOR_BLOCK_RW_MODE_SINGLE, 0, 0);
dma2d_link_dscr_init((uint32_t *)rx_dsc[i], NULL, (void *)prrx,
stripe_pixel_size, stripe_pixel_size,
stripe_pixel_size, stripe_pixel_size,
0, 1, dma2d_desc_pixel_format_to_pbyte_value(out_pixel_format),
DMA2D_DESCRIPTOR_BLOCK_RW_MODE_SINGLE, 0, 0);
// Writeback the DMA descriptors
esp_cache_msync((void *)tx_dsc[i], 64, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
esp_cache_msync((void *)rx_dsc[i], 64, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
// Construct dma2d_m2m_trans_config_t structure
m2m_trans_config[i].tx_desc_base_addr = (intptr_t)tx_dsc[i];
m2m_trans_config[i].rx_desc_base_addr = (intptr_t)rx_dsc[i];
m2m_trans_config[i].trans_eof_cb = dma2d_m2m_suc_eof_event_cb;
m2m_trans_config[i].user_data = (void *)counting_sem;
m2m_trans_config[i].transfer_ability_config = &transfer_ability_config;
m2m_trans_config[i].tx_csc_config = &m2m_dma2d_tx_csc;
}
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
printf("trans %d\n", i);
TEST_ESP_OK(dma2d_m2m(&m2m_trans_config[i]));
}
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
xSemaphoreTake(counting_sem, portMAX_DELAY);
printf("trans %d done\n", i);
}
printf("All transactions done!\n");
// Check result
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
prtx = tx_buf + i * stripe_pixel_size * stripe_pixel_size * 3;
prrx = rx_buf + i * stripe_pixel_size * stripe_pixel_size * 2;
TEST_ASSERT_EQUAL(0, rgb888_to_rgb565_and_cmp(prtx, prrx, stripe_pixel_size * stripe_pixel_size));
}
free(tx_link_buffer);
free(rx_link_buffer);
free(tx_buf);
free(rx_buf);
vSemaphoreDelete(counting_sem);
TEST_ESP_OK(dma2d_m2m_deinit());
}
TEST_CASE("DMA2D_M2M_2D_window", "[DMA2D]")
{
    // Test 2D memcpy to a 2 x 2 block at (2, 4) in an 8 x 8 picture (pixel in RGB565 format)
const color_space_pixel_format_t pixel_format = {
.color_space = COLOR_SPACE_RGB,
.pixel_format = COLOR_PIXEL_RGB565,
};
const uint32_t va = 8, ha = 8; // Define picture height and width (unit: pixel)
const uint32_t vb = 2, hb = 2; // Define block height and width (unit: pixel)
const uint32_t x_offset = 2, y_offset = 4; // Define block location in the picture (unit: pixel)
memset(m2m_trans_config, 0, M2M_TRANS_TIMES * sizeof(dma2d_m2m_trans_config_t));
TEST_ESP_OK(dma2d_m2m_init());
dma2d_descriptor_t *tx_link_buffer = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, M2M_TRANS_TIMES, 64, MALLOC_CAP_DEFAULT);
dma2d_descriptor_t *rx_link_buffer = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, M2M_TRANS_TIMES, 64, MALLOC_CAP_DEFAULT);
TEST_ASSERT_NOT_NULL(tx_link_buffer);
TEST_ASSERT_NOT_NULL(rx_link_buffer);
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
tx_dsc[i] = (dma2d_descriptor_t *)((uint32_t)tx_link_buffer + 64 * i);
rx_dsc[i] = (dma2d_descriptor_t *)((uint32_t)rx_link_buffer + 64 * i);
}
uint8_t *prtx;
uint8_t *prrx;
uint8_t *tx_buf = heap_caps_aligned_calloc(64, 64 * M2M_TRANS_TIMES, sizeof(uint8_t), MALLOC_CAP_INTERNAL);
uint8_t *rx_buf = heap_caps_aligned_calloc(64, va * ha * 2 * M2M_TRANS_TIMES, sizeof(uint8_t), MALLOC_CAP_INTERNAL);
TEST_ASSERT_NOT_NULL(tx_buf);
TEST_ASSERT_NOT_NULL(rx_buf);
SemaphoreHandle_t counting_sem = xSemaphoreCreateCounting(M2M_TRANS_TIMES, 0);
dma2d_transfer_ability_t transfer_ability_config = {
.data_burst_length = DMA2D_DATA_BURST_LENGTH_128,
.desc_burst_en = true,
.mb_size = DMA2D_MACRO_BLOCK_SIZE_NONE,
};
// Preparation
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
// Buffer data preparation
prtx = tx_buf + i * 64;
prrx = rx_buf + i * va * ha * 2;
for (int idx = 0; idx < vb * hb; idx++) {
prtx[idx * 2] = 0x55 + idx + i;
prtx[idx * 2 + 1] = 0xAA + idx + i;
}
for (int idx = 0; idx < va * ha; idx++) {
prrx[idx * 2] = 0xFF;
prrx[idx * 2 + 1] = 0xFF;
}
// Writeback and invalidate the TX and RX buffers
esp_cache_msync((void *)prtx, 64, ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE);
esp_cache_msync((void *)prrx, va * ha * 2, ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE);
// DMA description preparation
dma2d_link_dscr_init((uint32_t *)tx_dsc[i], NULL, (void *)prtx,
hb, vb,
hb, vb,
1, 1, dma2d_desc_pixel_format_to_pbyte_value(pixel_format),
DMA2D_DESCRIPTOR_BLOCK_RW_MODE_SINGLE, 0, 0);
dma2d_link_dscr_init((uint32_t *)rx_dsc[i], NULL, (void *)prrx,
ha, va,
hb, vb,
0, 1, dma2d_desc_pixel_format_to_pbyte_value(pixel_format),
DMA2D_DESCRIPTOR_BLOCK_RW_MODE_SINGLE, x_offset, y_offset);
// Writeback the DMA descriptors
esp_cache_msync((void *)tx_dsc[i], 64, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
esp_cache_msync((void *)rx_dsc[i], 64, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
// Construct dma2d_m2m_trans_config_t structure
m2m_trans_config[i].tx_desc_base_addr = (intptr_t)tx_dsc[i];
m2m_trans_config[i].rx_desc_base_addr = (intptr_t)rx_dsc[i];
m2m_trans_config[i].trans_eof_cb = dma2d_m2m_suc_eof_event_cb;
m2m_trans_config[i].user_data = (void *)counting_sem;
m2m_trans_config[i].transfer_ability_config = &transfer_ability_config;
}
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
printf("trans %d\n", i);
TEST_ESP_OK(dma2d_m2m(&m2m_trans_config[i]));
}
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
xSemaphoreTake(counting_sem, portMAX_DELAY);
printf("trans %d done\n", i);
}
printf("All transactions done!\n");
// Print the picture and check result
for (int i = 0; i < M2M_TRANS_TIMES; i++) {
prtx = tx_buf + i * 64;
prrx = rx_buf + i * va * ha * 2;
printf("pic:\n");
for (int idx = 0; idx < va * ha; idx++) {
printf("%02X%02X ", prrx[idx * 2], prrx[idx * 2 + 1]);
if (idx % ha == (ha - 1)) {
printf("\n");
}
bool pixel_in_window = false;
for (int window_y = 0; window_y < vb; window_y++) {
if (idx >= (ha * (y_offset + window_y) + x_offset) && idx < (ha * (y_offset + window_y) + x_offset + hb)) {
uint32_t window_x = idx - ha * (y_offset + window_y) - x_offset;
TEST_ASSERT_EQUAL(prtx[(window_y * hb + window_x) * 2], prrx[idx * 2]);
TEST_ASSERT_EQUAL(prtx[(window_y * hb + window_x) * 2 + 1], prrx[idx * 2 + 1]);
pixel_in_window = true;
break;
}
}
if (!pixel_in_window) {
                TEST_ASSERT_EQUAL(0xFF, prrx[idx * 2]);
                TEST_ASSERT_EQUAL(0xFF, prrx[idx * 2 + 1]);
}
}
}
free(tx_link_buffer);
free(rx_link_buffer);
free(tx_buf);
free(rx_buf);
vSemaphoreDelete(counting_sem);
TEST_ESP_OK(dma2d_m2m_deinit());
}


@ -0,0 +1,18 @@
# SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: CC0-1.0
import pytest
from pytest_embedded import Dut
@pytest.mark.esp32p4
@pytest.mark.generic
@pytest.mark.parametrize(
'config',
[
'release',
],
indirect=True,
)
def test_dma2d(dut: Dut) -> None:
dut.run_all_single_board_cases()


@ -0,0 +1,6 @@
# set compiler optimization level
CONFIG_COMPILER_OPTIMIZATION_SIZE=y
CONFIG_BOOTLOADER_COMPILER_OPTIMIZATION_SIZE=y
# we can silence the assertions to save the binary footprint
CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_SILENT=y


@ -0,0 +1,3 @@
CONFIG_FREERTOS_HZ=1000
CONFIG_ESP_TASK_WDT_EN=n
CONFIG_IDF_EXPERIMENTAL_FEATURES=y


@ -0,0 +1,3 @@
CONFIG_SPIRAM=y
CONFIG_SPIRAM_MODE_HEX=y
CONFIG_SPIRAM_SPEED_200M=y


@ -41,6 +41,7 @@ if(NOT CONFIG_APP_BUILD_TYPE_PURE_RAM_APP)
endif() endif()
if(NOT BOOTLOADER_BUILD) if(NOT BOOTLOADER_BUILD)
list(APPEND srcs "color_hal.c")
if(NOT CONFIG_APP_BUILD_TYPE_PURE_RAM_APP) if(NOT CONFIG_APP_BUILD_TYPE_PURE_RAM_APP)
if(CONFIG_SOC_SPI_FLASH_SUPPORTED) if(CONFIG_SOC_SPI_FLASH_SUPPORTED)
@ -127,6 +128,10 @@ if(NOT BOOTLOADER_BUILD)
list(APPEND srcs "dw_gdma_hal.c") list(APPEND srcs "dw_gdma_hal.c")
endif() endif()
if(CONFIG_SOC_DMA2D_SUPPORTED)
list(APPEND srcs "dma2d_hal.c")
endif()
if(CONFIG_SOC_I2S_SUPPORTED) if(CONFIG_SOC_I2S_SUPPORTED)
list(APPEND srcs "i2s_hal.c") list(APPEND srcs "i2s_hal.c")
endif() endif()


@ -0,0 +1,42 @@
/*
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdbool.h>
#include "hal/color_hal.h"
#include "hal/assert.h"
uint32_t color_hal_pixel_format_get_bit_depth(color_space_pixel_format_t format)
{
switch (format.color_type_id) {
case COLOR_TYPE_ID(COLOR_SPACE_GRAY, COLOR_PIXEL_GRAY4):
case COLOR_TYPE_ID(COLOR_SPACE_ALPHA, COLOR_PIXEL_A4):
case COLOR_TYPE_ID(COLOR_SPACE_CLUT, COLOR_PIXEL_L4):
return 4;
case COLOR_TYPE_ID(COLOR_SPACE_RAW, COLOR_PIXEL_RAW8):
case COLOR_TYPE_ID(COLOR_SPACE_GRAY, COLOR_PIXEL_GRAY8):
case COLOR_TYPE_ID(COLOR_SPACE_ALPHA, COLOR_PIXEL_A8):
case COLOR_TYPE_ID(COLOR_SPACE_CLUT, COLOR_PIXEL_L8):
return 8;
case COLOR_TYPE_ID(COLOR_SPACE_RAW, COLOR_PIXEL_RAW10):
return 10;
case COLOR_TYPE_ID(COLOR_SPACE_RAW, COLOR_PIXEL_RAW12):
case COLOR_TYPE_ID(COLOR_SPACE_YUV, COLOR_PIXEL_YUV420):
case COLOR_TYPE_ID(COLOR_SPACE_YUV, COLOR_PIXEL_YUV411):
return 12;
case COLOR_TYPE_ID(COLOR_SPACE_RGB, COLOR_PIXEL_RGB565):
case COLOR_TYPE_ID(COLOR_SPACE_YUV, COLOR_PIXEL_YUV422):
return 16;
case COLOR_TYPE_ID(COLOR_SPACE_RGB, COLOR_PIXEL_RGB888):
case COLOR_TYPE_ID(COLOR_SPACE_YUV, COLOR_PIXEL_YUV444):
return 24;
case COLOR_TYPE_ID(COLOR_SPACE_ARGB, COLOR_PIXEL_ARGB8888):
return 32;
default:
// Unknown color space pixel format, unknown bit depth
HAL_ASSERT(false);
return 0;
}
}
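/*
 * A usage sketch (illustration only): deriving a frame buffer size from the bit depth.
 * The 320 x 240 dimensions are assumed example values.
 *
 *     color_space_pixel_format_t fmt = {
 *         .color_space = COLOR_SPACE_RGB,
 *         .pixel_format = COLOR_PIXEL_RGB565,
 *     };
 *     size_t buf_size = 320 * 240 * color_hal_pixel_format_get_bit_depth(fmt) / 8;   // 153600 bytes
 */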


@ -0,0 +1,29 @@
/*
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "hal/dma2d_hal.h"
#include "hal/dma2d_ll.h"
void dma2d_hal_init(dma2d_hal_context_t *hal, int group_id)
{
hal->dev = DMA2D_LL_GET_HW(group_id);
}
void dma2d_hal_tx_reset_channel(dma2d_hal_context_t *hal, uint32_t channel)
{
    // Stop the channel, busy-wait until the hardware reports the reset is safe to perform, then reset and re-enable it
    dma2d_ll_tx_disable_cmd(hal->dev, channel, true);
    while (!dma2d_ll_tx_is_reset_avail(hal->dev, channel));
    dma2d_ll_tx_reset_channel(hal->dev, channel);
    dma2d_ll_tx_disable_cmd(hal->dev, channel, false);
}
void dma2d_hal_rx_reset_channel(dma2d_hal_context_t *hal, uint32_t channel)
{
    // Same sequence for the RX direction: disable, wait for reset availability, reset, re-enable
    dma2d_ll_rx_disable_cmd(hal->dev, channel, true);
    while (!dma2d_ll_rx_is_reset_avail(hal->dev, channel));
    dma2d_ll_rx_reset_channel(hal->dev, channel);
    dma2d_ll_rx_disable_cmd(hal->dev, channel, false);
}


@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2020-2024 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
@ -242,9 +242,9 @@ static inline void gdma_ll_rx_enable_auto_return(gdma_dev_t *dev, uint32_t chann
} }
/** /**
* @brief Check if DMA RX FSM is in IDLE state * @brief Check if DMA RX descriptor FSM is in IDLE state
*/ */
static inline bool gdma_ll_rx_is_fsm_idle(gdma_dev_t *dev, uint32_t channel) static inline bool gdma_ll_rx_is_desc_fsm_idle(gdma_dev_t *dev, uint32_t channel)
{ {
return dev->channel[channel].in.in_link.park; return dev->channel[channel].in.in_link.park;
} }
@ -468,9 +468,9 @@ static inline void gdma_ll_tx_restart(gdma_dev_t *dev, uint32_t channel)
} }
/** /**
* @brief Check if DMA TX FSM is in IDLE state * @brief Check if DMA TX descriptor FSM is in IDLE state
*/ */
static inline bool gdma_ll_tx_is_fsm_idle(gdma_dev_t *dev, uint32_t channel) static inline bool gdma_ll_tx_is_desc_fsm_idle(gdma_dev_t *dev, uint32_t channel)
{ {
return dev->channel[channel].out.out_link.park; return dev->channel[channel].out.out_link.park;
} }


@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2020-2024 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
@ -242,9 +242,9 @@ static inline void gdma_ll_rx_enable_auto_return(gdma_dev_t *dev, uint32_t chann
} }
/** /**
* @brief Check if DMA RX FSM is in IDLE state * @brief Check if DMA RX descriptor FSM is in IDLE state
*/ */
static inline bool gdma_ll_rx_is_fsm_idle(gdma_dev_t *dev, uint32_t channel) static inline bool gdma_ll_rx_is_desc_fsm_idle(gdma_dev_t *dev, uint32_t channel)
{ {
return dev->channel[channel].in.in_link.park; return dev->channel[channel].in.in_link.park;
} }
@ -468,9 +468,9 @@ static inline void gdma_ll_tx_restart(gdma_dev_t *dev, uint32_t channel)
} }
/** /**
* @brief Check if DMA TX FSM is in IDLE state * @brief Check if DMA TX descriptor FSM is in IDLE state
*/ */
static inline bool gdma_ll_tx_is_fsm_idle(gdma_dev_t *dev, uint32_t channel) static inline bool gdma_ll_tx_is_desc_fsm_idle(gdma_dev_t *dev, uint32_t channel)
{ {
return dev->channel[channel].out.out_link.park; return dev->channel[channel].out.out_link.park;
} }


@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
@ -278,9 +278,9 @@ static inline void gdma_ll_rx_enable_auto_return(gdma_dev_t *dev, uint32_t chann
} }
/** /**
* @brief Check if DMA RX FSM is in IDLE state * @brief Check if DMA RX descriptor FSM is in IDLE state
*/ */
static inline bool gdma_ll_rx_is_fsm_idle(gdma_dev_t *dev, uint32_t channel) static inline bool gdma_ll_rx_is_desc_fsm_idle(gdma_dev_t *dev, uint32_t channel)
{ {
return dev->channel[channel].in.in_link.inlink_park; return dev->channel[channel].in.in_link.inlink_park;
} }
@ -514,9 +514,9 @@ static inline void gdma_ll_tx_restart(gdma_dev_t *dev, uint32_t channel)
} }
/** /**
* @brief Check if DMA TX FSM is in IDLE state * @brief Check if DMA TX descriptor FSM is in IDLE state
*/ */
static inline bool gdma_ll_tx_is_fsm_idle(gdma_dev_t *dev, uint32_t channel) static inline bool gdma_ll_tx_is_desc_fsm_idle(gdma_dev_t *dev, uint32_t channel)
{ {
return dev->channel[channel].out.out_link.outlink_park; return dev->channel[channel].out.out_link.outlink_park;
} }


@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
@ -278,9 +278,9 @@ static inline void gdma_ll_rx_enable_auto_return(gdma_dev_t *dev, uint32_t chann
} }
/** /**
* @brief Check if DMA RX FSM is in IDLE state * @brief Check if DMA RX descriptor FSM is in IDLE state
*/ */
static inline bool gdma_ll_rx_is_fsm_idle(gdma_dev_t *dev, uint32_t channel) static inline bool gdma_ll_rx_is_desc_fsm_idle(gdma_dev_t *dev, uint32_t channel)
{ {
return dev->channel[channel].in.in_link.inlink_park; return dev->channel[channel].in.in_link.inlink_park;
} }
@ -514,9 +514,9 @@ static inline void gdma_ll_tx_restart(gdma_dev_t *dev, uint32_t channel)
} }
/** /**
* @brief Check if DMA TX FSM is in IDLE state * @brief Check if DMA TX descriptor FSM is in IDLE state
*/ */
static inline bool gdma_ll_tx_is_fsm_idle(gdma_dev_t *dev, uint32_t channel) static inline bool gdma_ll_tx_is_desc_fsm_idle(gdma_dev_t *dev, uint32_t channel)
{ {
return dev->channel[channel].out.out_link.outlink_park; return dev->channel[channel].out.out_link.outlink_park;
} }


@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
@ -215,9 +215,9 @@ static inline void ahb_dma_ll_rx_enable_auto_return(ahb_dma_dev_t *dev, uint32_t
} }
/** /**
* @brief Check if DMA RX FSM is in IDLE state * @brief Check if DMA RX descriptor FSM is in IDLE state
*/ */
static inline bool ahb_dma_ll_rx_is_fsm_idle(ahb_dma_dev_t *dev, uint32_t channel) static inline bool ahb_dma_ll_rx_is_desc_fsm_idle(ahb_dma_dev_t *dev, uint32_t channel)
{ {
return dev->channel[channel].in.in_link.inlink_park_chn; return dev->channel[channel].in.in_link.inlink_park_chn;
} }
@ -451,9 +451,9 @@ static inline void ahb_dma_ll_tx_restart(ahb_dma_dev_t *dev, uint32_t channel)
} }
/** /**
* @brief Check if DMA TX FSM is in IDLE state * @brief Check if DMA TX descriptor FSM is in IDLE state
*/ */
static inline bool ahb_dma_ll_tx_is_fsm_idle(ahb_dma_dev_t *dev, uint32_t channel) static inline bool ahb_dma_ll_tx_is_desc_fsm_idle(ahb_dma_dev_t *dev, uint32_t channel)
{ {
return dev->channel[channel].out.out_link.outlink_park_chn; return dev->channel[channel].out.out_link.outlink_park_chn;
} }


@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
@ -189,9 +189,9 @@ static inline void axi_dma_ll_rx_enable_auto_return(axi_dma_dev_t *dev, uint32_t
} }
/** /**
* @brief Check if DMA RX FSM is in IDLE state * @brief Check if DMA RX descriptor FSM is in IDLE state
*/ */
static inline bool axi_dma_ll_rx_is_fsm_idle(axi_dma_dev_t *dev, uint32_t channel) static inline bool axi_dma_ll_rx_is_desc_fsm_idle(axi_dma_dev_t *dev, uint32_t channel)
{ {
return dev->in[channel].conf.in_link1.inlink_park_chn; return dev->in[channel].conf.in_link1.inlink_park_chn;
} }
@ -397,9 +397,9 @@ static inline void axi_dma_ll_tx_restart(axi_dma_dev_t *dev, uint32_t channel)
} }
/** /**
* @brief Check if DMA TX FSM is in IDLE state * @brief Check if DMA TX descriptor FSM is in IDLE state
*/ */
static inline bool axi_dma_ll_tx_is_fsm_idle(axi_dma_dev_t *dev, uint32_t channel) static inline bool axi_dma_ll_tx_is_desc_fsm_idle(axi_dma_dev_t *dev, uint32_t channel)
{ {
return dev->out[channel].conf.out_link1.outlink_park_chn; return dev->out[channel].conf.out_link1.outlink_park_chn;
} }


@ -60,8 +60,6 @@ static inline uint32_t periph_ll_get_rst_en_mask(periph_module_t periph, bool en
return HP_SYS_CLKRST_REG_RST_EN_PVT_TOP; return HP_SYS_CLKRST_REG_RST_EN_PVT_TOP;
case PERIPH_ISP_MODULE: case PERIPH_ISP_MODULE:
return HP_SYS_CLKRST_REG_RST_EN_ISP; return HP_SYS_CLKRST_REG_RST_EN_ISP;
case PERIPH_DMA2D_MODULE:
return HP_SYS_CLKRST_REG_RST_EN_DMA2D;
case PERIPH_UHCI_MODULE: case PERIPH_UHCI_MODULE:
return HP_SYS_CLKRST_REG_RST_EN_UHCI; return HP_SYS_CLKRST_REG_RST_EN_UHCI;
case PERIPH_I3C_MODULE: case PERIPH_I3C_MODULE:
@ -141,7 +139,6 @@ static inline uint32_t periph_ll_get_rst_en_reg(periph_module_t periph)
switch (periph) { switch (periph) {
case PERIPH_PVT_MODULE: case PERIPH_PVT_MODULE:
case PERIPH_ISP_MODULE: case PERIPH_ISP_MODULE:
case PERIPH_DMA2D_MODULE:
return HP_SYS_CLKRST_HP_RST_EN0_REG; return HP_SYS_CLKRST_HP_RST_EN0_REG;
case PERIPH_UHCI_MODULE: case PERIPH_UHCI_MODULE:
case PERIPH_I3C_MODULE: case PERIPH_I3C_MODULE:

File diff suppressed because it is too large

View File

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2020-2024 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
@ -297,9 +297,9 @@ static inline void gdma_ll_rx_enable_auto_return(gdma_dev_t *dev, uint32_t chann
} }
/** /**
* @brief Check if DMA RX FSM is in IDLE state * @brief Check if DMA RX descriptor FSM is in IDLE state
*/ */
static inline bool gdma_ll_rx_is_fsm_idle(gdma_dev_t *dev, uint32_t channel) static inline bool gdma_ll_rx_is_desc_fsm_idle(gdma_dev_t *dev, uint32_t channel)
{ {
return dev->channel[channel].in.link.park; return dev->channel[channel].in.link.park;
} }
@ -565,9 +565,9 @@ static inline void gdma_ll_tx_restart(gdma_dev_t *dev, uint32_t channel)
} }
/** /**
* @brief Check if DMA TX FSM is in IDLE state * @brief Check if DMA TX descriptor FSM is in IDLE state
*/ */
static inline bool gdma_ll_tx_is_fsm_idle(gdma_dev_t *dev, uint32_t channel) static inline bool gdma_ll_tx_is_desc_fsm_idle(gdma_dev_t *dev, uint32_t channel)
{ {
return dev->channel[channel].out.link.park; return dev->channel[channel].out.link.park;
} }

View File

@ -0,0 +1,27 @@
/*
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdint.h>
#include "hal/color_types.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Get the bit depth (bits/pixel) of each color space pixel format
*
* @param color_type_id Value constructed in color_space_pixel_format_t struct
*
* @return Number of bits per pixel
*/
uint32_t color_hal_pixel_format_get_bit_depth(color_space_pixel_format_t color_type_id);
#ifdef __cplusplus
}
#endif
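
A minimal usage sketch, relying only on the helper declared above (the function name example_bytes_per_line and the line-stride use case are the editor's, not part of this commit):

#include "hal/color_hal.h"

/* Derive the byte stride of one line of pixels from the format's bit depth. */
static uint32_t example_bytes_per_line(color_space_pixel_format_t fmt, uint32_t width_px)
{
    uint32_t bits_per_pixel = color_hal_pixel_format_get_bit_depth(fmt);
    return (width_px * bits_per_pixel + 7) / 8; // round up to whole bytes
}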

View File

@ -0,0 +1,56 @@
/*
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/*******************************************************************************
* NOTICE
 * The HAL is not a public API, do not use it in application code.
 * See soc/README.md for more details.
******************************************************************************/
#pragma once
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef struct dma2d_dev_t *dma2d_soc_handle_t; // 2D-DMA SOC layer handle
/**
* Context that should be maintained by both the driver and the HAL
*/
typedef struct {
dma2d_soc_handle_t dev; // 2D-DMA SOC layer handle (i.e. register base address)
} dma2d_hal_context_t;
/**
 * @brief Init the 2D-DMA HAL. This function should be called before any other HAL layer functions
*
* @param hal Context of the HAL layer
* @param group_id The 2D-DMA group number
*/
void dma2d_hal_init(dma2d_hal_context_t *hal, int group_id);
/**
* @brief Reset 2D-DMA TX channel
*
* @param hal Context of the HAL layer
* @param channel TX channel ID
*/
void dma2d_hal_tx_reset_channel(dma2d_hal_context_t *hal, uint32_t channel);
/**
* @brief Reset 2D-DMA RX channel
*
* @param hal Context of the HAL layer
* @param channel RX channel ID
*/
void dma2d_hal_rx_reset_channel(dma2d_hal_context_t *hal, uint32_t channel);
#ifdef __cplusplus
}
#endif
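
A hedged bring-up sketch, assuming one HAL context per group and channel 0 on each direction (names prefixed example_ are the editor's):

#include "hal/dma2d_hal.h"

static dma2d_hal_context_t s_dma2d_hal;

static void example_dma2d_hal_bringup(void)
{
    dma2d_hal_init(&s_dma2d_hal, 0);              // bind the HAL context to group 0
    dma2d_hal_tx_reset_channel(&s_dma2d_hal, 0);  // reset TX channel 0 before first use
    dma2d_hal_rx_reset_channel(&s_dma2d_hal, 0);  // reset RX channel 0 before first use
}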

View File

@ -0,0 +1,252 @@
/*
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdint.h>
#include "esp_assert.h"
#include "hal/assert.h"
#include "hal/color_hal.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Type of 2D-DMA descriptor
*/
typedef struct dma2d_descriptor_align8_s dma2d_descriptor_align8_t;
struct dma2d_descriptor_align8_s {
struct {
uint32_t vb_size : 14; /*!< Vertical height of the block, unit: pixel
When the dma2d_en field is 0, this field is the lower 14 bits of the buffer memory space size, unit: byte */
uint32_t hb_length : 14; /*!< Horizontal width of the block, unit: pixel
When the dma2d_en field is 0, this field is the lower 14 bits of the buffer length, unit: byte
For data reception, this field is filled by the DMA after the transfer finishes */
uint32_t err_eof : 1; /*!< Whether the received buffer contains errors
For data transfer, this bit is fixed to 0 */
uint32_t dma2d_en : 1; /*!< Whether to enable 2D functionality */
uint32_t suc_eof : 1; /*!< Whether the descriptor is the last one in the link */
uint32_t owner : 1; /*!< Who is allowed to access the buffer that this descriptor points to, select DMA2D_DESCRIPTOR_BUFFER_OWNER_CPU or DMA2D_DESCRIPTOR_BUFFER_OWNER_DMA
When owner is set to DMA, the DMA will clear this bit after it finishes with the descriptor
For data transfer, the bit won't be cleared unless DMA2D_OUT_AUTO_WRBACK is enabled */
}; /*!< Descriptor Word 0 */
struct {
uint32_t va_size : 14; /*!< Vertical height of the picture, unit: pixel
When the dma2d_en field is 0, this field is the upper 14 bits of the buffer memory space size, unit: byte */
uint32_t ha_length : 14; /*!< Horizontal width of the picture, unit: pixel
When the dma2d_en field is 0, this field is the upper 14 bits of the buffer length, unit: byte
For data reception, this field is filled by the DMA after the transfer finishes */
uint32_t pbyte : 4; /*!< Number of bytes per pixel (make use of dma2d_desc_pixel_format_to_pbyte_value to get the value)
When dma2d_en field is 0, this field has no use */
}; /*!< Descriptor Word 1 */
struct {
uint32_t y : 14; /*!< The y-coordinate value of the hb * vb block, unit: pixel
When dma2d_en field is 0, this field has no use */
uint32_t x : 14; /*!< The x-coordinate value of the hb * vb block, unit: pixel
When dma2d_en field is 0, this field has no use */
uint32_t mode : 1; /*!< Data block read/write mode, select DMA2D_DESCRIPTOR_BLOCK_RW_MODE_SINGLE or DMA2D_DESCRIPTOR_BLOCK_RW_MODE_MULTIPLE
When dma2d_en field is 0, this field must be 0 (i.e. DMA2D_DESCRIPTOR_BLOCK_RW_MODE_SINGLE mode) */
uint32_t reserved29 : 3;
}; /*!< Descriptor Word 2 */
void *buffer; /*!< Pointer to the buffer (of ha * va picture)
For RX buffers, the buffer address and size must be 4-byte aligned (PSRAM and AXI bus restriction; otherwise, other data could be overwritten or the buffer itself corrupted) */
dma2d_descriptor_align8_t *next; /*!< Pointer to the next descriptor (set to NULL if the descriptor is the last one, i.e. suc_eof = 1) */
} __attribute__((aligned(8)));
ESP_STATIC_ASSERT(sizeof(dma2d_descriptor_align8_t) == 24, "dma2d_descriptor_align8_t should occupy 24 bytes in memory");
// 2D-DMA descriptor requires 8-byte alignment
typedef dma2d_descriptor_align8_t dma2d_descriptor_t;
#define DMA2D_DESCRIPTOR_BUFFER_OWNER_CPU (0) /*!< 2D-DMA buffer is allowed to be accessed by CPU */
#define DMA2D_DESCRIPTOR_BUFFER_OWNER_DMA (1) /*!< 2D-DMA buffer is allowed to be accessed by 2D-DMA engine */
#define DMA2D_DESCRIPTOR_BLOCK_RW_MODE_SINGLE (0) /*!< 2D-DMA data block single R/W mode, only r/w one hb * vb block */
#define DMA2D_DESCRIPTOR_BLOCK_RW_MODE_MULTIPLE (1) /*!< 2D-DMA data block multiple R/W mode, continues r/w hb * vb blocks until the ha * va picture is done */
#define DMA2D_DESCRIPTOR_PBYTE_0B5_PER_PIXEL (0) /*!< 2D-DMA descriptor pbyte value when 0.5 bytes/pixel */
#define DMA2D_DESCRIPTOR_PBYTE_1B0_PER_PIXEL (1) /*!< 2D-DMA descriptor pbyte value when 1 byte/pixel */
#define DMA2D_DESCRIPTOR_PBYTE_1B5_PER_PIXEL (2) /*!< 2D-DMA descriptor pbyte value when 1.5 bytes/pixel */
#define DMA2D_DESCRIPTOR_PBYTE_2B0_PER_PIXEL (3) /*!< 2D-DMA descriptor pbyte value when 2 bytes/pixel */
#define DMA2D_DESCRIPTOR_PBYTE_3B0_PER_PIXEL (4) /*!< 2D-DMA descriptor pbyte value when 3 bytes/pixel */
#define DMA2D_DESCRIPTOR_PBYTE_4B0_PER_PIXEL (5) /*!< 2D-DMA descriptor pbyte value when 4 bytes/pixel */
// Helper function to convert pixel format to 2D-DMA descriptor pbyte value
static inline uint32_t dma2d_desc_pixel_format_to_pbyte_value(color_space_pixel_format_t pixel_format)
{
switch (color_hal_pixel_format_get_bit_depth(pixel_format)) {
case 4:
return DMA2D_DESCRIPTOR_PBYTE_0B5_PER_PIXEL;
case 8:
return DMA2D_DESCRIPTOR_PBYTE_1B0_PER_PIXEL;
case 12:
return DMA2D_DESCRIPTOR_PBYTE_1B5_PER_PIXEL;
case 16:
return DMA2D_DESCRIPTOR_PBYTE_2B0_PER_PIXEL;
case 24:
return DMA2D_DESCRIPTOR_PBYTE_3B0_PER_PIXEL;
case 32:
return DMA2D_DESCRIPTOR_PBYTE_4B0_PER_PIXEL;
default:
// Unsupported bit depth
abort();
}
}
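
As a concrete illustration of the descriptor fields and the pbyte helper above, a hedged sketch (the 100 x 100 geometry and the function name are the editor's; 8-byte descriptor alignment and buffer alignment remain the caller's responsibility):

/* Fill one TX descriptor describing a full 100 x 100 pixel picture. */
static void example_fill_tx_desc(dma2d_descriptor_t *desc, void *buf,
                                 color_space_pixel_format_t fmt)
{
    desc->dma2d_en  = 1;                                     // 2D mode
    desc->mode      = DMA2D_DESCRIPTOR_BLOCK_RW_MODE_SINGLE; // one hb * vb block
    desc->vb_size   = 100;                                   // block height, pixels
    desc->hb_length = 100;                                   // block width, pixels
    desc->va_size   = 100;                                   // picture height, pixels
    desc->ha_length = 100;                                   // picture width, pixels
    desc->x         = 0;                                     // block origin
    desc->y         = 0;
    desc->pbyte     = dma2d_desc_pixel_format_to_pbyte_value(fmt);
    desc->err_eof   = 0;
    desc->suc_eof   = 1;                                     // last descriptor in the link
    desc->owner     = DMA2D_DESCRIPTOR_BUFFER_OWNER_DMA;
    desc->buffer    = buf;
    desc->next      = NULL;
}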
/**
* @brief Enumeration of peripherals which have the 2D-DMA capability
*/
typedef enum {
DMA2D_TRIG_PERIPH_M2M, /*!< 2D-DMA trigger peripheral: M2M */
DMA2D_TRIG_PERIPH_JPEG_ENCODER, /*!< 2D-DMA trigger peripheral: JPEG Encoder */
DMA2D_TRIG_PERIPH_JPEG_DECODER, /*!< 2D-DMA trigger peripheral: JPEG Decoder */
DMA2D_TRIG_PERIPH_PPA_SR, /*!< 2D-DMA trigger peripheral: PPA SR engine */
DMA2D_TRIG_PERIPH_PPA_BLEND, /*!< 2D-DMA trigger peripheral: PPA Blending engine */
} dma2d_trigger_peripheral_t;
/**
* @brief Enumeration of 2D-DMA channel direction
*/
typedef enum {
DMA2D_CHANNEL_DIRECTION_TX, /*!< 2D-DMA channel direction: TX */
DMA2D_CHANNEL_DIRECTION_RX, /*!< 2D-DMA channel direction: RX */
} dma2d_channel_direction_t;
/**
* @brief Enumeration of 2D-DMA data burst length options
*/
typedef enum {
DMA2D_DATA_BURST_LENGTH_1, /*!< 2D-DMA data burst length: 1 byte */
DMA2D_DATA_BURST_LENGTH_16, /*!< 2D-DMA data burst length: 16 bytes */
DMA2D_DATA_BURST_LENGTH_32, /*!< 2D-DMA data burst length: 32 bytes */
DMA2D_DATA_BURST_LENGTH_64, /*!< 2D-DMA data burst length: 64 bytes */
DMA2D_DATA_BURST_LENGTH_128, /*!< 2D-DMA data burst length: 128 bytes */
DMA2D_DATA_BURST_LENGTH_INVALID, /*!< Invalid 2D-DMA data burst length */
} dma2d_data_burst_length_t;
/**
* @brief Enumeration of 2D-DMA macro block size options
* Only useful in DMA2D_DESCRIPTOR_BLOCK_RW_MODE_MULTIPLE mode (dma2d_en = 1, mode = 1)
 * Descriptor vb and hb fields have to be multiples of the macro block dimensions
*/
typedef enum {
DMA2D_MACRO_BLOCK_SIZE_NONE, /*!< 2D-DMA no macro block */
DMA2D_MACRO_BLOCK_SIZE_8_8, /*!< 2D-DMA 8 pixel x 8 pixel macro block */
DMA2D_MACRO_BLOCK_SIZE_8_16, /*!< 2D-DMA 8 pixel x 16 pixel macro block */
DMA2D_MACRO_BLOCK_SIZE_16_16, /*!< 2D-DMA 16 pixel x 16 pixel macro block */
DMA2D_MACRO_BLOCK_SIZE_INVALID, /*!< Invalid 2D-DMA macro block size */
} dma2d_macro_block_size_t;
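
A hedged sketch of the multiples constraint above (the helper name is the editor's; which of the two numbers is the vertical dimension is an assumption based on the vb-before-hb field order):

#include <stdbool.h>
#include <stdint.h>

/* Check descriptor vb/hb against the selected macro block size. */
static bool example_block_dims_valid(dma2d_macro_block_size_t mb,
                                     uint32_t vb, uint32_t hb)
{
    switch (mb) {
    case DMA2D_MACRO_BLOCK_SIZE_NONE:  return true;
    case DMA2D_MACRO_BLOCK_SIZE_8_8:   return (vb % 8 == 0)  && (hb % 8 == 0);
    case DMA2D_MACRO_BLOCK_SIZE_8_16:  return (vb % 8 == 0)  && (hb % 16 == 0);
    case DMA2D_MACRO_BLOCK_SIZE_16_16: return (vb % 16 == 0) && (hb % 16 == 0);
    default:                           return false;
    }
}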
/**
 * @brief Enumeration of 2D-DMA pixel byte scramble order in color space conversion
 *
 * Assuming the original pixel byte order (from MSB to LSB) is 2-1-0.
*/
typedef enum {
DMA2D_SCRAMBLE_ORDER_BYTE2_1_0, /*!< 2D-DMA pixel data scrambled as BYTE2-1-0 (no scramble) */
DMA2D_SCRAMBLE_ORDER_BYTE2_0_1, /*!< 2D-DMA pixel data scrambled as BYTE2-0-1 */
DMA2D_SCRAMBLE_ORDER_BYTE1_0_2, /*!< 2D-DMA pixel data scrambled as BYTE1-0-2 */
DMA2D_SCRAMBLE_ORDER_BYTE1_2_0, /*!< 2D-DMA pixel data scrambled as BYTE1-2-0 */
DMA2D_SCRAMBLE_ORDER_BYTE0_2_1, /*!< 2D-DMA pixel data scrambled as BYTE0-2-1 */
DMA2D_SCRAMBLE_ORDER_BYTE0_1_2, /*!< 2D-DMA pixel data scrambled as BYTE0-1-2 */
DMA2D_SCRAMBLE_ORDER_INVALID, /*!< Invalid 2D-DMA pixel data scramble order */
} dma2d_scramble_order_t;
//*********************BT601***********************************//
// Y  =  16 + 0.257 * R + 0.504 * G + 0.098 * B                //
// Cb = 128 - 0.148 * R - 0.291 * G + 0.439 * B                //
// Cr = 128 + 0.439 * R - 0.368 * G - 0.071 * B                //
// R  = 1.164 * (Y - 16) + 1.596 * (Cr - 128)                  //
// G  = 1.164 * (Y - 16) - 0.392 * (Cb - 128) - 0.812 * (Cr - 128) //
// B  = 1.164 * (Y - 16) + 2.016 * (Cb - 128)                  //
//*********************BT601***********************************//
//*********************BT709***********************************//
// Y  =  16 + 0.183 * R + 0.614 * G + 0.062 * B                //
// Cb = 128 - 0.101 * R - 0.339 * G + 0.439 * B                //
// Cr = 128 + 0.439 * R - 0.399 * G - 0.040 * B                //
// R  = 1.164 * (Y - 16) + 1.792 * (Cr - 128)                  //
// G  = 1.164 * (Y - 16) - 0.213 * (Cb - 128) - 0.534 * (Cr - 128) //
// B  = 1.164 * (Y - 16) + 2.114 * (Cb - 128)                  //
//*********************BT709***********************************//
// 256 * Q = A[9:0] * x + B[10:0] * y + C[9:0] * z + D[17:0]
#define DMA2D_COLOR_SPACE_CONV_PARAM_RGB2YUV_BT601 \
{ \
{ 66, 129, 25, 4096}, \
{ -38, -74, 112, 32768}, \
{ 112, -94, -18, 32768}, \
}
#define DMA2D_COLOR_SPACE_CONV_PARAM_RGB2YUV_BT709 \
{ \
{ 47, 157, 16, 4096}, \
{ -26, -86, 112, 32768}, \
{ 112, -102, -10, 32768}, \
}
#define DMA2D_COLOR_SPACE_CONV_PARAM_YUV2RGB_BT601 \
{ \
{ 298, 0, 409, -56906}, \
{ 298, -100, -208, 34707}, \
{ 298, 516, 0, -70836}, \
}
#define DMA2D_COLOR_SPACE_CONV_PARAM_YUV2RGB_BT709 \
{ \
{ 298, 0, 459, -63367}, \
{ 298, -55, -136, 19681}, \
{ 298, 541, 0, -73918}, \
}
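
To make the fixed-point convention 256 * Q = A[9:0] * x + B[10:0] * y + C[9:0] * z + D[17:0] concrete, a hedged sketch evaluating the Y row of the BT601 RGB2YUV table (the function name is the editor's):

#include <stdint.h>

/* Y via the BT601 row {66, 129, 25, 4096}: for R = G = B = 255,
 * (66 + 129 + 25) * 255 + 4096 = 60196, and 60196 >> 8 = 235,
 * the expected BT601 maximum for Y. */
static inline uint8_t example_bt601_y(uint8_t r, uint8_t g, uint8_t b)
{
    int32_t q256 = 66 * r + 129 * g + 25 * b + 4096; // 256 * Y
    return (uint8_t)(q256 >> 8);
}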
/**
* @brief Enumeration of 2D-DMA TX color space conversion (CSC) option
*/
typedef enum {
DMA2D_CSC_TX_NONE, /*!< 2D-DMA TX perform no CSC */
DMA2D_CSC_TX_SCRAMBLE, /*!< 2D-DMA TX perform only data scramble */
DMA2D_CSC_TX_RGB888_TO_RGB565, /*!< 2D-DMA TX perform RGB888 to RGB565 conversion */
DMA2D_CSC_TX_RGB565_TO_RGB888, /*!< 2D-DMA TX perform RGB565 to RGB888 conversion */
DMA2D_CSC_TX_RGB888_TO_YUV444_601, /*!< 2D-DMA TX perform RGB888 to YUV444 conversion (follow BT601 standard) */
DMA2D_CSC_TX_RGB888_TO_YUV444_709, /*!< 2D-DMA TX perform RGB888 to YUV444 conversion (follow BT709 standard) */
DMA2D_CSC_TX_RGB888_TO_YUV422_601, /*!< 2D-DMA TX perform RGB888 to YUV422-MIPI conversion (follow BT601 standard) */
DMA2D_CSC_TX_RGB888_TO_YUV422_709, /*!< 2D-DMA TX perform RGB888 to YUV422-MIPI conversion (follow BT709 standard) */
DMA2D_CSC_TX_YUV444_TO_RGB888_601, /*!< 2D-DMA TX perform YUV444 to RGB888 conversion (follow BT601 standard) */
DMA2D_CSC_TX_YUV444_TO_RGB888_709, /*!< 2D-DMA TX perform YUV444 to RGB888 conversion (follow BT709 standard) */
DMA2D_CSC_TX_YUV422_TO_RGB888_601, /*!< 2D-DMA TX perform YUV422-MIPI to RGB888 conversion (follow BT601 standard) */
DMA2D_CSC_TX_YUV422_TO_RGB888_709, /*!< 2D-DMA TX perform YUV422-MIPI to RGB888 conversion (follow BT709 standard) */
DMA2D_CSC_TX_INVALID, /*!< Invalid 2D-DMA TX color space conversion */
} dma2d_csc_tx_option_t;
/**
* @brief Enumeration of 2D-DMA RX color space conversion (CSC) option
 * On the RX side, only JPEG requires CSC
*/
typedef enum {
DMA2D_CSC_RX_NONE, /*!< 2D-DMA RX perform no CSC */
DMA2D_CSC_RX_SCRAMBLE, /*!< 2D-DMA RX perform only data scramble */
DMA2D_CSC_RX_YUV422_TO_YUV444, /*!< 2D-DMA RX perform YUV422 to YUV444 conversion */
DMA2D_CSC_RX_YUV420_TO_YUV444, /*!< 2D-DMA RX perform YUV420 to YUV444 conversion */
DMA2D_CSC_RX_YUV420_TO_RGB888_601, /*!< 2D-DMA RX perform YUV420 to RGB888 conversion (follow BT601 standard) */
DMA2D_CSC_RX_YUV420_TO_RGB565_601, /*!< 2D-DMA RX perform YUV420 to RGB565 conversion (follow BT601 standard) */
DMA2D_CSC_RX_YUV420_TO_RGB888_709, /*!< 2D-DMA RX perform YUV420 to RGB888 conversion (follow BT709 standard) */
DMA2D_CSC_RX_YUV420_TO_RGB565_709, /*!< 2D-DMA RX perform YUV420 to RGB565 conversion (follow BT709 standard) */
DMA2D_CSC_RX_YUV422_TO_RGB888_601, /*!< 2D-DMA RX perform YUV422 to RGB888 conversion (follow BT601 standard) */
DMA2D_CSC_RX_YUV422_TO_RGB565_601, /*!< 2D-DMA RX perform YUV422 to RGB565 conversion (follow BT601 standard) */
DMA2D_CSC_RX_YUV422_TO_RGB888_709, /*!< 2D-DMA RX perform YUV422 to RGB888 conversion (follow BT709 standard) */
DMA2D_CSC_RX_YUV422_TO_RGB565_709, /*!< 2D-DMA RX perform YUV422 to RGB565 conversion (follow BT709 standard) */
DMA2D_CSC_RX_YUV444_TO_RGB888_601, /*!< 2D-DMA RX perform YUV444 to RGB888 conversion (follow BT601 standard) */
DMA2D_CSC_RX_YUV444_TO_RGB565_601, /*!< 2D-DMA RX perform YUV444 to RGB565 conversion (follow BT601 standard) */
DMA2D_CSC_RX_YUV444_TO_RGB888_709, /*!< 2D-DMA RX perform YUV444 to RGB888 conversion (follow BT709 standard) */
DMA2D_CSC_RX_YUV444_TO_RGB565_709, /*!< 2D-DMA RX perform YUV444 to RGB565 conversion (follow BT709 standard) */
DMA2D_CSC_RX_INVALID, /*!< Invalid 2D-DMA RX color space conversion */
} dma2d_csc_rx_option_t;
#ifdef __cplusplus
}
#endif

View File

@ -35,6 +35,10 @@ if(CONFIG_SOC_GDMA_SUPPORTED)
list(APPEND srcs "${target}/gdma_periph.c") list(APPEND srcs "${target}/gdma_periph.c")
endif() endif()
if(CONFIG_SOC_DMA2D_SUPPORTED)
list(APPEND srcs "${target}/dma2d_periph.c")
endif()
if(CONFIG_SOC_GPSPI_SUPPORTED) if(CONFIG_SOC_GPSPI_SUPPORTED)
list(APPEND srcs "${target}/spi_periph.c") list(APPEND srcs "${target}/spi_periph.c")
endif() endif()

View File

@ -0,0 +1,24 @@
/*
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "soc/dma2d_periph.h"
#include "soc/interrupts.h"
const dma2d_signal_conn_t dma2d_periph_signals = {
.groups = {
[0] = {
.tx_irq_id = {
[0] = ETS_DMA2D_OUT_CH0_INTR_SOURCE,
[1] = ETS_DMA2D_OUT_CH1_INTR_SOURCE,
[2] = ETS_DMA2D_OUT_CH2_INTR_SOURCE,
},
.rx_irq_id = {
[0] = ETS_DMA2D_IN_CH0_INTR_SOURCE,
[1] = ETS_DMA2D_IN_CH1_INTR_SOURCE,
}
}
}
};
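
A hedged sketch of how a driver could consume this table when attaching a TX channel interrupt (the handler body and flags are placeholders; example_ names are the editor's):

#include "esp_intr_alloc.h"
#include "soc/dma2d_periph.h"

static void example_dma2d_isr(void *arg)
{
    (void)arg; // read and clear the channel interrupt status here
}

static esp_err_t example_hook_tx_isr(intr_handle_t *ret_handle)
{
    // Group 0, TX channel 0; keep the source disabled until configured
    return esp_intr_alloc(dma2d_periph_signals.groups[0].tx_irq_id[0],
                          ESP_INTR_FLAG_INTRDISABLED,
                          example_dma2d_isr, NULL, ret_handle);
}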

View File

@ -27,6 +27,10 @@ config SOC_DW_GDMA_SUPPORTED
bool bool
default y default y
config SOC_DMA2D_SUPPORTED
bool
default y
config SOC_GPTIMER_SUPPORTED config SOC_GPTIMER_SUPPORTED
bool bool
default y default y
@ -403,6 +407,18 @@ config SOC_GDMA_SUPPORT_ETM
bool bool
default y default y
config SOC_DMA2D_GROUPS
int
default 1
config SOC_DMA2D_TX_CHANNELS_PER_GROUP
int
default 3
config SOC_DMA2D_RX_CHANNELS_PER_GROUP
int
default 2
config SOC_ETM_GROUPS config SOC_ETM_GROUPS
int int
default 1 default 1

View File

@ -0,0 +1,19 @@
/*
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
// The following macros match the peripheral selection IDs used by the 2D-DMA peri_sel field
#define SOC_DMA2D_TRIG_PERIPH_JPEG_RX (0)
#define SOC_DMA2D_TRIG_PERIPH_PPA_SR_RX (1)
#define SOC_DMA2D_TRIG_PERIPH_PPA_BLEND_RX (2)
#define SOC_DMA2D_TRIG_PERIPH_M2M_RX (-1) // Any value of 3 ~ 7, but TX and RX need the same ID for M2M
#define SOC_DMA2D_TRIG_PERIPH_JPEG_TX (0)
#define SOC_DMA2D_TRIG_PERIPH_PPA_SR_TX (1)
#define SOC_DMA2D_TRIG_PERIPH_PPA_BLEND_FG_TX (2)
#define SOC_DMA2D_TRIG_PERIPH_PPA_BLEND_BG_TX (3)
#define SOC_DMA2D_TRIG_PERIPH_M2M_TX (-1) // Any value of 4 ~ 7, but TX and RX need the same ID for M2M
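
Since the M2M IDs are free-choice within 3 ~ 7 (RX) and 4 ~ 7 (TX), and both directions must be programmed with the same value, the usable shared range is 4 ~ 7. A hedged sketch of one selection policy (the bitmask bookkeeping is the editor's assumption, not this commit's driver):

#include <stdint.h>

/* Pick a free peripheral selection ID usable by both M2M TX and RX. */
static inline int example_pick_m2m_periph_sel_id(uint32_t free_id_mask)
{
    for (int id = 4; id <= 7; id++) {
        if (free_id_mask & (1u << id)) {
            return id; // program this ID into both the TX and the RX channel
        }
    }
    return -1; // no ID available
}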

View File

@ -1,5 +1,5 @@
/** /**
* SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
@ -690,90 +690,6 @@ typedef union {
uint32_t val; uint32_t val;
} dma2d_out_scramble_chn_reg_t; } dma2d_out_scramble_chn_reg_t;
/** Type of out_color_param0_chn register
* Configures the tx color convert parameter of channel 0
*/
typedef union {
struct {
/** out_color_param_h0_chn : R/W; bitpos: [20:0]; default: 298;
* Set first 2 parameter of most significant byte of pending 3 bytes
*/
uint32_t out_color_param_h0_chn:21;
uint32_t reserved_21:11;
};
uint32_t val;
} dma2d_out_color_param0_chn_reg_t;
/** Type of out_color_param1_chn register
* Configures the tx color convert parameter of channel 0
*/
typedef union {
struct {
/** out_color_param_h1_chn : R/W; bitpos: [27:0]; default: 210164121;
* Set last 2 parameter of most significant byte of pending 3 bytes
*/
uint32_t out_color_param_h1_chn:28;
uint32_t reserved_28:4;
};
uint32_t val;
} dma2d_out_color_param1_chn_reg_t;
/** Type of out_color_param2_chn register
* Configures the tx color convert parameter of channel 0
*/
typedef union {
struct {
/** out_color_param_m0_chn : R/W; bitpos: [20:0]; default: 1995050;
* Set first 2 parameter of midium significant byte of pending 3 bytes
*/
uint32_t out_color_param_m0_chn:21;
uint32_t reserved_21:11;
};
uint32_t val;
} dma2d_out_color_param2_chn_reg_t;
/** Type of out_color_param3_chn register
* Configures the tx color convert parameter of channel 0
*/
typedef union {
struct {
/** out_color_param_m1_chn : R/W; bitpos: [27:0]; default: 35540784;
* Set last 2 parameter of midium significant byte of pending 3 bytes
*/
uint32_t out_color_param_m1_chn:28;
uint32_t reserved_28:4;
};
uint32_t val;
} dma2d_out_color_param3_chn_reg_t;
/** Type of out_color_param4_chn register
* Configures the tx color convert parameter of channel 0
*/
typedef union {
struct {
/** out_color_param_l0_chn : R/W; bitpos: [20:0]; default: 528682;
* Set first 2 parameter of least significant byte of pending 3 bytes
*/
uint32_t out_color_param_l0_chn:21;
uint32_t reserved_21:11;
};
uint32_t val;
} dma2d_out_color_param4_chn_reg_t;
/** Type of out_color_param5_chn register
* Configures the tx color convert parameter of channel 0
*/
typedef union {
struct {
/** out_color_param_l1_chn : R/W; bitpos: [27:0]; default: 195899392;
* Set last 2 parameter of least significant byte of pending 3 bytes
*/
uint32_t out_color_param_l1_chn:28;
uint32_t reserved_28:4;
};
uint32_t val;
} dma2d_out_color_param5_chn_reg_t;
/** Type of out_etm_conf_chn register /** Type of out_etm_conf_chn register
* Configures the tx etm of channel 0 * Configures the tx etm of channel 0
*/ */
@ -1529,90 +1445,6 @@ typedef union {
uint32_t val; uint32_t val;
} dma2d_in_scramble_chn_reg_t; } dma2d_in_scramble_chn_reg_t;
/** Type of in_color_param0_chn register
* Configures the rx color convert parameter of channel 0
*/
typedef union {
struct {
/** in_color_param_h0_chn : R/W; bitpos: [20:0]; default: 298;
* Set first 2 parameter of most significant byte of pending 3 bytes
*/
uint32_t in_color_param_h0_chn:21;
uint32_t reserved_21:11;
};
uint32_t val;
} dma2d_in_color_param0_chn_reg_t;
/** Type of in_color_param1_chn register
* Configures the rx color convert parameter of channel 0
*/
typedef union {
struct {
/** in_color_param_h1_chn : R/W; bitpos: [27:0]; default: 210164121;
* Set last 2 parameter of most significant byte of pending 3 bytes
*/
uint32_t in_color_param_h1_chn:28;
uint32_t reserved_28:4;
};
uint32_t val;
} dma2d_in_color_param1_chn_reg_t;
/** Type of in_color_param2_chn register
* Configures the rx color convert parameter of channel 0
*/
typedef union {
struct {
/** in_color_param_m0_chn : R/W; bitpos: [20:0]; default: 1995050;
* Set first 2 parameter of midium significant byte of pending 3 bytes
*/
uint32_t in_color_param_m0_chn:21;
uint32_t reserved_21:11;
};
uint32_t val;
} dma2d_in_color_param2_chn_reg_t;
/** Type of in_color_param3_chn register
* Configures the rx color convert parameter of channel 0
*/
typedef union {
struct {
/** in_color_param_m1_chn : R/W; bitpos: [27:0]; default: 35540784;
* Set last 2 parameter of midium significant byte of pending 3 bytes
*/
uint32_t in_color_param_m1_chn:28;
uint32_t reserved_28:4;
};
uint32_t val;
} dma2d_in_color_param3_chn_reg_t;
/** Type of in_color_param4_chn register
* Configures the rx color convert parameter of channel 0
*/
typedef union {
struct {
/** in_color_param_l0_chn : R/W; bitpos: [20:0]; default: 528682;
* Set first 2 parameter of least significant byte of pending 3 bytes
*/
uint32_t in_color_param_l0_chn:21;
uint32_t reserved_21:11;
};
uint32_t val;
} dma2d_in_color_param4_chn_reg_t;
/** Type of in_color_param5_chn register
* Configures the rx color convert parameter of channel 0
*/
typedef union {
struct {
/** in_color_param_l1_chn : R/W; bitpos: [27:0]; default: 195899392;
* Set last 2 parameter of least significant byte of pending 3 bytes
*/
uint32_t in_color_param_l1_chn:28;
uint32_t reserved_28:4;
};
uint32_t val;
} dma2d_in_color_param5_chn_reg_t;
/** Type of in_etm_conf_chn register /** Type of in_etm_conf_chn register
* Configures the rx etm of channel 0 * Configures the rx etm of channel 0
*/ */
@ -1718,7 +1550,7 @@ typedef union {
*/ */
typedef union { typedef union {
struct { struct {
/** access_intr_mem_start_addr : R/W; bitpos: [31:0]; default: 806354944; /** access_intr_mem_start_addr : R/W; bitpos: [31:0]; default: 806354944 (0x30100000);
* The start address of accessible address space. * The start address of accessible address space.
*/ */
uint32_t access_intr_mem_start_addr:32; uint32_t access_intr_mem_start_addr:32;
@ -1731,7 +1563,7 @@ typedef union {
*/ */
typedef union { typedef union {
struct { struct {
/** access_intr_mem_end_addr : R/W; bitpos: [31:0]; default: 2415919103; /** access_intr_mem_end_addr : R/W; bitpos: [31:0]; default: 2415919103 (0x8FFFFFFF);
* The end address of accessible address space. The access address beyond this range * The end address of accessible address space. The access address beyond this range
* would lead to descriptor error. * would lead to descriptor error.
*/ */
@ -1745,7 +1577,7 @@ typedef union {
*/ */
typedef union { typedef union {
struct { struct {
/** access_extr_mem_start_addr : R/W; bitpos: [31:0]; default: 806354944; /** access_extr_mem_start_addr : R/W; bitpos: [31:0]; default: 806354944 (0x30100000);
* The start address of accessible address space. * The start address of accessible address space.
*/ */
uint32_t access_extr_mem_start_addr:32; uint32_t access_extr_mem_start_addr:32;
@ -1758,7 +1590,7 @@ typedef union {
*/ */
typedef union { typedef union {
struct { struct {
/** access_extr_mem_end_addr : R/W; bitpos: [31:0]; default: 2415919103; /** access_extr_mem_end_addr : R/W; bitpos: [31:0]; default: 2415919103 (0x8FFFFFFF);
* The end address of accessible address space. The access address beyond this range * The end address of accessible address space. The access address beyond this range
* would lead to descriptor error. * would lead to descriptor error.
*/ */
@ -1847,36 +1679,69 @@ typedef union {
uint32_t val; uint32_t val;
} dma2d_rdn_eco_low_reg_t; } dma2d_rdn_eco_low_reg_t;
/** Type of in/out_color_param_h/m/l_chn register
* Configures the rx/tx color convert parameter of channel n
*/
typedef union {
struct {
struct {
/** a: R/W; bitpos: [9:0]; default: h:298, m:298, l:298
* Set the first parameter of the most/medium/least significant byte of pending 3 bytes
*/
uint32_t a : 10;
/** b: R/W; bitpos: [20:10]; default: h:0, m:1948, l:516
* Set the second parameter of the most/medium/least significant byte of pending 3 bytes
*/
uint32_t b : 11;
uint32_t reserved21 : 11;
};
struct {
/** c: R/W; bitpos: [41:32]; default: h:409, m:816, l:0
* Set the third parameter of the most/medium/least significant byte of pending 3 bytes
*/
uint32_t c : 10;
/** d: R/W; bitpos: [59:42]; default: h:205238, m:34707, l:191308
* Set the fourth parameter of the most/medium/least significant byte of pending 3 bytes
*/
uint32_t d : 18;
uint32_t reserved60 : 4;
};
};
uint32_t val[2];
} dma2d_color_param_reg_t;
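
A hedged sketch showing how one row of the CSC parameter tables above lands in this two-word register (the BT601 RGB2YUV Y row; the function name is the editor's, and negative table entries rely on two's-complement truncation into the unsigned fields):

/* Pack {A, B, C, D} = {66, 129, 25, 4096} into one color-param register. */
static void example_pack_color_param(volatile dma2d_color_param_reg_t *reg)
{
    reg->a = 66;   // A[9:0]
    reg->b = 129;  // B[10:0]
    reg->c = 25;   // C[9:0]
    reg->d = 4096; // D[17:0], per 256 * Q = A*x + B*y + C*z + D
}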
typedef struct { typedef struct {
volatile dma2d_out_conf0_chn_reg_t out_conf0_ch0; volatile dma2d_color_param_reg_t param_h;
volatile dma2d_out_int_raw_chn_reg_t out_int_raw_ch0; volatile dma2d_color_param_reg_t param_m;
volatile dma2d_out_int_ena_chn_reg_t out_int_ena_ch0; volatile dma2d_color_param_reg_t param_l;
volatile dma2d_out_int_st_chn_reg_t out_int_st_ch0; } dma2d_color_param_group_chn_reg_t;
volatile dma2d_out_int_clr_chn_reg_t out_int_clr_ch0;
volatile dma2d_outfifo_status_chn_reg_t outfifo_status_ch0; typedef struct {
volatile dma2d_out_push_chn_reg_t out_push_ch0; volatile dma2d_out_conf0_chn_reg_t out_conf0;
volatile dma2d_out_link_conf_chn_reg_t out_link_conf_ch0; volatile dma2d_out_int_raw_chn_reg_t out_int_raw;
volatile dma2d_out_link_addr_chn_reg_t out_link_addr_ch0; volatile dma2d_out_int_ena_chn_reg_t out_int_ena;
volatile dma2d_out_state_chn_reg_t out_state_ch0; volatile dma2d_out_int_st_chn_reg_t out_int_st;
volatile dma2d_out_eof_des_addr_chn_reg_t out_eof_des_addr_ch0; volatile dma2d_out_int_clr_chn_reg_t out_int_clr;
volatile dma2d_out_dscr_chn_reg_t out_dscr_ch0; volatile dma2d_outfifo_status_chn_reg_t outfifo_status;
volatile dma2d_out_dscr_bf0_chn_reg_t out_dscr_bf0_ch0; volatile dma2d_out_push_chn_reg_t out_push;
volatile dma2d_out_dscr_bf1_chn_reg_t out_dscr_bf1_ch0; volatile dma2d_out_link_conf_chn_reg_t out_link_conf;
volatile dma2d_out_peri_sel_chn_reg_t out_peri_sel_ch0; volatile dma2d_out_link_addr_chn_reg_t out_link_addr;
volatile dma2d_out_arb_chn_reg_t out_arb_ch0; volatile dma2d_out_state_chn_reg_t out_state;
volatile dma2d_out_ro_status_chn_reg_t out_ro_status_ch0; volatile dma2d_out_eof_des_addr_chn_reg_t out_eof_des_addr;
volatile dma2d_out_ro_pd_conf_chn_reg_t out_ro_pd_conf_ch0; //only chn0 volatile dma2d_out_dscr_chn_reg_t out_dscr;
volatile dma2d_out_color_convert_chn_reg_t out_color_convert_ch0; volatile dma2d_out_dscr_bf0_chn_reg_t out_dscr_bf0;
volatile dma2d_out_scramble_chn_reg_t out_scramble_ch0; volatile dma2d_out_dscr_bf1_chn_reg_t out_dscr_bf1;
volatile dma2d_out_color_param0_chn_reg_t out_color_param0_ch0; volatile dma2d_out_peri_sel_chn_reg_t out_peri_sel;
volatile dma2d_out_color_param1_chn_reg_t out_color_param1_ch0; volatile dma2d_out_arb_chn_reg_t out_arb;
volatile dma2d_out_color_param2_chn_reg_t out_color_param2_ch0; volatile dma2d_out_ro_status_chn_reg_t out_ro_status;
volatile dma2d_out_color_param3_chn_reg_t out_color_param3_ch0; volatile dma2d_out_ro_pd_conf_chn_reg_t out_ro_pd_conf; /* only exist on channel0 */
volatile dma2d_out_color_param4_chn_reg_t out_color_param4_ch0; volatile dma2d_out_color_convert_chn_reg_t out_color_convert;
volatile dma2d_out_color_param5_chn_reg_t out_color_param5_ch0; volatile dma2d_out_scramble_chn_reg_t out_scramble;
volatile dma2d_out_etm_conf_chn_reg_t out_etm_conf_ch0; volatile dma2d_color_param_group_chn_reg_t out_color_param_group;
volatile dma2d_out_dscr_port_blk_chn_reg_t out_dscr_port_blk_ch0; volatile dma2d_out_etm_conf_chn_reg_t out_etm_conf;
uint32_t reserved[36]; volatile dma2d_out_dscr_port_blk_chn_reg_t out_dscr_port_blk;
uint32_t reserved_out[36];
} dma2d_out_chn_reg_t; } dma2d_out_chn_reg_t;
typedef struct { typedef struct {
@ -1898,15 +1763,10 @@ typedef struct {
volatile dma2d_in_peri_sel_chn_reg_t in_peri_sel; volatile dma2d_in_peri_sel_chn_reg_t in_peri_sel;
volatile dma2d_in_arb_chn_reg_t in_arb; volatile dma2d_in_arb_chn_reg_t in_arb;
volatile dma2d_in_ro_status_chn_reg_t in_ro_status; volatile dma2d_in_ro_status_chn_reg_t in_ro_status;
volatile dma2d_in_ro_pd_conf_chn_reg_t in_ro_pd_conf; //only ch0 volatile dma2d_in_ro_pd_conf_chn_reg_t in_ro_pd_conf;
volatile dma2d_in_color_convert_chn_reg_t in_color_convert; //only ch0 volatile dma2d_in_color_convert_chn_reg_t in_color_convert;
volatile dma2d_in_scramble_chn_reg_t in_scramble; //only ch0 volatile dma2d_in_scramble_chn_reg_t in_scramble;
volatile dma2d_in_color_param0_chn_reg_t in_color_param0; //only ch0 volatile dma2d_color_param_group_chn_reg_t in_color_param_group;
volatile dma2d_in_color_param1_chn_reg_t in_color_param1; //only ch0
volatile dma2d_in_color_param2_chn_reg_t in_color_param2; //only ch0
volatile dma2d_in_color_param3_chn_reg_t in_color_param3; //only ch0
volatile dma2d_in_color_param4_chn_reg_t in_color_param4; //only ch0
volatile dma2d_in_color_param5_chn_reg_t in_color_param5; //only ch0
volatile dma2d_in_etm_conf_chn_reg_t in_etm_conf; volatile dma2d_in_etm_conf_chn_reg_t in_etm_conf;
uint32_t reserved_570[36]; uint32_t reserved_570[36];
} dma2d_in_ch0_reg_t; } dma2d_in_ch0_reg_t;
@ -1931,14 +1791,15 @@ typedef struct {
volatile dma2d_in_arb_chn_reg_t in_arb; volatile dma2d_in_arb_chn_reg_t in_arb;
volatile dma2d_in_ro_status_chn_reg_t in_ro_status; volatile dma2d_in_ro_status_chn_reg_t in_ro_status;
volatile dma2d_in_etm_conf_chn_reg_t in_etm_conf; volatile dma2d_in_etm_conf_chn_reg_t in_etm_conf;
uint32_t reserved_64c[45];
} dma2d_in_ch1_reg_t; } dma2d_in_ch1_reg_t;
typedef struct { typedef struct dma2d_dev_t {
volatile dma2d_out_chn_reg_t out_channel[3]; volatile dma2d_out_chn_reg_t out_channel[3];
uint32_t reserved_300[128]; uint32_t reserved_300[128];
volatile dma2d_in_ch0_reg_t in_channel0; volatile dma2d_in_ch0_reg_t in_channel0;
volatile dma2d_in_ch1_reg_t in_channel1; volatile dma2d_in_ch1_reg_t in_channel1;
uint32_t reserved_6dc[237]; uint32_t reserved_700[192];
volatile dma2d_axi_err_reg_t axi_err; volatile dma2d_axi_err_reg_t axi_err;
volatile dma2d_rst_conf_reg_t rst_conf; volatile dma2d_rst_conf_reg_t rst_conf;
volatile dma2d_intr_mem_start_addr_reg_t intr_mem_start_addr; volatile dma2d_intr_mem_start_addr_reg_t intr_mem_start_addr;

View File

@ -25,6 +25,7 @@
#define SOC_AHB_GDMA_SUPPORTED 1 #define SOC_AHB_GDMA_SUPPORTED 1
#define SOC_AXI_GDMA_SUPPORTED 1 #define SOC_AXI_GDMA_SUPPORTED 1
#define SOC_DW_GDMA_SUPPORTED 1 #define SOC_DW_GDMA_SUPPORTED 1
#define SOC_DMA2D_SUPPORTED 1
#define SOC_GPTIMER_SUPPORTED 1 #define SOC_GPTIMER_SUPPORTED 1
#define SOC_PCNT_SUPPORTED 1 #define SOC_PCNT_SUPPORTED 1
// #define SOC_LCDCAM_SUPPORTED 1 // TODO: IDF-7465 // #define SOC_LCDCAM_SUPPORTED 1 // TODO: IDF-7465
@ -180,6 +181,12 @@
#define SOC_AXI_GDMA_SUPPORT_PSRAM 1 #define SOC_AXI_GDMA_SUPPORT_PSRAM 1
#define SOC_GDMA_SUPPORT_ETM 1 #define SOC_GDMA_SUPPORT_ETM 1
/*-------------------------- 2D-DMA CAPS -------------------------------------*/
#define SOC_DMA2D_GROUPS (1U) // Number of 2D-DMA groups
#define SOC_DMA2D_TX_CHANNELS_PER_GROUP (3) // Number of 2D-DMA TX (OUT) channels in each group
#define SOC_DMA2D_RX_CHANNELS_PER_GROUP (2) // Number of 2D-DMA RX (IN) channels in each group
// #define SOC_DMA2D_SUPPORT_ETM (1) // Support ETM submodule
/*-------------------------- ETM CAPS --------------------------------------*/ /*-------------------------- ETM CAPS --------------------------------------*/
#define SOC_ETM_GROUPS 1U // Number of ETM groups #define SOC_ETM_GROUPS 1U // Number of ETM groups
#define SOC_ETM_CHANNELS_PER_GROUP 50 // Number of ETM channels in the group #define SOC_ETM_CHANNELS_PER_GROUP 50 // Number of ETM channels in the group

View File

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
@ -100,9 +100,9 @@ PROVIDE ( I3C_MST = 0x500DA000 );
PROVIDE ( I3C_MST_MEM = 0x500DA000 ); PROVIDE ( I3C_MST_MEM = 0x500DA000 );
PROVIDE ( I3C_SLV = 0x500DB000 ); PROVIDE ( I3C_SLV = 0x500DB000 );
PROVIDE ( JPEG = 0x50086000 );
PROVIDE ( PPA = 0x50087000 ); PROVIDE ( PPA = 0x50087000 );
PROVIDE ( DMA2D = 0x50088000 ); PROVIDE ( DMA2D = 0x50088000 );
PROVIDE ( JPEG = 0x50086000 );
PROVIDE ( USB_WRAP = 0x50080000 ); PROVIDE ( USB_WRAP = 0x50080000 );
PROVIDE ( USB_DWC_HS = 0x50000000 ); PROVIDE ( USB_DWC_HS = 0x50000000 );

View File

@ -0,0 +1,29 @@
/*
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdint.h>
#include "soc/soc_caps.h"
#ifdef __cplusplus
extern "C" {
#endif
#if SOC_DMA2D_SUPPORTED
typedef struct {
struct {
const int tx_irq_id[SOC_DMA2D_TX_CHANNELS_PER_GROUP];
const int rx_irq_id[SOC_DMA2D_RX_CHANNELS_PER_GROUP];
} groups[SOC_DMA2D_GROUPS];
} dma2d_signal_conn_t;
extern const dma2d_signal_conn_t dma2d_periph_signals;
#endif
#ifdef __cplusplus
}
#endif