Mirror of https://github.com/espressif/esp-idf.git (synced 2024-10-05 20:47:46 -04:00)
Merge branch 'feature/gdma_channel_allocator' into 'master'
gdma channel allocator

Closes IDF-2124

See merge request espressif/esp-idf!11570
Commit: 2d3f22918f
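For orientation, a minimal sketch of how a consumer driver is expected to use the allocator introduced by this merge (illustrative only; the helper name example_acquire_tx_channel is not part of the change, and error handling is trimmed):

#include "esp_private/gdma.h"

static esp_err_t example_acquire_tx_channel(gdma_channel_handle_t *out_chan)
{
    // ask the allocator for any free TX channel (group and pair are chosen internally)
    gdma_channel_alloc_config_t alloc_config = {
        .direction = GDMA_CHANNEL_DIRECTION_TX,
    };
    esp_err_t err = gdma_new_channel(&alloc_config, out_chan);
    if (err != ESP_OK) {
        return err;
    }
    // bind the channel to a trigger; M2M is used here only as an example
    return gdma_connect(*out_chan, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_M2M, 0));
}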
@ -58,6 +58,7 @@ if(${target} STREQUAL "esp32s3")
     list(APPEND srcs "adc_common.c"
                      "dac_common.c"
                      "dedic_gpio.c"
+                     "gdma.c"
                      "spi_slave_hd.c"
                      "touch_sensor_common.c"
                      "twai.c"
@ -65,7 +66,8 @@ if(${target} STREQUAL "esp32s3")
 endif()

 if(IDF_TARGET STREQUAL "esp32c3")
-    list(APPEND srcs "spi_slave_hd.c"
+    list(APPEND srcs "gdma.c"
+                     "spi_slave_hd.c"
                      "esp32c3/adc.c")
 endif()
@ -2,7 +2,7 @@
 # Component Makefile
 #
 COMPONENT_SRCDIRS := . $(IDF_TARGET)
-COMPONENT_OBJEXCLUDE += spi_slave_hd.o dedic_gpio.o
+COMPONENT_OBJEXCLUDE += spi_slave_hd.o dedic_gpio.o gdma.o

 COMPONENT_ADD_INCLUDEDIRS := include $(IDF_TARGET)/include $(IDF_TARGET)/include/driver
components/driver/gdma.c (new file, 642 lines)
@ -0,0 +1,642 @@
|
||||
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// #define LOG_LOCAL_LEVEL ESP_LOG_DEBUG
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <sys/cdefs.h>
|
||||
#include "freertos/FreeRTOS.h"
|
||||
#include "freertos/task.h"
|
||||
#include "soc/soc_caps.h"
|
||||
#include "soc/periph_defs.h"
|
||||
#include "esp_intr_alloc.h"
|
||||
#include "esp_log.h"
|
||||
#include "driver/periph_ctrl.h"
|
||||
#include "esp_private/gdma.h"
|
||||
#include "hal/gdma_hal.h"
|
||||
#include "hal/gdma_ll.h"
|
||||
#include "soc/gdma_periph.h"
|
||||
|
||||
static const char *TAG = "gdma";
|
||||
|
||||
#define DMA_CHECK(a, msg, tag, ret, ...) \
|
||||
do { \
|
||||
if (unlikely(!(a))) { \
|
||||
ESP_LOGE(TAG, "%s(%d): " msg, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
|
||||
ret_code = ret; \
|
||||
goto tag; \
|
||||
} \
|
||||
} while (0)
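// Note: DMA_CHECK assumes the enclosing function declares a local `esp_err_t ret_code`
// and provides the given goto label (conventionally `err:`) for cleanup-and-return.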
|
||||
|
||||
#define GDMA_INVALID_PERIPH_TRIG (0x3F)
|
||||
#define SEARCH_REQUEST_RX_CHANNEL (1 << 0)
|
||||
#define SEARCH_REQUEST_TX_CHANNEL (1 << 1)
|
||||
|
||||
typedef struct gdma_platform_t gdma_platform_t;
|
||||
typedef struct gdma_group_t gdma_group_t;
|
||||
typedef struct gdma_pair_t gdma_pair_t;
|
||||
typedef struct gdma_channel_t gdma_channel_t;
|
||||
typedef struct gdma_tx_channel_t gdma_tx_channel_t;
|
||||
typedef struct gdma_rx_channel_t gdma_rx_channel_t;
|
||||
|
||||
struct gdma_platform_t {
|
||||
portMUX_TYPE spinlock; // platform level spinlock
|
||||
gdma_group_t *groups[SOC_GDMA_GROUPS]; // array of GDMA group instances
|
||||
};
|
||||
|
||||
struct gdma_group_t {
|
||||
int group_id; // Group ID, index from 0
|
||||
gdma_hal_context_t hal; // HAL instance is at group level
|
||||
portMUX_TYPE spinlock; // group level spinlock
|
||||
int ref_count; // reference count
|
||||
gdma_pair_t *pairs[SOC_GDMA_PAIRS_PER_GROUP]; // handles of GDMA pairs
|
||||
};
|
||||
|
||||
struct gdma_pair_t {
|
||||
gdma_group_t *group; // which group the pair belongs to
|
||||
int pair_id; // Pair ID, index from 0
|
||||
gdma_tx_channel_t *tx_chan; // pointer of tx channel in the pair
|
||||
gdma_rx_channel_t *rx_chan; // pointer of rx channel in the pair
|
||||
int occupy_code; // each bit indicates which channel has been occupied (an occupied channel will be skipped during channel search)
|
||||
intr_handle_t intr; // Interrupt is at pair level
|
||||
portMUX_TYPE spinlock; // pair level spinlock
|
||||
int ref_count; // reference count
|
||||
};
|
||||
|
||||
struct gdma_channel_t {
|
||||
gdma_pair_t *pair; // which pair the channel belongs to
|
||||
gdma_channel_direction_t direction; // channel direction
|
||||
int periph_id; // Peripheral instance ID, indicates which peripheral is connected to this GDMA channel
|
||||
esp_err_t (*del)(gdma_channel_t *channel); // channel deletion function, it's polymorphic, see `gdma_del_tx_channel` or `gdma_del_rx_channel`
|
||||
};
|
||||
|
||||
struct gdma_tx_channel_t {
|
||||
gdma_channel_t base; // GDMA channel, base class
|
||||
void *user_data; // user registered DMA event data
|
||||
gdma_event_callback_t on_trans_eof; // TX EOF callback
|
||||
};
|
||||
|
||||
struct gdma_rx_channel_t {
|
||||
gdma_channel_t base; // GDMA channel, base class
|
||||
void *user_data; // user registered DMA event data
|
||||
gdma_event_callback_t on_recv_eof; // RX EOF callback
|
||||
};
|
||||
|
||||
static gdma_group_t *gdma_acquire_group_handle(int group_id);
|
||||
static void gdma_release_group_handle(gdma_group_t *group);
|
||||
static gdma_pair_t *gdma_acquire_pair_handle(gdma_group_t *group, int pair_id);
|
||||
static void gdma_release_pair_handle(gdma_pair_t *pair);
|
||||
static void gdma_uninstall_pair(gdma_pair_t *pair);
|
||||
static void gdma_uninstall_group(gdma_group_t *group);
|
||||
static esp_err_t gdma_del_tx_channel(gdma_channel_t *dma_channel);
|
||||
static esp_err_t gdma_del_rx_channel(gdma_channel_t *dma_channel);
|
||||
static esp_err_t gdma_install_interrupt(gdma_pair_t *pair);
|
||||
|
||||
// gdma driver platform
|
||||
static gdma_platform_t s_platform = {
|
||||
.spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED,
|
||||
.groups = {} // groups will be lazily installed
|
||||
};
|
||||
|
||||
esp_err_t gdma_new_channel(const gdma_channel_alloc_config_t *config, gdma_channel_handle_t *ret_chan)
|
||||
{
|
||||
esp_err_t ret_code = ESP_OK;
|
||||
gdma_tx_channel_t *alloc_tx_channel = NULL;
|
||||
gdma_rx_channel_t *alloc_rx_channel = NULL;
|
||||
int search_code = 0;
|
||||
gdma_pair_t *pair = NULL;
|
||||
gdma_group_t *group = NULL;
|
||||
DMA_CHECK(config && ret_chan, "invalid argument", err, ESP_ERR_INVALID_ARG);
|
||||
|
||||
if (config->flags.reserve_sibling) {
|
||||
search_code = SEARCH_REQUEST_RX_CHANNEL | SEARCH_REQUEST_TX_CHANNEL; // search for a pair of channels
|
||||
}
|
||||
if (config->direction == GDMA_CHANNEL_DIRECTION_TX) {
|
||||
search_code |= SEARCH_REQUEST_TX_CHANNEL; // search TX only
|
||||
alloc_tx_channel = calloc(1, sizeof(gdma_tx_channel_t));
|
||||
DMA_CHECK(alloc_tx_channel, "no mem for gdma tx channel", err, ESP_ERR_NO_MEM);
|
||||
} else if (config->direction == GDMA_CHANNEL_DIRECTION_RX) {
|
||||
search_code |= SEARCH_REQUEST_RX_CHANNEL; // search RX only
|
||||
alloc_rx_channel = calloc(1, sizeof(gdma_rx_channel_t));
|
||||
DMA_CHECK(alloc_rx_channel, "no mem for gdma rx channel", err, ESP_ERR_NO_MEM);
|
||||
}
|
||||
|
||||
if (config->sibling_chan) {
|
||||
pair = config->sibling_chan->pair;
|
||||
DMA_CHECK(pair, "invalid sibling channel", err, ESP_ERR_INVALID_ARG);
|
||||
DMA_CHECK(config->sibling_chan->direction != config->direction,
|
||||
"sibling channel should have a different direction", err, ESP_ERR_INVALID_ARG);
|
||||
group = pair->group;
|
||||
portENTER_CRITICAL(&pair->spinlock);
|
||||
pair->ref_count++; // channel obtains a reference to pair
|
||||
portEXIT_CRITICAL(&pair->spinlock);
|
||||
goto search_done; // skip the search path below if the user has specified a sibling channel
|
||||
}
|
||||
|
||||
for (int i = 0; i < SOC_GDMA_GROUPS && search_code; i++) { // loop to search group
|
||||
group = gdma_acquire_group_handle(i);
|
||||
for (int j = 0; j < SOC_GDMA_PAIRS_PER_GROUP && search_code && group; j++) { // loop to search pair
|
||||
pair = gdma_acquire_pair_handle(group, j);
|
||||
if (pair) {
|
||||
portENTER_CRITICAL(&pair->spinlock);
|
||||
if (!(search_code & pair->occupy_code)) { // the pair has free slot(s) for the requested channel(s)
|
||||
pair->occupy_code |= search_code;
|
||||
pair->ref_count++; // channel obtains a reference to pair
|
||||
search_code = 0; // exit search loop
|
||||
}
|
||||
portEXIT_CRITICAL(&pair->spinlock);
|
||||
}
|
||||
gdma_release_pair_handle(pair);
|
||||
} // loop used to search pair
|
||||
gdma_release_group_handle(group);
|
||||
} // loop used to search group
|
||||
DMA_CHECK(search_code == 0, "no free gdma channel, search code=%d", err, ESP_ERR_NOT_FOUND, search_code);
|
||||
|
||||
search_done:
|
||||
// register TX channel
|
||||
if (alloc_tx_channel) {
|
||||
pair->tx_chan = alloc_tx_channel;
|
||||
alloc_tx_channel->base.pair = pair;
|
||||
alloc_tx_channel->base.direction = GDMA_CHANNEL_DIRECTION_TX;
|
||||
alloc_tx_channel->base.periph_id = GDMA_INVALID_PERIPH_TRIG;
|
||||
alloc_tx_channel->base.del = gdma_del_tx_channel; // set channel deletion function
|
||||
*ret_chan = &alloc_tx_channel->base; // return the installed channel
|
||||
}
|
||||
|
||||
// register RX channel
|
||||
if (alloc_rx_channel) {
|
||||
pair->rx_chan = alloc_rx_channel;
|
||||
alloc_rx_channel->base.pair = pair;
|
||||
alloc_rx_channel->base.direction = GDMA_CHANNEL_DIRECTION_RX;
|
||||
alloc_rx_channel->base.periph_id = GDMA_INVALID_PERIPH_TRIG;
|
||||
alloc_rx_channel->base.del = gdma_del_rx_channel; // set channel deletion function
|
||||
*ret_chan = &alloc_rx_channel->base; // return the installed channel
|
||||
}
|
||||
|
||||
ESP_LOGD(TAG, "new %s channel (%d,%d) at %p", (config->direction == GDMA_CHANNEL_DIRECTION_TX) ? "tx" : "rx",
|
||||
group->group_id, pair->pair_id, *ret_chan);
|
||||
return ESP_OK;
|
||||
|
||||
err:
|
||||
if (alloc_tx_channel) {
|
||||
free(alloc_tx_channel);
|
||||
}
|
||||
if (alloc_rx_channel) {
|
||||
free(alloc_rx_channel);
|
||||
}
|
||||
return ret_code;
|
||||
}
|
||||
|
||||
esp_err_t gdma_del_channel(gdma_channel_handle_t dma_chan)
|
||||
{
|
||||
esp_err_t ret_code = ESP_OK;
|
||||
DMA_CHECK(dma_chan, "invalid argument", err, ESP_ERR_INVALID_ARG);
|
||||
|
||||
ret_code = dma_chan->del(dma_chan); // call `gdma_del_tx_channel` or `gdma_del_rx_channel`
|
||||
|
||||
err:
|
||||
return ret_code;
|
||||
}
|
||||
|
||||
esp_err_t gdma_get_channel_id(gdma_channel_handle_t dma_chan, int *channel_id)
|
||||
{
|
||||
esp_err_t ret_code = ESP_OK;
|
||||
gdma_pair_t *pair = NULL;
|
||||
DMA_CHECK(dma_chan, "invalid argument", err, ESP_ERR_INVALID_ARG);
|
||||
pair = dma_chan->pair;
|
||||
*channel_id = pair->pair_id;
|
||||
err:
|
||||
return ret_code;
|
||||
}
|
||||
|
||||
esp_err_t gdma_connect(gdma_channel_handle_t dma_chan, gdma_trigger_t trig_periph)
|
||||
{
|
||||
esp_err_t ret_code = ESP_OK;
|
||||
gdma_pair_t *pair = NULL;
|
||||
gdma_group_t *group = NULL;
|
||||
DMA_CHECK(dma_chan, "invalid argument", err, ESP_ERR_INVALID_ARG);
|
||||
DMA_CHECK(dma_chan->periph_id == GDMA_INVALID_PERIPH_TRIG, "channel is using by peripheral: %d", err, ESP_ERR_INVALID_STATE, dma_chan->periph_id);
|
||||
pair = dma_chan->pair;
|
||||
group = pair->group;
|
||||
|
||||
dma_chan->periph_id = trig_periph.instance_id;
|
||||
// enable/disable m2m mode
|
||||
gdma_ll_enable_m2m_mode(group->hal.dev, pair->pair_id, trig_periph.periph == GDMA_TRIG_PERIPH_M2M);
|
||||
|
||||
if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX) {
|
||||
gdma_ll_tx_reset_channel(group->hal.dev, pair->pair_id); // reset channel
|
||||
if (trig_periph.periph != GDMA_TRIG_PERIPH_M2M) {
|
||||
gdma_ll_tx_connect_to_periph(group->hal.dev, pair->pair_id, trig_periph.instance_id);
|
||||
}
|
||||
} else {
|
||||
gdma_ll_rx_reset_channel(group->hal.dev, pair->pair_id); // reset channel
|
||||
if (trig_periph.periph != GDMA_TRIG_PERIPH_M2M) {
|
||||
gdma_ll_rx_connect_to_periph(group->hal.dev, pair->pair_id, trig_periph.instance_id);
|
||||
}
|
||||
}
|
||||
|
||||
err:
|
||||
return ret_code;
|
||||
}
|
||||
|
||||
esp_err_t gdma_disconnect(gdma_channel_handle_t dma_chan)
|
||||
{
|
||||
esp_err_t ret_code = ESP_OK;
|
||||
gdma_pair_t *pair = NULL;
|
||||
gdma_group_t *group = NULL;
|
||||
DMA_CHECK(dma_chan, "invalid argument", err, ESP_ERR_INVALID_ARG);
|
||||
DMA_CHECK(dma_chan->periph_id != GDMA_INVALID_PERIPH_TRIG, "no peripheral is connected to the channel", err, ESP_ERR_INVALID_STATE);
|
||||
pair = dma_chan->pair;
|
||||
group = pair->group;
|
||||
|
||||
dma_chan->periph_id = GDMA_INVALID_PERIPH_TRIG;
|
||||
if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX) {
|
||||
gdma_ll_tx_connect_to_periph(group->hal.dev, pair->pair_id, GDMA_INVALID_PERIPH_TRIG);
|
||||
} else {
|
||||
gdma_ll_rx_connect_to_periph(group->hal.dev, pair->pair_id, GDMA_INVALID_PERIPH_TRIG);
|
||||
}
|
||||
|
||||
err:
|
||||
return ret_code;
|
||||
}
|
||||
|
||||
esp_err_t gdma_apply_strategy(gdma_channel_handle_t dma_chan, const gdma_strategy_config_t *config)
|
||||
{
|
||||
esp_err_t ret_code = ESP_OK;
|
||||
gdma_pair_t *pair = NULL;
|
||||
gdma_group_t *group = NULL;
|
||||
DMA_CHECK(dma_chan, "invalid argument", err, ESP_ERR_INVALID_ARG);
|
||||
pair = dma_chan->pair;
|
||||
group = pair->group;
|
||||
|
||||
if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX) {
|
||||
gdma_ll_tx_enable_owner_check(group->hal.dev, pair->pair_id, config->owner_check);
|
||||
gdma_ll_tx_enable_auto_write_back(group->hal.dev, pair->pair_id, config->auto_update_desc);
|
||||
} else {
|
||||
gdma_ll_rx_enable_owner_check(group->hal.dev, pair->pair_id, config->owner_check);
|
||||
}
|
||||
|
||||
err:
|
||||
return ret_code;
|
||||
}
|
||||
|
||||
esp_err_t gdma_register_tx_event_callbacks(gdma_channel_handle_t dma_chan, gdma_tx_event_callbacks_t *cbs, void *user_data)
|
||||
{
|
||||
esp_err_t ret_code = ESP_OK;
|
||||
gdma_pair_t *pair = NULL;
|
||||
gdma_group_t *group = NULL;
|
||||
DMA_CHECK(dma_chan && dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX, "invalid argument", err, ESP_ERR_INVALID_ARG);
|
||||
pair = dma_chan->pair;
|
||||
group = pair->group;
|
||||
gdma_tx_channel_t *tx_chan = __containerof(dma_chan, gdma_tx_channel_t, base);
|
||||
|
||||
// lazy install interrupt service
|
||||
DMA_CHECK(gdma_install_interrupt(pair) == ESP_OK, "install interrupt service failed", err, ESP_FAIL);
|
||||
|
||||
// enable/disable GDMA interrupt events for TX channel
|
||||
portENTER_CRITICAL(&pair->spinlock);
|
||||
gdma_ll_enable_interrupt(group->hal.dev, pair->pair_id, GDMA_LL_EVENT_TX_EOF, cbs->on_trans_eof != NULL);
|
||||
portEXIT_CRITICAL(&pair->spinlock);
|
||||
|
||||
tx_chan->on_trans_eof = cbs->on_trans_eof;
|
||||
tx_chan->user_data = user_data;
|
||||
|
||||
err:
|
||||
return ret_code;
|
||||
}
|
||||
|
||||
esp_err_t gdma_register_rx_event_callbacks(gdma_channel_handle_t dma_chan, gdma_rx_event_callbacks_t *cbs, void *user_data)
|
||||
{
|
||||
esp_err_t ret_code = ESP_OK;
|
||||
gdma_pair_t *pair = NULL;
|
||||
gdma_group_t *group = NULL;
|
||||
DMA_CHECK(dma_chan && dma_chan->direction == GDMA_CHANNEL_DIRECTION_RX, "invalid argument", err, ESP_ERR_INVALID_ARG);
|
||||
pair = dma_chan->pair;
|
||||
group = pair->group;
|
||||
gdma_rx_channel_t *rx_chan = __containerof(dma_chan, gdma_rx_channel_t, base);
|
||||
|
||||
// lazy install interrupt service
|
||||
DMA_CHECK(gdma_install_interrupt(pair) == ESP_OK, "install interrupt service failed", err, ESP_FAIL);
|
||||
|
||||
// enable/disable GDMA interrupt events for RX channel
|
||||
portENTER_CRITICAL(&pair->spinlock);
|
||||
gdma_ll_enable_interrupt(group->hal.dev, pair->pair_id, GDMA_LL_EVENT_RX_SUC_EOF, cbs->on_recv_eof != NULL);
|
||||
portEXIT_CRITICAL(&pair->spinlock);
|
||||
|
||||
rx_chan->on_recv_eof = cbs->on_recv_eof;
|
||||
rx_chan->user_data = user_data;
|
||||
|
||||
err:
|
||||
return ret_code;
|
||||
}
|
||||
|
||||
esp_err_t gdma_start(gdma_channel_handle_t dma_chan, intptr_t desc_base_addr)
|
||||
{
|
||||
esp_err_t ret_code = ESP_OK;
|
||||
gdma_pair_t *pair = NULL;
|
||||
gdma_group_t *group = NULL;
|
||||
DMA_CHECK(dma_chan, "invalid argument", err, ESP_ERR_INVALID_ARG);
|
||||
pair = dma_chan->pair;
|
||||
group = pair->group;
|
||||
|
||||
if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_RX) {
|
||||
gdma_ll_rx_set_desc_addr(group->hal.dev, pair->pair_id, desc_base_addr);
|
||||
gdma_ll_rx_start(group->hal.dev, pair->pair_id);
|
||||
} else {
|
||||
gdma_ll_tx_set_desc_addr(group->hal.dev, pair->pair_id, desc_base_addr);
|
||||
gdma_ll_tx_start(group->hal.dev, pair->pair_id);
|
||||
}
|
||||
|
||||
err:
|
||||
return ret_code;
|
||||
}
|
||||
|
||||
esp_err_t gdma_stop(gdma_channel_handle_t dma_chan)
|
||||
{
|
||||
esp_err_t ret_code = ESP_OK;
|
||||
gdma_pair_t *pair = NULL;
|
||||
gdma_group_t *group = NULL;
|
||||
DMA_CHECK(dma_chan, "invalid argument", err, ESP_ERR_INVALID_ARG);
|
||||
pair = dma_chan->pair;
|
||||
group = pair->group;
|
||||
|
||||
if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_RX) {
|
||||
gdma_ll_rx_stop(group->hal.dev, pair->pair_id);
|
||||
} else {
|
||||
gdma_ll_tx_stop(group->hal.dev, pair->pair_id);
|
||||
}
|
||||
|
||||
err:
|
||||
return ret_code;
|
||||
}
|
||||
|
||||
esp_err_t gdma_append(gdma_channel_handle_t dma_chan)
|
||||
{
|
||||
esp_err_t ret_code = ESP_OK;
|
||||
gdma_pair_t *pair = NULL;
|
||||
gdma_group_t *group = NULL;
|
||||
DMA_CHECK(dma_chan, "invalid argument", err, ESP_ERR_INVALID_ARG);
|
||||
pair = dma_chan->pair;
|
||||
group = pair->group;
|
||||
|
||||
if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_RX) {
|
||||
gdma_ll_rx_restart(group->hal.dev, pair->pair_id);
|
||||
} else {
|
||||
gdma_ll_tx_restart(group->hal.dev, pair->pair_id);
|
||||
}
|
||||
|
||||
err:
|
||||
return ret_code;
|
||||
}
|
||||
|
||||
static inline bool gdma_is_group_busy(gdma_group_t *group)
|
||||
{
|
||||
return group->ref_count;
|
||||
}
|
||||
|
||||
static void gdma_uninstall_group(gdma_group_t *group)
|
||||
{
|
||||
int group_id = group->group_id;
|
||||
bool do_deinitialize = false;
|
||||
|
||||
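// Check-lock-recheck: the unguarded test keeps the platform spinlock off the common path,
// and the same condition is re-evaluated inside the critical section before teardown.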
if (s_platform.groups[group_id] && !gdma_is_group_busy(group)) {
|
||||
portENTER_CRITICAL(&s_platform.spinlock);
|
||||
if (s_platform.groups[group_id] && !gdma_is_group_busy(group)) {
|
||||
do_deinitialize = true;
|
||||
s_platform.groups[group_id] = NULL; // deregister from platform
|
||||
gdma_ll_enable_clock(group->hal.dev, false);
|
||||
periph_module_disable(gdma_periph_signals.groups[group_id].module);
|
||||
}
|
||||
portEXIT_CRITICAL(&s_platform.spinlock);
|
||||
}
|
||||
|
||||
if (do_deinitialize) {
|
||||
free(group);
|
||||
ESP_LOGD(TAG, "del group %d", group_id);
|
||||
}
|
||||
}
|
||||
|
||||
static gdma_group_t *gdma_acquire_group_handle(int group_id)
|
||||
{
|
||||
gdma_group_t *group = NULL;
|
||||
bool new_group = false;
|
||||
portENTER_CRITICAL(&s_platform.spinlock);
|
||||
if (!s_platform.groups[group_id]) {
|
||||
// lazy install group
|
||||
group = calloc(1, sizeof(gdma_group_t));
|
||||
if (group) {
|
||||
new_group = true;
|
||||
s_platform.groups[group_id] = group; // register to platform
|
||||
group->group_id = group_id;
|
||||
group->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
|
||||
periph_module_enable(gdma_periph_signals.groups[group_id].module); // enable APB to access GDMA registers
|
||||
gdma_hal_init(&group->hal, group_id); // initialize HAL context
|
||||
gdma_ll_enable_clock(group->hal.dev, true); // enable gdma clock
|
||||
}
|
||||
} else {
|
||||
group = s_platform.groups[group_id];
|
||||
}
|
||||
if (group) {
|
||||
// someone acquiring the group handle means a new object now refers to this group
|
||||
group->ref_count++;
|
||||
}
|
||||
portEXIT_CRITICAL(&s_platform.spinlock);
|
||||
|
||||
if (new_group) {
|
||||
ESP_LOGD(TAG, "new group (%d) at %p", group->group_id, group);
|
||||
}
|
||||
return group;
|
||||
}
|
||||
|
||||
static void gdma_release_group_handle(gdma_group_t *group)
|
||||
{
|
||||
if (group) {
|
||||
portENTER_CRITICAL(&group->spinlock);
|
||||
group->ref_count--;
|
||||
portEXIT_CRITICAL(&group->spinlock);
|
||||
gdma_uninstall_group(group);
|
||||
}
|
||||
}
|
||||
|
||||
static inline bool gdma_is_pair_busy(gdma_pair_t *pair)
|
||||
{
|
||||
return pair->ref_count;
|
||||
}
|
||||
|
||||
static void gdma_uninstall_pair(gdma_pair_t *pair)
|
||||
{
|
||||
gdma_group_t *group = pair->group;
|
||||
int pair_id = pair->pair_id;
|
||||
bool do_deinitialize = false;
|
||||
|
||||
if (group->pairs[pair_id] && !gdma_is_pair_busy(pair)) {
|
||||
portENTER_CRITICAL(&group->spinlock);
|
||||
if (group->pairs[pair_id] && !gdma_is_pair_busy(pair)) {
|
||||
do_deinitialize = true;
|
||||
group->pairs[pair_id] = NULL; // deregister the pair from the group
|
||||
group->ref_count--; // decrease reference count, because this pair won't refer to the group
|
||||
}
|
||||
portEXIT_CRITICAL(&group->spinlock);
|
||||
}
|
||||
if (do_deinitialize) {
|
||||
if (pair->intr) {
|
||||
esp_intr_free(pair->intr); // free interrupt resource
|
||||
ESP_LOGD(TAG, "uninstall interrupt service for pair (%d,%d)", group->group_id, pair_id);
|
||||
}
|
||||
free(pair);
|
||||
ESP_LOGD(TAG, "del pair (%d,%d)", group->group_id, pair_id);
|
||||
gdma_uninstall_group(group);
|
||||
}
|
||||
}
|
||||
|
||||
static gdma_pair_t *gdma_acquire_pair_handle(gdma_group_t *group, int pair_id)
|
||||
{
|
||||
gdma_pair_t *pair = NULL;
|
||||
bool new_pair = false;
|
||||
portENTER_CRITICAL(&group->spinlock);
|
||||
if (!group->pairs[pair_id]) {
|
||||
// lazy install pair
|
||||
pair = calloc(1, sizeof(gdma_pair_t));
|
||||
if (pair) {
|
||||
new_pair = true;
|
||||
group->pairs[pair_id] = pair; // register to group
|
||||
group->ref_count++; // pair obtains a reference to group
|
||||
pair->group = group;
|
||||
pair->pair_id = pair_id;
|
||||
pair->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
|
||||
}
|
||||
} else {
|
||||
pair = group->pairs[pair_id];
|
||||
}
|
||||
if (pair) {
|
||||
// someone acquiring the pair handle means a new object now refers to this pair
|
||||
pair->ref_count++;
|
||||
}
|
||||
portEXIT_CRITICAL(&group->spinlock);
|
||||
|
||||
if (new_pair) {
|
||||
ESP_LOGD(TAG, "new pair (%d,%d) at %p", group->group_id, pair->pair_id, pair);
|
||||
}
|
||||
return pair;
|
||||
}
|
||||
|
||||
static void gdma_release_pair_handle(gdma_pair_t *pair)
|
||||
{
|
||||
if (pair) {
|
||||
portENTER_CRITICAL(&pair->spinlock);
|
||||
pair->ref_count--;
|
||||
portEXIT_CRITICAL(&pair->spinlock);
|
||||
gdma_uninstall_pair(pair);
|
||||
}
|
||||
}
|
||||
|
||||
static esp_err_t gdma_del_tx_channel(gdma_channel_t *dma_channel)
|
||||
{
|
||||
gdma_pair_t *pair = dma_channel->pair;
|
||||
gdma_tx_channel_t *tx_chan = __containerof(dma_channel, gdma_tx_channel_t, base);
|
||||
portENTER_CRITICAL(&pair->spinlock);
|
||||
pair->tx_chan = NULL;
|
||||
pair->ref_count--; // decrease reference count, because this channel won't refer to the pair
|
||||
pair->occupy_code &= ~SEARCH_REQUEST_TX_CHANNEL;
|
||||
portEXIT_CRITICAL(&pair->spinlock);
|
||||
|
||||
ESP_LOGD(TAG, "del tx channel (%d,%d)", pair->group->group_id, pair->pair_id);
|
||||
free(tx_chan);
|
||||
|
||||
gdma_uninstall_pair(pair);
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
static esp_err_t gdma_del_rx_channel(gdma_channel_t *dma_channel)
|
||||
{
|
||||
gdma_pair_t *pair = dma_channel->pair;
|
||||
gdma_rx_channel_t *rx_chan = __containerof(dma_channel, gdma_rx_channel_t, base);
|
||||
portENTER_CRITICAL(&pair->spinlock);
|
||||
pair->rx_chan = NULL;
|
||||
pair->ref_count--; // decrease reference count, because this channel won't refer to the pair
|
||||
pair->occupy_code &= ~SEARCH_REQUEST_RX_CHANNEL;
|
||||
portEXIT_CRITICAL(&pair->spinlock);
|
||||
|
||||
ESP_LOGD(TAG, "del rx channel (%d,%d)", pair->group->group_id, pair->pair_id);
|
||||
free(rx_chan);
|
||||
|
||||
gdma_uninstall_pair(pair);
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
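// Shared per-pair ISR: snapshot and clear the pair's interrupt status, then dispatch
// RX/TX EOF events to whichever callbacks are registered; a callback returning true
// requests a context switch via portYIELD_FROM_ISR().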
static void IRAM_ATTR gdma_default_isr(void *args)
|
||||
{
|
||||
gdma_pair_t *pair = (gdma_pair_t *)args;
|
||||
gdma_group_t *group = pair->group;
|
||||
gdma_rx_channel_t *rx_chan = pair->rx_chan;
|
||||
gdma_tx_channel_t *tx_chan = pair->tx_chan;
|
||||
bool need_yield = false;
|
||||
// clear pending interrupt event
|
||||
uint32_t intr_status = gdma_ll_get_interrupt_status(group->hal.dev, pair->pair_id);
|
||||
gdma_ll_clear_interrupt_status(group->hal.dev, pair->pair_id, intr_status);
|
||||
|
||||
if (intr_status & GDMA_LL_EVENT_RX_SUC_EOF) {
|
||||
if (rx_chan && rx_chan->on_recv_eof) {
|
||||
uint32_t eof_addr = gdma_ll_rx_get_success_eof_desc_addr(group->hal.dev, pair->pair_id);
|
||||
gdma_event_data_t edata = {
|
||||
.rx_eof_desc_addr = eof_addr
|
||||
};
|
||||
if (rx_chan->on_recv_eof(&rx_chan->base, &edata, rx_chan->user_data)) {
|
||||
need_yield = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (intr_status & GDMA_LL_EVENT_TX_EOF) {
|
||||
if (tx_chan && tx_chan->on_trans_eof) {
|
||||
uint32_t eof_addr = gdma_ll_tx_get_eof_desc_addr(group->hal.dev, pair->pair_id);
|
||||
gdma_event_data_t edata = {
|
||||
.tx_eof_desc_addr = eof_addr
|
||||
};
|
||||
if (tx_chan->on_trans_eof(&tx_chan->base, &edata, tx_chan->user_data)) {
|
||||
need_yield = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (need_yield) {
|
||||
portYIELD_FROM_ISR();
|
||||
}
|
||||
}
|
||||
|
||||
static esp_err_t gdma_install_interrupt(gdma_pair_t *pair)
|
||||
{
|
||||
esp_err_t ret_code = ESP_OK;
|
||||
gdma_group_t *group = pair->group;
|
||||
int isr_flags = 0;
|
||||
bool do_install_isr = false;
|
||||
|
||||
if (!pair->intr) {
|
||||
portENTER_CRITICAL(&pair->spinlock);
|
||||
if (!pair->intr) {
|
||||
do_install_isr = true;
|
||||
ret_code = esp_intr_alloc(gdma_periph_signals.groups[group->group_id].pairs[pair->pair_id].irq_id, isr_flags, gdma_default_isr, pair, &pair->intr);
|
||||
gdma_ll_enable_interrupt(group->hal.dev, pair->pair_id, UINT32_MAX, false); // disable all interrupt events
|
||||
gdma_ll_clear_interrupt_status(group->hal.dev, pair->pair_id, UINT32_MAX); // clear all pending events
|
||||
}
|
||||
portEXIT_CRITICAL(&pair->spinlock);
|
||||
}
|
||||
if (do_install_isr) {
|
||||
DMA_CHECK(ret_code == ESP_OK, "alloc interrupt failed", err, ret_code);
|
||||
ESP_LOGD(TAG, "install interrupt service for pair (%d,%d)", group->group_id, pair->pair_id);
|
||||
}
|
||||
|
||||
err:
|
||||
return ret_code;
|
||||
}
|
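To illustrate the callback contract implemented by gdma_default_isr() above, a hedged sketch of a client registering a TX EOF callback (the callback body, task-notification scheme, and helper name are illustrative, not part of this merge; assumes FreeRTOS):

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "esp_private/gdma.h"

// Runs in ISR context; returning true asks the GDMA driver to call portYIELD_FROM_ISR().
static bool example_on_trans_eof(gdma_channel_handle_t dma_chan, gdma_event_data_t *event_data, void *user_data)
{
    BaseType_t task_woken = pdFALSE;
    vTaskNotifyGiveFromISR((TaskHandle_t)user_data, &task_woken);
    return task_woken == pdTRUE;
}

// Registration (error handling omitted):
//   gdma_tx_event_callbacks_t cbs = { .on_trans_eof = example_on_trans_eof };
//   gdma_register_tx_event_callbacks(tx_chan, &cbs, xTaskGetCurrentTaskHandle());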
components/driver/include/esp_private/gdma.h (new file, 285 lines)
@ -0,0 +1,285 @@
|
||||
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// DO NOT USE THESE APIS IN ANY APPLICATIONS
|
||||
// The GDMA driver is not public for end users; it is intended for ESP-IDF developers.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <stdbool.h>
|
||||
#include "soc/gdma_channel.h"
|
||||
#include "esp_err.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @brief Type of GDMA channel handle
|
||||
*
|
||||
*/
|
||||
typedef struct gdma_channel_t *gdma_channel_handle_t;
|
||||
|
||||
/**
|
||||
* @brief Enumeration of peripherals which have the DMA capability
|
||||
* @note Some peripherals might not be available on certain chips; please refer to `soc_caps.h` for details.
|
||||
*
|
||||
*/
|
||||
typedef enum {
|
||||
GDMA_TRIG_PERIPH_M2M, /*!< GDMA trigger peripheral: M2M */
|
||||
GDMA_TRIG_PERIPH_UART, /*!< GDMA trigger peripheral: UART */
|
||||
GDMA_TRIG_PERIPH_SPI, /*!< GDMA trigger peripheral: SPI */
|
||||
GDMA_TRIG_PERIPH_I2S, /*!< GDMA trigger peripheral: I2S */
|
||||
GDMA_TRIG_PERIPH_AES, /*!< GDMA trigger peripheral: AES */
|
||||
GDMA_TRIG_PERIPH_SHA, /*!< GDMA trigger peripheral: SHA */
|
||||
GDMA_TRIG_PERIPH_ADC, /*!< GDMA trigger peripheral: ADC */
|
||||
GDMA_TRIG_PERIPH_DAC, /*!< GDMA trigger peripheral: DAC */
|
||||
GDMA_TRIG_PERIPH_LCD, /*!< GDMA trigger peripheral: LCD */
|
||||
GDMA_TRIG_PERIPH_CAM /*!< GDMA trigger peripheral: CAM */
|
||||
} gdma_trigger_peripheral_t;
|
||||
|
||||
/**
|
||||
* @brief Enumeration of GDMA channel direction
|
||||
*
|
||||
*/
|
||||
typedef enum {
|
||||
GDMA_CHANNEL_DIRECTION_TX, /*!< GDMA channel direction: TX */
|
||||
GDMA_CHANNEL_DIRECTION_RX, /*!< GDMA channel direction: RX */
|
||||
} gdma_channel_direction_t;
|
||||
|
||||
/**
|
||||
* @brief Collection of configuration items used for allocating a GDMA channel
|
||||
*
|
||||
*/
|
||||
typedef struct {
|
||||
gdma_channel_handle_t sibling_chan; /*!< DMA sibling channel handle (NULL means having sibling is not necessary) */
|
||||
gdma_channel_direction_t direction; /*!< DMA channel direction */
|
||||
struct {
|
||||
int reserve_sibling: 1; /*!< If set, the channel allocator prefers to put the new channel in a fresh pair and reserves the sibling channel for later use */
|
||||
} flags;
|
||||
} gdma_channel_alloc_config_t;
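// Typical allocation patterns (illustrative):
//   - Independent channel: set only `direction`.
//   - Paired channels: allocate the first channel with `flags.reserve_sibling = 1`,
//     then allocate the second one with `sibling_chan` pointing at the first.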
|
||||
|
||||
/**
|
||||
* @brief Type of GDMA event data
|
||||
*
|
||||
*/
|
||||
typedef struct {
|
||||
union {
|
||||
intptr_t rx_eof_desc_addr; /*!< EOF descriptor address of RX channel */
|
||||
intptr_t tx_eof_desc_addr; /*!< EOF descriptor address of TX channel */
|
||||
};
|
||||
} gdma_event_data_t;
|
||||
|
||||
/**
|
||||
* @brief Type of GDMA event callback
|
||||
* @param dma_chan GDMA channel handle, created from `gdma_new_channel`
|
||||
* @param event_data GDMA event data
|
||||
* @param user_data User registered data from `gdma_register_tx_event_callbacks` or `gdma_register_rx_event_callbacks`
|
||||
*
|
||||
*/
|
||||
typedef bool (*gdma_event_callback_t)(gdma_channel_handle_t dma_chan, gdma_event_data_t *event_data, void *user_data);
|
||||
|
||||
/**
|
||||
* @brief Group of supported GDMA TX callbacks
|
||||
* @note The callbacks are all running under ISR environment
|
||||
*
|
||||
*/
|
||||
typedef struct {
|
||||
gdma_event_callback_t on_trans_eof; /*!< Invoked when TX engine meets EOF descriptor */
|
||||
} gdma_tx_event_callbacks_t;
|
||||
|
||||
/**
|
||||
* @brief Group of supported GDMA RX callbacks
|
||||
* @note The callbacks are all running under ISR environment
|
||||
*
|
||||
*/
|
||||
typedef struct {
|
||||
gdma_event_callback_t on_recv_eof; /*!< Invoked when RX engine meets EOF descriptor */
|
||||
} gdma_rx_event_callbacks_t;
|
||||
|
||||
/**
|
||||
* @brief Type of GDMA engine trigger
|
||||
* @note It's recommended to initialize this structure with `GDMA_MAKE_TRIGGER`.
|
||||
*
|
||||
*/
|
||||
typedef struct {
|
||||
gdma_trigger_peripheral_t periph; /*!< Target peripheral which will trigger DMA operations */
|
||||
int instance_id; /*!< Peripheral instance ID. Supported IDs are listed in `soc/gdma_channel.h`, e.g. SOC_GDMA_TRIG_PERIPH_UART0 */
|
||||
} gdma_trigger_t;
|
||||
|
||||
/**
|
||||
* @brief Helper macro to initialize GDMA trigger
|
||||
* @note The value of `peri` must be selected from the `gdma_trigger_peripheral_t` enum.
|
||||
* e.g. GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_UART,0)
|
||||
*
|
||||
*/
|
||||
#define GDMA_MAKE_TRIGGER(peri, id) \
|
||||
(gdma_trigger_t) { .periph = peri, .instance_id = SOC_##peri##id }
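// For example, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_UART, 0) expands to
//   (gdma_trigger_t) { .periph = GDMA_TRIG_PERIPH_UART, .instance_id = SOC_GDMA_TRIG_PERIPH_UART0 }
// i.e. token pasting selects the matching SOC_GDMA_TRIG_PERIPH_xxx constant from soc/gdma_channel.h.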
|
||||
|
||||
/**
|
||||
* @brief A collection of strategy items that each GDMA channel can apply
|
||||
*
|
||||
*/
|
||||
typedef struct {
|
||||
bool owner_check; /*!< If set / clear, DMA channel enables / disables checking owner validity */
|
||||
bool auto_update_desc; /*!< If set / clear, DMA channel enables / disables hardware to update descriptor automatically (TX channel only) */
|
||||
} gdma_strategy_config_t;
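// Example (illustrative; mirrors the async memcpy usage further down in this merge):
//   gdma_strategy_config_t strategy = { .owner_check = true, .auto_update_desc = true };
//   gdma_apply_strategy(tx_chan, &strategy);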
|
||||
|
||||
/**
|
||||
* @brief Create GDMA channel
|
||||
* @note This API won't install interrupt service for the allocated channel.
|
||||
* If interrupt service is needed, user has to register GDMA event callback by `gdma_register_tx_event_callbacks` or `gdma_register_rx_event_callbacks`.
|
||||
*
|
||||
* @param[in] config Pointer to a collection of configurations for allocating GDMA channel
|
||||
* @param[out] ret_chan Returned channel handle
|
||||
* @return
|
||||
* - ESP_OK: Create DMA channel successfully
|
||||
* - ESP_ERR_INVALID_ARG: Create DMA channel failed because of invalid argument
|
||||
* - ESP_ERR_NO_MEM: Create DMA channel failed because out of memory
|
||||
* - ESP_ERR_TIMEOUT: Create DMA channel failed because of time out
|
||||
* - ESP_FAIL: Create DMA channel failed because of other error
|
||||
*/
|
||||
esp_err_t gdma_new_channel(const gdma_channel_alloc_config_t *config, gdma_channel_handle_t *ret_chan);
|
||||
|
||||
/**
|
||||
* @brief Connect GDMA channel to trigger peripheral
|
||||
*
|
||||
* @note It is recommended to use the helper macro `GDMA_MAKE_TRIGGER` to construct the parameter `trig_periph`, e.g. GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_SHA, 0)
|
||||
*
|
||||
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
|
||||
* @param[in] trig_periph GDMA trigger peripheral
|
||||
* @return
|
||||
* - ESP_OK: Connect GDMA channel successfully
|
||||
* - ESP_ERR_INVALID_ARG: Connect GDMA channel failed because of invalid argument
|
||||
* - ESP_ERR_INVALID_STATE: Connect GDMA channel failed because DMA channel is working with another peripheral
|
||||
* - ESP_FAIL: Connect GDMA channel failed because of other error
|
||||
*/
|
||||
esp_err_t gdma_connect(gdma_channel_handle_t dma_chan, gdma_trigger_t trig_periph);
|
||||
|
||||
/**
|
||||
* @brief Disconnect GDMA channel from peripheral
|
||||
*
|
||||
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
|
||||
* @return
|
||||
* - ESP_OK: Disconnect GDMA channel successfully
|
||||
* - ESP_ERR_INVALID_ARG: Disconnect GDMA channel failed because of invalid argument
|
||||
* - ESP_ERR_INVALID_STATE: Disconnect GDMA channel failed because DMA channel is not connected to any peripheral
|
||||
* - ESP_FAIL: Disconnect DMA channel failed because of other error
|
||||
*/
|
||||
esp_err_t gdma_disconnect(gdma_channel_handle_t dma_chan);
|
||||
|
||||
/**
|
||||
* @brief Apply channel strategy for GDMA channel
|
||||
*
|
||||
* @param dma_chan GDMA channel handle, allocated by `gdma_new_channel`
|
||||
* @param config Configuration of GDMA channel strategy
* @return
|
||||
* - ESP_OK: Apply channel strategy successfully
|
||||
* - ESP_ERR_INVALID_ARG: Apply channel strategy failed because of invalid argument
|
||||
* - ESP_FAIL: Apply channel strategy failed because of other error
|
||||
*/
|
||||
esp_err_t gdma_apply_strategy(gdma_channel_handle_t dma_chan, const gdma_strategy_config_t *config);
|
||||
|
||||
/**
|
||||
* @brief Delete GDMA channel
|
||||
* @note If you call `gdma_new_channel` several times for the same peripheral, make sure you call this API the same number of times.
|
||||
*
|
||||
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
|
||||
* @return
|
||||
* - ESP_OK: Delete GDMA channel successfully
|
||||
* - ESP_ERR_INVALID_ARG: Delete GDMA channel failed because of invalid argument
|
||||
* - ESP_FAIL: Delete GDMA channel failed because of other error
|
||||
*/
|
||||
esp_err_t gdma_del_channel(gdma_channel_handle_t dma_chan);
|
||||
|
||||
/**
|
||||
* @brief Get the channel ID
|
||||
*
|
||||
* @note This API breaks the encapsulation of GDMA Channel Object.
|
||||
* With the returned channel ID, you can even bypass all other GDMA driver APIs and access the Low Level API directly.
|
||||
*
|
||||
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
|
||||
* @param[out] channel_id Returned channel ID
|
||||
* @return
|
||||
* - ESP_OK: Get GDMA channel ID successfully
|
||||
* - ESP_ERR_INVALID_ARG: Get GDMA channel ID failed because of invalid argument
|
||||
* - ESP_FAIL: Get GDMA channel ID failed because of other error
|
||||
*/
|
||||
esp_err_t gdma_get_channel_id(gdma_channel_handle_t dma_chan, int *channel_id);
|
||||
|
||||
/**
|
||||
* @brief Set GDMA event callbacks for TX channel
|
||||
* @note This API will install GDMA interrupt service for the channel internally
|
||||
*
|
||||
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
|
||||
* @param[in] cbs Group of callback functions
|
||||
* @param[in] user_data User data, which will be passed to callback functions directly
|
||||
* @return
|
||||
* - ESP_OK: Set event callbacks successfully
|
||||
* - ESP_ERR_INVALID_ARG: Set event callbacks failed because of invalid argument
|
||||
* - ESP_FAIL: Set event callbacks failed because of other error
|
||||
*/
|
||||
esp_err_t gdma_register_tx_event_callbacks(gdma_channel_handle_t dma_chan, gdma_tx_event_callbacks_t *cbs, void *user_data);
|
||||
|
||||
/**
|
||||
* @brief Set GDMA event callbacks for RX channel
|
||||
* @note This API will install GDMA interrupt service for the channel internally
|
||||
*
|
||||
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
|
||||
* @param[in] cbs Group of callback functions
|
||||
* @param[in] user_data User data, which will be passed to callback functions directly
|
||||
* @return
|
||||
* - ESP_OK: Set event callbacks successfully
|
||||
* - ESP_ERR_INVALID_ARG: Set event callbacks failed because of invalid argument
|
||||
* - ESP_FAIL: Set event callbacks failed because of other error
|
||||
*/
|
||||
esp_err_t gdma_register_rx_event_callbacks(gdma_channel_handle_t dma_chan, gdma_rx_event_callbacks_t *cbs, void *user_data);
|
||||
|
||||
/**
|
||||
* @brief Set DMA descriptor address and start engine
|
||||
*
|
||||
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
|
||||
* @param[in] desc_base_addr Base address of descriptors (usually the descriptors are chained into a link or ring)
|
||||
* @return
|
||||
* - ESP_OK: Start DMA engine successfully
|
||||
* - ESP_ERR_INVALID_ARG: Start DMA engine failed because of invalid argument
|
||||
* - ESP_FAIL: Start DMA engine failed because of other error
|
||||
*/
|
||||
esp_err_t gdma_start(gdma_channel_handle_t dma_chan, intptr_t desc_base_addr);
|
||||
|
||||
/**
|
||||
* @brief Stop DMA engine
|
||||
*
|
||||
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
|
||||
* @return
|
||||
* - ESP_OK: Stop DMA engine successfully
|
||||
* - ESP_ERR_INVALID_ARG: Stop DMA engine failed because of invalid argument
|
||||
* - ESP_FAIL: Stop DMA engine failed because of other error
|
||||
*/
|
||||
esp_err_t gdma_stop(gdma_channel_handle_t dma_chan);
|
||||
|
||||
/**
|
||||
* @brief Make the DMA engine aware of newly appended descriptors
|
||||
* @note This API can also resume a paused DMA engine; make sure new descriptors have been appended to the descriptor chain before calling it.
|
||||
*
|
||||
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
|
||||
* @return
|
||||
* - ESP_OK: Send append command to DMA engine successfully
|
||||
* - ESP_ERR_INVALID_ARG: Send append command to DMA engine failed because of invalid argument
|
||||
* - ESP_FAIL: Send append command to DMA engine failed because of other error
|
||||
*/
|
||||
esp_err_t gdma_append(gdma_channel_handle_t dma_chan);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
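A minimal sketch of the start/append flow documented above, assuming the dma_descriptor_t layout from hal/dma_types.h (the field names below are an assumption and may vary between IDF versions; the helper name example_send is illustrative):

#include <stdint.h>
#include <stddef.h>
#include "esp_private/gdma.h"

// tx_chan is assumed to be allocated with gdma_new_channel() and connected with gdma_connect().
static void example_send(gdma_channel_handle_t tx_chan, dma_descriptor_t *desc, uint8_t *buf, size_t len)
{
    desc->dw0.size = len;                               // buffer capacity
    desc->dw0.length = len;                             // number of valid bytes
    desc->dw0.suc_eof = 1;                              // raise TX EOF when this descriptor finishes
    desc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;  // hand ownership to the hardware
    desc->buffer = buf;
    desc->next = NULL;
    gdma_start(tx_chan, (intptr_t)desc);                // set descriptor address and start the engine
    // after linking more descriptors onto the chain later:
    // gdma_append(tx_chan);                            // tell the engine new descriptors are available
}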
@ -39,6 +39,7 @@
 // This GDMA-related part will be covered by dedicated GDMA APIs in the future. Here we temporarily use macros.
 #if SOC_GDMA_SUPPORTED
 #include "hal/gdma_ll.h"
+#include "soc/gdma_channel.h"
 #include "soc/spi_caps.h"

 #define spi_dma_set_rx_channel_priority(gdma_chan, priority) gdma_ll_rx_set_priority(&GDMA, gdma_chan, priority);
@ -205,11 +206,11 @@ void spicommon_connect_spi_and_dma(spi_host_device_t host, int dma_chan)
     int gdma_chan, periph_id;
     if (dma_chan == 1) {
         gdma_chan = SOC_GDMA_SPI2_DMA_CHANNEL;
-        periph_id = GDMA_LL_TRIG_SRC_SPI2;
-#ifdef GDMA_LL_TRIG_SRC_SPI3
+        periph_id = SOC_GDMA_TRIG_PERIPH_SPI2;
+#ifdef SOC_GDMA_TRIG_PERIPH_SPI3
     } else if (dma_chan == 2) {
         gdma_chan = SOC_GDMA_SPI3_DMA_CHANNEL;
-        periph_id = GDMA_LL_TRIG_SRC_SPI3;
+        periph_id = SOC_GDMA_TRIG_PERIPH_SPI3;
 #endif
     } else {
         abort();
components/driver/test/test_gdma.c (new file, 60 lines)
@ -0,0 +1,60 @@
#include "unity.h"
#include "esp_private/gdma.h"
#include "soc/soc_caps.h"

#if SOC_GDMA_SUPPORTED

TEST_CASE("GDMA channel allocation", "[gdma]")
{
    gdma_channel_alloc_config_t channel_config = {};
    gdma_channel_handle_t tx_channels[SOC_GDMA_PAIRS_PER_GROUP] = {};
    gdma_channel_handle_t rx_channels[SOC_GDMA_PAIRS_PER_GROUP] = {};
    channel_config.direction = GDMA_CHANNEL_DIRECTION_TX;

    // exhaust all TX channels in the group (each one connected to the M2M trigger)
    for (int i = 0; i < SOC_GDMA_PAIRS_PER_GROUP; i++) {
        TEST_ESP_OK(gdma_new_channel(&channel_config, &tx_channels[i]));
        TEST_ESP_OK(gdma_connect(tx_channels[i], GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_M2M, 0)));
    }
    TEST_ASSERT_EQUAL(ESP_ERR_NOT_FOUND, gdma_new_channel(&channel_config, &tx_channels[0]));

    // exhaust all RX channels in the group
    channel_config.direction = GDMA_CHANNEL_DIRECTION_RX;
    for (int i = 0; i < SOC_GDMA_PAIRS_PER_GROUP; i++) {
        TEST_ESP_OK(gdma_new_channel(&channel_config, &rx_channels[i]));
        TEST_ESP_OK(gdma_connect(rx_channels[i], GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_M2M, 0)));
    }
    TEST_ASSERT_EQUAL(ESP_ERR_NOT_FOUND, gdma_new_channel(&channel_config, &rx_channels[0]));

    for (int i = 0; i < SOC_GDMA_PAIRS_PER_GROUP; i++) {
        TEST_ESP_OK(gdma_disconnect(tx_channels[i]));
        TEST_ESP_OK(gdma_disconnect(rx_channels[i]));
        TEST_ESP_OK(gdma_del_channel(tx_channels[i]));
        TEST_ESP_OK(gdma_del_channel(rx_channels[i]));
    }

    // install single and paired TX/RX channels
#if SOC_GDMA_PAIRS_PER_GROUP >= 2
    // single tx channel
    channel_config.direction = GDMA_CHANNEL_DIRECTION_TX;
    TEST_ESP_OK(gdma_new_channel(&channel_config, &tx_channels[0]));

    // create tx channel and reserve sibling
    channel_config.direction = GDMA_CHANNEL_DIRECTION_TX;
    channel_config.flags.reserve_sibling = 1;
    TEST_ESP_OK(gdma_new_channel(&channel_config, &tx_channels[1]));
    // create rx channel and specify sibling channel
    channel_config.flags.reserve_sibling = 0;
    channel_config.sibling_chan = tx_channels[1]; // specify sibling channel
    channel_config.direction = GDMA_CHANNEL_DIRECTION_RX;
    TEST_ESP_OK(gdma_new_channel(&channel_config, &rx_channels[1]));
    channel_config.sibling_chan = NULL;
    TEST_ESP_OK(gdma_new_channel(&channel_config, &rx_channels[0]));
    for (int i = 0; i < 2; i++) {
        TEST_ESP_OK(gdma_del_channel(tx_channels[i]));
        TEST_ESP_OK(gdma_del_channel(rx_channels[i]));
    }
#endif
}

#endif
@ -48,6 +48,7 @@ typedef struct {
|
||||
*/
|
||||
typedef struct async_memcpy_context_t {
|
||||
async_memcpy_impl_t mcp_impl; // implementation layer
|
||||
portMUX_TYPE spinlock; // spinlock, prevent operating descriptors concurrently
|
||||
intr_handle_t intr_hdl; // interrupt handle
|
||||
uint32_t flags; // extra driver flags
|
||||
dma_descriptor_t *tx_desc; // pointer to the next free TX descriptor
|
||||
@ -73,11 +74,6 @@ esp_err_t esp_async_memcpy_install(const async_memcpy_config_t *config, async_me
|
||||
mcp_hdl = heap_caps_calloc(1, total_malloc_size, MALLOC_CAP_8BIT | MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);
|
||||
ASMCP_CHECK(mcp_hdl, "allocate context memory failed", err, ESP_ERR_NO_MEM);
|
||||
|
||||
int int_flags = ESP_INTR_FLAG_IRAM; // interrupt can still work when cache is disabled
|
||||
// allocate interrupt handle, it's target dependent
|
||||
ret_code = async_memcpy_impl_allocate_intr(&mcp_hdl->mcp_impl, int_flags, &mcp_hdl->intr_hdl);
|
||||
ASMCP_CHECK(ret_code == ESP_OK, "allocate interrupt handle failed", err, ret_code);
|
||||
|
||||
mcp_hdl->flags = config->flags;
|
||||
mcp_hdl->out_streams = mcp_hdl->streams_pool;
|
||||
mcp_hdl->in_streams = mcp_hdl->streams_pool + config->backlog;
|
||||
@ -96,20 +92,18 @@ esp_err_t esp_async_memcpy_install(const async_memcpy_config_t *config, async_me
|
||||
mcp_hdl->tx_desc = &mcp_hdl->out_streams[0].desc;
|
||||
mcp_hdl->rx_desc = &mcp_hdl->in_streams[0].desc;
|
||||
mcp_hdl->next_rx_desc_to_check = &mcp_hdl->in_streams[0].desc;
|
||||
mcp_hdl->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
|
||||
|
||||
// initialize implementation layer
|
||||
async_memcpy_impl_init(&mcp_hdl->mcp_impl, &mcp_hdl->out_streams[0].desc, &mcp_hdl->in_streams[0].desc);
|
||||
async_memcpy_impl_init(&mcp_hdl->mcp_impl);
|
||||
|
||||
*asmcp = mcp_hdl;
|
||||
|
||||
async_memcpy_impl_start(&mcp_hdl->mcp_impl);
|
||||
async_memcpy_impl_start(&mcp_hdl->mcp_impl, (intptr_t)&mcp_hdl->out_streams[0].desc, (intptr_t)&mcp_hdl->in_streams[0].desc);
|
||||
|
||||
return ESP_OK;
|
||||
err:
|
||||
if (mcp_hdl) {
|
||||
if (mcp_hdl->intr_hdl) {
|
||||
esp_intr_free(mcp_hdl->intr_hdl);
|
||||
}
|
||||
free(mcp_hdl);
|
||||
}
|
||||
if (asmcp) {
|
||||
@ -123,7 +117,6 @@ esp_err_t esp_async_memcpy_uninstall(async_memcpy_t asmcp)
|
||||
esp_err_t ret_code = ESP_OK;
|
||||
ASMCP_CHECK(asmcp, "mcp handle can't be null", err, ESP_ERR_INVALID_ARG);
|
||||
|
||||
esp_intr_free(asmcp->intr_hdl);
|
||||
async_memcpy_impl_stop(&asmcp->mcp_impl);
|
||||
async_memcpy_impl_deinit(&asmcp->mcp_impl);
|
||||
free(asmcp);
|
||||
@ -243,7 +236,7 @@ esp_err_t esp_async_memcpy(async_memcpy_t asmcp, void *dst, void *src, size_t n,
|
||||
ASMCP_CHECK(n <= DMA_DESCRIPTOR_BUFFER_MAX_SIZE * asmcp->max_stream_num, "buffer size too large", err, ESP_ERR_INVALID_ARG);
|
||||
|
||||
// Prepare TX and RX descriptor
|
||||
portENTER_CRITICAL_SAFE(&asmcp->mcp_impl.hal_lock);
|
||||
portENTER_CRITICAL_SAFE(&asmcp->spinlock);
|
||||
rx_prepared_size = async_memcpy_prepare_receive(asmcp, dst, n, &rx_start_desc, &rx_end_desc);
|
||||
tx_prepared_size = async_memcpy_prepare_transmit(asmcp, src, n, &tx_start_desc, &tx_end_desc);
|
||||
if ((rx_prepared_size == n) && (tx_prepared_size == n)) {
|
||||
@ -269,7 +262,7 @@ esp_err_t esp_async_memcpy(async_memcpy_t asmcp, void *dst, void *src, size_t n,
|
||||
asmcp->tx_desc = desc->next;
|
||||
async_memcpy_impl_restart(&asmcp->mcp_impl);
|
||||
}
|
||||
portEXIT_CRITICAL_SAFE(&asmcp->mcp_impl.hal_lock);
|
||||
portEXIT_CRITICAL_SAFE(&asmcp->spinlock);
|
||||
|
||||
// It's unlikely that we have space for an RX descriptor but no space for a TX descriptor;
// both TX and RX descriptors should move at the same pace
|
||||
@ -289,14 +282,14 @@ IRAM_ATTR void async_memcpy_isr_on_rx_done_event(async_memcpy_impl_t *impl)
|
||||
async_memcpy_context_t *asmcp = __containerof(impl, async_memcpy_context_t, mcp_impl);
|
||||
|
||||
// get the RX eof descriptor address
|
||||
dma_descriptor_t *eof = async_memcpy_impl_get_rx_suc_eof_descriptor(impl);
|
||||
dma_descriptor_t *eof = (dma_descriptor_t *)impl->rx_eof_addr;
|
||||
// traverse all unchecked descriptors
|
||||
do {
|
||||
portENTER_CRITICAL_ISR(&impl->hal_lock);
|
||||
portENTER_CRITICAL_ISR(&asmcp->spinlock);
|
||||
// It is assumed that RX descriptors are consumed at the same pace as TX descriptors (this is determined by the M2M DMA working mechanism)
|
||||
// And once the rx descriptor is recycled, the corresponding tx desc is guaranteed to be returned by DMA
|
||||
to_continue = async_memcpy_get_next_rx_descriptor(asmcp, eof, &next_desc);
|
||||
portEXIT_CRITICAL_ISR(&impl->hal_lock);
|
||||
portEXIT_CRITICAL_ISR(&asmcp->spinlock);
|
||||
if (next_desc) {
|
||||
in_stream = __containerof(next_desc, async_memcpy_stream_t, desc);
|
||||
// invoke user registered callback if available
|
||||
|
@ -24,76 +24,85 @@
|
||||
#include "esp_err.h"
|
||||
#include "esp_async_memcpy_impl.h"
|
||||
|
||||
IRAM_ATTR static void async_memcpy_impl_default_isr_handler(void *args)
|
||||
IRAM_ATTR static bool async_memcpy_impl_rx_eof_callback(gdma_channel_handle_t dma_chan, gdma_event_data_t *event_data, void *user_data)
|
||||
{
|
||||
async_memcpy_impl_t *mcp_impl = (async_memcpy_impl_t *)args;
|
||||
async_memcpy_impl_t *mcp_impl = (async_memcpy_impl_t *)user_data;
|
||||
mcp_impl->rx_eof_addr = event_data->rx_eof_desc_addr;
|
||||
|
||||
portENTER_CRITICAL_ISR(&mcp_impl->hal_lock);
|
||||
uint32_t status = gdma_ll_get_interrupt_status(mcp_impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
|
||||
gdma_ll_clear_interrupt_status(mcp_impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, status);
|
||||
portEXIT_CRITICAL_ISR(&mcp_impl->hal_lock);
|
||||
|
||||
// End-Of-Frame on RX side
|
||||
if (status & GDMA_LL_EVENT_RX_SUC_EOF) {
|
||||
async_memcpy_isr_on_rx_done_event(mcp_impl);
|
||||
}
|
||||
|
||||
if (mcp_impl->isr_need_yield) {
|
||||
mcp_impl->isr_need_yield = false;
|
||||
portYIELD_FROM_ISR();
|
||||
}
|
||||
async_memcpy_isr_on_rx_done_event(mcp_impl);
|
||||
return mcp_impl->isr_need_yield;
|
||||
}
|
||||
|
||||
esp_err_t async_memcpy_impl_allocate_intr(async_memcpy_impl_t *impl, int int_flags, intr_handle_t *intr)
|
||||
esp_err_t async_memcpy_impl_init(async_memcpy_impl_t *impl)
|
||||
{
|
||||
return esp_intr_alloc(ETS_DMA_CH0_INTR_SOURCE, int_flags, async_memcpy_impl_default_isr_handler, impl, intr);
|
||||
}
|
||||
esp_err_t ret = ESP_OK;
|
||||
// create TX channel and reserve sibling channel for future use
|
||||
gdma_channel_alloc_config_t tx_alloc_config = {
|
||||
.flags.reserve_sibling = 1,
|
||||
.direction = GDMA_CHANNEL_DIRECTION_TX,
|
||||
};
|
||||
ret = gdma_new_channel(&tx_alloc_config, &impl->tx_channel);
|
||||
if (ret != ESP_OK) {
|
||||
goto err;
|
||||
}
|
||||
|
||||
esp_err_t async_memcpy_impl_init(async_memcpy_impl_t *impl, dma_descriptor_t *outlink_base, dma_descriptor_t *inlink_base)
|
||||
{
|
||||
impl->hal_lock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
|
||||
impl->hal.dev = &GDMA;
|
||||
periph_module_enable(PERIPH_GDMA_MODULE);
|
||||
gdma_ll_enable_clock(impl->hal.dev, true);
|
||||
gdma_ll_tx_reset_channel(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
|
||||
gdma_ll_rx_reset_channel(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
|
||||
gdma_ll_enable_interrupt(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, UINT32_MAX, true);
|
||||
gdma_ll_clear_interrupt_status(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, UINT32_MAX);
|
||||
gdma_ll_enable_m2m_mode(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, true);
|
||||
gdma_ll_tx_enable_auto_write_back(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, true);
|
||||
gdma_ll_tx_enable_owner_check(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, true);
|
||||
gdma_ll_rx_enable_owner_check(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, true);
|
||||
gdma_ll_tx_set_desc_addr(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, (uint32_t)outlink_base);
|
||||
gdma_ll_rx_set_desc_addr(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, (uint32_t)inlink_base);
|
||||
return ESP_OK;
|
||||
// create RX channel and require it to reside in the same pair as the TX channel
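// (M2M mode is enabled per pair, see gdma_ll_enable_m2m_mode() in gdma_connect(), so the
// memcpy TX and RX channels must come from the same pair for the loopback copy to work.)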
|
||||
gdma_channel_alloc_config_t rx_alloc_config = {
|
||||
.direction = GDMA_CHANNEL_DIRECTION_RX,
|
||||
.sibling_chan = impl->tx_channel,
|
||||
};
|
||||
ret = gdma_new_channel(&rx_alloc_config, &impl->rx_channel);
|
||||
if (ret != ESP_OK) {
|
||||
goto err;
|
||||
}
|
||||
|
||||
gdma_connect(impl->rx_channel, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_M2M, 0));
|
||||
gdma_connect(impl->tx_channel, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_M2M, 0));
|
||||
|
||||
gdma_strategy_config_t strategy_config = {
|
||||
.auto_update_desc = true,
|
||||
.owner_check = true
|
||||
};
|
||||
|
||||
gdma_apply_strategy(impl->tx_channel, &strategy_config);
|
||||
gdma_apply_strategy(impl->rx_channel, &strategy_config);
|
||||
|
||||
gdma_rx_event_callbacks_t cbs = {
|
||||
.on_recv_eof = async_memcpy_impl_rx_eof_callback
|
||||
};
|
||||
ret = gdma_register_rx_event_callbacks(impl->rx_channel, &cbs, impl);
|
||||
|
||||
err:
|
||||
return ret;
|
||||
}
|
||||
|
||||
esp_err_t async_memcpy_impl_deinit(async_memcpy_impl_t *impl)
|
||||
{
|
||||
periph_module_disable(PERIPH_GDMA_MODULE);
|
||||
gdma_disconnect(impl->rx_channel);
|
||||
gdma_disconnect(impl->tx_channel);
|
||||
gdma_del_channel(impl->rx_channel);
|
||||
gdma_del_channel(impl->tx_channel);
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
esp_err_t async_memcpy_impl_start(async_memcpy_impl_t *impl)
|
||||
esp_err_t async_memcpy_impl_start(async_memcpy_impl_t *impl, intptr_t outlink_base, intptr_t inlink_base)
|
||||
{
|
||||
gdma_ll_rx_start(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
|
||||
gdma_ll_tx_start(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
|
||||
gdma_ll_enable_interrupt(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, GDMA_LL_EVENT_RX_SUC_EOF, true);
|
||||
gdma_start(impl->rx_channel, inlink_base);
|
||||
gdma_start(impl->tx_channel, outlink_base);
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
esp_err_t async_memcpy_impl_stop(async_memcpy_impl_t *impl)
|
||||
{
|
||||
gdma_ll_enable_interrupt(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, GDMA_LL_EVENT_RX_SUC_EOF, false);
|
||||
gdma_ll_rx_stop(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
|
||||
gdma_ll_tx_stop(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
|
||||
gdma_stop(impl->rx_channel);
|
||||
gdma_stop(impl->tx_channel);
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
esp_err_t async_memcpy_impl_restart(async_memcpy_impl_t *impl)
|
||||
{
|
||||
gdma_ll_rx_restart(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
|
||||
gdma_ll_tx_restart(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
|
||||
gdma_append(impl->rx_channel);
|
||||
gdma_append(impl->tx_channel);
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
@ -101,8 +110,3 @@ bool async_memcpy_impl_is_buffer_address_valid(async_memcpy_impl_t *impl, void *
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
dma_descriptor_t *async_memcpy_impl_get_rx_suc_eof_descriptor(async_memcpy_impl_t *impl)
|
||||
{
|
||||
return (dma_descriptor_t *)gdma_ll_rx_get_success_eof_desc_addr(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
|
||||
}
|
||||
|
@ -24,8 +24,7 @@
#include "hal/cp_dma_ll.h"
#include "hal/cp_dma_hal.h"
#elif SOC_GDMA_SUPPORTED
#include "hal/gdma_ll.h"
#include "hal/gdma_hal.h"
#include "esp_private/gdma.h"
#endif

/**
@ -33,12 +32,15 @@
 *
 */
typedef struct {
    portMUX_TYPE hal_lock; // spin lock for HAL object
#if SOC_CP_DMA_SUPPORTED
    cp_dma_hal_context_t hal; // CP DMA hal
    intr_handle_t intr; // CP DMA interrupt handle
    portMUX_TYPE hal_lock; // CP DMA HAL level spin lock
#elif SOC_GDMA_SUPPORTED
    gdma_hal_context_t hal; // General DMA hal
    gdma_channel_handle_t tx_channel;
    gdma_channel_handle_t rx_channel;
#endif
    intptr_t rx_eof_addr;
    bool isr_need_yield; // if the current ISR needs a yield for a higher priority task
} async_memcpy_impl_t;

@ -49,28 +51,13 @@ typedef struct {
 */
void async_memcpy_isr_on_rx_done_event(async_memcpy_impl_t *impl);

/**
 * @brief Allocate interrupt handle, register default ISR handler
 *
 * @param impl async mcp implementation layer context pointer
 * @param int_flags interrupt flags
 * @param intr Returned interrupt handle
 * @return
 *      - ESP_OK: Allocate interrupt handle successfully
 *      - ESP_ERR_INVALID_ARG: Allocate interrupt handle failed because of invalid argument
 *      - ESP_FAIL: Allocate interrupt handle failed because of other error
 */
esp_err_t async_memcpy_impl_allocate_intr(async_memcpy_impl_t *impl, int int_flags, intr_handle_t *intr);

/**
 * @brief Initialize async mcp implementation layer
 *
 * @param impl async mcp implementation layer context pointer
 * @param outlink_base Pointer to the first TX descriptor
 * @param inlink_base Pointer to the first RX descriptor
 * @return Always return ESP_OK
 */
esp_err_t async_memcpy_impl_init(async_memcpy_impl_t *impl, dma_descriptor_t *outlink_base, dma_descriptor_t *inlink_base);
esp_err_t async_memcpy_impl_init(async_memcpy_impl_t *impl);

/**
 * @brief Deinitialize async mcp implementation layer
@ -84,9 +71,11 @@ esp_err_t async_memcpy_impl_deinit(async_memcpy_impl_t *impl);
 * @brief Start async mcp (on implementation layer)
 *
 * @param impl async mcp implementation layer context pointer
 * @param outlink_base base descriptor address for TX DMA channel
 * @param inlink_base base descriptor address for RX DMA channel
 * @return Always return ESP_OK
 */
esp_err_t async_memcpy_impl_start(async_memcpy_impl_t *impl);
esp_err_t async_memcpy_impl_start(async_memcpy_impl_t *impl, intptr_t outlink_base, intptr_t inlink_base);

/**
 * @brief Stop async mcp (on implementation layer)
@ -114,11 +103,3 @@ esp_err_t async_memcpy_impl_restart(async_memcpy_impl_t *impl);
 * @return True if both addresses are valid
 */
bool async_memcpy_impl_is_buffer_address_valid(async_memcpy_impl_t *impl, void *src, void *dst);

/**
 * @brief Get the EOF RX descriptor address
 *
 * @param impl async mcp implementation layer context pointer
 * @return Pointer to the EOF RX descriptor
 */
dma_descriptor_t *async_memcpy_impl_get_rx_suc_eof_descriptor(async_memcpy_impl_t *impl);
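This hunk moves the descriptor base addresses out of async_memcpy_impl_init() and into async_memcpy_impl_start(), and folds the separate allocate_intr step into init. Below is a minimal usage sketch against the declarations above; the header name (esp_async_memcpy_impl.h) and the static context/descriptor variables are assumptions made for illustration only.

// Minimal sketch, assuming the impl-layer header above is esp_async_memcpy_impl.h
// and that the caller owns the descriptor chains (s_out_desc/s_in_desc are hypothetical).
#include "esp_async_memcpy_impl.h"

static async_memcpy_impl_t s_mcp_impl;          // impl-layer context from the struct above
static dma_descriptor_t s_out_desc, s_in_desc;  // TX/RX descriptor chains prepared elsewhere

static esp_err_t example_mcp_bringup(void)
{
    // init no longer takes descriptors; channel/interrupt setup happens inside
    esp_err_t err = async_memcpy_impl_init(&s_mcp_impl);
    if (err != ESP_OK) {
        return err;
    }
    // descriptor base addresses are now handed over at start time
    return async_memcpy_impl_start(&s_mcp_impl, (intptr_t)&s_out_desc, (intptr_t)&s_in_desc);
}

Stopping and later resuming follow the same pattern through async_memcpy_impl_stop() and async_memcpy_impl_restart().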
@ -33,6 +33,7 @@ IRAM_ATTR static void async_memcpy_impl_default_isr_handler(void *args)

    // End-Of-Frame on RX side
    if (status & CP_DMA_LL_EVENT_RX_EOF) {
        mcp_impl->rx_eof_addr = cp_dma_ll_get_rx_eof_descriptor_address(mcp_impl->hal.dev);
        async_memcpy_isr_on_rx_done_event(mcp_impl);
    }

@ -42,30 +43,30 @@ IRAM_ATTR static void async_memcpy_impl_default_isr_handler(void *args)
    }
}

esp_err_t async_memcpy_impl_allocate_intr(async_memcpy_impl_t *impl, int int_flags, intr_handle_t *intr)
esp_err_t async_memcpy_impl_init(async_memcpy_impl_t *impl)
{
    return esp_intr_alloc(ETS_DMA_COPY_INTR_SOURCE, int_flags, async_memcpy_impl_default_isr_handler, impl, intr);
}
    esp_err_t ret = ESP_OK;

esp_err_t async_memcpy_impl_init(async_memcpy_impl_t *impl, dma_descriptor_t *outlink_base, dma_descriptor_t *inlink_base)
{
    impl->hal_lock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
    cp_dma_hal_config_t config = {
        .inlink_base = inlink_base,
        .outlink_base = outlink_base
    };
    cp_dma_hal_config_t config = {};
    cp_dma_hal_init(&impl->hal, &config);
    return ESP_OK;

    ret = esp_intr_alloc(ETS_DMA_COPY_INTR_SOURCE, ESP_INTR_FLAG_IRAM, async_memcpy_impl_default_isr_handler, impl, &impl->intr);
    return ret;
}

esp_err_t async_memcpy_impl_deinit(async_memcpy_impl_t *impl)
{
    esp_err_t ret = ESP_OK;

    cp_dma_hal_deinit(&impl->hal);
    return ESP_OK;
    ret = esp_intr_free(impl->intr);
    return ret;
}

esp_err_t async_memcpy_impl_start(async_memcpy_impl_t *impl)
esp_err_t async_memcpy_impl_start(async_memcpy_impl_t *impl, intptr_t outlink_base, intptr_t inlink_base)
{
    cp_dma_hal_set_desc_base_addr(&impl->hal, outlink_base, inlink_base);
    cp_dma_hal_start(&impl->hal); // enable DMA and interrupt
    return ESP_OK;
}

@ -88,8 +89,3 @@ bool async_memcpy_impl_is_buffer_address_valid(async_memcpy_impl_t *impl, void *
    // CP_DMA can only access SRAM
    return esp_ptr_internal(src) && esp_ptr_internal(dst);
}

dma_descriptor_t *async_memcpy_impl_get_rx_suc_eof_descriptor(async_memcpy_impl_t *impl)
{
    return (dma_descriptor_t *)cp_dma_ll_get_rx_eof_descriptor_address(impl->hal.dev);
}
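On CP_DMA targets the address check above reduces to esp_ptr_internal(), so callers are expected to validate buffers before queueing a copy. A small hedged sketch of that guard (the error code choice is an assumption):

// Sketch: reject buffers the engine cannot reach (e.g. external PSRAM on CP_DMA targets)
static esp_err_t example_check_buffers(async_memcpy_impl_t *impl, void *dst, void *src)
{
    if (!async_memcpy_impl_is_buffer_address_valid(impl, src, dst)) {
        return ESP_ERR_INVALID_ARG;
    }
    return ESP_OK;
}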
@ -73,6 +73,7 @@ if(NOT BOOTLOADER_BUILD)
    list(APPEND srcs
        "adc_hal.c"
        "dac_hal.c"
        "gdma_hal.c"
        "pcnt_hal.c"
        "spi_flash_hal_gpspi.c"
        "spi_slave_hd_hal.c"
@ -88,6 +89,7 @@ if(NOT BOOTLOADER_BUILD)
    if(${target} STREQUAL "esp32c3")
        list(APPEND srcs
            "ds_hal.c"
            "gdma_hal.c"
            "esp32c3/adc_hal.c"
            "esp32c3/brownout_hal.c"
            "esp32c3/systimer_hal.c"
@ -123,7 +123,7 @@ void adc_hal_digi_init(adc_dma_hal_context_t *adc_dma_ctx, adc_dma_hal_config_t
    adc_dma_ctx->dev = &GDMA;
    gdma_ll_enable_clock(adc_dma_ctx->dev, true);
    gdma_ll_clear_interrupt_status(adc_dma_ctx->dev, dma_config->dma_chan, UINT32_MAX);
    gdma_ll_rx_connect_to_periph(adc_dma_ctx->dev, dma_config->dma_chan, GDMA_LL_TRIG_SRC_ADC_DAC);
    gdma_ll_rx_connect_to_periph(adc_dma_ctx->dev, dma_config->dma_chan, SOC_GDMA_TRIG_PERIPH_ADC0);
}

/*---------------------------------------------------------------
@ -25,6 +25,7 @@
#include "hal/crypto_dma_ll.h"
#elif SOC_AES_GENERAL_DMA
#include "hal/gdma_ll.h"
#include "soc/gdma_channel.h"
#endif

uint8_t aes_hal_setkey(const uint8_t *key, size_t key_bytes, int mode)
@ -71,8 +72,8 @@ static inline void aes_hal_dma_init(const lldesc_t *input, const lldesc_t *outpu
    gdma_ll_rx_enable_descriptor_burst(&GDMA, SOC_GDMA_AES_DMA_CHANNEL, false);
    gdma_ll_rx_enable_data_burst(&GDMA, SOC_GDMA_AES_DMA_CHANNEL, false);

    gdma_ll_tx_connect_to_periph(&GDMA, SOC_GDMA_AES_DMA_CHANNEL, GDMA_LL_PERIPH_ID_AES);
    gdma_ll_rx_connect_to_periph(&GDMA, SOC_GDMA_AES_DMA_CHANNEL, GDMA_LL_PERIPH_ID_AES);
    gdma_ll_tx_connect_to_periph(&GDMA, SOC_GDMA_AES_DMA_CHANNEL, SOC_GDMA_TRIG_PERIPH_AES0);
    gdma_ll_rx_connect_to_periph(&GDMA, SOC_GDMA_AES_DMA_CHANNEL, SOC_GDMA_TRIG_PERIPH_AES0);

#if SOC_GDMA_SUPPORT_EXTMEM
    /* An L2 FIFO bigger than 40 bytes is needed when accessing external RAM */
@ -2,7 +2,7 @@ COMPONENT_SRCDIRS := . esp32
COMPONENT_ADD_INCLUDEDIRS := esp32/include include
COMPONENT_ADD_LDFRAGMENTS += linker.lf

COMPONENT_OBJEXCLUDE += ./spi_slave_hd_hal.o ./spi_flash_hal_gpspi.o ./spi_slave_hd_hal.o ./ds_hal.o
COMPONENT_OBJEXCLUDE += ./spi_slave_hd_hal.o ./spi_flash_hal_gpspi.o ./spi_slave_hd_hal.o ./ds_hal.o ./gdma_hal.o

ifndef CONFIG_ETH_USE_ESP32_EMAC
COMPONENT_OBJEXCLUDE += esp32/emac_hal.o
@ -23,6 +23,8 @@
extern "C" {
#endif

#define GDMA_LL_GET_HW(id) (((id) == 0) ? (&GDMA) : NULL)

#define GDMA_LL_EVENT_TX_FIFO_UDF (1<<12)
#define GDMA_LL_EVENT_TX_FIFO_OVF (1<<11)
#define GDMA_LL_EVENT_RX_FIFO_UDF (1<<10)
@ -37,22 +39,6 @@ extern "C" {
#define GDMA_LL_EVENT_RX_SUC_EOF (1<<1)
#define GDMA_LL_EVENT_RX_DONE (1<<0)

#define GDMA_LL_TRIG_SRC_SPI2 (0)
#define GDMA_LL_TRIG_SRC_UART (2)
#define GDMA_LL_TRIG_SRC_I2S0 (3)
#define GDMA_LL_TRIG_SRC_AES (6)
#define GDMA_LL_TRIG_SRC_SHA (7)
#define GDMA_LL_TRIG_SRC_ADC_DAC (8)

typedef enum {
    GDMA_LL_PERIPH_ID_SPI2 = 0,
    GDMA_LL_PERIPH_ID_UART = 2,
    GDMA_LL_PERIPH_ID_I2S0 = 3,
    GDMA_LL_PERIPH_ID_AES = 6,
    GDMA_LL_PERIPH_ID_SHA = 7,
    GDMA_LL_PERIPH_ID_ADC_DAC = 8,
} gdma_ll_periph_id_t;

///////////////////////////////////// Common /////////////////////////////////////////
/**
 * @brief Enable DMA channel M2M mode (TX channel n forwards data to RX channel n), disabled by default
@ -256,7 +242,7 @@ static inline void gdma_ll_rx_set_priority(gdma_dev_t *dev, uint32_t channel, ui
/**
 * @brief Connect DMA RX channel to a given peripheral
 */
static inline void gdma_ll_rx_connect_to_periph(gdma_dev_t *dev, uint32_t channel, gdma_ll_periph_id_t periph_id)
static inline void gdma_ll_rx_connect_to_periph(gdma_dev_t *dev, uint32_t channel, int periph_id)
{
    dev->channel[channel].in.in_peri_sel.sel = periph_id;
}
@ -414,7 +400,7 @@ static inline void gdma_ll_tx_set_priority(gdma_dev_t *dev, uint32_t channel, ui
/**
 * @brief Connect DMA TX channel to a given peripheral
 */
static inline void gdma_ll_tx_connect_to_periph(gdma_dev_t *dev, uint32_t channel, gdma_ll_periph_id_t periph_id)
static inline void gdma_ll_tx_connect_to_periph(gdma_dev_t *dev, uint32_t channel, int periph_id)
{
    dev->channel[channel].out.out_peri_sel.sel = periph_id;
}
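With the gdma_ll_periph_id_t enum removed, the connect helpers take the plain peripheral ID defined in soc/gdma_channel.h, so callers pair the LL header with that SoC header. A small sketch (channel number 0 is arbitrary, chosen only for illustration):

#include "hal/gdma_ll.h"
#include "soc/gdma_channel.h"

// Sketch: wire TX and RX of channel 0 to the AES trigger, as the AES HAL hunk above does
static void example_connect_aes(gdma_dev_t *dev)
{
    gdma_ll_tx_connect_to_periph(dev, 0, SOC_GDMA_TRIG_PERIPH_AES0);
    gdma_ll_rx_connect_to_periph(dev, 0, SOC_GDMA_TRIG_PERIPH_AES0);
}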
@ -27,10 +27,13 @@ void cp_dma_hal_init(cp_dma_hal_context_t *hal, const cp_dma_hal_config_t *confi
    cp_dma_ll_enable_intr(hal->dev, UINT32_MAX, false);
    cp_dma_ll_clear_intr_status(hal->dev, UINT32_MAX);
    cp_dma_ll_enable_owner_check(hal->dev, true);
}

void cp_dma_hal_set_desc_base_addr(cp_dma_hal_context_t *hal, intptr_t outlink_base, intptr_t inlink_base)
{
    /* set base address of the first descriptor */
    cp_dma_ll_tx_set_descriptor_base_addr(hal->dev, (uint32_t)config->outlink_base);
    cp_dma_ll_rx_set_descriptor_base_addr(hal->dev, (uint32_t)config->inlink_base);
    cp_dma_ll_tx_set_descriptor_base_addr(hal->dev, outlink_base);
    cp_dma_ll_rx_set_descriptor_base_addr(hal->dev, inlink_base);
}

void cp_dma_hal_deinit(cp_dma_hal_context_t *hal)
@ -45,8 +45,6 @@ typedef struct {
} cp_dma_hal_context_t;

typedef struct {
    dma_descriptor_t *outlink_base; /*!< Address of the first outlink descriptor */
    dma_descriptor_t *inlink_base;  /*!< Address of the first inlink descriptor */
} cp_dma_hal_config_t;

/**
@ -62,6 +60,11 @@ void cp_dma_hal_init(cp_dma_hal_context_t *hal, const cp_dma_hal_config_t *confi
 */
void cp_dma_hal_deinit(cp_dma_hal_context_t *hal);

/**
 * @brief Set descriptor base address
 */
void cp_dma_hal_set_desc_base_addr(cp_dma_hal_context_t *hal, intptr_t outlink_base, intptr_t inlink_base);

/**
 * @brief Start mem2mem DMA state machine
 */
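The CP DMA HAL now separates descriptor programming from init, so bring-up becomes a three-step sequence. A minimal sketch assuming caller-owned descriptor chains (the function and variable names are placeholders):

#include "hal/cp_dma_hal.h"

// Sketch: init once, then (re)program descriptor bases right before starting the engine
static void example_cp_dma_bringup(cp_dma_hal_context_t *hal,
                                   intptr_t outlink_base, intptr_t inlink_base)
{
    cp_dma_hal_config_t config = {};          // the config no longer carries descriptors
    cp_dma_hal_init(hal, &config);
    cp_dma_hal_set_desc_base_addr(hal, outlink_base, inlink_base);
    cp_dma_hal_start(hal);                    // enable DMA and interrupt
}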
@ -23,6 +23,8 @@
extern "C" {
#endif

#define GDMA_LL_GET_HW(id) (((id) == 0) ? (&GDMA) : NULL)

#define GDMA_LL_EVENT_TX_L3_FIFO_UDF (1<<17)
#define GDMA_LL_EVENT_TX_L3_FIFO_OVF (1<<16)
#define GDMA_LL_EVENT_TX_L1_FIFO_UDF (1<<15)
@ -42,29 +44,6 @@ extern "C" {
#define GDMA_LL_EVENT_RX_SUC_EOF (1<<1)
#define GDMA_LL_EVENT_RX_DONE (1<<0)

#define GDMA_LL_TRIG_SRC_SPI2 (0)
#define GDMA_LL_TRIG_SRC_SPI3 (1)
#define GDMA_LL_TRIG_SRC_UART (2)
#define GDMA_LL_TRIG_SRC_I2S0 (3)
#define GDMA_LL_TRIG_SRC_I2S1 (4)
#define GDMA_LL_TRIG_SRC_LCD_CAM (5)
#define GDMA_LL_TRIG_SRC_AES (6)
#define GDMA_LL_TRIG_SRC_SHA (7)
#define GDMA_LL_TRIG_SRC_ADC_DAC (8)

typedef enum {
    GDMA_LL_PERIPH_ID_SPI2 = 0,
    GDMA_LL_PERIPH_ID_SPI3,
    GDMA_LL_PERIPH_ID_UART,
    GDMA_LL_PERIPH_ID_I2S0,
    GDMA_LL_PERIPH_ID_I2S1,
    GDMA_LL_PERIPH_ID_LCD_CAM,
    GDMA_LL_PERIPH_ID_AES,
    GDMA_LL_PERIPH_ID_SHA,
    GDMA_LL_PERIPH_ID_ADC_DAC,
} gdma_ll_periph_id_t;

///////////////////////////////////// Common /////////////////////////////////////////
/**
 * @brief Enable DMA channel M2M mode (TX channel n forwards data to RX channel n), disabled by default
@ -300,7 +279,7 @@ static inline void gdma_ll_rx_set_priority(gdma_dev_t *dev, uint32_t channel, ui
/**
 * @brief Connect DMA RX channel to a given peripheral
 */
static inline void gdma_ll_rx_connect_to_periph(gdma_dev_t *dev, uint32_t channel, gdma_ll_periph_id_t periph_id)
static inline void gdma_ll_rx_connect_to_periph(gdma_dev_t *dev, uint32_t channel, int periph_id)
{
    dev->peri_sel[channel].peri_in_sel = periph_id;
}
@ -495,7 +474,7 @@ static inline void gdma_ll_tx_set_priority(gdma_dev_t *dev, uint32_t channel, ui
/**
 * @brief Connect DMA TX channel to a given peripheral
 */
static inline void gdma_ll_tx_connect_to_periph(gdma_dev_t *dev, uint32_t channel, gdma_ll_periph_id_t periph_id)
static inline void gdma_ll_tx_connect_to_periph(gdma_dev_t *dev, uint32_t channel, int periph_id)
{
    dev->peri_sel[channel].peri_out_sel = periph_id;
}
21
components/hal/gdma_hal.c
Normal file
@ -0,0 +1,21 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "hal/gdma_hal.h"
#include "hal/gdma_ll.h"

void gdma_hal_init(gdma_hal_context_t *hal, int group_id)
{
    hal->dev = GDMA_LL_GET_HW(group_id);
}
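gdma_hal_init() only resolves the register block of a group via GDMA_LL_GET_HW(); everything else goes through hal->dev. A short sketch of how a caller could use it together with the LL clock enable seen in the ADC HAL hunk above (the wrapper function name is hypothetical):

#include "hal/gdma_hal.h"
#include "hal/gdma_ll.h"

// Sketch: bind the HAL context to group 0 and enable the controller clock
static void example_hal_bringup(gdma_hal_context_t *hal)
{
    gdma_hal_init(hal, 0);                // hal->dev == GDMA_LL_GET_HW(0), i.e. &GDMA
    gdma_ll_enable_clock(hal->dev, true); // same LL call the ADC HAL uses above
}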
@ -30,6 +30,8 @@ typedef struct {
    gdma_dev_t *dev;
} gdma_hal_context_t;

void gdma_hal_init(gdma_hal_context_t *hal, int group_id);

#ifdef __cplusplus
}
#endif
@ -26,6 +26,7 @@
#include "hal/crypto_dma_ll.h"
#elif SOC_SHA_GENERAL_DMA
#include "hal/gdma_ll.h"
#include "soc/gdma_channel.h"
#endif

#define SHA1_STATE_LEN_WORDS (160 / 32)
@ -106,7 +107,7 @@ static inline void sha_hal_dma_init(lldesc_t *input)
    gdma_ll_tx_enable_data_burst(&GDMA, SOC_GDMA_SHA_DMA_CHANNEL, false);
    gdma_ll_tx_enable_auto_write_back(&GDMA, SOC_GDMA_SHA_DMA_CHANNEL, false);

    gdma_ll_tx_connect_to_periph(&GDMA, SOC_GDMA_SHA_DMA_CHANNEL, GDMA_LL_PERIPH_ID_SHA);
    gdma_ll_tx_connect_to_periph(&GDMA, SOC_GDMA_SHA_DMA_CHANNEL, SOC_GDMA_TRIG_PERIPH_SHA0);

#if SOC_GDMA_SUPPORT_EXTMEM
    /* At least 40 bytes when accessing external RAM */
2
components/soc/esp32/include/soc/gdma_channel.h
Normal file
@ -0,0 +1,2 @@
// ESP32 doesn't feature General DMA peripheral.
// We keep this file here only for consistency's sake.
@ -1,5 +1,6 @@
set(srcs
    "adc_periph.c"
    "gdma_periph.c"
    "gpio_periph.c"
    "interrupts.c"
    "spi_periph.c"
34
components/soc/esp32c3/gdma_periph.c
Normal file
@ -0,0 +1,34 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "soc/gdma_periph.h"

const gdma_signal_conn_t gdma_periph_signals = {
    .groups = {
        [0] = {
            .module = PERIPH_GDMA_MODULE,
            .pairs = {
                [0] = {
                    .irq_id = ETS_DMA_CH0_INTR_SOURCE
                },
                [1] = {
                    .irq_id = ETS_DMA_CH1_INTR_SOURCE
                },
                [2] = {
                    .irq_id = ETS_DMA_CH2_INTR_SOURCE
                }
            }
        }
    }
};
@ -14,4 +14,5 @@

#pragma once

#define SOC_GDMA_CHANNELS_NUM (3) /*!< GDMA has 3 TX and 3 RX */
#define SOC_GDMA_GROUPS (1)
#define SOC_GDMA_PAIRS_PER_GROUP (3)
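The caps move from a flat channel count to a group/pair model, so code that sizes tables should iterate groups and pairs instead of a single channel index. A trivial sketch (the function is hypothetical):

#include "soc/soc_caps.h"

// Sketch: visit every TX/RX pair the silicon provides (1 group x 3 pairs on ESP32-C3)
static int example_count_pairs(void)
{
    int total = 0;
    for (int g = 0; g < SOC_GDMA_GROUPS; g++) {
        for (int p = 0; p < SOC_GDMA_PAIRS_PER_GROUP; p++) {
            total++; // each pair bundles one TX and one RX channel
        }
    }
    return total;
}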
24
components/soc/esp32c3/include/soc/gdma_channel.h
Normal file
@ -0,0 +1,24 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

// The following macros have a format SOC_[periph][instance_id] to make it work with `GDMA_MAKE_TRIGGER`
#define SOC_GDMA_TRIG_PERIPH_M2M0 (-1)
#define SOC_GDMA_TRIG_PERIPH_SPI2 (0)
#define SOC_GDMA_TRIG_PERIPH_UART0 (2)
#define SOC_GDMA_TRIG_PERIPH_I2S0 (3)
#define SOC_GDMA_TRIG_PERIPH_AES0 (6)
#define SOC_GDMA_TRIG_PERIPH_SHA0 (7)
#define SOC_GDMA_TRIG_PERIPH_ADC0 (8)
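The comment above hints at how these IDs are meant to be consumed: a trigger macro pastes a peripheral name and an instance number together to land on one of the SOC_GDMA_TRIG_PERIPH_* values. The sketch below only illustrates that naming convention; the real GDMA_MAKE_TRIGGER lives in esp_private/gdma.h and its exact definition may differ.

#include "soc/gdma_channel.h"

// Hypothetical illustration of the SOC_[periph][instance_id] convention (not the real macro)
#define EXAMPLE_TRIG_ID(periph, inst)   SOC_GDMA_TRIG_##periph##inst

_Static_assert(EXAMPLE_TRIG_ID(PERIPH_SHA, 0) == 7, "SHA instance 0 maps to trigger ID 7");
_Static_assert(EXAMPLE_TRIG_ID(PERIPH_UART, 0) == 2, "UART0 maps to trigger ID 2");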
@ -12,7 +12,6 @@

// Attention: These fixed DMA channels are a temporary workaround before we have a centralized DMA controller API to help alloc the channel dynamically
// Remove them when GDMA driver API is ready
#define SOC_GDMA_AES_DMA_CHANNEL (0)
#define SOC_GDMA_M2M_DMA_CHANNEL (0)
#define SOC_GDMA_SHA_DMA_CHANNEL (1)
#define SOC_GDMA_SPI2_DMA_CHANNEL (2)
#define SOC_GDMA_ADC_DMA_CHANNEL (0)
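The fixed SOC_GDMA_M2M_DMA_CHANNEL assignment disappears because its only user (async memcpy) now obtains channels from the new allocator and drives them through handles, as the gdma_start/gdma_stop/gdma_append calls in the first hunk of this diff show. The sketch below assumes the allocator API added by this merge (gdma_channel_alloc_config_t / gdma_new_channel() in esp_private/gdma.h); field and function names should be checked against that header.

#include "esp_private/gdma.h"

// Hedged sketch: request a TX/RX pair instead of hard-coding a channel number
static esp_err_t example_alloc_m2m_pair(gdma_channel_handle_t *tx, gdma_channel_handle_t *rx)
{
    gdma_channel_alloc_config_t tx_cfg = {
        .direction = GDMA_CHANNEL_DIRECTION_TX,
        .flags.reserve_sibling = 1,          // keep the partner RX channel in the same pair
    };
    esp_err_t err = gdma_new_channel(&tx_cfg, tx);
    if (err != ESP_OK) {
        return err;
    }
    gdma_channel_alloc_config_t rx_cfg = {
        .direction = GDMA_CHANNEL_DIRECTION_RX,
        .sibling_chan = *tx,                 // pair the RX channel with the TX channel above
    };
    return gdma_new_channel(&rx_cfg, rx);
}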
2
components/soc/esp32s2/include/soc/gdma_channel.h
Normal file
@ -0,0 +1,2 @@
// ESP32-S2 doesn't feature General DMA peripheral.
// We keep this file here only for consistency's sake.
@ -2,6 +2,7 @@ set(srcs
    "adc_periph.c"
    "dac_periph.c"
    "dedic_gpio_periph.c"
    "gdma_periph.c"
    "gpio_periph.c"
    "i2c_periph.c"
    "i2s_periph.c"
40
components/soc/esp32s3/gdma_periph.c
Normal file
@ -0,0 +1,40 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "soc/gdma_periph.h"

const gdma_signal_conn_t gdma_periph_signals = {
    .groups = {
        [0] = {
            .module = PERIPH_GDMA_MODULE,
            .pairs = {
                [0] = {
                    .irq_id = ETS_DMA_CH0_INTR_SOURCE
                },
                [1] = {
                    .irq_id = ETS_DMA_CH1_INTR_SOURCE
                },
                [2] = {
                    .irq_id = ETS_DMA_CH2_INTR_SOURCE
                },
                [3] = {
                    .irq_id = ETS_DMA_CH3_INTR_SOURCE
                },
                [4] = {
                    .irq_id = ETS_DMA_CH4_INTR_SOURCE
                }
            }
        }
    }
};
@ -14,6 +14,7 @@

#pragma once

#define SOC_GDMA_CHANNELS_NUM (5) /*!< GDMA has 5 TX and 5 RX channels in ESP32-S3 */
#define SOC_GDMA_L2_FIFO_BASE_SIZE (16) /*!< GDMA L2 FIFO basic size is 16 Bytes */
#define SOC_GDMA_SUPPORT_EXTMEM (1)
#define SOC_GDMA_GROUPS (1)
#define SOC_GDMA_PAIRS_PER_GROUP (5)
#define SOC_GDMA_L2_FIFO_BASE_SIZE (16)
#define SOC_GDMA_SUPPORT_EXTMEM (1)
29
components/soc/esp32s3/include/soc/gdma_channel.h
Normal file
@ -0,0 +1,29 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

// The following macros have a format SOC_[periph][instance_id] to make it work with `GDMA_MAKE_TRIGGER`
#define SOC_GDMA_TRIG_PERIPH_M2M0 (-1)
#define SOC_GDMA_TRIG_PERIPH_SPI2 (0)
#define SOC_GDMA_TRIG_PERIPH_SPI3 (1)
#define SOC_GDMA_TRIG_PERIPH_UART0 (2)
#define SOC_GDMA_TRIG_PERIPH_I2S0 (3)
#define SOC_GDMA_TRIG_PERIPH_I2S1 (4)
#define SOC_GDMA_TRIG_PERIPH_LCD0 (5)
#define SOC_GDMA_TRIG_PERIPH_CAM0 (5)
#define SOC_GDMA_TRIG_PERIPH_AES0 (6)
#define SOC_GDMA_TRIG_PERIPH_SHA0 (7)
#define SOC_GDMA_TRIG_PERIPH_ADC0 (8)
#define SOC_GDMA_TRIG_PERIPH_DAC0 (8)
@ -151,7 +151,6 @@

// Attention: These fixed DMA channels are a temporary workaround before we have a centralized DMA controller API to help alloc the channel dynamically
// Remove them when GDMA driver API is ready
#define SOC_GDMA_M2M_DMA_CHANNEL (0)
#define SOC_GDMA_SPI2_DMA_CHANNEL (1)
#define SOC_GDMA_SPI3_DMA_CHANNEL (2)
#define SOC_GDMA_SHA_DMA_CHANNEL (3)
37
components/soc/include/soc/gdma_periph.h
Normal file
@ -0,0 +1,37 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "soc/soc_caps.h"
#include "soc/periph_defs.h"

#ifdef __cplusplus
extern "C" {
#endif

typedef struct {
    struct {
        const periph_module_t module;
        struct {
            const int irq_id;
        } pairs[SOC_GDMA_PAIRS_PER_GROUP];
    } groups[SOC_GDMA_GROUPS];
} gdma_signal_conn_t;

extern const gdma_signal_conn_t gdma_periph_signals;

#ifdef __cplusplus
}
#endif
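gdma_periph_signals maps a (group, pair) index to a clock/reset module and an interrupt source, which is presumably what the new channel allocator consults. A short sketch of consuming the table directly (handler name and interrupt flags are placeholders):

#include "soc/gdma_periph.h"
#include "driver/periph_ctrl.h"
#include "esp_intr_alloc.h"

static void example_pair0_isr(void *arg)
{
    (void)arg; // placeholder handler
}

// Sketch: power up the GDMA module and hook the interrupt of pair 0 in group 0
static esp_err_t example_attach_pair0_isr(intr_handle_t *ret_intr)
{
    periph_module_enable(gdma_periph_signals.groups[0].module);
    return esp_intr_alloc(gdma_periph_signals.groups[0].pairs[0].irq_id,
                          ESP_INTR_FLAG_IRAM, example_pair0_isr, NULL, ret_intr);
}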