Merge branch 'feature/async_memcpy' into 'master'

async_mcp: support async memory copy on esp32s2 and esp32s3

See merge request espressif/esp-idf!10242
This commit is contained in:
Michael (XIAO Xufeng) 2020-09-17 16:54:28 +08:00
commit 3c283b490a
30 changed files with 6952 additions and 7226 deletions
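For reference, a minimal usage sketch of the new driver added in this merge (an asynchronous copy with a completion callback, mirroring the pattern used by the unit test further down); the function and variable names other than the esp_async_memcpy_* APIs are illustrative only:

#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include "esp_attr.h"
#include "esp_err.h"
#include "esp_async_memcpy.h"

// Completion callback: runs in ISR context, so it only gives a semaphore and
// reports whether a higher priority task was woken.
static IRAM_ATTR bool example_copy_done_cb(async_memcpy_t hdl, async_memcpy_event_t *event, void *cb_args)
{
    BaseType_t high_task_wakeup = pdFALSE;
    xSemaphoreGiveFromISR((SemaphoreHandle_t)cb_args, &high_task_wakeup);
    return high_task_wakeup == pdTRUE;
}

static void example_async_copy(void *dst, void *src, size_t len)
{
    SemaphoreHandle_t done_sem = xSemaphoreCreateBinary();
    async_memcpy_config_t config = ASYNC_MEMCPY_DEFAULT_CONFIG();
    async_memcpy_t driver = NULL;
    ESP_ERROR_CHECK(esp_async_memcpy_install(&config, &driver));
    // queue the copy; the DMA engine works in the background and the callback fires from the ISR
    ESP_ERROR_CHECK(esp_async_memcpy(driver, dst, src, len, example_copy_done_cb, done_sem));
    xSemaphoreTake(done_sem, portMAX_DELAY); // wait for the copy to finish
    ESP_ERROR_CHECK(esp_async_memcpy_uninstall(driver));
    vSemaphoreDelete(done_sem);
}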

View File

@@ -14,7 +14,6 @@ else()
set(srcs "cache_err_int.c"
"memprot.c"
"clk.c"
"cp_dma.c"
"crosscore_int.c"
"dport_access.c"
"hw_random.c"

View File

@@ -1,235 +0,0 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <sys/cdefs.h>
#include <stdatomic.h>
#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include "esp_compiler.h"
#include "esp_intr_alloc.h"
#include "esp_heap_caps.h"
#include "esp_log.h"
#include "soc/soc_caps.h"
#include "soc/cp_dma_caps.h"
#include "hal/cp_dma_hal.h"
#include "hal/cp_dma_ll.h"
#include "cp_dma.h"
#include "soc/periph_defs.h"
static const char *TAG = "cp_dma";
#define CP_DMA_CHECK(a, msg, tag, ret, ...) \
do { \
if (unlikely(!(a))) { \
ESP_LOGE(TAG, "%s(%d): " msg, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
ret_code = ret; \
goto tag; \
} \
} while (0)
/**
* @brief Stream is a high level abstraction over the descriptor.
* It combines the descriptor used by DMA with the callback function registered by the user.
* The benefit is that we can convert a descriptor address into a stream handle.
*/
typedef struct {
cp_dma_descriptor_t tx_desc;
cp_dma_isr_cb_t cb;
void *cb_args;
} cp_dma_out_stream_t;
typedef struct {
cp_dma_descriptor_t rx_desc;
cp_dma_isr_cb_t cb;
void *cb_args;
} cp_dma_in_stream_t;
typedef struct cp_dma_driver_context_s {
uint32_t max_out_stream;
uint32_t max_in_stream;
uint32_t flags;
cp_dma_hal_context_t hal; // HAL context
intr_handle_t intr_hdl; // interrupt handle
portMUX_TYPE spin_lock;
cp_dma_out_stream_t *out_streams; // pointer to the first out stream
cp_dma_in_stream_t *in_streams; // pointer to the first in stream
uint8_t streams[0]; // stream buffer (out streams + in streams), the size is configured by the user
} cp_dma_driver_context_t;
static void cp_dma_isr_default_handler(void *arg) IRAM_ATTR;
esp_err_t cp_dma_driver_install(const cp_dma_config_t *config, cp_dma_driver_t *drv_hdl)
{
esp_err_t ret_code = ESP_OK;
cp_dma_driver_context_t *cp_dma_driver = NULL;
CP_DMA_CHECK(config, "configuration can't be null", err, ESP_ERR_INVALID_ARG);
CP_DMA_CHECK(drv_hdl, "driver handle can't be null", err, ESP_ERR_INVALID_ARG);
size_t total_malloc_size = sizeof(cp_dma_driver_context_t) + sizeof(cp_dma_out_stream_t) * config->max_out_stream + sizeof(cp_dma_in_stream_t) * config->max_in_stream;
if (config->flags & CP_DMA_FLAGS_WORK_WITH_CACHE_DISABLED) {
// to work when cache is disabled, make sure to put driver handle in DRAM
cp_dma_driver = heap_caps_calloc(1, total_malloc_size, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
} else {
cp_dma_driver = calloc(1, total_malloc_size);
}
CP_DMA_CHECK(cp_dma_driver, "allocate driver memory failed", err, ESP_ERR_NO_MEM);
int int_flags = 0;
if (config->flags & CP_DMA_FLAGS_WORK_WITH_CACHE_DISABLED) {
int_flags |= ESP_INTR_FLAG_IRAM; // make sure interrupt can still work when cache is disabled
}
ret_code = esp_intr_alloc(ETS_DMA_COPY_INTR_SOURCE, int_flags, cp_dma_isr_default_handler, cp_dma_driver, &cp_dma_driver->intr_hdl);
CP_DMA_CHECK(ret_code == ESP_OK, "allocate intr failed", err, ret_code);
cp_dma_driver->out_streams = (cp_dma_out_stream_t *)cp_dma_driver->streams;
cp_dma_driver->in_streams = (cp_dma_in_stream_t *)(cp_dma_driver->streams + config->max_out_stream * sizeof(cp_dma_out_stream_t));
// The HAL layer knows nothing about "data streams", only TX/RX descriptors
// We put all descriptors' addresses into an array; the HAL driver will link them into a loop during initialization
{
cp_dma_descriptor_t *tx_descriptors[config->max_out_stream];
cp_dma_descriptor_t *rx_descriptors[config->max_in_stream];
for (int i = 0; i < config->max_out_stream; i++) {
tx_descriptors[i] = &cp_dma_driver->out_streams[i].tx_desc;
}
for (int i = 0; i < config->max_in_stream; i++) {
rx_descriptors[i] = &cp_dma_driver->in_streams[i].rx_desc;
}
cp_dma_hal_init(&cp_dma_driver->hal, tx_descriptors, config->max_out_stream, rx_descriptors, config->max_in_stream);
} // limit the scope of tx_descriptors and rx_descriptors so that goto can jump after this code block
cp_dma_driver->spin_lock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
cp_dma_driver->max_in_stream = config->max_in_stream;
cp_dma_driver->max_out_stream = config->max_out_stream;
*drv_hdl = cp_dma_driver;
cp_dma_hal_start(&cp_dma_driver->hal); // enable DMA and interrupt
return ESP_OK;
err:
if (cp_dma_driver) {
if (cp_dma_driver->intr_hdl) {
esp_intr_free(cp_dma_driver->intr_hdl);
}
free(cp_dma_driver);
}
if (drv_hdl) {
*drv_hdl = NULL;
}
return ret_code;
}
esp_err_t cp_dma_driver_uninstall(cp_dma_driver_t drv_hdl)
{
esp_err_t ret_code = ESP_OK;
CP_DMA_CHECK(drv_hdl, "driver handle can't be null", err, ESP_ERR_INVALID_ARG);
esp_intr_free(drv_hdl->intr_hdl);
cp_dma_hal_stop(&drv_hdl->hal);
cp_dma_hal_deinit(&drv_hdl->hal);
free(drv_hdl);
return ESP_OK;
err:
return ret_code;
}
esp_err_t cp_dma_memcpy(cp_dma_driver_t drv_hdl, void *dst, void *src, size_t n, cp_dma_isr_cb_t cb_isr, void *cb_args)
{
esp_err_t ret_code = ESP_OK;
cp_dma_descriptor_t *rx_start_desc = NULL;
cp_dma_descriptor_t *rx_end_desc = NULL;
cp_dma_descriptor_t *tx_start_desc = NULL;
cp_dma_descriptor_t *tx_end_desc = NULL;
int rx_prepared_size = 0;
int tx_prepared_size = 0;
CP_DMA_CHECK(drv_hdl, "driver handle can't be null", err, ESP_ERR_INVALID_ARG);
// CP_DMA can only access SRAM
CP_DMA_CHECK(esp_ptr_internal(src) && esp_ptr_internal(dst), "address not in SRAM", err, ESP_ERR_INVALID_ARG);
CP_DMA_CHECK(n <= SOC_CP_DMA_MAX_BUFFER_SIZE * drv_hdl->max_out_stream, "exceed max num of tx stream", err, ESP_ERR_INVALID_ARG);
CP_DMA_CHECK(n <= SOC_CP_DMA_MAX_BUFFER_SIZE * drv_hdl->max_in_stream, "exceed max num of rx stream", err, ESP_ERR_INVALID_ARG);
if (cb_isr && (drv_hdl->flags & CP_DMA_FLAGS_WORK_WITH_CACHE_DISABLED)) {
CP_DMA_CHECK(esp_ptr_in_iram(cb_isr), "callback(%p) not in IRAM", err, ESP_ERR_INVALID_ARG, cb_isr);
}
// Prepare TX and RX descriptor
portENTER_CRITICAL_SAFE(&drv_hdl->spin_lock);
// prepare functions will not change internal status of HAL until cp_dma_hal_restart_* are called
rx_prepared_size = cp_dma_hal_prepare_receive(&drv_hdl->hal, dst, n, &rx_start_desc, &rx_end_desc);
tx_prepared_size = cp_dma_hal_prepare_transmit(&drv_hdl->hal, src, n, &tx_start_desc, &tx_end_desc);
if ((rx_prepared_size == n) && (tx_prepared_size == n)) {
// register the user callback to the end-of-frame descriptor (must be done before we restart RX)
cp_dma_in_stream_t *data_stream_rx = __containerof(rx_end_desc, cp_dma_in_stream_t, rx_desc);
data_stream_rx->cb = cb_isr;
data_stream_rx->cb_args = cb_args;
// The restart should be called with the exact returned start and end desc from previous successful prepare calls
cp_dma_hal_restart_rx(&drv_hdl->hal, rx_start_desc, rx_end_desc);
cp_dma_hal_restart_tx(&drv_hdl->hal, tx_start_desc, tx_end_desc);
}
portEXIT_CRITICAL_SAFE(&drv_hdl->spin_lock);
CP_DMA_CHECK(rx_prepared_size == n, "out of rx descriptor", err, ESP_FAIL);
// It's unlikely that we have space for an rx descriptor but no space for a tx descriptor,
// because in CP_DMA both tx and rx descriptors advance at the same pace
CP_DMA_CHECK(tx_prepared_size == n, "out of tx descriptor", err, ESP_FAIL);
return ESP_OK;
err:
return ret_code;
}
/**
* @brief Default ISR handler provided by ESP-IDF
*/
static void cp_dma_isr_default_handler(void *args)
{
cp_dma_driver_context_t *cp_dma_driver = (cp_dma_driver_context_t *)args;
cp_dma_in_stream_t *in_stream = NULL;
cp_dma_descriptor_t *next_desc = NULL;
bool need_yield = false;
bool to_continue = false;
portENTER_CRITICAL_ISR(&cp_dma_driver->spin_lock);
uint32_t status = cp_dma_hal_get_intr_status(&cp_dma_driver->hal);
cp_dma_hal_clear_intr_status(&cp_dma_driver->hal, status);
portEXIT_CRITICAL_ISR(&cp_dma_driver->spin_lock);
ESP_EARLY_LOGD(TAG, "intr status=0x%x", status);
// End-Of-Frame on RX side
if (status & CP_DMA_LL_EVENT_RX_EOF) {
cp_dma_descriptor_t *eof = (cp_dma_descriptor_t *)cp_dma_ll_get_rx_eof_descriptor_address(cp_dma_driver->hal.dev);
// traverse all unchecked descriptors
do {
portENTER_CRITICAL_ISR(&cp_dma_driver->spin_lock);
// There is an assumption that rx descriptors are consumed at the same pace as tx descriptors (this is determined by the CP DMA working mechanism),
// and once an rx descriptor is recycled, the corresponding tx descriptor is guaranteed to have been returned by the DMA
to_continue = cp_dma_hal_get_next_rx_descriptor(&cp_dma_driver->hal, eof, &next_desc);
portEXIT_CRITICAL_ISR(&cp_dma_driver->spin_lock);
if (next_desc) {
in_stream = __containerof(next_desc, cp_dma_in_stream_t, rx_desc);
// invoke user registered callback if available
if (in_stream->cb) {
cp_dma_event_t e = {.id = CP_DMA_EVENT_M2M_DONE};
if (in_stream->cb(cp_dma_driver, &e, in_stream->cb_args)) {
need_yield = true;
}
in_stream->cb = NULL;
in_stream->cb_args = NULL;
}
}
} while (to_continue);
}
if (need_yield) {
portYIELD_FROM_ISR();
}
}

View File

@@ -1,126 +0,0 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#ifdef __cplusplus
extern "C" {
#endif
#include <stddef.h>
#include "esp_err.h"
/**
* @brief Handle of CP_DMA driver
*
*/
typedef struct cp_dma_driver_context_s *cp_dma_driver_t;
/**
* @brief CP_DMA event ID
*
*/
typedef enum {
CP_DMA_EVENT_M2M_DONE, /*!< One or more memory copy transactions are done */
} cp_dma_event_id_t;
/**
* @brief Type defined for CP_DMA event object (including event ID, event data)
*
*/
typedef struct {
cp_dma_event_id_t id; /*!< Event ID */
void *data; /*!< Event data */
} cp_dma_event_t;
/**
* @brief Type defined for cp_dma ISR callback function
*
* @param drv_hdl Handle of CP_DMA driver
* @param event Event object, which contains the event ID, event data, and so on
* @param cb_args User defined arguments for the callback function. It's passed in the cp_dma_memcpy function
* @return Whether a high priority task is woken up by the callback function
*
*/
typedef bool (*cp_dma_isr_cb_t)(cp_dma_driver_t drv_hdl, cp_dma_event_t *event, void *cb_args);
/**
* @brief Type defined for configuration of CP_DMA driver
*
*/
typedef struct {
uint32_t max_out_stream; /*!< maximum number of out link streams that can work simultaneously */
uint32_t max_in_stream; /*!< maximum number of in link streams that can work simultaneously */
uint32_t flags; /*!< Extra flags to control some special behaviour of CP_DMA, OR'ed of CP_DMA_FLAGS_xxx macros */
} cp_dma_config_t;
#define CP_DMA_FLAGS_WORK_WITH_CACHE_DISABLED (1 << 0) /*!< CP_DMA can work even when cache is disabled */
/**
* @brief Default configuration for CP_DMA driver
*
*/
#define CP_DMA_DEFAULT_CONFIG() \
{ \
.max_out_stream = 8, \
.max_in_stream = 8, \
.flags = 0, \
}
/**
* @brief Install CP_DMA driver
*
* @param[in] config Configuration of CP_DMA driver
* @param[out] drv_hdl Returned handle of CP_DMA driver or NULL if driver installation failed
* @return
* - ESP_OK: Install CP_DMA driver successfully
* - ESP_ERR_INVALID_ARG: Install CP_DMA driver failed because of some invalid argument
* - ESP_ERR_NO_MEM: Install CP_DMA driver failed because there's not enough capable memory
* - ESP_FAIL: Install CP_DMA driver failed because of other error
*/
esp_err_t cp_dma_driver_install(const cp_dma_config_t *config, cp_dma_driver_t *drv_hdl);
/**
* @brief Uninstall CP_DMA driver
*
* @param[in] drv_hdl Handle of CP_DMA driver that is returned from cp_dma_driver_install
* @return
* - ESP_OK: Uninstall CP_DMA driver successfully
* - ESP_ERR_INVALID_ARG: Uninstall CP_DMA driver failed because of some invalid argument
* - ESP_FAIL: Uninstall CP_DMA driver failed because of other error
*/
esp_err_t cp_dma_driver_uninstall(cp_dma_driver_t drv_hdl);
/**
* @brief Send an asynchronous memory copy request
*
* @param[in] drv_hdl Handle of CP_DMA driver that is returned from cp_dma_driver_install
* @param[in] dst Destination address (copy to)
* @param[in] src Source address (copy from)
* @param[in] n Number of bytes to copy
* @param[in] cb_isr Callback function, which is invoked in ISR context. Pass NULL to bypass the callback.
* @param[in] cb_args User defined argument to be passed to the callback function
* @return
* - ESP_OK: Send memcopy request successfully
* - ESP_ERR_INVALID_ARG: Send memcopy request failed because of some invalid argument
* - ESP_FAIL: Send memcopy request failed because of other error
*
* @note The callback function is invoked in ISR context, so never do heavy work in the callback.
* The default ISR handler is placed in IRAM; place the callback function in IRAM as well by applying IRAM_ATTR to it.
*/
esp_err_t cp_dma_memcpy(cp_dma_driver_t drv_hdl, void *dst, void *src, size_t n, cp_dma_isr_cb_t cb_isr, void *cb_args);
#ifdef __cplusplus
}
#endif

View File

@@ -26,6 +26,7 @@ PROVIDE ( GPSPI3 = 0x60025000 );
PROVIDE ( SYSCON = 0x60026000 );
PROVIDE ( I2C1 = 0x60027000 );
PROVIDE ( GPSPI4 = 0x60037000 );
PROVIDE ( GDMA = 0x6003F000 );
PROVIDE ( UART2 = 0x60010000 );
PROVIDE ( APB_SARADC = 0x60040000 );
PROVIDE ( LCD_CAM = 0x60041000 );

View File

@@ -1,4 +1,4 @@
idf_component_register(SRCS "panic.c" "system_api.c" "startup.c" "sleep_modes.c" "system_time.c"
idf_component_register(SRCS "esp_async_memcpy.c" "panic.c" "system_api.c" "startup.c" "sleep_modes.c" "system_time.c"
INCLUDE_DIRS include
PRIV_REQUIRES spi_flash app_update
# requirements due to startup code

View File

@@ -0,0 +1,313 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include "hal/dma_types.h"
#include "esp_compiler.h"
#include "esp_heap_caps.h"
#include "esp_log.h"
#include "esp_async_memcpy.h"
#include "esp_async_memcpy_impl.h"
static const char *TAG = "async_memcpy";
#define ASMCP_CHECK(a, msg, tag, ret, ...) \
do \
{ \
if (unlikely(!(a))) \
{ \
ESP_LOGE(TAG, "%s(%d): " msg, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
ret_code = ret; \
goto tag; \
} \
} while (0)
/**
* @brief Type of async mcp stream
* An mcp stream inherits from the DMA descriptor; in addition, it has a callback function member
*/
typedef struct {
dma_descriptor_t desc;
async_memcpy_isr_cb_t cb;
void *cb_args;
} async_memcpy_stream_t;
/**
* @brief Type of async mcp driver context
*/
typedef struct async_memcpy_context_t {
async_memcpy_impl_t mcp_impl; // implementation layer
intr_handle_t intr_hdl; // interrupt handle
uint32_t flags; // extra driver flags
dma_descriptor_t *tx_desc; // pointer to the next free TX descriptor
dma_descriptor_t *rx_desc; // pointer to the next free RX descriptor
dma_descriptor_t *next_rx_desc_to_check; // pointer to the next RX descriptor to recycle
uint32_t max_stream_num; // maximum number of streams
async_memcpy_stream_t *out_streams; // pointer to the first TX stream
async_memcpy_stream_t *in_streams; // pointer to the first RX stream
async_memcpy_stream_t streams_pool[0]; // stream pool (TX + RX), the size is configured during driver installation
} async_memcpy_context_t;
esp_err_t esp_async_memcpy_install(const async_memcpy_config_t *config, async_memcpy_t *asmcp)
{
esp_err_t ret_code = ESP_OK;
async_memcpy_context_t *mcp_hdl = NULL;
ASMCP_CHECK(config, "configuration can't be null", err, ESP_ERR_INVALID_ARG);
ASMCP_CHECK(asmcp, "can't assign mcp handle to null", err, ESP_ERR_INVALID_ARG);
// context memory size + stream pool size
size_t total_malloc_size = sizeof(async_memcpy_context_t) + sizeof(async_memcpy_stream_t) * config->backlog * 2;
// to work when cache is disabled, the driver handle should be located in SRAM
mcp_hdl = heap_caps_calloc(1, total_malloc_size, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
ASMCP_CHECK(mcp_hdl, "allocate context memory failed", err, ESP_ERR_NO_MEM);
int int_flags = ESP_INTR_FLAG_IRAM; // interrupt can still work when cache is disabled
// allocate the interrupt handle; this is target dependent
ret_code = async_memcpy_impl_allocate_intr(&mcp_hdl->mcp_impl, int_flags, &mcp_hdl->intr_hdl);
ASMCP_CHECK(ret_code == ESP_OK, "allocate interrupt handle failed", err, ret_code);
mcp_hdl->flags = config->flags;
mcp_hdl->out_streams = mcp_hdl->streams_pool;
mcp_hdl->in_streams = mcp_hdl->streams_pool + config->backlog;
mcp_hdl->max_stream_num = config->backlog;
// circle TX/RX descriptors
for (int i = 0; i < mcp_hdl->max_stream_num; i++) {
mcp_hdl->out_streams[i].desc.dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_CPU;
mcp_hdl->out_streams[i].desc.next = &mcp_hdl->out_streams[i + 1].desc;
mcp_hdl->in_streams[i].desc.dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_CPU;
mcp_hdl->in_streams[i].desc.next = &mcp_hdl->in_streams[i + 1].desc;
}
mcp_hdl->out_streams[mcp_hdl->max_stream_num - 1].desc.next = &mcp_hdl->out_streams[0].desc;
mcp_hdl->in_streams[mcp_hdl->max_stream_num - 1].desc.next = &mcp_hdl->in_streams[0].desc;
mcp_hdl->tx_desc = &mcp_hdl->out_streams[0].desc;
mcp_hdl->rx_desc = &mcp_hdl->in_streams[0].desc;
mcp_hdl->next_rx_desc_to_check = &mcp_hdl->in_streams[0].desc;
// initialize implementation layer
async_memcpy_impl_init(&mcp_hdl->mcp_impl, &mcp_hdl->out_streams[0].desc, &mcp_hdl->in_streams[0].desc);
*asmcp = mcp_hdl;
async_memcpy_impl_start(&mcp_hdl->mcp_impl);
return ESP_OK;
err:
if (mcp_hdl) {
if (mcp_hdl->intr_hdl) {
esp_intr_free(mcp_hdl->intr_hdl);
}
free(mcp_hdl);
}
if (asmcp) {
*asmcp = NULL;
}
return ret_code;
}
esp_err_t esp_async_memcpy_uninstall(async_memcpy_t asmcp)
{
esp_err_t ret_code = ESP_OK;
ASMCP_CHECK(asmcp, "mcp handle can't be null", err, ESP_ERR_INVALID_ARG);
esp_intr_free(asmcp->intr_hdl);
async_memcpy_impl_stop(&asmcp->mcp_impl);
async_memcpy_impl_deinit(&asmcp->mcp_impl);
free(asmcp);
return ESP_OK;
err:
return ret_code;
}
static int async_memcpy_prepare_receive(async_memcpy_t asmcp, void *buffer, size_t size, dma_descriptor_t **start_desc, dma_descriptor_t **end_desc)
{
uint32_t prepared_length = 0;
uint8_t *buf = (uint8_t *)buffer;
dma_descriptor_t *desc = asmcp->rx_desc; // descriptor iterator
dma_descriptor_t *start = desc;
dma_descriptor_t *end = desc;
while (size > DMA_DESCRIPTOR_BUFFER_MAX_SIZE) {
if (desc->dw0.owner != DMA_DESCRIPTOR_BUFFER_OWNER_DMA) {
desc->dw0.size = DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
desc->buffer = &buf[prepared_length];
desc = desc->next; // move to next descriptor
prepared_length += DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
size -= DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
} else {
// out of RX descriptors
goto _exit;
}
}
if (size) {
if (desc->dw0.owner != DMA_DESCRIPTOR_BUFFER_OWNER_DMA) {
end = desc; // the last descriptor used
desc->dw0.size = size;
desc->buffer = &buf[prepared_length];
desc = desc->next; // move to next descriptor
prepared_length += size;
} else {
// out of RX descriptors
goto _exit;
}
}
_exit:
*start_desc = start;
*end_desc = end;
return prepared_length;
}
static int async_memcpy_prepare_transmit(async_memcpy_t asmcp, void *buffer, size_t len, dma_descriptor_t **start_desc, dma_descriptor_t **end_desc)
{
uint32_t prepared_length = 0;
uint8_t *buf = (uint8_t *)buffer;
dma_descriptor_t *desc = asmcp->tx_desc; // descriptor iterator
dma_descriptor_t *start = desc;
dma_descriptor_t *end = desc;
while (len > DMA_DESCRIPTOR_BUFFER_MAX_SIZE) {
if (desc->dw0.owner != DMA_DESCRIPTOR_BUFFER_OWNER_DMA) {
desc->dw0.suc_eof = 0; // not the end of the transaction
desc->dw0.size = DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
desc->dw0.length = DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
desc->buffer = &buf[prepared_length];
desc = desc->next; // move to next descriptor
prepared_length += DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
len -= DMA_DESCRIPTOR_BUFFER_MAX_SIZE;
} else {
// out of TX descriptors
goto _exit;
}
}
if (len) {
if (desc->dw0.owner != DMA_DESCRIPTOR_BUFFER_OWNER_DMA) {
end = desc; // the last descriptor used
desc->dw0.suc_eof = 1; // end of the transaction
desc->dw0.size = len;
desc->dw0.length = len;
desc->buffer = &buf[prepared_length];
desc = desc->next; // move to next descriptor
prepared_length += len;
} else {
// out of TX descriptors
goto _exit;
}
}
*start_desc = start;
*end_desc = end;
_exit:
return prepared_length;
}
static bool async_memcpy_get_next_rx_descriptor(async_memcpy_t asmcp, dma_descriptor_t *eof_desc, dma_descriptor_t **next_desc)
{
dma_descriptor_t *next = asmcp->next_rx_desc_to_check;
// additional check, to guard against the interrupt being triggered by mistake
if (next->dw0.owner == DMA_DESCRIPTOR_BUFFER_OWNER_CPU) {
asmcp->next_rx_desc_to_check = asmcp->next_rx_desc_to_check->next;
*next_desc = next;
// return whether we need to continue
return eof_desc == next ? false : true;
}
*next_desc = NULL;
return false;
}
esp_err_t esp_async_memcpy(async_memcpy_t asmcp, void *dst, void *src, size_t n, async_memcpy_isr_cb_t cb_isr, void *cb_args)
{
esp_err_t ret_code = ESP_OK;
dma_descriptor_t *rx_start_desc = NULL;
dma_descriptor_t *rx_end_desc = NULL;
dma_descriptor_t *tx_start_desc = NULL;
dma_descriptor_t *tx_end_desc = NULL;
int rx_prepared_size = 0;
int tx_prepared_size = 0;
ASMCP_CHECK(asmcp, "mcp handle can't be null", err, ESP_ERR_INVALID_ARG);
ASMCP_CHECK(async_memcpy_impl_is_buffer_address_valid(&asmcp->mcp_impl, src, dst), "buffer address not valid", err, ESP_ERR_INVALID_ARG);
ASMCP_CHECK(n <= DMA_DESCRIPTOR_BUFFER_MAX_SIZE * asmcp->max_stream_num, "buffer size too large", err, ESP_ERR_INVALID_ARG);
// Prepare TX and RX descriptor
portENTER_CRITICAL_SAFE(&asmcp->mcp_impl.hal_lock);
rx_prepared_size = async_memcpy_prepare_receive(asmcp, dst, n, &rx_start_desc, &rx_end_desc);
tx_prepared_size = async_memcpy_prepare_transmit(asmcp, src, n, &tx_start_desc, &tx_end_desc);
if ((rx_prepared_size == n) && (tx_prepared_size == n)) {
// register user callback to the last descriptor
async_memcpy_stream_t *mcp_stream = __containerof(rx_end_desc, async_memcpy_stream_t, desc);
mcp_stream->cb = cb_isr;
mcp_stream->cb_args = cb_args;
// restart RX first
dma_descriptor_t *desc = rx_start_desc;
while (desc != rx_end_desc) {
desc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
desc = desc->next;
}
desc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
asmcp->rx_desc = desc->next;
// then restart TX
desc = tx_start_desc;
while (desc != tx_end_desc) {
desc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
desc = desc->next;
}
desc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
asmcp->tx_desc = desc->next;
async_memcpy_impl_restart(&asmcp->mcp_impl);
}
portEXIT_CRITICAL_SAFE(&asmcp->mcp_impl.hal_lock);
// It's unlikely that we have space for an rx descriptor but no space for a tx descriptor,
// because tx and rx descriptors advance at the same pace
ASMCP_CHECK(rx_prepared_size == n, "out of rx descriptor", err, ESP_FAIL);
ASMCP_CHECK(tx_prepared_size == n, "out of tx descriptor", err, ESP_FAIL);
return ESP_OK;
err:
return ret_code;
}
IRAM_ATTR void async_memcpy_isr_on_rx_done_event(async_memcpy_impl_t *impl)
{
bool to_continue = false;
async_memcpy_stream_t *in_stream = NULL;
dma_descriptor_t *next_desc = NULL;
async_memcpy_context_t *asmcp = __containerof(impl, async_memcpy_context_t, mcp_impl);
// get the RX eof descriptor address
dma_descriptor_t *eof = async_memcpy_impl_get_rx_suc_eof_descriptor(impl);
// traverse all unchecked descriptors
do {
portENTER_CRITICAL_ISR(&impl->hal_lock);
// There is an assumption that rx descriptors are consumed at the same pace as tx descriptors (this is determined by the M2M DMA working mechanism),
// and once an rx descriptor is recycled, the corresponding tx descriptor is guaranteed to have been returned by the DMA
to_continue = async_memcpy_get_next_rx_descriptor(asmcp, eof, &next_desc);
portEXIT_CRITICAL_ISR(&impl->hal_lock);
if (next_desc) {
in_stream = __containerof(next_desc, async_memcpy_stream_t, desc);
// invoke user registered callback if available
if (in_stream->cb) {
async_memcpy_event_t e = {0};
if (in_stream->cb(asmcp, &e, in_stream->cb_args)) {
impl->isr_need_yield = true;
}
in_stream->cb = NULL;
in_stream->cb_args = NULL;
}
}
} while (to_continue);
}

View File

@@ -0,0 +1,115 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#ifdef __cplusplus
extern "C" {
#endif
#include <stdint.h>
#include <stdbool.h>
#include "esp_err.h"
/**
* @brief Type of async memcpy handle
*
*/
typedef struct async_memcpy_context_t *async_memcpy_t;
/**
* @brief Type of async memcpy event object
*
*/
typedef struct {
void *data; /*!< Event data */
} async_memcpy_event_t;
/**
* @brief Type of async memcpy interrupt callback function
*
* @param mcp_hdl Handle of async memcpy
* @param event Event object, which contains related data, reserved for future use
* @param cb_args User defined arguments, passed from the esp_async_memcpy function
* @return Whether a high priority task is woken up by the callback function
*
* @note Users can call OS primitives (semaphore, mutex, etc.) in the callback function.
* Keep in mind that if any OS primitive wakes a high priority task up, the callback should return true.
*/
typedef bool (*async_memcpy_isr_cb_t)(async_memcpy_t mcp_hdl, async_memcpy_event_t *event, void *cb_args);
/**
* @brief Type of async memcpy configuration
*
*/
typedef struct {
uint32_t backlog; /*!< Maximum number of streams that can be handled simultaneously */
uint32_t flags; /*!< Extra flags to control async memcpy feature */
} async_memcpy_config_t;
/**
* @brief Default configuration for async memcpy
*
*/
#define ASYNC_MEMCPY_DEFAULT_CONFIG() \
{ \
.backlog = 8, \
.flags = 0, \
}
/**
* @brief Install async memcpy driver
*
* @param[in] config Configuration of async memcpy
* @param[out] asmcp Handle of async memcpy that is returned from this API. If driver installation fails, asmcp will be assigned NULL.
* @return
* - ESP_OK: Install async memcpy driver successfully
* - ESP_ERR_INVALID_ARG: Install async memcpy driver failed because of invalid argument
* - ESP_ERR_NO_MEM: Install async memcpy driver failed because out of memory
* - ESP_FAIL: Install async memcpy driver failed because of other error
*/
esp_err_t esp_async_memcpy_install(const async_memcpy_config_t *config, async_memcpy_t *asmcp);
/**
* @brief Uninstall async memcpy driver
*
* @param[in] asmcp Handle of async memcpy driver that is returned from esp_async_memcpy_install
* @return
* - ESP_OK: Uninstall async memcpy driver successfully
* - ESP_ERR_INVALID_ARG: Uninstall async memcpy driver failed because of invalid argument
* - ESP_FAIL: Uninstall async memcpy driver failed because of other error
*/
esp_err_t esp_async_memcpy_uninstall(async_memcpy_t asmcp);
/**
* @brief Send an asynchronous memory copy request
*
* @param[in] asmcp Handle of async memcpy driver that is returned from esp_async_memcpy_install
* @param[in] dst Destination address (copy to)
* @param[in] src Source address (copy from)
* @param[in] n Number of bytes to copy
* @param[in] cb_isr Callback function, which is invoked in interrupt context. Set it to NULL to bypass the callback.
* @param[in] cb_args User defined argument to be passed to the callback function
* @return
* - ESP_OK: Send memory copy request successfully
* - ESP_ERR_INVALID_ARG: Send memory copy request failed because of invalid argument
* - ESP_FAIL: Send memory copy request failed because of other error
*
* @note The callback function is invoked in interrupt context; never do blocking work in the callback.
*/
esp_err_t esp_async_memcpy(async_memcpy_t asmcp, void *dst, void *src, size_t n, async_memcpy_isr_cb_t cb_isr, void *cb_args);
#ifdef __cplusplus
}
#endif
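As a complement to the API reference above, a minimal sketch (not part of this commit) of how the backlog field is meant to be used: several copy requests can be queued back to back before any of them completes, up to the number of reserved descriptors. The copy_job_t type and example_queue_copies name are illustrative assumptions.

#include <stddef.h>
#include "esp_err.h"
#include "esp_async_memcpy.h"

typedef struct { void *dst; void *src; size_t len; } copy_job_t; // illustrative helper type

static void example_queue_copies(copy_job_t jobs[4])
{
    async_memcpy_config_t cfg = ASYNC_MEMCPY_DEFAULT_CONFIG();
    cfg.backlog = 4; // reserve 4 DMA descriptors per direction
    async_memcpy_t mcp = NULL;
    ESP_ERROR_CHECK(esp_async_memcpy_install(&cfg, &mcp));
    for (int i = 0; i < 4; i++) {
        // each call only stages descriptors and returns immediately;
        // ESP_FAIL would indicate the driver ran out of free descriptors
        ESP_ERROR_CHECK(esp_async_memcpy(mcp, jobs[i].dst, jobs[i].src, jobs[i].len, NULL, NULL));
    }
    // ... wait for completion (e.g. via a callback) before reading jobs[i].dst or uninstalling
}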

View File

@@ -0,0 +1,107 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "freertos/FreeRTOS.h"
#include "soc/periph_defs.h"
#include "soc/soc_memory_layout.h"
#include "soc/soc_caps.h"
#include "hal/gdma_ll.h"
#include "hal/gdma_hal.h"
#include "driver/periph_ctrl.h"
#include "esp_log.h"
#include "esp_attr.h"
#include "esp_err.h"
#include "esp_async_memcpy_impl.h"
IRAM_ATTR static void async_memcpy_impl_default_isr_handler(void *args)
{
async_memcpy_impl_t *mcp_impl = (async_memcpy_impl_t *)args;
portENTER_CRITICAL_ISR(&mcp_impl->hal_lock);
uint32_t status = gdma_ll_get_interrupt_status(mcp_impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
gdma_ll_clear_interrupt_status(mcp_impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, status);
portEXIT_CRITICAL_ISR(&mcp_impl->hal_lock);
// End-Of-Frame on RX side
if (status & GDMA_LL_EVENT_RX_SUC_EOF) {
async_memcpy_isr_on_rx_done_event(mcp_impl);
}
if (mcp_impl->isr_need_yield) {
mcp_impl->isr_need_yield = false;
portYIELD_FROM_ISR();
}
}
esp_err_t async_memcpy_impl_allocate_intr(async_memcpy_impl_t *impl, int int_flags, intr_handle_t *intr)
{
return esp_intr_alloc(ETS_DMA_CH0_INTR_SOURCE, int_flags, async_memcpy_impl_default_isr_handler, impl, intr);
}
esp_err_t async_memcpy_impl_init(async_memcpy_impl_t *impl, dma_descriptor_t *outlink_base, dma_descriptor_t *inlink_base)
{
impl->hal_lock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
impl->hal.dev = &GDMA;
periph_module_enable(PERIPH_GDMA_MODULE);
gdma_ll_enable_clock(impl->hal.dev, true);
gdma_ll_tx_reset_channel(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
gdma_ll_rx_reset_channel(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
gdma_ll_enable_interrupt(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, UINT32_MAX, true);
gdma_ll_clear_interrupt_status(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, UINT32_MAX);
gdma_ll_enable_m2m_mode(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, true);
gdma_ll_tx_enable_auto_write_back(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, true);
gdma_ll_enable_owner_check(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, true);
gdma_ll_tx_set_desc_addr(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, (uint32_t)outlink_base);
gdma_ll_rx_set_desc_addr(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, (uint32_t)inlink_base);
return ESP_OK;
}
esp_err_t async_memcpy_impl_deinit(async_memcpy_impl_t *impl)
{
periph_module_disable(PERIPH_GDMA_MODULE);
return ESP_OK;
}
esp_err_t async_memcpy_impl_start(async_memcpy_impl_t *impl)
{
gdma_ll_rx_start(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
gdma_ll_tx_start(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
gdma_ll_enable_interrupt(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, GDMA_LL_EVENT_RX_SUC_EOF, true);
return ESP_OK;
}
esp_err_t async_memcpy_impl_stop(async_memcpy_impl_t *impl)
{
gdma_ll_enable_interrupt(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, GDMA_LL_EVENT_RX_SUC_EOF, false);
gdma_ll_rx_stop(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
gdma_ll_tx_stop(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
return ESP_OK;
}
esp_err_t async_memcpy_impl_restart(async_memcpy_impl_t *impl)
{
gdma_ll_rx_restart(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
gdma_ll_tx_restart(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
return ESP_OK;
}
bool async_memcpy_impl_is_buffer_address_valid(async_memcpy_impl_t *impl, void *src, void *dst)
{
return true;
}
dma_descriptor_t *async_memcpy_impl_get_rx_suc_eof_descriptor(async_memcpy_impl_t *impl)
{
return (dma_descriptor_t *)gdma_ll_rx_get_success_eof_desc_addr(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
}

View File

@@ -1 +1,2 @@
COMPONENT_SRCDIRS += port port/esp32
COMPONENT_OBJEXCLUDE += port/async_memcpy_impl_gdma.o

View File

@@ -1,4 +1,4 @@
set(srcs "dport_panic_highint_hdl.S" "clk.c" "reset_reason.c")
set(srcs "async_memcpy_impl_cp_dma.c" "dport_panic_highint_hdl.S" "clk.c" "reset_reason.c")
add_prefix(srcs "${CMAKE_CURRENT_LIST_DIR}/" ${srcs})
target_sources(${COMPONENT_LIB} PRIVATE ${srcs})

View File

@@ -0,0 +1,95 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "freertos/FreeRTOS.h"
#include "soc/periph_defs.h"
#include "soc/soc_memory_layout.h"
#include "hal/cp_dma_hal.h"
#include "hal/cp_dma_ll.h"
#include "esp_log.h"
#include "esp_attr.h"
#include "esp_err.h"
#include "esp_async_memcpy_impl.h"
IRAM_ATTR static void async_memcpy_impl_default_isr_handler(void *args)
{
async_memcpy_impl_t *mcp_impl = (async_memcpy_impl_t *)args;
portENTER_CRITICAL_ISR(&mcp_impl->hal_lock);
uint32_t status = cp_dma_hal_get_intr_status(&mcp_impl->hal);
cp_dma_hal_clear_intr_status(&mcp_impl->hal, status);
portEXIT_CRITICAL_ISR(&mcp_impl->hal_lock);
// End-Of-Frame on RX side
if (status & CP_DMA_LL_EVENT_RX_EOF) {
async_memcpy_isr_on_rx_done_event(mcp_impl);
}
if (mcp_impl->isr_need_yield) {
mcp_impl->isr_need_yield = false;
portYIELD_FROM_ISR();
}
}
esp_err_t async_memcpy_impl_allocate_intr(async_memcpy_impl_t *impl, int int_flags, intr_handle_t *intr)
{
return esp_intr_alloc(ETS_DMA_COPY_INTR_SOURCE, int_flags, async_memcpy_impl_default_isr_handler, impl, intr);
}
esp_err_t async_memcpy_impl_init(async_memcpy_impl_t *impl, dma_descriptor_t *outlink_base, dma_descriptor_t *inlink_base)
{
impl->hal_lock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
cp_dma_hal_config_t config = {
.inlink_base = inlink_base,
.outlink_base = outlink_base
};
cp_dma_hal_init(&impl->hal, &config);
return ESP_OK;
}
esp_err_t async_memcpy_impl_deinit(async_memcpy_impl_t *impl)
{
cp_dma_hal_deinit(&impl->hal);
return ESP_OK;
}
esp_err_t async_memcpy_impl_start(async_memcpy_impl_t *impl)
{
cp_dma_hal_start(&impl->hal); // enable DMA and interrupt
return ESP_OK;
}
esp_err_t async_memcpy_impl_stop(async_memcpy_impl_t *impl)
{
cp_dma_hal_stop(&impl->hal); // disable DMA and interrupt
return ESP_OK;
}
esp_err_t async_memcpy_impl_restart(async_memcpy_impl_t *impl)
{
cp_dma_hal_restart_rx(&impl->hal);
cp_dma_hal_restart_tx(&impl->hal);
return ESP_OK;
}
bool async_memcpy_impl_is_buffer_address_valid(async_memcpy_impl_t *impl, void *src, void *dst)
{
// CP_DMA can only access SRAM
return esp_ptr_internal(src) && esp_ptr_internal(dst);
}
dma_descriptor_t *async_memcpy_impl_get_rx_suc_eof_descriptor(async_memcpy_impl_t *impl)
{
return (dma_descriptor_t *)cp_dma_ll_get_rx_eof_descriptor_address(impl->hal.dev);
}

View File

@@ -1,4 +1,4 @@
set(srcs "dport_panic_highint_hdl.S")
set(srcs "../async_memcpy_impl_gdma.c" "dport_panic_highint_hdl.S" "clk.c" "reset_reason.c")
add_prefix(srcs "${CMAKE_CURRENT_LIST_DIR}/" ${srcs})
#ld_include_panic_highint_hdl is added as an undefined symbol because otherwise the

View File

@@ -0,0 +1,124 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdint.h>
#include <stdbool.h>
#include "esp_err.h"
#include "esp_intr_alloc.h"
#include "soc/soc_caps.h"
#include "hal/dma_types.h"
#include "freertos/FreeRTOS.h"
#if SOC_CP_DMA_SUPPORTED
#include "hal/cp_dma_ll.h"
#include "hal/cp_dma_hal.h"
#elif SOC_GDMA_SUPPORTED
#include "hal/gdma_ll.h"
#include "hal/gdma_hal.h"
#endif
/**
* @brief Type of async mcp implementation layer context
*
*/
typedef struct {
portMUX_TYPE hal_lock; // spin lock for HAL object
#if SOC_CP_DMA_SUPPORTED
cp_dma_hal_context_t hal; // CP DMA hal
#elif SOC_GDMA_SUPPORTED
gdma_hal_context_t hal; // General DMA hal
#endif
bool isr_need_yield; // whether the current ISR needs to yield to a higher priority task
} async_memcpy_impl_t;
/**
* @brief ISR callback function, invoked when the RX done event is triggered
*
* @param impl async mcp implementation layer context pointer
*/
void async_memcpy_isr_on_rx_done_event(async_memcpy_impl_t *impl);
/**
* @brief Allocate interrupt handle, register default isr handler
*
* @param impl async mcp implementation layer context pointer
* @param int_flags interrupt flags
* @param intr Returned interrupt handle
* @return
* - ESP_OK: Allocate interrupt handle successfully
* - ESP_ERR_INVALID_ARG: Allocate interrupt handle failed because of invalid argument
* - ESP_FAIL: Allocate interrupt handle failed because of other error
*/
esp_err_t async_memcpy_impl_allocate_intr(async_memcpy_impl_t *impl, int int_flags, intr_handle_t *intr);
/**
* @brief Initialize async mcp implementation layer
*
* @param impl async mcp implementation layer context pointer
* @param outlink_base Pointer to the first TX descriptor
* @param inlink_base Pointer to the first RX descriptor
* @return Always return ESP_OK
*/
esp_err_t async_memcpy_impl_init(async_memcpy_impl_t *impl, dma_descriptor_t *outlink_base, dma_descriptor_t *inlink_base);
/**
* @brief Deinitialize async mcp implementation layer
*
* @param impl async mcp implementation layer context pointer
* @return Always return ESP_OK
*/
esp_err_t async_memcpy_impl_deinit(async_memcpy_impl_t *impl);
/**
* @brief Start async mcp (on implementation layer)
*
* @param impl async mcp implementation layer context pointer
* @return Always return ESP_OK
*/
esp_err_t async_memcpy_impl_start(async_memcpy_impl_t *impl);
/**
* @brief Stop async mcp (on implementation layer)
*
* @param impl async mcp implementation layer context pointer
* @return Always return ESP_OK
*/
esp_err_t async_memcpy_impl_stop(async_memcpy_impl_t *impl);
/**
* @brief Restart async mcp DMA engine
*
* @param impl async mcp implementation layer context pointer
* @return Always return ESP_OK
*/
esp_err_t async_memcpy_impl_restart(async_memcpy_impl_t *impl);
/**
* @brief Check whether the buffer addresses are valid
* @note This is related to the underlying target (e.g. on esp32-s2, only buffers located in SRAM are supported)
*
* @param impl async mcp implementation layer context pointer
* @param src Source buffer address
* @param dst Destination buffer address
* @return True if both addresses are valid
*/
bool async_memcpy_impl_is_buffer_address_valid(async_memcpy_impl_t *impl, void *src, void *dst);
/**
* @brief Get the EOF RX descriptor address
*
* @param impl async mcp implementation layer context pointer
* @return Pointer to the EOF RX descriptor
*/
dma_descriptor_t *async_memcpy_impl_get_rx_suc_eof_descriptor(async_memcpy_impl_t *impl);

View File

@@ -10,12 +10,14 @@
#include "unity.h"
#include "test_utils.h"
#include "ccomp_timer.h"
#include "soc/cp_dma_caps.h"
#include "cp_dma.h"
#include "esp_async_memcpy.h"
#include "soc/soc_caps.h"
#if SOC_CP_DMA_SUPPORTED || SOC_GDMA_SUPPORTED
#define ALIGN_UP(addr, align) (((addr) + (align)-1) & ~((align)-1))
static void cp_dma_setup_testbench(uint32_t seed, uint32_t *buffer_size, uint8_t **src_buf, uint8_t **dst_buf, uint8_t **from_addr, uint8_t **to_addr, uint32_t align)
static void async_memcpy_setup_testbench(uint32_t seed, uint32_t *buffer_size, uint8_t **src_buf, uint8_t **dst_buf, uint8_t **from_addr, uint8_t **to_addr, uint32_t align)
{
srand(seed);
printf("allocating memory buffer...\r\n");
@@ -43,7 +45,7 @@ static void cp_dma_setup_testbench(uint32_t seed, uint32_t *buffer_size, uint8_t
}
}
static void cp_dma_verify_and_clear_testbench(uint32_t seed, uint32_t buffer_size, uint8_t *src_buf, uint8_t *dst_buf, uint8_t *from_addr, uint8_t *to_addr)
static void async_memcpy_verify_and_clear_testbench(uint32_t seed, uint32_t buffer_size, uint8_t *src_buf, uint8_t *dst_buf, uint8_t *from_addr, uint8_t *to_addr)
{
srand(seed);
for (int i = 0; i < buffer_size; i++) {
@@ -59,13 +61,12 @@ static void cp_dma_verify_and_clear_testbench(uint32_t seed, uint32_t buffer_siz
free(dst_buf);
}
TEST_CASE("memory copy by DMA one by one", "[CP_DMA]")
TEST_CASE("memory copy by DMA one by one", "[async mcp]")
{
cp_dma_config_t config = CP_DMA_DEFAULT_CONFIG();
config.max_in_stream = 4;
config.max_out_stream = 4;
cp_dma_driver_t driver = NULL;
TEST_ESP_OK(cp_dma_driver_install(&config, &driver));
async_memcpy_config_t config = ASYNC_MEMCPY_DEFAULT_CONFIG();
config.backlog = 4;
async_memcpy_t driver = NULL;
TEST_ESP_OK(esp_async_memcpy_install(&config, &driver));
uint32_t test_buffer_len[] = {256, 512, 1024, 2048, 4096, 5011};
uint8_t *sbuf = NULL;
@@ -76,24 +77,22 @@ TEST_CASE("memory copy by DMA one by one", "[CP_DMA]")
for (int i = 0; i < sizeof(test_buffer_len) / sizeof(test_buffer_len[0]); i++) {
// Test different align edge
for (int align = 0; align < 4; align++) {
cp_dma_setup_testbench(i, &test_buffer_len[i], &sbuf, &dbuf, &from, &to, align);
TEST_ESP_OK(cp_dma_memcpy(driver, to, from, test_buffer_len[i], NULL, NULL));
cp_dma_verify_and_clear_testbench(i, test_buffer_len[i], sbuf, dbuf, from, to);
async_memcpy_setup_testbench(i, &test_buffer_len[i], &sbuf, &dbuf, &from, &to, align);
TEST_ESP_OK(esp_async_memcpy(driver, to, from, test_buffer_len[i], NULL, NULL));
async_memcpy_verify_and_clear_testbench(i, test_buffer_len[i], sbuf, dbuf, from, to);
vTaskDelay(pdMS_TO_TICKS(100));
}
}
TEST_ESP_OK(cp_dma_driver_uninstall(driver));
TEST_ESP_OK(esp_async_memcpy_uninstall(driver));
}
TEST_CASE("memory copy by DMA on the fly", "[CP_DMA]")
TEST_CASE("memory copy by DMA on the fly", "[async mcp]")
{
cp_dma_config_t config = CP_DMA_DEFAULT_CONFIG();
config.max_in_stream = 4;
config.max_out_stream = 4;
cp_dma_driver_t driver = NULL;
TEST_ESP_OK(cp_dma_driver_install(&config, &driver));
async_memcpy_config_t config = ASYNC_MEMCPY_DEFAULT_CONFIG();
async_memcpy_t driver = NULL;
TEST_ESP_OK(esp_async_memcpy_install(&config, &driver));
uint32_t test_buffer_len[] = {512, 1024, 2048, 4096, 5011};
uint8_t *sbufs[] = {0, 0, 0, 0, 0};
@@ -103,82 +102,79 @@ TEST_CASE("memory copy by DMA on the fly", "[CP_DMA]")
// Aligned case
for (int i = 0; i < sizeof(sbufs) / sizeof(sbufs[0]); i++) {
cp_dma_setup_testbench(i, &test_buffer_len[i], &sbufs[i], &dbufs[i], &froms[i], &tos[i], 0);
async_memcpy_setup_testbench(i, &test_buffer_len[i], &sbufs[i], &dbufs[i], &froms[i], &tos[i], 0);
}
for (int i = 0; i < sizeof(test_buffer_len) / sizeof(test_buffer_len[0]); i++) {
TEST_ESP_OK(cp_dma_memcpy(driver, tos[i], froms[i], test_buffer_len[i], NULL, NULL));
TEST_ESP_OK(esp_async_memcpy(driver, tos[i], froms[i], test_buffer_len[i], NULL, NULL));
}
for (int i = 0; i < sizeof(sbufs) / sizeof(sbufs[0]); i++) {
cp_dma_verify_and_clear_testbench(i, test_buffer_len[i], sbufs[i], dbufs[i], froms[i], tos[i]);
async_memcpy_verify_and_clear_testbench(i, test_buffer_len[i], sbufs[i], dbufs[i], froms[i], tos[i]);
}
// Non-aligned case
for (int i = 0; i < sizeof(sbufs) / sizeof(sbufs[0]); i++) {
cp_dma_setup_testbench(i, &test_buffer_len[i], &sbufs[i], &dbufs[i], &froms[i], &tos[i], 3);
async_memcpy_setup_testbench(i, &test_buffer_len[i], &sbufs[i], &dbufs[i], &froms[i], &tos[i], 3);
}
for (int i = 0; i < sizeof(test_buffer_len) / sizeof(test_buffer_len[0]); i++) {
TEST_ESP_OK(cp_dma_memcpy(driver, tos[i], froms[i], test_buffer_len[i], NULL, NULL));
TEST_ESP_OK(esp_async_memcpy(driver, tos[i], froms[i], test_buffer_len[i], NULL, NULL));
}
for (int i = 0; i < sizeof(sbufs) / sizeof(sbufs[0]); i++) {
cp_dma_verify_and_clear_testbench(i, test_buffer_len[i], sbufs[i], dbufs[i], froms[i], tos[i]);
async_memcpy_verify_and_clear_testbench(i, test_buffer_len[i], sbufs[i], dbufs[i], froms[i], tos[i]);
}
TEST_ESP_OK(cp_dma_driver_uninstall(driver));
TEST_ESP_OK(esp_async_memcpy_uninstall(driver));
}
#define TEST_CP_DMA_MECP_DMAY_BENCH_COUNTS (16)
static uint32_t test_cp_dma_memcpy_bench_len = 4096;
#define TEST_ASYNC_MEMCPY_BENCH_COUNTS (16)
static uint32_t test_async_memcpy_bench_len = 4095;
static int count = 0;
static IRAM_ATTR bool test_cp_dma_memcpy_cb(cp_dma_driver_t drv_hdl, cp_dma_event_t *event, void *cb_args)
static IRAM_ATTR bool test_async_memcpy_isr_cb(async_memcpy_t mcp_hdl, async_memcpy_event_t *event, void *cb_args)
{
SemaphoreHandle_t sem = (SemaphoreHandle_t)cb_args;
BaseType_t high_task_wakeup = pdFALSE;
switch (event->id) {
case CP_DMA_EVENT_M2M_DONE:
count++;
if (count == TEST_CP_DMA_MECP_DMAY_BENCH_COUNTS) {
xSemaphoreGiveFromISR(sem, &high_task_wakeup);
}
break;
default:
break;
count++;
if (count == TEST_ASYNC_MEMCPY_BENCH_COUNTS) {
xSemaphoreGiveFromISR(sem, &high_task_wakeup);
}
return high_task_wakeup == pdTRUE;
}
TEST_CASE("memory copy by DMA with callback", "[CP_DMA][performance]")
TEST_CASE("memory copy by DMA with callback", "[async mcp]")
{
SemaphoreHandle_t sem = xSemaphoreCreateBinary();
cp_dma_config_t config = CP_DMA_DEFAULT_CONFIG();
cp_dma_driver_t driver = NULL;
TEST_ESP_OK(cp_dma_driver_install(&config, &driver));
async_memcpy_config_t config = ASYNC_MEMCPY_DEFAULT_CONFIG();
config.backlog = TEST_ASYNC_MEMCPY_BENCH_COUNTS;
async_memcpy_t driver = NULL;
TEST_ESP_OK(esp_async_memcpy_install(&config, &driver));
uint8_t *sbuf = NULL;
uint8_t *dbuf = NULL;
uint8_t *from = NULL;
uint8_t *to = NULL;
cp_dma_setup_testbench(0, &test_cp_dma_memcpy_bench_len, &sbuf, &dbuf, &from, &to, 0);
async_memcpy_setup_testbench(0, &test_async_memcpy_bench_len, &sbuf, &dbuf, &from, &to, 0);
count = 0;
ccomp_timer_start();
for (int i = 0; i < TEST_CP_DMA_MECP_DMAY_BENCH_COUNTS; i++) {
TEST_ESP_OK(cp_dma_memcpy(driver, to, from, test_cp_dma_memcpy_bench_len, test_cp_dma_memcpy_cb, sem));
for (int i = 0; i < TEST_ASYNC_MEMCPY_BENCH_COUNTS; i++) {
TEST_ESP_OK(esp_async_memcpy(driver, to, from, test_async_memcpy_bench_len, test_async_memcpy_isr_cb, sem));
}
// wait for done semaphore
TEST_ASSERT_EQUAL(pdTRUE, xSemaphoreTake(sem, pdMS_TO_TICKS(1000)));
esp_rom_printf("memcpy %d Bytes data by HW costs %lldus\r\n", test_cp_dma_memcpy_bench_len, ccomp_timer_stop() / TEST_CP_DMA_MECP_DMAY_BENCH_COUNTS);
esp_rom_printf("memcpy %d Bytes data by HW costs %lldus\r\n", test_async_memcpy_bench_len, ccomp_timer_stop() / TEST_ASYNC_MEMCPY_BENCH_COUNTS);
ccomp_timer_start();
for (int i = 0; i < TEST_CP_DMA_MECP_DMAY_BENCH_COUNTS; i++) {
memcpy(to, from, test_cp_dma_memcpy_bench_len);
for (int i = 0; i < TEST_ASYNC_MEMCPY_BENCH_COUNTS; i++) {
memcpy(to, from, test_async_memcpy_bench_len);
}
esp_rom_printf("memcpy %d Bytes data by SW costs %lldus\r\n", test_cp_dma_memcpy_bench_len, ccomp_timer_stop() / TEST_CP_DMA_MECP_DMAY_BENCH_COUNTS);
esp_rom_printf("memcpy %d Bytes data by SW costs %lldus\r\n", test_async_memcpy_bench_len, ccomp_timer_stop() / TEST_ASYNC_MEMCPY_BENCH_COUNTS);
cp_dma_verify_and_clear_testbench(0, test_cp_dma_memcpy_bench_len, sbuf, dbuf, from, to);
async_memcpy_verify_and_clear_testbench(0, test_async_memcpy_bench_len, sbuf, dbuf, from, to);
TEST_ESP_OK(cp_dma_driver_uninstall(driver));
TEST_ESP_OK(esp_async_memcpy_uninstall(driver));
vSemaphoreDelete(sem);
}
#endif //SOC_CP_DMA_SUPPORTED || SOC_GDMA_SUPPORTED

View File

@@ -14,12 +14,8 @@
#include "hal/cp_dma_hal.h"
#include "hal/cp_dma_ll.h"
#include "soc/cp_dma_caps.h"
#define MCP_DESCRIPTOR_BUFFER_OWNER_DMA (1)
#define MCP_DESCRIPTOR_BUFFER_OWNER_CPU (0)
void cp_dma_hal_init(cp_dma_hal_context_t *hal, cp_dma_descriptor_t *tx_descriptors[], uint32_t tx_desc_num, cp_dma_descriptor_t *rx_descriptors[], uint32_t rx_desc_num)
void cp_dma_hal_init(cp_dma_hal_context_t *hal, const cp_dma_hal_config_t *config)
{
hal->dev = &CP_DMA;
cp_dma_ll_enable_clock(hal->dev, true);
@@ -31,37 +27,15 @@ void cp_dma_hal_init(cp_dma_hal_context_t *hal, cp_dma_descriptor_t *tx_descript
cp_dma_ll_clear_intr_status(hal->dev, UINT32_MAX);
cp_dma_ll_enable_owner_check(hal->dev, true);
// circle TX descriptors
for (int i = 0; i < tx_desc_num; i++) {
tx_descriptors[i]->dw0.owner = MCP_DESCRIPTOR_BUFFER_OWNER_CPU;
tx_descriptors[i]->next = tx_descriptors[i + 1];
}
tx_descriptors[tx_desc_num - 1]->next = tx_descriptors[0];
// circle RX descriptors
for (int i = 0; i < rx_desc_num; i++) {
rx_descriptors[i]->dw0.owner = MCP_DESCRIPTOR_BUFFER_OWNER_CPU;
rx_descriptors[i]->next = rx_descriptors[i + 1];
}
rx_descriptors[rx_desc_num - 1]->next = rx_descriptors[0];
// set the start of each descriptor chain
hal->tx_desc = tx_descriptors[0];
hal->rx_desc = rx_descriptors[0];
/* set base address of the first descriptor */
cp_dma_ll_tx_set_descriptor_base_addr(hal->dev, (uint32_t)hal->tx_desc);
cp_dma_ll_rx_set_descriptor_base_addr(hal->dev, (uint32_t)hal->rx_desc);
hal->next_rx_desc_to_check = rx_descriptors[0];
cp_dma_ll_tx_set_descriptor_base_addr(hal->dev, (uint32_t)config->outlink_base);
cp_dma_ll_rx_set_descriptor_base_addr(hal->dev, (uint32_t)config->inlink_base);
}
void cp_dma_hal_deinit(cp_dma_hal_context_t *hal)
{
cp_dma_ll_enable_clock(hal->dev, false);
hal->dev = NULL;
hal->tx_desc = NULL;
hal->rx_desc = NULL;
}
void cp_dma_hal_start(cp_dma_hal_context_t *hal)
@@ -77,7 +51,6 @@ void cp_dma_hal_stop(cp_dma_hal_context_t *hal)
{
// disable interrupt
cp_dma_ll_enable_intr(hal->dev, CP_DMA_LL_EVENT_RX_EOF, false);
cp_dma_ll_enable_intr(hal->dev, CP_DMA_LL_EVENT_TX_EOF, false);
// disable DMA
cp_dma_ll_start_rx(hal->dev, false);
@@ -94,125 +67,12 @@ void cp_dma_hal_clear_intr_status(cp_dma_hal_context_t *hal, uint32_t mask)
cp_dma_ll_clear_intr_status(hal->dev, mask);
}
int cp_dma_hal_prepare_transmit(cp_dma_hal_context_t *hal, void *buffer, size_t len, cp_dma_descriptor_t **start_desc, cp_dma_descriptor_t **end_desc)
void cp_dma_hal_restart_tx(cp_dma_hal_context_t *hal)
{
uint32_t prepared_length = 0;
uint8_t *buf = (uint8_t *)buffer;
cp_dma_descriptor_t *desc = hal->tx_desc; // descriptor iterator
cp_dma_descriptor_t *start = desc;
cp_dma_descriptor_t *end = desc;
while (len > SOC_CP_DMA_MAX_BUFFER_SIZE) {
if (desc->dw0.owner != MCP_DESCRIPTOR_BUFFER_OWNER_DMA) {
desc->dw0.eof = 0; // not the end of the transaction
desc->dw0.size = SOC_CP_DMA_MAX_BUFFER_SIZE;
desc->dw0.length = SOC_CP_DMA_MAX_BUFFER_SIZE;
desc->buffer = &buf[prepared_length];
desc = desc->next; // move to next descriptor
prepared_length += SOC_CP_DMA_MAX_BUFFER_SIZE;
len -= SOC_CP_DMA_MAX_BUFFER_SIZE;
} else {
// out of TX descriptors
goto _exit;
}
}
if (len) {
if (desc->dw0.owner != MCP_DESCRIPTOR_BUFFER_OWNER_DMA) {
end = desc; // the last descriptor used
desc->dw0.eof = 1; // end of the transaction
desc->dw0.size = len;
desc->dw0.length = len;
desc->buffer = &buf[prepared_length];
desc = desc->next; // move to next descriptor
prepared_length += len;
} else {
// out of TX descriptors
goto _exit;
}
}
*start_desc = start;
*end_desc = end;
_exit:
return prepared_length;
}
int cp_dma_hal_prepare_receive(cp_dma_hal_context_t *hal, void *buffer, size_t size, cp_dma_descriptor_t **start_desc, cp_dma_descriptor_t **end_desc)
{
uint32_t prepared_length = 0;
uint8_t *buf = (uint8_t *)buffer;
cp_dma_descriptor_t *desc = hal->rx_desc; // descriptor iterator
cp_dma_descriptor_t *start = desc;
cp_dma_descriptor_t *end = desc;
while (size > SOC_CP_DMA_MAX_BUFFER_SIZE) {
if (desc->dw0.owner != MCP_DESCRIPTOR_BUFFER_OWNER_DMA) {
desc->dw0.size = SOC_CP_DMA_MAX_BUFFER_SIZE;
desc->buffer = &buf[prepared_length];
desc = desc->next; // move to next descriptor
prepared_length += SOC_CP_DMA_MAX_BUFFER_SIZE;
size -= SOC_CP_DMA_MAX_BUFFER_SIZE;
} else {
// out of RX descriptors
goto _exit;
}
}
if (size) {
if (desc->dw0.owner != MCP_DESCRIPTOR_BUFFER_OWNER_DMA) {
end = desc; // the last descriptor used
desc->dw0.size = size;
desc->buffer = &buf[prepared_length];
desc = desc->next; // move to next descriptor
prepared_length += size;
} else {
// out of RX descriptors
goto _exit;
}
}
*start_desc = start;
*end_desc = end;
_exit:
return prepared_length;
}
void cp_dma_hal_restart_tx(cp_dma_hal_context_t *hal, cp_dma_descriptor_t *start_desc, cp_dma_descriptor_t *end_desc)
{
// Give descriptor owner to DMA
cp_dma_descriptor_t *desc = start_desc;
while (desc != end_desc) {
desc->dw0.owner = MCP_DESCRIPTOR_BUFFER_OWNER_DMA;
desc = desc->next;
}
desc->dw0.owner = MCP_DESCRIPTOR_BUFFER_OWNER_DMA;
hal->tx_desc = end_desc->next; // update the next available descriptor in HAL
cp_dma_ll_restart_tx(hal->dev);
}
void cp_dma_hal_restart_rx(cp_dma_hal_context_t *hal, cp_dma_descriptor_t *start_desc, cp_dma_descriptor_t *end_desc)
void cp_dma_hal_restart_rx(cp_dma_hal_context_t *hal)
{
// Give descriptor owner to DMA
cp_dma_descriptor_t *desc = start_desc;
while (desc != end_desc) {
desc->dw0.owner = MCP_DESCRIPTOR_BUFFER_OWNER_DMA;
desc = desc->next;
}
desc->dw0.owner = MCP_DESCRIPTOR_BUFFER_OWNER_DMA;
hal->rx_desc = end_desc->next; // update the next available descriptor in HAL
cp_dma_ll_restart_rx(hal->dev);
}
bool cp_dma_hal_get_next_rx_descriptor(cp_dma_hal_context_t *hal, cp_dma_descriptor_t *eof_desc, cp_dma_descriptor_t **next_desc)
{
cp_dma_descriptor_t *next = hal->next_rx_desc_to_check;
// additional check, to guard against the interrupt being triggered by mistake
if (next->dw0.owner == MCP_DESCRIPTOR_BUFFER_OWNER_CPU) {
hal->next_rx_desc_to_check = hal->next_rx_desc_to_check->next;
*next_desc = next;
// return whether we need to continue
return eof_desc == next ? false : true;
} else {
*next_desc = NULL;
return false;
}
}

View File

@@ -33,48 +33,29 @@ extern "C" {
#include <stddef.h>
#include <stdbool.h>
#include "esp_attr.h"
#include "hal/dma_types.h"
#include "soc/cp_dma_struct.h"
typedef struct cp_dma_descriptor {
struct {
uint32_t size : 12; /*!< buffer size */
uint32_t length : 12; /*!< specify number of valid bytes in the buffer */
uint32_t reversed24_27 : 4; /*!< reserved */
uint32_t err : 1; /*!< specify whether a received buffer contains error */
uint32_t reserved29 : 1; /*!< reserved */
uint32_t eof : 1; /*!< if this DMA link is the last one, you should set this bit to 1 */
uint32_t owner : 1; /*!< specify the owner of buffer that this descriptor points to, 1=DMA, 0=CPU. DMA will clear it after use. */
} dw0; /*!< descriptor word 0 */
void *buffer; /*!< pointer to the buffer */
struct cp_dma_descriptor *next; /*!< pointer to the next descriptor or NULL if this descriptor is the last one */
} cp_dma_descriptor_t;
_Static_assert(sizeof(cp_dma_descriptor_t) == 12, "cp_dma_descriptor_t should occupy 12 bytes in memory");
/**
* @brief HAL context
*
* @note `tx_desc` and `rx_desc` are internal state of the HAL and will be modified during operation.
* The upper layer of the HAL should keep the buffer addresses itself and make sure the buffers are freed when the HAL is no longer used.
*
*/
typedef struct {
cp_dma_dev_t *dev;
cp_dma_descriptor_t *tx_desc;
cp_dma_descriptor_t *rx_desc;
cp_dma_descriptor_t *next_rx_desc_to_check;
} cp_dma_hal_context_t;
typedef struct {
dma_descriptor_t *outlink_base; /*!< Address of the first outlink descriptor */
dma_descriptor_t *inlink_base; /*!< Address of the first inlink descriptor */
} cp_dma_hal_config_t;
/**
* @brief Initialize HAL layer context
*
 * @param hal HAL layer context, memory should be allocated at driver layer
 * @param tx_descriptors out link descriptor pool
 * @param tx_desc_num number of out link descriptors
 * @param rx_descriptors in link descriptor pool
 * @param rx_desc_num number of in link descriptors
 * @param hal HAL layer context, whose memory should be allocated at driver layer
* @param config configuration for the HAL layer
*/
void cp_dma_hal_init(cp_dma_hal_context_t *hal, cp_dma_descriptor_t *tx_descriptors[], uint32_t tx_desc_num, cp_dma_descriptor_t *rx_descriptors[], uint32_t rx_desc_num);
void cp_dma_hal_init(cp_dma_hal_context_t *hal, const cp_dma_hal_config_t *config);
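A hedged usage sketch of the new signature, assuming the outlink/inlink descriptor pools (example_out_pool, example_in_pool) have already been allocated and linked by the driver layer:

static dma_descriptor_t example_out_pool[4]; // hypothetical outlink pool
static dma_descriptor_t example_in_pool[4];  // hypothetical inlink pool
static cp_dma_hal_context_t example_hal;

static void example_init_hal(void)
{
    cp_dma_hal_config_t config = {
        .outlink_base = example_out_pool,
        .inlink_base = example_in_pool,
    };
    cp_dma_hal_init(&example_hal, &config);
}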
/**
* @brief Deinitialize HAL layer context
@@ -105,39 +86,6 @@ uint32_t cp_dma_hal_get_intr_status(cp_dma_hal_context_t *hal) IRAM_ATTR;
*/
void cp_dma_hal_clear_intr_status(cp_dma_hal_context_t *hal, uint32_t mask) IRAM_ATTR;
/**
* @brief Get next RX descriptor that needs recycling
*
* @param eof_desc EOF descriptor for this iteration
* @param[out] next_desc Next descriptor needs to check
* @return Whether to continue
*/
bool cp_dma_hal_get_next_rx_descriptor(cp_dma_hal_context_t *hal, cp_dma_descriptor_t *eof_desc, cp_dma_descriptor_t **next_desc);
/**
* @brief Prepare buffer to be transmitted
*
* @param hal HAL layer context
* @param buffer buffer address
* @param len buffer size
* @param[out] start_desc The first descriptor that carry the TX transaction
* @param[out] end_desc The last descriptor that carry the TX transaction
 * @return Number of bytes that have been prepared to transmit
*/
int cp_dma_hal_prepare_transmit(cp_dma_hal_context_t *hal, void *buffer, size_t len, cp_dma_descriptor_t **start_desc, cp_dma_descriptor_t **end_desc);
/**
* @brief Prepare buffer to receive
*
* @param hal HAL layer context
* @param buffer buffer address
* @param size buffer size
* @param[out] start_desc The first descriptor that carries the RX transaction
* @param[out] end_desc The last descriptor that carries the RX transaction
 * @return Number of bytes that have been prepared to receive
*/
int cp_dma_hal_prepare_receive(cp_dma_hal_context_t *hal, void *buffer, size_t size, cp_dma_descriptor_t **start_desc, cp_dma_descriptor_t **end_desc);
/**@{*/
/**
* @brief Give the owner of descriptors between [start_desc, end_desc] to DMA, and restart DMA HW engine
@@ -146,8 +94,8 @@ int cp_dma_hal_prepare_receive(cp_dma_hal_context_t *hal, void *buffer, size_t s
* @param start_desc The first descriptor that carries one transaction
* @param end_desc The last descriptor that carries one transaction
*/
void cp_dma_hal_restart_tx(cp_dma_hal_context_t *hal, cp_dma_descriptor_t *start_desc, cp_dma_descriptor_t *end_desc);
void cp_dma_hal_restart_rx(cp_dma_hal_context_t *hal, cp_dma_descriptor_t *start_desc, cp_dma_descriptor_t *end_desc);
void cp_dma_hal_restart_tx(cp_dma_hal_context_t *hal);
void cp_dma_hal_restart_rx(cp_dma_hal_context_t *hal);
/**@}*/
#ifdef __cplusplus

View File

@@ -74,10 +74,6 @@ static inline uint32_t periph_ll_get_clk_en_mask(periph_module_t periph)
return SYSTEM_SPI3_CLK_EN;
case PERIPH_VSPI_MODULE:
return SYSTEM_SPI4_CLK_EN;
case PERIPH_SPI2_DMA_MODULE:
return SYSTEM_SPI2_DMA_CLK_EN;
case PERIPH_SPI3_DMA_MODULE:
return SYSTEM_SPI3_DMA_CLK_EN;
case PERIPH_SDMMC_MODULE:
return SYSTEM_SDIO_HOST_CLK_EN;
case PERIPH_TWAI_MODULE:
@@ -94,6 +90,10 @@ static inline uint32_t periph_ll_get_clk_en_mask(periph_module_t periph)
return SYSTEM_BT_BASEBAND_EN;
case PERIPH_BT_LC_MODULE:
return SYSTEM_BT_LC_EN;
case PERIPH_SYSTIMER_MODULE:
return SYSTEM_SYSTIMER_CLK_EN;
case PERIPH_GDMA_MODULE:
return SYSTEM_DMA_CLK_EN;
default:
return 0;
}
@@ -151,14 +151,14 @@ static inline uint32_t periph_ll_get_rst_en_mask(periph_module_t periph, bool en
return SYSTEM_SPI3_RST;
case PERIPH_VSPI_MODULE:
return SYSTEM_SPI4_RST;
case PERIPH_SPI2_DMA_MODULE:
return SYSTEM_SPI2_DMA_RST;
case PERIPH_SPI3_DMA_MODULE:
return SYSTEM_SPI3_DMA_RST;
case PERIPH_SDMMC_MODULE:
return SYSTEM_SDIO_HOST_RST;
case PERIPH_TWAI_MODULE:
return SYSTEM_TWAI_RST;
case PERIPH_SYSTIMER_MODULE:
return SYSTEM_SYSTIMER_RST;
case PERIPH_GDMA_MODULE:
return SYSTEM_DMA_RST;
default:
return 0;
}
@@ -176,7 +176,7 @@ static uint32_t periph_ll_get_clk_en_reg(periph_module_t periph)
return SYSTEM_WIFI_CLK_EN_REG ;
case PERIPH_UART2_MODULE:
case PERIPH_SDMMC_MODULE:
case PERIPH_SPI_SHARED_DMA_MODULE:
case PERIPH_GDMA_MODULE:
return SYSTEM_PERIP_CLK_EN1_REG;
default:
return SYSTEM_PERIP_CLK_EN0_REG;
@@ -195,7 +195,7 @@ static uint32_t periph_ll_get_rst_en_reg(periph_module_t periph)
return SYSTEM_CORE_RST_EN_REG;
case PERIPH_UART2_MODULE:
case PERIPH_SDMMC_MODULE:
case PERIPH_SPI_SHARED_DMA_MODULE:
case PERIPH_GDMA_MODULE:
return SYSTEM_PERIP_RST_EN1_REG;
default:
return SYSTEM_PERIP_RST_EN0_REG;
@@ -228,4 +228,3 @@ static inline bool IRAM_ATTR periph_ll_periph_enabled(periph_module_t periph)
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,495 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#ifdef __cplusplus
extern "C" {
#endif
#include <stdint.h>
#include <stdbool.h>
#include "soc/gdma_struct.h"
#include "soc/gdma_reg.h"
#include "soc/gdma_caps.h"
#define GDMA_LL_EVENT_TX_L3_FIFO_UDF (1<<17)
#define GDMA_LL_EVENT_TX_L3_FIFO_OVF (1<<16)
#define GDMA_LL_EVENT_TX_L1_FIFO_UDF (1<<15)
#define GDMA_LL_EVENT_TX_L1_FIFO_OVF (1<<14)
#define GDMA_LL_EVENT_RX_L3_FIFO_UDF (1<<13)
#define GDMA_LL_EVENT_RX_L3_FIFO_OVF (1<<12)
#define GDMA_LL_EVENT_RX_L1_FIFO_UDF (1<<11)
#define GDMA_LL_EVENT_RX_L1_FIFO_OVF (1<<10)
#define GDMA_LL_EVENT_RX_WATER_MARK (1<<9)
#define GDMA_LL_EVENT_TX_TOTAL_EOF (1<<8)
#define GDMA_LL_EVENT_RX_DESC_EMPTY (1<<7)
#define GDMA_LL_EVENT_TX_DESC_ERROR (1<<6)
#define GDMA_LL_EVENT_RX_DESC_ERROR (1<<5)
#define GDMA_LL_EVENT_TX_EOF (1<<4)
#define GDMA_LL_EVENT_TX_DONE (1<<3)
#define GDMA_LL_EVENT_RX_ERR_EOF (1<<2)
#define GDMA_LL_EVENT_RX_SUC_EOF (1<<1)
#define GDMA_LL_EVENT_RX_DONE (1<<0)
#define GDMA_LL_TRIG_SRC_SPI2 (0)
#define GDMA_LL_TRIG_SRC_SPI3 (1)
#define GDMA_LL_TRIG_SRC_UART (2)
#define GDMA_LL_TRIG_SRC_I2S0 (3)
#define GDMA_LL_TRIG_SRC_I2S1 (4)
#define GDMA_LL_TRIG_SRC_LCD_CAM (5)
#define GDMA_LL_TRIG_SRC_AES (6)
#define GDMA_LL_TRIG_SRC_SHA (7)
#define GDMA_LL_TRIG_SRC_ADC_DAC (8)
///////////////////////////////////// Common /////////////////////////////////////////
/**
 * @brief Enable DMA channel M2M mode (TX channel n forwards data to RX channel n), disabled by default
*/
static inline void gdma_ll_enable_m2m_mode(gdma_dev_t *dev, uint32_t channel, bool enable)
{
dev->conf0[channel].mem_trans_en = enable;
if (!enable) {
dev->peri_sel[channel].peri_in_sel = 0;
dev->peri_sel[channel].peri_out_sel = 0;
}
}
/**
* @brief Enable DMA to check the owner bit in the descriptor, disabled by default
*/
static inline void gdma_ll_enable_owner_check(gdma_dev_t *dev, uint32_t channel, bool enable)
{
dev->conf1[channel].check_owner = enable;
}
/**
* @brief Get DMA interrupt status word
*/
static inline uint32_t gdma_ll_get_interrupt_status(gdma_dev_t *dev, uint32_t channel)
{
return dev->int_st[channel].val;
}
/**
* @brief Enable DMA interrupt
*/
static inline void gdma_ll_enable_interrupt(gdma_dev_t *dev, uint32_t channel, uint32_t mask, bool enable)
{
if (enable) {
dev->int_ena[channel].val |= mask;
} else {
dev->int_ena[channel].val &= ~mask;
}
}
/**
* @brief Clear DMA interrupt
*/
static inline void gdma_ll_clear_interrupt_status(gdma_dev_t *dev, uint32_t channel, uint32_t mask)
{
dev->int_clr[channel].val = mask;
}
/**
* @brief Enable DMA clock gating
*/
static inline void gdma_ll_enable_clock(gdma_dev_t *dev, bool enable)
{
dev->misc_conf.clk_en = enable;
}
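/*
 * To show how the common helpers above fit together, here is a hedged sketch of a
 * memory-to-memory channel bring-up; channel 0 and the interrupt mask are assumptions,
 * and GDMA is the device instance declared in gdma_struct.h.
 */
static void example_gdma_m2m_setup(void)
{
    gdma_ll_enable_clock(&GDMA, true);                    // ungate the DMA clock
    gdma_ll_enable_m2m_mode(&GDMA, 0, true);              // TX channel 0 feeds RX channel 0
    gdma_ll_enable_owner_check(&GDMA, 0, false);          // skip owner checking in this sketch
    gdma_ll_clear_interrupt_status(&GDMA, 0, UINT32_MAX); // start from a clean interrupt state
    gdma_ll_enable_interrupt(&GDMA, 0, GDMA_LL_EVENT_RX_SUC_EOF | GDMA_LL_EVENT_TX_TOTAL_EOF, true);
}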
///////////////////////////////////// RX /////////////////////////////////////////
/**
* @brief Enable DMA RX channel burst reading data, disabled by default
*/
static inline void gdma_ll_rx_enable_data_burst(gdma_dev_t *dev, uint32_t channel, bool enable)
{
dev->conf0[channel].in_data_burst_en = enable;
}
/**
* @brief Enable DMA RX channel burst reading descriptor link, disabled by default
*/
static inline void gdma_ll_rx_enable_descriptor_burst(gdma_dev_t *dev, uint32_t channel, bool enable)
{
dev->conf0[channel].indscr_burst_en = enable;
}
/**
* @brief Reset DMA RX channel FSM and FIFO pointer
*/
static inline void gdma_ll_rx_reset_channel(gdma_dev_t *dev, uint32_t channel)
{
dev->conf0[channel].in_rst = 1;
dev->conf0[channel].in_rst = 0;
}
/**
* @brief Set DMA RX channel memory block size
* @param size_index Supported value: GDMA_IN_EXT_MEM_BK_SIZE_16B, GDMA_IN_EXT_MEM_BK_SIZE_32B
*/
static inline void gdma_ll_rx_set_block_size_psram(gdma_dev_t *dev, uint32_t channel, uint32_t size_index)
{
dev->conf1[channel].in_ext_mem_bk_size = size_index;
}
/**
* @brief Set the water mark for RX channel, default value is 12
*/
static inline void gdma_ll_rx_set_water_mark(gdma_dev_t *dev, uint32_t channel, uint32_t water_mark)
{
dev->conf1[channel].infifo_full_thrs = water_mark;
}
/**
* @brief Check if DMA RX FIFO is full
* @param fifo_level (1,2,3) <=> (L1, L2, L3)
*/
static inline bool gdma_ll_rx_is_fifo_full(gdma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
return dev->infifo_status[channel].val & (1 << 2 * (fifo_level - 1));
}
/**
* @brief Check if DMA RX FIFO is empty
* @param fifo_level (1,2,3) <=> (L1, L2, L3)
*/
static inline bool gdma_ll_rx_is_fifo_empty(gdma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
return dev->infifo_status[channel].val & (1 << (2 * (fifo_level - 1) + 1));
}
/**
* @brief Get number of bytes in RX FIFO (L1, L2, L3)
* @param fifo_level (1,2,3) <=> (L1, L2, L3)
*/
static inline uint32_t gdma_ll_rx_get_fifo_bytes(gdma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
switch (fifo_level) {
case 1:
return dev->infifo_status[channel].infifo_cnt_l1;
case 2:
return dev->infifo_status[channel].infifo_cnt_l2;
case 3:
return dev->infifo_status[channel].infifo_cnt_l3;
    default:
        return 0; // invalid fifo_level
    }
}
/**
* @brief Pop data from DMA RX FIFO
*/
static inline uint32_t gdma_ll_rx_pop_data(gdma_dev_t *dev, uint32_t channel)
{
dev->in_pop[channel].infifo_pop = 1;
return dev->in_pop[channel].infifo_rdata;
}
/**
* @brief Set the descriptor link base address for RX channel
*/
static inline void gdma_ll_rx_set_desc_addr(gdma_dev_t *dev, uint32_t channel, uint32_t addr)
{
dev->in_link[channel].addr = addr;
}
/**
* @brief Start dealing with RX descriptors
*/
static inline void gdma_ll_rx_start(gdma_dev_t *dev, uint32_t channel)
{
dev->in_link[channel].start = 1;
}
/**
* @brief Stop dealing with RX descriptors
*/
static inline void gdma_ll_rx_stop(gdma_dev_t *dev, uint32_t channel)
{
dev->in_link[channel].stop = 1;
}
/**
* @brief Restart a new inlink right after the last descriptor
*/
static inline void gdma_ll_rx_restart(gdma_dev_t *dev, uint32_t channel)
{
dev->in_link[channel].restart = 1;
}
/**
 * @brief Enable DMA RX to return the address of the current descriptor when it receives an error
*/
static inline void gdma_ll_rx_enable_auto_return(gdma_dev_t *dev, uint32_t channel, bool enable)
{
dev->in_link[channel].auto_ret = enable;
}
/**
* @brief Check if DMA RX FSM is in IDLE state
*/
static inline bool gdma_ll_rx_is_fsm_idle(gdma_dev_t *dev, uint32_t channel)
{
return dev->in_link[channel].park;
}
/**
* @brief Get RX success EOF descriptor's address
*/
static inline uint32_t gdma_ll_rx_get_success_eof_desc_addr(gdma_dev_t *dev, uint32_t channel)
{
return dev->in_suc_eof_des_addr[channel];
}
/**
* @brief Get RX error EOF descriptor's address
*/
static inline uint32_t gdma_ll_rx_get_error_eof_desc_addr(gdma_dev_t *dev, uint32_t channel)
{
return dev->in_err_eof_des_addr[channel];
}
/**
* @brief Get current RX descriptor's address
*/
static inline uint32_t gdma_ll_rx_get_current_desc_addr(gdma_dev_t *dev, uint32_t channel)
{
return dev->in_dscr[channel];
}
/**
* @brief Set weight for DMA RX channel
*/
static inline void gdma_ll_rx_set_weight(gdma_dev_t *dev, uint32_t channel, uint32_t weight)
{
dev->wight[channel].rx_weight = weight;
}
/**
* @brief Set priority for DMA RX channel
*/
static inline void gdma_ll_rx_set_priority(gdma_dev_t *dev, uint32_t channel, uint32_t prio)
{
dev->pri[channel].rx_pri = prio;
}
/**
* @brief Connect DMA RX channel to a given peripheral
*/
static inline void gdma_ll_rx_connect_to_periph(gdma_dev_t *dev, uint32_t channel, uint32_t periph_id)
{
dev->peri_sel[channel].peri_in_sel = periph_id;
}
/**
* @brief Extend the L2 FIFO size for RX channel
 * @note By default, the L2 FIFO size is SOC_GDMA_L2_FIFO_BASE_SIZE bytes. It is suggested to extend it to twice the block size when accessing PSRAM.
 * @note `size_in_bytes` should be aligned to 8 and be larger than SOC_GDMA_L2_FIFO_BASE_SIZE
*/
static inline void gdma_ll_rx_extend_l2_fifo_size_to(gdma_dev_t *dev, uint32_t channel, uint32_t size_in_bytes)
{
if (size_in_bytes > SOC_GDMA_L2_FIFO_BASE_SIZE) {
dev->sram_size[channel].in_size = (size_in_bytes - SOC_GDMA_L2_FIFO_BASE_SIZE) / 8;
}
}
///////////////////////////////////// TX /////////////////////////////////////////
/**
* @brief Enable DMA TX channel burst sending data, disabled by default
*/
static inline void gdma_ll_tx_enable_data_burst(gdma_dev_t *dev, uint32_t channel, bool enable)
{
dev->conf0[channel].out_data_burst_en = enable;
}
/**
* @brief Enable DMA TX channel burst reading descriptor link, disabled by default
*/
static inline void gdma_ll_tx_enable_descriptor_burst(gdma_dev_t *dev, uint32_t channel, bool enable)
{
dev->conf0[channel].outdscr_burst_en = enable;
}
/**
* @brief Set TX channel EOF mode
*/
static inline void gdma_ll_tx_set_eof_mode(gdma_dev_t *dev, uint32_t channel, uint32_t mode)
{
dev->conf0[channel].out_eof_mode = mode;
}
/**
 * @brief Enable the DMA TX channel to automatically write results back to the descriptor after all data has been sent out, disabled by default
*/
static inline void gdma_ll_tx_enable_auto_write_back(gdma_dev_t *dev, uint32_t channel, bool enable)
{
dev->conf0[channel].out_auto_wrback = enable;
}
/**
* @brief Reset DMA TX channel FSM and FIFO pointer
*/
static inline void gdma_ll_tx_reset_channel(gdma_dev_t *dev, uint32_t channel)
{
dev->conf0[channel].out_rst = 1;
dev->conf0[channel].out_rst = 0;
}
/**
* @brief Set DMA TX channel memory block size
* @param size_index Supported value: GDMA_OUT_EXT_MEM_BK_SIZE_16B, GDMA_OUT_EXT_MEM_BK_SIZE_32B
*/
static inline void gdma_ll_tx_set_block_size_psram(gdma_dev_t *dev, uint32_t channel, uint32_t size_index)
{
dev->conf1[channel].out_ext_mem_bk_size = size_index;
}
/**
* @brief Check if DMA TX FIFO is full
* @param fifo_level (1,2,3) <=> (L1, L2, L3)
*/
static inline bool gdma_ll_tx_is_fifo_full(gdma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
return dev->outfifo_status[channel].val & (1 << 2 * (fifo_level - 1));
}
/**
* @brief Check if DMA TX FIFO is empty
* @param fifo_level (1,2,3) <=> (L1, L2, L3)
*/
static inline bool gdma_ll_tx_is_fifo_empty(gdma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
return dev->outfifo_status[channel].val & (1 << (2 * (fifo_level - 1) + 1));
}
/**
* @brief Get number of bytes in TX FIFO (L1, L2, L3)
* @param fifo_level (1,2,3) <=> (L1, L2, L3)
*/
static inline uint32_t gdma_ll_tx_get_fifo_bytes(gdma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
switch (fifo_level) {
case 1:
return dev->outfifo_status[channel].outfifo_cnt_l1;
case 2:
return dev->outfifo_status[channel].outfifo_cnt_l2;
case 3:
return dev->outfifo_status[channel].outfifo_cnt_l3;
    default:
        return 0; // invalid fifo_level
    }
}
/**
* @brief Push data into DMA TX FIFO
*/
static inline void gdma_ll_tx_push_data(gdma_dev_t *dev, uint32_t channel, uint32_t data)
{
dev->out_push[channel].outfifo_wdata = data;
dev->out_push[channel].outfifo_push = 1;
}
/**
* @brief Set the descriptor link base address for TX channel
*/
static inline void gdma_ll_tx_set_desc_addr(gdma_dev_t *dev, uint32_t channel, uint32_t addr)
{
dev->out_link[channel].addr = addr;
}
/**
* @brief Start dealing with TX descriptors
*/
static inline void gdma_ll_tx_start(gdma_dev_t *dev, uint32_t channel)
{
dev->out_link[channel].start = 1;
}
/**
* @brief Stop dealing with TX descriptors
*/
static inline void gdma_ll_tx_stop(gdma_dev_t *dev, uint32_t channel)
{
dev->out_link[channel].stop = 1;
}
/**
* @brief Restart a new outlink right after the last descriptor
*/
static inline void gdma_ll_tx_restart(gdma_dev_t *dev, uint32_t channel)
{
dev->out_link[channel].restart = 1;
}
/**
* @brief Check if DMA TX FSM is in IDLE state
*/
static inline bool gdma_ll_tx_is_fsm_idle(gdma_dev_t *dev, uint32_t channel)
{
return dev->out_link[channel].park;
}
/**
* @brief Get TX EOF descriptor's address
*/
static inline uint32_t gdma_ll_tx_get_eof_desc_addr(gdma_dev_t *dev, uint32_t channel)
{
return dev->out_eof_des_addr[channel];
}
/**
* @brief Get current TX descriptor's address
*/
static inline uint32_t gdma_ll_tx_get_current_desc_addr(gdma_dev_t *dev, uint32_t channel)
{
return dev->out_dscr[channel];
}
/**
* @brief Set weight for DMA TX channel
*/
static inline void gdma_ll_tx_set_weight(gdma_dev_t *dev, uint32_t channel, uint32_t weight)
{
dev->wight[channel].tx_weight = weight;
}
/**
* @brief Set priority for DMA TX channel
*/
static inline void gdma_ll_tx_set_priority(gdma_dev_t *dev, uint32_t channel, uint32_t prio)
{
dev->pri[channel].tx_pri = prio;
}
/**
* @brief Connect DMA TX channel to a given peripheral
*/
static inline void gdma_ll_tx_connect_to_periph(gdma_dev_t *dev, uint32_t channel, uint32_t periph_id)
{
dev->peri_sel[channel].peri_out_sel = periph_id;
}
/**
* @brief Extend the L2 FIFO size for TX channel
 * @note By default, the L2 FIFO size is SOC_GDMA_L2_FIFO_BASE_SIZE bytes. It is suggested to extend it to twice the block size when accessing PSRAM.
 * @note `size_in_bytes` should be aligned to 8 and be larger than SOC_GDMA_L2_FIFO_BASE_SIZE
*/
static inline void gdma_ll_tx_extend_fifo_size_to(gdma_dev_t *dev, uint32_t channel, uint32_t size_in_bytes)
{
if (size_in_bytes > SOC_GDMA_L2_FIFO_BASE_SIZE) {
dev->sram_size[channel].out_size = size_in_bytes / 8;
}
}
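/*
 * Continuing the memory-to-memory sketch, a hedged example of mounting the descriptor
 * lists and starting both directions on channel 0; out_link_head and in_link_head are
 * hypothetical descriptor addresses prepared by the caller.
 */
static void example_gdma_m2m_start(uint32_t out_link_head, uint32_t in_link_head)
{
    gdma_ll_rx_reset_channel(&GDMA, 0);
    gdma_ll_tx_reset_channel(&GDMA, 0);
    gdma_ll_rx_set_desc_addr(&GDMA, 0, in_link_head);
    gdma_ll_tx_set_desc_addr(&GDMA, 0, out_link_head);
    gdma_ll_rx_start(&GDMA, 0); // make RX ready before TX starts pushing data
    gdma_ll_tx_start(&GDMA, 0);
}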
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,45 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#ifdef __cplusplus
extern "C" {
#endif
#include <stdint.h>
/**
* @brief Type of DMA descriptor
*
*/
typedef struct dma_descriptor_s {
struct {
uint32_t size : 12; /*!< Buffer size */
uint32_t length : 12; /*!< Number of valid bytes in the buffer */
uint32_t reversed24_27 : 4; /*!< Reserved */
uint32_t err_eof : 1; /*!< Whether the received buffer contains error */
uint32_t reserved29 : 1; /*!< Reserved */
uint32_t suc_eof : 1; /*!< Whether the descriptor is the last one in the link */
uint32_t owner : 1; /*!< Who is allowed to access the buffer that this descriptor points to */
} dw0; /*!< Descriptor Word 0 */
void *buffer; /*!< Pointer to the buffer */
struct dma_descriptor_s *next; /*!< Pointer to the next descriptor (set to NULL if the descriptor is the last one, e.g. suc_eof=1) */
} dma_descriptor_t;
_Static_assert(sizeof(dma_descriptor_t) == 12, "dma_descriptor_t should occupy 12 bytes in memory");
#define DMA_DESCRIPTOR_BUFFER_OWNER_CPU (0) /*!< DMA buffer is allowed to be accessed by CPU */
#define DMA_DESCRIPTOR_BUFFER_OWNER_DMA (1) /*!< DMA buffer is allowed to be accessed by DMA engine */
#define DMA_DESCRIPTOR_BUFFER_MAX_SIZE (4095) /*!< Maximum size of the buffer that can be attached to descriptor */
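A hedged sketch of splitting a large TX buffer across several descriptors while respecting DMA_DESCRIPTOR_BUFFER_MAX_SIZE; `descs` is assumed to be a pre-linked list with enough entries for `len` bytes:

static void example_mount_tx_buffer(dma_descriptor_t *descs, uint8_t *buf, uint32_t len)
{
    dma_descriptor_t *desc = descs;
    uint32_t offset = 0;
    while (len > 0) {
        uint32_t chunk = len > DMA_DESCRIPTOR_BUFFER_MAX_SIZE ? DMA_DESCRIPTOR_BUFFER_MAX_SIZE : len;
        desc->buffer = buf + offset;
        desc->dw0.size = chunk;
        desc->dw0.length = chunk;                          // all bytes in the chunk are valid
        desc->dw0.suc_eof = (len == chunk);                // mark the last descriptor as EOF
        desc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA; // hand the buffer to the DMA engine
        offset += chunk;
        len -= chunk;
        desc = desc->next;
    }
}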

View File

@@ -0,0 +1,35 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*******************************************************************************
* NOTICE
 * The HAL is not a public API, don't use it in application code.
 * See readme.md in soc/README.md
******************************************************************************/
#pragma once
#ifdef __cplusplus
extern "C" {
#endif
#include "soc/gdma_struct.h"
typedef struct {
gdma_dev_t *dev;
} gdma_hal_context_t;
#ifdef __cplusplus
}
#endif

View File

@@ -6,6 +6,7 @@
#pragma once
#define SOC_TWAI_SUPPORTED 1
#define SOC_CP_DMA_SUPPORTED 1
#define SOC_CPU_CORES_NUM 1
#define SOC_SUPPORTS_SECURE_DL_MODE 1
#define SOC_RISCV_COPROC_SUPPORTED 1

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -14,12 +14,5 @@
#pragma once
#ifdef __cplusplus
extern "C" {
#endif
#define SOC_CP_DMA_MAX_BUFFER_SIZE (4095) /*!< Maximum size of the buffer that can be attached to descriptor */
#ifdef __cplusplus
}
#endif
#define SOC_GDMA_CHANNELS_NUM (5) /*!< GDMA has 5 TX and 5 RX channels in ESP32-S3 */
#define SOC_GDMA_L2_FIFO_BASE_SIZE (16) /*!< GDMA L2 FIFO basic size is 16 Bytes */

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,322 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#ifdef __cplusplus
extern "C"
{
#endif
#include <stdint.h>
typedef volatile struct {
union {
struct {
uint32_t in_rst : 1; /*This bit is used to reset DMA channel 0 Rx FSM and Rx FIFO pointer.*/
uint32_t out_rst : 1; /*This bit is used to reset DMA channel 0 Tx FSM and Tx FIFO pointer.*/
uint32_t in_loop_test : 1; /*reserved*/
uint32_t out_loop_test : 1; /*reserved*/
uint32_t out_auto_wrback : 1; /*Set this bit to enable automatic outlink-writeback when all the data in tx buffer has been transmitted.*/
uint32_t out_eof_mode : 1; /*EOF flag generation mode when transmitting data. 1: EOF flag for Tx channel 0 is generated when data need to transmit has been popped from FIFO in DMA*/
uint32_t outdscr_burst_en : 1; /*Set this bit to 1 to enable INCR burst transfer for Tx channel 0 reading link descriptor when accessing internal SRAM.*/
uint32_t indscr_burst_en : 1; /*Set this bit to 1 to enable INCR burst transfer for Rx channel 0 reading link descriptor when accessing internal SRAM.*/
uint32_t out_data_burst_en : 1; /*Set this bit to 1 to enable INCR burst transfer for Tx channel 0 transmitting data when accessing internal SRAM.*/
uint32_t in_data_burst_en : 1; /*Set this bit to 1 to enable INCR burst transfer for Rx channel 0 receiving data when accessing internal SRAM.*/
uint32_t mem_trans_en : 1; /*Set this bit 1 to enable automatic transmitting data from memory to memory via DMA.*/
uint32_t reserved11 : 21; /*reserved*/
};
uint32_t val;
} conf0[5];
union {
struct {
uint32_t infifo_full_thrs : 12; /*This register is used to generate the INFIFO_FULL_WM_INT interrupt when Rx channel 0 received byte number in Rx FIFO is up to the value of the register.*/
uint32_t check_owner : 1; /*Set this bit to enable checking the owner attribute of the link descriptor.*/
uint32_t in_ext_mem_bk_size : 2; /*Block size of Rx channel 0 when DMA access external SRAM. 0: 16 bytes 1: 32 bytes 2/3:reserved*/
uint32_t out_ext_mem_bk_size : 2; /*Block size of Tx channel 0 when DMA access external SRAM. 0: 16 bytes 1: 32 bytes 2/3:reserved*/
uint32_t reserved17 : 15; /*reserved*/
};
uint32_t val;
} conf1[5];
union {
struct {
uint32_t in_done : 1; /*The raw interrupt bit turns to high level when the last data pointed by one inlink descriptor has been received for Rx channel 0.*/
uint32_t in_suc_eof : 1; /*The raw interrupt bit turns to high level when the last data pointed by one inlink descriptor has been received for Rx channel 0. For UHCI0 the raw interrupt bit turns to high level when the last data pointed by one inlink descriptor has been received and no data error is detected for Rx channel 0.*/
uint32_t in_err_eof : 1; /*The raw interrupt bit turns to high level when data error is detected only in the case that the peripheral is UHCI0 for Rx channel 0. For other peripherals this raw interrupt is reserved.*/
uint32_t out_done : 1; /*The raw interrupt bit turns to high level when the last data pointed by one outlink descriptor has been transmitted to peripherals for Tx channel 0.*/
uint32_t out_eof : 1; /*The raw interrupt bit turns to high level when the last data pointed by one outlink descriptor has been read from memory for Tx channel 0.*/
uint32_t in_dscr_err : 1; /*The raw interrupt bit turns to high level when detecting inlink descriptor error including owner error the second and third word error of inlink descriptor for Rx channel 0.*/
uint32_t out_dscr_err : 1; /*The raw interrupt bit turns to high level when detecting outlink descriptor error including owner error the second and third word error of outlink descriptor for Tx channel 0.*/
uint32_t in_dscr_empty : 1; /*The raw interrupt bit turns to high level when Rx buffer pointed by inlink is full and receiving data is not completed but there is no more inlink for Rx channel 0.*/
uint32_t out_total_eof : 1; /*The raw interrupt bit turns to high level when data corresponding a outlink (includes one link descriptor or few link descriptors) is transmitted out for Tx channel 0.*/
uint32_t infifo_full_wm : 1; /*The raw interrupt bit turns to high level when received data byte number is up to threshold configured by REG_DMA_INFIFO_FULL_THRS_CH0 in Rx FIFO of channel 0.*/
uint32_t infifo_ovf_l1 : 1; /*This raw interrupt bit turns to high level when level 1 fifo of Rx channel 0 is overflow.*/
uint32_t infifo_udf_l1 : 1; /*This raw interrupt bit turns to high level when level 1 fifo of Rx channel 0 is underflow.*/
uint32_t infifo_ovf_l3 : 1; /*This raw interrupt bit turns to high level when level 3 fifo of Rx channel 0 is overflow.*/
uint32_t infifo_udf_l3 : 1; /*This raw interrupt bit turns to high level when level 3 fifo of Rx channel 0 is underflow.*/
uint32_t outfifo_ovf_l1 : 1; /*This raw interrupt bit turns to high level when level 1 fifo of Tx channel 0 is overflow.*/
uint32_t outfifo_udf_l1 : 1; /*This raw interrupt bit turns to high level when level 1 fifo of Tx channel 0 is underflow.*/
uint32_t outfifo_ovf_l3 : 1; /*This raw interrupt bit turns to high level when level 3 fifo of Tx channel 0 is overflow.*/
uint32_t outfifo_udf_l3 : 1; /*This raw interrupt bit turns to high level when level 3 fifo of Tx channel 0 is underflow.*/
uint32_t reserved18 : 14; /*reserved*/
};
uint32_t val;
} int_raw[5];
uint32_t reserved_3c;
union {
struct {
uint32_t in_done : 1; /*The raw interrupt status bit for the IN_DONE_CH_INT interrupt.*/
uint32_t in_suc_eof : 1; /*The raw interrupt status bit for the IN_SUC_EOF_CH_INT interrupt.*/
uint32_t in_err_eof : 1; /*The raw interrupt status bit for the IN_ERR_EOF_CH_INT interrupt.*/
uint32_t out_done : 1; /*The raw interrupt status bit for the OUT_DONE_CH_INT interrupt.*/
uint32_t out_eof : 1; /*The raw interrupt status bit for the OUT_EOF_CH_INT interrupt.*/
uint32_t in_dscr_err : 1; /*The raw interrupt status bit for the IN_DSCR_ERR_CH_INT interrupt.*/
uint32_t out_dscr_err : 1; /*The raw interrupt status bit for the OUT_DSCR_ERR_CH_INT interrupt.*/
uint32_t in_dscr_empty : 1; /*The raw interrupt status bit for the IN_DSCR_EMPTY_CH_INT interrupt.*/
uint32_t out_total_eof : 1; /*The raw interrupt status bit for the OUT_TOTAL_EOF_CH_INT interrupt.*/
uint32_t infifo_full_wm : 1; /*The raw interrupt status bit for the INFIFO_FULL_WM_CH_INT interrupt.*/
uint32_t infifo_ovf_l1 : 1; /*The raw interrupt status bit for the INFIFO_OVF_L1_CH_INT interrupt.*/
uint32_t infifo_udf_l1 : 1; /*The raw interrupt status bit for the INFIFO_UDF_L1_CH_INT interrupt.*/
uint32_t infifo_ovf_l3 : 1; /*The raw interrupt status bit for the INFIFO_OVF_L3_CH_INT interrupt.*/
uint32_t infifo_udf_l3 : 1; /*The raw interrupt status bit for the INFIFO_UDF_L3_CH_INT interrupt.*/
uint32_t outfifo_ovf_l1 : 1; /*The raw interrupt status bit for the OUTFIFO_OVF_L1_CH_INT interrupt.*/
uint32_t outfifo_udf_l1 : 1; /*The raw interrupt status bit for the OUTFIFO_UDF_L1_CH_INT interrupt.*/
uint32_t outfifo_ovf_l3 : 1; /*The raw interrupt status bit for the OUTFIFO_OVF_L3_CH_INT interrupt.*/
uint32_t outfifo_udf_l3 : 1; /*The raw interrupt status bit for the OUTFIFO_UDF_L3_CH_INT interrupt.*/
uint32_t reserved18 : 14; /*reserved*/
};
uint32_t val;
} int_st[5];
union {
struct {
uint32_t in_done : 1; /*The interrupt enable bit for the IN_DONE_CH_INT interrupt.*/
uint32_t in_suc_eof : 1; /*The interrupt enable bit for the IN_SUC_EOF_CH_INT interrupt.*/
uint32_t in_err_eof : 1; /*The interrupt enable bit for the IN_ERR_EOF_CH_INT interrupt.*/
uint32_t out_done : 1; /*The interrupt enable bit for the OUT_DONE_CH_INT interrupt.*/
uint32_t out_eof : 1; /*The interrupt enable bit for the OUT_EOF_CH_INT interrupt.*/
uint32_t in_dscr_err : 1; /*The interrupt enable bit for the IN_DSCR_ERR_CH_INT interrupt.*/
uint32_t out_dscr_err : 1; /*The interrupt enable bit for the OUT_DSCR_ERR_CH_INT interrupt.*/
uint32_t in_dscr_empty : 1; /*The interrupt enable bit for the IN_DSCR_EMPTY_CH_INT interrupt.*/
uint32_t out_total_eof : 1; /*The interrupt enable bit for the OUT_TOTAL_EOF_CH_INT interrupt.*/
uint32_t infifo_full_wm : 1; /*The interrupt enable bit for the INFIFO_FULL_WM_CH_INT interrupt.*/
uint32_t infifo_ovf_l1 : 1; /*The interrupt enable bit for the INFIFO_OVF_L1_CH_INT interrupt.*/
uint32_t infifo_udf_l1 : 1; /*The interrupt enable bit for the INFIFO_UDF_L1_CH_INT interrupt.*/
uint32_t infifo_ovf_l3 : 1; /*The interrupt enable bit for the INFIFO_OVF_L3_CH_INT interrupt.*/
uint32_t infifo_udf_l3 : 1; /*The interrupt enable bit for the INFIFO_UDF_L3_CH_INT interrupt.*/
uint32_t outfifo_ovf_l1 : 1; /*The interrupt enable bit for the OUTFIFO_OVF_L1_CH_INT interrupt.*/
uint32_t outfifo_udf_l1 : 1; /*The interrupt enable bit for the OUTFIFO_UDF_L1_CH_INT interrupt.*/
uint32_t outfifo_ovf_l3 : 1; /*The interrupt enable bit for the OUTFIFO_OVF_L3_CH_INT interrupt.*/
uint32_t outfifo_udf_l3 : 1; /*The interrupt enable bit for the OUTFIFO_UDF_L3_CH_INT interrupt.*/
uint32_t reserved18 : 14; /*reserved*/
};
uint32_t val;
} int_ena[5];
union {
struct {
uint32_t in_done : 1; /*Set this bit to clear the IN_DONE_CH_INT interrupt.*/
uint32_t in_suc_eof : 1; /*Set this bit to clear the IN_SUC_EOF_CH_INT interrupt.*/
uint32_t in_err_eof : 1; /*Set this bit to clear the IN_ERR_EOF_CH_INT interrupt.*/
uint32_t out_done : 1; /*Set this bit to clear the OUT_DONE_CH_INT interrupt.*/
uint32_t out_eof : 1; /*Set this bit to clear the OUT_EOF_CH_INT interrupt.*/
uint32_t in_dscr_err : 1; /*Set this bit to clear the IN_DSCR_ERR_CH_INT interrupt.*/
uint32_t out_dscr_err : 1; /*Set this bit to clear the OUT_DSCR_ERR_CH_INT interrupt.*/
uint32_t in_dscr_empty : 1; /*Set this bit to clear the IN_DSCR_EMPTY_CH_INT interrupt.*/
uint32_t out_total_eof : 1; /*Set this bit to clear the OUT_TOTAL_EOF_CH_INT interrupt.*/
uint32_t infifo_full_wm : 1; /*Set this bit to clear the INFIFO_FULL_WM_CH_INT interrupt.*/
uint32_t infifo_ovf_l1 : 1; /*Set this bit to clear the INFIFO_OVF_L1_CH_INT interrupt.*/
uint32_t infifo_udf_l1 : 1; /*Set this bit to clear the INFIFO_UDF_L1_CH_INT interrupt.*/
uint32_t infifo_ovf_l3 : 1; /*Set this bit to clear the INFIFO_OVF_L3_CH_INT interrupt.*/
uint32_t infifo_udf_l3 : 1; /*Set this bit to clear the INFIFO_UDF_L3_CH_INT interrupt.*/
uint32_t outfifo_ovf_l1 : 1; /*Set this bit to clear the OUTFIFO_OVF_L1_CH_INT interrupt.*/
uint32_t outfifo_udf_l1 : 1; /*Set this bit to clear the OUTFIFO_UDF_L1_CH_INT interrupt.*/
uint32_t outfifo_ovf_l3 : 1; /*Set this bit to clear the OUTFIFO_OVF_L3_CH_INT interrupt.*/
uint32_t outfifo_udf_l3 : 1; /*Set this bit to clear the OUTFIFO_UDF_L3_CH_INT interrupt.*/
uint32_t reserved18 : 14; /*reserved*/
};
uint32_t val;
} int_clr[5];
union {
struct {
uint32_t infifo_full_l1 : 1; /*L1 Rx FIFO full signal for Rx channel 0.*/
uint32_t infifo_empty_l1 : 1; /*L1 Rx FIFO empty signal for Rx channel 0.*/
uint32_t infifo_full_l2 : 1; /*L2 Rx FIFO full signal for Rx channel 0.*/
uint32_t infifo_empty_l2 : 1; /*L2 Rx FIFO empty signal for Rx channel 0.*/
uint32_t infifo_full_l3 : 1; /*L3 Rx FIFO full signal for Rx channel 0.*/
uint32_t infifo_empty_l3 : 1; /*L3 Rx FIFO empty signal for Rx channel 0.*/
uint32_t infifo_cnt_l1 : 5; /*The register stores the byte number of the data in L1 Rx FIFO for Rx channel 0.*/
uint32_t infifo_cnt_l2 : 7; /*The register stores the byte number of the data in L2 Rx FIFO for Rx channel 0.*/
uint32_t infifo_cnt_l3 : 5; /*The register stores the byte number of the data in L3 Rx FIFO for Rx channel 0.*/
uint32_t in_remain_under_1b_l3 : 1; /*reserved*/
uint32_t in_remain_under_2b_l3 : 1; /*reserved*/
uint32_t in_remain_under_3b_l3 : 1; /*reserved*/
uint32_t in_remain_under_4b_l3 : 1; /*reserved*/
uint32_t in_buf_hungry : 1; /*reserved*/
uint32_t reserved28 : 4; /*reserved*/
};
uint32_t val;
} infifo_status[5];
union {
struct {
uint32_t outfifo_full_l1 : 1; /*L1 Tx FIFO full signal for Tx channel 0.*/
uint32_t outfifo_empty_l1 : 1; /*L1 Tx FIFO empty signal for Tx channel 0.*/
uint32_t outfifo_full_l2 : 1; /*L2 Tx FIFO full signal for Tx channel 0.*/
uint32_t outfifo_empty_l2 : 1; /*L2 Tx FIFO empty signal for Tx channel 0.*/
uint32_t outfifo_full_l3 : 1; /*L3 Tx FIFO full signal for Tx channel 0.*/
uint32_t outfifo_empty_l3 : 1; /*L3 Tx FIFO empty signal for Tx channel 0.*/
uint32_t outfifo_cnt_l1 : 5; /*The register stores the byte number of the data in L1 Tx FIFO for Tx channel 0.*/
uint32_t outfifo_cnt_l2 : 7; /*The register stores the byte number of the data in L2 Tx FIFO for Tx channel 0.*/
uint32_t outfifo_cnt_l3 : 5; /*The register stores the byte number of the data in L3 Tx FIFO for Tx channel 0.*/
uint32_t out_remain_under_1b_l3 : 1; /*reserved*/
uint32_t out_remain_under_2b_l3 : 1; /*reserved*/
uint32_t out_remain_under_3b_l3 : 1; /*reserved*/
uint32_t out_remain_under_4b_l3 : 1; /*reserved*/
uint32_t reserved27 : 5; /*reserved*/
};
uint32_t val;
} outfifo_status[5];
union {
struct {
uint32_t outfifo_wdata : 9; /*This register stores the data that need to be pushed into DMA FIFO.*/
uint32_t outfifo_push : 1; /*Set this bit to push data into DMA FIFO.*/
uint32_t reserved10 : 22; /*reserved*/
};
uint32_t val;
} out_push[5];
union {
struct {
uint32_t infifo_rdata : 12; /*This register stores the data popping from DMA FIFO.*/
uint32_t infifo_pop : 1; /*Set this bit to pop data from DMA FIFO.*/
uint32_t reserved13 : 19; /*reserved*/
};
uint32_t val;
} in_pop[5];
union {
struct {
uint32_t addr : 20; /*This register stores the 20 least significant bits of the first outlink descriptor's address.*/
uint32_t stop : 1; /*Set this bit to stop dealing with the outlink descriptors.*/
uint32_t start : 1; /*Set this bit to start dealing with the outlink descriptors.*/
uint32_t restart : 1; /*Set this bit to restart a new outlink from the last address.*/
uint32_t park : 1; /*1: the outlink descriptor's FSM is in idle state. 0: the outlink descriptor's FSM is working.*/
uint32_t reserved24 : 8;
};
uint32_t val;
} out_link[5];
union {
struct {
uint32_t addr : 20; /*This register stores the 20 least significant bits of the first inlink descriptor's address.*/
uint32_t auto_ret : 1; /*Set this bit to return to current inlink descriptor's address when there are some errors in current receiving data.*/
uint32_t stop : 1; /*Set this bit to stop dealing with the inlink descriptors.*/
uint32_t start : 1; /*Set this bit to start dealing with the inlink descriptors.*/
uint32_t restart : 1; /*Set this bit to mount a new inlink descriptor.*/
uint32_t park : 1; /*1: the inlink descriptor's FSM is in idle state. 0: the inlink descriptor's FSM is working.*/
uint32_t reserved25 : 7;
};
uint32_t val;
} in_link[5];
union {
struct {
uint32_t inlink_dscr_addr : 18; /*This register stores the current inlink descriptor's address.*/
uint32_t in_dscr_state : 2; /*reserved*/
uint32_t in_state : 3; /*reserved*/
uint32_t reserved23 : 9; /*reserved*/
};
uint32_t val;
} in_state[5];
union {
struct {
uint32_t outlink_dscr_addr : 18; /*This register stores the current outlink descriptor's address.*/
uint32_t out_dscr_state : 2; /*reserved*/
uint32_t out_state : 3; /*reserved*/
uint32_t reserved23 : 9; /*reserved*/
};
uint32_t val;
} out_state[5];
uint32_t out_eof_des_addr[5]; /*This register stores the address of the outlink descriptor when the EOF bit in this descriptor is 1.*/
uint32_t in_suc_eof_des_addr[5]; /*This register stores the address of the inlink descriptor when the EOF bit in this descriptor is 1.*/
uint32_t in_err_eof_des_addr[5]; /*This register stores the address of the inlink descriptor when there are some errors in current receiving data. Only used when peripheral is UHCI0.*/
uint32_t out_eof_bfr_des_addr[5]; /*This register stores the address of the outlink descriptor before the last outlink descriptor.*/
union {
struct {
uint32_t ahb_testmode : 3; /*reserved*/
uint32_t reserved3 : 1; /*reserved*/
uint32_t ahb_testaddr : 2; /*reserved*/
uint32_t reserved6 : 26; /*reserved*/
};
uint32_t val;
} ahb_test;
uint32_t in_dscr[5]; /*The address of the current inlink descriptor x.*/
uint32_t in_dscr_bf0[5]; /*The address of the last inlink descriptor x-1.*/
uint32_t in_dscr_bf1[5]; /*The address of the second-to-last inlink descriptor x-2.*/
uint32_t out_dscr[5]; /*The address of the current outlink descriptor y.*/
uint32_t out_dscr_bf0[5]; /*The address of the last outlink descriptor y-1.*/
uint32_t out_dscr_bf1[5]; /*The address of the second-to-last outlink descriptor y-2.*/
union {
struct {
uint32_t reserved0 : 4; /*reserved*/
uint32_t ram_force_pd : 1; /*power down*/
uint32_t ram_force_pu : 1;
uint32_t ram_clk_fo : 1; /*1: Force to open the clock and bypass the gate-clock when accessing the RAM in DMA. 0: A gate-clock will be used when accessing the RAM in DMA.*/
uint32_t reserved7 : 25; /*reserved*/
};
uint32_t val;
} pd_conf;
union {
struct {
uint32_t tx_weight : 4; /*The weight of Tx channel 0.*/
uint32_t rx_weight : 4; /*The weight of Rx channel 0.*/
uint32_t reserved8 : 24;
};
uint32_t val;
} wight[5];
union {
struct {
uint32_t tx_pri : 4; /*The priority of Tx channel 0. The larger the value, the higher the priority.*/
uint32_t rx_pri : 4; /*The priority of Rx channel 0. The larger the value, the higher the priority.*/
uint32_t reserved8 : 24;
};
uint32_t val;
} pri[5];
union {
struct {
uint32_t ahbm_rst_inter : 1; /*Set this bit then clear this bit to reset the internal ahb FSM.*/
uint32_t ahbm_rst_exter : 1; /*Set this bit then clear this bit to reset the external ahb FSM.*/
uint32_t arb_pri_dis : 1; /*Set this bit to disable priority arbitration function.*/
uint32_t clk_en : 1;
uint32_t reserved4 : 28;
};
uint32_t val;
} misc_conf;
union {
struct {
uint32_t peri_in_sel : 6; /*This register is used to select peripheral for Rx channel 0. 0:SPI2*/
uint32_t peri_out_sel : 6; /*This register is used to select peripheral for Tx channel 0. 0:SPI2*/
uint32_t reserved12 : 20;
};
uint32_t val;
} peri_sel[5];
union {
struct {
uint32_t in_size : 5; /*This register is used to configure the size of L2 Rx FIFO for Rx channel 0. 0:16 bytes*/
uint32_t out_size : 5; /*This register is used to configure the size of L2 Tx FIFO for Tx channel 0. 0:16 bytes*/
uint32_t reserved10 : 22;
};
uint32_t val;
} sram_size[5];
uint32_t date; /*register version.*/
} gdma_dev_t;
extern gdma_dev_t GDMA;
#ifdef __cplusplus
}
#endif

View File

@@ -42,9 +42,6 @@ typedef enum {
PERIPH_FSPI_MODULE, //SPI2
PERIPH_HSPI_MODULE, //SPI3
PERIPH_VSPI_MODULE, //SPI4
PERIPH_SPI2_DMA_MODULE,
PERIPH_SPI3_DMA_MODULE,
PERIPH_SPI_SHARED_DMA_MODULE, //this DMA is shared by SPI1 and SPI4
PERIPH_SDMMC_MODULE,
PERIPH_TWAI_MODULE,
PERIPH_RNG_MODULE,
@@ -56,9 +53,8 @@ typedef enum {
PERIPH_AES_MODULE,
PERIPH_SHA_MODULE,
PERIPH_RSA_MODULE,
PERIPH_CRYPTO_DMA_MODULE, //this DMA is shared between AES and SHA
PERIPH_AES_DMA_MODULE,
PERIPH_SHA_DMA_MODULE,
PERIPH_SYSTIMER_MODULE,
PERIPH_GDMA_MODULE,
PERIPH_MODULE_MAX
} periph_module_t;
@@ -102,7 +98,7 @@ typedef enum {
ETS_PWM3_INTR_SOURCE, /**< interruot of PWM3, level*/
ETS_LEDC_INTR_SOURCE, /**< interrupt of LED PWM, level*/
ETS_EFUSE_INTR_SOURCE, /**< interrupt of efuse, level, not likely to use*/
ETS_TWAI_INTR_SOURCE , /**< interrupt of can, level*/
ETS_TWAI_INTR_SOURCE, /**< interrupt of can, level*/
ETS_USB_INTR_SOURCE, /**< interrupt of USB, level*/
ETS_RTC_CORE_INTR_SOURCE, /**< interrupt of rtc core, level, include rtc watchdog*/
ETS_RMT_INTR_SOURCE, /**< interrupt of remote controller, level*/
@@ -132,12 +128,11 @@ typedef enum {
ETS_DCACHE_SYNC0_INTR_SOURCE, /**< interrupt of data cache sync done, LEVEL*/
ETS_ICACHE_SYNC0_INTR_SOURCE, /**< interrupt of instruction cache sync done, LEVEL*/
ETS_APB_ADC_INTR_SOURCE, /**< interrupt of APB ADC, LEVEL*/
ETS_CRYPTO_DMA_INTR_SOURCE = 66, /**< interrupt of encrypted DMA, LEVEL*/
ETS_CRYPTO_DMA1_INTR_SOURCE,
ETS_CRYPTO_DMA2_INTR_SOURCE,
ETS_CRYPTO_DMA3_INTR_SOURCE,
ETS_CRYPTO_DMA4_INTR_SOURCE,
ETS_DMA_CH0_INTR_SOURCE, /**< interrupt of general DMA channel 0, LEVEL*/
ETS_DMA_CH1_INTR_SOURCE, /**< interrupt of general DMA channel 1, LEVEL*/
ETS_DMA_CH2_INTR_SOURCE, /**< interrupt of general DMA channel 2, LEVEL*/
ETS_DMA_CH3_INTR_SOURCE, /**< interrupt of general DMA channel 3, LEVEL*/
ETS_DMA_CH4_INTR_SOURCE, /**< interrupt of general DMA channel 4, LEVEL*/
ETS_RSA_INTR_SOURCE, /**< interrupt of RSA accelerator, level*/
ETS_AES_INTR_SOURCE, /**< interrupt of AES accelerator, level*/
ETS_SHA_INTR_SOURCE, /**< interrupt of SHA accelerator, level*/
@@ -153,8 +148,7 @@ typedef enum {
ETS_CORE0_PIF_PMS_SIZE_INTR_SOURCE,
ETS_CORE1_IRAM0_PMS_INTR_SOURCE,
ETS_CORE1_DRAM0_PMS_INTR_SOURCE,
ETS_CORE1_PIF_PMS_INTR_SOURCE = 86,
ETS_CORE1_PIF_PMS_INTR_SOURCE,
ETS_CORE1_PIF_PMS_SIZE_INTR_SOURCE,
ETS_CACHE_CORE0_ACS_INTR_SOURCE,
ETS_CACHE_CORE1_ACS_INTR_SOURCE,

View File

@@ -6,4 +6,9 @@
#pragma once
#define SOC_TWAI_SUPPORTED 1
#define SOC_CPU_CORES_NUM 2
#define SOC_GDMA_SUPPORTED 1
#define SOC_CPU_CORES_NUM 2
// Attention: These fixed DMA channels are a temporary workaround before we have a centralized DMA controller API to help allocate the channels dynamically
// Remove them when GDMA driver API is ready
#define SOC_GDMA_M2M_DMA_CHANNEL (0)

View File

@@ -118,9 +118,9 @@ INPUT = \
$(IDF_PATH)/components/driver/include/driver/dac_common.h \
$(IDF_PATH)/components/driver/include/driver/uart.h \
$(IDF_PATH)/components/esp_adc_cal/include/esp_adc_cal.h \
$(IDF_PATH)/components/esp32s2/include/cp_dma.h \
$(IDF_PATH)/components/esp32s2/include/esp_hmac.h \
$(IDF_PATH)/components/esp32s2/include/esp_ds.h \
$(IDF_PATH)/components/hal/include/hal/dma_types.h \
$(IDF_PATH)/components/hal/include/hal/gpio_types.h \
$(IDF_PATH)/components/hal/include/hal/uart_types.h \
$(IDF_PATH)/components/hal/include/hal/mcpwm_types.h \
@@ -255,6 +255,8 @@ INPUT = \
$(IDF_PATH)/components/esp_https_ota/include/esp_https_ota.h \
## Sleep
$(IDF_PATH)/components/esp_system/include/esp_sleep.h \
## Async memory copy
$(IDF_PATH)/components/esp_system/include/esp_async_memcpy.h \
## Logging
$(IDF_PATH)/components/log/include/esp_log.h \
## Base MAC address

View File

@@ -4,45 +4,46 @@ The Async memcpy API
Overview
--------
ESP32-S2 features a dedicated DMA (a.k.a `CP_DMA`) which aims to offload internal memory copy operations from the CPU. When using 160MHz CPU, copying 4KB of data via memcpy() takes 14us, copying via cp_dma_memcpy can complete in 7us.
{IDF_TARGET_NAME} has a DMA engine which can help to offload internal memory copy operations from the CPU in an asynchronous way.
The async memcpy API wraps all DMA configurations and operations, the signature of :cpp:func:`cp_dma_memcpy` is almost the same to the standard libc one.
The async memcpy API wraps all DMA configurations and operations, the signature of :cpp:func:`esp_async_memcpy` is almost the same to the standard libc one.
Thanks to the benefit of the DMA, we don't have to wait for each memory copy to be done before we issue another memcpy request. By providing a user defined callback, it's still possible to know when memcpy has finished.
Thanks to the benefit of the DMA, we don't have to wait for each memory copy to be done before we issue another memcpy request. It is still possible to know when a memcpy has finished by registering a memcpy callback function.
.. note::
Memory copy with external PSRAM is not supported on ESP32-S2, :cpp:func:`cp_dma_memcpy` will abort returning an error if memory address does not reside in SRAM.
.. only:: esp32s2
.. note::
Memory copy from/to external PSRAM is not supported on ESP32-S2, :cpp:func:`esp_async_memcpy` will abort returning an error if buffer address is not in SRAM.
Configure and Install driver
----------------------------
:cpp:func:`cp_dma_driver_install` is used to install `CP_DMA` driver with user's configuration. Please note that async memcpy has to be called with the handle returned by :cpp:func:`cp_dma_driver_install`.
:cpp:func:`esp_async_memcpy_install` is used to install the driver with user's configuration. Please note that async memcpy has to be called with the handle returned from :cpp:func:`esp_async_memcpy_install`.
Driver configuration is described in :cpp:type:`cp_dma_config_t`:
:cpp:member:`max_out_stream` and :cpp:member:`max_in_stream`: You can increase/decrease the number if you want to support more/less memcpy operations to be pending in background.
:cpp:member:`flags`: Control special behavior of `CP_DMA`. If `CP_DMA_FLAGS_WORK_WITH_CACHE_DISABLE` is set in the flags, then `CP_DMA` driver can work even when cache is disabled. Please note, it would increase the consumption of SRAM.
Driver configuration is described in :cpp:type:`async_memcpy_config_t`:
:cpp:member:`backlog`: This is used to configure the maximum number of DMA operations that can be pending in the background at the same time.
:cpp:member:`flags`: This is used to enable some special driver features.
:c:macro:`CP_DMA_DEFAULT_CONFIG` provides a default configuration, which specifies the maximum data streams used by underlay DMA engine to 8.
:c:macro:`ASYNC_MEMCPY_DEFAULT_CONFIG` provides a default configuration, which sets the backlog to 8.
.. highlight:: c
::
cp_dma_config_t config = CP_DMA_DEFAULT_CONFIG();
config.max_in_stream = 4; // update the maximum data stream supported by DMA
config.max_out_stream = 4;
config.flags = CP_DMA_FLAGS_WORK_WITH_CACHE_DISABLE; // the driver can work even when cache is disabled
cp_dma_driver_t driver = NULL;
ESP_ERROR_CHECK(cp_dma_driver_install(&config, &driver)); // install driver, return driver handle
async_memcpy_config_t config = ASYNC_MEMCPY_DEFAULT_CONFIG();
// update the maximum number of memory copy transactions that can be pending in the background
config.backlog = 16;
async_memcpy_t driver = NULL;
ESP_ERROR_CHECK(esp_async_memcpy_install(&config, &driver)); // install driver, return driver handle
Send memory copy request
------------------------
:cpp:func:`cp_dma_memcpy` is the API to send memory copy request to DMA engine. It must be called after `CP_DMA` driver is installed successfully. This API is thread safe, so it can be called from different tasks.
:cpp:func:`esp_async_memcpy` is the API to send memory copy request to DMA engine. It must be called after driver is installed successfully. This API is thread safe, so it can be called from different tasks.
Different from the libc version of `memcpy`, user can pass a callback to :cpp:func:`cp_dma_memcpy` when it's necessary. The callback is executed in the ISR context, make sure you won't violate the the restriction applied to ISR handler.
Different from the libc version of `memcpy`, the user should also pass a callback to :cpp:func:`esp_async_memcpy`, if it's necessary to be notified when the memory copy is done. The callback is executed in the ISR context, so make sure you won't violate the restrictions applied to ISR handlers.
Besides that, the callback function should reside in IRAM space by applying `IRAM_ATTR` attribute. The prototype of the callback function is :cpp:type:`cp_dma_isr_cb_t`, please note that, the callback function should return true if there's a high priority task woken up due to any operations done in the callback.
Besides that, the callback function should reside in IRAM space by applying the `IRAM_ATTR` attribute. The prototype of the callback function is :cpp:type:`async_memcpy_isr_cb_t`; please note that the callback function should return true if it wakes up a high priority task by some API like :cpp:func:`xSemaphoreGiveFromISR`.
.. highlight:: c
@@ -51,30 +52,25 @@ Besides that, the callback function should reside in IRAM space by applying `IRA
Semphr_Handle_t semphr; // already initialized somewhere else
// Callback implementation, running in ISR context
static IRAM_ATTR bool memcpy_cb(cp_dma_driver_t drv_hdl, cp_dma_event_t *event, void *cb_args)
static IRAM_ATTR bool my_async_memcpy_cb(async_memcpy_t mcp_hdl, async_memcpy_event_t *event, void *cb_args)
{
SemaphoreHandle_t sem = (SemaphoreHandle_t)cb_args;
BaseType_t high_task_wakeup = pdFALSE;
switch (event->id) {
case CP_DMA_EVENT_M2M_DONE:
SemphrGiveInISR(semphr, &high_task_wakeup); // high_task_wakeup set to pdTRUE if some high priority task unblocked
break;
default:
break;
}
SemphrGiveInISR(semphr, &high_task_wakeup); // high_task_wakeup set to pdTRUE if some high priority task unblocked
return high_task_wakeup == pdTRUE;
}
// Called from user's context
ESP_ERROR_CHECK(cp_dma_memcpy(driver, to, from, copy_len, memcpy_cb, cb_args));
ESP_ERROR_CHECK(esp_async_memcpy(driver_handle, to, from, copy_len, my_async_memcpy_cb, my_semaphore));
//Do something else here
SemphrTake(semphr, ...); //wait until the buffer copy is done
SemphrTake(my_semaphore, ...); //wait until the buffer copy is done
Uninstall driver (optional)
---------------------------
:cpp:func:`cp_dma_driver_uninstall` is used to uninstall `CP_DMA` driver. It's not necessary to uninstall the driver after each memcpy operation. If your application won't use `CP_DMA` anymore, then this API can recycle the memory used by driver.
:cpp:func:`esp_async_memcpy_uninstall` is used to uninstall asynchronous memcpy driver. It's not necessary to uninstall the driver after each memcpy operation. If you know your application won't use this driver anymore, then this API can recycle the memory for you.
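A minimal sketch, assuming ``driver`` is the handle returned earlier from :cpp:func:`esp_async_memcpy_install`:

.. highlight:: c

::

    // all outstanding copy requests have completed, recycle the driver resources
    ESP_ERROR_CHECK(esp_async_memcpy_uninstall(driver));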
API Reference
-------------
.. include-build-file:: inc/cp_dma.inc
.. include-build-file:: inc/esp_async_memcpy.inc