feat(gdma): set burst size and return alignment constraint

The burst size can affect the DMA buffer alignment constraint, so the driver now reports the resulting alignment back to the caller via gdma_get_alignment_constraints().
morris 2024-04-25 18:16:29 +08:00
parent 19c784efef
commit dc6989796a
33 changed files with 522 additions and 356 deletions
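A minimal usage sketch of the new APIs (the `tx_chan` handle, the chosen burst size, and the `ESP_ERROR_CHECK` wrapping are illustrative, not part of this commit):

// before (now deprecated): alignment was an input
gdma_transfer_ability_t ability = {
    .sram_trans_align = 4,
    .psram_trans_align = 0, // 0 falls back to the PSRAM data cache line size
};
gdma_set_transfer_ability(tx_chan, &ability);

// after: burst size and external-memory access are the inputs,
// the alignment constraint becomes an output
gdma_transfer_config_t trans_cfg = {
    .max_data_burst_size = 16, // 0 disables data burst; otherwise must be a power of 2
    .access_ext_mem = false,   // set true if the channel will transfer PSRAM buffers
};
ESP_ERROR_CHECK(gdma_config_transfer(tx_chan, &trans_cfg));

size_t int_mem_alignment = 0;
size_t ext_mem_alignment = 0;
ESP_ERROR_CHECK(gdma_get_alignment_constraints(tx_chan, &int_mem_alignment, &ext_mem_alignment));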

View File

@ -240,12 +240,13 @@ static esp_err_t alloc_dma_chan(spi_host_device_t host_id, spi_dma_chan_t dma_ch
gdma_connect(dma_ctx->rx_dma_chan, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_SPI, 3));
}
#endif
gdma_transfer_ability_t ability = {
.psram_trans_align = 0, // fall back to use the same size of the psram data cache line size
.sram_trans_align = 4,
// TODO: add support to allow SPI transfer PSRAM buffer
gdma_transfer_config_t trans_cfg = {
.max_data_burst_size = 16,
.access_ext_mem = false,
};
ESP_RETURN_ON_ERROR(gdma_set_transfer_ability(dma_ctx->tx_dma_chan, &ability), SPI_TAG, "set gdma tx transfer ability failed");
ESP_RETURN_ON_ERROR(gdma_set_transfer_ability(dma_ctx->rx_dma_chan, &ability), SPI_TAG, "set gdma rx transfer ability failed");
ESP_RETURN_ON_ERROR(gdma_config_transfer(dma_ctx->tx_dma_chan, &trans_cfg), SPI_TAG, "config gdma tx transfer failed");
ESP_RETURN_ON_ERROR(gdma_config_transfer(dma_ctx->rx_dma_chan, &trans_cfg), SPI_TAG, "config gdma rx transfer failed");
}
return ret;
}

View File

@ -68,13 +68,16 @@ if(NOT BOOTLOADER_BUILD)
endif()
if(CONFIG_SOC_GDMA_SUPPORTED)
list(APPEND srcs "dma/gdma.c")
list(APPEND srcs "dma/gdma.c" "deprecated/gdma_legacy.c")
if(CONFIG_SOC_GDMA_SUPPORT_SLEEP_RETENTION)
list(APPEND srcs "dma/gdma_sleep_retention.c")
endif()
if(CONFIG_SOC_GDMA_SUPPORT_ETM)
list(APPEND srcs "dma/gdma_etm.c")
endif()
if(CONFIG_SOC_GDMA_SUPPORT_CRC)
list(APPEND srcs "dma/gdma_crc.c")
endif()
endif()
if(CONFIG_SOC_GP_LDO_SUPPORTED)

View File

@ -0,0 +1,62 @@
/*
* SPDX-FileCopyrightText: 2020-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdlib.h>
#include <string.h>
#include <sys/param.h>
#include "sdkconfig.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "esp_log.h"
#include "esp_check.h"
#include "../dma/gdma_priv.h"
#include "hal/cache_hal.h"
#include "hal/cache_ll.h"
static const char *TAG = "gdma";
esp_err_t gdma_set_transfer_ability(gdma_channel_handle_t dma_chan, const gdma_transfer_ability_t *ability)
{
ESP_RETURN_ON_FALSE(dma_chan && ability, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
gdma_pair_t *pair = dma_chan->pair;
gdma_group_t *group = pair->group;
gdma_hal_context_t *hal = &group->hal;
size_t int_mem_alignment = ability->sram_trans_align;
size_t ext_mem_alignment = ability->psram_trans_align;
// alignment should be 2^n
ESP_RETURN_ON_FALSE((int_mem_alignment & (int_mem_alignment - 1)) == 0, ESP_ERR_INVALID_ARG,
TAG, "invalid sram alignment: %zu", int_mem_alignment);
uint32_t ext_mem_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_DATA);
if (ext_mem_alignment == 0) {
// fall back to use the same size of the psram data cache line size
ext_mem_alignment = ext_mem_cache_line_size;
}
if ((ext_mem_cache_line_size > 0) && (ext_mem_alignment > ext_mem_cache_line_size)) {
ESP_RETURN_ON_FALSE(((ext_mem_alignment % ext_mem_cache_line_size) == 0), ESP_ERR_INVALID_ARG,
TAG, "ext_mem_alignment(%d) should be multiple of the ext_mem_cache_line_size(%"PRIu32")",
ext_mem_alignment, ext_mem_cache_line_size);
}
// if the DMA can't access the PSRAM, this HAL function is no-op
gdma_hal_set_burst_size(hal, pair->pair_id, dma_chan->direction, ext_mem_alignment);
// TX channel can always enable burst mode, no matter data alignment
bool en_burst = true;
if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_RX) {
// RX channel burst mode depends on specific data alignment
en_burst = int_mem_alignment >= 4;
}
gdma_hal_enable_burst(hal, pair->pair_id, dma_chan->direction, en_burst, en_burst);
dma_chan->int_mem_alignment = int_mem_alignment;
dma_chan->ext_mem_alignment = ext_mem_alignment;
ESP_LOGD(TAG, "%s channel (%d,%d), (%u:%u) bytes aligned, burst %s", dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX ? "tx" : "rx",
group->group_id, pair->pair_id, int_mem_alignment, ext_mem_alignment, en_burst ? "enabled" : "disabled");
return ESP_OK;
}

View File

@ -29,6 +29,7 @@
#include <stdlib.h>
#include <string.h>
#include <sys/cdefs.h>
#include <sys/param.h>
#include "sdkconfig.h"
#if CONFIG_GDMA_ENABLE_DEBUG_LOG
// The local log level must be defined before including esp_log.h
@ -42,10 +43,9 @@
#include "esp_log.h"
#include "esp_check.h"
#include "esp_memory_utils.h"
#include "esp_flash_encrypt.h"
#include "esp_private/periph_ctrl.h"
#include "gdma_priv.h"
#include "hal/cache_hal.h"
#include "hal/cache_ll.h"
#if CONFIG_PM_ENABLE && SOC_PM_SUPPORT_TOP_PD
#include "esp_private/gdma_sleep_retention.h"
@ -354,46 +354,68 @@ esp_err_t gdma_get_free_m2m_trig_id_mask(gdma_channel_handle_t dma_chan, uint32_
return ESP_OK;
}
esp_err_t gdma_set_transfer_ability(gdma_channel_handle_t dma_chan, const gdma_transfer_ability_t *ability)
esp_err_t gdma_config_transfer(gdma_channel_handle_t dma_chan, const gdma_transfer_config_t *config)
{
ESP_RETURN_ON_FALSE(dma_chan && ability, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
ESP_RETURN_ON_FALSE(dma_chan && config, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
uint32_t max_data_burst_size = config->max_data_burst_size;
if (max_data_burst_size) {
// burst size must be power of 2
ESP_RETURN_ON_FALSE((max_data_burst_size & (max_data_burst_size - 1)) == 0, ESP_ERR_INVALID_ARG,
TAG, "invalid max_data_burst_size: %"PRIu32, max_data_burst_size);
}
gdma_pair_t *pair = dma_chan->pair;
gdma_group_t *group = pair->group;
gdma_hal_context_t *hal = &group->hal;
size_t int_mem_alignment = 1;
size_t ext_mem_alignment = 1;
size_t sram_alignment = ability->sram_trans_align;
size_t psram_alignment = ability->psram_trans_align;
// alignment should be 2^n
ESP_RETURN_ON_FALSE((sram_alignment & (sram_alignment - 1)) == 0, ESP_ERR_INVALID_ARG,
TAG, "invalid sram alignment: %zu", sram_alignment);
uint32_t ext_mem_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_DATA);
if (psram_alignment == 0) {
// fall back to use the same size of the psram data cache line size
psram_alignment = ext_mem_cache_line_size;
}
if (psram_alignment > ext_mem_cache_line_size) {
ESP_RETURN_ON_FALSE(((psram_alignment % ext_mem_cache_line_size) == 0), ESP_ERR_INVALID_ARG,
TAG, "psram_alignment(%d) should be multiple of the ext_mem_cache_line_size(%"PRIu32")",
psram_alignment, ext_mem_cache_line_size);
// always enable descriptor burst as the descriptor is always word aligned and is in the internal SRAM
bool en_desc_burst = true;
bool en_data_burst = max_data_burst_size > 0;
gdma_hal_enable_burst(hal, pair->pair_id, dma_chan->direction, en_data_burst, en_desc_burst);
if (en_data_burst) {
gdma_hal_set_burst_size(hal, pair->pair_id, dma_chan->direction, max_data_burst_size);
}
// if the DMA can't access the PSRAM, this HAL function is no-op
gdma_hal_set_ext_mem_align(hal, pair->pair_id, dma_chan->direction, psram_alignment);
// TX channel can always enable burst mode, no matter data alignment
bool en_burst = true;
if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_RX) {
// RX channel burst mode depends on specific data alignment
en_burst = sram_alignment >= 4;
#if GDMA_LL_AHB_RX_BURST_NEEDS_ALIGNMENT
if (en_data_burst && dma_chan->direction == GDMA_CHANNEL_DIRECTION_RX) {
int_mem_alignment = MAX(int_mem_alignment, 4);
ext_mem_alignment = MAX(ext_mem_alignment, max_data_burst_size);
}
gdma_hal_enable_burst(hal, pair->pair_id, dma_chan->direction, en_burst, en_burst);
#endif
dma_chan->sram_alignment = sram_alignment;
dma_chan->psram_alignment = psram_alignment;
ESP_LOGD(TAG, "%s channel (%d,%d), (%u:%u) bytes aligned, burst %s", dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX ? "tx" : "rx",
group->group_id, pair->pair_id, sram_alignment, psram_alignment, en_burst ? "enabled" : "disabled");
// if MSPI encryption is enabled, and DMA wants to read/write external memory
if (esp_flash_encryption_enabled()) {
gdma_hal_enable_access_encrypt_mem(hal, pair->pair_id, dma_chan->direction, config->access_ext_mem);
// when the DMA accesses encrypted memory, extra alignment is needed for both internal and external memory
if (config->access_ext_mem) {
ext_mem_alignment = MAX(ext_mem_alignment, GDMA_ACCESS_ENCRYPTION_MEM_ALIGNMENT);
int_mem_alignment = MAX(int_mem_alignment, GDMA_ACCESS_ENCRYPTION_MEM_ALIGNMENT);
}
} else {
gdma_hal_enable_access_encrypt_mem(hal, pair->pair_id, dma_chan->direction, false);
}
// if the channel is not allowed to access external memory, set a super big (meaningless) alignment value
// so when the upper layer checks the alignment with an external buffer, the check should fail
if (!config->access_ext_mem) {
ext_mem_alignment = BIT(31);
}
dma_chan->int_mem_alignment = int_mem_alignment;
dma_chan->ext_mem_alignment = ext_mem_alignment;
return ESP_OK;
}
esp_err_t gdma_get_alignment_constraints(gdma_channel_handle_t dma_chan, size_t *int_mem_alignment, size_t *ext_mem_alignment)
{
ESP_RETURN_ON_FALSE(dma_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
if (int_mem_alignment) {
*int_mem_alignment = dma_chan->int_mem_alignment;
}
if (ext_mem_alignment) {
*ext_mem_alignment = dma_chan->ext_mem_alignment;
}
return ESP_OK;
}
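For reference, an upper-layer driver might validate a caller-provided buffer against the returned constraints like this (a sketch; `buf`, `len`, and the use of `esp_ptr_external_ram()` are assumptions, not code from this commit):

size_t int_align = 0;
size_t ext_align = 0;
ESP_ERROR_CHECK(gdma_get_alignment_constraints(dma_chan, &int_align, &ext_align));
size_t required = esp_ptr_external_ram(buf) ? ext_align : int_align;
// with access_ext_mem == false, ext_align is BIT(31), so any PSRAM buffer fails this check
bool ok = (((uintptr_t)buf & (required - 1)) == 0) && ((len & (required - 1)) == 0);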
@ -421,57 +443,6 @@ esp_err_t gdma_set_priority(gdma_channel_handle_t dma_chan, uint32_t priority)
return ESP_OK;
}
#if SOC_GDMA_SUPPORT_CRC
esp_err_t gdma_config_crc_calculator(gdma_channel_handle_t dma_chan, const gdma_crc_calculator_config_t *config)
{
ESP_RETURN_ON_FALSE(dma_chan && config, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
gdma_pair_t *pair = dma_chan->pair;
gdma_group_t *group = pair->group;
gdma_hal_context_t *hal = &group->hal;
switch (group->bus_id) {
#if SOC_AHB_GDMA_SUPPORTED
case SOC_GDMA_BUS_AHB:
ESP_RETURN_ON_FALSE(config->crc_bit_width <= GDMA_LL_AHB_MAX_CRC_BIT_WIDTH, ESP_ERR_INVALID_ARG, TAG, "invalid crc bit width");
break;
#endif // SOC_AHB_GDMA_SUPPORTED
#if SOC_AXI_GDMA_SUPPORTED
case SOC_GDMA_BUS_AXI:
ESP_RETURN_ON_FALSE(config->crc_bit_width <= GDMA_LL_AXI_MAX_CRC_BIT_WIDTH, ESP_ERR_INVALID_ARG, TAG, "invalid crc bit width");
break;
#endif // SOC_AXI_GDMA_SUPPORTED
default:
ESP_LOGE(TAG, "invalid bus id: %d", group->bus_id);
return ESP_ERR_INVALID_ARG;
}
// clear the previous CRC result
gdma_hal_clear_crc(hal, pair->pair_id, dma_chan->direction);
// set polynomial and initial value
gdma_hal_crc_config_t hal_config = {
.crc_bit_width = config->crc_bit_width,
.poly_hex = config->poly_hex,
.init_value = config->init_value,
.reverse_data_mask = config->reverse_data_mask,
};
gdma_hal_set_crc_poly(hal, pair->pair_id, dma_chan->direction, &hal_config);
return ESP_OK;
}
esp_err_t gdma_crc_get_result(gdma_channel_handle_t dma_chan, uint32_t *result)
{
ESP_RETURN_ON_FALSE(dma_chan && result, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
gdma_pair_t *pair = dma_chan->pair;
gdma_group_t *group = pair->group;
gdma_hal_context_t *hal = &group->hal;
*result = gdma_hal_get_crc_result(hal, pair->pair_id, dma_chan->direction);
return ESP_OK;
}
#endif // SOC_GDMA_SUPPORT_CRC
esp_err_t gdma_register_tx_event_callbacks(gdma_channel_handle_t dma_chan, gdma_tx_event_callbacks_t *cbs, void *user_data)
{
ESP_RETURN_ON_FALSE(dma_chan && cbs && dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX, ESP_ERR_INVALID_ARG, TAG, "invalid argument");

View File

@ -0,0 +1,74 @@
/*
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdlib.h>
#include <string.h>
#include <sys/cdefs.h>
#include <sys/param.h>
#include "sdkconfig.h"
#if CONFIG_GDMA_ENABLE_DEBUG_LOG
// The local log level must be defined before including esp_log.h
// Set the maximum log level for this source file
#define LOG_LOCAL_LEVEL ESP_LOG_DEBUG
#endif
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "soc/soc_caps.h"
#include "soc/periph_defs.h"
#include "esp_log.h"
#include "esp_check.h"
#include "gdma_priv.h"
static const char *TAG = "gdma";
esp_err_t gdma_config_crc_calculator(gdma_channel_handle_t dma_chan, const gdma_crc_calculator_config_t *config)
{
ESP_RETURN_ON_FALSE(dma_chan && config, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
gdma_pair_t *pair = dma_chan->pair;
gdma_group_t *group = pair->group;
gdma_hal_context_t *hal = &group->hal;
switch (group->bus_id) {
#if SOC_AHB_GDMA_SUPPORTED
case SOC_GDMA_BUS_AHB:
ESP_RETURN_ON_FALSE(config->crc_bit_width <= GDMA_LL_AHB_MAX_CRC_BIT_WIDTH, ESP_ERR_INVALID_ARG, TAG, "invalid crc bit width");
break;
#endif // SOC_AHB_GDMA_SUPPORTED
#if SOC_AXI_GDMA_SUPPORTED
case SOC_GDMA_BUS_AXI:
ESP_RETURN_ON_FALSE(config->crc_bit_width <= GDMA_LL_AXI_MAX_CRC_BIT_WIDTH, ESP_ERR_INVALID_ARG, TAG, "invalid crc bit width");
break;
#endif // SOC_AXI_GDMA_SUPPORTED
default:
ESP_LOGE(TAG, "invalid bus id: %d", group->bus_id);
return ESP_ERR_INVALID_ARG;
}
// clear the previous CRC result
gdma_hal_clear_crc(hal, pair->pair_id, dma_chan->direction);
// set polynomial and initial value
gdma_hal_crc_config_t hal_config = {
.crc_bit_width = config->crc_bit_width,
.poly_hex = config->poly_hex,
.init_value = config->init_value,
.reverse_data_mask = config->reverse_data_mask,
};
gdma_hal_set_crc_poly(hal, pair->pair_id, dma_chan->direction, &hal_config);
return ESP_OK;
}
esp_err_t gdma_crc_get_result(gdma_channel_handle_t dma_chan, uint32_t *result)
{
ESP_RETURN_ON_FALSE(dma_chan && result, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
gdma_pair_t *pair = dma_chan->pair;
gdma_group_t *group = pair->group;
gdma_hal_context_t *hal = &group->hal;
*result = gdma_hal_get_crc_result(hal, pair->pair_id, dma_chan->direction);
return ESP_OK;
}

View File

@ -32,6 +32,8 @@
#define GDMA_INTR_ALLOC_FLAGS ESP_INTR_FLAG_INTRDISABLED
#endif
#define GDMA_ACCESS_ENCRYPTION_MEM_ALIGNMENT 16 /*!< The alignment of the buffer address and size when the DMA accesses encrypted memory */
#ifdef __cplusplus
extern "C" {
#endif
@ -67,8 +69,8 @@ struct gdma_channel_t {
portMUX_TYPE spinlock; // channel level spinlock
gdma_channel_direction_t direction; // channel direction
int periph_id; // Peripheral instance ID, indicates which peripheral is connected to this GDMA channel
size_t sram_alignment; // alignment for memory in SRAM
size_t psram_alignment; // alignment for memory in PSRAM
size_t int_mem_alignment; // alignment for memory in internal memory
size_t ext_mem_alignment; // alignment for memory in external memory
esp_err_t (*del)(gdma_channel_t *channel); // channel deletion function, it's polymorphic, see `gdma_del_tx_channel` or `gdma_del_rx_channel`
struct {
uint32_t start_stop_by_etm: 1; // whether the channel is started/stopped by ETM

View File

@ -4,9 +4,6 @@
* SPDX-License-Identifier: Apache-2.0
*/
// DO NOT USE THESE APIS IN ANY APPLICATIONS
// GDMA driver is not public for end users, but for ESP-IDF developers.
#pragma once
#include <stdbool.h>
@ -37,19 +34,6 @@ typedef struct {
} flags;
} gdma_channel_alloc_config_t;
/**
* @brief GDMA transfer ability
*
* @note The alignment set in this structure is **not** a guarantee that the GDMA driver will handle non-aligned cases.
* The GDMA driver has no knowledge of the DMA buffer (address and size) used by the upper layer,
* so it is the responsibility of the **upper layer** to ensure the buffer address and size are properly aligned.
*
*/
typedef struct {
size_t sram_trans_align; /*!< DMA transfer alignment for memory in SRAM, in bytes. The driver enables/disables burst mode based on this value. 0 means no alignment is required */
size_t psram_trans_align; /*!< DMA transfer alignment for memory in PSRAM, in bytes. The driver sets proper burst block size based on the alignment value. 0 means no alignment is required */
} gdma_transfer_ability_t;
/**
* @brief Type of GDMA event data
*/
@ -200,16 +184,48 @@ esp_err_t gdma_connect(gdma_channel_handle_t dma_chan, gdma_trigger_t trig_perip
esp_err_t gdma_disconnect(gdma_channel_handle_t dma_chan);
/**
* @brief Set DMA channel transfer ability
* @brief Channel transfer configurations
*/
typedef struct {
uint32_t max_data_burst_size; /*!< Maximum burst size used when the DMA reads/writes the data buffer.
Setting it to 0 disables the data burst.
Any other value must be a power of 2, e.g., 4/8/16/32/64 */
bool access_ext_mem; /*!< Set this if the DMA transfer will access external memory */
} gdma_transfer_config_t;
/**
* @brief Configure transfer parameters for a DMA channel
*
* @note It's highly recommended to enable burst mode and set a proper burst size for the DMA channel,
*       which can significantly improve performance when accessing external memory.
*
* @param[in] dma_chan DMA channel handle, allocated by `gdma_new_channel`
* @param[in] config Transfer configurations
* @return
* - ESP_OK: Configure DMA transfer parameters successfully
* - ESP_ERR_INVALID_ARG: Configure DMA transfer parameters failed because of invalid argument
* - ESP_FAIL: Configure DMA transfer parameters failed because of other error
*/
esp_err_t gdma_config_transfer(gdma_channel_handle_t dma_chan, const gdma_transfer_config_t *config);
/**
* @brief Get the alignment constraints for internal and external memory
*
* @note You should call this function after `gdma_config_transfer`, because the latter can
*       adjust the alignment constraints based on various conditions, e.g. burst size, memory encryption, etc.
* @note You can use the returned alignment values to validate whether a DMA buffer provided by the upper layer meets the constraints.
* @note The returned alignment doesn't take the cache line size into account; if the DMA buffer is behind a cache
*       and you want to do aligned memory allocation, you should also align the buffer size to the cache line size yourself.
*
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
* @param[in] ability Transfer ability, e.g. alignment
* @param[out] int_mem_alignment Internal memory alignment
* @param[out] ext_mem_alignment External memory alignment
* @return
* - ESP_OK: Set DMA channel transfer ability successfully
* - ESP_ERR_INVALID_ARG: Set DMA channel transfer ability failed because of invalid argument
* - ESP_FAIL: Set DMA channel transfer ability failed because of other error
* - ESP_OK: Get alignment constraints successfully
* - ESP_ERR_INVALID_ARG: Get alignment constraints failed because of invalid argument
* - ESP_FAIL: Get alignment constraints failed because of other error
*/
esp_err_t gdma_set_transfer_ability(gdma_channel_handle_t dma_chan, const gdma_transfer_ability_t *ability);
esp_err_t gdma_get_alignment_constraints(gdma_channel_handle_t dma_chan, size_t *int_mem_alignment, size_t *ext_mem_alignment);
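As an example of the note above, allocating a PSRAM DMA buffer that satisfies both the returned constraint and the cache line size might look like this (a sketch; `rx_chan`, `raw_size`, and the heap capability flags are assumptions, and it presumes `access_ext_mem` was enabled in `gdma_config_transfer`):

size_t ext_align = 0;
ESP_ERROR_CHECK(gdma_get_alignment_constraints(rx_chan, NULL, &ext_align));
size_t cache_line = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_DATA);
size_t align = MAX(ext_align, cache_line);
size_t buf_size = (raw_size + align - 1) & ~(align - 1); // round the size up to the alignment
uint8_t *buf = heap_caps_aligned_calloc(align, 1, buf_size, MALLOC_CAP_SPIRAM | MALLOC_CAP_DMA);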
/**
* @brief Apply channel strategy for GDMA channel
@ -458,6 +474,36 @@ esp_err_t gdma_config_crc_calculator(gdma_channel_handle_t dma_chan, const gdma_
esp_err_t gdma_crc_get_result(gdma_channel_handle_t dma_chan, uint32_t *result);
#endif // SOC_GDMA_SUPPORT_CRC
/****************************************************************************************
* Deprecated APIs
****************************************************************************************/
/**
* @brief GDMA transfer ability
*
* @note The alignment set in this structure is **not** a guarantee that the GDMA driver will handle non-aligned cases.
* The GDMA driver has no knowledge of the DMA buffer (address and size) used by the upper layer,
* so it is the responsibility of the **upper layer** to ensure the buffer address and size are properly aligned.
*
*/
typedef struct {
size_t sram_trans_align; /*!< DMA transfer alignment for memory in SRAM, in bytes. The driver enables/disables burst mode based on this value. 0 means no alignment is required */
size_t psram_trans_align; /*!< DMA transfer alignment for memory in PSRAM, in bytes. The driver sets proper burst block size based on the alignment value. 0 means no alignment is required */
} gdma_transfer_ability_t;
/**
* @brief Set DMA channel transfer ability
*
* @param[in] dma_chan GDMA channel handle, allocated by `gdma_new_channel`
* @param[in] ability Transfer ability, e.g. alignment
* @return
* - ESP_OK: Set DMA channel transfer ability successfully
* - ESP_ERR_INVALID_ARG: Set DMA channel transfer ability failed because of invalid argument
* - ESP_FAIL: Set DMA channel transfer ability failed because of other error
*/
esp_err_t gdma_set_transfer_ability(gdma_channel_handle_t dma_chan, const gdma_transfer_ability_t *ability)
__attribute__((deprecated("please use gdma_config_transfer instead")));
#ifdef __cplusplus
}
#endif

View File

@ -201,7 +201,7 @@ static void test_gdma_m2m_mode(gdma_channel_handle_t tx_chan, gdma_channel_handl
// do write-back for the source data because it's in the cache
TEST_ESP_OK(esp_cache_msync((void *)src_data, 128, ESP_CACHE_MSYNC_FLAG_DIR_C2M));
}
#if SOC_DMA_CAN_ACCESS_MSPI_MEM
#if SOC_DMA_CAN_ACCESS_FLASH
const char *src_string = "GDMA can fetch data from MSPI Flash";
size_t src_string_len = strlen(src_string);
TEST_ASSERT_TRUE(esp_ptr_in_drom(src_string));
@ -234,7 +234,7 @@ static void test_gdma_m2m_mode(gdma_channel_handle_t tx_chan, gdma_channel_handl
tx_descs_nc[1].dw0.size = 64;
tx_descs_nc[1].dw0.length = 64;
tx_descs_nc[1].dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
#if !SOC_DMA_CAN_ACCESS_MSPI_MEM
#if !SOC_DMA_CAN_ACCESS_FLASH
tx_descs_nc[1].dw0.suc_eof = 1;
tx_descs_nc[1].next = NULL;
#else
@ -272,7 +272,7 @@ static void test_gdma_m2m_mode(gdma_channel_handle_t tx_chan, gdma_channel_handl
for (int i = 0; i < 128; i++) {
TEST_ASSERT_EQUAL(i, dst_data[i]);
}
#if SOC_DMA_CAN_ACCESS_MSPI_MEM
#if SOC_DMA_CAN_ACCESS_FLASH
TEST_ASSERT_TRUE(dst_data[128 + src_string_len] == 0xFF);
dst_data[128 + src_string_len] = '\0';
TEST_ASSERT_TRUE(strcmp(src_string, (const char *)((uint32_t)dst_data + 128)) == 0);

View File

@ -48,6 +48,7 @@ extern "C" {
#define GDMA_LL_AHB_TX_RX_SHARE_INTERRUPT 1 // TX and RX channel in the same pair will share the same interrupt source number
#define GDMA_LL_AHB_DESC_ALIGNMENT 4
#define GDMA_LL_AHB_RX_BURST_NEEDS_ALIGNMENT 1
///////////////////////////////////// Common /////////////////////////////////////////

View File

@ -48,6 +48,7 @@ extern "C" {
#define GDMA_LL_AHB_TX_RX_SHARE_INTERRUPT 1 // TX and RX channel in the same pair will share the same interrupt source number
#define GDMA_LL_AHB_DESC_ALIGNMENT 4
#define GDMA_LL_AHB_RX_BURST_NEEDS_ALIGNMENT 1
///////////////////////////////////// Common /////////////////////////////////////////

View File

@ -13,7 +13,6 @@
#include "soc/gdma_struct.h"
#include "soc/gdma_reg.h"
#include "soc/soc_etm_source.h"
#include "soc/pcr_struct.h"
#include "soc/retention_periph_defs.h"
#ifdef __cplusplus
@ -102,25 +101,6 @@ extern "C" {
///////////////////////////////////// Common /////////////////////////////////////////
/**
* @brief Enable the bus clock for the DMA module
*/
static inline void gdma_ll_enable_bus_clock(int group_id, bool enable)
{
(void)group_id;
PCR.gdma_conf.gdma_clk_en = enable;
}
/**
* @brief Reset the DMA module
*/
static inline void gdma_ll_reset_register(int group_id)
{
(void)group_id;
PCR.gdma_conf.gdma_rst_en = 1;
PCR.gdma_conf.gdma_rst_en = 0;
}
/**
* @brief Force enable register clock
*/

View File

@ -6,8 +6,38 @@
#pragma once
#include "sdkconfig.h"
#include "soc/pcr_struct.h"
#if CONFIG_IDF_TARGET_ESP32C5_BETA3_VERSION
#include "hal/gdma_beta3_ll.h"
#define GDMA_LL_AHB_RX_BURST_NEEDS_ALIGNMENT 1
#else
#include "hal/ahb_dma_ll.h"
#define GDMA_LL_AHB_BURST_SIZE_ADJUSTABLE 1 // AHB GDMA supports adjustable burst size
#endif
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Enable the bus clock for the DMA module
*/
static inline void gdma_ll_enable_bus_clock(int group_id, bool enable)
{
(void)group_id;
PCR.gdma_conf.gdma_clk_en = enable;
}
/**
* @brief Reset the DMA module
*/
static inline void gdma_ll_reset_register(int group_id)
{
(void)group_id;
PCR.gdma_conf.gdma_rst_en = 1;
PCR.gdma_conf.gdma_rst_en = 0;
}
#ifdef __cplusplus
}
#endif

View File

@ -10,10 +10,10 @@
#include <stdbool.h>
#include "soc/soc_caps.h"
#include "hal/gdma_types.h"
#include "hal/assert.h"
#include "soc/ahb_dma_struct.h"
#include "soc/ahb_dma_reg.h"
#include "soc/soc_etm_source.h"
#include "soc/pcr_struct.h"
#include "soc/retention_periph_defs.h"
#ifdef __cplusplus
@ -99,25 +99,6 @@ extern "C" {
///////////////////////////////////// Common /////////////////////////////////////////
/**
* @brief Enable the bus clock for the DMA module
*/
static inline void gdma_ll_enable_bus_clock(int group_id, bool enable)
{
(void)group_id;
PCR.gdma_conf.gdma_clk_en = enable;
}
/**
* @brief Reset the DMA module
*/
static inline void gdma_ll_reset_register(int group_id)
{
(void)group_id;
PCR.gdma_conf.gdma_rst_en = 1;
PCR.gdma_conf.gdma_rst_en = 0;
}
/**
* @brief Force enable register clock
*/
@ -212,11 +193,10 @@ static inline void ahb_dma_ll_rx_enable_owner_check(ahb_dma_dev_t *dev, uint32_t
}
/**
* @brief Enable DMA RX channel burst reading data, disabled by default
* @brief Enable DMA RX channel burst reading data, always enabled
*/
static inline void ahb_dma_ll_rx_enable_data_burst(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
{
// dev->channel[channel].in.in_conf0.in_data_burst_mode_sel_chn = enable; // single/incr4/incr8/incr16
}
/**
@ -227,6 +207,32 @@ static inline void ahb_dma_ll_rx_enable_descriptor_burst(ahb_dma_dev_t *dev, uin
dev->channel[channel].in.in_conf0.indscr_burst_en_chn = enable;
}
/**
* @brief Set RX channel burst size
*/
static inline void ahb_dma_ll_rx_set_burst_size(ahb_dma_dev_t *dev, uint32_t channel, uint32_t sz)
{
uint8_t burst_mode = 0;
switch (sz) {
case 4:
burst_mode = 0; // single
break;
case 16:
burst_mode = 1; // incr4
break;
case 32:
burst_mode = 2; // incr8
break;
case 64:
burst_mode = 3; // incr16
break;
default:
HAL_ASSERT(false);
break;
}
dev->channel[channel].in.in_conf0.in_data_burst_mode_sel_chn = burst_mode;
}
/**
* @brief Reset DMA RX channel FSM and FIFO pointer
*/
@ -440,11 +446,10 @@ static inline void ahb_dma_ll_tx_enable_owner_check(ahb_dma_dev_t *dev, uint32_t
}
/**
* @brief Enable DMA TX channel burst sending data, disabled by default
* @brief Enable DMA TX channel burst sending data, always enabled
*/
static inline void ahb_dma_ll_tx_enable_data_burst(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
{
// dev->channel[channel].out.out_conf0.out_data_burst_mode_sel_chn = enable;
}
/**
@ -455,6 +460,32 @@ static inline void ahb_dma_ll_tx_enable_descriptor_burst(ahb_dma_dev_t *dev, uin
dev->channel[channel].out.out_conf0.outdscr_burst_en_chn = enable;
}
/**
* @brief Set TX channel burst size
*/
static inline void ahb_dma_ll_tx_set_burst_size(ahb_dma_dev_t *dev, uint32_t channel, uint32_t sz)
{
uint8_t burst_mode = 0;
switch (sz) {
case 4:
burst_mode = 0; // single
break;
case 16:
burst_mode = 1; // incr4
break;
case 32:
burst_mode = 2; // incr8
break;
case 64:
burst_mode = 3; // incr16
break;
default:
HAL_ASSERT(false);
break;
}
dev->channel[channel].out.out_conf0.out_data_burst_mode_sel_chn = burst_mode;
}
/**
* @brief Set TX channel EOF mode
*/

View File

@ -51,6 +51,7 @@ extern "C" {
#define GDMA_LL_AHB_PAIRS_PER_GROUP 3 // Number of GDMA pairs in each AHB group
#define GDMA_LL_AHB_DESC_ALIGNMENT 4
#define GDMA_LL_AHB_RX_BURST_NEEDS_ALIGNMENT 1
#define GDMA_LL_TX_ETM_EVENT_TABLE(group, chan, event) \
(uint32_t[1][3][GDMA_ETM_EVENT_MAX]){{{ \

View File

@ -51,6 +51,7 @@ extern "C" {
#define GDMA_LL_AHB_PAIRS_PER_GROUP 3 // Number of GDMA pairs in each AHB group
#define GDMA_LL_AHB_DESC_ALIGNMENT 4
#define GDMA_LL_AHB_RX_BURST_NEEDS_ALIGNMENT 1
#define GDMA_LL_TX_ETM_EVENT_TABLE(group, chan, event) \
(uint32_t[1][3][GDMA_ETM_EVENT_MAX]){{{ \

View File

@ -125,7 +125,7 @@ static inline void axi_dma_ll_rx_enable_owner_check(axi_dma_dev_t *dev, uint32_t
}
/**
* @brief Enable DMA RX channel burst reading data, disabled by default
* @brief Enable DMA RX channel burst reading data, always enabled
*/
static inline void axi_dma_ll_rx_enable_data_burst(axi_dma_dev_t *dev, uint32_t channel, bool enable)
{
@ -139,6 +139,16 @@ static inline void axi_dma_ll_rx_enable_descriptor_burst(axi_dma_dev_t *dev, uin
dev->in[channel].conf.in_conf0.indscr_burst_en_chn = enable;
}
/**
* @brief Set the RX channel burst size
*/
static inline void axi_dma_ll_rx_set_burst_size(axi_dma_dev_t *dev, uint32_t channel, uint32_t sz)
{
// burst size is a power of 2 in [8, 128]; the register encodes log2(size) - 3 (8 -> 0, ..., 128 -> 4)
HAL_ASSERT(sz >= 8 && sz <= 128);
int ctz = __builtin_ctz(sz);
dev->in[channel].conf.in_conf0.in_burst_size_sel_chn = ctz - 3;
}
/**
* @brief Reset DMA RX channel FSM and FIFO pointer
*/
@ -274,11 +284,11 @@ static inline void axi_dma_ll_rx_enable_etm_task(axi_dma_dev_t *dev, uint32_t ch
}
/**
* @brief Whether to enable the mean access ecc or aes domain
* @brief Whether to enable access to ecc or aes memory
*/
static inline void axi_dma_ll_rx_enable_ext_mem_ecc_aes_access(axi_dma_dev_t *dev, uint32_t channel, bool enable)
{
dev->in[channel].conf.in_conf0.in_ecc_aec_en_chn = enable;
dev->in[channel].conf.in_conf0.in_ecc_aes_en_chn = enable;
}
///////////////////////////////////// TX /////////////////////////////////////////
@ -333,7 +343,7 @@ static inline void axi_dma_ll_tx_enable_owner_check(axi_dma_dev_t *dev, uint32_t
}
/**
* @brief Enable DMA TX channel burst sending data, disabled by default
* @brief Enable DMA TX channel burst sending data, always enabled
*/
static inline void axi_dma_ll_tx_enable_data_burst(axi_dma_dev_t *dev, uint32_t channel, bool enable)
{
@ -347,6 +357,16 @@ static inline void axi_dma_ll_tx_enable_descriptor_burst(axi_dma_dev_t *dev, uin
dev->out[channel].conf.out_conf0.outdscr_burst_en_chn = enable;
}
/**
* @brief Set the TX channel burst size
*/
static inline void axi_dma_ll_tx_set_burst_size(axi_dma_dev_t *dev, uint32_t channel, uint32_t sz)
{
// burst size is a power of 2 in [8, 128]; the register encodes log2(size) - 3 (8 -> 0, ..., 128 -> 4)
HAL_ASSERT(sz >= 8 && sz <= 128);
int ctz = __builtin_ctz(sz);
dev->out[channel].conf.out_conf0.out_burst_size_sel_chn = ctz - 3;
}
/**
* @brief Set TX channel EOF mode
*/
@ -480,11 +500,11 @@ static inline void axi_dma_ll_tx_enable_etm_task(axi_dma_dev_t *dev, uint32_t ch
}
/**
* @brief Whether to enable the mean access ecc or aes domain
* @brief Whether to enable access to ecc or aes memory
*/
static inline void axi_dma_ll_tx_enable_ext_mem_ecc_aes_access(axi_dma_dev_t *dev, uint32_t channel, bool enable)
{
dev->out[channel].conf.out_conf0.out_ecc_aec_en_chn = enable;
dev->out[channel].conf.out_conf0.out_ecc_aes_en_chn = enable;
}
///////////////////////////////////// CRC-TX /////////////////////////////////////////

View File

@ -62,6 +62,9 @@ extern "C" {
#define GDMA_LL_AHB_DESC_ALIGNMENT 4
#define GDMA_LL_AHB_BURST_SIZE_ADJUSTABLE 1 // AHB GDMA supports adjustable burst size
#define GDMA_LL_AHB_RX_BURST_NEEDS_ALIGNMENT 1
///////////////////////////////////// Common /////////////////////////////////////////
/**
@ -177,20 +180,20 @@ static inline void gdma_ll_rx_reset_channel(gdma_dev_t *dev, uint32_t channel)
}
/**
* @brief Set DMA RX channel memory block size based on the alignment requirement
* @param align Supported value: 16/32/64
* @brief Set DMA RX channel memory block size based on the burst requirement
* @param burst_sz Supported value: 16/32/64
*/
static inline void gdma_ll_rx_set_ext_mem_block_size(gdma_dev_t *dev, uint32_t channel, uint8_t align)
static inline void gdma_ll_rx_set_burst_size(gdma_dev_t *dev, uint32_t channel, uint32_t burst_sz)
{
uint32_t block_size = 0;
switch (align) {
case 64: // 64 Bytes alignment
switch (burst_sz) {
case 64:
block_size = GDMA_LL_EXT_MEM_BK_SIZE_64B;
break;
case 32: // 32 Bytes alignment
case 32:
block_size = GDMA_LL_EXT_MEM_BK_SIZE_32B;
break;
case 16: // 16 Bytes alignment
case 16:
block_size = GDMA_LL_EXT_MEM_BK_SIZE_16B;
break;
default:
@ -461,20 +464,20 @@ static inline void gdma_ll_tx_reset_channel(gdma_dev_t *dev, uint32_t channel)
}
/**
* @brief Set DMA TX channel memory block size based on the alignment requirement
* @param align Supported value: 16/32/64
* @brief Set DMA TX channel memory block size based on the burst requirement
* @param burst_sz Supported value: 16/32/64
*/
static inline void gdma_ll_tx_set_ext_mem_block_size(gdma_dev_t *dev, uint32_t channel, uint8_t align)
static inline void gdma_ll_tx_set_burst_size(gdma_dev_t *dev, uint32_t channel, uint32_t burst_sz)
{
uint32_t block_size = 0;
switch (align) {
case 64: // 64 Bytes alignment
switch (burst_sz) {
case 64:
block_size = GDMA_LL_EXT_MEM_BK_SIZE_64B;
break;
case 32: // 32 Bytes alignment
case 32:
block_size = GDMA_LL_EXT_MEM_BK_SIZE_32B;
break;
case 16: // 16 Bytes alignment
case 16:
block_size = GDMA_LL_EXT_MEM_BK_SIZE_16B;
break;
default:

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -91,16 +91,16 @@ void gdma_ahb_hal_enable_burst(gdma_hal_context_t *hal, int chan_id, gdma_channe
}
}
#if SOC_AHB_GDMA_SUPPORT_PSRAM
void gdma_ahb_hal_set_ext_mem_align(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint8_t align)
#if GDMA_LL_AHB_BURST_SIZE_ADJUSTABLE
void gdma_ahb_hal_set_burst_size(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t burst_sz)
{
if (dir == GDMA_CHANNEL_DIRECTION_RX) {
gdma_ll_rx_set_ext_mem_block_size(hal->dev, chan_id, align);
gdma_ll_rx_set_burst_size(hal->dev, chan_id, burst_sz);
} else {
gdma_ll_tx_set_ext_mem_block_size(hal->dev, chan_id, align);
gdma_ll_tx_set_burst_size(hal->dev, chan_id, burst_sz);
}
}
#endif
#endif // GDMA_LL_AHB_BURST_SIZE_ADJUSTABLE
void gdma_ahb_hal_set_strategy(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_owner_check, bool en_desc_write_back, bool eof_till_popped)
{
@ -195,8 +195,8 @@ void gdma_ahb_hal_init(gdma_hal_context_t *hal, const gdma_hal_config_t *config)
#if SOC_GDMA_SUPPORT_ETM
hal->enable_etm_task = gdma_ahb_hal_enable_etm_task;
#endif
#if SOC_AHB_GDMA_SUPPORT_PSRAM
hal->set_ext_mem_align = gdma_ahb_hal_set_ext_mem_align;
#endif // SOC_AHB_GDMA_SUPPORT_PSRAM
#if GDMA_LL_AHB_BURST_SIZE_ADJUSTABLE
hal->set_burst_size = gdma_ahb_hal_set_burst_size;
#endif // GDMA_LL_AHB_BURST_SIZE_ADJUSTABLE
hal->priv_data = &gdma_ahb_hal_priv_data;
}

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -104,6 +104,17 @@ void gdma_ahb_hal_set_strategy(gdma_hal_context_t *hal, int chan_id, gdma_channe
}
}
#if GDMA_LL_AHB_BURST_SIZE_ADJUSTABLE
void gdma_ahb_hal_set_burst_size(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t burst_sz)
{
if (dir == GDMA_CHANNEL_DIRECTION_RX) {
ahb_dma_ll_rx_set_burst_size(hal->ahb_dma_dev, chan_id, burst_sz);
} else {
ahb_dma_ll_tx_set_burst_size(hal->ahb_dma_dev, chan_id, burst_sz);
}
}
#endif // GDMA_LL_AHB_BURST_SIZE_ADJUSTABLE
void gdma_ahb_hal_enable_intr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask, bool en_or_dis)
{
if (dir == GDMA_CHANNEL_DIRECTION_RX) {
@ -246,5 +257,8 @@ void gdma_ahb_hal_init(gdma_hal_context_t *hal, const gdma_hal_config_t *config)
#if SOC_GDMA_SUPPORT_ETM
hal->enable_etm_task = gdma_ahb_hal_enable_etm_task;
#endif // SOC_GDMA_SUPPORT_ETM
#if GDMA_LL_AHB_BURST_SIZE_ADJUSTABLE
hal->set_burst_size = gdma_ahb_hal_set_burst_size;
#endif // GDMA_LL_AHB_BURST_SIZE_ADJUSTABLE
ahb_dma_ll_set_default_memory_range(hal->ahb_dma_dev);
}

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -91,6 +91,15 @@ void gdma_axi_hal_enable_burst(gdma_hal_context_t *hal, int chan_id, gdma_channe
}
}
void gdma_axi_hal_set_burst_size(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t burst_sz)
{
if (dir == GDMA_CHANNEL_DIRECTION_RX) {
axi_dma_ll_rx_set_burst_size(hal->axi_dma_dev, chan_id, burst_sz);
} else {
axi_dma_ll_tx_set_burst_size(hal->axi_dma_dev, chan_id, burst_sz);
}
}
void gdma_axi_hal_set_strategy(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_owner_check, bool en_desc_write_back, bool eof_till_popped)
{
if (dir == GDMA_CHANNEL_DIRECTION_RX) {
@ -153,6 +162,15 @@ uint32_t gdma_axi_hal_get_eof_desc_addr(gdma_hal_context_t *hal, int chan_id, gd
}
}
void gdma_axi_hal_enable_access_encrypt_mem(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_or_dis)
{
if (dir == GDMA_CHANNEL_DIRECTION_RX) {
axi_dma_ll_rx_enable_ext_mem_ecc_aes_access(hal->axi_dma_dev, chan_id, en_or_dis);
} else {
axi_dma_ll_tx_enable_ext_mem_ecc_aes_access(hal->axi_dma_dev, chan_id, en_or_dis);
}
}
#if SOC_GDMA_SUPPORT_CRC
void gdma_axi_hal_clear_crc(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{
@ -238,6 +256,8 @@ void gdma_axi_hal_init(gdma_hal_context_t *hal, const gdma_hal_config_t *config)
hal->read_intr_status = gdma_axi_hal_read_intr_status;
hal->get_intr_status_reg = gdma_axi_hal_get_intr_status_reg;
hal->get_eof_desc_addr = gdma_axi_hal_get_eof_desc_addr;
hal->set_burst_size = gdma_axi_hal_set_burst_size;
hal->enable_access_encrypt_mem = gdma_axi_hal_enable_access_encrypt_mem;
#if SOC_GDMA_SUPPORT_CRC
hal->clear_crc = gdma_axi_hal_clear_crc;
hal->set_crc_poly = gdma_axi_hal_set_crc_poly;

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2020-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -53,10 +53,10 @@ void gdma_hal_enable_burst(gdma_hal_context_t *hal, int chan_id, gdma_channel_di
hal->enable_burst(hal, chan_id, dir, en_data_burst, en_desc_burst);
}
void gdma_hal_set_ext_mem_align(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint8_t align)
void gdma_hal_set_burst_size(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t burst_sz)
{
if (hal->set_ext_mem_align) {
hal->set_ext_mem_align(hal, chan_id, dir, align);
if (hal->set_burst_size) {
hal->set_burst_size(hal, chan_id, dir, burst_sz);
}
}
@ -90,6 +90,13 @@ uint32_t gdma_hal_get_eof_desc_addr(gdma_hal_context_t *hal, int chan_id, gdma_c
return hal->get_eof_desc_addr(hal, chan_id, dir, is_success);
}
void gdma_hal_enable_access_encrypt_mem(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_or_dis)
{
if (hal->enable_access_encrypt_mem) {
hal->enable_access_encrypt_mem(hal, chan_id, dir, en_or_dis);
}
}
#if SOC_GDMA_SUPPORT_CRC
void gdma_hal_clear_crc(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{

View File

@ -80,13 +80,14 @@ struct gdma_hal_context_t {
void (*connect_peri)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, gdma_trigger_peripheral_t periph, int periph_sub_id); /// Connect the channel to a peripheral
void (*disconnect_peri)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir); /// Disconnect the channel from a peripheral
void (*enable_burst)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_data_burst, bool en_desc_burst); /// Enable burst mode
void (*set_ext_mem_align)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint8_t align); /// Set the alignment of the external memory
void (*set_burst_size)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t burst_sz); /// Set burst transfer size
void (*set_strategy)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_owner_check, bool en_desc_write_back, bool eof_till_popped); /// Set some misc strategy of the channel behaviour
uint32_t (*get_intr_status_reg)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir); // Get the interrupt status register address
void (*enable_intr)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask, bool en_or_dis); /// Enable the channel interrupt
void (*clear_intr)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask); /// Clear the channel interrupt
uint32_t (*read_intr_status)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool raw); /// Read the channel interrupt status
uint32_t (*get_eof_desc_addr)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool is_success); /// Get the address of the descriptor with success/error EOF flag set
void (*enable_access_encrypt_mem)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_or_dis); /// Enable the access to the encrypted memory
#if SOC_GDMA_SUPPORT_CRC
void (*clear_crc)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir); /// Clear the CRC interim results
void (*set_crc_poly)(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, const gdma_hal_crc_config_t *config); /// Set the CRC polynomial
@ -115,7 +116,7 @@ void gdma_hal_disconnect_peri(gdma_hal_context_t *hal, int chan_id, gdma_channel
void gdma_hal_enable_burst(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_data_burst, bool en_desc_burst);
void gdma_hal_set_ext_mem_align(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint8_t align);
void gdma_hal_set_burst_size(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t burst_sz);
void gdma_hal_set_strategy(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_owner_check, bool en_desc_write_back, bool eof_till_popped);
@ -129,6 +130,8 @@ uint32_t gdma_hal_read_intr_status(gdma_hal_context_t *hal, int chan_id, gdma_ch
uint32_t gdma_hal_get_eof_desc_addr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool is_success);
void gdma_hal_enable_access_encrypt_mem(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_or_dis);
#if SOC_GDMA_SUPPORT_CRC
void gdma_hal_build_parallel_crc_matrix(int crc_width, uint32_t crc_poly_hex, int data_width,
uint32_t *lfsr_transform_matrix, uint32_t *data_transform_matrix);

View File

@ -28,7 +28,7 @@ void gdma_ahb_hal_disconnect_peri(gdma_hal_context_t *hal, int chan_id, gdma_cha
void gdma_ahb_hal_enable_burst(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_data_burst, bool en_desc_burst);
void gdma_ahb_hal_set_ext_mem_align(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint8_t align);
void gdma_ahb_hal_set_burst_size(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t burst_sz);
void gdma_ahb_hal_set_strategy(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_owner_check, bool en_desc_write_back, bool eof_till_popped);

View File

@ -28,6 +28,8 @@ void gdma_axi_hal_disconnect_peri(gdma_hal_context_t *hal, int chan_id, gdma_cha
void gdma_axi_hal_enable_burst(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_data_burst, bool en_desc_burst);
void gdma_axi_hal_set_burst_size(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t burst_sz);
void gdma_axi_hal_set_ext_mem_align(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint8_t align);
void gdma_axi_hal_set_strategy(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_owner_check, bool en_desc_write_back, bool eof_till_popped);

View File

@ -73,12 +73,6 @@ static esp_err_t crypto_shared_gdma_init(void)
.direction = GDMA_CHANNEL_DIRECTION_RX,
};
gdma_transfer_ability_t transfer_ability = {
.sram_trans_align = 1,
.psram_trans_align = 16,
};
ret = crypto_shared_gdma_new_channel(&channel_config_tx, &tx_channel);
if (ret != ESP_OK) {
goto err;
@ -90,9 +84,13 @@ static esp_err_t crypto_shared_gdma_init(void)
goto err;
}
gdma_set_transfer_ability(tx_channel, &transfer_ability);
gdma_set_transfer_ability(rx_channel, &transfer_ability);
gdma_transfer_config_t transfer_cfg = {
.max_data_burst_size = 16,
.access_ext_mem = true, // crypto peripheral may want to access PSRAM
};
gdma_config_transfer(tx_channel, &transfer_cfg);
transfer_cfg.max_data_burst_size = 0;
gdma_config_transfer(rx_channel, &transfer_cfg);
gdma_connect(rx_channel, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_AES, 0));
gdma_connect(tx_channel, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_AES, 0));
@ -223,7 +221,7 @@ bool esp_crypto_shared_gdma_done(void)
{
int rx_ch_id = 0;
gdma_get_channel_id(rx_channel, &rx_ch_id);
while(1) {
while (1) {
if ((axi_dma_ll_rx_get_interrupt_status(&AXI_DMA, rx_ch_id, true) & 1)) {
break;
}

View File

@ -155,7 +155,7 @@ config SOC_CPU_IDRAM_SPLIT_USING_PMP
bool
default y
config SOC_DMA_CAN_ACCESS_MSPI_MEM
config SOC_DMA_CAN_ACCESS_FLASH
bool
default y

View File

@ -166,7 +166,7 @@
// #define SOC_DS_KEY_CHECK_MAX_WAIT_US (1100)
/*-------------------------- DMA Common CAPS ----------------------------------------*/
#define SOC_DMA_CAN_ACCESS_MSPI_MEM 1 /*!< DMA can access MSPI memory (e.g. Flash, PSRAM) */
#define SOC_DMA_CAN_ACCESS_FLASH 1 /*!< DMA can access Flash memory */
/*-------------------------- GDMA CAPS -------------------------------------*/
#define SOC_AHB_GDMA_VERSION 2

View File

@ -479,7 +479,7 @@ config SOC_DS_KEY_CHECK_MAX_WAIT_US
int
default 1100
config SOC_DMA_CAN_ACCESS_MSPI_MEM
config SOC_DMA_CAN_ACCESS_FLASH
bool
default y

View File

@ -1,5 +1,5 @@
/**
* SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -273,10 +273,10 @@ typedef union {
* 1:mean disable cmd of this ch0
*/
uint32_t in_cmd_disable_chn: 1;
/** in_ecc_aec_en_chn : R/W; bitpos: [8]; default: 0;
/** in_ecc_aes_en_chn : R/W; bitpos: [8]; default: 0;
* 1: mean access ecc or aes domain,0: mean not
*/
uint32_t in_ecc_aec_en_chn: 1;
uint32_t in_ecc_aes_en_chn: 1;
/** indscr_burst_en_chn : R/W; bitpos: [9]; default: 0;
* Set this bit to 1 to enable INCR burst transfer for Rx channel 0 reading link
* descriptor when accessing internal SRAM.
@ -567,7 +567,7 @@ typedef union {
*/
uint32_t rx_ch_arb_weigh_chn: 4;
/** rx_arb_weigh_opt_dir_chn : R/W; bitpos: [8]; default: 0;
* 0: mean not optimazation weight function ,1: mean optimazation
* 0: mean not optimization weight function ,1: mean optimization
*/
uint32_t rx_arb_weigh_opt_dir_chn: 1;
uint32_t reserved_9: 23;
@ -952,10 +952,10 @@ typedef union {
* 1:mean disable cmd of this chn
*/
uint32_t out_cmd_disable_chn: 1;
/** out_ecc_aec_en_chn : R/W; bitpos: [9]; default: 0;
/** out_ecc_aes_en_chn : R/W; bitpos: [9]; default: 0;
* 1: mean access ecc or aes domain,0: mean not
*/
uint32_t out_ecc_aec_en_chn: 1;
uint32_t out_ecc_aes_en_chn: 1;
/** outdscr_burst_en_chn : R/W; bitpos: [10]; default: 0;
* Set this bit to 1 to enable INCR burst transfer for Tx channel0 reading link
* descriptor when accessing internal SRAM.
@ -1238,7 +1238,7 @@ typedef union {
*/
uint32_t tx_ch_arb_weigh_chn: 4;
/** tx_arb_weigh_opt_dir_chn : R/W; bitpos: [8]; default: 0;
* 0: mean not optimazation weight function ,1: mean optimazation
* 0: mean not optimization weight function ,1: mean optimization
*/
uint32_t tx_arb_weigh_opt_dir_chn: 1;
uint32_t reserved_9: 23;
@ -1374,106 +1374,6 @@ typedef union {
uint32_t val;
} axi_dma_tx_crc_data_en_addr_chn_reg_t;
/** Type of out_conf0_ch1 register
* Configure 0 register of Tx channel1
*/
typedef union {
struct {
/** out_rst_ch1 : R/W; bitpos: [0]; default: 0;
* This bit is used to reset AXI_DMA channel1 Tx FSM and Tx FIFO pointer.
*/
uint32_t out_rst_ch1: 1;
/** out_loop_test_ch1 : R/W; bitpos: [1]; default: 0;
* reserved
*/
uint32_t out_loop_test_ch1: 1;
/** out_auto_wrback_ch1 : R/W; bitpos: [2]; default: 0;
* Set this bit to enable automatic outlink-writeback when all the data in tx buffer
* has been transmitted.
*/
uint32_t out_auto_wrback_ch1: 1;
/** out_eof_mode_ch1 : R/W; bitpos: [3]; default: 1;
* EOF flag generation mode when transmitting data. 1: EOF flag for Tx channel1 is
* generated when data need to transmit has been popped from FIFO in AXI_DMA
*/
uint32_t out_eof_mode_ch1: 1;
/** out_etm_en_ch1 : R/W; bitpos: [4]; default: 0;
* Set this bit to 1 to enable etm control mode, dma Tx channel1 is triggered by etm
* task.
*/
uint32_t out_etm_en_ch1: 1;
/** out_burst_size_sel_ch1 : R/W; bitpos: [7:5]; default: 0;
* 3'b000-3'b100:burst length 8byte~128byte
*/
uint32_t out_burst_size_sel_ch1: 3;
/** out_cmd_disable_ch1 : R/W; bitpos: [8]; default: 0;
* 1:mean disable cmd of this ch1
*/
uint32_t out_cmd_disable_ch1: 1;
/** out_ecc_aec_en_ch1 : R/W; bitpos: [9]; default: 0;
* 1: mean access ecc or aes domain,0: mean not
*/
uint32_t out_ecc_aec_en_ch1: 1;
/** outdscr_burst_en_ch1 : R/W; bitpos: [10]; default: 0;
* Set this bit to 1 to enable INCR burst transfer for Tx channel1 reading link
* descriptor when accessing internal SRAM.
*/
uint32_t outdscr_burst_en_ch1: 1;
uint32_t reserved_11: 21;
};
uint32_t val;
} axi_dma_out_conf0_ch1_reg_t;
/** Type of out_conf0_ch2 register
* Configure 0 register of Tx channel2
*/
typedef union {
struct {
/** out_rst_ch2 : R/W; bitpos: [0]; default: 0;
* This bit is used to reset AXI_DMA channel2 Tx FSM and Tx FIFO pointer.
*/
uint32_t out_rst_ch2: 1;
/** out_loop_test_ch2 : R/W; bitpos: [1]; default: 0;
* reserved
*/
uint32_t out_loop_test_ch2: 1;
/** out_auto_wrback_ch2 : R/W; bitpos: [2]; default: 0;
* Set this bit to enable automatic outlink-writeback when all the data in tx buffer
* has been transmitted.
*/
uint32_t out_auto_wrback_ch2: 1;
/** out_eof_mode_ch2 : R/W; bitpos: [3]; default: 1;
* EOF flag generation mode when transmitting data. 1: EOF flag for Tx channel2 is
* generated when data need to transmit has been popped from FIFO in AXI_DMA
*/
uint32_t out_eof_mode_ch2: 1;
/** out_etm_en_ch2 : R/W; bitpos: [4]; default: 0;
* Set this bit to 1 to enable etm control mode, dma Tx channel2 is triggered by etm
* task.
*/
uint32_t out_etm_en_ch2: 1;
/** out_burst_size_sel_ch2 : R/W; bitpos: [7:5]; default: 0;
* 3'b000-3'b100:burst length 8byte~128byte
*/
uint32_t out_burst_size_sel_ch2: 3;
/** out_cmd_disable_ch2 : R/W; bitpos: [8]; default: 0;
* 1:mean disable cmd of this ch2
*/
uint32_t out_cmd_disable_ch2: 1;
/** out_ecc_aec_en_ch2 : R/W; bitpos: [9]; default: 0;
* 1: mean access ecc or aes domain,0: mean not
*/
uint32_t out_ecc_aec_en_ch2: 1;
/** outdscr_burst_en_ch2 : R/W; bitpos: [10]; default: 0;
* Set this bit to 1 to enable INCR burst transfer for Tx channel2 reading link
* descriptor when accessing internal SRAM.
*/
uint32_t outdscr_burst_en_ch2: 1;
uint32_t reserved_11: 21;
};
uint32_t val;
} axi_dma_out_conf0_ch2_reg_t;
/** Group: Configuration Registers */
/** Type of arb_timeout register
* This register is used to config the arbiter time slice
@ -1705,12 +1605,12 @@ typedef union {
/** Group: Status Registers */
/** Type of wresp_cnt register
* AXI wr responce cnt register.
* AXI wr response cnt register.
*/
typedef union {
struct {
/** wresp_cnt : RO; bitpos: [3:0]; default: 0;
* axi wr responce cnt reg.
* axi wr response cnt reg.
*/
uint32_t wresp_cnt: 4;
uint32_t reserved_4: 28;
@ -1719,12 +1619,12 @@ typedef union {
} axi_dma_wresp_cnt_reg_t;
/** Type of rresp_cnt register
* AXI wr responce cnt register.
* AXI wr response cnt register.
*/
typedef union {
struct {
/** rresp_cnt : RO; bitpos: [3:0]; default: 0;
* axi rd responce cnt reg.
* axi rd response cnt reg.
*/
uint32_t rresp_cnt: 4;
uint32_t reserved_4: 28;

View File

@ -189,7 +189,7 @@
#define SOC_DS_KEY_CHECK_MAX_WAIT_US (1100)
/*-------------------------- DMA Common CAPS ----------------------------------------*/
#define SOC_DMA_CAN_ACCESS_MSPI_MEM 1 /*!< DMA can access MSPI memory (e.g. Flash, PSRAM) */
#define SOC_DMA_CAN_ACCESS_FLASH 1 /*!< DMA can access Flash memory */
/*-------------------------- GDMA CAPS -------------------------------------*/
#define SOC_AHB_GDMA_VERSION 2

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -37,7 +37,6 @@ TEST_CASE("Test crypto lib bignum apis", "[wpa_crypto]")
bn = crypto_bignum_init_set(buf, 32);
TEST_ASSERT_NOT_NULL(bn);
TEST_ASSERT(crypto_bignum_to_bin(bn, buf2, 32, 0) == 32);
TEST_ASSERT(!memcmp(buf, buf2, 32));
@ -245,7 +244,6 @@ TEST_CASE("Test crypto lib bignum apis", "[wpa_crypto]")
uint8_t buf1[32], buf2[32];
crypto_bignum *bn1, *bn2;
buf1[0] = 0xf;
buf2[0] = 0x11;
@ -277,7 +275,6 @@ TEST_CASE("Test crypto lib bignum apis", "[wpa_crypto]")
}
}
/*
* Conversion macros for embedded constants:
* build lists of mbedtls_mpi_uint's from lists of unsigned char's grouped by 8, 4 or 2
@ -322,30 +319,29 @@ TEST_CASE("Test crypto lib bignum apis", "[wpa_crypto]")
* (assumes len is an exact multiple of sizeof mbedtls_mpi_uint)
* Allocate a new memory as well so that it can be freed.
*/
static inline void ecp_mpi_load( mbedtls_mpi *X, const mbedtls_mpi_uint *p, size_t len )
static inline void ecp_mpi_load(mbedtls_mpi *X, const mbedtls_mpi_uint *p, size_t len)
{
X->MBEDTLS_PRIVATE(s) = 1;
X->MBEDTLS_PRIVATE(n) = len / sizeof( mbedtls_mpi_uint );
X->MBEDTLS_PRIVATE(n) = len / sizeof(mbedtls_mpi_uint);
X->MBEDTLS_PRIVATE(p) = os_zalloc(len);
memcpy(X->MBEDTLS_PRIVATE(p), (void *)p, len);
}
TEST_CASE("Test crypto lib ECC apis", "[wpa_crypto]")
{
set_leak_threshold(600);
set_leak_threshold(620);
static const mbedtls_mpi_uint secp256r1_gx[] = {
BYTES_TO_T_UINT_8( 0x96, 0xC2, 0x98, 0xD8, 0x45, 0x39, 0xA1, 0xF4 ),
BYTES_TO_T_UINT_8( 0xA0, 0x33, 0xEB, 0x2D, 0x81, 0x7D, 0x03, 0x77 ),
BYTES_TO_T_UINT_8( 0xF2, 0x40, 0xA4, 0x63, 0xE5, 0xE6, 0xBC, 0xF8 ),
BYTES_TO_T_UINT_8( 0x47, 0x42, 0x2C, 0xE1, 0xF2, 0xD1, 0x17, 0x6B ),
BYTES_TO_T_UINT_8(0x96, 0xC2, 0x98, 0xD8, 0x45, 0x39, 0xA1, 0xF4),
BYTES_TO_T_UINT_8(0xA0, 0x33, 0xEB, 0x2D, 0x81, 0x7D, 0x03, 0x77),
BYTES_TO_T_UINT_8(0xF2, 0x40, 0xA4, 0x63, 0xE5, 0xE6, 0xBC, 0xF8),
BYTES_TO_T_UINT_8(0x47, 0x42, 0x2C, 0xE1, 0xF2, 0xD1, 0x17, 0x6B),
};
static const mbedtls_mpi_uint secp256r1_gy[] = {
BYTES_TO_T_UINT_8( 0xF5, 0x51, 0xBF, 0x37, 0x68, 0x40, 0xB6, 0xCB ),
BYTES_TO_T_UINT_8( 0xCE, 0x5E, 0x31, 0x6B, 0x57, 0x33, 0xCE, 0x2B ),
BYTES_TO_T_UINT_8( 0x16, 0x9E, 0x0F, 0x7C, 0x4A, 0xEB, 0xE7, 0x8E ),
BYTES_TO_T_UINT_8( 0x9B, 0x7F, 0x1A, 0xFE, 0xE2, 0x42, 0xE3, 0x4F ),
BYTES_TO_T_UINT_8(0xF5, 0x51, 0xBF, 0x37, 0x68, 0x40, 0xB6, 0xCB),
BYTES_TO_T_UINT_8(0xCE, 0x5E, 0x31, 0x6B, 0x57, 0x33, 0xCE, 0x2B),
BYTES_TO_T_UINT_8(0x16, 0x9E, 0x0F, 0x7C, 0x4A, 0xEB, 0xE7, 0x8E),
BYTES_TO_T_UINT_8(0x9B, 0x7F, 0x1A, 0xFE, 0xE2, 0x42, 0xE3, 0x4F),
};
{
@ -393,8 +389,8 @@ TEST_CASE("Test crypto lib ECC apis", "[wpa_crypto]")
TEST_ASSERT_NOT_NULL(q);
TEST_ASSERT_NOT_NULL(r);
mbedtls_mpi_init( &num );
mbedtls_mpi_lset( &num, 3 );
mbedtls_mpi_init(&num);
mbedtls_mpi_lset(&num, 3);
ecp_mpi_load(& ((mbedtls_ecp_point *)p)->MBEDTLS_PRIVATE(X), secp256r1_gx, sizeof(secp256r1_gx));
ecp_mpi_load(& ((mbedtls_ecp_point *)p)->MBEDTLS_PRIVATE(Y), secp256r1_gy, sizeof(secp256r1_gy));
@ -408,7 +404,7 @@ TEST_CASE("Test crypto lib ECC apis", "[wpa_crypto]")
TEST_ASSERT(crypto_ec_point_cmp(e, q, r) == 0);
mbedtls_mpi_free( &num );
mbedtls_mpi_free(&num);
crypto_ec_point_deinit(p, 1);
crypto_ec_point_deinit(q, 1);
crypto_ec_point_deinit(r, 1);
@ -432,8 +428,8 @@ TEST_CASE("Test crypto lib ECC apis", "[wpa_crypto]")
TEST_ASSERT_NOT_NULL(q);
TEST_ASSERT_NOT_NULL(r);
mbedtls_mpi_init( &num );
mbedtls_mpi_lset( &num, 100 );
mbedtls_mpi_init(&num);
mbedtls_mpi_lset(&num, 100);
ecp_mpi_load(& ((mbedtls_ecp_point *)p)->MBEDTLS_PRIVATE(X), secp256r1_gx, sizeof(secp256r1_gx));
ecp_mpi_load(& ((mbedtls_ecp_point *)p)->MBEDTLS_PRIVATE(Y), secp256r1_gy, sizeof(secp256r1_gy));
@ -448,7 +444,7 @@ TEST_CASE("Test crypto lib ECC apis", "[wpa_crypto]")
TEST_ASSERT(crypto_ec_point_is_at_infinity(e, r));
mbedtls_mpi_free( &num );
mbedtls_mpi_free(&num);
crypto_ec_point_deinit(p, 1);
crypto_ec_point_deinit(q, 1);
crypto_ec_point_deinit(r, 1);
@ -468,8 +464,8 @@ TEST_CASE("Test crypto lib ECC apis", "[wpa_crypto]")
TEST_ASSERT_NOT_NULL(p);
TEST_ASSERT_NOT_NULL(q);
mbedtls_mpi_init( &num );
mbedtls_mpi_lset( &num, 50 );
mbedtls_mpi_init(&num);
mbedtls_mpi_lset(&num, 50);
ecp_mpi_load(& ((mbedtls_ecp_point *)p)->MBEDTLS_PRIVATE(X), secp256r1_gx, sizeof(secp256r1_gx));
ecp_mpi_load(& ((mbedtls_ecp_point *)p)->MBEDTLS_PRIVATE(Y), secp256r1_gy, sizeof(secp256r1_gy));
@ -483,8 +479,7 @@ TEST_CASE("Test crypto lib ECC apis", "[wpa_crypto]")
TEST_ASSERT(crypto_ec_point_mul(e, p, (crypto_bignum *) &num, q) == 0);
TEST_ASSERT(crypto_ec_point_is_on_curve(e, q));
mbedtls_mpi_free( &num );
mbedtls_mpi_free(&num);
crypto_ec_point_deinit(p, 1);
crypto_ec_point_deinit(q, 1);
crypto_ec_deinit(e);
@ -506,8 +501,8 @@ TEST_CASE("Test crypto lib ECC apis", "[wpa_crypto]")
TEST_ASSERT_NOT_NULL(q);
TEST_ASSERT_NOT_NULL(r);
mbedtls_mpi_init( &num );
mbedtls_mpi_lset( &num, 50 );
mbedtls_mpi_init(&num);
mbedtls_mpi_lset(&num, 50);
ecp_mpi_load(& ((mbedtls_ecp_point *)p)->MBEDTLS_PRIVATE(X), secp256r1_gx, sizeof(secp256r1_gx));
ecp_mpi_load(& ((mbedtls_ecp_point *)p)->MBEDTLS_PRIVATE(Y), secp256r1_gy, sizeof(secp256r1_gy));
@ -532,7 +527,7 @@ TEST_CASE("Test crypto lib ECC apis", "[wpa_crypto]")
TEST_ASSERT(crypto_ec_point_add(e, q, r, r) == 0);
TEST_ASSERT(crypto_ec_point_is_at_infinity(e, r));
mbedtls_mpi_free( &num );
mbedtls_mpi_free(&num);
crypto_ec_point_deinit(p, 1);
crypto_ec_point_deinit(q, 1);
crypto_ec_point_deinit(r, 1);

View File

@ -77,12 +77,12 @@ Non-IRAM-Safe Interrupt Handlers
If the ``ESP_INTR_FLAG_IRAM`` flag is not set when registering, the interrupt handler will not get executed when the caches are disabled. Once the caches are restored, the non-IRAM-safe interrupts will be re-enabled. After this moment, the interrupt handler will run normally again. This means that as long as caches are disabled, users will not see the corresponding hardware event happening.
.. only:: SOC_DMA_CAN_ACCESS_MSPI_MEM
.. only:: SOC_DMA_CAN_ACCESS_FLASH
When DMA Read Data from Flash
-----------------------------
When DMA is reading data from Flash, erase/write operations from SPI1 take higher priority in hardware, resulting in unpredictable data read by DMA. It is recommended to stop DMA access to Flash before erasing or writing to it. If DMA cannot be stopped (for example, the LCD needs to continuously refresh image data stored in Flash), it is advisable to copy such data to PSRAM or internal SRAM.
When DMA is reading data from Flash, erase/write operations from SPI1 take higher priority in hardware, resulting in unpredictable data read by DMA if auto-suspend is not enabled. It is recommended to stop DMA access to Flash before erasing or writing to it. If DMA cannot be stopped (for example, the LCD needs to continuously refresh image data stored in Flash), it is advisable to copy such data to PSRAM or internal SRAM.
.. only:: SOC_SPI_MEM_SUPPORT_AUTO_SUSPEND

View File

@ -77,12 +77,12 @@ IRAM 安全中断处理程序
If the ``ESP_INTR_FLAG_IRAM`` flag is not set when registering, the interrupt handler will not be executed while the cache is disabled. Once the cache is restored, non-IRAM-safe interrupts are re-enabled and the interrupt handler runs normally again. This means that as long as the cache is disabled, the corresponding hardware events will not be observed.
.. only:: SOC_DMA_CAN_ACCESS_MSPI_MEM
.. only:: SOC_DMA_CAN_ACCESS_FLASH
When DMA Can Also Access Data in Flash
--------------------------------------
When DMA is reading data from Flash, erase/write operations from SPI1 take higher priority, causing DMA to read incorrect data. It is recommended to stop DMA access to Flash before erasing or writing to it. If DMA cannot be stopped, for example when the LCD needs to continuously refresh image data stored in Flash, it is advisable to copy such data to PSRAM or internal SRAM.
When DMA is reading data from Flash, erase/write operations from SPI1 take higher priority; if the Flash auto-suspend feature is not enabled, this will cause DMA to read incorrect data. It is recommended to stop DMA access to Flash before erasing or writing to it. If DMA cannot be stopped, for example when the LCD needs to continuously refresh image data stored in Flash, it is advisable to copy such data to PSRAM or internal SRAM.
.. only:: SOC_SPI_MEM_SUPPORT_AUTO_SUSPEND