refactor(spi): replace dma_ll related in spi by dma driver (part1)

This commit is contained in:
wanlei 2023-10-18 17:25:20 +08:00
parent 35fc493dcc
commit d0023b061f
17 changed files with 376 additions and 334 deletions

View File

@@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2016-2022 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2016-2023 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
@@ -67,6 +67,7 @@ typedef struct adc_digi_context_t {
gdma_channel_handle_t rx_dma_channel; //dma rx channel handle gdma_channel_handle_t rx_dma_channel; //dma rx channel handle
#elif CONFIG_IDF_TARGET_ESP32S2 #elif CONFIG_IDF_TARGET_ESP32S2
spi_host_device_t spi_host; //ADC uses this SPI DMA spi_host_device_t spi_host; //ADC uses this SPI DMA
spi_dma_ctx_t *spi_dma_ctx; //spi_dma context
intr_handle_t intr_hdl; //Interrupt handler intr_handle_t intr_hdl; //Interrupt handler
#elif CONFIG_IDF_TARGET_ESP32 #elif CONFIG_IDF_TARGET_ESP32
i2s_port_t i2s_host; //ADC uses this I2S DMA i2s_port_t i2s_host; //ADC uses this I2S DMA
@@ -167,7 +168,7 @@ esp_err_t adc_digi_deinitialize(void)
gdma_del_channel(s_adc_digi_ctx->rx_dma_channel); gdma_del_channel(s_adc_digi_ctx->rx_dma_channel);
#elif CONFIG_IDF_TARGET_ESP32S2 #elif CONFIG_IDF_TARGET_ESP32S2
esp_intr_free(s_adc_digi_ctx->intr_hdl); esp_intr_free(s_adc_digi_ctx->intr_hdl);
spicommon_dma_chan_free(s_adc_digi_ctx->spi_host); spicommon_dma_chan_free(s_adc_digi_ctx->spi_dma_ctx);
spicommon_periph_free(s_adc_digi_ctx->spi_host); spicommon_periph_free(s_adc_digi_ctx->spi_host);
#elif CONFIG_IDF_TARGET_ESP32 #elif CONFIG_IDF_TARGET_ESP32
esp_intr_free(s_adc_digi_ctx->intr_hdl); esp_intr_free(s_adc_digi_ctx->intr_hdl);
@@ -274,13 +275,14 @@ esp_err_t adc_digi_initialize(const adc_digi_init_config_t *init_config)
uint32_t dma_chan = 0; uint32_t dma_chan = 0;
spi_success = spicommon_periph_claim(SPI3_HOST, "adc"); spi_success = spicommon_periph_claim(SPI3_HOST, "adc");
ret = spicommon_dma_chan_alloc(SPI3_HOST, SPI_DMA_CH_AUTO, &dma_chan, &dma_chan); ret = spicommon_dma_chan_alloc(SPI3_HOST, SPI_DMA_CH_AUTO, &s_adc_digi_ctx->spi_dma_ctx);
if (ret == ESP_OK) { if (ret == ESP_OK) {
s_adc_digi_ctx->spi_host = SPI3_HOST; s_adc_digi_ctx->spi_host = SPI3_HOST;
} }
if (!spi_success || (s_adc_digi_ctx->spi_host != SPI3_HOST)) { if (!spi_success || (s_adc_digi_ctx->spi_host != SPI3_HOST)) {
goto cleanup; goto cleanup;
} }
dma_chan = s_adc_digi_ctx->spi_dma_ctx->rx_dma_chan.chan_id;
ret = esp_intr_alloc(spicommon_irqdma_source_for_host(s_adc_digi_ctx->spi_host), 0, adc_dma_intr_handler, ret = esp_intr_alloc(spicommon_irqdma_source_for_host(s_adc_digi_ctx->spi_host), 0, adc_dma_intr_handler,
(void *)s_adc_digi_ctx, &s_adc_digi_ctx->intr_hdl); (void *)s_adc_digi_ctx, &s_adc_digi_ctx->intr_hdl);

View File

@@ -196,13 +196,14 @@ esp_err_t adc_continuous_new_handle(const adc_continuous_handle_cfg_t *hdl_confi
uint32_t dma_chan = 0; uint32_t dma_chan = 0;
spi_success = spicommon_periph_claim(SPI3_HOST, "adc"); spi_success = spicommon_periph_claim(SPI3_HOST, "adc");
ret = spicommon_dma_chan_alloc(SPI3_HOST, SPI_DMA_CH_AUTO, &dma_chan, &dma_chan); ret = spicommon_dma_chan_alloc(SPI3_HOST, SPI_DMA_CH_AUTO, &adc_ctx->spi_dma_ctx);
if (ret == ESP_OK) { if (ret == ESP_OK) {
adc_ctx->spi_host = SPI3_HOST; adc_ctx->spi_host = SPI3_HOST;
} }
if (!spi_success || (adc_ctx->spi_host != SPI3_HOST)) { if (!spi_success || (adc_ctx->spi_host != SPI3_HOST)) {
goto cleanup; goto cleanup;
} }
dma_chan = adc_ctx->spi_dma_ctx->rx_dma_chan.chan_id;
ret = esp_intr_alloc(spicommon_irqdma_source_for_host(adc_ctx->spi_host), ESP_INTR_FLAG_IRAM, adc_dma_intr_handler, ret = esp_intr_alloc(spicommon_irqdma_source_for_host(adc_ctx->spi_host), ESP_INTR_FLAG_IRAM, adc_dma_intr_handler,
(void *)adc_ctx, &adc_ctx->dma_intr_hdl); (void *)adc_ctx, &adc_ctx->dma_intr_hdl);
@@ -494,7 +495,7 @@ esp_err_t adc_continuous_deinit(adc_continuous_handle_t handle)
gdma_del_channel(handle->rx_dma_channel); gdma_del_channel(handle->rx_dma_channel);
#elif CONFIG_IDF_TARGET_ESP32S2 #elif CONFIG_IDF_TARGET_ESP32S2
esp_intr_free(handle->dma_intr_hdl); esp_intr_free(handle->dma_intr_hdl);
spicommon_dma_chan_free(handle->spi_host); spicommon_dma_chan_free(handle->spi_dma_ctx);
spicommon_periph_free(handle->spi_host); spicommon_periph_free(handle->spi_host);
#elif CONFIG_IDF_TARGET_ESP32 #elif CONFIG_IDF_TARGET_ESP32
esp_intr_free(handle->dma_intr_hdl); esp_intr_free(handle->dma_intr_hdl);

View File

@@ -19,6 +19,7 @@
#include "esp_private/gdma.h" #include "esp_private/gdma.h"
#elif CONFIG_IDF_TARGET_ESP32S2 #elif CONFIG_IDF_TARGET_ESP32S2
#include "hal/spi_types.h" #include "hal/spi_types.h"
#include "esp_private/spi_common_internal.h"
#elif CONFIG_IDF_TARGET_ESP32 #elif CONFIG_IDF_TARGET_ESP32
#include "driver/i2s_types.h" #include "driver/i2s_types.h"
#endif #endif
@@ -77,6 +78,7 @@ struct adc_continuous_ctx_t {
gdma_channel_handle_t rx_dma_channel; //dma rx channel handle gdma_channel_handle_t rx_dma_channel; //dma rx channel handle
#elif CONFIG_IDF_TARGET_ESP32S2 #elif CONFIG_IDF_TARGET_ESP32S2
spi_host_device_t spi_host; //ADC uses this SPI DMA spi_host_device_t spi_host; //ADC uses this SPI DMA
spi_dma_ctx_t *spi_dma_ctx; //spi_dma context
#elif CONFIG_IDF_TARGET_ESP32 #elif CONFIG_IDF_TARGET_ESP32
i2s_port_t i2s_host; //ADC uses this I2S DMA i2s_port_t i2s_host; //ADC uses this I2S DMA
#endif #endif

View File

@@ -38,6 +38,7 @@
typedef struct { typedef struct {
void *periph_dev; /* DMA peripheral device address */ void *periph_dev; /* DMA peripheral device address */
uint32_t dma_chan; uint32_t dma_chan;
spi_dma_ctx_t *spi_dma_ctx; /* spi_dma context */
intr_handle_t intr_handle; /* Interrupt handle */ intr_handle_t intr_handle; /* Interrupt handle */
bool use_apll; /* Whether use APLL as digital controller clock source */ bool use_apll; /* Whether use APLL as digital controller clock source */
} dac_dma_periph_spi_t; } dac_dma_periph_spi_t;
@@ -142,9 +143,10 @@ esp_err_t dac_dma_periph_init(uint32_t freq_hz, bool is_alternate, bool is_apll)
/* When transmit alternately, twice frequency is needed to guarantee the convert frequency in one channel */ /* When transmit alternately, twice frequency is needed to guarantee the convert frequency in one channel */
uint32_t trans_freq_hz = freq_hz * (is_alternate ? 2 : 1); uint32_t trans_freq_hz = freq_hz * (is_alternate ? 2 : 1);
ESP_GOTO_ON_ERROR(s_dac_dma_periph_set_clock(trans_freq_hz, is_apll), err, TAG, "Failed to set clock of DMA peripheral"); ESP_GOTO_ON_ERROR(s_dac_dma_periph_set_clock(trans_freq_hz, is_apll), err, TAG, "Failed to set clock of DMA peripheral");
ESP_GOTO_ON_ERROR(spicommon_dma_chan_alloc(DAC_DMA_PERIPH_SPI_HOST, SPI_DMA_CH_AUTO, &s_ddp->dma_chan, &s_ddp->dma_chan), ESP_GOTO_ON_ERROR(spicommon_dma_chan_alloc(DAC_DMA_PERIPH_SPI_HOST, SPI_DMA_CH_AUTO, &s_ddp->spi_dma_ctx),
err, TAG, "Failed to allocate dma peripheral channel"); err, TAG, "Failed to allocate dma peripheral channel");
s_ddp->dma_chan = s_ddp->spi_dma_ctx->rx_dma_chan.chan_id;
spi_ll_enable_intr(s_ddp->periph_dev, SPI_LL_INTR_OUT_EOF | SPI_LL_INTR_OUT_TOTAL_EOF); spi_ll_enable_intr(s_ddp->periph_dev, SPI_LL_INTR_OUT_EOF | SPI_LL_INTR_OUT_TOTAL_EOF);
dac_ll_digi_set_convert_mode(is_alternate); dac_ll_digi_set_convert_mode(is_alternate);
return ret; return ret;
@@ -157,7 +159,7 @@ esp_err_t dac_dma_periph_deinit(void)
{ {
ESP_RETURN_ON_FALSE(s_ddp->intr_handle == NULL, ESP_ERR_INVALID_STATE, TAG, "The interrupt is not deregistered yet"); ESP_RETURN_ON_FALSE(s_ddp->intr_handle == NULL, ESP_ERR_INVALID_STATE, TAG, "The interrupt is not deregistered yet");
if (s_ddp->dma_chan) { if (s_ddp->dma_chan) {
ESP_RETURN_ON_ERROR(spicommon_dma_chan_free(DAC_DMA_PERIPH_SPI_HOST), TAG, "Failed to free dma peripheral channel"); ESP_RETURN_ON_ERROR(spicommon_dma_chan_free(s_ddp->spi_dma_ctx), TAG, "Failed to free dma peripheral channel");
} }
ESP_RETURN_ON_FALSE(spicommon_periph_free(DAC_DMA_PERIPH_SPI_HOST), ESP_FAIL, TAG, "Failed to release DAC DMA peripheral"); ESP_RETURN_ON_FALSE(spicommon_periph_free(DAC_DMA_PERIPH_SPI_HOST), ESP_FAIL, TAG, "Failed to release DAC DMA peripheral");
spi_ll_disable_intr(s_ddp->periph_dev, SPI_LL_INTR_OUT_EOF | SPI_LL_INTR_OUT_TOTAL_EOF); spi_ll_disable_intr(s_ddp->periph_dev, SPI_LL_INTR_OUT_EOF | SPI_LL_INTR_OUT_TOTAL_EOF);

View File

@@ -11,7 +11,8 @@ set(public_include "include")
if(CONFIG_SOC_GPSPI_SUPPORTED) if(CONFIG_SOC_GPSPI_SUPPORTED)
list(APPEND srcs "src/gpspi/spi_common.c" list(APPEND srcs "src/gpspi/spi_common.c"
"src/gpspi/spi_master.c" "src/gpspi/spi_master.c"
"src/gpspi/spi_slave.c") "src/gpspi/spi_slave.c"
"src/gpspi/spi_dma.c")
endif() endif()
if(CONFIG_SOC_SPI_SUPPORT_SLAVE_HD_VER2) if(CONFIG_SOC_SPI_SUPPORT_SLAVE_HD_VER2)

View File

@@ -13,7 +13,7 @@
#include "freertos/FreeRTOS.h" #include "freertos/FreeRTOS.h"
#include "hal/spi_types.h" #include "hal/spi_types.h"
#include "hal/dma_types.h" #include "hal/dma_types.h"
#include "soc/gdma_channel.h" #include "esp_private/spi_dma.h"
#include "esp_pm.h" #include "esp_pm.h"
#include "esp_private/spi_share_hw_ctrl.h" #include "esp_private/spi_share_hw_ctrl.h"
#if SOC_GDMA_SUPPORTED #if SOC_GDMA_SUPPORTED
@ -37,14 +37,13 @@ extern "C"
#define SPI_MASTER_ATTR #define SPI_MASTER_ATTR
#endif #endif
#if SOC_GDMA_TRIG_PERIPH_SPI2_BUS == SOC_GDMA_BUS_AHB //NOTE!! If both A and B are not defined, '#if (A==B)' is true, because GCC use 0 stand for undefined symbol
#define DMA_DESC_MEM_ALIGN_SIZE 4 #if SOC_GPSPI_SUPPORTED && defined(SOC_GDMA_BUS_AXI) && (SOC_GDMA_TRIG_PERIPH_SPI2_BUS == SOC_GDMA_BUS_AXI)
#define SPI_GDMA_NEW_CHANNEL gdma_new_ahb_channel #define DMA_DESC_MEM_ALIGN_SIZE 8
typedef dma_descriptor_align4_t spi_dma_desc_t;
#else
#define DMA_DESC_MEM_ALIGN_SIZE 8
#define SPI_GDMA_NEW_CHANNEL gdma_new_axi_channel
typedef dma_descriptor_align8_t spi_dma_desc_t; typedef dma_descriptor_align8_t spi_dma_desc_t;
#else
#define DMA_DESC_MEM_ALIGN_SIZE 4
typedef dma_descriptor_align4_t spi_dma_desc_t;
#endif #endif
/// Attributes of an SPI bus /// Attributes of an SPI bus
@@ -54,60 +53,64 @@ typedef struct {
int max_transfer_sz; ///< Maximum length of bytes available to send int max_transfer_sz; ///< Maximum length of bytes available to send
bool dma_enabled; ///< To enable DMA or not bool dma_enabled; ///< To enable DMA or not
uint16_t internal_mem_align_size; ///< Buffer align byte requirement for internal memory uint16_t internal_mem_align_size; ///< Buffer align byte requirement for internal memory
int tx_dma_chan; ///< TX DMA channel, on ESP32 and ESP32S2, tx_dma_chan and rx_dma_chan are same
int rx_dma_chan; ///< RX DMA channel, on ESP32 and ESP32S2, tx_dma_chan and rx_dma_chan are same
int dma_desc_num; ///< DMA descriptor number of dmadesc_tx or dmadesc_rx.
spi_dma_desc_t *dmadesc_tx; ///< DMA descriptor array for TX
spi_dma_desc_t *dmadesc_rx; ///< DMA descriptor array for RX
spi_bus_lock_handle_t lock; spi_bus_lock_handle_t lock;
#ifdef CONFIG_PM_ENABLE #ifdef CONFIG_PM_ENABLE
esp_pm_lock_handle_t pm_lock; ///< Power management lock esp_pm_lock_handle_t pm_lock; ///< Power management lock
#endif #endif
} spi_bus_attr_t; } spi_bus_attr_t;
typedef struct {
#if SOC_GDMA_SUPPORTED
gdma_channel_handle_t tx_dma_chan; ///< GDMA tx channel
gdma_channel_handle_t rx_dma_chan; ///< GDMA rx channel
#else
spi_dma_chan_handle_t tx_dma_chan; ///< TX DMA channel, on ESP32 and ESP32S2, tx_dma_chan and rx_dma_chan are same
spi_dma_chan_handle_t rx_dma_chan; ///< RX DMA channel, on ESP32 and ESP32S2, tx_dma_chan and rx_dma_chan are same
#endif
int dma_desc_num; ///< DMA descriptor number of dmadesc_tx or dmadesc_rx.
spi_dma_desc_t *dmadesc_tx; ///< DMA descriptor array for TX
spi_dma_desc_t *dmadesc_rx; ///< DMA descriptor array for RX
} spi_dma_ctx_t;
/// Destructor called when a bus is deinitialized. /// Destructor called when a bus is deinitialized.
typedef esp_err_t (*spi_destroy_func_t)(void*); typedef esp_err_t (*spi_destroy_func_t)(void*);
/** /**
* @brief Alloc DMA for SPI * @brief Alloc DMA channel for SPI
* *
* @param host_id SPI host ID * @param host_id SPI host ID
* @param dma_chan DMA channel to be used * @param dma_chan DMA channel to be used
* @param[out] out_actual_tx_dma_chan Actual TX DMA channel (if you choose to assign a specific DMA channel, this will be the channel you assigned before) * @param out_dma_ctx Actual DMA channel context (if you choose to assign a specific DMA channel, this will be the channel you assigned before)
* @param[out] out_actual_rx_dma_chan Actual RX DMA channel (if you choose to assign a specific DMA channel, this will be the channel you assigned before)
* *
* @return * @return
* - ESP_OK: On success * - ESP_OK: On success
* - ESP_ERR_NO_MEM: No enough memory * - ESP_ERR_NO_MEM: No enough memory
* - ESP_ERR_NOT_FOUND: There is no available DMA channel * - ESP_ERR_NOT_FOUND: There is no available DMA channel
*/ */
esp_err_t spicommon_dma_chan_alloc(spi_host_device_t host_id, spi_dma_chan_t dma_chan, uint32_t *out_actual_tx_dma_chan, uint32_t *out_actual_rx_dma_chan); esp_err_t spicommon_dma_chan_alloc(spi_host_device_t host_id, spi_dma_chan_t dma_chan, spi_dma_ctx_t **out_dma_ctx);
/**
* @brief Alloc DMA descriptors for SPI
*
* @param dma_ctx DMA context returned by `spicommon_dma_chan_alloc`
* @param[in] cfg_max_sz Expected maximum transfer size, in bytes.
* @param[out] actual_max_sz Actual max transfer size one transaction can be, in bytes.
*
* @return
* - ESP_OK: On success
* - ESP_ERR_NO_MEM: No enough memory
*/
esp_err_t spicommon_dma_desc_alloc(spi_dma_ctx_t *dma_ctx, int cfg_max_sz, int *actual_max_sz);
/** /**
* @brief Free DMA for SPI * @brief Free DMA for SPI
* *
* @param host_id SPI host ID * @param dma_ctx spi_dma_ctx_t struct pointer
* *
* @return * @return
* - ESP_OK: On success * - ESP_OK: On success
*/ */
esp_err_t spicommon_dma_chan_free(spi_host_device_t host_id); esp_err_t spicommon_dma_chan_free(spi_dma_ctx_t *dma_ctx);
#if SOC_GDMA_SUPPORTED
/**
* @brief Get SPI GDMA Handle for GMDA Supported Chip
*
* @param host_id SPI host ID
* @param gdma_handle GDMA Handle to Return
* @param gdma_direction GDMA Channel Direction in Enum
* - GDMA_CHANNEL_DIRECTION_TX
* - GDMA_CHANNEL_DIRECTION_RX
*
* @return
* - ESP_OK: On success
*/
esp_err_t spicommon_gdma_get_handle(spi_host_device_t host_id, gdma_channel_handle_t *gdma_handle, gdma_channel_direction_t gdma_direction);
#endif
/** /**
* @brief Connect a SPI peripheral to GPIO pins * @brief Connect a SPI peripheral to GPIO pins
@@ -272,6 +275,14 @@ void spi_bus_main_set_lock(spi_bus_lock_handle_t lock);
*/ */
const spi_bus_attr_t* spi_bus_get_attr(spi_host_device_t host_id); const spi_bus_attr_t* spi_bus_get_attr(spi_host_device_t host_id);
/**
* @brief Get the dma context of a specified SPI bus.
*
* @param host_id The specified host to get attribute
* @return (Const) Pointer to the dma context
*/
const spi_dma_ctx_t* spi_bus_get_dma_ctx(spi_host_device_t host_id);
/** /**
* @brief Register a function to a initialized bus to make it called when deinitializing the bus. * @brief Register a function to a initialized bus to make it called when deinitializing the bus.
* *

View File

@@ -0,0 +1,58 @@
/*
* SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include "stdbool.h"
#include "hal/spi_types.h"
#ifdef __cplusplus
extern "C" {
#endif
#if !SOC_GDMA_SUPPORTED
/**
* @brief Enumeration of SPI_DMA channel direction
*/
typedef enum {
DMA_CHANNEL_DIRECTION_TX, /*!< DMA channel direction: TX */
DMA_CHANNEL_DIRECTION_RX, /*!< DMA channel direction: RX */
} spi_dma_chan_dir_t;
typedef struct {
spi_host_device_t host_id;
spi_dma_chan_dir_t dir;
int chan_id;
} spi_dma_chan_handle_t;
/**
* Enable/Disable data/desc burst for spi_dma channel
*
* @param chan_handle Context of the spi_dma channel.
* @param data_burst enable or disable data burst
* @param desc_burst enable or disable desc burst
*/
void spi_dma_enable_burst(spi_dma_chan_handle_t chan_handle, bool data_burst, bool desc_burst);
/**
* Reset dma channel for spi_dma
*
* @param chan_handle Context of the spi_dma channel.
*/
void spi_dma_reset(spi_dma_chan_handle_t chan_handle);
/**
* Start dma channel for spi_dma
*
* @param chan_handle Context of the spi_dma channel.
* @param addr Addr of linked dma descriptor to mount
*/
void spi_dma_start(spi_dma_chan_handle_t chan_handle, void *addr);
#endif //!SOC_GDMA_SUPPORTED
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,7 @@
[mapping:gpspi_driver]
archive: libesp_driver_spi.a
entries:
if SOC_GDMA_SUPPORTED = n:
if SPI_MASTER_ISR_IN_IRAM = y || SPI_SLAVE_ISR_IN_IRAM = y:
spi_dma: spi_dma_reset (noflash)
spi_dma: spi_dma_start (noflash)

View File

@@ -10,6 +10,7 @@
#include "esp_types.h" #include "esp_types.h"
#include "esp_attr.h" #include "esp_attr.h"
#include "esp_check.h" #include "esp_check.h"
#include "esp_cache.h"
#include "esp_rom_gpio.h" #include "esp_rom_gpio.h"
#include "esp_heap_caps.h" #include "esp_heap_caps.h"
#include "soc/spi_periph.h" #include "soc/spi_periph.h"
@@ -18,6 +19,7 @@
#include "esp_private/periph_ctrl.h" #include "esp_private/periph_ctrl.h"
#include "esp_private/spi_common_internal.h" #include "esp_private/spi_common_internal.h"
#include "esp_private/spi_share_hw_ctrl.h" #include "esp_private/spi_share_hw_ctrl.h"
#include "esp_private/esp_cache_private.h"
#include "hal/spi_hal.h" #include "hal/spi_hal.h"
#include "hal/gpio_hal.h" #include "hal/gpio_hal.h"
#if CONFIG_IDF_TARGET_ESP32 #if CONFIG_IDF_TARGET_ESP32
@@ -25,8 +27,6 @@
#endif #endif
#if SOC_GDMA_SUPPORTED #if SOC_GDMA_SUPPORTED
#include "esp_private/gdma.h" #include "esp_private/gdma.h"
#include "hal/cache_hal.h"
#include "hal/cache_ll.h"
#endif #endif
static const char *SPI_TAG = "spi"; static const char *SPI_TAG = "spi";
@@ -42,10 +42,7 @@ static const char *SPI_TAG = "spi";
#define SPI_MAIN_BUS_DEFAULT() { \ #define SPI_MAIN_BUS_DEFAULT() { \
.host_id = 0, \ .host_id = 0, \
.bus_attr = { \ .bus_attr = { \
.tx_dma_chan = 0, \
.rx_dma_chan = 0, \
.max_transfer_sz = SOC_SPI_MAXIMUM_BUFFER_SIZE, \ .max_transfer_sz = SOC_SPI_MAXIMUM_BUFFER_SIZE, \
.dma_desc_num= 0, \
}, \ }, \
} }
@@ -56,6 +53,7 @@ typedef struct {
spi_destroy_func_t destroy_func; spi_destroy_func_t destroy_func;
void* destroy_arg; void* destroy_arg;
spi_bus_attr_t bus_attr; spi_bus_attr_t bus_attr;
spi_dma_ctx_t *dma_ctx;
#if SOC_GDMA_SUPPORTED #if SOC_GDMA_SUPPORTED
gdma_channel_handle_t tx_channel; gdma_channel_handle_t tx_channel;
gdma_channel_handle_t rx_channel; gdma_channel_handle_t rx_channel;
@@ -75,11 +73,19 @@ static __attribute__((constructor)) void spi_bus_lock_init_main_bus(void)
} }
#endif #endif
#if !SOC_GDMA_SUPPORTED #if SOC_GDMA_SUPPORTED
//NOTE!! If both A and B are not defined, '#if (A==B)' is true, because GCC use 0 stand for undefined symbol
#if defined(SOC_GDMA_BUS_AXI) && (SOC_GDMA_TRIG_PERIPH_SPI2_BUS == SOC_GDMA_BUS_AXI)
#define SPI_GDMA_NEW_CHANNEL gdma_new_axi_channel
#elif defined(SOC_GDMA_BUS_AHB) && (SOC_GDMA_TRIG_PERIPH_SPI2_BUS == SOC_GDMA_BUS_AHB)
#define SPI_GDMA_NEW_CHANNEL gdma_new_ahb_channel
#endif
#else
//Each bit stands for 1 dma channel, BIT(0) should be used for SPI1 //Each bit stands for 1 dma channel, BIT(0) should be used for SPI1
static uint8_t spi_dma_chan_enabled = 0; static uint8_t spi_dma_chan_enabled = 0;
static portMUX_TYPE spi_dma_spinlock = portMUX_INITIALIZER_UNLOCKED; static portMUX_TYPE spi_dma_spinlock = portMUX_INITIALIZER_UNLOCKED;
#endif //#if !SOC_GDMA_SUPPORTED #endif //!SOC_GDMA_SUPPORTED
static inline bool is_valid_host(spi_host_device_t host) static inline bool is_valid_host(spi_host_device_t host)
{ {
@@ -157,7 +163,7 @@ static void connect_spi_and_dma(spi_host_device_t host, int dma_chan)
#endif #endif
} }
static esp_err_t alloc_dma_chan(spi_host_device_t host_id, spi_dma_chan_t dma_chan, uint32_t *out_actual_tx_dma_chan, uint32_t *out_actual_rx_dma_chan) static esp_err_t alloc_dma_chan(spi_host_device_t host_id, spi_dma_chan_t dma_chan, spi_dma_ctx_t *dma_ctx)
{ {
assert(is_valid_host(host_id)); assert(is_valid_host(host_id));
#if CONFIG_IDF_TARGET_ESP32 #if CONFIG_IDF_TARGET_ESP32
@@ -187,60 +193,66 @@ static esp_err_t alloc_dma_chan(spi_host_device_t host_id, spi_dma_chan_t dma_ch
} }
//On ESP32 and ESP32S2, actual_tx_dma_chan and actual_rx_dma_chan are always same //On ESP32 and ESP32S2, actual_tx_dma_chan and actual_rx_dma_chan are always same
*out_actual_tx_dma_chan = actual_dma_chan; dma_ctx->tx_dma_chan.chan_id = actual_dma_chan;
*out_actual_rx_dma_chan = actual_dma_chan; dma_ctx->rx_dma_chan.chan_id = actual_dma_chan;
dma_ctx->tx_dma_chan.host_id = host_id;
dma_ctx->rx_dma_chan.host_id = host_id;
dma_ctx->tx_dma_chan.dir = DMA_CHANNEL_DIRECTION_TX;
dma_ctx->rx_dma_chan.dir = DMA_CHANNEL_DIRECTION_RX;
if (!success) { if (!success) {
SPI_CHECK(false, "no available dma channel", ESP_ERR_NOT_FOUND); SPI_CHECK(false, "no available dma channel", ESP_ERR_NOT_FOUND);
} }
connect_spi_and_dma(host_id, actual_dma_chan);
connect_spi_and_dma(host_id, *out_actual_tx_dma_chan); spi_dma_enable_burst(dma_ctx->tx_dma_chan, true, true);
spi_dma_enable_burst(dma_ctx->rx_dma_chan, true, true);
return ret; return ret;
} }
#else //SOC_GDMA_SUPPORTED #else //SOC_GDMA_SUPPORTED
static esp_err_t alloc_dma_chan(spi_host_device_t host_id, spi_dma_chan_t dma_chan, uint32_t *out_actual_tx_dma_chan, uint32_t *out_actual_rx_dma_chan) static esp_err_t alloc_dma_chan(spi_host_device_t host_id, spi_dma_chan_t dma_chan, spi_dma_ctx_t *dma_ctx)
{ {
assert(is_valid_host(host_id)); assert(is_valid_host(host_id));
assert(dma_chan == SPI_DMA_CH_AUTO); assert(dma_chan == SPI_DMA_CH_AUTO);
esp_err_t ret = ESP_OK; esp_err_t ret = ESP_OK;
spicommon_bus_context_t *ctx = bus_ctx[host_id];
if (dma_chan == SPI_DMA_CH_AUTO) { if (dma_chan == SPI_DMA_CH_AUTO) {
gdma_channel_alloc_config_t tx_alloc_config = { gdma_channel_alloc_config_t tx_alloc_config = {
.flags.reserve_sibling = 1, .flags.reserve_sibling = 1,
.direction = GDMA_CHANNEL_DIRECTION_TX, .direction = GDMA_CHANNEL_DIRECTION_TX,
}; };
ESP_RETURN_ON_ERROR(SPI_GDMA_NEW_CHANNEL(&tx_alloc_config, &ctx->tx_channel), SPI_TAG, "alloc gdma tx failed"); ESP_RETURN_ON_ERROR(SPI_GDMA_NEW_CHANNEL(&tx_alloc_config, &dma_ctx->tx_dma_chan), SPI_TAG, "alloc gdma tx failed");
gdma_channel_alloc_config_t rx_alloc_config = { gdma_channel_alloc_config_t rx_alloc_config = {
.direction = GDMA_CHANNEL_DIRECTION_RX, .direction = GDMA_CHANNEL_DIRECTION_RX,
.sibling_chan = ctx->tx_channel, .sibling_chan = dma_ctx->tx_dma_chan,
}; };
ESP_RETURN_ON_ERROR(SPI_GDMA_NEW_CHANNEL(&rx_alloc_config, &ctx->rx_channel), SPI_TAG, "alloc gdma rx failed"); ESP_RETURN_ON_ERROR(SPI_GDMA_NEW_CHANNEL(&rx_alloc_config, &dma_ctx->rx_dma_chan), SPI_TAG, "alloc gdma rx failed");
if (host_id == SPI2_HOST) { if (host_id == SPI2_HOST) {
gdma_connect(ctx->rx_channel, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_SPI, 2)); gdma_connect(dma_ctx->tx_dma_chan, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_SPI, 2));
gdma_connect(ctx->tx_channel, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_SPI, 2)); gdma_connect(dma_ctx->rx_dma_chan, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_SPI, 2));
} }
#if (SOC_SPI_PERIPH_NUM >= 3) #if (SOC_SPI_PERIPH_NUM >= 3)
else if (host_id == SPI3_HOST) { else if (host_id == SPI3_HOST) {
gdma_connect(ctx->rx_channel, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_SPI, 3)); gdma_connect(dma_ctx->tx_dma_chan, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_SPI, 3));
gdma_connect(ctx->tx_channel, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_SPI, 3)); gdma_connect(dma_ctx->rx_dma_chan, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_SPI, 3));
} }
#endif #endif
gdma_get_channel_id(ctx->tx_channel, (int *)out_actual_tx_dma_chan); gdma_transfer_ability_t ability = {
gdma_get_channel_id(ctx->rx_channel, (int *)out_actual_rx_dma_chan); .psram_trans_align = 0, // fall back to use the same size of the psram data cache line size
.sram_trans_align = 4,
};
ESP_RETURN_ON_ERROR(gdma_set_transfer_ability(dma_ctx->tx_dma_chan, &ability), SPI_TAG, "set gdma tx transfer ability failed");
ESP_RETURN_ON_ERROR(gdma_set_transfer_ability(dma_ctx->rx_dma_chan, &ability), SPI_TAG, "set gdma rx transfer ability failed");
} }
return ret; return ret;
} }
#endif //#if !SOC_GDMA_SUPPORTED #endif //#if !SOC_GDMA_SUPPORTED
esp_err_t spicommon_dma_chan_alloc(spi_host_device_t host_id, spi_dma_chan_t dma_chan, uint32_t *out_actual_tx_dma_chan, uint32_t *out_actual_rx_dma_chan) esp_err_t spicommon_dma_chan_alloc(spi_host_device_t host_id, spi_dma_chan_t dma_chan, spi_dma_ctx_t **out_dma_ctx)
{ {
assert(is_valid_host(host_id)); assert(is_valid_host(host_id));
#if CONFIG_IDF_TARGET_ESP32 #if CONFIG_IDF_TARGET_ESP32
@@ -250,60 +262,56 @@ esp_err_t spicommon_dma_chan_alloc(spi_host_device_t host_id, spi_dma_chan_t dma
#endif #endif
esp_err_t ret = ESP_OK; esp_err_t ret = ESP_OK;
uint32_t actual_tx_dma_chan = 0; spi_dma_ctx_t *dma_ctx = (spi_dma_ctx_t *)calloc(1, sizeof(spi_dma_ctx_t));
uint32_t actual_rx_dma_chan = 0; if (!dma_ctx) {
spicommon_bus_context_t *ctx = (spicommon_bus_context_t *)calloc(1, sizeof(spicommon_bus_context_t));
if (!ctx) {
ret = ESP_ERR_NO_MEM; ret = ESP_ERR_NO_MEM;
goto cleanup; goto cleanup;
} }
bus_ctx[host_id] = ctx;
ctx->host_id = host_id;
ret = alloc_dma_chan(host_id, dma_chan, &actual_tx_dma_chan, &actual_rx_dma_chan); ret = alloc_dma_chan(host_id, dma_chan, dma_ctx);
if (ret != ESP_OK) { if (ret != ESP_OK) {
goto cleanup; goto cleanup;
} }
ctx->bus_attr.tx_dma_chan = actual_tx_dma_chan; *out_dma_ctx = dma_ctx;
ctx->bus_attr.rx_dma_chan = actual_rx_dma_chan;
*out_actual_tx_dma_chan = actual_tx_dma_chan;
*out_actual_rx_dma_chan = actual_rx_dma_chan;
return ret; return ret;
cleanup: cleanup:
free(ctx); free(dma_ctx);
ctx = NULL;
return ret; return ret;
} }
#if SOC_GDMA_SUPPORTED esp_err_t spicommon_dma_desc_alloc(spi_dma_ctx_t *dma_ctx, int cfg_max_sz, int *actual_max_sz)
esp_err_t spicommon_gdma_get_handle(spi_host_device_t host_id, gdma_channel_handle_t *gdma_handle, gdma_channel_direction_t gdma_direction)
{ {
assert(is_valid_host(host_id)); int dma_desc_ct = (cfg_max_sz + DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED - 1) / DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED;
ESP_RETURN_ON_FALSE((gdma_direction == GDMA_CHANNEL_DIRECTION_TX) || \ if (dma_desc_ct == 0) {
(gdma_direction == GDMA_CHANNEL_DIRECTION_RX), \ dma_desc_ct = 1; //default to 4k when max is not given
ESP_ERR_INVALID_ARG, SPI_TAG, "GDMA Direction not supported!"); }
if (gdma_direction == GDMA_CHANNEL_DIRECTION_TX) { dma_ctx->dmadesc_tx = heap_caps_aligned_alloc(DMA_DESC_MEM_ALIGN_SIZE, sizeof(spi_dma_desc_t) * dma_desc_ct, MALLOC_CAP_DMA);
*gdma_handle = bus_ctx[host_id]->tx_channel; dma_ctx->dmadesc_rx = heap_caps_aligned_alloc(DMA_DESC_MEM_ALIGN_SIZE, sizeof(spi_dma_desc_t) * dma_desc_ct, MALLOC_CAP_DMA);
} if (dma_ctx->dmadesc_tx == NULL || dma_ctx->dmadesc_rx == NULL) {
if (gdma_direction == GDMA_CHANNEL_DIRECTION_RX) { if (dma_ctx->dmadesc_tx) {
*gdma_handle = bus_ctx[host_id]->rx_channel; free(dma_ctx->dmadesc_tx);
dma_ctx->dmadesc_tx = NULL;
}
if (dma_ctx->dmadesc_rx) {
free(dma_ctx->dmadesc_rx);
dma_ctx->dmadesc_rx = NULL;
}
return ESP_ERR_NO_MEM;
} }
dma_ctx->dma_desc_num = dma_desc_ct;
*actual_max_sz = dma_desc_ct * DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED;
return ESP_OK; return ESP_OK;
} }
#endif // SOC_GDMA_SUPPORTED
//----------------------------------------------------------free dma periph-------------------------------------------------------// //----------------------------------------------------------free dma periph-------------------------------------------------------//
static esp_err_t dma_chan_free(spi_host_device_t host_id) esp_err_t spicommon_dma_chan_free(spi_dma_ctx_t *dma_ctx)
{ {
assert(is_valid_host(host_id)); assert(dma_ctx);
spicommon_bus_context_t *ctx = bus_ctx[host_id];
#if !SOC_GDMA_SUPPORTED #if !SOC_GDMA_SUPPORTED
//On ESP32S2, each SPI controller has its own DMA channel //On ESP32S2, each SPI controller has its own DMA channel
int dma_chan = ctx->bus_attr.tx_dma_chan; int dma_chan = dma_ctx->tx_dma_chan.chan_id;
assert(spi_dma_chan_enabled & BIT(dma_chan)); assert(spi_dma_chan_enabled & BIT(dma_chan));
portENTER_CRITICAL(&spi_dma_spinlock); portENTER_CRITICAL(&spi_dma_spinlock);
@@ -311,41 +319,31 @@ static esp_err_t dma_chan_free(spi_host_device_t host_id)
#if SPI_LL_DMA_SHARED #if SPI_LL_DMA_SHARED
PERIPH_RCC_RELEASE_ATOMIC(get_dma_periph(dma_chan), ref_count) { PERIPH_RCC_RELEASE_ATOMIC(get_dma_periph(dma_chan), ref_count) {
if (ref_count == 0) { if (ref_count == 0) {
spi_dma_ll_enable_bus_clock(host_id, false); spi_dma_ll_enable_bus_clock(dma_ctx->tx_dma_chan.host_id, false);
} }
} }
#else #else
SPI_COMMON_RCC_CLOCK_ATOMIC() { SPI_COMMON_RCC_CLOCK_ATOMIC() {
spi_dma_ll_enable_bus_clock(host_id, false); spi_dma_ll_enable_bus_clock(dma_ctx->tx_dma_chan.host_id, false);
} }
#endif #endif
portEXIT_CRITICAL(&spi_dma_spinlock); portEXIT_CRITICAL(&spi_dma_spinlock);
#else //SOC_GDMA_SUPPORTED #else //SOC_GDMA_SUPPORTED
if (ctx->rx_channel) { if (dma_ctx->rx_dma_chan) {
gdma_disconnect(ctx->rx_channel); gdma_disconnect(dma_ctx->rx_dma_chan);
gdma_del_channel(ctx->rx_channel); gdma_del_channel(dma_ctx->rx_dma_chan);
} }
if (ctx->tx_channel) { if (dma_ctx->tx_dma_chan) {
gdma_disconnect(ctx->tx_channel); gdma_disconnect(dma_ctx->tx_dma_chan);
gdma_del_channel(ctx->tx_channel); gdma_del_channel(dma_ctx->tx_dma_chan);
} }
#endif #endif
free(dma_ctx);
return ESP_OK; return ESP_OK;
} }
esp_err_t spicommon_dma_chan_free(spi_host_device_t host_id)
{
assert(is_valid_host(host_id));
esp_err_t ret = dma_chan_free(host_id);
free(bus_ctx[host_id]);
bus_ctx[host_id] = NULL;
return ret;
}
//----------------------------------------------------------IO general-------------------------------------------------------// //----------------------------------------------------------IO general-------------------------------------------------------//
#if SOC_SPI_SUPPORT_OCT #if SOC_SPI_SUPPORT_OCT
static bool check_iomux_pins_oct(spi_host_device_t host, const spi_bus_config_t* bus_config) static bool check_iomux_pins_oct(spi_host_device_t host, const spi_bus_config_t* bus_config)
@@ -757,8 +755,6 @@ esp_err_t spi_bus_initialize(spi_host_device_t host_id, const spi_bus_config_t *
esp_err_t err = ESP_OK; esp_err_t err = ESP_OK;
spicommon_bus_context_t *ctx = NULL; spicommon_bus_context_t *ctx = NULL;
spi_bus_attr_t *bus_attr = NULL; spi_bus_attr_t *bus_attr = NULL;
uint32_t actual_tx_dma_chan = 0;
uint32_t actual_rx_dma_chan = 0;
SPI_CHECK(is_valid_host(host_id), "invalid host_id", ESP_ERR_INVALID_ARG); SPI_CHECK(is_valid_host(host_id), "invalid host_id", ESP_ERR_INVALID_ARG);
SPI_CHECK(bus_ctx[host_id] == NULL, "SPI bus already initialized.", ESP_ERR_INVALID_STATE); SPI_CHECK(bus_ctx[host_id] == NULL, "SPI bus already initialized.", ESP_ERR_INVALID_STATE);
@@ -791,35 +787,22 @@ esp_err_t spi_bus_initialize(spi_host_device_t host_id, const spi_bus_config_t *
if (dma_chan != SPI_DMA_DISABLED) { if (dma_chan != SPI_DMA_DISABLED) {
bus_attr->dma_enabled = 1; bus_attr->dma_enabled = 1;
err = alloc_dma_chan(host_id, dma_chan, &actual_tx_dma_chan, &actual_rx_dma_chan); err = spicommon_dma_chan_alloc(host_id, dma_chan, &ctx->dma_ctx);
if (err != ESP_OK) { if (err != ESP_OK) {
goto cleanup; goto cleanup;
} }
bus_attr->tx_dma_chan = actual_tx_dma_chan; err = spicommon_dma_desc_alloc(ctx->dma_ctx, bus_config->max_transfer_sz, &bus_attr->max_transfer_sz);
bus_attr->rx_dma_chan = actual_rx_dma_chan; if (err != ESP_OK) {
int dma_desc_ct = (bus_config->max_transfer_sz + DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED - 1) / DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED;
if (dma_desc_ct == 0) {
dma_desc_ct = 1; //default to 4k when max is not given
}
bus_attr->max_transfer_sz = dma_desc_ct * DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED;
bus_attr->dmadesc_tx = heap_caps_aligned_alloc(DMA_DESC_MEM_ALIGN_SIZE, sizeof(spi_dma_desc_t) * dma_desc_ct, MALLOC_CAP_DMA);
bus_attr->dmadesc_rx = heap_caps_aligned_alloc(DMA_DESC_MEM_ALIGN_SIZE, sizeof(spi_dma_desc_t) * dma_desc_ct, MALLOC_CAP_DMA);
if (bus_attr->dmadesc_tx == NULL || bus_attr->dmadesc_rx == NULL) {
err = ESP_ERR_NO_MEM;
goto cleanup; goto cleanup;
} }
bus_attr->dma_desc_num = dma_desc_ct;
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE #if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
bus_attr->internal_mem_align_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA); esp_cache_get_alignment(ESP_CACHE_MALLOC_FLAG_DMA, (size_t *)&bus_attr->internal_mem_align_size);
#else #else
bus_attr->internal_mem_align_size = 4; bus_attr->internal_mem_align_size = 4;
#endif #endif
} else { } else {
bus_attr->dma_enabled = 0; bus_attr->dma_enabled = 0;
bus_attr->max_transfer_sz = SOC_SPI_MAXIMUM_BUFFER_SIZE; bus_attr->max_transfer_sz = SOC_SPI_MAXIMUM_BUFFER_SIZE;
bus_attr->dma_desc_num = 0;
} }
spi_bus_lock_config_t lock_config = { spi_bus_lock_config_t lock_config = {
@ -854,12 +837,11 @@ cleanup:
if (bus_attr->lock) { if (bus_attr->lock) {
spi_bus_deinit_lock(bus_attr->lock); spi_bus_deinit_lock(bus_attr->lock);
} }
free(bus_attr->dmadesc_tx); if (ctx->dma_ctx) {
free(bus_attr->dmadesc_rx); free(ctx->dma_ctx->dmadesc_tx);
bus_attr->dmadesc_tx = NULL; free(ctx->dma_ctx->dmadesc_rx);
bus_attr->dmadesc_rx = NULL; spicommon_dma_chan_free(ctx->dma_ctx);
if (bus_attr->dma_enabled) { ctx->dma_ctx = NULL;
dma_chan_free(host_id);
} }
} }
spicommon_periph_free(host_id); spicommon_periph_free(host_id);
@ -877,6 +859,15 @@ const spi_bus_attr_t* spi_bus_get_attr(spi_host_device_t host_id)
return &bus_ctx[host_id]->bus_attr; return &bus_ctx[host_id]->bus_attr;
} }
const spi_dma_ctx_t* spi_bus_get_dma_ctx(spi_host_device_t host_id)
{
if (bus_ctx[host_id] == NULL) {
return NULL;
}
return bus_ctx[host_id]->dma_ctx;
}
esp_err_t spi_bus_free(spi_host_device_t host_id) esp_err_t spi_bus_free(spi_host_device_t host_id)
{ {
if (bus_ctx[host_id] == NULL) { if (bus_ctx[host_id] == NULL) {
@ -890,19 +881,17 @@ esp_err_t spi_bus_free(spi_host_device_t host_id)
if (ctx->destroy_func) { if (ctx->destroy_func) {
err = ctx->destroy_func(ctx->destroy_arg); err = ctx->destroy_func(ctx->destroy_arg);
} }
spicommon_bus_free_io_cfg(&bus_attr->bus_cfg); spicommon_bus_free_io_cfg(&bus_attr->bus_cfg);
#ifdef CONFIG_PM_ENABLE #ifdef CONFIG_PM_ENABLE
esp_pm_lock_delete(bus_attr->pm_lock); esp_pm_lock_delete(bus_attr->pm_lock);
#endif #endif
spi_bus_deinit_lock(bus_attr->lock); spi_bus_deinit_lock(bus_attr->lock);
free(bus_attr->dmadesc_rx); if (ctx->dma_ctx) {
free(bus_attr->dmadesc_tx); free(ctx->dma_ctx->dmadesc_tx);
bus_attr->dmadesc_tx = NULL; free(ctx->dma_ctx->dmadesc_rx);
bus_attr->dmadesc_rx = NULL; spicommon_dma_chan_free(ctx->dma_ctx);
if (bus_attr->dma_enabled > 0) { ctx->dma_ctx = NULL;
dma_chan_free(host_id);
} }
spicommon_periph_free(host_id); spicommon_periph_free(host_id);
free(ctx); free(ctx);

View File

@ -0,0 +1,47 @@
/*
* SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "esp_private/spi_dma.h"
#include "hal/spi_ll.h"
#if !SOC_GDMA_SUPPORTED
void spi_dma_enable_burst(spi_dma_chan_handle_t chan_handle, bool data_burst, bool desc_burst)
{
spi_dma_dev_t *spi_dma = SPI_LL_GET_HW(chan_handle.host_id);
if (chan_handle.dir == DMA_CHANNEL_DIRECTION_TX) {
spi_dma_ll_tx_enable_burst_data(spi_dma, chan_handle.chan_id, data_burst);
spi_dma_ll_tx_enable_burst_desc(spi_dma, chan_handle.chan_id, desc_burst);
} else {
spi_dma_ll_rx_enable_burst_data(spi_dma, chan_handle.chan_id, data_burst);
spi_dma_ll_rx_enable_burst_desc(spi_dma, chan_handle.chan_id, desc_burst);
}
}
/************************************* IRAM CONTEXT **************************************/
void spi_dma_reset(spi_dma_chan_handle_t chan_handle)
{
spi_dma_dev_t *spi_dma = SPI_LL_GET_HW(chan_handle.host_id);
if (chan_handle.dir == DMA_CHANNEL_DIRECTION_TX) {
spi_dma_ll_tx_reset(spi_dma, chan_handle.chan_id);
} else {
spi_dma_ll_rx_reset(spi_dma, chan_handle.chan_id);
}
}
void spi_dma_start(spi_dma_chan_handle_t chan_handle, void *addr)
{
spi_dma_dev_t *spi_dma = SPI_LL_GET_HW(chan_handle.host_id);
if (chan_handle.dir == DMA_CHANNEL_DIRECTION_TX) {
spi_dma_ll_tx_start(spi_dma, chan_handle.chan_id, (lldesc_t *)addr);
} else {
spi_dma_ll_rx_start(spi_dma, chan_handle.chan_id, (lldesc_t *)addr);
}
}
#endif

View File

@ -149,6 +149,7 @@ typedef struct {
spi_trans_priv_t cur_trans_buf; spi_trans_priv_t cur_trans_buf;
int cur_cs; //current device doing transaction int cur_cs; //current device doing transaction
const spi_bus_attr_t* bus_attr; const spi_bus_attr_t* bus_attr;
const spi_dma_ctx_t *dma_ctx;
/** /**
* the bus is permanently controlled by a device until `spi_bus_release_bus`` is called. Otherwise * the bus is permanently controlled by a device until `spi_bus_release_bus`` is called. Otherwise
@ -221,6 +222,7 @@ static esp_err_t spi_master_init_driver(spi_host_device_t host_id)
esp_err_t err = ESP_OK; esp_err_t err = ESP_OK;
const spi_bus_attr_t* bus_attr = spi_bus_get_attr(host_id); const spi_bus_attr_t* bus_attr = spi_bus_get_attr(host_id);
const spi_dma_ctx_t *dma_ctx = spi_bus_get_dma_ctx(host_id);
SPI_CHECK(bus_attr != NULL, "host_id not initialized", ESP_ERR_INVALID_STATE); SPI_CHECK(bus_attr != NULL, "host_id not initialized", ESP_ERR_INVALID_STATE);
SPI_CHECK(bus_attr->lock != NULL, "SPI Master cannot attach to bus. (Check CONFIG_SPI_FLASH_SHARE_SPI1_BUS)", ESP_ERR_INVALID_ARG); SPI_CHECK(bus_attr->lock != NULL, "SPI Master cannot attach to bus. (Check CONFIG_SPI_FLASH_SHARE_SPI1_BUS)", ESP_ERR_INVALID_ARG);
// spihost contains atomic variables, which should not be put in PSRAM // spihost contains atomic variables, which should not be put in PSRAM
@ -236,6 +238,7 @@ static esp_err_t spi_master_init_driver(spi_host_device_t host_id)
.polling = false, .polling = false,
.device_acquiring_lock = NULL, .device_acquiring_lock = NULL,
.bus_attr = bus_attr, .bus_attr = bus_attr,
.dma_ctx = dma_ctx,
}; };
// interrupts are not allowed on SPI1 bus // interrupts are not allowed on SPI1 bus
@ -259,17 +262,24 @@ static esp_err_t spi_master_init_driver(spi_host_device_t host_id)
} }
//assign the SPI, RX DMA and TX DMA peripheral registers beginning address //assign the SPI, RX DMA and TX DMA peripheral registers beginning address
spi_hal_config_t hal_config = { spi_hal_config_t hal_config = { .dma_enabled = bus_attr->dma_enabled, };
if (bus_attr->dma_enabled && dma_ctx) {
hal_config.dmadesc_tx = dma_ctx->dmadesc_tx;
hal_config.dmadesc_rx = dma_ctx->dmadesc_rx;
hal_config.dmadesc_n = dma_ctx->dma_desc_num;
#if SOC_GDMA_SUPPORTED
//temporary used for gdma_ll alias in hal layer
gdma_get_channel_id(dma_ctx->tx_dma_chan, (int *)&hal_config.tx_dma_chan);
gdma_get_channel_id(dma_ctx->rx_dma_chan, (int *)&hal_config.rx_dma_chan);
#else
//On ESP32-S2 and earlier chips, DMA registers are part of SPI registers. Pass the registers of SPI peripheral to control it. //On ESP32-S2 and earlier chips, DMA registers are part of SPI registers. Pass the registers of SPI peripheral to control it.
.dma_in = SPI_LL_GET_HW(host_id), hal_config.dma_in = SPI_LL_GET_HW(host_id);
.dma_out = SPI_LL_GET_HW(host_id), hal_config.dma_out = SPI_LL_GET_HW(host_id);
.dma_enabled = bus_attr->dma_enabled, hal_config.tx_dma_chan = dma_ctx->tx_dma_chan.chan_id;
.dmadesc_tx = bus_attr->dmadesc_tx, hal_config.rx_dma_chan = dma_ctx->rx_dma_chan.chan_id;
.dmadesc_rx = bus_attr->dmadesc_rx, #endif
.tx_dma_chan = bus_attr->tx_dma_chan, }
.rx_dma_chan = bus_attr->rx_dma_chan,
.dmadesc_n = bus_attr->dma_desc_num,
};
SPI_MASTER_PERI_CLOCK_ATOMIC() { SPI_MASTER_PERI_CLOCK_ATOMIC() {
spi_ll_enable_clock(host_id, true); spi_ll_enable_clock(host_id, true);
} }
@ -633,8 +643,8 @@ static void SPI_MASTER_ISR_ATTR spi_new_trans(spi_device_t *dev, spi_trans_priv_
spi_hal_trans_config_t hal_trans = {}; spi_hal_trans_config_t hal_trans = {};
hal_trans.tx_bitlen = trans->length; hal_trans.tx_bitlen = trans->length;
hal_trans.rx_bitlen = trans->rxlength; hal_trans.rx_bitlen = trans->rxlength;
hal_trans.rcv_buffer = (uint8_t*)host->cur_trans_buf.buffer_to_rcv; hal_trans.rcv_buffer = (uint8_t*)trans_buf->buffer_to_rcv;
hal_trans.send_buffer = (uint8_t*)host->cur_trans_buf.buffer_to_send; hal_trans.send_buffer = (uint8_t*)trans_buf->buffer_to_send;
hal_trans.cmd = trans->cmd; hal_trans.cmd = trans->cmd;
hal_trans.addr = trans->addr; hal_trans.addr = trans->addr;
hal_trans.cs_keep_active = (trans->flags & SPI_TRANS_CS_KEEP_ACTIVE) ? 1 : 0; hal_trans.cs_keep_active = (trans->flags & SPI_TRANS_CS_KEEP_ACTIVE) ? 1 : 0;
@ -699,6 +709,10 @@ static void SPI_MASTER_ISR_ATTR spi_intr(void *arg)
BaseType_t do_yield = pdFALSE; BaseType_t do_yield = pdFALSE;
spi_host_t *host = (spi_host_t *)arg; spi_host_t *host = (spi_host_t *)arg;
const spi_bus_attr_t* bus_attr = host->bus_attr; const spi_bus_attr_t* bus_attr = host->bus_attr;
#if CONFIG_IDF_TARGET_ESP32
//only for esp32 dma workaround usage
const spi_dma_ctx_t *dma_ctx = host->dma_ctx;
#endif
assert(spi_hal_usr_is_done(&host->hal)); assert(spi_hal_usr_is_done(&host->hal));
@ -720,7 +734,7 @@ static void SPI_MASTER_ISR_ATTR spi_intr(void *arg)
if (bus_attr->dma_enabled) { if (bus_attr->dma_enabled) {
#if CONFIG_IDF_TARGET_ESP32 #if CONFIG_IDF_TARGET_ESP32
//This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same //This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same
spicommon_dmaworkaround_idle(bus_attr->tx_dma_chan); spicommon_dmaworkaround_idle(dma_ctx->tx_dma_chan.chan_id);
#endif //#if CONFIG_IDF_TARGET_ESP32 #endif //#if CONFIG_IDF_TARGET_ESP32
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE //invalidate here to let user access rx data in post_cb if possible #if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE //invalidate here to let user access rx data in post_cb if possible
@ -793,7 +807,7 @@ static void SPI_MASTER_ISR_ATTR spi_intr(void *arg)
if (bus_attr->dma_enabled && (cur_trans_buf->buffer_to_rcv || cur_trans_buf->buffer_to_send)) { if (bus_attr->dma_enabled && (cur_trans_buf->buffer_to_rcv || cur_trans_buf->buffer_to_send)) {
//mark channel as active, so that the DMA will not be reset by the slave //mark channel as active, so that the DMA will not be reset by the slave
//This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same //This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same
spicommon_dmaworkaround_transfer_active(bus_attr->tx_dma_chan); spicommon_dmaworkaround_transfer_active(dma_ctx->tx_dma_chan.chan_id);
} }
#endif //#if CONFIG_IDF_TARGET_ESP32 #endif //#if CONFIG_IDF_TARGET_ESP32
spi_new_trans(device_to_send, cur_trans_buf); spi_new_trans(device_to_send, cur_trans_buf);
@ -1081,7 +1095,7 @@ esp_err_t SPI_MASTER_ISR_ATTR spi_device_acquire_bus(spi_device_t *device, TickT
#if CONFIG_IDF_TARGET_ESP32 #if CONFIG_IDF_TARGET_ESP32
if (host->bus_attr->dma_enabled) { if (host->bus_attr->dma_enabled) {
//This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same //This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same
spicommon_dmaworkaround_transfer_active(host->bus_attr->tx_dma_chan); spicommon_dmaworkaround_transfer_active(host->dma_ctx->tx_dma_chan.chan_id);
} }
#endif //#if CONFIG_IDF_TARGET_ESP32 #endif //#if CONFIG_IDF_TARGET_ESP32
@ -1101,7 +1115,7 @@ void SPI_MASTER_ISR_ATTR spi_device_release_bus(spi_device_t *dev)
#if CONFIG_IDF_TARGET_ESP32 #if CONFIG_IDF_TARGET_ESP32
if (host->bus_attr->dma_enabled) { if (host->bus_attr->dma_enabled) {
//This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same //This workaround is only for esp32, where tx_dma_chan and rx_dma_chan are always same
spicommon_dmaworkaround_idle(host->bus_attr->tx_dma_chan); spicommon_dmaworkaround_idle(host->dma_ctx->tx_dma_chan.chan_id);
} }
//Tell common code DMA workaround that our DMA channel is idle. If needed, the code will do a DMA reset. //Tell common code DMA workaround that our DMA channel is idle. If needed, the code will do a DMA reset.
#endif //#if CONFIG_IDF_TARGET_ESP32 #endif //#if CONFIG_IDF_TARGET_ESP32

View File

@ -59,6 +59,7 @@ typedef struct {
typedef struct { typedef struct {
int id; int id;
spi_bus_config_t bus_config; spi_bus_config_t bus_config;
spi_dma_ctx_t *dma_ctx;
spi_slave_interface_config_t cfg; spi_slave_interface_config_t cfg;
intr_handle_t intr; intr_handle_t intr;
spi_slave_hal_context_t hal; spi_slave_hal_context_t hal;
@ -72,8 +73,6 @@ typedef struct {
bool cs_iomux; bool cs_iomux;
uint8_t cs_in_signal; uint8_t cs_in_signal;
uint16_t internal_mem_align_size; uint16_t internal_mem_align_size;
uint32_t tx_dma_chan;
uint32_t rx_dma_chan;
#ifdef CONFIG_PM_ENABLE #ifdef CONFIG_PM_ENABLE
esp_pm_lock_handle_t pm_lock; esp_pm_lock_handle_t pm_lock;
#endif #endif
@ -133,8 +132,6 @@ static void ipc_isr_reg_to_core(void *args)
esp_err_t spi_slave_initialize(spi_host_device_t host, const spi_bus_config_t *bus_config, const spi_slave_interface_config_t *slave_config, spi_dma_chan_t dma_chan) esp_err_t spi_slave_initialize(spi_host_device_t host, const spi_bus_config_t *bus_config, const spi_slave_interface_config_t *slave_config, spi_dma_chan_t dma_chan)
{ {
bool spi_chan_claimed; bool spi_chan_claimed;
uint32_t actual_tx_dma_chan = 0;
uint32_t actual_rx_dma_chan = 0;
esp_err_t ret = ESP_OK; esp_err_t ret = ESP_OK;
esp_err_t err; esp_err_t err;
SPI_CHECK(is_valid_host(host), "invalid host", ESP_ERR_INVALID_ARG); SPI_CHECK(is_valid_host(host), "invalid host", ESP_ERR_INVALID_ARG);
@ -172,19 +169,27 @@ esp_err_t spi_slave_initialize(spi_host_device_t host, const spi_bus_config_t *b
spihost[host]->dma_enabled = (dma_chan != SPI_DMA_DISABLED); spihost[host]->dma_enabled = (dma_chan != SPI_DMA_DISABLED);
if (spihost[host]->dma_enabled) { if (spihost[host]->dma_enabled) {
ret = spicommon_dma_chan_alloc(host, dma_chan, &actual_tx_dma_chan, &actual_rx_dma_chan); ret = spicommon_dma_chan_alloc(host, dma_chan, &spihost[host]->dma_ctx);
if (ret != ESP_OK) { if (ret != ESP_OK) {
goto cleanup; goto cleanup;
} }
spihost[host]->tx_dma_chan = actual_tx_dma_chan; ret = spicommon_dma_desc_alloc(spihost[host]->dma_ctx, bus_config->max_transfer_sz, &spihost[host]->max_transfer_sz);
spihost[host]->rx_dma_chan = actual_rx_dma_chan; if (ret != ESP_OK) {
goto cleanup;
//See how many dma descriptors we need and allocate them
int dma_desc_ct = (bus_config->max_transfer_sz + SPI_MAX_DMA_LEN - 1) / SPI_MAX_DMA_LEN;
if (dma_desc_ct == 0) {
dma_desc_ct = 1; //default to 4k when max is not given
} }
spihost[host]->max_transfer_sz = dma_desc_ct * SPI_MAX_DMA_LEN;
hal->dmadesc_tx = spihost[host]->dma_ctx->dmadesc_tx;
hal->dmadesc_rx = spihost[host]->dma_ctx->dmadesc_rx;
hal->dmadesc_n = spihost[host]->dma_ctx->dma_desc_num;
#if SOC_GDMA_SUPPORTED
//temporary used for gdma_ll alias in hal layer
gdma_get_channel_id(spihost[host]->dma_ctx->tx_dma_chan, (int *)&hal->tx_dma_chan);
gdma_get_channel_id(spihost[host]->dma_ctx->rx_dma_chan, (int *)&hal->rx_dma_chan);
#else
hal->tx_dma_chan = spihost[host]->dma_ctx->tx_dma_chan.chan_id;
hal->rx_dma_chan = spihost[host]->dma_ctx->rx_dma_chan.chan_id;
#endif
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE #if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
size_t alignment; size_t alignment;
esp_cache_get_alignment(ESP_CACHE_MALLOC_FLAG_DMA, &alignment); esp_cache_get_alignment(ESP_CACHE_MALLOC_FLAG_DMA, &alignment);
@ -192,14 +197,6 @@ esp_err_t spi_slave_initialize(spi_host_device_t host, const spi_bus_config_t *b
#else #else
spihost[host]->internal_mem_align_size = 4; spihost[host]->internal_mem_align_size = 4;
#endif #endif
hal->dmadesc_tx = heap_caps_aligned_alloc(DMA_DESC_MEM_ALIGN_SIZE, sizeof(spi_dma_desc_t) * dma_desc_ct, MALLOC_CAP_DMA);
hal->dmadesc_rx = heap_caps_aligned_alloc(DMA_DESC_MEM_ALIGN_SIZE, sizeof(spi_dma_desc_t) * dma_desc_ct, MALLOC_CAP_DMA);
if (!hal->dmadesc_tx || !hal->dmadesc_rx) {
ret = ESP_ERR_NO_MEM;
goto cleanup;
}
hal->dmadesc_n = dma_desc_ct;
} else { } else {
//We're limited to non-DMA transfers: the SPI work registers can hold 64 bytes at most. //We're limited to non-DMA transfers: the SPI work registers can hold 64 bytes at most.
spihost[host]->max_transfer_sz = SOC_SPI_MAXIMUM_BUFFER_SIZE; spihost[host]->max_transfer_sz = SOC_SPI_MAXIMUM_BUFFER_SIZE;
@ -278,9 +275,6 @@ esp_err_t spi_slave_initialize(spi_host_device_t host, const spi_bus_config_t *b
hal->tx_lsbfirst = (slave_config->flags & SPI_SLAVE_TXBIT_LSBFIRST) ? 1 : 0; hal->tx_lsbfirst = (slave_config->flags & SPI_SLAVE_TXBIT_LSBFIRST) ? 1 : 0;
hal->mode = slave_config->mode; hal->mode = slave_config->mode;
hal->use_dma = spihost[host]->dma_enabled; hal->use_dma = spihost[host]->dma_enabled;
hal->tx_dma_chan = actual_tx_dma_chan;
hal->rx_dma_chan = actual_rx_dma_chan;
spi_slave_hal_setup_device(hal); spi_slave_hal_setup_device(hal);
return ESP_OK; return ESP_OK;
@ -301,9 +295,9 @@ cleanup:
} }
spi_slave_hal_deinit(&spihost[host]->hal); spi_slave_hal_deinit(&spihost[host]->hal);
if (spihost[host]->dma_enabled) { if (spihost[host]->dma_enabled) {
spicommon_dma_chan_free(host); free(spihost[host]->dma_ctx->dmadesc_tx);
free(spihost[host]->hal.dmadesc_tx); free(spihost[host]->dma_ctx->dmadesc_rx);
free(spihost[host]->hal.dmadesc_rx); spicommon_dma_chan_free(spihost[host]->dma_ctx);
} }
free(spihost[host]); free(spihost[host]);
@ -324,9 +318,9 @@ esp_err_t spi_slave_free(spi_host_device_t host)
vQueueDelete(spihost[host]->ret_queue); vQueueDelete(spihost[host]->ret_queue);
} }
if (spihost[host]->dma_enabled) { if (spihost[host]->dma_enabled) {
spicommon_dma_chan_free(host); free(spihost[host]->dma_ctx->dmadesc_tx);
free(spihost[host]->hal.dmadesc_tx); free(spihost[host]->dma_ctx->dmadesc_rx);
free(spihost[host]->hal.dmadesc_rx); spicommon_dma_chan_free(spihost[host]->dma_ctx);
} }
spicommon_bus_free_io_cfg(&spihost[host]->bus_config); spicommon_bus_free_io_cfg(&spihost[host]->bus_config);
esp_intr_free(spihost[host]->intr); esp_intr_free(spihost[host]->intr);
@ -586,7 +580,7 @@ static void SPI_SLAVE_ISR_ATTR spi_intr(void *arg)
//This workaround is only for esp32 //This workaround is only for esp32
if (spi_slave_hal_dma_need_reset(hal)) { if (spi_slave_hal_dma_need_reset(hal)) {
//On ESP32, actual_tx_dma_chan and actual_rx_dma_chan are always same //On ESP32, actual_tx_dma_chan and actual_rx_dma_chan are always same
spicommon_dmaworkaround_req_reset(host->tx_dma_chan, spi_slave_restart_after_dmareset, host); spicommon_dmaworkaround_req_reset(host->dma_ctx->tx_dma_chan.chan_id, spi_slave_restart_after_dmareset, host);
} }
#endif //#if CONFIG_IDF_TARGET_ESP32 #endif //#if CONFIG_IDF_TARGET_ESP32
@ -614,7 +608,7 @@ static void SPI_SLAVE_ISR_ATTR spi_intr(void *arg)
//This workaround is only for esp32 //This workaround is only for esp32
if (use_dma) { if (use_dma) {
//On ESP32, actual_tx_dma_chan and actual_rx_dma_chan are always same //On ESP32, actual_tx_dma_chan and actual_rx_dma_chan are always same
spicommon_dmaworkaround_idle(host->tx_dma_chan); spicommon_dmaworkaround_idle(host->dma_ctx->tx_dma_chan.chan_id);
if (spicommon_dmaworkaround_reset_in_progress()) { if (spicommon_dmaworkaround_reset_in_progress()) {
//We need to wait for the reset to complete. Disable int (will be re-enabled on reset callback) and exit isr. //We need to wait for the reset to complete. Disable int (will be re-enabled on reset callback) and exit isr.
esp_intr_disable(host->intr); esp_intr_disable(host->intr);
@ -649,7 +643,7 @@ static void SPI_SLAVE_ISR_ATTR spi_intr(void *arg)
if (use_dma) { if (use_dma) {
//This workaround is only for esp32 //This workaround is only for esp32
//On ESP32, actual_tx_dma_chan and actual_rx_dma_chan are always same //On ESP32, actual_tx_dma_chan and actual_rx_dma_chan are always same
spicommon_dmaworkaround_transfer_active(host->tx_dma_chan); spicommon_dmaworkaround_transfer_active(host->dma_ctx->tx_dma_chan.chan_id);
} }
#endif //#if CONFIG_IDF_TARGET_ESP32 #endif //#if CONFIG_IDF_TARGET_ESP32

View File

@ -36,15 +36,12 @@ typedef struct {
typedef struct { typedef struct {
bool dma_enabled; bool dma_enabled;
spi_dma_ctx_t *dma_ctx;
uint16_t internal_mem_align_size; uint16_t internal_mem_align_size;
int max_transfer_sz; int max_transfer_sz;
uint32_t flags; uint32_t flags;
portMUX_TYPE int_spinlock; portMUX_TYPE int_spinlock;
intr_handle_t intr; intr_handle_t intr;
#if SOC_GDMA_SUPPORTED
gdma_channel_handle_t gdma_handle_tx; //varible for storge gdma handle
gdma_channel_handle_t gdma_handle_rx;
#endif
intr_handle_t intr_dma; intr_handle_t intr_dma;
spi_slave_hd_callback_config_t callback; spi_slave_hd_callback_config_t callback;
spi_slave_hd_hal_context_t hal; spi_slave_hd_hal_context_t hal;
@ -74,13 +71,10 @@ static bool spi_gdma_tx_channel_callback(gdma_channel_handle_t dma_chan, gdma_ev
static void spi_slave_hd_intr_append(void *arg); static void spi_slave_hd_intr_append(void *arg);
static void spi_slave_hd_intr_segment(void *arg); static void spi_slave_hd_intr_segment(void *arg);
esp_err_t spi_slave_hd_init(spi_host_device_t host_id, const spi_bus_config_t *bus_config, esp_err_t spi_slave_hd_init(spi_host_device_t host_id, const spi_bus_config_t *bus_config, const spi_slave_hd_slot_config_t *config)
const spi_slave_hd_slot_config_t *config)
{ {
bool spi_chan_claimed; bool spi_chan_claimed;
bool append_mode = (config->flags & SPI_SLAVE_HD_APPEND_MODE); bool append_mode = (config->flags & SPI_SLAVE_HD_APPEND_MODE);
uint32_t actual_tx_dma_chan = 0;
uint32_t actual_rx_dma_chan = 0;
esp_err_t ret = ESP_OK; esp_err_t ret = ESP_OK;
SPIHD_CHECK(VALID_HOST(host_id), "invalid host", ESP_ERR_INVALID_ARG); SPIHD_CHECK(VALID_HOST(host_id), "invalid host", ESP_ERR_INVALID_ARG);
@ -104,34 +98,28 @@ esp_err_t spi_slave_hd_init(spi_host_device_t host_id, const spi_bus_config_t *b
host->append_mode = append_mode; host->append_mode = append_mode;
if (host->dma_enabled) { if (host->dma_enabled) {
ret = spicommon_dma_chan_alloc(host_id, config->dma_chan, &actual_tx_dma_chan, &actual_rx_dma_chan); ret = spicommon_dma_chan_alloc(host_id, config->dma_chan, &host->dma_ctx);
if (ret != ESP_OK) {
goto cleanup;
}
ret = spicommon_dma_desc_alloc(host->dma_ctx, bus_config->max_transfer_sz, &host->max_transfer_sz);
if (ret != ESP_OK) { if (ret != ESP_OK) {
goto cleanup; goto cleanup;
} }
//Malloc for all the DMA descriptors host->hal.dma_desc_num = host->dma_ctx->dma_desc_num;
int dma_desc_ct = (bus_config->max_transfer_sz + DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED - 1) / DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED; host->hal.dmadesc_tx = heap_caps_malloc(sizeof(spi_slave_hd_hal_desc_append_t) * host->hal.dma_desc_num, MALLOC_CAP_DEFAULT);
if (dma_desc_ct == 0) { host->hal.dmadesc_rx = heap_caps_malloc(sizeof(spi_slave_hd_hal_desc_append_t) * host->hal.dma_desc_num, MALLOC_CAP_DEFAULT);
dma_desc_ct = 1; //default to 4k when max is not given if (!(host->hal.dmadesc_tx && host->hal.dmadesc_rx)) {
}
host->hal.dma_desc_num = dma_desc_ct;
spi_dma_desc_t *orig_dmadesc_tx = heap_caps_aligned_alloc(DMA_DESC_MEM_ALIGN_SIZE, sizeof(spi_dma_desc_t) * dma_desc_ct, MALLOC_CAP_DMA);
spi_dma_desc_t *orig_dmadesc_rx = heap_caps_aligned_alloc(DMA_DESC_MEM_ALIGN_SIZE, sizeof(spi_dma_desc_t) * dma_desc_ct, MALLOC_CAP_DMA);
host->hal.dmadesc_tx = heap_caps_malloc(sizeof(spi_slave_hd_hal_desc_append_t) * dma_desc_ct, MALLOC_CAP_DEFAULT);
host->hal.dmadesc_rx = heap_caps_malloc(sizeof(spi_slave_hd_hal_desc_append_t) * dma_desc_ct, MALLOC_CAP_DEFAULT);
if (!(host->hal.dmadesc_tx && host->hal.dmadesc_rx && orig_dmadesc_tx && orig_dmadesc_rx)) {
ret = ESP_ERR_NO_MEM; ret = ESP_ERR_NO_MEM;
goto cleanup; goto cleanup;
} }
//Pair each desc to each possible trans //Pair each desc to each possible trans
for (int i = 0; i < dma_desc_ct; i ++) { for (int i = 0; i < host->hal.dma_desc_num; i ++) {
host->hal.dmadesc_tx[i].desc = &orig_dmadesc_tx[i]; host->hal.dmadesc_tx[i].desc = &host->dma_ctx->dmadesc_tx[i];
host->hal.dmadesc_rx[i].desc = &orig_dmadesc_rx[i]; host->hal.dmadesc_rx[i].desc = &host->dma_ctx->dmadesc_rx[i];
} }
//Get the actual SPI bus transaction size in bytes.
host->max_transfer_sz = dma_desc_ct * DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED;
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE #if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
size_t alignment; size_t alignment;
esp_cache_get_alignment(ESP_CACHE_MALLOC_FLAG_DMA, &alignment); esp_cache_get_alignment(ESP_CACHE_MALLOC_FLAG_DMA, &alignment);
@ -156,14 +144,21 @@ esp_err_t spi_slave_hd_init(spi_host_device_t host_id, const spi_bus_config_t *b
.dma_in = SPI_LL_GET_HW(host_id), .dma_in = SPI_LL_GET_HW(host_id),
.dma_out = SPI_LL_GET_HW(host_id), .dma_out = SPI_LL_GET_HW(host_id),
.dma_enabled = host->dma_enabled, .dma_enabled = host->dma_enabled,
.tx_dma_chan = actual_tx_dma_chan,
.rx_dma_chan = actual_rx_dma_chan,
.append_mode = append_mode, .append_mode = append_mode,
.mode = config->mode, .mode = config->mode,
.tx_lsbfirst = (config->flags & SPI_SLAVE_HD_RXBIT_LSBFIRST), .tx_lsbfirst = (config->flags & SPI_SLAVE_HD_RXBIT_LSBFIRST),
.rx_lsbfirst = (config->flags & SPI_SLAVE_HD_TXBIT_LSBFIRST), .rx_lsbfirst = (config->flags & SPI_SLAVE_HD_TXBIT_LSBFIRST),
}; };
#if SOC_GDMA_SUPPORTED
//temporary used for gdma_ll alias in hal layer
gdma_get_channel_id(host->dma_ctx->tx_dma_chan, (int *)&hal_config.tx_dma_chan);
gdma_get_channel_id(host->dma_ctx->rx_dma_chan, (int *)&hal_config.rx_dma_chan);
#else
hal_config.tx_dma_chan = host->dma_ctx->tx_dma_chan.chan_id;
hal_config.rx_dma_chan = host->dma_ctx->rx_dma_chan.chan_id;
#endif
//Init the hal according to the hal_config set above //Init the hal according to the hal_config set above
spi_slave_hd_hal_init(&host->hal, &hal_config); spi_slave_hd_hal_init(&host->hal, &hal_config);
@ -219,11 +214,10 @@ esp_err_t spi_slave_hd_init(spi_host_device_t host_id, const spi_bus_config_t *b
} }
#if SOC_GDMA_SUPPORTED #if SOC_GDMA_SUPPORTED
// config gmda and ISR callback for gdma supported chip // config gmda and ISR callback for gdma supported chip
spicommon_gdma_get_handle(host_id, &host->gdma_handle_tx, GDMA_CHANNEL_DIRECTION_TX);
gdma_tx_event_callbacks_t tx_cbs = { gdma_tx_event_callbacks_t tx_cbs = {
.on_trans_eof = spi_gdma_tx_channel_callback .on_trans_eof = spi_gdma_tx_channel_callback
}; };
gdma_register_tx_event_callbacks(host->gdma_handle_tx, &tx_cbs, host); gdma_register_tx_event_callbacks(host->dma_ctx->tx_dma_chan, &tx_cbs, host);
#else #else
ret = esp_intr_alloc(spicommon_irqdma_source_for_host(host_id), 0, spi_slave_hd_intr_append, ret = esp_intr_alloc(spicommon_irqdma_source_for_host(host_id), 0, spi_slave_hd_intr_append,
(void *)host, &host->intr_dma); (void *)host, &host->intr_dma);
@ -293,11 +287,11 @@ esp_err_t spi_slave_hd_deinit(spi_host_device_t host_id)
spicommon_periph_free(host_id); spicommon_periph_free(host_id);
if (host->dma_enabled) { if (host->dma_enabled) {
free(host->hal.dmadesc_tx->desc); free(host->dma_ctx->dmadesc_tx);
free(host->hal.dmadesc_rx->desc); free(host->dma_ctx->dmadesc_rx);
free(host->hal.dmadesc_tx); free(host->hal.dmadesc_tx);
free(host->hal.dmadesc_rx); free(host->hal.dmadesc_rx);
spicommon_dma_chan_free(host_id); spicommon_dma_chan_free(host->dma_ctx);
} }
free(host); free(host);
spihost[host_id] = NULL; spihost[host_id] = NULL;

View File

@ -908,8 +908,8 @@ TEST_CASE_MULTIPLE_DEVICES("SPI quad hd test ", "[spi_ms][test_env=generic_multi
#endif // #if !TEMPORARY_DISABLED_FOR_TARGETS(ESP32S2) #endif // #if !TEMPORARY_DISABLED_FOR_TARGETS(ESP32S2)
//***************************************TEST FOR APPEND MODE******************************************// //***************************************TEST FOR APPEND MODE******************************************//
#define TEST_APPEND_CACHE_SIZE 4 #define TEST_APPEND_NUM 4
#define TEST_TRANS_LEN TEST_DMA_MAX_SIZE #define TEST_TRANS_LEN TEST_DMA_MAX_SIZE
void prepare_data(uint8_t *buff, uint32_t len, int8_t diff) void prepare_data(uint8_t *buff, uint32_t len, int8_t diff)
{ {
@ -930,20 +930,20 @@ void slave_run_append(void)
TEST_ESP_OK(spi_slave_hd_init(TEST_SPI_HOST, &bus_cfg, &slave_hd_cfg)); TEST_ESP_OK(spi_slave_hd_init(TEST_SPI_HOST, &bus_cfg, &slave_hd_cfg));
unity_wait_for_signal("Master ready"); unity_wait_for_signal("Master ready");
spi_slave_hd_data_t *ret_trans, slave_rx_trans[TEST_APPEND_CACHE_SIZE] = {}; spi_slave_hd_data_t *ret_trans, slave_rx_trans[TEST_APPEND_NUM] = {};
uint8_t *slave_exp = heap_caps_malloc(TEST_TRANS_LEN, MALLOC_CAP_DEFAULT); uint8_t *slave_exp = heap_caps_malloc(TEST_TRANS_LEN, MALLOC_CAP_DEFAULT);
// append some data first // append some data first
for (uint32_t cache_instans = 0; cache_instans < TEST_APPEND_CACHE_SIZE; cache_instans++) { for (uint32_t append_idx = 0; append_idx < TEST_APPEND_NUM; append_idx++) {
int trans_len = 16 << (cache_instans + 1); int trans_len = 16 << (append_idx + 1);
if (trans_len > TEST_TRANS_LEN) { if (trans_len > TEST_TRANS_LEN) {
trans_len = TEST_TRANS_LEN; trans_len = TEST_TRANS_LEN;
} }
slave_rx_trans[cache_instans].data = heap_caps_calloc(1, TEST_TRANS_LEN, MALLOC_CAP_DMA); slave_rx_trans[append_idx].data = heap_caps_aligned_calloc(4, 1, TEST_TRANS_LEN, MALLOC_CAP_DMA);
TEST_ASSERT_NOT_NULL(slave_rx_trans[cache_instans].data); TEST_ASSERT_NOT_NULL(slave_rx_trans[append_idx].data);
slave_rx_trans[cache_instans].len = trans_len; slave_rx_trans[append_idx].len = trans_len;
TEST_ESP_OK(spi_slave_hd_append_trans(TEST_SPI_HOST, SPI_SLAVE_CHAN_RX, &slave_rx_trans[cache_instans], portMAX_DELAY)); TEST_ESP_OK(spi_slave_hd_append_trans(TEST_SPI_HOST, SPI_SLAVE_CHAN_RX, &slave_rx_trans[append_idx], portMAX_DELAY));
} }
for (int trans_num = 1; trans_num <= 8; trans_num ++) { for (int trans_num = 1; trans_num <= 8; trans_num ++) {
@ -960,7 +960,7 @@ void slave_run_append(void)
ESP_LOG_BUFFER_HEX_LEVEL("slave exp", slave_exp, trans_len, ESP_LOG_DEBUG); ESP_LOG_BUFFER_HEX_LEVEL("slave exp", slave_exp, trans_len, ESP_LOG_DEBUG);
spitest_cmp_or_dump(slave_exp, ret_trans->data, trans_len); spitest_cmp_or_dump(slave_exp, ret_trans->data, trans_len);
if (trans_num <= TEST_APPEND_CACHE_SIZE) { if (trans_num <= TEST_APPEND_NUM) {
// append one more transaction // append one more transaction
int new_append_len = trans_len << 4; int new_append_len = trans_len << 4;
if (new_append_len > TEST_TRANS_LEN) { if (new_append_len > TEST_TRANS_LEN) {
@ -976,16 +976,16 @@ void slave_run_append(void)
free(slave_exp); free(slave_exp);
//------------------------------------tx direction------------------------------ //------------------------------------tx direction------------------------------
spi_slave_hd_data_t slave_tx_trans[TEST_APPEND_CACHE_SIZE] = {}; spi_slave_hd_data_t slave_tx_trans[TEST_APPEND_NUM] = {};
for (uint32_t cache_instans = 0; cache_instans < TEST_APPEND_CACHE_SIZE; cache_instans ++) { for (uint32_t append_idx = 0; append_idx < TEST_APPEND_NUM; append_idx ++) {
int trans_len = 16 << (cache_instans + 1); int trans_len = 16 << (append_idx + 1);
if (trans_len >= TEST_TRANS_LEN) { if (trans_len >= TEST_TRANS_LEN) {
trans_len = TEST_TRANS_LEN; trans_len = TEST_TRANS_LEN;
} }
slave_tx_trans[cache_instans].data = slave_rx_trans[cache_instans].data; slave_tx_trans[append_idx].data = slave_rx_trans[append_idx].data;
slave_tx_trans[cache_instans].len = trans_len; slave_tx_trans[append_idx].len = trans_len;
prepare_data(slave_tx_trans[cache_instans].data, trans_len, -3); prepare_data(slave_tx_trans[append_idx].data, trans_len, -3);
TEST_ESP_OK(spi_slave_hd_append_trans(TEST_SPI_HOST, SPI_SLAVE_CHAN_TX, &slave_tx_trans[cache_instans], portMAX_DELAY)); TEST_ESP_OK(spi_slave_hd_append_trans(TEST_SPI_HOST, SPI_SLAVE_CHAN_TX, &slave_tx_trans[append_idx], portMAX_DELAY));
} }
//Get one result and load a new transaction //Get one result and load a new transaction
@ -995,7 +995,7 @@ void slave_run_append(void)
ESP_LOGI("slave", "trasacted len: %d", ret_trans->len); ESP_LOGI("slave", "trasacted len: %d", ret_trans->len);
ESP_LOG_BUFFER_HEX_LEVEL("slave tx", ret_trans->data, ret_trans->len, ESP_LOG_DEBUG); ESP_LOG_BUFFER_HEX_LEVEL("slave tx", ret_trans->data, ret_trans->len, ESP_LOG_DEBUG);
if (trans_num <= TEST_APPEND_CACHE_SIZE) { if (trans_num <= TEST_APPEND_NUM) {
// append one more transaction // append one more transaction
int new_append_len = 16 << (trans_num + 4); int new_append_len = 16 << (trans_num + 4);
if (new_append_len > TEST_TRANS_LEN) { if (new_append_len > TEST_TRANS_LEN) {
@ -1008,7 +1008,7 @@ void slave_run_append(void)
} }
} }
printf("================Master Rx Done==================\n"); printf("================Master Rx Done==================\n");
for (int i = 0; i < TEST_APPEND_CACHE_SIZE; i++) { for (int i = 0; i < TEST_APPEND_NUM; i++) {
free(slave_tx_trans[i].data); free(slave_tx_trans[i].data);
} }

View File

@ -12,29 +12,6 @@
#include "soc/soc_caps.h" #include "soc/soc_caps.h"
#include "soc/clk_tree_defs.h" #include "soc/clk_tree_defs.h"
//This GDMA related part will be introduced by GDMA dedicated APIs in the future. Here we temporarily use macros.
#if SOC_GDMA_SUPPORTED
#if (SOC_GDMA_TRIG_PERIPH_SPI2_BUS == SOC_GDMA_BUS_AHB) && (SOC_AHB_GDMA_VERSION == 1)
#include "soc/gdma_struct.h"
#include "hal/gdma_ll.h"
#define spi_dma_ll_rx_enable_burst_data(dev, chan, enable) gdma_ll_rx_enable_data_burst(&GDMA, chan, enable);
#define spi_dma_ll_tx_enable_burst_data(dev, chan, enable) gdma_ll_tx_enable_data_burst(&GDMA, chan, enable);
#define spi_dma_ll_rx_enable_burst_desc(dev, chan, enable) gdma_ll_rx_enable_descriptor_burst(&GDMA, chan, enable);
#define spi_dma_ll_tx_enable_burst_desc(dev, chan, enable) gdma_ll_tx_enable_descriptor_burst(&GDMA, chan, enable);
#define spi_dma_ll_enable_out_auto_wrback(dev, chan, enable) gdma_ll_tx_enable_auto_write_back(&GDMA, chan, enable);
#define spi_dma_ll_set_out_eof_generation(dev, chan, enable) gdma_ll_tx_set_eof_mode(&GDMA, chan, enable);
#elif (SOC_GDMA_TRIG_PERIPH_SPI2_BUS == SOC_GDMA_BUS_AXI) //TODO: IDF-6152, refactor spi hal layer
#include "hal/axi_dma_ll.h"
#define spi_dma_ll_rx_enable_burst_data(dev, chan, enable) axi_dma_ll_rx_enable_data_burst(&AXI_DMA, chan, enable);
#define spi_dma_ll_tx_enable_burst_data(dev, chan, enable) axi_dma_ll_tx_enable_data_burst(&AXI_DMA, chan, enable);
#define spi_dma_ll_rx_enable_burst_desc(dev, chan, enable) axi_dma_ll_rx_enable_descriptor_burst(&AXI_DMA, chan, enable);
#define spi_dma_ll_tx_enable_burst_desc(dev, chan, enable) axi_dma_ll_tx_enable_descriptor_burst(&AXI_DMA, chan, enable);
#define spi_dma_ll_enable_out_auto_wrback(dev, chan, enable) axi_dma_ll_tx_enable_auto_write_back(&AXI_DMA, chan, enable);
#define spi_dma_ll_set_out_eof_generation(dev, chan, enable) axi_dma_ll_tx_set_eof_mode(&AXI_DMA, chan, enable);
#endif
#endif //SOC_GDMA_SUPPORTED
/* The tag may be unused if log level is set to NONE */ /* The tag may be unused if log level is set to NONE */
static const __attribute__((unused)) char SPI_HAL_TAG[] = "spi_hal"; static const __attribute__((unused)) char SPI_HAL_TAG[] = "spi_hal";
@ -44,14 +21,6 @@ static const __attribute__((unused)) char SPI_HAL_TAG[] = "spi_hal";
return (ret_val); \ return (ret_val); \
} }
static void s_spi_hal_dma_init_config(const spi_hal_context_t *hal)
{
spi_dma_ll_rx_enable_burst_data(hal->dma_in, hal->rx_dma_chan, 1);
spi_dma_ll_tx_enable_burst_data(hal->dma_out, hal->tx_dma_chan, 1);
spi_dma_ll_rx_enable_burst_desc(hal->dma_in, hal->rx_dma_chan, 1);
spi_dma_ll_tx_enable_burst_desc(hal->dma_out, hal->tx_dma_chan, 1);
}
void spi_hal_init(spi_hal_context_t *hal, uint32_t host_id, const spi_hal_config_t *config) void spi_hal_init(spi_hal_context_t *hal, uint32_t host_id, const spi_hal_config_t *config)
{ {
memset(hal, 0, sizeof(spi_hal_context_t)); memset(hal, 0, sizeof(spi_hal_context_t));
@ -71,9 +40,6 @@ void spi_hal_init(spi_hal_context_t *hal, uint32_t host_id, const spi_hal_config
spi_ll_set_mosi_free_level(hw, 0); spi_ll_set_mosi_free_level(hw, 0);
#endif #endif
spi_ll_master_init(hw); spi_ll_master_init(hw);
if (config->dma_enabled) {
s_spi_hal_dma_init_config(hal);
}
//Force a transaction done interrupt. This interrupt won't fire yet because //Force a transaction done interrupt. This interrupt won't fire yet because
//we initialized the SPI interrupt as disabled. This way, we can just //we initialized the SPI interrupt as disabled. This way, we can just

View File

@ -2,37 +2,6 @@
#include "hal/spi_ll.h" #include "hal/spi_ll.h"
#include "soc/soc_caps.h" #include "soc/soc_caps.h"
//This GDMA related part will be introduced by GDMA dedicated APIs in the future. Here we temporarily use macros.
#if SOC_GDMA_SUPPORTED
#if (SOC_GDMA_TRIG_PERIPH_SPI2_BUS == SOC_GDMA_BUS_AHB) && (SOC_AHB_GDMA_VERSION == 1)
#include "soc/gdma_struct.h"
#include "hal/gdma_ll.h"
#define spi_dma_ll_rx_enable_burst_data(dev, chan, enable) gdma_ll_rx_enable_data_burst(&GDMA, chan, enable);
#define spi_dma_ll_tx_enable_burst_data(dev, chan, enable) gdma_ll_tx_enable_data_burst(&GDMA, chan, enable);
#define spi_dma_ll_rx_enable_burst_desc(dev, chan, enable) gdma_ll_rx_enable_descriptor_burst(&GDMA, chan, enable);
#define spi_dma_ll_tx_enable_burst_desc(dev, chan, enable) gdma_ll_tx_enable_descriptor_burst(&GDMA, chan, enable);
#define spi_dma_ll_enable_out_auto_wrback(dev, chan, enable) gdma_ll_tx_enable_auto_write_back(&GDMA, chan, enable);
#define spi_dma_ll_set_out_eof_generation(dev, chan, enable) gdma_ll_tx_set_eof_mode(&GDMA, chan, enable);
#elif (SOC_GDMA_TRIG_PERIPH_SPI2_BUS == SOC_GDMA_BUS_AXI) //TODO: IDF-6152, refactor spi hal layer
#include "hal/axi_dma_ll.h"
#define spi_dma_ll_rx_enable_burst_data(dev, chan, enable) axi_dma_ll_rx_enable_data_burst(&AXI_DMA, chan, enable);
#define spi_dma_ll_tx_enable_burst_data(dev, chan, enable) axi_dma_ll_tx_enable_data_burst(&AXI_DMA, chan, enable);
#define spi_dma_ll_rx_enable_burst_desc(dev, chan, enable) axi_dma_ll_rx_enable_descriptor_burst(&AXI_DMA, chan, enable);
#define spi_dma_ll_tx_enable_burst_desc(dev, chan, enable) axi_dma_ll_tx_enable_descriptor_burst(&AXI_DMA, chan, enable);
#define spi_dma_ll_enable_out_auto_wrback(dev, chan, enable) axi_dma_ll_tx_enable_auto_write_back(&AXI_DMA, chan, enable);
#define spi_dma_ll_set_out_eof_generation(dev, chan, enable) axi_dma_ll_tx_set_eof_mode(&AXI_DMA, chan, enable);
#endif
#endif //SOC_GDMA_SUPPORTED
static void s_spi_slave_hal_dma_init_config(const spi_slave_hal_context_t *hal)
{
spi_dma_ll_rx_enable_burst_data(hal->dma_in, hal->rx_dma_chan, 1);
spi_dma_ll_tx_enable_burst_data(hal->dma_out, hal->tx_dma_chan, 1);
spi_dma_ll_rx_enable_burst_desc(hal->dma_in, hal->rx_dma_chan, 1);
spi_dma_ll_tx_enable_burst_desc(hal->dma_out, hal->tx_dma_chan, 1);
}
void spi_slave_hal_init(spi_slave_hal_context_t *hal, const spi_slave_hal_config_t *hal_config) void spi_slave_hal_init(spi_slave_hal_context_t *hal, const spi_slave_hal_config_t *hal_config)
{ {
spi_dev_t *hw = SPI_LL_GET_HW(hal_config->host_id); spi_dev_t *hw = SPI_LL_GET_HW(hal_config->host_id);
@ -40,9 +9,6 @@ void spi_slave_hal_init(spi_slave_hal_context_t *hal, const spi_slave_hal_config
hal->dma_in = hal_config->dma_in; hal->dma_in = hal_config->dma_in;
hal->dma_out = hal_config->dma_out; hal->dma_out = hal_config->dma_out;
if (hal->use_dma) {
s_spi_slave_hal_dma_init_config(hal);
}
spi_ll_slave_init(hal->hw); spi_ll_slave_init(hal->hw);
//Force a transaction done interrupt. This interrupt won't fire yet because we initialized the SPI interrupt as //Force a transaction done interrupt. This interrupt won't fire yet because we initialized the SPI interrupt as

View File

@ -27,10 +27,6 @@
#define spi_dma_ll_rx_restart(dev, chan) gdma_ll_rx_restart(&GDMA, chan) #define spi_dma_ll_rx_restart(dev, chan) gdma_ll_rx_restart(&GDMA, chan)
#define spi_dma_ll_rx_reset(dev, chan) gdma_ll_rx_reset_channel(&GDMA, chan) #define spi_dma_ll_rx_reset(dev, chan) gdma_ll_rx_reset_channel(&GDMA, chan)
#define spi_dma_ll_tx_reset(dev, chan) gdma_ll_tx_reset_channel(&GDMA, chan) #define spi_dma_ll_tx_reset(dev, chan) gdma_ll_tx_reset_channel(&GDMA, chan)
#define spi_dma_ll_rx_enable_burst_data(dev, chan, enable) gdma_ll_rx_enable_data_burst(&GDMA, chan, enable)
#define spi_dma_ll_tx_enable_burst_data(dev, chan, enable) gdma_ll_tx_enable_data_burst(&GDMA, chan, enable)
#define spi_dma_ll_rx_enable_burst_desc(dev, chan, enable) gdma_ll_rx_enable_descriptor_burst(&GDMA, chan, enable)
#define spi_dma_ll_tx_enable_burst_desc(dev, chan, enable) gdma_ll_tx_enable_descriptor_burst(&GDMA, chan, enable)
#define spi_dma_ll_enable_out_auto_wrback(dev, chan, enable) gdma_ll_tx_enable_auto_write_back(&GDMA, chan, enable) #define spi_dma_ll_enable_out_auto_wrback(dev, chan, enable) gdma_ll_tx_enable_auto_write_back(&GDMA, chan, enable)
#define spi_dma_ll_set_out_eof_generation(dev, chan, enable) gdma_ll_tx_set_eof_mode(&GDMA, chan, enable) #define spi_dma_ll_set_out_eof_generation(dev, chan, enable) gdma_ll_tx_set_eof_mode(&GDMA, chan, enable)
#define spi_dma_ll_get_out_eof_desc_addr(dev, chan) gdma_ll_tx_get_eof_desc_addr(&GDMA, chan) #define spi_dma_ll_get_out_eof_desc_addr(dev, chan) gdma_ll_tx_get_eof_desc_addr(&GDMA, chan)
@ -50,10 +46,6 @@
#define spi_dma_ll_rx_restart(dev, chan) axi_dma_ll_rx_restart(&AXI_DMA, chan) #define spi_dma_ll_rx_restart(dev, chan) axi_dma_ll_rx_restart(&AXI_DMA, chan)
#define spi_dma_ll_rx_reset(dev, chan) axi_dma_ll_rx_reset_channel(&AXI_DMA, chan) #define spi_dma_ll_rx_reset(dev, chan) axi_dma_ll_rx_reset_channel(&AXI_DMA, chan)
#define spi_dma_ll_tx_reset(dev, chan) axi_dma_ll_tx_reset_channel(&AXI_DMA, chan) #define spi_dma_ll_tx_reset(dev, chan) axi_dma_ll_tx_reset_channel(&AXI_DMA, chan)
#define spi_dma_ll_rx_enable_burst_data(dev, chan, enable) axi_dma_ll_rx_enable_data_burst(&AXI_DMA, chan, enable)
#define spi_dma_ll_tx_enable_burst_data(dev, chan, enable) axi_dma_ll_tx_enable_data_burst(&AXI_DMA, chan, enable)
#define spi_dma_ll_rx_enable_burst_desc(dev, chan, enable) axi_dma_ll_rx_enable_descriptor_burst(&AXI_DMA, chan, enable)
#define spi_dma_ll_tx_enable_burst_desc(dev, chan, enable) axi_dma_ll_tx_enable_descriptor_burst(&AXI_DMA, chan, enable)
#define spi_dma_ll_enable_out_auto_wrback(dev, chan, enable) axi_dma_ll_tx_enable_auto_write_back(&AXI_DMA, chan, enable) #define spi_dma_ll_enable_out_auto_wrback(dev, chan, enable) axi_dma_ll_tx_enable_auto_write_back(&AXI_DMA, chan, enable)
#define spi_dma_ll_set_out_eof_generation(dev, chan, enable) axi_dma_ll_tx_set_eof_mode(&AXI_DMA, chan, enable) #define spi_dma_ll_set_out_eof_generation(dev, chan, enable) axi_dma_ll_tx_set_eof_mode(&AXI_DMA, chan, enable)
#define spi_dma_ll_get_out_eof_desc_addr(dev, chan) axi_dma_ll_tx_get_eof_desc_addr(&AXI_DMA, chan) #define spi_dma_ll_get_out_eof_desc_addr(dev, chan) axi_dma_ll_tx_get_eof_desc_addr(&AXI_DMA, chan)
@ -71,10 +63,6 @@
static void s_spi_slave_hd_hal_dma_init_config(const spi_slave_hd_hal_context_t *hal) static void s_spi_slave_hd_hal_dma_init_config(const spi_slave_hd_hal_context_t *hal)
{ {
spi_dma_ll_rx_enable_burst_data(hal->dma_in, hal->rx_dma_chan, 1);
spi_dma_ll_tx_enable_burst_data(hal->dma_out, hal->tx_dma_chan, 1);
spi_dma_ll_rx_enable_burst_desc(hal->dma_in, hal->rx_dma_chan, 1);
spi_dma_ll_tx_enable_burst_desc(hal->dma_out, hal->tx_dma_chan, 1);
spi_dma_ll_enable_out_auto_wrback(hal->dma_out, hal->tx_dma_chan, 1); spi_dma_ll_enable_out_auto_wrback(hal->dma_out, hal->tx_dma_chan, 1);
spi_dma_ll_set_out_eof_generation(hal->dma_out, hal->tx_dma_chan, 1); spi_dma_ll_set_out_eof_generation(hal->dma_out, hal->tx_dma_chan, 1);
} }