feat(spi_master): rebase dma sct mode support, rename APIs, use malloc conf_buffer

This commit is contained in:
wanlei 2024-01-15 20:02:09 +08:00
parent a307096ec0
commit 51ffd40843
10 changed files with 422 additions and 453 deletions


@@ -158,48 +158,6 @@ typedef struct {
uint8_t dummy_bits; ///< The dummy length in this transaction, in bits.
} spi_transaction_ext_t;
#if SOC_SPI_SCT_SUPPORTED
/**
* @Backgrounds: `SCT Mode`
* Segmented-Configure-Transfer Mode
*
* In this mode, you can pre-configure multiple SPI transactions.
* - The whole set of transactions is called one `Segmented-Configure-Transaction`, or one `SCT`.
* - Each of the transactions in one `SCT` is called one `Segment`.
*
* Each segment can have its own SPI phase configuration.
*/
/**
* SPI SCT Mode transaction flags
*/
#define SPI_SEG_TRANS_PREP_LEN_UPDATED (1<<0) ///< Use `spi_seg_transaction_t: cs_ena_pretrans` in this segment.
#define SPI_SEG_TRANS_CMD_LEN_UPDATED (1<<1) ///< Use `spi_seg_transaction_t: command_bits` in this segment.
#define SPI_SEG_TRANS_ADDR_LEN_UPDATED (1<<2) ///< Use `spi_seg_transaction_t: address_bits` in this segment.
#define SPI_SEG_TRANS_DUMMY_LEN_UPDATED (1<<3) ///< Use `spi_seg_transaction_t: dummy_bits` in this segment.
#define SPI_SEG_TRANS_DONE_LEN_UPDATED (1<<4) ///< Use `spi_seg_transaction_t: cs_ena_posttrans` in this segment.
/**
* This struct is for SPI SCT (Segmented-Configure-Transfer) Mode.
*
* By default, the length of each SPI phase does not change per segment; every segment uses the phase lengths set at `spi_bus_add_device()`.
* However, you can force a segment to use custom phase lengths by setting the corresponding `SPI_SEG_TRANS_XXX` flags.
*/
typedef struct {
struct spi_transaction_t base; ///< Transaction data, so that pointer to spi_transaction_t can be converted into spi_seg_transaction_t
uint8_t cs_ena_pretrans; ///< Amount of SPI bit-cycles the cs should be activated before the transmission
uint8_t cs_ena_posttrans; ///< Amount of SPI bit-cycles the cs should stay active after the transmission
uint8_t command_bits; ///< The command length in this transaction, in bits.
uint8_t address_bits; ///< The address length in this transaction, in bits.
uint8_t dummy_bits; ///< The dummy length in this transaction, in bits.
uint32_t seg_gap_clock_len; ///< The length of the CS inactive time between segments, in clock cycles.
uint32_t seg_trans_flags; ///< SCT specific flags. See `SPI_SEG_TRANS_XXX` macros.
/**< Necessary buffer required by HW, don't touch this. */
uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX];
} spi_seg_transaction_t;
#endif //#if SOC_SPI_SCT_SUPPORTED
typedef struct spi_device_t *spi_device_handle_t; ///< Handle for a device on a SPI bus
/**
* @brief Allocate a device on a SPI bus
@@ -301,68 +259,6 @@ esp_err_t spi_device_get_trans_result(spi_device_handle_t handle, spi_transactio
*/
esp_err_t spi_device_transmit(spi_device_handle_t handle, spi_transaction_t *trans_desc);
#if SOC_SPI_SCT_SUPPORTED
/**
* @brief Enable/Disable Segmented-Configure-Transfer (SCT) mode
*
* Search for `@Backgrounds: `SCT Mode`` in this header file to learn what SCT mode is
*
* @note This API isn't thread-safe. After enabling it, the current SPI host is switched into SCT mode.
* Never call this API from multiple threads, or while an SPI transaction is ongoing (on this SPI host).
*
* @param handle Device handle obtained using spi_host_add_dev
* @param enable True: to enable SCT mode; False: to disable SCT mode
*
* @return
* - ESP_OK: On success
* - ESP_ERR_INVALID_ARG: Invalid arguments
* - ESP_ERR_INVALID_STATE: Invalid states, e.g.: an SPI polling transaction is ongoing, SPI internal Queue isn't empty, etc.
*/
esp_err_t spi_bus_segment_trans_mode_enable(spi_device_handle_t handle, bool enable);
/**
* @brief Queue an SPI Segmented-Configure-Transaction (SCT) list for interrupt transaction execution.
*
* Search for `@Backgrounds: `SCT Mode`` in this header file to learn what SCT mode is
*
* @note After calling this API, call `spi_device_get_segment_trans_result` to get the transaction results.
*
* @param handle Device handle obtained using spi_host_add_dev
* @param seg_trans_desc Pointer to the head of the transaction segment list (a one-segment list is also acceptable)
* @param seg_num Number of segments in the list
* @param ticks_to_wait Ticks to wait until there's room in the queue; use portMAX_DELAY to never time out.
*
* @return
* - ESP_OK: On success
* - ESP_ERR_INVALID_ARG: Invalid arguments
* - ESP_ERR_INVALID_STATE: Invalid states, e.g.: an SPI polling transaction is ongoing, SCT mode isn't enabled, DMA descriptors not enough, etc.
* - ESP_ERR_TIMEOUT: Timed out; this SCT transaction wasn't queued successfully
*/
esp_err_t spi_device_queue_segment_trans(spi_device_handle_t handle, spi_seg_transaction_t *seg_trans_desc, uint32_t seg_num, TickType_t ticks_to_wait);
/**
* @brief Get the result of an SPI Segmented-Configure-Transaction (SCT).
*
* Search for `@Backgrounds: `SCT Mode`` in this header file to learn what SCT mode is
*
* @note Once this API returns (with `ESP_OK`), you can recycle the memory used for this SCT list (pointed to by `seg_trans_desc`).
* You must keep the SCT list memory valid until this API returns, otherwise the SCT transaction may fail
*
* @param handle Device handle obtained using spi_host_add_dev
* @param[out] seg_trans_desc Pointer to the completed SCT list head (then you can recycle this list of memory).
* @param ticks_to_wait Ticks to wait until there's a returned item; use portMAX_DELAY to never time out.
*
* @return
* - ESP_OK: On success
* - ESP_ERR_INVALID_ARG: Invalid arguments
* - ESP_ERR_INVALID_STATE: Invalid states, e.g.: SCT mode isn't enabled, etc.
* - ESP_ERR_TIMEOUT: Timed out; no completed SCT transaction was retrieved
*/
esp_err_t spi_device_get_segment_trans_result(spi_device_handle_t handle, spi_seg_transaction_t **seg_trans_desc, TickType_t ticks_to_wait);
#endif //#if SOC_SPI_SCT_SUPPORTED
/**
* @brief Immediately start a polling transaction.
*


@@ -0,0 +1,114 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @brief
* This file contains SPI Master private/internal APIs. Private/internal APIs are:
* - Visible to other IDF components
* - NOT recommended for use in applications
* - NOT covered by backward-compatibility or safety guarantees
*/
#pragma once
#include "driver/spi_master.h"
#if SOC_SPI_SCT_SUPPORTED
/**
* @Backgrounds: `SCT Mode`
* Segmented-Configure-Transfer Mode
*
* In this mode, you can pre-configure multiple SPI transactions.
* - The whole set of transactions is called one `Segmented-Configure-Transaction`, or one `SCT`.
* - Each of the transactions in one `SCT` is called one `Segment`.
*
* Each segment can have its own SPI phase configuration.
*/
/**
* SPI SCT Mode transaction flags
*/
#define SPI_MULTI_TRANS_PREP_LEN_UPDATED (1<<0) ///< Use `spi_multi_transaction_t: cs_ena_pretrans` in this segment.
#define SPI_MULTI_TRANS_CMD_LEN_UPDATED (1<<1) ///< Use `spi_multi_transaction_t: command_bits` in this segment.
#define SPI_MULTI_TRANS_ADDR_LEN_UPDATED (1<<2) ///< Use `spi_multi_transaction_t: address_bits` in this segment.
#define SPI_MULTI_TRANS_DUMMY_LEN_UPDATED (1<<3) ///< Use `spi_multi_transaction_t: dummy_bits` in this segment.
#define SPI_MULTI_TRANS_DONE_LEN_UPDATED (1<<4) ///< Use `spi_multi_transaction_t: cs_ena_posttrans` in this segment.
/**
* This struct is for SPI SCT (Segmented-Configure-Transfer) Mode.
*
* By default, the length of each SPI phase does not change per segment; every segment uses the phase lengths set at `spi_bus_add_device()`.
* However, you can force a segment to use custom phase lengths by setting the corresponding `SPI_MULTI_TRANS_XXX` flags.
*/
typedef struct {
struct spi_transaction_t base; ///< Transaction data, so that pointer to spi_transaction_t can be converted into spi_multi_transaction_t
uint8_t cs_ena_pretrans; ///< Amount of SPI bit-cycles the cs should be activated before the transmission
uint8_t cs_ena_posttrans; ///< Amount of SPI bit-cycles the cs should stay active after the transmission
uint8_t command_bits; ///< The command length in this transaction, in bits.
uint8_t address_bits; ///< The address length in this transaction, in bits.
uint8_t dummy_bits; ///< The dummy length in this transaction, in bits.
uint32_t sct_gap_len; ///< The length of the CS inactive time between segments, in clock cycles.
uint32_t seg_trans_flags; ///< SCT specific flags. See `SPI_MULTI_TRANS_XXX` macros.
} spi_multi_transaction_t;
/**
* @brief Enable/Disable Segmented-Configure-Transfer (SCT) mode
*
* Search for `@Backgrounds: `SCT Mode`` in this header file to learn what SCT mode is
*
* @note This API isn't thread-safe. After enabling it, the current SPI host is switched into SCT mode.
* Never call this API from multiple threads, or while an SPI transaction is ongoing (on this SPI host).
*
* @param handle Device handle obtained using spi_host_add_dev
* @param enable True: to enable SCT mode; False: to disable SCT mode
*
* @return
* - ESP_OK: On success
* - ESP_ERR_INVALID_ARG: Invalid arguments
* - ESP_ERR_INVALID_STATE: Invalid states, e.g.: an SPI polling transaction is ongoing, SPI internal Queue isn't empty, etc.
*/
esp_err_t spi_bus_multi_trans_mode_enable(spi_device_handle_t handle, bool enable);
/**
* @brief Queue an SPI Segmented-Configure-Transaction (SCT) list for interrupt transaction execution.
*
* Search for `@Backgrounds: `SCT Mode`` in this header file to learn what SCT mode is
*
* @note After calling this API, call `spi_device_get_multi_trans_result` to get the transaction results.
*
* @param handle Device handle obtained using spi_host_add_dev
* @param seg_trans_desc Pointer to the head of the transaction segment list (a one-segment list is also acceptable)
* @param trans_num Number of transactions (segments) in the list
* @param ticks_to_wait Ticks to wait until there's room in the queue; use portMAX_DELAY to never time out.
*
* @return
* - ESP_OK: On success
* - ESP_ERR_INVALID_ARG: Invalid arguments
* - ESP_ERR_INVALID_STATE: Invalid states, e.g.: an SPI polling transaction is ongoing, SCT mode isn't enabled, DMA descriptors not enough, etc.
* - ESP_ERR_TIMEOUT: Timed out; this SCT transaction wasn't queued successfully
*/
esp_err_t spi_device_queue_multi_trans(spi_device_handle_t handle, spi_multi_transaction_t *seg_trans_desc, uint32_t trans_num, TickType_t ticks_to_wait);
/**
* @brief Get the result of an SPI Segmented-Configure-Transaction (SCT).
*
* Search for `@Backgrounds: `SCT Mode`` in this header file to learn what SCT mode is
*
* @note Once this API returns (with `ESP_OK`), you can recycle the memory used for this SCT list (pointed to by `seg_trans_desc`).
* You must keep the SCT list memory valid until this API returns, otherwise the SCT transaction may fail
*
* @param handle Device handle obtained using spi_host_add_dev
* @param[out] seg_trans_desc Pointer to the completed SCT list head (then you can recycle this list of memory).
* @param ticks_to_wait Ticks to wait until there's a returned item; use portMAX_DELAY to never time out.
*
* @return
* - ESP_OK: On success
* - ESP_ERR_INVALID_ARG: Invalid arguments
* - ESP_ERR_INVALID_STATE: Invalid states, e.g.: SCT mode isn't enabled, etc.
* - ESP_ERR_TIMEOUT: Timed out; no completed SCT transaction was retrieved
*/
esp_err_t spi_device_get_multi_trans_result(spi_device_handle_t handle, spi_multi_transaction_t **seg_trans_desc, TickType_t ticks_to_wait);
#endif //#if SOC_SPI_SCT_SUPPORTED
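
A minimal usage sketch of the APIs above, mirroring the test added later in this commit. The device handle `dev` and the DMA-capable buffer `tx_data` are illustrative assumptions, not part of this commit, and error handling is elided:

// Sketch only: queue a two-segment SCT transaction, then collect the result.
spi_multi_transaction_t seg_trans[2] = {
    {
        .base = {
            .cmd = 0x1,
            .length = 8 * 8,        // 8 bytes of TX data, length is in bits
            .tx_buffer = tx_data,
        },
    },
    {
        .base = {
            .cmd = 0x2,
            .length = 8 * 8,
            .tx_buffer = tx_data + 8,
        },
        .dummy_bits = 8,            // this segment overrides the device's dummy phase length
        .seg_trans_flags = SPI_MULTI_TRANS_DUMMY_LEN_UPDATED,
    },
};
ESP_ERROR_CHECK(spi_bus_multi_trans_mode_enable(dev, true));
ESP_ERROR_CHECK(spi_device_queue_multi_trans(dev, seg_trans, 2, portMAX_DELAY));
spi_multi_transaction_t *ret_trans = NULL;
ESP_ERROR_CHECK(spi_device_get_multi_trans_result(dev, &ret_trans, portMAX_DELAY)); // ret_trans == seg_trans
ESP_ERROR_CHECK(spi_bus_multi_trans_mode_enable(dev, false));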


@@ -114,6 +114,7 @@ We have two bits to control the interrupt:
#include <sys/param.h>
#include "esp_private/periph_ctrl.h"
#include "esp_private/spi_common_internal.h"
#include "esp_private/spi_master_internal.h"
#include "driver/spi_master.h"
#include "esp_clk_tree.h"
#include "clk_ctrl_os.h"
@@ -137,21 +138,38 @@ typedef struct spi_device_t spi_device_t;
/// struct to hold private transaction data (like tx and rx buffer for DMA).
typedef struct {
spi_transaction_t *trans;
const uint32_t *buffer_to_send; //equals to tx_data, if SPI_TRANS_USE_RXDATA is applied; otherwise if original buffer wasn't in DMA-capable memory, this gets the address of a temporary buffer that is;
//otherwise sets to the original buffer or NULL if no buffer is assigned.
uint32_t *buffer_to_rcv; //similar to buffer_to_send
uint32_t dummy; //As we create the queue when in init, to use sct mode private descriptor as a queue item (when in sct mode), we need to add a dummy member here to keep the same size with `spi_sct_desc_priv_t`.
#if SOC_SPI_SCT_SUPPORTED
uint32_t reserved[2]; //The queue is created at init; these reserved members keep this struct the same size as `spi_sct_trans_priv_t`, so the SCT-mode private descriptor can be used as a queue item.
#endif
} spi_trans_priv_t;
#if SOC_SPI_SCT_SUPPORTED
//Type of dma descriptors used under SPI SCT mode
typedef struct {
lldesc_t *tx_seg_head;
lldesc_t *rx_seg_head;
spi_seg_transaction_t *sct_trans_desc_head;
uint16_t tx_used_desc_num;
uint16_t rx_used_desc_num;
} spi_sct_desc_priv_t;
typedef struct {
spi_dma_desc_t *tx_seg_head;
spi_dma_desc_t *rx_seg_head;
spi_multi_transaction_t *sct_trans_desc_head;
uint32_t *sct_conf_buffer;
uint16_t tx_used_desc_num;
uint16_t rx_used_desc_num;
} spi_sct_trans_priv_t;
_Static_assert(sizeof(spi_trans_priv_t) == sizeof(spi_sct_trans_priv_t)); //size of spi_trans_priv_t must be the same as size of spi_sct_trans_priv_t
typedef struct {
/* Segmented-Configure-Transfer required, configured by driver, don't touch */
uint32_t tx_free_desc_num;
uint32_t rx_free_desc_num;
spi_dma_desc_t *cur_tx_seg_link; ///< Current TX DMA descriptor used for sct mode.
spi_dma_desc_t *cur_rx_seg_link; ///< Current RX DMA descriptor used for sct mode.
spi_dma_desc_t *tx_seg_link_tail; ///< Tail of the TX DMA descriptor link
spi_dma_desc_t *rx_seg_link_tail; ///< Tail of the RX DMA descriptor link
} spi_sct_desc_ctx_t;
static void spi_hal_sct_tx_dma_desc_recycle(spi_sct_desc_ctx_t *desc_ctx, uint32_t recycle_num);
static void spi_hal_sct_rx_dma_desc_recycle(spi_sct_desc_ctx_t *desc_ctx, uint32_t recycle_num);
#endif
typedef struct {
@@ -161,7 +179,8 @@ typedef struct {
spi_hal_context_t hal;
spi_trans_priv_t cur_trans_buf;
#if SOC_SPI_SCT_SUPPORTED
spi_sct_desc_priv_t cur_sct_trans;
spi_sct_desc_ctx_t sct_desc_pool;
spi_sct_trans_priv_t cur_sct_trans;
#endif
int cur_cs; //current device doing transaction
const spi_bus_attr_t* bus_attr;
@@ -781,7 +800,7 @@ static void SPI_MASTER_ISR_ATTR spi_post_trans(spi_host_t *host)
}
#if SOC_SPI_SCT_SUPPORTED
static void SPI_MASTER_ISR_ATTR spi_sct_set_hal_trans_config(spi_seg_transaction_t *trans_header, spi_hal_trans_config_t *hal_trans)
static void SPI_MASTER_ISR_ATTR spi_sct_set_hal_trans_config(spi_multi_transaction_t *trans_header, spi_hal_trans_config_t *hal_trans)
{
spi_transaction_t *trans = &trans_header->base;
@@ -796,7 +815,27 @@ static void SPI_MASTER_ISR_ATTR spi_sct_set_hal_trans_config(spi_seg_transaction
hal_trans->line_mode.cmd_lines = (trans->flags & SPI_TRANS_MULTILINE_CMD) ? hal_trans->line_mode.data_lines : 1;
}
static void SPI_MASTER_ISR_ATTR spi_new_sct_trans(spi_device_t *dev, spi_sct_desc_priv_t *cur_sct_trans)
static void SPI_MASTER_ISR_ATTR s_sct_load_dma_link(spi_device_t *dev, spi_dma_desc_t *rx_seg_head, spi_dma_desc_t *tx_seg_head)
{
spi_hal_context_t *hal = &dev->host->hal;
const spi_dma_ctx_t *dma_ctx = dev->host->dma_ctx;
spi_hal_clear_intr_mask(hal, SPI_LL_INTR_SEG_DONE);
if (rx_seg_head) {
spi_dma_reset(dma_ctx->rx_dma_chan);
spi_hal_hw_prepare_rx(hal->hw);
spi_dma_start(dma_ctx->rx_dma_chan, rx_seg_head);
}
if (tx_seg_head) {
spi_dma_reset(dma_ctx->tx_dma_chan);
spi_hal_hw_prepare_tx(hal->hw);
spi_dma_start(dma_ctx->tx_dma_chan, tx_seg_head);
}
}
static void SPI_MASTER_ISR_ATTR spi_new_sct_trans(spi_device_t *dev, spi_sct_trans_priv_t *cur_sct_trans)
{
dev->host->cur_cs = dev->id;
@@ -805,9 +844,9 @@ static void SPI_MASTER_ISR_ATTR spi_new_sct_trans(spi_device_t *dev, spi_sct_des
#if !CONFIG_IDF_TARGET_ESP32S2
// s2 update this seg_gap_clock_len by dma from conf_buffer
spi_hal_sct_set_conf_bits_len(&dev->host->hal, cur_sct_trans->sct_trans_desc_head->seg_gap_clock_len);
spi_hal_sct_set_conf_bits_len(&dev->host->hal, cur_sct_trans->sct_trans_desc_head->sct_gap_len);
#endif
spi_hal_sct_load_dma_link(&dev->host->hal, cur_sct_trans->rx_seg_head, cur_sct_trans->tx_seg_head);
s_sct_load_dma_link(dev, cur_sct_trans->rx_seg_head, cur_sct_trans->tx_seg_head);
if (dev->cfg.pre_cb) {
dev->cfg.pre_cb((spi_transaction_t *)cur_sct_trans->sct_trans_desc_head);
}
@@ -822,9 +861,10 @@ static void SPI_MASTER_ISR_ATTR spi_post_sct_trans(spi_host_t *host)
assert(host->cur_sct_trans.rx_used_desc_num == 0);
}
free(host->cur_sct_trans.sct_conf_buffer);
portENTER_CRITICAL_ISR(&host->spinlock);
spi_hal_sct_tx_dma_desc_recycle(&host->hal, host->cur_sct_trans.tx_used_desc_num);
spi_hal_sct_tx_dma_desc_recycle(&host->sct_desc_pool, host->cur_sct_trans.tx_used_desc_num);
spi_hal_sct_rx_dma_desc_recycle(&host->hal, host->cur_sct_trans.rx_used_desc_num);
spi_hal_sct_rx_dma_desc_recycle(&host->sct_desc_pool, host->cur_sct_trans.rx_used_desc_num);
portEXIT_CRITICAL_ISR(&host->spinlock);
if (host->device[host->cur_cs]->cfg.post_cb) {
host->device[host->cur_cs]->cfg.post_cb((spi_transaction_t *)host->cur_sct_trans.sct_trans_desc_head);
@@ -846,7 +886,7 @@ static void SPI_MASTER_ISR_ATTR spi_intr(void *arg)
#endif
#if SOC_SPI_SCT_SUPPORTED
assert(spi_hal_usr_is_done(&host->hal) || spi_ll_get_intr(host->hal.hw, SPI_LL_INTR_SEG_DONE));
assert(spi_hal_usr_is_done(&host->hal) || spi_hal_get_intr_mask(&host->hal, SPI_LL_INTR_SEG_DONE));
#else
assert(spi_hal_usr_is_done(&host->hal));
#endif
@@ -1414,12 +1454,155 @@ esp_err_t spi_bus_get_max_transaction_len(spi_host_device_t host_id, size_t *max
}
#if SOC_SPI_SCT_SUPPORTED
/*-----------------------------------------------------------
* The functions below must be called within the same spinlock
*-----------------------------------------------------------*/
/*-------------------------
* TX
*------------------------*/
static void SPI_MASTER_ISR_ATTR spi_hal_sct_tx_dma_desc_recycle(spi_sct_desc_ctx_t *desc_ctx, uint32_t recycle_num)
{
desc_ctx->tx_free_desc_num += recycle_num;
}
static void s_sct_prepare_tx_seg(spi_sct_desc_ctx_t *desc_ctx, const uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], const void *send_buffer, uint32_t buf_len_bytes, spi_dma_desc_t **trans_head)
{
HAL_ASSERT(desc_ctx->tx_free_desc_num >= 1 + lldesc_get_required_num(buf_len_bytes));
const spi_dma_ctx_t *dma_ctx = __containerof(desc_ctx, spi_host_t, sct_desc_pool)->dma_ctx;
*trans_head = desc_ctx->cur_tx_seg_link;
spicommon_dma_desc_setup_link(desc_ctx->cur_tx_seg_link, conf_buffer, SOC_SPI_SCT_BUFFER_NUM_MAX * 4, false);
spi_dma_desc_t *conf_buffer_link = desc_ctx->cur_tx_seg_link;
desc_ctx->tx_free_desc_num -= 1;
desc_ctx->tx_seg_link_tail = desc_ctx->cur_tx_seg_link;
desc_ctx->cur_tx_seg_link++;
if (desc_ctx->cur_tx_seg_link == dma_ctx->dmadesc_tx + dma_ctx->dma_desc_num) {
//Enough free descriptors were checked above, so simply wrap back to the pool head
desc_ctx->cur_tx_seg_link = dma_ctx->dmadesc_tx;
}
if (send_buffer && buf_len_bytes) {
spicommon_dma_desc_setup_link(desc_ctx->cur_tx_seg_link, send_buffer, buf_len_bytes, false);
conf_buffer_link->next = desc_ctx->cur_tx_seg_link;
for (int i = 0; i < lldesc_get_required_num(buf_len_bytes); i++) {
desc_ctx->tx_seg_link_tail = desc_ctx->cur_tx_seg_link;
desc_ctx->cur_tx_seg_link++;
if (desc_ctx->cur_tx_seg_link == dma_ctx->dmadesc_tx + dma_ctx->dma_desc_num) {
//Enough free descriptors were checked above, so simply wrap back to the pool head
desc_ctx->cur_tx_seg_link = dma_ctx->dmadesc_tx;
}
}
desc_ctx->tx_free_desc_num -= lldesc_get_required_num(buf_len_bytes);
}
}
static esp_err_t spi_hal_sct_new_tx_dma_desc_head(spi_sct_desc_ctx_t *desc_ctx, const uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], const void *send_buffer, uint32_t buf_len_bytes, spi_dma_desc_t **trans_head, uint32_t *used_desc_num)
{
//1 desc for the conf_buffer, other for data.
if (desc_ctx->tx_free_desc_num < 1 + lldesc_get_required_num(buf_len_bytes)) {
return ESP_ERR_NO_MEM;
}
s_sct_prepare_tx_seg(desc_ctx, conf_buffer, send_buffer, buf_len_bytes, trans_head);
*used_desc_num = 1 + lldesc_get_required_num(buf_len_bytes);
return ESP_OK;
}
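
A quick worked example of the descriptor accounting above (assuming the usual lldesc payload limit of 4092 bytes per descriptor, which is what `lldesc_get_required_num()` divides by): a segment with buf_len_bytes = 10000 needs ceil(10000 / 4092) = 3 data descriptors, plus 1 for the conf_buffer, so `used_desc_num` comes out as 4.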
static esp_err_t spi_hal_sct_link_tx_seg_dma_desc(spi_sct_desc_ctx_t *desc_ctx, const uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], const void *send_buffer, uint32_t buf_len_bytes, uint32_t *used_desc_num)
{
//1 desc for the conf_buffer, other for data.
if (desc_ctx->tx_free_desc_num < 1 + lldesc_get_required_num(buf_len_bytes)) {
return ESP_ERR_NO_MEM;
}
if (desc_ctx->tx_seg_link_tail) {
//Connect last segment to the current segment, as we're sure the `s_sct_prepare_tx_seg` next won't fail.
desc_ctx->tx_seg_link_tail->next = desc_ctx->cur_tx_seg_link;
}
spi_dma_desc_t *internal_head = NULL;
s_sct_prepare_tx_seg(desc_ctx, conf_buffer, send_buffer, buf_len_bytes, &internal_head);
*used_desc_num += 1 + lldesc_get_required_num(buf_len_bytes);
return ESP_OK;
}
/*-------------------------
* RX
*------------------------*/
static void SPI_MASTER_ISR_ATTR spi_hal_sct_rx_dma_desc_recycle(spi_sct_desc_ctx_t *desc_ctx, uint32_t recycle_num)
{
desc_ctx->rx_free_desc_num += recycle_num;
}
static void s_sct_prepare_rx_seg(spi_sct_desc_ctx_t *desc_ctx, const void *recv_buffer, uint32_t buf_len_bytes, spi_dma_desc_t **trans_head)
{
HAL_ASSERT(desc_ctx->rx_free_desc_num >= lldesc_get_required_num(buf_len_bytes));
const spi_dma_ctx_t *dma_ctx = __containerof(desc_ctx, spi_host_t, sct_desc_pool)->dma_ctx;
*trans_head = desc_ctx->cur_rx_seg_link;
spicommon_dma_desc_setup_link(desc_ctx->cur_rx_seg_link, recv_buffer, buf_len_bytes, true);
for (int i = 0; i < lldesc_get_required_num(buf_len_bytes); i++) {
desc_ctx->rx_seg_link_tail = desc_ctx->cur_rx_seg_link;
desc_ctx->cur_rx_seg_link++;
if (desc_ctx->cur_rx_seg_link == dma_ctx->dmadesc_rx + dma_ctx->dma_desc_num) {
//Enough free descriptors were checked above, so simply wrap back to the pool head
desc_ctx->cur_rx_seg_link = dma_ctx->dmadesc_rx;
}
}
desc_ctx->rx_free_desc_num -= lldesc_get_required_num(buf_len_bytes);
}
static esp_err_t spi_hal_sct_new_rx_dma_desc_head(spi_sct_desc_ctx_t *desc_ctx, const void *recv_buffer, uint32_t buf_len_bytes, spi_dma_desc_t **trans_head, uint32_t *used_desc_num)
{
if (desc_ctx->rx_free_desc_num < lldesc_get_required_num(buf_len_bytes)) {
return ESP_ERR_NO_MEM;
}
s_sct_prepare_rx_seg(desc_ctx, recv_buffer, buf_len_bytes, trans_head);
*used_desc_num = lldesc_get_required_num(buf_len_bytes);
return ESP_OK;
}
static esp_err_t spi_hal_sct_link_rx_seg_dma_desc(spi_sct_desc_ctx_t *desc_ctx, const void *recv_buffer, uint32_t buf_len_bytes, uint32_t *used_desc_num)
{
if (desc_ctx->rx_free_desc_num < lldesc_get_required_num(buf_len_bytes)) {
return ESP_ERR_NO_MEM;
}
if (desc_ctx->rx_seg_link_tail) {
//Connect the last segment to the current segment, as we're sure the `s_sct_prepare_rx_seg` next won't fail.
desc_ctx->rx_seg_link_tail->next = desc_ctx->cur_rx_seg_link;
}
spi_dma_desc_t *internal_head = NULL;
s_sct_prepare_rx_seg(desc_ctx, recv_buffer, buf_len_bytes, &internal_head);
*used_desc_num += lldesc_get_required_num(buf_len_bytes);
return ESP_OK;
}
static void s_spi_sct_reset_dma_pool(const spi_dma_ctx_t *dma_ctx, spi_sct_desc_ctx_t *sct_desc_pool)
{
sct_desc_pool->tx_free_desc_num = dma_ctx->dma_desc_num;
sct_desc_pool->rx_free_desc_num = dma_ctx->dma_desc_num;
sct_desc_pool->cur_tx_seg_link = dma_ctx->dmadesc_tx;
sct_desc_pool->cur_rx_seg_link = dma_ctx->dmadesc_rx;
sct_desc_pool->tx_seg_link_tail = NULL;
sct_desc_pool->rx_seg_link_tail = NULL;
}
/**
* This function will turn this host into SCT (segmented-configure-transfer) mode.
*
* No concurrency guarantee: if a transaction is ongoing, calling this will corrupt the transaction
*/
esp_err_t spi_bus_segment_trans_mode_enable(spi_device_handle_t handle, bool enable)
esp_err_t spi_bus_multi_trans_mode_enable(spi_device_handle_t handle, bool enable)
{
SPI_CHECK(handle, "Invalid arguments.", ESP_ERR_INVALID_ARG);
SPI_CHECK(SOC_SPI_SCT_SUPPORTED_PERIPH(handle->host->id), "Invalid arguments", ESP_ERR_INVALID_ARG);
@@ -1440,11 +1623,11 @@ esp_err_t spi_bus_segment_trans_mode_enable(spi_device_handle_t handle, bool ena
};
spi_host_t *host = handle->host;
spi_trans_priv_t trans_buf;
spi_trans_priv_t trans_buf = { .trans = &fake_trans };
spi_hal_context_t *hal = &handle->host->hal;
spi_hal_dev_config_t *hal_dev = &handle->hal_dev;
//As `fake_trans` is internal, there is no need to `uninstall_priv_desc`
ret = setup_priv_desc(&fake_trans, &trans_buf, (host->bus_attr->dma_enabled));
ret = setup_priv_desc(host, &trans_buf);
if (ret != ESP_OK) {
return ret;
}
@@ -1457,9 +1640,10 @@ esp_err_t spi_bus_segment_trans_mode_enable(spi_device_handle_t handle, bool ena
#if CONFIG_IDF_TARGET_ESP32S2
// conf_base needs to ensure the transaction gap is longer than about 2 us at different frequencies.
// conf_base is only configurable on s2.
spi_hal_sct_setup_conf_base(hal, handle->real_clk_freq_hz/600000);
spi_hal_sct_setup_conf_base(hal, handle->hal_dev.timing_conf.real_freq / 600000);
#endif
s_spi_sct_reset_dma_pool(host->dma_ctx, &host->sct_desc_pool);
spi_hal_sct_init(hal);
} else {
spi_hal_sct_deinit(&handle->host->hal);
@@ -1470,26 +1654,26 @@ esp_err_t spi_bus_segment_trans_mode_enable(spi_device_handle_t handle, bool ena
return ESP_OK;
}
static void SPI_MASTER_ATTR s_sct_init_conf_buffer(spi_hal_context_t *hal, spi_seg_transaction_t *seg_trans_desc, uint32_t seg_num)
static void SPI_MASTER_ATTR s_sct_init_conf_buffer(spi_hal_context_t *hal, uint32_t *buffer, uint32_t trans_num)
{
// Reading from HW has to wait for the slower APB clock domain to return data; looping on the slow clock domain wastes time.
// Use one image, then copy it with the CPU instead.
uint32_t conf_buffer_img[SOC_SPI_SCT_BUFFER_NUM_MAX];
spi_hal_sct_init_conf_buffer(hal, conf_buffer_img);
for (int i = 0; i < seg_num; i++) {
for (int i = 0; i < trans_num; i++) {
memcpy(seg_trans_desc[i].conf_buffer, conf_buffer_img, sizeof(conf_buffer_img));
memcpy(&buffer[i * SOC_SPI_SCT_BUFFER_NUM_MAX], conf_buffer_img, sizeof(conf_buffer_img));
}
}
static void SPI_MASTER_ATTR s_sct_format_conf_buffer(spi_device_handle_t handle, spi_seg_transaction_t *seg_trans_desc, bool seg_end)
static void SPI_MASTER_ATTR s_sct_format_conf_buffer(spi_device_handle_t handle, spi_multi_transaction_t *seg_trans_desc, uint32_t *buffer, bool seg_end)
{
spi_hal_context_t *hal = &handle->host->hal;
spi_hal_dev_config_t *hal_dev = &handle->hal_dev;
spi_hal_seg_config_t seg_config = {};
//prep
if (seg_trans_desc->seg_trans_flags & SPI_SEG_TRANS_PREP_LEN_UPDATED) {
if (seg_trans_desc->seg_trans_flags & SPI_MULTI_TRANS_PREP_LEN_UPDATED) {
seg_config.cs_setup = seg_trans_desc->cs_ena_pretrans;
} else {
seg_config.cs_setup = handle->cfg.cs_ena_pretrans;
@@ -1497,7 +1681,7 @@ static void SPI_MASTER_ATTR s_sct_format_conf_buffer(spi_device_handle_t handle,
//cmd
seg_config.cmd = seg_trans_desc->base.cmd;
if (seg_trans_desc->seg_trans_flags & SPI_SEG_TRANS_CMD_LEN_UPDATED) {
if (seg_trans_desc->seg_trans_flags & SPI_MULTI_TRANS_CMD_LEN_UPDATED) {
seg_config.cmd_bits = seg_trans_desc->command_bits;
} else {
seg_config.cmd_bits = handle->cfg.command_bits;
@@ -1505,14 +1689,14 @@ static void SPI_MASTER_ATTR s_sct_format_conf_buffer(spi_device_handle_t handle,
//addr
seg_config.addr = seg_trans_desc->base.addr;
if (seg_trans_desc->seg_trans_flags & SPI_SEG_TRANS_ADDR_LEN_UPDATED) {
if (seg_trans_desc->seg_trans_flags & SPI_MULTI_TRANS_ADDR_LEN_UPDATED) {
seg_config.addr_bits = seg_trans_desc->address_bits;
} else {
seg_config.addr_bits = handle->cfg.address_bits;
}
//dummy
if (seg_trans_desc->seg_trans_flags & SPI_SEG_TRANS_DUMMY_LEN_UPDATED) {
if (seg_trans_desc->seg_trans_flags & SPI_MULTI_TRANS_DUMMY_LEN_UPDATED) {
seg_config.dummy_bits = seg_trans_desc->dummy_bits;
} else {
seg_config.dummy_bits = handle->cfg.dummy_bits;
@@ -1525,7 +1709,7 @@ static void SPI_MASTER_ATTR s_sct_format_conf_buffer(spi_device_handle_t handle,
seg_config.rx_bitlen = seg_trans_desc->base.rxlength;
//done
if (seg_trans_desc->seg_trans_flags & SPI_SEG_TRANS_DONE_LEN_UPDATED) {
if (seg_trans_desc->seg_trans_flags & SPI_MULTI_TRANS_DONE_LEN_UPDATED) {
seg_config.cs_hold = seg_trans_desc->cs_ena_posttrans;
} else {
seg_config.cs_hold = handle->cfg.cs_ena_posttrans;
@@ -1535,21 +1719,25 @@ static void SPI_MASTER_ATTR s_sct_format_conf_buffer(spi_device_handle_t handle,
if (seg_end) {
seg_config.seg_end = true;
}
seg_config.seg_gap_len = seg_trans_desc->seg_gap_clock_len;
seg_config.seg_gap_len = seg_trans_desc->sct_gap_len;
// set line mode to hal_config
spi_sct_set_hal_trans_config(seg_trans_desc, &hal->trans_config);
spi_hal_sct_format_conf_buffer(hal, &seg_config, hal_dev, seg_trans_desc->conf_buffer);
spi_hal_sct_format_conf_buffer(hal, &seg_config, hal_dev, buffer);
}
esp_err_t SPI_MASTER_ATTR spi_device_queue_segment_trans(spi_device_handle_t handle, spi_seg_transaction_t *seg_trans_desc, uint32_t seg_num, TickType_t ticks_to_wait)
esp_err_t SPI_MASTER_ATTR spi_device_queue_multi_trans(spi_device_handle_t handle, spi_multi_transaction_t *seg_trans_desc, uint32_t trans_num, TickType_t ticks_to_wait)
{
SPI_CHECK(handle, "Invalid arguments.", ESP_ERR_INVALID_ARG);
SPI_CHECK(SOC_SPI_SCT_SUPPORTED_PERIPH(handle->host->id), "Invalid arguments", ESP_ERR_INVALID_ARG);
SPI_CHECK(handle->host->sct_mode_enabled == 1, "SCT mode isn't enabled", ESP_ERR_INVALID_STATE);
esp_err_t ret = ESP_OK;
for (int i = 0; i < seg_num; i++) {
uint16_t alignment = handle->host->bus_attr->internal_mem_align_size;
uint32_t *conf_buffer = heap_caps_aligned_alloc(alignment, (trans_num * SOC_SPI_SCT_BUFFER_NUM_MAX * sizeof(uint32_t)), MALLOC_CAP_DMA);
SPI_CHECK(conf_buffer, "Not enough memory", ESP_ERR_NO_MEM);
for (int i = 0; i < trans_num; i++) {
ret = check_trans_valid(handle, (spi_transaction_t *)&seg_trans_desc[i]);
if (ret != ESP_OK) {
return ret;
@@ -1558,53 +1746,53 @@ esp_err_t SPI_MASTER_ATTR spi_device_queue_segment_trans(spi_device_handle_t han
SPI_CHECK(!spi_bus_device_is_polling(handle), "Cannot queue new transaction while previous polling transaction is not terminated.", ESP_ERR_INVALID_STATE);
spi_hal_context_t *hal = &handle->host->hal;
s_sct_init_conf_buffer(hal, seg_trans_desc, seg_num);
s_sct_init_conf_buffer(hal, conf_buffer, trans_num);
spi_hal_dma_desc_status_t dma_desc_status = SPI_HAL_DMA_DESC_NULL;
esp_err_t dma_desc_status = ESP_FAIL;
lldesc_t *tx_seg_head = NULL;
spi_dma_desc_t *tx_seg_head = NULL;
uint32_t tx_used_dma_desc_num = 0;
uint32_t tx_buf_len = 0;
lldesc_t *rx_seg_head = NULL;
spi_dma_desc_t *rx_seg_head = NULL;
uint32_t rx_used_dma_desc_num = 0;
uint32_t rx_buf_len = 0;
/*--------------Get segment head--------------*/
s_sct_format_conf_buffer(handle, &seg_trans_desc[0], (seg_num == 1));
s_sct_format_conf_buffer(handle, &seg_trans_desc[0], conf_buffer, (trans_num == 1));
//TX
tx_buf_len = (seg_trans_desc[0].base.length + 8 - 1) / 8;
portENTER_CRITICAL(&handle->host->spinlock);
dma_desc_status = spi_hal_sct_new_tx_dma_desc_head(hal, seg_trans_desc[0].conf_buffer, seg_trans_desc[0].base.tx_buffer, tx_buf_len, &tx_seg_head, &tx_used_dma_desc_num);
dma_desc_status = spi_hal_sct_new_tx_dma_desc_head(&handle->host->sct_desc_pool, conf_buffer, seg_trans_desc[0].base.tx_buffer, tx_buf_len, &tx_seg_head, &tx_used_dma_desc_num);
portEXIT_CRITICAL(&handle->host->spinlock);
SPI_CHECK(dma_desc_status == SPI_HAL_DMA_DESC_LINKED, "No available dma descriptors, increase the `max_transfer_sz`, or wait queued transactions are done", ESP_ERR_INVALID_STATE);
SPI_CHECK(dma_desc_status == ESP_OK, "No available dma descriptors, increase the `max_transfer_sz`, or wait queued transactions are done", ESP_ERR_INVALID_STATE);
//RX
//This is modified to the same length as the tx length when in fd mode, else it's `rxlength`
rx_buf_len = (seg_trans_desc[0].base.rxlength + 8 - 1) / 8;
if (seg_trans_desc[0].base.rx_buffer) {
portENTER_CRITICAL(&handle->host->spinlock);
dma_desc_status = spi_hal_sct_new_rx_dma_desc_head(hal, seg_trans_desc[0].base.rx_buffer, rx_buf_len, &rx_seg_head, &rx_used_dma_desc_num);
dma_desc_status = spi_hal_sct_new_rx_dma_desc_head(&handle->host->sct_desc_pool, seg_trans_desc[0].base.rx_buffer, rx_buf_len, &rx_seg_head, &rx_used_dma_desc_num);
portEXIT_CRITICAL(&handle->host->spinlock);
SPI_CHECK(dma_desc_status == SPI_HAL_DMA_DESC_LINKED, "No available dma descriptors, increase the `max_transfer_sz`, or wait queued transactions are done", ESP_ERR_INVALID_STATE);
SPI_CHECK(dma_desc_status == ESP_OK, "No available dma descriptors, increase the `max_transfer_sz`, or wait queued transactions are done", ESP_ERR_INVALID_STATE);
}
/*--------------Prepare other segments--------------*/
for (int i = 1; i < seg_num; i++) {
for (int i = 1; i < trans_num; i++) {
s_sct_format_conf_buffer(handle, &seg_trans_desc[i], (i == (seg_num - 1)));
s_sct_format_conf_buffer(handle, &seg_trans_desc[i], &conf_buffer[i * SOC_SPI_SCT_BUFFER_NUM_MAX], (i == (trans_num - 1)));
//TX
tx_buf_len = (seg_trans_desc[i].base.length + 8 - 1) / 8;
portENTER_CRITICAL(&handle->host->spinlock);
dma_desc_status = spi_hal_sct_link_tx_seg_dma_desc(hal, seg_trans_desc[i].conf_buffer, seg_trans_desc[i].base.tx_buffer, tx_buf_len, &tx_used_dma_desc_num);
dma_desc_status = spi_hal_sct_link_tx_seg_dma_desc(&handle->host->sct_desc_pool, &conf_buffer[i * SOC_SPI_SCT_BUFFER_NUM_MAX], seg_trans_desc[i].base.tx_buffer, tx_buf_len, &tx_used_dma_desc_num);
portEXIT_CRITICAL(&handle->host->spinlock);
SPI_CHECK(dma_desc_status == SPI_HAL_DMA_DESC_LINKED, "No available dma descriptors, increase the `max_transfer_sz`, or wait queued transactions are done", ESP_ERR_INVALID_STATE);
SPI_CHECK(dma_desc_status == ESP_OK, "No available dma descriptors, increase the `max_transfer_sz`, or wait queued transactions are done", ESP_ERR_INVALID_STATE);
//RX
if (seg_trans_desc[i].base.rx_buffer) {
//This is modified to the same length as the tx length when in fd mode, else it's `rxlength`
rx_buf_len = (seg_trans_desc[i].base.rxlength + 8 - 1) / 8;
portENTER_CRITICAL(&handle->host->spinlock);
dma_desc_status = spi_hal_sct_link_rx_seg_dma_desc(hal, seg_trans_desc[i].base.rx_buffer, rx_buf_len, &rx_used_dma_desc_num);
dma_desc_status = spi_hal_sct_link_rx_seg_dma_desc(&handle->host->sct_desc_pool, seg_trans_desc[i].base.rx_buffer, rx_buf_len, &rx_used_dma_desc_num);
portEXIT_CRITICAL(&handle->host->spinlock);
}
}
@@ -1613,10 +1801,11 @@ esp_err_t SPI_MASTER_ATTR spi_device_queue_segment_trans(spi_device_handle_t han
esp_pm_lock_acquire(handle->host->bus_attr->pm_lock);
#endif
spi_sct_desc_priv_t sct_desc = {
spi_sct_trans_priv_t sct_desc = {
.tx_seg_head = tx_seg_head,
.rx_seg_head = rx_seg_head,
.sct_trans_desc_head = seg_trans_desc,
.sct_conf_buffer = conf_buffer,
.tx_used_desc_num = tx_used_dma_desc_num,
.rx_used_desc_num = rx_used_dma_desc_num,
};
@@ -1639,12 +1828,12 @@ esp_err_t SPI_MASTER_ATTR spi_device_queue_segment_trans(spi_device_handle_t han
return ESP_OK;
}
esp_err_t SPI_MASTER_ATTR spi_device_get_segment_trans_result(spi_device_handle_t handle, spi_seg_transaction_t **seg_trans_desc, TickType_t ticks_to_wait)
esp_err_t SPI_MASTER_ATTR spi_device_get_multi_trans_result(spi_device_handle_t handle, spi_multi_transaction_t **seg_trans_desc, TickType_t ticks_to_wait)
{
SPI_CHECK(handle, "Invalid arguments.", ESP_ERR_INVALID_ARG);
SPI_CHECK(SOC_SPI_SCT_SUPPORTED_PERIPH(handle->host->id), "Invalid arguments", ESP_ERR_INVALID_ARG);
SPI_CHECK(handle->host->sct_mode_enabled == 1, "SCT mode isn't enabled", ESP_ERR_INVALID_STATE);
spi_sct_desc_priv_t sct_desc = {};
spi_sct_trans_priv_t sct_desc = {};
BaseType_t r = xQueueReceive(handle->ret_queue, (void *)&sct_desc, ticks_to_wait);
if (!r) {


@@ -44,7 +44,7 @@
#if !CONFIG_FREERTOS_SMP // IDF-5223
#define IDF_PERFORMANCE_MAX_SPI_PER_TRANS_POLLING 15
#define IDF_PERFORMANCE_MAX_SPI_PER_TRANS_POLLING_NO_DMA 15
#define IDF_PERFORMANCE_MAX_SPI_PER_TRANS_NO_POLLING 32
#define IDF_PERFORMANCE_MAX_SPI_PER_TRANS_NO_POLLING 33
#define IDF_PERFORMANCE_MAX_SPI_PER_TRANS_NO_POLLING_NO_DMA 30
#else
#define IDF_PERFORMANCE_MAX_SPI_PER_TRANS_POLLING 17
@@ -55,7 +55,7 @@
#elif CONFIG_IDF_TARGET_ESP32C6
#define IDF_PERFORMANCE_MAX_SPI_CLK_FREQ 26*1000*1000
#define IDF_PERFORMANCE_MAX_SPI_PER_TRANS_NO_POLLING 34
#define IDF_PERFORMANCE_MAX_SPI_PER_TRANS_NO_POLLING 35 //TODO: IDF-9551, check perform
#define IDF_PERFORMANCE_MAX_SPI_PER_TRANS_POLLING 17
#define IDF_PERFORMANCE_MAX_SPI_PER_TRANS_NO_POLLING_NO_DMA 32
#define IDF_PERFORMANCE_MAX_SPI_PER_TRANS_POLLING_NO_DMA 15


@@ -4,9 +4,14 @@ set(srcs
"test_spi_master.c"
"test_spi_sio.c"
"test_spi_bus_lock.c"
"test_spi_master_sct.c"
)
# the sct test uses slave hd APIs, so it needs slave hd support
# temporarily skip the sct test under iram_safe; neither sct nor slave hd is cleaned up for it yet
if(CONFIG_SOC_SPI_SUPPORT_SLAVE_HD_VER2 AND NOT CONFIG_COMPILER_DUMP_RTL_FILES)
    list(APPEND srcs "test_spi_master_sct.c")
endif()
# In order for the cases defined by `TEST_CASE` to be linked into the final elf,
# the component can be registered as WHOLE_ARCHIVE
idf_component_register(


@@ -14,12 +14,12 @@
#include "test_utils.h"
#include "esp_heap_caps.h"
#include "driver/spi_master.h"
#include "esp_private/spi_master_internal.h"
#include "driver/spi_slave_hd.h"
#include "driver/spi_slave.h"
#include "soc/spi_pins.h"
#include "test_spi_utils.h"
__attribute__((unused)) static const char *TAG = "SCT";
#if (SOC_SPI_SUPPORT_SLAVE_HD_VER2 && SOC_SPI_SCT_SUPPORTED)
@@ -62,18 +62,18 @@ static void hd_master(void)
uint8_t *master_rx_buf = heap_caps_calloc(1, TEST_HD_DATA_LEN, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);
uint32_t master_rx_val = 0;
uint8_t *slave_tx_buf = heap_caps_calloc(1, TEST_HD_DATA_LEN, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);
get_tx_buffer(199, master_tx_buf, slave_tx_buf, TEST_HD_DATA_LEN);
test_fill_random_to_buffers_dualboard(199, master_tx_buf, slave_tx_buf, TEST_HD_DATA_LEN);
spi_seg_transaction_t *ret_seg_trans = NULL;
spi_multi_transaction_t *ret_seg_trans = NULL;
//---------------------Master TX---------------------------//
spi_seg_transaction_t tx_seg_trans[TEST_HD_TIMES] = {
spi_multi_transaction_t tx_seg_trans[TEST_HD_TIMES] = {
{
.base = {
.cmd = 0x1,
.addr = TEST_HD_BUF_0_ID,
.length = 4 * 8,
.tx_buffer = (uint8_t *) &master_tx_val,
},
},
//TEST_HD_DATA_LEN of TX data, split into 2 segments, `TEST_HD_DATA_LEN_PER_SEG` per segment
@@ -84,7 +84,7 @@ static void hd_master(void)
.tx_buffer = master_tx_buf,
},
.dummy_bits = 8,
.seg_trans_flags = SPI_SEG_TRANS_DUMMY_LEN_UPDATED,
.seg_trans_flags = SPI_MULTI_TRANS_DUMMY_LEN_UPDATED,
},
{
.base = {
@@ -93,7 +93,7 @@ static void hd_master(void)
.tx_buffer = master_tx_buf + TEST_HD_DATA_LEN_PER_SEG,
},
.dummy_bits = 8,
.seg_trans_flags = SPI_SEG_TRANS_DUMMY_LEN_UPDATED,
.seg_trans_flags = SPI_MULTI_TRANS_DUMMY_LEN_UPDATED,
},
{
.base = {
@@ -102,23 +102,22 @@ static void hd_master(void)
},
};
TEST_ESP_OK(spi_bus_segment_trans_mode_enable(handle, true));
TEST_ESP_OK(spi_bus_multi_trans_mode_enable(handle, true));
unity_wait_for_signal("Slave ready");
TEST_ESP_OK(spi_device_queue_segment_trans(handle, tx_seg_trans, TEST_HD_TIMES, portMAX_DELAY));
TEST_ESP_OK(spi_device_queue_multi_trans(handle, tx_seg_trans, TEST_HD_TIMES, portMAX_DELAY));
TEST_ESP_OK(spi_device_get_segment_trans_result(handle, &ret_seg_trans, portMAX_DELAY));
TEST_ESP_OK(spi_device_get_multi_trans_result(handle, &ret_seg_trans, portMAX_DELAY));
TEST_ASSERT(ret_seg_trans == tx_seg_trans);
ESP_LOG_BUFFER_HEX("Master tx", master_tx_buf, TEST_HD_DATA_LEN);
TEST_ESP_OK(spi_bus_segment_trans_mode_enable(handle, false));
TEST_ESP_OK(spi_bus_multi_trans_mode_enable(handle, false));
//---------------------Master RX---------------------------//
spi_seg_transaction_t rx_seg_trans[TEST_HD_TIMES] = {
spi_multi_transaction_t rx_seg_trans[TEST_HD_TIMES] = {
{
.base = {
.cmd = 0x2,
.addr = TEST_HD_BUF_1_ID,
.rxlength = 4 * 8,
.rx_buffer = (uint8_t *) &master_rx_val,
},
},
// TEST_HD_DATA_LEN of TX data, split into 2 segments, `TEST_HD_DATA_LEN_PER_SEG` per segment
@@ -129,7 +128,7 @@ static void hd_master(void)
.rx_buffer = master_rx_buf,
},
.dummy_bits = 8,
.seg_trans_flags = SPI_SEG_TRANS_DUMMY_LEN_UPDATED,
.seg_trans_flags = SPI_MULTI_TRANS_DUMMY_LEN_UPDATED,
},
{
.base = {
@@ -138,7 +137,7 @@ static void hd_master(void)
.rx_buffer = master_rx_buf + TEST_HD_DATA_LEN_PER_SEG,
},
.dummy_bits = 8,
.seg_trans_flags = SPI_SEG_TRANS_DUMMY_LEN_UPDATED,
.seg_trans_flags = SPI_MULTI_TRANS_DUMMY_LEN_UPDATED,
},
{
.base = {
@@ -146,11 +145,11 @@ static void hd_master(void)
}
},
};
TEST_ESP_OK(spi_bus_segment_trans_mode_enable(handle, true));
TEST_ESP_OK(spi_bus_multi_trans_mode_enable(handle, true));
unity_wait_for_signal("Slave ready");
TEST_ESP_OK(spi_device_queue_segment_trans(handle, rx_seg_trans, TEST_HD_TIMES, portMAX_DELAY));
TEST_ESP_OK(spi_device_queue_multi_trans(handle, rx_seg_trans, TEST_HD_TIMES, portMAX_DELAY));
TEST_ESP_OK(spi_device_get_segment_trans_result(handle, &ret_seg_trans, portMAX_DELAY));
TEST_ESP_OK(spi_device_get_multi_trans_result(handle, &ret_seg_trans, portMAX_DELAY));
TEST_ASSERT(ret_seg_trans == rx_seg_trans);
ESP_LOGI("Master", "Slave Reg[%d] value is: 0x%" PRIx32, TEST_HD_BUF_1_ID, master_rx_val);
@@ -184,7 +183,7 @@ static void hd_slave(void)
uint8_t *slave_rx_buf = heap_caps_calloc(1, TEST_HD_DATA_LEN, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);
uint32_t slave_rx_val = 0;
uint8_t *master_tx_buf = heap_caps_calloc(1, TEST_HD_DATA_LEN, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);
get_tx_buffer(199, master_tx_buf, slave_tx_buf, TEST_HD_DATA_LEN);
test_fill_random_to_buffers_dualboard(199, master_tx_buf, slave_tx_buf, TEST_HD_DATA_LEN);
unity_wait_for_signal("Master ready");
@@ -194,7 +193,7 @@ static void hd_slave(void)
.len = TEST_HD_DATA_LEN,
};
TEST_ESP_OK(spi_slave_hd_queue_trans(SPI2_HOST, SPI_SLAVE_CHAN_RX, &slave_rx_trans, portMAX_DELAY));
unity_send_signal("slave ready");
unity_send_signal("Slave ready");
TEST_ESP_OK(spi_slave_hd_get_trans_res(SPI2_HOST, SPI_SLAVE_CHAN_RX, &ret_trans, portMAX_DELAY));
TEST_ASSERT(ret_trans == &slave_rx_trans);
@@ -212,7 +211,7 @@ static void hd_slave(void)
.len = TEST_HD_DATA_LEN,
};
TEST_ESP_OK(spi_slave_hd_queue_trans(SPI2_HOST, SPI_SLAVE_CHAN_TX, &slave_tx_trans, portMAX_DELAY));
unity_send_signal("slave ready");
unity_send_signal("Slave ready");
TEST_ESP_OK(spi_slave_hd_get_trans_res(SPI2_HOST, SPI_SLAVE_CHAN_TX, &ret_trans, portMAX_DELAY));
TEST_ASSERT(ret_trans == &slave_tx_trans);
ESP_LOG_BUFFER_HEX("Slave tx", slave_tx_buf, TEST_HD_DATA_LEN);


@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -1533,6 +1533,19 @@ static inline void spi_ll_set_conf_base_bitslen(spi_dev_t *hw, uint8_t conf_base
}
}
/**
* Set conf phase bits len to HW for segment config trans mode.
*
* @param hw Beginning address of the peripheral registers.
* @param conf_bitlen Value of field conf_bitslen in cmd reg.
*/
static inline void spi_ll_set_conf_phase_bits_len(spi_dev_t *hw, uint32_t conf_bitlen)
{
if (conf_bitlen <= SOC_SPI_SCT_CONF_BITLEN_MAX) {
hw->cmd.conf_bitlen = conf_bitlen;
}
}
/**
* Set conf phase bits len to config buffer for segment config trans mode.
*


@@ -48,15 +48,6 @@ typedef dma_descriptor_align4_t spi_dma_desc_t;
typedef dma_descriptor_align8_t spi_dma_desc_t;
#endif
/**
* @brief Enum for DMA descriptor status
*/
typedef enum spi_hal_dma_desc_status_t {
SPI_HAL_DMA_DESC_NULL = 0, ///< Null descriptors
SPI_HAL_DMA_DESC_RUN_OUT = 1, ///< DMA descriptors are not enough for data
SPI_HAL_DMA_DESC_LINKED = 2, ///< DMA descriptors are linked successfully
} spi_hal_dma_desc_status_t;
/**
* Input parameters to the ``spi_hal_cal_clock_conf`` to calculate the timing configuration
*/
@@ -112,17 +103,6 @@ typedef struct {
/* Configured by driver at initialization, don't touch */
spi_dev_t *hw; ///< Beginning address of the peripheral registers.
bool dma_enabled; ///< Whether the DMA is enabled, do not update after initialization
#if SOC_SPI_SCT_SUPPORTED
/* Segmented-Configure-Transfer required, configured by driver, don't touch */
uint32_t tx_free_desc_num;
uint32_t rx_free_desc_num;
lldesc_t *cur_tx_seg_link; ///< Current TX DMA descriptor used for sct mode.
lldesc_t *cur_rx_seg_link; ///< Current RX DMA descriptor used for sct mode.
lldesc_t *tx_seg_link_tail; ///< Tail of the TX DMA descriptor link
lldesc_t *rx_seg_link_tail; ///< Tail of the RX DMA descriptor link
#endif //#if SOC_SPI_SCT_SUPPORTED
/* Internal parameters, don't touch */
spi_hal_trans_config_t trans_config; ///< Transaction configuration
} spi_hal_context_t;
@@ -341,92 +321,6 @@ void spi_hal_sct_init_conf_buffer(spi_hal_context_t *hal, uint32_t conf_buffer[S
*/
void spi_hal_sct_format_conf_buffer(spi_hal_context_t *hal, const spi_hal_seg_config_t *config, const spi_hal_dev_config_t *dev, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX]);
/**
* Format tx dma descriptor(s) for a SCT head
*
* @param hal Context of the HAL layer.
* @param conf_buffer Conf buffer
* @param send_buffer TX buffer
* @param buf_len_bytes TX buffer length, in bytes
* @param[out] trans_head SCT dma descriptor head
* @param[out] used_desc_num After formatting, `used_desc_num` number of descriptors are used
*
* @return
* - SPI_HAL_DMA_DESC_LINKED: Successfully format these dma descriptors, and link together
* - SPI_HAL_DMA_DESC_RUN_OUT: Run out of dma descriptors, should alloc more, or wait until enough number of descriptors are recycled (by `spi_hal_sct_tx_dma_desc_recycle`)
*/
spi_hal_dma_desc_status_t spi_hal_sct_new_tx_dma_desc_head(spi_hal_context_t *hal, const uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], const void *send_buffer, uint32_t buf_len_bytes, lldesc_t **trans_head, uint32_t *used_desc_num);
/**
* Format tx dma descriptor(s) for a segment, and link it to its previous segment
*
* @param hal Context of the HAL layer.
* @param conf_buffer Conf buffer
* @param send_buffer TX buffer
* @param buf_len_bytes TX buffer length, in bytes
* @param[out] used_desc_num After formatting, `used_desc_num` number of descriptors are used
*
* @return
* - SPI_HAL_DMA_DESC_LINKED: Successfully format these dma descriptors, and link together
* - SPI_HAL_DMA_DESC_RUN_OUT: Run out of dma descriptors, should alloc more, or wait until enough number of descriptors are recycled (by `spi_hal_sct_tx_dma_desc_recycle`)
*/
spi_hal_dma_desc_status_t spi_hal_sct_link_tx_seg_dma_desc(spi_hal_context_t *hal, const uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], const void *send_buffer, uint32_t buf_len_bytes, uint32_t *used_desc_num);
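A usage sketch, assuming `conf_buf0`/`conf_buf1` were filled beforehand by `spi_hal_sct_format_conf_buffer()` and the caller holds the spinlock mentioned in spi_hal.c: a multi-segment TX chain is built with one head call plus one link call per further segment.

    lldesc_t *tx_head = NULL;
    uint32_t used_desc_cnt = 0;
    if (spi_hal_sct_new_tx_dma_desc_head(hal, conf_buf0, data0, len0, &tx_head, &used_desc_cnt) != SPI_HAL_DMA_DESC_LINKED) {
        return ESP_ERR_NO_MEM;  //hypothetical error mapping, chosen for this sketch
    }
    //`used_desc_cnt` is incremented (not overwritten) by the link call below
    if (spi_hal_sct_link_tx_seg_dma_desc(hal, conf_buf1, data1, len1, &used_desc_cnt) != SPI_HAL_DMA_DESC_LINKED) {
        return ESP_ERR_NO_MEM;
    }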
/**
 * Recycle used TX DMA descriptors (return them to the available state, NOT a memory free)
 *
 * @param hal Context of the HAL layer.
 * @param recycle_num Number of the to-be-recycled descriptors
 */
void spi_hal_sct_tx_dma_desc_recycle(spi_hal_context_t *hal, uint32_t recycle_num);
/**
 * Format RX DMA descriptor(s) for an SCT head
 *
 * @param hal Context of the HAL layer.
 * @param recv_buffer RX buffer
 * @param buf_len_bytes RX buffer length, in bytes
 * @param[out] trans_head SCT DMA descriptor head
 * @param[out] used_desc_num Number of descriptors used by this formatting
 *
 * @return
 *        - SPI_HAL_DMA_DESC_LINKED:  Successfully formatted and linked these DMA descriptors
 *        - SPI_HAL_DMA_DESC_RUN_OUT: Ran out of DMA descriptors; allocate more, or wait until enough descriptors are recycled (by `spi_hal_sct_rx_dma_desc_recycle`)
 */
spi_hal_dma_desc_status_t spi_hal_sct_new_rx_dma_desc_head(spi_hal_context_t *hal, const void *recv_buffer, uint32_t buf_len_bytes, lldesc_t **trans_head, uint32_t *used_desc_num);
/**
 * Format RX DMA descriptor(s) for a segment, and link it to the previous segment
 *
 * @param hal Context of the HAL layer.
 * @param recv_buffer RX buffer
 * @param buf_len_bytes RX buffer length, in bytes
 * @param[out] used_desc_num Incremented by the number of descriptors used by this formatting
 *
 * @return
 *        - SPI_HAL_DMA_DESC_LINKED:  Successfully formatted and linked these DMA descriptors
 *        - SPI_HAL_DMA_DESC_RUN_OUT: Ran out of DMA descriptors; allocate more, or wait until enough descriptors are recycled (by `spi_hal_sct_rx_dma_desc_recycle`)
 */
spi_hal_dma_desc_status_t spi_hal_sct_link_rx_seg_dma_desc(spi_hal_context_t *hal, const void *recv_buffer, uint32_t buf_len_bytes, uint32_t *used_desc_num);
/**
 * Recycle used RX DMA descriptors (return them to the available state, NOT a memory free)
 *
 * @param hal Context of the HAL layer.
 * @param recycle_num Number of the to-be-recycled descriptors
 */
void spi_hal_sct_rx_dma_desc_recycle(spi_hal_context_t *hal, uint32_t recycle_num);
/**
 * Load the DMA descriptors into the DMA engine
 * Does nothing for the TX or RX direction when `tx_seg_head` or `rx_seg_head` is NULL, respectively
 *
 * @param hal Context of the HAL layer.
 * @param rx_seg_head Head of the SCT RX DMA descriptors
 * @param tx_seg_head Head of the SCT TX DMA descriptors
 */
void spi_hal_sct_load_dma_link(spi_hal_context_t *hal, lldesc_t *rx_seg_head, lldesc_t *tx_seg_head);
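Continuing the sketch above: once the TX chain (`tx_head`) and an RX chain are formatted, both are handed to the DMA engine in one call, and passing NULL for either head skips that direction. `rx_buf` and `rx_len` are assumed caller-provided.

    lldesc_t *rx_head = NULL;
    uint32_t rx_used_cnt = 0;
    if (spi_hal_sct_new_rx_dma_desc_head(hal, rx_buf, rx_len, &rx_head, &rx_used_cnt) == SPI_HAL_DMA_DESC_LINKED) {
        //NULL for either head would skip loading that direction
        spi_hal_sct_load_dma_link(hal, rx_head, tx_head);
    }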
/**
 * Deinit SCT mode related registers and hal states
 */
@ -435,13 +329,22 @@ void spi_hal_sct_deinit(spi_hal_context_t *hal);
/**
 * Set conf_bitslen to HW for sct.
 */
#define spi_hal_sct_set_conf_bits_len(hal, conf_len)    spi_ll_set_conf_phase_bits_len((hal)->hw, conf_len)
void spi_hal_sct_set_conf_bits_len(spi_hal_context_t *hal, uint32_t conf_len);
/**
* Clear SPI interrupt bits by mask
*/
void spi_hal_clear_intr_mask(spi_hal_context_t *hal, uint32_t mask);
/**
* Get SPI interrupt bits status by mask
*/
bool spi_hal_get_intr_mask(spi_hal_context_t *hal, uint32_t mask);
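A minimal polling sketch with these two wrappers, assuming `SPI_LL_INTR_SEG_DONE` (used by the HAL implementation below) is the mask of interest; a real driver would block on the interrupt instead of spinning:

    while (!spi_hal_get_intr_mask(hal, SPI_LL_INTR_SEG_DONE)) {
        //busy-wait until the segment-done status bit is set
    }
    spi_hal_clear_intr_mask(hal, SPI_LL_INTR_SEG_DONE);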
/**
 * Set conf_bitslen base to HW for sct, only supported on s2.
 */
#define spi_hal_sct_setup_conf_base(hal, conf_base)  spi_ll_set_conf_base_bitslen((hal)->hw, conf_base)
#endif //#if SOC_SPI_SCT_SUPPORTED
#endif //#if SOC_GPSPI_SUPPORTED
@ -53,19 +53,8 @@ void spi_hal_deinit(spi_hal_context_t *hal)
}
#if SOC_SPI_SCT_SUPPORTED
static void s_sct_reset_dma_link(spi_hal_context_t *hal)
{
    hal->tx_free_desc_num = hal->dmadesc_n;
    hal->rx_free_desc_num = hal->dmadesc_n;
    hal->cur_tx_seg_link = hal->dmadesc_tx;
    hal->cur_rx_seg_link = hal->dmadesc_rx;
    hal->tx_seg_link_tail = NULL;
    hal->rx_seg_link_tail = NULL;
}
void spi_hal_sct_init(spi_hal_context_t *hal)
{
    s_sct_reset_dma_link(hal);
    spi_ll_conf_state_enable(hal->hw, true);
    spi_ll_set_magic_number(hal->hw, SPI_LL_SCT_MAGIC_NUMBER);
    spi_ll_disable_int(hal->hw);    //trans_done intr enabled in `add device` phase, sct mode should use sct_trans_done only
@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
 * SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
@ -167,6 +167,18 @@ void spi_hal_fetch_result(const spi_hal_context_t *hal)
/*------------------------------------------------------------------------------
 * Segmented-Configure-Transfer
 *----------------------------------------------------------------------------*/
void spi_hal_clear_intr_mask(spi_hal_context_t *hal, uint32_t mask)
{
    spi_ll_clear_intr(hal->hw, mask);
}
bool spi_hal_get_intr_mask(spi_hal_context_t *hal, uint32_t mask)
{
    return spi_ll_get_intr(hal->hw, mask);
}
void spi_hal_sct_set_conf_bits_len(spi_hal_context_t *hal, uint32_t conf_len)
{
    spi_ll_set_conf_phase_bits_len(hal->hw, conf_len);
}
void spi_hal_sct_init_conf_buffer(spi_hal_context_t *hal, uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX])
{
    spi_ll_init_conf_buffer(hal->hw, conf_buffer);
@ -189,155 +201,4 @@ void spi_hal_sct_format_conf_buffer(spi_hal_context_t *hal, const spi_hal_seg_co
#endif
}
void spi_hal_sct_load_dma_link(spi_hal_context_t *hal, lldesc_t *rx_seg_head, lldesc_t *tx_seg_head)
{
    spi_ll_clear_intr(hal->hw, SPI_LL_INTR_SEG_DONE);
    HAL_ASSERT(hal->dma_enabled);
    if (rx_seg_head) {
        spi_dma_ll_rx_reset(hal->dma_in, hal->rx_dma_chan);
        spi_ll_dma_rx_fifo_reset(hal->hw);
        spi_ll_infifo_full_clr(hal->hw);
        spi_ll_dma_rx_enable(hal->hw, 1);
        spi_dma_ll_rx_start(hal->dma_in, hal->rx_dma_chan, rx_seg_head);
    }
    if (tx_seg_head) {
        spi_dma_ll_tx_reset(hal->dma_out, hal->tx_dma_chan);
        spi_ll_dma_tx_fifo_reset(hal->hw);
        spi_ll_outfifo_empty_clr(hal->hw);
        spi_ll_dma_tx_enable(hal->hw, 1);
        spi_dma_ll_tx_start(hal->dma_out, hal->tx_dma_chan, tx_seg_head);
    }
}
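Note the fixed order inside each branch: the DMA channel and its FIFO are reset before the direction is enabled and the descriptor chain is started, which keeps stale FIFO contents from a previous transfer out of the new segment chain.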
/*-----------------------------------------------------------
 * The hal functions below should be called while holding the same spinlock
 *-----------------------------------------------------------*/
/*-------------------------
* TX
*------------------------*/
void spi_hal_sct_tx_dma_desc_recycle(spi_hal_context_t *hal, uint32_t recycle_num)
{
    hal->tx_free_desc_num += recycle_num;
}
static void s_sct_prepare_tx_seg(spi_hal_context_t *hal, const uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], const void *send_buffer, uint32_t buf_len_bytes, lldesc_t **trans_head)
{
    HAL_ASSERT(hal->tx_free_desc_num >= 1 + lldesc_get_required_num(buf_len_bytes));
    *trans_head = hal->cur_tx_seg_link;
    lldesc_setup_link(hal->cur_tx_seg_link, conf_buffer, SOC_SPI_SCT_BUFFER_NUM_MAX * 4, false);
    lldesc_t *conf_buffer_link = hal->cur_tx_seg_link;
    hal->tx_free_desc_num -= 1;
    hal->tx_seg_link_tail = hal->cur_tx_seg_link;
    hal->cur_tx_seg_link++;
    if (hal->cur_tx_seg_link == hal->dmadesc_tx + hal->dmadesc_n) {
        //The descriptors are used as a ring: wrap back to the pool head (the free-count check guarantees there is space)
        hal->cur_tx_seg_link = hal->dmadesc_tx;
    }
    if (send_buffer && buf_len_bytes) {
        lldesc_setup_link(hal->cur_tx_seg_link, send_buffer, buf_len_bytes, false);
        STAILQ_NEXT(conf_buffer_link, qe) = hal->cur_tx_seg_link;
        for (int i = 0; i < lldesc_get_required_num(buf_len_bytes); i++) {
            hal->tx_seg_link_tail = hal->cur_tx_seg_link;
            hal->cur_tx_seg_link++;
            if (hal->cur_tx_seg_link == hal->dmadesc_tx + hal->dmadesc_n) {
                //The descriptors are used as a ring: wrap back to the pool head
                hal->cur_tx_seg_link = hal->dmadesc_tx;
            }
        }
        hal->tx_free_desc_num -= lldesc_get_required_num(buf_len_bytes);
    }
}
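To make the accounting concrete: assuming `LLDESC_MAX_NUM_PER_DESC` is 4092 bytes (the lldesc.h value this sketch is based on), a 10000-byte `send_buffer` needs `lldesc_get_required_num(10000)` = ceil(10000 / 4092) = 3 data descriptors plus 1 for the conf_buffer, so `tx_free_desc_num` drops by 4 in total.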
spi_hal_dma_desc_status_t spi_hal_sct_new_tx_dma_desc_head(spi_hal_context_t *hal, const uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], const void *send_buffer, uint32_t buf_len_bytes, lldesc_t **trans_head, uint32_t *used_desc_num)
{
    //1 desc for the conf_buffer, the rest for the data.
    if (hal->tx_free_desc_num < 1 + lldesc_get_required_num(buf_len_bytes)) {
        return SPI_HAL_DMA_DESC_RUN_OUT;
    }
    s_sct_prepare_tx_seg(hal, conf_buffer, send_buffer, buf_len_bytes, trans_head);
    *used_desc_num = 1 + lldesc_get_required_num(buf_len_bytes);
    return SPI_HAL_DMA_DESC_LINKED;
}
spi_hal_dma_desc_status_t spi_hal_sct_link_tx_seg_dma_desc(spi_hal_context_t *hal, const uint32_t conf_buffer[SOC_SPI_SCT_BUFFER_NUM_MAX], const void *send_buffer, uint32_t buf_len_bytes, uint32_t *used_desc_num)
{
    //1 desc for the conf_buffer, the rest for the data.
    if (hal->tx_free_desc_num < 1 + lldesc_get_required_num(buf_len_bytes)) {
        return SPI_HAL_DMA_DESC_RUN_OUT;
    }
    if (hal->tx_seg_link_tail) {
        //Connect the last segment to the current one; the check above guarantees the following `s_sct_prepare_tx_seg` won't fail.
        STAILQ_NEXT(hal->tx_seg_link_tail, qe) = hal->cur_tx_seg_link;
    }
    lldesc_t *internal_head = NULL;
    s_sct_prepare_tx_seg(hal, conf_buffer, send_buffer, buf_len_bytes, &internal_head);
    *used_desc_num += 1 + lldesc_get_required_num(buf_len_bytes);
    return SPI_HAL_DMA_DESC_LINKED;
}
/*-------------------------
* RX
*------------------------*/
void spi_hal_sct_rx_dma_desc_recycle(spi_hal_context_t *hal, uint32_t recycle_num)
{
    hal->rx_free_desc_num += recycle_num;
}
static void s_sct_prepare_rx_seg(spi_hal_context_t *hal, const void *recv_buffer, uint32_t buf_len_bytes, lldesc_t **trans_head)
{
    HAL_ASSERT(hal->rx_free_desc_num >= lldesc_get_required_num(buf_len_bytes));
    *trans_head = hal->cur_rx_seg_link;
    lldesc_setup_link(hal->cur_rx_seg_link, recv_buffer, buf_len_bytes, true);
    for (int i = 0; i < lldesc_get_required_num(buf_len_bytes); i++) {
        hal->rx_seg_link_tail = hal->cur_rx_seg_link;
        hal->cur_rx_seg_link++;
        if (hal->cur_rx_seg_link == hal->dmadesc_rx + hal->dmadesc_n) {
            //The descriptors are used as a ring: wrap back to the pool head
            hal->cur_rx_seg_link = hal->dmadesc_rx;
        }
    }
    hal->rx_free_desc_num -= lldesc_get_required_num(buf_len_bytes);
}
spi_hal_dma_desc_status_t spi_hal_sct_new_rx_dma_desc_head(spi_hal_context_t *hal, const void *recv_buffer, uint32_t buf_len_bytes, lldesc_t **trans_head, uint32_t *used_desc_num)
{
    if (hal->rx_free_desc_num < lldesc_get_required_num(buf_len_bytes)) {
        return SPI_HAL_DMA_DESC_RUN_OUT;
    }
    s_sct_prepare_rx_seg(hal, recv_buffer, buf_len_bytes, trans_head);
    *used_desc_num = lldesc_get_required_num(buf_len_bytes);
    return SPI_HAL_DMA_DESC_LINKED;
}
spi_hal_dma_desc_status_t spi_hal_sct_link_rx_seg_dma_desc(spi_hal_context_t *hal, const void *recv_buffer, uint32_t buf_len_bytes, uint32_t *used_desc_num)
{
    if (hal->rx_free_desc_num < lldesc_get_required_num(buf_len_bytes)) {
        return SPI_HAL_DMA_DESC_RUN_OUT;
    }
    if (hal->rx_seg_link_tail) {
        //Connect the last segment to the current one; the check above guarantees the following `s_sct_prepare_rx_seg` won't fail.
        STAILQ_NEXT(hal->rx_seg_link_tail, qe) = hal->cur_rx_seg_link;
    }
    lldesc_t *internal_head = NULL;
    s_sct_prepare_rx_seg(hal, recv_buffer, buf_len_bytes, &internal_head);
    *used_desc_num += lldesc_get_required_num(buf_len_bytes);
    return SPI_HAL_DMA_DESC_LINKED;
}
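The RX path mirrors the TX path one-for-one, except that no conf_buffer descriptor is prepended: the configuration words always travel on the TX side, so an RX segment consumes only `lldesc_get_required_num(buf_len_bytes)` descriptors.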
#endif //#if SOC_SPI_SCT_SUPPORTED