feat(spi_slave_hd): add esp32p4 support for seg and append mode

Author: wanlei, 2023-09-14 16:52:12 +08:00
Parent: 751efec8b6
Commit: daeb71d7e4
10 changed files with 298 additions and 101 deletions

View File

@ -184,7 +184,9 @@ esp_err_t spi_slave_initialize(spi_host_device_t host, const spi_bus_config_t *b
if (dma_desc_ct == 0) dma_desc_ct = 1; //default to 4k when max is not given
spihost[host]->max_transfer_sz = dma_desc_ct * SPI_MAX_DMA_LEN;
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
esp_cache_get_alignment(ESP_CACHE_MALLOC_FLAG_DMA, (size_t *)&spihost[host]->internal_mem_align_size);
size_t alignment;
esp_cache_get_alignment(ESP_CACHE_MALLOC_FLAG_DMA, &alignment);
spihost[host]->internal_mem_align_size = alignment;
#else
spihost[host]->internal_mem_align_size = 4;
#endif
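
For context: on cache-coupled targets the DMA buffer alignment is no longer a constant and must be queried at runtime. A minimal sketch of the pattern this hunk adopts, assuming the same headers the patch pulls in; the helper name is hypothetical:

#include <stddef.h>
#include "soc/soc_caps.h"
#include "esp_cache.h"                         // ESP_CACHE_MALLOC_FLAG_DMA
#include "esp_private/esp_cache_private.h"     // esp_cache_get_alignment()

// Hypothetical helper: round a DMA buffer length up to the required alignment.
static size_t dma_align_up(size_t len)
{
    size_t alignment = 4;   // word alignment suffices without a DMA-coupled cache
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
    // e.g. ESP32-P4: DMA buffers must be aligned to the cache line size
    esp_cache_get_alignment(ESP_CACHE_MALLOC_FLAG_DMA, &alignment);
#endif
    return (len + alignment - 1) & ~(alignment - 1);
}
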

View File

@ -5,6 +5,7 @@
*/
#include "esp_log.h"
#include "esp_check.h"
#include "esp_memory_utils.h"
#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
@ -12,9 +13,12 @@
#include "freertos/ringbuf.h"
#include "driver/gpio.h"
#include "esp_private/spi_common_internal.h"
#include "esp_private/esp_cache_private.h"
#include "driver/spi_slave_hd.h"
#include "hal/spi_slave_hd_hal.h"
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
#include "esp_cache.h"
#endif
#if (SOC_SPI_PERIPH_NUM == 2)
#define VALID_HOST(x) ((x) == SPI2_HOST)
@ -23,8 +27,15 @@
#endif
#define SPIHD_CHECK(cond,warn,ret) do{if(!(cond)){ESP_LOGE(TAG, warn); return ret;}} while(0)
/// Struct to hold private transaction data (e.g., the TX/RX buffer actually used for DMA).
typedef struct {
spi_slave_hd_data_t *trans; //original trans
void *aligned_buffer; //actual trans buffer (re-malloced if needed)
} spi_slave_hd_trans_priv_t;
typedef struct {
bool dma_enabled;
uint16_t internal_mem_align_size;
int max_transfer_sz;
uint32_t flags;
portMUX_TYPE int_spinlock;
@ -45,8 +56,8 @@ typedef struct {
QueueHandle_t tx_cnting_sem;
QueueHandle_t rx_cnting_sem;
spi_slave_hd_data_t *tx_desc;
spi_slave_hd_data_t *rx_desc;
spi_slave_hd_trans_priv_t tx_curr_trans;
spi_slave_hd_trans_priv_t rx_curr_trans;
#ifdef CONFIG_PM_ENABLE
esp_pm_lock_handle_t pm_lock;
#endif
@ -103,9 +114,9 @@ esp_err_t spi_slave_hd_init(spi_host_device_t host_id, const spi_bus_config_t *b
dma_desc_ct = 1; //default to 4k when max is not given
}
host->hal.dma_desc_num = dma_desc_ct;
spi_dma_desc_t *orig_dmadesc_tx = heap_caps_aligned_alloc(DMA_DESC_MEM_ALIGN_SIZE, sizeof(spi_dma_desc_t) * dma_desc_ct, MALLOC_CAP_DMA);
spi_dma_desc_t *orig_dmadesc_rx = heap_caps_aligned_alloc(DMA_DESC_MEM_ALIGN_SIZE, sizeof(spi_dma_desc_t) * dma_desc_ct, MALLOC_CAP_DMA);
lldesc_t *orig_dmadesc_tx = heap_caps_malloc(sizeof(lldesc_t) * dma_desc_ct, MALLOC_CAP_DMA);
lldesc_t *orig_dmadesc_rx = heap_caps_malloc(sizeof(lldesc_t) * dma_desc_ct, MALLOC_CAP_DMA);
host->hal.dmadesc_tx = heap_caps_malloc(sizeof(spi_slave_hd_hal_desc_append_t) * dma_desc_ct, MALLOC_CAP_DEFAULT);
host->hal.dmadesc_rx = heap_caps_malloc(sizeof(spi_slave_hd_hal_desc_append_t) * dma_desc_ct, MALLOC_CAP_DEFAULT);
if (!(host->hal.dmadesc_tx && host->hal.dmadesc_rx && orig_dmadesc_tx && orig_dmadesc_rx)) {
@ -120,8 +131,15 @@ esp_err_t spi_slave_hd_init(spi_host_device_t host_id, const spi_bus_config_t *b
//Get the actual SPI bus transaction size in bytes.
host->max_transfer_sz = dma_desc_ct * DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED;
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
size_t alignment;
esp_cache_get_alignment(ESP_CACHE_MALLOC_FLAG_DMA, &alignment);
host->internal_mem_align_size = alignment;
#else
host->internal_mem_align_size = 4;
#endif
} else {
//We're limited to non-DMA transfers: the SPI work registers can hold 64 bytes at most.
//We're limited to non-DMA transfers: the SPI work registers can hold at most 72 bytes on S2 and 64 bytes on other targets.
host->max_transfer_sz = 0;
}
@ -130,8 +148,7 @@ esp_err_t spi_slave_hd_init(spi_host_device_t host_id, const spi_bus_config_t *b
goto cleanup;
}
gpio_set_direction(config->spics_io_num, GPIO_MODE_INPUT);
spicommon_cs_initialize(host_id, config->spics_io_num, 0,
!(bus_config->flags & SPICOMMON_BUSFLAG_NATIVE_PINS));
spicommon_cs_initialize(host_id, config->spics_io_num, 0, !(bus_config->flags & SPICOMMON_BUSFLAG_NATIVE_PINS));
spi_slave_hd_hal_config_t hal_config = {
.host_id = host_id,
@ -159,11 +176,11 @@ esp_err_t spi_slave_hd_init(spi_host_device_t host_id, const spi_bus_config_t *b
#endif //CONFIG_PM_ENABLE
//Create Queues and Semaphores
host->tx_ret_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
host->rx_ret_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
host->tx_ret_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_trans_priv_t));
host->rx_ret_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_trans_priv_t));
if (!host->append_mode) {
host->tx_trans_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
host->rx_trans_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
host->tx_trans_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_trans_priv_t));
host->rx_trans_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_trans_priv_t));
if (!host->tx_trans_queue || !host->rx_trans_queue) {
ret = ESP_ERR_NO_MEM;
goto cleanup;
@ -305,10 +322,10 @@ static IRAM_ATTR void spi_slave_hd_intr_segment(void *arg)
bool rx_done = false;
portENTER_CRITICAL_ISR(&host->int_spinlock);
if (host->tx_desc && spi_slave_hd_hal_check_disable_event(hal, SPI_EV_SEND)) {
if (host->tx_curr_trans.trans && spi_slave_hd_hal_check_disable_event(hal, SPI_EV_SEND)) {
tx_done = true;
}
if (host->rx_desc && spi_slave_hd_hal_check_disable_event(hal, SPI_EV_RECV)) {
if (host->rx_curr_trans.trans && spi_slave_hd_hal_check_disable_event(hal, SPI_EV_RECV)) {
rx_done = true;
}
portEXIT_CRITICAL_ISR(&host->int_spinlock);
@ -318,50 +335,56 @@ static IRAM_ATTR void spi_slave_hd_intr_segment(void *arg)
if (callback->cb_sent) {
spi_slave_hd_event_t ev = {
.event = SPI_EV_SEND,
.trans = host->tx_desc,
.trans = host->tx_curr_trans.trans,
};
BaseType_t cb_awoken = pdFALSE;
ret_queue = callback->cb_sent(callback->arg, &ev, &cb_awoken);
awoken |= cb_awoken;
}
if (ret_queue) {
ret = xQueueSendFromISR(host->tx_ret_queue, &host->tx_desc, &awoken);
ret = xQueueSendFromISR(host->tx_ret_queue, &host->tx_curr_trans, &awoken);
// The return queue is full. The total number of transactions in send_queue + ret_queue should never exceed the queue length.
assert(ret == pdTRUE);
}
host->tx_desc = NULL;
host->tx_curr_trans.trans = NULL;
}
if (rx_done) {
bool ret_queue = true;
host->rx_desc->trans_len = spi_slave_hd_hal_rxdma_seg_get_len(hal);
host->rx_curr_trans.trans->trans_len = spi_slave_hd_hal_rxdma_seg_get_len(hal);
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE //invalidate here to let user access rx data in post_cb if possible
uint16_t alignment = host->internal_mem_align_size;
uint32_t buff_len = (host->rx_curr_trans.trans->len + alignment - 1) & (~(alignment - 1));
esp_err_t ret = esp_cache_msync((void *)host->rx_curr_trans.aligned_buffer, buff_len, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
assert(ret == ESP_OK);
#endif
if (callback->cb_recv) {
spi_slave_hd_event_t ev = {
.event = SPI_EV_RECV,
.trans = host->rx_desc,
.trans = host->rx_curr_trans.trans,
};
BaseType_t cb_awoken = pdFALSE;
ret_queue = callback->cb_recv(callback->arg, &ev, &cb_awoken);
awoken |= cb_awoken;
}
if (ret_queue) {
ret = xQueueSendFromISR(host->rx_ret_queue, &host->rx_desc, &awoken);
ret = xQueueSendFromISR(host->rx_ret_queue, &host->rx_curr_trans, &awoken);
// The return queue is full. The total number of transactions in send_queue + ret_queue should never exceed the queue length.
assert(ret == pdTRUE);
}
host->rx_desc = NULL;
host->rx_curr_trans.trans = NULL;
}
bool tx_sent = false;
bool rx_sent = false;
if (!host->tx_desc) {
ret = xQueueReceiveFromISR(host->tx_trans_queue, &host->tx_desc, &awoken);
if (!host->tx_curr_trans.trans) {
ret = xQueueReceiveFromISR(host->tx_trans_queue, &host->tx_curr_trans, &awoken);
if (ret == pdTRUE) {
spi_slave_hd_hal_txdma(hal, host->tx_desc->data, host->tx_desc->len);
spi_slave_hd_hal_txdma(hal, host->tx_curr_trans.aligned_buffer, host->tx_curr_trans.trans->len);
tx_sent = true;
if (callback->cb_send_dma_ready) {
spi_slave_hd_event_t ev = {
.event = SPI_EV_SEND_DMA_READY,
.trans = host->tx_desc,
.trans = host->tx_curr_trans.trans,
};
BaseType_t cb_awoken = pdFALSE;
callback->cb_send_dma_ready(callback->arg, &ev, &cb_awoken);
@ -369,15 +392,15 @@ static IRAM_ATTR void spi_slave_hd_intr_segment(void *arg)
}
}
}
if (!host->rx_desc) {
ret = xQueueReceiveFromISR(host->rx_trans_queue, &host->rx_desc, &awoken);
if (!host->rx_curr_trans.trans) {
ret = xQueueReceiveFromISR(host->rx_trans_queue, &host->rx_curr_trans, &awoken);
if (ret == pdTRUE) {
spi_slave_hd_hal_rxdma(hal, host->rx_desc->data, host->rx_desc->len);
spi_slave_hd_hal_rxdma(hal, host->rx_curr_trans.aligned_buffer, host->rx_curr_trans.trans->len);
rx_sent = true;
if (callback->cb_recv_dma_ready) {
spi_slave_hd_event_t ev = {
.event = SPI_EV_RECV_DMA_READY,
.trans = host->rx_desc,
.trans = host->rx_curr_trans.trans,
};
BaseType_t cb_awoken = pdFALSE;
callback->cb_recv_dma_ready(callback->arg, &ev, &cb_awoken);
@ -406,10 +429,10 @@ static IRAM_ATTR void spi_slave_hd_append_tx_isr(void *arg)
BaseType_t awoken = pdFALSE;
BaseType_t ret __attribute__((unused));
spi_slave_hd_data_t *trans_desc;
spi_slave_hd_trans_priv_t ret_priv_trans;
while (1) {
bool trans_finish = false;
trans_finish = spi_slave_hd_hal_get_tx_finished_trans(hal, (void **)&trans_desc);
trans_finish = spi_slave_hd_hal_get_tx_finished_trans(hal, (void **)&ret_priv_trans.trans, &ret_priv_trans.aligned_buffer);
if (!trans_finish) {
break;
}
@ -418,7 +441,7 @@ static IRAM_ATTR void spi_slave_hd_append_tx_isr(void *arg)
if (callback->cb_sent) {
spi_slave_hd_event_t ev = {
.event = SPI_EV_SEND,
.trans = trans_desc,
.trans = ret_priv_trans.trans,
};
BaseType_t cb_awoken = pdFALSE;
ret_queue = callback->cb_sent(callback->arg, &ev, &cb_awoken);
@ -426,7 +449,7 @@ static IRAM_ATTR void spi_slave_hd_append_tx_isr(void *arg)
}
if (ret_queue) {
ret = xQueueSendFromISR(host->tx_ret_queue, &trans_desc, &awoken);
ret = xQueueSendFromISR(host->tx_ret_queue, &ret_priv_trans, &awoken);
assert(ret == pdTRUE);
ret = xSemaphoreGiveFromISR(host->tx_cnting_sem, &awoken);
@ -444,21 +467,27 @@ static IRAM_ATTR void spi_slave_hd_append_rx_isr(void *arg)
BaseType_t awoken = pdFALSE;
BaseType_t ret __attribute__((unused));
spi_slave_hd_data_t *trans_desc;
spi_slave_hd_trans_priv_t ret_priv_trans;
size_t trans_len;
while (1) {
bool trans_finish = false;
trans_finish = spi_slave_hd_hal_get_rx_finished_trans(hal, (void **)&trans_desc, &trans_len);
trans_finish = spi_slave_hd_hal_get_rx_finished_trans(hal, (void **)&ret_priv_trans.trans, &ret_priv_trans.aligned_buffer, &trans_len);
if (!trans_finish) {
break;
}
trans_desc->trans_len = trans_len;
ret_priv_trans.trans->trans_len = trans_len;
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE //invalidate here to let user access rx data in post_cb if possible
uint16_t alignment = host->internal_mem_align_size;
uint32_t buff_len = (ret_priv_trans.trans->len + alignment - 1) & (~(alignment - 1));
esp_err_t ret = esp_cache_msync((void *)ret_priv_trans.aligned_buffer, buff_len, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
assert(ret == ESP_OK);
#endif
bool ret_queue = true;
if (callback->cb_recv) {
spi_slave_hd_event_t ev = {
.event = SPI_EV_RECV,
.trans = trans_desc,
.trans = ret_priv_trans.trans,
};
BaseType_t cb_awoken = pdFALSE;
ret_queue = callback->cb_recv(callback->arg, &ev, &cb_awoken);
@ -466,7 +495,7 @@ static IRAM_ATTR void spi_slave_hd_append_rx_isr(void *arg)
}
if (ret_queue) {
ret = xQueueSendFromISR(host->rx_ret_queue, &trans_desc, &awoken);
ret = xQueueSendFromISR(host->rx_ret_queue, &ret_priv_trans, &awoken);
assert(ret == pdTRUE);
ret = xSemaphoreGiveFromISR(host->rx_cnting_sem, &awoken);
@ -514,22 +543,64 @@ static IRAM_ATTR void spi_slave_hd_intr_append(void *arg)
}
}
static void s_spi_slave_hd_destroy_priv_trans(spi_host_device_t host, spi_slave_hd_trans_priv_t *priv_trans, spi_slave_chan_t chan)
{
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
spi_slave_hd_data_t *orig_trans = priv_trans->trans;
if (priv_trans->aligned_buffer != orig_trans->data) {
if (chan == SPI_SLAVE_CHAN_RX) {
memcpy(orig_trans->data, priv_trans->aligned_buffer, orig_trans->trans_len);
}
free(priv_trans->aligned_buffer);
}
#endif //SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
}
static esp_err_t s_spi_slave_hd_setup_priv_trans(spi_host_device_t host, spi_slave_hd_trans_priv_t *priv_trans, spi_slave_chan_t chan)
{
spi_slave_hd_data_t *orig_trans = priv_trans->trans;
priv_trans->aligned_buffer = orig_trans->data;
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
uint16_t alignment = spihost[host]->internal_mem_align_size;
uint32_t byte_len = orig_trans->len;
if (((((uint32_t)orig_trans->data) | byte_len) & (alignment - 1)) || !esp_ptr_dma_capable(orig_trans->data)) {
ESP_RETURN_ON_FALSE(orig_trans->flags & SPI_SLAVE_HD_TRANS_DMA_BUFFER_ALIGN_AUTO, ESP_ERR_INVALID_ARG, TAG, "data buffer addr & len not aligned to %d, or not dma_capable", alignment);
byte_len = (byte_len + alignment - 1) & (~(alignment - 1)); // up align to alignment
ESP_LOGD(TAG, "Re-allocate %s buffer of len %ld for DMA", (chan == SPI_SLAVE_CHAN_TX)?"TX":"RX", byte_len);
priv_trans->aligned_buffer = heap_caps_aligned_alloc(alignment, byte_len, MALLOC_CAP_DMA);
if (priv_trans->aligned_buffer == NULL) {
return ESP_ERR_NO_MEM;
}
}
if (chan == SPI_SLAVE_CHAN_TX) {
memcpy(priv_trans->aligned_buffer, orig_trans->data, orig_trans->len);
esp_err_t ret = esp_cache_msync((void *)priv_trans->aligned_buffer, byte_len, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
ESP_RETURN_ON_FALSE(ESP_OK == ret, ESP_ERR_INVALID_STATE, TAG, "mem sync c2m(writeback) fail");
}
#endif //SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
return ESP_OK;
}
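
The setup/destroy pair above is where cache maintenance happens: write back (C2M) before the DMA reads a TX buffer, and invalidate (M2C, done in the ISRs) before the CPU reads an RX buffer the DMA filled. A hedged sketch of that pairing, using only the calls the patch itself uses; the wrapper names are illustrative:

#include "esp_err.h"
#include "esp_cache.h"

// Assumes a cache-coupled target; `aligned_len` is already cache-line aligned.
static inline esp_err_t writeback_before_dma_tx(void *buf, size_t aligned_len)
{
    // CPU filled the buffer: flush dirty cache lines so the DMA reads fresh data
    return esp_cache_msync(buf, aligned_len, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
}

static inline esp_err_t invalidate_after_dma_rx(void *buf, size_t aligned_len)
{
    // DMA wrote memory behind the cache: drop stale lines before the CPU reads
    return esp_cache_msync(buf, aligned_len, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
}
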
static esp_err_t get_ret_queue_result(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t **out_trans, TickType_t timeout)
{
spi_slave_hd_slot_t *host = spihost[host_id];
spi_slave_hd_data_t *trans;
spi_slave_hd_trans_priv_t hd_priv_trans;
BaseType_t ret;
if (chan == SPI_SLAVE_CHAN_TX) {
ret = xQueueReceive(host->tx_ret_queue, &trans, timeout);
ret = xQueueReceive(host->tx_ret_queue, &hd_priv_trans, timeout);
} else {
ret = xQueueReceive(host->rx_ret_queue, &trans, timeout);
ret = xQueueReceive(host->rx_ret_queue, &hd_priv_trans, timeout);
}
if (ret == pdFALSE) {
return ESP_ERR_TIMEOUT;
}
*out_trans = trans;
s_spi_slave_hd_destroy_priv_trans(host_id, &hd_priv_trans, chan);
*out_trans = hd_priv_trans.trans;
return ESP_OK;
}
@ -543,14 +614,17 @@ esp_err_t spi_slave_hd_queue_trans(spi_host_device_t host_id, spi_slave_chan_t c
SPIHD_CHECK(trans->len <= host->max_transfer_sz && trans->len > 0, "Invalid buffer size", ESP_ERR_INVALID_ARG);
SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);
spi_slave_hd_trans_priv_t hd_priv_trans = {.trans = trans};
SPIHD_CHECK( ESP_OK == s_spi_slave_hd_setup_priv_trans(host_id, &hd_priv_trans, chan), "No mem to allocate new cache buffer", ESP_ERR_NO_MEM);
if (chan == SPI_SLAVE_CHAN_TX) {
BaseType_t ret = xQueueSend(host->tx_trans_queue, &trans, timeout);
BaseType_t ret = xQueueSend(host->tx_trans_queue, &hd_priv_trans, timeout);
if (ret == pdFALSE) {
return ESP_ERR_TIMEOUT;
}
tx_invoke(host);
} else { //chan == SPI_SLAVE_CHAN_RX
BaseType_t ret = xQueueSend(host->rx_trans_queue, &trans, timeout);
BaseType_t ret = xQueueSend(host->rx_trans_queue, &hd_priv_trans, timeout);
if (ret == pdFALSE) {
return ESP_ERR_TIMEOUT;
}
@ -594,18 +668,21 @@ esp_err_t spi_slave_hd_append_trans(spi_host_device_t host_id, spi_slave_chan_t
SPIHD_CHECK(trans->len <= host->max_transfer_sz && trans->len > 0, "Invalid buffer size", ESP_ERR_INVALID_ARG);
SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);
spi_slave_hd_trans_priv_t hd_priv_trans = {.trans = trans};
SPIHD_CHECK( ESP_OK == s_spi_slave_hd_setup_priv_trans(host_id, &hd_priv_trans, chan), "No mem to allocate new cache buffer", ESP_ERR_NO_MEM);
if (chan == SPI_SLAVE_CHAN_TX) {
BaseType_t ret = xSemaphoreTake(host->tx_cnting_sem, timeout);
if (ret == pdFALSE) {
return ESP_ERR_TIMEOUT;
}
err = spi_slave_hd_hal_txdma_append(hal, trans->data, trans->len, trans);
err = spi_slave_hd_hal_txdma_append(hal, hd_priv_trans.aligned_buffer, trans->len, trans);
} else {
BaseType_t ret = xSemaphoreTake(host->rx_cnting_sem, timeout);
if (ret == pdFALSE) {
return ESP_ERR_TIMEOUT;
}
err = spi_slave_hd_hal_rxdma_append(hal, trans->data, trans->len, trans);
err = spi_slave_hd_hal_rxdma_append(hal, hd_priv_trans.aligned_buffer, trans->len, trans);
}
if (err != ESP_OK) {
ESP_LOGE(TAG, "Wait until the DMA finishes its transaction");

View File

@ -23,11 +23,14 @@ extern "C"
#error The SPI peripheral does not support this feature
#endif
#define SPI_SLAVE_HD_TRANS_DMA_BUFFER_ALIGN_AUTO (1<<0) ///< Automatically re-malloc the DMA buffer if the user buffer doesn't meet hardware alignment or isn't DMA-capable; this may cost extra memory and some performance
/// Descriptor of data to send/receive
typedef struct {
uint8_t* data; ///< Buffer to send, must be DMA capable
size_t len; ///< Length of data to send/receive. For receiving, the buffer length should be a multiple of 4 bytes, otherwise the extra part will be truncated.
size_t trans_len; ///< For RX direction, it indicates the data actually received. For TX direction, it is meaningless.
uint32_t flags; ///< Bitwise OR of SPI_SLAVE_HD_TRANS_* flags
void* arg; ///< Extra argument indicating this data
} spi_slave_hd_data_t;
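
A hedged usage sketch for the new flag (not part of the commit; the host and buffer size are illustrative, and spi_slave_hd_init() is assumed to have been called already):

#include "esp_err.h"
#include "freertos/FreeRTOS.h"
#include "driver/spi_slave_hd.h"

void example_receive_with_auto_align(void)
{
    static uint8_t buf[256];   // any byte-addressable buffer; the driver re-mallocs it if misaligned
    spi_slave_hd_data_t trans = {
        .data  = buf,
        .len   = sizeof(buf),
        .flags = SPI_SLAVE_HD_TRANS_DMA_BUFFER_ALIGN_AUTO,
    };
    ESP_ERROR_CHECK(spi_slave_hd_queue_trans(SPI2_HOST, SPI_SLAVE_CHAN_RX, &trans, portMAX_DELAY));

    spi_slave_hd_data_t *ret_trans;
    ESP_ERROR_CHECK(spi_slave_hd_get_trans_res(SPI2_HOST, SPI_SLAVE_CHAN_RX, &ret_trans, portMAX_DELAY));
    // ret_trans->trans_len now holds the number of bytes actually received
}
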

View File

@ -1,2 +1,2 @@
| Supported Targets | ESP32-C2 | ESP32-C3 | ESP32-C6 | ESP32-H2 | ESP32-S2 | ESP32-S3 |
| ----------------- | -------- | -------- | -------- | -------- | -------- | -------- |
| Supported Targets | ESP32-C2 | ESP32-C3 | ESP32-C6 | ESP32-H2 | ESP32-P4 | ESP32-S2 | ESP32-S3 |
| ----------------- | -------- | -------- | -------- | -------- | -------- | -------- | -------- |

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -10,23 +10,15 @@
#define TEST_MEMORY_LEAK_THRESHOLD (200)
static size_t before_free_8bit;
static size_t before_free_32bit;
void setUp(void)
{
before_free_8bit = heap_caps_get_free_size(MALLOC_CAP_8BIT);
before_free_32bit = heap_caps_get_free_size(MALLOC_CAP_32BIT);
unity_utils_record_free_mem();
}
void tearDown(void)
{
esp_reent_cleanup(); //clean up some of the newlib's lazy allocations
size_t after_free_8bit = heap_caps_get_free_size(MALLOC_CAP_8BIT);
size_t after_free_32bit = heap_caps_get_free_size(MALLOC_CAP_32BIT);
printf("\n");
unity_utils_check_leak(before_free_8bit, after_free_8bit, "8BIT", TEST_MEMORY_LEAK_THRESHOLD);
unity_utils_check_leak(before_free_32bit, after_free_32bit, "32BIT", TEST_MEMORY_LEAK_THRESHOLD);
unity_utils_evaluate_leaks_direct(TEST_MEMORY_LEAK_THRESHOLD);
}
void app_main(void)

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2021-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -224,6 +224,7 @@ static void test_hd_start(spi_device_handle_t *spi, int freq, const spitest_para
ctx->tx_data = (spi_slave_hd_data_t) {
.data = &ctx->slave_rddma_buf[pos],
.len = len,
.flags = SPI_SLAVE_HD_TRANS_DMA_BUFFER_ALIGN_AUTO,
};
esp_err_t err = spi_slave_hd_queue_trans(TEST_SLAVE_HOST, SPI_SLAVE_CHAN_TX, &ctx->tx_data, portMAX_DELAY);
TEST_ESP_OK(err);
@ -231,6 +232,7 @@ static void test_hd_start(spi_device_handle_t *spi, int freq, const spitest_para
ctx->rx_data = (spi_slave_hd_data_t) {
.data = ctx->slave_wrdma_buf,
.len = TEST_DMA_MAX_SIZE,
.flags = SPI_SLAVE_HD_TRANS_DMA_BUFFER_ALIGN_AUTO,
};
err = spi_slave_hd_queue_trans(TEST_SLAVE_HOST, SPI_SLAVE_CHAN_RX, &ctx->rx_data, portMAX_DELAY);
TEST_ESP_OK(err);
@ -269,6 +271,7 @@ void test_wrdma(testhd_context_t* ctx, const spitest_param_set_t *cfg, spi_devic
ctx->rx_data = (spi_slave_hd_data_t) {
.data = ctx->slave_wrdma_buf,
.len = TEST_DMA_MAX_SIZE,
.flags = SPI_SLAVE_HD_TRANS_DMA_BUFFER_ALIGN_AUTO,
};
esp_err_t err = spi_slave_hd_queue_trans(TEST_SLAVE_HOST, SPI_SLAVE_CHAN_RX, &ctx->rx_data, portMAX_DELAY);
TEST_ESP_OK(err);
@ -302,6 +305,7 @@ void test_rddma(testhd_context_t* ctx, const spitest_param_set_t* cfg, spi_devic
ctx->tx_data = (spi_slave_hd_data_t) {
.data = &ctx->slave_rddma_buf[pos],
.len = len,
.flags = SPI_SLAVE_HD_TRANS_DMA_BUFFER_ALIGN_AUTO,
};
esp_err_t err = spi_slave_hd_queue_trans(TEST_SLAVE_HOST, SPI_SLAVE_CHAN_TX, &ctx->tx_data, portMAX_DELAY);
TEST_ESP_OK(err);
@ -415,6 +419,14 @@ static void test_hd_loop(const void* arg1, void* arg2)
TEST_ASSERT_EQUAL_HEX8_ARRAY(&slave_mem, recv_buffer, REG_REGION_SIZE);
}
//To release the re-malloced buffers remaining in the slave trans queue if possible
printf("clean tx %d rx %d\n", context->tx_data.len, context->rx_data.len);
TEST_ESP_OK(essl_spi_rddma(spi, context->master_rddma_buf, context->tx_data.len, TEST_SEG_SIZE, 0));
TEST_ESP_OK(essl_spi_wrdma(spi, context->master_wrdma_buf, context->rx_data.len, TEST_SEG_SIZE, 0));
spi_slave_hd_data_t* ret_trans;
TEST_ESP_OK(spi_slave_hd_get_trans_res(TEST_SLAVE_HOST, SPI_SLAVE_CHAN_TX, &ret_trans, portMAX_DELAY));
TEST_ESP_OK(spi_slave_hd_get_trans_res(TEST_SLAVE_HOST, SPI_SLAVE_CHAN_RX, &ret_trans, portMAX_DELAY));
master_free_device_bus(spi);
spi_slave_hd_deinit(TEST_SLAVE_HOST);
}
@ -434,8 +446,8 @@ static const ptest_func_t hd_test_func = {
static int test_freq_hd[] = {
// 100*1000,
// 10 * 1000 * 1000, //maximum freq MISO stable before next latch edge
// 20 * 1000 * 1000, //maximum freq MISO stable before next latch edge
40 * 1000 * 1000, //maximum freq MISO stable before next latch edge
20 * 1000 * 1000, //maximum freq MISO stable before next latch edge
// 40 * 1000 * 1000, //maximum freq MISO stable before next latch edge
0,
};
@ -520,19 +532,23 @@ TEST_CASE("test spi slave hd segment mode, master too long", "[spi][spi_slv_hd]"
{
.data = slave_recv_buf,
.len = (trans_len[0] + 3) & (~3),
.flags = SPI_SLAVE_HD_TRANS_DMA_BUFFER_ALIGN_AUTO,
},
{
.data = slave_recv_buf + send_buf_size,
.len = (trans_len[1] + 3) & (~3),
.flags = SPI_SLAVE_HD_TRANS_DMA_BUFFER_ALIGN_AUTO,
},
//send
{
.data = slave_send_buf,
.len = trans_len[0],
.flags = SPI_SLAVE_HD_TRANS_DMA_BUFFER_ALIGN_AUTO,
},
{
.data = slave_send_buf + send_buf_size,
.len = trans_len[1],
.flags = SPI_SLAVE_HD_TRANS_DMA_BUFFER_ALIGN_AUTO,
},
};
@ -925,15 +941,17 @@ void slave_run_append(void)
ESP_LOG_BUFFER_HEX_LEVEL("slave exp", slave_exp, trans_len, ESP_LOG_DEBUG);
spitest_cmp_or_dump(slave_exp, ret_trans->data, trans_len);
// append one more transaction
int new_append_len = trans_len << 4;
if (new_append_len > TEST_TRANS_LEN) {
new_append_len = TEST_TRANS_LEN;
if (trans_num <= TEST_APPEND_CACHE_SIZE) {
// append one more transaction
int new_append_len = trans_len << 4;
if (new_append_len > TEST_TRANS_LEN) {
new_append_len = TEST_TRANS_LEN;
}
memset(ret_trans->data, 0, ret_trans->trans_len);
ret_trans->len = new_append_len;
ret_trans->trans_len = 0;
TEST_ESP_OK(spi_slave_hd_append_trans(TEST_SPI_HOST, SPI_SLAVE_CHAN_RX, ret_trans, portMAX_DELAY));
}
memset(ret_trans->data, 0, ret_trans->trans_len);
ret_trans->len = new_append_len;
ret_trans->trans_len = 0;
TEST_ESP_OK(spi_slave_hd_append_trans(TEST_SPI_HOST, SPI_SLAVE_CHAN_RX, ret_trans, portMAX_DELAY));
}
printf("================Master Tx Done==================\n\n");
free(slave_exp);
@ -958,15 +976,17 @@ void slave_run_append(void)
ESP_LOGI("slave", "trasacted len: %d", ret_trans->len);
ESP_LOG_BUFFER_HEX_LEVEL("slave tx", ret_trans->data, ret_trans->len, ESP_LOG_DEBUG);
// append one more transaction
int new_append_len = 16 << (trans_num + 4);
if (new_append_len > TEST_TRANS_LEN) {
new_append_len = TEST_TRANS_LEN;
if (trans_num <= TEST_APPEND_CACHE_SIZE) {
// append one more transaction
int new_append_len = 16 << (trans_num + 4);
if (new_append_len > TEST_TRANS_LEN) {
new_append_len = TEST_TRANS_LEN;
}
ret_trans->len = new_append_len;
ret_trans->trans_len = 0;
prepare_data(ret_trans->data, ret_trans->len, -3);
TEST_ESP_OK(spi_slave_hd_append_trans(TEST_SPI_HOST, SPI_SLAVE_CHAN_TX, ret_trans, portMAX_DELAY));
}
ret_trans->len = new_append_len;
ret_trans->trans_len = 0;
prepare_data(ret_trans->data, ret_trans->len, -3);
TEST_ESP_OK(spi_slave_hd_append_trans(TEST_SPI_HOST, SPI_SLAVE_CHAN_TX, ret_trans, portMAX_DELAY));
}
printf("================Master Rx Done==================\n");
for (int i = 0; i < TEST_APPEND_CACHE_SIZE; i++) free(slave_tx_trans[i].data);

View File

@ -46,7 +46,9 @@
#include "esp_types.h"
#include "esp_err.h"
#include "soc/soc_caps.h"
#include "soc/gdma_channel.h"
#include "hal/spi_types.h"
#include "hal/dma_types.h"
#if SOC_GPSPI_SUPPORTED
#include "hal/spi_ll.h"
#endif
@ -57,13 +59,24 @@ extern "C" {
#if SOC_GPSPI_SUPPORTED
//NOTE!! If both A and B are undefined, '#if (A==B)' evaluates true, because GCC treats undefined symbols as 0
#if !defined(SOC_GDMA_TRIG_PERIPH_SPI2_BUS)
typedef dma_descriptor_align4_t spi_dma_desc_t;
#else
#if defined(SOC_GDMA_BUS_AXI) && (SOC_GDMA_TRIG_PERIPH_SPI2_BUS == SOC_GDMA_BUS_AXI)
typedef dma_descriptor_align8_t spi_dma_desc_t;
#elif defined(SOC_GDMA_BUS_AHB) && (SOC_GDMA_TRIG_PERIPH_SPI2_BUS == SOC_GDMA_BUS_AHB)
typedef dma_descriptor_align4_t spi_dma_desc_t;
#endif
#endif
/**
* @brief Type of DMA descriptor with appended members
* This structure extends the DMA descriptor with a pointer to the transaction descriptor passed in by the user.
*/
typedef struct {
lldesc_t *desc; ///< DMA descriptor
void *arg; ///< This points to the transaction descriptor user passed in
spi_dma_desc_t *desc; ///< DMA descriptor
void *arg; ///< This points to the transaction descriptor user passed in
} spi_slave_hd_hal_desc_append_t;
/// Configuration of the HAL
@ -253,9 +266,10 @@ int spi_slave_hd_hal_get_last_addr(spi_slave_hd_hal_context_t *hal);
*
* @param hal Context of the HAL layer
* @param out_trans Pointer to the caller-defined transaction
* @param real_buff_addr Actual data buffer head used by the HW
* @return 1: Transaction is finished; 0: Transaction is not finished
*/
bool spi_slave_hd_hal_get_tx_finished_trans(spi_slave_hd_hal_context_t *hal, void **out_trans);
bool spi_slave_hd_hal_get_tx_finished_trans(spi_slave_hd_hal_context_t *hal, void **out_trans, void **real_buff_addr);
/**
* @brief Return the finished RX transaction
@ -266,10 +280,11 @@ bool spi_slave_hd_hal_get_tx_finished_trans(spi_slave_hd_hal_context_t *hal, voi
*
* @param hal Context of the HAL layer
* @param out_trans Pointer to the caller-defined transaction
* @param real_buff_addr Actual data buffer head used by the HW
* @param out_len Actual number of bytes of received data
* @return 1: Transaction is finished; 0: Transaction is not finished
*/
bool spi_slave_hd_hal_get_rx_finished_trans(spi_slave_hd_hal_context_t *hal, void **out_trans, size_t *out_len);
bool spi_slave_hd_hal_get_rx_finished_trans(spi_slave_hd_hal_context_t *hal, void **out_trans, void **real_buff_addr, size_t *out_len);
/**
* @brief Load the TX DMA descriptors without stopping the DMA

View File

@ -14,11 +14,13 @@
#include "soc/spi_periph.h"
#include "soc/lldesc.h"
#include "soc/soc_caps.h"
#include "soc/ext_mem_defs.h"
#include "hal/spi_slave_hd_hal.h"
#include "hal/assert.h"
//This GDMA-related part will be replaced by dedicated GDMA APIs in the future. Here we temporarily use macros.
#if SOC_AHB_GDMA_VERSION == 1
#if SOC_GDMA_SUPPORTED
#if (SOC_GDMA_TRIG_PERIPH_SPI2_BUS == SOC_GDMA_BUS_AHB) && (SOC_AHB_GDMA_VERSION == 1)
#include "soc/gdma_struct.h"
#include "hal/gdma_ll.h"
#define spi_dma_ll_tx_restart(dev, chan) gdma_ll_tx_restart(&GDMA, chan)
@ -41,7 +43,31 @@
gdma_ll_tx_set_desc_addr(&GDMA, chan, (uint32_t)addr);\
gdma_ll_tx_start(&GDMA, chan);\
} while (0)
#elif (SOC_GDMA_TRIG_PERIPH_SPI2_BUS == SOC_GDMA_BUS_AXI)
#include "hal/axi_dma_ll.h"
#define spi_dma_ll_tx_restart(dev, chan) axi_dma_ll_tx_restart(&AXI_DMA, chan)
#define spi_dma_ll_rx_restart(dev, chan) axi_dma_ll_rx_restart(&AXI_DMA, chan)
#define spi_dma_ll_rx_reset(dev, chan) axi_dma_ll_rx_reset_channel(&AXI_DMA, chan)
#define spi_dma_ll_tx_reset(dev, chan) axi_dma_ll_tx_reset_channel(&AXI_DMA, chan)
#define spi_dma_ll_rx_enable_burst_data(dev, chan, enable) axi_dma_ll_rx_enable_data_burst(&AXI_DMA, chan, enable)
#define spi_dma_ll_tx_enable_burst_data(dev, chan, enable) axi_dma_ll_tx_enable_data_burst(&AXI_DMA, chan, enable)
#define spi_dma_ll_rx_enable_burst_desc(dev, chan, enable) axi_dma_ll_rx_enable_descriptor_burst(&AXI_DMA, chan, enable)
#define spi_dma_ll_tx_enable_burst_desc(dev, chan, enable) axi_dma_ll_tx_enable_descriptor_burst(&AXI_DMA, chan, enable)
#define spi_dma_ll_enable_out_auto_wrback(dev, chan, enable) axi_dma_ll_tx_enable_auto_write_back(&AXI_DMA, chan, enable)
#define spi_dma_ll_set_out_eof_generation(dev, chan, enable) axi_dma_ll_tx_set_eof_mode(&AXI_DMA, chan, enable)
#define spi_dma_ll_get_out_eof_desc_addr(dev, chan) axi_dma_ll_tx_get_eof_desc_addr(&AXI_DMA, chan)
#define spi_dma_ll_get_in_suc_eof_desc_addr(dev, chan) axi_dma_ll_rx_get_success_eof_desc_addr(&AXI_DMA, chan)
#define spi_dma_ll_rx_start(dev, chan, addr) do {\
axi_dma_ll_rx_set_desc_addr(&AXI_DMA, chan, (uint32_t)addr);\
axi_dma_ll_rx_start(&AXI_DMA, chan);\
} while (0)
#define spi_dma_ll_tx_start(dev, chan, addr) do {\
axi_dma_ll_tx_set_desc_addr(&AXI_DMA, chan, (uint32_t)addr);\
axi_dma_ll_tx_start(&AXI_DMA, chan);\
} while (0)
#endif
#endif //SOC_GDMA_SUPPORTED
static void s_spi_slave_hd_hal_dma_init_config(const spi_slave_hd_hal_context_t *hal)
{
@ -117,9 +143,66 @@ void spi_slave_hd_hal_init(spi_slave_hd_hal_context_t *hal, const spi_slave_hd_h
spi_ll_slave_set_seg_mode(hal->dev, true);
}
#if SOC_NON_CACHEABLE_OFFSET
#define ADDR_DMA_2_CPU(addr) ((typeof(addr))((uint32_t)(addr) + SOC_NON_CACHEABLE_OFFSET))
#define ADDR_CPU_2_DMA(addr) ((typeof(addr))((uint32_t)(addr) - SOC_NON_CACHEABLE_OFFSET))
#else
#define ADDR_DMA_2_CPU(addr) (addr)
#define ADDR_CPU_2_DMA(addr) (addr)
#endif
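
A hedged illustration of the invariant these macros rely on (hypothetical check, not part of the patch): the two views are mutual inverses, so a descriptor linked into the chain via ADDR_CPU_2_DMA can be walked back through ADDR_DMA_2_CPU.

#include "hal/assert.h"

static inline void addr_view_roundtrip_check(spi_dma_desc_t *desc)
{
    HAL_ASSERT(ADDR_CPU_2_DMA(ADDR_DMA_2_CPU(desc)) == desc);
}
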
static void s_spi_hal_dma_desc_setup_link(spi_dma_desc_t *dmadesc, const void *data, int len, bool is_rx)
{
dmadesc = ADDR_DMA_2_CPU(dmadesc);
int n = 0;
while (len) {
int dmachunklen = len;
if (dmachunklen > DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED) {
dmachunklen = DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED;
}
if (is_rx) {
//Receive needs DMA length rounded to next 32-bit boundary
dmadesc[n].dw0.size = (dmachunklen + 3) & (~3);
dmadesc[n].dw0.length = (dmachunklen + 3) & (~3);
} else {
dmadesc[n].dw0.size = dmachunklen;
dmadesc[n].dw0.length = dmachunklen;
}
dmadesc[n].buffer = (uint8_t *)data;
dmadesc[n].dw0.suc_eof = 0;
dmadesc[n].dw0.owner = 1;
dmadesc[n].next = ADDR_CPU_2_DMA(&dmadesc[n + 1]);
len -= dmachunklen;
data += dmachunklen;
n++;
}
dmadesc[n - 1].dw0.suc_eof = 1; //Mark last DMA desc as end of stream.
dmadesc[n - 1].next = NULL;
}
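
The loop above splits a transfer into chunks of at most DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED bytes, so the number of descriptors needed is a ceiling division; a hedged helper (not in the patch) spelling that out:

#include "hal/dma_types.h"

// Number of descriptors a transfer of `len` bytes consumes (assumes len > 0).
static inline int spi_dma_desc_count(int len)
{
    return (len + DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED - 1)
           / DMA_DESCRIPTOR_BUFFER_MAX_SIZE_4B_ALIGNED;
}
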
static int s_desc_get_received_len_addr(spi_dma_desc_t* head, spi_dma_desc_t** out_next, void **out_buff_head)
{
spi_dma_desc_t* desc_cpu = ADDR_DMA_2_CPU(head);
int len = 0;
if (out_buff_head) {
*out_buff_head = desc_cpu->buffer;
}
while(head) {
len += desc_cpu->dw0.length;
bool eof = desc_cpu->dw0.suc_eof;
desc_cpu = ADDR_DMA_2_CPU(desc_cpu->next);
head = head->next;
if (eof) break;
}
if (out_next) {
*out_next = head;
}
return len;
}
void spi_slave_hd_hal_rxdma(spi_slave_hd_hal_context_t *hal, uint8_t *out_buf, size_t len)
{
lldesc_setup_link(hal->dmadesc_rx->desc, out_buf, len, true);
s_spi_hal_dma_desc_setup_link(hal->dmadesc_rx->desc, out_buf, len, true);
spi_ll_dma_rx_fifo_reset(hal->dev);
spi_dma_ll_rx_reset(hal->dma_in, hal->rx_dma_chan);
@ -128,12 +211,12 @@ void spi_slave_hd_hal_rxdma(spi_slave_hd_hal_context_t *hal, uint8_t *out_buf, s
spi_ll_clear_intr(hal->dev, SPI_LL_INTR_CMD7);
spi_ll_dma_rx_enable(hal->dev, 1);
spi_dma_ll_rx_start(hal->dma_in, hal->rx_dma_chan, hal->dmadesc_rx->desc);
spi_dma_ll_rx_start(hal->dma_in, hal->rx_dma_chan, (lldesc_t *)hal->dmadesc_rx->desc);
}
void spi_slave_hd_hal_txdma(spi_slave_hd_hal_context_t *hal, uint8_t *data, size_t len)
{
lldesc_setup_link(hal->dmadesc_tx->desc, data, len, false);
s_spi_hal_dma_desc_setup_link(hal->dmadesc_tx->desc, data, len, false);
spi_ll_dma_tx_fifo_reset(hal->dev);
spi_dma_ll_tx_reset(hal->dma_out, hal->tx_dma_chan);
@ -142,7 +225,7 @@ void spi_slave_hd_hal_txdma(spi_slave_hd_hal_context_t *hal, uint8_t *data, size
spi_ll_clear_intr(hal->dev, SPI_LL_INTR_CMD8);
spi_ll_dma_tx_enable(hal->dev, 1);
spi_dma_ll_tx_start(hal->dma_out, hal->tx_dma_chan, hal->dmadesc_tx->desc);
spi_dma_ll_tx_start(hal->dma_out, hal->tx_dma_chan, (lldesc_t *)hal->dmadesc_tx->desc);
}
static spi_ll_intr_t get_event_intr(spi_slave_hd_hal_context_t *hal, spi_event_t ev)
@ -238,11 +321,11 @@ int spi_slave_hd_hal_get_rxlen(spi_slave_hd_hal_context_t *hal)
int spi_slave_hd_hal_rxdma_seg_get_len(spi_slave_hd_hal_context_t *hal)
{
lldesc_t *desc = hal->dmadesc_rx->desc;
return lldesc_get_received_len(desc, NULL);
spi_dma_desc_t *desc = hal->dmadesc_rx->desc;
return s_desc_get_received_len_addr(desc, NULL, NULL);
}
bool spi_slave_hd_hal_get_tx_finished_trans(spi_slave_hd_hal_context_t *hal, void **out_trans)
bool spi_slave_hd_hal_get_tx_finished_trans(spi_slave_hd_hal_context_t *hal, void **out_trans, void **real_buff_addr)
{
uint32_t desc_now = spi_dma_ll_get_out_eof_desc_addr(hal->dma_out, hal->tx_dma_chan);
if ((uint32_t)hal->tx_dma_head->desc == desc_now) {
@ -255,11 +338,12 @@ bool spi_slave_hd_hal_get_tx_finished_trans(spi_slave_hd_hal_context_t *hal, voi
hal->tx_dma_head = hal->dmadesc_tx;
}
*out_trans = hal->tx_dma_head->arg;
s_desc_get_received_len_addr(hal->tx_dma_head->desc, NULL, real_buff_addr);
hal->tx_recycled_desc_cnt++;
return true;
}
bool spi_slave_hd_hal_get_rx_finished_trans(spi_slave_hd_hal_context_t *hal, void **out_trans, size_t *out_len)
bool spi_slave_hd_hal_get_rx_finished_trans(spi_slave_hd_hal_context_t *hal, void **out_trans, void **real_buff_addr, size_t *out_len)
{
uint32_t desc_now = spi_dma_ll_get_in_suc_eof_desc_addr(hal->dma_in, hal->rx_dma_chan);
if ((uint32_t)hal->rx_dma_head->desc == desc_now) {
@ -272,7 +356,7 @@ bool spi_slave_hd_hal_get_rx_finished_trans(spi_slave_hd_hal_context_t *hal, voi
hal->rx_dma_head = hal->dmadesc_rx;
}
*out_trans = hal->rx_dma_head->arg;
*out_len = hal->rx_dma_head->desc->length;
*out_len = s_desc_get_received_len_addr(hal->rx_dma_head->desc, NULL, real_buff_addr);
hal->rx_recycled_desc_cnt++;
return true;
}
@ -287,7 +371,7 @@ esp_err_t spi_slave_hd_hal_txdma_append(spi_slave_hd_hal_context_t *hal, uint8_t
return ESP_ERR_INVALID_STATE;
}
lldesc_setup_link(hal->tx_cur_desc->desc, data, len, false);
s_spi_hal_dma_desc_setup_link(hal->tx_cur_desc->desc, data, len, false);
hal->tx_cur_desc->arg = arg;
if (!hal->tx_dma_started) {
@ -298,10 +382,10 @@ esp_err_t spi_slave_hd_hal_txdma_append(spi_slave_hd_hal_context_t *hal, uint8_t
spi_ll_outfifo_empty_clr(hal->dev);
spi_dma_ll_tx_reset(hal->dma_out, hal->tx_dma_chan);
spi_ll_dma_tx_enable(hal->dev, 1);
spi_dma_ll_tx_start(hal->dma_out, hal->tx_dma_chan, hal->tx_cur_desc->desc);
spi_dma_ll_tx_start(hal->dma_out, hal->tx_dma_chan, (lldesc_t *)hal->tx_cur_desc->desc);
} else {
//there is already a consecutive link
STAILQ_NEXT(hal->tx_dma_tail->desc, qe) = hal->tx_cur_desc->desc;
ADDR_DMA_2_CPU(hal->tx_dma_tail->desc)->next = hal->tx_cur_desc->desc;
hal->tx_dma_tail = hal->tx_cur_desc;
spi_dma_ll_tx_restart(hal->dma_out, hal->tx_dma_chan);
}
@ -328,7 +412,7 @@ esp_err_t spi_slave_hd_hal_rxdma_append(spi_slave_hd_hal_context_t *hal, uint8_t
return ESP_ERR_INVALID_STATE;
}
lldesc_setup_link(hal->rx_cur_desc->desc, data, len, false);
s_spi_hal_dma_desc_setup_link(hal->rx_cur_desc->desc, data, len, false);
hal->rx_cur_desc->arg = arg;
if (!hal->rx_dma_started) {
@ -339,10 +423,10 @@ esp_err_t spi_slave_hd_hal_rxdma_append(spi_slave_hd_hal_context_t *hal, uint8_t
spi_ll_dma_rx_fifo_reset(hal->dma_in);
spi_ll_infifo_full_clr(hal->dev);
spi_ll_dma_rx_enable(hal->dev, 1);
spi_dma_ll_rx_start(hal->dma_in, hal->rx_dma_chan, hal->rx_cur_desc->desc);
spi_dma_ll_rx_start(hal->dma_in, hal->rx_dma_chan, (lldesc_t *)hal->rx_cur_desc->desc);
} else {
//there is already a consecutive link
STAILQ_NEXT(hal->rx_dma_tail->desc, qe) = hal->rx_cur_desc->desc;
ADDR_DMA_2_CPU(hal->rx_dma_tail->desc)->next = hal->rx_cur_desc->desc;
hal->rx_dma_tail = hal->rx_cur_desc;
spi_dma_ll_rx_restart(hal->dma_in, hal->rx_dma_chan);
}

View File

@ -891,6 +891,10 @@ config SOC_SPI_MAXIMUM_BUFFER_SIZE
int
default 64
config SOC_SPI_SUPPORT_SLAVE_HD_VER2
bool
default y
config SOC_SPI_SLAVE_SUPPORT_SEG_TRANS
bool
default y

View File

@ -405,7 +405,7 @@
#define SOC_SPI_MAX_CS_NUM 6
#define SOC_SPI_MAXIMUM_BUFFER_SIZE 64
// #define SOC_SPI_SUPPORT_SLAVE_HD_VER2 1 //TODO: IDF-7505
#define SOC_SPI_SUPPORT_SLAVE_HD_VER2 1
#define SOC_SPI_SLAVE_SUPPORT_SEG_TRANS 1
#define SOC_SPI_SUPPORT_DDRCLK 1
#define SOC_SPI_SUPPORT_CD_SIG 1