// Copyright 2010-2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <string.h>                 //for memcpy()
#include "esp_log.h"
#include "esp_heap_caps.h"          //for heap_caps_malloc()
#include "esp_pm.h"                 //for the APB frequency lock
#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include "freertos/queue.h"
#include "freertos/ringbuf.h"
#include "soc/soc_memory_layout.h"  //for esp_ptr_dma_capable()
#include "driver/gpio.h"
#include "driver/spi_common_internal.h"
#include "driver/spi_slave_hd.h"
#include "hal/spi_slave_hd_hal.h"

#if (SOC_SPI_PERIPH_NUM == 2)
#define VALID_HOST(x) ((x) == SPI2_HOST)
#elif (SOC_SPI_PERIPH_NUM == 3)
#define VALID_HOST(x) ((x) >= SPI2_HOST && (x) <= SPI3_HOST)
#endif
#define SPIHD_CHECK(cond, warn, ret) do { if (!(cond)) { ESP_LOGE(TAG, warn); return ret; } } while(0)
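
//Context of a claimed SPI slave HD peripheral. In segment mode, transactions
//travel to the ISR through tx/rx_trans_queue; in append mode the counting
//semaphores bound the number of in-flight DMA descriptors instead. Finished
//transactions come back through tx/rx_ret_queue in both modes.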
typedef struct {
    bool dma_enabled;
    int max_transfer_sz;
    uint32_t flags;
    portMUX_TYPE int_spinlock;
    intr_handle_t intr;
    intr_handle_t intr_dma;
    spi_slave_hd_callback_config_t callback;
    spi_slave_hd_hal_context_t hal;
    bool append_mode;

    QueueHandle_t tx_trans_queue;
    QueueHandle_t tx_ret_queue;
    QueueHandle_t rx_trans_queue;
    QueueHandle_t rx_ret_queue;
    QueueHandle_t tx_cnting_sem;
    QueueHandle_t rx_cnting_sem;

    spi_slave_hd_data_t *tx_desc;
    spi_slave_hd_data_t *rx_desc;
#ifdef CONFIG_PM_ENABLE
    esp_pm_lock_handle_t pm_lock;
#endif
} spi_slave_hd_slot_t;

static spi_slave_hd_slot_t *spihost[SOC_SPI_PERIPH_NUM];
static const char TAG[] = "slave_hd";

static void spi_slave_hd_intr_segment(void *arg);
#if CONFIG_IDF_TARGET_ESP32S2
//Append mode is only supported on ESP32S2 now
static void spi_slave_hd_intr_append(void *arg);
#endif

esp_err_t spi_slave_hd_init(spi_host_device_t host_id, const spi_bus_config_t *bus_config,
                            const spi_slave_hd_slot_config_t *config)
{
    bool spi_chan_claimed;
    bool append_mode = (config->flags & SPI_SLAVE_HD_APPEND_MODE);
    uint32_t actual_tx_dma_chan = 0;
    uint32_t actual_rx_dma_chan = 0;
    esp_err_t ret = ESP_OK;

    SPIHD_CHECK(VALID_HOST(host_id), "invalid host", ESP_ERR_INVALID_ARG);
#if CONFIG_IDF_TARGET_ESP32S2
    SPIHD_CHECK(config->dma_chan == SPI_DMA_DISABLED || config->dma_chan == (int)host_id || config->dma_chan == SPI_DMA_CH_AUTO, "invalid dma channel", ESP_ERR_INVALID_ARG);
#elif SOC_GDMA_SUPPORTED
    SPIHD_CHECK(config->dma_chan == SPI_DMA_DISABLED || config->dma_chan == SPI_DMA_CH_AUTO, "invalid dma channel, chip only supports spi dma channel auto-alloc", ESP_ERR_INVALID_ARG);
#endif
#if !CONFIG_IDF_TARGET_ESP32S2
    //Append mode is only supported on ESP32S2 now
    SPIHD_CHECK(append_mode == 0, "Append mode is only supported on ESP32S2 now", ESP_ERR_INVALID_ARG);
#endif

    spi_chan_claimed = spicommon_periph_claim(host_id, "slave_hd");
    SPIHD_CHECK(spi_chan_claimed, "host already in use", ESP_ERR_INVALID_STATE);

    spi_slave_hd_slot_t *host = calloc(1, sizeof(spi_slave_hd_slot_t));
    if (host == NULL) {
        ret = ESP_ERR_NO_MEM;
        goto cleanup;
    }
    spihost[host_id] = host;
    host->int_spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
    host->dma_enabled = (config->dma_chan != SPI_DMA_DISABLED);

    if (host->dma_enabled) {
        ret = spicommon_slave_dma_chan_alloc(host_id, config->dma_chan, &actual_tx_dma_chan, &actual_rx_dma_chan);
        if (ret != ESP_OK) {
            goto cleanup;
        }
    }

    ret = spicommon_bus_initialize_io(host_id, bus_config, SPICOMMON_BUSFLAG_SLAVE | bus_config->flags, &host->flags);
    if (ret != ESP_OK) {
        goto cleanup;
    }
    gpio_set_direction(config->spics_io_num, GPIO_MODE_INPUT);
    spicommon_cs_initialize(host_id, config->spics_io_num, 0,
                            !(bus_config->flags & SPICOMMON_BUSFLAG_NATIVE_PINS));
    host->append_mode = append_mode;

    spi_slave_hd_hal_config_t hal_config = {
        .host_id = host_id,
        .dma_in = SPI_LL_GET_HW(host_id),
        .dma_out = SPI_LL_GET_HW(host_id),
        .dma_enabled = host->dma_enabled,
        .tx_dma_chan = actual_tx_dma_chan,
        .rx_dma_chan = actual_rx_dma_chan,
        .append_mode = append_mode,
        .mode = config->mode,
        .tx_lsbfirst = (config->flags & SPI_SLAVE_HD_RXBIT_LSBFIRST),
        .rx_lsbfirst = (config->flags & SPI_SLAVE_HD_TXBIT_LSBFIRST),
    };

    if (host->dma_enabled) {
        //Malloc for all the DMA descriptors
        uint32_t total_desc_size = spi_slave_hd_hal_get_total_desc_size(&host->hal, bus_config->max_transfer_sz);
        host->hal.dmadesc_tx = heap_caps_malloc(total_desc_size, MALLOC_CAP_DMA);
        host->hal.dmadesc_rx = heap_caps_malloc(total_desc_size, MALLOC_CAP_DMA);
        if (!host->hal.dmadesc_tx || !host->hal.dmadesc_rx) {
            ret = ESP_ERR_NO_MEM;
            goto cleanup;
        }
        //Get the actual SPI bus transaction size in bytes.
        host->max_transfer_sz = spi_salve_hd_hal_get_max_bus_size(&host->hal);
    } else {
        //We're limited to non-DMA transfers: the SPI work registers can hold 64 bytes at most.
        host->max_transfer_sz = 0;
    }

    //Init the hal according to the hal_config set above
    spi_slave_hd_hal_init(&host->hal, &hal_config);

#ifdef CONFIG_PM_ENABLE
    ret = esp_pm_lock_create(ESP_PM_APB_FREQ_MAX, 0, "spi_slave", &host->pm_lock);
    if (ret != ESP_OK) {
        goto cleanup;
    }
    //Lock APB frequency while the SPI slave driver is in use
    esp_pm_lock_acquire(host->pm_lock);
#endif //CONFIG_PM_ENABLE

    //Create Queues and Semaphores
    host->tx_ret_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
    host->rx_ret_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
    if (!host->tx_ret_queue || !host->rx_ret_queue) {
        ret = ESP_ERR_NO_MEM;
        goto cleanup;
    }
    if (!host->append_mode) {
        host->tx_trans_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
        host->rx_trans_queue = xQueueCreate(config->queue_size, sizeof(spi_slave_hd_data_t *));
        if (!host->tx_trans_queue || !host->rx_trans_queue) {
            ret = ESP_ERR_NO_MEM;
            goto cleanup;
        }
    }
#if CONFIG_IDF_TARGET_ESP32S2
    //Append mode is only supported on ESP32S2 now
    else {
        host->tx_cnting_sem = xSemaphoreCreateCounting(config->queue_size, config->queue_size);
        host->rx_cnting_sem = xSemaphoreCreateCounting(config->queue_size, config->queue_size);
        if (!host->tx_cnting_sem || !host->rx_cnting_sem) {
            ret = ESP_ERR_NO_MEM;
            goto cleanup;
        }
    }
#endif //#if CONFIG_IDF_TARGET_ESP32S2

    //Alloc intr
    if (!host->append_mode) {
        ret = esp_intr_alloc(spicommon_irqsource_for_host(host_id), 0, spi_slave_hd_intr_segment,
                             (void *)host, &host->intr);
        if (ret != ESP_OK) {
            goto cleanup;
        }
        ret = esp_intr_alloc(spicommon_irqdma_source_for_host(host_id), 0, spi_slave_hd_intr_segment,
                             (void *)host, &host->intr_dma);
        if (ret != ESP_OK) {
            goto cleanup;
        }
    }
#if CONFIG_IDF_TARGET_ESP32S2
    //Append mode is only supported on ESP32S2 now
    else {
        ret = esp_intr_alloc(spicommon_irqsource_for_host(host_id), 0, spi_slave_hd_intr_append,
                             (void *)host, &host->intr);
        if (ret != ESP_OK) {
            goto cleanup;
        }
        ret = esp_intr_alloc(spicommon_irqdma_source_for_host(host_id), 0, spi_slave_hd_intr_append,
                             (void *)host, &host->intr_dma);
        if (ret != ESP_OK) {
            goto cleanup;
        }
    }
#endif //#if CONFIG_IDF_TARGET_ESP32S2

    //Init callbacks
    memcpy((uint8_t *)&host->callback, (uint8_t *)&config->cb_config, sizeof(spi_slave_hd_callback_config_t));

    spi_event_t event = 0;
    if (host->callback.cb_buffer_tx != NULL) event |= SPI_EV_BUF_TX;
    if (host->callback.cb_buffer_rx != NULL) event |= SPI_EV_BUF_RX;
    if (host->callback.cb_cmd9 != NULL) event |= SPI_EV_CMD9;
    if (host->callback.cb_cmdA != NULL) event |= SPI_EV_CMDA;
    spi_slave_hd_hal_enable_event_intr(&host->hal, event);
    return ESP_OK;

cleanup:
    //Memory free is in the deinit function
    spi_slave_hd_deinit(host_id);
    return ret;
}
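
/*
 * A minimal bring-up sketch (illustration only, not part of the driver; the
 * GPIO numbers, queue size and transfer size below are placeholder values):
 *
 *   spi_bus_config_t bus_cfg = {
 *       .mosi_io_num = 11, .miso_io_num = 13, .sclk_io_num = 12,
 *       .quadwp_io_num = -1, .quadhd_io_num = -1,
 *       .max_transfer_sz = 4096,
 *   };
 *   spi_slave_hd_slot_config_t slot_cfg = {
 *       .mode = 0,
 *       .spics_io_num = 10,
 *       .queue_size = 4,
 *       .dma_chan = SPI_DMA_CH_AUTO,
 *   };
 *   ESP_ERROR_CHECK(spi_slave_hd_init(SPI2_HOST, &bus_cfg, &slot_cfg));
 */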

esp_err_t spi_slave_hd_deinit(spi_host_device_t host_id)
{
    spi_slave_hd_slot_t *host = spihost[host_id];
    if (host == NULL) return ESP_ERR_INVALID_ARG;

    if (host->tx_trans_queue) vQueueDelete(host->tx_trans_queue);
    if (host->tx_ret_queue) vQueueDelete(host->tx_ret_queue);
    if (host->rx_trans_queue) vQueueDelete(host->rx_trans_queue);
    if (host->rx_ret_queue) vQueueDelete(host->rx_ret_queue);
    if (host->tx_cnting_sem) vSemaphoreDelete(host->tx_cnting_sem);
    if (host->rx_cnting_sem) vSemaphoreDelete(host->rx_cnting_sem);

    free(host->hal.dmadesc_tx);
    free(host->hal.dmadesc_rx);
    esp_intr_free(host->intr);
    esp_intr_free(host->intr_dma);
#ifdef CONFIG_PM_ENABLE
    if (host->pm_lock) {
        esp_pm_lock_release(host->pm_lock);
        esp_pm_lock_delete(host->pm_lock);
    }
#endif

    spicommon_periph_free(host_id);
    if (host->dma_enabled) {
        spicommon_slave_free_dma(host_id);
    }
    free(host);
    spihost[host_id] = NULL;
    return ESP_OK;
}
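
//tx_invoke/rx_invoke manually fire the SPI_EV_SEND/SPI_EV_RECV interrupt, so
//that the ISR runs and picks up a freshly queued descriptor even when the
//peripheral is idle and would otherwise raise no event.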
static void tx_invoke(spi_slave_hd_slot_t *host)
{
    portENTER_CRITICAL(&host->int_spinlock);
    spi_slave_hd_hal_invoke_event_intr(&host->hal, SPI_EV_SEND);
    portEXIT_CRITICAL(&host->int_spinlock);
}

static void rx_invoke(spi_slave_hd_slot_t *host)
{
    portENTER_CRITICAL(&host->int_spinlock);
    spi_slave_hd_hal_invoke_event_intr(&host->hal, SPI_EV_RECV);
    portEXIT_CRITICAL(&host->int_spinlock);
}

static inline IRAM_ATTR BaseType_t intr_check_clear_callback(spi_slave_hd_slot_t *host, spi_event_t ev, slave_cb_t cb)
{
    BaseType_t cb_awoken = pdFALSE;
    if (spi_slave_hd_hal_check_clear_event(&host->hal, ev) && cb) {
        spi_slave_hd_event_t event = { .event = ev };
        cb(host->callback.arg, &event, &cb_awoken);
    }
    return cb_awoken;
}

static IRAM_ATTR void spi_slave_hd_intr_segment(void *arg)
{
    spi_slave_hd_slot_t *host = (spi_slave_hd_slot_t *)arg;
    spi_slave_hd_callback_config_t *callback = &host->callback;
    spi_slave_hd_hal_context_t *hal = &host->hal;
    BaseType_t awoken = pdFALSE;
    BaseType_t ret;

    awoken |= intr_check_clear_callback(host, SPI_EV_BUF_TX, callback->cb_buffer_tx);
    awoken |= intr_check_clear_callback(host, SPI_EV_BUF_RX, callback->cb_buffer_rx);
    awoken |= intr_check_clear_callback(host, SPI_EV_CMD9, callback->cb_cmd9);
    awoken |= intr_check_clear_callback(host, SPI_EV_CMDA, callback->cb_cmdA);

    bool tx_done = false;
    bool rx_done = false;
    portENTER_CRITICAL_ISR(&host->int_spinlock);
    if (host->tx_desc && spi_slave_hd_hal_check_disable_event(hal, SPI_EV_SEND)) {
        tx_done = true;
    }
    if (host->rx_desc && spi_slave_hd_hal_check_disable_event(hal, SPI_EV_RECV)) {
        rx_done = true;
    }
    portEXIT_CRITICAL_ISR(&host->int_spinlock);

    if (tx_done) {
        bool ret_queue = true;
        if (callback->cb_sent) {
            spi_slave_hd_event_t ev = {
                .event = SPI_EV_SEND,
                .trans = host->tx_desc,
            };
            BaseType_t cb_awoken = pdFALSE;
            ret_queue = callback->cb_sent(callback->arg, &ev, &cb_awoken);
            awoken |= cb_awoken;
        }
        if (ret_queue) {
            ret = xQueueSendFromISR(host->tx_ret_queue, &host->tx_desc, &awoken);
            //Sending to the return queue must succeed: the transactions remaining in trans_queue + ret_queue can never exceed the queue length.
            assert(ret == pdTRUE);
        }
        host->tx_desc = NULL;
    }
    if (rx_done) {
        bool ret_queue = true;
        host->rx_desc->trans_len = spi_slave_hd_hal_rxdma_seg_get_len(hal);
        if (callback->cb_recv) {
            spi_slave_hd_event_t ev = {
                .event = SPI_EV_RECV,
                .trans = host->rx_desc,
            };
            BaseType_t cb_awoken = pdFALSE;
            ret_queue = callback->cb_recv(callback->arg, &ev, &cb_awoken);
            awoken |= cb_awoken;
        }
        if (ret_queue) {
            ret = xQueueSendFromISR(host->rx_ret_queue, &host->rx_desc, &awoken);
            //Sending to the return queue must succeed: the transactions remaining in trans_queue + ret_queue can never exceed the queue length.
            assert(ret == pdTRUE);
        }
        host->rx_desc = NULL;
    }

    bool tx_sent = false;
    bool rx_sent = false;
    if (!host->tx_desc) {
        ret = xQueueReceiveFromISR(host->tx_trans_queue, &host->tx_desc, &awoken);
        if (ret == pdTRUE) {
            spi_slave_hd_hal_txdma(hal, host->tx_desc->data, host->tx_desc->len);
            tx_sent = true;
            if (callback->cb_send_dma_ready) {
                spi_slave_hd_event_t ev = {
                    .event = SPI_EV_SEND_DMA_READY,
                    .trans = host->tx_desc,
                };
                BaseType_t cb_awoken = pdFALSE;
                callback->cb_send_dma_ready(callback->arg, &ev, &cb_awoken);
                awoken |= cb_awoken;
            }
        }
    }
    if (!host->rx_desc) {
        ret = xQueueReceiveFromISR(host->rx_trans_queue, &host->rx_desc, &awoken);
        if (ret == pdTRUE) {
            spi_slave_hd_hal_rxdma(hal, host->rx_desc->data, host->rx_desc->len);
            rx_sent = true;
            if (callback->cb_recv_dma_ready) {
                spi_slave_hd_event_t ev = {
                    .event = SPI_EV_RECV_DMA_READY,
                    .trans = host->rx_desc,
                };
                BaseType_t cb_awoken = pdFALSE;
                callback->cb_recv_dma_ready(callback->arg, &ev, &cb_awoken);
                awoken |= cb_awoken;
            }
        }
    }

    portENTER_CRITICAL_ISR(&host->int_spinlock);
    if (tx_sent) {
        spi_slave_hd_hal_enable_event_intr(hal, SPI_EV_SEND);
    }
    if (rx_sent) {
        spi_slave_hd_hal_enable_event_intr(hal, SPI_EV_RECV);
    }
    portEXIT_CRITICAL_ISR(&host->int_spinlock);

    if (awoken == pdTRUE) portYIELD_FROM_ISR();
}
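
/*
 * Unlike the segment-mode ISR above, in append mode the hardware walks a
 * pre-loaded chain of DMA descriptors on its own. The ISR below therefore only
 * drains finished transactions back into the return queues and releases the
 * counting semaphores; new descriptors are appended from task context in
 * spi_slave_hd_append_trans().
 */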
#if CONFIG_IDF_TARGET_ESP32S2
//Append mode is only supported on ESP32S2 now
static IRAM_ATTR void spi_slave_hd_intr_append(void *arg)
{
    spi_slave_hd_slot_t *host = (spi_slave_hd_slot_t *)arg;
    spi_slave_hd_callback_config_t *callback = &host->callback;
    spi_slave_hd_hal_context_t *hal = &host->hal;
    BaseType_t awoken = pdFALSE;
    BaseType_t ret;

    bool tx_done = false;
    bool rx_done = false;
    portENTER_CRITICAL_ISR(&host->int_spinlock);
    if (spi_slave_hd_hal_check_clear_event(hal, SPI_EV_SEND)) {
        tx_done = true;
    }
    if (spi_slave_hd_hal_check_clear_event(hal, SPI_EV_RECV)) {
        rx_done = true;
    }
    portEXIT_CRITICAL_ISR(&host->int_spinlock);

    if (tx_done) {
        spi_slave_hd_data_t *trans_desc;
        while (1) {
            bool trans_finish = spi_slave_hd_hal_get_tx_finished_trans(hal, (void **)&trans_desc);
            if (!trans_finish) {
                break;
            }
            bool ret_queue = true;
            if (callback->cb_sent) {
                spi_slave_hd_event_t ev = {
                    .event = SPI_EV_SEND,
                    .trans = trans_desc,
                };
                BaseType_t cb_awoken = pdFALSE;
                ret_queue = callback->cb_sent(callback->arg, &ev, &cb_awoken);
                awoken |= cb_awoken;
            }
            if (ret_queue) {
                ret = xQueueSendFromISR(host->tx_ret_queue, &trans_desc, &awoken);
                assert(ret == pdTRUE);
                ret = xSemaphoreGiveFromISR(host->tx_cnting_sem, &awoken);
                assert(ret == pdTRUE);
            }
        }
    }
    if (rx_done) {
        spi_slave_hd_data_t *trans_desc;
        size_t trans_len;
        while (1) {
            bool trans_finish = spi_slave_hd_hal_get_rx_finished_trans(hal, (void **)&trans_desc, &trans_len);
            if (!trans_finish) {
                break;
            }
            trans_desc->trans_len = trans_len;
            bool ret_queue = true;
            if (callback->cb_recv) {
                spi_slave_hd_event_t ev = {
                    .event = SPI_EV_RECV,
                    .trans = trans_desc,
                };
                BaseType_t cb_awoken = pdFALSE;
                ret_queue = callback->cb_recv(callback->arg, &ev, &cb_awoken);
                awoken |= cb_awoken;
            }
            if (ret_queue) {
                ret = xQueueSendFromISR(host->rx_ret_queue, &trans_desc, &awoken);
                assert(ret == pdTRUE);
                ret = xSemaphoreGiveFromISR(host->rx_cnting_sem, &awoken);
                assert(ret == pdTRUE);
            }
        }
    }

    if (awoken == pdTRUE) portYIELD_FROM_ISR();
}
#endif //#if CONFIG_IDF_TARGET_ESP32S2

static esp_err_t get_ret_queue_result(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t **out_trans, TickType_t timeout)
{
    spi_slave_hd_slot_t *host = spihost[host_id];
    spi_slave_hd_data_t *trans;
    BaseType_t ret;
    if (chan == SPI_SLAVE_CHAN_TX) {
        ret = xQueueReceive(host->tx_ret_queue, &trans, timeout);
    } else {
        ret = xQueueReceive(host->rx_ret_queue, &trans, timeout);
    }
    if (ret == pdFALSE) {
        return ESP_ERR_TIMEOUT;
    }
    *out_trans = trans;
    return ESP_OK;
}

//---------------------------------------------------------Segment Mode Transaction APIs-----------------------------------------------------------//
esp_err_t spi_slave_hd_queue_trans(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t *trans, TickType_t timeout)
{
    spi_slave_hd_slot_t *host = spihost[host_id];
    SPIHD_CHECK(host->append_mode == 0, "This API should be used for SPI Slave HD Segment Mode", ESP_ERR_INVALID_STATE);
    SPIHD_CHECK(esp_ptr_dma_capable(trans->data), "The buffer should be DMA capable.", ESP_ERR_INVALID_ARG);
    SPIHD_CHECK(trans->len <= host->max_transfer_sz && trans->len > 0, "Invalid buffer size", ESP_ERR_INVALID_ARG);
    SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);

    if (chan == SPI_SLAVE_CHAN_TX) {
        BaseType_t ret = xQueueSend(host->tx_trans_queue, &trans, timeout);
        if (ret == pdFALSE) {
            return ESP_ERR_TIMEOUT;
        }
        tx_invoke(host);
    } else { //chan == SPI_SLAVE_CHAN_RX
        BaseType_t ret = xQueueSend(host->rx_trans_queue, &trans, timeout);
        if (ret == pdFALSE) {
            return ESP_ERR_TIMEOUT;
        }
        rx_invoke(host);
    }
    return ESP_OK;
}

esp_err_t spi_slave_hd_get_trans_res(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t **out_trans, TickType_t timeout)
{
    esp_err_t ret;
    spi_slave_hd_slot_t *host = spihost[host_id];
    SPIHD_CHECK(host->append_mode == 0, "This API should be used for SPI Slave HD Segment Mode", ESP_ERR_INVALID_STATE);
    SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);
    ret = get_ret_queue_result(host_id, chan, out_trans, timeout);
    return ret;
}
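
/*
 * Typical segment-mode TX flow (a sketch; the buffer size and timeouts are
 * placeholder values). Buffers must be DMA capable, hence heap_caps_malloc()
 * with MALLOC_CAP_DMA:
 *
 *   uint8_t *buf = heap_caps_malloc(128, MALLOC_CAP_DMA);
 *   spi_slave_hd_data_t trans = { .data = buf, .len = 128 };
 *   spi_slave_hd_data_t *ret_trans;
 *   ESP_ERROR_CHECK(spi_slave_hd_queue_trans(SPI2_HOST, SPI_SLAVE_CHAN_TX, &trans, portMAX_DELAY));
 *   ESP_ERROR_CHECK(spi_slave_hd_get_trans_res(SPI2_HOST, SPI_SLAVE_CHAN_TX, &ret_trans, portMAX_DELAY));
 */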

void spi_slave_hd_read_buffer(spi_host_device_t host_id, int addr, uint8_t *out_data, size_t len)
{
    spi_slave_hd_hal_read_buffer(&spihost[host_id]->hal, addr, out_data, len);
}

void spi_slave_hd_write_buffer(spi_host_device_t host_id, int addr, uint8_t *data, size_t len)
{
    spi_slave_hd_hal_write_buffer(&spihost[host_id]->hal, addr, data, len);
}

#if CONFIG_IDF_TARGET_ESP32S2
//Append mode is only supported on ESP32S2 now
//---------------------------------------------------------Append Mode Transaction APIs-----------------------------------------------------------//
esp_err_t spi_slave_hd_append_trans(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t *trans, TickType_t timeout)
{
    esp_err_t err;
    spi_slave_hd_slot_t *host = spihost[host_id];
    spi_slave_hd_hal_context_t *hal = &host->hal;
    SPIHD_CHECK(trans->len <= SPI_MAX_DMA_LEN, "Currently we only support transaction with data length within 4092 bytes", ESP_ERR_INVALID_ARG);
    SPIHD_CHECK(host->append_mode == 1, "This API should be used for SPI Slave HD Append Mode", ESP_ERR_INVALID_STATE);
    SPIHD_CHECK(esp_ptr_dma_capable(trans->data), "The buffer should be DMA capable.", ESP_ERR_INVALID_ARG);
    SPIHD_CHECK(trans->len <= host->max_transfer_sz && trans->len > 0, "Invalid buffer size", ESP_ERR_INVALID_ARG);
    SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);

    if (chan == SPI_SLAVE_CHAN_TX) {
        BaseType_t ret = xSemaphoreTake(host->tx_cnting_sem, timeout);
        if (ret == pdFALSE) {
            return ESP_ERR_TIMEOUT;
        }
        err = spi_slave_hd_hal_txdma_append(hal, trans->data, trans->len, trans);
    } else {
        BaseType_t ret = xSemaphoreTake(host->rx_cnting_sem, timeout);
        if (ret == pdFALSE) {
            return ESP_ERR_TIMEOUT;
        }
        err = spi_slave_hd_hal_rxdma_append(hal, trans->data, trans->len, trans);
    }
    if (err != ESP_OK) {
        ESP_LOGE(TAG, "Wait until the DMA finishes its transaction");
    }
    return err;
}

esp_err_t spi_slave_hd_get_append_trans_res(spi_host_device_t host_id, spi_slave_chan_t chan, spi_slave_hd_data_t **out_trans, TickType_t timeout)
{
    esp_err_t ret;
    spi_slave_hd_slot_t *host = spihost[host_id];
    SPIHD_CHECK(host->append_mode == 1, "This API should be used for SPI Slave HD Append Mode", ESP_ERR_INVALID_STATE);
    SPIHD_CHECK(chan == SPI_SLAVE_CHAN_TX || chan == SPI_SLAVE_CHAN_RX, "Invalid channel", ESP_ERR_INVALID_ARG);
    ret = get_ret_queue_result(host_id, chan, out_trans, timeout);
    return ret;
}
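
/*
 * Typical append-mode RX flow (a sketch; the buffer count and size are
 * placeholder values). Buffers are appended up front and re-appended once each
 * finished transaction has been processed:
 *
 *   static spi_slave_hd_data_t slots[4];
 *   for (int i = 0; i < 4; i++) {
 *       slots[i].data = heap_caps_malloc(256, MALLOC_CAP_DMA);
 *       slots[i].len = 256;
 *       ESP_ERROR_CHECK(spi_slave_hd_append_trans(SPI2_HOST, SPI_SLAVE_CHAN_RX, &slots[i], portMAX_DELAY));
 *   }
 *   spi_slave_hd_data_t *done;
 *   while (spi_slave_hd_get_append_trans_res(SPI2_HOST, SPI_SLAVE_CHAN_RX, &done, portMAX_DELAY) == ESP_OK) {
 *       //done->trans_len bytes arrived in done->data; process, then re-append
 *       ESP_ERROR_CHECK(spi_slave_hd_append_trans(SPI2_HOST, SPI_SLAVE_CHAN_RX, done, portMAX_DELAY));
 *   }
 */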
#endif //#if CONFIG_IDF_TARGET_ESP32S2