/*
 * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
2017-03-31 03:05:25 -04:00
# include <string.h>
# include "esp_types.h"
# include "esp_attr.h"
2023-01-03 22:24:54 -05:00
# include "esp_check.h"
2019-03-26 04:30:43 -04:00
# include "esp_intr_alloc.h"
2017-03-31 03:05:25 -04:00
# include "esp_log.h"
# include "esp_err.h"
2017-09-24 03:05:35 -04:00
# include "esp_pm.h"
2023-09-01 05:51:54 -04:00
# include "esp_cache.h"
2020-09-09 22:37:58 -04:00
# include "esp_heap_caps.h"
# include "esp_rom_sys.h"
# include "soc/lldesc.h"
# include "soc/soc_caps.h"
# include "soc/spi_periph.h"
# include "soc/soc_memory_layout.h"
2017-03-31 03:05:25 -04:00
# include "freertos/FreeRTOS.h"
# include "freertos/semphr.h"
# include "freertos/task.h"
2020-09-09 22:37:58 -04:00
# include "sdkconfig.h"
2017-03-31 03:05:25 -04:00
# include "driver/gpio.h"
2020-09-09 22:37:58 -04:00
# include "driver/spi_slave.h"
2023-01-03 22:24:54 -05:00
# include "hal/gpio_hal.h"
2020-09-14 05:33:10 -04:00
# include "hal/spi_slave_hal.h"
2023-01-03 22:25:46 -05:00
# include "esp_private/spi_slave_internal.h"
2023-01-03 22:24:54 -05:00
# include "esp_private/spi_common_internal.h"
2023-09-01 05:51:54 -04:00
# include "esp_private/esp_cache_private.h"
2019-06-13 02:19:31 -04:00
2017-03-31 03:05:25 -04:00
// Tag used for log output and error reporting by this driver.
// BUGFIX: the literal had been corrupted to " spi_slave " (padded spaces),
// which would appear verbatim in every log line; restored to "spi_slave".
static const char *SPI_TAG = "spi_slave";

// Argument-check helper: logs `str` under SPI_TAG and returns `ret_val`
// from the calling function when condition `a` is false.
#define SPI_CHECK(a, str, ret_val) ESP_RETURN_ON_FALSE(a, ret_val, SPI_TAG, str)
2017-03-31 03:05:25 -04:00
2018-08-20 06:30:40 -04:00
# ifdef CONFIG_SPI_SLAVE_ISR_IN_IRAM
# define SPI_SLAVE_ISR_ATTR IRAM_ATTR
# else
# define SPI_SLAVE_ISR_ATTR
# endif
# ifdef CONFIG_SPI_SLAVE_IN_IRAM
# define SPI_SLAVE_ATTR IRAM_ATTR
# else
# define SPI_SLAVE_ATTR
# endif
2023-09-01 05:51:54 -04:00
/// Struct to hold private transaction data (like tx and rx buffer for DMA).
/// When a user buffer is unsuitable for DMA (L1-cache targets), the driver
/// substitutes an internally allocated bounce buffer here; the pointers then
/// differ from the ones inside `trans`, which is how teardown knows what to free.
typedef struct {
    spi_slave_transaction_t *trans;   //original trans
    void *tx_buffer;                  //actually tx buffer (re-malloced if needed)
    void *rx_buffer;                  //actually rx buffer (re-malloced if needed)
} spi_slave_trans_priv_t;
2017-03-31 03:05:25 -04:00
// Driver-internal state for one SPI slave peripheral (one entry per host in `spihost`).
typedef struct {
    int id;                               // host (peripheral) number this context serves
    spi_bus_config_t bus_config;          // copy of the bus config, reused to free the IO at de-init
    spi_slave_interface_config_t cfg;     // copy of the slave interface config
    intr_handle_t intr;                   // allocated SPI interrupt handle
    spi_slave_hal_context_t hal;          // HAL context for register-level access
    spi_slave_trans_priv_t cur_trans;     // transaction currently loaded into hardware (.trans == NULL when idle)
    uint32_t flags;                       // bus flags filled in by spicommon_bus_initialize_io (e.g. IOMUX routing)
    uint32_t intr_flags;                  // interrupt alloc flags, used by the cross-core IPC registration path
    int max_transfer_sz;                  // max transfer size in bytes for the current DMA/non-DMA setup
    QueueHandle_t trans_queue;            // queued, not-yet-started transactions
    QueueHandle_t ret_queue;              // finished transactions (not created when SPI_SLAVE_NO_RETURN_RESULT is set)
    bool dma_enabled;                     // true when a DMA channel was requested and allocated
    bool cs_iomux;                        // true when the CS line is routed via IOMUX rather than the GPIO matrix
    uint8_t cs_in_signal;                 // peripheral CS input signal index, for freeze/restore of the CS line
    uint16_t internal_mem_align_size;     // DMA buffer alignment requirement (from esp_cache_get_alignment on L1-cache targets, else 4)
    uint32_t tx_dma_chan;                 // actually allocated TX DMA channel
    uint32_t rx_dma_chan;                 // actually allocated RX DMA channel
#ifdef CONFIG_PM_ENABLE
    esp_pm_lock_handle_t pm_lock;         // PM lock holding APB frequency while the driver is installed
#endif
} spi_slave_t;
2019-06-13 02:12:54 -04:00
// Per-host driver context; an entry is NULL when that host has no slave driver installed.
static spi_slave_t *spihost[SOC_SPI_PERIPH_NUM];

// Forward declaration of the transaction-handling interrupt service routine.
static void spi_intr(void *arg);
2017-03-31 03:05:25 -04:00
2023-01-03 22:24:54 -05:00
__attribute__ ( ( always_inline ) )
2019-10-24 07:00:26 -04:00
static inline bool is_valid_host ( spi_host_device_t host )
{
2021-02-03 02:14:17 -05:00
//SPI1 can be used as GPSPI only on ESP32
2019-10-24 07:00:26 -04:00
# if CONFIG_IDF_TARGET_ESP32
return host > = SPI1_HOST & & host < = SPI3_HOST ;
2021-02-03 02:14:17 -05:00
# elif (SOC_SPI_PERIPH_NUM == 2)
return host = = SPI2_HOST ;
# elif (SOC_SPI_PERIPH_NUM == 3)
2019-10-24 07:00:26 -04:00
return host > = SPI2_HOST & & host < = SPI3_HOST ;
# endif
}
2022-03-14 18:29:41 -04:00
// Whether the whole bus was routed through dedicated IOMUX pins at init time.
static inline bool SPI_SLAVE_ISR_ATTR bus_is_iomux(spi_slave_t *host)
{
    return (host->flags & SPICOMMON_BUSFLAG_IOMUX_PINS) != 0;
}
2022-03-14 18:29:41 -04:00
// Detach the CS input from its pad and tie it to a constant-high (inactive)
// level, so the peripheral ignores any traffic on the bus until restore_cs().
static void SPI_SLAVE_ISR_ATTR freeze_cs(spi_slave_t *host)
{
    esp_rom_gpio_connect_in_signal(GPIO_MATRIX_CONST_ONE_INPUT, host->cs_in_signal, false);
}
// Use this function instead of cs_initial to avoid overwrite the output config
// This is used in test by internal gpio matrix connections
2022-03-14 18:29:41 -04:00
static inline void SPI_SLAVE_ISR_ATTR restore_cs ( spi_slave_t * host )
2018-09-06 02:47:45 -04:00
{
2022-11-27 21:22:25 -05:00
if ( host - > cs_iomux ) {
2023-01-03 22:24:54 -05:00
gpio_ll_iomux_in ( GPIO_HAL_GET_HW ( GPIO_PORT_0 ) , host - > cfg . spics_io_num , host - > cs_in_signal ) ;
2018-09-06 02:47:45 -04:00
} else {
2023-01-03 22:24:54 -05:00
esp_rom_gpio_connect_in_signal ( host - > cfg . spics_io_num , host - > cs_in_signal , false ) ;
2018-09-06 02:47:45 -04:00
}
}
2023-01-09 03:18:24 -05:00
# if (SOC_CPU_CORES_NUM > 1) && (!CONFIG_FREERTOS_UNICORE)
typedef struct {
spi_slave_t * host ;
esp_err_t * err ;
} spi_ipc_param_t ;
static void ipc_isr_reg_to_core ( void * args )
{
spi_slave_t * host = ( ( spi_ipc_param_t * ) args ) - > host ;
2023-02-20 02:31:40 -05:00
* ( ( spi_ipc_param_t * ) args ) - > err = esp_intr_alloc ( spicommon_irqsource_for_host ( host - > id ) , host - > intr_flags | ESP_INTR_FLAG_INTRDISABLED , spi_intr , ( void * ) host , & host - > intr ) ;
2023-01-09 03:18:24 -05:00
}
# endif
2021-02-19 22:03:28 -05:00
/**
 * Install the SPI slave driver on the given host peripheral.
 *
 * @param host          SPI peripheral to use (validated via is_valid_host()).
 * @param bus_config    Bus/IO configuration: pins, max transfer size, intr flags.
 * @param slave_config  Slave interface configuration: mode, CS pin, callbacks, queue size.
 * @param dma_chan      DMA channel selection (SPI_DMA_DISABLED / specific channel / SPI_DMA_CH_AUTO).
 * @return ESP_OK on success; ESP_ERR_INVALID_ARG, ESP_ERR_INVALID_STATE (host busy)
 *         or ESP_ERR_NO_MEM on failure.
 *
 * On any failure after the peripheral was claimed, all partially-acquired
 * resources are released via the `cleanup` path.
 */
esp_err_t spi_slave_initialize(spi_host_device_t host, const spi_bus_config_t *bus_config, const spi_slave_interface_config_t *slave_config, spi_dma_chan_t dma_chan)
{
    bool spi_chan_claimed;
    uint32_t actual_tx_dma_chan = 0;
    uint32_t actual_rx_dma_chan = 0;
    esp_err_t ret = ESP_OK;
    esp_err_t err;
    SPI_CHECK(is_valid_host(host), "invalid host", ESP_ERR_INVALID_ARG);
#ifdef CONFIG_IDF_TARGET_ESP32
    SPI_CHECK(dma_chan >= SPI_DMA_DISABLED && dma_chan <= SPI_DMA_CH_AUTO, "invalid dma channel", ESP_ERR_INVALID_ARG);
#elif CONFIG_IDF_TARGET_ESP32S2
    SPI_CHECK(dma_chan == SPI_DMA_DISABLED || dma_chan == (int)host || dma_chan == SPI_DMA_CH_AUTO, "invalid dma channel", ESP_ERR_INVALID_ARG);
#elif SOC_GDMA_SUPPORTED
    SPI_CHECK(dma_chan == SPI_DMA_DISABLED || dma_chan == SPI_DMA_CH_AUTO, "invalid dma channel, chip only support spi dma channel auto-alloc", ESP_ERR_INVALID_ARG);
#endif
    SPI_CHECK((bus_config->intr_flags & (ESP_INTR_FLAG_HIGH | ESP_INTR_FLAG_EDGE | ESP_INTR_FLAG_INTRDISABLED)) == 0, "intr flag not allowed", ESP_ERR_INVALID_ARG);
#ifndef CONFIG_SPI_SLAVE_ISR_IN_IRAM
    SPI_CHECK((bus_config->intr_flags & ESP_INTR_FLAG_IRAM) == 0, "ESP_INTR_FLAG_IRAM should be disabled when CONFIG_SPI_SLAVE_ISR_IN_IRAM is not set.", ESP_ERR_INVALID_ARG);
#endif
    SPI_CHECK(slave_config->spics_io_num < 0 || GPIO_IS_VALID_GPIO(slave_config->spics_io_num), "spics pin invalid", ESP_ERR_INVALID_ARG);

    //Check post_trans_cb status when `SPI_SLAVE_NO_RETURN_RESULT` flag is set.
    if (slave_config->flags & SPI_SLAVE_NO_RETURN_RESULT) {
        SPI_CHECK(slave_config->post_trans_cb != NULL, "use feature flag 'SPI_SLAVE_NO_RETURN_RESULT' but no post_trans_cb function sets", ESP_ERR_INVALID_ARG);
    }

    spi_chan_claimed = spicommon_periph_claim(host, "spi slave");
    SPI_CHECK(spi_chan_claimed, "host already in use", ESP_ERR_INVALID_STATE);

    spihost[host] = malloc(sizeof(spi_slave_t));
    if (spihost[host] == NULL) {
        ret = ESP_ERR_NO_MEM;
        goto cleanup;
    }
    memset(spihost[host], 0, sizeof(spi_slave_t));
    //Keep copies of both configs: the interface config is consulted at runtime,
    //the bus config is needed again to free the IO in spi_slave_free().
    memcpy(&spihost[host]->cfg, slave_config, sizeof(spi_slave_interface_config_t));
    memcpy(&spihost[host]->bus_config, bus_config, sizeof(spi_bus_config_t));
    spihost[host]->id = host;
    spi_slave_hal_context_t *hal = &spihost[host]->hal;

    spihost[host]->dma_enabled = (dma_chan != SPI_DMA_DISABLED);
    if (spihost[host]->dma_enabled) {
        ret = spicommon_dma_chan_alloc(host, dma_chan, &actual_tx_dma_chan, &actual_rx_dma_chan);
        if (ret != ESP_OK) {
            goto cleanup;
        }
        spihost[host]->tx_dma_chan = actual_tx_dma_chan;
        spihost[host]->rx_dma_chan = actual_rx_dma_chan;

        //See how many dma descriptors we need and allocate them
        int dma_desc_ct = (bus_config->max_transfer_sz + SPI_MAX_DMA_LEN - 1) / SPI_MAX_DMA_LEN;
        if (dma_desc_ct == 0) {
            dma_desc_ct = 1;    //default to 4k when max is not given
        }
        spihost[host]->max_transfer_sz = dma_desc_ct * SPI_MAX_DMA_LEN;

#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
        esp_cache_get_alignment(ESP_CACHE_MALLOC_FLAG_DMA, (size_t *)&spihost[host]->internal_mem_align_size);
#else
        spihost[host]->internal_mem_align_size = 4;
#endif
        hal->dmadesc_tx = heap_caps_aligned_alloc(DMA_DESC_MEM_ALIGN_SIZE, sizeof(spi_dma_desc_t) * dma_desc_ct, MALLOC_CAP_DMA);
        hal->dmadesc_rx = heap_caps_aligned_alloc(DMA_DESC_MEM_ALIGN_SIZE, sizeof(spi_dma_desc_t) * dma_desc_ct, MALLOC_CAP_DMA);
        if (!hal->dmadesc_tx || !hal->dmadesc_rx) {
            ret = ESP_ERR_NO_MEM;
            goto cleanup;
        }
        hal->dmadesc_n = dma_desc_ct;
    } else {
        //We're limited to non-DMA transfers: the SPI work registers can hold 64 bytes at most.
        spihost[host]->max_transfer_sz = SOC_SPI_MAXIMUM_BUFFER_SIZE;
    }

    err = spicommon_bus_initialize_io(host, bus_config, SPICOMMON_BUSFLAG_SLAVE | bus_config->flags, &spihost[host]->flags);
    if (err != ESP_OK) {
        ret = err;
        goto cleanup;
    }
    if (slave_config->spics_io_num >= 0) {
        spicommon_cs_initialize(host, slave_config->spics_io_num, 0, !bus_is_iomux(spihost[host]));
        // check and save where cs line really route through
        spihost[host]->cs_iomux = (slave_config->spics_io_num == spi_periph_signal[host].spics0_iomux_pin) && bus_is_iomux(spihost[host]);
        spihost[host]->cs_in_signal = spi_periph_signal[host].spics_in;
    }

    // The slave DMA suffers from unexpected transactions. Forbid reading if DMA is enabled by disabling the CS line.
    if (spihost[host]->dma_enabled) {
        freeze_cs(spihost[host]);
    }

#ifdef CONFIG_PM_ENABLE
    err = esp_pm_lock_create(ESP_PM_APB_FREQ_MAX, 0, "spi_slave",
                             &spihost[host]->pm_lock);
    if (err != ESP_OK) {
        ret = err;
        goto cleanup;
    }
    // Lock APB frequency while SPI slave driver is in use
    esp_pm_lock_acquire(spihost[host]->pm_lock);
#endif //CONFIG_PM_ENABLE

    //Create queues
    spihost[host]->trans_queue = xQueueCreate(slave_config->queue_size, sizeof(spi_slave_trans_priv_t));
    if (!spihost[host]->trans_queue) {
        ret = ESP_ERR_NO_MEM;
        goto cleanup;
    }
    if (!(slave_config->flags & SPI_SLAVE_NO_RETURN_RESULT)) {
        spihost[host]->ret_queue = xQueueCreate(slave_config->queue_size, sizeof(spi_slave_trans_priv_t));
        if (!spihost[host]->ret_queue) {
            ret = ESP_ERR_NO_MEM;
            goto cleanup;
        }
    }

#if (SOC_CPU_CORES_NUM > 1) && (!CONFIG_FREERTOS_UNICORE)
    if (bus_config->isr_cpu_id > ESP_INTR_CPU_AFFINITY_AUTO) {
        spihost[host]->intr_flags = bus_config->intr_flags;
        //BUGFIX: this was a bare SPI_CHECK which returned without releasing the
        //peripheral claim, context, DMA channels or queues acquired above;
        //route the failure through `cleanup` instead.
        ESP_GOTO_ON_FALSE(bus_config->isr_cpu_id <= ESP_INTR_CPU_AFFINITY_1, ESP_ERR_INVALID_ARG, cleanup, SPI_TAG, "invalid core id");
        spi_ipc_param_t ipc_args = {
            .host = spihost[host],
            .err = &err,
        };
        //Register the interrupt on the requested core so it gets that core's affinity.
        esp_ipc_call_blocking(ESP_INTR_CPU_AFFINITY_TO_CORE_ID(bus_config->isr_cpu_id), ipc_isr_reg_to_core, (void *)&ipc_args);
    } else
#endif
    {
        err = esp_intr_alloc(spicommon_irqsource_for_host(host), bus_config->intr_flags | ESP_INTR_FLAG_INTRDISABLED, spi_intr, (void *)spihost[host], &spihost[host]->intr);
    }
    if (err != ESP_OK) {
        ret = err;
        goto cleanup;
    }

    //assign the SPI, RX DMA and TX DMA peripheral registers beginning address
    spi_slave_hal_config_t hal_config = {
        .host_id = host,
        .dma_in = SPI_LL_GET_HW(host),
        .dma_out = SPI_LL_GET_HW(host)
    };
    spi_slave_hal_init(hal, &hal_config);

    hal->rx_lsbfirst = (slave_config->flags & SPI_SLAVE_RXBIT_LSBFIRST) ? 1 : 0;
    hal->tx_lsbfirst = (slave_config->flags & SPI_SLAVE_TXBIT_LSBFIRST) ? 1 : 0;
    hal->mode = slave_config->mode;
    hal->use_dma = spihost[host]->dma_enabled;
    hal->tx_dma_chan = actual_tx_dma_chan;
    hal->rx_dma_chan = actual_rx_dma_chan;

    spi_slave_hal_setup_device(hal);
    return ESP_OK;

cleanup:
    if (spihost[host]) {
        if (spihost[host]->trans_queue) {
            vQueueDelete(spihost[host]->trans_queue);
        }
        if (spihost[host]->ret_queue) {
            vQueueDelete(spihost[host]->ret_queue);
        }
#ifdef CONFIG_PM_ENABLE
        if (spihost[host]->pm_lock) {
            esp_pm_lock_release(spihost[host]->pm_lock);
            esp_pm_lock_delete(spihost[host]->pm_lock);
        }
#endif
        //BUGFIX: the HAL de-init and DMA teardown below dereference
        //spihost[host]; previously they ran unconditionally and crashed with a
        //NULL dereference when the context allocation itself had failed.
        spi_slave_hal_deinit(&spihost[host]->hal);
        if (spihost[host]->dma_enabled) {
            spicommon_dma_chan_free(host);
            free(spihost[host]->hal.dmadesc_tx);
            free(spihost[host]->hal.dmadesc_rx);
        }
        free(spihost[host]);
        spihost[host] = NULL;
    }
    spicommon_periph_free(host);
    return ret;
}
/**
 * Uninstall the SPI slave driver from `host`: delete queues, free DMA channels
 * and descriptors, release IO, the interrupt, the PM lock, the context, and
 * finally the peripheral claim.
 *
 * @return ESP_OK, or ESP_ERR_INVALID_ARG when `host` is invalid or not installed.
 */
esp_err_t spi_slave_free(spi_host_device_t host)
{
    SPI_CHECK(is_valid_host(host), "invalid host", ESP_ERR_INVALID_ARG);
    SPI_CHECK(spihost[host], "host not slave", ESP_ERR_INVALID_ARG);

    spi_slave_t *slave = spihost[host];
    if (slave->trans_queue) {
        vQueueDelete(slave->trans_queue);
    }
    if (slave->ret_queue) {
        vQueueDelete(slave->ret_queue);
    }
    if (slave->dma_enabled) {
        spicommon_dma_chan_free(host);
        free(slave->hal.dmadesc_tx);
        free(slave->hal.dmadesc_rx);
    }
    spicommon_bus_free_io_cfg(&slave->bus_config);
    esp_intr_free(slave->intr);
#ifdef CONFIG_PM_ENABLE
    esp_pm_lock_release(slave->pm_lock);
    esp_pm_lock_delete(slave->pm_lock);
#endif //CONFIG_PM_ENABLE
    free(slave);
    spihost[host] = NULL;
    spicommon_periph_free(host);
    return ESP_OK;
}
2023-09-01 05:51:54 -04:00
// Undo the buffer juggling done by spi_slave_setup_priv_trans() once a
// transaction has finished: copy received data back into the user's rx buffer
// and release any driver-allocated bounce buffers. A bounce buffer was used
// iff the private pointer differs from the one in the user's transaction.
static void SPI_SLAVE_ISR_ATTR spi_slave_uninstall_priv_trans(spi_host_device_t host, spi_slave_trans_priv_t *priv_trans)
{
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
    spi_slave_transaction_t *trans = (spi_slave_transaction_t *)priv_trans->trans;
    if (!spihost[host]->dma_enabled) {
        return;
    }
    if (trans->tx_buffer && (priv_trans->tx_buffer != trans->tx_buffer)) {
        free(priv_trans->tx_buffer);
    }
    if (trans->rx_buffer && (priv_trans->rx_buffer != trans->rx_buffer)) {
        //Hand the received bytes back to the caller before releasing the bounce buffer.
        memcpy(trans->rx_buffer, priv_trans->rx_buffer, (trans->length + 7) / 8);
        free(priv_trans->rx_buffer);
    }
#endif  //SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
}
// Prepare DMA-usable buffers for a transaction before it is queued.
// On targets where internal memory sits behind the L1 cache, TX/RX buffers
// that are not DMA-capable or whose address/length is not aligned to the
// cache requirement are replaced by driver-allocated bounce buffers (this
// requires the SPI_SLAVE_TRANS_DMA_BUFFER_ALIGN_AUTO flag), and the TX data
// is written back from cache to memory so the DMA engine sees it.
// Returns ESP_OK, ESP_ERR_INVALID_ARG (bad alignment without the AUTO flag),
// ESP_ERR_NO_MEM, or ESP_ERR_INVALID_STATE (cache writeback failure).
static esp_err_t SPI_SLAVE_ISR_ATTR spi_slave_setup_priv_trans(spi_host_device_t host, spi_slave_trans_priv_t *priv_trans)
{
    spi_slave_transaction_t *trans = (spi_slave_transaction_t *)priv_trans->trans;
    priv_trans->tx_buffer = (void *)trans->tx_buffer;
    priv_trans->rx_buffer = trans->rx_buffer;

#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
    uint16_t alignment = spihost[host]->internal_mem_align_size;
    uint32_t buffer_byte_len = (trans->length + 7) / 8;

    if (spihost[host]->dma_enabled && trans->tx_buffer) {
        if ((!esp_ptr_dma_capable(trans->tx_buffer) || ((((uint32_t)trans->tx_buffer) | buffer_byte_len) & (alignment - 1)))) {
            ESP_RETURN_ON_FALSE_ISR(trans->flags & SPI_SLAVE_TRANS_DMA_BUFFER_ALIGN_AUTO, ESP_ERR_INVALID_ARG, SPI_TAG, "TX buffer addr&len not align to %d, or not dma_capable", alignment);
            //if txbuf in the desc not DMA-capable, or not align to "alignment", malloc a new one
            ESP_EARLY_LOGD(SPI_TAG, "Allocate TX buffer for DMA");
            buffer_byte_len = (buffer_byte_len + alignment - 1) & (~(alignment - 1));   // up align to "alignment"
            uint32_t *temp = heap_caps_aligned_alloc(alignment, buffer_byte_len, MALLOC_CAP_DMA);
            if (temp == NULL) {
                return ESP_ERR_NO_MEM;
            }
            memcpy(temp, trans->tx_buffer, (trans->length + 7) / 8);
            priv_trans->tx_buffer = temp;
        }
        //Write the TX data back from cache to memory so the DMA engine reads the real bytes.
        esp_err_t ret = esp_cache_msync((void *)priv_trans->tx_buffer, buffer_byte_len, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
        ESP_RETURN_ON_FALSE_ISR(ESP_OK == ret, ESP_ERR_INVALID_STATE, SPI_TAG, "mem sync c2m(writeback) fail");
    }

    if (spihost[host]->dma_enabled && trans->rx_buffer && (!esp_ptr_dma_capable(trans->rx_buffer) || ((((uint32_t)trans->rx_buffer) | (trans->length + 7) / 8) & (alignment - 1)))) {
        ESP_RETURN_ON_FALSE_ISR(trans->flags & SPI_SLAVE_TRANS_DMA_BUFFER_ALIGN_AUTO, ESP_ERR_INVALID_ARG, SPI_TAG, "RX buffer addr&len not align to %d, or not dma_capable", alignment);
        //if rxbuf in the desc not DMA-capable, or not align to "alignment", malloc a new one
        ESP_EARLY_LOGD(SPI_TAG, "Allocate RX buffer for DMA");
        buffer_byte_len = (buffer_byte_len + alignment - 1) & (~(alignment - 1));   // up align to "alignment"
        priv_trans->rx_buffer = heap_caps_aligned_alloc(alignment, buffer_byte_len, MALLOC_CAP_DMA);
        if (priv_trans->rx_buffer == NULL) {
            //BUGFIX: only free a TX bounce buffer that this function allocated.
            //The old code called free(priv_trans->tx_buffer) unconditionally,
            //which could free the caller's own tx_buffer (matching the pointer
            //comparison spi_slave_uninstall_priv_trans uses).
            if (priv_trans->tx_buffer != trans->tx_buffer) {
                free(priv_trans->tx_buffer);
            }
            return ESP_ERR_NO_MEM;
        }
    }
#endif  //SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
    return ESP_OK;
}
/**
 * Queue a transaction for the ISR to pick up. Validates buffer DMA capability
 * and transfer length, installs the private transaction wrapper, then enables
 * the SPI interrupt so the ISR can load the transaction into hardware.
 *
 * @return ESP_OK, ESP_ERR_INVALID_ARG, ESP_ERR_NO_MEM (bounce-buffer alloc
 *         failure), or ESP_ERR_TIMEOUT when the queue stays full.
 */
esp_err_t SPI_SLAVE_ATTR spi_slave_queue_trans(spi_host_device_t host, const spi_slave_transaction_t *trans_desc, TickType_t ticks_to_wait)
{
    SPI_CHECK(is_valid_host(host), "invalid host", ESP_ERR_INVALID_ARG);
    SPI_CHECK(spihost[host], "host not slave", ESP_ERR_INVALID_ARG);
    SPI_CHECK(spihost[host]->dma_enabled == 0 || trans_desc->tx_buffer == NULL || esp_ptr_dma_capable(trans_desc->tx_buffer),
              "txdata not in DMA-capable memory", ESP_ERR_INVALID_ARG);
    SPI_CHECK(spihost[host]->dma_enabled == 0 || trans_desc->rx_buffer == NULL ||
              (esp_ptr_dma_capable(trans_desc->rx_buffer) && esp_ptr_word_aligned(trans_desc->rx_buffer) &&
               (trans_desc->length % 4 == 0)),
              "rxdata not in DMA-capable memory or not WORD aligned", ESP_ERR_INVALID_ARG);
    SPI_CHECK(trans_desc->length <= spihost[host]->max_transfer_sz * 8, "data transfer > host maximum", ESP_ERR_INVALID_ARG);

    spi_slave_trans_priv_t priv_trans = { .trans = (spi_slave_transaction_t *)trans_desc };
    SPI_CHECK(ESP_OK == spi_slave_setup_priv_trans(host, &priv_trans), "slave setup priv_trans failed", ESP_ERR_NO_MEM);

    BaseType_t queued = xQueueSend(spihost[host]->trans_queue, (void *)&priv_trans, ticks_to_wait);
    if (!queued) {
        return ESP_ERR_TIMEOUT;
    }
    esp_intr_enable(spihost[host]->intr);
    return ESP_OK;
}
2022-07-21 09:23:31 -04:00
/**
 * @note
 * This API is used to reset SPI Slave transaction queue. After calling this function:
 * - The SPI Slave transaction queue will be reset.
 *
 * Therefore, this API shouldn't be called when the corresponding SPI Master is doing an SPI transaction.
 *
 * @note
 * We don't actually need to enter a critical section here.
 * SPI Slave ISR will only get triggered when its corresponding SPI Master's transaction is done.
 * As we don't expect this function to be called when its corresponding SPI Master is doing an SPI transaction,
 * so concurrent call to these registers won't happen
 *
 */
esp_err_t SPI_SLAVE_ATTR spi_slave_queue_reset(spi_host_device_t host)
{
    SPI_CHECK(is_valid_host(host), "invalid host", ESP_ERR_INVALID_ARG);
    SPI_CHECK(spihost[host], "host not slave", ESP_ERR_INVALID_ARG);

    esp_intr_disable(spihost[host]->intr);
    spi_ll_set_int_stat(spihost[host]->hal.hw);

    //Drain every queued transaction, releasing any driver-owned bounce buffers.
    spi_slave_trans_priv_t dropped;
    while (uxQueueMessagesWaiting(spihost[host]->trans_queue) > 0) {
        xQueueReceive(spihost[host]->trans_queue, &dropped, 0);
        spi_slave_uninstall_priv_trans(host, &dropped);
    }
    spihost[host]->cur_trans.trans = NULL;
    return ESP_OK;
}
2017-03-31 03:05:25 -04:00
2023-09-01 05:51:54 -04:00
/**
 * ISR-context variant of spi_slave_queue_trans().
 *
 * No bounce buffer can be allocated here, so when DMA is enabled both buffers
 * must already be DMA-capable and (on L1-cache targets) aligned in address and
 * length to the cache alignment requirement.
 *
 * @return ESP_OK; ESP_ERR_INVALID_ARG on bad host/buffers/length;
 *         ESP_ERR_NO_MEM when the transaction queue is full.
 */
esp_err_t SPI_SLAVE_ISR_ATTR spi_slave_queue_trans_isr(spi_host_device_t host, const spi_slave_transaction_t *trans_desc)
{
    BaseType_t r;
    BaseType_t do_yield = pdFALSE;
    ESP_RETURN_ON_FALSE_ISR(is_valid_host(host), ESP_ERR_INVALID_ARG, SPI_TAG, "invalid host");
    ESP_RETURN_ON_FALSE_ISR(spihost[host], ESP_ERR_INVALID_ARG, SPI_TAG, "host not slave");
    ESP_RETURN_ON_FALSE_ISR(trans_desc->length <= spihost[host]->max_transfer_sz * 8, ESP_ERR_INVALID_ARG, SPI_TAG, "data transfer > host maximum");
    if (spihost[host]->dma_enabled) {
        //Both buffers must be DMA-capable and aligned (addr and byte length) to the cache requirement.
        uint16_t alignment = spihost[host]->internal_mem_align_size;
        uint32_t buffer_byte_len = (trans_desc->length + 7) / 8;
        ESP_RETURN_ON_FALSE_ISR( \
            (trans_desc->tx_buffer && \
             esp_ptr_dma_capable(trans_desc->tx_buffer) && \
             ((((uint32_t)trans_desc->tx_buffer) | buffer_byte_len) & (alignment - 1)) == 0), \
            ESP_ERR_INVALID_ARG, SPI_TAG, "txdata addr & len not align to %d bytes or not dma_capable", alignment \
        );
        ESP_RETURN_ON_FALSE_ISR( \
            (trans_desc->rx_buffer && \
             esp_ptr_dma_capable(trans_desc->rx_buffer) && \
             ((((uint32_t)trans_desc->rx_buffer) | buffer_byte_len) & (alignment - 1)) == 0), \
            ESP_ERR_INVALID_ARG, SPI_TAG, "rxdata addr & len not align to %d bytes or not dma_capable", alignment \
        );
    }

    //Buffers are used as-is: no re-allocation is possible in ISR context.
    spi_slave_trans_priv_t priv_trans = {
        .trans = (spi_slave_transaction_t *)trans_desc,
        .tx_buffer = (void *)trans_desc->tx_buffer,
        .rx_buffer = trans_desc->rx_buffer,
    };
    r = xQueueSendFromISR(spihost[host]->trans_queue, (void *)&priv_trans, &do_yield);
    if (!r) {
        //Queue full; nothing was allocated so nothing to free.
        return ESP_ERR_NO_MEM;
    }
    if (do_yield) {
        portYIELD_FROM_ISR();
    }
    return ESP_OK;
}
2023-09-01 05:51:54 -04:00
/**
 * ISR-context variant of spi_slave_queue_reset(): drain the transaction queue
 * and forget the transaction currently loaded in hardware.
 */
esp_err_t SPI_SLAVE_ISR_ATTR spi_slave_queue_reset_isr(spi_host_device_t host)
{
    ESP_RETURN_ON_FALSE_ISR(is_valid_host(host), ESP_ERR_INVALID_ARG, SPI_TAG, "invalid host");
    ESP_RETURN_ON_FALSE_ISR(spihost[host], ESP_ERR_INVALID_ARG, SPI_TAG, "host not slave");

    spi_slave_trans_priv_t dropped;
    BaseType_t wake_task = pdFALSE;
    while (xQueueIsQueueEmptyFromISR(spihost[host]->trans_queue) == pdFALSE) {
        xQueueReceiveFromISR(spihost[host]->trans_queue, &dropped, &wake_task);
        spi_slave_uninstall_priv_trans(host, &dropped);
    }
    if (wake_task) {
        portYIELD_FROM_ISR();
    }

    spihost[host]->cur_trans.trans = NULL;
    return ESP_OK;
}
2017-03-31 03:05:25 -04:00
2018-08-20 06:30:40 -04:00
/**
 * Block until a queued transaction completes, release any bounce buffers, and
 * hand the original transaction descriptor back to the caller.
 *
 * @return ESP_OK, ESP_ERR_INVALID_ARG, ESP_ERR_NOT_SUPPORTED (driver installed
 *         with SPI_SLAVE_NO_RETURN_RESULT), or ESP_ERR_TIMEOUT.
 */
esp_err_t SPI_SLAVE_ATTR spi_slave_get_trans_result(spi_host_device_t host, spi_slave_transaction_t **trans_desc, TickType_t ticks_to_wait)
{
    SPI_CHECK(is_valid_host(host), "invalid host", ESP_ERR_INVALID_ARG);
    SPI_CHECK(spihost[host], "host not slave", ESP_ERR_INVALID_ARG);
    //if SPI_SLAVE_NO_RETURN_RESULT is set, ret_queue will always be empty
    SPI_CHECK(!(spihost[host]->cfg.flags & SPI_SLAVE_NO_RETURN_RESULT), "API not Supported!", ESP_ERR_NOT_SUPPORTED);

    spi_slave_trans_priv_t done_trans;
    BaseType_t received = xQueueReceive(spihost[host]->ret_queue, (void *)&done_trans, ticks_to_wait);
    if (!received) {
        return ESP_ERR_TIMEOUT;
    }
    spi_slave_uninstall_priv_trans(host, &done_trans);
    *trans_desc = done_trans.trans;
    return ESP_OK;
}
2018-08-20 06:30:40 -04:00
/**
 * Convenience wrapper: queue one transaction and wait for its result.
 * Both the queueing and the wait use the same `ticks_to_wait` timeout.
 */
esp_err_t SPI_SLAVE_ATTR spi_slave_transmit(spi_host_device_t host, spi_slave_transaction_t *trans_desc, TickType_t ticks_to_wait)
{
    spi_slave_transaction_t *completed;
    //ToDo: check if any spi transfers in flight
    esp_err_t err = spi_slave_queue_trans(host, trans_desc, ticks_to_wait);
    if (err != ESP_OK) {
        return err;
    }
    err = spi_slave_get_trans_result(host, &completed, ticks_to_wait);
    if (err != ESP_OK) {
        return err;
    }
    //The single queued transaction must be the one that finished.
    assert(completed == trans_desc);
    return ESP_OK;
}
2022-09-15 06:26:12 -04:00
# if CONFIG_IDF_TARGET_ESP32
2018-08-20 06:30:40 -04:00
static void SPI_SLAVE_ISR_ATTR spi_slave_restart_after_dmareset ( void * arg )
2017-03-31 03:05:25 -04:00
{
2017-04-26 23:24:44 -04:00
spi_slave_t * host = ( spi_slave_t * ) arg ;
2017-03-31 03:05:25 -04:00
esp_intr_enable ( host - > intr ) ;
}
2022-09-15 06:26:12 -04:00
# endif //#if CONFIG_IDF_TARGET_ESP32
2017-03-31 03:05:25 -04:00
//This is run in interrupt context and apart from initialization and destruction, this is the only code
//touching the host (=spihost[x]) variable. The rest of the data arrives in queues. That is why there are
//no muxes in this code.
static void SPI_SLAVE_ISR_ATTR spi_intr(void *arg)
{
    BaseType_t r;
    BaseType_t do_yield = pdFALSE;
    spi_slave_t *host = (spi_slave_t *)arg;
    spi_slave_hal_context_t *hal = &host->hal;

    assert(spi_slave_hal_usr_is_done(hal));

    bool use_dma = host->dma_enabled;
    //Phase 1: finish the transaction that was loaded in hardware, if any.
    if (host->cur_trans.trans) {
        // When DMA is enabled, the slave rx dma suffers from unexpected transactions. Forbid reading until transaction ready.
        if (use_dma) freeze_cs(host);

        spi_slave_hal_store_result(hal);
        //Report how many bits actually moved on the bus.
        host->cur_trans.trans->trans_len = spi_slave_hal_get_rcv_bitlen(hal);

#if CONFIG_IDF_TARGET_ESP32
        //This workaround is only for esp32
        if (spi_slave_hal_dma_need_reset(hal)) {
            //On ESP32, actual_tx_dma_chan and actual_rx_dma_chan are always same
            spicommon_dmaworkaround_req_reset(host->tx_dma_chan, spi_slave_restart_after_dmareset, host);
        }
#endif  //#if CONFIG_IDF_TARGET_ESP32

#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE //invalidate here to let user access rx data in post_cb if possible
        if (use_dma && host->cur_trans.rx_buffer) {
            uint16_t alignment = host->internal_mem_align_size;
            uint32_t buffer_byte_len = (host->cur_trans.trans->length + 7) / 8;
            buffer_byte_len = (buffer_byte_len + alignment - 1) & (~(alignment - 1));
            // invalidate priv_trans.buffer_to_rcv anyway, only user provide aligned buffer can rcv correct data in post_cb
            esp_err_t ret = esp_cache_msync((void *)host->cur_trans.rx_buffer, buffer_byte_len, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
            assert(ret == ESP_OK);
        }
#endif
        if (host->cfg.post_trans_cb) host->cfg.post_trans_cb(host->cur_trans.trans);

        //Hand the finished transaction to spi_slave_get_trans_result(), unless results are suppressed.
        if (!(host->cfg.flags & SPI_SLAVE_NO_RETURN_RESULT)) {
            xQueueSendFromISR(host->ret_queue, &host->cur_trans, &do_yield);
        }
        host->cur_trans.trans = NULL;
    }

#if CONFIG_IDF_TARGET_ESP32
    //This workaround is only for esp32
    if (use_dma) {
        //On ESP32, actual_tx_dma_chan and actual_rx_dma_chan are always same
        spicommon_dmaworkaround_idle(host->tx_dma_chan);
        if (spicommon_dmaworkaround_reset_in_progress()) {
            //We need to wait for the reset to complete. Disable int (will be re-enabled on reset callback) and exit isr.
            esp_intr_disable(host->intr);
            if (do_yield) portYIELD_FROM_ISR();
            return;
        }
    }
#endif  //#if CONFIG_IDF_TARGET_ESP32

    //Phase 2: load the next queued transaction, if any.
    //Disable interrupt before checking to avoid concurrency issue.
    esp_intr_disable(host->intr);
    spi_slave_trans_priv_t priv_trans;
    //Grab next transaction
    r = xQueueReceiveFromISR(host->trans_queue, &priv_trans, &do_yield);
    if (r) {
        // sanity check
        assert(priv_trans.trans);

        //enable the interrupt again if there is packet to send
        esp_intr_enable(host->intr);

        //We have a transaction. Send it.
        host->cur_trans = priv_trans;
        hal->bitlen = priv_trans.trans->length;
        hal->rx_buffer = priv_trans.rx_buffer;
        hal->tx_buffer = priv_trans.tx_buffer;

#if CONFIG_IDF_TARGET_ESP32
        if (use_dma) {
            //This workaround is only for esp32
            //On ESP32, actual_tx_dma_chan and actual_rx_dma_chan are always same
            spicommon_dmaworkaround_transfer_active(host->tx_dma_chan);
        }
#endif  //#if CONFIG_IDF_TARGET_ESP32

        spi_slave_hal_prepare_data(hal);

        //The slave rx dma get disturbed by unexpected transaction. Only connect the CS when slave is ready.
        if (use_dma) {
            restore_cs(host);
        }

        //Kick off transfer
        spi_slave_hal_user_start(hal);
        if (host->cfg.post_setup_cb) host->cfg.post_setup_cb(priv_trans.trans);
    }
    if (do_yield) portYIELD_FROM_ISR();
}