// Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <string.h>
#include <sys/param.h>
#include "esp_log.h"
#include "esp_memory_utils.h"      // for esp_ptr_dma_capable(); provided by "soc/soc_memory_layout.h" on older IDF releases
#include "driver/spi_master.h"
#include "driver/periph_ctrl.h"
#include "hal/spi_types.h"
#include "hal/spi_ll.h"
#include "essl_internal.h"
#include "essl_spi.h"

#define ESSL_SPI_CHECK(cond, warn, ret)  do{if(!(cond)){ESP_LOGE(TAG, warn); return ret;}} while(0)

/**
 * Initialize the ESSL device function list for SPI with this macro.
 */
#define ESSL_SPI_DEFAULT_DEV_FUNC() (essl_dev_t) {\
        .get_tx_buffer_num = essl_spi_get_tx_buffer_num,\
        .update_tx_buffer_num = essl_spi_update_tx_buffer_num,\
        .get_rx_data_size = essl_spi_get_rx_data_size,\
        .update_rx_data_size = essl_spi_update_rx_data_size,\
        .send_packet = essl_spi_send_packet,\
        .get_packet = essl_spi_get_packet,\
        .write_reg = essl_spi_write_reg,\
        .read_reg = essl_spi_read_reg,\
    }

static const char TAG[] = "essl_spi";

typedef struct {
    spi_device_handle_t spi;                // SPI device handle
    /* Master TX, Slave RX */
    struct {
        size_t sent_buf_num;                // Number of TX buffers that have been sent out by the master
        size_t slave_rx_buf_num;            // Number of RX buffers loaded by the slave
        uint16_t tx_buffer_size;            /* Buffer size for the Master-TX / Slave-RX direction.
                                             * Data shorter than this size still occupies one whole buffer.
                                             * E.g. 10 bytes of data cost 2 buffers if the size is 8 bytes per buffer. */
        uint8_t tx_sync_reg;                // Pre-negotiated register ID for Master-TX / Slave-RX synchronization. 1 word (4 bytes) is reserved for the synchronization.
    } master_out;
    /* Master RX, Slave TX */
    struct {
        size_t received_bytes;              // Number of RX bytes that have been received by the master
        size_t slave_tx_bytes;              // Number of TX bytes that have been loaded by the slave
        uint8_t rx_sync_reg;                // Pre-negotiated register ID for Master-RX / Slave-TX synchronization. 1 word (4 bytes) is reserved for the synchronization.
    } master_in;
} essl_spi_context_t;
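
/* Worked example for the accounting above (numbers are illustrative): with tx_buffer_size = 256,
 * a 600-byte packet occupies ceil(600 / 256) = 3 slave RX buffers; essl_spi_send_packet() computes
 * this as (size + tx_buffer_size - 1) / tx_buffer_size. */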
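
// Map the master-side transaction flags (1/2/4 data lines and the chosen address line width)
// onto the half-duplex command code expected by the SPI slave HD peripheral.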
static uint16_t get_hd_command(spi_command_t cmd_t, uint32_t flags)
{
    spi_line_mode_t line_mode = {
        .cmd_lines = 1,
    };

    if (flags & SPI_TRANS_MODE_DIO) {
        line_mode.data_lines = 2;
        if (flags & SPI_TRANS_MODE_DIOQIO_ADDR) {
            line_mode.addr_lines = 2;
        } else {
            line_mode.addr_lines = 1;
        }
    } else if (flags & SPI_TRANS_MODE_QIO) {
        line_mode.data_lines = 4;
        if (flags & SPI_TRANS_MODE_DIOQIO_ADDR) {
            line_mode.addr_lines = 4;
        } else {
            line_mode.addr_lines = 1;
        }
    } else {
        line_mode.data_lines = 1;
        line_mode.addr_lines = 1;
    }

    return spi_ll_get_slave_hd_command(cmd_t, line_mode);
}
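
// Number of dummy bits the slave requires for the selected data line width.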
static int get_hd_dummy_bits(uint32_t flags)
{
    spi_line_mode_t line_mode = {};

    if (flags & SPI_TRANS_MODE_DIO) {
        line_mode.data_lines = 2;
    } else if (flags & SPI_TRANS_MODE_QIO) {
        line_mode.data_lines = 4;
    } else {
        line_mode.data_lines = 1;
    }

    return spi_ll_get_slave_hd_dummy_bits(line_mode);
}
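
// Read `len` bytes from the slave's shared registers starting at `addr`; the address wraps
// within the 72-byte shared register area (hence `addr % 72` below).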
esp_err_t essl_spi_rdbuf(spi_device_handle_t spi, uint8_t *out_data, int addr, int len, uint32_t flags)
{
    spi_transaction_ext_t t = {
        .base = {
            .cmd = get_hd_command(SPI_CMD_HD_RDBUF, flags),
            .addr = addr % 72,
            .rxlength = len * 8,
            .rx_buffer = out_data,
            .flags = flags | SPI_TRANS_VARIABLE_DUMMY,
        },
        .dummy_bits = get_hd_dummy_bits(flags),
    };

    return spi_device_transmit(spi, (spi_transaction_t *)&t);
}

esp_err_t essl_spi_rdbuf_polling(spi_device_handle_t spi, uint8_t *out_data, int addr, int len, uint32_t flags)
{
    spi_transaction_ext_t t = {
        .base = {
            .cmd = get_hd_command(SPI_CMD_HD_RDBUF, flags),
            .addr = addr % 72,
            .rxlength = len * 8,
            .rx_buffer = out_data,
            .flags = flags | SPI_TRANS_VARIABLE_DUMMY,
        },
        .dummy_bits = get_hd_dummy_bits(flags),
    };

    return spi_device_polling_transmit(spi, (spi_transaction_t *)&t);
}

esp_err_t essl_spi_wrbuf(spi_device_handle_t spi, const uint8_t *data, int addr, int len, uint32_t flags)
{
    spi_transaction_ext_t t = {
        .base = {
            .cmd = get_hd_command(SPI_CMD_HD_WRBUF, flags),
            .addr = addr % 72,
            .length = len * 8,
            .tx_buffer = data,
            .flags = flags | SPI_TRANS_VARIABLE_DUMMY,
        },
        .dummy_bits = get_hd_dummy_bits(flags),
    };

    return spi_device_transmit(spi, (spi_transaction_t *)&t);
}

esp_err_t essl_spi_wrbuf_polling(spi_device_handle_t spi, const uint8_t *data, int addr, int len, uint32_t flags)
{
    spi_transaction_ext_t t = {
        .base = {
            .cmd = get_hd_command(SPI_CMD_HD_WRBUF, flags),
            .addr = addr % 72,
            .length = len * 8,
            .tx_buffer = data,
            .flags = flags | SPI_TRANS_VARIABLE_DUMMY,
        },
        .dummy_bits = get_hd_dummy_bits(flags),
    };

    return spi_device_polling_transmit(spi, (spi_transaction_t *)&t);
}

esp_err_t essl_spi_rddma_seg(spi_device_handle_t spi, uint8_t *out_data, int seg_len, uint32_t flags)
{
    spi_transaction_ext_t t = {
        .base = {
            .cmd = get_hd_command(SPI_CMD_HD_RDDMA, flags),
            .rxlength = seg_len * 8,
            .rx_buffer = out_data,
            .flags = flags | SPI_TRANS_VARIABLE_DUMMY,
        },
        .dummy_bits = get_hd_dummy_bits(flags),
    };

    return spi_device_transmit(spi, (spi_transaction_t *)&t);
}

esp_err_t essl_spi_rddma_done(spi_device_handle_t spi, uint32_t flags)
{
    spi_transaction_t end_t = {
        .cmd = get_hd_command(SPI_CMD_HD_INT0, flags),
        .flags = flags,
    };

    return spi_device_transmit(spi, &end_t);
}

esp_err_t essl_spi_rddma(spi_device_handle_t spi, uint8_t *out_data, int len, int seg_len, uint32_t flags)
{
    if (!esp_ptr_dma_capable(out_data) || ((intptr_t)out_data % 4) != 0) {
        return ESP_ERR_INVALID_ARG;
    }
    seg_len = (seg_len > 0) ? seg_len : len;

    uint8_t *read_ptr = out_data;
    esp_err_t ret = ESP_OK;
    while (len > 0) {
        int send_len = MIN(seg_len, len);

        ret = essl_spi_rddma_seg(spi, read_ptr, send_len, flags);
        if (ret != ESP_OK) {
            return ret;
        }

        len -= send_len;
        read_ptr += send_len;
    }
    return essl_spi_rddma_done(spi, flags);
}
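
/* Usage sketch (sizes and allocation are illustrative only): read 4 KB from the slave in 1-KB
 * segments; essl_spi_rddma() sends the terminating command automatically afterwards.
 *
 *     uint8_t *buf = heap_caps_malloc(4096, MALLOC_CAP_DMA);   // must be DMA-capable and word-aligned
 *     esp_err_t err = essl_spi_rddma(spi, buf, 4096, 1024, 0);
 */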

esp_err_t essl_spi_wrdma_seg(spi_device_handle_t spi, const uint8_t *data, int seg_len, uint32_t flags)
{
    spi_transaction_ext_t t = {
        .base = {
            .cmd = get_hd_command(SPI_CMD_HD_WRDMA, flags),
            .length = seg_len * 8,
            .tx_buffer = data,
            .flags = flags | SPI_TRANS_VARIABLE_DUMMY,
        },
        .dummy_bits = get_hd_dummy_bits(flags),
    };

    return spi_device_transmit(spi, (spi_transaction_t *)&t);
}

esp_err_t essl_spi_wrdma_done(spi_device_handle_t spi, uint32_t flags)
{
    spi_transaction_t end_t = {
        .cmd = get_hd_command(SPI_CMD_HD_WR_END, flags),
        .flags = flags,
    };

    return spi_device_transmit(spi, &end_t);
}

esp_err_t essl_spi_wrdma(spi_device_handle_t spi, const uint8_t *data, int len, int seg_len, uint32_t flags)
{
    if (!esp_ptr_dma_capable(data)) {
        return ESP_ERR_INVALID_ARG;
    }
    seg_len = (seg_len > 0) ? seg_len : len;

    while (len > 0) {
        int send_len = MIN(seg_len, len);

        esp_err_t ret = essl_spi_wrdma_seg(spi, data, send_len, flags);
        if (ret != ESP_OK) {
            return ret;
        }

        len -= send_len;
        data += send_len;
    }
    return essl_spi_wrdma_done(spi, flags);
}
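
// Trigger general-purpose interrupt `int_n` on the slave (sends the SPI_CMD_HD_INT0 + int_n command).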
esp_err_t essl_spi_int(spi_device_handle_t spi, int int_n, uint32_t flags)
{
    spi_transaction_t end_t = {
        .cmd = get_hd_command(SPI_CMD_HD_INT0 + int_n, flags),
        .flags = flags,
    };

    return spi_device_transmit(spi, &end_t);
}

//------------------------------------ APPEND MODE ----------------------------------//
static uint32_t essl_spi_get_rx_data_size(void *arg);
static esp_err_t essl_spi_update_rx_data_size(void *arg, uint32_t wait_ms);
static uint32_t essl_spi_get_tx_buffer_num(void *arg);
static esp_err_t essl_spi_update_tx_buffer_num(void *arg, uint32_t wait_ms);

esp_err_t essl_spi_init_dev(essl_handle_t *out_handle, const essl_spi_config_t *init_config)
{
    ESSL_SPI_CHECK(init_config->spi, "Check SPI initialization first", ESP_ERR_INVALID_STATE);
    ESSL_SPI_CHECK(init_config->tx_sync_reg <= (SOC_SPI_MAXIMUM_BUFFER_SIZE - 1) * 4, "tx_sync_reg ID larger than internal register length", ESP_ERR_INVALID_ARG);
    ESSL_SPI_CHECK(init_config->rx_sync_reg <= (SOC_SPI_MAXIMUM_BUFFER_SIZE - 1) * 4, "rx_sync_reg ID larger than internal register length", ESP_ERR_INVALID_ARG);
    ESSL_SPI_CHECK(init_config->tx_sync_reg != init_config->rx_sync_reg, "Should use different words of the registers for synchronization", ESP_ERR_INVALID_ARG);

    essl_spi_context_t *context = calloc(1, sizeof(essl_spi_context_t));
    essl_dev_t *dev = calloc(1, sizeof(essl_dev_t));
    if (!context || !dev) {
        free(context);
        free(dev);
        return ESP_ERR_NO_MEM;
    }

    *context = (essl_spi_context_t) {
        .spi = *init_config->spi,
        .master_out.tx_buffer_size = init_config->tx_buf_size,
        .master_out.tx_sync_reg = init_config->tx_sync_reg,
        .master_in.rx_sync_reg = init_config->rx_sync_reg,
    };

    *dev = ESSL_SPI_DEFAULT_DEV_FUNC();
    dev->args = context;

    *out_handle = dev;
    return ESP_OK;
}
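
/* Usage sketch (field values are illustrative): the SPI device must already be added to the bus,
 * and the buffer size and the two sync-word offsets must match what the slave firmware uses.
 *
 *     spi_device_handle_t spi;
 *     // ... spi_bus_initialize() + spi_bus_add_device() with half-duplex flags ...
 *     essl_spi_config_t config = {
 *         .spi = &spi,
 *         .tx_buf_size = 1024,     // slave RX buffer size, must match the slave side
 *         .tx_sync_reg = 0,        // byte offset of the reserved TX sync word
 *         .rx_sync_reg = 4,        // byte offset of the reserved RX sync word (must differ from tx_sync_reg)
 *     };
 *     essl_handle_t handle;
 *     ESP_ERROR_CHECK(essl_spi_init_dev(&handle, &config));
 */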

esp_err_t essl_spi_deinit_dev(essl_handle_t handle)
{
    ESSL_SPI_CHECK(handle, "ESSL SPI is not in use", ESP_ERR_INVALID_STATE);
    free(handle->args);
    free(handle);
    return ESP_OK;
}

void essl_spi_reset_cnt(void *arg)
{
    essl_spi_context_t *ctx = arg;
    if (ctx) {
        ctx->master_out.sent_buf_num = 0;
        ctx->master_in.received_bytes = 0;
    }
}

//------------------------------------ RX ----------------------------------//
esp_err_t essl_spi_read_reg(void *arg, uint8_t addr, uint8_t *out_value, uint32_t wait_ms)
{
    essl_spi_context_t *ctx = arg;
    ESSL_SPI_CHECK(arg, "Check ESSL SPI initialization first", ESP_ERR_INVALID_STATE);
    // The two sync words are reserved; accesses to their byte ranges are rejected.
    uint8_t reserved_1_head = ctx->master_out.tx_sync_reg < ctx->master_in.rx_sync_reg ? ctx->master_out.tx_sync_reg : ctx->master_in.rx_sync_reg;
    uint8_t reserved_1_tail = reserved_1_head + 3;
    uint8_t reserved_2_head = ctx->master_out.tx_sync_reg < ctx->master_in.rx_sync_reg ? ctx->master_in.rx_sync_reg : ctx->master_out.tx_sync_reg;
    uint8_t reserved_2_tail = reserved_2_head + 3;
    ESSL_SPI_CHECK(addr < reserved_1_head || (addr > reserved_1_tail && addr < reserved_2_head) || addr > reserved_2_tail, "Invalid address", ESP_ERR_INVALID_ARG);

    return essl_spi_rdbuf(ctx->spi, out_value, addr, sizeof(uint8_t), 0);
}

static uint32_t essl_spi_get_rx_data_size(void *arg)
{
    essl_spi_context_t *ctx = arg;
    ESP_LOGV(TAG, "slave tx buffer: %d bytes, master has read: %d bytes", ctx->master_in.slave_tx_bytes, ctx->master_in.received_bytes);
    return ctx->master_in.slave_tx_bytes - ctx->master_in.received_bytes;
}

static esp_err_t essl_spi_update_rx_data_size(void *arg, uint32_t wait_ms)
{
    essl_spi_context_t *ctx = arg;
    uint32_t updated_size;
    uint32_t previous_size;
    esp_err_t ret;

    ret = essl_spi_rdbuf_polling(ctx->spi, (uint8_t *)&previous_size, ctx->master_in.rx_sync_reg, sizeof(uint32_t), 0);
    if (ret != ESP_OK) {
        return ret;
    }

    /**
     * Read until the last two readings are identical. Reason:
     * the SPI transaction is carried out byte by byte, so if the slave changes the shared register
     * while the master is reading it, the master may get a torn (inconsistent) value.
     */
    while (1) {
        ret = essl_spi_rdbuf_polling(ctx->spi, (uint8_t *)&updated_size, ctx->master_in.rx_sync_reg, sizeof(uint32_t), 0);
        if (ret != ESP_OK) {
            return ret;
        }

        if (updated_size == previous_size) {
            ctx->master_in.slave_tx_bytes = updated_size;
            ESP_LOGV(TAG, "updated: slave prepared tx buffer is: %d bytes", updated_size);
            return ret;
        }

        previous_size = updated_size;
    }
}

esp_err_t essl_spi_get_packet(void *arg, void *out_data, size_t size, uint32_t wait_ms)
{
    ESSL_SPI_CHECK(arg, "Check ESSL SPI initialization first", ESP_ERR_INVALID_STATE);
    if (!esp_ptr_dma_capable(out_data) || ((intptr_t)out_data % 4) != 0) {
        return ESP_ERR_INVALID_ARG;
    }
    essl_spi_context_t *ctx = arg;
    esp_err_t ret;

    if (essl_spi_get_rx_data_size(arg) < size) {
        /**
         * In practice the slave usually loads data in large chunks, so the slave's TX size is only
         * queried again when the last-updated size is smaller than what the master requires.
         */
        ret = essl_spi_update_rx_data_size(arg, wait_ms);
        if (ret != ESP_OK) {
            return ret;
        }

        // The slave still has not loaded enough data
        if (essl_spi_get_rx_data_size(arg) < size) {
            ESP_LOGV(TAG, "slave buffer: %d is not enough, %d is required", ctx->master_in.slave_tx_bytes, ctx->master_in.received_bytes + size);
            return ESP_ERR_NOT_FOUND;
        }
    }

    ESP_LOGV(TAG, "get_packet: size to read is: %d", size);
    ret = essl_spi_rddma_seg(ctx->spi, out_data, size, 0);
    if (ret != ESP_OK) {
        return ret;
    }
    ctx->master_in.received_bytes += size;
    return ESP_OK;
}
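
/* Note for callers: ESP_ERR_NOT_FOUND above means the slave has not yet loaded `size` bytes of data;
 * the usual pattern is to retry later rather than treat it as a fatal error. */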

//------------------------------------ TX ----------------------------------//
esp_err_t essl_spi_write_reg(void *arg, uint8_t addr, uint8_t value, uint8_t *out_value, uint32_t wait_ms)
{
    essl_spi_context_t *ctx = arg;
    ESSL_SPI_CHECK(arg, "Check ESSL SPI initialization first", ESP_ERR_INVALID_STATE);
    uint8_t reserved_1_head = ctx->master_out.tx_sync_reg < ctx->master_in.rx_sync_reg ? ctx->master_out.tx_sync_reg : ctx->master_in.rx_sync_reg;
    uint8_t reserved_1_tail = reserved_1_head + 3;
    uint8_t reserved_2_head = ctx->master_out.tx_sync_reg < ctx->master_in.rx_sync_reg ? ctx->master_in.rx_sync_reg : ctx->master_out.tx_sync_reg;
    uint8_t reserved_2_tail = reserved_2_head + 3;
    ESSL_SPI_CHECK(addr < reserved_1_head || (addr > reserved_1_tail && addr < reserved_2_head) || addr > reserved_2_tail, "Invalid address", ESP_ERR_INVALID_ARG);
    ESSL_SPI_CHECK(out_value == NULL, "This feature is not supported", ESP_ERR_NOT_SUPPORTED);

    return essl_spi_wrbuf(ctx->spi, &value, addr, sizeof(uint8_t), 0);
}

static uint32_t essl_spi_get_tx_buffer_num(void *arg)
{
    essl_spi_context_t *ctx = arg;
    ESP_LOGV(TAG, "slave rx buffer: %d, master has sent: %d", ctx->master_out.slave_rx_buf_num, ctx->master_out.sent_buf_num);
    return ctx->master_out.slave_rx_buf_num - ctx->master_out.sent_buf_num;
}

static esp_err_t essl_spi_update_tx_buffer_num(void *arg, uint32_t wait_ms)
{
    essl_spi_context_t *ctx = arg;
    uint32_t updated_num;
    uint32_t previous_num;
    esp_err_t ret;

    ret = essl_spi_rdbuf_polling(ctx->spi, (uint8_t *)&previous_num, ctx->master_out.tx_sync_reg, sizeof(uint32_t), 0);
    if (ret != ESP_OK) {
        return ret;
    }

    /**
     * Read until the last two readings are identical. Reason:
     * the SPI transaction is carried out byte by byte, so if the slave changes the shared register
     * while the master is reading it, the master may get a torn (inconsistent) value.
     */
    while (1) {
        ret = essl_spi_rdbuf_polling(ctx->spi, (uint8_t *)&updated_num, ctx->master_out.tx_sync_reg, sizeof(uint32_t), 0);
        if (ret != ESP_OK) {
            return ret;
        }

        if (updated_num == previous_num) {
            ctx->master_out.slave_rx_buf_num = updated_num;
            ESP_LOGV(TAG, "updated: slave prepared rx buffer: %d", updated_num);
            return ret;
        }

        previous_num = updated_num;
    }
}

esp_err_t essl_spi_send_packet(void *arg, const void *data, size_t size, uint32_t wait_ms)
{
    ESSL_SPI_CHECK(arg, "Check ESSL SPI initialization first", ESP_ERR_INVALID_STATE);
    if (!esp_ptr_dma_capable(data)) {
        return ESP_ERR_INVALID_ARG;
    }
    essl_spi_context_t *ctx = arg;
    esp_err_t ret;
    uint32_t buf_num_to_use = (size + ctx->master_out.tx_buffer_size - 1) / ctx->master_out.tx_buffer_size;

    if (essl_spi_get_tx_buffer_num(arg) < buf_num_to_use) {
        /**
         * In practice the slave usually loads plenty of RX buffers at once, so the slave's RX buffer
         * count is only queried again when the last-updated count is smaller than what the master requires.
         */
        ret = essl_spi_update_tx_buffer_num(arg, wait_ms);
        if (ret != ESP_OK) {
            return ret;
        }

        // The slave still has not loaded enough buffers
        if (essl_spi_get_tx_buffer_num(arg) < buf_num_to_use) {
            ESP_LOGV(TAG, "slave buffer: %d is not enough, %d is required", ctx->master_out.slave_rx_buf_num, ctx->master_out.sent_buf_num + buf_num_to_use);
            return ESP_ERR_NOT_FOUND;
        }
    }

    ESP_LOGV(TAG, "send_packet: size to write is: %d", size);
    ret = essl_spi_wrdma_seg(ctx->spi, data, size, 0);
    if (ret != ESP_OK) {
        return ret;
    }

    ctx->master_out.sent_buf_num += buf_num_to_use;
    return essl_spi_wrdma_done(ctx->spi, 0);
}
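
/* Note for callers: as with essl_spi_get_packet(), ESP_ERR_NOT_FOUND means the slave has not yet
 * prepared enough RX buffers for `size` bytes; retry once the slave has loaded more buffers. */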