From 0ec08ca21b7d3937b593b712ee6396dc07fd89df Mon Sep 17 00:00:00 2001 From: "Michael (XIAO Xufeng)" Date: Thu, 10 Oct 2019 12:35:13 +0800 Subject: [PATCH] sdio_slave: support HAL layer --- components/driver/include/driver/sdio_slave.h | 37 +- components/driver/sdio_slave.c | 976 ++++-------------- components/soc/CMakeLists.txt | 4 +- components/soc/include/hal/sdio_slave_hal.h | 529 ++++++++++ components/soc/include/hal/sdio_slave_ll.h | 482 +++++++++ components/soc/include/hal/sdio_slave_types.h | 47 + components/soc/src/hal/sdio_slave_hal.c | 729 +++++++++++++ docs/Doxyfile | 1 + .../api-reference/peripherals/sdio_slave.rst | 1 + 9 files changed, 1997 insertions(+), 809 deletions(-) create mode 100644 components/soc/include/hal/sdio_slave_hal.h create mode 100644 components/soc/include/hal/sdio_slave_ll.h create mode 100644 components/soc/include/hal/sdio_slave_types.h create mode 100644 components/soc/src/hal/sdio_slave_hal.c diff --git a/components/driver/include/driver/sdio_slave.h b/components/driver/include/driver/sdio_slave.h index 005e469cdc..a67ad47ea4 100644 --- a/components/driver/include/driver/sdio_slave.h +++ b/components/driver/include/driver/sdio_slave.h @@ -20,6 +20,7 @@ #include "esp_err.h" #include "sys/queue.h" +#include "hal/sdio_slave_types.h" #include "soc/sdio_slave_periph.h" #ifdef __cplusplus @@ -30,36 +31,6 @@ extern "C" { typedef void(*sdio_event_cb_t)(uint8_t event); -/// Mask of interrupts sending to the host. -typedef enum { - SDIO_SLAVE_HOSTINT_SEND_NEW_PACKET = HOST_SLC0_RX_NEW_PACKET_INT_ENA, ///< New packet available - SDIO_SLAVE_HOSTINT_RECV_OVF = HOST_SLC0_TX_OVF_INT_ENA, ///< Slave receive buffer overflow - SDIO_SLAVE_HOSTINT_SEND_UDF = HOST_SLC0_RX_UDF_INT_ENA, ///< Slave sending buffer underflow (this case only happen when the host do not request for packet according to the packet len). - SDIO_SLAVE_HOSTINT_BIT7 = HOST_SLC0_TOHOST_BIT7_INT_ENA, ///< General purpose interrupt bits that can be used by the user. - SDIO_SLAVE_HOSTINT_BIT6 = HOST_SLC0_TOHOST_BIT6_INT_ENA, - SDIO_SLAVE_HOSTINT_BIT5 = HOST_SLC0_TOHOST_BIT5_INT_ENA, - SDIO_SLAVE_HOSTINT_BIT4 = HOST_SLC0_TOHOST_BIT4_INT_ENA, - SDIO_SLAVE_HOSTINT_BIT3 = HOST_SLC0_TOHOST_BIT3_INT_ENA, - SDIO_SLAVE_HOSTINT_BIT2 = HOST_SLC0_TOHOST_BIT2_INT_ENA, - SDIO_SLAVE_HOSTINT_BIT1 = HOST_SLC0_TOHOST_BIT1_INT_ENA, - SDIO_SLAVE_HOSTINT_BIT0 = HOST_SLC0_TOHOST_BIT0_INT_ENA, -} sdio_slave_hostint_t; - -/// Timing of SDIO slave -typedef enum { - SDIO_SLAVE_TIMING_PSEND_PSAMPLE = 0,/**< Send at posedge, and sample at posedge. Default value for HS mode. - * Normally there's no problem using this to work in DS mode. - */ - SDIO_SLAVE_TIMING_NSEND_PSAMPLE ,///< Send at negedge, and sample at posedge. Default value for DS mode and below. - SDIO_SLAVE_TIMING_PSEND_NSAMPLE, ///< Send at posedge, and sample at negedge - SDIO_SLAVE_TIMING_NSEND_NSAMPLE, ///< Send at negedge, and sample at negedge -} sdio_slave_timing_t; - -/// Configuration of SDIO slave mode -typedef enum { - SDIO_SLAVE_SEND_STREAM = 0, ///< Stream mode, all packets to send will be combined as one if possible - SDIO_SLAVE_SEND_PACKET = 1, ///< Packet mode, one packets will be sent one after another (only increase packet_len if last packet sent). -} sdio_slave_sending_mode_t; /// Configuration of SDIO slave typedef struct { @@ -267,9 +238,9 @@ sdio_slave_hostint_t sdio_slave_get_host_intena(void); /** Set the interrupt enable for host. * - * @param ena Enable mask for host interrupt. + * @param mask Enable mask for host interrupt. 
*/ -void sdio_slave_set_host_intena(sdio_slave_hostint_t ena); +void sdio_slave_set_host_intena(sdio_slave_hostint_t mask); /** Interrupt the host by general purpose interrupt. * @@ -285,7 +256,7 @@ esp_err_t sdio_slave_send_host_int(uint8_t pos); * * @param mask Interrupt bits to clear, by bit mask. */ -void sdio_slave_clear_host_int(uint8_t mask); +void sdio_slave_clear_host_int(sdio_slave_hostint_t mask); /** Wait for general purpose interrupt from host. * diff --git a/components/driver/sdio_slave.c b/components/driver/sdio_slave.c index 317e492bc5..b346e5c6b2 100644 --- a/components/driver/sdio_slave.c +++ b/components/driver/sdio_slave.c @@ -17,7 +17,7 @@ Architecture: The whole SDIO slave peripheral consists of three parts: the registers (including the control registers of -interrupts and shared registers), the sending FIFO and the receving FIFO. A document ``esp_slave_protocol.rst`` +interrupts and shared registers), the sending FIFO and the receiving FIFO. A document ``esp_slave_protocol.rst`` describes the functionality of the peripheral detailedly. The host can access only one of those parts at once, and the hardware functions of these parts are totally independent. Hence this driver is designed into these three independent parts. The shared registers are quite @@ -29,7 +29,7 @@ is ready to send/receive data. The driver resets the counters during initializat inform the slave to reset the counters again if it should reboot (or lose the counter value for some reasons). Then the host can read/write the FIFOs by CMD53 commands according to the counters. -Since we don't want to copy all the data from the buffer each time we use sending/receving buffer, +Since we don't want to copy all the data from the buffer each time we use sending/receiving buffer, the buffers are directly loaded onto the sending/receiving linked-list and taken off only after use. Hence the driver takes ownership of the buffer when the buffer is fed to the driver. @@ -44,7 +44,7 @@ The driver of FIFOs works as below: to the app. Each time the app asks to receive by a buffer, the descriptor of the buffer is loaded onto the linked-list, - and the counter of receiving buffers is inreased so that the host will know this by the receiving interrupt. + and the counter of receiving buffers is increased so that the host will know this by the receiving interrupt. The hardware will automatically go through the linked list and write data into the buffers loaded on the list. @@ -97,6 +97,7 @@ The driver of FIFOs works as below: #include "xtensa/core-macros.h" #include "driver/periph_ctrl.h" #include "driver/gpio.h" +#include "hal/sdio_slave_hal.h" #define SDIO_SLAVE_CHECK(res, str, ret_val) do { if(!(res)){\ @@ -104,68 +105,36 @@ The driver of FIFOs works as below: return ret_val;\ } }while (0) +static const char TAG[] = "sdio_slave"; + #define SDIO_SLAVE_LOGE(s, ...) ESP_LOGE(TAG, "%s:%d (%s):"s, __FILE__,__LINE__,__FUNCTION__,##__VA_ARGS__) #define SDIO_SLAVE_LOGW(s, ...) ESP_LOGW(TAG, "%s: "s, __FUNCTION__,##__VA_ARGS__) -static const char TAG[] = "sdio_slave"; - -typedef enum { - STATE_IDLE = 1, - STATE_WAIT_FOR_START = 2, - STATE_SENDING = 3, -} send_state_t; - -// first 3 WORDs of this struct is defined by and compatible to the DMA link list format. 
-// sdio_slave_buf_handle_t is of type buf_desc_t*; -typedef struct buf_desc_s{ - volatile uint32_t size :12, - length:12, - offset: 5, /* h/w reserved 5bit, s/w use it as offset in buffer */ - sosf : 1, /* start of sub-frame */ - eof : 1, /* end of frame */ - owner : 1; /* hw or sw */ - uint8_t* buf; - union{ - TAILQ_ENTRY(buf_desc_s) te; // tailq used by receving +// sdio_slave_buf_handle_t is of type recv_desc_t*; +typedef struct recv_desc_s{ + union { struct { - STAILQ_ENTRY(buf_desc_s) qe; // stailq used by sending and receiving - union { - uint32_t pkt_len; - // shared with the tqe_prev in tailq, happen to be non-zero in the stailq. only - // write to 0 when removed from tailq, set to other will bring invalid pointer. - uint32_t not_receiving; - }; + // the third word, pointer to next desc, is shared with the tailq entry. + sdio_slave_hal_recv_desc_t hal_desc; + // when the forth word is used (not NULL), means the tailq is used, not in the receiving state. + uint32_t not_receiving; + }; + struct { + // first 3 WORDs of this struct is defined by and compatible to the DMA link list format. + uint32_t _reserved0; + uint32_t _reserved1; + TAILQ_ENTRY(recv_desc_s) te; // tailq used to store the registered descriptors. }; }; - void* arg; /* to hold some parameters */ -} buf_desc_t; +} recv_desc_t; -typedef STAILQ_HEAD(bufdesc_stailq_head_s, buf_desc_s) buf_stailq_t; -typedef TAILQ_HEAD(bufdesc_tailq_head_s, buf_desc_s) buf_tailq_t; -typedef struct { - uint8_t* data; - uint8_t* write_ptr; - uint8_t* read_ptr; - uint8_t* free_ptr; - size_t item_size; - size_t size; - portMUX_TYPE write_spinlock; - SemaphoreHandle_t remain_cnt; -} sdio_ringbuf_t; - -#define offset_of(type, field) ((unsigned int)&(((type *)(0))->field)) -typedef enum { - ringbuf_write_ptr = offset_of(sdio_ringbuf_t, write_ptr), - ringbuf_read_ptr = offset_of(sdio_ringbuf_t, read_ptr), - ringbuf_free_ptr = offset_of(sdio_ringbuf_t, free_ptr), -} sdio_ringbuf_pointer_t; - -#define SDIO_RINGBUF_INITIALIZER {.write_spinlock = portMUX_INITIALIZER_UNLOCKED,} +typedef TAILQ_HEAD(recv_tailq_head_s, recv_desc_s) recv_tailq_t; typedef struct { sdio_slave_config_t config; + sdio_slave_context_t *hal; intr_handle_t intr_handle; //allocated interrupt handle /*------- events ---------------*/ union { @@ -178,35 +147,25 @@ typedef struct { portMUX_TYPE reg_spinlock; /*------- sending ---------------*/ //desc in the send_link_list are temporary, taken information and space from the ringbuf, return to ringbuf after use. 
- send_state_t send_state; - sdio_ringbuf_t sendbuf; + SemaphoreHandle_t remain_cnt; + portMUX_TYPE write_spinlock; QueueHandle_t ret_queue; - buf_desc_t* in_flight; - buf_desc_t* in_flight_end; - buf_desc_t* in_flight_next; /*------- receiving ---------------*/ - buf_stailq_t recv_link_list; // now ready to/already hold data - buf_tailq_t recv_reg_list; // removed from the link list, registered but not used now - volatile buf_desc_t* recv_cur_ret; // next desc to return, NULL if all loaded descriptors are returned - portMUX_TYPE recv_spinlock; + recv_tailq_t recv_reg_list; // removed from the link list, registered but not used now + portMUX_TYPE recv_spinlock; } sdio_context_t; #define CONTEXT_INIT_VAL { \ .intr_handle = NULL, \ + .hal = NULL, \ /*------- events ---------------*/ \ .events = {}, \ .reg_spinlock = portMUX_INITIALIZER_UNLOCKED, \ /*------- sending ---------------*/ \ - .send_state = STATE_IDLE, \ - .sendbuf = SDIO_RINGBUF_INITIALIZER, \ .ret_queue = NULL, \ - .in_flight = NULL, \ - .in_flight_end = NULL, \ - .in_flight_next = NULL, \ + .write_spinlock = portMUX_INITIALIZER_UNLOCKED, \ /*------- receiving ---------------*/ \ - .recv_link_list = STAILQ_HEAD_INITIALIZER(context.recv_link_list), \ .recv_reg_list = TAILQ_HEAD_INITIALIZER(context.recv_reg_list), \ - .recv_cur_ret = NULL, \ .recv_spinlock = portMUX_INITIALIZER_UNLOCKED, \ } @@ -218,173 +177,19 @@ static void sdio_intr_send(void*); static void sdio_intr_recv(void*); static esp_err_t send_flush_data(void); -static esp_err_t send_reset_counter(void); -static void recv_flush_data(void); -static void recv_reset_counter(void); +static esp_err_t recv_flush_data(void); -static esp_err_t send_start(void); -static void send_stop(void); -static esp_err_t recv_start(void); -static void recv_stop(void); +static inline void critical_enter_recv(void); +static inline void critical_exit_recv(void); static void deinit_context(void); - -/**************** Ring buffer for SDIO use *****************/ -typedef enum { - RINGBUF_GET_ONE = 0, - RINGBUF_GET_ALL = 1, -} ringbuf_get_all_t; - -static void sdio_ringbuf_deinit(sdio_ringbuf_t* buf) -{ - if (buf->remain_cnt != NULL) vSemaphoreDelete(buf->remain_cnt); - if (buf->data != NULL) free(buf->data); - *buf = (sdio_ringbuf_t) SDIO_RINGBUF_INITIALIZER; -} - -static esp_err_t sdio_ringbuf_init(sdio_ringbuf_t* buf, int item_size, int item_cnt) -{ - if (buf->data != NULL) { - SDIO_SLAVE_LOGE("sdio_ringbuf_init: already initialized"); - return ESP_ERR_INVALID_STATE; - } - buf->item_size = item_size; - //one item is not used. 
- buf->size = item_size * (item_cnt+1); - //apply for resources - buf->data = (uint8_t*)heap_caps_malloc(buf->size, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT); - if (buf->data == NULL) goto no_mem; - buf->remain_cnt = xSemaphoreCreateCounting(item_cnt, item_cnt); - if (buf->remain_cnt == NULL) goto no_mem; - //initialize pointers - buf->write_ptr = buf->data; - buf->read_ptr = buf->data; - buf->free_ptr = buf->data; - return ESP_OK; -no_mem: - sdio_ringbuf_deinit(buf); - return ESP_ERR_NO_MEM; -} - -//calculate a pointer with offset to a original pointer of the specific ringbuffer -static inline uint8_t* sdio_ringbuf_offset_ptr(sdio_ringbuf_t *buf, sdio_ringbuf_pointer_t ptr, uint32_t offset) -{ - uint8_t *buf_ptr = (uint8_t*)*(uint32_t*)(((uint8_t*)buf)+ptr); //get the specific pointer of the buffer - uint8_t *offset_ptr=buf_ptr+offset; - if (offset_ptr >= buf->data + buf->size) offset_ptr -= buf->size; - return offset_ptr; -} - -static esp_err_t sdio_ringbuf_send(sdio_ringbuf_t* buf, esp_err_t (*copy_callback)(uint8_t*, void*), void* arg, TickType_t wait) -{ - portBASE_TYPE ret = xSemaphoreTake(buf->remain_cnt, wait); - if (ret != pdTRUE) return ESP_ERR_TIMEOUT; - - portENTER_CRITICAL(&buf->write_spinlock); - uint8_t* get_ptr = sdio_ringbuf_offset_ptr(buf, ringbuf_write_ptr, buf->item_size); - esp_err_t err = ESP_OK; - if (copy_callback) (*copy_callback)(get_ptr, arg); - if (err != ESP_OK) { - portEXIT_CRITICAL(&buf->write_spinlock); - return err; - } - buf->write_ptr = get_ptr; - portEXIT_CRITICAL(&buf->write_spinlock); - return ESP_OK; -} - -// this ringbuf is a return-before-recv-again strategy -// since this is designed to be called in the ISR, no parallel logic -static inline esp_err_t sdio_ringbuf_recv(sdio_ringbuf_t* buf, uint8_t **start, uint8_t **end, ringbuf_get_all_t get_all, TickType_t wait) -{ - assert(buf->free_ptr == buf->read_ptr); //must return before recv again - assert(wait == 0); //only implement wait = 0 case now - if (start == NULL && end == NULL) return ESP_ERR_INVALID_ARG; // must have a output - if (buf->read_ptr == buf->write_ptr) return ESP_ERR_NOT_FOUND; // no data - - uint8_t *get_start = sdio_ringbuf_offset_ptr(buf, ringbuf_read_ptr, buf->item_size); - - if (get_all != RINGBUF_GET_ONE) { - buf->read_ptr = buf->write_ptr; - } else { - buf->read_ptr = get_start; - } - - if (start != NULL) *start = get_start; - if (end != NULL) *end = buf->read_ptr; - return ESP_OK; -} - -static inline void sdio_ringbuf_return_from_isr(sdio_ringbuf_t* buf, uint8_t *ptr, portBASE_TYPE *yield) -{ - assert(sdio_ringbuf_offset_ptr(buf, ringbuf_free_ptr, buf->item_size) == ptr); - int size = (buf->read_ptr + buf->size - buf->free_ptr)%buf->size; - int count = size/buf->item_size; - assert(count*buf->item_size==size); - buf->free_ptr = buf->read_ptr; - for(int i = 0; i < count; i++) { - portBASE_TYPE ret = xSemaphoreGiveFromISR(buf->remain_cnt, yield); - assert(ret == pdTRUE); - } -} - -static inline void sdio_ringbuf_return(sdio_ringbuf_t* buf, uint8_t *ptr) -{ - assert(sdio_ringbuf_offset_ptr(buf, ringbuf_free_ptr, buf->item_size) == ptr); - int size = (buf->read_ptr + buf->size - buf->free_ptr)%buf->size; - int count = size/buf->item_size; - assert(count*buf->item_size==size); - buf->free_ptr = buf->read_ptr; - for(int i = 0; i < count; i++) { - portBASE_TYPE ret = xSemaphoreGive(buf->remain_cnt); - assert(ret == pdTRUE); - } -} - -static inline uint8_t* sdio_ringbuf_peek_front(sdio_ringbuf_t* buf) -{ - if (buf->read_ptr != buf->write_ptr) { - return sdio_ringbuf_offset_ptr(buf, 
ringbuf_read_ptr, buf->item_size); - } else { - return NULL; - } -} - -static inline uint8_t* sdio_ringbuf_peek_rear(sdio_ringbuf_t *buf) -{ - return buf->write_ptr; -} - -static inline bool sdio_ringbuf_empty(sdio_ringbuf_t* buf) -{ - return (buf->read_ptr == buf->write_ptr? true : false); -} -/**************** End of Ring buffer for SDIO *****************/ - -static inline void show_queue_item(buf_desc_t *item) -{ - ESP_EARLY_LOGI(TAG, "=> %p: size: %d(%d), eof: %d, owner: %d", item, item->size, item->length, item->eof, item->owner); - ESP_EARLY_LOGI(TAG, " buf: %p, stqe_next: %p, tqe-prev: %p", item->buf, item->qe.stqe_next, item->te.tqe_prev); -} - -static void __attribute((unused)) dump_queue(buf_stailq_t *queue) -{ - int cnt = 0; - buf_desc_t *item = NULL; - ESP_EARLY_LOGI(TAG, ">>>>> first: %p, last: %p <<<<<", queue->stqh_first, queue->stqh_last); - STAILQ_FOREACH(item, queue, qe) { - cnt++; - show_queue_item(item); - } - ESP_EARLY_LOGI(TAG, "total: %d", cnt); -} - static inline void show_ll(lldesc_t *item) { ESP_EARLY_LOGI(TAG, "=> %p: size: %d(%d), eof: %d, owner: %d", item, item->size, item->length, item->eof, item->owner); ESP_EARLY_LOGI(TAG, " buf: %p, stqe_next: %p", item->buf, item->qe.stqe_next); } + static void __attribute((unused)) dump_ll(lldesc_t *queue) { int cnt = 0; @@ -410,52 +215,36 @@ static inline void deinit_context(void) vQueueDelete(context.ret_queue); context.ret_queue = NULL; } - sdio_ringbuf_deinit(&context.sendbuf); + if (context.remain_cnt != NULL) vSemaphoreDelete(context.remain_cnt); + free(context.hal->send_desc_queue.data); + context.hal->send_desc_queue.data = NULL; + free(context.hal); + context.hal = NULL; } -esp_err_t link_desc_to_last(uint8_t* desc, void* arg) -{ - STAILQ_NEXT((buf_desc_t*)arg, qe) = (buf_desc_t*)desc; - return ESP_OK; -} - -static esp_err_t init_ringbuf(void) -{ - esp_err_t ret = sdio_ringbuf_init(&context.sendbuf, sizeof(buf_desc_t), context.config.send_queue_size); - if (ret != ESP_OK) return ret; - - esp_err_t rcv_res; - buf_desc_t *first=NULL, *last=NULL; - - //no copy for the first descriptor - ret = sdio_ringbuf_send(&context.sendbuf, NULL, NULL, portMAX_DELAY); - if (ret != ESP_OK) return ret; - - //loop in the ringbuf to link all the desc one after another as a ring - for (int i = 0; i < context.config.send_queue_size+1; i++) { - rcv_res = sdio_ringbuf_recv(&context.sendbuf, (uint8_t**)&last, NULL, RINGBUF_GET_ONE, 0); - assert (rcv_res == ESP_OK); - ret = sdio_ringbuf_send(&context.sendbuf, link_desc_to_last, last, portMAX_DELAY); - if (ret != ESP_OK) return ret; - sdio_ringbuf_return(&context.sendbuf, (uint8_t*)last); - } - first = NULL; - last = NULL; - //clear the queue - rcv_res = sdio_ringbuf_recv(&context.sendbuf, (uint8_t**)&first, (uint8_t**)&last, RINGBUF_GET_ALL, 0); - assert (rcv_res == ESP_OK); - assert(first == last); //there should be only one desc remain - sdio_ringbuf_return(&context.sendbuf, (uint8_t*)first); - return ESP_OK; -} - -static esp_err_t init_context(sdio_slave_config_t *config) +static esp_err_t init_context(const sdio_slave_config_t *config) { SDIO_SLAVE_CHECK(*(uint32_t*)&context.config == 0, "sdio slave already initialized", ESP_ERR_INVALID_STATE); - context = (sdio_context_t)CONTEXT_INIT_VAL; context.config = *config; + //initialize and configure the HAL + context.hal = (sdio_slave_context_t*)heap_caps_calloc(sizeof(sdio_slave_context_t), 1, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT); + if (context.hal == NULL) goto no_mem; + + context.hal->sending_mode = config->sending_mode; + 
context.hal->timing = config->timing; + context.hal->send_queue_size = config->send_queue_size; + context.hal->recv_buffer_size = config->recv_buffer_size; + //initialize ringbuffer resources + sdio_ringbuf_t *buf = &(context.hal->send_desc_queue); + //one item is not used. + buf->size = SDIO_SLAVE_SEND_DESC_SIZE * (config->send_queue_size+1); + buf->data = (uint8_t*)heap_caps_malloc(buf->size, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT); + if (buf->data == NULL) goto no_mem; + + sdio_slave_hal_init(context.hal); + // in theory we can queue infinite buffers in the linked list, but for multi-core reason we have to use a queue to // count the finished buffers. context.recv_event = xSemaphoreCreateCounting(UINT32_MAX, 0); @@ -469,8 +258,8 @@ static esp_err_t init_context(sdio_slave_config_t *config) } } - esp_err_t ret = init_ringbuf(); - if (ret != ESP_OK) goto no_mem; + context.remain_cnt = xSemaphoreCreateCounting(context.config.send_queue_size, context.config.send_queue_size); + if (context.remain_cnt == NULL) goto no_mem; context.ret_queue = xQueueCreate(config->send_queue_size, sizeof(void*)); if (context.ret_queue == NULL) goto no_mem; @@ -501,9 +290,6 @@ static void configure_pin(int pin, uint32_t func, bool pullup) static inline esp_err_t sdio_slave_hw_init(sdio_slave_config_t *config) { - //enable interrupts - SLC.slc0_int_ena.val = 0; - //initialize pin const sdio_slave_slot_info_t *slot = &sdio_slave_slot_info[1]; @@ -523,54 +309,7 @@ static inline esp_err_t sdio_slave_hw_init(sdio_slave_config_t *config) periph_module_reset(PERIPH_SDIO_SLAVE_MODULE); periph_module_enable(PERIPH_SDIO_SLAVE_MODULE); - SLC.conf0.slc0_rx_auto_wrback = 1; - SLC.conf0.slc0_token_auto_clr = 0; - SLC.conf0.slc0_rx_loop_test = 0; - SLC.conf0.slc0_tx_loop_test = 0; - - SLC.conf1.slc0_rx_stitch_en = 0; - SLC.conf1.slc0_tx_stitch_en = 0; - SLC.conf1.slc0_len_auto_clr = 0; - - SLC.rx_dscr_conf.slc0_token_no_replace = 1; - HINF.cfg_data1.highspeed_enable = 1; - HINF.cfg_data1.sdio_ver = 0x232; - - switch(config->timing) { - case SDIO_SLAVE_TIMING_PSEND_PSAMPLE: - HOST.conf.frc_sdio20 = 0x1f; - HOST.conf.frc_sdio11 = 0; - HOST.conf.frc_pos_samp = 0x1f; - HOST.conf.frc_neg_samp = 0; - break; - case SDIO_SLAVE_TIMING_PSEND_NSAMPLE: - HOST.conf.frc_sdio20 = 0x1f; - HOST.conf.frc_sdio11 = 0; - HOST.conf.frc_pos_samp = 0; - HOST.conf.frc_neg_samp = 0x1f; - break; - case SDIO_SLAVE_TIMING_NSEND_PSAMPLE: - HOST.conf.frc_sdio20 = 0; - HOST.conf.frc_sdio11 = 0x1f; - HOST.conf.frc_pos_samp = 0x1f; - HOST.conf.frc_neg_samp = 0; - break; - case SDIO_SLAVE_TIMING_NSEND_NSAMPLE: - HOST.conf.frc_sdio20 = 0; - HOST.conf.frc_sdio11 = 0x1f; - HOST.conf.frc_pos_samp = 0; - HOST.conf.frc_neg_samp = 0x1f; - break; - } - - SLC.slc0_int_ena.frhost_bit0 = 1; - SLC.slc0_int_ena.frhost_bit1 = 1; - SLC.slc0_int_ena.frhost_bit2 = 1; - SLC.slc0_int_ena.frhost_bit3 = 1; - SLC.slc0_int_ena.frhost_bit4 = 1; - SLC.slc0_int_ena.frhost_bit5 = 1; - SLC.slc0_int_ena.frhost_bit6 = 1; - SLC.slc0_int_ena.frhost_bit7 = 1; + sdio_slave_hal_hw_init(context.hal); return ESP_OK; } @@ -606,12 +345,13 @@ esp_err_t sdio_slave_initialize(sdio_slave_config_t *config) r = esp_intr_alloc(ETS_SLC0_INTR_SOURCE, flags, sdio_intr, NULL, &intr_handle); if (r != ESP_OK) return r; - r = sdio_slave_hw_init(config); - if (r != ESP_OK) return r; r = init_context(config); if (r != ESP_OK) return r; context.intr_handle = intr_handle; + r = sdio_slave_hw_init(config); + if (r != ESP_OK) return r; + sdio_slave_reset(); return ESP_OK; } @@ -620,18 +360,19 @@ void 
sdio_slave_deinit(void) { sdio_slave_hw_deinit(); - //unregister all buffers in the queue, and not in the queue - buf_desc_t *temp_desc; - buf_desc_t *desc; + //unregister all buffers registered but returned (not loaded) + recv_desc_t *temp_desc; + recv_desc_t *desc; TAILQ_FOREACH_SAFE(desc, &context.recv_reg_list, te, temp_desc) { TAILQ_REMOVE(&context.recv_reg_list, desc, te); free(desc); } - STAILQ_FOREACH_SAFE(desc, &context.recv_link_list, qe, temp_desc) { - STAILQ_REMOVE(&context.recv_link_list, desc, buf_desc_s, qe); + //unregister all buffers that is loaded and not returned + while (1) { + desc = (recv_desc_t*)sdio_slave_hal_recv_unload_desc(context.hal); + if (desc == NULL) break; free(desc); } - esp_err_t ret = esp_intr_free(context.intr_handle); assert(ret==ESP_OK); context.intr_handle = NULL; @@ -641,45 +382,58 @@ void sdio_slave_deinit(void) esp_err_t sdio_slave_start(void) { esp_err_t ret; - HOST.slc0_int_clr.val = UINT32_MAX;//clear all interrupts - ret = send_start(); + sdio_slave_hostint_t intr = (sdio_slave_hostint_t)UINT32_MAX; + sdio_slave_hal_hostint_clear(context.hal, &intr); + ret = sdio_slave_hal_send_start(context.hal); if (ret != ESP_OK) return ret; - ret = recv_start(); + + critical_enter_recv(); + sdio_slave_hal_recv_start(context.hal); + critical_exit_recv(); + ret = ESP_OK; if (ret != ESP_OK) return ret; - HINF.cfg_data1.sdio_ioready1 = 1; //set IO ready to 1 to allow host to use + + sdio_slave_hal_set_ioready(context.hal, true); return ESP_OK; } esp_err_t sdio_slave_reset(void) { - send_flush_data(); - send_reset_counter(); - recv_flush_data(); - recv_reset_counter(); - return ESP_OK; + esp_err_t err; + err = send_flush_data(); + if (err != ESP_OK) { + return err; + } + + err = sdio_slave_hal_send_reset_counter(context.hal); + if (err != ESP_OK) { + return err; + } + + err = recv_flush_data(); + if (err != ESP_OK) { + return err; + } + + critical_enter_recv(); + sdio_slave_hal_recv_reset_counter(context.hal); + critical_exit_recv(); + err = ESP_OK; + return err; } void sdio_slave_stop(void) { - HINF.cfg_data1.sdio_ioready1 = 0; //set IO ready to 1 to stop host from using - send_stop(); - recv_stop(); + sdio_slave_hal_set_ioready(context.hal, false); + sdio_slave_hal_send_stop(context.hal); + sdio_slave_hal_recv_stop(context.hal); } -#define SDIO_SLAVE_SLC_INT_TX_MASK (SLC_SLC0_TX_ERR_EOF_INT_ST_M | SLC_SLC0_TX_DSCR_EMPTY_INT_ST_M | SLC_SLC0_TX_DSCR_ERR_INT_ST_M | SLC_SLC0_TX_SUC_EOF_INT_ST_M | SLC_SLC0_TX_DONE_INT_ST_M | SLC_SLC0_TX_OVF_INT_ST_M | SLC_SLC0_TX_START_INT_ST_M) -#define SDIO_SLAVE_SLC_INT_RX_MASK (SLC_SLC0_RX_DSCR_ERR_INT_ST_M | SLC_SLC0_RX_EOF_INT_ST_M | SLC_SLC0_RX_DONE_INT_ST_M | SLC_SLC0_RX_UDF_INT_ST_M | SLC_SLC0_RX_START_INT_ST_M) -#define SDIO_SLAVE_SLC_INT_HOST_MASK (SLC_FRHOST_BIT7_INT_ST_M | SLC_FRHOST_BIT6_INT_ST_M | SLC_FRHOST_BIT5_INT_ST_M | SLC_FRHOST_BIT4_INT_ST_M | SLC_FRHOST_BIT3_INT_ST_M | SLC_FRHOST_BIT2_INT_ST_M | SLC_FRHOST_BIT1_INT_ST_M | SLC_FRHOST_BIT0_INT_ST_M) - -//strange but `tx_*` regs for host->slave transfers while `rx_*` regs for slave->host transfers static void sdio_intr(void* arg) { - uint32_t int_val = SLC.slc0_int_st.val; - uint32_t int_raw = SLC.slc0_int_raw.val; - ESP_EARLY_LOGV(TAG, "sdio_intr: %08X(%08X)", int_val, int_raw); - - if (int_val & SDIO_SLAVE_SLC_INT_RX_MASK) sdio_intr_send(arg); - if (int_val & SDIO_SLAVE_SLC_INT_TX_MASK) sdio_intr_recv(arg); - if (int_val & SDIO_SLAVE_SLC_INT_HOST_MASK) sdio_intr_host(arg); + sdio_intr_send(arg); + sdio_intr_recv(arg); + sdio_intr_host(arg); } 
/*--------------------------------------------------------------------------- @@ -687,10 +441,9 @@ static void sdio_intr(void* arg) *--------------------------------------------------------------------------*/ static void sdio_intr_host(void* arg) { - uint8_t int_val = SLC.slc0_int_st.val & 0xff; - + sdio_slave_ll_slvint_t int_val; + sdio_slave_hal_slvint_fetch_clear(context.hal, &int_val); portBASE_TYPE yield = pdFALSE; - SLC.slc0_int_clr.val = int_val; for(int i = 0; i < 8; i++) { if (BIT(i) & int_val) { if (context.config.event_cb != NULL) (*context.config.event_cb)(i); @@ -706,13 +459,11 @@ esp_err_t sdio_slave_wait_int(int pos, TickType_t wait) return xSemaphoreTake(context.events[pos], wait); } - uint8_t sdio_slave_read_reg(int pos) { if (pos >= 28 && pos <= 31) SDIO_SLAVE_LOGW("%s: interrupt reg, for reference", __FUNCTION__); if (pos < 0 || pos >= 64) SDIO_SLAVE_LOGE("read register address wrong"); - - return *(uint8_t*)(HOST_SLCHOST_CONF_W_REG(pos)); + return sdio_slave_hal_host_get_reg(context.hal, pos); } esp_err_t sdio_slave_write_reg(int pos, uint8_t reg) @@ -725,205 +476,51 @@ esp_err_t sdio_slave_write_reg(int pos, uint8_t reg) SDIO_SLAVE_LOGE("write register address wrong"); return ESP_ERR_INVALID_ARG; } - uint32_t addr = HOST_SLCHOST_CONF_W_REG(pos) & (~3); - uint32_t shift = (pos % 4)*8; portENTER_CRITICAL(&context.reg_spinlock); - int val = *(uint32_t*)addr; - *(uint32_t*)addr = (val & ~(0xff << shift)) | (reg<host transfers. + /* The link list is handled in the app, while counter and pointer processed in ISR. * Driver abuse rx_done bit to invoke ISR. * If driver is stopped, the link list is stopped as well as the ISR invoker. */ -static inline void send_length_write(uint32_t len) -{ - SLC.slc0_len_conf.val = FIELD_TO_VALUE2(SLC_SLC0_LEN_WDATA, len) | FIELD_TO_VALUE2(SLC_SLC0_LEN_WR, 1); - ESP_EARLY_LOGV(TAG, "send_length_write: %d, last_len: %08X", len, HOST.pkt_len.reg_slc0_len); -} - -static inline void send_start_transmission(const void* desc) -{ - //reset to flush previous packets - SLC.conf0.slc0_rx_rst = 1; - SLC.conf0.slc0_rx_rst = 0; - SLC.slc0_rx_link.addr = (uint32_t)desc; - SLC.slc0_rx_link.start = 1; -} - -static inline void send_stop_ll_operation(void) -{ - SLC.slc0_rx_link.stop = 1; -} - -static inline uint32_t send_length_read(void) -{ - return HOST.pkt_len.reg_slc0_len; -} - -DMA_ATTR static const buf_desc_t start_desc = { - .owner = 1, - .buf = (void*)0x3ffbbbbb, //assign a dma-capable pointer other than NULL, which will not be used - .size = 1, - .length = 1, - .eof = 1, -}; - -static inline void send_isr_invoker_enable(void) -{ - //force trigger rx_done interrupt. the interrupt is abused to invoke ISR from the app by the enable bit and never cleared. 
- send_start_transmission(&start_desc); - //wait for rx_done - while(!SLC.slc0_int_raw.rx_done); - HOST.slc0_int_clr.rx_new_packet = 1; - send_stop_ll_operation(); -} - -static inline void send_isr_invoker_disable(void) -{ - SLC.slc0_int_clr.rx_done = 1; -} - -static inline void send_intr_enable(void) -{ - SLC.slc0_int_ena.rx_eof = 1; - send_isr_invoker_enable(); -} - -static inline void send_intr_disable(void) -{ - send_isr_invoker_disable(); - SLC.slc0_int_ena.rx_eof = 0; -} - -static inline void send_isr_invoke(void) -{ - SLC.slc0_int_ena.rx_done = 1; -} - -static inline send_state_t send_get_state(void) -{ - return context.send_state; -} - -static inline void send_set_state(send_state_t state) -{ - context.send_state = state; -} - -//start hw operation with existing data (if exist) -static esp_err_t send_start(void) -{ - SDIO_SLAVE_CHECK(send_get_state() == STATE_IDLE, - "already started", ESP_ERR_INVALID_STATE); - SLC.slc0_int_clr.rx_eof = 1; - send_set_state(STATE_WAIT_FOR_START); - send_intr_enable(); - return ESP_OK; -} - -//only stop hw operations, no touch to data as well as counter -static void send_stop(void) -{ - SLC.slc0_rx_link.stop = 1; - send_intr_disable(); - - send_set_state(STATE_IDLE); -} - -static inline esp_err_t send_isr_eof(portBASE_TYPE *yield) -{ - // inform app to recycle descs - portBASE_TYPE ret = pdTRUE; - buf_desc_t *desc = context.in_flight; - assert(desc != NULL); - - do { - ESP_EARLY_LOGV(TAG, "end: %x", desc->arg); - ret = xQueueSendFromISR(context.ret_queue, &desc->arg, yield); - assert(ret == pdTRUE); - buf_desc_t* next = STAILQ_NEXT(desc, qe); - desc = next; - } while(desc!=NULL); - STAILQ_NEXT(context.in_flight_end, qe) = context.in_flight_next; - sdio_ringbuf_return_from_isr(&context.sendbuf, (uint8_t*)context.in_flight, yield); - context.in_flight = NULL; - context.in_flight_end = NULL; - // Go to wait for packet state - send_set_state(STATE_WAIT_FOR_START); - return ESP_OK; -} - -static inline esp_err_t send_isr_check_new_pkt(portBASE_TYPE *yield) -{ - esp_err_t ret; - buf_desc_t *start = NULL; - buf_desc_t *end = NULL; - if (context.config.sending_mode == SDIO_SLAVE_SEND_PACKET) { - ret = sdio_ringbuf_recv(&context.sendbuf, (uint8_t**)&start, (uint8_t**)&end, RINGBUF_GET_ONE, 0); - } else { //stream mode - ret = sdio_ringbuf_recv(&context.sendbuf, (uint8_t**)&start, (uint8_t**)&end, RINGBUF_GET_ALL, 0); - } - if (ret == ESP_OK) { - context.in_flight = start; - context.in_flight_end = end; - end->eof = 1; - //temporarily break the link ring here, the ring will be re-connected in ``send_isr_eof()``. - context.in_flight_next = STAILQ_NEXT(end, qe); - STAILQ_NEXT(end, qe) = NULL; - } - return ESP_OK; -} - -static inline esp_err_t send_isr_new_packet(void) -{ - // since eof is changed, we have to stop and reset the link list, - // and restart new link list operation - buf_desc_t *const start_desc = context.in_flight; - buf_desc_t *const end_desc = context.in_flight_end; - assert(start_desc != NULL && end_desc != NULL); - - send_stop_ll_operation(); - send_start_transmission(start_desc); - - // update pkt_len register to allow host reading. 
- send_length_write(end_desc->pkt_len); - - send_set_state(STATE_SENDING); - - ESP_EARLY_LOGD(TAG, "restart new send: %p->%p, pkt_len: %d", start_desc, end_desc, end_desc->pkt_len); - return ESP_OK; -} static void sdio_intr_send(void* arg) { @@ -931,60 +528,51 @@ static void sdio_intr_send(void* arg) portBASE_TYPE yield = pdFALSE; // this interrupt is abused to get ISR invoked by app - if (SLC.slc0_int_st.rx_done) SLC.slc0_int_ena.rx_done = 0; + sdio_slave_hal_send_handle_isr_invoke(context.hal); - // Goto idle state (cur_start=NULL) if transmission done, - // also update sequence and recycle descs. - if (SLC.slc0_int_st.rx_eof) { - SLC.slc0_int_clr.rx_eof = 1; - //check current state - assert(send_get_state() == STATE_SENDING);// context.send_start != NOT_YET && context.send_end != NOT_YET); - send_isr_eof(&yield); + uint32_t returned_cnt; + if (sdio_slave_hal_send_eof_happened(context.hal)) { + portBASE_TYPE ret = pdTRUE; + + esp_err_t err; + while (1) { + void *finished_arg; + err = sdio_slave_hal_send_get_next_finished_arg(context.hal, &finished_arg, &returned_cnt); + if (err != ESP_OK) { + break; + } + + assert(returned_cnt == 0); + ESP_EARLY_LOGV(TAG, "end: %x", finished_arg); + ret = xQueueSendFromISR(context.ret_queue, &finished_arg, &yield); + assert(ret == pdTRUE); + } + //get_next_finished_arg returns the total amount of returned descs. + for(int i = 0; i < returned_cnt; i++) { + portBASE_TYPE ret = xSemaphoreGiveFromISR(context.remain_cnt, &yield); + assert(ret == pdTRUE); + } } - // Go to wait sending state (cur_start!=NULL && cur_end==NULL) if not sending and new packet ready. - // Note we may also enter this state by stopping sending in the app. - if (send_get_state() == STATE_WAIT_FOR_START) { - if (context.in_flight == NULL) send_isr_check_new_pkt(&yield); - // Go to sending state (cur_start and cur_end != NULL) if has packet to send. 
- if (context.in_flight) send_isr_new_packet(); - } + sdio_slave_hal_send_new_packet_if_exist(context.hal); if (yield) portYIELD_FROM_ISR(); } -esp_err_t send_write_desc(uint8_t* desc, void* arg) -{ - buf_desc_t *new_desc = (buf_desc_t*)arg; - buf_desc_t *tail = (buf_desc_t*)sdio_ringbuf_peek_rear(&context.sendbuf); - new_desc->pkt_len = tail->pkt_len + new_desc->size; - //copy and keep the link - STAILQ_NEXT(new_desc, qe) = STAILQ_NEXT((buf_desc_t*)desc, qe); - - memcpy(desc, new_desc, sizeof(buf_desc_t)); - return ESP_OK; -} - esp_err_t sdio_slave_send_queue(uint8_t* addr, size_t len, void* arg, TickType_t wait) { SDIO_SLAVE_CHECK(len > 0, "len <= 0", ESP_ERR_INVALID_ARG); SDIO_SLAVE_CHECK(esp_ptr_dma_capable(addr) && (uint32_t)addr%4==0, "buffer to send should be DMA capable and 32-bit aligned", ESP_ERR_INVALID_ARG); - buf_desc_t new_desc = { - .size = len, - .length = len, - .buf = addr, - .owner = 1, - // in stream mode, the eof is only appended (in ISR) when new packet is ready to be sent - .eof = (context.config.sending_mode == SDIO_SLAVE_SEND_PACKET?1:0), - .arg = arg, - }; + portBASE_TYPE cnt_ret = xSemaphoreTake(context.remain_cnt, wait); + if (cnt_ret != pdTRUE) return ESP_ERR_TIMEOUT; - esp_err_t ret = sdio_ringbuf_send(&context.sendbuf, send_write_desc, &new_desc, wait); + portENTER_CRITICAL(&context.write_spinlock); + esp_err_t ret = sdio_slave_hal_send_queue(context.hal, addr, len, arg); + portEXIT_CRITICAL(&context.write_spinlock); if (ret != ESP_OK) return ret; - send_isr_invoke(); return ESP_OK; } @@ -1014,89 +602,36 @@ esp_err_t sdio_slave_transmit(uint8_t* addr, size_t len) //clear data but keep counter static esp_err_t send_flush_data(void) { - //only works in idle state / wait to send state - SDIO_SLAVE_CHECK(send_get_state() == STATE_IDLE, - "flush data when transmission started", ESP_ERR_INVALID_STATE); + esp_err_t err; - HOST.slc0_int_clr.rx_new_packet = 1; - - buf_desc_t *last = NULL; - if (context.in_flight) { - buf_desc_t *desc = context.in_flight; - while (desc != NULL) { - xQueueSend(context.ret_queue, &desc->arg, portMAX_DELAY); - last = desc; - desc = STAILQ_NEXT(desc, qe); + while (1) { + void *finished_arg; + uint32_t return_cnt = 0; + err = sdio_slave_hal_send_flush_next_buffer(context.hal, &finished_arg, &return_cnt); + if (err == ESP_OK) { + portBASE_TYPE ret = xQueueSend(context.ret_queue, &finished_arg, portMAX_DELAY); + assert(ret == pdTRUE); + for (int i = 0; i < return_cnt; i++) { + portBASE_TYPE ret = xSemaphoreGive(context.remain_cnt); + assert(ret == pdTRUE); + } + } else { + if (err == ESP_ERR_NOT_FOUND) { + err = ESP_OK; + } + break; } - STAILQ_NEXT(context.in_flight_end, qe) = context.in_flight_next; - sdio_ringbuf_return(&context.sendbuf, (uint8_t*)context.in_flight); - context.in_flight = NULL; - context.in_flight_end = NULL; } - buf_desc_t *head, *tail; - esp_err_t ret = sdio_ringbuf_recv(&context.sendbuf, (uint8_t**)&head, (uint8_t**)&tail, RINGBUF_GET_ALL, 0); - if (ret == ESP_OK) { - buf_desc_t *desc = head; - while (1) { - xQueueSend(context.ret_queue, &desc->arg, portMAX_DELAY); - last = desc; - if (desc == tail) break; - desc = STAILQ_NEXT(desc, qe); - } - sdio_ringbuf_return(&context.sendbuf, (uint8_t*)head); + if (err == ESP_ERR_INVALID_STATE) { + ESP_LOGE(TAG, "flush data when transmission started"); } - - // if in wait to send state, set the sequence number of tail to the value last sent, just as if the packet wait to - // send never queued. 
- // Go to idle state (cur_end!=NULL and cur_start=NULL) - send_set_state(STATE_IDLE); - - if (last == NULL) last = (buf_desc_t*)sdio_ringbuf_peek_rear(&context.sendbuf); - last->pkt_len = send_length_read(); - return ESP_OK; + return err; } -//clear counter but keep data -static esp_err_t send_reset_counter(void) -{ - SDIO_SLAVE_CHECK(send_get_state() == STATE_IDLE, - "reset counter when transmission started", ESP_ERR_INVALID_STATE); - - send_length_write(0); - - uint32_t last_cnt=0; - buf_desc_t *desc = context.in_flight; - buf_desc_t *last = NULL; - while(desc != NULL) { - last_cnt += desc->length; - desc->pkt_len = last_cnt; - last = desc; - desc = STAILQ_NEXT(desc, qe); - } - // in theory the desc should be the one right next to the last of in_flight, - // but the link of last is NULL, so get the desc from the ringbuf directly. - desc = (buf_desc_t*)sdio_ringbuf_peek_front(&context.sendbuf); - while(desc != NULL) { - last_cnt += desc->length; - desc->pkt_len = last_cnt; - last = desc; - desc = STAILQ_NEXT(desc, qe); - } - if (last == NULL) { - last = (buf_desc_t*)sdio_ringbuf_peek_rear(&context.sendbuf); - last->pkt_len = 0; - } - - return ESP_OK; -} - - /*--------------------------------------------------------------------------- * Recv *--------------------------------------------------------------------------*/ -//strange but the registers for host->slave transfers are really called "tx*". - #define CHECK_HANDLE_IDLE(desc) do { if (desc == NULL || !desc->not_receiving) {\ return ESP_ERR_INVALID_ARG; } } while(0) @@ -1110,144 +645,48 @@ static inline void critical_exit_recv(void) portEXIT_CRITICAL(&context.recv_spinlock); } -static inline void recv_size_inc(void) -{ - // fields wdata and inc_more should be written by the same instruction. - SLC.slc0_token1.val = FIELD_TO_VALUE2(SLC_SLC0_TOKEN1_WDATA, 1) | FIELD_TO_VALUE2(SLC_SLC0_TOKEN1_INC_MORE, 1); -} - -static inline void recv_size_reset(void) -{ - SLC.slc0_token1.val = FIELD_TO_VALUE2(SLC_SLC0_TOKEN1_WDATA, 0) | FIELD_TO_VALUE2(SLC_SLC0_TOKEN1_WR, 1); -} - -static inline buf_desc_t* recv_get_first_empty_buf(void) -{ - buf_stailq_t *const queue = &context.recv_link_list; - buf_desc_t *desc = STAILQ_FIRST(queue); - while(desc && desc->owner == 0) { - desc = STAILQ_NEXT(desc, qe); - } - return desc; -} - -static esp_err_t recv_start(void) -{ - SLC.conf0.slc0_tx_rst = 1; - SLC.conf0.slc0_tx_rst = 0; - - critical_enter_recv(); - buf_desc_t *desc = recv_get_first_empty_buf(); - if (!desc) { - ESP_LOGD(TAG, "recv: restart without desc"); - critical_exit_recv(); - return ESP_OK; // if no buffer loaded, return directly. 
- } - //the counter is handled when add/flush/reset - SLC.slc0_tx_link.addr = (uint32_t)desc; - SLC.slc0_tx_link.start = 1; - critical_exit_recv(); - - SLC.slc0_int_ena.tx_done = 1; - return ESP_OK; -} - -static void recv_stop(void) -{ - SLC.slc0_tx_link.stop = 1; - SLC.slc0_int_ena.tx_done = 0; -} - -// reset the counter, but keep the data -static void recv_reset_counter(void) -{ - recv_size_reset(); - - critical_enter_recv(); - buf_desc_t *desc = recv_get_first_empty_buf(); - while (desc != NULL) { - assert(desc->owner == 1); - recv_size_inc(); - desc = STAILQ_NEXT(desc, qe); - } - critical_exit_recv(); -} - // remove data, still increase the counter -static void recv_flush_data(void) +static esp_err_t recv_flush_data(void) { - buf_stailq_t *const queue = &context.recv_link_list; - - critical_enter_recv(); while(1) { portBASE_TYPE ret = xSemaphoreTake(context.recv_event, 0); if (ret == pdFALSE) break; - - buf_desc_t *desc = STAILQ_FIRST(queue); - assert (desc != NULL && desc->owner == 0); - STAILQ_REMOVE_HEAD(queue, qe); - desc->owner = 1; - STAILQ_INSERT_TAIL(queue, desc, qe); - recv_size_inc(); - //we only add it to the tail here, without start the DMA nor increase buffer num. + critical_enter_recv(); + sdio_slave_hal_recv_flush_one_buffer(context.hal); + critical_exit_recv(); } - critical_exit_recv(); + return ESP_OK; } static void sdio_intr_recv(void* arg) { portBASE_TYPE yield = 0; - if (SLC.slc0_int_raw.tx_done) { - SLC.slc0_int_clr.tx_done = 1; - while (context.recv_cur_ret && context.recv_cur_ret->owner == 0) { - // This may cause the ``cur_ret`` pointer to be NULL, indicating the list is empty, - // in this case the ``tx_done`` should happen no longer until new desc is appended. - // The app is responsible to place the pointer to the right place again when appending new desc. - portENTER_CRITICAL_ISR(&context.recv_spinlock); - context.recv_cur_ret = STAILQ_NEXT(context.recv_cur_ret, qe); - portEXIT_CRITICAL_ISR(&context.recv_spinlock); + while (sdio_slave_hal_recv_done(context.hal)) { + portENTER_CRITICAL_ISR(&context.recv_spinlock); + bool has_next_item = sdio_slave_hal_recv_has_next_item(context.hal); + portEXIT_CRITICAL_ISR(&context.recv_spinlock); + if (has_next_item) { ESP_EARLY_LOGV(TAG, "intr_recv: Give"); xSemaphoreGiveFromISR(context.recv_event, &yield); - SLC.slc0_int_clr.tx_done = 1; - }; + continue; //check the linked list again skip the interrupt checking + } + // if no more items on the list, go back and check again the interrupt, + // will loop until the interrupt bit is kept cleared. } if (yield) portYIELD_FROM_ISR(); } esp_err_t sdio_slave_recv_load_buf(sdio_slave_buf_handle_t handle) { - buf_desc_t *desc = (buf_desc_t*)handle; + recv_desc_t *desc = (recv_desc_t*)handle; CHECK_HANDLE_IDLE(desc); - - buf_stailq_t *const queue = &context.recv_link_list; + assert(desc->not_receiving); critical_enter_recv(); - assert(desc->not_receiving); TAILQ_REMOVE(&context.recv_reg_list, desc, te); - desc->owner = 1; desc->not_receiving = 0; //manually remove the prev link (by set not_receiving=0), to indicate this is in the queue - - buf_desc_t *const tail = STAILQ_LAST(queue, buf_desc_s, qe); - - STAILQ_INSERT_TAIL(queue, desc, qe); - if (context.recv_cur_ret == NULL) { - context.recv_cur_ret = desc; - } - - if (tail == NULL) { - //no one in the ll, start new ll operation. 
- SLC.slc0_tx_link.addr = (uint32_t)desc; - SLC.slc0_tx_link.start = 1; - ESP_LOGV(TAG, "recv_load_buf: start new"); - SLC.slc0_int_ena.tx_done = 1; - } else { - //restart former ll operation - SLC.slc0_tx_link.restart = 1; - ESP_LOGV(TAG, "recv_load_buf: restart"); - } + sdio_slave_hal_load_buf(context.hal, &desc->hal_desc); critical_exit_recv(); - recv_size_inc(); - return ESP_OK; } @@ -1255,18 +694,14 @@ sdio_slave_buf_handle_t sdio_slave_recv_register_buf(uint8_t *start) { SDIO_SLAVE_CHECK(esp_ptr_dma_capable(start) && (uint32_t)start%4==0, "buffer to register should be DMA capable and 32-bit aligned", NULL); - buf_desc_t *desc = (buf_desc_t*)heap_caps_malloc(sizeof(buf_desc_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT); + recv_desc_t *desc = (recv_desc_t*)heap_caps_malloc(sizeof(recv_desc_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT); if (desc == NULL) { SDIO_SLAVE_LOGE("cannot allocate lldesc for new buffer"); return NULL; } //initially in the reg list - *desc = (buf_desc_t) { - .size = context.config.recv_buffer_size, - .buf = start, - //no length required, eof always=0 - }; + sdio_slave_hal_recv_init_desc(context.hal, &desc->hal_desc, start); critical_enter_recv(); TAILQ_INSERT_TAIL(&context.recv_reg_list, desc, te); critical_exit_recv(); @@ -1279,25 +714,22 @@ esp_err_t sdio_slave_recv(sdio_slave_buf_handle_t* handle_ret, uint8_t **out_add portBASE_TYPE ret = xSemaphoreTake(context.recv_event, wait); if (ret == pdFALSE) return ESP_ERR_TIMEOUT; - buf_stailq_t *const queue = &context.recv_link_list; - critical_enter_recv(); //remove from queue, add back to reg list. - buf_desc_t *desc = STAILQ_FIRST(queue); - STAILQ_REMOVE_HEAD(queue, qe); + recv_desc_t *desc = (recv_desc_t*)sdio_slave_hal_recv_unload_desc(context.hal); + assert(desc != NULL && desc->hal_desc.owner == 0); TAILQ_INSERT_TAIL(&context.recv_reg_list, desc, te); critical_exit_recv(); - assert(desc != NULL && desc->owner == 0); *handle_ret = (sdio_slave_buf_handle_t)desc; - if (out_addr) *out_addr = desc->buf; - if (out_len) *out_len = desc->length; + if (out_addr) *out_addr = (uint8_t*)desc->hal_desc.buf; + if (out_len) *out_len = desc->hal_desc.length; return ESP_OK; } esp_err_t sdio_slave_recv_unregister_buf(sdio_slave_buf_handle_t handle) { - buf_desc_t *desc = (buf_desc_t*)handle; + recv_desc_t *desc = (recv_desc_t*)handle; CHECK_HANDLE_IDLE(desc); //in the queue, fail. 
critical_enter_recv(); @@ -1309,15 +741,9 @@ esp_err_t sdio_slave_recv_unregister_buf(sdio_slave_buf_handle_t handle) uint8_t* sdio_slave_recv_get_buf(sdio_slave_buf_handle_t handle, size_t *len_o) { - buf_desc_t *desc = (buf_desc_t*)handle; if (handle == NULL) return NULL; + recv_desc_t *desc = (recv_desc_t*)handle; - if (len_o!= NULL) *len_o= desc->length; - return desc->buf; -} - -static void __attribute((unused)) sdio_slave_recv_get_loaded_buffer_num(void) -{ - buf_stailq_t *const queue = &context.recv_link_list; - dump_queue(queue); -} + if (len_o!= NULL) *len_o= desc->hal_desc.length; + return (uint8_t*)desc->hal_desc.buf; +} \ No newline at end of file diff --git a/components/soc/CMakeLists.txt b/components/soc/CMakeLists.txt index 5b1ffd7e8c..845cfd047c 100644 --- a/components/soc/CMakeLists.txt +++ b/components/soc/CMakeLists.txt @@ -38,7 +38,9 @@ list(APPEND srcs ) if(IDF_TARGET STREQUAL "esp32") - list(APPEND srcs "src/hal/mcpwm_hal.c") + list(APPEND srcs "src/hal/mcpwm_hal.c" + "src/hal/sdio_slave_hal.c" + ) endif() if(IDF_TARGET STREQUAL "esp32s2beta") diff --git a/components/soc/include/hal/sdio_slave_hal.h b/components/soc/include/hal/sdio_slave_hal.h new file mode 100644 index 0000000000..e95bf045df --- /dev/null +++ b/components/soc/include/hal/sdio_slave_hal.h @@ -0,0 +1,529 @@ +// Copyright 2015-2019 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/******************************************************************************* + * NOTICE + * The hal is not public api, don't use in application code. + * See readme.md in soc/include/hal/readme.md + ******************************************************************************/ + +// The HAL layer for SDIO slave (common part) + +// SDIO slave HAL usages: + +/* +Architecture: + +The whole SDIO slave peripheral consists of three parts: the registers (including the interrupt +control and shared registers), a send FIFO, and a receive FIFO. The document +``esp_slave_protocol.rst`` describes the functionality of the peripheral in detail. An SDIO host +will only ever access one of the three parts at any one time, thus the hardware functionality of +the SDIO slave peripheral are completely independent. Likewise, this HAL is organized in such a +fashion as to correspond to the three independent parts. + +The shared registers are quite simple: the slave can directly access them from the internal data +bus, while the host can access them by CMD52/53 with the correct address. As for the interrupts: +when an SDIO host interrupts the SDIO slave peripheral (by writing a command), the corresponding +bit in the interrupt register will be set; when the SDIO slave peripheral needs to interrupt the +host, it write some register to cause the host interrupt bit being set, and the slave hardware +will output the interrupt signal on the DAT1 line. + +For the FIFOs, the peripheral provides counters as registers so that the host can always know whether the slave +is ready to send/receive data. 
The HAL resets the counters during initialization, and the host should somehow
+inform the slave to reset the counters again if it should reboot (or lose the counter value for some reasons).
+Then the host can read/write the FIFOs by CMD53 commands according to the counters.
+
+In order to avoid copying data to/from the FIFOs or memory buffers each time, the HAL layer
+contains a descriptor queue (implemented as linked-list) that allows descriptors of memory
+buffers to be queued for transmission/reception. Once a buffer is queued, the HAL takes ownership
+of the buffer until some "finish" functions successfully return, indicating the
+transmission/reception of that buffer is complete. The ISR is invoked multiple times to iterate
+through the queued descriptors, and also to signal to the upper layer if a buffer has been
+freed.
+
+The HAL is used as below:
+
+- Receiving part:
+
+    1. Call `sdio_slave_hal_recv_start` to start the receiving DMA.
+
+       If there are already buffers loaded, the receiving will start from those buffers first.
+
+    2. Call `sdio_slave_hal_recv_init_desc` with a `sdio_slave_hal_recv_desc_t` and the buffer address to
+       associate the descriptor with the buffer.
+
+       The HAL initializes the descriptor with the determined length and maybe some extra data.
+
+    3. Call `sdio_slave_hal_load_buf` with the initialized descriptor of the buffer to load a
+       receiving buffer to the HAL.
+
+       When the DMA is started, the descriptor is loaded onto the DMA linked-list, and the
+       counter of receiving buffers is increased so that the host will know this by the
+       receiving interrupt. The hardware will automatically go through the linked list and write
+       data into the buffers loaded on the list.
+
+    4. (Optional, mandatory only when interrupt enabled) Call `sdio_slave_hal_recv_done` to check
+       and clear the receiving interrupt bits.
+
+    5. Call `sdio_slave_hal_recv_has_next_item` to check whether there are finished buffers.
+
+    6. Call `sdio_slave_hal_recv_unload_desc` for the same times as
+       `sdio_slave_hal_recv_has_next_item` successfully returns.
+
+    7. (Optional) Call `sdio_slave_hal_recv_reset_counter` to reset the counter to the currently
+       loaded but not used buffers if you want to reset the counter only. This is available only
+       when the DMA is stopped.
+
+    8. (Optional) Call `sdio_slave_hal_recv_flush_one_buffer` (recursively) if you want to
+       discard data of one (or more) buffers and load them again. This is available only when
+       the DMA is stopped.
+
+    9. (Optional when deinitialization) Call `sdio_slave_hal_recv_unload_desc` recursively to get
+       all the buffers loaded to the HAL, whether they are used or not. Don't do this when the
+       DMA is not stopped.
+
+- Sending part:
+
+    The sending driver is slightly different, since we are not using the re-start feature.
+    (TODO: re-write this part if the stitch mode is released)
+
+    1. Call `sdio_slave_hal_send_start` to start the sending DMA.
+
+       If there is already any data queued, it will be ready to be sent to the host now.
+
+    2. Call `sdio_slave_hal_send_queue` to queue the data to send.
+
+       If the interrupt is enabled, the ISR will be invoked.
+
+    3. (Required if interrupt enabled) Call `` to clear the interrupt bits used by the SW
+       invoking logic.
+
+    4. Call `sdio_slave_hal_send_new_packet_if_exist` to check and send a new packet (if there is
+       data queued).
+
+    5. Call `sdio_slave_hal_send_eof_happened` to check whether the previous packet is done.
+
+       It will also clear the interrupt status bit for this event.
+
+    6. Call `sdio_slave_hal_send_get_next_finished_arg` recursively to get the arguments for the
+       finished buffers.
+
+    7. (Optional when deinitialization) Call `sdio_slave_hal_send_flush_next_buffer` recursively
+       to get all buffers queued, regardless of whether they have been sent or not. Don't do this
+       when the DMA is not stopped.
+
+    8. (Optional) Call `sdio_slave_hal_send_reset_counter` to reset the counter to the currently
+       loaded but not yet sent buffers if you want to reset the counter only. Don't do this when
+       the DMA is not stopped.
+
+    Note a counter should be used when performing steps 2 and 6, to make sure that the queue size
+    is enough.
+
+- Host part:
+
+    1. Call `sdio_slave_hal_hostint_set_ena` and `sdio_slave_hal_hostint_get_ena` to
+       enable/disable the interrupts sent to the master. Note that the host can also modify the
+       same registers at the same time. Try to avoid using them outside the initialization process.
+
+    2. Call `sdio_slave_hal_hostint_send` and `sdio_slave_hal_hostint_clear` to trigger general
+       purpose interrupts or cancel all kinds of interrupts sent to the host. These interrupts are
+       set/cleared in a concurrent-safe way, so the slave can call these functions safely.
+
+    3. Call `sdio_slave_hal_slvint_fetch_clear` to fetch the general purpose interrupts sent by
+       the host to the slave. These interrupts will also be cleared after the calls.
+
+    4. Call `sdio_slave_hal_host_get_reg` and `sdio_slave_hal_host_set_reg` to read/write the
+       general purpose registers shared between the host and slave. Note that these registers are
+       also not concurrent-safe. Try not to write to the same register from two directions at the
+       same time.
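+
+A minimal illustrative sketch of the flows above, with example values only: the queue size, the
+buffer size and the names `recv_buffer`, `send_buffer`, `send_len` and `my_arg` are placeholders,
+error handling is omitted, and all buffers and descriptors must live in DMA-capable memory.
+
+    sdio_slave_context_t hal = {0};
+    hal.sending_mode = SDIO_SLAVE_SEND_STREAM;
+    hal.timing = SDIO_SLAVE_TIMING_PSEND_PSAMPLE;
+    hal.send_queue_size = 8;
+    hal.recv_buffer_size = 512;
+    // one descriptor of the ring buffer is kept unused, hence the +1
+    hal.send_desc_queue.size = SDIO_SLAVE_SEND_DESC_SIZE * (hal.send_queue_size + 1);
+    hal.send_desc_queue.data = heap_caps_malloc(hal.send_desc_queue.size,
+                                                MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
+    sdio_slave_hal_init(&hal);
+    sdio_slave_hal_hw_init(&hal);
+
+    // Receiving: associate a buffer with a descriptor, load it, then start the DMA
+    sdio_slave_hal_recv_desc_t *desc = heap_caps_malloc(sizeof(*desc),
+                                                        MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
+    sdio_slave_hal_recv_init_desc(&hal, desc, recv_buffer);
+    sdio_slave_hal_load_buf(&hal, desc);
+    sdio_slave_hal_recv_start(&hal);
+
+    // Receiving ISR: unload every finished descriptor
+    while (sdio_slave_hal_recv_done(&hal)) {
+        while (sdio_slave_hal_recv_has_next_item(&hal)) {
+            sdio_slave_hal_recv_desc_t *done = sdio_slave_hal_recv_unload_desc(&hal);
+            // received data is in done->buf, its length in done->length
+        }
+    }
+
+    // Sending: queue a buffer, then let the ISR push it to the host
+    sdio_slave_hal_send_start(&hal);
+    sdio_slave_hal_send_queue(&hal, send_buffer, send_len, my_arg);
+
+    // Sending ISR: collect finished buffers and start the next packet
+    sdio_slave_hal_send_handle_isr_invoke(&hal);
+    if (sdio_slave_hal_send_eof_happened(&hal)) {
+        void *finished_arg;
+        uint32_t returned_cnt;
+        while (sdio_slave_hal_send_get_next_finished_arg(&hal, &finished_arg, &returned_cnt) == ESP_OK) {
+            // the buffer associated with finished_arg can be reused now
+        }
+    }
+    sdio_slave_hal_send_new_packet_if_exist(&hal);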
Call `sdio_slave_hal_send_get_next_finished_arg` recursively to get the arguments for the + finished buffers. + + 7. (Optional when deinitialization) Call `sdio_slave_hal_send_flush_next_buffer` recursively + to get all buffers queued, regardless sent or not. Don't do this when the DMA is not stopped. + + 8. (Optional) Call `sdio_slave_hal_send_reset_counter` to reset the counter to current loaded + but not sent buffers if you want to reset the counter only. Don't do this when the DMA is not + stopped. + + Note a counter should be used when performing step 2 and 6, to make sure that the queue size + is enough. + +- Host part: + + 1. Call `sdio_slave_hal_hostint_set_ena` and `sdio_slave_hal_hostint_get_ena` to + enable/disable the interrupt sent to master. Note that the host can also modify the same + registers at the same time. Try to avoid using them outside the initialization process. + + 2. Call `sdio_slave_hal_hostint_send` and `sdio_slave_hal_hostint_clear` to trigger general + purpose interrupts or cancel all kinds of interrupts send to the host. These interrupts are + set/cleared in a concurrent-safe way, so the slave can call these functions safely. + + 3. Call `sdio_slave_hal_slvint_fetch_clear` to fetch the general purpose interrupts sent by + the host to the slave. These interrupts will also be cleared after the calls. + + 4. Call `sdio_slave_hal_host_get_reg` and `sdio_slave_hal_host_set_reg` to read/write the + general purpose shared between the host and slave. Note that these registers are also not + concurrent-safe. Try not to write to the same register from two directions at the same time. +*/ + +#pragma once +#include +#include "soc/lldesc.h" +#include "hal/sdio_slave_types.h" +#include "hal/sdio_slave_ll.h" + +/// Space used for each sending descriptor. Should initialize the sendbuf accoring to this size. +#define SDIO_SLAVE_SEND_DESC_SIZE sizeof(sdio_slave_hal_send_desc_t) + + +/// Status of the sending part +typedef enum { + STATE_IDLE = 1, + STATE_WAIT_FOR_START = 2, + STATE_SENDING = 3, + STATE_GETTING_RESULT = 4, + STATE_GETTING_UNSENT_DESC = 5, +} send_state_t; + +typedef struct { + uint8_t* data; ///< Address of the buffer + size_t size; ///< Size of the buffer, but can only queue (size/SDIO_SLAVE_SEND_DESC_SIZE)-1 descriptors + uint8_t* write_ptr; + uint8_t* read_ptr; + uint8_t* free_ptr; +} sdio_ringbuf_t; + +// Append two extra words to be used by the HAL. +// Should Initialize the member `data` of `send_desc_queue` of the HAL context +// with size of this desc * N. + +/// DMA descriptor with extra fields +typedef struct sdio_slave_hal_send_desc_s { + lldesc_t dma_desc; ///< Used by Hardware, has pointer linking to next desc + uint32_t pkt_len; ///< Accumulated length till this descriptor + void* arg; ///< Holding arguments indicating this buffer */ +} sdio_slave_hal_send_desc_t; + +/// Descriptor used by the receiving part, call `sdio_slave_hal_recv_init_desc` +/// to initialize it before use. +typedef lldesc_t sdio_slave_hal_recv_desc_t; +#define sdio_slave_hal_recv_desc_s lldesc_s +typedef STAILQ_HEAD(recv_stailq_head_s, sdio_slave_hal_recv_desc_s) sdio_slave_hal_recv_stailq_t; + + +/** HAL context structure. Call `sdio_slave_hal_init` to initialize it and + * configure required members before actually use the HAL. 
+ */ +typedef struct { + /// Hardware registers for this SDIO slave peripheral, configured by + /// `sdio_slave_hal_init` + struct { + slc_dev_t* slc; + host_dev_t* host; + hinf_dev_t* hinf; + }; + sdio_slave_sending_mode_t sending_mode; /**< Sending mode, should be manually configured before using the HAL. + * see `sdio_slave_sending_mode_t`. + */ + sdio_slave_timing_t timing; /**< Timing mode (launch edge and latch edge settings). Should be manually + * configured before using the HAL. `SDIO_SLAVE_TIMING_PSEND_PSAMPLE` is + * recommended by default. + */ + int send_queue_size; /**< Max buffers that can be queued before sending. Should be manually + * configured before using the HAL. + */ + size_t recv_buffer_size; /**< The size of each buffer. The host and slave should share a + * pre-negotiated value. Should be manually configured before using + * the HAL. + */ + sdio_ringbuf_t send_desc_queue; /**< The ring buffer used to hold queued descriptors. Should be manually + * initialized before using the HAL. + */ + //Internal status, no need to touch. + send_state_t send_state; // Current state of sending part. + uint32_t tail_pkt_len; // The accumulated send length of the tail packet. + sdio_slave_hal_send_desc_t* in_flight_head; // The head of linked list in-flight. + sdio_slave_hal_send_desc_t* in_flight_end; // The end of linked list in-flight. + sdio_slave_hal_send_desc_t* in_flight_next; // The header of linked list to be sent next time. + sdio_slave_hal_send_desc_t* returned_desc; // The last returned descriptor + + sdio_slave_hal_recv_stailq_t recv_link_list; // Linked list of buffers ready to hold data and the buffers already hold data. + volatile sdio_slave_hal_recv_desc_t* recv_cur_ret; // Next desc to return, NULL if all loaded descriptors are returned. +} sdio_slave_context_t ; + +/** + * Initialize the HAL, should provide buffers to the context and configure the + * members before this funciton is called. + * + * @param hal Context of the HAL layer. + */ +void sdio_slave_hal_init(sdio_slave_context_t *hal); + +/** + * Initialize the SDIO slave peripheral hardware. + * + * @param hal Context of the HAL layer. + */ +void sdio_slave_hal_hw_init(sdio_slave_context_t *hal); + +/** + * Set the IO ready for host to read. + * + * @param hal Context of the HAL layer. + * @param ready true to tell the host the slave is ready, otherwise false. + */ +void sdio_slave_hal_set_ioready(sdio_slave_context_t *hal, bool ready); + +/*--------------------------------------------------------------------------- + * Send + *--------------------------------------------------------------------------*/ + +/** + * The hardware sending DMA starts. If there is existing data, send them. + * + * @param hal Context of the HAL layer. + */ +esp_err_t sdio_slave_hal_send_start(sdio_slave_context_t *hal); + +/** + * Stops hardware sending DMA. + * + * @note The data in the queue, as well as the counter are not touched. + * @param hal Context of the HAL layer. + */ +void sdio_slave_hal_send_stop(sdio_slave_context_t *hal); + +/** + * Put some data into the sending queue. + * + * @note The caller should keeps the buffer, until the `arg` is returned by + * `sdio_slave_hal_send_get_next_finished_arg`. + * @note The caller should count to ensure there is enough space in the queue. + * The initial queue size is sizeof(sendbuf.data)/sizeof(sdio_slave_hal_send_desc_t)-1, + * Will decrease by one when this function successfully returns. 
+ *       Released only by `sdio_slave_hal_send_get_next_finished_arg` or
+ *       `sdio_slave_hal_send_flush_next_buffer`.
+ *
+ * @note The HAL is not thread-safe. The caller should use a spinlock to ensure
+ *       the `sdio_slave_hal_send_queue` and ... are not called at the same time.
+ *
+ * @param hal Context of the HAL layer.
+ * @param addr Address of data in the memory to send.
+ * @param len Length of data to send.
+ * @param arg Argument indicating this sending.
+ * @return Always ESP_OK.
+ */
+esp_err_t sdio_slave_hal_send_queue(sdio_slave_context_t *hal, uint8_t *addr, size_t len, void *arg);
+
+/**
+ * The ISR should call this to handle the SW invoking event.
+ * @param hal Context of the HAL layer.
+ */
+void sdio_slave_hal_send_handle_isr_invoke(sdio_slave_context_t *hal);
+
+/**
+ * Check whether there are no in-flight transactions, and send a new packet if there
+ * are new packets queued.
+ *
+ * @param hal Context of the HAL layer.
+ * @return
+ *  - ESP_OK: The DMA starts to send a new packet.
+ *  - ESP_ERR_NOT_FOUND: No packet waiting to be sent.
+ *  - ESP_ERR_INVALID_STATE: There is a packet in-flight.
+ */
+esp_err_t sdio_slave_hal_send_new_packet_if_exist(sdio_slave_context_t *hal);
+
+/**
+ * Check whether the sending EOF has happened and clear the interrupt.
+ *
+ * Call `sdio_slave_hal_send_get_next_finished_arg` recursively to retrieve arguments of finished
+ * buffers.
+ *
+ * @param hal Context of the HAL layer.
+ * @return true if happened, otherwise false.
+ */
+bool sdio_slave_hal_send_eof_happened(sdio_slave_context_t *hal);
+
+/**
+ * Get the arguments of finished packets. Call recursively until all the finished
+ * arguments are retrieved.
+ *
+ * @param hal Context of the HAL layer.
+ * @param out_arg Output argument of the finished buffer.
+ * @param out_returned_cnt Released queue size to be queued again.
+ * @return
+ *  - ESP_OK: if one argument retrieved.
+ *  - ESP_ERR_NOT_FOUND: All the arguments of the finished buffers are retrieved.
+ */
+esp_err_t sdio_slave_hal_send_get_next_finished_arg(sdio_slave_context_t *hal, void **out_arg, uint32_t* out_returned_cnt);
+
+/**
+ * Flush one buffer in the queue, no matter sent, canceled or not sent yet.
+ *
+ * Call recursively to clear the whole queue before deinitialization.
+ *
+ * @note Only call when the DMA is stopped!
+ * @param hal Context of the HAL layer.
+ * @param out_arg Argument indicating the flushed buffer.
+ * @param out_return_cnt Space in the queue released after this descriptor is flushed.
+ * @return
+ *  - ESP_ERR_INVALID_STATE: This function can only be called when the DMA is stopped.
+ *  - ESP_ERR_NOT_FOUND: if no buffer in the queue
+ *  - ESP_OK: if a buffer is successfully flushed and returned.
+ */
+esp_err_t sdio_slave_hal_send_flush_next_buffer(sdio_slave_context_t *hal, void **out_arg, uint32_t *out_return_cnt);
+
+/**
+ * Walk through all the unsent buffers and reset the counter to the accumulated length of them. The data will be kept.
+ *
+ * @note Only call when the DMA is stopped!
+ * @param hal Context of the HAL layer.
+ * @return
+ *  - ESP_ERR_INVALID_STATE: this function can only be called when the DMA is stopped
+ *  - ESP_OK: if success
+ */
+esp_err_t sdio_slave_hal_send_reset_counter(sdio_slave_context_t *hal);
+
+
+/*---------------------------------------------------------------------------
+ *                  Receive
+ *--------------------------------------------------------------------------*/
+/**
+ * Start the receiving DMA.
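+ *
+ * A typical receiving flow, as a rough sketch (illustrative only; `desc` and `buf` are
+ * hypothetical caller-owned objects, `buf` is assumed to be DMA-capable and of
+ * `recv_buffer_size` bytes; error handling omitted):
+ *
+ *     sdio_slave_hal_recv_init_desc(hal, &desc, buf);   // bind the buffer to a descriptor
+ *     sdio_slave_hal_load_buf(hal, &desc);              // hand it to the HAL
+ *     sdio_slave_hal_recv_start(hal);
+ *     // later, in the ISR:
+ *     if (sdio_slave_hal_recv_done(hal)) {
+ *         while (sdio_slave_hal_recv_has_next_item(hal)) {
+ *             sdio_slave_hal_recv_desc_t *done = sdio_slave_hal_recv_unload_desc(hal);
+ *             // process the buffer previously bound to `done`
+ *         }
+ *     }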
+ *
+ * @note If there are already some buffers loaded, will receive from them first.
+ * @param hal Context of the HAL layer.
+ */
+void sdio_slave_hal_recv_start(sdio_slave_context_t *hal);
+
+/**
+ * Stop the receiving DMA.
+ *
+ * @note Data and the counter will not be touched. You can still call
+ *       `sdio_slave_hal_recv_has_next_item` to get the received buffer.
+ *       Unused buffers loaded to the HAL will also stay in the `loaded`
+ *       state in the HAL, until returned by `sdio_slave_hal_recv_unload_desc`.
+ * @param hal Context of the HAL layer.
+ */
+void sdio_slave_hal_recv_stop(sdio_slave_context_t* hal);
+
+/**
+ * Associate the buffer with the given descriptor. The descriptor may also be initialized with some
+ * other data.
+ *
+ * @param hal Context of the HAL layer.
+ * @param desc Descriptor to associate with the buffer
+ * @param start Start address of the buffer
+ */
+void sdio_slave_hal_recv_init_desc(sdio_slave_context_t *hal, sdio_slave_hal_recv_desc_t *desc, uint8_t *start);
+
+/**
+ * Load the buffer to the HAL to be used to receive data.
+ *
+ * @note Loaded buffers will be returned to the upper layer only when:
+ *       1. Returned by `sdio_slave_hal_recv_has_next_item` when receiving into that buffer is
+ *          successfully done.
+ *       2. Returned by `sdio_slave_hal_recv_unload_desc` unconditionally.
+ * @param hal Context of the HAL layer.
+ * @param desc Descriptor to load to the HAL to receive.
+ */
+void sdio_slave_hal_load_buf(sdio_slave_context_t *hal, sdio_slave_hal_recv_desc_t *desc);
+
+/**
+ * Check and clear the interrupt indicating a buffer has finished receiving.
+ *
+ * @param hal Context of the HAL layer.
+ * @return true if interrupt triggered, otherwise false.
+ */
+bool sdio_slave_hal_recv_done(sdio_slave_context_t* hal);
+
+/**
+ * Call this function recursively to check whether there is any buffer that has
+ * finished receiving.
+ *
+ * Will walk through the linked list to find a newer finished buffer. Each successful return
+ * means there is one finished buffer, which you can get by `sdio_slave_hal_recv_unload_desc`.
+ * You can also call `sdio_slave_hal_recv_has_next_item` several times in a row before calling
+ * `sdio_slave_hal_recv_unload_desc` the same number of times.
+ *
+ * @param hal Context of the HAL layer.
+ * @return true if there is a finished buffer, otherwise false.
+ */
+bool sdio_slave_hal_recv_has_next_item(sdio_slave_context_t* hal);
+
+/**
+ * Unconditionally remove and return the first descriptor loaded to the HAL.
+ *
+ * Unless during de-initialization, `sdio_slave_hal_recv_has_next_item` should have succeeded the
+ * same number of times as this function is called, to ensure the returned descriptor has finished
+ * its receiving job.
+ *
+ * @param hal Context of the HAL layer.
+ * @return The removed descriptor, NULL means the linked-list is empty.
+ */
+sdio_slave_hal_recv_desc_t *sdio_slave_hal_recv_unload_desc(sdio_slave_context_t *hal);
+
+/**
+ * Walk through all the unused buffers and reset the counter to the number of
+ * them.
+ *
+ * @note Only call when the DMA is stopped!
+ * @param hal Context of the HAL layer.
+ */
+void sdio_slave_hal_recv_reset_counter(sdio_slave_context_t *hal);
+
+/**
+ * Take the used buffer at the head of the list, clear its finished flag and append it back to
+ * the end of the unused list, waiting to receive data again. Call recursively to flush more
+ * than one buffer.
+ *
+ * @note You will lose all the received data in the buffer.
+ * @note Only call when the DMA is stopped!
+ * @param hal Context of the HAL layer.
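+ *
+ * A sketch of use during a host-requested reset (illustrative only; `used_cnt` is a hypothetical
+ * caller-side count of finished-but-not-unloaded buffers):
+ *
+ *     sdio_slave_hal_recv_stop(hal);
+ *     while (used_cnt--) {
+ *         sdio_slave_hal_recv_flush_one_buffer(hal);
+ *     }
+ *     sdio_slave_hal_recv_start(hal);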
+ */ +void sdio_slave_hal_recv_flush_one_buffer(sdio_slave_context_t *hal); + + +/*--------------------------------------------------------------------------- + * Host + *--------------------------------------------------------------------------*/ + +/** + * Enable some of the interrupts for the host. + * + * @note May have concurrency issue wit the host or other tasks, suggest only use it during + * initialization. + * @param hal Context of the HAL layer. + * @param mask Bitwise mask for the interrupts to enable. + */ +void sdio_slave_hal_hostint_set_ena(sdio_slave_context_t *hal, const sdio_slave_hostint_t *mask); + +/** + * Get the enabled interrupts. + * + * @param hal Context of the HAL layer. + * @param out_int_mask Output of the enabled interrupts + */ +void sdio_slave_hal_hostint_get_ena(sdio_slave_context_t *hal, sdio_slave_hostint_t *out_int_mask); + +/** + * Send general purpose interrupt (slave send to host). + * @param hal Context of the HAL layer. + * @param mask Interrupts to send, only `SDIO_SLAVE_HOSTINT_BIT*` are allowed. + */ +void sdio_slave_hal_hostint_send(sdio_slave_context_t *hal, const sdio_slave_hostint_t *mask); + +/** + * Cleared the specified interrupts for the host. + * + * @param hal Context of the HAL layer. + * @param mask Interrupts to clear. + */ +void sdio_slave_hal_hostint_clear(sdio_slave_context_t *hal, const sdio_slave_hostint_t *mask); + + +/** + * Fetch the interrupt (host send to slave) status bits and clear all of them. + * @param hal Context of the HAL layer. + * @param out_int_mask Output interrupt status + */ +void sdio_slave_hal_slvint_fetch_clear(sdio_slave_context_t *hal, sdio_slave_ll_slvint_t *out_int_mask); + +/** + * Get the value of a shared general purpose register. + * + * @param hal Context of the HAL layer. + * @param pos Position of the register, 4 bytes share a word. 0-63 except 24-27. + * @return The register value. + */ +uint8_t sdio_slave_hal_host_get_reg(sdio_slave_context_t *hal, int pos); + +/** + * Set the value of shared general purpose register. + * + * @param hal Context of the HAL layer. + * @param pos Position of the register, 4 bytes share a word. 0-63 except 24-27. + * @param reg Value to set. + */ +void sdio_slave_hal_host_set_reg(sdio_slave_context_t *hal, int pos, uint8_t reg); + diff --git a/components/soc/include/hal/sdio_slave_ll.h b/components/soc/include/hal/sdio_slave_ll.h new file mode 100644 index 0000000000..129f288cad --- /dev/null +++ b/components/soc/include/hal/sdio_slave_ll.h @@ -0,0 +1,482 @@ +// Copyright 2015-2019 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/******************************************************************************* + * NOTICE + * The hal is not public api, don't use in application code. 
+ * See readme.md in soc/include/hal/readme.md + ******************************************************************************/ + +// The LL layer for ESP32 SDIO slave register operations +// It's strange but `tx_*` regs for host->slave transfers while `rx_*` regs for slave->host transfers +// To reduce ambiguity, we call (host->slave, tx) transfers receiving and (slave->host, rx) transfers receiving + +#pragma once + +#include "hal/sdio_slave_hal.h" +#include "soc/slc_struct.h" +#include "soc/slc_reg.h" +#include "soc/host_struct.h" +#include "soc/host_reg.h" +#include "soc/hinf_struct.h" +#include "soc/lldesc.h" + +/// Get address of the only SLC registers for ESP32 +#define sdio_slave_ll_get_slc(ID) (&SLC) +/// Get address of the only HOST registers for ESP32 +#define sdio_slave_ll_get_host(ID) (&HOST) +/// Get address of the only HINF registers for ESP32 +#define sdio_slave_ll_get_hinf(ID) (&HINF) + + +/// Mask of general purpose interrupts sending from the host. +typedef enum { + SDIO_SLAVE_LL_SLVINT_0 = BIT(0), ///< General purpose interrupt bit 0. + SDIO_SLAVE_LL_SLVINT_1 = BIT(1), + SDIO_SLAVE_LL_SLVINT_2 = BIT(2), + SDIO_SLAVE_LL_SLVINT_3 = BIT(3), + SDIO_SLAVE_LL_SLVINT_4 = BIT(4), + SDIO_SLAVE_LL_SLVINT_5 = BIT(5), + SDIO_SLAVE_LL_SLVINT_6 = BIT(6), + SDIO_SLAVE_LL_SLVINT_7 = BIT(7), +} sdio_slave_ll_slvint_t; + +/** + * Initialize the hardware. + * + * @param slc Address of the SLC registers + */ +static inline void sdio_slave_ll_init(slc_dev_t *slc) +{ + slc->slc0_int_ena.val = 0; + + slc->conf0.slc0_rx_auto_wrback = 1; + slc->conf0.slc0_token_auto_clr = 0; + slc->conf0.slc0_rx_loop_test = 0; + slc->conf0.slc0_tx_loop_test = 0; + + slc->conf1.slc0_rx_stitch_en = 0; + slc->conf1.slc0_tx_stitch_en = 0; + slc->conf1.slc0_len_auto_clr = 0; + + slc->rx_dscr_conf.slc0_token_no_replace = 1; +} + +/** + * Set the timing for the communication + * + * @param host Address of the host registers + * @param timing Timing configuration to set + */ +static inline void sdio_slave_ll_set_timing(host_dev_t *host, sdio_slave_timing_t timing) +{ + switch(timing) { + case SDIO_SLAVE_TIMING_PSEND_PSAMPLE: + host->conf.frc_sdio20 = 0x1f; + host->conf.frc_sdio11 = 0; + host->conf.frc_pos_samp = 0x1f; + host->conf.frc_neg_samp = 0; + break; + case SDIO_SLAVE_TIMING_PSEND_NSAMPLE: + host->conf.frc_sdio20 = 0x1f; + host->conf.frc_sdio11 = 0; + host->conf.frc_pos_samp = 0; + host->conf.frc_neg_samp = 0x1f; + break; + case SDIO_SLAVE_TIMING_NSEND_PSAMPLE: + host->conf.frc_sdio20 = 0; + host->conf.frc_sdio11 = 0x1f; + host->conf.frc_pos_samp = 0x1f; + host->conf.frc_neg_samp = 0; + break; + case SDIO_SLAVE_TIMING_NSEND_NSAMPLE: + host->conf.frc_sdio20 = 0; + host->conf.frc_sdio11 = 0x1f; + host->conf.frc_pos_samp = 0; + host->conf.frc_neg_samp = 0x1f; + break; + } +} + +/** + * Set the HS supported bit to be read by the host. + * + * @param hinf Address of the hinf registers + * @param hs true if supported, otherwise false. + */ +static inline void sdio_slave_ll_enable_hs(hinf_dev_t *hinf, bool hs) +{ + if (hs) { + hinf->cfg_data1.sdio_ver = 0x232; + hinf->cfg_data1.highspeed_enable = 1; + } +} + +/** + * Set the IO Ready bit to be read by the host. + * + * @param hinf Address of the hinf registers + * @param ready true if ready, otherwise false. + */ +static inline void sdio_slave_ll_set_ioready(hinf_dev_t *hinf, bool ready) +{ + hinf->cfg_data1.sdio_ioready1 = (ready ? 
1 : 0); //set IO ready to 1 to stop host from using +} + +/*--------------------------------------------------------------------------- + * Send + *--------------------------------------------------------------------------*/ +/** + * Reset the sending DMA. + * + * @param slc Address of the SLC registers + */ +static inline void sdio_slave_ll_send_reset(slc_dev_t *slc) +{ + //reset to flush previous packets + slc->conf0.slc0_rx_rst = 1; + slc->conf0.slc0_rx_rst = 0; +} + +/** + * Start the sending DMA with the given descriptor. + * + * @param slc Address of the SLC registers + * @param desc Descriptor to send + */ +static inline void sdio_slave_ll_send_start(slc_dev_t *slc, const lldesc_t *desc) +{ + slc->slc0_rx_link.addr = (uint32_t)desc; + slc->slc0_rx_link.start = 1; +} + +/** + * Write the PKT_LEN register to be written by the host to a certain value. + * + * @param slc Address of the SLC registers + * @param len Length to write + */ +static inline void sdio_slave_ll_send_write_len(slc_dev_t *slc, uint32_t len) +{ + slc->slc0_len_conf.val = FIELD_TO_VALUE2(SLC_SLC0_LEN_WDATA, len) | FIELD_TO_VALUE2(SLC_SLC0_LEN_WR, 1); +} + +/** + * Read the value of PKT_LEN register. The register may keep the same until read + * by the host. + * + * @param host Address of the host registers + * @return The value of PKT_LEN register. + */ +static inline uint32_t sdio_slave_ll_send_read_len(host_dev_t *host) +{ + return host->pkt_len.reg_slc0_len; +} + +/** + * Enable the rx_done interrupt. (sending) + * + * @param slc Address of the SLC registers + * @param ena true if enable, otherwise false. + */ +static inline void sdio_slave_ll_send_part_done_intr_ena(slc_dev_t *slc, bool ena) +{ + slc->slc0_int_ena.rx_done = (ena ? 1 : 0); +} + +/** + * Clear the rx_done interrupt. (sending) + * + * @param slc Address of the SLC registers + */ +static inline void sdio_slave_ll_send_part_done_clear(slc_dev_t *slc) +{ + slc->slc0_int_clr.rx_done = 1; +} + +/** + * Check whether the hardware is ready for the SW to use rx_done to invoke + * the ISR. + * + * @param slc Address of the SLC registers + * @return true if ready, otherwise false. + */ +static inline bool sdio_slave_ll_send_invoker_ready(slc_dev_t *slc) +{ + return slc->slc0_int_raw.rx_done; +} + +/** + * Stop the sending DMA. + * + * @param slc Address of the SLC registers + */ +static inline void sdio_slave_ll_send_stop(slc_dev_t *slc) +{ + slc->slc0_rx_link.stop = 1; +} + +/** + * Enable the sending interrupt (rx_eof). + * + * @param slc Address of the SLC registers + * @param ena true to enable, false to disable + */ +static inline void sdio_slave_ll_send_intr_ena(slc_dev_t *slc, bool ena) +{ + slc->slc0_int_ena.rx_eof = (ena? 1: 0); +} + +/** + * Clear the sending interrupt (rx_eof). + * + * @param slc Address of the SLC registers + */ +static inline void sdio_slave_ll_send_intr_clr(slc_dev_t *slc) +{ + slc->slc0_int_clr.rx_eof = 1; +} + +/** + * Check whether the sending is done. + * + * @param slc Address of the SLC registers + * @return true if done, otherwise false + */ +static inline bool sdio_slave_ll_send_done(slc_dev_t *slc) +{ + return slc->slc0_int_st.rx_eof != 0; +} + +/** + * Clear the host interrupt indicating the slave having packet to be read. 
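+ *
+ * This is used, for example, after the dummy transfer started by `send_isr_invoker_enable()` in
+ * sdio_slave_hal.c, so that the dummy descriptor does not leave a stale "new packet" interrupt
+ * pending on the host side.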
+ * + * @param host Address of the host registers + */ +static inline void sdio_slave_ll_send_hostint_clr(host_dev_t *host) +{ + host->slc0_int_clr.rx_new_packet = 1; +} + +/*--------------------------------------------------------------------------- + * Receive + *--------------------------------------------------------------------------*/ +/** + * Enable the receiving interrupt. + * + * @param slc Address of the SLC registers + * @param ena + */ +static inline void sdio_slave_ll_recv_intr_ena(slc_dev_t *slc, bool ena) +{ + slc->slc0_int_ena.tx_done = (ena ? 1 : 0); +} + +/** + * Start receiving DMA with the given descriptor. + * + * @param slc Address of the SLC registers + * @param desc Descriptor of the receiving buffer. + */ +static inline void sdio_slave_ll_recv_start(slc_dev_t *slc, lldesc_t *desc) +{ + slc->slc0_tx_link.addr = (uint32_t)desc; + slc->slc0_tx_link.start = 1; +} + +/** + * Increase the receiving buffer counter by 1. + * + * @param slc Address of the SLC registers + */ +static inline void sdio_slave_ll_recv_size_inc(slc_dev_t *slc) +{ + // fields wdata and inc_more should be written by the same instruction. + slc->slc0_token1.val = FIELD_TO_VALUE2(SLC_SLC0_TOKEN1_WDATA, 1) | FIELD_TO_VALUE2(SLC_SLC0_TOKEN1_INC_MORE, 1); +} + +/** + * Reset the receiving buffer. + * + * @param slc Address of the SLC registers + */ +static inline void sdio_slave_ll_recv_size_reset(slc_dev_t *slc) +{ + slc->slc0_token1.val = FIELD_TO_VALUE2(SLC_SLC0_TOKEN1_WDATA, 0) | FIELD_TO_VALUE2(SLC_SLC0_TOKEN1_WR, 1); +} + +/** + * Check whether there is a receiving finished event. + * + * @param slc Address of the SLC registers + * @return + */ +static inline bool sdio_slave_ll_recv_done(slc_dev_t *slc) +{ + return slc->slc0_int_raw.tx_done != 0; +} + +/** + * Clear the receiving finished interrupt. + * + * @param slc Address of the SLC registers + */ +static inline void sdio_slave_ll_recv_done_clear(slc_dev_t *slc) +{ + slc->slc0_int_clr.tx_done = 1; +} + +/** + * Restart the DMA. Call after you modified the next pointer of the tail descriptor to the appended + * descriptor. + * + * @param slc Address of the SLC registers + */ +static inline void sdio_slave_ll_recv_restart(slc_dev_t *slc) +{ + slc->slc0_tx_link.restart = 1; +} + +/** + * Reset the receiving DMA. + * + * @param slc Address of the SLC registers + */ +static inline void sdio_slave_ll_recv_reset(slc_dev_t *slc) +{ + slc->conf0.slc0_tx_rst = 1; + slc->conf0.slc0_tx_rst = 0; +} + +/** + * Stop the receiving DMA. + * + * @param slc Address of the SLC registers + */ +static inline void sdio_slave_ll_recv_stop(slc_dev_t *slc) +{ + slc->slc0_tx_link.stop = 1; +} + +/*--------------------------------------------------------------------------- + * Host + *--------------------------------------------------------------------------*/ +/** + * Get the address of the shared general purpose register. Internal. + * + * @param host Address of the host registers + * @param pos Position of the register, 0-63 except 24-27. + * @return address of the register. + */ +static inline intptr_t sdio_slave_ll_host_get_w_reg(host_dev_t* host, int pos) +{ + return (intptr_t )&(host->conf_w0) + pos + (pos>23?4:0) + (pos>31?12:0); +} + +/** + * Get the value of the shared general purpose register. + * + * @param host Address of the host registers + * @param pos Position of the register, 0-63, except 24-27. + * @return value of the register. 
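+ *
+ * For example, `pos = 0` maps to the lowest byte of `conf_w0`. Positions 24-27 are excluded
+ * because that word of the register space is skipped by the `(pos>23?4:0)` term in
+ * `sdio_slave_ll_host_get_w_reg` above.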
+ */ +static inline uint8_t sdio_slave_ll_host_get_reg(host_dev_t *host, int pos) +{ + return *(uint8_t*)sdio_slave_ll_host_get_w_reg(host, pos); +} + +/** + * Set the value of the shared general purpose register. + * + * @param host Address of the host registers + * @param pos Position of the register, 0-63, except 24-27. + * @param reg Value to set. + */ +static inline void sdio_slave_ll_host_set_reg(host_dev_t* host, int pos, uint8_t reg) +{ + uint32_t* addr = (uint32_t*)(sdio_slave_ll_host_get_w_reg(host, pos) & (~3)); + uint32_t shift = (pos % 4) * 8; + *addr &= ~(0xff << shift); + *addr |= ((uint32_t)reg << shift); +} + +/** + * Get the interrupt enable bits for the host. + * + * @param host Address of the host registers + * @return Enabled interrupts + */ +static inline sdio_slave_hostint_t sdio_slave_ll_host_get_intena(host_dev_t* host) +{ + return host->slc0_func1_int_ena.val; +} + +/** + * Set the interrupt enable bits for the host. + * + * @param host Address of the host registers + * @param mask Mask of interrupts to enable + */ +static inline void sdio_slave_ll_host_set_intena(host_dev_t *host, const sdio_slave_hostint_t *mask) +{ + host->slc0_func1_int_ena.val = (*mask); +} + +/** + * Clear the interrupt bits for the host. + * @param host Address of the host registers + * @param mask Mask of interrupts to clear. + */ +static inline void sdio_slave_ll_host_intr_clear(host_dev_t* host, const sdio_slave_hostint_t *mask) +{ + host->slc0_int_clr.val = (*mask); +} + +/** + * Send general purpose interrupts to the host. + * @param slc Address of the SLC registers + * @param mask Mask of interrupts to seend to host + */ +static inline void sdio_slave_ll_host_send_int(slc_dev_t *slc, const sdio_slave_hostint_t *mask) +{ + //use registers in SLC to trigger, rather than write HOST registers directly + //other interrupts than tohost interrupts are not supported yet + slc->intvec_tohost.slc0_intvec = (*mask); +} + +/** + * Enable some of the slave interrups (send from host) + * + * @param slc Address of the SLC registers + * @param mask Mask of interrupts to enable, all those set to 0 will be disabled. + */ +static inline void sdio_slave_ll_slvint_set_ena(slc_dev_t *slc, const sdio_slave_ll_slvint_t *mask) +{ + //other interrupts are not enabled + slc->slc0_int_ena.val = (slc->slc0_int_ena.val & (~0xff)) | ((*mask) & 0xff); +} + +/** + * Fetch the slave interrupts (send from host) and clear them. + * + * @param slc Address of the SLC registers + * @param out_slv_int Output of the slave interrupts fetched and cleared. + */ +static inline void sdio_slave_ll_slvint_fetch_clear(slc_dev_t *slc, sdio_slave_ll_slvint_t *out_slv_int) +{ + sdio_slave_ll_slvint_t slv_int = slc->slc0_int_st.val & 0xff; + *out_slv_int = slv_int; + slc->slc0_int_clr.val = slv_int; +} + diff --git a/components/soc/include/hal/sdio_slave_types.h b/components/soc/include/hal/sdio_slave_types.h new file mode 100644 index 0000000000..fd3e4050e4 --- /dev/null +++ b/components/soc/include/hal/sdio_slave_types.h @@ -0,0 +1,47 @@ +// Copyright 2015-2019 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "soc/soc.h" + +/// Mask of interrupts sending to the host. +typedef enum { + SDIO_SLAVE_HOSTINT_BIT0 = BIT(0), ///< General purpose interrupt bit 0. + SDIO_SLAVE_HOSTINT_BIT1 = BIT(1), + SDIO_SLAVE_HOSTINT_BIT2 = BIT(2), + SDIO_SLAVE_HOSTINT_BIT3 = BIT(3), + SDIO_SLAVE_HOSTINT_BIT4 = BIT(4), + SDIO_SLAVE_HOSTINT_BIT5 = BIT(5), + SDIO_SLAVE_HOSTINT_BIT6 = BIT(6), + SDIO_SLAVE_HOSTINT_BIT7 = BIT(7), + SDIO_SLAVE_HOSTINT_SEND_NEW_PACKET = BIT(23), ///< New packet available +} sdio_slave_hostint_t; + + +/// Timing of SDIO slave +typedef enum { + SDIO_SLAVE_TIMING_PSEND_PSAMPLE = 0,/**< Send at posedge, and sample at posedge. Default value for HS mode. + * Normally there's no problem using this to work in DS mode. + */ + SDIO_SLAVE_TIMING_NSEND_PSAMPLE ,///< Send at negedge, and sample at posedge. Default value for DS mode and below. + SDIO_SLAVE_TIMING_PSEND_NSAMPLE, ///< Send at posedge, and sample at negedge + SDIO_SLAVE_TIMING_NSEND_NSAMPLE, ///< Send at negedge, and sample at negedge +} sdio_slave_timing_t; + +/// Configuration of SDIO slave mode +typedef enum { + SDIO_SLAVE_SEND_STREAM = 0, ///< Stream mode, all packets to send will be combined as one if possible + SDIO_SLAVE_SEND_PACKET = 1, ///< Packet mode, one packets will be sent one after another (only increase packet_len if last packet sent). +} sdio_slave_sending_mode_t; \ No newline at end of file diff --git a/components/soc/src/hal/sdio_slave_hal.c b/components/soc/src/hal/sdio_slave_hal.c new file mode 100644 index 0000000000..5b4464846b --- /dev/null +++ b/components/soc/src/hal/sdio_slave_hal.c @@ -0,0 +1,729 @@ +// Copyright 2015-2019 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
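+
+/* A small usage sketch (illustrative only; `hal` is assumed to be a configured `sdio_slave_context_t*`):
+ * the host-interrupt values defined in hal/sdio_slave_types.h are plain bit masks, so they can be
+ * combined before being passed by pointer to the host-interrupt calls implemented below, e.g.
+ *
+ *     const sdio_slave_hostint_t mask = SDIO_SLAVE_HOSTINT_SEND_NEW_PACKET | SDIO_SLAVE_HOSTINT_BIT0;
+ *     sdio_slave_hal_hostint_set_ena(hal, &mask);
+ */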
+ +// The HAL layer for SDIO slave (common part) + +#include +#include +#include +#include +#include +#include "hal/sdio_slave_hal.h" +#include "hal/hal_defs.h" +#include "esp_attr.h" + + +#define SDIO_SLAVE_CHECK(res, str, ret_val) do { if(!(res)){\ + HAL_LOGE(TAG, "%s", str);\ + return ret_val;\ +} }while (0) + +const char TAG[] = "SDIO_HAL"; + +static esp_err_t init_send_queue(sdio_slave_context_t *hal); + +/**************** Ring buffer for SDIO sending use *****************/ +typedef enum { + RINGBUF_GET_ONE = 0, + RINGBUF_GET_ALL = 1, +} ringbuf_get_all_t; + +typedef enum { + RINGBUF_WRITE_PTR, + RINGBUF_READ_PTR, + RINGBUF_FREE_PTR, +} sdio_ringbuf_pointer_t; + +static esp_err_t sdio_ringbuf_send(sdio_ringbuf_t *buf, esp_err_t (*copy_callback)(uint8_t *, void *), void *arg); +static inline esp_err_t sdio_ringbuf_recv(sdio_ringbuf_t *buf, uint8_t **start, uint8_t **end, ringbuf_get_all_t get_all); +static inline int sdio_ringbuf_return(sdio_ringbuf_t* buf, uint8_t *ptr); + +#define _SEND_DESC_NEXT(x) STAILQ_NEXT(&((sdio_slave_hal_send_desc_t*)x)->dma_desc, qe) +#define SEND_DESC_NEXT(x) (sdio_slave_hal_send_desc_t*)_SEND_DESC_NEXT(x) +#define SEND_DESC_NEXT_SET(x, target) do { \ + _SEND_DESC_NEXT(x)=(lldesc_t*)target; \ + }while(0) + +static esp_err_t link_desc_to_last(uint8_t* desc, void* arg) +{ + SEND_DESC_NEXT_SET(arg, desc); + return ESP_OK; +} + +//calculate a pointer with offset to a original pointer of the specific ringbuffer +static inline uint8_t* sdio_ringbuf_offset_ptr(sdio_ringbuf_t *buf, sdio_ringbuf_pointer_t ptr, uint32_t offset) +{ + uint8_t *buf_ptr; + switch (ptr) { + case RINGBUF_WRITE_PTR: + buf_ptr = buf->write_ptr; + break; + case RINGBUF_READ_PTR: + buf_ptr = buf->read_ptr; + break; + case RINGBUF_FREE_PTR: + buf_ptr = buf->free_ptr; + break; + default: + abort(); + } + + uint8_t *offset_ptr=buf_ptr+offset; + if (offset_ptr >= buf->data + buf->size) { + offset_ptr -= buf->size; + } + return offset_ptr; +} + +static esp_err_t sdio_ringbuf_send(sdio_ringbuf_t *buf, esp_err_t (*copy_callback)(uint8_t *, void *), void *arg) +{ + uint8_t* get_ptr = sdio_ringbuf_offset_ptr(buf, RINGBUF_WRITE_PTR, SDIO_SLAVE_SEND_DESC_SIZE); + esp_err_t err = ESP_OK; + if (copy_callback) { + (*copy_callback)(get_ptr, arg); + } + if (err != ESP_OK) return err; + + buf->write_ptr = get_ptr; + return ESP_OK; +} + +// this ringbuf is a return-before-recv-again strategy +// since this is designed to be called in the ISR, no parallel logic +static inline esp_err_t sdio_ringbuf_recv(sdio_ringbuf_t *buf, uint8_t **start, uint8_t **end, ringbuf_get_all_t get_all) +{ + assert(buf->free_ptr == buf->read_ptr); //must return before recv again + if (start == NULL && end == NULL) return ESP_ERR_INVALID_ARG; // must have a output + if (buf->read_ptr == buf->write_ptr) return ESP_ERR_NOT_FOUND; // no data + + uint8_t *get_start = sdio_ringbuf_offset_ptr(buf, RINGBUF_READ_PTR, SDIO_SLAVE_SEND_DESC_SIZE); + + if (get_all != RINGBUF_GET_ONE) { + buf->read_ptr = buf->write_ptr; + } else { + buf->read_ptr = get_start; + } + + if (start != NULL) { + *start = get_start; + } + if (end != NULL) { + *end = buf->read_ptr; + } + return ESP_OK; +} + +static inline int sdio_ringbuf_return(sdio_ringbuf_t* buf, uint8_t *ptr) +{ + assert(sdio_ringbuf_offset_ptr(buf, RINGBUF_FREE_PTR, SDIO_SLAVE_SEND_DESC_SIZE) == ptr); + int size = (buf->read_ptr + buf->size - buf->free_ptr) % buf->size; + int count = size / SDIO_SLAVE_SEND_DESC_SIZE; + assert(count * SDIO_SLAVE_SEND_DESC_SIZE==size); + buf->free_ptr = buf->read_ptr; 
+ return count; +} + +static inline uint8_t* sdio_ringbuf_peek_front(sdio_ringbuf_t* buf) +{ + if (buf->read_ptr != buf->write_ptr) { + return sdio_ringbuf_offset_ptr(buf, RINGBUF_READ_PTR, SDIO_SLAVE_SEND_DESC_SIZE); + } else { + return NULL; + } +} + +static inline uint8_t* sdio_ringbuf_peek_rear(sdio_ringbuf_t *buf) +{ + return buf->write_ptr; +} + +static inline bool sdio_ringbuf_empty(sdio_ringbuf_t* buf) +{ + return (buf->read_ptr == buf->write_ptr); +} + +/**************** End of Ring buffer *****************/ + +void sdio_slave_hal_init(sdio_slave_context_t *hal) +{ + hal->host = sdio_slave_ll_get_host(0); + hal->slc = sdio_slave_ll_get_slc(0); + hal->hinf = sdio_slave_ll_get_hinf(0); + hal->send_state = STATE_IDLE; + hal->recv_link_list = (sdio_slave_hal_recv_stailq_t)STAILQ_HEAD_INITIALIZER(hal->recv_link_list); + + init_send_queue(hal); +} + +void sdio_slave_hal_hw_init(sdio_slave_context_t *hal) +{ + sdio_slave_ll_init(hal->slc); + sdio_slave_ll_enable_hs(hal->hinf, true); + sdio_slave_ll_set_timing(hal->host, hal->timing); + sdio_slave_ll_slvint_t intr_ena = 0xff; + sdio_slave_ll_slvint_set_ena(hal->slc, &intr_ena); +} + +static esp_err_t init_send_queue(sdio_slave_context_t *hal) +{ + esp_err_t ret; + esp_err_t rcv_res; + sdio_ringbuf_t *buf = &(hal->send_desc_queue); + + //initialize pointers + buf->write_ptr = buf->data; + buf->read_ptr = buf->data; + buf->free_ptr = buf->data; + + sdio_slave_hal_send_desc_t *first = NULL, *last = NULL; + //no copy for the first descriptor + + ret = sdio_ringbuf_send(buf, NULL, NULL); + if (ret != ESP_OK) return ret; + + //loop in the ringbuf to link all the desc one after another as a ring + for (int i = 0; i < hal->send_queue_size + 1; i++) { + rcv_res = sdio_ringbuf_recv(buf, (uint8_t **) &last, NULL, RINGBUF_GET_ONE); + assert (rcv_res == ESP_OK); + + ret = sdio_ringbuf_send(buf, link_desc_to_last, last); + if (ret != ESP_OK) return ret; + + sdio_ringbuf_return(buf, (uint8_t *) last); + } + + first = NULL; + last = NULL; + //clear the queue + rcv_res = sdio_ringbuf_recv(buf, (uint8_t **) &first, (uint8_t **) &last, RINGBUF_GET_ALL); + assert (rcv_res == ESP_OK); + assert(first == last); //there should be only one desc remain + sdio_ringbuf_return(buf, (uint8_t *) first); + return ESP_OK; +} + +void sdio_slave_hal_set_ioready(sdio_slave_context_t *hal, bool ready) +{ + sdio_slave_ll_set_ioready(hal->hinf, ready); //set IO ready to 1 to allow host to use +} + + +/*--------------------------------------------------------------------------- + * Send + * + * The hardware has a cache, so that once a descriptor is loaded onto the linked-list, it cannot be modified + * until returned (used) by the hardware. This forbids us from loading descriptors onto the linked list during + * the transfer (or the time waiting for host to start a transfer). However, we use a "ringbuffer" (different from + * the one in ``freertos/`` folder) holding descriptors to solve this: + + * 1. The driver allocates continuous memory for several buffer descriptors (the maximum buffer number) during + * initialization. Then the driver points the STAILQ_NEXT pointer of all the descriptors except the last one + * to the next descriptor of each of them. Then the pointer of the last descriptor points back to the first one: + * now the descriptor is in a ring. + + * 2. The "ringbuffer" has a write pointer points to where app can write new descriptor. 
The app writes the new descriptor
+ *    indicated by the write pointer without touching the STAILQ_NEXT pointer, so that the descriptors are always in a
+ *    ring-like linked-list. The app never touches the part of the linked-list being used by the hardware.
+
+ * 3. When the hardware needs some data to send, it automatically picks a part of the linked descriptors. According to the mode:
+ *    - Packet mode: only the descriptor next to the last one sent is picked;
+ *    - Stream mode: the whole unsent linked list is picked, starting from the one above, up to the latest linked one.
+
+ * The driver removes the STAILQ_NEXT pointer of the last descriptor and puts the head of this part into the DMA controller, so
+ *  that it looks like just a linear linked-list rather than a ring to the hardware.
+
+ * 4. The counter of the sending FIFO can increase when the app loads new buffers (in STREAM_MODE) or when a new transfer should
+ *    start (in PACKET_MODE).
+
+ * 5. When the sending transfer is finished, the driver goes through the descriptors just sent in the ISR and pushes the
+ *    ``arg`` member of each descriptor back to the app through the queue, so that the app can handle the finished buffers. The
+ *    driver also fixes the STAILQ_NEXT pointer of the last descriptor so that the descriptors are in a ring again.
+----------------------------------------------------------------------------*/
+static inline void send_set_state(sdio_slave_context_t *hal, send_state_t state)
+{
+    hal->send_state = state;
+}
+
+static inline send_state_t send_get_state(sdio_slave_context_t* hal)
+{
+    return hal->send_state;
+}
+
+DMA_ATTR static const lldesc_t start_desc = {
+    .owner = 1,
+    .buf = (void*)0x3ffbbbbb, //assign a dma-capable pointer other than NULL, which will not be used
+    .size = 1,
+    .length = 1,
+    .eof = 1,
+};
+
+//force trigger rx_done interrupt. the interrupt is abused to invoke the ISR from the app by the enable bit and is never cleared.
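+//(How the invoker works: `send_isr_invoker_enable()` below leaves the raw rx_done bit set permanently;
+//`send_isr_invoke()` then raises the interrupt simply by setting the enable bit when a buffer is queued,
+//and `sdio_slave_hal_send_handle_isr_invoke()` clears the enable bit again from the ISR.)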
+static void send_isr_invoker_enable(const sdio_slave_context_t *hal) +{ + sdio_slave_ll_send_reset(hal->slc); + sdio_slave_ll_send_start(hal->slc, &start_desc); + //wait for rx_done + while(!sdio_slave_ll_send_invoker_ready(hal->slc)); + sdio_slave_ll_send_stop(hal->slc); + sdio_slave_ll_send_hostint_clr(hal->host); +} + +static void send_isr_invoker_disable(sdio_slave_context_t *hal) +{ + sdio_slave_ll_send_part_done_clear(hal->slc); +} + +void sdio_slave_hal_send_handle_isr_invoke(sdio_slave_context_t *hal) +{ + sdio_slave_ll_send_part_done_intr_ena(hal->slc, false); +} + +//start hw operation with existing data (if exist) +esp_err_t sdio_slave_hal_send_start(sdio_slave_context_t *hal) +{ + SDIO_SLAVE_CHECK(send_get_state(hal) == STATE_IDLE, + "already started", ESP_ERR_INVALID_STATE); + send_set_state(hal, STATE_WAIT_FOR_START); + send_isr_invoker_enable(hal); + sdio_slave_ll_send_intr_clr(hal->slc); + sdio_slave_ll_send_intr_ena(hal->slc, true); + return ESP_OK; +} + +//only stop hw operations, no touch to data as well as counter +void sdio_slave_hal_send_stop(sdio_slave_context_t *hal) +{ + sdio_slave_ll_send_stop(hal->slc); + send_isr_invoker_disable(hal); + sdio_slave_ll_send_intr_ena(hal->slc, false); + send_set_state(hal, STATE_IDLE); +} + +static void send_new_packet(sdio_slave_context_t *hal) +{ + // since eof is changed, we have to stop and reset the link list, + // and restart new link list operation + sdio_slave_hal_send_desc_t *const start_desc = hal->in_flight_head; + sdio_slave_hal_send_desc_t *const end_desc = hal->in_flight_end; + assert(start_desc != NULL && end_desc != NULL); + + sdio_slave_ll_send_stop(hal->slc); + sdio_slave_ll_send_reset(hal->slc); + sdio_slave_ll_send_start(hal->slc, (lldesc_t*)start_desc); + + // update pkt_len register to allow host reading. + sdio_slave_ll_send_write_len(hal->slc, end_desc->pkt_len); + ESP_EARLY_LOGV(TAG, "send_length_write: %d, last_len: %08X", end_desc->pkt_len, sdio_slave_ll_send_read_len(hal->host)); + + send_set_state(hal, STATE_SENDING); + + ESP_EARLY_LOGD(TAG, "restart new send: %p->%p, pkt_len: %d", start_desc, end_desc, end_desc->pkt_len); +} + +static esp_err_t send_check_new_packet(sdio_slave_context_t *hal) +{ + esp_err_t ret; + sdio_slave_hal_send_desc_t *start = NULL; + sdio_slave_hal_send_desc_t *end = NULL; + if (hal->sending_mode == SDIO_SLAVE_SEND_PACKET) { + ret = sdio_ringbuf_recv(&(hal->send_desc_queue), (uint8_t **) &start, (uint8_t **) &end, RINGBUF_GET_ONE); + } else { //stream mode + ret = sdio_ringbuf_recv(&(hal->send_desc_queue), (uint8_t **) &start, (uint8_t **) &end, RINGBUF_GET_ALL); + } + if (ret == ESP_OK) { + hal->in_flight_head = start; + hal->in_flight_end = end; + end->dma_desc.eof = 1; + //temporarily break the link ring here, the ring will be re-connected in ``send_isr_eof()``. + hal->in_flight_next = SEND_DESC_NEXT(end); + SEND_DESC_NEXT_SET(end, NULL); + } + return ESP_OK; +} + +bool sdio_slave_hal_send_eof_happened(sdio_slave_context_t* hal) +{ + // Goto idle state (cur_start=NULL) if transmission done, + // also update sequence and recycle descs. 
+ if (sdio_slave_ll_send_done(hal->slc)) { + //check current state + assert(send_get_state(hal) == STATE_SENDING); + sdio_slave_ll_send_intr_clr(hal->slc); + return true; + } else { + return false; + } +} + +//clear counter but keep data +esp_err_t sdio_slave_hal_send_reset_counter(sdio_slave_context_t* hal) +{ + SDIO_SLAVE_CHECK(send_get_state(hal) == STATE_IDLE, + "reset counter when transmission started", ESP_ERR_INVALID_STATE); + + uint32_t len; + sdio_slave_ll_send_write_len(hal->slc, 0); + ESP_EARLY_LOGV(TAG, "send_length_write: %d, last_len: %08X", len, sdio_slave_ll_send_read_len(hal->host)); + + hal->tail_pkt_len = 0; + sdio_slave_hal_send_desc_t *desc = hal->in_flight_head; + while(desc != NULL) { + hal->tail_pkt_len += desc->dma_desc.length; + desc->pkt_len = hal->tail_pkt_len; + desc = SEND_DESC_NEXT(desc); + } + // in theory the desc should be the one right next to the last of in_flight_head, + // but the link of last is NULL, so get the desc from the ringbuf directly. + desc = (sdio_slave_hal_send_desc_t*)sdio_ringbuf_peek_front(&(hal->send_desc_queue)); + while(desc != NULL) { + hal->tail_pkt_len += desc->dma_desc.length; + desc->pkt_len = hal->tail_pkt_len; + desc = SEND_DESC_NEXT(desc); + } + + return ESP_OK; +} + +static esp_err_t send_get_inflight_desc(sdio_slave_context_t *hal, void **out_arg, uint32_t *out_returned_cnt, + bool init) +{ + esp_err_t ret; + if (init) { + assert(hal->returned_desc == NULL); + hal->returned_desc = hal->in_flight_head; + send_set_state(hal, STATE_GETTING_RESULT); + } + + if (hal->returned_desc != NULL) { + *out_arg = hal->returned_desc->arg; + hal->returned_desc = SEND_DESC_NEXT(hal->returned_desc); + ret = ESP_OK; + } else { + if (hal->in_flight_head != NULL) { + // fix the link broken of last desc when being sent + assert(hal->in_flight_end != NULL); + SEND_DESC_NEXT_SET(hal->in_flight_end, hal->in_flight_next); + + *out_returned_cnt = sdio_ringbuf_return(&(hal->send_desc_queue), (uint8_t*)hal->in_flight_head); + } + + hal->in_flight_head = NULL; + hal->in_flight_end = NULL; + + ret = ESP_ERR_NOT_FOUND; + } + return ret; +} + +static esp_err_t send_get_unsent_desc(sdio_slave_context_t *hal, void **out_arg, uint32_t *out_return_cnt) +{ + esp_err_t ret; + sdio_slave_hal_send_desc_t *head, *tail; + ret = sdio_ringbuf_recv(&(hal->send_desc_queue), (uint8_t **) &head, (uint8_t **) &tail, RINGBUF_GET_ONE); + + if (ret == ESP_OK) { + //currently each packet takes only one desc. + assert(head == tail); + (*out_arg) = head->arg; + (*out_return_cnt) = sdio_ringbuf_return(&(hal->send_desc_queue), (uint8_t*) head); + } else if (ret == ESP_ERR_NOT_FOUND) { + // if in wait to send state, set the sequence number of tail to the value last sent, just as if the packet wait to + // send never queued. 
+ // Go to idle state (cur_end!=NULL and cur_start=NULL) + send_set_state(hal, STATE_IDLE); + hal->tail_pkt_len = sdio_slave_ll_send_read_len(hal->host); + } + return ret; +} + +esp_err_t sdio_slave_hal_send_get_next_finished_arg(sdio_slave_context_t *hal, void **out_arg, uint32_t* out_returned_cnt) +{ + bool init = (send_get_state(hal) == STATE_SENDING); + if (init) { + assert(hal->in_flight_head != NULL); + } else { + assert(send_get_state(hal) == STATE_GETTING_RESULT); + } + *out_returned_cnt = 0; + + esp_err_t ret = send_get_inflight_desc(hal, out_arg, out_returned_cnt, init); + + if (ret == ESP_ERR_NOT_FOUND) { + // Go to wait for packet state + send_set_state(hal, STATE_WAIT_FOR_START); + } + return ret; +} + + +esp_err_t sdio_slave_hal_send_flush_next_buffer(sdio_slave_context_t *hal, void **out_arg, uint32_t *out_return_cnt) +{ + esp_err_t ret = ESP_OK; + *out_return_cnt = 0; + bool init = (send_get_state(hal) == STATE_IDLE); + if (!init) { + if (send_get_state(hal) != STATE_GETTING_RESULT && send_get_state(hal) != STATE_GETTING_UNSENT_DESC) { + return ESP_ERR_INVALID_STATE; + } + } + + if (init || send_get_state(hal) == STATE_GETTING_RESULT) { + ret = send_get_inflight_desc(hal, out_arg, out_return_cnt, init); + if (ret == ESP_ERR_NOT_FOUND) { + send_set_state(hal, STATE_GETTING_UNSENT_DESC); + } + } + if (send_get_state(hal) == STATE_GETTING_UNSENT_DESC) { + ret = send_get_unsent_desc(hal, out_arg, out_return_cnt); + if (ret == ESP_ERR_NOT_FOUND) { + send_set_state(hal, STATE_IDLE); + } + } + return ret; +} + +esp_err_t sdio_slave_hal_send_new_packet_if_exist(sdio_slave_context_t *hal) +{ + esp_err_t ret; + // Go to wait sending state (cur_start!=NULL && cur_end==NULL) if not sending and new packet ready. + // Note we may also enter this state by stopping sending in the app. + if (send_get_state(hal) == STATE_WAIT_FOR_START) { + if (hal->in_flight_head == NULL) { + send_check_new_packet(hal); + } + // Go to sending state (cur_start and cur_end != NULL) if has packet to send. 
+ if (hal->in_flight_head) { + send_new_packet(hal); + ret = ESP_OK; + } else { + ret = ESP_ERR_NOT_FOUND; + } + } else { + ret = ESP_ERR_INVALID_STATE; + } + return ret; +} + +static esp_err_t send_write_desc(uint8_t* desc, void* arg) +{ + sdio_slave_hal_send_desc_t* next_desc = SEND_DESC_NEXT(desc); + memcpy(desc, arg, sizeof(sdio_slave_hal_send_desc_t)); + SEND_DESC_NEXT_SET(desc, next_desc); + return ESP_OK; +} + +static void send_isr_invoke(sdio_slave_context_t *hal) +{ + sdio_slave_ll_send_part_done_intr_ena(hal->slc, true); +} + +esp_err_t sdio_slave_hal_send_queue(sdio_slave_context_t* hal, uint8_t *addr, size_t len, void *arg) +{ + hal->tail_pkt_len += len; + sdio_slave_hal_send_desc_t new_desc = { + .dma_desc = { + .size = len, + .length = len, + .buf = addr, + .owner = 1, + // in stream mode, the eof is only appended (in ISR) when new packet is ready to be sent + .eof = (hal->sending_mode == SDIO_SLAVE_SEND_PACKET), + }, + .arg = arg, + .pkt_len = hal->tail_pkt_len, + }; + + esp_err_t ret = sdio_ringbuf_send(&(hal->send_desc_queue), send_write_desc, &new_desc); + send_isr_invoke(hal); + return ret; +} + +/*--------------------------------------------------------------------------- + * Receive + *--------------------------------------------------------------------------*/ + +static lldesc_t* recv_get_first_empty_buf(sdio_slave_context_t* hal) +{ + sdio_slave_hal_recv_stailq_t *const queue = &(hal->recv_link_list); + lldesc_t *desc = STAILQ_FIRST(queue); + while(desc && desc->owner == 0) { + desc = STAILQ_NEXT(desc, qe); + } + return desc; +} + +void sdio_slave_hal_recv_stop(sdio_slave_context_t* hal) +{ + sdio_slave_ll_set_ioready(hal->hinf, false); //set IO ready to 0 to stop host from using + sdio_slave_ll_send_stop(hal->slc); + sdio_slave_ll_recv_stop(hal->slc); + sdio_slave_ll_recv_intr_ena(hal->slc, false); +} + +//touching linked list, should be protected by spinlock +bool sdio_slave_hal_recv_has_next_item(sdio_slave_context_t* hal) +{ + + if (hal->recv_cur_ret == NULL || hal->recv_cur_ret->owner != 0) return false; + + // This may cause the ``cur_ret`` pointer to be NULL, indicating the list is empty, + // in this case the ``tx_done`` should happen no longer until new desc is appended. + // The app is responsible to place the pointer to the right place again when appending new desc. 
+ + hal->recv_cur_ret = STAILQ_NEXT(hal->recv_cur_ret, qe); + return true; +} + +bool sdio_slave_hal_recv_done(sdio_slave_context_t *hal) +{ + bool ret = sdio_slave_ll_recv_done(hal->slc); + if (ret) { + sdio_slave_ll_recv_done_clear(hal->slc); + } + return ret; +} + +lldesc_t *sdio_slave_hal_recv_unload_desc(sdio_slave_context_t *hal) +{ + sdio_slave_hal_recv_stailq_t *const queue = &hal->recv_link_list; + lldesc_t *desc = STAILQ_FIRST(queue); + if (desc) { + STAILQ_REMOVE_HEAD(queue, qe); + } + return desc; +} + +void sdio_slave_hal_recv_init_desc(sdio_slave_context_t* hal, lldesc_t *desc, uint8_t *start) +{ + *desc = (lldesc_t) { + .size = hal->recv_buffer_size, + .buf = start, + }; +} + +void sdio_slave_hal_recv_start(sdio_slave_context_t *hal) +{ + sdio_slave_ll_recv_reset(hal->slc); + lldesc_t *desc = recv_get_first_empty_buf(hal); + if (!desc) { + HAL_LOGD(TAG, "recv: restart without desc"); + } else { + //the counter is handled when add/flush/reset + sdio_slave_ll_recv_start(hal->slc, desc); + sdio_slave_ll_recv_intr_ena(hal->slc, true); + } +} + +void sdio_slave_hal_recv_reset_counter(sdio_slave_context_t *hal) +{ + sdio_slave_ll_recv_size_reset(hal->slc); + lldesc_t *desc = recv_get_first_empty_buf(hal); + while (desc != NULL) { + sdio_slave_ll_recv_size_inc(hal->slc); + desc = STAILQ_NEXT(desc, qe); + } +} + +void sdio_slave_hal_recv_flush_one_buffer(sdio_slave_context_t *hal) +{ + sdio_slave_hal_recv_stailq_t *const queue = &hal->recv_link_list; + lldesc_t *desc = STAILQ_FIRST(queue); + assert (desc != NULL && desc->owner == 0); + STAILQ_REMOVE_HEAD(queue, qe); + desc->owner = 1; + STAILQ_INSERT_TAIL(queue, desc, qe); + sdio_slave_ll_recv_size_inc(hal->slc); + //we only add it to the tail here, without start the DMA nor increase buffer num. +} + +void sdio_slave_hal_load_buf(sdio_slave_context_t *hal, lldesc_t *desc) +{ + sdio_slave_hal_recv_stailq_t *const queue = &(hal->recv_link_list); + desc->owner = 1; + + lldesc_t *const tail = STAILQ_LAST(queue, lldesc_s, qe); + + STAILQ_INSERT_TAIL(queue, desc, qe); + if (hal->recv_cur_ret == NULL) { + hal->recv_cur_ret = desc; + } + + if (tail == NULL) { + //no one in the ll, start new ll operation. 
+ sdio_slave_ll_recv_start(hal->slc, desc); + sdio_slave_ll_recv_intr_ena(hal->slc, true); + HAL_LOGV(TAG, "recv_load_buf: start new"); + } else { + //restart former ll operation + sdio_slave_ll_recv_restart(hal->slc); + HAL_LOGV(TAG, "recv_load_buf: restart"); + } + sdio_slave_ll_recv_size_inc(hal->slc); +} + +static inline void show_queue_item(lldesc_t *item) +{ + ESP_EARLY_LOGI(TAG, "=> %p: size: %d(%d), eof: %d, owner: %d", item, item->size, item->length, item->eof, item->owner); + ESP_EARLY_LOGI(TAG, " buf: %p, stqe_next: %p", item->buf, item->qe.stqe_next); +} + +static void __attribute((unused)) dump_queue(sdio_slave_hal_recv_stailq_t *queue) +{ + int cnt = 0; + lldesc_t *item = NULL; + ESP_EARLY_LOGI(TAG, ">>>>> first: %p, last: %p <<<<<", queue->stqh_first, queue->stqh_last); + STAILQ_FOREACH(item, queue, qe) { + cnt++; + show_queue_item(item); + } + ESP_EARLY_LOGI(TAG, "total: %d", cnt); +} + +/*--------------------------------------------------------------------------- + * Host + *--------------------------------------------------------------------------*/ +void sdio_slave_hal_hostint_get_ena(sdio_slave_context_t *hal, sdio_slave_hostint_t *out_int_mask) +{ + *out_int_mask = sdio_slave_ll_host_get_intena(hal->host); +} + +void sdio_slave_hal_hostint_clear(sdio_slave_context_t *hal, const sdio_slave_hostint_t *mask) +{ + sdio_slave_ll_host_intr_clear(hal->host, mask);//clear all interrupts +} + +void sdio_slave_hal_hostint_set_ena(sdio_slave_context_t *hal, const sdio_slave_hostint_t *mask) +{ + sdio_slave_ll_host_set_intena(hal->host, mask); +} + +void sdio_slave_hal_hostint_send(sdio_slave_context_t *hal, const sdio_slave_hostint_t *mask) +{ + sdio_slave_ll_host_send_int(hal->slc, mask); +} + +uint8_t sdio_slave_hal_host_get_reg(sdio_slave_context_t *hal, int pos) +{ + return sdio_slave_ll_host_get_reg(hal->host, pos); +} +void sdio_slave_hal_host_set_reg(sdio_slave_context_t *hal, int pos, uint8_t reg) +{ + sdio_slave_ll_host_set_reg(hal->host, pos, reg); +} + +void sdio_slave_hal_slvint_fetch_clear(sdio_slave_context_t *hal, sdio_slave_ll_slvint_t *out_int_mask) +{ + sdio_slave_ll_slvint_fetch_clear(hal->slc, out_int_mask); +} + diff --git a/docs/Doxyfile b/docs/Doxyfile index 497ce96afb..a6f301cf5b 100644 --- a/docs/Doxyfile +++ b/docs/Doxyfile @@ -184,6 +184,7 @@ INPUT = \ ../../components/driver/include/driver/sdspi_host.h \ ## SDIO slave ../../components/driver/include/driver/sdio_slave.h \ + ../../components/soc/include/hal/sdio_slave_types.h \ ## Non-Volatile Storage ../../components/nvs_flash/include/nvs.h \ ../../components/nvs_flash/include/nvs_flash.h \ diff --git a/docs/en/api-reference/peripherals/sdio_slave.rst b/docs/en/api-reference/peripherals/sdio_slave.rst index c4e9294ca2..6a28545800 100644 --- a/docs/en/api-reference/peripherals/sdio_slave.rst +++ b/docs/en/api-reference/peripherals/sdio_slave.rst @@ -277,5 +277,6 @@ Slave/master communication: :example:`peripherals/sdio`. API Reference ------------- +.. include:: /_build/inc/sdio_slave_types.inc .. include:: /_build/inc/sdio_slave.inc