2021-08-24 11:20:50 -04:00
|
|
|
/*
|
2024-02-22 15:25:10 -05:00
|
|
|
* SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
|
2021-08-24 11:20:50 -04:00
|
|
|
*
|
|
|
|
* SPDX-License-Identifier: Apache-2.0
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "sdkconfig.h"
|
|
|
|
#include <stdint.h>
|
|
|
|
#include <string.h>
|
|
|
|
#include <assert.h>
|
|
|
|
#include <sys/queue.h>
|
|
|
|
#include "freertos/FreeRTOS.h"
|
|
|
|
#include "freertos/portmacro.h"
|
|
|
|
#include "freertos/task.h"
|
2021-10-27 09:15:46 -04:00
|
|
|
#include "freertos/semphr.h"
|
2021-08-24 11:20:50 -04:00
|
|
|
#include "esp_err.h"
|
|
|
|
#include "esp_log.h"
|
|
|
|
#include "esp_heap_caps.h"
|
|
|
|
#include "hcd.h"
|
|
|
|
#include "usbh.h"
|
2021-08-31 02:14:04 -04:00
|
|
|
#include "usb/usb_helpers.h"
|
2021-08-24 11:20:50 -04:00
|
|
|
#include "usb/usb_types_ch9.h"
|
|
|
|
|
2023-05-08 12:43:32 -04:00
|
|
|
#define EP_NUM_MIN                  1   // The smallest possible non-default endpoint number
#define EP_NUM_MAX                  16  // The largest possible non-default endpoint number
// Each non-default endpoint number can exist in both IN and OUT directions
#define NUM_NON_DEFAULT_EP          ((EP_NUM_MAX - 1) * 2) // The total number of non-default endpoints a device can have.
|
|
|
|
|
2023-05-08 12:53:27 -04:00
|
|
|
// Device action flags. LISTED IN THE ORDER THEY SHOULD BE HANDLED IN within usbh_process(). Some actions are mutually exclusive
|
2023-05-08 12:43:32 -04:00
|
|
|
typedef enum {
    DEV_ACTION_EPn_HALT_FLUSH   = (1 << 0),     // Halt all non-default endpoints then flush them (called after a device is gone)
    DEV_ACTION_EP0_FLUSH        = (1 << 1),     // Retire all URBs submitted to EP0
    DEV_ACTION_EP0_DEQUEUE      = (1 << 2),     // Dequeue all URBs from EP0
    DEV_ACTION_EP0_CLEAR        = (1 << 3),     // Move EP0 to the active state
    DEV_ACTION_PROP_GONE_EVT    = (1 << 4),     // Propagate a USBH_EVENT_DEV_GONE event
    DEV_ACTION_FREE             = (1 << 5),     // Free the device object
    DEV_ACTION_PROP_NEW_DEV     = (1 << 6),     // Propagate a USBH_EVENT_NEW_DEV event
} dev_action_t;
|
2021-08-24 11:20:50 -04:00
|
|
|
|
|
|
|
typedef struct device_s device_t;
|
2023-05-08 12:43:32 -04:00
|
|
|
|
|
|
|
typedef struct {
    // Constant members are assigned on endpoint allocation and do not require a critical section
    struct {
        usbh_ep_cb_t ep_cb;             // Endpoint event callback, invoked from the HCD pipe callback context
        void *ep_cb_arg;                // User argument passed to ep_cb
        hcd_pipe_handle_t pipe_hdl;     // Underlying HCD pipe carrying this endpoint's transfers
        device_t *dev;                  // Pointer to the device object that this endpoint is contained in
        const usb_ep_desc_t *ep_desc;   // This just stores a pointer endpoint descriptor inside the device's "config_desc"
    } constant;
} endpoint_t;
|
|
|
|
|
2021-08-24 11:20:50 -04:00
|
|
|
struct device_s {
    // Dynamic members require a critical section
    struct {
        TAILQ_ENTRY(device_s) tailq_entry;      // Entry in either the idle or pending device list
        union {
            struct {
                uint32_t in_pending_list: 1;    // Device is currently in the pending list awaiting processing
                uint32_t is_gone: 1;            // Device is gone (disconnected or port error)
                uint32_t waiting_free: 1;       // Device object is awaiting to be freed
                uint32_t enum_lock: 1;          // Device is locked for enumeration. Enum information (e.g., address, device/config desc etc) may change
                uint32_t reserved28: 28;        // Unused bits (explicit padding)
            };
            uint32_t val;                       // All flags as a single word
        } flags;
        uint32_t action_flags;                  // Pending dev_action_t flags for this device
        int num_ctrl_xfers_inflight;            // Number of control transfers currently submitted on EP0
        usb_device_state_t state;               // Current USB device state (see USB 2.0 chapter 9)
        uint32_t open_count;                    // Number of clients that currently have this device opened
    } dynamic;
    // Mux protected members must be protected by the USBH mux_lock when accessed
    struct {
        /*
        - Endpoint object pointers for each possible non-default endpoint
        - All OUT EPs are listed before IN EPs (i.e., EP_NUM_MIN OUT ... EP_NUM_MAX OUT ... EP_NUM_MIN IN ... EP_NUM_MAX)
        */
        endpoint_t *endpoints[NUM_NON_DEFAULT_EP];
    } mux_protected;
    // Constant members do not require a critical section
    struct {
        // Assigned on device allocation and remain constant for the device's lifetime
        hcd_pipe_handle_t default_pipe;     // Pipe for EP0 (the default control endpoint)
        hcd_port_handle_t port_hdl;         // Handle of the port this device is connected to
        usb_speed_t speed;                  // Device speed
        unsigned int uid;                   // Unique ID assigned by the caller of device_alloc()
        /*
        These fields can only be changed when enum_lock is set, thus can be treated as constant
        */
        uint8_t address;                    // Assigned bus address
        usb_device_desc_t *desc;            // Cached device descriptor (heap allocated)
        usb_config_desc_t *config_desc;     // Cached configuration descriptor (heap allocated)
        usb_str_desc_t *str_desc_manu;      // Cached manufacturer string descriptor (heap allocated)
        usb_str_desc_t *str_desc_product;   // Cached product string descriptor (heap allocated)
        usb_str_desc_t *str_desc_ser_num;   // Cached serial number string descriptor (heap allocated)
    } constant;
};
|
|
|
|
|
|
|
|
typedef struct {
    // Dynamic members require a critical section
    struct {
        TAILQ_HEAD(tailhead_devs, device_s) devs_idle_tailq;        // Tailq of all enum and configured devices
        TAILQ_HEAD(tailhead_devs_cb, device_s) devs_pending_tailq;  // Tailq of devices that need to have their cb called
    } dynamic;
    // Mux protected members must be protected by the USBH mux_lock when accessed
    struct {
        uint8_t num_device;                 // Number of enumerated devices
    } mux_protected;
    // Constant members do not change after installation thus do not require a critical section
    struct {
        usb_proc_req_cb_t proc_req_cb;      // Callback used to request that usbh_process() be called
        void *proc_req_cb_arg;              // User argument for proc_req_cb
        usbh_event_cb_t event_cb;           // Callback for propagating USBH events
        void *event_cb_arg;                 // User argument for event_cb
        SemaphoreHandle_t mux_lock;         // Mutex protecting the mux_protected members
    } constant;
} usbh_t;
|
|
|
|
|
|
|
|
// Singleton USBH driver object. NULL when the USBH is not installed
static usbh_t *p_usbh_obj = NULL;

// Spinlock protecting the "dynamic" members of the USBH and device objects
static portMUX_TYPE usbh_lock = portMUX_INITIALIZER_UNLOCKED;

// Logging tag
const char *USBH_TAG = "USBH";

#define USBH_ENTER_CRITICAL_ISR()       portENTER_CRITICAL_ISR(&usbh_lock)
#define USBH_EXIT_CRITICAL_ISR()        portEXIT_CRITICAL_ISR(&usbh_lock)
#define USBH_ENTER_CRITICAL()           portENTER_CRITICAL(&usbh_lock)
#define USBH_EXIT_CRITICAL()            portEXIT_CRITICAL(&usbh_lock)
#define USBH_ENTER_CRITICAL_SAFE()      portENTER_CRITICAL_SAFE(&usbh_lock)
#define USBH_EXIT_CRITICAL_SAFE()       portEXIT_CRITICAL_SAFE(&usbh_lock)

// Check a condition; on failure, return ret_val from the calling function
#define USBH_CHECK(cond, ret_val) ({              \
    if (!(cond)) {                                \
        return (ret_val);                         \
    }                                             \
})
// Same as USBH_CHECK(), but exits the critical section before returning
#define USBH_CHECK_FROM_CRIT(cond, ret_val) ({    \
    if (!(cond)) {                                \
        USBH_EXIT_CRITICAL();                     \
        return ret_val;                           \
    }                                             \
})
|
|
|
|
|
2023-05-08 12:43:32 -04:00
|
|
|
// ------------------------------------------------- Forward Declare ---------------------------------------------------
|
|
|
|
|
|
|
|
static bool ep0_pipe_callback(hcd_pipe_handle_t pipe_hdl, hcd_pipe_event_t pipe_event, void *user_arg, bool in_isr);
|
|
|
|
|
|
|
|
static bool epN_pipe_callback(hcd_pipe_handle_t pipe_hdl, hcd_pipe_event_t pipe_event, void *user_arg, bool in_isr);
|
|
|
|
|
|
|
|
static bool _dev_set_actions(device_t *dev_obj, uint32_t action_flags);
|
|
|
|
|
|
|
|
// ----------------------------------------------------- Helpers -------------------------------------------------------
|
|
|
|
|
2024-04-09 17:06:46 -04:00
|
|
|
static device_t *_find_dev_from_uid(unsigned int uid)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION
|
|
|
|
*/
|
|
|
|
device_t *dev_iter;
|
|
|
|
|
|
|
|
// Search the device lists for a device with the specified address
|
|
|
|
TAILQ_FOREACH(dev_iter, &p_usbh_obj->dynamic.devs_idle_tailq, dynamic.tailq_entry) {
|
|
|
|
if (dev_iter->constant.uid == uid) {
|
|
|
|
return dev_iter;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
TAILQ_FOREACH(dev_iter, &p_usbh_obj->dynamic.devs_pending_tailq, dynamic.tailq_entry) {
|
|
|
|
if (dev_iter->constant.uid == uid) {
|
|
|
|
return dev_iter;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static device_t *_find_dev_from_addr(uint8_t dev_addr)
{
    // THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION

    // A device resides in exactly one of the two lists, so scan the idle list
    // first and only fall through to the pending list if no match was found.
    device_t *found = NULL;
    device_t *iter;
    TAILQ_FOREACH(iter, &p_usbh_obj->dynamic.devs_idle_tailq, dynamic.tailq_entry) {
        if (iter->constant.address == dev_addr) {
            found = iter;
            break;
        }
    }
    if (found == NULL) {
        TAILQ_FOREACH(iter, &p_usbh_obj->dynamic.devs_pending_tailq, dynamic.tailq_entry) {
            if (iter->constant.address == dev_addr) {
                found = iter;
                break;
            }
        }
    }
    return found;   // NULL if no device with the given address exists
}
|
|
|
|
|
2023-05-08 12:43:32 -04:00
|
|
|
static inline bool check_ep_addr(uint8_t bEndpointAddress)
{
    /*
    Validate that bEndpointAddress refers to a non-default endpoint:
    - Endpoint number must lie in [EP_NUM_MIN, EP_NUM_MAX]
    - EP0 is owned/managed by USBH, thus must never be directly addressed by users (see USB 2.0 section 10.5.1.2)
    */
    const uint8_t ep_num = bEndpointAddress & USB_B_ENDPOINT_ADDRESS_EP_NUM_MASK;
    if (ep_num < EP_NUM_MIN) {
        return false;   // Rejects EP0
    }
    return ep_num <= EP_NUM_MAX;
}
|
|
|
|
|
|
|
|
static endpoint_t *get_ep_from_addr(device_t *dev_obj, uint8_t bEndpointAddress)
{
    // CALLER IS RESPONSIBLE FOR TAKING THE mux_lock

    // Map the endpoint address to an index into the device's endpoint object
    // list: EP_NUM_MIN maps to index 0, and IN endpoints are stored after all
    // OUT endpoints.
    const uint8_t ep_num = bEndpointAddress & USB_B_ENDPOINT_ADDRESS_EP_NUM_MASK;
    int index = (int)ep_num - EP_NUM_MIN;
    assert(index >= 0); // Endpoint address is not supported
    const bool is_in = (bEndpointAddress & USB_B_ENDPOINT_ADDRESS_EP_DIR_MASK) != 0;
    if (is_in) {
        index += (EP_NUM_MAX - EP_NUM_MIN);     // Skip past the OUT endpoint entries
    }
    return dev_obj->mux_protected.endpoints[index];
}
|
|
|
|
|
|
|
|
static inline void set_ep_from_addr(device_t *dev_obj, uint8_t bEndpointAddress, endpoint_t *ep_obj)
{
    // CALLER IS RESPONSIBLE FOR TAKING THE mux_lock

    // Map the endpoint address to an index into the device's endpoint object
    // list: EP_NUM_MIN maps to index 0, and IN endpoints are stored after all
    // OUT endpoints.
    const uint8_t ep_num = bEndpointAddress & USB_B_ENDPOINT_ADDRESS_EP_NUM_MASK;
    int index = (int)ep_num - EP_NUM_MIN;
    assert(index >= 0); // Endpoint address is not supported
    const bool is_in = (bEndpointAddress & USB_B_ENDPOINT_ADDRESS_EP_DIR_MASK) != 0;
    if (is_in) {
        index += (EP_NUM_MAX - EP_NUM_MIN);     // Skip past the OUT endpoint entries
    }
    dev_obj->mux_protected.endpoints[index] = ep_obj;
}
|
|
|
|
|
|
|
|
static bool urb_check_args(urb_t *urb)
{
    // Sanity check a user supplied URB: it must have a completion callback
    // and a transfer length that fits inside its data buffer.
    bool args_ok = true;
    if (urb->transfer.callback == NULL) {
        ESP_LOGE(USBH_TAG, "usb_transfer_t callback is NULL");
        args_ok = false;
    } else if (urb->transfer.num_bytes > urb->transfer.data_buffer_size) {
        ESP_LOGE(USBH_TAG, "usb_transfer_t num_bytes > data_buffer_size");
        args_ok = false;
    }
    return args_ok;
}
|
|
|
|
|
2024-04-09 17:06:46 -04:00
|
|
|
// Check that a transfer's fields comply with USB requirements for the given
// transfer type, endpoint MPS, and direction. Returns true if compliant.
static bool transfer_check_usb_compliance(usb_transfer_t *transfer, usb_transfer_type_t type, unsigned int mps, bool is_in)
{
    if (type == USB_TRANSFER_TYPE_CTRL) {
        // Check that num_bytes and wLength are set correctly
        usb_setup_packet_t *setup_pkt = (usb_setup_packet_t *)transfer->data_buffer;
        bool mismatch = false;
        if (is_in) {
            // For IN transfers, 'num_bytes >= sizeof(usb_setup_packet_t) + setup_pkt->wLength' due to MPS rounding
            mismatch = (transfer->num_bytes < sizeof(usb_setup_packet_t) + setup_pkt->wLength);
        } else {
            // For OUT transfers, num_bytes must match 'sizeof(usb_setup_packet_t) + setup_pkt->wLength'
            mismatch = (transfer->num_bytes != sizeof(usb_setup_packet_t) + setup_pkt->wLength);
        }
        if (mismatch) {
            ESP_LOGE(USBH_TAG, "usb_transfer_t num_bytes %d and usb_setup_packet_t wLength %d mismatch", transfer->num_bytes, setup_pkt->wLength);
            return false;
        }
    } else if (type == USB_TRANSFER_TYPE_ISOCHRONOUS) {
        // Check that there is at least one isochronous packet descriptor
        if (transfer->num_isoc_packets <= 0) {
            ESP_LOGE(USBH_TAG, "usb_transfer_t num_isoc_packets is 0");
            return false;
        }
        // Check that sum of all packet lengths add up to transfer length
        // If IN, check that each packet length is integer multiple of MPS
        int total_num_bytes = 0;
        bool mod_mps_all_zero = true;
        for (int i = 0; i < transfer->num_isoc_packets; i++) {
            total_num_bytes += transfer->isoc_packet_desc[i].num_bytes;
            if (transfer->isoc_packet_desc[i].num_bytes % mps != 0) {
                mod_mps_all_zero = false;
            }
        }
        if (transfer->num_bytes != total_num_bytes) {
            ESP_LOGE(USBH_TAG, "ISOC transfer num_bytes != num_bytes of all packets");
            return false;
        }
        if (is_in && !mod_mps_all_zero) {
            ESP_LOGE(USBH_TAG, "ISOC IN num_bytes not integer multiple of MPS");
            return false;
        }
    } else {
        // Bulk/interrupt: check that IN transfers are integer multiple of MPS
        if (is_in && (transfer->num_bytes % mps != 0)) {
            ESP_LOGE(USBH_TAG, "IN transfer num_bytes not integer multiple of MPS");
            return false;
        }
    }
    return true;
}
|
|
|
|
|
2021-08-24 11:20:50 -04:00
|
|
|
// --------------------------------------------------- Allocation ------------------------------------------------------
|
|
|
|
|
2023-05-08 12:43:32 -04:00
|
|
|
// Allocate an endpoint object and its underlying HCD pipe for the given device.
// On success, writes the new endpoint object to ep_obj_ret and returns ESP_OK.
// On failure, no resources are leaked.
static esp_err_t endpoint_alloc(device_t *dev_obj, const usb_ep_desc_t *ep_desc, usbh_ep_config_t *ep_config, endpoint_t **ep_obj_ret)
{
    endpoint_t *ep_obj = heap_caps_calloc(1, sizeof(endpoint_t), MALLOC_CAP_DEFAULT);
    if (ep_obj == NULL) {
        return ESP_ERR_NO_MEM;
    }

    // Allocate the EP's underlying pipe
    hcd_pipe_config_t pipe_config = {
        .callback = epN_pipe_callback,
        .callback_arg = (void *)ep_obj,
        .context = ep_config->context,
        .ep_desc = ep_desc,
        .dev_speed = dev_obj->constant.speed,
        .dev_addr = dev_obj->constant.address,
    };
    hcd_pipe_handle_t pipe_hdl;
    esp_err_t err = hcd_pipe_alloc(dev_obj->constant.port_hdl, &pipe_config, &pipe_hdl);
    if (err != ESP_OK) {
        // Pipe allocation failed; release the endpoint object
        heap_caps_free(ep_obj);
        return err;
    }

    // Initialize the endpoint object
    ep_obj->constant.pipe_hdl = pipe_hdl;
    ep_obj->constant.ep_cb = ep_config->ep_cb;
    ep_obj->constant.ep_cb_arg = ep_config->ep_cb_arg;
    ep_obj->constant.dev = dev_obj;
    ep_obj->constant.ep_desc = ep_desc;

    // Return the endpoint object
    *ep_obj_ret = ep_obj;
    return ESP_OK;
}
|
|
|
|
|
|
|
|
// Free an endpoint object and its underlying HCD pipe. Accepts NULL.
static void endpoint_free(endpoint_t *ep_obj)
{
    if (ep_obj != NULL) {
        // Deallocate the EP's underlying pipe, then the heap object itself
        ESP_ERROR_CHECK(hcd_pipe_free(ep_obj->constant.pipe_hdl));
        heap_caps_free(ep_obj);
    }
}
|
|
|
|
|
2024-04-09 17:06:46 -04:00
|
|
|
static esp_err_t device_alloc(unsigned int uid,
|
|
|
|
usb_speed_t speed,
|
|
|
|
hcd_port_handle_t port_hdl,
|
|
|
|
device_t **dev_obj_ret)
|
2021-08-24 11:20:50 -04:00
|
|
|
{
|
|
|
|
device_t *dev_obj = heap_caps_calloc(1, sizeof(device_t), MALLOC_CAP_DEFAULT);
|
2024-04-09 17:06:46 -04:00
|
|
|
if (dev_obj == NULL) {
|
|
|
|
return ESP_ERR_NO_MEM;
|
2021-08-24 11:20:50 -04:00
|
|
|
}
|
2024-04-09 17:06:46 -04:00
|
|
|
|
|
|
|
esp_err_t ret;
|
|
|
|
// Allocate a pipe for EP0
|
2021-08-24 11:20:50 -04:00
|
|
|
hcd_pipe_config_t pipe_config = {
|
2024-04-09 17:06:46 -04:00
|
|
|
.callback = ep0_pipe_callback,
|
|
|
|
.callback_arg = (void *)dev_obj,
|
2021-08-24 11:20:50 -04:00
|
|
|
.context = (void *)dev_obj,
|
2023-05-08 12:53:27 -04:00
|
|
|
.ep_desc = NULL, // No endpoint descriptor means we're allocating a pipe for EP0
|
2021-08-24 11:20:50 -04:00
|
|
|
.dev_speed = speed,
|
|
|
|
.dev_addr = 0,
|
|
|
|
};
|
|
|
|
hcd_pipe_handle_t default_pipe_hdl;
|
|
|
|
ret = hcd_pipe_alloc(port_hdl, &pipe_config, &default_pipe_hdl);
|
|
|
|
if (ret != ESP_OK) {
|
|
|
|
goto err;
|
|
|
|
}
|
2023-05-08 12:53:27 -04:00
|
|
|
// Initialize device object
|
2021-08-24 11:20:50 -04:00
|
|
|
dev_obj->dynamic.state = USB_DEVICE_STATE_DEFAULT;
|
|
|
|
dev_obj->constant.default_pipe = default_pipe_hdl;
|
|
|
|
dev_obj->constant.port_hdl = port_hdl;
|
|
|
|
dev_obj->constant.speed = speed;
|
2024-04-09 17:06:46 -04:00
|
|
|
dev_obj->constant.uid = uid;
|
|
|
|
// Note: Enumeration related dev_obj->constant fields are initialized later using usbh_dev_set_...() functions
|
|
|
|
|
|
|
|
// Write-back device object
|
2021-08-24 11:20:50 -04:00
|
|
|
*dev_obj_ret = dev_obj;
|
|
|
|
ret = ESP_OK;
|
2024-04-09 17:06:46 -04:00
|
|
|
|
2021-08-24 11:20:50 -04:00
|
|
|
return ret;
|
|
|
|
|
|
|
|
err:
|
|
|
|
heap_caps_free(dev_obj);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Free a device object, its cached descriptors, and its EP0 pipe. Accepts NULL.
static void device_free(device_t *dev_obj)
{
    if (dev_obj == NULL) {
        return;
    }
    // The descriptors might not have been set yet (enumeration may not have
    // completed). heap_caps_free() is a no-op on NULL, so no guards are needed.
    heap_caps_free(dev_obj->constant.desc);
    heap_caps_free(dev_obj->constant.config_desc);
    heap_caps_free(dev_obj->constant.str_desc_manu);
    heap_caps_free(dev_obj->constant.str_desc_product);
    heap_caps_free(dev_obj->constant.str_desc_ser_num);
    ESP_ERROR_CHECK(hcd_pipe_free(dev_obj->constant.default_pipe));
    heap_caps_free(dev_obj);
}
|
|
|
|
|
2023-05-08 12:43:32 -04:00
|
|
|
// ---------------------------------------------------- Callbacks ------------------------------------------------------
|
2021-08-24 11:20:50 -04:00
|
|
|
|
2023-05-08 12:43:32 -04:00
|
|
|
// HCD pipe callback for EP0 (the default pipe). May run in ISR context.
// Translates the pipe event into device action flags, then requests USBH
// processing if needed. Returns whether a yield is required (ISR context).
static bool ep0_pipe_callback(hcd_pipe_handle_t pipe_hdl, hcd_pipe_event_t pipe_event, void *user_arg, bool in_isr)
{
    uint32_t action_flags;
    device_t *dev_obj = (device_t *)user_arg;
    switch (pipe_event) {
    case HCD_PIPE_EVENT_URB_DONE:
        // A control transfer completed on EP0's pipe. We need to dequeue it
        action_flags = DEV_ACTION_EP0_DEQUEUE;
        break;
    case HCD_PIPE_EVENT_ERROR_XFER:
    case HCD_PIPE_EVENT_ERROR_URB_NOT_AVAIL:
    case HCD_PIPE_EVENT_ERROR_OVERFLOW:
        // EP0's pipe has encountered an error. We need to retire all URBs, dequeue them, then make the pipe active again
        action_flags = DEV_ACTION_EP0_FLUSH |
                       DEV_ACTION_EP0_DEQUEUE |
                       DEV_ACTION_EP0_CLEAR;
        // Use the ISR-safe logging variant when called from an ISR
        if (in_isr) {
            ESP_EARLY_LOGE(USBH_TAG, "Dev %d EP 0 Error", dev_obj->constant.address);
        } else {
            ESP_LOGE(USBH_TAG, "Dev %d EP 0 Error", dev_obj->constant.address);
        }
        break;
    case HCD_PIPE_EVENT_ERROR_STALL:
        // EP0's pipe encountered a "protocol stall". We just need to dequeue URBs then make the pipe active again
        action_flags = DEV_ACTION_EP0_DEQUEUE | DEV_ACTION_EP0_CLEAR;
        if (in_isr) {
            ESP_EARLY_LOGE(USBH_TAG, "Dev %d EP 0 STALL", dev_obj->constant.address);
        } else {
            ESP_LOGE(USBH_TAG, "Dev %d EP 0 STALL", dev_obj->constant.address);
        }
        break;
    default:
        action_flags = 0;
        break;
    }

    USBH_ENTER_CRITICAL_SAFE();
    bool call_proc_req_cb = _dev_set_actions(dev_obj, action_flags);
    USBH_EXIT_CRITICAL_SAFE();

    // Only request processing if the device was newly added to the pending list
    bool yield = false;
    if (call_proc_req_cb) {
        yield = p_usbh_obj->constant.proc_req_cb(USB_PROC_REQ_SOURCE_USBH, in_isr, p_usbh_obj->constant.proc_req_cb_arg);
    }
    return yield;
}
|
|
|
|
|
2023-05-08 12:43:32 -04:00
|
|
|
// HCD pipe callback for non-default endpoints. Forwards the pipe event
// directly to the endpoint's registered callback.
static bool epN_pipe_callback(hcd_pipe_handle_t pipe_hdl, hcd_pipe_event_t pipe_event, void *user_arg, bool in_isr)
{
    endpoint_t *ep_obj = (endpoint_t *)user_arg;
    usbh_ep_handle_t ep_hdl = (usbh_ep_handle_t)ep_obj;
    usbh_ep_event_t ep_event = (usbh_ep_event_t)pipe_event;
    return ep_obj->constant.ep_cb(ep_hdl, ep_event, ep_obj->constant.ep_cb_arg, in_isr);
}
|
|
|
|
|
|
|
|
// -------------------------------------------------- Event Related ----------------------------------------------------
|
|
|
|
|
|
|
|
// Record pending action flags on a device, moving it to the pending list if
// it is not already there. Returns true if the caller must invoke the
// processing request callback (i.e., the device was newly made pending).
static bool _dev_set_actions(device_t *dev_obj, uint32_t action_flags)
{
    /*
    THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION
    */
    if (action_flags == 0) {
        return false;
    }
    // Accumulate the requested actions (common to both branches below)
    dev_obj->dynamic.action_flags |= action_flags;
    bool call_proc_req_cb;
    // Check if device is already on the callback list
    if (!dev_obj->dynamic.flags.in_pending_list) {
        // Move device from idle device list to callback device list
        TAILQ_REMOVE(&p_usbh_obj->dynamic.devs_idle_tailq, dev_obj, dynamic.tailq_entry);
        TAILQ_INSERT_TAIL(&p_usbh_obj->dynamic.devs_pending_tailq, dev_obj, dynamic.tailq_entry);
        dev_obj->dynamic.flags.in_pending_list = 1;
        call_proc_req_cb = true;
    } else {
        // The device is already on the callback list, thus a processing request is already pending.
        call_proc_req_cb = false;
    }
    return call_proc_req_cb;
}
|
|
|
|
|
|
|
|
// Halt then flush every allocated non-default endpoint of the device
static inline void handle_epn_halt_flush(device_t *dev_obj)
{
    // We need to take the mux_lock to access mux_protected members
    xSemaphoreTake(p_usbh_obj->constant.mux_lock, portMAX_DELAY);
    for (int i = 0; i < NUM_NON_DEFAULT_EP; i++) {
        endpoint_t *ep_obj = dev_obj->mux_protected.endpoints[i];
        if (ep_obj == NULL) {
            continue;   // Endpoint was never allocated
        }
        ESP_ERROR_CHECK(hcd_pipe_command(ep_obj->constant.pipe_hdl, HCD_PIPE_CMD_HALT));
        ESP_ERROR_CHECK(hcd_pipe_command(ep_obj->constant.pipe_hdl, HCD_PIPE_CMD_FLUSH));
    }
    xSemaphoreGive(p_usbh_obj->constant.mux_lock);
}
|
|
|
|
|
2023-05-08 12:43:32 -04:00
|
|
|
// Halt EP0's pipe, then retire all URBs currently enqueued on it
static inline void handle_ep0_flush(device_t *dev_obj)
{
    hcd_pipe_handle_t pipe = dev_obj->constant.default_pipe;
    ESP_ERROR_CHECK(hcd_pipe_command(pipe, HCD_PIPE_CMD_HALT));
    ESP_ERROR_CHECK(hcd_pipe_command(pipe, HCD_PIPE_CMD_FLUSH));
}
|
|
|
|
|
|
|
|
// Empty URBs from EP0's pipe, propagating a USBH_EVENT_CTRL_XFER event for
// each dequeued URB, then update the in-flight control transfer count.
static inline void handle_ep0_dequeue(device_t *dev_obj)
{
    ESP_LOGD(USBH_TAG, "Default pipe device %d", dev_obj->constant.address);
    int num_urbs = 0;
    for (urb_t *urb = hcd_urb_dequeue(dev_obj->constant.default_pipe);
            urb != NULL;
            urb = hcd_urb_dequeue(dev_obj->constant.default_pipe)) {
        num_urbs++;
        usbh_event_data_t event_data = {
            .event = USBH_EVENT_CTRL_XFER,
            .ctrl_xfer_data = {
                .dev_hdl = (usb_device_handle_t)dev_obj,
                .urb = urb,
            },
        };
        p_usbh_obj->constant.event_cb(&event_data, p_usbh_obj->constant.event_cb_arg);
    }
    // Account for the dequeued control transfers
    USBH_ENTER_CRITICAL();
    dev_obj->dynamic.num_ctrl_xfers_inflight -= num_urbs;
    USBH_EXIT_CRITICAL();
}
|
|
|
|
|
|
|
|
// Return EP0's pipe to the active state.
static inline void handle_ep0_clear(device_t *dev_obj)
{
    // We allow the pipe command to fail just in case the pipe becomes invalid mid command
    (void)hcd_pipe_command(dev_obj->constant.default_pipe, HCD_PIPE_CMD_CLEAR);
}
|
|
|
|
|
|
|
|
// Propagate a USBH_EVENT_DEV_GONE event for this device.
// Note: EP0 flushing is handled separately by handle_ep0_flush(); this
// function only propagates the event.
static inline void handle_prop_gone_evt(device_t *dev_obj)
{
    ESP_LOGE(USBH_TAG, "Device %d gone", dev_obj->constant.address);
    usbh_event_data_t event_data = {
        .event = USBH_EVENT_DEV_GONE,
        .dev_gone_data = {
            .dev_addr = dev_obj->constant.address,
            .dev_hdl = (usb_device_handle_t)dev_obj,
        },
    };
    p_usbh_obj->constant.event_cb(&event_data, p_usbh_obj->constant.event_cb_arg);
}
|
|
|
|
|
2024-03-21 08:57:32 -04:00
|
|
|
// Remove a device from its containing list, free it, and propagate
// USBH_EVENT_DEV_FREE (and USBH_EVENT_ALL_FREE if it was the last device).
static inline void handle_free(device_t *dev_obj)
{
    // Cache a copy of the device's uid as we are about to free the device object
    const unsigned int dev_uid = dev_obj->constant.uid;
    bool all_free;
    ESP_LOGD(USBH_TAG, "Freeing device %d", dev_obj->constant.address);

    // We need to take the mux_lock to access mux_protected members
    xSemaphoreTake(p_usbh_obj->constant.mux_lock, portMAX_DELAY);
    USBH_ENTER_CRITICAL();
    // Remove the device object from its containing list
    if (dev_obj->dynamic.flags.in_pending_list) {
        dev_obj->dynamic.flags.in_pending_list = 0;
        TAILQ_REMOVE(&p_usbh_obj->dynamic.devs_pending_tailq, dev_obj, dynamic.tailq_entry);
    } else {
        TAILQ_REMOVE(&p_usbh_obj->dynamic.devs_idle_tailq, dev_obj, dynamic.tailq_entry);
    }
    USBH_EXIT_CRITICAL();
    p_usbh_obj->mux_protected.num_device--;
    all_free = (p_usbh_obj->mux_protected.num_device == 0);
    xSemaphoreGive(p_usbh_obj->constant.mux_lock);
    device_free(dev_obj);

    // Propagate USBH_EVENT_DEV_FREE event
    usbh_event_data_t event_data = {
        .event = USBH_EVENT_DEV_FREE,
        .dev_free_data = {
            .dev_uid = dev_uid,
        }
    };
    p_usbh_obj->constant.event_cb(&event_data, p_usbh_obj->constant.event_cb_arg);

    // If all devices have been freed, propagate a USBH_EVENT_ALL_FREE event
    if (all_free) {
        ESP_LOGD(USBH_TAG, "Device all free");
        event_data.event = USBH_EVENT_ALL_FREE;
        p_usbh_obj->constant.event_cb(&event_data, p_usbh_obj->constant.event_cb_arg);
    }
}
|
|
|
|
|
2024-03-21 08:57:32 -04:00
|
|
|
// Propagate a USBH_EVENT_NEW_DEV event carrying the new device's address
static inline void handle_prop_new_dev(device_t *dev_obj)
{
    const uint8_t dev_addr = dev_obj->constant.address;
    ESP_LOGD(USBH_TAG, "New device %d", dev_addr);
    usbh_event_data_t event_data = {
        .event = USBH_EVENT_NEW_DEV,
        .new_dev_data = {
            .dev_addr = dev_addr,
        },
    };
    p_usbh_obj->constant.event_cb(&event_data, p_usbh_obj->constant.event_cb_arg);
}
|
|
|
|
|
2024-03-29 09:29:01 -04:00
|
|
|
// -------------------------------------------- USBH Processing Functions ----------------------------------------------
|
2021-08-24 11:20:50 -04:00
|
|
|
|
|
|
|
// Install the USBH driver: allocate the singleton USBH object and its mutex,
// initialize the device lists, and store the user-supplied callbacks.
// Returns ESP_ERR_INVALID_STATE if already installed, ESP_ERR_NO_MEM on OOM.
esp_err_t usbh_install(const usbh_config_t *usbh_config)
{
    USBH_CHECK(usbh_config != NULL, ESP_ERR_INVALID_ARG);
    // Preliminary check that the USBH is not already installed
    USBH_ENTER_CRITICAL();
    USBH_CHECK_FROM_CRIT(p_usbh_obj == NULL, ESP_ERR_INVALID_STATE);
    USBH_EXIT_CRITICAL();

    esp_err_t ret;
    usbh_t *usbh_obj = heap_caps_calloc(1, sizeof(usbh_t), MALLOC_CAP_DEFAULT);
    SemaphoreHandle_t mux_lock = xSemaphoreCreateMutex();
    if (usbh_obj == NULL || mux_lock == NULL) {
        ret = ESP_ERR_NO_MEM;
        goto err;
    }
    // Initialize USBH object
    TAILQ_INIT(&usbh_obj->dynamic.devs_idle_tailq);
    TAILQ_INIT(&usbh_obj->dynamic.devs_pending_tailq);
    usbh_obj->constant.proc_req_cb = usbh_config->proc_req_cb;
    usbh_obj->constant.proc_req_cb_arg = usbh_config->proc_req_cb_arg;
    usbh_obj->constant.event_cb = usbh_config->event_cb;
    usbh_obj->constant.event_cb_arg = usbh_config->event_cb_arg;
    usbh_obj->constant.mux_lock = mux_lock;

    // Assign USBH object pointer (re-check under the critical section in case
    // another task installed concurrently since the preliminary check)
    USBH_ENTER_CRITICAL();
    if (p_usbh_obj != NULL) {
        USBH_EXIT_CRITICAL();
        ret = ESP_ERR_INVALID_STATE;
        goto err;
    }
    p_usbh_obj = usbh_obj;
    USBH_EXIT_CRITICAL();

    ret = ESP_OK;
    return ret;

err:
    // Roll back partial allocations (vSemaphoreDelete must not receive NULL)
    if (mux_lock != NULL) {
        vSemaphoreDelete(mux_lock);
    }
    heap_caps_free(usbh_obj);
    return ret;
}
|
|
|
|
|
|
|
|
/**
 * Uninstall the USBH driver.
 *
 * Fails with ESP_ERR_INVALID_STATE if the driver is not installed or if any
 * device objects are still allocated. On success, clears the singleton
 * pointer and frees the USBH object and its mutex.
 */
esp_err_t usbh_uninstall(void)
{
    // Check that USBH is in a state to be uninstalled
    USBH_ENTER_CRITICAL();
    USBH_CHECK_FROM_CRIT(p_usbh_obj != NULL, ESP_ERR_INVALID_STATE);
    usbh_t *usbh_obj = p_usbh_obj;
    USBH_EXIT_CRITICAL();

    esp_err_t ret;
    // We need to take the mux_lock to access mux_protected members
    xSemaphoreTake(usbh_obj->constant.mux_lock, portMAX_DELAY);
    if (p_usbh_obj->mux_protected.num_device > 0) {
        // There are still devices allocated. Can't uninstall right now.
        ret = ESP_ERR_INVALID_STATE;
        goto exit;
    }
    // Check again if we can uninstall
    USBH_ENTER_CRITICAL();
    assert(p_usbh_obj == usbh_obj);
    // Clear the singleton before releasing the lock so no new users can appear
    p_usbh_obj = NULL;
    USBH_EXIT_CRITICAL();
    xSemaphoreGive(usbh_obj->constant.mux_lock);

    // Free resources (mutex is released above before being deleted)
    vSemaphoreDelete(usbh_obj->constant.mux_lock);
    heap_caps_free(usbh_obj);
    ret = ESP_OK;
    return ret;

exit:
    // Uninstall aborted: p_usbh_obj is still valid here, release the lock only
    xSemaphoreGive(p_usbh_obj->constant.mux_lock);
    return ret;
}
|
|
|
|
|
|
|
|
/**
 * USBH processing function, called from the host library task in response to
 * the proc_req_cb.
 *
 * Drains the pending-device list: each device's accumulated action flags are
 * cleared under the critical section, then handled (outside the critical
 * section) in the fixed order the DEV_ACTION_* flags are declared in.
 *
 * Returns ESP_ERR_INVALID_STATE if the driver is not installed.
 */
esp_err_t usbh_process(void)
{
    USBH_ENTER_CRITICAL();
    USBH_CHECK_FROM_CRIT(p_usbh_obj != NULL, ESP_ERR_INVALID_STATE);
    // Keep processing until all device's with pending events have been handled
    while (!TAILQ_EMPTY(&p_usbh_obj->dynamic.devs_pending_tailq)) {
        // Move the device back into the idle device list,
        device_t *dev_obj = TAILQ_FIRST(&p_usbh_obj->dynamic.devs_pending_tailq);
        TAILQ_REMOVE(&p_usbh_obj->dynamic.devs_pending_tailq, dev_obj, dynamic.tailq_entry);
        TAILQ_INSERT_TAIL(&p_usbh_obj->dynamic.devs_idle_tailq, dev_obj, dynamic.tailq_entry);
        // Clear the device's flags (snapshot them first so new flags set while
        // we process are not lost: they re-queue the device)
        uint32_t action_flags = dev_obj->dynamic.action_flags;
        dev_obj->dynamic.action_flags = 0;
        dev_obj->dynamic.flags.in_pending_list = 0;

        /* ---------------------------------------------------------------------
        Exit critical section to handle device action flags in their listed order
        --------------------------------------------------------------------- */
        USBH_EXIT_CRITICAL();
        ESP_LOGD(USBH_TAG, "Processing actions 0x%"PRIx32"", action_flags);

        if (action_flags & DEV_ACTION_EPn_HALT_FLUSH) {
            handle_epn_halt_flush(dev_obj);
        }
        if (action_flags & DEV_ACTION_EP0_FLUSH) {
            handle_ep0_flush(dev_obj);
        }
        if (action_flags & DEV_ACTION_EP0_DEQUEUE) {
            handle_ep0_dequeue(dev_obj);
        }
        if (action_flags & DEV_ACTION_EP0_CLEAR) {
            handle_ep0_clear(dev_obj);
        }
        if (action_flags & DEV_ACTION_PROP_GONE_EVT) {
            handle_prop_gone_evt(dev_obj);
        }
        /*
        Note: We make these action flags mutually exclusive in case they happen in rapid succession. They are handled
        in the order of precedence
        For example
        - New device event is requested followed immediately by a disconnection
        */
        if (action_flags & DEV_ACTION_FREE) {
            // FREE takes precedence: dev_obj is invalid after this call
            handle_free(dev_obj);
        } else if (action_flags & DEV_ACTION_PROP_NEW_DEV) {
            handle_prop_new_dev(dev_obj);
        }
        USBH_ENTER_CRITICAL();
        /* ---------------------------------------------------------------------
        Re-enter critical sections. All device action flags should have been handled.
        --------------------------------------------------------------------- */
    }
    USBH_EXIT_CRITICAL();
    return ESP_OK;
}
|
|
|
|
|
2024-03-29 09:29:01 -04:00
|
|
|
// ---------------------------------------------- Device Pool Functions ------------------------------------------------
|
|
|
|
|
2024-04-02 23:24:03 -04:00
|
|
|
esp_err_t usbh_devs_num(int *num_devs_ret)
|
2021-12-08 06:46:46 -05:00
|
|
|
{
|
|
|
|
USBH_CHECK(num_devs_ret != NULL, ESP_ERR_INVALID_ARG);
|
|
|
|
xSemaphoreTake(p_usbh_obj->constant.mux_lock, portMAX_DELAY);
|
|
|
|
*num_devs_ret = p_usbh_obj->mux_protected.num_device;
|
|
|
|
xSemaphoreGive(p_usbh_obj->constant.mux_lock);
|
|
|
|
return ESP_OK;
|
|
|
|
}
|
|
|
|
|
2024-04-02 23:24:03 -04:00
|
|
|
/**
 * Fill dev_addr_list with the addresses of connected, fully enumerated
 * devices, up to list_len entries. Writes the number of filled entries to
 * num_dev_ret. Always returns ESP_OK.
 */
esp_err_t usbh_devs_addr_list_fill(int list_len, uint8_t *dev_addr_list, int *num_dev_ret)
{
    USBH_CHECK(dev_addr_list != NULL && num_dev_ret != NULL, ESP_ERR_INVALID_ARG);
    int num_filled = 0;
    device_t *dev_obj;

    USBH_ENTER_CRITICAL();
    /*
    Fill list with devices from idle tailq and pending tailq. Only devices that
    are fully enumerated are added to the list. Thus, the following devices are
    excluded:
    - Devices with their enum_lock set
    - Devices not in the configured state
    - Devices with address 0
    */
    TAILQ_FOREACH(dev_obj, &p_usbh_obj->dynamic.devs_idle_tailq, dynamic.tailq_entry) {
        if (num_filled < list_len) {
            if (!dev_obj->dynamic.flags.enum_lock &&
                    dev_obj->dynamic.state == USB_DEVICE_STATE_CONFIGURED &&
                    dev_obj->constant.address != 0) {
                dev_addr_list[num_filled] = dev_obj->constant.address;
                num_filled++;
            }
        } else {
            // Address list is already full
            break;
        }
    }
    // Fill list with devices from pending tailq
    TAILQ_FOREACH(dev_obj, &p_usbh_obj->dynamic.devs_pending_tailq, dynamic.tailq_entry) {
        if (num_filled < list_len) {
            if (!dev_obj->dynamic.flags.enum_lock &&
                    dev_obj->dynamic.state == USB_DEVICE_STATE_CONFIGURED &&
                    dev_obj->constant.address != 0) {
                dev_addr_list[num_filled] = dev_obj->constant.address;
                num_filled++;
            }
        } else {
            // Address list is already full
            break;
        }
    }
    USBH_EXIT_CRITICAL();
    // Write back number of devices filled
    *num_dev_ret = num_filled;
    return ESP_OK;
}
|
|
|
|
|
2024-04-09 17:06:46 -04:00
|
|
|
/**
 * Add a newly connected (unenumerated) device to the USBH device pool.
 *
 * Allocates a device object (address 0) for the port and inserts it into the
 * idle device list.
 *
 * Returns:
 * - ESP_ERR_INVALID_ARG if port_hdl is NULL or a device with the same uid
 *   already exists
 * - ESP_ERR_NOT_FINISHED if another device still holds address 0 (i.e., its
 *   enumeration has not completed yet)
 * - device_alloc() errors on allocation failure
 */
esp_err_t usbh_devs_add(unsigned int uid, usb_speed_t dev_speed, hcd_port_handle_t port_hdl)
{
    USBH_CHECK(port_hdl != NULL, ESP_ERR_INVALID_ARG);
    esp_err_t ret;
    device_t *dev_obj;

    // Allocate a device object (initialized to address 0)
    ret = device_alloc(uid, dev_speed, port_hdl, &dev_obj);
    if (ret != ESP_OK) {
        return ret;
    }

    // We need to take the mux_lock to access mux_protected members
    xSemaphoreTake(p_usbh_obj->constant.mux_lock, portMAX_DELAY);
    USBH_ENTER_CRITICAL();

    // Check that there is not already a device with the same uid
    if (_find_dev_from_uid(uid) != NULL) {
        ret = ESP_ERR_INVALID_ARG;
        goto exit;
    }
    // Check that there is not already a device currently with address 0
    if (_find_dev_from_addr(0) != NULL) {
        ret = ESP_ERR_NOT_FINISHED;
        goto exit;
    }
    // Add the device to the idle device list
    TAILQ_INSERT_TAIL(&p_usbh_obj->dynamic.devs_idle_tailq, dev_obj, dynamic.tailq_entry);
    p_usbh_obj->mux_protected.num_device++;
    ret = ESP_OK;

exit:
    USBH_EXIT_CRITICAL();
    xSemaphoreGive(p_usbh_obj->constant.mux_lock);

    // Bug fix: if the device object was not inserted into the pool (uid or
    // address-0 conflict), it was previously leaked. Free it here.
    if (ret != ESP_OK) {
        device_free(dev_obj);
    }

    return ret;
}
|
|
|
|
|
|
|
|
/**
 * Mark a device (looked up by uid) as disconnected ("gone").
 *
 * If the device is not opened by anyone it is scheduled to be freed
 * immediately; otherwise its endpoints are scheduled for halt/flush and a
 * USBH_EVENT_DEV_GONE event is propagated so openers can close it.
 *
 * Returns ESP_ERR_NOT_FOUND if no device with that uid exists.
 */
esp_err_t usbh_devs_remove(unsigned int uid)
{
    esp_err_t ret;
    device_t *dev_obj;
    bool call_proc_req_cb = false;

    USBH_ENTER_CRITICAL();
    dev_obj = _find_dev_from_uid(uid);
    if (dev_obj == NULL) {
        ret = ESP_ERR_NOT_FOUND;
        goto exit;
    }
    // Mark the device as gone
    dev_obj->dynamic.flags.is_gone = 1;
    // Check if the device can be freed immediately
    if (dev_obj->dynamic.open_count == 0) {
        // Device is not currently opened at all. Can free immediately.
        call_proc_req_cb = _dev_set_actions(dev_obj, DEV_ACTION_FREE);
    } else {
        // Device is still opened. Flush endpoints and propagate device gone event
        call_proc_req_cb = _dev_set_actions(dev_obj,
                                            DEV_ACTION_EPn_HALT_FLUSH |
                                            DEV_ACTION_EP0_FLUSH |
                                            DEV_ACTION_EP0_DEQUEUE |
                                            DEV_ACTION_PROP_GONE_EVT);
    }
    ret = ESP_OK;
exit:
    USBH_EXIT_CRITICAL();

    // Call the processing request callback so usbh_process() runs the actions
    if (call_proc_req_cb) {
        p_usbh_obj->constant.proc_req_cb(USB_PROC_REQ_SOURCE_USBH, false, p_usbh_obj->constant.proc_req_cb_arg);
    }

    return ret;
}
|
|
|
|
|
2024-04-02 23:24:03 -04:00
|
|
|
/**
 * Mark all devices as waiting to be freed.
 *
 * Unopened devices are scheduled for immediate freeing; opened devices are
 * flagged waiting_free and will be freed when their last opener closes them.
 *
 * Returns ESP_OK if there were no devices at all, ESP_ERR_NOT_FINISHED if at
 * least one device still needs to be freed (caller should wait for the
 * USBH_EVENT_ALL_FREE event).
 */
esp_err_t usbh_devs_mark_all_free(void)
{
    USBH_ENTER_CRITICAL();
    /*
    Go through the device list and mark each device as waiting to be closed. If the device is not opened at all, we can
    disable it immediately.
    Note: We manually traverse the list because we need to add/remove items while traversing
    */
    bool call_proc_req_cb = false;
    bool wait_for_free = false;
    // i == 0 traverses the pending list, i == 1 the idle list
    for (int i = 0; i < 2; i++) {
        device_t *dev_obj_cur;
        device_t *dev_obj_next;
        // Go through pending list first as it's more efficient
        if (i == 0) {
            dev_obj_cur = TAILQ_FIRST(&p_usbh_obj->dynamic.devs_pending_tailq);
        } else {
            dev_obj_cur = TAILQ_FIRST(&p_usbh_obj->dynamic.devs_idle_tailq);
        }
        while (dev_obj_cur != NULL) {
            // Keep a copy of the next item first in case we remove the current item
            dev_obj_next = TAILQ_NEXT(dev_obj_cur, dynamic.tailq_entry);
            if (dev_obj_cur->dynamic.open_count == 0) {
                // Device is not opened. Can free immediately.
                call_proc_req_cb |= _dev_set_actions(dev_obj_cur, DEV_ACTION_FREE);
            } else {
                // Device is still opened. Just mark it as waiting to be freed
                dev_obj_cur->dynamic.flags.waiting_free = 1;
            }
            // At least one device needs to be freed. User needs to wait for USBH_EVENT_ALL_FREE event
            wait_for_free = true;
            dev_obj_cur = dev_obj_next;
        }
    }
    USBH_EXIT_CRITICAL();

    if (call_proc_req_cb) {
        p_usbh_obj->constant.proc_req_cb(USB_PROC_REQ_SOURCE_USBH, false, p_usbh_obj->constant.proc_req_cb_arg);
    }
    return (wait_for_free) ? ESP_ERR_NOT_FINISHED : ESP_OK;
}
|
|
|
|
|
2024-04-02 23:24:03 -04:00
|
|
|
/**
 * Open the device with the given bus address, incrementing its open count and
 * returning its handle.
 */
esp_err_t usbh_devs_open(uint8_t dev_addr, usb_device_handle_t *dev_hdl)
{
    USBH_CHECK(dev_hdl != NULL, ESP_ERR_INVALID_ARG);
    esp_err_t ret;

    USBH_ENTER_CRITICAL();
    // Look the device up by address across both device lists
    device_t *dev = _find_dev_from_addr(dev_addr);
    if (dev == NULL) {
        ret = ESP_ERR_NOT_FOUND;
    } else if (dev->dynamic.flags.is_gone || dev->dynamic.flags.waiting_free) {
        // Device already disconnected or pending free: cannot be opened
        ret = ESP_ERR_INVALID_STATE;
    } else if (dev->dynamic.flags.enum_lock) {
        // Device is locked for enumeration
        ret = ESP_ERR_NOT_ALLOWED;
    } else {
        dev->dynamic.open_count++;
        *dev_hdl = (usb_device_handle_t)dev;
        ret = ESP_OK;
    }
    USBH_EXIT_CRITICAL();

    return ret;
}
|
|
|
|
|
2024-04-02 23:24:03 -04:00
|
|
|
/**
 * Close a previously opened device, decrementing its open count.
 *
 * If this was the last opener and the device is gone (disconnected) or
 * waiting to be freed, schedules the device object to be freed by
 * usbh_process(). Returns ESP_ERR_NOT_ALLOWED if the caller still holds the
 * device's enum_lock.
 */
esp_err_t usbh_devs_close(usb_device_handle_t dev_hdl)
{
    USBH_CHECK(dev_hdl != NULL, ESP_ERR_INVALID_ARG);
    device_t *dev_obj = (device_t *)dev_hdl;

    USBH_ENTER_CRITICAL();
    // Device should never be closed while its enum_lock is set
    USBH_CHECK_FROM_CRIT(!dev_obj->dynamic.flags.enum_lock, ESP_ERR_NOT_ALLOWED);
    dev_obj->dynamic.open_count--;
    bool call_proc_req_cb = false;
    if (dev_obj->dynamic.open_count == 0) {
        // Sanity check.
        assert(dev_obj->dynamic.num_ctrl_xfers_inflight == 0); // There cannot be any control transfer in-flight
        assert(!dev_obj->dynamic.flags.waiting_free); // This can only be set when open_count reaches 0
        // NOTE(review): given the assert above, the waiting_free half of this
        // condition is unreachable here — only is_gone can trigger the free.
        if (dev_obj->dynamic.flags.is_gone || dev_obj->dynamic.flags.waiting_free) {
            // Device is already gone or is awaiting to be freed. Trigger the USBH process to free the device
            call_proc_req_cb = _dev_set_actions(dev_obj, DEV_ACTION_FREE);
        }
        // Else, there's nothing to do. Leave the device allocated
    }
    USBH_EXIT_CRITICAL();

    if (call_proc_req_cb) {
        p_usbh_obj->constant.proc_req_cb(USB_PROC_REQ_SOURCE_USBH, false, p_usbh_obj->constant.proc_req_cb_arg);
    }

    return ESP_OK;
}
|
|
|
|
|
|
|
|
/**
 * Request propagation of a USBH_EVENT_NEW_DEV event for a device that has
 * completed enumeration. The device must be in the configured state.
 */
esp_err_t usbh_devs_new_dev_event(usb_device_handle_t dev_hdl)
{
    device_t *dev = (device_t *)dev_hdl;
    bool notify_usbh_task = false;

    USBH_ENTER_CRITICAL();
    // Only a fully configured device may announce itself as new
    USBH_CHECK_FROM_CRIT(dev->dynamic.state == USB_DEVICE_STATE_CONFIGURED, ESP_ERR_INVALID_STATE);
    notify_usbh_task = _dev_set_actions(dev, DEV_ACTION_PROP_NEW_DEV);
    USBH_EXIT_CRITICAL();

    // Ask the USBH processing task to run if the action was newly queued
    if (notify_usbh_task) {
        p_usbh_obj->constant.proc_req_cb(USB_PROC_REQ_SOURCE_USBH, false, p_usbh_obj->constant.proc_req_cb_arg);
    }

    return ESP_OK;
}
|
|
|
|
|
2024-03-29 09:29:01 -04:00
|
|
|
// ------------------------------------------------ Device Functions ---------------------------------------------------
|
2021-08-24 11:20:50 -04:00
|
|
|
|
2024-03-29 09:29:01 -04:00
|
|
|
// ----------------------- Getters -------------------------
|
2021-08-24 11:20:50 -04:00
|
|
|
|
|
|
|
/**
 * Get the bus address of an opened device. Fails with ESP_ERR_INVALID_STATE
 * while the device still has the default address (0).
 */
esp_err_t usbh_dev_get_addr(usb_device_handle_t dev_hdl, uint8_t *dev_addr)
{
    USBH_CHECK(dev_hdl != NULL && dev_addr != NULL, ESP_ERR_INVALID_ARG);
    device_t *dev = (device_t *)dev_hdl;

    USBH_ENTER_CRITICAL();
    USBH_CHECK_FROM_CRIT(dev->constant.address > 0, ESP_ERR_INVALID_STATE);
    *dev_addr = dev->constant.address;
    USBH_EXIT_CRITICAL();

    return ESP_OK;
}
|
|
|
|
|
|
|
|
/**
 * Fill dev_info with a snapshot of the device's information (speed, address,
 * EP0 MPS, active configuration value, and cached string descriptors).
 */
esp_err_t usbh_dev_get_info(usb_device_handle_t dev_hdl, usb_device_info_t *dev_info)
{
    USBH_CHECK(dev_hdl != NULL && dev_info != NULL, ESP_ERR_INVALID_ARG);
    device_t *dev = (device_t *)dev_hdl;

    dev_info->speed = dev->constant.speed;
    dev_info->dev_addr = dev->constant.address;
    // Device descriptor might not have been set yet; fall back to the default
    // pipe's MPS in that case
    dev_info->bMaxPacketSize0 = (dev->constant.desc != NULL)
                                ? dev->constant.desc->bMaxPacketSize0
                                : hcd_pipe_get_mps(dev->constant.default_pipe);
    // Configuration descriptor might not have been set yet
    dev_info->bConfigurationValue = (dev->constant.config_desc != NULL)
                                    ? dev->constant.config_desc->bConfigurationValue
                                    : 0;
    dev_info->str_desc_manufacturer = dev->constant.str_desc_manu;
    dev_info->str_desc_product = dev->constant.str_desc_product;
    dev_info->str_desc_serial_num = dev->constant.str_desc_ser_num;

    return ESP_OK;
}
|
|
|
|
|
|
|
|
/**
 * Return the device's cached device descriptor (may be NULL if it has not
 * been set yet during enumeration).
 */
esp_err_t usbh_dev_get_desc(usb_device_handle_t dev_hdl, const usb_device_desc_t **dev_desc_ret)
{
    USBH_CHECK(dev_hdl != NULL && dev_desc_ret != NULL, ESP_ERR_INVALID_ARG);
    device_t *dev = (device_t *)dev_hdl;
    *dev_desc_ret = dev->constant.desc;
    return ESP_OK;
}
|
|
|
|
|
|
|
|
/**
 * Return the device's cached configuration descriptor (may be NULL if it has
 * not been set yet during enumeration).
 */
esp_err_t usbh_dev_get_config_desc(usb_device_handle_t dev_hdl, const usb_config_desc_t **config_desc_ret)
{
    USBH_CHECK(dev_hdl != NULL && config_desc_ret != NULL, ESP_ERR_INVALID_ARG);
    device_t *dev = (device_t *)dev_hdl;
    *config_desc_ret = dev->constant.config_desc;
    return ESP_OK;
}
|
|
|
|
|
|
|
|
// ----------------------- Setters -------------------------
|
|
|
|
|
|
|
|
/**
 * Set the device's enum_lock, granting the caller exclusive access to set
 * enumeration-related fields (address, descriptors, EP0 MPS).
 *
 * Fails with ESP_ERR_INVALID_STATE if any non-default endpoint is allocated,
 * if the caller is not the sole opener, or if the lock is already set.
 */
esp_err_t usbh_dev_enum_lock(usb_device_handle_t dev_hdl)
{
    USBH_CHECK(dev_hdl != NULL, ESP_ERR_INVALID_ARG);
    esp_err_t ret;
    device_t *dev_obj = (device_t *)dev_hdl;

    // We need to take the mux_lock to access mux_protected members
    xSemaphoreTake(p_usbh_obj->constant.mux_lock, portMAX_DELAY);

    /*
    The device's enum_lock can only be set when the following conditions are met:
    - No other endpoints except EP0 have been allocated
    - We are the sole opener
    - Device's enum_lock is not already set
    */
    // Check that no other endpoints except EP0 have been allocated
    bool ep_found = false;
    for (int i = 0; i < NUM_NON_DEFAULT_EP; i++) {
        if (dev_obj->mux_protected.endpoints[i] != NULL) {
            ep_found = true;
            break;
        }
    }
    if (ep_found) {
        ret = ESP_ERR_INVALID_STATE;
        goto exit;
    }
    // Check that we are the sole opener and enum_lock is not already set
    // (dynamic members require the critical section, not the mutex)
    USBH_ENTER_CRITICAL();
    if (!dev_obj->dynamic.flags.enum_lock && (dev_obj->dynamic.open_count == 1)) {
        dev_obj->dynamic.flags.enum_lock = true;
        ret = ESP_OK;
    } else {
        ret = ESP_ERR_INVALID_STATE;
    }
    USBH_EXIT_CRITICAL();

exit:
    xSemaphoreGive(p_usbh_obj->constant.mux_lock);

    return ret;
}
|
|
|
|
|
|
|
|
/**
 * Clear the device's enum_lock, releasing exclusive enumeration access.
 * Fails with ESP_ERR_INVALID_STATE if the lock was not set.
 */
esp_err_t usbh_dev_enum_unlock(usb_device_handle_t dev_hdl)
{
    USBH_CHECK(dev_hdl != NULL, ESP_ERR_INVALID_ARG);
    device_t *dev = (device_t *)dev_hdl;
    esp_err_t ret = ESP_ERR_INVALID_STATE;

    USBH_ENTER_CRITICAL();
    if (dev->dynamic.flags.enum_lock) {
        // We must still be the sole opener while the lock is held
        assert(dev->dynamic.open_count == 1);
        dev->dynamic.flags.enum_lock = false;
        ret = ESP_OK;
    }
    USBH_EXIT_CRITICAL();

    return ret;
}
|
|
|
|
|
|
|
|
/**
 * Update the default pipe's (EP0) maximum packet size. Only permitted while
 * the device is in the default state and the caller holds the enum_lock.
 */
esp_err_t usbh_dev_set_ep0_mps(usb_device_handle_t dev_hdl, uint16_t wMaxPacketSize)
{
    USBH_CHECK(dev_hdl != NULL, ESP_ERR_INVALID_ARG);
    device_t *dev = (device_t *)dev_hdl;
    esp_err_t ret;

    USBH_ENTER_CRITICAL();
    if (dev->dynamic.state != USB_DEVICE_STATE_DEFAULT) {
        // EP0 MPS can only be updated in the default state
        ret = ESP_ERR_INVALID_STATE;
    } else if (!dev->dynamic.flags.enum_lock) {
        // enum_lock must be held before enumeration data fields can be set
        ret = ESP_ERR_NOT_ALLOWED;
    } else {
        ret = hcd_pipe_update_mps(dev->constant.default_pipe, wMaxPacketSize);
    }
    USBH_EXIT_CRITICAL();

    return ret;
}
|
|
|
|
|
|
|
|
/**
 * Assign the device its bus address (after a successful SET_ADDRESS) and move
 * it to the addressed state. Requires the default state and a held enum_lock.
 */
esp_err_t usbh_dev_set_addr(usb_device_handle_t dev_hdl, uint8_t dev_addr)
{
    USBH_CHECK(dev_hdl != NULL, ESP_ERR_INVALID_ARG);
    esp_err_t ret;
    device_t *dev_obj = (device_t *)dev_hdl;

    USBH_ENTER_CRITICAL();
    // Device's address can only be set when in the default state
    USBH_CHECK_FROM_CRIT(dev_obj->dynamic.state == USB_DEVICE_STATE_DEFAULT, ESP_ERR_INVALID_STATE);
    // Device's enum_lock must be set before enumeration related data fields can be set
    USBH_CHECK_FROM_CRIT(dev_obj->dynamic.flags.enum_lock, ESP_ERR_NOT_ALLOWED);
    // Update the device and default pipe's target address
    ret = hcd_pipe_update_dev_addr(dev_obj->constant.default_pipe, dev_addr);
    if (ret == ESP_OK) {
        // Only commit the new address/state if the pipe accepted the update
        dev_obj->constant.address = dev_addr;
        dev_obj->dynamic.state = USB_DEVICE_STATE_ADDRESS;
    }
    USBH_EXIT_CRITICAL();

    return ret;
}
|
|
|
|
|
|
|
|
/**
 * Cache a copy of the device descriptor in the device object.
 *
 * A heap copy of device_desc is taken; any previously cached descriptor is
 * freed. Requires the default or addressed state and a held enum_lock.
 */
esp_err_t usbh_dev_set_desc(usb_device_handle_t dev_hdl, const usb_device_desc_t *device_desc)
{
    USBH_CHECK(dev_hdl != NULL && device_desc != NULL, ESP_ERR_INVALID_ARG);
    esp_err_t ret;
    device_t *dev_obj = (device_t *)dev_hdl;
    usb_device_desc_t *new_desc, *old_desc;

    // Allocate and copy new device descriptor (outside the critical section)
    new_desc = heap_caps_malloc(sizeof(usb_device_desc_t), MALLOC_CAP_DEFAULT);
    if (new_desc == NULL) {
        return ESP_ERR_NO_MEM;
    }
    memcpy(new_desc, device_desc, sizeof(usb_device_desc_t));

    USBH_ENTER_CRITICAL();
    // Device's descriptor can only be set in the default or addressed state
    if (!(dev_obj->dynamic.state == USB_DEVICE_STATE_DEFAULT || dev_obj->dynamic.state == USB_DEVICE_STATE_ADDRESS)) {
        ret = ESP_ERR_INVALID_STATE;
        goto err;
    }
    // Device's enum_lock must be set before we can set its device descriptor
    if (!dev_obj->dynamic.flags.enum_lock) {
        ret = ESP_ERR_NOT_ALLOWED;
        goto err;
    }
    old_desc = dev_obj->constant.desc; // Save old descriptor for cleanup
    dev_obj->constant.desc = new_desc; // Assign new descriptor
    USBH_EXIT_CRITICAL();

    // Free the replaced descriptor (NULL-safe)
    heap_caps_free(old_desc);
    ret = ESP_OK;

    return ret;

err:
    // Checks failed: discard the copy we made
    USBH_EXIT_CRITICAL();
    heap_caps_free(new_desc);
    return ret;
}
|
|
|
|
|
|
|
|
/**
 * Cache a copy of the full configuration descriptor (wTotalLength bytes) and
 * move the device to the configured state.
 *
 * Any previously cached configuration descriptor is freed. Requires the
 * addressed state and a held enum_lock.
 */
esp_err_t usbh_dev_set_config_desc(usb_device_handle_t dev_hdl, const usb_config_desc_t *config_desc_full)
{
    USBH_CHECK(dev_hdl != NULL && config_desc_full != NULL, ESP_ERR_INVALID_ARG);
    esp_err_t ret;
    device_t *dev_obj = (device_t *)dev_hdl;
    usb_config_desc_t *new_desc, *old_desc;

    // Allocate and copy new config descriptor (outside the critical section)
    new_desc = heap_caps_malloc(config_desc_full->wTotalLength, MALLOC_CAP_DEFAULT);
    if (new_desc == NULL) {
        return ESP_ERR_NO_MEM;
    }
    memcpy(new_desc, config_desc_full, config_desc_full->wTotalLength);

    USBH_ENTER_CRITICAL();
    // Device's config descriptor can only be set when in the addressed state
    if (dev_obj->dynamic.state != USB_DEVICE_STATE_ADDRESS) {
        ret = ESP_ERR_INVALID_STATE;
        goto err;
    }
    // Device's enum_lock must be set before we can set its config descriptor
    if (!dev_obj->dynamic.flags.enum_lock) {
        ret = ESP_ERR_NOT_ALLOWED;
        goto err;
    }
    old_desc = dev_obj->constant.config_desc; // Save old descriptor for cleanup
    dev_obj->constant.config_desc = new_desc; // Assign new descriptor
    dev_obj->dynamic.state = USB_DEVICE_STATE_CONFIGURED;
    USBH_EXIT_CRITICAL();

    // Free the replaced descriptor (NULL-safe)
    heap_caps_free(old_desc);
    ret = ESP_OK;

    return ret;

err:
    // Checks failed: discard the copy we made
    USBH_EXIT_CRITICAL();
    heap_caps_free(new_desc);
    return ret;
}
|
|
|
|
|
|
|
|
/**
 * Cache a copy of one of the device's string descriptors.
 *
 * select: 0 = manufacturer, 1 = product, 2 = serial number. Any previously
 * cached descriptor of that kind is freed. Requires the configured state and
 * a held enum_lock.
 */
esp_err_t usbh_dev_set_str_desc(usb_device_handle_t dev_hdl, const usb_str_desc_t *str_desc, int select)
{
    USBH_CHECK(dev_hdl != NULL && str_desc != NULL && (select >= 0 && select < 3), ESP_ERR_INVALID_ARG);
    esp_err_t ret;
    device_t *dev_obj = (device_t *)dev_hdl;
    usb_str_desc_t *new_desc, *old_desc;

    // Allocate and copy new string descriptor (outside the critical section)
    new_desc = heap_caps_malloc(str_desc->bLength, MALLOC_CAP_DEFAULT);
    if (new_desc == NULL) {
        return ESP_ERR_NO_MEM;
    }
    memcpy(new_desc, str_desc, str_desc->bLength);

    USBH_ENTER_CRITICAL();
    // Device's string descriptors can only be set when in the configured state
    if (dev_obj->dynamic.state != USB_DEVICE_STATE_CONFIGURED) {
        ret = ESP_ERR_INVALID_STATE;
        goto err;
    }
    // Device's enum_lock must be set before we can set its string descriptors
    if (!dev_obj->dynamic.flags.enum_lock) {
        ret = ESP_ERR_NOT_ALLOWED;
        goto err;
    }
    // Assign to the selected descriptor
    switch (select) {
    case 0:
        old_desc = dev_obj->constant.str_desc_manu;
        dev_obj->constant.str_desc_manu = new_desc;
        break;
    case 1:
        old_desc = dev_obj->constant.str_desc_product;
        dev_obj->constant.str_desc_product = new_desc;
        break;
    default: // 2
        old_desc = dev_obj->constant.str_desc_ser_num;
        dev_obj->constant.str_desc_ser_num = new_desc;
        break;
    }
    USBH_EXIT_CRITICAL();

    // Free the replaced descriptor (NULL-safe)
    heap_caps_free(old_desc);
    ret = ESP_OK;

    return ret;

err:
    // Checks failed: discard the copy we made
    USBH_EXIT_CRITICAL();
    heap_caps_free(new_desc);
    return ret;
}
|
|
|
|
|
2024-03-29 09:29:01 -04:00
|
|
|
// ----------------------------------------------- Endpoint Functions -------------------------------------------------
|
2021-08-24 11:20:50 -04:00
|
|
|
|
2023-05-08 12:43:32 -04:00
|
|
|
/**
 * Allocate a non-default endpoint on a device.
 *
 * Looks up the endpoint descriptor in the device's cached configuration
 * descriptor, allocates an endpoint object (with its underlying pipe), and
 * registers it in the device's endpoint table.
 *
 * Returns ESP_ERR_NOT_FOUND if the endpoint descriptor is not present in the
 * configuration, ESP_ERR_INVALID_STATE if the device is not configured or the
 * endpoint is already allocated.
 */
esp_err_t usbh_ep_alloc(usb_device_handle_t dev_hdl, usbh_ep_config_t *ep_config, usbh_ep_handle_t *ep_hdl_ret)
{
    USBH_CHECK(dev_hdl != NULL && ep_config != NULL && ep_hdl_ret != NULL, ESP_ERR_INVALID_ARG);
    uint8_t bEndpointAddress = ep_config->bEndpointAddress;
    USBH_CHECK(check_ep_addr(bEndpointAddress), ESP_ERR_INVALID_ARG);

    esp_err_t ret;
    device_t *dev_obj = (device_t *)dev_hdl;
    endpoint_t *ep_obj;
    USBH_CHECK(dev_obj->constant.config_desc, ESP_ERR_INVALID_STATE); // Configuration descriptor must be set

    // Find the endpoint descriptor from the device's current configuration descriptor
    const usb_ep_desc_t *ep_desc = usb_parse_endpoint_descriptor_by_address(dev_obj->constant.config_desc, ep_config->bInterfaceNumber, ep_config->bAlternateSetting, ep_config->bEndpointAddress, NULL);
    if (ep_desc == NULL) {
        return ESP_ERR_NOT_FOUND;
    }
    // Allocate the endpoint object
    ret = endpoint_alloc(dev_obj, ep_desc, ep_config, &ep_obj);
    if (ret != ESP_OK) {
        goto alloc_err;
    }

    // We need to take the mux_lock to access mux_protected members
    xSemaphoreTake(p_usbh_obj->constant.mux_lock, portMAX_DELAY);
    USBH_ENTER_CRITICAL();
    // Check the device's state before we assign a pipe to the allocated endpoint
    if (dev_obj->dynamic.state != USB_DEVICE_STATE_CONFIGURED) {
        USBH_EXIT_CRITICAL();
        ret = ESP_ERR_INVALID_STATE;
        goto dev_state_err;
    }
    USBH_EXIT_CRITICAL();
    // Check if the endpoint has already been allocated
    if (get_ep_from_addr(dev_obj, bEndpointAddress) == NULL) {
        set_ep_from_addr(dev_obj, bEndpointAddress, ep_obj);
        // Write back the endpoint handle
        *ep_hdl_ret = (usbh_ep_handle_t)ep_obj;
        ret = ESP_OK;
    } else {
        // Endpoint is already allocated
        ret = ESP_ERR_INVALID_STATE;
    }
dev_state_err:
    xSemaphoreGive(p_usbh_obj->constant.mux_lock);

    // If the endpoint was not assigned, free it
    if (ret != ESP_OK) {
        endpoint_free(ep_obj);
    }
alloc_err:
    return ret;
}
|
|
|
|
|
2023-05-08 12:43:32 -04:00
|
|
|
esp_err_t usbh_ep_free(usbh_ep_handle_t ep_hdl)
{
    USBH_CHECK(ep_hdl != NULL, ESP_ERR_INVALID_ARG);

    endpoint_t *ep = (endpoint_t *)ep_hdl;
    device_t *owner_dev = (device_t *)ep->constant.dev;
    uint8_t ep_addr = ep->constant.ep_desc->bEndpointAddress;

    // Todo: Check that the EP's underlying pipe is halted before allowing the EP to be freed (IDF-7273)
    // The EP's underlying pipe must not have any in-flight URBs
    if (hcd_pipe_get_num_urbs(ep->constant.pipe_hdl) != 0) {
        return ESP_ERR_INVALID_STATE;
    }

    // mux_protected members require holding the mux_lock
    esp_err_t ret;
    xSemaphoreTake(p_usbh_obj->constant.mux_lock, portMAX_DELAY);
    if (get_ep_from_addr(owner_dev, ep_addr) == ep) {
        // Unlink the endpoint from the device's endpoint object list
        set_ep_from_addr(owner_dev, ep_addr, NULL);
        ret = ESP_OK;
    } else {
        // This endpoint was never allocated on its device
        ret = ESP_ERR_NOT_FOUND;
    }
    xSemaphoreGive(p_usbh_obj->constant.mux_lock);

    // Only free the endpoint object once it has been successfully unlinked
    if (ret == ESP_OK) {
        endpoint_free(ep);
    }
    return ret;
}
|
|
|
|
|
2023-05-08 12:43:32 -04:00
|
|
|
esp_err_t usbh_ep_get_handle(usb_device_handle_t dev_hdl, uint8_t bEndpointAddress, usbh_ep_handle_t *ep_hdl_ret)
{
    USBH_CHECK(dev_hdl != NULL && ep_hdl_ret != NULL, ESP_ERR_INVALID_ARG);
    USBH_CHECK(check_ep_addr(bEndpointAddress), ESP_ERR_INVALID_ARG);

    device_t *dev = (device_t *)dev_hdl;

    // The endpoint lookup table is mux_protected; hold the mux_lock for the lookup
    xSemaphoreTake(p_usbh_obj->constant.mux_lock, portMAX_DELAY);
    endpoint_t *found = get_ep_from_addr(dev, bEndpointAddress);
    xSemaphoreGive(p_usbh_obj->constant.mux_lock);

    if (found == NULL) {
        return ESP_ERR_NOT_FOUND;
    }
    // Write back the endpoint handle
    *ep_hdl_ret = (usbh_ep_handle_t)found;
    return ESP_OK;
}
|
|
|
|
|
2024-03-29 09:29:01 -04:00
|
|
|
esp_err_t usbh_ep_command(usbh_ep_handle_t ep_hdl, usbh_ep_cmd_t command)
{
    USBH_CHECK(ep_hdl != NULL, ESP_ERR_INVALID_ARG);

    endpoint_t *ep = (endpoint_t *)ep_hdl;
    // Endpoint commands map directly onto commands of the EP's underlying pipe
    return hcd_pipe_command(ep->constant.pipe_hdl, (hcd_pipe_cmd_t)command);
}
|
|
|
|
|
|
|
|
void *usbh_ep_get_context(usbh_ep_handle_t ep_hdl)
{
    assert(ep_hdl);

    endpoint_t *ep = (endpoint_t *)ep_hdl;
    // The context is stored by the EP's underlying pipe
    return hcd_pipe_get_context(ep->constant.pipe_hdl);
}
|
|
|
|
|
|
|
|
// ----------------------------------------------- Transfer Functions --------------------------------------------------
|
|
|
|
|
|
|
|
/*
 * Submit a control transfer URB to a device's default pipe (EP0).
 *
 * The in-flight control transfer counter is incremented optimistically before
 * the enqueue attempt and rolled back on any failure path; on success it is
 * left incremented (decremented later when the URB completes).
 *
 * Returns ESP_ERR_INVALID_ARG on bad arguments or a non-compliant transfer,
 * ESP_ERR_INVALID_STATE if the default pipe is not active, otherwise the
 * result of hcd_urb_enqueue().
 */
esp_err_t usbh_dev_submit_ctrl_urb(usb_device_handle_t dev_hdl, urb_t *urb)
{
    USBH_CHECK(dev_hdl != NULL && urb != NULL, ESP_ERR_INVALID_ARG);
    device_t *dev_obj = (device_t *)dev_hdl;
    USBH_CHECK(urb_check_args(urb), ESP_ERR_INVALID_ARG);
    // Transfer direction comes from the setup packet at the start of the data buffer
    bool xfer_is_in = ((usb_setup_packet_t *)urb->transfer.data_buffer)->bmRequestType & USB_BM_REQUEST_TYPE_DIR_IN;
    // Device descriptor could still be NULL at this point, so we get the MPS from the pipe instead.
    unsigned int mps = hcd_pipe_get_mps(dev_obj->constant.default_pipe);
    USBH_CHECK(transfer_check_usb_compliance(&(urb->transfer), USB_TRANSFER_TYPE_CTRL, mps, xfer_is_in), ESP_ERR_INVALID_ARG);

    USBH_ENTER_CRITICAL();
    // Increment the control transfer count first; rolled back below if the enqueue fails
    dev_obj->dynamic.num_ctrl_xfers_inflight++;
    USBH_EXIT_CRITICAL();

    esp_err_t ret;
    if (hcd_pipe_get_state(dev_obj->constant.default_pipe) != HCD_PIPE_STATE_ACTIVE) {
        ret = ESP_ERR_INVALID_STATE;
        goto hcd_err;
    }
    ret = hcd_urb_enqueue(dev_obj->constant.default_pipe, urb);
    if (ret != ESP_OK) {
        goto hcd_err;
    }
    // Success: ret is already ESP_OK here (previous redundant `ret = ESP_OK;` removed)
    return ret;

hcd_err:
    // Enqueue failed; undo the optimistic counter increment
    USBH_ENTER_CRITICAL();
    dev_obj->dynamic.num_ctrl_xfers_inflight--;
    USBH_EXIT_CRITICAL();
    return ret;
}
|
|
|
|
|
2023-05-08 12:43:32 -04:00
|
|
|
esp_err_t usbh_ep_enqueue_urb(usbh_ep_handle_t ep_hdl, urb_t *urb)
{
    USBH_CHECK(ep_hdl != NULL && urb != NULL, ESP_ERR_INVALID_ARG);
    USBH_CHECK(urb_check_args(urb), ESP_ERR_INVALID_ARG);

    endpoint_t *ep = (endpoint_t *)ep_hdl;
    const usb_ep_desc_t *desc = ep->constant.ep_desc;

    // The URB must comply with the endpoint's transfer type, MPS, and direction
    USBH_CHECK(transfer_check_usb_compliance(&(urb->transfer),
                                             USB_EP_DESC_GET_XFERTYPE(desc),
                                             USB_EP_DESC_GET_MPS(desc),
                                             USB_EP_DESC_GET_EP_DIR(desc)),
               ESP_ERR_INVALID_ARG);
    // Only an active pipe may accept new URBs
    if (hcd_pipe_get_state(ep->constant.pipe_hdl) != HCD_PIPE_STATE_ACTIVE) {
        return ESP_ERR_INVALID_STATE;
    }
    // Hand the URB to the EP's underlying pipe
    return hcd_urb_enqueue(ep->constant.pipe_hdl, urb);
}
|
|
|
|
|
|
|
|
esp_err_t usbh_ep_dequeue_urb(usbh_ep_handle_t ep_hdl, urb_t **urb_ret)
{
    USBH_CHECK(ep_hdl != NULL && urb_ret != NULL, ESP_ERR_INVALID_ARG);

    endpoint_t *ep = (endpoint_t *)ep_hdl;
    // Dequeue the next URB from the EP's underlying pipe
    // (original comment said "Enqueue" — this is a dequeue)
    *urb_ret = hcd_urb_dequeue(ep->constant.pipe_hdl);
    return ESP_OK;
}
|