/*
 * SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
Warning: The USB Host Library API is still a beta version and may be subject to change
*/
2021-08-24 11:20:50 -04:00
# include <stdlib.h>
# include <stdint.h>
2023-12-14 11:14:55 -05:00
# include <string.h>
2023-11-23 10:00:13 -05:00
# include "sdkconfig.h"
2021-08-24 11:20:50 -04:00
# include "freertos/FreeRTOS.h"
# include "freertos/task.h"
# include "freertos/queue.h"
# include "freertos/semphr.h"
# include "esp_err.h"
# include "esp_log.h"
# include "esp_heap_caps.h"
# include "hub.h"
2024-03-04 15:11:43 -05:00
# include "enum.h"
2021-08-24 11:20:50 -04:00
# include "usbh.h"
2023-05-08 12:43:32 -04:00
# include "hcd.h"
2021-11-17 13:07:53 -05:00
# include "esp_private/usb_phy.h"
2021-08-24 11:20:50 -04:00
# include "usb/usb_host.h"
// Spinlock protecting all "dynamic" members of the host library, client, and endpoint objects
static portMUX_TYPE host_lock = portMUX_INITIALIZER_UNLOCKED;

#define HOST_ENTER_CRITICAL_ISR()       portENTER_CRITICAL_ISR(&host_lock)
#define HOST_EXIT_CRITICAL_ISR()        portEXIT_CRITICAL_ISR(&host_lock)
#define HOST_ENTER_CRITICAL()           portENTER_CRITICAL(&host_lock)
#define HOST_EXIT_CRITICAL()            portEXIT_CRITICAL(&host_lock)
#define HOST_ENTER_CRITICAL_SAFE()      portENTER_CRITICAL_SAFE(&host_lock)
#define HOST_EXIT_CRITICAL_SAFE()       portEXIT_CRITICAL_SAFE(&host_lock)

// Return ret_val from the enclosing function if cond evaluates to false
#define HOST_CHECK(cond, ret_val) ({                \
    if (!(cond)) {                                  \
        return (ret_val);                           \
    }                                               \
})
// Same as HOST_CHECK() but exits the critical section before returning
#define HOST_CHECK_FROM_CRIT(cond, ret_val) ({      \
    if (!(cond)) {                                  \
        HOST_EXIT_CRITICAL();                       \
        return ret_val;                             \
    }                                               \
})
2024-03-04 15:11:43 -05:00
// Process request flags, set from proc_req_callback() to indicate which layer needs processing
#define PROCESS_REQUEST_PENDING_FLAG_USBH       (1 << 0)
#define PROCESS_REQUEST_PENDING_FLAG_HUB        (1 << 1)
#define PROCESS_REQUEST_PENDING_FLAG_ENUM       (1 << 2)

// Length of a short device descriptor request, and the maximum control transfer data length
#define SHORT_DESC_REQ_LEN                      8
#define CTRL_TRANSFER_MAX_DATA_LEN              CONFIG_USB_HOST_CONTROL_TRANSFER_MAX_SIZE
2023-05-08 12:43:32 -04:00
typedef struct ep_wrapper_s ep_wrapper_t ;
2021-08-24 11:20:50 -04:00
typedef struct interface_s interface_t ;
typedef struct client_s client_t ;
2023-05-08 12:43:32 -04:00
struct ep_wrapper_s {
2023-05-08 12:53:27 -04:00
// Dynamic members require a critical section
2021-08-24 11:20:50 -04:00
struct {
2023-05-08 12:43:32 -04:00
TAILQ_ENTRY ( ep_wrapper_s ) tailq_entry ;
2021-08-24 11:20:50 -04:00
union {
struct {
uint32_t pending : 1 ;
2023-05-08 12:53:27 -04:00
uint32_t reserved31 : 31 ;
2021-08-24 11:20:50 -04:00
} ;
} flags ;
uint32_t num_urb_inflight ;
2023-05-08 12:43:32 -04:00
usbh_ep_event_t last_event ;
2021-08-24 11:20:50 -04:00
} dynamic ;
2023-05-08 12:53:27 -04:00
// Constant members do no change after claiming the interface thus do not require a critical section
2021-08-24 11:20:50 -04:00
struct {
2023-05-08 12:43:32 -04:00
usbh_ep_handle_t ep_hdl ;
2021-08-24 11:20:50 -04:00
interface_t * intf_obj ;
} constant ;
} ;
struct interface_s {
2023-05-08 12:53:27 -04:00
// Dynamic members require a critical section
2021-08-24 11:20:50 -04:00
struct {
TAILQ_ENTRY ( interface_s ) tailq_entry ;
} mux_protected ;
2023-05-08 12:53:27 -04:00
// Constant members do no change after claiming the interface thus do not require a critical section
2021-08-24 11:20:50 -04:00
struct {
const usb_intf_desc_t * intf_desc ;
usb_device_handle_t dev_hdl ;
client_t * client_obj ;
2023-05-08 12:43:32 -04:00
ep_wrapper_t * endpoints [ 0 ] ;
2021-08-24 11:20:50 -04:00
} constant ;
} ;
struct client_s {
2023-05-08 12:53:27 -04:00
// Dynamic members require a critical section
2021-08-24 11:20:50 -04:00
struct {
TAILQ_ENTRY ( client_s ) tailq_entry ;
2023-05-08 12:43:32 -04:00
TAILQ_HEAD ( tailhead_pending_ep , ep_wrapper_s ) pending_ep_tailq ;
TAILQ_HEAD ( tailhead_idle_ep , ep_wrapper_s ) idle_ep_tailq ;
2021-08-24 11:20:50 -04:00
TAILQ_HEAD ( tailhead_done_ctrl_xfers , urb_s ) done_ctrl_xfer_tailq ;
union {
struct {
uint32_t handling_events : 1 ;
uint32_t taking_mux : 1 ;
2023-11-21 12:35:54 -05:00
uint32_t reserved6 : 6 ;
2021-08-24 11:20:50 -04:00
uint32_t num_intf_claimed : 8 ;
uint32_t reserved16 : 16 ;
} ;
uint32_t val ;
} flags ;
uint32_t num_done_ctrl_xfer ;
uint32_t opened_dev_addr_map ;
} dynamic ;
2023-05-08 12:53:27 -04:00
// Mux protected members must be protected by host library the mux_lock when accessed
2021-08-24 11:20:50 -04:00
struct {
TAILQ_HEAD ( tailhead_interfaces , interface_s ) interface_tailq ;
} mux_protected ;
2023-05-08 12:53:27 -04:00
// Constant members do no change after registration thus do not require a critical section
2021-08-24 11:20:50 -04:00
struct {
SemaphoreHandle_t event_sem ;
usb_host_client_event_cb_t event_callback ;
void * callback_arg ;
QueueHandle_t event_msg_queue ;
} constant ;
} ;
typedef struct {
2023-05-08 12:53:27 -04:00
// Dynamic members require a critical section
2021-08-24 11:20:50 -04:00
struct {
2023-05-08 12:53:27 -04:00
// Access to these should be done in a critical section
2021-08-24 11:20:50 -04:00
uint32_t process_pending_flags ;
uint32_t lib_event_flags ;
union {
struct {
uint32_t handling_events : 1 ;
2023-11-21 12:35:54 -05:00
uint32_t reserved7 : 7 ;
2021-08-24 11:20:50 -04:00
uint32_t num_clients : 8 ;
uint32_t reserved16 : 16 ;
} ;
uint32_t val ;
} flags ;
} dynamic ;
2023-05-08 12:53:27 -04:00
// Mux protected members must be protected by host library the mux_lock when accessed
2021-08-24 11:20:50 -04:00
struct {
2023-05-08 12:53:27 -04:00
TAILQ_HEAD ( tailhead_clients , client_s ) client_tailq ; // List of all clients registered
2021-08-24 11:20:50 -04:00
} mux_protected ;
2023-05-08 12:53:27 -04:00
// Constant members do no change after installation thus do not require a critical section
2021-08-24 11:20:50 -04:00
struct {
SemaphoreHandle_t event_sem ;
SemaphoreHandle_t mux_lock ;
2023-05-08 12:53:27 -04:00
usb_phy_handle_t phy_handle ; // Will be NULL if host library is installed with skip_phy_setup
2024-03-04 15:11:43 -05:00
void * enum_client ; // Pointer to Enum driver (acting as a client). Used to reroute completed USBH control transfers
void * hub_client ; // Pointer to External Hub driver (acting as a client). Used to reroute completed USBH control transfers. NULL, when External Hub Driver not available.
2021-08-24 11:20:50 -04:00
} constant ;
} host_lib_t ;
// Singleton host library object. NULL until usb_host_install() succeeds.
static host_lib_t *p_host_lib_obj = NULL;

const char *USB_HOST_TAG = "USB HOST";
// ----------------------------------------------------- Helpers -------------------------------------------------------
// Mark dev_addr as opened in the client's bitmap. Caller must hold the critical section.
static inline void _record_client_opened_device(client_t *client_obj, uint8_t dev_addr)
{
    assert(dev_addr != 0);
    // Bit 0 corresponds to device address 1 (address 0 is never opened)
    client_obj->dynamic.opened_dev_addr_map |= (1 << (dev_addr - 1));
}
// Clear dev_addr from the client's opened-device bitmap. Caller must hold the critical section.
static inline void _clear_client_opened_device(client_t *client_obj, uint8_t dev_addr)
{
    assert(dev_addr != 0);
    client_obj->dynamic.opened_dev_addr_map &= ~(1 << (dev_addr - 1));
}
// Check whether the client has opened the device at dev_addr. Caller must hold the critical section.
static inline bool _check_client_opened_device(client_t *client_obj, uint8_t dev_addr)
{
    // Address 0 is the default address and is never recorded as opened
    if (dev_addr == 0) {
        return false;
    }
    return client_obj->dynamic.opened_dev_addr_map & (1 << (dev_addr - 1));
}
// Wake the client's event handling task. Caller holds the (SAFE) critical section,
// which is temporarily released around the semaphore give.
// Returns true if a context switch should be requested (ISR context only).
static bool _unblock_client(client_t *client_obj, bool in_isr)
{
    bool yield;

    HOST_EXIT_CRITICAL_SAFE();
    if (in_isr) {
        BaseType_t xTaskWoken = pdFALSE;
        xSemaphoreGiveFromISR(client_obj->constant.event_sem, &xTaskWoken);
        yield = (xTaskWoken == pdTRUE);
    } else {
        xSemaphoreGive(client_obj->constant.event_sem);
        yield = false;
    }
    HOST_ENTER_CRITICAL_SAFE();

    return yield;
}
// Wake the library's daemon task. Caller holds the (SAFE) critical section,
// which is temporarily released around the semaphore give.
// Returns true if a context switch should be requested (ISR context only).
static bool _unblock_lib(bool in_isr)
{
    bool yield;

    HOST_EXIT_CRITICAL_SAFE();
    if (in_isr) {
        BaseType_t xTaskWoken = pdFALSE;
        xSemaphoreGiveFromISR(p_host_lib_obj->constant.event_sem, &xTaskWoken);
        yield = (xTaskWoken == pdTRUE);
    } else {
        xSemaphoreGive(p_host_lib_obj->constant.event_sem);
        yield = false;
    }
    HOST_ENTER_CRITICAL_SAFE();

    return yield;
}
2024-03-04 15:11:43 -05:00
static inline bool _is_internal_client ( void * client )
{
if ( p_host_lib_obj - > constant . enum_client & & ( client = = p_host_lib_obj - > constant . enum_client ) ) {
return true ;
}
2024-04-02 08:25:11 -04:00
# if ENABLE_USB_HUBS
if ( p_host_lib_obj - > constant . hub_client & & ( client = = p_host_lib_obj - > constant . hub_client ) ) {
return true ;
}
# endif // ENABLE_USB_HUBS
2024-03-04 15:11:43 -05:00
return false ;
}
2021-08-24 11:20:50 -04:00
// Deliver an event message to clients. When send_to_all is false, only clients
// that have opened the device at opened_dev_addr receive the message.
static void send_event_msg_to_clients(const usb_host_client_event_msg_t *event_msg, bool send_to_all, uint8_t opened_dev_addr)
{
    // Lock client list
    xSemaphoreTake(p_host_lib_obj->constant.mux_lock, portMAX_DELAY);
    // Send event message to relevant or all clients
    client_t *client_obj;
    TAILQ_FOREACH(client_obj, &p_host_lib_obj->mux_protected.client_tailq, dynamic.tailq_entry) {
        if (!send_to_all) {
            // Check if client opened the device
            HOST_ENTER_CRITICAL();
            bool send = _check_client_opened_device(client_obj, opened_dev_addr);
            HOST_EXIT_CRITICAL();
            if (!send) {
                continue;
            }
        }
        // Send the event message without blocking; drop (and log) if the client's queue is full
        if (xQueueSend(client_obj->constant.event_msg_queue, event_msg, 0) == pdTRUE) {
            HOST_ENTER_CRITICAL();
            _unblock_client(client_obj, false);
            HOST_EXIT_CRITICAL();
        } else {
            ESP_LOGE(USB_HOST_TAG, "Client event message queue full");
        }
    }
    // Unlock client list
    xSemaphoreGive(p_host_lib_obj->constant.mux_lock);
}
// ---------------------------------------------------- Callbacks ------------------------------------------------------
// ------------------- Library Related ---------------------
2023-05-08 12:43:32 -04:00
// Processing request callback shared by the USBH, Hub, and Enum layers.
// Records which layer needs processing and wakes the daemon task.
// May be called from task or ISR context; returns true if a yield is required.
static bool proc_req_callback(usb_proc_req_source_t source, bool in_isr, void *arg)
{
    HOST_ENTER_CRITICAL_SAFE();
    // Store the processing request source
    switch (source) {
    case USB_PROC_REQ_SOURCE_USBH:
        p_host_lib_obj->dynamic.process_pending_flags |= PROCESS_REQUEST_PENDING_FLAG_USBH;
        break;
    case USB_PROC_REQ_SOURCE_HUB:
        p_host_lib_obj->dynamic.process_pending_flags |= PROCESS_REQUEST_PENDING_FLAG_HUB;
        break;
    case USB_PROC_REQ_SOURCE_ENUM:
        p_host_lib_obj->dynamic.process_pending_flags |= PROCESS_REQUEST_PENDING_FLAG_ENUM;
        break;
    }
    bool yield = _unblock_lib(in_isr);
    HOST_EXIT_CRITICAL_SAFE();

    return yield;
}
2024-02-22 15:25:10 -05:00
// Event callback registered with the USBH layer. Dispatches USBH events to
// clients, the Hub driver, and the library's event flags.
static void usbh_event_callback(usbh_event_data_t *event_data, void *arg)
{
    switch (event_data->event) {
    case USBH_EVENT_CTRL_XFER: {
        assert(event_data->ctrl_xfer_data.urb != NULL);
        assert(event_data->ctrl_xfer_data.urb->usb_host_client != NULL);
        // Redistribute completed control transfers to the clients that submitted them
        if (_is_internal_client(event_data->ctrl_xfer_data.urb->usb_host_client)) {
            // Internal clients (Enum/Hub drivers): simply call the transfer callback
            event_data->ctrl_xfer_data.urb->transfer.callback(&event_data->ctrl_xfer_data.urb->transfer);
        } else {
            // External client: queue the URB on the client's done list and wake it
            client_t *client_obj = (client_t *)event_data->ctrl_xfer_data.urb->usb_host_client;
            HOST_ENTER_CRITICAL();
            TAILQ_INSERT_TAIL(&client_obj->dynamic.done_ctrl_xfer_tailq, event_data->ctrl_xfer_data.urb, tailq_entry);
            client_obj->dynamic.num_done_ctrl_xfer++;
            _unblock_client(client_obj, false);
            HOST_EXIT_CRITICAL();
        }
        break;
    }
    case USBH_EVENT_NEW_DEV: {
        // Prepare a NEW_DEV client event message, then send it to all clients
        usb_host_client_event_msg_t event_msg = {
            .event = USB_HOST_CLIENT_EVENT_NEW_DEV,
            .new_dev.address = event_data->new_dev_data.dev_addr,
        };
        send_event_msg_to_clients(&event_msg, true, 0);
#if ENABLE_USB_HUBS
        hub_notify_new_dev(event_data->new_dev_data.dev_addr);
#endif // ENABLE_USB_HUBS
        break;
    }
    case USBH_EVENT_DEV_GONE: {
#if ENABLE_USB_HUBS
        // Fix: read the address from dev_gone_data — this is a DEV_GONE event.
        // The original read new_dev_data.dev_addr, which only worked because the
        // union members happen to share the dev_addr layout.
        hub_notify_dev_gone(event_data->dev_gone_data.dev_addr);
#endif // ENABLE_USB_HUBS
        // Prepare event msg, send only to clients that have opened the device
        usb_host_client_event_msg_t event_msg = {
            .event = USB_HOST_CLIENT_EVENT_DEV_GONE,
            .dev_gone.dev_hdl = event_data->dev_gone_data.dev_hdl,
        };
        send_event_msg_to_clients(&event_msg, false, event_data->dev_gone_data.dev_addr);
        break;
    }
    case USBH_EVENT_DEV_FREE: {
        // Let the Hub driver know that the device is free and its port can be recycled
        // Port could be absent, no need to verify
        hub_port_recycle(event_data->dev_free_data.parent_dev_hdl,
                         event_data->dev_free_data.port_num,
                         event_data->dev_free_data.dev_uid);
        break;
    }
    case USBH_EVENT_ALL_FREE: {
        // Notify the lib handler that all devices are free
        HOST_ENTER_CRITICAL();
        p_host_lib_obj->dynamic.lib_event_flags |= USB_HOST_LIB_EVENT_FLAGS_ALL_FREE;
        _unblock_lib(false);
        HOST_EXIT_CRITICAL();
        break;
    }
    default:
        abort();    // Should never occur
        break;
    }
}
2024-03-28 08:49:44 -04:00
// Event callback registered with the Hub driver. Drives the enumeration
// state machine from port connection/reset/disconnection events.
static void hub_event_callback(hub_event_data_t *event_data, void *arg)
{
    switch (event_data->event) {
    case HUB_EVENT_CONNECTED:
        // Start enumeration process
        enum_start(event_data->connected.uid);
        break;
    case HUB_EVENT_RESET_COMPLETED:
        // Proceed enumeration process
        ESP_ERROR_CHECK(enum_proceed(event_data->reset_completed.uid));
        break;
    case HUB_EVENT_DISCONNECTED:
        // Cancel enumeration process
        enum_cancel(event_data->disconnected.uid);
        // We allow this to fail in case the device object was already freed
        usbh_devs_remove(event_data->disconnected.uid);
        break;
    default:
        abort();    // Should never occur
        break;
    }
}
2024-03-04 15:11:43 -05:00
// Event callback registered with the Enum driver. Reacts to enumeration
// milestones by driving the Hub driver and propagating new-device events.
static void enum_event_callback(enum_event_data_t *event_data, void *arg)
{
    const enum_event_t event = event_data->event;
    switch (event) {
    case ENUM_EVENT_STARTED:
        // Enumeration process started
        break;
    case ENUM_EVENT_RESET_REQUIRED:
        hub_port_reset(event_data->reset_req.parent_dev_hdl, event_data->reset_req.parent_port_num);
        break;
    case ENUM_EVENT_COMPLETED:
        // Notify port that device completed enumeration
        hub_port_active(event_data->complete.parent_dev_hdl, event_data->complete.parent_port_num);
        // Propagate a new device event
        ESP_ERROR_CHECK(usbh_devs_new_dev_event(event_data->complete.dev_hdl));
        break;
    case ENUM_EVENT_CANCELED:
        // Enumeration canceled
        break;
    default:
        abort();    // Should never occur
        break;
    }
}
2021-08-24 11:20:50 -04:00
// ------------------- Client Related ----------------------
2023-05-08 12:43:32 -04:00
// Endpoint event callback registered with the USBH layer for client endpoints.
// Moves the endpoint onto its client's pending list and wakes the client.
// May be called from task or ISR context; returns true if a yield is required.
static bool endpoint_callback(usbh_ep_handle_t ep_hdl, usbh_ep_event_t ep_event, void *user_arg, bool in_isr)
{
    ep_wrapper_t *ep_wrap = (ep_wrapper_t *)user_arg;
    client_t *client_obj = (client_t *)ep_wrap->constant.intf_obj->constant.client_obj;

    HOST_ENTER_CRITICAL_SAFE();
    // Store the event to be handled later. Note that we allow overwriting of events because more severe will halt the pipe prevent any further events.
    ep_wrap->dynamic.last_event = ep_event;
    // Add the EP to the client's pending list if it's not in the list already
    if (!ep_wrap->dynamic.flags.pending) {
        ep_wrap->dynamic.flags.pending = 1;
        TAILQ_REMOVE(&client_obj->dynamic.idle_ep_tailq, ep_wrap, dynamic.tailq_entry);
        TAILQ_INSERT_TAIL(&client_obj->dynamic.pending_ep_tailq, ep_wrap, dynamic.tailq_entry);
    }
    bool yield = _unblock_client(client_obj, in_isr);
    HOST_EXIT_CRITICAL_SAFE();

    return yield;
}
2023-12-14 11:14:55 -05:00
// Transfer completion callback used when fetching descriptors synchronously.
// Gives the semaphore stored in the transfer's context to unblock the waiter.
static void get_config_desc_transfer_cb(usb_transfer_t *transfer)
{
    SemaphoreHandle_t transfer_done = (SemaphoreHandle_t)transfer->context;
    xSemaphoreGive(transfer_done);
}
2021-08-24 11:20:50 -04:00
// ------------------------------------------------ Library Functions --------------------------------------------------
// ----------------------- Public --------------------------
esp_err_t usb_host_install ( const usb_host_config_t * config )
{
HOST_CHECK ( config ! = NULL , ESP_ERR_INVALID_ARG ) ;
HOST_ENTER_CRITICAL ( ) ;
HOST_CHECK_FROM_CRIT ( p_host_lib_obj = = NULL , ESP_ERR_INVALID_STATE ) ;
HOST_EXIT_CRITICAL ( ) ;
esp_err_t ret ;
host_lib_t * host_lib_obj = heap_caps_calloc ( 1 , sizeof ( host_lib_t ) , MALLOC_CAP_DEFAULT ) ;
SemaphoreHandle_t event_sem = xSemaphoreCreateBinary ( ) ;
SemaphoreHandle_t mux_lock = xSemaphoreCreateMutex ( ) ;
if ( host_lib_obj = = NULL | | event_sem = = NULL | | mux_lock = = NULL ) {
ret = ESP_ERR_NO_MEM ;
goto alloc_err ;
}
2023-05-08 12:53:27 -04:00
// Initialize host library object
2021-08-24 11:20:50 -04:00
TAILQ_INIT ( & host_lib_obj - > mux_protected . client_tailq ) ;
host_lib_obj - > constant . event_sem = event_sem ;
host_lib_obj - > constant . mux_lock = mux_lock ;
2023-05-08 12:43:32 -04:00
/*
Install each layer of the Host stack ( listed below ) from the lowest layer to the highest
- USB PHY
- HCD
- USBH
2024-03-04 15:11:43 -05:00
- Enum
2023-05-08 12:43:32 -04:00
- Hub
*/
2023-05-08 12:53:27 -04:00
// Install USB PHY (if necessary). USB PHY driver will also enable the underlying Host Controller
2021-11-17 13:07:53 -05:00
if ( ! config - > skip_phy_setup ) {
2023-05-08 12:53:27 -04:00
// Host Library defaults to internal PHY
2021-11-17 13:07:53 -05:00
usb_phy_config_t phy_config = {
. controller = USB_PHY_CTRL_OTG ,
. target = USB_PHY_TARGET_INT ,
. otg_mode = USB_OTG_MODE_HOST ,
2023-05-08 12:53:27 -04:00
. otg_speed = USB_PHY_SPEED_UNDEFINED , // In Host mode, the speed is determined by the connected device
2022-10-06 03:04:53 -04:00
. ext_io_conf = NULL ,
. otg_io_conf = NULL ,
2021-11-17 13:07:53 -05:00
} ;
ret = usb_new_phy ( & phy_config , & host_lib_obj - > constant . phy_handle ) ;
2023-05-08 12:53:27 -04:00
if ( ret ! = ESP_OK ) {
goto phy_err ;
}
2021-11-17 13:07:53 -05:00
}
2023-05-08 12:43:32 -04:00
2023-05-08 12:53:27 -04:00
// Install HCD
2023-05-08 12:43:32 -04:00
hcd_config_t hcd_config = {
. intr_flags = config - > intr_flags
} ;
ret = hcd_install ( & hcd_config ) ;
if ( ret ! = ESP_OK ) {
goto hcd_err ;
}
2023-05-08 12:53:27 -04:00
// Install USBH
2021-08-24 11:20:50 -04:00
usbh_config_t usbh_config = {
2023-05-08 12:43:32 -04:00
. proc_req_cb = proc_req_callback ,
. proc_req_cb_arg = NULL ,
2024-02-22 15:25:10 -05:00
. event_cb = usbh_event_callback ,
2021-08-24 11:20:50 -04:00
. event_cb_arg = NULL ,
} ;
ret = usbh_install ( & usbh_config ) ;
if ( ret ! = ESP_OK ) {
goto usbh_err ;
}
2023-05-08 12:43:32 -04:00
2024-03-04 15:11:43 -05:00
// Install Enumeration driver
enum_config_t enum_config = {
. proc_req_cb = proc_req_callback ,
. proc_req_cb_arg = NULL ,
. enum_event_cb = enum_event_callback ,
. enum_event_cb_arg = NULL ,
# if ENABLE_ENUM_FILTER_CALLBACK
. enum_filter_cb = config - > enum_filter_cb ,
. enum_filter_cb_arg = NULL ,
2023-11-23 10:00:13 -05:00
# endif // ENABLE_ENUM_FILTER_CALLBACK
2024-03-04 15:11:43 -05:00
} ;
ret = enum_install ( & enum_config , & host_lib_obj - > constant . enum_client ) ;
if ( ret ! = ESP_OK ) {
goto enum_err ;
}
2023-05-08 12:53:27 -04:00
// Install Hub
2021-08-24 11:20:50 -04:00
hub_config_t hub_config = {
2023-05-08 12:43:32 -04:00
. proc_req_cb = proc_req_callback ,
. proc_req_cb_arg = NULL ,
2024-03-28 08:49:44 -04:00
. event_cb = hub_event_callback ,
. event_cb_arg = NULL ,
2021-08-24 11:20:50 -04:00
} ;
2024-04-09 17:06:46 -04:00
ret = hub_install ( & hub_config , & host_lib_obj - > constant . hub_client ) ;
2021-08-24 11:20:50 -04:00
if ( ret ! = ESP_OK ) {
goto hub_err ;
}
2023-05-08 12:53:27 -04:00
// Assign host library object
2021-08-24 11:20:50 -04:00
HOST_ENTER_CRITICAL ( ) ;
if ( p_host_lib_obj ! = NULL ) {
HOST_EXIT_CRITICAL ( ) ;
ret = ESP_ERR_INVALID_STATE ;
goto assign_err ;
}
p_host_lib_obj = host_lib_obj ;
HOST_EXIT_CRITICAL ( ) ;
2023-05-08 12:53:27 -04:00
// Start the root hub
2021-08-24 11:20:50 -04:00
ESP_ERROR_CHECK ( hub_root_start ( ) ) ;
ret = ESP_OK ;
return ret ;
assign_err :
ESP_ERROR_CHECK ( hub_uninstall ( ) ) ;
hub_err :
2024-03-04 15:11:43 -05:00
ESP_ERROR_CHECK ( enum_uninstall ( ) ) ;
enum_err :
2021-08-24 11:20:50 -04:00
ESP_ERROR_CHECK ( usbh_uninstall ( ) ) ;
usbh_err :
2023-05-08 12:43:32 -04:00
ESP_ERROR_CHECK ( hcd_uninstall ( ) ) ;
hcd_err :
2022-11-29 07:30:36 -05:00
if ( host_lib_obj - > constant . phy_handle ) {
ESP_ERROR_CHECK ( usb_del_phy ( host_lib_obj - > constant . phy_handle ) ) ;
2021-11-17 13:07:53 -05:00
}
phy_err :
2021-08-24 11:20:50 -04:00
alloc_err :
if ( mux_lock ) {
vSemaphoreDelete ( mux_lock ) ;
}
if ( event_sem ) {
vSemaphoreDelete ( event_sem ) ;
}
heap_caps_free ( host_lib_obj ) ;
return ret ;
}
esp_err_t usb_host_uninstall ( void )
{
2023-05-08 12:53:27 -04:00
// All devices must have been freed at this point
2021-08-24 11:20:50 -04:00
HOST_ENTER_CRITICAL ( ) ;
HOST_CHECK_FROM_CRIT ( p_host_lib_obj ! = NULL , ESP_ERR_INVALID_STATE ) ;
HOST_CHECK_FROM_CRIT ( p_host_lib_obj - > dynamic . process_pending_flags = = 0 & &
p_host_lib_obj - > dynamic . lib_event_flags = = 0 & &
p_host_lib_obj - > dynamic . flags . val = = 0 ,
ESP_ERR_INVALID_STATE ) ;
HOST_EXIT_CRITICAL ( ) ;
2023-05-08 12:53:27 -04:00
// Stop the root hub
2021-08-24 11:20:50 -04:00
ESP_ERROR_CHECK ( hub_root_stop ( ) ) ;
2023-05-08 12:53:27 -04:00
// Unassign the host library object
2021-08-24 11:20:50 -04:00
HOST_ENTER_CRITICAL ( ) ;
host_lib_t * host_lib_obj = p_host_lib_obj ;
p_host_lib_obj = NULL ;
HOST_EXIT_CRITICAL ( ) ;
2023-05-08 12:43:32 -04:00
/*
Uninstall each layer of the Host stack ( listed below ) from the highest layer to the lowest
- Hub
2024-03-04 15:11:43 -05:00
- Enum
2023-05-08 12:43:32 -04:00
- USBH
- HCD
- USB PHY
*/
ESP_ERROR_CHECK ( hub_uninstall ( ) ) ;
2024-03-04 15:11:43 -05:00
ESP_ERROR_CHECK ( enum_uninstall ( ) ) ;
2023-05-08 12:43:32 -04:00
ESP_ERROR_CHECK ( usbh_uninstall ( ) ) ;
ESP_ERROR_CHECK ( hcd_uninstall ( ) ) ;
2023-05-08 12:53:27 -04:00
// If the USB PHY was setup, then delete it
2021-11-17 13:07:53 -05:00
if ( host_lib_obj - > constant . phy_handle ) {
ESP_ERROR_CHECK ( usb_del_phy ( host_lib_obj - > constant . phy_handle ) ) ;
}
2023-05-08 12:43:32 -04:00
2023-05-08 12:53:27 -04:00
// Free memory objects
2021-08-24 11:20:50 -04:00
vSemaphoreDelete ( host_lib_obj - > constant . mux_lock ) ;
vSemaphoreDelete ( host_lib_obj - > constant . event_sem ) ;
heap_caps_free ( host_lib_obj ) ;
return ESP_OK ;
}
/**
 * @brief Handle USB Host Library events (daemon task entry point)
 *
 * Blocks for up to timeout_ticks waiting for a processing request, then runs the
 * pending USBH/Hub/Enum processing functions in a loop until no events remain.
 *
 * @param timeout_ticks Maximum ticks to block waiting for an event
 * @param[out] event_flags_ret Accumulated USB_HOST_LIB_EVENT_FLAGS_* (optional)
 * @return ESP_OK if events were handled (or timeout_ticks == 0); ESP_ERR_TIMEOUT otherwise
 */
esp_err_t usb_host_lib_handle_events(TickType_t timeout_ticks, uint32_t *event_flags_ret)
{
    // Check arguments and state
    HOST_CHECK(p_host_lib_obj != NULL, ESP_ERR_INVALID_STATE);
    esp_err_t ret = (timeout_ticks == 0) ? ESP_OK : ESP_ERR_TIMEOUT;    // We don't want to return ESP_ERR_TIMEOUT if we aren't blocking
    uint32_t event_flags;

    HOST_ENTER_CRITICAL();
    // Set handling_events flag. This prevents the host library from being uninstalled
    p_host_lib_obj->dynamic.flags.handling_events = 1;
    HOST_EXIT_CRITICAL();

    while (1) {
        // Loop until there are no more events
        if (xSemaphoreTake(p_host_lib_obj->constant.event_sem, timeout_ticks) == pdFALSE) {
            // Timed out waiting for semaphore or currently no events
            break;
        }

        // Read and clear process pending flags
        HOST_ENTER_CRITICAL();
        uint32_t process_pending_flags = p_host_lib_obj->dynamic.process_pending_flags;
        p_host_lib_obj->dynamic.process_pending_flags = 0;
        HOST_EXIT_CRITICAL();

        if (process_pending_flags & PROCESS_REQUEST_PENDING_FLAG_USBH) {
            ESP_ERROR_CHECK(usbh_process());
        }
        if (process_pending_flags & PROCESS_REQUEST_PENDING_FLAG_HUB) {
            ESP_ERROR_CHECK(hub_process());
        }
        if (process_pending_flags & PROCESS_REQUEST_PENDING_FLAG_ENUM) {
            ESP_ERROR_CHECK(enum_process());
        }

        ret = ESP_OK;
        // Set timeout_ticks to 0 so that we can check for events again without blocking
        timeout_ticks = 0;
    }

    HOST_ENTER_CRITICAL();
    p_host_lib_obj->dynamic.flags.handling_events = 0;
    // Read and clear any event flags
    event_flags = p_host_lib_obj->dynamic.lib_event_flags;
    p_host_lib_obj->dynamic.lib_event_flags = 0;
    HOST_EXIT_CRITICAL();

    if (event_flags_ret != NULL) {
        *event_flags_ret = event_flags;
    }
    return ret;
}
2021-10-27 12:54:27 -04:00
esp_err_t usb_host_lib_unblock ( void )
{
2023-05-08 12:53:27 -04:00
// All devices must have been freed at this point
2021-10-27 12:54:27 -04:00
HOST_ENTER_CRITICAL ( ) ;
HOST_CHECK_FROM_CRIT ( p_host_lib_obj ! = NULL , ESP_ERR_INVALID_STATE ) ;
_unblock_lib ( false ) ;
HOST_EXIT_CRITICAL ( ) ;
return ESP_OK ;
}
2021-12-08 06:46:46 -05:00
/**
 * @brief Get current information about the USB Host Library
 *
 * @param[out] info_ret Filled with the current number of devices and clients
 * @return ESP_OK on success; ESP_ERR_INVALID_ARG or ESP_ERR_INVALID_STATE
 */
esp_err_t usb_host_lib_info(usb_host_lib_info_t *info_ret)
{
    HOST_CHECK(info_ret != NULL, ESP_ERR_INVALID_ARG);
    int num_devs_temp;
    int num_clients_temp;

    HOST_ENTER_CRITICAL();
    HOST_CHECK_FROM_CRIT(p_host_lib_obj != NULL, ESP_ERR_INVALID_STATE);
    num_clients_temp = p_host_lib_obj->dynamic.flags.num_clients;
    HOST_EXIT_CRITICAL();
    usbh_devs_num(&num_devs_temp);

    // Write back return values
    info_ret->num_devices = num_devs_temp;
    info_ret->num_clients = num_clients_temp;
    return ESP_OK;
}
2021-08-24 11:20:50 -04:00
// ------------------------------------------------ Client Functions ---------------------------------------------------
// ----------------------- Private -------------------------
// Handle every endpoint on the client's pending list.
// Caller must hold the critical section on entry; it is temporarily released
// while endpoint commands and transfer callbacks run.
static void _handle_pending_ep(client_t *client_obj)
{
    while (!TAILQ_EMPTY(&client_obj->dynamic.pending_ep_tailq)) {
        // Pop the next pending EP and move it back to the idle list
        ep_wrapper_t *ep_wrap = TAILQ_FIRST(&client_obj->dynamic.pending_ep_tailq);
        TAILQ_REMOVE(&client_obj->dynamic.pending_ep_tailq, ep_wrap, dynamic.tailq_entry);
        TAILQ_INSERT_TAIL(&client_obj->dynamic.idle_ep_tailq, ep_wrap, dynamic.tailq_entry);
        ep_wrap->dynamic.flags.pending = 0;
        usbh_ep_event_t last_event = ep_wrap->dynamic.last_event;
        uint32_t num_urb_dequeued = 0;

        HOST_EXIT_CRITICAL();
        // Handle pipe event
        switch (last_event) {
        case USBH_EP_EVENT_ERROR_XFER:
        case USBH_EP_EVENT_ERROR_URB_NOT_AVAIL:
        case USBH_EP_EVENT_ERROR_OVERFLOW:
        case USBH_EP_EVENT_ERROR_STALL:
            // The endpoint is now stalled. Flush all pending URBs
            ESP_ERROR_CHECK(usbh_ep_command(ep_wrap->constant.ep_hdl, USBH_EP_CMD_FLUSH));
            // All URBs in this pipe are now retired waiting to be dequeued. Fall through to dequeue them
            __attribute__((fallthrough));
        case USBH_EP_EVENT_URB_DONE: {
            // Dequeue all URBs and run their transfer callback
            urb_t *urb;
            usbh_ep_dequeue_urb(ep_wrap->constant.ep_hdl, &urb);
            while (urb != NULL) {
                // Clear the transfer's in-flight flag to indicate the transfer is no longer in-flight
                urb->usb_host_inflight = false;
                urb->transfer.callback(&urb->transfer);
                num_urb_dequeued++;
                usbh_ep_dequeue_urb(ep_wrap->constant.ep_hdl, &urb);
            }
            break;
        }
        default:
            abort();    // Should never occur
            break;
        }
        HOST_ENTER_CRITICAL();

        // Update the endpoint's number of URBs in-flight
        assert(num_urb_dequeued <= ep_wrap->dynamic.num_urb_inflight);
        ep_wrap->dynamic.num_urb_inflight -= num_urb_dequeued;
    }
}
// ----------------------- Public --------------------------
/**
 * @brief Register a client of the USB Host Library
 *
 * Allocates and initializes a client object and adds it to the library's client list.
 *
 * @param client_config Client configuration (asynchronous clients must provide an event callback)
 * @param[out] client_hdl_ret Handle of the registered client
 * @return ESP_OK on success; ESP_ERR_INVALID_STATE, ESP_ERR_INVALID_ARG, or ESP_ERR_NO_MEM
 */
esp_err_t usb_host_client_register(const usb_host_client_config_t *client_config, usb_host_client_handle_t *client_hdl_ret)
{
    HOST_CHECK(p_host_lib_obj, ESP_ERR_INVALID_STATE);
    HOST_CHECK(client_config != NULL && client_hdl_ret != NULL, ESP_ERR_INVALID_ARG);
    HOST_CHECK(client_config->max_num_event_msg > 0, ESP_ERR_INVALID_ARG);
    if (!client_config->is_synchronous) {
        // Asynchronous clients must provide an event callback
        HOST_CHECK(client_config->async.client_event_callback != NULL, ESP_ERR_INVALID_ARG);
    }
    esp_err_t ret;
    // Create client object
    client_t *client_obj = heap_caps_calloc(1, sizeof(client_t), MALLOC_CAP_DEFAULT);
    SemaphoreHandle_t event_sem = xSemaphoreCreateBinary();
    QueueHandle_t event_msg_queue = xQueueCreate(client_config->max_num_event_msg, sizeof(usb_host_client_event_msg_t));
    if (client_obj == NULL || event_sem == NULL || event_msg_queue == NULL) {
        ret = ESP_ERR_NO_MEM;
        goto alloc_err;
    }
    // Initialize client object
    TAILQ_INIT(&client_obj->dynamic.pending_ep_tailq);
    TAILQ_INIT(&client_obj->dynamic.idle_ep_tailq);
    TAILQ_INIT(&client_obj->mux_protected.interface_tailq);
    TAILQ_INIT(&client_obj->dynamic.done_ctrl_xfer_tailq);
    client_obj->constant.event_sem = event_sem;
    client_obj->constant.event_callback = client_config->async.client_event_callback;
    client_obj->constant.callback_arg = client_config->async.callback_arg;
    client_obj->constant.event_msg_queue = event_msg_queue;

    // Add client to the host library's list of clients
    xSemaphoreTake(p_host_lib_obj->constant.mux_lock, portMAX_DELAY);
    HOST_ENTER_CRITICAL();
    p_host_lib_obj->dynamic.flags.num_clients++;
    HOST_EXIT_CRITICAL();
    TAILQ_INSERT_TAIL(&p_host_lib_obj->mux_protected.client_tailq, client_obj, dynamic.tailq_entry);
    xSemaphoreGive(p_host_lib_obj->constant.mux_lock);

    // Write back client handle
    *client_hdl_ret = (usb_host_client_handle_t)client_obj;
    ret = ESP_OK;
    return ret;

alloc_err:
    if (event_msg_queue) {
        vQueueDelete(event_msg_queue);
    }
    if (event_sem) {
        vSemaphoreDelete(event_sem);
    }
    heap_caps_free(client_obj);
    // Fix: the error path previously returned ESP_OK, masking the allocation
    // failure from the caller. Return the recorded error (ESP_ERR_NO_MEM).
    return ret;
}
esp_err_t usb_host_client_deregister(usb_host_client_handle_t client_hdl)
{
    HOST_CHECK(client_hdl != NULL, ESP_ERR_INVALID_ARG);
    client_t *client_obj = (client_t *)client_hdl;
    esp_err_t ret;

    // We take the mux_lock because we need to access the host library's client_tailq
    xSemaphoreTake(p_host_lib_obj->constant.mux_lock, portMAX_DELAY);
    HOST_ENTER_CRITICAL();
    // Check that client can currently be deregistered: no pending/idle endpoints,
    // no outstanding control transfers, not currently handling events or taking
    // the mux, no claimed interfaces, and all opened devices closed.
    bool can_deregister;
    if (!TAILQ_EMPTY(&client_obj->dynamic.pending_ep_tailq) ||
            !TAILQ_EMPTY(&client_obj->dynamic.idle_ep_tailq) ||
            !TAILQ_EMPTY(&client_obj->dynamic.done_ctrl_xfer_tailq) ||
            client_obj->dynamic.flags.handling_events ||
            client_obj->dynamic.flags.taking_mux ||
            client_obj->dynamic.flags.num_intf_claimed != 0 ||
            client_obj->dynamic.num_done_ctrl_xfer != 0 ||
            client_obj->dynamic.opened_dev_addr_map != 0) {
        can_deregister = false;
    } else {
        can_deregister = true;
    }
    HOST_EXIT_CRITICAL();
    if (!can_deregister) {
        ret = ESP_ERR_INVALID_STATE;
        goto exit;
    }

    // Remove client object from the library's list of clients
    TAILQ_REMOVE(&p_host_lib_obj->mux_protected.client_tailq, client_obj, dynamic.tailq_entry);
    HOST_ENTER_CRITICAL();
    p_host_lib_obj->dynamic.flags.num_clients--;
    if (p_host_lib_obj->dynamic.flags.num_clients == 0) {
        // This is the last client being deregistered. Notify the lib handler
        p_host_lib_obj->dynamic.lib_event_flags |= USB_HOST_LIB_EVENT_FLAGS_NO_CLIENTS;
        _unblock_lib(false);
    }
    HOST_EXIT_CRITICAL();

    // Free client object
    vQueueDelete(client_obj->constant.event_msg_queue);
    vSemaphoreDelete(client_obj->constant.event_sem);
    heap_caps_free(client_obj);
    ret = ESP_OK;
exit:
    xSemaphoreGive(p_host_lib_obj->constant.mux_lock);
    return ret;
}
esp_err_t usb_host_client_handle_events(usb_host_client_handle_t client_hdl, TickType_t timeout_ticks)
{
    // Check arguments and state
    HOST_CHECK(client_hdl != NULL, ESP_ERR_INVALID_ARG);
    HOST_CHECK(p_host_lib_obj != NULL, ESP_ERR_INVALID_STATE);
    esp_err_t ret = (timeout_ticks == 0) ? ESP_OK : ESP_ERR_TIMEOUT;    // We don't want to return ESP_ERR_TIMEOUT if we aren't blocking
    client_t *client_obj = (client_t *)client_hdl;

    HOST_ENTER_CRITICAL();
    // Set handling_events flag. This prevents the client from being deregistered
    client_obj->dynamic.flags.handling_events = 1;
    HOST_EXIT_CRITICAL();

    while (1) {
        // Loop until there are no more events
        if (xSemaphoreTake(client_obj->constant.event_sem, timeout_ticks) == pdFALSE) {
            // Timed out waiting for semaphore or currently no events
            break;
        }
        HOST_ENTER_CRITICAL();
        // Handle pending endpoints
        if (!TAILQ_EMPTY(&client_obj->dynamic.pending_ep_tailq)) {
            _handle_pending_ep(client_obj);
        }
        // Handle any done control transfers
        while (client_obj->dynamic.num_done_ctrl_xfer > 0) {
            urb_t *urb = TAILQ_FIRST(&client_obj->dynamic.done_ctrl_xfer_tailq);
            TAILQ_REMOVE(&client_obj->dynamic.done_ctrl_xfer_tailq, urb, tailq_entry);
            client_obj->dynamic.num_done_ctrl_xfer--;
            // Leave the critical section before invoking the user callback
            HOST_EXIT_CRITICAL();
            // Clear the transfer's in-flight flag to indicate the transfer is no longer in-flight
            urb->usb_host_inflight = false;
            // Call the transfer's callback
            urb->transfer.callback(&urb->transfer);
            HOST_ENTER_CRITICAL();
        }
        HOST_EXIT_CRITICAL();

        // Handle event messages
        while (uxQueueMessagesWaiting(client_obj->constant.event_msg_queue) > 0) {
            // Dequeue the event message and call the client event callback
            usb_host_client_event_msg_t event_msg;
            BaseType_t queue_ret = xQueueReceive(client_obj->constant.event_msg_queue, &event_msg, 0);
            assert(queue_ret == pdTRUE);
            client_obj->constant.event_callback(&event_msg, client_obj->constant.callback_arg);
        }

        ret = ESP_OK;
        // Set timeout_ticks to 0 so that we can check for events again without blocking
        timeout_ticks = 0;
    }

    HOST_ENTER_CRITICAL();
    // Clear handling_events flag so the client can be deregistered again
    client_obj->dynamic.flags.handling_events = 0;
    HOST_EXIT_CRITICAL();
    return ret;
}
esp_err_t usb_host_client_unblock(usb_host_client_handle_t client_hdl)
{
    // Wake a client that is blocked inside usb_host_client_handle_events()
    // without posting an actual event to it.
    HOST_CHECK(client_hdl != NULL, ESP_ERR_INVALID_ARG);
    client_t *target_client = (client_t *)client_hdl;
    HOST_ENTER_CRITICAL();
    _unblock_client(target_client, false);
    HOST_EXIT_CRITICAL();
    return ESP_OK;
}
// ------------------------------------------------- Device Handling ---------------------------------------------------
esp_err_t usb_host_device_open(usb_host_client_handle_t client_hdl, uint8_t dev_addr, usb_device_handle_t *dev_hdl_ret)
{
    HOST_CHECK(dev_addr > 0 && client_hdl != NULL && dev_hdl_ret != NULL, ESP_ERR_INVALID_ARG);
    client_t *client_obj = (client_t *)client_hdl;
    esp_err_t ret;
    usb_device_handle_t dev_hdl;
    // Open the device at the USBH layer first
    ret = usbh_devs_open(dev_addr, &dev_hdl);
    if (ret != ESP_OK) {
        goto exit;
    }
    HOST_ENTER_CRITICAL();
    if (_check_client_opened_device(client_obj, dev_addr)) {
        // Client has already opened the device. Close it and return an error
        ret = ESP_ERR_INVALID_STATE;
        HOST_EXIT_CRITICAL();
        goto already_opened;
    }
    // Record in client object that we have opened the device of this address
    _record_client_opened_device(client_obj, dev_addr);
    HOST_EXIT_CRITICAL();
    *dev_hdl_ret = dev_hdl;
    ret = ESP_OK;
    return ret;

already_opened:
    // Roll back the USBH-level open done above
    ESP_ERROR_CHECK(usbh_dev_close(dev_hdl));
exit:
    return ret;
}
esp_err_t usb_host_device_close(usb_host_client_handle_t client_hdl, usb_device_handle_t dev_hdl)
{
    HOST_CHECK(dev_hdl != NULL && client_hdl != NULL, ESP_ERR_INVALID_ARG);
    client_t *client_obj = (client_t *)client_hdl;

    // We take the mux_lock because we need to walk the client's interface list
    xSemaphoreTake(p_host_lib_obj->constant.mux_lock, portMAX_DELAY);
    esp_err_t ret;

    // Check that all interfaces claimed by this client do not belong to this device
    bool all_released = true;
    interface_t *intf_obj;
    TAILQ_FOREACH(intf_obj, &client_obj->mux_protected.interface_tailq, mux_protected.tailq_entry) {
        if (intf_obj->constant.dev_hdl == dev_hdl) {
            all_released = false;
            break;
        }
    }
    if (!all_released) {
        ret = ESP_ERR_INVALID_STATE;
        goto exit;
    }

    // Check that client actually opened the device in the first place.
    // BUG FIX: the previous HOST_CHECK_FROM_CRIT(..., ESP_ERR_NOT_FOUND) here
    // returned directly while mux_lock was still held (the macro only exits the
    // critical section), permanently leaking the mutex. It also duplicated the
    // explicit check below. Use the goto-cleanup path so mux_lock is always
    // released on every return path.
    HOST_ENTER_CRITICAL();
    uint8_t dev_addr;
    ESP_ERROR_CHECK(usbh_dev_get_addr(dev_hdl, &dev_addr));
    if (!_check_client_opened_device(client_obj, dev_addr)) {
        // Client never opened this device
        ret = ESP_ERR_INVALID_STATE;
        HOST_EXIT_CRITICAL();
        goto exit;
    }
    // Proceed to clear the record of the device from the client
    _clear_client_opened_device(client_obj, dev_addr);
    HOST_EXIT_CRITICAL();
    ESP_ERROR_CHECK(usbh_dev_close(dev_hdl));
    ret = ESP_OK;
exit:
    xSemaphoreGive(p_host_lib_obj->constant.mux_lock);
    return ret;
}
esp_err_t usb_host_device_free_all(void)
{
    HOST_ENTER_CRITICAL();
    HOST_CHECK_FROM_CRIT(p_host_lib_obj->dynamic.flags.num_clients == 0, ESP_ERR_INVALID_STATE);    // All clients must have been deregistered
    HOST_EXIT_CRITICAL();
    esp_err_t ret;
#if ENABLE_USB_HUBS
    // Notify external hubs so their downstream devices are freed as well
    hub_notify_all_free();
#endif // ENABLE_USB_HUBS
    ret = usbh_devs_mark_all_free();
    // If ESP_ERR_NOT_FINISHED is returned, caller must wait for USB_HOST_LIB_EVENT_FLAGS_ALL_FREE to confirm all devices are free
    return ret;
}
2021-10-27 12:54:27 -04:00
esp_err_t usb_host_device_addr_list_fill(int list_len, uint8_t *dev_addr_list, int *num_dev_ret)
{
    // Fill caller-provided array with the addresses of currently connected devices
    HOST_CHECK(dev_addr_list != NULL, ESP_ERR_INVALID_ARG);
    HOST_CHECK(num_dev_ret != NULL, ESP_ERR_INVALID_ARG);
    return usbh_devs_addr_list_fill(list_len, dev_addr_list, num_dev_ret);
}
2021-08-24 11:20:50 -04:00
// ------------------------------------------------- Device Requests ---------------------------------------------------
// ------------------- Cached Requests ---------------------
esp_err_t usb_host_device_info(usb_device_handle_t dev_hdl, usb_device_info_t *dev_info)
{
    // Query cached device information from the USBH layer
    HOST_CHECK(dev_hdl != NULL, ESP_ERR_INVALID_ARG);
    HOST_CHECK(dev_info != NULL, ESP_ERR_INVALID_ARG);
    return usbh_dev_get_info(dev_hdl, dev_info);
}
// ----------------------------------------------- Descriptor Requests -------------------------------------------------
// ----------------- Cached Descriptors --------------------
esp_err_t usb_host_get_device_descriptor(usb_device_handle_t dev_hdl, const usb_device_desc_t **device_desc)
{
    // Return a pointer to the device descriptor cached by the USBH layer
    HOST_CHECK(dev_hdl != NULL, ESP_ERR_INVALID_ARG);
    HOST_CHECK(device_desc != NULL, ESP_ERR_INVALID_ARG);
    return usbh_dev_get_desc(dev_hdl, device_desc);
}
esp_err_t usb_host_get_active_config_descriptor(usb_device_handle_t dev_hdl, const usb_config_desc_t **config_desc)
{
    // Return a pointer to the active configuration descriptor cached by the USBH layer
    HOST_CHECK(dev_hdl != NULL, ESP_ERR_INVALID_ARG);
    HOST_CHECK(config_desc != NULL, ESP_ERR_INVALID_ARG);
    return usbh_dev_get_config_desc(dev_hdl, config_desc);
}
2023-12-14 11:14:55 -05:00
// ----------------- Descriptors Transfer Requests --------------------
static usb_transfer_status_t wait_for_transmission_done(usb_transfer_t *transfer)
{
    // Block until the transfer's completion callback gives the binary
    // semaphore that was stashed in transfer->context, then report the
    // final transfer status.
    SemaphoreHandle_t done_sem = (SemaphoreHandle_t)transfer->context;
    xSemaphoreTake(done_sem, portMAX_DELAY);
    // EP0 halt->flush->clear is managed by USBH and lower layers
    return transfer->status;
}
// Perform one GET_DESCRIPTOR(configuration) control transfer on the given
// pre-allocated transfer, requesting num_bytes of descriptor data, and verify
// the device returned the expected amount. Blocks until the transfer finishes.
static esp_err_t get_config_desc_transfer(usb_host_client_handle_t client_hdl, usb_transfer_t *ctrl_transfer, const int bConfigurationValue, const int num_bytes)
{
    const usb_device_desc_t *dev_desc;
    ESP_ERROR_CHECK(usbh_dev_get_desc(ctrl_transfer->device_handle, &dev_desc));
    usb_setup_packet_t *setup_pkt = (usb_setup_packet_t *)ctrl_transfer->data_buffer;
    // GET_DESCRIPTOR takes a zero-based configuration index, hence the -1
    USB_SETUP_PACKET_INIT_GET_CONFIG_DESC(setup_pkt, bConfigurationValue - 1, num_bytes);
    // Data stage length is rounded up to a multiple of EP0's max packet size
    ctrl_transfer->num_bytes = sizeof(usb_setup_packet_t) + usb_round_up_to_mps(num_bytes, dev_desc->bMaxPacketSize0);
    // IN data stage should return exactly num_bytes (SHORT_DESC_REQ_LEN or wTotalLength) bytes
    const int expect_num_bytes = sizeof(usb_setup_packet_t) + num_bytes;
    // Submit control transfer
    esp_err_t ret = usb_host_transfer_submit_control(client_hdl, ctrl_transfer);
    if (ret != ESP_OK) {
        ESP_LOGE(USB_HOST_TAG, "Submit ctrl transfer failed");
        return ret;
    }
    // Wait for transfer to finish
    const usb_transfer_status_t status_short_desc = wait_for_transmission_done(ctrl_transfer);
    if (status_short_desc != USB_TRANSFER_STATUS_COMPLETED) {
        ESP_LOGE(USB_HOST_TAG, "Get config descriptor transfer status: %d", status_short_desc);
        ret = ESP_ERR_INVALID_STATE;
        return ret;
    }
    // Check IN transfer returned the expected correct number of bytes
    if ((expect_num_bytes != 0) && (ctrl_transfer->actual_num_bytes != expect_num_bytes)) {
        if (ctrl_transfer->actual_num_bytes > expect_num_bytes) {
            // The device returned more bytes than requested.
            // This violates the USB specs chapter 9.3.5, but we can continue
            ESP_LOGW(USB_HOST_TAG, "Incorrect number of bytes returned %d", ctrl_transfer->actual_num_bytes);
            return ESP_OK;
        } else {
            // The device returned less bytes than requested. We cannot continue.
            ESP_LOGE(USB_HOST_TAG, "Incorrect number of bytes returned %d", ctrl_transfer->actual_num_bytes);
            return ESP_ERR_INVALID_RESPONSE;
        }
    }
    return ESP_OK;
}
// Read a (possibly non-active) configuration descriptor from the device in two
// stages: first a short request to learn wTotalLength, then a full request.
// On success *config_desc_ret points to a heap copy that the caller must free
// with usb_host_get_config_desc_free().
esp_err_t usb_host_get_config_desc(usb_host_client_handle_t client_hdl, usb_device_handle_t dev_hdl, uint8_t bConfigurationValue, const usb_config_desc_t **config_desc_ret)
{
    esp_err_t ret = ESP_OK;
    HOST_CHECK(client_hdl != NULL && dev_hdl != NULL && config_desc_ret != NULL, ESP_ERR_INVALID_ARG);
    // Get number of configurations
    const usb_device_desc_t *dev_desc;
    ESP_ERROR_CHECK(usbh_dev_get_desc(dev_hdl, &dev_desc));
    // bConfigurationValue is 1-based and must not exceed bNumConfigurations
    HOST_CHECK(bConfigurationValue != 0, ESP_ERR_INVALID_ARG);
    HOST_CHECK(bConfigurationValue <= dev_desc->bNumConfigurations, ESP_ERR_NOT_SUPPORTED);
    // Initialize transfer
    usb_transfer_t *ctrl_transfer;
    if (usb_host_transfer_alloc(sizeof(usb_setup_packet_t) + CTRL_TRANSFER_MAX_DATA_LEN, 0, &ctrl_transfer)) {
        return ESP_ERR_NO_MEM;
    }
    SemaphoreHandle_t transfer_done = xSemaphoreCreateBinary();
    if (transfer_done == NULL) {
        ret = ESP_ERR_NO_MEM;
        goto exit;
    }
    ctrl_transfer->device_handle = dev_hdl;
    ctrl_transfer->bEndpointAddress = 0;
    ctrl_transfer->callback = get_config_desc_transfer_cb;
    // The semaphore is passed to the completion callback via the context field
    ctrl_transfer->context = (void *)transfer_done;
    // Initiate control transfer for short config descriptor
    ret = get_config_desc_transfer(client_hdl, ctrl_transfer, bConfigurationValue, SHORT_DESC_REQ_LEN);
    if (ret != ESP_OK) {
        goto exit;
    }
    // Get length of full config descriptor
    const usb_config_desc_t *config_desc_short = (usb_config_desc_t *)(ctrl_transfer->data_buffer + sizeof(usb_setup_packet_t));
    // Initiate control transfer for full config descriptor
    ret = get_config_desc_transfer(client_hdl, ctrl_transfer, bConfigurationValue, config_desc_short->wTotalLength);
    if (ret != ESP_OK) {
        goto exit;
    }
    // Allocate memory to store the configuration descriptor
    const usb_config_desc_t *config_desc_full = (usb_config_desc_t *)(ctrl_transfer->data_buffer + sizeof(usb_setup_packet_t));
    usb_config_desc_t *config_desc = heap_caps_malloc(config_desc_full->wTotalLength, MALLOC_CAP_DEFAULT);
    if (config_desc == NULL) {
        ret = ESP_ERR_NO_MEM;
        goto exit;
    }
    // Copy the configuration descriptor
    memcpy(config_desc, config_desc_full, config_desc_full->wTotalLength);
    *config_desc_ret = config_desc;
    ret = ESP_OK;
exit:
    // The transient transfer and semaphore are always released; only the
    // heap copy of the descriptor outlives this call
    if (ctrl_transfer) {
        usb_host_transfer_free(ctrl_transfer);
    }
    if (transfer_done != NULL) {
        vSemaphoreDelete(transfer_done);
    }
    return ret;
}
esp_err_t usb_host_get_config_desc_free ( const usb_config_desc_t * config_desc )
{
HOST_CHECK ( config_desc ! = NULL , ESP_ERR_INVALID_ARG ) ;
heap_caps_free ( ( usb_config_desc_t * ) config_desc ) ;
return ESP_OK ;
}
2021-08-24 11:20:50 -04:00
// ----------------------------------------------- Interface Functions -------------------------------------------------
// ----------------------- Private -------------------------
2023-05-08 12:43:32 -04:00
// Allocate an endpoint wrapper and the underlying USBH endpoint it tracks.
// On success the wrapper is returned via ep_wrap_ret; on failure nothing is
// allocated.
static esp_err_t ep_wrapper_alloc(usb_device_handle_t dev_hdl, const usb_ep_desc_t *ep_desc, interface_t *intf_obj, ep_wrapper_t **ep_wrap_ret)
{
    ep_wrapper_t *ep_wrap = heap_caps_calloc(1, sizeof(ep_wrapper_t), MALLOC_CAP_DEFAULT);
    if (ep_wrap == NULL) {
        return ESP_ERR_NO_MEM;
    }
    esp_err_t ret;
    usbh_ep_handle_t ep_hdl;
    // The wrapper serves as both the endpoint callback argument and the
    // endpoint's context (retrievable later via usbh_ep_get_context())
    usbh_ep_config_t ep_config = {
        .bInterfaceNumber = intf_obj->constant.intf_desc->bInterfaceNumber,
        .bAlternateSetting = intf_obj->constant.intf_desc->bAlternateSetting,
        .bEndpointAddress = ep_desc->bEndpointAddress,
        .ep_cb = endpoint_callback,
        .ep_cb_arg = (void *)ep_wrap,
        .context = (void *)ep_wrap,
    };
    ret = usbh_ep_alloc(dev_hdl, &ep_config, &ep_hdl);
    if (ret != ESP_OK) {
        goto alloc_err;
    }
    // Initialize endpoint wrapper item
    ep_wrap->constant.ep_hdl = ep_hdl;
    ep_wrap->constant.intf_obj = intf_obj;
    // Write back result
    *ep_wrap_ret = ep_wrap;
    ret = ESP_OK;
    return ret;

alloc_err:
    heap_caps_free(ep_wrap);
    return ret;
}
2023-05-08 12:43:32 -04:00
// Free an endpoint wrapper together with its underlying USBH endpoint.
// Passing NULL is a harmless no-op. dev_hdl is currently unused; it is kept
// for signature symmetry with ep_wrapper_alloc().
static void ep_wrapper_free(usb_device_handle_t dev_hdl, ep_wrapper_t *ep_wrap)
{
    if (ep_wrap != NULL) {
        // Release the underlying USBH endpoint first, then the wrapper itself
        ESP_ERROR_CHECK(usbh_ep_free(ep_wrap->constant.ep_hdl));
        heap_caps_free(ep_wrap);
    }
}
// Allocate an interface object sized to hold one endpoint wrapper pointer per
// endpoint of the given interface descriptor. Returns NULL on allocation failure.
static interface_t *interface_alloc(client_t *client_obj, usb_device_handle_t dev_hdl, const usb_intf_desc_t *intf_desc)
{
    const size_t obj_size = sizeof(interface_t) + (sizeof(ep_wrapper_t *) * intf_desc->bNumEndpoints);
    interface_t *intf_obj = heap_caps_calloc(1, obj_size, MALLOC_CAP_DEFAULT);
    if (intf_obj != NULL) {
        intf_obj->constant.intf_desc = intf_desc;
        intf_obj->constant.client_obj = client_obj;
        intf_obj->constant.dev_hdl = dev_hdl;
    }
    return intf_obj;
}
// Free an interface object. All of its endpoint wrappers must already have
// been freed and their slots cleared. Passing NULL is a no-op.
static void interface_free(interface_t *intf_obj)
{
    if (intf_obj == NULL) {
        return;
    }
    for (int ep_idx = 0; ep_idx < intf_obj->constant.intf_desc->bNumEndpoints; ep_idx++) {
        assert(intf_obj->constant.endpoints[ep_idx] == NULL);
    }
    heap_caps_free(intf_obj);
}
// Claim an interface for a client: parse the interface and endpoint
// descriptors out of config_desc, allocate the interface object and all of
// its endpoint wrappers, and register them with the client.
// NOTE(review): caller is expected to hold the library mux_lock (see the
// "safe because we have already taken the mutex" comment below).
static esp_err_t interface_claim(client_t *client_obj, usb_device_handle_t dev_hdl, const usb_config_desc_t *config_desc, uint8_t bInterfaceNumber, uint8_t bAlternateSetting, interface_t **intf_obj_ret)
{
    esp_err_t ret;
    // We need to walk to configuration descriptor to find the correct interface descriptor, and each of its constituent endpoint descriptors
    // Find the interface descriptor and allocate the interface object
    int offset_intf;
    const usb_intf_desc_t *intf_desc = usb_parse_interface_descriptor(config_desc, bInterfaceNumber, bAlternateSetting, &offset_intf);
    if (intf_desc == NULL) {
        ret = ESP_ERR_NOT_FOUND;
        goto exit;
    }
    // Allocate interface object
    interface_t *intf_obj = interface_alloc(client_obj, dev_hdl, intf_desc);
    if (intf_obj == NULL) {
        ret = ESP_ERR_NO_MEM;
        goto exit;
    }
    // Find each endpoint descriptor in the interface by index, and allocate those endpoints
    for (int i = 0; i < intf_desc->bNumEndpoints; i++) {
        int offset_ep = offset_intf;
        const usb_ep_desc_t *ep_desc = usb_parse_endpoint_descriptor_by_index(intf_desc, i, config_desc->wTotalLength, &offset_ep);
        if (ep_desc == NULL) {
            ret = ESP_ERR_NOT_FOUND;
            goto ep_alloc_err;
        }
        // Allocate the endpoint wrapper item
        ep_wrapper_t *ep_wrap;
        ret = ep_wrapper_alloc(dev_hdl, ep_desc, intf_obj, &ep_wrap);
        if (ret != ESP_OK) {
            goto ep_alloc_err;
        }
        // Fill the interface object with the allocated endpoints
        intf_obj->constant.endpoints[i] = ep_wrap;
    }
    // Add interface object to client (safe because we have already taken the mutex)
    TAILQ_INSERT_TAIL(&client_obj->mux_protected.interface_tailq, intf_obj, mux_protected.tailq_entry);
    // Add each endpoint wrapper item to the client's endpoint list
    HOST_ENTER_CRITICAL();
    for (int i = 0; i < intf_desc->bNumEndpoints; i++) {
        TAILQ_INSERT_TAIL(&client_obj->dynamic.idle_ep_tailq, intf_obj->constant.endpoints[i], dynamic.tailq_entry);
    }
    HOST_EXIT_CRITICAL();
    // Write back result
    *intf_obj_ret = intf_obj;
    ret = ESP_OK;
    return ret;

ep_alloc_err:
    // Free whatever endpoints were allocated so far; unfilled slots are NULL
    // (calloc'd), which ep_wrapper_free() tolerates
    for (int i = 0; i < intf_desc->bNumEndpoints; i++) {
        ep_wrapper_free(dev_hdl, intf_obj->constant.endpoints[i]);
        intf_obj->constant.endpoints[i] = NULL;
    }
    interface_free(intf_obj);
exit:
    return ret;
}
// Release a previously claimed interface: verify none of its endpoints are in
// use, then unregister and free the interface object and its endpoints.
// NOTE(review): caller is expected to hold the library mux_lock (see the
// "safe because we have already taken the mutex" comment below).
static esp_err_t interface_release(client_t *client_obj, usb_device_handle_t dev_hdl, uint8_t bInterfaceNumber)
{
    esp_err_t ret;
    // Find the interface object
    interface_t *intf_obj_iter;
    interface_t *intf_obj = NULL;
    TAILQ_FOREACH(intf_obj_iter, &client_obj->mux_protected.interface_tailq, mux_protected.tailq_entry) {
        if (intf_obj_iter->constant.dev_hdl == dev_hdl && intf_obj_iter->constant.intf_desc->bInterfaceNumber == bInterfaceNumber) {
            intf_obj = intf_obj_iter;
            break;
        }
    }
    if (intf_obj == NULL) {
        ret = ESP_ERR_NOT_FOUND;
        goto exit;
    }
    // Check that all endpoints in the interface are in a state to be freed
    // Todo: Check that each EP is halted before allowing them to be freed (IDF-7273)
    HOST_ENTER_CRITICAL();
    bool can_free = true;
    for (int i = 0; i < intf_obj->constant.intf_desc->bNumEndpoints; i++) {
        ep_wrapper_t *ep_wrap = intf_obj->constant.endpoints[i];
        // Endpoint must not be on the pending list and must not have in-flight URBs
        if (ep_wrap->dynamic.num_urb_inflight != 0 || ep_wrap->dynamic.flags.pending) {
            can_free = false;
            break;
        }
    }
    if (!can_free) {
        HOST_EXIT_CRITICAL();
        ret = ESP_ERR_INVALID_STATE;
        goto exit;
    }
    // Proceed to remove all endpoint wrapper items from the list
    for (int i = 0; i < intf_obj->constant.intf_desc->bNumEndpoints; i++) {
        TAILQ_REMOVE(&client_obj->dynamic.idle_ep_tailq, intf_obj->constant.endpoints[i], dynamic.tailq_entry);
    }
    HOST_EXIT_CRITICAL();
    // Remove the interface object from the list (safe because we have already taken the mutex)
    TAILQ_REMOVE(&client_obj->mux_protected.interface_tailq, intf_obj, mux_protected.tailq_entry);
    // Free each endpoint in the interface
    for (int i = 0; i < intf_obj->constant.intf_desc->bNumEndpoints; i++) {
        ep_wrapper_free(dev_hdl, intf_obj->constant.endpoints[i]);
        intf_obj->constant.endpoints[i] = NULL;
    }
    // Free the interface object itself
    interface_free(intf_obj);
    ret = ESP_OK;
exit:
    return ret;
}
// ----------------------- Public --------------------------
esp_err_t usb_host_interface_claim(usb_host_client_handle_t client_hdl, usb_device_handle_t dev_hdl, uint8_t bInterfaceNumber, uint8_t bAlternateSetting)
{
    HOST_CHECK(client_hdl != NULL && dev_hdl != NULL, ESP_ERR_INVALID_ARG);
    client_t *client_obj = (client_t *)client_hdl;
    HOST_ENTER_CRITICAL();
    uint8_t dev_addr;
    ESP_ERROR_CHECK(usbh_dev_get_addr(dev_hdl, &dev_addr));
    // Check if client actually opened device
    HOST_CHECK_FROM_CRIT(_check_client_opened_device(client_obj, dev_addr), ESP_ERR_INVALID_STATE);
    // Mark that this client is about to take the mux, so it cannot be deregistered meanwhile
    client_obj->dynamic.flags.taking_mux = 1;
    HOST_EXIT_CRITICAL();

    // Take mux lock. This protects the client being released or other clients from claiming interfaces
    xSemaphoreTake(p_host_lib_obj->constant.mux_lock, portMAX_DELAY);
    esp_err_t ret;
    const usb_config_desc_t *config_desc;
    ESP_ERROR_CHECK(usbh_dev_get_config_desc(dev_hdl, &config_desc));
    interface_t *intf_obj;
    // Claim interface
    ret = interface_claim(client_obj, dev_hdl, config_desc, bInterfaceNumber, bAlternateSetting, &intf_obj);
    if (ret != ESP_OK) {
        goto exit;
    }
    ret = ESP_OK;
exit:
    xSemaphoreGive(p_host_lib_obj->constant.mux_lock);
    HOST_ENTER_CRITICAL();
    if (ret == ESP_OK) {
        client_obj->dynamic.flags.num_intf_claimed++;
    }
    client_obj->dynamic.flags.taking_mux = 0;
    HOST_EXIT_CRITICAL();
    return ret;
}
esp_err_t usb_host_interface_release(usb_host_client_handle_t client_hdl, usb_device_handle_t dev_hdl, uint8_t bInterfaceNumber)
{
    HOST_CHECK(client_hdl != NULL && dev_hdl != NULL, ESP_ERR_INVALID_ARG);
    client_t *client_obj = (client_t *)client_hdl;
    HOST_ENTER_CRITICAL();
    uint8_t dev_addr;
    ESP_ERROR_CHECK(usbh_dev_get_addr(dev_hdl, &dev_addr));
    // Check if client actually opened device
    HOST_CHECK_FROM_CRIT(_check_client_opened_device(client_obj, dev_addr), ESP_ERR_INVALID_STATE);
    // Mark that this client is about to take the mux, so it cannot be deregistered meanwhile
    client_obj->dynamic.flags.taking_mux = 1;
    HOST_EXIT_CRITICAL();

    // Take mux lock. This protects the client being released or other clients from claiming interfaces
    xSemaphoreTake(p_host_lib_obj->constant.mux_lock, portMAX_DELAY);
    esp_err_t ret = interface_release(client_obj, dev_hdl, bInterfaceNumber);
    xSemaphoreGive(p_host_lib_obj->constant.mux_lock);

    HOST_ENTER_CRITICAL();
    if (ret == ESP_OK) {
        client_obj->dynamic.flags.num_intf_claimed--;
    }
    client_obj->dynamic.flags.taking_mux = 0;
    HOST_EXIT_CRITICAL();
    return ret;
}
esp_err_t usb_host_endpoint_halt(usb_device_handle_t dev_hdl, uint8_t bEndpointAddress)
{
    // Resolve the endpoint handle, then issue the HALT command to USBH
    usbh_ep_handle_t ep_hdl;
    esp_err_t err = usbh_ep_get_handle(dev_hdl, bEndpointAddress, &ep_hdl);
    if (err == ESP_OK) {
        err = usbh_ep_command(ep_hdl, USBH_EP_CMD_HALT);
    }
    return err;
}
esp_err_t usb_host_endpoint_flush(usb_device_handle_t dev_hdl, uint8_t bEndpointAddress)
{
    // Resolve the endpoint handle, then issue the FLUSH command to USBH
    usbh_ep_handle_t ep_hdl;
    esp_err_t err = usbh_ep_get_handle(dev_hdl, bEndpointAddress, &ep_hdl);
    if (err == ESP_OK) {
        err = usbh_ep_command(ep_hdl, USBH_EP_CMD_FLUSH);
    }
    return err;
}
esp_err_t usb_host_endpoint_clear(usb_device_handle_t dev_hdl, uint8_t bEndpointAddress)
{
    // Resolve the endpoint handle, then issue the CLEAR command to USBH
    usbh_ep_handle_t ep_hdl;
    esp_err_t err = usbh_ep_get_handle(dev_hdl, bEndpointAddress, &ep_hdl);
    if (err == ESP_OK) {
        err = usbh_ep_command(ep_hdl, USBH_EP_CMD_CLEAR);
    }
    return err;
}
// ------------------------------------------------ Asynchronous I/O ---------------------------------------------------
// ----------------------- Public --------------------------
esp_err_t usb_host_transfer_alloc(size_t data_buffer_size, int num_isoc_packets, usb_transfer_t **transfer)
{
    // A usb_transfer_t is embedded in a URB; allocate the URB and hand the
    // caller a pointer to the embedded transfer.
    urb_t *new_urb = urb_alloc(data_buffer_size, num_isoc_packets);
    if (new_urb == NULL) {
        return ESP_ERR_NO_MEM;
    }
    *transfer = &new_urb->transfer;
    return ESP_OK;
}
esp_err_t usb_host_transfer_free(usb_transfer_t *transfer)
{
    // Freeing a NULL transfer is a harmless no-op
    if (transfer != NULL) {
        // Recover the enclosing URB from the embedded transfer and free it
        urb_free(__containerof(transfer, urb_t, transfer));
    }
    return ESP_OK;
}
esp_err_t usb_host_transfer_submit(usb_transfer_t *transfer)
{
    HOST_CHECK(transfer != NULL, ESP_ERR_INVALID_ARG);
    // Check that transfer and target endpoint are valid
    HOST_CHECK(transfer->device_handle != NULL, ESP_ERR_INVALID_ARG);   // Target device must be set
    // Must not target EP0; control transfers go through usb_host_transfer_submit_control()
    HOST_CHECK((transfer->bEndpointAddress & USB_B_ENDPOINT_ADDRESS_EP_NUM_MASK) != 0, ESP_ERR_INVALID_ARG);
    usbh_ep_handle_t ep_hdl;
    ep_wrapper_t *ep_wrap = NULL;
    urb_t *urb_obj = __containerof(transfer, urb_t, transfer);
    esp_err_t ret;
    ret = usbh_ep_get_handle(transfer->device_handle, transfer->bEndpointAddress, &ep_hdl);
    if (ret != ESP_OK) {
        goto err;
    }
    // The endpoint's context was set to its wrapper at allocation time
    ep_wrap = usbh_ep_get_context(ep_hdl);
    assert(ep_wrap != NULL);
    // Check that we are not submitting a transfer already in-flight
    HOST_CHECK(!urb_obj->usb_host_inflight, ESP_ERR_NOT_FINISHED);
    urb_obj->usb_host_inflight = true;
    HOST_ENTER_CRITICAL();
    ep_wrap->dynamic.num_urb_inflight++;
    HOST_EXIT_CRITICAL();
    ret = usbh_ep_enqueue_urb(ep_hdl, urb_obj);
    if (ret != ESP_OK) {
        goto submit_err;
    }
    return ret;

submit_err:
    // Enqueue failed: roll back the in-flight accounting done above
    HOST_ENTER_CRITICAL();
    ep_wrap->dynamic.num_urb_inflight--;
    HOST_EXIT_CRITICAL();
    urb_obj->usb_host_inflight = false;
err:
    return ret;
}
esp_err_t usb_host_transfer_submit_control(usb_host_client_handle_t client_hdl, usb_transfer_t *transfer)
{
    HOST_CHECK(client_hdl != NULL && transfer != NULL, ESP_ERR_INVALID_ARG);
    // Check that control transfer is valid
    HOST_CHECK(transfer->device_handle != NULL, ESP_ERR_INVALID_ARG);   // Target device must be set
    // Control transfers must be targeted at EP 0
    HOST_CHECK((transfer->bEndpointAddress & USB_B_ENDPOINT_ADDRESS_EP_NUM_MASK) == 0, ESP_ERR_INVALID_ARG);

    usb_device_handle_t dev_hdl = transfer->device_handle;
    urb_t *urb_obj = __containerof(transfer, urb_t, transfer);
    // Check that we are not submitting a transfer already in-flight
    HOST_CHECK(!urb_obj->usb_host_inflight, ESP_ERR_NOT_FINISHED);
    urb_obj->usb_host_inflight = true;
    // Save client handle into URB so completion can be routed back to this client
    urb_obj->usb_host_client = (void *)client_hdl;

    esp_err_t ret;
    ret = usbh_dev_submit_ctrl_urb(dev_hdl, urb_obj);
    if (ret != ESP_OK) {
        // Submission failed: clear the in-flight flag so the URB can be reused
        urb_obj->usb_host_inflight = false;
    }
    return ret;
}