Merge branch 'opt/bluedroid_adv_report_datapath' into 'master'

component/bt: optimize BLE adv report datapath and adv report flow control mechanism

Closes BT-2561

See merge request espressif/esp-idf!19171
This commit is contained in:
Jiang Jiang Jian 2022-08-03 16:31:23 +08:00
commit 373524cec2
30 changed files with 1604 additions and 341 deletions

View File

@ -53,6 +53,8 @@ if(CONFIG_BT_ENABLED)
"common/osi/buffer.c"
"common/osi/config.c"
"common/osi/fixed_queue.c"
"common/osi/pkt_queue.c"
"common/osi/fixed_pkt_queue.c"
"common/osi/future.c"
"common/osi/hash_functions.c"
"common/osi/hash_map.c"

View File

@ -60,6 +60,10 @@
#endif /* #if CLASSIC_BT_INCLUDED */
#endif
#if (BLE_INCLUDED == TRUE)
#include "btc_gap_ble.h"
#endif
#if CONFIG_BLE_MESH
#include "btc_ble_mesh_ble.h"
#include "btc_ble_mesh_prov.h"
@ -75,6 +79,9 @@
#define BTC_TASK_STACK_SIZE (BT_BTC_TASK_STACK_SIZE + BT_TASK_EXTRA_STACK_SIZE) //by menuconfig
#define BTC_TASK_NAME "BTC_TASK"
#define BTC_TASK_PRIO (BT_TASK_MAX_PRIORITIES - 6)
#define BTC_TASK_WORKQUEUE_NUM (2)
#define BTC_TASK_WORKQUEUE0_LEN (0)
#define BTC_TASK_WORKQUEUE1_LEN (5)
osi_thread_t *btc_thread;
@ -414,7 +421,9 @@ error_exit:;
bt_status_t btc_init(void)
{
btc_thread = osi_thread_create(BTC_TASK_NAME, BTC_TASK_STACK_SIZE, BTC_TASK_PRIO, BTC_TASK_PINNED_TO_CORE, 2);
const size_t workqueue_len[] = {BTC_TASK_WORKQUEUE0_LEN, BTC_TASK_WORKQUEUE1_LEN};
btc_thread = osi_thread_create(BTC_TASK_NAME, BTC_TASK_STACK_SIZE, BTC_TASK_PRIO, BTC_TASK_PINNED_TO_CORE,
BTC_TASK_WORKQUEUE_NUM, workqueue_len);
if (btc_thread == NULL) {
return BT_STATUS_NOMEM;
}
@ -427,6 +436,7 @@ bt_status_t btc_init(void)
#if (BLE_INCLUDED == TRUE)
btc_gap_callback_init();
btc_gap_ble_init();
#endif ///BLE_INCLUDED == TRUE
#if SCAN_QUEUE_CONGEST_CHECK
@ -444,7 +454,9 @@ void btc_deinit(void)
osi_thread_free(btc_thread);
btc_thread = NULL;
#if (BLE_INCLUDED == TRUE)
btc_gap_ble_deinit();
#endif ///BLE_INCLUDED == TRUE
#if SCAN_QUEUE_CONGEST_CHECK
btc_adv_list_deinit();
#endif
@ -463,3 +475,8 @@ int get_btc_work_queue_size(void)
{
return osi_thread_queue_wait_size(btc_thread, 0);
}
osi_thread_t *btc_get_current_thread(void)
{
return btc_thread;
}

View File

@ -124,6 +124,13 @@ void btc_deinit(void);
bool btc_check_queue_is_congest(void);
int get_btc_work_queue_size(void);
/**
* get the BTC thread handle
* @return NULL: fail
* others: pointer of osi_thread structure of BTC
*/
osi_thread_t *btc_get_current_thread(void);
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,161 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "osi/allocator.h"
#include "osi/pkt_queue.h"
#include "osi/fixed_pkt_queue.h"
#include "osi/osi.h"
#include "osi/semaphore.h"
/* Bounded FIFO of pkt_linked_item_t packets.
 * Capacity is enforced with a pair of counting semaphores layered on top of
 * the unbounded, mutex-protected pkt_queue:
 *   - enqueue_sem counts free slots (initialized to capacity)
 *   - dequeue_sem counts queued packets (initialized to 0)
 */
typedef struct fixed_pkt_queue_t {
    struct pkt_queue *pkt_list;       /* underlying linked-list FIFO */
    osi_sem_t enqueue_sem;            /* free-slot counter; taken before enqueue */
    osi_sem_t dequeue_sem;            /* queued-packet counter; taken before dequeue */
    size_t capacity;                  /* maximum number of packets allowed */
    fixed_pkt_queue_cb dequeue_ready; /* optional callback run by fixed_pkt_queue_process() */
} fixed_pkt_queue_t;
/* Create a bounded packet queue holding at most |capacity| packets.
 * Returns NULL on any allocation failure; partially-built state is released
 * via fixed_pkt_queue_free(), which tolerates missing members. */
fixed_pkt_queue_t *fixed_pkt_queue_new(size_t capacity)
{
    fixed_pkt_queue_t *queue = osi_calloc(sizeof(fixed_pkt_queue_t));

    if (queue == NULL) {
        goto cleanup;
    }

    queue->capacity = capacity;

    queue->pkt_list = pkt_queue_create();
    if (queue->pkt_list == NULL) {
        goto cleanup;
    }

    /* enqueue_sem starts full: every slot is initially free. */
    osi_sem_new(&queue->enqueue_sem, capacity, capacity);
    if (queue->enqueue_sem == NULL) {
        goto cleanup;
    }

    /* dequeue_sem starts empty: nothing has been queued yet. */
    osi_sem_new(&queue->dequeue_sem, capacity, 0);
    if (queue->dequeue_sem == NULL) {
        goto cleanup;
    }

    return queue;

cleanup:
    fixed_pkt_queue_free(queue, NULL);
    return NULL;
}
/* Destroy |queue|, releasing every queued packet with |free_cb| (NULL means
 * the default destructor). Safe to call with a NULL or half-built queue. */
void fixed_pkt_queue_free(fixed_pkt_queue_t *queue, fixed_pkt_queue_free_cb free_cb)
{
    if (queue == NULL) {
        return;
    }

    /* Drop any registered callback first, then tear down the packet list. */
    fixed_pkt_queue_unregister_dequeue(queue);
    pkt_queue_destroy(queue->pkt_list, (pkt_queue_free_cb)free_cb);
    queue->pkt_list = NULL;

    if (queue->enqueue_sem) {
        osi_sem_free(&queue->enqueue_sem);
    }
    if (queue->dequeue_sem) {
        osi_sem_free(&queue->dequeue_sem);
    }

    osi_free(queue);
}
/* Report whether |queue| holds no packets; a NULL queue counts as empty. */
bool fixed_pkt_queue_is_empty(fixed_pkt_queue_t *queue)
{
    return (queue == NULL) ? true : pkt_queue_is_empty(queue->pkt_list);
}
/* Number of packets currently in |queue|; a NULL queue has length 0. */
size_t fixed_pkt_queue_length(fixed_pkt_queue_t *queue)
{
    return (queue == NULL) ? 0 : pkt_queue_length(queue->pkt_list);
}
/* Maximum number of packets |queue| may hold; |queue| must not be NULL. */
size_t fixed_pkt_queue_capacity(fixed_pkt_queue_t *queue)
{
    assert(queue != NULL);

    return queue->capacity;
}
/* Append |linked_pkt| to |queue|, blocking up to |timeout| when the queue is
 * full. Returns true on success, false if the free-slot wait timed out. */
bool fixed_pkt_queue_enqueue(fixed_pkt_queue_t *queue, pkt_linked_item_t *linked_pkt, uint32_t timeout)
{
    assert(queue != NULL);
    assert(linked_pkt != NULL);

    /* Reserve a free slot first; this is what bounds the queue. */
    if (osi_sem_take(&queue->enqueue_sem, timeout) != 0) {
        return false;
    }

    /* With a slot reserved, the underlying list insert cannot fail. */
    bool inserted = pkt_queue_enqueue(queue->pkt_list, linked_pkt);
    assert(inserted == true);

    /* Publish one more consumable packet to dequeuers. */
    osi_sem_give(&queue->dequeue_sem);

    return inserted;
}
/* Remove and return the oldest packet, blocking up to |timeout| while the
 * queue is empty. Returns NULL if the wait timed out. */
pkt_linked_item_t *fixed_pkt_queue_dequeue(fixed_pkt_queue_t *queue, uint32_t timeout)
{
    assert(queue != NULL);

    /* Wait for a packet to become available. */
    if (osi_sem_take(&queue->dequeue_sem, timeout) != 0) {
        return NULL;
    }

    pkt_linked_item_t *pkt = pkt_queue_dequeue(queue->pkt_list);

    /* One slot has been freed up for producers. */
    osi_sem_give(&queue->enqueue_sem);

    return pkt;
}
/* Non-destructive, non-blocking look at the head packet.
 * Returns NULL when |queue| is NULL or empty. */
pkt_linked_item_t *fixed_pkt_queue_try_peek_first(fixed_pkt_queue_t *queue)
{
    return (queue == NULL) ? NULL : pkt_queue_try_peek_first(queue->pkt_list);
}
/* Install |ready_cb| to be invoked by fixed_pkt_queue_process().
 * Neither argument may be NULL. */
void fixed_pkt_queue_register_dequeue(fixed_pkt_queue_t *queue, fixed_pkt_queue_cb ready_cb)
{
    assert(queue != NULL);
    assert(ready_cb != NULL);

    queue->dequeue_ready = ready_cb;
}
/* Remove any registered dequeue-ready callback. Idempotent. */
void fixed_pkt_queue_unregister_dequeue(fixed_pkt_queue_t *queue)
{
    assert(queue != NULL);

    queue->dequeue_ready = NULL;
}
/* Run the registered dequeue-ready callback, if any, passing the queue. */
void fixed_pkt_queue_process(fixed_pkt_queue_t *queue)
{
    assert(queue != NULL);

    fixed_pkt_queue_cb ready = queue->dequeue_ready;
    if (ready != NULL) {
        ready(queue);
    }
}

View File

@ -0,0 +1,79 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _FIXED_PKT_QUEUE_H_
#define _FIXED_PKT_QUEUE_H_
#include "osi/pkt_queue.h"
#include "osi/semaphore.h"
#ifdef __cplusplus
extern "C" {
#endif
#ifndef FIXED_PKT_QUEUE_SIZE_MAX
#define FIXED_PKT_QUEUE_SIZE_MAX 254
#endif
#define FIXED_PKT_QUEUE_MAX_TIMEOUT OSI_SEM_MAX_TIMEOUT
struct fixed_pkt_queue_t;
typedef struct fixed_pkt_queue_t fixed_pkt_queue_t;
typedef void (*fixed_pkt_queue_free_cb)(pkt_linked_item_t *data);
typedef void (*fixed_pkt_queue_cb)(fixed_pkt_queue_t *queue);
// Creates a new fixed queue with the given |capacity|. If more elements than
// |capacity| are added to the queue, the caller is blocked until space is
// made available in the queue. Returns NULL on failure. The caller must free
// the returned queue with |fixed_pkt_queue_free|.
fixed_pkt_queue_t *fixed_pkt_queue_new(size_t capacity);
// Freeing a queue that is currently in use (i.e. has waiters
// blocked on it) results in undefined behaviour.
void fixed_pkt_queue_free(fixed_pkt_queue_t *queue, fixed_pkt_queue_free_cb free_cb);
// Returns a value indicating whether the given |queue| is empty. If |queue|
// is NULL, the return value is true.
bool fixed_pkt_queue_is_empty(fixed_pkt_queue_t *queue);
// Returns the length of the |queue|. If |queue| is NULL, the return value
// is 0.
size_t fixed_pkt_queue_length(fixed_pkt_queue_t *queue);
// Returns the maximum number of elements this queue may hold. |queue| may
// not be NULL.
size_t fixed_pkt_queue_capacity(fixed_pkt_queue_t *queue);
// Enqueues the given |data| into the |queue|. The caller will be blocked or immediately return or wait for timeout according to the parameter timeout.
// If enqueue failed, it will return false, otherwise return true
bool fixed_pkt_queue_enqueue(fixed_pkt_queue_t *queue, pkt_linked_item_t *linked_pkt, uint32_t timeout);
// Dequeues the next element from |queue|. If the queue is currently empty,
// this function will block the caller until an item is enqueued or immediately return or wait for timeout according to the parameter timeout.
// If dequeue failed, it will return NULL, otherwise return a pointer to the dequeued item.
pkt_linked_item_t *fixed_pkt_queue_dequeue(fixed_pkt_queue_t *queue, uint32_t timeout);
// Returns the first element from |queue|, if present, without dequeuing it.
// This function will never block the caller. Returns NULL if there are no
// elements in the queue or |queue| is NULL.
pkt_linked_item_t *fixed_pkt_queue_try_peek_first(fixed_pkt_queue_t *queue);
// Registers |queue| with |reactor| for dequeue operations. When there is an element
// in the queue, ready_cb will be called. The |context| parameter is passed, untouched,
// to the callback routine. Neither |queue|, nor |reactor|, nor |ready_cb| may be NULL.
// |context| may be NULL.
void fixed_pkt_queue_register_dequeue(fixed_pkt_queue_t *queue, fixed_pkt_queue_cb ready_cb);
// Unregisters the dequeue ready callback for |queue| from whichever reactor
// it is registered with, if any. This function is idempotent.
void fixed_pkt_queue_unregister_dequeue(fixed_pkt_queue_t *queue);
void fixed_pkt_queue_process(fixed_pkt_queue_t *queue);
#ifdef __cplusplus
}
#endif

#endif /* _FIXED_PKT_QUEUE_H_ */

View File

@ -0,0 +1,88 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _PKT_LIST_H_
#define _PKT_LIST_H_
#include "sys/queue.h"
#include <stdint.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif
struct pkt_queue;
typedef struct pkt_linked_item {
STAILQ_ENTRY(pkt_linked_item) next;
uint8_t data[];
} pkt_linked_item_t;
#define BT_PKT_LINKED_HDR_SIZE (sizeof (pkt_linked_item_t))
typedef void (*pkt_queue_free_cb)(pkt_linked_item_t *item);
/*
* brief: create a pkt_queue instance. pkt_queue is a wrapper class of a FIFO implemented by single linked list.
* The enqueue and dequeue operations of the FIFO are protected against race conditions of multiple tasks
* return: NULL if not enough memory, otherwise a valid pointer
*/
struct pkt_queue *pkt_queue_create(void);
/*
* brief: enqueue one item to the FIFO
* param queue: pkt_queue instance created using pkt_queue_create
* param item: the item to be enqueued to the FIFO
* return: true if enqueued successfully, false when the arguments passed in are invalid
*/
bool pkt_queue_enqueue(struct pkt_queue *queue, pkt_linked_item_t *item);
/*
* brief: dequeue one item for the FIFO
* param queue: pkt_queue instance created using pkt_queue_create
* return: pointer of type pkt_linked_item_t dequeued, NULL if the queue is empty or upon exception
*/
pkt_linked_item_t *pkt_queue_dequeue(struct pkt_queue *queue);
/*
* brief: get the pointer of the first item from the FIFO but not get it dequeued
* param queue: pkt_queue instance created using pkt_queue_create
* return: pointer of the first item in the FIFO, NULL if the FIFO is empty
*/
pkt_linked_item_t *pkt_queue_try_peek_first(struct pkt_queue *queue);
/*
* brief: retrieve the number of items existing in the FIFO
* param queue: pkt_queue instance created using pkt_queue_create
* return: total number of items in the FIFO
*/
size_t pkt_queue_length(const struct pkt_queue *queue);
/*
* brief: retrieve the status whether the FIFO is empty
* param queue: pkt_queue instance created using pkt_queue_create
* return: false if the FIFO is not empty, otherwise true
*/
bool pkt_queue_is_empty(const struct pkt_queue *queue);
/*
* brief: delete the item in the FIFO one by one
* param free_cb: destructor function for each item in the FIFO, if set to NULL, will use osi_free_func by default
*/
void pkt_queue_flush(struct pkt_queue *queue, pkt_queue_free_cb free_cb);
/*
* brief: delete the items in the FIFO and then destroy the pkt_queue instance.
* param free_cb: destructor function for each item in the FIFO, if set to NULL, will use osi_free_func by default
*/
void pkt_queue_destroy(struct pkt_queue *queue, pkt_queue_free_cb free_cb);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -20,6 +20,7 @@
#define OSI_THREAD_MAX_TIMEOUT OSI_SEM_MAX_TIMEOUT
struct osi_thread;
struct osi_event;
typedef struct osi_thread osi_thread_t;
@ -40,7 +41,7 @@ typedef enum {
* param work_queue_num: specify the number of work queues; queue[0] has the highest priority, and the priority decreases with the index
* return : if create successfully, return thread handler; otherwise return NULL.
*/
osi_thread_t *osi_thread_create(const char *name, size_t stack_size, int priority, osi_thread_core_t core, uint8_t work_queue_num);
osi_thread_t *osi_thread_create(const char *name, size_t stack_size, int priority, osi_thread_core_t core, uint8_t work_queue_num, const size_t work_queue_len[]);
/*
* brief: Destroy a thread or task
@ -80,4 +81,42 @@ const char *osi_thread_name(osi_thread_t *thread);
*/
int osi_thread_queue_wait_size(osi_thread_t *thread, int wq_idx);
/*
* brief: Create an osi_event struct and register the handler function and its argument
* An osi_event is a kind of work that can be posted to the workqueue of osi_thread to process,
* but the work can have at most one instance the thread workqueue before it is processed. This
* allows the "single post, multiple data processing" jobs.
* param func: the handler to process the job
* param context: the argument to be passed to the handler function when the job is being processed
* return: NULL if no memory, otherwise a valid struct pointer
*/
struct osi_event *osi_event_create(osi_thread_func_t func, void *context);
/*
* brief: Bind an osi_event to a specific work queue for an osi_thread.
* After binding is completed, a function call of API osi_thread_post_event will send a work
* to the workqueue of the thread, with specified queue index.
* param func: event: the pointer to osi_event that is created using osi_event_create
* param thread: the pointer to osi_thread that is created using osi_thread_create
* param queue_idx: the index of the workqueue of the specified osi_thread, with range starting from 0 to work_queue_num - 1
* return: true if osi_event binds to the thread's workqueue successfully, otherwise false
*/
bool osi_event_bind(struct osi_event* event, osi_thread_t *thread, int queue_idx);
/*
* brief: Destroy the osi_event struct created by osi_event_create and free the allocated memory
* param event: the pointer to osi_event
*/
void osi_event_delete(struct osi_event* event);
/*
* brief: try sending a work to the binded thread's workqueue, so that it can be handled by the worker thread
* param event: pointer to osi_event, created by osi_event_create
* param timeout: post timeout, OSI_THREAD_MAX_TIMEOUT means blocking forever, 0 means never blocking, others means block millisecond
* return: true if the message is enqueued to the thread workqueue, otherwise failed
* note: if the return value of function is false, it is the case that the workqueue of the thread is full, and users
* are expected to post the event sometime later to get the work handled.
*/
bool osi_thread_post_event(struct osi_event *event, uint32_t timeout);
#endif /* __THREAD_H__ */

View File

@ -0,0 +1,144 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "osi/pkt_queue.h"
#include "osi/allocator.h"
#include "osi/mutex.h"
/* Head type for the singly-linked tail queue (STAILQ) of packets. */
STAILQ_HEAD(pkt_queue_header, pkt_linked_item);

/* Mutex-protected, unbounded FIFO of pkt_linked_item_t. */
struct pkt_queue {
    osi_mutex_t lock;               /* guards header and length */
    size_t length;                  /* number of items currently queued */
    struct pkt_queue_header header; /* STAILQ head of the packet list */
} pkt_queue_t;  /* NOTE(review): this is NOT a typedef — it declares a stray
                 * file-scope variable named pkt_queue_t. Likely meant to be
                 * just "};". Confirm no one references it before removing. */
/* Create an empty, mutex-protected packet FIFO.
 * Returns NULL when allocation or mutex creation fails. */
struct pkt_queue *pkt_queue_create(void)
{
    /* osi_calloc pairs with the osi_free used on the destroy path; the
     * original mixed plain calloc with osi_free. */
    struct pkt_queue *queue = osi_calloc(sizeof(struct pkt_queue));
    if (queue == NULL) {
        return NULL;
    }
    if (osi_mutex_new(&queue->lock) != 0) {
        /* BUGFIX: the original freed |queue| here but fell through, then
         * initialized and returned the freed pointer (use-after-free). */
        osi_free(queue);
        return NULL;
    }
    struct pkt_queue_header *p = &queue->header;
    STAILQ_INIT(p);
    return queue;
}
/* Free every queued packet and reset the list to empty.
 * Caller must hold queue->lock. |free_cb| NULL selects the default
 * destructor (osi_free_func). */
static void pkt_queue_cleanup(struct pkt_queue *queue, pkt_queue_free_cb free_cb)
{
    if (queue == NULL) {
        return;
    }

    pkt_queue_free_cb destructor = (free_cb != NULL) ? free_cb : (pkt_queue_free_cb)osi_free_func;
    struct pkt_queue_header *header = &queue->header;

    /* Walk the list, saving each successor before destroying the node. */
    pkt_linked_item_t *cur = STAILQ_FIRST(header);
    while (cur != NULL) {
        pkt_linked_item_t *nxt = STAILQ_NEXT(cur, next);
        destructor(cur);
        cur = nxt;
    }

    STAILQ_INIT(header);
    queue->length = 0;
}
/* Discard all queued packets under the lock; the queue itself survives. */
void pkt_queue_flush(struct pkt_queue *queue, pkt_queue_free_cb free_cb)
{
    if (queue == NULL) {
        return;
    }

    osi_mutex_lock(&queue->lock, OSI_MUTEX_MAX_TIMEOUT);
    pkt_queue_cleanup(queue, free_cb);
    osi_mutex_unlock(&queue->lock);
}
/* Discard all queued packets, then release the lock and the queue itself.
 * Freeing a queue with blocked waiters is undefined behaviour (see header). */
void pkt_queue_destroy(struct pkt_queue *queue, pkt_queue_free_cb free_cb)
{
    if (queue == NULL) {
        return;
    }

    osi_mutex_lock(&queue->lock, OSI_MUTEX_MAX_TIMEOUT);
    pkt_queue_cleanup(queue, free_cb);
    osi_mutex_unlock(&queue->lock);

    osi_mutex_free(&queue->lock);
    osi_free(queue);
}
/* Remove and return the oldest packet in |queue|, or NULL if empty.
 * The initial length check is an unlocked fast path; the authoritative
 * emptiness check (STAILQ_FIRST under the mutex) decides what is returned. */
pkt_linked_item_t *pkt_queue_dequeue(struct pkt_queue *queue)
{
    if (queue == NULL || queue->length == 0) {
        return NULL;
    }
    struct pkt_linked_item *item;
    struct pkt_queue_header *header;
    osi_mutex_lock(&queue->lock, OSI_MUTEX_MAX_TIMEOUT);
    header = &queue->header;
    item = STAILQ_FIRST(header);
    if (item != NULL) {
        STAILQ_REMOVE_HEAD(header, next);
        if (queue->length > 0) {  /* defensive: never let length underflow */
            queue->length--;
        }
    }
    osi_mutex_unlock(&queue->lock);
    return item;
}
/* Append |item| to the tail of |queue| under the lock.
 * Returns false only when an argument is NULL; insertion itself cannot fail. */
bool pkt_queue_enqueue(struct pkt_queue *queue, pkt_linked_item_t *item)
{
    if (queue == NULL || item == NULL) {
        return false;
    }

    osi_mutex_lock(&queue->lock, OSI_MUTEX_MAX_TIMEOUT);
    STAILQ_INSERT_TAIL(&queue->header, item, next);
    queue->length++;
    osi_mutex_unlock(&queue->lock);

    return true;
}
/* Current number of queued packets; 0 for a NULL queue.
 * Unlocked read — the value may be stale by the time the caller uses it. */
size_t pkt_queue_length(const struct pkt_queue *queue)
{
    return (queue != NULL) ? queue->length : 0;
}
/* True when |queue| is NULL or holds no packets. */
bool pkt_queue_is_empty(const struct pkt_queue *queue)
{
    return (pkt_queue_length(queue) == 0);
}
/* Return the head packet without removing it, or NULL when the queue is
 * NULL or empty. Never blocks beyond the internal mutex. */
pkt_linked_item_t *pkt_queue_try_peek_first(struct pkt_queue *queue)
{
    if (queue == NULL) {
        return NULL;
    }

    osi_mutex_lock(&queue->lock, OSI_MUTEX_MAX_TIMEOUT);
    pkt_linked_item_t *head = STAILQ_FIRST(&queue->header);
    osi_mutex_unlock(&queue->lock);

    return head;
}

View File

@ -19,16 +19,28 @@
#include <string.h>
#include "osi/allocator.h"
#include "osi/fixed_queue.h"
#include "freertos/FreeRTOS.h"
#include "freertos/queue.h"
#include "osi/semaphore.h"
#include "osi/thread.h"
#include "osi/mutex.h"
/* A single unit of deferred work: a handler plus its opaque argument. */
struct work_item {
    osi_thread_func_t func;   /* handler executed on the worker thread */
    void *context;            /* opaque argument passed to func */
};
/* A workqueue of work_item entries backed by a FreeRTOS queue. */
struct work_queue {
    QueueHandle_t queue;   /* FreeRTOS queue storing work_item by value */
    size_t capacity;       /* depth the queue was created with */
};
struct osi_thread {
TaskHandle_t thread_handle; /*!< Store the thread object */
int thread_id; /*!< May for some OS, such as Linux */
bool stop;
uint8_t work_queue_num; /*!< Work queue number */
fixed_queue_t **work_queues; /*!< Point to queue array, and the priority inverse array index */
struct work_queue **work_queues; /*!< Point to queue array, and the priority inverse array index */
osi_sem_t work_sem;
osi_sem_t stop_sem;
};
@ -39,13 +51,98 @@ struct osi_thread_start_arg {
int error;
};
typedef struct {
osi_thread_func_t func;
void *context;
} work_item_t;
/* A reusable work descriptor that may sit in its thread's workqueue at most
 * once at a time (see osi_thread_post_event). */
struct osi_event {
    struct work_item item;   /* handler + context to run on the bound thread */
    osi_mutex_t lock;        /* guards is_queued */
    uint16_t is_queued;      /* non-zero while an instance is pending in the workqueue */
    uint16_t queue_idx;      /* workqueue index within the bound thread */
    osi_thread_t *thread;    /* thread this event is bound to; NULL until bound */
};
static const size_t DEFAULT_WORK_QUEUE_CAPACITY = 100;
/* Allocate a workqueue with room for |capacity| work items.
 * Returns NULL when capacity is 0 or any allocation fails. */
static struct work_queue *osi_work_queue_create(size_t capacity)
{
    if (capacity == 0) {
        return NULL;
    }

    struct work_queue *wq = (struct work_queue *)osi_malloc(sizeof(struct work_queue));
    if (wq == NULL) {
        return NULL;
    }

    wq->queue = xQueueCreate(capacity, sizeof(struct work_item));
    if (wq->queue == 0) {
        osi_free(wq);
        return NULL;
    }

    wq->capacity = capacity;
    return wq;
}
/* Destroy a workqueue created by osi_work_queue_create. NULL is a no-op. */
static void osi_work_queue_delete(struct work_queue *wq)
{
    if (wq == NULL) {
        return;
    }

    if (wq->queue != 0) {
        vQueueDelete(wq->queue);
    }
    wq->queue = 0;
    wq->capacity = 0;

    osi_free(wq);
}
/* Non-blocking fetch of the next work item into |*item|.
 * Returns true when an item was received, false when the queue was empty. */
static bool osi_thead_work_queue_get(struct work_queue *wq, struct work_item *item)
{
    assert (wq != NULL);
    assert (wq->queue != 0);
    assert (item != NULL);

    return (xQueueReceive(wq->queue, item, 0) == pdTRUE);
}
static bool osi_thead_work_queue_put(struct work_queue *wq, const struct work_item *item, uint32_t timeout)
{
assert (wq != NULL);
assert (wq->queue != 0);
assert (item != NULL);
bool ret = true;
if (timeout == OSI_SEM_MAX_TIMEOUT) {
if (xQueueSend(wq->queue, item, portMAX_DELAY) != pdTRUE) {
ret = false;
}
} else {
if (xQueueSend(wq->queue, item, timeout / portTICK_PERIOD_MS) != pdTRUE) {
ret = false;
}
}
return ret;
}
/* Number of work items currently queued, derived from the free-slot count
 * reported by FreeRTOS. */
static size_t osi_thead_work_queue_len(struct work_queue *wq)
{
    assert (wq != NULL);
    assert (wq->queue != 0);
    assert (wq->capacity != 0);

    size_t free_slots = (size_t)uxQueueSpacesAvailable(wq->queue);
    if (free_slots <= wq->capacity) {
        return wq->capacity - free_slots;
    }

    /* Free slots exceeding capacity means bookkeeping is corrupted. */
    assert (0);
    return 0;
}
static void osi_thread_run(void *arg)
{
struct osi_thread_start_arg *start = (struct osi_thread_start_arg *)arg;
@ -62,11 +159,10 @@ static void osi_thread_run(void *arg)
break;
}
struct work_item item;
while (!thread->stop && idx < thread->work_queue_num) {
work_item_t *item = fixed_queue_dequeue(thread->work_queues[idx], 0);
if (item) {
item->func(item->context);
osi_free(item);
if (osi_thead_work_queue_get(thread->work_queues[idx], &item) == true) {
item.func(item.context);
idx = 0;
continue;
} else {
@ -107,14 +203,14 @@ static void osi_thread_stop(osi_thread_t *thread)
}
// In Linux, the stack_size, priority and core may not be set here; the code will ignore these arguments
osi_thread_t *osi_thread_create(const char *name, size_t stack_size, int priority, osi_thread_core_t core, uint8_t work_queue_num)
osi_thread_t *osi_thread_create(const char *name, size_t stack_size, int priority, osi_thread_core_t core, uint8_t work_queue_num, const size_t work_queue_len[])
{
int ret;
struct osi_thread_start_arg start_arg = {0};
if (stack_size <= 0 ||
core < OSI_THREAD_CORE_0 || core > OSI_THREAD_CORE_AFFINITY ||
work_queue_num <= 0) {
work_queue_num <= 0 || work_queue_len == NULL) {
return NULL;
}
@ -125,13 +221,14 @@ osi_thread_t *osi_thread_create(const char *name, size_t stack_size, int priorit
thread->stop = false;
thread->work_queue_num = work_queue_num;
thread->work_queues = (fixed_queue_t **)osi_malloc(sizeof(fixed_queue_t *) * work_queue_num);
thread->work_queues = (struct work_queue **)osi_malloc(sizeof(struct work_queue *) * work_queue_num);
if (thread->work_queues == NULL) {
goto _err;
}
for (int i = 0; i < thread->work_queue_num; i++) {
thread->work_queues[i] = fixed_queue_new(DEFAULT_WORK_QUEUE_CAPACITY);
size_t queue_len = work_queue_len[i] ? work_queue_len[i] : DEFAULT_WORK_QUEUE_CAPACITY;
thread->work_queues[i] = osi_work_queue_create(queue_len);
if (thread->work_queues[i] == NULL) {
goto _err;
}
@ -175,12 +272,14 @@ _err:
for (int i = 0; i < thread->work_queue_num; i++) {
if (thread->work_queues[i]) {
fixed_queue_free(thread->work_queues[i], osi_free_func);
osi_work_queue_delete(thread->work_queues[i]);
}
thread->work_queues[i] = NULL;
}
if (thread->work_queues) {
osi_free(thread->work_queues);
thread->work_queues = NULL;
}
if (thread->work_sem) {
@ -206,12 +305,14 @@ void osi_thread_free(osi_thread_t *thread)
for (int i = 0; i < thread->work_queue_num; i++) {
if (thread->work_queues[i]) {
fixed_queue_free(thread->work_queues[i], osi_free_func);
osi_work_queue_delete(thread->work_queues[i]);
thread->work_queues[i] = NULL;
}
}
if (thread->work_queues) {
osi_free(thread->work_queues);
thread->work_queues = NULL;
}
if (thread->work_sem) {
@ -235,15 +336,12 @@ bool osi_thread_post(osi_thread_t *thread, osi_thread_func_t func, void *context
return false;
}
work_item_t *item = (work_item_t *)osi_malloc(sizeof(work_item_t));
if (item == NULL) {
return false;
}
item->func = func;
item->context = context;
struct work_item item;
if (fixed_queue_enqueue(thread->work_queues[queue_idx], item, timeout) == false) {
osi_free(item);
item.func = func;
item.context = context;
if (osi_thead_work_queue_put(thread->work_queues[queue_idx], &item, timeout) == false) {
return false;
}
@ -273,5 +371,83 @@ int osi_thread_queue_wait_size(osi_thread_t *thread, int wq_idx)
return -1;
}
return fixed_queue_length(thread->work_queues[wq_idx]);
return (int)(osi_thead_work_queue_len(thread->work_queues[wq_idx]));
}
/* Allocate an osi_event wrapping |func| and |context|.
 * Returns NULL when memory or mutex creation fails. */
struct osi_event *osi_event_create(osi_thread_func_t func, void *context)
{
    struct osi_event *event = osi_calloc(sizeof(struct osi_event));
    if (event == NULL) {
        return NULL;
    }

    if (osi_mutex_new(&event->lock) != 0) {
        osi_free(event);
        return NULL;
    }

    event->item.func = func;
    event->item.context = context;
    return event;
}
/* Destroy an osi_event created by osi_event_create. NULL is a no-op. */
void osi_event_delete(struct osi_event* event)
{
    if (event == NULL) {
        return;
    }

    osi_mutex_free(&event->lock);
    /* Poison the struct before freeing to help catch use-after-free. */
    memset(event, 0, sizeof(struct osi_event));
    osi_free(event);
}
/* Bind |event| to workqueue |queue_idx| of |thread|. An event can be bound
 * only once; rebinding attempts return false.
 * Returns true on success, false on any invalid argument. */
bool osi_event_bind(struct osi_event* event, osi_thread_t *thread, int queue_idx)
{
    if (event == NULL || event->thread != NULL) {
        return false;
    }

    /* BUGFIX: also reject negative indices — queue_idx is a signed int but
     * is stored in a uint16_t, so a negative value would silently wrap and
     * pass the upper-bound check. */
    if (thread == NULL || queue_idx < 0 || queue_idx >= thread->work_queue_num) {
        return false;
    }

    event->thread = thread;
    event->queue_idx = queue_idx;
    return true;
}
/* Trampoline run on the worker thread for posted osi_events.
 * Clears is_queued BEFORE invoking the user handler, so a new post can be
 * accepted while the handler is still running; that data is then processed
 * by the next invocation. */
static void osi_thread_generic_event_handler(void *context)
{
    struct osi_event *event = (struct osi_event *)context;

    if (event != NULL && event->item.func != NULL) {
        osi_mutex_lock(&event->lock, OSI_MUTEX_MAX_TIMEOUT);
        event->is_queued = 0;  /* allow re-posting from this point on */
        osi_mutex_unlock(&event->lock);
        event->item.func(event->item.context);
    }
}
/* Post |event|'s work item to its bound thread's workqueue, unless an
 * instance of this event is already pending there. The is_queued counter,
 * updated under event->lock, ensures that of several racing posters only
 * the one that bumps the counter from 0 to 1 actually enqueues the work.
 * Returns true only when this call enqueued the work; false when the event
 * was already pending or the underlying post failed (e.g. queue full). */
bool osi_thread_post_event(struct osi_event *event, uint32_t timeout)
{
    assert(event != NULL && event->thread != NULL);
    assert(event->queue_idx >= 0 && event->queue_idx < event->thread->work_queue_num);
    bool ret = false;
    if (event->is_queued == 0) {  /* unlocked fast-path check; re-validated below */
        uint16_t acquire_cnt = 0;
        osi_mutex_lock(&event->lock, OSI_MUTEX_MAX_TIMEOUT);
        event->is_queued += 1;
        acquire_cnt = event->is_queued;
        osi_mutex_unlock(&event->lock);
        if (acquire_cnt == 1) {  /* this caller won the race to post */
            ret = osi_thread_post(event->thread, osi_thread_generic_event_handler, event, event->queue_idx, timeout);
            if (!ret) {
                // clear "is_queued" when post failure, to allow for following event posts
                osi_mutex_lock(&event->lock, OSI_MUTEX_MAX_TIMEOUT);
                event->is_queued = 0;
                osi_mutex_unlock(&event->lock);
            }
        }
    }
    return ret;
}

View File

@ -74,9 +74,6 @@ static void btc_deinit_bluetooth(void)
#if BTA_DYNAMIC_MEMORY
xSemaphoreTake(deinit_semaphore, BTA_DISABLE_DELAY / portTICK_PERIOD_MS);
#endif /* #if BTA_DYNAMIC_MEMORY */
#if (BLE_INCLUDED == TRUE)
btc_gap_ble_deinit();
#endif ///BLE_INCLUDED == TRUE
bta_dm_sm_deinit();
#if (GATTC_INCLUDED)
bta_gattc_deinit();

View File

@ -16,7 +16,6 @@
#include "common/bt_defs.h"
#include "osi/allocator.h"
#include "osi/mutex.h"
#include "osi/semaphore.h"
#include "osi/thread.h"
#include "osi/fixed_queue.h"
#include "stack/a2d_api.h"
@ -38,8 +37,6 @@
#if (BTC_AV_SINK_INCLUDED == TRUE)
extern osi_thread_t *btc_thread;
/*****************************************************************************
** Constants
*****************************************************************************/
@ -82,6 +79,8 @@ enum {
#define MAX_OUTPUT_A2DP_SNK_FRAME_QUEUE_SZ (25)
#define JITTER_BUFFER_WATER_LEVEL (5)
#define BTC_A2DP_SNK_DATA_QUEUE_IDX (1)
typedef struct {
uint32_t sig;
void *param;
@ -97,7 +96,7 @@ typedef struct {
typedef struct {
BOOLEAN rx_flush; /* discards any incoming data when true */
UINT8 channel_count;
osi_sem_t post_sem;
struct osi_event *data_ready_event;
fixed_queue_t *RxSbcQ;
UINT32 sample_rate;
} tBTC_A2DP_SINK_CB;
@ -214,7 +213,7 @@ bool btc_a2dp_sink_startup(void)
APPL_TRACE_EVENT("## A2DP SINK START MEDIA THREAD ##");
a2dp_sink_local_param.btc_aa_snk_task_hdl = btc_thread;
a2dp_sink_local_param.btc_aa_snk_task_hdl = btc_get_current_thread();
if (btc_a2dp_sink_ctrl(BTC_MEDIA_TASK_SINK_INIT, NULL) == false) {
goto error_exit;
@ -294,11 +293,6 @@ void btc_a2dp_sink_on_suspended(tBTA_AV_SUSPEND *p_av)
return;
}
static void btc_a2dp_sink_data_post(void)
{
osi_thread_post(a2dp_sink_local_param.btc_aa_snk_task_hdl, btc_a2dp_sink_data_ready, NULL, 1, OSI_THREAD_MAX_TIMEOUT);
}
/*******************************************************************************
**
** Function btc_a2dp_sink_clear_track
@ -356,7 +350,6 @@ static void btc_a2dp_sink_data_ready(UNUSED_ATTR void *context)
tBT_SBC_HDR *p_msg;
int nb_of_msgs_to_process = 0;
osi_sem_give(&a2dp_sink_local_param.btc_aa_snk_cb.post_sem);
if (fixed_queue_is_empty(a2dp_sink_local_param.btc_aa_snk_cb.RxSbcQ)) {
APPL_TRACE_DEBUG(" QUE EMPTY ");
} else {
@ -380,6 +373,10 @@ static void btc_a2dp_sink_data_ready(UNUSED_ATTR void *context)
nb_of_msgs_to_process--;
}
APPL_TRACE_DEBUG(" Process Frames - ");
if (!fixed_queue_is_empty(a2dp_sink_local_param.btc_aa_snk_cb.RxSbcQ)) {
osi_thread_post_event(a2dp_sink_local_param.btc_aa_snk_cb.data_ready_event, OSI_THREAD_MAX_TIMEOUT);
}
}
}
@ -691,9 +688,7 @@ UINT8 btc_a2dp_sink_enque_buf(BT_HDR *p_pkt)
APPL_TRACE_VERBOSE("btc_a2dp_sink_enque_buf %d + \n", p_msg->num_frames_to_be_processed);
fixed_queue_enqueue(a2dp_sink_local_param.btc_aa_snk_cb.RxSbcQ, p_msg, FIXED_QUEUE_MAX_TIMEOUT);
if (fixed_queue_length(a2dp_sink_local_param.btc_aa_snk_cb.RxSbcQ) >= JITTER_BUFFER_WATER_LEVEL) {
if (osi_sem_take(&a2dp_sink_local_param.btc_aa_snk_cb.post_sem, 0) == 0) {
btc_a2dp_sink_data_post();
}
osi_thread_post_event(a2dp_sink_local_param.btc_aa_snk_cb.data_ready_event, OSI_THREAD_MAX_TIMEOUT);
}
} else {
/* let caller deal with a failed allocation */
@ -729,9 +724,12 @@ static void btc_a2dp_sink_thread_init(UNUSED_ATTR void *context)
memset(&a2dp_sink_local_param.btc_aa_snk_cb, 0, sizeof(a2dp_sink_local_param.btc_aa_snk_cb));
btc_a2dp_sink_state = BTC_A2DP_SINK_STATE_ON;
if (!a2dp_sink_local_param.btc_aa_snk_cb.post_sem) {
osi_sem_new(&a2dp_sink_local_param.btc_aa_snk_cb.post_sem, 1, 1);
}
struct osi_event *data_event = osi_event_create(btc_a2dp_sink_data_ready, NULL);
assert (data_event != NULL);
osi_event_bind(data_event, a2dp_sink_local_param.btc_aa_snk_task_hdl, BTC_A2DP_SNK_DATA_QUEUE_IDX);
a2dp_sink_local_param.btc_aa_snk_cb.data_ready_event = data_event;
a2dp_sink_local_param.btc_aa_snk_cb.RxSbcQ = fixed_queue_new(QUEUE_SIZE_MAX);
btc_a2dp_control_init();
@ -749,10 +747,8 @@ static void btc_a2dp_sink_thread_cleanup(UNUSED_ATTR void *context)
a2dp_sink_local_param.btc_aa_snk_cb.RxSbcQ = NULL;
if (a2dp_sink_local_param.btc_aa_snk_cb.post_sem) {
osi_sem_free(&a2dp_sink_local_param.btc_aa_snk_cb.post_sem);
a2dp_sink_local_param.btc_aa_snk_cb.post_sem = NULL;
}
osi_event_delete(a2dp_sink_local_param.btc_aa_snk_cb.data_ready_event);
a2dp_sink_local_param.btc_aa_snk_cb.data_ready_event = NULL;
}
#endif /* BTC_AV_SINK_INCLUDED */

View File

@ -42,8 +42,6 @@
#if BTC_AV_SRC_INCLUDED
extern osi_thread_t *btc_thread;
/*****************************************************************************
** Constants
*****************************************************************************/
@ -118,6 +116,8 @@ enum {
#define MAX_OUTPUT_A2DP_FRAME_QUEUE_SZ (5)
#define MAX_OUTPUT_A2DP_SRC_FRAME_QUEUE_SZ (27) // 18 for 20ms tick
#define BTC_A2DP_SRC_DATA_QUEUE_IDX (1)
typedef struct {
uint32_t sig;
void *param;
@ -154,6 +154,7 @@ typedef struct {
tBTC_AV_MEDIA_FEEDINGS media_feeding;
SBC_ENC_PARAMS encoder;
osi_alarm_t *media_alarm;
struct osi_event *poll_data;
} tBTC_A2DP_SOURCE_CB;
typedef struct {
@ -283,7 +284,7 @@ bool btc_a2dp_source_startup(void)
APPL_TRACE_EVENT("## A2DP SOURCE START MEDIA THREAD ##");
a2dp_source_local_param.btc_aa_src_task_hdl = btc_thread;
a2dp_source_local_param.btc_aa_src_task_hdl = btc_get_current_thread();
if (btc_a2dp_source_ctrl(BTC_MEDIA_TASK_INIT, NULL) == false) {
goto error_exit;
@ -1532,7 +1533,7 @@ static void btc_a2dp_source_aa_stop_tx(void)
static void btc_a2dp_source_alarm_cb(UNUSED_ATTR void *context)
{
if (a2dp_source_local_param.btc_aa_src_task_hdl) {
osi_thread_post(a2dp_source_local_param.btc_aa_src_task_hdl, btc_a2dp_source_handle_timer, NULL, 1, OSI_THREAD_MAX_TIMEOUT);
osi_thread_post_event(a2dp_source_local_param.btc_aa_src_cb.poll_data, OSI_THREAD_MAX_TIMEOUT);
} else {
APPL_TRACE_DEBUG("[%s] A2DP ALREADY FREED", __func__);
btc_a2dp_source_aa_stop_tx();
@ -1587,6 +1588,11 @@ static void btc_a2dp_source_thread_init(UNUSED_ATTR void *context)
btc_a2dp_source_state = BTC_A2DP_SOURCE_STATE_ON;
struct osi_event *poll_data = osi_event_create(btc_a2dp_source_handle_timer, NULL);
assert(poll_data != NULL);
osi_event_bind(poll_data, a2dp_source_local_param.btc_aa_src_task_hdl, BTC_A2DP_SRC_DATA_QUEUE_IDX);
a2dp_source_local_param.btc_aa_src_cb.poll_data = poll_data;
a2dp_source_local_param.btc_aa_src_cb.TxAaQ = fixed_queue_new(QUEUE_SIZE_MAX);
btc_a2dp_control_init();
@ -1602,6 +1608,9 @@ static void btc_a2dp_source_thread_cleanup(UNUSED_ATTR void *context)
fixed_queue_free(a2dp_source_local_param.btc_aa_src_cb.TxAaQ, osi_free_func);
a2dp_source_local_param.btc_aa_src_cb.TxAaQ = NULL;
osi_event_delete(a2dp_source_local_param.btc_aa_src_cb.poll_data);
a2dp_source_local_param.btc_aa_src_cb.poll_data = NULL;
}
#endif /* BTC_AV_INCLUDED */

View File

@ -21,6 +21,8 @@
#include "btc/btc_dm.h"
#include "btc/btc_util.h"
#include "osi/mutex.h"
#include "osi/thread.h"
#include "osi/pkt_queue.h"
#include "esp_bt.h"
#if (BLE_INCLUDED == TRUE)
@ -48,6 +50,19 @@ static uint16_t btc_adv_list_count = 0;
#define BTC_ADV_LIST_MAX_COUNT 200
#endif
#define BTC_GAP_BLE_ADV_RPT_QUEUE_IDX (1)
#define BTC_GAP_BLE_ADV_RPT_BATCH_SIZE (10)
#define BTC_GAP_BLE_ADV_RPT_QUEUE_LEN_MAX (200)
#if (BLE_42_FEATURE_SUPPORT == TRUE)
typedef struct {
struct pkt_queue *adv_rpt_queue;
struct osi_event *adv_rpt_ready;
} btc_gap_ble_env_t;
static btc_gap_ble_env_t btc_gap_ble_env;
#endif
static inline void btc_gap_ble_cb_to_app(esp_gap_ble_cb_event_t event, esp_ble_gap_cb_param_t *param)
{
esp_gap_ble_cb_t btc_gap_ble_cb = (esp_gap_ble_cb_t)btc_profile_cb_get(BTC_PID_GAP_BLE);
@ -548,8 +563,82 @@ static void btc_ble_set_scan_params(esp_ble_scan_params_t *scan_params, tBLE_SCA
}
}
/*
 * Work item for the BTC thread: drain up to BTC_GAP_BLE_ADV_RPT_BATCH_SIZE
 * queued adv reports to the application callback, then re-post itself when
 * reports remain, so other work items can interleave between batches.
 */
static void btc_gap_ble_adv_pkt_handler(void *arg)
{
    btc_gap_ble_env_t *env = &btc_gap_ble_env;
    size_t batch = pkt_queue_length(env->adv_rpt_queue);

    if (batch > BTC_GAP_BLE_ADV_RPT_BATCH_SIZE) {
        batch = BTC_GAP_BLE_ADV_RPT_BATCH_SIZE;
    }

    while (batch-- > 0) {
        pkt_linked_item_t *item = pkt_queue_dequeue(env->adv_rpt_queue);
        if (item == NULL) {
            continue;
        }
        /* the linked packet payload is the callback parameter itself */
        btc_gap_ble_cb_to_app(ESP_GAP_BLE_SCAN_RESULT_EVT,
                              (esp_ble_gap_cb_param_t *)(item->data));
        osi_free(item);
    }

    /* reports left over (or newly arrived): schedule another pass */
    if (pkt_queue_length(env->adv_rpt_queue) != 0) {
        osi_thread_post_event(env->adv_rpt_ready, OSI_THREAD_MAX_TIMEOUT);
    }
}
/*
 * BTA search-callback context: package a single inquiry result (adv
 * report) into a linked packet and hand it to the BTC thread through
 * adv_rpt_queue. Reports are silently dropped when the queue is full
 * or allocation fails.
 */
static void btc_process_adv_rpt_pkt(tBTA_DM_SEARCH_EVT event, tBTA_DM_SEARCH *p_data)
{
#if SCAN_QUEUE_CONGEST_CHECK
    if (btc_check_queue_is_congest()) {
        BTC_TRACE_DEBUG("BtcQueue is congested");
        if (btc_get_adv_list_length() > BTC_ADV_LIST_MAX_LENGTH || btc_adv_list_count > BTC_ADV_LIST_MAX_COUNT) {
            btc_adv_list_refresh();
            btc_adv_list_count = 0;
        }
        if (btc_check_adv_list(p_data->inq_res.bd_addr, p_data->inq_res.ble_addr_type)) {
            return;
        }
    }
    btc_adv_list_count++;
#endif

    /* flow control: drop ADV packets if the queue is above threshold */
    btc_gap_ble_env_t *env = &btc_gap_ble_env;
    if (pkt_queue_length(env->adv_rpt_queue) >= BTC_GAP_BLE_ADV_RPT_QUEUE_LEN_MAX) {
        return;
    }

    pkt_linked_item_t *item = osi_calloc(BT_PKT_LINKED_HDR_SIZE + sizeof(esp_ble_gap_cb_param_t));
    if (item == NULL) {
        return;
    }

    /* copy the BTA inquiry result into the app-facing scan result param */
    struct ble_scan_result_evt_param *rst = (struct ble_scan_result_evt_param *)item->data;
    rst->search_evt    = event;
    bdcpy(rst->bda, p_data->inq_res.bd_addr);
    rst->dev_type      = p_data->inq_res.device_type;
    rst->rssi          = p_data->inq_res.rssi;
    rst->ble_addr_type = p_data->inq_res.ble_addr_type;
    rst->ble_evt_type  = p_data->inq_res.ble_evt_type;
    rst->flag          = p_data->inq_res.flag;
    rst->num_resps     = 1;
    rst->adv_data_len  = p_data->inq_res.adv_data_len;
    rst->scan_rsp_len  = p_data->inq_res.scan_rsp_len;
    memcpy(rst->ble_adv, p_data->inq_res.p_eir, sizeof(rst->ble_adv));

    pkt_queue_enqueue(env->adv_rpt_queue, item);
    osi_thread_post_event(env->adv_rpt_ready, OSI_THREAD_MAX_TIMEOUT);
}
static void btc_search_callback(tBTA_DM_SEARCH_EVT event, tBTA_DM_SEARCH *p_data)
{
if (event == BTA_DM_INQ_RES_EVT) {
btc_process_adv_rpt_pkt(event, p_data);
return;
}
esp_ble_gap_cb_param_t param;
btc_msg_t msg = {0};
@ -559,32 +648,8 @@ static void btc_search_callback(tBTA_DM_SEARCH_EVT event, tBTA_DM_SEARCH *p_data
param.scan_rst.search_evt = event;
switch (event) {
case BTA_DM_INQ_RES_EVT: {
#if SCAN_QUEUE_CONGEST_CHECK
if(btc_check_queue_is_congest()) {
BTC_TRACE_DEBUG("BtcQueue is congested");
if(btc_get_adv_list_length() > BTC_ADV_LIST_MAX_LENGTH || btc_adv_list_count > BTC_ADV_LIST_MAX_COUNT) {
btc_adv_list_refresh();
btc_adv_list_count = 0;
}
if(btc_check_adv_list(p_data->inq_res.bd_addr, p_data->inq_res.ble_addr_type)) {
return;
}
}
btc_adv_list_count ++;
#endif
bdcpy(param.scan_rst.bda, p_data->inq_res.bd_addr);
param.scan_rst.dev_type = p_data->inq_res.device_type;
param.scan_rst.rssi = p_data->inq_res.rssi;
param.scan_rst.ble_addr_type = p_data->inq_res.ble_addr_type;
param.scan_rst.ble_evt_type = p_data->inq_res.ble_evt_type;
param.scan_rst.flag = p_data->inq_res.flag;
param.scan_rst.num_resps = 1;
param.scan_rst.adv_data_len = p_data->inq_res.adv_data_len;
param.scan_rst.scan_rsp_len = p_data->inq_res.scan_rsp_len;
memcpy(param.scan_rst.ble_adv, p_data->inq_res.p_eir, sizeof(param.scan_rst.ble_adv));
case BTA_DM_INQ_RES_EVT:
break;
}
case BTA_DM_INQ_CMPL_EVT: {
param.scan_rst.num_resps = p_data->inq_cmpl.num_resps;
BTC_TRACE_DEBUG("%s BLE observe complete. Num Resp %d\n", __FUNCTION__, p_data->inq_cmpl.num_resps);
@ -1841,9 +1906,31 @@ void btc_gap_callback_init(void)
#endif // #if (BLE_50_FEATURE_SUPPORT == TRUE)
}
/*
 * Initialize GAP-BLE module state: create the adv-report queue and the
 * osi_event used to wake the BTC thread, then bind the event to the
 * dedicated adv-report workqueue. Always returns true.
 */
bool btc_gap_ble_init(void)
{
#if (BLE_42_FEATURE_SUPPORT == TRUE)
    btc_gap_ble_env_t *env = &btc_gap_ble_env;

    env->adv_rpt_queue = pkt_queue_create();
    assert(env->adv_rpt_queue != NULL);

    env->adv_rpt_ready = osi_event_create(btc_gap_ble_adv_pkt_handler, NULL);
    assert(env->adv_rpt_ready != NULL);
    osi_event_bind(env->adv_rpt_ready, btc_get_current_thread(), BTC_GAP_BLE_ADV_RPT_QUEUE_IDX);
#endif
    return true;
}
void btc_gap_ble_deinit(void)
{
#if (BLE_42_FEATURE_SUPPORT == TRUE)
#if (BLE_42_FEATURE_SUPPORT == TRUE)
btc_gap_ble_env_t *p_env = &btc_gap_ble_env;
osi_event_delete(p_env->adv_rpt_ready);
p_env->adv_rpt_ready = NULL;
pkt_queue_destroy(p_env->adv_rpt_queue, NULL);
p_env->adv_rpt_queue = NULL;
btc_cleanup_adv_data(&gl_bta_adv_data);
btc_cleanup_adv_data(&gl_bta_scan_rsp_data);
#endif // #if (BLE_42_FEATURE_SUPPORT == TRUE)

View File

@ -345,6 +345,7 @@ void btc_gap_ble_arg_deep_free(btc_msg_t *msg);
void btc_gap_ble_cb_deep_free(btc_msg_t *msg);
void btc_gap_ble_cb_deep_copy(btc_msg_t *msg, void *p_dest, void *p_src);
void btc_gap_callback_init(void);
bool btc_gap_ble_init(void);
void btc_gap_ble_deinit(void);
void btc_adv_list_init(void);
void btc_adv_list_deinit(void);

View File

@ -19,24 +19,38 @@
#include "common/bt_defs.h"
#include "common/bt_trace.h"
#include "stack/bt_types.h"
#include "osi/fixed_queue.h"
#include "hci/hci_hal.h"
#include "hci/hci_internals.h"
#include "hci/hci_layer.h"
#include "osi/thread.h"
#include "osi/pkt_queue.h"
#if (BLE_ADV_REPORT_FLOW_CONTROL == TRUE)
#include "osi/mutex.h"
#include "osi/alarm.h"
#endif
#include "esp_bt.h"
#include "stack/hcimsgs.h"
#if SOC_ESP_NIMBLE_CONTROLLER
#include "nimble/ble_hci_trans.h"
#endif
#if (C2H_FLOW_CONTROL_INCLUDED == TRUE)
#include "l2c_int.h"
#endif ///C2H_FLOW_CONTROL_INCLUDED == TRUE
#include "stack/hcimsgs.h"
#if SOC_ESP_NIMBLE_CONTROLLER
#include "nimble/ble_hci_trans.h"
#endif
#define HCI_HAL_SERIAL_BUFFER_SIZE 1026
#define HCI_BLE_EVENT 0x3e
#define PACKET_TYPE_TO_INBOUND_INDEX(type) ((type) - 2)
#define PACKET_TYPE_TO_INDEX(type) ((type) - 1)
#define HCI_UPSTREAM_DATA_QUEUE_IDX (1)
#if (BLE_ADV_REPORT_FLOW_CONTROL == TRUE)
#define HCI_BLE_ADV_MIN_CREDITS_TO_RELEASE (10)
#define HCI_ADV_FLOW_MONITOR_PERIOD_MS (500)
#else
#define HCI_HAL_BLE_ADV_RPT_QUEUE_LEN_MAX (200)
#endif
extern bool BTU_check_queue_is_congest(void);
@ -55,64 +69,102 @@ static const uint16_t outbound_event_types[] = {
};
typedef struct {
size_t buffer_size;
fixed_queue_t *rx_q;
uint16_t adv_free_num;
struct pkt_queue *adv_rpt_q;
#if (BLE_ADV_REPORT_FLOW_CONTROL == TRUE)
osi_mutex_t adv_flow_lock;
osi_alarm_t *adv_flow_monitor;
int adv_credits;
int adv_credits_to_release;
pkt_linked_item_t *adv_fc_cmd_buf;
bool cmd_buf_in_use;
#endif
hci_hal_callbacks_t *callbacks;
osi_thread_t *hci_h4_thread;
struct osi_event *upstream_data_ready;
} hci_hal_env_t;
static hci_hal_env_t hci_hal_env;
static const hci_hal_t interface;
static const hci_hal_callbacks_t *callbacks;
static const esp_vhci_host_callback_t vhci_host_cb;
static osi_thread_t *hci_h4_thread;
static void host_send_pkt_available_cb(void);
static int host_recv_pkt_cb(uint8_t *data, uint16_t len);
static void hci_hal_h4_hdl_rx_packet(BT_HDR *packet);
static void hci_hal_h4_hdl_rx_adv_rpt(pkt_linked_item_t *linked_pkt);
static void hci_upstream_data_handler(void *arg);
static bool hci_upstream_data_post(uint32_t timeout);
static void hci_hal_h4_rx_handler(void *arg);
static void event_uart_has_bytes(fixed_queue_t *queue);
#if (BLE_ADV_REPORT_FLOW_CONTROL == TRUE)
static void hci_adv_flow_monitor(void *context);
static void hci_adv_flow_cmd_free_cb(pkt_linked_item_t *linked_pkt);
#endif
static void hci_hal_env_init(
size_t buffer_size,
size_t max_buffer_count)
/*
 * Initialize the HCI HAL environment: record the upper-layer callbacks and
 * host task, create the upstream RX queue and the adv-report packet queue,
 * and bind the upstream-data osi_event to the HCI task workqueue.
 *
 * NOTE(review): this span is a rendered diff; removed pre-change lines are
 * interleaved with the replacement code (see inline notes). Confirm against
 * the applied tree before building.
 */
static bool hci_hal_env_init(const hci_hal_callbacks_t *upper_callbacks, osi_thread_t *task_thread)
{
/* NOTE(review): the next two asserts and the buffer_size/adv_free_num
 * assignments reference identifiers that no longer exist in the new
 * signature -- leftover removed lines from the diff. */
assert(buffer_size > 0);
assert(max_buffer_count > 0);
assert(upper_callbacks != NULL);
assert(task_thread != NULL);
hci_hal_env.buffer_size = buffer_size;
hci_hal_env.adv_free_num = 0;
hci_hal_env.hci_h4_thread = task_thread;
hci_hal_env.callbacks = (hci_hal_callbacks_t *)upper_callbacks;
/* NOTE(review): bounded rx-queue creation below is the removed (old)
 * variant; the QUEUE_SIZE_MAX variant further down is the current one. */
hci_hal_env.rx_q = fixed_queue_new(max_buffer_count);
if (hci_hal_env.rx_q) {
fixed_queue_register_dequeue(hci_hal_env.rx_q, event_uart_has_bytes);
} else {
HCI_TRACE_ERROR("%s unable to create rx queue.\n", __func__);
}
#if (BLE_ADV_REPORT_FLOW_CONTROL == TRUE)
/* preallocate a static command buffer so credit-release commands can
 * still be sent when the heap is exhausted */
hci_hal_env.adv_fc_cmd_buf = osi_calloc(HCI_CMD_LINKED_BUF_SIZE(HCIC_PARAM_SIZE_BLE_UPDATE_ADV_FLOW_CONTROL));
assert(hci_hal_env.adv_fc_cmd_buf != NULL);
osi_mutex_new(&hci_hal_env.adv_flow_lock);
osi_mutex_lock(&hci_hal_env.adv_flow_lock, OSI_MUTEX_MAX_TIMEOUT);
hci_hal_env.adv_credits = BLE_ADV_REPORT_FLOW_CONTROL_NUM;
hci_hal_env.adv_credits_to_release = 0;
hci_hal_env.cmd_buf_in_use = false;
osi_mutex_unlock(&hci_hal_env.adv_flow_lock);
/* periodic alarm that force-releases credits sitting below the batch
 * threshold */
hci_hal_env.adv_flow_monitor = osi_alarm_new("adv_fc_mon", hci_adv_flow_monitor, NULL, HCI_ADV_FLOW_MONITOR_PERIOD_MS);
assert (hci_hal_env.adv_flow_monitor != NULL);
#endif
/* NOTE(review): bare `return;` is a removed-line artifact of the old
 * void-returning implementation */
return;
hci_hal_env.rx_q = fixed_queue_new(QUEUE_SIZE_MAX);
assert(hci_hal_env.rx_q != NULL);
hci_hal_env.adv_rpt_q = pkt_queue_create();
assert(hci_hal_env.adv_rpt_q != NULL);
struct osi_event *event = osi_event_create(hci_upstream_data_handler, NULL);
assert(event != NULL);
hci_hal_env.upstream_data_ready = event;
osi_event_bind(hci_hal_env.upstream_data_ready, hci_hal_env.hci_h4_thread, HCI_UPSTREAM_DATA_QUEUE_IDX);
return true;
}
/*
 * Tear down the HCI HAL environment; inverse of hci_hal_env_init().
 * Frees queues/events first, then the adv flow-control resources, and
 * finally clears the entire env struct.
 */
static void hci_hal_env_deinit(void)
{
fixed_queue_free(hci_hal_env.rx_q, osi_free_func);
hci_hal_env.rx_q = NULL;
pkt_queue_destroy(hci_hal_env.adv_rpt_q, NULL);
hci_hal_env.adv_rpt_q = NULL;
osi_event_delete(hci_hal_env.upstream_data_ready);
hci_hal_env.upstream_data_ready = NULL;
#if (BLE_ADV_REPORT_FLOW_CONTROL == TRUE)
/* mark the static command buffer busy so no new flow-control command is
 * built while it is being freed */
hci_hal_env.cmd_buf_in_use = true;
osi_alarm_cancel(hci_hal_env.adv_flow_monitor);
osi_alarm_free(hci_hal_env.adv_flow_monitor);
hci_hal_env.adv_flow_monitor = NULL;
osi_mutex_free(&hci_hal_env.adv_flow_lock);
osi_free(hci_hal_env.adv_fc_cmd_buf);
hci_hal_env.adv_fc_cmd_buf = NULL;
#endif
/* NOTE(review): redundant with the memset below -- likely a leftover
 * removed line from the rendered diff */
hci_hal_env.hci_h4_thread = NULL;
memset(&hci_hal_env, 0, sizeof(hci_hal_env_t));
}
static bool hal_open(const hci_hal_callbacks_t *upper_callbacks, void *task_thread)
{
assert(upper_callbacks != NULL);
assert(task_thread != NULL);
callbacks = upper_callbacks;
#if (BLE_ADV_REPORT_FLOW_CONTROL == TRUE)
hci_hal_env_init(HCI_HAL_SERIAL_BUFFER_SIZE, BLE_ADV_REPORT_FLOW_CONTROL_NUM + L2CAP_HOST_FC_ACL_BUFS + QUEUE_SIZE_MAX); // adv flow control num + ACL flow control num + hci cmd numeber
#else
hci_hal_env_init(HCI_HAL_SERIAL_BUFFER_SIZE, QUEUE_SIZE_MAX);
#endif
hci_h4_thread = (osi_thread_t *)task_thread;
hci_hal_env_init(upper_callbacks, (osi_thread_t *)task_thread);
//register vhci host cb
if (esp_vhci_host_register_callback(&vhci_host_cb) != ESP_OK) {
@ -125,8 +177,6 @@ static bool hal_open(const hci_hal_callbacks_t *upper_callbacks, void *task_thre
/* hci_hal close entry point: release all HAL resources. */
static void hal_close(void)
{
hci_hal_env_deinit();
/* NOTE(review): the file-scope `hci_h4_thread` pointer was folded into
 * hci_hal_env by this change; this line is a removed-line artifact of
 * the rendered diff. */
hci_h4_thread = NULL;
}
/**
@ -166,14 +216,40 @@ static uint16_t transmit_data(serial_data_type_t type,
}
// Internal functions
static void hci_hal_h4_rx_handler(void *arg)
/*
 * Workqueue handler for the upstream (controller -> host) data path.
 * Drains all currently-queued HCI packets, then all queued adv reports,
 * and re-posts itself if more data arrived while draining.
 */
static void hci_upstream_data_handler(void *arg)
{
/* NOTE(review): leftover removed line from the old handler */
fixed_queue_process(hci_hal_env.rx_q);
fixed_queue_t *rx_q = hci_hal_env.rx_q;
struct pkt_queue *adv_rpt_q = hci_hal_env.adv_rpt_q;
size_t pkts_to_process;
/* regular HCI events / ACL data first */
do {
pkts_to_process = fixed_queue_length(rx_q);
for (size_t i = 0; i < pkts_to_process; i++) {
BT_HDR *packet = fixed_queue_dequeue(rx_q, 0);
if (packet != NULL) {
hci_hal_h4_hdl_rx_packet(packet);
}
}
} while (0);
/* then BLE adv reports, kept on a separate queue for flow control */
do {
pkts_to_process = pkt_queue_length(adv_rpt_q);
for (size_t i = 0; i < pkts_to_process; i++) {
pkt_linked_item_t *linked_pkt = pkt_queue_dequeue(adv_rpt_q);
if (linked_pkt != NULL) {
hci_hal_h4_hdl_rx_adv_rpt(linked_pkt);
}
}
} while (0);
/* anything enqueued while we were draining: schedule another pass */
if (!fixed_queue_is_empty(rx_q) || pkt_queue_length(adv_rpt_q) > 0) {
hci_upstream_data_post(OSI_THREAD_MAX_TIMEOUT);
}
}
bool hci_hal_h4_task_post(uint32_t timeout)
/* Wake the HCI task's upstream workqueue; returns false on post failure. */
static bool hci_upstream_data_post(uint32_t timeout)
{
/* NOTE(review): the first return is a removed-line artifact (old
 * function-post API); the osi_thread_post_event() call below is the
 * current code. */
return osi_thread_post(hci_h4_thread, hci_hal_h4_rx_handler, NULL, 1, timeout);
return osi_thread_post_event(hci_hal_env.upstream_data_ready, timeout);
}
#if (C2H_FLOW_CONTROL_INCLUDED == TRUE)
@ -192,13 +268,13 @@ static void hci_packet_complete(BT_HDR *packet){
}
#endif ///C2H_FLOW_CONTROL_INCLUDED == TRUE
bool host_recv_adv_packet(BT_HDR *packet)
bool host_recv_adv_packet(uint8_t *packet)
{
assert(packet);
if(packet->data[0] == DATA_TYPE_EVENT && packet->data[1] == HCI_BLE_EVENT) {
if(packet->data[3] == HCI_BLE_ADV_PKT_RPT_EVT
if(packet[0] == DATA_TYPE_EVENT && packet[1] == HCI_BLE_EVENT) {
if(packet[3] == HCI_BLE_ADV_PKT_RPT_EVT || packet[3] == HCI_BLE_DIRECT_ADV_EVT
#if (BLE_ADV_REPORT_FLOW_CONTROL == TRUE)
|| packet->data[3] == HCI_BLE_ADV_DISCARD_REPORT_EVT
|| packet[3] == HCI_BLE_ADV_DISCARD_REPORT_EVT
#endif
) {
return true;
@ -208,21 +284,128 @@ bool host_recv_adv_packet(BT_HDR *packet)
}
#if (BLE_ADV_REPORT_FLOW_CONTROL == TRUE)
/* NOTE(review): rendered-diff artifact -- the removed function
 * hci_update_adv_report_flow_control() is interleaved here with the new
 * periodic alarm callback hci_adv_flow_monitor(); in the applied tree only
 * the hci_adv_credits_force_release(0) call belongs to the new function.
 * Confirm against the applied source before building. */
static void hci_update_adv_report_flow_control(BT_HDR *packet)
static void hci_adv_flow_monitor(void *context)
{
// this is adv packet
if(host_recv_adv_packet(packet)) {
// update adv free number
hci_hal_env.adv_free_num ++;
if (esp_vhci_host_check_send_available()){
// send hci cmd
btsnd_hcic_ble_update_adv_report_flow_control(hci_hal_env.adv_free_num);
hci_hal_env.adv_free_num = 0;
hci_adv_credits_force_release(0);
}
/*
 * Deduct `credits` adv-report credits from the available pool; called when
 * adv packets are accepted from the controller. Must never underflow.
 */
static void hci_adv_credits_consumed(uint16_t credits)
{
    osi_mutex_lock(&hci_hal_env.adv_flow_lock, OSI_MUTEX_MAX_TIMEOUT);
    assert(hci_hal_env.adv_credits >= credits);
    hci_hal_env.adv_credits -= credits;
    osi_mutex_unlock(&hci_hal_env.adv_flow_lock);
}
/*
 * Add `num` credits to the pending-release pool and (re)arm the flow
 * control monitor alarm when the pool transitions from empty.
 * With num == 0 this is a pure, best-effort (unlocked) query of the
 * current pending count.
 *
 * Returns the pending-release count after the update.
 */
int hci_adv_credits_prep_to_release(uint16_t num)
{
    if (num == 0) {
        return hci_hal_env.adv_credits_to_release;
    }
    osi_mutex_lock(&hci_hal_env.adv_flow_lock, OSI_MUTEX_MAX_TIMEOUT);
    int credits_to_release = hci_hal_env.adv_credits_to_release + num;
    /* bug fix: bound-check the NEW total; the previous assert inspected the
     * stale field value and would only fire on the following call */
    assert(credits_to_release <= BLE_ADV_REPORT_FLOW_CONTROL_NUM);
    hci_hal_env.adv_credits_to_release = credits_to_release;
    osi_mutex_unlock(&hci_hal_env.adv_flow_lock);

    /* first credits queued since the pool was drained: restart the monitor
     * (num != 0 is guaranteed by the early return above) */
    if (credits_to_release == num) {
        osi_alarm_cancel(hci_hal_env.adv_flow_monitor);
        osi_alarm_set(hci_hal_env.adv_flow_monitor, HCI_ADV_FLOW_MONITOR_PERIOD_MS);
    }
    return credits_to_release;
}
/*
 * Move all pending-release credits back into the available pool.
 * Returns the number of credits released; cancels the monitor alarm once
 * nothing is left pending.
 */
static int hci_adv_credits_release(void)
{
osi_mutex_lock(&hci_hal_env.adv_flow_lock, OSI_MUTEX_MAX_TIMEOUT);
int credits_released = hci_hal_env.adv_credits_to_release;
hci_hal_env.adv_credits += credits_released;
hci_hal_env.adv_credits_to_release -= credits_released;
assert(hci_hal_env.adv_credits <= BLE_ADV_REPORT_FLOW_CONTROL_NUM);
assert(hci_hal_env.adv_credits_to_release >= 0);
osi_mutex_unlock(&hci_hal_env.adv_flow_lock);
/* unlocked re-read: skips the cancel when another thread queued new
 * credits (and re-armed the alarm) right after the unlock above --
 * presumably an intentional best-effort check; NOTE(review): confirm
 * the race with hci_adv_credits_prep_to_release() is benign */
if (hci_hal_env.adv_credits_to_release == 0) {
osi_alarm_cancel(hci_hal_env.adv_flow_monitor);
}
return credits_released;
}
/*
 * Undo a credit release that could not be reported to the controller:
 * move `credits` back from the available pool to the pending pool.
 * Returns `credits` unchanged for the caller's bookkeeping.
 */
static int hci_adv_credits_release_rollback(uint16_t credits)
{
    osi_mutex_lock(&hci_hal_env.adv_flow_lock, OSI_MUTEX_MAX_TIMEOUT);
    hci_hal_env.adv_credits -= credits;
    hci_hal_env.adv_credits_to_release += credits;
    assert(hci_hal_env.adv_credits >= 0);
    assert(hci_hal_env.adv_credits_to_release <= BLE_ADV_REPORT_FLOW_CONTROL_NUM);
    osi_mutex_unlock(&hci_hal_env.adv_flow_lock);
    return credits;
}
/*
 * Free-callback attached to the static adv-flow-control command buffer:
 * mark the buffer reusable again, then retry releasing any credits that
 * accumulated while the command was in flight.
 */
static void hci_adv_flow_cmd_free_cb(pkt_linked_item_t *pkt)
{
    osi_mutex_lock(&hci_hal_env.adv_flow_lock, OSI_MUTEX_MAX_TIMEOUT);
    hci_hal_env.cmd_buf_in_use = false;
    osi_mutex_unlock(&hci_hal_env.adv_flow_lock);

    hci_adv_credits_try_release(0);
}
/*
 * Send the vendor "adv report flow control" HCI command carrying
 * `credits_released`. Prefers the preallocated static command buffer
 * (usable even under heap exhaustion) and falls back to a heap buffer.
 * Returns true when the command was handed to the transmit path.
 */
bool hci_adv_flow_try_send_command(uint16_t credits_released)
{
bool sent = false;
bool use_static_buffer = false;
/* first try using static buffer, then dynamic buffer */
/* double-checked claim of the static buffer: cheap unlocked peek first,
 * then re-check under adv_flow_lock before taking ownership */
if (!hci_hal_env.cmd_buf_in_use) {
osi_mutex_lock(&hci_hal_env.adv_flow_lock, OSI_MUTEX_MAX_TIMEOUT);
if (!hci_hal_env.cmd_buf_in_use) {
hci_hal_env.cmd_buf_in_use = true;
use_static_buffer = true;
}
osi_mutex_unlock(&hci_hal_env.adv_flow_lock);
}
if (use_static_buffer) {
hci_cmd_metadata_t *metadata = (hci_cmd_metadata_t *)(hci_hal_env.adv_fc_cmd_buf->data);
BT_HDR *static_buffer = &metadata->command;
/* the free-callback returns the buffer and retries pending releases */
metadata->command_free_cb = hci_adv_flow_cmd_free_cb;
sent = btsnd_hcic_ble_update_adv_report_flow_control(credits_released, static_buffer);
} else {
sent = btsnd_hcic_ble_update_adv_report_flow_control(credits_released, NULL);
}
return sent;
}
/*
 * Queue `num` credits for release and, once at least
 * HCI_BLE_ADV_MIN_CREDITS_TO_RELEASE are pending, try to report them to
 * the controller; rolls the credits back if the command cannot be sent.
 * Returns the number of credits actually reported (0 if below threshold).
 */
int hci_adv_credits_try_release(uint16_t num)
{
    if (hci_adv_credits_prep_to_release(num) < HCI_BLE_ADV_MIN_CREDITS_TO_RELEASE) {
        return 0;
    }

    int released = hci_adv_credits_release();
    if (released > 0) {
        if (!hci_adv_flow_try_send_command(released)) {
            hci_adv_credits_release_rollback(released);
        }
    } else {
        /* release() never returns a negative count */
        assert(released == 0);
    }
    return released;
}
/*
 * Release all pending credits immediately, ignoring the minimum-release
 * threshold (used by the periodic flow monitor); rolls back on send
 * failure. Returns the number of credits reported to the controller.
 */
int hci_adv_credits_force_release(uint16_t num)
{
    hci_adv_credits_prep_to_release(num);

    int released = hci_adv_credits_release();
    if (released > 0 && !hci_adv_flow_try_send_command(released)) {
        hci_adv_credits_release_rollback(released);
    }
    return released;
}
#endif
@ -283,61 +466,125 @@ static void hci_hal_h4_hdl_rx_packet(BT_HDR *packet)
return;
}
#if (BLE_ADV_REPORT_FLOW_CONTROL == TRUE)
hci_update_adv_report_flow_control(packet);
#endif
#if SCAN_QUEUE_CONGEST_CHECK
if(BTU_check_queue_is_congest() && host_recv_adv_packet(packet)) {
HCI_TRACE_DEBUG("BtuQueue is congested");
osi_free(packet);
return;
}
#endif
packet->event = outbound_event_types[PACKET_TYPE_TO_INDEX(type)];
callbacks->packet_ready(packet);
hci_hal_env.callbacks->packet_ready(packet);
}
/* NOTE(review): rendered-diff artifact -- the removed event_uart_has_bytes()
 * helper is interleaved with the new adv-report handler below; the real
 * handler body starts at the local declarations (`uint8_t type;`...).
 * Confirm against the applied tree before building. */
static void event_uart_has_bytes(fixed_queue_t *queue)
/*
 * Validate one queued BLE adv-report packet (H4 type byte, preamble size,
 * parameter length) and hand it to the upper layer; malformed or
 * congested-path packets are freed and their flow-control credit queued
 * for release.
 */
static void hci_hal_h4_hdl_rx_adv_rpt(pkt_linked_item_t *linked_pkt)
{
BT_HDR *packet;
while (!fixed_queue_is_empty(queue)) {
packet = fixed_queue_dequeue(queue, FIXED_QUEUE_MAX_TIMEOUT);
hci_hal_h4_hdl_rx_packet(packet);
uint8_t type;
uint8_t hdr_size;
uint16_t length;
uint8_t *stream = NULL;
if (!linked_pkt) {
return;
}
/* the linked item's payload is a BT_HDR followed by the H4 packet */
BT_HDR* packet = (BT_HDR *)linked_pkt->data;
stream = packet->data + packet->offset;
/* only adv-report style events may be routed through this path */
assert(host_recv_adv_packet(stream) == true);
/* strip the 1-byte H4 packet-type indicator */
STREAM_TO_UINT8(type, stream);
packet->offset++;
packet->len--;
hdr_size = preamble_sizes[type - 1];
if (packet->len < hdr_size) {
HCI_TRACE_ERROR("Wrong packet length type=%d pkt_len=%d hdr_len=%d",
type, packet->len, hdr_size);
goto _discard_packet;
}
/* last preamble byte carries the parameter total length; validate it */
stream += hdr_size - 1;
STREAM_TO_UINT8(length, stream);
if ((length + hdr_size) != packet->len) {
HCI_TRACE_ERROR("Wrong packet length type=%d hdr_len=%d pd_len=%d "
"pkt_len=%d", type, hdr_size, length, packet->len);
goto _discard_packet;
}
#if SCAN_QUEUE_CONGEST_CHECK
if(BTU_check_queue_is_congest()) {
HCI_TRACE_DEBUG("BtuQueue is congested");
goto _discard_packet;
}
#endif
packet->event = outbound_event_types[PACKET_TYPE_TO_INDEX(type)];
hci_hal_env.callbacks->adv_rpt_ready(linked_pkt);
return;
_discard_packet:
osi_free(linked_pkt);
#if (BLE_ADV_REPORT_FLOW_CONTROL == TRUE)
/* a dropped report still returns its credit to the controller */
hci_adv_credits_prep_to_release(1);
#endif
}
/*
 * VHCI callback: the controller can accept another packet from the host.
 * Kick the downstream workqueue to flush pending TX packets.
 */
static void host_send_pkt_available_cb(void)
{
//Controller rx cache buffer is ready for receiving new host packet
//Just Call Host main thread task to process pending packets.
/* NOTE(review): the hci_host_task_post() line is a removed-line artifact
 * of the rendered diff; hci_downstream_data_post() is the current call. */
hci_host_task_post(OSI_THREAD_MAX_TIMEOUT);
hci_downstream_data_post(OSI_THREAD_MAX_TIMEOUT);
}
static int host_recv_pkt_cb(uint8_t *data, uint16_t len)
{
//Target has packet to host, malloc new buffer for packet
BT_HDR *pkt;
BT_HDR *pkt = NULL;
pkt_linked_item_t *linked_pkt = NULL;
size_t pkt_size;
if (hci_hal_env.rx_q == NULL) {
return 0;
}
pkt_size = BT_HDR_SIZE + len;
pkt = (BT_HDR *) osi_calloc(pkt_size);
bool is_adv_rpt = host_recv_adv_packet(data);
if (!pkt) {
HCI_TRACE_ERROR("%s couldn't aquire memory for inbound data buffer.\n", __func__);
return -1;
if (!is_adv_rpt) {
pkt_size = BT_HDR_SIZE + len;
pkt = (BT_HDR *) osi_calloc(pkt_size);
if (!pkt) {
HCI_TRACE_ERROR("%s couldn't aquire memory for inbound data buffer.\n", __func__);
assert(0);
}
pkt->offset = 0;
pkt->len = len;
pkt->layer_specific = 0;
memcpy(pkt->data, data, len);
fixed_queue_enqueue(hci_hal_env.rx_q, pkt, FIXED_QUEUE_MAX_TIMEOUT);
} else {
#if !BLE_ADV_REPORT_FLOW_CONTROL
// drop the packets if pkt_queue length goes beyond upper limit
if (pkt_queue_length(hci_hal_env.adv_rpt_q) > HCI_HAL_BLE_ADV_RPT_QUEUE_LEN_MAX) {
return 0;
}
#endif
pkt_size = BT_PKT_LINKED_HDR_SIZE + BT_HDR_SIZE + len;
linked_pkt = (pkt_linked_item_t *) osi_calloc(pkt_size);
if (!linked_pkt) {
#if (BLE_ADV_REPORT_FLOW_CONTROL == TRUE)
hci_adv_credits_consumed(1);
hci_adv_credits_prep_to_release(1);
#endif
return 0;
}
pkt = (BT_HDR *)linked_pkt->data;
pkt->offset = 0;
pkt->len = len;
pkt->layer_specific = 0;
memcpy(pkt->data, data, len);
pkt_queue_enqueue(hci_hal_env.adv_rpt_q, linked_pkt);
#if (BLE_ADV_REPORT_FLOW_CONTROL == TRUE)
hci_adv_credits_consumed(1);
#endif
}
pkt->offset = 0;
pkt->len = len;
pkt->layer_specific = 0;
memcpy(pkt->data, data, len);
fixed_queue_enqueue(hci_hal_env.rx_q, pkt, FIXED_QUEUE_MAX_TIMEOUT);
hci_hal_h4_task_post(0);
hci_upstream_data_post(OSI_THREAD_MAX_TIMEOUT);
BTTRC_DUMP_BUFFER("Recv Pkt", pkt->data, len);

View File

@ -35,20 +35,17 @@
#include "osi/thread.h"
#include "osi/mutex.h"
#include "osi/fixed_queue.h"
#include "osi/fixed_pkt_queue.h"
#define HCI_HOST_TASK_PINNED_TO_CORE (TASK_PINNED_TO_CORE)
#define HCI_HOST_TASK_STACK_SIZE (2048 + BT_TASK_EXTRA_STACK_SIZE)
#define HCI_HOST_TASK_PRIO (BT_TASK_MAX_PRIORITIES - 3)
#define HCI_HOST_TASK_NAME "hciT"
#define HCI_HOST_TASK_WORKQUEUE_NUM (2)
#define HCI_HOST_TASK_WORKQUEUE0_LEN (1) // for downstream datapath
#define HCI_HOST_TASK_WORKQUEUE1_LEN (1) // for upstream datapath
typedef struct {
uint16_t opcode;
future_t *complete_future;
command_complete_cb complete_callback;
command_status_cb status_callback;
void *context;
BT_HDR *command;
} waiting_command_t;
#define HCI_DOWNSTREAM_DATA_QUEUE_IDX (0)
typedef struct {
bool timer_is_set;
@ -59,9 +56,9 @@ typedef struct {
typedef struct {
int command_credits;
fixed_queue_t *command_queue;
fixed_pkt_queue_t *command_queue;
fixed_queue_t *packet_queue;
struct osi_event *downstream_data_ready;
command_waiting_response_t cmd_waiting_q;
/*
@ -89,16 +86,17 @@ static const packet_fragmenter_callbacks_t packet_fragmenter_callbacks;
static int hci_layer_init_env(void);
static void hci_layer_deinit_env(void);
static void hci_host_thread_handler(void *arg);
static void event_command_ready(fixed_queue_t *queue);
static void hci_downstream_data_handler(void *arg);
static void event_command_ready(fixed_pkt_queue_t *queue);
static void event_packet_ready(fixed_queue_t *queue);
static void restart_command_waiting_response_timer(command_waiting_response_t *cmd_wait_q);
static void command_timed_out(void *context);
static void hal_says_packet_ready(BT_HDR *packet);
static bool filter_incoming_event(BT_HDR *packet);
static serial_data_type_t event_to_data_type(uint16_t event);
static waiting_command_t *get_waiting_command(command_opcode_t opcode);
static pkt_linked_item_t *get_waiting_command(command_opcode_t opcode);
static void dispatch_reassembled(BT_HDR *packet);
static void dispatch_adv_report(pkt_linked_item_t *linked_pkt);
// Module lifecycle functions
int hci_start_up(void)
@ -107,11 +105,15 @@ int hci_start_up(void)
goto error;
}
hci_host_thread = osi_thread_create(HCI_HOST_TASK_NAME, HCI_HOST_TASK_STACK_SIZE, HCI_HOST_TASK_PRIO, HCI_HOST_TASK_PINNED_TO_CORE, 2);
const size_t workqueue_len[] = {HCI_HOST_TASK_WORKQUEUE0_LEN, HCI_HOST_TASK_WORKQUEUE1_LEN};
hci_host_thread = osi_thread_create(HCI_HOST_TASK_NAME, HCI_HOST_TASK_STACK_SIZE, HCI_HOST_TASK_PRIO, HCI_HOST_TASK_PINNED_TO_CORE,
HCI_HOST_TASK_WORKQUEUE_NUM, workqueue_len);
if (hci_host_thread == NULL) {
return -2;
}
osi_event_bind(hci_host_env.downstream_data_ready, hci_host_thread, HCI_DOWNSTREAM_DATA_QUEUE_IDX);
packet_fragmenter->init(&packet_fragmenter_callbacks);
hal->open(&hal_callbacks, hci_host_thread);
@ -136,10 +138,9 @@ void hci_shut_down(void)
hci_host_thread = NULL;
}
bool hci_host_task_post(uint32_t timeout)
bool hci_downstream_data_post(uint32_t timeout)
{
return osi_thread_post(hci_host_thread, hci_host_thread_handler, NULL, 0, timeout);
return osi_thread_post_event(hci_host_env.downstream_data_ready, timeout);
}
static int hci_layer_init_env(void)
@ -150,14 +151,18 @@ static int hci_layer_init_env(void)
// as per the Bluetooth spec, Volume 2, Part E, 4.4 (Command Flow Control)
// This value can change when you get a command complete or command status event.
hci_host_env.command_credits = 1;
hci_host_env.command_queue = fixed_queue_new(QUEUE_SIZE_MAX);
hci_host_env.command_queue = fixed_pkt_queue_new(QUEUE_SIZE_MAX);
if (hci_host_env.command_queue) {
fixed_queue_register_dequeue(hci_host_env.command_queue, event_command_ready);
fixed_pkt_queue_register_dequeue(hci_host_env.command_queue, event_command_ready);
} else {
HCI_TRACE_ERROR("%s unable to create pending command queue.", __func__);
return -1;
}
struct osi_event *event = osi_event_create(hci_downstream_data_handler, NULL);
assert(event != NULL);
hci_host_env.downstream_data_ready = event;
hci_host_env.packet_queue = fixed_queue_new(QUEUE_SIZE_MAX);
if (hci_host_env.packet_queue) {
fixed_queue_register_dequeue(hci_host_env.packet_queue, event_packet_ready);
@ -191,8 +196,11 @@ static void hci_layer_deinit_env(void)
{
command_waiting_response_t *cmd_wait_q;
osi_event_delete(hci_host_env.downstream_data_ready);
hci_host_env.downstream_data_ready = NULL;
if (hci_host_env.command_queue) {
fixed_queue_free(hci_host_env.command_queue, osi_free_func);
fixed_pkt_queue_free(hci_host_env.command_queue, (fixed_pkt_queue_free_cb)osi_free_func);
}
if (hci_host_env.packet_queue) {
fixed_queue_free(hci_host_env.packet_queue, osi_free_func);
@ -208,7 +216,7 @@ static void hci_layer_deinit_env(void)
#endif // #if (BLE_50_FEATURE_SUPPORT == TRUE)
}
static void hci_host_thread_handler(void *arg)
static void hci_downstream_data_handler(void *arg)
{
/*
* Previous task handles RX queue and two TX Queues, Since there is
@ -218,18 +226,19 @@ static void hci_host_thread_handler(void *arg)
* All packets will be directly copied to single queue in driver layer with
* H4 type header added (1 byte).
*/
if (esp_vhci_host_check_send_available()) {
while (esp_vhci_host_check_send_available()) {
/*Now Target only allowed one packet per TX*/
BT_HDR *pkt = packet_fragmenter->fragment_current_packet();
if (pkt != NULL) {
packet_fragmenter->fragment_and_dispatch(pkt);
} else {
if (!fixed_queue_is_empty(hci_host_env.command_queue) &&
} else if (!fixed_pkt_queue_is_empty(hci_host_env.command_queue) &&
hci_host_env.command_credits > 0) {
fixed_queue_process(hci_host_env.command_queue);
} else if (!fixed_queue_is_empty(hci_host_env.packet_queue)) {
fixed_queue_process(hci_host_env.packet_queue);
}
fixed_pkt_queue_process(hci_host_env.command_queue);
} else if (!fixed_queue_is_empty(hci_host_env.packet_queue)) {
fixed_queue_process(hci_host_env.packet_queue);
} else {
// No downstream packet to send, stop processing
break;
}
}
}
@ -240,81 +249,73 @@ static void transmit_command(
command_status_cb status_callback,
void *context)
{
uint8_t *stream;
waiting_command_t *wait_entry = osi_calloc(sizeof(waiting_command_t));
if (!wait_entry) {
HCI_TRACE_ERROR("%s couldn't allocate space for wait entry.", __func__);
return;
}
hci_cmd_metadata_t *metadata = HCI_GET_CMD_METAMSG(command);
pkt_linked_item_t *linked_pkt = HCI_GET_CMD_LINKED_STRUCT(metadata);
stream = command->data + command->offset;
STREAM_TO_UINT16(wait_entry->opcode, stream);
wait_entry->complete_callback = complete_callback;
wait_entry->status_callback = status_callback;
wait_entry->command = command;
wait_entry->context = context;
assert(command->layer_specific == HCI_CMD_BUF_TYPE_METADATA);
metadata->flags_vnd |= HCI_CMD_MSG_F_VND_QUEUED;
// Store the command message type in the event field
// in case the upper layer didn't already
command->event = MSG_STACK_TO_HC_HCI_CMD;
HCI_TRACE_DEBUG("HCI Enqueue Comamnd opcode=0x%x\n", wait_entry->opcode);
HCI_TRACE_DEBUG("HCI Enqueue Comamnd opcode=0x%x\n", metadata->opcode);
BTTRC_DUMP_BUFFER(NULL, command->data + command->offset, command->len);
fixed_queue_enqueue(hci_host_env.command_queue, wait_entry, FIXED_QUEUE_MAX_TIMEOUT);
hci_host_task_post(OSI_THREAD_MAX_TIMEOUT);
fixed_pkt_queue_enqueue(hci_host_env.command_queue, linked_pkt, FIXED_PKT_QUEUE_MAX_TIMEOUT);
hci_downstream_data_post(OSI_THREAD_MAX_TIMEOUT);
}
/*
 * Enqueue an HCI command and return a future the caller can await for the
 * command-complete result. The command buffer must carry HCI command
 * metadata (layer_specific == HCI_CMD_BUF_TYPE_METADATA).
 *
 * NOTE(review): rendered-diff artifact -- the waiting_command_t lines below
 * belong to the removed implementation; the metadata/linked_pkt lines are
 * the replacement. Confirm against the applied tree before building.
 */
static future_t *transmit_command_futured(BT_HDR *command)
{
waiting_command_t *wait_entry = osi_calloc(sizeof(waiting_command_t));
assert(wait_entry != NULL);
hci_cmd_metadata_t *metadata = HCI_GET_CMD_METAMSG(command);
pkt_linked_item_t *linked_pkt = HCI_GET_CMD_LINKED_STRUCT(metadata);
assert(command->layer_specific == HCI_CMD_BUF_TYPE_METADATA);
/* mark queued + awaited-by-future so the response path completes it */
metadata->flags_vnd |= (HCI_CMD_MSG_F_VND_QUEUED | HCI_CMD_MSG_F_VND_FUTURE);
future_t *future = future_new();
uint8_t *stream = command->data + command->offset;
STREAM_TO_UINT16(wait_entry->opcode, stream);
wait_entry->complete_future = future;
wait_entry->command = command;
metadata->complete_future = future;
// Store the command message type in the event field
// in case the upper layer didn't already
command->event = MSG_STACK_TO_HC_HCI_CMD;
fixed_queue_enqueue(hci_host_env.command_queue, wait_entry, FIXED_QUEUE_MAX_TIMEOUT);
hci_host_task_post(OSI_THREAD_MAX_TIMEOUT);
fixed_pkt_queue_enqueue(hci_host_env.command_queue, linked_pkt, FIXED_PKT_QUEUE_MAX_TIMEOUT);
hci_downstream_data_post(OSI_THREAD_MAX_TIMEOUT);
return future;
}
/*
 * hci_t transmit entry for non-command traffic. Commands must go through
 * transmit_command(); sending one here is treated as a hard error.
 */
static void transmit_downward(uint16_t type, void *data)
{
if (type == MSG_STACK_TO_HC_HCI_CMD) {
/* NOTE(review): the transmit_command()/HCI_TRACE_WARNING lines are
 * removed-line artifacts of the rendered diff; the current code logs
 * an error and asserts. */
transmit_command((BT_HDR *)data, NULL, NULL, NULL);
HCI_TRACE_WARNING("%s legacy transmit of command. Use transmit_command instead.\n", __func__);
HCI_TRACE_ERROR("%s legacy transmit of command. Use transmit_command instead.\n", __func__);
assert(0);
} else {
fixed_queue_enqueue(hci_host_env.packet_queue, data, FIXED_QUEUE_MAX_TIMEOUT);
}
hci_downstream_data_post(OSI_THREAD_MAX_TIMEOUT);
}
// Command/packet transmitting functions
static void event_command_ready(fixed_queue_t *queue)
static void event_command_ready(fixed_pkt_queue_t *queue)
{
waiting_command_t *wait_entry = NULL;
pkt_linked_item_t *wait_entry = NULL;
command_waiting_response_t *cmd_wait_q = &hci_host_env.cmd_waiting_q;
wait_entry = fixed_queue_dequeue(queue, FIXED_QUEUE_MAX_TIMEOUT);
wait_entry = fixed_pkt_queue_dequeue(queue, FIXED_QUEUE_MAX_TIMEOUT);
hci_cmd_metadata_t *metadata = (hci_cmd_metadata_t *)(wait_entry->data);
metadata->flags_vnd |= HCI_CMD_MSG_F_VND_SENT;
metadata->flags_vnd &= ~HCI_CMD_MSG_F_VND_QUEUED;
if(wait_entry->opcode == HCI_HOST_NUM_PACKETS_DONE
#if (BLE_ADV_REPORT_FLOW_CONTROL == TRUE)
|| wait_entry->opcode == HCI_VENDOR_BLE_ADV_REPORT_FLOW_CONTROL
#endif
){
packet_fragmenter->fragment_and_dispatch(wait_entry->command);
osi_free(wait_entry->command);
osi_free(wait_entry);
if (metadata->flags_src & HCI_CMD_MSG_F_SRC_NOACK) {
packet_fragmenter->fragment_and_dispatch(&metadata->command);
hci_cmd_free_cb free_func = metadata->command_free_cb ? metadata->command_free_cb : (hci_cmd_free_cb) osi_free_func;
free_func(wait_entry);
return;
}
hci_host_env.command_credits--;
@ -324,7 +325,7 @@ static void event_command_ready(fixed_queue_t *queue)
osi_mutex_unlock(&cmd_wait_q->commands_pending_response_lock);
// Send it off
packet_fragmenter->fragment_and_dispatch(wait_entry->command);
packet_fragmenter->fragment_and_dispatch(&metadata->command);
restart_command_waiting_response_timer(cmd_wait_q);
}
@ -389,7 +390,7 @@ static void restart_command_waiting_response_timer(command_waiting_response_t *c
static void command_timed_out(void *context)
{
command_waiting_response_t *cmd_wait_q = (command_waiting_response_t *)context;
waiting_command_t *wait_entry;
pkt_linked_item_t *wait_entry;
osi_mutex_lock(&cmd_wait_q->commands_pending_response_lock, OSI_MUTEX_MAX_TIMEOUT);
wait_entry = (list_is_empty(cmd_wait_q->commands_pending_response) ?
@ -402,7 +403,8 @@ static void command_timed_out(void *context)
// We shouldn't try to recover the stack from this command timeout.
// If it's caused by a software bug, fix it. If it's a hardware bug, fix it.
{
HCI_TRACE_ERROR("%s hci layer timeout waiting for response to a command. opcode: 0x%x", __func__, wait_entry->opcode);
hci_cmd_metadata_t *metadata = (hci_cmd_metadata_t *)(wait_entry->data);
HCI_TRACE_ERROR("%s hci layer timeout waiting for response to a command. opcode: 0x%x", __func__, metadata->opcode);
}
}
@ -416,12 +418,18 @@ static void hal_says_packet_ready(BT_HDR *packet)
}
}
static void hal_says_adv_rpt_ready(pkt_linked_item_t *linked_pkt)
{
dispatch_adv_report(linked_pkt);
}
// Returns true if the event was intercepted and should not proceed to
// higher layers. Also inspects an incoming event for interesting
// information, like how many commands are now able to be sent.
static bool filter_incoming_event(BT_HDR *packet)
{
waiting_command_t *wait_entry = NULL;
pkt_linked_item_t *wait_entry = NULL;
hci_cmd_metadata_t *metadata = NULL;
uint8_t *stream = packet->data + packet->offset;
uint8_t event_code;
command_opcode_t opcode;
@ -435,10 +443,11 @@ static bool filter_incoming_event(BT_HDR *packet)
STREAM_TO_UINT8(hci_host_env.command_credits, stream);
STREAM_TO_UINT16(opcode, stream);
wait_entry = get_waiting_command(opcode);
metadata = (hci_cmd_metadata_t *)(wait_entry->data);
if (!wait_entry) {
HCI_TRACE_WARNING("%s command complete event with no matching command. opcode: 0x%x.", __func__, opcode);
} else if (wait_entry->complete_callback) {
wait_entry->complete_callback(packet, wait_entry->context);
} else if (metadata->command_complete_cb) {
metadata->command_complete_cb(packet, metadata->context);
#if (BLE_50_FEATURE_SUPPORT == TRUE)
BlE_SYNC *sync_info = btsnd_hcic_ble_get_sync_info();
if(!sync_info) {
@ -450,8 +459,8 @@ static bool filter_incoming_event(BT_HDR *packet)
}
}
#endif // #if (BLE_50_FEATURE_SUPPORT == TRUE)
} else if (wait_entry->complete_future) {
future_ready(wait_entry->complete_future, packet);
} else if (metadata->flags_vnd & HCI_CMD_MSG_F_VND_FUTURE) {
future_ready((future_t *)(metadata->complete_future), packet);
}
goto intercepted;
@ -464,10 +473,11 @@ static bool filter_incoming_event(BT_HDR *packet)
// If a command generates a command status event, it won't be getting a command complete event
wait_entry = get_waiting_command(opcode);
metadata = (hci_cmd_metadata_t *)(wait_entry->data);
if (!wait_entry) {
HCI_TRACE_WARNING("%s command status event with no matching command. opcode: 0x%x", __func__, opcode);
} else if (wait_entry->status_callback) {
wait_entry->status_callback(status, wait_entry->command, wait_entry->context);
} else if (metadata->command_status_cb) {
metadata->command_status_cb(status, &metadata->command, metadata->context);
}
goto intercepted;
@ -479,23 +489,22 @@ intercepted:
/*Tell HCI Host Task to continue TX Pending commands*/
if (hci_host_env.command_credits &&
!fixed_queue_is_empty(hci_host_env.command_queue)) {
hci_host_task_post(OSI_THREAD_MAX_TIMEOUT);
!fixed_pkt_queue_is_empty(hci_host_env.command_queue)) {
hci_downstream_data_post(OSI_THREAD_MAX_TIMEOUT);
}
if (wait_entry) {
// If it has a callback, it's responsible for freeing the packet
if (event_code == HCI_COMMAND_STATUS_EVT ||
(!wait_entry->complete_callback && !wait_entry->complete_future)) {
(!metadata->command_complete_cb && !metadata->complete_future)) {
osi_free(packet);
}
// If it has a callback, it's responsible for freeing the command
if (event_code == HCI_COMMAND_COMPLETE_EVT || !wait_entry->status_callback) {
osi_free(wait_entry->command);
if (event_code == HCI_COMMAND_COMPLETE_EVT || !metadata->command_status_cb) {
hci_cmd_free_cb free_func = metadata->command_free_cb ? metadata->command_free_cb : (hci_cmd_free_cb) osi_free_func;
free_func(wait_entry);
}
osi_free(wait_entry);
} else {
osi_free(packet);
}
@ -513,6 +522,17 @@ static void dispatch_reassembled(BT_HDR *packet)
}
}
static void dispatch_adv_report(pkt_linked_item_t *linked_pkt)
{
// Events should already have been dispatched before this point
//Tell Up-layer received packet.
if (btu_task_post(SIG_BTU_HCI_ADV_RPT_MSG, linked_pkt, OSI_THREAD_MAX_TIMEOUT) == false) {
osi_free(linked_pkt);
#if (BLE_ADV_REPORT_FLOW_CONTROL == TRUE)
hci_adv_credits_try_release(1);
#endif
}
}
// Misc internal functions
// TODO(zachoverflow): we seem to do this a couple places, like the HCI inject module. #centralize
@ -531,7 +551,7 @@ static serial_data_type_t event_to_data_type(uint16_t event)
return 0;
}
static waiting_command_t *get_waiting_command(command_opcode_t opcode)
static pkt_linked_item_t *get_waiting_command(command_opcode_t opcode)
{
command_waiting_response_t *cmd_wait_q = &hci_host_env.cmd_waiting_q;
osi_mutex_lock(&cmd_wait_q->commands_pending_response_lock, OSI_MUTEX_MAX_TIMEOUT);
@ -539,15 +559,15 @@ static waiting_command_t *get_waiting_command(command_opcode_t opcode)
for (const list_node_t *node = list_begin(cmd_wait_q->commands_pending_response);
node != list_end(cmd_wait_q->commands_pending_response);
node = list_next(node)) {
waiting_command_t *wait_entry = list_node(node);
if (!wait_entry || wait_entry->opcode != opcode) {
continue;
pkt_linked_item_t *wait_entry = list_node(node);
if (wait_entry) {
hci_cmd_metadata_t *metadata = (hci_cmd_metadata_t *)(wait_entry->data);
if (metadata->opcode == opcode) {
list_remove(cmd_wait_q->commands_pending_response, wait_entry);
osi_mutex_unlock(&cmd_wait_q->commands_pending_response_lock);
return wait_entry;
}
}
list_remove(cmd_wait_q->commands_pending_response, wait_entry);
osi_mutex_unlock(&cmd_wait_q->commands_pending_response_lock);
return wait_entry;
}
osi_mutex_unlock(&cmd_wait_q->commands_pending_response_lock);
@ -565,7 +585,8 @@ static void init_layer_interface(void)
}
static const hci_hal_callbacks_t hal_callbacks = {
hal_says_packet_ready
hal_says_packet_ready,
hal_says_adv_rpt_ready,
};
static const packet_fragmenter_callbacks_t packet_fragmenter_callbacks = {

View File

@ -27,7 +27,6 @@
#include "hci/hci_packet_factory.h"
static BT_HDR *make_packet(size_t data_size);
static BT_HDR *make_command_no_params(uint16_t opcode);
static BT_HDR *make_command(uint16_t opcode, size_t parameter_size, uint8_t **stream_out);
@ -234,7 +233,9 @@ static BT_HDR *make_command_no_params(uint16_t opcode)
static BT_HDR *make_command(uint16_t opcode, size_t parameter_size, uint8_t **stream_out)
{
BT_HDR *packet = make_packet(HCI_COMMAND_PREAMBLE_SIZE + parameter_size);
BT_HDR *packet = HCI_GET_CMD_BUF(parameter_size);
hci_cmd_metadata_t *metadata = HCI_GET_CMD_METAMSG(packet);
metadata->opcode = opcode;
uint8_t *stream = packet->data;
UINT16_TO_STREAM(stream, opcode);
@ -247,17 +248,6 @@ static BT_HDR *make_command(uint16_t opcode, size_t parameter_size, uint8_t **st
return packet;
}
static BT_HDR *make_packet(size_t data_size)
{
BT_HDR *ret = (BT_HDR *)osi_calloc(sizeof(BT_HDR) + data_size);
assert(ret);
ret->event = 0;
ret->offset = 0;
ret->layer_specific = 0;
ret->len = data_size;
return ret;
}
static const hci_packet_factory_t interface = {
make_reset,
make_read_buffer_size,

View File

@ -21,7 +21,7 @@
#include <stdbool.h>
#include <stdint.h>
#include "osi/pkt_queue.h"
#include "stack/bt_types.h"
#if SOC_ESP_NIMBLE_CONTROLLER
#include "os/os_mbuf.h"
@ -34,12 +34,14 @@ typedef enum {
} serial_data_type_t;
typedef void (*packet_ready_cb)(BT_HDR *packet);
typedef void (*adv_rpt_ready_cb)(pkt_linked_item_t *linked_pkt);
typedef struct {
// Called when the HAL detects inbound data.
// Data |type| may be ACL, SCO, or EVENT.
// Executes in the context of the thread supplied to |init|.
packet_ready_cb packet_ready;
adv_rpt_ready_cb adv_rpt_ready;
/*
// Called when the HAL detects inbound astronauts named Dave.

View File

@ -19,11 +19,13 @@
#ifndef _HCI_LAYER_H_
#define _HCI_LAYER_H_
#include "common/bt_target.h"
#include "stack/bt_types.h"
#include "osi/allocator.h"
#include "osi/osi.h"
#include "osi/future.h"
#include "osi/thread.h"
#include "osi/pkt_queue.h"
///// LEGACY DEFINITIONS /////
@ -46,6 +48,9 @@
/* Local Bluetooth Controller ID for BR/EDR */
#define LOCAL_BR_EDR_CONTROLLER_ID 0
#define HCI_CMD_MSG_F_VND_FUTURE (0x01)
#define HCI_CMD_MSG_F_VND_QUEUED (0x02)
#define HCI_CMD_MSG_F_VND_SENT (0x04)
///// END LEGACY DEFINITIONS /////
typedef struct hci_hal_t hci_hal_t;
@ -97,6 +102,12 @@ const hci_t *hci_layer_get_interface(void);
int hci_start_up(void);
void hci_shut_down(void);
bool hci_host_task_post(uint32_t timeout);
bool hci_downstream_data_post(uint32_t timeout);
#if (BLE_ADV_REPORT_FLOW_CONTROL == TRUE)
int hci_adv_credits_prep_to_release(uint16_t num);
int hci_adv_credits_try_release(uint16_t num);
int hci_adv_credits_force_release(uint16_t num);
#endif
#endif /* _HCI_LAYER_H_ */

View File

@ -34,7 +34,7 @@
#include "device/controller.h"
#include "stack/hcimsgs.h"
#include "stack/gap_api.h"
#include "hci/hci_layer.h"
#if BLE_INCLUDED == TRUE
#include "l2c_int.h"
@ -57,6 +57,8 @@
#define MIN_ADV_LENGTH 2
#define BTM_VSC_CHIP_CAPABILITY_RSP_LEN_L_RELEASE 9
#define BTM_BLE_GAP_ADV_RPT_BATCH_SIZE (10)
#if BTM_DYNAMIC_MEMORY == FALSE
static tBTM_BLE_VSC_CB cmn_ble_gap_vsc_cb;
#else
@ -82,6 +84,7 @@ static UINT8 btm_set_conn_mode_adv_init_addr(tBTM_BLE_INQ_CB *p_cb,
tBLE_ADDR_TYPE *p_own_addr_type);
static void btm_ble_stop_observe(void);
static void btm_ble_stop_discover(void);
static void btm_adv_pkt_handler(void *arg);
uint32_t BTM_BleUpdateOwnType(uint8_t *own_bda_type, tBTM_START_ADV_CMPL_CBACK *cb);
#define BTM_BLE_INQ_RESULT 0x01
@ -3455,6 +3458,49 @@ void btm_send_sel_conn_callback(BD_ADDR remote_bda, UINT8 evt_type, UINT8 *p_dat
}
}
static void btm_adv_pkt_handler(void *arg)
{
UINT8 hci_evt_code, hci_evt_len;
UINT8 ble_sub_code;
tBTM_BLE_CB *p_cb = &btm_cb.ble_ctr_cb;
size_t pkts_to_process = pkt_queue_length(p_cb->adv_rpt_queue);
if (pkts_to_process > BTM_BLE_GAP_ADV_RPT_BATCH_SIZE) {
pkts_to_process = BTM_BLE_GAP_ADV_RPT_BATCH_SIZE;
}
for (size_t i = 0; i < pkts_to_process; i++) {
pkt_linked_item_t *linked_pkt = pkt_queue_dequeue(p_cb->adv_rpt_queue);
assert(linked_pkt != NULL);
BT_HDR *packet = (BT_HDR *)linked_pkt->data;
uint8_t *p = packet->data + packet->offset;
STREAM_TO_UINT8 (hci_evt_code, p);
STREAM_TO_UINT8 (hci_evt_len, p);
STREAM_TO_UINT8 (ble_sub_code, p);
if (ble_sub_code == HCI_BLE_ADV_PKT_RPT_EVT) {
btm_ble_process_adv_pkt(p);
} else if (ble_sub_code == HCI_BLE_ADV_DISCARD_REPORT_EVT) {
btm_ble_process_adv_discard_evt(p);
} else if (ble_sub_code == HCI_BLE_DIRECT_ADV_EVT) {
btm_ble_process_direct_adv_pkt(p);
} else {
assert (0);
}
osi_free(linked_pkt);
#if (BLE_ADV_REPORT_FLOW_CONTROL == TRUE)
hci_adv_credits_try_release(1);
#endif
}
if (pkt_queue_length(p_cb->adv_rpt_queue) != 0) {
btu_task_post(SIG_BTU_HCI_ADV_RPT_MSG, NULL, OSI_THREAD_MAX_TIMEOUT);
}
UNUSED(hci_evt_code);
UNUSED(hci_evt_len);
}
/*******************************************************************************
**
** Function btm_ble_process_adv_pkt
@ -3750,6 +3796,12 @@ void btm_ble_process_adv_discard_evt(UINT8 *p)
}
#endif
}
void btm_ble_process_direct_adv_pkt(UINT8 *p)
{
// TODO
}
/*******************************************************************************
**
** Function btm_ble_start_scan
@ -4414,6 +4466,13 @@ void btm_ble_init (void)
p_cb->inq_var.evt_type = BTM_BLE_NON_CONNECT_EVT;
p_cb->adv_rpt_queue = pkt_queue_create();
assert(p_cb->adv_rpt_queue != NULL);
p_cb->adv_rpt_ready = osi_event_create(btm_adv_pkt_handler, NULL);
assert(p_cb->adv_rpt_ready != NULL);
osi_event_bind(p_cb->adv_rpt_ready, btu_get_current_thread(), 0);
#if BLE_VND_INCLUDED == FALSE
btm_ble_adv_filter_init();
#endif
@ -4436,6 +4495,12 @@ void btm_ble_free (void)
fixed_queue_free(p_cb->conn_pending_q, osi_free_func);
pkt_queue_destroy(p_cb->adv_rpt_queue, NULL);
p_cb->adv_rpt_queue = NULL;
osi_event_delete(p_cb->adv_rpt_ready);
p_cb->adv_rpt_ready = NULL;
#if BTM_DYNAMIC_MEMORY == TRUE
osi_free(cmn_ble_gap_vsc_cb_ptr);
cmn_ble_gap_vsc_cb_ptr = NULL;
@ -4530,4 +4595,22 @@ BOOLEAN BTM_Ble_Authorization(BD_ADDR bd_addr, BOOLEAN authorize)
return FALSE;
}
bool btm_ble_adv_pkt_ready(void)
{
tBTM_BLE_CB *p_cb = &btm_cb.ble_ctr_cb;
osi_thread_post_event(p_cb->adv_rpt_ready, OSI_THREAD_MAX_TIMEOUT);
return true;
}
bool btm_ble_adv_pkt_post(pkt_linked_item_t *pkt)
{
if (pkt == NULL) {
return false;
}
tBTM_BLE_CB *p_cb = &btm_cb.ble_ctr_cb;
pkt_queue_enqueue(p_cb->adv_rpt_queue, pkt);
return true;
}
#endif /* BLE_INCLUDED */

View File

@ -662,14 +662,13 @@ tBTM_DEV_STATUS_CB *BTM_RegisterForDeviceStatusNotif (tBTM_DEV_STATUS_CB *p_cb)
tBTM_STATUS BTM_VendorSpecificCommand(UINT16 opcode, UINT8 param_len,
UINT8 *p_param_buf, tBTM_VSC_CMPL_CB *p_cb)
{
void *p_buf;
BT_HDR *p_buf;
BTM_TRACE_EVENT ("BTM: BTM_VendorSpecificCommand: Opcode: 0x%04X, ParamLen: %i.",
opcode, param_len);
/* Allocate a buffer to hold HCI command plus the callback function */
if ((p_buf = osi_malloc((UINT16)(sizeof(BT_HDR) + sizeof (tBTM_CMPL_CB *) +
param_len + HCIC_PREAMBLE_SIZE))) != NULL) {
if ((p_buf = HCI_GET_CMD_BUF(param_len)) != NULL) {
/* Send the HCI command (opcode will be OR'd with HCI_GRP_VENDOR_SPECIFIC) */
btsnd_hcic_vendor_spec_cmd (p_buf, opcode, param_len, p_param_buf, (void *)p_cb);

View File

@ -2395,6 +2395,7 @@ tBTM_STATUS BTM_WriteEIR( BT_HDR *p_buff, BOOLEAN fec_required)
if (controller_get_interface()->supports_extended_inquiry_response()) {
BTM_TRACE_API("Write Extended Inquiry Response to controller\n");
btsnd_hcic_write_ext_inquiry_response (p_buff, fec_required);
osi_free(p_buff);
return BTM_SUCCESS;
} else {
osi_free(p_buff);

View File

@ -28,6 +28,8 @@
#include "common/bt_target.h"
#include "osi/fixed_queue.h"
#include "osi/pkt_queue.h"
#include "osi/thread.h"
#include "stack/hcidefs.h"
#include "stack/btm_ble_api.h"
#include "btm_int.h"
@ -341,6 +343,9 @@ typedef struct {
tBTM_CMPL_CB *p_scan_cmpl_cb;
TIMER_LIST_ENT scan_timer_ent;
struct pkt_queue *adv_rpt_queue;
struct osi_event *adv_rpt_ready;
/* background connection procedure cb value */
tBTM_BLE_CONN_TYPE bg_conn_type;
UINT32 scan_int;
@ -384,6 +389,9 @@ extern "C" {
void btm_ble_timeout(TIMER_LIST_ENT *p_tle);
void btm_ble_process_adv_pkt (UINT8 *p);
void btm_ble_process_adv_discard_evt(UINT8 *p);
void btm_ble_process_direct_adv_pkt (UINT8 *p);
bool btm_ble_adv_pkt_ready(void);
bool btm_ble_adv_pkt_post(pkt_linked_item_t *pkt);
void btm_ble_proc_scan_rsp_rpt (UINT8 *p);
tBTM_STATUS btm_ble_read_remote_name(BD_ADDR remote_bda, tBTM_INQ_INFO *p_cur, tBTM_CMPL_CB *p_cb);
BOOLEAN btm_ble_cancel_remote_name(BD_ADDR remote_bda);

View File

@ -43,6 +43,7 @@
#include "common/bt_trace.h"
#include "osi/thread.h"
#include "osi/pkt_queue.h"
//#include "osi/mutex.h"
// TODO(zachoverflow): remove this horrible hack
#include "stack/btu.h"
@ -124,8 +125,6 @@ static void btu_hcif_ssr_evt_dump (UINT8 *p, UINT16 evt_len);
#if BLE_INCLUDED == TRUE
static void btu_ble_ll_conn_complete_evt (UINT8 *p, UINT16 evt_len);
static void btu_ble_process_adv_pkt (UINT8 *p);
static void btu_ble_process_adv_dis(UINT8 *p);
static void btu_ble_read_remote_feat_evt (UINT8 *p);
static void btu_ble_ll_conn_param_upd_evt (UINT8 *p, UINT16 evt_len);
static void btu_ble_ll_get_conn_param_format_err_from_contoller (UINT8 status, UINT16 handle);
@ -360,10 +359,10 @@ void btu_hcif_process_event (UNUSED_ATTR UINT8 controller_id, BT_HDR *p_msg)
switch (ble_sub_code) {
case HCI_BLE_ADV_PKT_RPT_EVT: /* result of inquiry */
btu_ble_process_adv_pkt(p);
break;
case HCI_BLE_ADV_DISCARD_REPORT_EVT:
btu_ble_process_adv_dis(p);
case HCI_BLE_DIRECT_ADV_EVT:
// These three events are directed to another specialized processing path
HCI_TRACE_ERROR("Unexpected HCI BLE event = 0x%02x", ble_sub_code);
break;
case HCI_BLE_CONN_COMPLETE_EVT:
btu_ble_ll_conn_complete_evt(p, hci_evt_len);
@ -453,15 +452,21 @@ void btu_hcif_send_cmd (UNUSED_ATTR UINT8 controller_id, BT_HDR *p_buf)
STREAM_TO_UINT16(opcode, stream);
// Eww...horrible hackery here
/* If command was a VSC, then extract command_complete callback */
if ((opcode & HCI_GRP_VENDOR_SPECIFIC) == HCI_GRP_VENDOR_SPECIFIC
assert (p_buf->layer_specific == HCI_CMD_BUF_TYPE_METADATA);
hci_cmd_metadata_t *metadata = HCI_GET_CMD_METAMSG(p_buf);
metadata->command_complete_cb = btu_hcif_command_complete_evt;
metadata->command_status_cb = btu_hcif_command_status_evt;
metadata->opcode = opcode;
vsc_callback = metadata->context;
/* If command is not a VSC, then the context field should be empty */
if ((opcode & HCI_GRP_VENDOR_SPECIFIC) != HCI_GRP_VENDOR_SPECIFIC
#if BLE_INCLUDED == TRUE
|| (opcode == HCI_BLE_RAND)
|| (opcode == HCI_BLE_ENCRYPT)
&& (opcode != HCI_BLE_RAND)
&& (opcode != HCI_BLE_ENCRYPT)
#endif
) {
vsc_callback = *((void **)(p_buf + 1));
) {
assert (vsc_callback == NULL);
}
hci_layer_get_interface()->transmit_command(
@ -474,6 +479,7 @@ void btu_hcif_send_cmd (UNUSED_ATTR UINT8 controller_id, BT_HDR *p_buf)
btu_check_bt_sleep ();
#endif
}
#if (BLE_50_FEATURE_SUPPORT == TRUE)
UINT8 btu_hcif_send_cmd_sync (UINT8 controller_id, BT_HDR *p_buf)
{
@ -494,15 +500,22 @@ UINT8 btu_hcif_send_cmd_sync (UINT8 controller_id, BT_HDR *p_buf)
sync_info->opcode = opcode;
// Eww...horrible hackery here
/* If command was a VSC, then extract command_complete callback */
if ((opcode & HCI_GRP_VENDOR_SPECIFIC) == HCI_GRP_VENDOR_SPECIFIC
assert (p_buf->layer_specific == HCI_CMD_BUF_TYPE_METADATA);
hci_cmd_metadata_t *metadata = HCI_GET_CMD_METAMSG(p_buf);
metadata->command_complete_cb = btu_hcif_command_complete_evt;
metadata->command_status_cb = btu_hcif_command_status_evt;
metadata->command_free_cb = NULL;
metadata->opcode = opcode;
vsc_callback = metadata->context;
/* If command is not a VSC, then the context field should be empty */
if ((opcode & HCI_GRP_VENDOR_SPECIFIC) != HCI_GRP_VENDOR_SPECIFIC
#if BLE_INCLUDED == TRUE
|| (opcode == HCI_BLE_RAND)
|| (opcode == HCI_BLE_ENCRYPT)
&& (opcode != HCI_BLE_RAND)
&& (opcode != HCI_BLE_ENCRYPT)
#endif
) {
vsc_callback = *((void **)(p_buf + 1));
) {
assert (vsc_callback == NULL);
}
hci_layer_get_interface()->transmit_command(
@ -1436,7 +1449,11 @@ static void btu_hcif_command_status_evt_on_task(BT_HDR *event)
stream,
hack->context);
osi_free(hack->command);
// check the HCI command integrity: opcode
hci_cmd_metadata_t *metadata = HCI_GET_CMD_METAMSG(hack->command);
assert(metadata->opcode == opcode);
HCI_FREE_CMD_BUF(hack->command);
osi_free(event);
}
@ -2015,18 +2032,6 @@ static void btu_hcif_encryption_key_refresh_cmpl_evt (UINT8 *p)
}
#endif ///SMP_INCLUDED == TRUE
static void btu_ble_process_adv_pkt (UINT8 *p)
{
HCI_TRACE_DEBUG("btu_ble_process_adv_pkt\n");
btm_ble_process_adv_pkt(p);
}
static void btu_ble_process_adv_dis(UINT8 *p)
{
btm_ble_process_adv_discard_evt(p);
}
static void btu_ble_ll_conn_complete_evt ( UINT8 *p, UINT16 evt_len)
{
btm_ble_conn_complete(p, evt_len, FALSE);

View File

@ -48,6 +48,8 @@
#define BTU_TASK_STACK_SIZE (BT_BTU_TASK_STACK_SIZE + BT_TASK_EXTRA_STACK_SIZE)
#define BTU_TASK_PRIO (BT_TASK_MAX_PRIORITIES - 5)
#define BTU_TASK_NAME "BTU_TASK"
#define BTU_TASK_WORKQUEUE_NUM (2)
#define BTU_TASK_WORKQUEUE0_LEN (0)
hash_map_t *btu_general_alarm_hash_map;
osi_mutex_t btu_general_alarm_lock;
@ -181,7 +183,9 @@ void BTU_StartUp(void)
osi_mutex_new(&btu_l2cap_alarm_lock);
btu_thread = osi_thread_create(BTU_TASK_NAME, BTU_TASK_STACK_SIZE, BTU_TASK_PRIO, BTU_TASK_PINNED_TO_CORE, 1);
const size_t workqueue_len[] = {BTU_TASK_WORKQUEUE0_LEN};
btu_thread = osi_thread_create(BTU_TASK_NAME, BTU_TASK_STACK_SIZE, BTU_TASK_PRIO, BTU_TASK_PINNED_TO_CORE,
BTU_TASK_WORKQUEUE_NUM, workqueue_len);
if (btu_thread == NULL) {
goto error_exit;
}
@ -265,3 +269,8 @@ int get_btu_work_queue_size(void)
{
return osi_thread_queue_wait_size(btu_thread, 0);
}
osi_thread_t *btu_get_current_thread(void)
{
return btu_thread;
}

View File

@ -227,6 +227,18 @@ bool btu_task_post(uint32_t sig, void *param, uint32_t timeout)
case SIG_BTU_HCI_MSG:
status = osi_thread_post(btu_thread, btu_hci_msg_process, param, 0, timeout);
break;
case SIG_BTU_HCI_ADV_RPT_MSG:
#if BLE_INCLUDED == TRUE
if (param != NULL) {
btm_ble_adv_pkt_post(param);
}
btm_ble_adv_pkt_ready();
status = true;
#else
osi_free(param);
status = false;
#endif
break;
#if (defined(BTA_INCLUDED) && BTA_INCLUDED == TRUE)
case SIG_BTU_BTA_MSG:
status = osi_thread_post(btu_thread, bta_sys_event, param, 0, timeout);

View File

@ -33,7 +33,6 @@
#include <stddef.h>
#include <string.h>
#define HCI_GET_CMD_BUF(paramlen) ((BT_HDR *)osi_malloc(HCIC_PREAMBLE_SIZE + sizeof(BT_HDR) + paramlen))
#if (BLE_50_FEATURE_SUPPORT == TRUE)
static BlE_SYNC ble_sync_info;
@ -557,19 +556,17 @@ BOOLEAN btsnd_hcic_ble_encrypt (UINT8 *key, UINT8 key_len,
BT_HDR *p;
UINT8 *pp;
if ((p = HCI_GET_CMD_BUF(sizeof (void *) +
HCIC_PARAM_SIZE_BLE_ENCRYPT)) == NULL) {
if ((p = HCI_GET_CMD_BUF(HCIC_PARAM_SIZE_BLE_ENCRYPT)) == NULL) {
return (FALSE);
}
pp = (UINT8 *)(p + 1);
p->len = HCIC_PREAMBLE_SIZE + HCIC_PARAM_SIZE_BLE_ENCRYPT;
p->offset = sizeof(void *);
*((void **)pp) = p_cmd_cplt_cback; /* Store command complete callback in buffer */
pp += sizeof(void *); /* Skip over callback pointer */
p->offset = 0;
hci_cmd_metadata_t *metadata = HCI_GET_CMD_METAMSG(p);
metadata->context = p_cmd_cplt_cback;
UINT16_TO_STREAM (pp, HCI_BLE_ENCRYPT);
UINT8_TO_STREAM (pp, HCIC_PARAM_SIZE_BLE_ENCRYPT);
@ -596,18 +593,17 @@ BOOLEAN btsnd_hcic_ble_rand (void *p_cmd_cplt_cback)
BT_HDR *p;
UINT8 *pp;
if ((p = HCI_GET_CMD_BUF(sizeof (void *) +
HCIC_PARAM_SIZE_BLE_RAND)) == NULL) {
if ((p = HCI_GET_CMD_BUF(HCIC_PARAM_SIZE_BLE_RAND)) == NULL) {
return (FALSE);
}
pp = (UINT8 *)(p + 1);
p->len = HCIC_PREAMBLE_SIZE + HCIC_PARAM_SIZE_BLE_RAND;
p->offset = sizeof(void *);
p->offset = 0;
*((void **)pp) = p_cmd_cplt_cback; /* Store command complete callback in buffer */
pp += sizeof(void *); /* Skip over callback pointer */
hci_cmd_metadata_t *metadata = HCI_GET_CMD_METAMSG(p);
metadata->context = p_cmd_cplt_cback;
UINT16_TO_STREAM (pp, HCI_BLE_RAND);
UINT8_TO_STREAM (pp, HCIC_PARAM_SIZE_BLE_RAND);
@ -1032,15 +1028,26 @@ BOOLEAN btsnd_hcic_ble_set_data_length(UINT16 conn_handle, UINT16 tx_octets, UIN
return TRUE;
}
BOOLEAN btsnd_hcic_ble_update_adv_report_flow_control (UINT16 num)
BOOLEAN btsnd_hcic_ble_update_adv_report_flow_control (UINT16 num, BT_HDR *static_buf)
{
BT_HDR *p;
UINT8 *pp;
if ((p = HCI_GET_CMD_BUF (HCIC_PARAM_SIZE_BLE_UPDATE_ADV_FLOW_CONTROL)) == NULL) {
return (FALSE);
if (static_buf != NULL) {
p = static_buf;
} else {
if ((p = HCI_GET_CMD_BUF (HCIC_PARAM_SIZE_BLE_UPDATE_ADV_FLOW_CONTROL)) == NULL) {
return (FALSE);
}
}
hci_cmd_metadata_t *metadata = HCI_GET_CMD_METAMSG(p);
metadata->flags_src = HCI_CMD_MSG_F_SRC_NOACK;
if (static_buf == p) {
assert(metadata->command_free_cb != NULL);
}
p->layer_specific = HCI_CMD_BUF_TYPE_METADATA;
pp = (UINT8 *)(p + 1);
p->len = HCIC_PREAMBLE_SIZE + HCIC_PARAM_SIZE_BLE_UPDATE_ADV_FLOW_CONTROL;

View File

@ -35,8 +35,6 @@
#include "btm_int.h" /* Included for UIPC_* macro definitions */
#define HCI_GET_CMD_BUF(paramlen) ((BT_HDR *)osi_malloc(HCIC_PREAMBLE_SIZE + sizeof(BT_HDR) + paramlen))
BOOLEAN btsnd_hcic_inquiry(const LAP inq_lap, UINT8 duration, UINT8 response_cnt)
{
BT_HDR *p;
@ -1331,6 +1329,9 @@ BOOLEAN btsnd_hcic_host_num_xmitted_pkts (UINT8 num_handles, UINT16 *handle,
p->len = HCIC_PREAMBLE_SIZE + 1 + (num_handles * 4);
p->offset = 0;
hci_cmd_metadata_t *metadata = HCI_GET_CMD_METAMSG(p);
metadata->flags_src |= HCI_CMD_MSG_F_SRC_NOACK;
UINT16_TO_STREAM (pp, HCI_HOST_NUM_PACKETS_DONE);
UINT8_TO_STREAM (pp, p->len - HCIC_PREAMBLE_SIZE);
@ -1431,9 +1432,13 @@ BOOLEAN btsnd_hcic_sniff_sub_rate(UINT16 handle, UINT16 max_lat,
#endif /* BTM_SSR_INCLUDED */
/**** Extended Inquiry Response Commands ****/
void btsnd_hcic_write_ext_inquiry_response (void *buffer, UINT8 fec_req)
void btsnd_hcic_write_ext_inquiry_response (BT_HDR *buffer, UINT8 fec_req)
{
BT_HDR *p = (BT_HDR *)buffer;
BT_HDR *p;
if ((p = HCI_GET_CMD_BUF(HCIC_PARAM_SIZE_EXT_INQ_RESP)) == NULL) {
return;
}
UINT8 *pp = (UINT8 *)(p + 1);
p->len = HCIC_PREAMBLE_SIZE + HCIC_PARAM_SIZE_EXT_INQ_RESP;
@ -1441,9 +1446,10 @@ void btsnd_hcic_write_ext_inquiry_response (void *buffer, UINT8 fec_req)
UINT16_TO_STREAM (pp, HCI_WRITE_EXT_INQ_RESPONSE);
UINT8_TO_STREAM (pp, HCIC_PARAM_SIZE_EXT_INQ_RESP);
UINT8_TO_STREAM (pp, fec_req);
memcpy(pp, buffer->data + 4, p->len - 4);
btu_hcif_send_cmd (LOCAL_BR_EDR_CONTROLLER_ID, p);
}
@ -1862,17 +1868,17 @@ BOOLEAN btsnd_hcic_write_pagescan_type (UINT8 type)
#error "HCI_CMD_POOL_BUF_SIZE must be larger than 268"
#endif
void btsnd_hcic_vendor_spec_cmd (void *buffer, UINT16 opcode, UINT8 len,
void btsnd_hcic_vendor_spec_cmd (BT_HDR *buffer, UINT16 opcode, UINT8 len,
UINT8 *p_data, void *p_cmd_cplt_cback)
{
BT_HDR *p = (BT_HDR *)buffer;
BT_HDR *p = buffer;
UINT8 *pp = (UINT8 *)(p + 1);
p->len = HCIC_PREAMBLE_SIZE + len;
p->offset = sizeof(void *);
p->offset = 0;
*((void **)pp) = p_cmd_cplt_cback; /* Store command complete callback in buffer */
pp += sizeof(void *); /* Skip over callback pointer */
hci_cmd_metadata_t * metadata = HCI_GET_CMD_METAMSG(p);
metadata->context = p_cmd_cplt_cback;
UINT16_TO_STREAM (pp, HCI_GRP_VENDOR_SPECIFIC | opcode);
UINT8_TO_STREAM (pp, len);

View File

@ -175,6 +175,7 @@ typedef enum {
SIG_BTU_GENERAL_ALARM,
SIG_BTU_ONESHOT_ALARM,
SIG_BTU_L2CAP_ALARM,
SIG_BTU_HCI_ADV_RPT_MSG,
SIG_BTU_NUM,
} SIG_BTU_t;
@ -297,6 +298,7 @@ bool btu_task_post(uint32_t sig, void *param, uint32_t timeout);
int get_btu_work_queue_size(void);
osi_thread_t *btu_get_current_thread(void);
/*
#ifdef __cplusplus
}

View File

@ -19,9 +19,66 @@
#ifndef HCIMSGS_H
#define HCIMSGS_H
#include <stddef.h>
#include "common/bt_target.h"
#include "stack/hcidefs.h"
#include "stack/bt_types.h"
#include "osi/pkt_queue.h"
#include "osi/allocator.h"
#define HCI_CMD_BUF_TYPE_METADATA (0xa56e)
#define HCI_CMD_MSG_F_SRC_NOACK (0x01)
typedef void (*hci_cmd_cmpl_cb)(BT_HDR *response, void *context);
typedef void (*hci_cmd_stat_cb)(uint8_t status, BT_HDR *command, void *context);
typedef void (*hci_cmd_free_cb)(pkt_linked_item_t *linked_pkt);
typedef struct {
uint8_t flags_src;
uint8_t flags_vnd; // used for downstream layer
uint16_t opcode;
hci_cmd_cmpl_cb command_complete_cb;
hci_cmd_stat_cb command_status_cb;
void *context;
void *complete_future;
hci_cmd_free_cb command_free_cb;
BT_HDR command;
} hci_cmd_metadata_t;
#define HCI_CMD_METADATA_HDR_SIZE (sizeof(hci_cmd_metadata_t))
#define HCI_CMD_LINKED_BUF_SIZE(paramlen) (BT_PKT_LINKED_HDR_SIZE + HCI_CMD_METADATA_HDR_SIZE + HCIC_PREAMBLE_SIZE + (paramlen))
#define HCI_GET_CMD_METAMSG(cmd_ptr) (hci_cmd_metadata_t *)((void *)(cmd_ptr) - offsetof(hci_cmd_metadata_t, command))
#define HCI_GET_CMD_LINKED_STRUCT(metadata_ptr) (pkt_linked_item_t *)((void *)(metadata_ptr) - offsetof(pkt_linked_item_t, data))
static inline BT_HDR *hci_get_cmd_buf(size_t param_len)
{
pkt_linked_item_t *linked_pkt = osi_calloc(HCI_CMD_LINKED_BUF_SIZE(param_len));
if (linked_pkt == NULL) {
return NULL;
}
hci_cmd_metadata_t *metadata = (hci_cmd_metadata_t *)linked_pkt->data;
BT_HDR *command = &metadata->command;
command->layer_specific = HCI_CMD_BUF_TYPE_METADATA;
command->len = HCIC_PREAMBLE_SIZE + param_len;
command->offset = 0;
return command;
}
static inline void hci_free_cmd_buf(BT_HDR *buf)
{
assert(buf->layer_specific == HCI_CMD_BUF_TYPE_METADATA);
hci_cmd_metadata_t *metadata = HCI_GET_CMD_METAMSG(buf);
pkt_linked_item_t *linked_pkt = HCI_GET_CMD_LINKED_STRUCT(metadata);
osi_free(linked_pkt);
}
#define HCI_GET_CMD_BUF(param_len) hci_get_cmd_buf(param_len)
#define HCI_FREE_CMD_BUF(buf) hci_free_cmd_buf(buf)
void bte_main_hci_send(BT_HDR *p_msg, UINT16 event);
void bte_main_lpm_allow_bt_device_sleep(void);
@ -378,7 +435,7 @@ BOOLEAN btsnd_hcic_sniff_sub_rate(UINT16 handle, UINT16 max_lat,
#endif /* BTM_SSR_INCLUDED */
/* Extended Inquiry Response */
void btsnd_hcic_write_ext_inquiry_response(void *buffer, UINT8 fec_req);
void btsnd_hcic_write_ext_inquiry_response(BT_HDR *buffer, UINT8 fec_req);
#define HCIC_PARAM_SIZE_EXT_INQ_RESP 241
@ -641,7 +698,7 @@ BOOLEAN btsnd_hcic_write_inquiry_mode(UINT8 type); /* Write Inquiry
#define HCID_GET_SCO_LEN(p) (*((UINT8 *)((p) + 1) + p->offset + 2))
void btsnd_hcic_vendor_spec_cmd (void *buffer, UINT16 opcode,
void btsnd_hcic_vendor_spec_cmd (BT_HDR *buffer, UINT16 opcode,
UINT8 len, UINT8 *p_data,
void *p_cmd_cplt_cback);
@ -882,7 +939,7 @@ BOOLEAN btsnd_hcic_read_authenticated_payload_tout(UINT16 handle);
BOOLEAN btsnd_hcic_write_authenticated_payload_tout(UINT16 handle,
UINT16 timeout);
BOOLEAN btsnd_hcic_ble_update_adv_report_flow_control (UINT16 num);
BOOLEAN btsnd_hcic_ble_update_adv_report_flow_control (UINT16 num, BT_HDR *static_buf);
#if (BLE_50_FEATURE_SUPPORT == TRUE)
BOOLEAN btsnd_hcic_ble_read_phy(UINT16 conn_handle);