Merge branch 'bugfix/spp_50005_crash' into 'master'

bt: Fixed L2CAP ERTM tx window size being too large, which caused memory exhaustion when multiple SPP connections were open

Closes BT-2411

See merge request espressif/esp-idf!20228
Wang Meng Yang 2022-10-28 17:12:44 +08:00
commit d938cb7e28
11 changed files with 231 additions and 130 deletions
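
For a rough sense of scale (the numbers below are illustrative assumptions, not figures from this merge request): in ERTM the transmitter may hold up to a full tx window of un-acked I-frames per channel, so worst-case buffering grows with the window size, the RFCOMM MTU and the number of open SPP connections. A minimal back-of-envelope sketch in C:

/* Hypothetical worst-case ERTM tx buffering.
 * The window values match the RFC_FCR_OPT_TX_WINDOW_SIZE change below;
 * the MTU and connection count are assumptions for illustration only. */
#include <stdio.h>

int main(void)
{
    const unsigned mtu      = 3 * 330; /* BTA_JV_DEF_RFC_MTU */
    const unsigned channels = 5;       /* assumed number of SPP connections */

    printf("old (tx window 20): ~%u bytes\n", 20u * mtu * channels);
    printf("new (tx window 10): ~%u bytes\n", 10u * mtu * channels);
    return 0;
}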

View File

@@ -61,6 +61,15 @@ config BT_SPP_ENABLED
help
This enables the Serial Port Profile
config BT_SPP_SEND_BUF_DEFAULT
int "SPP default send buffer size"
depends on BT_SPP_ENABLED
range 100 10000
default 4000
help
Sets the default send buffer size for new SPP channels. Setting a smaller
default SNDBUF size can save some memory, but may decrease performance.
config BT_L2CAP_ENABLED
bool "BT L2CAP"
depends on BT_CLASSIC_ENABLED
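
For example, a project that prefers to trade SPP throughput for RAM could lower the new option in its sdkconfig; the value below is purely illustrative (any value within the declared 100-10000 range is accepted):

# sdkconfig / sdkconfig.defaults (illustrative value, not a recommendation)
CONFIG_BT_SPP_SEND_BUF_DEFAULT=2000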

View File

@@ -53,6 +53,7 @@ typedef UINT8 tBTA_JV_STATUS;
#define BTA_JV_MAX_L2C_CONN GAP_MAX_CONNECTIONS /* GAP handle is used as index, hence do not change this value */
#define BTA_JV_MAX_SCN PORT_MAX_RFC_PORTS /* same as BTM_MAX_SCN (in btm_int.h) */
#define BTA_JV_MAX_RFC_CONN MAX_RFC_PORTS
#define BTA_JV_MAX_CREDIT_NUM PORT_RX_BUF_HIGH_WM
#ifndef BTA_JV_DEF_RFC_MTU
#define BTA_JV_DEF_RFC_MTU (3*330)
@@ -286,6 +287,7 @@ typedef struct {
/* data associated with BTA_JV_RFCOMM_OPEN_EVT */
typedef struct {
tBTA_JV_STATUS status; /* Whether the operation succeeded or failed. */
UINT16 peer_mtu; /* Max MTU that port can send */
UINT32 handle; /* The connection handle */
BD_ADDR rem_bda; /* The peer address */
} tBTA_JV_RFCOMM_OPEN;
@@ -293,6 +295,7 @@ typedef struct {
/* data associated with BTA_JV_RFCOMM_SRV_OPEN_EVT */
typedef struct {
tBTA_JV_STATUS status; /* Whether the operation succeeded or failed. */
UINT16 peer_mtu; /* Max MTU that port can send */
UINT32 handle; /* The connection handle */
UINT32 new_listen_handle; /* The new listen handle */
BD_ADDR rem_bda; /* The peer address */

View File

@@ -1685,6 +1685,7 @@ static void bta_jv_port_mgmt_cl_cback(UINT32 code, UINT16 port_handle, void* dat
BD_ADDR rem_bda = {0};
UINT16 lcid;
tBTA_JV_RFCOMM_CBACK *p_cback; /* the callback function */
tPORT_MGMT_CL_CALLBACK_ARG *p_mgmt_cb_arg = (tPORT_MGMT_CL_CALLBACK_ARG *)data;
APPL_TRACE_DEBUG( "bta_jv_port_mgmt_cl_cback:code:%d, port_handle%d", code, port_handle);
if (NULL == p_cb || NULL == p_cb->p_cback) {
@@ -1701,6 +1702,9 @@ static void bta_jv_port_mgmt_cl_cback(UINT32 code, UINT16 port_handle, void* dat
evt_data.rfc_open.status = BTA_JV_SUCCESS;
bdcpy(evt_data.rfc_open.rem_bda, rem_bda);
p_pcb->state = BTA_JV_ST_CL_OPEN;
if (p_mgmt_cb_arg) {
evt_data.rfc_open.peer_mtu = p_mgmt_cb_arg->peer_mtu;
}
p_cb->p_cback(BTA_JV_RFCOMM_OPEN_EVT, &evt_data, p_pcb->user_data);
} else {
evt_data.rfc_close.handle = p_pcb->handle;
@@ -1981,6 +1985,7 @@ static void bta_jv_port_mgmt_sr_cback(UINT32 code, UINT16 port_handle, void *dat
}
evt_data.rfc_srv_open.handle = p_pcb->handle;
evt_data.rfc_srv_open.status = BTA_JV_SUCCESS;
evt_data.rfc_srv_open.peer_mtu = p_mgmt_cb_arg->peer_mtu;
bdcpy(evt_data.rfc_srv_open.rem_bda, rem_bda);
tBTA_JV_PCB *p_pcb_new_listen = bta_jv_add_rfc_port(p_cb, p_pcb);
if (p_pcb_new_listen) {

View File

@@ -18,8 +18,6 @@
#define ESP_SPP_MAX_SESSION BTA_JV_MAX_RFC_SR_SESSION
#define ESP_SPP_SERVER_NAME_MAX 32
#define ESP_SPP_RINGBUF_SIZE 1000
#define BTC_SPP_INVALID_SCN 0x00
typedef enum {

View File

@@ -49,11 +49,14 @@ typedef struct {
typedef struct {
bool connected;
bool is_server;
bool is_writing;
uint8_t serial;
uint8_t scn;
uint8_t max_session;
uint16_t mtu;
uint16_t credit_rx;
uint16_t write_data_len;
uint32_t id;
uint32_t mtu;//unused
uint32_t sdp_handle;
uint32_t rfc_handle;
uint32_t rfc_port_handle;
@@ -64,7 +67,10 @@ typedef struct {
esp_spp_sec_t security;
esp_bd_addr_t addr;
slot_data_t rx;
slot_data_t tx;
union {
slot_data_t tx;
RingbufHandle_t ringbuf_write;
};
uint8_t service_uuid[16];
char service_name[ESP_SPP_SERVER_NAME_MAX + 1];
} spp_slot_t;
@@ -133,7 +139,11 @@ static spp_slot_t *spp_malloc_slot(void)
(*slot)->fd = -1;
(*slot)->connected = false;
(*slot)->is_server = false;
(*slot)->mtu = 0;
(*slot)->credit_rx = BTA_JV_MAX_CREDIT_NUM;
(*slot)->write_data = NULL;
(*slot)->write_data_len = 0;
(*slot)->is_writing = false;
(*slot)->close_alarm = NULL;
/* clear the old event bits */
if (spp_local_param.tx_event_group) {
@@ -145,12 +155,18 @@ static spp_slot_t *spp_malloc_slot(void)
err_no = 1;
goto err;
}
if (init_slot_data(&(*slot)->tx, SLOT_TX_QUEUE_SIZE)) {
BTC_TRACE_ERROR("%s unable to malloc tx queue!", __func__);
err_no = 2;
goto err;
}
if (spp_local_param.spp_mode == ESP_SPP_MODE_VFS) {
if (spp_local_param.spp_mode == ESP_SPP_MODE_CB) {
if (init_slot_data(&(*slot)->tx, SLOT_TX_QUEUE_SIZE)) {
BTC_TRACE_ERROR("%s unable to malloc tx queue!", __func__);
err_no = 2;
goto err;
}
} else {
if (((*slot)->ringbuf_write = xRingbufferCreate(BTC_SPP_SEND_BUF_DEFAULT, RINGBUF_TYPE_BYTEBUF)) == NULL) {
BTC_TRACE_ERROR("%s write ringbuffer create error!", __func__);
err_no = 2;
goto err;
}
if (esp_vfs_register_fd(spp_local_param.spp_vfs_id, &(*slot)->fd) != ESP_OK) {
BTC_TRACE_ERROR("%s unable to register fd!", __func__);
err_no = 3;
@@ -165,7 +181,9 @@ static spp_slot_t *spp_malloc_slot(void)
err:
switch (err_no) {
case 3:
free_slot_data(&(*slot)->tx);
if (spp_local_param.spp_mode == ESP_SPP_MODE_VFS) {
vRingbufferDelete((*slot)->ringbuf_write);
}
case 2:
free_slot_data(&(*slot)->rx);
case 1:
@@ -238,7 +256,7 @@ static void close_timeout_handler(void *arg)
status = btc_transfer_context(&msg, arg, sizeof(tBTA_JV), NULL);
if (arg) {
free(arg);
osi_free(arg);
}
if (status != BT_STATUS_SUCCESS) {
@@ -255,8 +273,10 @@ static void spp_free_slot(spp_slot_t *slot)
if (spp_local_param.spp_mode == ESP_SPP_MODE_VFS) {
(void) esp_vfs_unregister_fd(spp_local_param.spp_vfs_id, slot->fd);
xEventGroupSetBits(spp_local_param.tx_event_group, SLOT_CLOSE_BIT(slot->serial));
vRingbufferDelete(slot->ringbuf_write);
} else {
free_slot_data(&slot->tx);
}
free_slot_data(&slot->tx);
free_slot_data(&slot->rx);
if (slot->close_alarm) {
osi_alarm_free(slot->close_alarm);
@@ -333,6 +353,7 @@ static void *btc_spp_rfcomm_inter_cb(tBTA_JV_EVT event, tBTA_JV *p_data, void *u
slot_new->max_session = slot->max_session;
strcpy(slot_new->service_name, slot->service_name);
slot_new->sdp_handle = slot->sdp_handle;
slot_new->mtu = p_data->rfc_srv_open.peer_mtu;
slot_new->rfc_handle = p_data->rfc_srv_open.handle;
slot_new->rfc_port_handle = BTA_JvRfcommGetPortHdl(slot_new->rfc_handle);
BTA_JvSetPmProfile(p_data->rfc_srv_open.handle, BTA_JV_PM_ALL, BTA_JV_CONN_OPEN);
@@ -375,6 +396,7 @@ static void *btc_spp_rfcomm_inter_cb(tBTA_JV_EVT event, tBTA_JV *p_data, void *u
}
slot->connected = true;
slot->rfc_handle = p_data->rfc_open.handle;
slot->mtu = p_data->rfc_open.peer_mtu;
slot->rfc_port_handle = BTA_JvRfcommGetPortHdl(p_data->rfc_open.handle);
BTA_JvSetPmProfile(p_data->rfc_open.handle, BTA_JV_PM_ID_1, BTA_JV_CONN_OPEN);
break;
@@ -731,7 +753,7 @@ static void btc_spp_stop_srv(btc_spp_args_t *arg)
esp_spp_status_t ret = ESP_SPP_SUCCESS;
bool is_remove_all = false;
uint8_t i, j, srv_cnt = 0;
uint8_t *srv_scn_arr = osi_malloc(MAX_RFC_PORTS);
uint8_t *srv_scn_arr = NULL;
if (arg->stop_srv.scn == BTC_SPP_INVALID_SCN) {
is_remove_all = true;
}
@@ -742,6 +764,7 @@ static void btc_spp_stop_srv(btc_spp_args_t *arg)
ret = ESP_SPP_NEED_INIT;
break;
}
srv_scn_arr = osi_malloc(MAX_RFC_PORTS);
if (srv_scn_arr == NULL) {
BTC_TRACE_ERROR("%s malloc srv_scn_arr failed\n", __func__);
ret = ESP_SPP_NO_RESOURCE;
@@ -834,6 +857,7 @@ static void btc_spp_write(btc_spp_args_t *arg)
static void btc_spp_write(btc_spp_args_t *arg)
{
esp_spp_status_t ret = ESP_SPP_SUCCESS;
do {
if (!is_spp_init()) {
BTC_TRACE_ERROR("%s SPP have not been init\n", __func__);
@@ -854,18 +878,23 @@ static void btc_spp_write(btc_spp_args_t *arg)
break;
}
if (spp_local_param.spp_mode == ESP_SPP_MODE_VFS) {
BT_HDR *p_buf;
if ((p_buf = fixed_queue_try_peek_first(slot->tx.queue)) != NULL && p_buf->layer_specific == 0) {
p_buf->event++;
p_buf->layer_specific = 1;
BTA_JvRfcommWrite(arg->write.handle, slot->id, p_buf->len, p_buf->data + p_buf->offset);
if (slot->is_writing) {
osi_mutex_unlock(&spp_local_param.spp_slot_mutex);
return;
}
size_t item_size = 0;
uint8_t *data = xRingbufferReceiveUpTo(slot->ringbuf_write, &item_size, 0, slot->mtu);
if (item_size > 0) {
slot->write_data = data;
slot->write_data_len = item_size;
slot->is_writing = true;
BTA_JvRfcommWrite(arg->write.handle, slot->id, item_size, data);
}
} else {
if (fixed_queue_enqueue(slot->tx.queue, arg->write.p_data, 0)) {
BTA_JvRfcommWrite(arg->write.handle, slot->id, arg->write.len, arg->write.p_data);
} else {
ret = ESP_SPP_NO_RESOURCE;
break;
}
}
osi_mutex_unlock(&spp_local_param.spp_slot_mutex);
@@ -1063,37 +1092,44 @@ void btc_spp_cb_handler(btc_msg_t *msg)
}
} else {
if (slot) {
BT_HDR *p_buf;
size_t item_size = 0;
size_t items_waiting = 0;
serial = slot->serial;
if ((p_buf = fixed_queue_try_peek_first(slot->tx.queue)) == NULL) {
osi_mutex_unlock(&spp_local_param.spp_slot_mutex);
break;
}
if (p_data->rfc_write.status == BTA_JV_SUCCESS) {
p_buf->len -= p_data->rfc_write.len;
p_buf->offset += p_data->rfc_write.len;
p_buf->layer_specific = 0;
if (p_buf->len == 0) {
osi_free(fixed_queue_dequeue(slot->tx.queue, FIXED_QUEUE_MAX_TIMEOUT));
if (fixed_queue_length(slot->tx.queue) <= SLOT_TX_QUEUE_LOW_WM) {
xEventGroupSetBits(spp_local_param.tx_event_group, SLOT_WRITE_BIT(serial));
}
vRingbufferReturnItem(slot->ringbuf_write,slot->write_data);
slot->write_data = NULL;
slot->is_writing = false;
slot->write_data_len = 0;
vRingbufferGetInfo(slot->ringbuf_write, NULL, NULL, NULL, NULL, &items_waiting);
if (BTC_SPP_SEND_BUF_DEFAULT > items_waiting) {
xEventGroupSetBits(spp_local_param.tx_event_group, SLOT_WRITE_BIT(serial));
}
if ((p_buf = fixed_queue_try_peek_first(slot->tx.queue)) != NULL && p_buf->layer_specific == 0 &&
!p_data->rfc_write.cong) {
p_buf->layer_specific = 1;
p_buf->event++;
BTA_JvRfcommWrite(p_data->rfc_write.handle, slot->id, p_buf->len, p_buf->data + p_buf->offset);
if (items_waiting == 0) {
osi_mutex_unlock(&spp_local_param.spp_slot_mutex);
break;
}
if (!p_data->rfc_write.cong) {
uint8_t *data = xRingbufferReceiveUpTo(slot->ringbuf_write, &item_size, 0, slot->mtu);
if (item_size > 0) {
slot->write_data = data;
slot->write_data_len = item_size;
slot->is_writing = true;
BTA_JvRfcommWrite(p_data->rfc_write.handle, slot->id, item_size, data);
}
}
} else {
if (!p_data->rfc_write.old_cong) {
// PORT_WriteDataCO failed
BTC_TRACE_ERROR("PORT_WriteDataCO failed p_buf:%p, handle:%d\n", p_buf,
BTC_TRACE_ERROR("PORT_WriteDataCO failed p_buf:%p, handle:%d\n", slot->write_data,
p_data->rfc_write.handle);
} else {
// need rewrite
p_buf->layer_specific = 0;
if (!p_data->rfc_write.cong && slot->connected) {
slot->is_writing = true;
BTA_JvRfcommWrite(p_data->rfc_write.handle, slot->id, slot->write_data_len, slot->write_data);
} else {
slot->is_writing = false;
}
}
}
}
@@ -1185,12 +1221,19 @@ void btc_spp_cb_handler(btc_msg_t *msg)
BTC_TRACE_ERROR("%s unable to find RFCOMM slot, event:%d!", __func__, event);
break;
}
if (!p_data->rfc_cong.cong) {
BT_HDR *p_buf;
if ((p_buf = fixed_queue_try_peek_first(slot->tx.queue)) != NULL && p_buf->layer_specific == 0) {
p_buf->event++;
p_buf->layer_specific = 1;
BTA_JvRfcommWrite(p_data->rfc_cong.handle, slot->id, p_buf->len, p_buf->data + p_buf->offset);
if (!p_data->rfc_cong.cong && !slot->is_writing) {
if (slot->write_data == NULL && slot->write_data_len == 0) {
size_t item_size = 0;
uint8_t *data = xRingbufferReceiveUpTo(slot->ringbuf_write, &item_size, 0, slot->mtu);
if (item_size > 0) {
slot->write_data = data;
slot->write_data_len = item_size;
slot->is_writing = true;
BTA_JvRfcommWrite(p_data->rfc_cong.handle, slot->id, item_size, data);
}
} else {
slot->is_writing = true;
BTA_JvRfcommWrite(p_data->rfc_cong.handle, slot->id, slot->write_data_len, slot->write_data);
}
}
osi_mutex_unlock(&spp_local_param.spp_slot_mutex);
@@ -1236,6 +1279,9 @@ void btc_spp_cb_handler(btc_msg_t *msg)
}
}
if (count != 0) {
osi_mutex_lock(&spp_local_param.spp_slot_mutex, OSI_MUTEX_MAX_TIMEOUT);
slot->credit_rx += count;
osi_mutex_unlock(&spp_local_param.spp_slot_mutex);
BTA_JvRfcommFlowControl(p_data->data_ind.handle, count);
BTC_TRACE_DEBUG("%s give credits:%d\n", __func__, count);
}
@@ -1301,16 +1347,17 @@ int bta_co_rfc_data_incoming(void *user_data, BT_HDR *p_buf)
BTC_TRACE_DEBUG("%s data post! %d, %d", __func__, slot->rfc_handle, rx_len);
status = btc_transfer_context(&msg, &p_data, sizeof(tBTA_JV), NULL);
assert(status == BT_STATUS_SUCCESS);
} else if (fixed_queue_length(slot->rx.queue) > 2) {
BTC_TRACE_DEBUG("%s data post stop! %d %d", __func__, slot->rfc_handle, fixed_queue_length(slot->rx.queue));
ret = 0; // reserved for other flow control
}
} else {
fixed_queue_enqueue(slot->rx.queue, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
if (fixed_queue_length(slot->rx.queue) > 2) {
BTC_TRACE_DEBUG("%s data post stop! %d %d", __func__, slot->rfc_handle, fixed_queue_length(slot->rx.queue));
ret = 0; // reserved for other flow control
}
}
if (--slot->credit_rx == 0) {
BTC_TRACE_DEBUG("%s data post stop! %d %d", __func__, slot->rfc_handle, fixed_queue_length(slot->rx.queue));
ret = 0; // reserved for other flow control
}
if (slot->credit_rx > BTA_JV_MAX_CREDIT_NUM) {
BTC_TRACE_WARNING("%s credit %d", __func__, slot->credit_rx);
assert(0);
}
osi_mutex_unlock(&spp_local_param.spp_slot_mutex);
return ret;
@@ -1348,7 +1395,6 @@ esp_err_t spp_send_data_to_btc(uint32_t handle, int len, uint8_t *p_data, esp_sp
== BT_STATUS_SUCCESS ? ESP_OK : ESP_FAIL);
}
static ssize_t spp_vfs_write(int fd, const void * data, size_t size)
{
assert(data != NULL);
@@ -1375,77 +1421,67 @@ static ssize_t spp_vfs_write(int fd, const void * data, size_t size)
serial = slot->serial;
osi_mutex_unlock(&spp_local_param.spp_slot_mutex);
ssize_t sent = 0, write_size = 0;
size_t tx_len;
BT_HDR *p_buf = NULL;
bool enqueue_status= false;
ssize_t sent = 0;
size_t items_waiting = 0;
size_t item_size = 0;
EventBits_t tx_event_group_val = 0;
while (1) {
BaseType_t done = false;
while (size) {
tx_event_group_val = 0;
if (size) {
if (p_buf == NULL) {
write_size = size < BTA_JV_DEF_RFC_MTU ? size : BTA_JV_DEF_RFC_MTU;
if ((p_buf = osi_malloc(sizeof(BT_HDR) + write_size)) == NULL) {
BTC_TRACE_ERROR("%s malloc failed!", __func__);
errno = ENOMEM;
sent = -1;
break;
}
p_buf->offset = 0;
p_buf->len = write_size;
p_buf->event = 0; // indicate the p_buf be sent count
p_buf->layer_specific = 0; // indicate the p_buf whether to be sent, 0 - ready to send; 1 - have sent
memcpy((UINT8 *)(p_buf + 1), data + sent, write_size);
}
} else {
break;
}
osi_mutex_lock(&spp_local_param.spp_slot_mutex, OSI_MUTEX_MAX_TIMEOUT);
if ((slot = spp_local_param.spp_slots[serial]) != NULL) {
tx_len = fixed_queue_length(slot->tx.queue);
enqueue_status = fixed_queue_enqueue(slot->tx.queue, p_buf, 0);
if (!enqueue_status) {
BTC_TRACE_DEBUG("%s tx_len:%d, fd:%d\n", __func__, fixed_queue_length(slot->tx.queue), fd);
osi_mutex_unlock(&spp_local_param.spp_slot_mutex);
//block untill under water level, be closed or time out
tx_event_group_val =
xEventGroupWaitBits(spp_local_param.tx_event_group, SLOT_WRITE_BIT(serial) | SLOT_CLOSE_BIT(serial), pdTRUE,
pdFALSE, VFS_WRITE_TIMEOUT / portTICK_PERIOD_MS);
if (tx_event_group_val & SLOT_CLOSE_BIT(serial)) {
BTC_TRACE_ERROR("%s exit for RFCOMM close, fd:%d!", __func__, fd);
errno = EPIPE;
sent = -1;
break;
} else if (tx_event_group_val & SLOT_WRITE_BIT(serial)) {
slot = spp_local_param.spp_slots[serial];
if (slot && slot->connected) {
items_waiting = 0;
item_size = 0;
vRingbufferGetInfo(slot->ringbuf_write, NULL, NULL, NULL, NULL, &items_waiting);
if (items_waiting < BTC_SPP_SEND_BUF_DEFAULT) {
if ((BTC_SPP_SEND_BUF_DEFAULT - items_waiting) > size) {
item_size = size;
done = xRingbufferSend(slot->ringbuf_write, (void *)data + sent, item_size, 0);
} else {
item_size = BTC_SPP_SEND_BUF_DEFAULT - items_waiting;
done = xRingbufferSend(slot->ringbuf_write, (void *)data + sent, item_size, 0);
}
if (done) {
sent += item_size;
size -= item_size;
if (slot->write_data == NULL) {
spp_send_data_to_btc(slot->rfc_handle, 0, NULL, ESP_SPP_MODE_VFS);
}
osi_mutex_unlock(&spp_local_param.spp_slot_mutex);
continue;
} else if (tx_event_group_val == 0) {
BTC_TRACE_ERROR("%s exit for time out, fd:%d!", __func__, fd);
errno = EBUSY;
sent = -1;
break;
}
}
if (tx_len == 0) {
spp_send_data_to_btc(slot->rfc_handle, 0, NULL, ESP_SPP_MODE_VFS);
BTC_TRACE_DEBUG("%s items_waiting:%d, fd:%d\n", __func__, items_waiting, fd);
osi_mutex_unlock(&spp_local_param.spp_slot_mutex);
// block untill under water level, be closed or time out
tx_event_group_val =
xEventGroupWaitBits(spp_local_param.tx_event_group, SLOT_WRITE_BIT(serial) | SLOT_CLOSE_BIT(serial), pdTRUE,
pdFALSE, VFS_WRITE_TIMEOUT / portTICK_PERIOD_MS);
if (tx_event_group_val & SLOT_CLOSE_BIT(serial)) {
BTC_TRACE_ERROR("%s exit for RFCOMM close, fd:%d!", __func__, fd);
errno = EPIPE;
sent = -1;
break;
} else if (tx_event_group_val & SLOT_WRITE_BIT(serial)) {
continue;
} else if (tx_event_group_val == 0) {
BTC_TRACE_ERROR("%s exit for time out, fd:%d!", __func__, fd);
errno = EBUSY;
sent = -1;
break;
}
sent += write_size;
size -= write_size;
p_buf = NULL;
} else {
osi_mutex_unlock(&spp_local_param.spp_slot_mutex);
errno = EPIPE;
sent = -1;
break;
}
osi_mutex_unlock(&spp_local_param.spp_slot_mutex);
}
//errors occur, need to cleanup
if (p_buf) {
osi_free(p_buf);
p_buf = NULL;
}
return sent;
}
@@ -1549,6 +1585,7 @@ static ssize_t spp_vfs_read(int fd, void * dst, size_t size)
}
if (count > 0) {
osi_mutex_lock(&spp_local_param.spp_slot_mutex, OSI_MUTEX_MAX_TIMEOUT);
slot->credit_rx += count;
if ((slot = spp_local_param.spp_slots[serial]) != NULL) {
BTA_JvRfcommFlowControl(slot->rfc_handle, count);
BTC_TRACE_DEBUG("%s give credits:%d\n", __func__, count);
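
Taken together, the btc_spp.c changes replace the per-slot BT_HDR tx queue in VFS mode with a byte ring buffer plus credit-based rx flow control: spp_vfs_write() is the producer and blocks on the slot's event group bit once BTC_SPP_SEND_BUF_DEFAULT bytes are queued, the RFCOMM write-complete path drains at most one peer-MTU chunk per BTA_JvRfcommWrite() call, and incoming buffers consume credit_rx until the application reads the data and credits are handed back through BTA_JvRfcommFlowControl(). A stripped-down sketch of the tx producer/consumer pattern with the same FreeRTOS ring buffer calls (the demo_* names and sizes are illustrative, not part of the commit):

/* Minimal bounded producer/consumer sketch using esp_ringbuf; error handling trimmed. */
#include <stdint.h>
#include "freertos/FreeRTOS.h"
#include "freertos/ringbuf.h"
#include "freertos/event_groups.h"

#define DEMO_BUF_SIZE  4000          /* stands in for BTC_SPP_SEND_BUF_DEFAULT */
#define DEMO_MTU       990           /* stands in for the negotiated peer MTU */
#define DEMO_WRITE_BIT (1 << 0)      /* "space is available again" */

static RingbufHandle_t    s_rb;
static EventGroupHandle_t s_evt;

static void demo_init(void)
{
    s_rb  = xRingbufferCreate(DEMO_BUF_SIZE, RINGBUF_TYPE_BYTEBUF);
    s_evt = xEventGroupCreate();
}

/* Producer: the role spp_vfs_write() plays. Copies as much as fits, then blocks. */
static size_t demo_write(const uint8_t *data, size_t size)
{
    size_t sent = 0;
    while (size) {
        UBaseType_t waiting = 0;
        vRingbufferGetInfo(s_rb, NULL, NULL, NULL, NULL, &waiting);
        size_t space = (waiting < DEMO_BUF_SIZE) ? (DEMO_BUF_SIZE - waiting) : 0;
        size_t chunk = (size < space) ? size : space;
        if (chunk > 0 && xRingbufferSend(s_rb, data + sent, chunk, 0) == pdTRUE) {
            sent += chunk;
            size -= chunk;
            continue;
        }
        /* Buffer full: wait until the consumer frees space. A real implementation
         * would also watch a close bit and a timeout, as the diff above does. */
        xEventGroupWaitBits(s_evt, DEMO_WRITE_BIT, pdTRUE, pdFALSE, portMAX_DELAY);
    }
    return sent;
}

/* Consumer: the role the write-complete callback plays. Takes at most one
 * MTU-sized chunk, hands it to the lower layer, then wakes the producer. */
static void demo_drain_once(void)
{
    size_t chunk = 0;
    uint8_t *item = (uint8_t *)xRingbufferReceiveUpTo(s_rb, &chunk, 0, DEMO_MTU);
    if (item != NULL) {
        /* ... transmit chunk bytes starting at item here ... */
        vRingbufferReturnItem(s_rb, item);
        xEventGroupSetBits(s_evt, DEMO_WRITE_BIT);
    }
}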

View File

@@ -296,6 +296,12 @@
#define UC_BT_HFP_WBS_ENABLE FALSE
#endif
#ifdef CONFIG_BT_SPP_SEND_BUF_DEFAULT
#define UC_BT_SPP_SEND_BUF_DEFAULT CONFIG_BT_SPP_SEND_BUF_DEFAULT
#else
#define UC_BT_SPP_SEND_BUF_DEFAULT 0
#endif
/**********************************************************
* Memory reference
**********************************************************/

View File

@@ -351,6 +351,10 @@
#define SBC_ENC_INCLUDED FALSE
#endif
#ifndef BTC_SPP_SEND_BUF_DEFAULT
#define BTC_SPP_SEND_BUF_DEFAULT UC_BT_SPP_SEND_BUF_DEFAULT
#endif
/******************************************************************************
**
** BTA-layer components
@@ -1578,7 +1582,7 @@
/* ERTM Tx window size */
#ifndef RFC_FCR_OPT_TX_WINDOW_SIZE
#define RFC_FCR_OPT_TX_WINDOW_SIZE 20
#define RFC_FCR_OPT_TX_WINDOW_SIZE 10
#endif
/* ERTM Maximum transmissions before disconnecting */
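
Since the new default still sits inside an #ifndef guard, a project that deliberately wants the previous behaviour could, in principle, pre-define the macro before this header is evaluated; this is only an observation about the guard, not something the commit recommends:

/* e.g. via a component compile definition or an earlier project header (assumption) */
#define RFC_FCR_OPT_TX_WINDOW_SIZE 20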

View File

@@ -113,8 +113,16 @@ typedef void (tPORT_MGMT_CALLBACK) (UINT32 code, UINT16 port_handle, void* data)
typedef struct {
BOOLEAN accept; /* If upper layer accepts the incoming connection */
BOOLEAN ignore_rfc_state; /* If need to ignore rfc state for PORT_CheckConnection */
UINT16 peer_mtu; /* Max MTU that port can send */
} tPORT_MGMT_SR_CALLBACK_ARG;
/**
* Define the client port manage callback function argument
*/
typedef struct {
UINT16 peer_mtu; /* Max MTU that port can send */
} tPORT_MGMT_CL_CALLBACK_ARG;
/*
** Define events that registered application can receive in the callback
*/

View File

@@ -928,6 +928,9 @@ static BOOLEAN process_reqseq (tL2C_CCB *p_ccb, UINT16 ctrl_word)
full_sdus_xmitted++;
}
osi_free(p_tmp);
if (p_ccb->cong_sent) {
l2cu_check_channel_congestion(p_ccb);
}
}
/* If we are still in a wait_ack state, do not mess with the timer */

View File

@@ -3613,6 +3613,9 @@ void l2cu_set_acl_hci_header (BT_HDR *p_buf, tL2C_CCB *p_ccb)
void l2cu_check_channel_congestion (tL2C_CCB *p_ccb)
{
size_t q_count = fixed_queue_length(p_ccb->xmit_hold_q);
#if (CLASSIC_BT_INCLUDED == TRUE)
size_t q_waiting_ack_count = fixed_queue_length(p_ccb->fcrb.waiting_for_ack_q);
#endif
#if (L2CAP_UCD_INCLUDED == TRUE)
if ( p_ccb->local_cid == L2CAP_CONNECTIONLESS_CID ) {
@@ -3625,7 +3628,11 @@ void l2cu_check_channel_congestion (tL2C_CCB *p_ccb)
/* If this channel was congested */
if ( p_ccb->cong_sent ) {
/* If the channel is not congested now, tell the app */
if (q_count <= (p_ccb->buff_quota / 2)) {
if (q_count <= (p_ccb->buff_quota / 2)
#if (CLASSIC_BT_INCLUDED == TRUE)
&& (p_ccb->peer_cfg.fcr.mode == L2CAP_FCR_BASIC_MODE || q_waiting_ack_count < p_ccb->our_cfg.fcr.tx_win_sz)
#endif
) {
p_ccb->cong_sent = FALSE;
if (p_ccb->p_rcb && p_ccb->p_rcb->api.pL2CA_CongestionStatus_Cb) {
L2CAP_TRACE_DEBUG ("L2CAP - Calling CongestionStatus_Cb (FALSE), CID: 0x%04x xmit_hold_q.count: %u buff_quota: %u",
@@ -3664,7 +3671,11 @@ void l2cu_check_channel_congestion (tL2C_CCB *p_ccb)
} else {
tL2C_LCB *p_lcb = p_ccb->p_lcb;
/* If this channel was not congested but it is congested now, tell the app */
if (q_count > p_ccb->buff_quota || (p_lcb && (p_lcb->link_xmit_data_q) && (list_length(p_lcb->link_xmit_data_q) + q_count) > p_ccb->buff_quota)) {
if (q_count > p_ccb->buff_quota || (p_lcb && (p_lcb->link_xmit_data_q) && (list_length(p_lcb->link_xmit_data_q) + q_count) > p_ccb->buff_quota)
#if (CLASSIC_BT_INCLUDED == TRUE)
|| (p_ccb->peer_cfg.fcr.mode != L2CAP_FCR_BASIC_MODE && q_waiting_ack_count >= p_ccb->our_cfg.fcr.tx_win_sz)
#endif
) {
p_ccb->cong_sent = TRUE;
if (p_ccb->p_rcb && p_ccb->p_rcb->api.pL2CA_CongestionStatus_Cb) {
L2CAP_TRACE_DEBUG ("L2CAP - Calling CongestionStatus_Cb (TRUE),CID:0x%04x,XmitQ:%u,Quota:%u",
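
The two added conditions amount to a threshold on the ERTM waiting-for-ack queue: a channel running a non-basic (ERTM) mode is also reported congested once that queue reaches the negotiated tx window, and a previously reported congestion is only cleared after the queue drops back below the window. A condensed model of just that predicate (field names follow the diff; the CCB type here is simplified and hypothetical):

/* Simplified model of the added ERTM congestion check, not the full l2cu logic. */
#include <stdbool.h>
#include <stddef.h>

typedef struct {
    bool   ertm_mode;        /* peer_cfg.fcr.mode != L2CAP_FCR_BASIC_MODE */
    size_t waiting_for_ack;  /* fixed_queue_length(p_ccb->fcrb.waiting_for_ack_q) */
    size_t tx_win_sz;        /* p_ccb->our_cfg.fcr.tx_win_sz */
} demo_ccb_t;

/* true -> the channel should additionally be flagged as congested */
static bool demo_ertm_congested(const demo_ccb_t *ccb)
{
    return ccb->ertm_mode && ccb->waiting_for_ack >= ccb->tx_win_sz;
}

/* true -> a previously reported congestion may be cleared */
static bool demo_ertm_clear_ok(const demo_ccb_t *ccb)
{
    return !ccb->ertm_mode || ccb->waiting_for_ack < ccb->tx_win_sz;
}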

View File

@@ -427,9 +427,11 @@ void PORT_ParNegCnf (tRFC_MCB *p_mcb, UINT8 dlci, UINT16 mtu, UINT8 cl, UINT8 k)
void PORT_DlcEstablishInd (tRFC_MCB *p_mcb, UINT8 dlci, UINT16 mtu)
{
tPORT *p_port = port_find_mcb_dlci_port (p_mcb, dlci);
tPORT_MGMT_SR_CALLBACK_ARG mgmt_cb_arg = {
tPORT_MGMT_CL_CALLBACK_ARG cl_mgmt_cb_arg = {0};
tPORT_MGMT_SR_CALLBACK_ARG sr_mgmt_cb_arg = {
.accept = TRUE,
.ignore_rfc_state = FALSE,
.peer_mtu = 0,
};
RFCOMM_TRACE_DEBUG ("PORT_DlcEstablishInd p_mcb:%p, dlci:%d mtu:%di, p_port:%p", p_mcb, dlci, mtu, p_port);
@@ -464,22 +466,29 @@ void PORT_DlcEstablishInd (tRFC_MCB *p_mcb, UINT8 dlci, UINT16 mtu)
}
if (p_port->p_mgmt_callback) {
/**
* @note
* 1. The manage callback function may change the value of accept in mgmt_cb_arg.
* 2. Use mgmt_cb_arg.ignore_rfc_state to work around the issue caused by sending
* RFCOMM establish response after the manage callback function.
*/
mgmt_cb_arg.ignore_rfc_state = TRUE;
p_port->p_mgmt_callback (PORT_SUCCESS, p_port->inx, &mgmt_cb_arg);
if (p_port->is_server) {
sr_mgmt_cb_arg.peer_mtu = p_port->peer_mtu;
/**
* @note
* 1. The manage callback function may change the value of accept in mgmt_cb_arg.
* 2. Use mgmt_cb_arg.ignore_rfc_state to work around the issue caused by sending
* RFCOMM establish response after the manage callback function.
*/
sr_mgmt_cb_arg.ignore_rfc_state = TRUE;
p_port->p_mgmt_callback (PORT_SUCCESS, p_port->inx, &sr_mgmt_cb_arg);
if (!sr_mgmt_cb_arg.accept) {
RFCOMM_DlcEstablishRsp(p_mcb, dlci, 0, RFCOMM_LOW_RESOURCES);
return;
}
} else {
cl_mgmt_cb_arg.peer_mtu = p_port->peer_mtu;
p_port->p_mgmt_callback (PORT_SUCCESS, p_port->inx, &cl_mgmt_cb_arg);
}
}
if (mgmt_cb_arg.accept) {
RFCOMM_DlcEstablishRsp(p_mcb, dlci, p_port->mtu, RFCOMM_SUCCESS);
p_port->state = PORT_STATE_OPENED;
} else {
RFCOMM_DlcEstablishRsp(p_mcb, dlci, 0, RFCOMM_LOW_RESOURCES);
}
RFCOMM_DlcEstablishRsp(p_mcb, dlci, p_port->mtu, RFCOMM_SUCCESS);
p_port->state = PORT_STATE_OPENED;
}
@@ -496,6 +505,8 @@ void PORT_DlcEstablishInd (tRFC_MCB *p_mcb, UINT8 dlci, UINT16 mtu)
void PORT_DlcEstablishCnf (tRFC_MCB *p_mcb, UINT8 dlci, UINT16 mtu, UINT16 result)
{
tPORT *p_port = port_find_mcb_dlci_port (p_mcb, dlci);
tPORT_MGMT_SR_CALLBACK_ARG sr_mgmt_cb_arg = {0};
tPORT_MGMT_CL_CALLBACK_ARG cl_mgmt_cb_arg = {0};
RFCOMM_TRACE_EVENT ("PORT_DlcEstablishCnf dlci:%d mtu:%d result:%d", dlci, mtu, result);
@@ -522,7 +533,13 @@ void PORT_DlcEstablishCnf (tRFC_MCB *p_mcb, UINT8 dlci, UINT16 mtu, UINT16 resul
}
if (p_port->p_mgmt_callback) {
p_port->p_mgmt_callback (PORT_SUCCESS, p_port->inx, NULL);
if (p_port->is_server) {
sr_mgmt_cb_arg.peer_mtu = p_port->peer_mtu;
p_port->p_mgmt_callback (PORT_SUCCESS, p_port->inx, &sr_mgmt_cb_arg);
} else {
cl_mgmt_cb_arg.peer_mtu = p_port->peer_mtu;
p_port->p_mgmt_callback (PORT_SUCCESS, p_port->inx, &cl_mgmt_cb_arg);
}
}
p_port->state = PORT_STATE_OPENED;