Support timeouts for fixed_queue and osi_thread

1. Remove fixed_queue_try_dequeue and fixed_queue_try_enqueue.
2. Add a timeout parameter to fixed_queue_enqueue and fixed_queue_dequeue.
3. Replace calls to fixed_queue_try_dequeue/try_enqueue with fixed_queue_dequeue/enqueue(..., timeout = 0).
4. Replace calls to fixed_queue_enqueue/dequeue with fixed_queue_enqueue/dequeue(..., timeout = FIXED_QUEUE_MAX_TIMEOUT).
5. Change the blocking_type parameter of osi_thread_post to a timeout (see the migration sketch below).
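
For reference, a minimal sketch of the call-site migration this commit implies; q, item, thread, func and ctx are placeholders, not names taken from the diff:

/* Before */
fixed_queue_enqueue(q, item);                                /* blocked until space was free */
fixed_queue_try_enqueue(q, item);                            /* failed immediately when full */
item = fixed_queue_try_dequeue(q);                           /* returned NULL immediately when empty */
osi_thread_post(thread, func, ctx, 0, OSI_THREAD_BLOCKING);

/* After */
fixed_queue_enqueue(q, item, FIXED_QUEUE_MAX_TIMEOUT);       /* block until space is free */
fixed_queue_enqueue(q, item, 0);                             /* return false immediately when full */
item = fixed_queue_dequeue(q, 0);                            /* return NULL immediately when empty */
osi_thread_post(thread, func, ctx, 0, OSI_THREAD_MAX_TIMEOUT);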
Tian Hao 2019-04-18 11:57:28 +08:00 committed by baohongde
parent 9863565a6f
commit 2ad65bb434
41 changed files with 207 additions and 221 deletions

View file

@ -572,7 +572,7 @@ void bta_sys_sendmsg(void *p_msg)
// there is a procedure in progress that can schedule a task via this
// message queue. This causes |btu_bta_msg_queue| to get cleaned up before
// it gets used here; hence we check for NULL before using it.
if (btu_task_post(SIG_BTU_BTA_MSG, p_msg, OSI_THREAD_BLOCKING) == false) {
if (btu_task_post(SIG_BTU_BTA_MSG, p_msg, OSI_THREAD_MAX_TIMEOUT) == false) {
osi_free(p_msg);
}
}
@ -592,7 +592,7 @@ void bta_alarm_cb(void *data)
assert(data != NULL);
TIMER_LIST_ENT *p_tle = (TIMER_LIST_ENT *)data;
btu_task_post(SIG_BTU_BTA_ALARM, p_tle, OSI_THREAD_BLOCKING);
btu_task_post(SIG_BTU_BTA_ALARM, p_tle, OSI_THREAD_MAX_TIMEOUT);
}
void bta_sys_start_timer(TIMER_LIST_ENT *p_tle, UINT16 type, INT32 timeout_ms)

View file

@ -128,7 +128,7 @@ static void btc_thread_handler(void *arg)
osi_free(msg);
}
static bt_status_t btc_task_post(btc_msg_t *msg, osi_thread_blocking_t blocking)
static bt_status_t btc_task_post(btc_msg_t *msg, uint32_t timeout)
{
btc_msg_t *lmsg;
@ -139,7 +139,7 @@ static bt_status_t btc_task_post(btc_msg_t *msg, osi_thread_blocking_t blocking)
memcpy(lmsg, msg, sizeof(btc_msg_t));
if (osi_thread_post(btc_thread, btc_thread_handler, lmsg, 2, blocking) == false) {
if (osi_thread_post(btc_thread, btc_thread_handler, lmsg, 2, timeout) == false) {
return BT_STATUS_BUSY;
}
@ -171,7 +171,7 @@ bt_status_t btc_transfer_context(btc_msg_t *msg, void *arg, int arg_len, btc_arg
lmsg.arg = NULL;
}
return btc_task_post(&lmsg, OSI_THREAD_BLOCKING);
return btc_task_post(&lmsg, OSI_THREAD_MAX_TIMEOUT);
}

View file

@ -182,7 +182,7 @@ static bool btc_a2dp_sink_ctrl_post(uint32_t sig, void *param)
evt->sig = sig;
evt->param = param;
return osi_thread_post(a2dp_sink_local_param.btc_aa_snk_task_hdl, btc_a2dp_sink_ctrl_handler, evt, 0, OSI_THREAD_BLOCKING);
return osi_thread_post(a2dp_sink_local_param.btc_aa_snk_task_hdl, btc_a2dp_sink_ctrl_handler, evt, 0, OSI_THREAD_MAX_TIMEOUT);
}
static void btc_a2dp_sink_ctrl_handler(void *arg)
@ -322,7 +322,7 @@ void btc_a2dp_sink_on_suspended(tBTA_AV_SUSPEND *p_av)
static void btc_a2dp_sink_data_post(void)
{
osi_thread_post(a2dp_sink_local_param.btc_aa_snk_task_hdl, btc_a2dp_sink_data_ready, NULL, 1, OSI_THREAD_BLOCKING);
osi_thread_post(a2dp_sink_local_param.btc_aa_snk_task_hdl, btc_a2dp_sink_data_ready, NULL, 1, OSI_THREAD_MAX_TIMEOUT);
}
/*******************************************************************************
@ -390,7 +390,7 @@ static void btc_a2dp_sink_data_ready(UNUSED_ATTR void *context)
return;
}
btc_a2dp_sink_handle_inc_media(p_msg);
p_msg = (tBT_SBC_HDR *)fixed_queue_try_dequeue(a2dp_sink_local_param.btc_aa_snk_cb.RxSbcQ);
p_msg = (tBT_SBC_HDR *)fixed_queue_dequeue(a2dp_sink_local_param.btc_aa_snk_cb.RxSbcQ, 0);
if ( p_msg == NULL ) {
APPL_TRACE_ERROR("Insufficient data in que ");
break;
@ -695,7 +695,7 @@ UINT8 btc_a2dp_sink_enque_buf(BT_HDR *p_pkt)
memcpy(p_msg, p_pkt, (sizeof(BT_HDR) + p_pkt->offset + p_pkt->len));
p_msg->num_frames_to_be_processed = (*((UINT8 *)(p_msg + 1) + p_msg->offset)) & 0x0f;
APPL_TRACE_VERBOSE("btc_a2dp_sink_enque_buf %d + \n", p_msg->num_frames_to_be_processed);
fixed_queue_enqueue(a2dp_sink_local_param.btc_aa_snk_cb.RxSbcQ, p_msg);
fixed_queue_enqueue(a2dp_sink_local_param.btc_aa_snk_cb.RxSbcQ, p_msg, FIXED_QUEUE_MAX_TIMEOUT);
btc_a2dp_sink_data_post();
} else {
/* let caller deal with a failed allocation */
@ -721,7 +721,7 @@ static void btc_a2dp_sink_handle_clear_track (void)
static void btc_a2dp_sink_flush_q(fixed_queue_t *p_q)
{
while (! fixed_queue_is_empty(p_q)) {
osi_free(fixed_queue_try_dequeue(p_q));
osi_free(fixed_queue_dequeue(p_q, 0));
}
}

View file

@ -249,7 +249,7 @@ static bool btc_a2dp_source_ctrl_post(uint32_t sig, void *param)
evt->sig = sig;
evt->param = param;
return osi_thread_post(a2dp_source_local_param.btc_aa_src_task_hdl, btc_a2dp_source_ctrl_handler, evt, 0, OSI_THREAD_BLOCKING);
return osi_thread_post(a2dp_source_local_param.btc_aa_src_task_hdl, btc_a2dp_source_ctrl_handler, evt, 0, OSI_THREAD_MAX_TIMEOUT);
}
static void btc_a2dp_source_ctrl_handler(void *arg)
@ -421,7 +421,7 @@ void btc_a2dp_source_on_suspended(tBTA_AV_SUSPEND *p_av)
static void btc_a2dp_source_data_post(void)
{
osi_thread_post(a2dp_source_local_param.btc_aa_src_task_hdl, btc_a2dp_source_handle_timer, NULL, 1, OSI_THREAD_BLOCKING);
osi_thread_post(a2dp_source_local_param.btc_aa_src_task_hdl, btc_a2dp_source_handle_timer, NULL, 1, OSI_THREAD_MAX_TIMEOUT);
}
static UINT64 time_now_us()
@ -510,7 +510,7 @@ BT_HDR *btc_a2dp_source_audio_readbuf(void)
if (btc_a2dp_source_state != BTC_A2DP_SOURCE_STATE_ON){
return NULL;
}
return fixed_queue_try_dequeue(a2dp_source_local_param.btc_aa_src_cb.TxAaQ);
return fixed_queue_dequeue(a2dp_source_local_param.btc_aa_src_cb.TxAaQ, 0);
}
/*******************************************************************************
@ -1378,7 +1378,7 @@ static void btc_media_aa_prep_sbc_2_send(UINT8 nb_frame)
}
/* Enqueue the encoded SBC frame in AA Tx Queue */
fixed_queue_enqueue(a2dp_source_local_param.btc_aa_src_cb.TxAaQ, p_buf);
fixed_queue_enqueue(a2dp_source_local_param.btc_aa_src_cb.TxAaQ, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
} else {
osi_free(p_buf);
}
@ -1407,7 +1407,7 @@ static void btc_a2dp_source_prep_2_send(UINT8 nb_frame)
}
while (fixed_queue_length(a2dp_source_local_param.btc_aa_src_cb.TxAaQ) > (MAX_OUTPUT_A2DP_SRC_FRAME_QUEUE_SZ - nb_frame)) {
osi_free(fixed_queue_try_dequeue(a2dp_source_local_param.btc_aa_src_cb.TxAaQ));
osi_free(fixed_queue_dequeue(a2dp_source_local_param.btc_aa_src_cb.TxAaQ, 0));
}
// Transcode frame
@ -1589,7 +1589,7 @@ static void btc_a2dp_source_aa_stop_tx(void)
static void btc_a2dp_source_flush_q(fixed_queue_t *p_q)
{
while (! fixed_queue_is_empty(p_q)) {
osi_free(fixed_queue_try_dequeue(p_q));
osi_free(fixed_queue_dequeue(p_q, 0));
}
}

View file

@ -170,9 +170,9 @@ static void hci_hal_h4_rx_handler(void *arg)
fixed_queue_process(hci_hal_env.rx_q);
}
bool hci_hal_h4_task_post(osi_thread_blocking_t blocking)
bool hci_hal_h4_task_post(uint32_t timeout)
{
return osi_thread_post(hci_h4_thread, hci_hal_h4_rx_handler, NULL, 1, blocking);
return osi_thread_post(hci_h4_thread, hci_hal_h4_rx_handler, NULL, 1, timeout);
}
#if (C2H_FLOW_CONTROL_INCLUDED == TRUE)
@ -314,7 +314,7 @@ static void event_uart_has_bytes(fixed_queue_t *queue)
{
BT_HDR *packet;
while (!fixed_queue_is_empty(queue)) {
packet = fixed_queue_dequeue(queue);
packet = fixed_queue_dequeue(queue, FIXED_QUEUE_MAX_TIMEOUT);
hci_hal_h4_hdl_rx_packet(packet);
}
}
@ -323,7 +323,7 @@ static void host_send_pkt_available_cb(void)
{
//Controller rx cache buffer is ready for receiving new host packet
//Just Call Host main thread task to process pending packets.
hci_host_task_post(OSI_THREAD_BLOCKING);
hci_host_task_post(OSI_THREAD_MAX_TIMEOUT);
}
static int host_recv_pkt_cb(uint8_t *data, uint16_t len)
@ -347,8 +347,8 @@ static int host_recv_pkt_cb(uint8_t *data, uint16_t len)
pkt->len = len;
pkt->layer_specific = 0;
memcpy(pkt->data, data, len);
fixed_queue_enqueue(hci_hal_env.rx_q, pkt);
hci_hal_h4_task_post(OSI_THREAD_NON_BLOCKING);
fixed_queue_enqueue(hci_hal_env.rx_q, pkt, FIXED_QUEUE_MAX_TIMEOUT);
hci_hal_h4_task_post(0);
BTTRC_DUMP_BUFFER("Recv Pkt", pkt->data, len);

View file

@ -135,9 +135,9 @@ void hci_shut_down(void)
}
bool hci_host_task_post(osi_thread_blocking_t blocking)
bool hci_host_task_post(uint32_t timeout)
{
return osi_thread_post(hci_host_thread, hci_host_thread_handler, NULL, 0, blocking);
return osi_thread_post(hci_host_thread, hci_host_thread_handler, NULL, 0, timeout);
}
static int hci_layer_init_env(void)
@ -252,8 +252,8 @@ static void transmit_command(
HCI_TRACE_DEBUG("HCI Enqueue Comamnd opcode=0x%x\n", wait_entry->opcode);
BTTRC_DUMP_BUFFER(NULL, command->data + command->offset, command->len);
fixed_queue_enqueue(hci_host_env.command_queue, wait_entry);
hci_host_task_post(OSI_THREAD_BLOCKING);
fixed_queue_enqueue(hci_host_env.command_queue, wait_entry, FIXED_QUEUE_MAX_TIMEOUT);
hci_host_task_post(OSI_THREAD_MAX_TIMEOUT);
}
@ -273,8 +273,8 @@ static future_t *transmit_command_futured(BT_HDR *command)
// in case the upper layer didn't already
command->event = MSG_STACK_TO_HC_HCI_CMD;
fixed_queue_enqueue(hci_host_env.command_queue, wait_entry);
hci_host_task_post(OSI_THREAD_BLOCKING);
fixed_queue_enqueue(hci_host_env.command_queue, wait_entry, FIXED_QUEUE_MAX_TIMEOUT);
hci_host_task_post(OSI_THREAD_MAX_TIMEOUT);
return future;
}
@ -284,10 +284,10 @@ static void transmit_downward(uint16_t type, void *data)
transmit_command((BT_HDR *)data, NULL, NULL, NULL);
HCI_TRACE_WARNING("%s legacy transmit of command. Use transmit_command instead.\n", __func__);
} else {
fixed_queue_enqueue(hci_host_env.packet_queue, data);
fixed_queue_enqueue(hci_host_env.packet_queue, data, FIXED_QUEUE_MAX_TIMEOUT);
}
hci_host_task_post(OSI_THREAD_BLOCKING);
hci_host_task_post(OSI_THREAD_MAX_TIMEOUT);
}
@ -297,7 +297,7 @@ static void event_command_ready(fixed_queue_t *queue)
waiting_command_t *wait_entry = NULL;
command_waiting_response_t *cmd_wait_q = &hci_host_env.cmd_waiting_q;
wait_entry = fixed_queue_dequeue(queue);
wait_entry = fixed_queue_dequeue(queue, FIXED_QUEUE_MAX_TIMEOUT);
if(wait_entry->opcode == HCI_HOST_NUM_PACKETS_DONE
#if (BLE_ADV_REPORT_FLOW_CONTROL == TRUE)
@ -323,7 +323,7 @@ static void event_command_ready(fixed_queue_t *queue)
static void event_packet_ready(fixed_queue_t *queue)
{
BT_HDR *packet = (BT_HDR *)fixed_queue_dequeue(queue);
BT_HDR *packet = (BT_HDR *)fixed_queue_dequeue(queue, FIXED_QUEUE_MAX_TIMEOUT);
// The queue may be the command queue or the packet queue, we don't care
packet_fragmenter->fragment_and_dispatch(packet);
@ -461,7 +461,7 @@ intercepted:
/*Tell HCI Host Task to continue TX Pending commands*/
if (hci_host_env.command_credits &&
!fixed_queue_is_empty(hci_host_env.command_queue)) {
hci_host_task_post(OSI_THREAD_BLOCKING);
hci_host_task_post(OSI_THREAD_MAX_TIMEOUT);
}
if (wait_entry) {
@ -489,7 +489,7 @@ static void dispatch_reassembled(BT_HDR *packet)
{
// Events should already have been dispatched before this point
//Tell Up-layer received packet.
if (btu_task_post(SIG_BTU_HCI_MSG, packet, OSI_THREAD_BLOCKING) == false) {
if (btu_task_post(SIG_BTU_HCI_MSG, packet, OSI_THREAD_MAX_TIMEOUT) == false) {
osi_free(packet);
}
}

View file

@ -97,6 +97,6 @@ const hci_t *hci_layer_get_interface();
int hci_start_up(void);
void hci_shut_down(void);
bool hci_host_task_post(osi_thread_blocking_t blocking);
bool hci_host_task_post(uint32_t timeout);
#endif /* _HCI_LAYER_H_ */

View file

@ -129,45 +129,12 @@ size_t fixed_queue_capacity(fixed_queue_t *queue)
return queue->capacity;
}
void fixed_queue_enqueue(fixed_queue_t *queue, void *data)
bool fixed_queue_enqueue(fixed_queue_t *queue, void *data, uint32_t timeout)
{
assert(queue != NULL);
assert(data != NULL);
osi_sem_take(&queue->enqueue_sem, OSI_SEM_MAX_TIMEOUT);
osi_mutex_lock(&queue->lock, OSI_MUTEX_MAX_TIMEOUT);
list_append(queue->list, data);
osi_mutex_unlock(&queue->lock);
osi_sem_give(&queue->dequeue_sem);
}
void *fixed_queue_dequeue(fixed_queue_t *queue)
{
void *ret = NULL;
assert(queue != NULL);
osi_sem_take(&queue->dequeue_sem, OSI_SEM_MAX_TIMEOUT);
osi_mutex_lock(&queue->lock, OSI_MUTEX_MAX_TIMEOUT);
ret = list_front(queue->list);
list_remove(queue->list, ret);
osi_mutex_unlock(&queue->lock);
osi_sem_give(&queue->enqueue_sem);
return ret;
}
bool fixed_queue_try_enqueue(fixed_queue_t *queue, void *data)
{
assert(queue != NULL);
assert(data != NULL);
if (osi_sem_take(&queue->enqueue_sem, 0) != 0) {
if (osi_sem_take(&queue->enqueue_sem, timeout) != 0) {
return false;
}
@ -181,15 +148,13 @@ bool fixed_queue_try_enqueue(fixed_queue_t *queue, void *data)
return true;
}
void *fixed_queue_try_dequeue(fixed_queue_t *queue)
void *fixed_queue_dequeue(fixed_queue_t *queue, uint32_t timeout)
{
void *ret = NULL;
if (queue == NULL) {
return NULL;
}
assert(queue != NULL);
if (osi_sem_take(queue->dequeue_sem, 0) != 0) {
if (osi_sem_take(queue->dequeue_sem, timeout) != 0) {
return NULL;
}
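
Taken together, the hunks above mean the consolidated enqueue now looks roughly like this; a reconstruction from the diff, not a verbatim copy of the file:

bool fixed_queue_enqueue(fixed_queue_t *queue, void *data, uint32_t timeout)
{
    assert(queue != NULL);
    assert(data != NULL);

    /* timeout = 0 preserves the old try_enqueue behavior; FIXED_QUEUE_MAX_TIMEOUT blocks. */
    if (osi_sem_take(&queue->enqueue_sem, timeout) != 0) {
        return false;
    }

    osi_mutex_lock(&queue->lock, OSI_MUTEX_MAX_TIMEOUT);
    list_append(queue->list, data);
    osi_mutex_unlock(&queue->lock);

    osi_sem_give(&queue->dequeue_sem);

    return true;
}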

View file

@ -21,11 +21,14 @@
#include <stdbool.h>
#include "osi/list.h"
#include "osi/semaphore.h"
#ifndef QUEUE_SIZE_MAX
#define QUEUE_SIZE_MAX 254
#endif
#define FIXED_QUEUE_MAX_TIMEOUT OSI_SEM_MAX_TIMEOUT
struct fixed_queue_t;
typedef struct fixed_queue_t fixed_queue_t;
@ -56,27 +59,14 @@ size_t fixed_queue_length(fixed_queue_t *queue);
// not be NULL.
size_t fixed_queue_capacity(fixed_queue_t *queue);
// Enqueues the given |data| into the |queue|. The caller will be blocked
// if no more space is available in the queue. Neither |queue| nor |data|
// may be NULL.
void fixed_queue_enqueue(fixed_queue_t *queue, void *data);
// Enqueues the given |data| into the |queue|. Depending on the |timeout| parameter, the caller
// blocks indefinitely, returns immediately, or waits up to |timeout| for free space.
// Returns false if the enqueue fails, true otherwise.
bool fixed_queue_enqueue(fixed_queue_t *queue, void *data, uint32_t timeout);
// Dequeues the next element from |queue|. If the queue is currently empty,
// this function will block the caller until an item is enqueued. This
// function will never return NULL. |queue| may not be NULL.
void *fixed_queue_dequeue(fixed_queue_t *queue);
// Tries to enqueue |data| into the |queue|. This function will never block
// the caller. If the queue capacity would be exceeded by adding one more
// element, this function returns false immediately. Otherwise, this function
// returns true. Neither |queue| nor |data| may be NULL.
bool fixed_queue_try_enqueue(fixed_queue_t *queue, void *data);
// Tries to dequeue an element from |queue|. This function will never block
// the caller. If the queue is empty, this function returns NULL immediately.
// Otherwise, the next element in the queue is returned. |queue| may not be
// NULL.
void *fixed_queue_try_dequeue(fixed_queue_t *queue);
// this function blocks the caller until an item is enqueued, returns NULL immediately, or
// waits up to |timeout| for an item, depending on the |timeout| parameter.
// If the dequeue fails, this function returns NULL; otherwise it returns a pointer to the dequeued element.
void *fixed_queue_dequeue(fixed_queue_t *queue, uint32_t timeout);
// Returns the first element from |queue|, if present, without dequeuing it.
// This function will never block the caller. Returns NULL if there are no
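
A short usage sketch of the declarations above; q and item are placeholders, fixed_queue_new and QUEUE_SIZE_MAX come from this same header, and the millisecond unit for intermediate timeout values is taken from the osi_thread_post documentation later in this diff:

fixed_queue_t *q = fixed_queue_new(QUEUE_SIZE_MAX);

/* Non-blocking enqueue: check the new bool return value, since the caller
 * keeps ownership of |item| when the queue is full. */
if (fixed_queue_enqueue(q, item, 0) == false) {
    osi_free(item);
}

/* Bounded wait: give the producer up to 100 ms, then give up. */
void *next = fixed_queue_dequeue(q, 100);
if (next == NULL) {
    /* timed out; nothing to process */
}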

View file

@ -23,9 +23,9 @@
#include "freertos/task.h"
#include "freertos/queue.h"
#include "freertos/semphr.h"
#include "osi/semaphore.h"
#define OSI_MUTEX_MAX_TIMEOUT 0xffffffffUL
#define OSI_MUTEX_MAX_TIMEOUT OSI_SEM_MAX_TIMEOUT
#define osi_mutex_valid( x ) ( ( ( *x ) == NULL) ? pdFALSE : pdTRUE )
#define osi_mutex_set_invalid( x ) ( ( *x ) = NULL )

View file

@ -22,9 +22,12 @@
#include "freertos/task.h"
#include "esp_task.h"
#include "common/bt_defs.h"
#include "osi/semaphore.h"
#define portBASE_TYPE int
#define OSI_THREAD_MAX_TIMEOUT OSI_SEM_MAX_TIMEOUT
struct osi_thread;
typedef struct osi_thread osi_thread_t;
@ -37,21 +40,53 @@ typedef enum {
OSI_THREAD_CORE_AFFINITY,
} osi_thread_core_t;
typedef enum {
OSI_THREAD_NON_BLOCKING = 0,
OSI_THREAD_BLOCKING,
} osi_thread_blocking_t;
/*
* brief: Create a thread or task
* param name: thread name
* param stack_size: thread stack size
* param priority: thread priority
* param core: the CPU core on which this thread runs; OSI_THREAD_CORE_AFFINITY means no specific CPU core
* param work_queue_num: number of work queues; queue[0] has the highest priority, and the priority decreases with the index
* return : if created successfully, return the thread handle; otherwise return NULL.
*/
osi_thread_t *osi_thread_create(const char *name, size_t stack_size, int priority, osi_thread_core_t core, uint8_t work_queue_num);
/*
* brief: Destroy a thread or task
* param thread: pointer to the thread handle
*/
void osi_thread_free(osi_thread_t *thread);
bool osi_thread_post(osi_thread_t *thread, osi_thread_func_t func, void *context, int queue_idx, osi_thread_blocking_t blocking);
/*
* brief: Post a message to a thread and ask the thread to call the given function
* param thread: pointer to the thread handle
* param func: callback function to be called by the target thread
* param context: argument passed to the callback function
* param queue_idx: index of the work queue the message is sent to
* param timeout: post timeout; OSI_THREAD_MAX_TIMEOUT means block forever, 0 means never block, any other value means block for that many milliseconds
* return : true if the post succeeds, otherwise false
*/
bool osi_thread_post(osi_thread_t *thread, osi_thread_func_t func, void *context, int queue_idx, uint32_t timeout);
/*
* brief: Set the priority of a thread
* param thread: pointer to the thread handle
* param priority: new priority
* return : true if set successfully, otherwise false
*/
bool osi_thread_set_priority(osi_thread_t *thread, int priority);
/* brief: Get thread name
* param thread: pointer to the thread handle
* return: constant pointer to the thread name
*/
const char *osi_thread_name(osi_thread_t *thread);
/* brief: Get the size of the specified queue
* param thread: pointer to the thread handle
* param wq_idx: index of the thread's work queue
* return: queue size
*/
int osi_thread_queue_wait_size(osi_thread_t *thread, int wq_idx);
#endif /* __THREAD_H__ */
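
A minimal usage sketch based on the declarations and comments above; the thread name, stack size, priority and handler below are illustrative values, not taken from this commit:

static void example_handler(void *context)
{
    osi_free(context);   /* the worker owns the message once the post succeeds */
}

void example(void)
{
    /* Two work queues: queue 0 is serviced ahead of queue 1. */
    osi_thread_t *thread = osi_thread_create("example_thread", 2048, 5, OSI_THREAD_CORE_AFFINITY, 2);
    if (thread == NULL) {
        return;
    }

    /* timeout = 0: fail immediately instead of blocking if queue 0 is full. */
    void *msg = osi_malloc(16);
    if (msg != NULL && osi_thread_post(thread, example_handler, msg, 0, 0) == false) {
        osi_free(msg);   /* the caller still owns the message on failure */
    }

    /* OSI_THREAD_MAX_TIMEOUT: block until queue 1 has room. */
    osi_thread_post(thread, example_handler, osi_malloc(16), 1, OSI_THREAD_MAX_TIMEOUT);
}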

View file

@ -63,7 +63,7 @@ static void osi_thread_run(void *arg)
}
while (!thread->stop && idx < thread->work_queue_num) {
work_item_t *item = fixed_queue_try_dequeue(thread->work_queues[idx]);
work_item_t *item = fixed_queue_dequeue(thread->work_queues[idx], 0);
if (item) {
item->func(item->context);
osi_free(item);
@ -227,7 +227,7 @@ void osi_thread_free(osi_thread_t *thread)
osi_free(thread);
}
bool osi_thread_post(osi_thread_t *thread, osi_thread_func_t func, void *context, int queue_idx, osi_thread_blocking_t blocking)
bool osi_thread_post(osi_thread_t *thread, osi_thread_func_t func, void *context, int queue_idx, uint32_t timeout)
{
assert(thread != NULL);
assert(func != NULL);
@ -243,13 +243,9 @@ bool osi_thread_post(osi_thread_t *thread, osi_thread_func_t func, void *context
item->func = func;
item->context = context;
if (blocking == OSI_THREAD_BLOCKING) {
fixed_queue_enqueue(thread->work_queues[queue_idx], item);
} else {
if (fixed_queue_try_enqueue(thread->work_queues[queue_idx], item) == false) {
osi_free(item);
return false;
}
if (fixed_queue_enqueue(thread->work_queues[queue_idx], item, timeout) == false) {
osi_free(item);
return false;
}
osi_sem_give(&thread->work_sem);

View file

@ -449,7 +449,7 @@ void avct_lcb_cong_ind(tAVCT_LCB *p_lcb, tAVCT_LCB_EVT *p_data)
if (p_lcb->cong == FALSE && !fixed_queue_is_empty(p_lcb->tx_q))
{
while (!p_lcb->cong &&
(p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_lcb->tx_q)) != NULL)
(p_buf = (BT_HDR *)fixed_queue_dequeue(p_lcb->tx_q, 0)) != NULL)
{
if (L2CA_DataWrite(p_lcb->ch_lcid, p_buf) == L2CAP_DW_CONGESTED)
{
@ -569,7 +569,7 @@ void avct_lcb_send_msg(tAVCT_LCB *p_lcb, tAVCT_LCB_EVT *p_data)
}
if (p_lcb->cong == TRUE) {
fixed_queue_enqueue(p_lcb->tx_q, p_buf);
fixed_queue_enqueue(p_lcb->tx_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
}
/* send message to L2CAP */

View file

@ -68,7 +68,7 @@ static void avdt_ccb_clear_ccb(tAVDT_CCB *p_ccb)
}
/* clear out response queue */
while ((p_buf = (BT_HDR *) fixed_queue_try_dequeue(p_ccb->rsp_q)) != NULL) {
while ((p_buf = (BT_HDR *) fixed_queue_dequeue(p_ccb->rsp_q, 0)) != NULL) {
osi_free(p_buf);
}
}
@ -659,7 +659,7 @@ void avdt_ccb_clear_cmds(tAVDT_CCB *p_ccb, tAVDT_CCB_EVT *p_data)
avdt_ccb_cmd_fail(p_ccb, (tAVDT_CCB_EVT *) &err_code);
/* set up next message */
p_ccb->p_curr_cmd = (BT_HDR *) fixed_queue_try_dequeue(p_ccb->cmd_q);
p_ccb->p_curr_cmd = (BT_HDR *) fixed_queue_dequeue(p_ccb->cmd_q, 0);
} while (p_ccb->p_curr_cmd != NULL);
@ -812,7 +812,7 @@ void avdt_ccb_snd_cmd(tAVDT_CCB *p_ccb, tAVDT_CCB_EVT *p_data)
** not congested, not sending fragment, not waiting for response
*/
if ((!p_ccb->cong) && (p_ccb->p_curr_msg == NULL) && (p_ccb->p_curr_cmd == NULL)) {
if ((p_msg = (BT_HDR *) fixed_queue_try_dequeue(p_ccb->cmd_q)) != NULL) {
if ((p_msg = (BT_HDR *) fixed_queue_dequeue(p_ccb->cmd_q, 0)) != NULL) {
/* make a copy of buffer in p_curr_cmd */
if ((p_ccb->p_curr_cmd = (BT_HDR *) osi_malloc(AVDT_CMD_BUF_SIZE)) != NULL) {
memcpy(p_ccb->p_curr_cmd, p_msg, (sizeof(BT_HDR) + p_msg->offset + p_msg->len));
@ -846,7 +846,7 @@ void avdt_ccb_snd_msg(tAVDT_CCB *p_ccb, tAVDT_CCB_EVT *p_data)
}
/* do we have responses to send? send them */
else if (!fixed_queue_is_empty(p_ccb->rsp_q)) {
while ((p_msg = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->rsp_q)) != NULL) {
while ((p_msg = (BT_HDR *)fixed_queue_dequeue(p_ccb->rsp_q, 0)) != NULL) {
if (avdt_msg_send(p_ccb, p_msg) == TRUE) {
/* break out if congested */
break;

View file

@ -1440,7 +1440,7 @@ void avdt_msg_send_cmd(tAVDT_CCB *p_ccb, void *p_scb, UINT8 sig_id, tAVDT_MSG *p
p_ccb->label = (p_ccb->label + 1) % 16;
/* queue message and trigger ccb to send it */
fixed_queue_enqueue(p_ccb->cmd_q, p_buf);
fixed_queue_enqueue(p_ccb->cmd_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
avdt_ccb_event(p_ccb, AVDT_CCB_SENDMSG_EVT, NULL);
}
@ -1487,7 +1487,7 @@ void avdt_msg_send_rsp(tAVDT_CCB *p_ccb, UINT8 sig_id, tAVDT_MSG *p_params)
AVDT_BLD_LAYERSPEC(p_buf->layer_specific, AVDT_MSG_TYPE_RSP, p_params->hdr.label);
/* queue message and trigger ccb to send it */
fixed_queue_enqueue(p_ccb->rsp_q, p_buf);
fixed_queue_enqueue(p_ccb->rsp_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
avdt_ccb_event(p_ccb, AVDT_CCB_SENDMSG_EVT, NULL);
}
@ -1547,7 +1547,7 @@ void avdt_msg_send_rej(tAVDT_CCB *p_ccb, UINT8 sig_id, tAVDT_MSG *p_params)
AVDT_BLD_LAYERSPEC(p_buf->layer_specific, AVDT_MSG_TYPE_REJ, p_params->hdr.label);
/* queue message and trigger ccb to send it */
fixed_queue_enqueue(p_ccb->rsp_q, p_buf);
fixed_queue_enqueue(p_ccb->rsp_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
avdt_ccb_event(p_ccb, AVDT_CCB_SENDMSG_EVT, NULL);
}
@ -1591,7 +1591,7 @@ void avdt_msg_send_grej(tAVDT_CCB *p_ccb, UINT8 sig_id, tAVDT_MSG *p_params)
AVDT_TRACE_DEBUG("avdt_msg_send_grej");
/* queue message and trigger ccb to send it */
fixed_queue_enqueue(p_ccb->rsp_q, p_buf);
fixed_queue_enqueue(p_ccb->rsp_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
avdt_ccb_event(p_ccb, AVDT_CCB_SENDMSG_EVT, NULL);
}

View file

@ -1232,7 +1232,7 @@ void avdt_scb_hdl_write_req_frag(tAVDT_SCB *p_scb, tAVDT_SCB_EVT *p_data)
/* this shouldn't be happening */
AVDT_TRACE_WARNING("*** Dropped media packet; congested");
BT_HDR *p_frag;
while ((p_frag = (BT_HDR*)fixed_queue_try_dequeue(p_scb->frag_q)) != NULL)
while ((p_frag = (BT_HDR*)fixed_queue_dequeue(p_scb->frag_q, 0)) != NULL)
osi_free(p_frag);
}
@ -1397,7 +1397,7 @@ void avdt_scb_snd_stream_close(tAVDT_SCB *p_scb, tAVDT_SCB_EVT *p_data)
/* clean fragments queue */
BT_HDR *p_frag;
while ((p_frag = (BT_HDR*)fixed_queue_try_dequeue(p_scb->frag_q)) != NULL) {
while ((p_frag = (BT_HDR*)fixed_queue_dequeue(p_scb->frag_q, 0)) != NULL) {
osi_free(p_frag);
}
p_scb->frag_off = 0;
@ -1824,7 +1824,7 @@ void avdt_scb_free_pkt(tAVDT_SCB *p_scb, tAVDT_SCB_EVT *p_data)
#if AVDT_MULTIPLEXING == TRUE
/* clean fragments queue */
BT_HDR *p_frag;
while ((p_frag = (BT_HDR*)fixed_queue_try_dequeue(p_scb->frag_q)) != NULL) {
while ((p_frag = (BT_HDR*)fixed_queue_dequeue(p_scb->frag_q, 0)) != NULL) {
osi_free(p_frag);
}
#endif
@ -1880,7 +1880,7 @@ void avdt_scb_clr_pkt(tAVDT_SCB *p_scb, tAVDT_SCB_EVT *p_data)
AVDT_TRACE_DEBUG("Dropped fragments queue");
/* clean fragments queue */
BT_HDR *p_frag;
while ((p_frag = (BT_HDR*)fixed_queue_try_dequeue(p_scb->frag_q)) != NULL) {
while ((p_frag = (BT_HDR*)fixed_queue_dequeue(p_scb->frag_q, 0)) != NULL) {
osi_free(p_frag);
}
p_scb->frag_off = 0;
@ -1933,7 +1933,7 @@ void avdt_scb_chk_snd_pkt(tAVDT_SCB *p_scb, tAVDT_SCB_EVT *p_data)
L2CA_FlushChannel(avdt_cb.ad.rt_tbl[avdt_ccb_to_idx(p_scb->p_ccb)][avdt_ad_type_to_tcid(AVDT_CHAN_MEDIA, p_scb)].lcid),
L2CAP_FLUSH_CHANS_GET);
#endif
while ((p_pkt = (BT_HDR*)fixed_queue_try_dequeue(p_scb->frag_q)) != NULL) {
while ((p_pkt = (BT_HDR*)fixed_queue_dequeue(p_scb->frag_q, 0)) != NULL) {
sent = TRUE;
AVDT_TRACE_DEBUG("Send fragment len=%d\n", p_pkt->len);
/* fragments queue contains fragment to send */
@ -2096,7 +2096,7 @@ void avdt_scb_queue_frags(tAVDT_SCB *p_scb, UINT8 **pp_data, UINT32 *p_data_len,
UINT16_TO_BE_STREAM(p, p_frag->layer_specific );
}
/* put fragment into gueue */
fixed_queue_enqueue(p_scb->frag_q, p_frag);
fixed_queue_enqueue(p_scb->frag_q, p_frag, FIXED_QUEUE_MAX_TIMEOUT);
num_frag--;
}
}

View file

@ -2364,7 +2364,7 @@ void btm_acl_resubmit_page (void)
BD_ADDR bda;
BTM_TRACE_DEBUG ("btm_acl_resubmit_page\n");
/* If there were other page request schedule can start the next one */
if ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(btm_cb.page_queue)) != NULL) {
if ((p_buf = (BT_HDR *)fixed_queue_dequeue(btm_cb.page_queue, 0)) != NULL) {
/* skip 3 (2 bytes opcode and 1 byte len) to get to the bd_addr
* for both create_conn and rmt_name */
pp = (UINT8 *)(p_buf + 1) + p_buf->offset + 3;
@ -2395,7 +2395,7 @@ void btm_acl_reset_paging (void)
BT_HDR *p;
BTM_TRACE_DEBUG ("btm_acl_reset_paging\n");
/* If we sent reset we are definitely not paging any more */
while ((p = (BT_HDR *)fixed_queue_try_dequeue(btm_cb.page_queue)) != NULL) {
while ((p = (BT_HDR *)fixed_queue_dequeue(btm_cb.page_queue, 0)) != NULL) {
osi_free (p);
}
@ -2419,7 +2419,7 @@ void btm_acl_paging (BT_HDR *p, BD_ADDR bda)
(bda[0] << 16) + (bda[1] << 8) + bda[2], (bda[3] << 16) + (bda[4] << 8) + bda[5]);
if (btm_cb.discing) {
btm_cb.paging = TRUE;
fixed_queue_enqueue(btm_cb.page_queue, p);
fixed_queue_enqueue(btm_cb.page_queue, p, FIXED_QUEUE_MAX_TIMEOUT);
} else {
if (!BTM_ACL_IS_CONNECTED (bda)) {
BTM_TRACE_DEBUG ("connecting_bda: %06x%06x\n",
@ -2429,7 +2429,7 @@ void btm_acl_paging (BT_HDR *p, BD_ADDR bda)
btm_cb.connecting_bda[5]);
if (btm_cb.paging &&
memcmp (bda, btm_cb.connecting_bda, BD_ADDR_LEN) != 0) {
fixed_queue_enqueue(btm_cb.page_queue, p);
fixed_queue_enqueue(btm_cb.page_queue, p, FIXED_QUEUE_MAX_TIMEOUT);
} else {
p_dev_rec = btm_find_or_alloc_dev (bda);
memcpy (btm_cb.connecting_bda, p_dev_rec->bd_addr, BD_ADDR_LEN);

View file

@ -785,7 +785,7 @@ void btm_ble_enqueue_direct_conn_req(void *p_param)
p->p_param = p_param;
fixed_queue_enqueue(btm_cb.ble_ctr_cb.conn_pending_q, p);
fixed_queue_enqueue(btm_cb.ble_ctr_cb.conn_pending_q, p, FIXED_QUEUE_MAX_TIMEOUT);
}
/*******************************************************************************
**
@ -801,7 +801,7 @@ BOOLEAN btm_send_pending_direct_conn(void)
tBTM_BLE_CONN_REQ *p_req;
BOOLEAN rt = FALSE;
p_req = (tBTM_BLE_CONN_REQ*)fixed_queue_try_dequeue(btm_cb.ble_ctr_cb.conn_pending_q);
p_req = (tBTM_BLE_CONN_REQ*)fixed_queue_dequeue(btm_cb.ble_ctr_cb.conn_pending_q, 0);
if (p_req != NULL) {
rt = l2cble_init_direct_conn((tL2C_LCB *)(p_req->p_param));

View file

@ -86,7 +86,7 @@ void btm_sco_flush_sco_data(UINT16 sco_inx)
if (sco_inx < BTM_MAX_SCO_LINKS) {
p = &btm_cb.sco_cb.sco_db[sco_inx];
while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p->xmit_data_q)) != NULL) {
while ((p_buf = (BT_HDR *)fixed_queue_dequeue(p->xmit_data_q, 0)) != NULL) {
osi_free(p_buf);
}
}
@ -292,7 +292,7 @@ void btm_sco_check_send_pkts (UINT16 sco_inx)
BT_HDR *p_buf;
while (p_cb->xmit_window_size != 0)
{
if ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->xmit_data_q)) == NULL) {
if ((p_buf = (BT_HDR *)fixed_queue_dequeue(p_ccb->xmit_data_q, 0)) == NULL) {
break;
}
#if BTM_SCO_HCI_DEBUG
@ -441,7 +441,7 @@ tBTM_STATUS BTM_WriteScoData (UINT16 sco_inx, BT_HDR *p_buf)
p_buf->len += HCI_SCO_PREAMBLE_SIZE;
if (fixed_queue_length(p_ccb->xmit_data_q) < BTM_SCO_XMIT_QUEUE_THRS) {
fixed_queue_enqueue(p_ccb->xmit_data_q, p_buf);
fixed_queue_enqueue(p_ccb->xmit_data_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
btm_sco_check_send_pkts (sco_inx);
} else {
BTM_TRACE_WARNING ("SCO xmit Q overflow, pkt dropped");

View file

@ -2791,7 +2791,7 @@ void btm_sec_check_pending_reqs (void)
btm_cb.sec_pending_q = fixed_queue_new(QUEUE_SIZE_MAX);
while ((p_e = (tBTM_SEC_QUEUE_ENTRY *)fixed_queue_try_dequeue(bq)) != NULL) {
while ((p_e = (tBTM_SEC_QUEUE_ENTRY *)fixed_queue_dequeue(bq, 0)) != NULL) {
/* Check that the ACL is still up before starting security procedures */
if (btm_bda_to_acl(p_e->bd_addr, p_e->transport) != NULL) {
if (p_e->psm != 0) {
@ -5784,7 +5784,7 @@ static BOOLEAN btm_sec_queue_mx_request (BD_ADDR bd_addr, UINT16 psm, BOOLEAN
BTM_TRACE_EVENT ("%s() PSM: 0x%04x Is_Orig: %u mx_proto_id: %u mx_chan_id: %u\n",
__func__, psm, is_orig, mx_proto_id, mx_chan_id);
fixed_queue_enqueue(btm_cb.sec_pending_q, p_e);
fixed_queue_enqueue(btm_cb.sec_pending_q, p_e, FIXED_QUEUE_MAX_TIMEOUT);
return (TRUE);
}
@ -5883,7 +5883,7 @@ static BOOLEAN btm_sec_queue_encrypt_request (BD_ADDR bd_addr, tBT_TRANSPORT tra
*(UINT8 *)p_e->p_ref_data = *(UINT8 *)(p_ref_data);
p_e->transport = transport;
memcpy(p_e->bd_addr, bd_addr, BD_ADDR_LEN);
fixed_queue_enqueue(btm_cb.sec_pending_q, p_e);
fixed_queue_enqueue(btm_cb.sec_pending_q, p_e, FIXED_QUEUE_MAX_TIMEOUT);
return TRUE;
}

View file

@ -1086,7 +1086,7 @@ static void btu_hcif_command_complete_evt(BT_HDR *response, void *context)
event->event = BTU_POST_TO_TASK_NO_GOOD_HORRIBLE_HACK;
btu_task_post(SIG_BTU_HCI_MSG, event, OSI_THREAD_BLOCKING);
btu_task_post(SIG_BTU_HCI_MSG, event, OSI_THREAD_MAX_TIMEOUT);
}
@ -1291,7 +1291,7 @@ static void btu_hcif_command_status_evt(uint8_t status, BT_HDR *command, void *c
event->event = BTU_POST_TO_TASK_NO_GOOD_HORRIBLE_HACK;
btu_task_post(SIG_BTU_HCI_MSG, event, OSI_THREAD_BLOCKING);
btu_task_post(SIG_BTU_HCI_MSG, event, OSI_THREAD_MAX_TIMEOUT);
}
/*******************************************************************************

View file

@ -186,7 +186,7 @@ void BTU_StartUp(void)
goto error_exit;
}
if (btu_task_post(SIG_BTU_START_UP, NULL, OSI_THREAD_BLOCKING) == false) {
if (btu_task_post(SIG_BTU_START_UP, NULL, OSI_THREAD_MAX_TIMEOUT) == false) {
goto error_exit;
}

View file

@ -246,7 +246,7 @@ void btu_thread_handler(void *arg)
osi_free(evt);
}
bool btu_task_post(uint32_t sig, void *param, osi_thread_blocking_t blocking)
bool btu_task_post(uint32_t sig, void *param, uint32_t timeout)
{
btu_thread_evt_t *evt;
@ -258,7 +258,7 @@ bool btu_task_post(uint32_t sig, void *param, osi_thread_blocking_t blocking)
evt->sig = sig;
evt->param = param;
return osi_thread_post(btu_thread, btu_thread_handler, evt, 0, blocking);
return osi_thread_post(btu_thread, btu_thread_handler, evt, 0, timeout);
}
void btu_task_start_up(void)
@ -417,7 +417,7 @@ void btu_general_alarm_cb(void *data)
assert(data != NULL);
TIMER_LIST_ENT *p_tle = (TIMER_LIST_ENT *)data;
btu_task_post(SIG_BTU_GENERAL_ALARM, p_tle, OSI_THREAD_BLOCKING);
btu_task_post(SIG_BTU_GENERAL_ALARM, p_tle, OSI_THREAD_MAX_TIMEOUT);
}
void btu_start_timer(TIMER_LIST_ENT *p_tle, UINT16 type, UINT32 timeout_sec)
@ -531,7 +531,7 @@ static void btu_l2cap_alarm_cb(void *data)
assert(data != NULL);
TIMER_LIST_ENT *p_tle = (TIMER_LIST_ENT *)data;
btu_task_post(SIG_BTU_L2CAP_ALARM, p_tle, OSI_THREAD_BLOCKING);
btu_task_post(SIG_BTU_L2CAP_ALARM, p_tle, OSI_THREAD_MAX_TIMEOUT);
}
void btu_start_quick_timer(TIMER_LIST_ENT *p_tle, UINT16 type, UINT32 timeout_ticks)
@ -614,7 +614,7 @@ void btu_oneshot_alarm_cb(void *data)
btu_stop_timer_oneshot(p_tle);
btu_task_post(SIG_BTU_ONESHOT_ALARM, p_tle, OSI_THREAD_BLOCKING);
btu_task_post(SIG_BTU_ONESHOT_ALARM, p_tle, OSI_THREAD_MAX_TIMEOUT);
}
/*

View file

@ -145,7 +145,7 @@ void gap_ble_dealloc_clcb(tGAP_CLCB *p_clcb)
{
tGAP_BLE_REQ *p_q;
while ((p_q = (tGAP_BLE_REQ *)fixed_queue_try_dequeue(p_clcb->pending_req_q)) != NULL) {
while ((p_q = (tGAP_BLE_REQ *)fixed_queue_dequeue(p_clcb->pending_req_q, 0)) != NULL) {
/* send callback to all pending requests if being removed*/
if (p_q->p_cback != NULL) {
(*p_q->p_cback)(FALSE, p_clcb->bda, 0, NULL);
@ -173,7 +173,7 @@ BOOLEAN gap_ble_enqueue_request (tGAP_CLCB *p_clcb, UINT16 uuid, tGAP_BLE_CMPL_C
if (p_q != NULL) {
p_q->p_cback = p_cback;
p_q->uuid = uuid;
fixed_queue_enqueue(p_clcb->pending_req_q, p_q);
fixed_queue_enqueue(p_clcb->pending_req_q, p_q, FIXED_QUEUE_MAX_TIMEOUT);
return TRUE;
}
@ -190,7 +190,7 @@ BOOLEAN gap_ble_enqueue_request (tGAP_CLCB *p_clcb, UINT16 uuid, tGAP_BLE_CMPL_C
*******************************************************************************/
BOOLEAN gap_ble_dequeue_request (tGAP_CLCB *p_clcb, UINT16 *p_uuid, tGAP_BLE_CMPL_CBACK **p_cback)
{
tGAP_BLE_REQ *p_q = (tGAP_BLE_REQ *)fixed_queue_try_dequeue(p_clcb->pending_req_q);;
tGAP_BLE_REQ *p_q = (tGAP_BLE_REQ *)fixed_queue_dequeue(p_clcb->pending_req_q, 0);;
if (p_q != NULL) {
*p_cback = p_q->p_cback;

View file

@ -332,7 +332,7 @@ UINT16 GAP_ConnReadData (UINT16 gap_handle, UINT8 *p_data, UINT16 max_len, UINT1
p_buf->len -= copy_len;
break;
}
osi_free(fixed_queue_try_dequeue(p_ccb->rx_queue));
osi_free(fixed_queue_dequeue(p_ccb->rx_queue, 0));
}
p_ccb->rx_queue_size -= *p_len;
@ -404,7 +404,7 @@ UINT16 GAP_ConnBTRead (UINT16 gap_handle, BT_HDR **pp_buf)
return (GAP_ERR_BAD_HANDLE);
}
p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->rx_queue);
p_buf = (BT_HDR *)fixed_queue_dequeue(p_ccb->rx_queue, 0);
if (p_buf) {
*pp_buf = p_buf;
@ -451,7 +451,7 @@ UINT16 GAP_ConnBTWrite (UINT16 gap_handle, BT_HDR *p_buf)
return (GAP_ERR_BUF_OFFSET);
}
fixed_queue_enqueue(p_ccb->tx_queue, p_buf);
fixed_queue_enqueue(p_ccb->tx_queue, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
if (p_ccb->is_congested) {
return (BT_PASS);
@ -461,7 +461,7 @@ UINT16 GAP_ConnBTWrite (UINT16 gap_handle, BT_HDR *p_buf)
#if (GAP_CONN_POST_EVT_INCLUDED == TRUE)
gap_send_event (gap_handle);
#else
while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->tx_queue)) != NULL) {
while ((p_buf = (BT_HDR *)fixed_queue_dequeue(p_ccb->tx_queue, 0)) != NULL) {
UINT8 status = L2CA_DATA_WRITE (p_ccb->connection_id, p_buf);
if (status == L2CAP_DW_CONGESTED) {
@ -532,7 +532,7 @@ UINT16 GAP_ConnWriteData (UINT16 gap_handle, UINT8 *p_data, UINT16 max_len, UINT
GAP_TRACE_EVENT ("GAP_WriteData %d bytes", p_buf->len);
fixed_queue_enqueue(p_ccb->tx_queue, p_buf);
fixed_queue_enqueue(p_ccb->tx_queue, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
}
if (p_ccb->is_congested) {
@ -543,7 +543,7 @@ UINT16 GAP_ConnWriteData (UINT16 gap_handle, UINT8 *p_data, UINT16 max_len, UINT
#if (GAP_CONN_POST_EVT_INCLUDED == TRUE)
gap_send_event (gap_handle);
#else
while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->tx_queue)) != NULL)
while ((p_buf = (BT_HDR *)fixed_queue_dequeue(p_ccb->tx_queue, 0)) != NULL)
{
UINT8 status = L2CA_DATA_WRITE (p_ccb->connection_id, p_buf);
@ -989,7 +989,7 @@ static void gap_data_ind (UINT16 l2cap_cid, BT_HDR *p_msg)
}
if (p_ccb->con_state == GAP_CCB_STATE_CONNECTED) {
fixed_queue_enqueue(p_ccb->rx_queue, p_msg);
fixed_queue_enqueue(p_ccb->rx_queue, p_msg, FIXED_QUEUE_MAX_TIMEOUT);
p_ccb->rx_queue_size += p_msg->len;
/*
@ -1033,7 +1033,7 @@ static void gap_congestion_ind (UINT16 lcid, BOOLEAN is_congested)
p_ccb->p_callback (p_ccb->gap_handle, event);
if (!is_congested) {
while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->tx_queue)) != NULL) {
while ((p_buf = (BT_HDR *)fixed_queue_dequeue(p_ccb->tx_queue, 0)) != NULL) {
status = L2CA_DATA_WRITE (p_ccb->connection_id, p_buf);
if (status == L2CAP_DW_CONGESTED) {
@ -1154,13 +1154,13 @@ static void gap_release_ccb (tGAP_CCB *p_ccb)
p_ccb->rx_queue_size = 0;
while (!fixed_queue_is_empty(p_ccb->rx_queue)) {
osi_free(fixed_queue_try_dequeue(p_ccb->rx_queue));
osi_free(fixed_queue_dequeue(p_ccb->rx_queue, 0));
}
fixed_queue_free(p_ccb->rx_queue, NULL);
p_ccb->rx_queue = NULL;
while (!fixed_queue_is_empty(p_ccb->tx_queue)) {
osi_free(fixed_queue_try_dequeue(p_ccb->tx_queue));
osi_free(fixed_queue_dequeue(p_ccb->tx_queue, 0));
}
fixed_queue_free(p_ccb->tx_queue, NULL);
p_ccb->tx_queue = NULL;

View file

@ -176,7 +176,7 @@ void gatt_enc_cmpl_cback(BD_ADDR bd_addr, tBT_TRANSPORT transport, void *p_ref_d
return;
}
tGATT_PENDING_ENC_CLCB *p_buf =
(tGATT_PENDING_ENC_CLCB *)fixed_queue_try_dequeue(p_tcb->pending_enc_clcb);
(tGATT_PENDING_ENC_CLCB *)fixed_queue_dequeue(p_tcb->pending_enc_clcb, 0);
if (p_buf != NULL) {
if (result == BTM_SUCCESS) {
if (gatt_get_sec_act(p_tcb) == GATT_SEC_ENCRYPT_MITM ) {
@ -194,7 +194,7 @@ void gatt_enc_cmpl_cback(BD_ADDR bd_addr, tBT_TRANSPORT transport, void *p_ref_d
/* start all other pending operation in queue */
for (size_t count = fixed_queue_length(p_tcb->pending_enc_clcb);
count > 0; count--) {
p_buf = (tGATT_PENDING_ENC_CLCB *)fixed_queue_try_dequeue(p_tcb->pending_enc_clcb);
p_buf = (tGATT_PENDING_ENC_CLCB *)fixed_queue_dequeue(p_tcb->pending_enc_clcb, 0);
if (p_buf != NULL) {
gatt_security_check_start(p_buf->p_clcb);
osi_free(p_buf);
@ -238,7 +238,7 @@ void gatt_notify_enc_cmpl(BD_ADDR bd_addr)
size_t count = fixed_queue_length(p_tcb->pending_enc_clcb);
for (; count > 0; count--) {
tGATT_PENDING_ENC_CLCB *p_buf =
(tGATT_PENDING_ENC_CLCB *)fixed_queue_try_dequeue(p_tcb->pending_enc_clcb);
(tGATT_PENDING_ENC_CLCB *)fixed_queue_dequeue(p_tcb->pending_enc_clcb, 0);
if (p_buf != NULL) {
gatt_security_check_start(p_buf->p_clcb);
osi_free(p_buf);

View file

@ -1383,7 +1383,7 @@ static BOOLEAN allocate_svc_db_buf(tGATT_SVC_DB *p_db)
p_db->p_free_mem = (UINT8 *) p_buf;
p_db->mem_free = GATT_DB_BUF_SIZE;
fixed_queue_enqueue(p_db->svc_buffer, p_buf);
fixed_queue_enqueue(p_db->svc_buffer, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
return TRUE;

View file

@ -140,7 +140,7 @@ void gatt_dequeue_sr_cmd (tGATT_TCB *p_tcb)
if (p_tcb->sr_cmd.multi_rsp_q) {
while (!fixed_queue_is_empty(p_tcb->sr_cmd.multi_rsp_q)) {
osi_free(fixed_queue_try_dequeue(p_tcb->sr_cmd.multi_rsp_q));
osi_free(fixed_queue_dequeue(p_tcb->sr_cmd.multi_rsp_q, 0));
}
fixed_queue_free(p_tcb->sr_cmd.multi_rsp_q, NULL);
}
@ -178,7 +178,7 @@ static BOOLEAN process_read_multi_rsp (tGATT_SR_CMD *p_cmd, tGATT_STATUS status,
}
memcpy((void *)p_buf, (const void *)p_msg, sizeof(tGATTS_RSP));
fixed_queue_enqueue(p_cmd->multi_rsp_q, p_buf);
fixed_queue_enqueue(p_cmd->multi_rsp_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
p_cmd->status = status;
if (status == GATT_SUCCESS) {
@ -418,7 +418,7 @@ void gatt_process_exec_write_req (tGATT_TCB *p_tcb, UINT8 op_code, UINT16 len, U
//dequeue prepare write data
while(fixed_queue_try_peek_first(prepare_record->queue)) {
queue_data = fixed_queue_dequeue(prepare_record->queue);
queue_data = fixed_queue_dequeue(prepare_record->queue, FIXED_QUEUE_MAX_TIMEOUT);
if (is_prepare_write_valid){
if((queue_data->p_attr->p_value != NULL) && (queue_data->p_attr->p_value->attr_val.attr_val != NULL)){
if(is_first) {
@ -1291,7 +1291,7 @@ void gatt_attr_process_prepare_write (tGATT_TCB *p_tcb, UINT8 i_rcb, UINT16 hand
if (prepare_record->queue == NULL) {
prepare_record->queue = fixed_queue_new(QUEUE_SIZE_MAX);
}
fixed_queue_enqueue(prepare_record->queue, queue_data);
fixed_queue_enqueue(prepare_record->queue, queue_data, FIXED_QUEUE_MAX_TIMEOUT);
}
}

View file

@ -97,7 +97,7 @@ void gatt_free_pending_ind(tGATT_TCB *p_tcb)
/* release all queued indications */
while (!fixed_queue_is_empty(p_tcb->pending_ind_q)) {
osi_free(fixed_queue_try_dequeue(p_tcb->pending_ind_q));
osi_free(fixed_queue_dequeue(p_tcb->pending_ind_q, 0));
}
fixed_queue_free(p_tcb->pending_ind_q, NULL);
p_tcb->pending_ind_q = NULL;
@ -121,7 +121,7 @@ void gatt_free_pending_enc_queue(tGATT_TCB *p_tcb)
/* release all queued indications */
while (!fixed_queue_is_empty(p_tcb->pending_enc_clcb)) {
osi_free(fixed_queue_try_dequeue(p_tcb->pending_enc_clcb));
osi_free(fixed_queue_dequeue(p_tcb->pending_enc_clcb, 0));
}
fixed_queue_free(p_tcb->pending_enc_clcb, NULL);
p_tcb->pending_enc_clcb = NULL;
@ -143,7 +143,7 @@ void gatt_free_pending_prepare_write_queue(tGATT_TCB *p_tcb)
if (p_tcb->prepare_write_record.queue) {
/* release all queued prepare write packets */
while (!fixed_queue_is_empty(p_tcb->prepare_write_record.queue)) {
osi_free(fixed_queue_dequeue(p_tcb->prepare_write_record.queue));
osi_free(fixed_queue_dequeue(p_tcb->prepare_write_record.queue, FIXED_QUEUE_MAX_TIMEOUT));
}
fixed_queue_free(p_tcb->prepare_write_record.queue, NULL);
p_tcb->prepare_write_record.queue = NULL;
@ -265,7 +265,7 @@ tGATT_VALUE *gatt_add_pending_ind(tGATT_TCB *p_tcb, tGATT_VALUE *p_ind)
if ((p_buf = (tGATT_VALUE *)osi_malloc((UINT16)sizeof(tGATT_VALUE))) != NULL) {
GATT_TRACE_DEBUG ("enqueue a pending indication");
memcpy(p_buf, p_ind, sizeof(tGATT_VALUE));
fixed_queue_enqueue(p_tcb->pending_ind_q, p_buf);
fixed_queue_enqueue(p_tcb->pending_ind_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
}
return p_buf;
}
@ -288,7 +288,7 @@ tGATTS_PENDING_NEW_SRV_START *gatt_add_pending_new_srv_start(tGATTS_HNDL_RANGE *
if ((p_buf = (tGATTS_PENDING_NEW_SRV_START *)osi_malloc((UINT16)sizeof(tGATTS_PENDING_NEW_SRV_START))) != NULL) {
GATT_TRACE_DEBUG ("enqueue a new pending new srv start");
p_buf->p_new_srv_start = p_new_srv_start;
fixed_queue_enqueue(gatt_cb.pending_new_srv_start_q, p_buf);
fixed_queue_enqueue(gatt_cb.pending_new_srv_start_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
}
return p_buf;
}
@ -310,7 +310,7 @@ tGATTS_SRV_CHG *gatt_add_srv_chg_clt(tGATTS_SRV_CHG *p_srv_chg)
if ((p_buf = (tGATTS_SRV_CHG *)osi_malloc((UINT16)sizeof(tGATTS_SRV_CHG))) != NULL) {
GATT_TRACE_DEBUG ("enqueue a srv chg client");
memcpy(p_buf, p_srv_chg, sizeof(tGATTS_SRV_CHG));
fixed_queue_enqueue(gatt_cb.srv_chg_clt_q, p_buf);
fixed_queue_enqueue(gatt_cb.srv_chg_clt_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
}
return p_buf;
@ -469,7 +469,7 @@ void gatt_free_hdl_buffer(tGATT_HDL_LIST_ELEM *p)
if (p) {
while (!fixed_queue_is_empty(p->svc_db.svc_buffer)) {
osi_free(fixed_queue_try_dequeue(p->svc_db.svc_buffer));
osi_free(fixed_queue_dequeue(p->svc_db.svc_buffer, 0));
}
fixed_queue_free(p->svc_db.svc_buffer, NULL);
memset(p, 0, sizeof(tGATT_HDL_LIST_ELEM));
@ -495,7 +495,7 @@ void gatt_free_srvc_db_buffer_app_id(tBT_UUID *p_app_id)
if (memcmp(p_app_id, &p_elem->asgn_range.app_uuid128, sizeof(tBT_UUID)) == 0) {
gatt_free_attr_value_buffer(p_elem);
while (!fixed_queue_is_empty(p_elem->svc_db.svc_buffer)) {
osi_free(fixed_queue_try_dequeue(p_elem->svc_db.svc_buffer));
osi_free(fixed_queue_dequeue(p_elem->svc_db.svc_buffer, 0));
}
fixed_queue_free(p_elem->svc_db.svc_buffer, NULL);
p_elem->svc_db.svc_buffer = NULL;
@ -2733,7 +2733,7 @@ tGATT_PENDING_ENC_CLCB *gatt_add_pending_enc_channel_clcb(tGATT_TCB *p_tcb, tGAT
if ((p_buf = (tGATT_PENDING_ENC_CLCB *)osi_malloc((UINT16)sizeof(tGATT_PENDING_ENC_CLCB))) != NULL) {
GATT_TRACE_DEBUG ("enqueue a new pending encryption channel clcb");
p_buf->p_clcb = p_clcb;
fixed_queue_enqueue(p_tcb->pending_enc_clcb, p_buf);
fixed_queue_enqueue(p_tcb->pending_enc_clcb, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
}
return p_buf;
}

View file

@ -288,7 +288,7 @@ void btu_task_shut_down(void);
UINT16 BTU_BleAclPktSize(void);
bool btu_task_post(uint32_t sig, void *param, osi_thread_blocking_t blocking);
bool btu_task_post(uint32_t sig, void *param, uint32_t timeout);
/*
#ifdef __cplusplus

View file

@ -2221,7 +2221,7 @@ UINT16 L2CA_FlushChannel (UINT16 lcid, UINT16 num_to_flush)
/* If needed, flush buffers in the CCB xmit hold queue */
while ( (num_to_flush != 0) && (!fixed_queue_is_empty(p_ccb->xmit_hold_q))) {
BT_HDR *p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->xmit_hold_q);
BT_HDR *p_buf = (BT_HDR *)fixed_queue_dequeue(p_ccb->xmit_hold_q, 0);
if (p_buf) {
osi_free (p_buf);
}

View file

@ -1361,7 +1361,7 @@ void l2cble_sec_comp(BD_ADDR p_bda, tBT_TRANSPORT transport, void *p_ref_data,
if (!fixed_queue_is_empty(p_lcb->le_sec_pending_q))
{
p_buf = (tL2CAP_SEC_DATA*) fixed_queue_dequeue(p_lcb->le_sec_pending_q);
p_buf = (tL2CAP_SEC_DATA*) fixed_queue_dequeue(p_lcb->le_sec_pending_q, FIXED_QUEUE_MAX_TIMEOUT);
if (!p_buf)
{
L2CAP_TRACE_WARNING ("%s Security complete for request not initiated from L2CAP",
@ -1406,7 +1406,7 @@ void l2cble_sec_comp(BD_ADDR p_bda, tBT_TRANSPORT transport, void *p_ref_data,
while (!fixed_queue_is_empty(p_lcb->le_sec_pending_q))
{
p_buf = (tL2CAP_SEC_DATA*) fixed_queue_dequeue(p_lcb->le_sec_pending_q);
p_buf = (tL2CAP_SEC_DATA*) fixed_queue_dequeue(p_lcb->le_sec_pending_q, FIXED_QUEUE_MAX_TIMEOUT);
if (status != BTM_SUCCESS) {
(*(p_buf->p_callback))(p_bda, BT_TRANSPORT_LE, p_buf->p_ref_data, status);
@ -1462,7 +1462,7 @@ BOOLEAN l2ble_sec_access_req(BD_ADDR bd_addr, UINT16 psm, BOOLEAN is_originator,
p_buf->is_originator = is_originator;
p_buf->p_callback = p_callback;
p_buf->p_ref_data = p_ref_data;
fixed_queue_enqueue(p_lcb->le_sec_pending_q, p_buf);
fixed_queue_enqueue(p_lcb->le_sec_pending_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
status = btm_ble_start_sec_check(bd_addr, psm, is_originator, &l2cble_sec_comp, p_ref_data);
return status;

View file

@ -1243,7 +1243,7 @@ void l2c_enqueue_peer_data (tL2C_CCB *p_ccb, BT_HDR *p_buf)
UINT16_TO_STREAM (p, p_ccb->remote_cid);
}
fixed_queue_enqueue(p_ccb->xmit_hold_q, p_buf);
fixed_queue_enqueue(p_ccb->xmit_hold_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
l2cu_check_channel_congestion (p_ccb);

View file

@ -752,7 +752,7 @@ void l2c_fcr_proc_pdu (tL2C_CCB *p_ccb, BT_HDR *p_buf)
fixed_queue_t *temp_q = p_ccb->fcrb.srej_rcv_hold_q;
p_ccb->fcrb.srej_rcv_hold_q = fixed_queue_new(QUEUE_SIZE_MAX);
while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(temp_q)) != NULL) {
while ((p_buf = (BT_HDR *)fixed_queue_dequeue(temp_q, 0)) != NULL) {
if (p_ccb->in_use && (p_ccb->chnl_state == CST_OPEN)) {
/* Get the control word */
p = ((UINT8 *)(p_buf + 1)) + p_buf->offset - L2CAP_FCR_OVERHEAD;
@ -921,7 +921,7 @@ static BOOLEAN process_reqseq (tL2C_CCB *p_ccb, UINT16 ctrl_word)
#endif
for (xx = 0; xx < num_bufs_acked; xx++) {
BT_HDR *p_tmp = (BT_HDR *)fixed_queue_try_dequeue(p_fcrb->waiting_for_ack_q);
BT_HDR *p_tmp = (BT_HDR *)fixed_queue_dequeue(p_fcrb->waiting_for_ack_q, 0);
ls = p_tmp->layer_specific & L2CAP_FCR_SAR_BITS;
if ( (ls == L2CAP_FCR_UNSEG_SDU) || (ls == L2CAP_FCR_END_SDU) ) {
@ -1118,7 +1118,7 @@ static void process_i_frame (tL2C_CCB *p_ccb, BT_HDR *p_buf, UINT16 ctrl_word, B
num_lost, tx_seq, p_fcrb->next_seq_expected, p_fcrb->rej_sent);
p_buf->layer_specific = tx_seq;
fixed_queue_enqueue(p_fcrb->srej_rcv_hold_q, p_buf);
fixed_queue_enqueue(p_fcrb->srej_rcv_hold_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
} else {
L2CAP_TRACE_WARNING ("process_i_frame() CID: 0x%04x frame dropped in Srej Sent next_srej:%u hold_q.count:%u win_sz:%u",
p_ccb->local_cid, next_srej, fixed_queue_length(p_fcrb->srej_rcv_hold_q), p_ccb->our_cfg.fcr.tx_win_sz);
@ -1147,7 +1147,7 @@ static void process_i_frame (tL2C_CCB *p_ccb, BT_HDR *p_buf, UINT16 ctrl_word, B
p_ccb->local_cid, tx_seq, fixed_queue_length(p_fcrb->srej_rcv_hold_q));
}
p_buf->layer_specific = tx_seq;
fixed_queue_enqueue(p_fcrb->srej_rcv_hold_q, p_buf);
fixed_queue_enqueue(p_fcrb->srej_rcv_hold_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
p_fcrb->srej_sent = TRUE;
l2c_fcr_send_S_frame (p_ccb, L2CAP_FCR_SUP_SREJ, 0);
}
@ -1471,7 +1471,7 @@ static BOOLEAN retransmit_i_frames (tL2C_CCB *p_ccb, UINT8 tx_seq)
/* Also flush our retransmission queue */
while (!fixed_queue_is_empty(p_ccb->fcrb.retrans_q)) {
osi_free(fixed_queue_try_dequeue(p_ccb->fcrb.retrans_q));
osi_free(fixed_queue_dequeue(p_ccb->fcrb.retrans_q, 0));
}
if (list_ack != NULL) {
@ -1490,7 +1490,7 @@ static BOOLEAN retransmit_i_frames (tL2C_CCB *p_ccb, UINT8 tx_seq)
{
p_buf2->layer_specific = p_buf->layer_specific;
fixed_queue_enqueue(p_ccb->fcrb.retrans_q, p_buf2);
fixed_queue_enqueue(p_ccb->fcrb.retrans_q, p_buf2, FIXED_QUEUE_MAX_TIMEOUT);
}
if ( (tx_seq != L2C_FCR_RETX_ALL_PKTS) || (p_buf2 == NULL) ) {
@ -1534,7 +1534,7 @@ BT_HDR *l2c_fcr_get_next_xmit_sdu_seg (tL2C_CCB *p_ccb, UINT16 max_packet_length
/* If there is anything in the retransmit queue, that goes first
*/
p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->fcrb.retrans_q);
p_buf = (BT_HDR *)fixed_queue_dequeue(p_ccb->fcrb.retrans_q, 0);
if (p_buf != NULL) {
/* Update Rx Seq and FCS if we acked some packets while this one was queued */
prepare_I_frame (p_ccb, p_buf, TRUE);
@ -1586,7 +1586,7 @@ BT_HDR *l2c_fcr_get_next_xmit_sdu_seg (tL2C_CCB *p_ccb, UINT16 max_packet_length
return (NULL);
}
} else { /* Use the original buffer if no segmentation, or the last segment */
p_xmit = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->xmit_hold_q);
p_xmit = (BT_HDR *)fixed_queue_dequeue(p_ccb->xmit_hold_q, 0);
if (p_xmit->event != 0) {
last_seg = TRUE;
@ -1647,7 +1647,7 @@ BT_HDR *l2c_fcr_get_next_xmit_sdu_seg (tL2C_CCB *p_ccb, UINT16 max_packet_length
}
/* Pretend we sent it and it got lost */
fixed_queue_enqueue(p_ccb->fcrb.waiting_for_ack_q, p_xmit);
fixed_queue_enqueue(p_ccb->fcrb.waiting_for_ack_q, p_xmit, FIXED_QUEUE_MAX_TIMEOUT);
return (NULL);
} else {
#if (L2CAP_ERTM_STATS == TRUE)
@ -1661,7 +1661,7 @@ BT_HDR *l2c_fcr_get_next_xmit_sdu_seg (tL2C_CCB *p_ccb, UINT16 max_packet_length
}
p_wack->layer_specific = p_xmit->layer_specific;
fixed_queue_enqueue(p_ccb->fcrb.waiting_for_ack_q, p_wack);
fixed_queue_enqueue(p_ccb->fcrb.waiting_for_ack_q, p_wack, FIXED_QUEUE_MAX_TIMEOUT);
}
#if (L2CAP_ERTM_STATS == TRUE)

View file

@ -596,13 +596,13 @@ void l2c_ucd_delete_sec_pending_q(tL2C_LCB *p_lcb)
{
/* clean up any security pending UCD */
while (p_lcb->ucd_out_sec_pending_q.p_first) {
osi_free(fixed_queue_try_dequeue(p_lcb->ucd_out_sec_pending_q));
osi_free(fixed_queue_dequeue(p_lcb->ucd_out_sec_pending_q, 0));
}
fixed_queue_free(p_lcb->ucd_out_sec_pending_q, NULL);
p_lcb->ucd_out_sec_pending_q = NULL;
while (! fixed_queue_is_empty(p_lcb->ucd_in_sec_pending_q)) {
osi_free(fixed_queue_try_dequeue(p_lcb->ucd_in_sec_pending_q));
osi_free(fixed_queue_dequeue(p_lcb->ucd_in_sec_pending_q, 0));
}
fixed_queue_free(p_lcb->ucd_in_sec_pending_q);
p_lcb->ucd_in_sec_pending_q = NULL;
@ -683,7 +683,7 @@ BOOLEAN l2c_ucd_check_pending_info_req(tL2C_CCB *p_ccb)
*******************************************************************************/
void l2c_ucd_enqueue_pending_out_sec_q(tL2C_CCB *p_ccb, void *p_data)
{
fixed_queue_enqueue(p_ccb->p_lcb->ucd_out_sec_pending_q, p_data);
fixed_queue_enqueue(p_ccb->p_lcb->ucd_out_sec_pending_q, p_data, FIXED_QUEUE_MAX_TIMEOUT);
l2cu_check_channel_congestion (p_ccb);
}
@ -727,7 +727,7 @@ BOOLEAN l2c_ucd_check_pending_out_sec_q(tL2C_CCB *p_ccb)
*******************************************************************************/
void l2c_ucd_send_pending_out_sec_q(tL2C_CCB *p_ccb)
{
BT_HDR *p_buf = (BT_HDR*)fixed_queue_try_dequeue(p_ccb->p_lcb->ucd_out_sec_pending_q);
BT_HDR *p_buf = (BT_HDR*)fixed_queue_dequeue(p_ccb->p_lcb->ucd_out_sec_pending_q, 0);
if (p_buf != NULL) {
l2c_enqueue_peer_data (p_ccb, (BT_HDR *)p_buf);
@ -747,7 +747,7 @@ void l2c_ucd_send_pending_out_sec_q(tL2C_CCB *p_ccb)
*******************************************************************************/
void l2c_ucd_discard_pending_out_sec_q(tL2C_CCB *p_ccb)
{
BT_HDR *p_buf = (BT_HDR*)fixed_queue_try_dequeue(p_ccb->p_lcb->ucd_out_sec_pending_q);
BT_HDR *p_buf = (BT_HDR*)fixed_queue_dequeue(p_ccb->p_lcb->ucd_out_sec_pending_q, 0);
/* we may need to report to application */
@ -767,7 +767,7 @@ void l2c_ucd_discard_pending_out_sec_q(tL2C_CCB *p_ccb)
*******************************************************************************/
BOOLEAN l2c_ucd_check_pending_in_sec_q(tL2C_CCB *p_ccb)
{
BT_HDR *p_buf = (BT_HDR*)fixed_queue_try_dequeue(p_ccb->p_lcb->ucd_in_sec_pending_q);
BT_HDR *p_buf = (BT_HDR*)fixed_queue_dequeue(p_ccb->p_lcb->ucd_in_sec_pending_q, 0);
if (p_buf != NULL) {
UINT16 psm;
@ -795,7 +795,7 @@ BOOLEAN l2c_ucd_check_pending_in_sec_q(tL2C_CCB *p_ccb)
*******************************************************************************/
void l2c_ucd_send_pending_in_sec_q(tL2C_CCB *p_ccb)
{
BT_HDR *p_buf = (BT_HDR*)fixed_queue_try_dequeue(p_ccb->p_lcb->ucd_in_sec_pending_q)
BT_HDR *p_buf = (BT_HDR*)fixed_queue_dequeue(p_ccb->p_lcb->ucd_in_sec_pending_q, 0)
if (p_buf != NULL) {
p_ccb->p_rcb->ucd.cb_info.pL2CA_UCD_Data_Cb(p_ccb->p_lcb->remote_bd_addr, (BT_HDR *)p_buf);
@ -814,7 +814,7 @@ void l2c_ucd_send_pending_in_sec_q(tL2C_CCB *p_ccb)
*******************************************************************************/
void l2c_ucd_discard_pending_in_sec_q(tL2C_CCB *p_ccb)
{
BT_HDR *p_buf = (BT_HDR*)fixed_queue_try_dequeue(p_ccb->p_lcb->ucd_in_sec_pending_q);
BT_HDR *p_buf = (BT_HDR*)fixed_queue_dequeue(p_ccb->p_lcb->ucd_in_sec_pending_q, 0);
if (p_buf) {
osi_free (p_buf);
@ -898,7 +898,7 @@ BOOLEAN l2c_ucd_process_event(tL2C_CCB *p_ccb, UINT16 event, void *p_data)
break;
case L2CEVT_L2CAP_DATA: /* Peer data packet rcvd */
fixed_queue_enqueue(p_ccb->p_lcb->ucd_in_sec_pending_q, p_data);
fixed_queue_enqueue(p_ccb->p_lcb->ucd_in_sec_pending_q, p_data, FIXED_QUEUE_MAX_TIMEOUT);
break;
case L2CEVT_L2CA_DATA_WRITE: /* Upper layer data to send */
@ -958,7 +958,7 @@ BOOLEAN l2c_ucd_process_event(tL2C_CCB *p_ccb, UINT16 event, void *p_data)
break;
case L2CEVT_L2CAP_DATA: /* Peer data packet rcvd */
fixed_queue_enqueue(p_ccb->p_lcb->ucd_in_sec_pending_q, p_data);
fixed_queue_enqueue(p_ccb->p_lcb->ucd_in_sec_pending_q, p_data, FIXED_QUEUE_MAX_TIMEOUT);
break;
case L2CEVT_L2CAP_INFO_RSP:
@ -1006,7 +1006,7 @@ BOOLEAN l2c_ucd_process_event(tL2C_CCB *p_ccb, UINT16 event, void *p_data)
break;
case L2CEVT_L2CAP_DATA: /* Peer data packet rcvd */
fixed_queue_enqueue(p_ccb->p_lcb->ucd_in_sec_pending_q, p_data);
fixed_queue_enqueue(p_ccb->p_lcb->ucd_in_sec_pending_q, p_data, FIXED_QUEUE_MAX_TIMEOUT);
break;
case L2CEVT_SEC_RE_SEND_CMD: /* BTM has enough info to proceed */
@ -1033,7 +1033,7 @@ BOOLEAN l2c_ucd_process_event(tL2C_CCB *p_ccb, UINT16 event, void *p_data)
/* stop idle timer of UCD */
btu_stop_timer (&p_ccb->timer_entry);
fixed_queue_enqueue(p_ccb->p_lcb->ucd_in_sec_pending_q, p_data);
fixed_queue_enqueue(p_ccb->p_lcb->ucd_in_sec_pending_q, p_data, FIXED_QUEUE_MAX_TIMEOUT);
l2c_ucd_check_pending_in_sec_q (p_ccb);
break;

View file

@ -249,7 +249,7 @@ void l2cu_release_lcb (tL2C_LCB *p_lcb)
{
while (!fixed_queue_is_empty(p_lcb->le_sec_pending_q))
{
tL2CAP_SEC_DATA *p_buf = (tL2CAP_SEC_DATA*) fixed_queue_dequeue(p_lcb->le_sec_pending_q);
tL2CAP_SEC_DATA *p_buf = (tL2CAP_SEC_DATA*) fixed_queue_dequeue(p_lcb->le_sec_pending_q, FIXED_QUEUE_MAX_TIMEOUT);
if (p_buf->p_callback) {
p_buf->p_callback(p_lcb->remote_bd_addr, p_lcb->transport, p_buf->p_ref_data, BTM_DEV_RESET);
}
@ -930,7 +930,7 @@ void l2cu_send_peer_disc_req (tL2C_CCB *p_ccb)
layer checks that all buffers are sent before disconnecting.
*/
if (p_ccb->peer_cfg.fcr.mode == L2CAP_FCR_BASIC_MODE) {
while ((p_buf2 = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->xmit_hold_q)) != NULL) {
while ((p_buf2 = (BT_HDR *)fixed_queue_dequeue(p_ccb->xmit_hold_q, 0)) != NULL) {
l2cu_set_acl_hci_header (p_buf2, p_ccb);
l2c_link_check_send_pkts (p_ccb->p_lcb, p_ccb, p_buf2);
}
@ -3488,7 +3488,7 @@ BT_HDR *l2cu_get_next_buffer_to_send (tL2C_LCB *p_lcb)
} else {
if (!fixed_queue_is_empty(p_ccb->xmit_hold_q)) {
p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->xmit_hold_q);
p_buf = (BT_HDR *)fixed_queue_dequeue(p_ccb->xmit_hold_q, 0);
if (NULL == p_buf) {
L2CAP_TRACE_ERROR("l2cu_get_buffer_to_send: No data to be sent");
return (NULL);
@ -3525,7 +3525,7 @@ BT_HDR *l2cu_get_next_buffer_to_send (tL2C_LCB *p_lcb)
}
} else {
p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_ccb->xmit_hold_q);
p_buf = (BT_HDR *)fixed_queue_dequeue(p_ccb->xmit_hold_q, 0);
if (NULL == p_buf) {
L2CAP_TRACE_ERROR("l2cu_get_buffer_to_send() #2: No data to be sent");
return (NULL);

View file

@ -1098,7 +1098,7 @@ int PORT_Purge (UINT16 handle, UINT8 purge_flags)
count = fixed_queue_length(p_port->rx.queue);
while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_port->rx.queue)) != NULL) {
while ((p_buf = (BT_HDR *)fixed_queue_dequeue(p_port->rx.queue, 0)) != NULL) {
osi_free (p_buf);
}
@ -1115,7 +1115,7 @@ int PORT_Purge (UINT16 handle, UINT8 purge_flags)
if (purge_flags & PORT_PURGE_TXCLEAR) {
osi_mutex_global_lock(); /* to prevent tx.queue_size from being negative */
while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_port->tx.queue)) != NULL) {
while ((p_buf = (BT_HDR *)fixed_queue_dequeue(p_port->tx.queue, 0)) != NULL) {
osi_free (p_buf);
}
@ -1218,7 +1218,7 @@ int PORT_ReadData (UINT16 handle, char *p_data, UINT16 max_len, UINT16 *p_len)
p_data += p_buf->len;
}
osi_free(fixed_queue_try_dequeue(p_port->rx.queue));
osi_free(fixed_queue_dequeue(p_port->rx.queue, 0));
osi_mutex_global_unlock();
@ -1274,7 +1274,7 @@ int PORT_Read (UINT16 handle, BT_HDR **pp_buf)
osi_mutex_global_lock();
p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_port->rx.queue);
p_buf = (BT_HDR *)fixed_queue_dequeue(p_port->rx.queue, 0);
if (p_buf) {
p_port->rx.queue_size -= p_buf->len;
@ -1340,7 +1340,7 @@ static int port_write (tPORT *p_port, BT_HDR *p_buf)
p_port->rfc.state,
p_port->port_ctrl);
fixed_queue_enqueue(p_port->tx.queue, p_buf);
fixed_queue_enqueue(p_port->tx.queue, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
p_port->tx.queue_size += p_buf->len;
return (PORT_CMD_PENDING);

View file

@ -869,7 +869,7 @@ void PORT_DataInd (tRFC_MCB *p_mcb, UINT8 dlci, BT_HDR *p_buf)
osi_mutex_global_lock();
fixed_queue_enqueue(p_port->rx.queue, p_buf);
fixed_queue_enqueue(p_port->rx.queue, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
p_port->rx.queue_size += p_buf->len;
osi_mutex_global_unlock();
@ -976,7 +976,7 @@ UINT32 port_rfc_send_tx_data (tPORT *p_port)
/* get data from tx queue and send it */
osi_mutex_global_lock();
if ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_port->tx.queue)) != NULL) {
if ((p_buf = (BT_HDR *)fixed_queue_dequeue(p_port->tx.queue, 0)) != NULL) {
p_port->tx.queue_size -= p_buf->len;
osi_mutex_global_unlock();

View file

@ -210,13 +210,13 @@ void port_release_port (tPORT *p_port)
osi_mutex_global_lock();
RFCOMM_TRACE_DEBUG("port_release_port, p_port:%p", p_port);
while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_port->rx.queue)) != NULL) {
while ((p_buf = (BT_HDR *)fixed_queue_dequeue(p_port->rx.queue, 0)) != NULL) {
osi_free (p_buf);
}
p_port->rx.queue_size = 0;
while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_port->tx.queue)) != NULL) {
while ((p_buf = (BT_HDR *)fixed_queue_dequeue(p_port->tx.queue, 0)) != NULL) {
osi_free (p_buf);
}

View file

@ -488,7 +488,7 @@ void rfc_mx_sm_state_disc_wait_ua (tRFC_MCB *p_mcb, UINT16 event, void *p_data)
rfc_save_lcid_mcb (p_mcb, p_mcb->lcid);
/* clean up before reuse it */
while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_mcb->cmd_q)) != NULL) {
while ((p_buf = (BT_HDR *)fixed_queue_dequeue(p_mcb->cmd_q, 0)) != NULL) {
osi_free(p_buf);
}

View file

@ -492,12 +492,12 @@ void rfc_check_send_cmd(tRFC_MCB *p_mcb, BT_HDR *p_buf)
__func__, p_mcb, p_mcb->lcid,
rfc_find_lcid_mcb(p_mcb->lcid));
}
fixed_queue_enqueue(p_mcb->cmd_q, p_buf);
fixed_queue_enqueue(p_mcb->cmd_q, p_buf, FIXED_QUEUE_MAX_TIMEOUT);
}
/* handle queue if L2CAP not congested */
while (p_mcb->l2cap_congested == FALSE) {
if ((p = (BT_HDR *)fixed_queue_try_dequeue(p_mcb->cmd_q)) == NULL) {
if ((p = (BT_HDR *)fixed_queue_dequeue(p_mcb->cmd_q, 0)) == NULL) {
break;
}