Bluetooth: controller: use RX node piggy-back for NTF when possible
When possible, re-use the already allocated RX node for notifications: store (retain) the RX node and link element on RX if an NTF could occur, and pass the link element to LLCP (ull_cp_rx()) together with the RX node. A new RX node type, RETAIN, is introduced to signal retention. When no RX node is available, allocate one and hold off TX on procedures until a node is available for the NTF. If waiting for an NTF buffer is needed, allocate and store the TX node to use for TX once the NTF node becomes available.

CIS Established (incl. timeout handling) is now handled entirely as a specific event driven by ull_conn_iso, i.e. the procedure-level check of cis->established and cis->expire is removed, as it duplicates the mechanism in the conn_iso context.

Unit tests and helpers are updated to handle the new node type. ull_cp_release_ntf() was used only in the unit tests, so it is moved to the helper context, and release_ntf is updated to handle the fact that, with piggy-backing, the node used for NTF in the test context can come from two different memory pools.

Signed-off-by: Erik Brockhoff <erbr@oticon.com>
Parent: 7c3a708ab8
Commit: 998512f59b
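For orientation, here is a minimal sketch of the piggy-back pattern this commit applies across the LLCP procedures, paraphrased from llcp_rx_node_retain() and lp_comm_ntf() in the diff that follows. It is a simplified illustration using the controller's existing types and helpers (struct proc_ctx, node_rx_pdu, llcp_ntf_alloc(), ll_rx_put_sched()); the sketch_* function names are hypothetical and not part of the change itself.

/* Sketch: retain the RX node that carried the request so it can later be
 * re-used as the notification (NTF) node, instead of releasing it and
 * allocating a fresh node from the NTF pool.
 */
static void sketch_rx_node_retain(struct proc_ctx *ctx)
{
	/* Mark the node so the ULL demux neither schedules nor releases it */
	ctx->node_ref.rx->hdr.type = NODE_RX_TYPE_RETAIN;
	/* Remember the memq link needed for the later put/sched */
	ctx->node_ref.rx->hdr.link = ctx->node_ref.link;
}

static void sketch_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
{
	struct node_rx_pdu *ntf = ctx->node_ref.rx;
	uint8_t piggy_back = 1U;

	ctx->node_ref.rx = NULL;
	if (!ntf) {
		/* No retained RX node: fall back to the NTF pool */
		ntf = llcp_ntf_alloc();
		LL_ASSERT(ntf);
		piggy_back = 0U;
	}

	ntf->hdr.type = NODE_RX_TYPE_DC_PDU;
	ntf->hdr.handle = conn->lll.handle;
	/* ...encode the notification payload into ntf->pdu... */

	if (!piggy_back) {
		/* A freshly allocated node is enqueued here; a piggy-backed
		 * node is put/scheduled on the ull_cp_rx() return path.
		 */
		ll_rx_put_sched(ntf->hdr.link, ntf);
	}
}

When no node can be retained and the NTF pool is empty, the procedures in the diff instead pre-allocate their TX node and park in a *_WAIT_NTF_AVAIL state until a notification buffer frees up.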
@@ -319,6 +319,8 @@ enum node_rx_type {
NODE_RX_TYPE_DTM_IQ_SAMPLE_REPORT,
NODE_RX_TYPE_IQ_SAMPLE_REPORT_ULL_RELEASE,
NODE_RX_TYPE_IQ_SAMPLE_REPORT_LLL_RELEASE,
/* Signals retention (ie non-release) of rx node */
NODE_RX_TYPE_RETAIN,

#if defined(CONFIG_BT_CTLR_USER_EXT)
/* No entries shall be added after the NODE_RX_TYPE_USER_START/END */
@@ -2846,7 +2846,8 @@ static inline int rx_demux_rx(memq_link_t *link, struct node_rx_hdr *rx)

(void)memq_dequeue(memq_ull_rx.tail, &memq_ull_rx.head, NULL);

if (rx) {
/* Only schedule node if not marked as retain by LLCP */
if (rx && rx->type != NODE_RX_TYPE_RETAIN) {
ll_rx_put_sched(link, rx);
}
}
@@ -824,13 +824,11 @@ int ull_conn_rx(memq_link_t *link, struct node_rx_pdu **rx)
switch (pdu_rx->ll_id) {
case PDU_DATA_LLID_CTRL:
{
ARG_UNUSED(link);
ARG_UNUSED(pdu_rx);

ull_cp_rx(conn, *rx);

/* Mark buffer for release */
(*rx)->hdr.type = NODE_RX_TYPE_RELEASE;

ull_cp_rx(conn, link, *rx);

return 0;
}
@@ -279,6 +279,17 @@ void llcp_tx_resume_data(struct ll_conn *conn, enum llcp_tx_q_pause_data_mask re
}
}

void llcp_rx_node_retain(struct proc_ctx *ctx)
{
LL_ASSERT(ctx->node_ref.rx);

/* Mark RX node to NOT release */
ctx->node_ref.rx->hdr.type = NODE_RX_TYPE_RETAIN;

/* store link element reference to use once this node is moved up */
ctx->node_ref.rx->hdr.link = ctx->node_ref.link;
}

/*
 * LLCP Procedure Creation
 */
@@ -296,7 +307,8 @@ static struct proc_ctx *create_procedure(enum llcp_proc proc, struct llcp_mem_po
ctx->collision = 0U;
ctx->done = 0U;
ctx->rx_greedy = 0U;
ctx->tx_ack = NULL;
ctx->node_ref.rx = NULL;
ctx->node_ref.tx_ack = NULL;

/* Clear procedure data */
memset((void *)&ctx->data, 0, sizeof(ctx->data));
@@ -575,12 +587,6 @@ void ull_cp_release_tx(struct ll_conn *conn, struct node_tx *tx)
tx_release(tx);
}

void ull_cp_release_ntf(struct node_rx_pdu *ntf)
{
ntf->hdr.next = NULL;
ll_rx_mem_release((void **)&ntf);
}

static int prt_elapse(uint16_t *expire, uint16_t elapsed_event)
{
if (*expire != 0U) {
@@ -1699,13 +1705,13 @@ void ull_cp_tx_ack(struct ll_conn *conn, struct node_tx *tx)
struct proc_ctx *ctx;

ctx = llcp_lr_peek(conn);
if (ctx && ctx->tx_ack == tx) {
if (ctx && ctx->node_ref.tx_ack == tx) {
/* TX ack re. local request */
llcp_lr_tx_ack(conn, ctx, tx);
}

ctx = llcp_rr_peek(conn);
if (ctx && ctx->tx_ack == tx) {
if (ctx && ctx->node_ref.tx_ack == tx) {
/* TX ack re. remote response */
llcp_rr_tx_ack(conn, ctx, tx);
}
@@ -1728,7 +1734,7 @@ void ull_cp_tx_ntf(struct ll_conn *conn)
}
}

void ull_cp_rx(struct ll_conn *conn, struct node_rx_pdu *rx)
void ull_cp_rx(struct ll_conn *conn, memq_link_t *link, struct node_rx_pdu *rx)
{
struct proc_ctx *ctx_l;
struct proc_ctx *ctx_r;
@@ -1802,7 +1808,7 @@ void ull_cp_rx(struct ll_conn *conn, struct node_rx_pdu *rx)
*/

/* Process PDU in remote procedure */
llcp_rr_rx(conn, ctx_r, rx);
llcp_rr_rx(conn, ctx_r, link, rx);
} else if (unexpected_r) {
/* Local active procedure
 * Expected local procedure PDU
@@ -1811,7 +1817,7 @@ void ull_cp_rx(struct ll_conn *conn, struct node_rx_pdu *rx)
*/

/* Process PDU in local procedure */
llcp_lr_rx(conn, ctx_l, rx);
llcp_lr_rx(conn, ctx_l, link, rx);
}
/* no else clause as this cannot occur with the logic above:
 * if they are not identical then one must be true
@@ -1833,7 +1839,7 @@ void ull_cp_rx(struct ll_conn *conn, struct node_rx_pdu *rx)

/* Process PDU as a new remote request */
LL_ASSERT(pdu_valid);
llcp_rr_new(conn, rx, true);
llcp_rr_new(conn, link, rx, true);
} else {
/* Local active procedure
 * Expected local procedure PDU
@@ -1841,7 +1847,7 @@ void ull_cp_rx(struct ll_conn *conn, struct node_rx_pdu *rx)
*/

/* Process PDU in local procedure */
llcp_lr_rx(conn, ctx_l, rx);
llcp_lr_rx(conn, ctx_l, link, rx);
}
}
} else if (ctx_r) {
@@ -1850,14 +1856,14 @@ void ull_cp_rx(struct ll_conn *conn, struct node_rx_pdu *rx)
*/

/* Process PDU in remote procedure */
llcp_rr_rx(conn, ctx_r, rx);
llcp_rr_rx(conn, ctx_r, link, rx);
} else {
/* No local active procedure
 * No remote active procedure
 */

/* Process PDU as a new remote request */
llcp_rr_new(conn, rx, pdu_valid);
llcp_rr_new(conn, link, rx, pdu_valid);
}
}
@@ -35,11 +35,6 @@ void ull_cp_update_tx_buffer_queue(struct ll_conn *conn);
*/
void ull_cp_release_tx(struct ll_conn *conn, struct node_tx *tx);

/**
 *
 */
void ull_cp_release_ntf(struct node_rx_pdu *ntf);

/**
 * @brief Procedure Response Timeout Check
 * @param elapsed_event The number of elapsed events.
@@ -68,7 +63,7 @@ void ull_cp_tx_ntf(struct ll_conn *conn);
/**
 * @brief Handle received LL Control PDU.
 */
void ull_cp_rx(struct ll_conn *conn, struct node_rx_pdu *rx);
void ull_cp_rx(struct ll_conn *conn, memq_link_t *link, struct node_rx_pdu *rx);

#if defined(CONFIG_BT_CTLR_LE_PING)
/**
@@ -52,31 +52,18 @@
#include <soc.h>
#include "hal/debug.h"

static bool cc_check_cis_established_or_timeout_lll(struct proc_ctx *ctx)
{
const struct ll_conn_iso_stream *cis =
ll_conn_iso_stream_get(ctx->data.cis_create.cis_handle);

if (cis->established) {
return true;
}

if (!cis->event_expire) {
ctx->data.cis_create.error = BT_HCI_ERR_CONN_FAIL_TO_ESTAB;
return true;
}

return false;
}

static void cc_ntf_established(struct ll_conn *conn, struct proc_ctx *ctx)
{
struct node_rx_pdu *ntf;
struct node_rx_conn_iso_estab *pdu;
struct node_rx_pdu *ntf;
uint8_t piggy_back;

/* Allocate ntf node */
ntf = llcp_ntf_alloc();
ntf = ctx->node_ref.rx;
LL_ASSERT(ntf);
ctx->node_ref.rx = NULL;

piggy_back = (ntf->hdr.type != NODE_RX_TYPE_RETAIN);

ntf->hdr.type = NODE_RX_TYPE_CIS_ESTABLISHED;
ntf->hdr.handle = conn->lll.handle;
@@ -87,8 +74,10 @@ static void cc_ntf_established(struct ll_conn *conn, struct proc_ctx *ctx)
pdu->cis_handle = ctx->data.cis_create.cis_handle;
pdu->status = ctx->data.cis_create.error;

/* Enqueue notification towards LL */
ll_rx_put_sched(ntf->hdr.link, ntf);
if (!piggy_back) {
/* Enqueue notification towards LL */
ll_rx_put_sched(ntf->hdr.link, ntf);
}
}

#if defined(CONFIG_BT_PERIPHERAL)
@@ -97,14 +86,13 @@ enum {
/* Establish Procedure */
RP_CC_STATE_IDLE,
RP_CC_STATE_WAIT_RX_CIS_REQ,
RP_CC_STATE_WAIT_NTF_CIS_CREATE,
RP_CC_STATE_WAIT_REPLY,
RP_CC_STATE_WAIT_TX_CIS_RSP,
RP_CC_STATE_WAIT_TX_REJECT_IND,
RP_CC_STATE_WAIT_RX_CIS_IND,
RP_CC_STATE_WAIT_INSTANT,
RP_CC_STATE_WAIT_CIS_ESTABLISHED,
RP_CC_STATE_WAIT_NTF,
RP_CC_STATE_WAIT_NTF_AVAIL,
};

/* LLCP Remote Procedure FSM events */
@@ -202,8 +190,9 @@ static void llcp_rp_cc_tx_reject(struct ll_conn *conn, struct proc_ctx *ctx, uin
struct pdu_data *pdu;

/* Allocate tx node */
tx = llcp_tx_alloc(conn, ctx);
tx = ctx->node_ref.tx_ack;
LL_ASSERT(tx);
ctx->node_ref.tx_ack = NULL;

pdu = (struct pdu_data *)tx->pdu;

@@ -220,8 +209,8 @@ static void rp_cc_ntf_create(struct ll_conn *conn, struct proc_ctx *ctx)
struct node_rx_pdu *ntf;
struct node_rx_conn_iso_req *pdu;

/* Allocate ntf node */
ntf = llcp_ntf_alloc();
ntf = ctx->node_ref.rx;
ctx->node_ref.rx = NULL;
LL_ASSERT(ntf);

ntf->hdr.type = NODE_RX_TYPE_CIS_REQUEST;
@@ -233,20 +222,13 @@ static void rp_cc_ntf_create(struct ll_conn *conn, struct proc_ctx *ctx)
pdu->cis_handle = ctx->data.cis_create.cis_handle;

ctx->data.cis_create.host_request_to = 0U;

/* Enqueue notification towards LL */
ll_rx_put_sched(ntf->hdr.link, ntf);
}

static void rp_cc_complete(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
if (!llcp_ntf_alloc_is_available()) {
ctx->state = RP_CC_STATE_WAIT_NTF;
} else {
cc_ntf_established(conn, ctx);
llcp_rr_complete(conn);
ctx->state = RP_CC_STATE_IDLE;
}
cc_ntf_established(conn, ctx);
llcp_rr_complete(conn);
ctx->state = RP_CC_STATE_IDLE;
}

static void rp_cc_send_cis_rsp(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
@@ -263,23 +245,28 @@ static void rp_cc_send_cis_rsp(struct ll_conn *conn, struct proc_ctx *ctx, uint8
}
}

static void rp_cc_send_create_ntf(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
void *param)
{
if (!llcp_ntf_alloc_is_available()) {
ctx->state = RP_CC_STATE_WAIT_NTF_CIS_CREATE;
} else {
rp_cc_ntf_create(conn, ctx);
ctx->state = RP_CC_STATE_WAIT_REPLY;
}
}

static void rp_cc_send_reject_ind(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
void *param)
{
if (llcp_rr_ispaused(conn) || !llcp_tx_alloc_peek(conn, ctx)) {
ctx->state = RP_CC_STATE_WAIT_TX_REJECT_IND;
} else {
/* Allocate TX node to use, store in case we need to wait for NTF node */
ctx->node_ref.tx_ack = llcp_tx_alloc(conn, ctx);
if (ctx->data.cis_create.error == BT_HCI_ERR_CONN_ACCEPT_TIMEOUT) {
/* We complete with error, so we must generate NTF, thus we must make sure
 * we have a node to use for NTF before TX'ing
 */
if (!llcp_ntf_alloc_is_available()) {
ctx->state = RP_CC_STATE_WAIT_NTF_AVAIL;
return;
}
ctx->node_ref.rx = llcp_ntf_alloc();

/* Mark node as RETAIN to trigger put/sched */
ctx->node_ref.rx->hdr.type = NODE_RX_TYPE_RETAIN;
}

llcp_rp_cc_tx_reject(conn, ctx, PDU_DATA_LLCTRL_TYPE_CIS_REQ);

if (ctx->data.cis_create.error == BT_HCI_ERR_CONN_ACCEPT_TIMEOUT) {
@@ -357,7 +344,8 @@ static void rp_cc_state_wait_rx_cis_req(struct ll_conn *conn, struct proc_ctx *c

if (ctx->data.cis_create.error == BT_HCI_ERR_SUCCESS) {
/* Now controller accepts, so go ask the host to accept or decline */
rp_cc_send_create_ntf(conn, ctx, evt, param);
rp_cc_ntf_create(conn, ctx);
ctx->state = RP_CC_STATE_WAIT_REPLY;
} else {
/* Now controller rejects, right out */
rp_cc_send_reject_ind(conn, ctx, evt, param);
@@ -410,6 +398,9 @@ static void rp_cc_state_wait_rx_cis_ind(struct ll_conn *conn, struct proc_ctx *c
/* CIS has been setup, go wait for 'instant' before starting */
ctx->state = RP_CC_STATE_WAIT_INSTANT;

/* Mark node as RETAIN to keep until we need for NTF */
llcp_rx_node_retain(ctx);

/* Check if this connection event is where we need to start the CIS */
rp_cc_check_instant(conn, ctx, evt, param);
break;
@@ -427,12 +418,20 @@ static void rp_cc_state_wait_rx_cis_ind(struct ll_conn *conn, struct proc_ctx *c
}
}

static void rp_cc_state_wait_ntf_cis_create(struct ll_conn *conn, struct proc_ctx *ctx,
uint8_t evt, void *param)
static void rp_cc_state_wait_ntf_avail(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
void *param)
{
switch (evt) {
case RP_CC_EVT_RUN:
rp_cc_send_create_ntf(conn, ctx, evt, param);
if (llcp_ntf_alloc_is_available()) {
ctx->node_ref.rx = llcp_ntf_alloc();
/* Mark node as RETAIN to trigger put/sched */
ctx->node_ref.rx->hdr.type = NODE_RX_TYPE_RETAIN;

/* Now we're good to TX reject and complete procedure*/
llcp_rp_cc_tx_reject(conn, ctx, PDU_DATA_LLCTRL_TYPE_CIS_REQ);
rp_cc_complete(conn, ctx, evt, param);
}
break;
default:
/* Ignore other evts */
@@ -440,18 +439,6 @@ static void rp_cc_state_wait_ntf_cis_create(struct ll_conn *conn, struct proc_ct
}
}

static void rp_cc_state_wait_ntf(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
void *param)
{
switch (evt) {
case RP_CC_EVT_RUN:
rp_cc_complete(conn, ctx, evt, param);
break;
default:
/* Ignore other evts */
break;
}
}

static void rp_cc_check_instant(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
void *param)
@@ -525,18 +512,7 @@ static void rp_cc_state_wait_cis_established(struct ll_conn *conn, struct proc_c
uint8_t evt, void *param)
{
switch (evt) {
case RP_CC_EVT_RUN:
/* Check for CIS state */
if (cc_check_cis_established_or_timeout_lll(ctx)) {
/* CIS was established or establishement timed out,
 * In either case complete procedure and generate
 * notification
 */
rp_cc_complete(conn, ctx, evt, param);
}
break;
case RP_CC_EVT_CIS_ESTABLISHED:
/* CIS was established, so let's go ahead and complete procedure */
rp_cc_complete(conn, ctx, evt, param);
break;
default:
@@ -556,9 +532,6 @@ static void rp_cc_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8_
case RP_CC_STATE_WAIT_RX_CIS_REQ:
rp_cc_state_wait_rx_cis_req(conn, ctx, evt, param);
break;
case RP_CC_STATE_WAIT_NTF_CIS_CREATE:
rp_cc_state_wait_ntf_cis_create(conn, ctx, evt, param);
break;
case RP_CC_STATE_WAIT_TX_REJECT_IND:
rp_cc_state_wait_tx_reject_ind(conn, ctx, evt, param);
break;
@@ -577,8 +550,8 @@ static void rp_cc_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8_
case RP_CC_STATE_WAIT_CIS_ESTABLISHED:
rp_cc_state_wait_cis_established(conn, ctx, evt, param);
break;
case RP_CC_STATE_WAIT_NTF:
rp_cc_state_wait_ntf(conn, ctx, evt, param);
case RP_CC_STATE_WAIT_NTF_AVAIL:
rp_cc_state_wait_ntf_avail(conn, ctx, evt, param);
break;
default:
/* Unknown state */
@@ -671,7 +644,6 @@ enum {
LP_CC_STATE_WAIT_TX_CIS_IND,
LP_CC_STATE_WAIT_INSTANT,
LP_CC_STATE_WAIT_ESTABLISHED,
LP_CC_STATE_WAIT_NTF,
};

/* LLCP Local Procedure CIS Creation FSM events */
@@ -840,13 +812,9 @@ static void lp_cc_st_wait_tx_cis_req(struct ll_conn *conn, struct proc_ctx *ctx,

static void lp_cc_complete(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
if (!llcp_ntf_alloc_is_available()) {
ctx->state = LP_CC_STATE_WAIT_NTF;
} else {
cc_ntf_established(conn, ctx);
llcp_lr_complete(conn);
ctx->state = LP_CC_STATE_IDLE;
}
cc_ntf_established(conn, ctx);
llcp_lr_complete(conn);
ctx->state = LP_CC_STATE_IDLE;
}

static void lp_cc_st_idle(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
@@ -916,6 +884,10 @@ static void lp_cc_st_wait_rx_cis_rsp(struct ll_conn *conn, struct proc_ctx *ctx,
case LP_CC_EVT_CIS_RSP:
/* TODO: Reject response if outside offset range? */
llcp_pdu_decode_cis_rsp(ctx, param);

/* Mark RX node to NOT release */
llcp_rx_node_retain(ctx);

lp_cc_send_cis_ind(conn, ctx, evt, param);
break;
case LP_CC_EVT_UNKNOWN:
@@ -991,12 +963,6 @@ static void lp_cc_st_wait_established(struct ll_conn *conn, struct proc_ctx *ctx
void *param)
{
switch (evt) {
case LP_CC_EVT_RUN:
if (cc_check_cis_established_or_timeout_lll(ctx)) {
/* CIS was established, so let's got ahead and complete procedure */
lp_cc_complete(conn, ctx, evt, param);
}
break;
case LP_CC_EVT_ESTABLISHED:
/* CIS was established, so let's go ahead and complete procedure */
lp_cc_complete(conn, ctx, evt, param);
@@ -1007,18 +973,6 @@ static void lp_cc_st_wait_established(struct ll_conn *conn, struct proc_ctx *ctx
}
}

static void lp_cc_st_wait_ntf(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
switch (evt) {
case LP_CC_EVT_RUN:
lp_cc_complete(conn, ctx, evt, param);
break;
default:
/* Ignore other evts */
break;
}
}

static void lp_cc_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
switch (ctx->state) {
@@ -1046,9 +1000,6 @@ static void lp_cc_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8_
case LP_CC_STATE_WAIT_ESTABLISHED:
lp_cc_st_wait_established(conn, ctx, evt, param);
break;
case LP_CC_STATE_WAIT_NTF:
lp_cc_st_wait_ntf(conn, ctx, evt, param);
break;
default:
/* Unknown state */
LL_ASSERT(0);
@@ -57,7 +57,7 @@ enum {
LP_COMMON_STATE_WAIT_TX,
LP_COMMON_STATE_WAIT_TX_ACK,
LP_COMMON_STATE_WAIT_RX,
LP_COMMON_STATE_WAIT_NTF,
LP_COMMON_STATE_WAIT_NTF_AVAIL,
};

/* LLCP Local Procedure Common FSM events */
@@ -88,7 +88,6 @@ enum {
RP_COMMON_STATE_POSTPONE_TERMINATE,
RP_COMMON_STATE_WAIT_TX,
RP_COMMON_STATE_WAIT_TX_ACK,
RP_COMMON_STATE_WAIT_NTF,
};
/* LLCP Remote Procedure Common FSM events */
enum {
@@ -151,6 +150,11 @@ static void lp_comm_tx(struct ll_conn *conn, struct proc_ctx *ctx)

pdu = (struct pdu_data *)tx->pdu;

/* Clear tx_ack/rx node reference due to dual/union functionality
 * rx node might be !=NULL and thus tx_ack !=NULL
 */
ctx->node_ref.tx_ack = NULL;

/* Encode LL Control PDU */
switch (ctx->proc) {
#if defined(CONFIG_BT_CTLR_LE_PING)
@@ -166,7 +170,7 @@ static void lp_comm_tx(struct ll_conn *conn, struct proc_ctx *ctx)
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_PERIPHERAL)
case PROC_MIN_USED_CHANS:
llcp_pdu_encode_min_used_chans_ind(ctx, pdu);
ctx->tx_ack = tx;
ctx->node_ref.tx_ack = tx;
ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_PERIPHERAL */
@@ -176,13 +180,13 @@ static void lp_comm_tx(struct ll_conn *conn, struct proc_ctx *ctx)
break;
case PROC_TERMINATE:
llcp_pdu_encode_terminate_ind(ctx, pdu);
ctx->tx_ack = tx;
ctx->node_ref.tx_ack = tx;
ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
break;
#if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
case PROC_CIS_TERMINATE:
llcp_pdu_encode_cis_terminate_ind(ctx, pdu);
ctx->tx_ack = tx;
ctx->node_ref.tx_ack = tx;
ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
break;
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO || CONFIG_BT_CTLR_PERIPHERAL_ISO */
@@ -299,13 +303,9 @@ static void lp_comm_ntf_cte_req(struct ll_conn *conn, struct proc_ctx *ctx, stru

static void lp_comm_ntf_cte_req_tx(struct ll_conn *conn, struct proc_ctx *ctx)
{
if (llcp_ntf_alloc_is_available()) {
lp_comm_ntf(conn, ctx);
ull_cp_cte_req_set_disable(conn);
ctx->state = LP_COMMON_STATE_IDLE;
} else {
ctx->state = LP_COMMON_STATE_WAIT_NTF;
}
lp_comm_ntf(conn, ctx);
ull_cp_cte_req_set_disable(conn);
ctx->state = LP_COMMON_STATE_IDLE;
}

static void lp_comm_complete_cte_req(struct ll_conn *conn, struct proc_ctx *ctx)
@@ -376,12 +376,18 @@ static void lp_comm_ntf_sca(struct node_rx_pdu *ntf, struct proc_ctx *ctx, struc

static void lp_comm_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
{
uint8_t piggy_back = 1U;
struct node_rx_pdu *ntf;
struct pdu_data *pdu;

/* Allocate ntf node */
ntf = llcp_ntf_alloc();
LL_ASSERT(ntf);
ntf = ctx->node_ref.rx;
ctx->node_ref.rx = NULL;
if (!ntf) {
/* Allocate ntf node */
ntf = llcp_ntf_alloc();
LL_ASSERT(ntf);
piggy_back = 0U;
}

ntf->hdr.type = NODE_RX_TYPE_DC_PDU;
ntf->hdr.handle = conn->lll.handle;
@@ -414,8 +420,14 @@ static void lp_comm_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
break;
}

/* Enqueue notification towards LL */
ll_rx_put_sched(ntf->hdr.link, ntf);
if (!piggy_back) {
/* Enqueue notification towards LL, unless we re-use RX node,
 * in which case it is handled on the ull_cp_rx return path
 */
ll_rx_put_sched(ntf->hdr.link, ntf);
}

}

static void lp_comm_terminate_invalid_pdu(struct ll_conn *conn, struct proc_ctx *ctx)
@@ -443,13 +455,9 @@ static void lp_comm_complete(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t
break;
#endif /* CONFIG_BT_CTLR_LE_PING */
case PROC_FEATURE_EXCHANGE:
if (ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP ||
ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_FEATURE_RSP) {
if ((ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP ||
ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_FEATURE_RSP)) {
if (ctx->data.fex.host_initiated) {
if (!llcp_ntf_alloc_is_available()) {
ctx->state = LP_COMMON_STATE_WAIT_NTF;
break;
}
lp_comm_ntf(conn, ctx);
}
llcp_lr_complete(conn);
@@ -467,12 +475,19 @@ static void lp_comm_complete(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_PERIPHERAL */
case PROC_VERSION_EXCHANGE:
if (ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_VERSION_IND) {
if (!llcp_ntf_alloc_is_available()) {
ctx->state = LP_COMMON_STATE_WAIT_NTF;
} else {
if (ctx->node_ref.rx || llcp_ntf_alloc_is_available()) {
/* Either this is a piggy-back or there is a NTF node avail */
lp_comm_ntf(conn, ctx);
llcp_lr_complete(conn);
ctx->state = LP_COMMON_STATE_IDLE;
} else {
/* Handle procedure TO, in case we end up waiting 'forever' for
 * NTF buffer. This is a simple way to implement mechanism to
 * trigger disconnect in case NTF buffer 'never' becomes avail
 * see elaborate note in lp_comm_st_wait_ntf_avail()
 */
llcp_lr_prt_restart(conn);
ctx->state = LP_COMMON_STATE_WAIT_NTF_AVAIL;
}
} else {
/* Illegal response opcode */
@@ -500,16 +515,11 @@ static void lp_comm_complete(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t
/* Apply changes in data lengths/times */
uint8_t dle_changed = ull_dle_update_eff(conn);

if (dle_changed && !llcp_ntf_alloc_is_available()) {
/* We need to generate NTF but no buffers avail so wait for one */
ctx->state = LP_COMMON_STATE_WAIT_NTF;
} else {
if (dle_changed) {
lp_comm_ntf(conn, ctx);
}
llcp_lr_complete(conn);
ctx->state = LP_COMMON_STATE_IDLE;
if (dle_changed) {
lp_comm_ntf(conn, ctx);
}
llcp_lr_complete(conn);
ctx->state = LP_COMMON_STATE_IDLE;
} else if (ctx->response_opcode == PDU_DATA_LLCTRL_TYPE_UNKNOWN_RSP) {
/* Peer does not accept DLU, so disable on current connection */
feature_unmask_features(conn, LL_FEAT_BIT_DLE);
@@ -555,13 +565,9 @@ static void lp_comm_complete(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t
#endif /* defined(CONFIG_BT_CTLR_PERIPHERAL_ISO) */
}
#endif /* CONFIG_BT_PERIPHERAL */
if (!llcp_ntf_alloc_is_available()) {
ctx->state = LP_COMMON_STATE_WAIT_NTF;
} else {
lp_comm_ntf(conn, ctx);
llcp_lr_complete(conn);
ctx->state = LP_COMMON_STATE_IDLE;
}
lp_comm_ntf(conn, ctx);
llcp_lr_complete(conn);
ctx->state = LP_COMMON_STATE_IDLE;
break;
default:
/* Illegal response opcode */
@@ -590,7 +596,8 @@ static bool lp_comm_tx_proxy(struct ll_conn *conn, struct proc_ctx *ctx, const b
lp_comm_tx(conn, ctx);

/* Select correct state, depending on TX ack handling 'request' */
ctx->state = ctx->tx_ack ? LP_COMMON_STATE_WAIT_TX_ACK : LP_COMMON_STATE_WAIT_RX;
ctx->state = ctx->node_ref.tx_ack ?
LP_COMMON_STATE_WAIT_TX_ACK : LP_COMMON_STATE_WAIT_RX;
return true;
}
return false;
@@ -622,6 +629,8 @@ static void lp_comm_send_req(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t
}
} else {
ctx->response_opcode = PDU_DATA_LLCTRL_TYPE_VERSION_IND;
/* Clear node_ref to signal no NTF piggy-backing */
ctx->node_ref.rx = NULL;
lp_comm_complete(conn, ctx, evt, param);
}
break;
@@ -672,7 +681,7 @@ static void lp_comm_send_req(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t
1) {
#endif /* CONFIG_BT_CTLR_PHY */
lp_comm_tx_proxy(conn, ctx,
(llcp_rr_get_paused_cmd(conn) == PROC_CTE_REQ));
llcp_rr_get_paused_cmd(conn) == PROC_CTE_REQ);
} else {
/* The PHY was changed to CODED when the request was waiting in a local
 * request queue.
@@ -714,11 +723,7 @@ static void lp_comm_st_idle(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t
ctx->data.cis_term.error_code);
}
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO || CONFIG_BT_CTLR_PERIPHERAL_ISO */
if (llcp_lr_ispaused(conn)) {
ctx->state = LP_COMMON_STATE_WAIT_TX;
} else {
lp_comm_send_req(conn, ctx, evt, param);
}
lp_comm_send_req(conn, ctx, evt, param);
break;
default:
/* Ignore other evts */
@@ -747,17 +752,14 @@ static void lp_comm_st_wait_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, u
switch (ctx->proc) {
#if defined(CONFIG_BT_CTLR_MIN_USED_CHAN) && defined(CONFIG_BT_PERIPHERAL)
case PROC_MIN_USED_CHANS:
ctx->tx_ack = NULL;
lp_comm_complete(conn, ctx, evt, param);
break;
#endif /* CONFIG_BT_CTLR_MIN_USED_CHAN && CONFIG_BT_PERIPHERAL */
case PROC_TERMINATE:
ctx->tx_ack = NULL;
lp_comm_complete(conn, ctx, evt, param);
break;
#if defined(CONFIG_BT_CTLR_CENTRAL_ISO) || defined(CONFIG_BT_CTLR_PERIPHERAL_ISO)
case PROC_CIS_TERMINATE:
ctx->tx_ack = NULL;
lp_comm_complete(conn, ctx, evt, param);
break;
#endif /* CONFIG_BT_CTLR_CENTRAL_ISO || CONFIG_BT_CTLR_PERIPHERAL_ISO */
@@ -848,36 +850,34 @@ static void lp_comm_st_wait_rx(struct ll_conn *conn, struct proc_ctx *ctx, uint8
}
}

static void lp_comm_st_wait_ntf(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
static void lp_comm_st_wait_ntf_avail(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
void *param)
{
switch (evt) {
case LP_COMMON_EVT_RUN:
switch (ctx->proc) {
case PROC_FEATURE_EXCHANGE:
case PROC_VERSION_EXCHANGE:
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
case PROC_DATA_LENGTH_UPDATE:
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
case PROC_SCA_UPDATE:
#endif /* CONFIG_BT_CTLR_SCA_UPDATE) */
/* Note re. procedure timeout handling:
 * Procedure TO is specifically NOT reset while in wait state, since
 * the mechanism is being 'hi-jacked' to implement a TO on the NTF wait
 * This to catch the very unlikely case:
 * local VERSION IND started after a VERSION IND had already been TX'ed
 * in which case the local procedure should complete with NTF without
 * prior TX (ie no procedure TO handling initiated). IF this NTF never
 * finds buffer avail it would wait forever, but not with proc TO active
 */
if (llcp_ntf_alloc_is_available()) {
lp_comm_ntf(conn, ctx);
llcp_lr_complete(conn);
ctx->state = LP_COMMON_STATE_IDLE;
}
break;
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_REQ)
case PROC_CTE_REQ:
if (llcp_ntf_alloc_is_available()) {
lp_comm_ntf(conn, ctx);
ctx->state = LP_COMMON_STATE_IDLE;
lp_comm_complete_cte_req_finalize(conn);
}
break;
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_REQ */
default:
/* If we get here it is not good since only VERSION EXCHANGE procedure
 * out of the ones handled in ull_llcp_common should end up waiting for
 * non-piggy-back'ed NTF
 */
LL_ASSERT(0);
break;
}
break;
@@ -902,8 +902,8 @@ static void lp_comm_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint
case LP_COMMON_STATE_WAIT_RX:
lp_comm_st_wait_rx(conn, ctx, evt, param);
break;
case LP_COMMON_STATE_WAIT_NTF:
lp_comm_st_wait_ntf(conn, ctx, evt, param);
case LP_COMMON_STATE_WAIT_NTF_AVAIL:
lp_comm_st_wait_ntf_avail(conn, ctx, evt, param);
break;
default:
/* Unknown state */
@@ -1010,6 +1010,9 @@ static void rp_comm_rx_decode(struct ll_conn *conn, struct proc_ctx *ctx, struct
*/
llcp_tx_pause_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_DATA_LENGTH);
ctx->data.dle.ntf_dle = ull_dle_update_eff_rx(conn);

/* Mark RX pdu to be removed from RX queue, but NOT be released */
llcp_rx_node_retain(ctx);
break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
@@ -1039,6 +1042,9 @@ static void rp_comm_tx(struct ll_conn *conn, struct proc_ctx *ctx)

pdu = (struct pdu_data *)tx->pdu;

/* Clear tx_ack/rx node reference */
ctx->node_ref.tx_ack = NULL;

/* Encode LL Control PDU */
switch (ctx->proc) {
#if defined(CONFIG_BT_CTLR_LE_PING)
@@ -1058,7 +1064,7 @@ static void rp_comm_tx(struct ll_conn *conn, struct proc_ctx *ctx)
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
case PROC_DATA_LENGTH_UPDATE:
llcp_pdu_encode_length_rsp(conn, pdu);
ctx->tx_ack = tx;
ctx->node_ref.tx_ack = tx;
ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
@@ -1091,7 +1097,7 @@ static void rp_comm_tx(struct ll_conn *conn, struct proc_ctx *ctx)
ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
}

ctx->tx_ack = tx;
ctx->node_ref.tx_ack = tx;

break;
}
@@ -1099,7 +1105,7 @@ static void rp_comm_tx(struct ll_conn *conn, struct proc_ctx *ctx)
#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
case PROC_SCA_UPDATE:
llcp_pdu_encode_clock_accuracy_rsp(ctx, pdu);
ctx->tx_ack = tx;
ctx->node_ref.tx_ack = tx;
ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
break;
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */
@@ -1129,40 +1135,30 @@ static void rp_comm_st_idle(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t
}
}
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
static void rp_comm_ntf_length_change(struct ll_conn *conn, struct proc_ctx *ctx,
struct pdu_data *pdu)
{
llcp_ntf_encode_length_change(conn, pdu);
}

static void rp_comm_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
static void rp_comm_ntf(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t generate_ntf)
{
struct node_rx_pdu *ntf;
struct pdu_data *pdu;

ARG_UNUSED(pdu);
/* Allocate ntf node */
ntf = llcp_ntf_alloc();
ntf = ctx->node_ref.rx;
LL_ASSERT(ntf);

ntf->hdr.type = NODE_RX_TYPE_DC_PDU;
ntf->hdr.handle = conn->lll.handle;
pdu = (struct pdu_data *)ntf->pdu;
switch (ctx->proc) {
/* Note: the 'double' ifdef in case this switch case expands
 * in the future and the function is re-instated
 */
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
case PROC_DATA_LENGTH_UPDATE:
rp_comm_ntf_length_change(conn, ctx, pdu);
break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
default:
LL_ASSERT(0);
break;
/* This should be an 'old' RX node, so put/sched when done */
LL_ASSERT(ntf->hdr.type == NODE_RX_TYPE_RETAIN);

/* And release memory if no NTF to be generated */
ntf->hdr.type = NODE_RX_TYPE_RELEASE;

if (generate_ntf) {
ntf->hdr.type = NODE_RX_TYPE_DC_PDU;
ntf->hdr.handle = conn->lll.handle;
pdu = (struct pdu_data *)ntf->pdu;
LL_ASSERT(ctx->proc == PROC_DATA_LENGTH_UPDATE);
llcp_ntf_encode_length_change(conn, pdu);
}

/* Enqueue notification towards LL */
/* Enqueue notification towards LL - releases mem if no ntf */
ll_rx_put_sched(ntf->hdr.link, ntf);
}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
@@ -1271,7 +1267,8 @@ static void rp_comm_send_rsp(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
case PROC_CTE_REQ:
if (llcp_rr_ispaused(conn) || !llcp_tx_alloc_peek(conn, ctx) ||
if (llcp_rr_ispaused(conn) ||
!llcp_tx_alloc_peek(conn, ctx) ||
(llcp_rr_get_paused_cmd(conn) == PROC_CTE_REQ)) {
ctx->state = RP_COMMON_STATE_WAIT_TX;
} else {
@@ -1349,25 +1346,20 @@ static void rp_comm_st_wait_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, u
/* Apply changes in data lengths/times */
uint8_t dle_changed = ull_dle_update_eff_tx(conn);

ctx->node_ref.tx_ack = NULL;
dle_changed |= ctx->data.dle.ntf_dle;
llcp_tx_resume_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_DATA_LENGTH);

if (dle_changed && !llcp_ntf_alloc_is_available()) {
ctx->state = RP_COMMON_STATE_WAIT_NTF;
} else {
if (dle_changed) {
rp_comm_ntf(conn, ctx);
}
llcp_rr_complete(conn);
ctx->state = RP_COMMON_STATE_IDLE;
}
rp_comm_ntf(conn, ctx, dle_changed);
llcp_rr_complete(conn);
ctx->state = RP_COMMON_STATE_IDLE;
break;
}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
#if defined(CONFIG_BT_CTLR_DF_CONN_CTE_RSP)
case PROC_CTE_REQ: {
/* add PHY update pause = false here */
ctx->tx_ack = NULL;
ctx->node_ref.tx_ack = NULL;
llcp_rr_set_paused_cmd(conn, PROC_NONE);
llcp_rr_complete(conn);
ctx->state = RP_COMMON_STATE_IDLE;
@@ -1375,7 +1367,7 @@ static void rp_comm_st_wait_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, u
#endif /* CONFIG_BT_CTLR_DF_CONN_CTE_RSP */
#if defined(CONFIG_BT_CTLR_SCA_UPDATE)
case PROC_SCA_UPDATE: {
ctx->tx_ack = NULL;
ctx->node_ref.tx_ack = NULL;
#if defined(CONFIG_BT_PERIPHERAL)
if (conn->lll.role == BT_HCI_ROLE_PERIPHERAL) {
conn->periph.sca = ctx->data.sca_update.sca;
@@ -1400,18 +1392,6 @@ static void rp_comm_st_wait_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, u
}
}

#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
static void rp_comm_st_wait_ntf(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
void *param)
{
if (llcp_ntf_alloc_is_available()) {
rp_comm_ntf(conn, ctx);
llcp_rr_complete(conn);
ctx->state = RP_COMMON_STATE_IDLE;
}
}
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */

static void rp_comm_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
void *param)
{
@@ -1431,11 +1411,6 @@ static void rp_comm_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint
case RP_COMMON_STATE_WAIT_TX_ACK:
rp_comm_st_wait_tx_ack(conn, ctx, evt, param);
break;
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
case RP_COMMON_STATE_WAIT_NTF:
rp_comm_st_wait_ntf(conn, ctx, evt, param);
break;
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
default:
/* Unknown state */
LL_ASSERT(0);
@@ -85,7 +85,7 @@ enum {
LP_CU_STATE_WAIT_RX_CONN_UPDATE_IND,
LP_CU_STATE_WAIT_TX_REJECT_EXT_IND,
LP_CU_STATE_WAIT_INSTANT,
LP_CU_STATE_WAIT_NTF,
LP_CU_STATE_WAIT_NTF_AVAIL,
};

/* LLCP Local Procedure Connection Update FSM events */
@@ -120,7 +120,7 @@ enum {
RP_CU_STATE_WAIT_TX_CONN_UPDATE_IND,
RP_CU_STATE_WAIT_RX_CONN_UPDATE_IND,
RP_CU_STATE_WAIT_INSTANT,
RP_CU_STATE_WAIT_NTF,
RP_CU_STATE_WAIT_NTF_AVAIL,
RP_CU_STATE_WAIT_TX_UNKNOWN_RSP
};

@@ -238,11 +238,15 @@ static void cu_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
{
struct node_rx_pdu *ntf;
struct node_rx_cu *pdu;
uint8_t piggy_back;

/* Allocate ntf node */
ntf = llcp_ntf_alloc();
ntf = ctx->node_ref.rx;
ctx->node_ref.rx = NULL;
LL_ASSERT(ntf);

piggy_back = (ntf->hdr.type != NODE_RX_TYPE_RETAIN);

ntf->hdr.type = NODE_RX_TYPE_CONN_UPDATE;
ntf->hdr.handle = conn->lll.handle;
pdu = (struct node_rx_cu *)ntf->pdu;
@@ -258,8 +262,12 @@ static void cu_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
pdu->timeout = conn->supervision_timeout;
}

/* Enqueue notification towards LL */
ll_rx_put_sched(ntf->hdr.link, ntf);
if (!piggy_back) {
/* Enqueue notification towards LL, unless piggy-backing,
 * in which case this is done on the rx return path
 */
ll_rx_put_sched(ntf->hdr.link, ntf);
}
}

#if defined(CONFIG_BT_CENTRAL) || defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
@@ -268,9 +276,16 @@ static void lp_cu_tx(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t opcode)
struct node_tx *tx;
struct pdu_data *pdu;

/* Allocate tx node */
tx = llcp_tx_alloc(conn, ctx);
LL_ASSERT(tx);
/* Get pre-allocated tx node */
tx = ctx->node_ref.tx_ack;
/* Clear to not trigger tx-ack*/
ctx->node_ref.tx_ack = NULL;

if (!tx) {
/* Allocate tx node if non pre-alloc'ed */
tx = llcp_tx_alloc(conn, ctx);
LL_ASSERT(tx);
}

pdu = (struct pdu_data *)tx->pdu;

@@ -323,15 +338,11 @@ static void lp_cu_complete(struct ll_conn *conn, struct proc_ctx *ctx)
ctx->state = LP_CU_STATE_IDLE;
}

static void lp_cu_wait_complete(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
static void lp_cu_ntf_complete(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
void *param)
{
if (!llcp_ntf_alloc_is_available()) {
ctx->state = LP_CU_STATE_WAIT_NTF;
} else {
cu_ntf(conn, ctx);
lp_cu_complete(conn, ctx);
}
cu_ntf(conn, ctx);
lp_cu_complete(conn, ctx);
}

#if defined(CONFIG_BT_CTLR_CONN_PARAM_REQ)
@@ -389,16 +400,54 @@ static void lp_cu_send_conn_param_req(struct ll_conn *conn, struct proc_ctx *ctx
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */

#if defined(CONFIG_BT_CENTRAL)
static void lp_cu_send_conn_update_ind_finalize(struct ll_conn *conn, struct proc_ctx *ctx,
uint8_t evt, void *param)
{
if (ctx->node_ref.rx == NULL) {
/* If we get here without RX node we know one is avail to be allocated,
 * so pre-alloc NTF node
 */
ctx->node_ref.rx = llcp_ntf_alloc();
}

/* Signal put/sched on NTF - ie non-RX node piggy */
ctx->node_ref.rx->hdr.type = NODE_RX_TYPE_RETAIN;

cu_prepare_update_ind(conn, ctx);
lp_cu_tx(conn, ctx, PDU_DATA_LLCTRL_TYPE_CONN_UPDATE_IND);
ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
ctx->state = LP_CU_STATE_WAIT_INSTANT;
}

static void lp_cu_send_conn_update_ind(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
void *param)
{
if (llcp_lr_ispaused(conn) || !llcp_tx_alloc_peek(conn, ctx)) {
ctx->state = LP_CU_STATE_WAIT_TX_CONN_UPDATE_IND;
} else {
cu_prepare_update_ind(conn, ctx);
lp_cu_tx(conn, ctx, PDU_DATA_LLCTRL_TYPE_CONN_UPDATE_IND);
ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
ctx->state = LP_CU_STATE_WAIT_INSTANT;
/* ensure alloc of TX node, before possibly waiting for NTF node */
ctx->node_ref.tx_ack = llcp_tx_alloc(conn, ctx);
if (ctx->node_ref.rx == NULL && !llcp_ntf_alloc_is_available()) {
/* No RX node piggy, and no NTF avail, so go wait for one, before TX'ing */
ctx->state = LP_CU_STATE_WAIT_NTF_AVAIL;
} else {
lp_cu_send_conn_update_ind_finalize(conn, ctx, evt, param);
}
}
}

static void lp_cu_st_wait_ntf_avail(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
void *param)
{
switch (evt) {
case LP_CU_EVT_RUN:
if (llcp_ntf_alloc_is_available()) {
lp_cu_send_conn_update_ind_finalize(conn, ctx, evt, param);
}
break;
default:
/* Ignore other evts */
break;
}
}
#endif /* CONFIG_BT_CENTRAL */
@@ -415,6 +464,8 @@ static void lp_cu_st_idle(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t ev
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */
#if defined(CONFIG_BT_CENTRAL)
case PROC_CONN_UPDATE:
/* Ensure the non-piggy-back'ing is signaled */
ctx->node_ref.rx = NULL;
lp_cu_send_conn_update_ind(conn, ctx, evt, param);
break;
#endif /* CONFIG_BT_CENTRAL */
@@ -477,12 +528,16 @@ static void lp_cu_st_wait_rx_conn_param_rsp(struct ll_conn *conn, struct proc_ct
lp_cu_send_reject_ext_ind(conn, ctx, evt, param);
break;
}
/* Keep RX node to use for NTF */
llcp_rx_node_retain(ctx);
lp_cu_send_conn_update_ind(conn, ctx, evt, param);
break;
case LP_CU_EVT_UNKNOWN:
llcp_rr_set_incompat(conn, INCOMPAT_RESERVED);
/* Unsupported in peer, so disable locally for this connection */
feature_unmask_features(conn, LL_FEAT_BIT_CONN_PARAM_REQ);
/* Keep RX node to use for NTF */
llcp_rx_node_retain(ctx);
lp_cu_send_conn_update_ind(conn, ctx, evt, param);
break;
case LP_CU_EVT_REJECT:
@@ -491,11 +546,13 @@ static void lp_cu_st_wait_rx_conn_param_rsp(struct ll_conn *conn, struct proc_ct
llcp_rr_set_incompat(conn, INCOMPAT_RESERVED);
/* Unsupported in peer, so disable locally for this connection */
feature_unmask_features(conn, LL_FEAT_BIT_CONN_PARAM_REQ);
/* Keep RX node to use for NTF */
llcp_rx_node_retain(ctx);
lp_cu_send_conn_update_ind(conn, ctx, evt, param);
} else {
llcp_rr_set_incompat(conn, INCOMPAT_NO_COLLISION);
ctx->data.cu.error = pdu->llctrl.reject_ext_ind.error_code;
lp_cu_wait_complete(conn, ctx, evt, param);
lp_cu_ntf_complete(conn, ctx, evt, param);
}
break;
default:
@@ -528,17 +585,19 @@ static void lp_cu_st_wait_rx_conn_update_ind(struct ll_conn *conn, struct proc_c
switch (evt) {
case LP_CU_EVT_CONN_UPDATE_IND:
llcp_pdu_decode_conn_update_ind(ctx, param);
/* Keep RX node to use for NTF */
llcp_rx_node_retain(ctx);
ctx->state = LP_CU_STATE_WAIT_INSTANT;
break;
case LP_CU_EVT_UNKNOWN:
/* Unsupported in peer, so disable locally for this connection */
feature_unmask_features(conn, LL_FEAT_BIT_CONN_PARAM_REQ);
ctx->data.cu.error = BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
lp_cu_wait_complete(conn, ctx, evt, param);
lp_cu_ntf_complete(conn, ctx, evt, param);
break;
case LP_CU_EVT_REJECT:
ctx->data.cu.error = pdu->llctrl.reject_ext_ind.error_code;
lp_cu_wait_complete(conn, ctx, evt, param);
lp_cu_ntf_complete(conn, ctx, evt, param);
break;
default:
/* Ignore other evts */
@@ -571,8 +630,13 @@ static void lp_cu_check_instant(struct ll_conn *conn, struct proc_ctx *ctx, uint
notify = cu_should_notify_host(ctx);
if (notify) {
ctx->data.cu.error = BT_HCI_ERR_SUCCESS;
lp_cu_wait_complete(conn, ctx, evt, param);
lp_cu_ntf_complete(conn, ctx, evt, param);
} else {
/* Release RX node kept for NTF */
ctx->node_ref.rx->hdr.type = NODE_RX_TYPE_RELEASE;
ll_rx_put_sched(ctx->node_ref.rx->hdr.link, ctx->node_ref.rx);
ctx->node_ref.rx = NULL;

lp_cu_complete(conn, ctx);
}
}
@@ -591,18 +655,6 @@ static void lp_cu_st_wait_instant(struct ll_conn *conn, struct proc_ctx *ctx, ui
}
}

static void lp_cu_st_wait_ntf(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
switch (evt) {
case LP_CU_EVT_RUN:
lp_cu_wait_complete(conn, ctx, evt, param);
break;
default:
/* Ignore other evts */
break;
}
}

static void lp_cu_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
switch (ctx->state) {
@@ -623,6 +675,9 @@ static void lp_cu_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8_
case LP_CU_STATE_WAIT_TX_CONN_UPDATE_IND:
lp_cu_st_wait_tx_conn_update_ind(conn, ctx, evt, param);
break;
case LP_CU_STATE_WAIT_NTF_AVAIL:
lp_cu_st_wait_ntf_avail(conn, ctx, evt, param);
break;
#endif /* CONFIG_BT_CENTRAL */
#if defined(CONFIG_BT_PERIPHERAL)
case LP_CU_STATE_WAIT_RX_CONN_UPDATE_IND:
@@ -637,9 +692,6 @@ static void lp_cu_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8_
case LP_CU_STATE_WAIT_INSTANT:
lp_cu_st_wait_instant(conn, ctx, evt, param);
break;
case LP_CU_STATE_WAIT_NTF:
lp_cu_st_wait_ntf(conn, ctx, evt, param);
break;
default:
/* Unknown state */
LL_ASSERT(0);
@@ -694,9 +746,15 @@ static void rp_cu_tx(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t opcode)
struct node_tx *tx;
struct pdu_data *pdu;

/* Allocate tx node */
tx = llcp_tx_alloc(conn, ctx);
LL_ASSERT(tx);
/* Get pre-allocated tx node */
tx = ctx->node_ref.tx_ack;
ctx->node_ref.tx_ack = NULL;

if (!tx) {
/* Allocate tx node if non pre-alloc'ed */
tx = llcp_tx_alloc(conn, ctx);
LL_ASSERT(tx);
}

pdu = (struct pdu_data *)tx->pdu;

@@ -743,19 +801,28 @@ static void rp_cu_conn_param_req_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
{
struct node_rx_pdu *ntf;
struct pdu_data *pdu;
uint8_t piggy_back;

/* Allocate ntf node */
ntf = llcp_ntf_alloc();
ntf = ctx->node_ref.rx;
ctx->node_ref.rx = NULL;
LL_ASSERT(ntf);

piggy_back = (ntf->hdr.type != NODE_RX_TYPE_RETAIN);

ntf->hdr.type = NODE_RX_TYPE_DC_PDU;
ntf->hdr.handle = conn->lll.handle;
pdu = (struct pdu_data *)ntf->pdu;

llcp_pdu_encode_conn_param_req(ctx, pdu);

/* Enqueue notification towards LL */
ll_rx_put_sched(ntf->hdr.link, ntf);
if (!piggy_back) {
/* Enqueue notification towards LL, unless piggy-backing,
 * in which case this is done on the rx return path
 */
ll_rx_put_sched(ntf->hdr.link, ntf);
}
}
#endif /* CONFIG_BT_CTLR_CONN_PARAM_REQ */

@@ -770,15 +837,21 @@ static void rp_cu_complete(struct ll_conn *conn, struct proc_ctx *ctx)
ctx->state = RP_CU_STATE_IDLE;
}

static void rp_cu_wait_complete(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
void *param)
static void rp_cu_send_conn_update_ind_finalize(struct ll_conn *conn, struct proc_ctx *ctx,
uint8_t evt, void *param)
{
if (!llcp_ntf_alloc_is_available()) {
ctx->state = RP_CU_STATE_WAIT_NTF;
} else {
cu_ntf(conn, ctx);
rp_cu_complete(conn, ctx);
}
/* Central role path, should not get here with !=NULL rx-node reference */
LL_ASSERT(ctx->node_ref.rx == NULL);
/* We pre-alloc NTF node */
ctx->node_ref.rx = llcp_ntf_alloc();

/* Signal put/sched on NTF - ie non-RX node piggy */
ctx->node_ref.rx->hdr.type = NODE_RX_TYPE_RETAIN;

cu_prepare_update_ind(conn, ctx);
rp_cu_tx(conn, ctx, PDU_DATA_LLCTRL_TYPE_CONN_UPDATE_IND);
ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
ctx->state = RP_CU_STATE_WAIT_INSTANT;
}

static void rp_cu_send_conn_update_ind(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
@@ -787,10 +860,30 @@ static void rp_cu_send_conn_update_ind(struct ll_conn *conn, struct proc_ctx *ct
if (llcp_rr_ispaused(conn) || !llcp_tx_alloc_peek(conn, ctx)) {
ctx->state = RP_CU_STATE_WAIT_TX_CONN_UPDATE_IND;
} else {
cu_prepare_update_ind(conn, ctx);
rp_cu_tx(conn, ctx, PDU_DATA_LLCTRL_TYPE_CONN_UPDATE_IND);
ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
ctx->state = RP_CU_STATE_WAIT_INSTANT;
/* ensure alloc of TX node, before possibly waiting for NTF node */
ctx->node_ref.tx_ack = llcp_tx_alloc(conn, ctx);
if (!llcp_ntf_alloc_is_available()) {
/* No RX node piggy, and no NTF avail, so go wait for one, before TX'ing */
ctx->state = RP_CU_STATE_WAIT_NTF_AVAIL;
} else {
rp_cu_send_conn_update_ind_finalize(conn, ctx, evt, param);
}
}
}

static void rp_cu_st_wait_ntf_avail(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
void *param)
{
switch (evt) {
case RP_CU_EVT_RUN:
if (llcp_ntf_alloc_is_available()) {
/* If NTF node is now avail, so pick it up and continue */
rp_cu_send_conn_update_ind_finalize(conn, ctx, evt, param);
}
break;
default:
/* Ignore other evts */
break;
}
}

@@ -886,6 +979,8 @@ static void rp_cu_st_wait_conn_param_req_available(struct ll_conn *conn, struct
ctx->data.cu.error = BT_HCI_ERR_UNSUPP_LL_PARAM_VAL;
rp_cu_send_reject_ext_ind(conn, ctx, evt, param);
}
/* In case we have to defer NTF */
llcp_rx_node_retain(ctx);
} else {
cpr_active_set(conn);
const bool params_changed =
@@ -894,7 +989,8 @@ static void rp_cu_st_wait_conn_param_req_available(struct ll_conn *conn, struct

/* notify Host if conn parameters changed, else respond */
if (params_changed) {
rp_cu_send_conn_param_req_ntf(conn, ctx, evt, param);
rp_cu_conn_param_req_ntf(conn, ctx);
ctx->state = RP_CU_STATE_WAIT_CONN_PARAM_REQ_REPLY;
} else {
#if defined(CONFIG_BT_CTLR_USER_CPR_ANCHOR_POINT_MOVE)
/* Handle APM as a vendor specific user extension */
@@ -977,6 +1073,8 @@ static void rp_cu_state_wait_conn_param_req_reply_continue(struct ll_conn *conn,
switch (evt) {
case RP_CU_EVT_RUN:
if (conn->lll.role == BT_HCI_ROLE_CENTRAL) {
/* Ensure that node_ref does not indicate RX node for piggyback */
ctx->node_ref.rx = NULL;
rp_cu_send_conn_update_ind(conn, ctx, evt, param);
} else if (conn->lll.role == BT_HCI_ROLE_PERIPHERAL) {
if (!ctx->data.cu.error) {
@@ -1076,10 +1174,14 @@ static void rp_cu_check_instant(struct ll_conn *conn, struct proc_ctx *ctx, uint
notify = cu_should_notify_host(ctx);
if (notify) {
ctx->data.cu.error = BT_HCI_ERR_SUCCESS;
rp_cu_wait_complete(conn, ctx, evt, param);
cu_ntf(conn, ctx);
} else {
rp_cu_complete(conn, ctx);
/* Release RX node kept for NTF */
ctx->node_ref.rx->hdr.type = NODE_RX_TYPE_RELEASE;
ll_rx_put_sched(ctx->node_ref.rx->hdr.link, ctx->node_ref.rx);
ctx->node_ref.rx = NULL;
}
rp_cu_complete(conn, ctx);
}
}

@@ -1099,6 +1201,8 @@ static void rp_cu_st_wait_rx_conn_update_ind(struct ll_conn *conn, struct proc_c
if (is_instant_not_passed(ctx->data.cu.instant,
ull_conn_event_counter(conn))) {

llcp_rx_node_retain(ctx);

ctx->state = RP_CU_STATE_WAIT_INSTANT;
/* In case we only just received it in time */
rp_cu_check_instant(conn, ctx, evt, param);
@@ -1131,18 +1235,6 @@ static void rp_cu_st_wait_instant(struct ll_conn *conn, struct proc_ctx *ctx, ui
}
}

static void rp_cu_st_wait_ntf(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
switch (evt) {
case RP_CU_EVT_RUN:
rp_cu_wait_complete(conn, ctx, evt, param);
break;
default:
/* Ignore other evts */
break;
}
}

static void rp_cu_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
switch (ctx->state) {
@@ -1186,8 +1278,8 @@ static void rp_cu_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8_
case RP_CU_STATE_WAIT_INSTANT:
rp_cu_st_wait_instant(conn, ctx, evt, param);
break;
case RP_CU_STATE_WAIT_NTF:
rp_cu_st_wait_ntf(conn, ctx, evt, param);
case RP_CU_STATE_WAIT_NTF_AVAIL:
rp_cu_st_wait_ntf_avail(conn, ctx, evt, param);
break;
default:
/* Unknown state */
@@ -59,7 +59,6 @@ enum {
LP_ENC_STATE_WAIT_RX_START_ENC_REQ,
LP_ENC_STATE_WAIT_TX_START_ENC_RSP,
LP_ENC_STATE_WAIT_RX_START_ENC_RSP,
LP_ENC_STATE_WAIT_NTF,
/* Pause Procedure */
LP_ENC_STATE_ENCRYPTED,
LP_ENC_STATE_WAIT_TX_PAUSE_ENC_REQ,
@@ -99,12 +98,10 @@ enum {
RP_ENC_STATE_UNENCRYPTED,
RP_ENC_STATE_WAIT_RX_ENC_REQ,
RP_ENC_STATE_WAIT_TX_ENC_RSP,
RP_ENC_STATE_WAIT_NTF_LTK_REQ,
RP_ENC_STATE_WAIT_LTK_REPLY,
RP_ENC_STATE_WAIT_TX_START_ENC_REQ,
RP_ENC_STATE_WAIT_TX_REJECT_IND,
RP_ENC_STATE_WAIT_RX_START_ENC_RSP,
RP_ENC_STATE_WAIT_NTF,
RP_ENC_STATE_WAIT_TX_START_ENC_RSP,
/* Pause Procedure */
RP_ENC_STATE_ENCRYPTED,
@@ -225,8 +222,8 @@ static void lp_enc_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
struct node_rx_pdu *ntf;
struct pdu_data *pdu;

/* Allocate ntf node */
ntf = llcp_ntf_alloc();
/* Piggy-back on RX node */
ntf = ctx->node_ref.rx;
LL_ASSERT(ntf);

ntf->hdr.type = NODE_RX_TYPE_DC_PDU;
@@ -248,20 +245,13 @@ static void lp_enc_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
} else {
llcp_pdu_encode_reject_ind(pdu, ctx->data.enc.error);
}

/* Enqueue notification towards LL */
ll_rx_put_sched(ntf->hdr.link, ntf);
}

static void lp_enc_complete(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
if (!llcp_ntf_alloc_is_available()) {
ctx->state = LP_ENC_STATE_WAIT_NTF;
} else {
lp_enc_ntf(conn, ctx);
llcp_lr_complete(conn);
ctx->state = LP_ENC_STATE_UNENCRYPTED;
}
lp_enc_ntf(conn, ctx);
llcp_lr_complete(conn);
ctx->state = LP_ENC_STATE_UNENCRYPTED;
}

static void lp_enc_store_m(struct ll_conn *conn, struct proc_ctx *ctx, struct pdu_data *pdu)
@@ -519,18 +509,6 @@ static void lp_enc_st_wait_rx_start_enc_rsp(struct ll_conn *conn, struct proc_ct
}
}

static void lp_enc_st_wait_ntf(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
{
switch (evt) {
case LP_ENC_EVT_RUN:
lp_enc_complete(conn, ctx, evt, param);
break;
default:
/* Ignore other evts */
break;
}
}

static void lp_enc_state_encrypted(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
void *param)
{
@@ -612,9 +590,6 @@ static void lp_enc_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8
case LP_ENC_STATE_WAIT_RX_START_ENC_RSP:
lp_enc_st_wait_rx_start_enc_rsp(conn, ctx, evt, param);
break;
case LP_ENC_STATE_WAIT_NTF:
lp_enc_st_wait_ntf(conn, ctx, evt, param);
break;
/* Pause Procedure */
case LP_ENC_STATE_ENCRYPTED:
lp_enc_state_encrypted(conn, ctx, evt, param);
@@ -755,19 +730,26 @@ static void rp_enc_ntf_ltk(struct ll_conn *conn, struct proc_ctx *ctx)
{
struct node_rx_pdu *ntf;
struct pdu_data *pdu;
uint8_t piggy_back;

/* Allocate ntf node */
ntf = llcp_ntf_alloc();
/* Piggy-back on RX node */
ntf = ctx->node_ref.rx;
ctx->node_ref.rx = NULL;
LL_ASSERT(ntf);

piggy_back = (ntf->hdr.type != NODE_RX_TYPE_RETAIN);

ntf->hdr.type = NODE_RX_TYPE_DC_PDU;
ntf->hdr.handle = conn->lll.handle;
pdu = (struct pdu_data *)ntf->pdu;

llcp_ntf_encode_enc_req(ctx, pdu);

/* Enqueue notification towards LL */
ll_rx_put_sched(ntf->hdr.link, ntf);
if (!piggy_back) {
|
||||
/* Enqueue notification towards LL unless it's piggybacked */
|
||||
ll_rx_put_sched(ntf->hdr.link, ntf);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static void rp_enc_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
|
||||
|
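The rp_enc_ntf_ltk() change above introduces the piggy_back test: a node that still carries the RETAIN type was parked in an earlier event and must be enqueued explicitly, whereas a node taken straight from the ongoing RX pass is scheduled by the rx demux once its type is rewritten. A condensed sketch of that decision (illustrative, not the literal function; encoding step elided):

static void ntf_reuse_rx_node(struct ll_conn *conn, struct proc_ctx *ctx)
{
	struct node_rx_pdu *ntf = ctx->node_ref.rx;
	uint8_t piggy_back;

	LL_ASSERT(ntf);
	ctx->node_ref.rx = NULL;

	piggy_back = (ntf->hdr.type != NODE_RX_TYPE_RETAIN);

	ntf->hdr.type = NODE_RX_TYPE_DC_PDU;
	ntf->hdr.handle = conn->lll.handle;
	/* ... encode the notification PDU into ntf->pdu ... */

	if (!piggy_back) {
		/* Node was retained earlier: enqueue it towards LL here */
		ll_rx_put_sched(ntf->hdr.link, ntf);
	}
	/* else: the rx demux schedules it, since the type is no longer RETAIN */
}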
@ -775,8 +757,9 @@ static void rp_enc_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
|
|||
struct node_rx_pdu *ntf;
|
||||
struct pdu_data *pdu;
|
||||
|
||||
/* Allocate ntf node */
|
||||
ntf = llcp_ntf_alloc();
|
||||
/* Piggy-back on RX node */
|
||||
ntf = ctx->node_ref.rx;
|
||||
ctx->node_ref.rx = NULL;
|
||||
LL_ASSERT(ntf);
|
||||
|
||||
ntf->hdr.type = NODE_RX_TYPE_DC_PDU;
|
||||
|
@ -794,9 +777,6 @@ static void rp_enc_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
|
|||
/* Should never happen */
|
||||
LL_ASSERT(0);
|
||||
}
|
||||
|
||||
/* Enqueue notification towards LL */
|
||||
ll_rx_put_sched(ntf->hdr.link, ntf);
|
||||
}
|
||||
|
||||
static void rp_enc_send_start_enc_rsp(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
|
||||
|
@ -804,23 +784,8 @@ static void rp_enc_send_start_enc_rsp(struct ll_conn *conn, struct proc_ctx *ctx
|
|||
|
||||
static void rp_enc_complete(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
|
||||
{
|
||||
if (!llcp_ntf_alloc_is_available()) {
|
||||
ctx->state = RP_ENC_STATE_WAIT_NTF;
|
||||
} else {
|
||||
rp_enc_ntf(conn, ctx);
|
||||
rp_enc_send_start_enc_rsp(conn, ctx, evt, param);
|
||||
}
|
||||
}
|
||||
|
||||
static void rp_enc_send_ltk_ntf(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
|
||||
void *param)
|
||||
{
|
||||
if (!llcp_ntf_alloc_is_available()) {
|
||||
ctx->state = RP_ENC_STATE_WAIT_NTF_LTK_REQ;
|
||||
} else {
|
||||
rp_enc_ntf_ltk(conn, ctx);
|
||||
ctx->state = RP_ENC_STATE_WAIT_LTK_REPLY;
|
||||
}
|
||||
rp_enc_ntf(conn, ctx);
|
||||
rp_enc_send_start_enc_rsp(conn, ctx, evt, param);
|
||||
}
|
||||
|
||||
static void rp_enc_store_s(struct ll_conn *conn, struct proc_ctx *ctx, struct pdu_data *pdu)
|
||||
|
@ -840,11 +805,15 @@ static void rp_enc_send_enc_rsp(struct ll_conn *conn, struct proc_ctx *ctx, uint
|
|||
struct node_tx *tx;
|
||||
|
||||
if (!llcp_tx_alloc_peek(conn, ctx)) {
|
||||
/* Mark RX node to not release, needed for LTK NTF */
|
||||
llcp_rx_node_retain(ctx);
|
||||
ctx->state = RP_ENC_STATE_WAIT_TX_ENC_RSP;
|
||||
} else {
|
||||
tx = llcp_rp_enc_tx(conn, ctx, PDU_DATA_LLCTRL_TYPE_ENC_RSP);
|
||||
rp_enc_store_s(conn, ctx, (struct pdu_data *)tx->pdu);
|
||||
rp_enc_send_ltk_ntf(conn, ctx, evt, param);
|
||||
|
||||
rp_enc_ntf_ltk(conn, ctx);
|
||||
ctx->state = RP_ENC_STATE_WAIT_LTK_REPLY;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -969,6 +938,7 @@ static void rp_enc_state_wait_rx_enc_req(struct ll_conn *conn, struct proc_ctx *
|
|||
llcp_lr_pause(conn);
|
||||
|
||||
rp_enc_store_m(conn, ctx, param);
|
||||
|
||||
rp_enc_send_enc_rsp(conn, ctx, evt, param);
|
||||
break;
|
||||
default:
|
||||
|
@ -990,19 +960,6 @@ static void rp_enc_state_wait_tx_enc_rsp(struct ll_conn *conn, struct proc_ctx *
|
|||
}
|
||||
}
|
||||
|
||||
static void rp_enc_state_wait_ntf_ltk_req(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
|
||||
void *param)
|
||||
{
|
||||
switch (evt) {
|
||||
case RP_ENC_EVT_RUN:
|
||||
rp_enc_send_ltk_ntf(conn, ctx, evt, param);
|
||||
break;
|
||||
default:
|
||||
/* Ignore other evts */
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static void rp_enc_state_wait_ltk_reply(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
|
||||
void *param)
|
||||
{
|
||||
|
@ -1058,19 +1015,6 @@ static void rp_enc_state_wait_rx_start_enc_rsp(struct ll_conn *conn, struct proc
|
|||
}
|
||||
}
|
||||
|
||||
static void rp_enc_state_wait_ntf(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
|
||||
void *param)
|
||||
{
|
||||
switch (evt) {
|
||||
case RP_ENC_EVT_RUN:
|
||||
rp_enc_complete(conn, ctx, evt, param);
|
||||
break;
|
||||
default:
|
||||
/* Ignore other evts */
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static void rp_enc_state_wait_tx_start_enc_rsp(struct ll_conn *conn, struct proc_ctx *ctx,
|
||||
uint8_t evt, void *param)
|
||||
{
|
||||
|
@ -1162,9 +1106,6 @@ static void rp_enc_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8
|
|||
case RP_ENC_STATE_WAIT_TX_ENC_RSP:
|
||||
rp_enc_state_wait_tx_enc_rsp(conn, ctx, evt, param);
|
||||
break;
|
||||
case RP_ENC_STATE_WAIT_NTF_LTK_REQ:
|
||||
rp_enc_state_wait_ntf_ltk_req(conn, ctx, evt, param);
|
||||
break;
|
||||
case RP_ENC_STATE_WAIT_LTK_REPLY:
|
||||
rp_enc_state_wait_ltk_reply(conn, ctx, evt, param);
|
||||
break;
|
||||
|
@ -1177,9 +1118,6 @@ static void rp_enc_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8
|
|||
case RP_ENC_STATE_WAIT_RX_START_ENC_RSP:
|
||||
rp_enc_state_wait_rx_start_enc_rsp(conn, ctx, evt, param);
|
||||
break;
|
||||
case RP_ENC_STATE_WAIT_NTF:
|
||||
rp_enc_state_wait_ntf(conn, ctx, evt, param);
|
||||
break;
|
||||
case RP_ENC_STATE_WAIT_TX_START_ENC_RSP:
|
||||
rp_enc_state_wait_tx_start_enc_rsp(conn, ctx, evt, param);
|
||||
break;
|
||||
|
|
|
@ -156,9 +156,14 @@ struct proc_ctx {
|
|||
enum llcp_wait_reason wait_reason;
|
||||
#endif /* LLCP_TX_CTRL_BUF_QUEUE_ENABLE */
|
||||
|
||||
/* TX node awaiting ack */
|
||||
struct node_tx *tx_ack;
|
||||
|
||||
struct {
|
||||
/* Rx node link element */
|
||||
memq_link_t *link;
|
||||
/* TX node awaiting ack */
|
||||
struct node_tx *tx_ack;
|
||||
/* most recent RX node */
|
||||
struct node_rx_pdu *rx;
|
||||
} node_ref;
|
||||
/*
|
||||
* This flag is set to 1 when we are finished with the control
|
||||
* procedure and it is safe to release the context ctx
|
||||
|
@ -194,6 +199,7 @@ struct proc_ctx {
|
|||
uint8_t ntf_pu:1;
|
||||
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
|
||||
uint8_t ntf_dle:1;
|
||||
struct node_rx_pdu *ntf_dle_node;
|
||||
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
|
||||
uint8_t error;
|
||||
uint16_t instant;
|
||||
|
@ -400,6 +406,10 @@ bool llcp_tx_alloc_peek(struct ll_conn *conn, struct proc_ctx *ctx);
|
|||
void llcp_tx_alloc_unpeek(struct proc_ctx *ctx);
|
||||
struct node_tx *llcp_tx_alloc(struct ll_conn *conn, struct proc_ctx *ctx);
|
||||
void llcp_proc_ctx_release(struct proc_ctx *ctx);
|
||||
void llcp_ntf_set_pending(struct ll_conn *conn);
|
||||
void llcp_ntf_clear_pending(struct ll_conn *conn);
|
||||
bool llcp_ntf_pending(struct ll_conn *conn);
|
||||
void llcp_rx_node_retain(struct proc_ctx *ctx);
|
||||
|
||||
/*
|
||||
* ULL -> LLL Interface
|
||||
|
@ -521,7 +531,8 @@ void llcp_lr_pause(struct ll_conn *conn);
|
|||
void llcp_lr_resume(struct ll_conn *conn);
|
||||
void llcp_lr_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, struct node_tx *tx);
|
||||
void llcp_lr_tx_ntf(struct ll_conn *conn, struct proc_ctx *ctx);
|
||||
void llcp_lr_rx(struct ll_conn *conn, struct proc_ctx *ctx, struct node_rx_pdu *rx);
|
||||
void llcp_lr_rx(struct ll_conn *conn, struct proc_ctx *ctx, memq_link_t *link,
|
||||
struct node_rx_pdu *rx);
|
||||
void llcp_lr_enqueue(struct ll_conn *conn, struct proc_ctx *ctx);
|
||||
void llcp_lr_init(struct ll_conn *conn);
|
||||
void llcp_lr_run(struct ll_conn *conn);
|
||||
|
@ -544,14 +555,16 @@ void llcp_rr_pause(struct ll_conn *conn);
|
|||
void llcp_rr_resume(struct ll_conn *conn);
|
||||
void llcp_rr_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, struct node_tx *tx);
|
||||
void llcp_rr_tx_ntf(struct ll_conn *conn, struct proc_ctx *ctx);
|
||||
void llcp_rr_rx(struct ll_conn *conn, struct proc_ctx *ctx, struct node_rx_pdu *rx);
|
||||
void llcp_rr_rx(struct ll_conn *conn, struct proc_ctx *ctx, memq_link_t *link,
|
||||
struct node_rx_pdu *rx);
|
||||
void llcp_rr_init(struct ll_conn *conn);
|
||||
void llcp_rr_prepare(struct ll_conn *conn, struct node_rx_pdu *rx);
|
||||
void llcp_rr_run(struct ll_conn *conn);
|
||||
void llcp_rr_complete(struct ll_conn *conn);
|
||||
void llcp_rr_connect(struct ll_conn *conn);
|
||||
void llcp_rr_disconnect(struct ll_conn *conn);
|
||||
void llcp_rr_new(struct ll_conn *conn, struct node_rx_pdu *rx, bool valid_pdu);
|
||||
void llcp_rr_new(struct ll_conn *conn, memq_link_t *link, struct node_rx_pdu *rx,
|
||||
bool valid_pdu);
|
||||
void llcp_rr_check_done(struct ll_conn *conn, struct proc_ctx *ctx);
|
||||
|
||||
#if defined(CONFIG_BT_CTLR_LE_PING)
|
||||
|
|
|
@ -220,8 +220,13 @@ void llcp_lr_prt_stop(struct ll_conn *conn)
|
|||
conn->llcp.local.prt_expire = 0U;
|
||||
}
|
||||
|
||||
void llcp_lr_rx(struct ll_conn *conn, struct proc_ctx *ctx, struct node_rx_pdu *rx)
|
||||
void llcp_lr_rx(struct ll_conn *conn, struct proc_ctx *ctx, memq_link_t *link,
|
||||
struct node_rx_pdu *rx)
|
||||
{
|
||||
/* Store RX node and link */
|
||||
ctx->node_ref.rx = rx;
|
||||
ctx->node_ref.link = link;
|
||||
|
||||
switch (ctx->proc) {
|
||||
#if defined(CONFIG_BT_CTLR_LE_PING)
|
||||
case PROC_LE_PING:
|
||||
|
@ -326,6 +331,10 @@ void llcp_lr_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, struct node_tx *
|
|||
break;
|
||||
/* Ignore tx_ack */
|
||||
}
|
||||
|
||||
/* Clear TX node reference */
|
||||
ctx->node_ref.tx_ack = NULL;
|
||||
|
||||
llcp_lr_check_done(conn, ctx);
|
||||
}
|
||||
|
||||
|
|
|
@ -57,9 +57,9 @@ enum {
|
|||
LP_PU_STATE_WAIT_TX_PHY_UPDATE_IND,
|
||||
LP_PU_STATE_WAIT_TX_ACK_PHY_UPDATE_IND,
|
||||
LP_PU_STATE_WAIT_RX_PHY_UPDATE_IND,
|
||||
LP_PU_STATE_WAIT_NTF_AVAIL,
|
||||
LP_PU_STATE_WAIT_INSTANT,
|
||||
LP_PU_STATE_WAIT_INSTANT_ON_AIR,
|
||||
LP_PU_STATE_WAIT_NTF,
|
||||
};
|
||||
|
||||
/* LLCP Local Procedure PHY Update FSM events */
|
||||
|
@ -95,9 +95,9 @@ enum {
|
|||
RP_PU_STATE_WAIT_TX_PHY_UPDATE_IND,
|
||||
RP_PU_STATE_WAIT_TX_ACK_PHY_UPDATE_IND,
|
||||
RP_PU_STATE_WAIT_RX_PHY_UPDATE_IND,
|
||||
RP_PU_STATE_WAIT_NTF_AVAIL,
|
||||
RP_PU_STATE_WAIT_INSTANT,
|
||||
RP_PU_STATE_WAIT_INSTANT_ON_AIR,
|
||||
RP_PU_STATE_WAIT_NTF,
|
||||
};
|
||||
|
||||
/* LLCP Remote Procedure PHY Update FSM events */
|
||||
|
@ -372,38 +372,56 @@ static void pu_prepare_instant(struct ll_conn *conn, struct proc_ctx *ctx)
|
|||
* LLCP Local Procedure PHY Update FSM
|
||||
*/
|
||||
|
||||
static void lp_pu_tx(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t opcode)
|
||||
static void lp_pu_tx(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
|
||||
{
|
||||
struct node_tx *tx;
|
||||
struct pdu_data *pdu;
|
||||
|
||||
/* Allocate tx node */
|
||||
tx = llcp_tx_alloc(conn, ctx);
|
||||
LL_ASSERT(tx);
|
||||
/* Allocate tx node, but only do it if not already done */
|
||||
if (ctx->node_ref.tx_ack == NULL) {
|
||||
ctx->node_ref.tx_ack = llcp_tx_alloc(conn, ctx);
|
||||
LL_ASSERT(ctx->node_ref.tx_ack);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_BT_CENTRAL)
|
||||
if (!((ctx->tx_opcode == PDU_DATA_LLCTRL_TYPE_PHY_REQ) &&
|
||||
(conn->lll.role == BT_HCI_ROLE_CENTRAL))) {
|
||||
if (!llcp_ntf_alloc_is_available()) {
|
||||
/* No NTF nodes avail, so we need to hold off TX */
|
||||
ctx->state = LP_PU_STATE_WAIT_NTF_AVAIL;
|
||||
return;
|
||||
}
|
||||
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
|
||||
ctx->data.pu.ntf_dle_node = llcp_ntf_alloc();
|
||||
LL_ASSERT(ctx->data.pu.ntf_dle_node);
|
||||
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
|
||||
}
|
||||
#endif
|
||||
|
||||
tx = ctx->node_ref.tx_ack;
|
||||
pdu = (struct pdu_data *)tx->pdu;
|
||||
|
||||
/* Encode LL Control PDU */
|
||||
switch (opcode) {
|
||||
switch (ctx->tx_opcode) {
|
||||
case PDU_DATA_LLCTRL_TYPE_PHY_REQ:
|
||||
pu_set_preferred_phys(conn, ctx);
|
||||
llcp_pdu_encode_phy_req(ctx, pdu);
|
||||
llcp_tx_pause_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_PHY_UPDATE);
|
||||
ctx->state = LP_PU_STATE_WAIT_TX_ACK_PHY_REQ;
|
||||
break;
|
||||
#if defined(CONFIG_BT_CENTRAL)
|
||||
case PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND:
|
||||
pu_prep_update_ind(conn, ctx);
|
||||
pu_prepare_instant(conn, ctx);
|
||||
llcp_pdu_encode_phy_update_ind(ctx, pdu);
|
||||
ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
|
||||
ctx->state = LP_PU_STATE_WAIT_TX_ACK_PHY_UPDATE_IND;
|
||||
break;
|
||||
#endif /* CONFIG_BT_CENTRAL */
|
||||
default:
|
||||
LL_ASSERT(0);
|
||||
}
|
||||
|
||||
/* Always 'request' the ACK signal */
|
||||
ctx->tx_ack = tx;
|
||||
ctx->tx_opcode = pdu->llctrl.opcode;
|
||||
|
||||
/* Enqueue LL Control PDU towards LLL */
|
||||
llcp_tx_enqueue(conn, tx);
|
||||
|
||||
|
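lp_pu_tx() above now guarantees notification buffers before the PDU goes out: the TX node is allocated once and kept in ctx->node_ref.tx_ack, and if no NTF node is free the FSM parks in the WAIT_NTF_AVAIL state and retries on the next RUN event. A reduced sketch of that guard (assumed shape, not the exact code; encoding of the stored node elided):

static bool tx_when_ntf_available(struct ll_conn *conn, struct proc_ctx *ctx)
{
	/* Allocate the TX node only once; keep it across retries */
	if (ctx->node_ref.tx_ack == NULL) {
		ctx->node_ref.tx_ack = llcp_tx_alloc(conn, ctx);
		LL_ASSERT(ctx->node_ref.tx_ack);
	}

	if (!llcp_ntf_alloc_is_available()) {
		/* No NTF buffer yet: hold off TX, the state machine waits */
		return false;
	}

	/* Encode the stored node and enqueue it towards LLL */
	llcp_tx_enqueue(conn, ctx->node_ref.tx_ack);
	return true;
}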
@ -416,21 +434,33 @@ static void pu_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
|
|||
struct node_rx_pdu *ntf;
|
||||
struct node_rx_pu *pdu;
|
||||
|
||||
/* Allocate ntf node */
|
||||
ntf = llcp_ntf_alloc();
|
||||
/* Piggy-back on stored RX node */
|
||||
ntf = ctx->node_ref.rx;
|
||||
LL_ASSERT(ntf);
|
||||
|
||||
ntf->hdr.type = NODE_RX_TYPE_PHY_UPDATE;
|
||||
ntf->hdr.handle = conn->lll.handle;
|
||||
pdu = (struct node_rx_pu *)ntf->pdu;
|
||||
if (ctx->data.pu.ntf_pu) {
|
||||
LL_ASSERT(ntf->hdr.type == NODE_RX_TYPE_RETAIN);
|
||||
ntf->hdr.type = NODE_RX_TYPE_PHY_UPDATE;
|
||||
ntf->hdr.handle = conn->lll.handle;
|
||||
pdu = (struct node_rx_pu *)ntf->pdu;
|
||||
|
||||
pdu->status = ctx->data.pu.error;
|
||||
pdu->rx = conn->lll.phy_rx;
|
||||
pdu->tx = conn->lll.phy_tx;
|
||||
pdu->status = ctx->data.pu.error;
|
||||
pdu->rx = conn->lll.phy_rx;
|
||||
pdu->tx = conn->lll.phy_tx;
|
||||
} else {
|
||||
ntf->hdr.type = NODE_RX_TYPE_RELEASE;
|
||||
}
|
||||
|
||||
/* Enqueue notification towards LL */
|
||||
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
|
||||
/* only 'put' as the 'sched' is handled when handling DLE ntf */
|
||||
ll_rx_put(ntf->hdr.link, ntf);
|
||||
#else
|
||||
ll_rx_put_sched(ntf->hdr.link, ntf);
|
||||
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
|
||||
|
||||
ctx->data.pu.ntf_pu = 0;
|
||||
ctx->node_ref.rx = NULL;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
|
||||
|
@ -439,82 +469,69 @@ static void pu_dle_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
|
|||
struct node_rx_pdu *ntf;
|
||||
struct pdu_data *pdu;
|
||||
|
||||
/* Allocate ntf node */
|
||||
ntf = llcp_ntf_alloc();
|
||||
LL_ASSERT(ntf);
|
||||
/* Retrieve DLE ntf node */
|
||||
ntf = ctx->data.pu.ntf_dle_node;
|
||||
|
||||
ntf->hdr.type = NODE_RX_TYPE_DC_PDU;
|
||||
ntf->hdr.handle = conn->lll.handle;
|
||||
pdu = (struct pdu_data *)ntf->pdu;
|
||||
if (!ctx->data.pu.ntf_dle) {
|
||||
if (!ntf) {
|
||||
/* If no DLE ntf was pre-allocated there is nothing more to do */
|
||||
/* This will happen in case of a completion on UNKNOWN_RSP to PHY_REQ
|
||||
* in Central case.
|
||||
*/
|
||||
return;
|
||||
}
|
||||
/* Signal to release pre-allocated node in case there is no DLE ntf */
|
||||
ntf->hdr.type = NODE_RX_TYPE_RELEASE;
|
||||
} else {
|
||||
LL_ASSERT(ntf);
|
||||
|
||||
llcp_ntf_encode_length_change(conn, pdu);
|
||||
ntf->hdr.type = NODE_RX_TYPE_DC_PDU;
|
||||
ntf->hdr.handle = conn->lll.handle;
|
||||
pdu = (struct pdu_data *)ntf->pdu;
|
||||
|
||||
llcp_ntf_encode_length_change(conn, pdu);
|
||||
}
|
||||
|
||||
/* Enqueue notification towards LL */
|
||||
ll_rx_put_sched(ntf->hdr.link, ntf);
|
||||
|
||||
ctx->data.pu.ntf_dle = 0;
|
||||
ctx->data.pu.ntf_dle_node = NULL;
|
||||
}
|
||||
#endif
|
||||
|
||||
static void lp_pu_tx_ntf(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt)
|
||||
static void lp_pu_complete_finalize(struct ll_conn *conn, struct proc_ctx *ctx)
|
||||
{
|
||||
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
|
||||
#define NTF_DLE (ctx->data.pu.ntf_dle)
|
||||
#else
|
||||
#define NTF_DLE 0
|
||||
#endif
|
||||
uint8_t ntf_count = ctx->data.pu.ntf_pu + NTF_DLE;
|
||||
|
||||
/* if we need to send both PHY and DLE notification, but we
|
||||
* do not have 2 buffers available we serialize the sending
|
||||
* of notifications
|
||||
*/
|
||||
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
|
||||
if ((ntf_count > 1) && !llcp_ntf_alloc_num_available(ntf_count)) {
|
||||
ntf_count = 1;
|
||||
}
|
||||
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
|
||||
if (ntf_count && !llcp_ntf_alloc_num_available(ntf_count)) {
|
||||
ctx->state = LP_PU_STATE_WAIT_NTF;
|
||||
} else {
|
||||
if (ctx->data.pu.ntf_pu) {
|
||||
pu_ntf(conn, ctx);
|
||||
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
|
||||
if (ntf_count == 1 && NTF_DLE == 1) {
|
||||
ctx->state = LP_PU_STATE_WAIT_NTF;
|
||||
return;
|
||||
}
|
||||
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
|
||||
}
|
||||
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
|
||||
if (ctx->data.pu.ntf_dle) {
|
||||
pu_dle_ntf(conn, ctx);
|
||||
}
|
||||
#endif
|
||||
llcp_lr_complete(conn);
|
||||
ctx->state = LP_PU_STATE_IDLE;
|
||||
llcp_rr_set_paused_cmd(conn, PROC_NONE);
|
||||
}
|
||||
llcp_lr_complete(conn);
|
||||
llcp_rr_set_paused_cmd(conn, PROC_NONE);
|
||||
ctx->state = LP_PU_STATE_IDLE;
|
||||
}
|
||||
|
||||
static void lp_pu_complete_after_inst_on_air(struct ll_conn *conn, struct proc_ctx *ctx,
|
||||
uint8_t evt, void *param)
|
||||
static void lp_pu_tx_ntf(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
|
||||
{
|
||||
/* When complete reset timing restrictions - idempotent
|
||||
* (so no problem if we need to wait for NTF buffer)
|
||||
*/
|
||||
pu_reset_timing_restrict(conn);
|
||||
|
||||
/* Wait for instant on air to send notification */
|
||||
ctx->state = LP_PU_STATE_WAIT_INSTANT_ON_AIR;
|
||||
pu_ntf(conn, ctx);
|
||||
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
|
||||
pu_dle_ntf(conn, ctx);
|
||||
#endif
|
||||
lp_pu_complete_finalize(conn, ctx);
|
||||
}
|
||||
|
||||
static void lp_pu_complete(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
|
||||
{
|
||||
/* when complete reset timing restrictions - idempotent
|
||||
* (so no problem if we need to wait for NTF buffer)
|
||||
*/
|
||||
pu_reset_timing_restrict(conn);
|
||||
|
||||
lp_pu_tx_ntf(conn, ctx, evt);
|
||||
/* Postpone procedure completion (and possible NTF generation) to actual 'air instant'
|
||||
* Since LLCP STM is driven from LLL prepare this actually happens BEFORE instant
|
||||
* and thus NTFs are generated and propagated up prior to actual instant on air.
|
||||
* Instead postpone completion/NTF to the beginning of RX handling
|
||||
*/
|
||||
ctx->state = LP_PU_STATE_WAIT_INSTANT_ON_AIR;
|
||||
|
||||
if (ctx->node_ref.rx) {
|
||||
/* Mark RX node to NOT release */
|
||||
llcp_rx_node_retain(ctx);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static void lp_pu_send_phy_req(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
|
||||
|
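In the hunks above, pu_ntf() only 'puts' its node when a DLE notification follows, and the 'sched' then happens in pu_dle_ntf(), so both notifications reach the Host in a single scheduling pass. A minimal sketch of that ordering (function name is hypothetical, calls as used in the surrounding code):

static void put_pu_then_dle_ntf(struct node_rx_pdu *pu_ntf,
				struct node_rx_pdu *dle_ntf)
{
	/* First notification is only 'put'... */
	ll_rx_put(pu_ntf->hdr.link, pu_ntf);
	/* ...the trailing one also triggers scheduling towards the Host */
	ll_rx_put_sched(dle_ntf->hdr.link, dle_ntf);
}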
@ -526,9 +543,8 @@ static void lp_pu_send_phy_req(struct ll_conn *conn, struct proc_ctx *ctx, uint8
|
|||
} else {
|
||||
llcp_rr_set_incompat(conn, INCOMPAT_RESOLVABLE);
|
||||
llcp_rr_set_paused_cmd(conn, PROC_CTE_REQ);
|
||||
lp_pu_tx(conn, ctx, PDU_DATA_LLCTRL_TYPE_PHY_REQ);
|
||||
llcp_tx_pause_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_PHY_UPDATE);
|
||||
ctx->state = LP_PU_STATE_WAIT_TX_ACK_PHY_REQ;
|
||||
ctx->tx_opcode = PDU_DATA_LLCTRL_TYPE_PHY_REQ;
|
||||
lp_pu_tx(conn, ctx, evt, param);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -539,9 +555,8 @@ static void lp_pu_send_phy_update_ind(struct ll_conn *conn, struct proc_ctx *ctx
|
|||
if (llcp_lr_ispaused(conn) || !llcp_tx_alloc_peek(conn, ctx)) {
|
||||
ctx->state = LP_PU_STATE_WAIT_TX_PHY_UPDATE_IND;
|
||||
} else {
|
||||
lp_pu_tx(conn, ctx, PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND);
|
||||
ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
|
||||
ctx->state = LP_PU_STATE_WAIT_TX_ACK_PHY_UPDATE_IND;
|
||||
ctx->tx_opcode = PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND;
|
||||
lp_pu_tx(conn, ctx, evt, param);
|
||||
}
|
||||
}
|
||||
#endif /* CONFIG_BT_CENTRAL */
|
||||
|
@ -587,6 +602,10 @@ static void lp_pu_st_wait_rx_phy_rsp(struct ll_conn *conn, struct proc_ctx *ctx,
|
|||
llcp_tx_pause_data(conn, LLCP_TX_QUEUE_PAUSE_DATA_PHY_UPDATE);
|
||||
/* Combine with the 'Preferred' phys */
|
||||
pu_combine_phys(conn, ctx, tx_pref, rx_pref);
|
||||
|
||||
/* Mark RX node to NOT release */
|
||||
llcp_rx_node_retain(ctx);
|
||||
|
||||
lp_pu_send_phy_update_ind(conn, ctx, evt, param);
|
||||
break;
|
||||
case LP_PU_EVT_UNKNOWN:
|
||||
|
@ -595,6 +614,10 @@ static void lp_pu_st_wait_rx_phy_rsp(struct ll_conn *conn, struct proc_ctx *ctx,
|
|||
* Peer does not accept PHY UPDATE, so disable non 1M phys on current connection
|
||||
*/
|
||||
feature_unmask_features(conn, LL_FEAT_BIT_PHY_2M | LL_FEAT_BIT_PHY_CODED);
|
||||
|
||||
/* Mark RX node to NOT release */
|
||||
llcp_rx_node_retain(ctx);
|
||||
|
||||
ctx->data.pu.error = BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
|
||||
ctx->data.pu.ntf_pu = 1;
|
||||
lp_pu_complete(conn, ctx, evt, param);
|
||||
|
@ -699,6 +722,9 @@ static void lp_pu_st_wait_rx_phy_update_ind(struct ll_conn *conn, struct proc_ct
|
|||
llcp_pdu_decode_phy_update_ind(ctx, (struct pdu_data *)param);
|
||||
const uint8_t end_procedure = pu_check_update_ind(conn, ctx);
|
||||
|
||||
/* Mark RX node to NOT release */
|
||||
llcp_rx_node_retain(ctx);
|
||||
|
||||
if (!end_procedure) {
|
||||
if (ctx->data.pu.p_to_c_phy) {
|
||||
/* If periph to central phy changes apply tx timing restriction */
|
||||
|
@ -725,6 +751,10 @@ static void lp_pu_st_wait_rx_phy_update_ind(struct ll_conn *conn, struct proc_ct
|
|||
case LP_PU_EVT_REJECT:
|
||||
llcp_rr_set_incompat(conn, INCOMPAT_NO_COLLISION);
|
||||
llcp_pdu_decode_reject_ext_ind(ctx, (struct pdu_data *) param);
|
||||
|
||||
/* Mark RX node to NOT release */
|
||||
llcp_rx_node_retain(ctx);
|
||||
|
||||
ctx->data.pu.error = ctx->reject_ext_ind.error_code;
|
||||
ctx->data.pu.ntf_pu = 1;
|
||||
lp_pu_complete(conn, ctx, evt, param);
|
||||
|
@ -736,6 +766,10 @@ static void lp_pu_st_wait_rx_phy_update_ind(struct ll_conn *conn, struct proc_ct
|
|||
* Peer does not accept PHY UPDATE, so disable non 1M phys on current connection
|
||||
*/
|
||||
feature_unmask_features(conn, LL_FEAT_BIT_PHY_2M | LL_FEAT_BIT_PHY_CODED);
|
||||
|
||||
/* Mark RX node to NOT release */
|
||||
llcp_rx_node_retain(ctx);
|
||||
|
||||
ctx->data.pu.error = BT_HCI_ERR_UNSUPP_REMOTE_FEATURE;
|
||||
ctx->data.pu.ntf_pu = 1;
|
||||
lp_pu_complete(conn, ctx, evt, param);
|
||||
|
@ -761,7 +795,7 @@ static void lp_pu_check_instant(struct ll_conn *conn, struct proc_ctx *ctx, uint
|
|||
llcp_rr_set_incompat(conn, INCOMPAT_NO_COLLISION);
|
||||
ctx->data.pu.error = BT_HCI_ERR_SUCCESS;
|
||||
ctx->data.pu.ntf_pu = (phy_changed || ctx->data.pu.host_initiated);
|
||||
lp_pu_complete_after_inst_on_air(conn, ctx, evt, param);
|
||||
lp_pu_complete(conn, ctx, evt, param);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -778,11 +812,12 @@ static void lp_pu_st_wait_instant(struct ll_conn *conn, struct proc_ctx *ctx, ui
|
|||
}
|
||||
}
|
||||
|
||||
static void lp_pu_st_wait_instant_on_air(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt)
|
||||
static void lp_pu_st_wait_instant_on_air(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
|
||||
void *param)
|
||||
{
|
||||
switch (evt) {
|
||||
case LP_PU_EVT_NTF:
|
||||
lp_pu_tx_ntf(conn, ctx, evt);
|
||||
lp_pu_tx_ntf(conn, ctx, evt, param);
|
||||
break;
|
||||
default:
|
||||
/* Ignore other evts */
|
||||
|
@ -790,11 +825,12 @@ static void lp_pu_st_wait_instant_on_air(struct ll_conn *conn, struct proc_ctx *
|
|||
}
|
||||
}
|
||||
|
||||
static void lp_pu_st_wait_ntf(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
|
||||
static void lp_pu_st_wait_ntf_avail(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
|
||||
void *param)
|
||||
{
|
||||
switch (evt) {
|
||||
case LP_PU_EVT_RUN:
|
||||
lp_pu_tx_ntf(conn, ctx, evt);
|
||||
lp_pu_tx(conn, ctx, evt, param);
|
||||
break;
|
||||
default:
|
||||
/* Ignore other evts */
|
||||
|
@ -834,10 +870,10 @@ static void lp_pu_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8_
|
|||
lp_pu_st_wait_instant(conn, ctx, evt, param);
|
||||
break;
|
||||
case LP_PU_STATE_WAIT_INSTANT_ON_AIR:
|
||||
lp_pu_st_wait_instant_on_air(conn, ctx, evt);
|
||||
lp_pu_st_wait_instant_on_air(conn, ctx, evt, param);
|
||||
break;
|
||||
case LP_PU_STATE_WAIT_NTF:
|
||||
lp_pu_st_wait_ntf(conn, ctx, evt, param);
|
||||
case LP_PU_STATE_WAIT_NTF_AVAIL:
|
||||
lp_pu_st_wait_ntf_avail(conn, ctx, evt, param);
|
||||
break;
|
||||
default:
|
||||
/* Unknown state */
|
||||
|
@ -904,22 +940,38 @@ bool llcp_lp_pu_awaiting_instant(struct proc_ctx *ctx)
|
|||
/*
|
||||
* LLCP Remote Procedure PHY Update FSM
|
||||
*/
|
||||
static void rp_pu_tx(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t opcode)
|
||||
static void rp_pu_tx(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
|
||||
{
|
||||
struct node_tx *tx;
|
||||
struct pdu_data *pdu;
|
||||
|
||||
/* Allocate tx node */
|
||||
tx = llcp_tx_alloc(conn, ctx);
|
||||
LL_ASSERT(tx);
|
||||
/* (pre)allocate tx node, but only do it if not already done */
|
||||
if (ctx->node_ref.tx_ack == NULL) {
|
||||
ctx->node_ref.tx_ack = llcp_tx_alloc(conn, ctx);
|
||||
LL_ASSERT(ctx->node_ref.tx_ack);
|
||||
}
|
||||
|
||||
if (!llcp_ntf_alloc_is_available()) {
|
||||
/* No NTF nodes avail, so we need to hold off TX */
|
||||
ctx->state = RP_PU_STATE_WAIT_NTF_AVAIL;
|
||||
return;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
|
||||
ctx->data.pu.ntf_dle_node = llcp_ntf_alloc();
|
||||
LL_ASSERT(ctx->data.pu.ntf_dle_node);
|
||||
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
|
||||
|
||||
tx = ctx->node_ref.tx_ack;
|
||||
pdu = (struct pdu_data *)tx->pdu;
|
||||
|
||||
/* Encode LL Control PDU */
|
||||
switch (opcode) {
|
||||
switch (ctx->tx_opcode) {
|
||||
#if defined(CONFIG_BT_PERIPHERAL)
|
||||
case PDU_DATA_LLCTRL_TYPE_PHY_RSP:
|
||||
llcp_pdu_encode_phy_rsp(conn, pdu);
|
||||
ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND;
|
||||
ctx->state = RP_PU_STATE_WAIT_TX_ACK_PHY_RSP;
|
||||
break;
|
||||
#endif /* CONFIG_BT_PERIPHERAL */
|
||||
#if defined(CONFIG_BT_CENTRAL)
|
||||
|
@ -927,15 +979,14 @@ static void rp_pu_tx(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t opcode)
|
|||
pu_prep_update_ind(conn, ctx);
|
||||
pu_prepare_instant(conn, ctx);
|
||||
llcp_pdu_encode_phy_update_ind(ctx, pdu);
|
||||
ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
|
||||
ctx->state = RP_PU_STATE_WAIT_TX_ACK_PHY_UPDATE_IND;
|
||||
break;
|
||||
#endif /* CONFIG_BT_CENTRAL */
|
||||
default:
|
||||
LL_ASSERT(0);
|
||||
}
|
||||
|
||||
ctx->tx_ack = tx;
|
||||
ctx->tx_opcode = pdu->llctrl.opcode;
|
||||
|
||||
/* Enqueue LL Control PDU towards LLL */
|
||||
llcp_tx_enqueue(conn, tx);
|
||||
|
||||
|
@ -945,65 +996,29 @@ static void rp_pu_tx(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t opcode)
|
|||
|
||||
static void rp_pu_complete_finalize(struct ll_conn *conn, struct proc_ctx *ctx)
|
||||
{
|
||||
llcp_rr_set_paused_cmd(conn, PROC_NONE);
|
||||
llcp_rr_complete(conn);
|
||||
llcp_rr_set_paused_cmd(conn, PROC_NONE);
|
||||
ctx->state = RP_PU_STATE_IDLE;
|
||||
}
|
||||
|
||||
static void rp_pu_complete(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
|
||||
{
|
||||
/* when complete reset timing restrictions - idempotent
|
||||
* (so no problem if we need to wait for NTF buffer)
|
||||
*/
|
||||
pu_reset_timing_restrict(conn);
|
||||
|
||||
/* For remote initiated PHY update Host is notified only if a PHY changes */
|
||||
if (ctx->data.pu.ntf_pu) {
|
||||
/* Notification may be send after instant is on air */
|
||||
ctx->state = RP_PU_STATE_WAIT_INSTANT_ON_AIR;
|
||||
} else {
|
||||
rp_pu_complete_finalize(conn, ctx);
|
||||
}
|
||||
/* Postpone procedure completion (and possible NTF generation) to actual 'air instant'
|
||||
* Since LLCP STM is driven from LLL prepare this actually happens BEFORE instant
|
||||
* and thus NTFs are generated and propagated up prior to actual instant on air.
|
||||
* Instead postpone completion/NTF to the beginning of RX handling
|
||||
*/
|
||||
ctx->state = RP_PU_STATE_WAIT_INSTANT_ON_AIR;
|
||||
}
|
||||
|
||||
void rp_pu_tx_ntf(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
|
||||
static void rp_pu_tx_ntf(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
|
||||
{
|
||||
|
||||
pu_ntf(conn, ctx);
|
||||
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
|
||||
#define NTF_DLE (ctx->data.pu.ntf_dle)
|
||||
#else
|
||||
#define NTF_DLE 0
|
||||
pu_dle_ntf(conn, ctx);
|
||||
#endif
|
||||
uint8_t ntf_count = ctx->data.pu.ntf_pu + NTF_DLE;
|
||||
|
||||
/* if we need to send both PHY and DLE notification, but we
|
||||
* do not have 2 buffers available we serialize the sending
|
||||
* of notifications
|
||||
*/
|
||||
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
|
||||
if ((ntf_count > 1) && !llcp_ntf_alloc_num_available(ntf_count)) {
|
||||
ntf_count = 1;
|
||||
}
|
||||
#endif /* CONFIG_BT_CTLR_DATA_LENGTH) */
|
||||
if ((ntf_count > 0) && !llcp_ntf_alloc_num_available(ntf_count)) {
|
||||
ctx->state = RP_PU_STATE_WAIT_NTF;
|
||||
} else {
|
||||
if (ctx->data.pu.ntf_pu) {
|
||||
pu_ntf(conn, ctx);
|
||||
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
|
||||
if (ntf_count == 1 && NTF_DLE == 1) {
|
||||
ctx->state = RP_PU_STATE_WAIT_NTF;
|
||||
return;
|
||||
}
|
||||
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
|
||||
}
|
||||
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
|
||||
if (ctx->data.pu.ntf_dle) {
|
||||
pu_dle_ntf(conn, ctx);
|
||||
}
|
||||
#endif
|
||||
rp_pu_complete_finalize(conn, ctx);
|
||||
}
|
||||
rp_pu_complete_finalize(conn, ctx);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_BT_CENTRAL)
|
||||
|
@ -1011,15 +1026,14 @@ static void rp_pu_send_phy_update_ind(struct ll_conn *conn, struct proc_ctx *ctx
|
|||
void *param)
|
||||
{
|
||||
if (llcp_rr_ispaused(conn) || !llcp_tx_alloc_peek(conn, ctx) ||
|
||||
(llcp_rr_get_paused_cmd(conn) == PROC_PHY_UPDATE) ||
|
||||
(llcp_rr_get_paused_cmd(conn) == PROC_PHY_UPDATE) ||
|
||||
!ull_is_lll_tx_queue_empty(conn)) {
|
||||
ctx->state = RP_PU_STATE_WAIT_TX_PHY_UPDATE_IND;
|
||||
} else {
|
||||
llcp_rr_set_paused_cmd(conn, PROC_CTE_REQ);
|
||||
rp_pu_tx(conn, ctx, PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND);
|
||||
ctx->tx_opcode = PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND;
|
||||
rp_pu_tx(conn, ctx, evt, param);
|
||||
|
||||
ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_UNUSED;
|
||||
ctx->state = RP_PU_STATE_WAIT_TX_ACK_PHY_UPDATE_IND;
|
||||
}
|
||||
}
|
||||
#endif /* CONFIG_BT_CENTRAL */
|
||||
|
@ -1032,9 +1046,8 @@ static void rp_pu_send_phy_rsp(struct ll_conn *conn, struct proc_ctx *ctx, uint8
|
|||
ctx->state = RP_PU_STATE_WAIT_TX_PHY_RSP;
|
||||
} else {
|
||||
llcp_rr_set_paused_cmd(conn, PROC_CTE_REQ);
|
||||
rp_pu_tx(conn, ctx, PDU_DATA_LLCTRL_TYPE_PHY_RSP);
|
||||
ctx->rx_opcode = PDU_DATA_LLCTRL_TYPE_PHY_UPD_IND;
|
||||
ctx->state = RP_PU_STATE_WAIT_TX_ACK_PHY_RSP;
|
||||
ctx->tx_opcode = PDU_DATA_LLCTRL_TYPE_PHY_RSP;
|
||||
rp_pu_tx(conn, ctx, evt, param);
|
||||
}
|
||||
}
|
||||
#endif /* CONFIG_BT_CENTRAL */
|
||||
|
@ -1064,6 +1077,8 @@ static void rp_pu_st_wait_rx_phy_req(struct ll_conn *conn, struct proc_ctx *ctx,
|
|||
switch (conn->lll.role) {
|
||||
#if defined(CONFIG_BT_CENTRAL)
|
||||
case BT_HCI_ROLE_CENTRAL:
|
||||
/* Mark RX node to NOT release */
|
||||
llcp_rx_node_retain(ctx);
|
||||
rp_pu_send_phy_update_ind(conn, ctx, evt, param);
|
||||
break;
|
||||
#endif /* CONFIG_BT_CENTRAL */
|
||||
|
@ -1165,12 +1180,14 @@ static void rp_pu_st_wait_rx_phy_update_ind(struct ll_conn *conn, struct proc_ct
|
|||
llcp_pdu_decode_phy_update_ind(ctx, (struct pdu_data *)param);
|
||||
const uint8_t end_procedure = pu_check_update_ind(conn, ctx);
|
||||
|
||||
/* Mark RX node to NOT release */
|
||||
llcp_rx_node_retain(ctx);
|
||||
|
||||
if (!end_procedure) {
|
||||
/* Since at least one phy will change,
|
||||
* stop the procedure response timeout
|
||||
*/
|
||||
llcp_rr_prt_stop(conn);
|
||||
|
||||
ctx->state = RP_PU_STATE_WAIT_INSTANT;
|
||||
} else {
|
||||
if (ctx->data.pu.error == BT_HCI_ERR_INSTANT_PASSED) {
|
||||
|
@ -1230,11 +1247,12 @@ static void rp_pu_st_wait_instant_on_air(struct ll_conn *conn, struct proc_ctx *
|
|||
}
|
||||
}
|
||||
|
||||
static void rp_pu_st_wait_ntf(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt, void *param)
|
||||
static void rp_pu_st_wait_ntf_avail(struct ll_conn *conn, struct proc_ctx *ctx, uint8_t evt,
|
||||
void *param)
|
||||
{
|
||||
switch (evt) {
|
||||
case RP_PU_EVT_RUN:
|
||||
rp_pu_tx_ntf(conn, ctx, evt, param);
|
||||
rp_pu_tx(conn, ctx, evt, param);
|
||||
break;
|
||||
default:
|
||||
/* Ignore other evts */
|
||||
|
@ -1276,8 +1294,8 @@ static void rp_pu_execute_fsm(struct ll_conn *conn, struct proc_ctx *ctx, uint8_
|
|||
case RP_PU_STATE_WAIT_INSTANT_ON_AIR:
|
||||
rp_pu_st_wait_instant_on_air(conn, ctx, evt, param);
|
||||
break;
|
||||
case RP_PU_STATE_WAIT_NTF:
|
||||
rp_pu_st_wait_ntf(conn, ctx, evt, param);
|
||||
case RP_PU_STATE_WAIT_NTF_AVAIL:
|
||||
rp_pu_st_wait_ntf_avail(conn, ctx, evt, param);
|
||||
break;
|
||||
default:
|
||||
/* Unknown state */
|
||||
|
|
|
@ -190,7 +190,7 @@ struct proc_ctx *llcp_rr_peek(struct ll_conn *conn)
|
|||
|
||||
bool llcp_rr_ispaused(struct ll_conn *conn)
|
||||
{
|
||||
return conn->llcp.remote.pause == 1U;
|
||||
return (conn->llcp.remote.pause == 1U);
|
||||
}
|
||||
|
||||
void llcp_rr_pause(struct ll_conn *conn)
|
||||
|
@ -213,8 +213,13 @@ void llcp_rr_prt_stop(struct ll_conn *conn)
|
|||
conn->llcp.remote.prt_expire = 0U;
|
||||
}
|
||||
|
||||
void llcp_rr_rx(struct ll_conn *conn, struct proc_ctx *ctx, struct node_rx_pdu *rx)
|
||||
void llcp_rr_rx(struct ll_conn *conn, struct proc_ctx *ctx, memq_link_t *link,
|
||||
struct node_rx_pdu *rx)
|
||||
{
|
||||
/* Store RX node and link */
|
||||
ctx->node_ref.rx = rx;
|
||||
ctx->node_ref.link = link;
|
||||
|
||||
switch (ctx->proc) {
|
||||
case PROC_UNKNOWN:
|
||||
/* Do nothing */
|
||||
|
@ -319,17 +324,15 @@ void llcp_rr_tx_ack(struct ll_conn *conn, struct proc_ctx *ctx, struct node_tx *
|
|||
break;
|
||||
}
|
||||
|
||||
/* Clear TX node reference */
|
||||
ctx->node_ref.tx_ack = NULL;
|
||||
|
||||
llcp_rr_check_done(conn, ctx);
|
||||
}
|
||||
|
||||
void llcp_rr_tx_ntf(struct ll_conn *conn, struct proc_ctx *ctx)
|
||||
{
|
||||
switch (ctx->proc) {
|
||||
#if defined(CONFIG_BT_CTLR_DATA_LENGTH)
|
||||
case PROC_DATA_LENGTH_UPDATE:
|
||||
/* llcp_rp_comm_tx_ntf(conn, ctx); */
|
||||
break;
|
||||
#endif /* CONFIG_BT_CTLR_DATA_LENGTH */
|
||||
#ifdef CONFIG_BT_CTLR_PHY
|
||||
case PROC_PHY_UPDATE:
|
||||
llcp_rp_pu_tx_ntf(conn, ctx);
|
||||
|
@ -867,7 +870,7 @@ static const struct proc_role new_proc_lut[] = {
|
|||
#endif /* CONFIG_BT_CTLR_SCA_UPDATE */
|
||||
};
|
||||
|
||||
void llcp_rr_new(struct ll_conn *conn, struct node_rx_pdu *rx, bool valid_pdu)
|
||||
void llcp_rr_new(struct ll_conn *conn, memq_link_t *link, struct node_rx_pdu *rx, bool valid_pdu)
|
||||
{
|
||||
struct proc_ctx *ctx;
|
||||
struct pdu_data *pdu;
|
||||
|
@ -906,7 +909,7 @@ void llcp_rr_new(struct ll_conn *conn, struct node_rx_pdu *rx, bool valid_pdu)
|
|||
/* Handle PDU */
|
||||
ctx = llcp_rr_peek(conn);
|
||||
if (ctx) {
|
||||
llcp_rr_rx(conn, ctx, rx);
|
||||
llcp_rr_rx(conn, ctx, link, rx);
|
||||
}
|
||||
}
|
||||
|
||||
|
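The signature changes above thread the memq link element together with the RX node all the way into the procedure context, so a later llcp_rx_node_retain() can store both and the node can still be re-enqueued once the NTF is generated. A simplified view of the call chain (sketch only; names taken from the surrounding code, body abridged):

static void demux_ctrl_pdu(struct ll_conn *conn, memq_link_t *link,
			   struct node_rx_pdu *rx)
{
	struct proc_ctx *ctx = llcp_rr_peek(conn);

	if (ctx) {
		/* Stores rx and link in ctx->node_ref for possible retention */
		llcp_rr_rx(conn, ctx, link, rx);
	}
}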
|
|
@ -42,4 +42,5 @@ void ut_rx_node_real(const char *file, uint32_t line, enum helper_node_opcode op
|
|||
struct node_rx_pdu **ntf_ref, void *param);
|
||||
void ut_rx_q_is_empty_real(const char *file, uint32_t line);
|
||||
|
||||
void release_ntf(struct node_rx_pdu *ntf);
|
||||
void encode_pdu(enum helper_pdu_opcode opcode, struct pdu_data *pdu, void *param);
|
||||
|
|
|
@ -40,6 +40,7 @@
|
|||
#include "ull_conn_iso_internal.h"
|
||||
#include "ull_conn_types.h"
|
||||
|
||||
#include "ull_internal.h"
|
||||
#include "ull_conn_internal.h"
|
||||
#include "ull_llcp_internal.h"
|
||||
#include "ull_llcp.h"
|
||||
|
@ -354,14 +355,25 @@ void event_done(struct ll_conn *conn)
|
|||
zassert_equal(*evt_active, 1, "Called outside an active event");
|
||||
*evt_active = 0;
|
||||
|
||||
/* Notify all conotrol procedures that wait with Host notifications for instant to be on
|
||||
/* Notify all control procedures that wait with Host notifications for instant to be on
|
||||
* air. This is done here because UT does not maintain actual connection events.
|
||||
*/
|
||||
ull_cp_tx_ntf(conn);
|
||||
|
||||
while ((rx = (struct node_rx_pdu *)sys_slist_get(<_tx_q))) {
|
||||
ull_cp_rx(conn, rx);
|
||||
free(rx);
|
||||
|
||||
/* Mark buffer for release */
|
||||
rx->hdr.type = NODE_RX_TYPE_RELEASE;
|
||||
|
||||
ull_cp_rx(conn, NULL, rx);
|
||||
|
||||
if (rx->hdr.type == NODE_RX_TYPE_RELEASE) {
|
||||
/* Only release if node was not hi-jacked by LLCP */
|
||||
ll_rx_release(rx);
|
||||
} else if (rx->hdr.type != NODE_RX_TYPE_RETAIN) {
|
||||
/* Otherwise put/sched to emulate ull_cp_rx return path */
|
||||
ll_rx_put_sched(rx->hdr.link, rx);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
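The event_done() loop above mirrors what the controller's rx demux would do after ull_cp_rx() returns, keyed on the node type LLCP leaves behind. The decision can be summarized as follows (sketch mirroring the helper code above):

static void emulate_rx_return_path(struct node_rx_pdu *rx)
{
	if (rx->hdr.type == NODE_RX_TYPE_RELEASE) {
		/* LLCP did not take ownership: release the buffer */
		ll_rx_release(rx);
	} else if (rx->hdr.type != NODE_RX_TYPE_RETAIN) {
		/* Node was re-purposed (e.g. piggy-backed NTF): pass it up */
		ll_rx_put_sched(rx->hdr.link, rx);
	}
	/* NODE_RX_TYPE_RETAIN: parked in the procedure context, nothing to do */
}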
@ -383,6 +395,8 @@ uint16_t event_counter(struct ll_conn *conn)
|
|||
return event_counter;
|
||||
}
|
||||
|
||||
static struct node_rx_pdu *rx_malloc_store;
|
||||
|
||||
void lt_tx_real(const char *file, uint32_t line, enum helper_pdu_opcode opcode,
|
||||
struct ll_conn *conn, void *param)
|
||||
{
|
||||
|
@ -392,6 +406,9 @@ void lt_tx_real(const char *file, uint32_t line, enum helper_pdu_opcode opcode,
|
|||
rx = malloc(PDU_RX_NODE_SIZE);
|
||||
zassert_not_null(rx, "Out of memory.\nCalled at %s:%d\n", file, line);
|
||||
|
||||
/* Remember RX node to allow for correct release */
|
||||
rx_malloc_store = rx;
|
||||
|
||||
/* Encode node_rx_pdu if required by particular procedure */
|
||||
if (helper_node_encode[opcode]) {
|
||||
helper_node_encode[opcode](rx, param);
|
||||
|
@ -411,10 +428,25 @@ void lt_tx_real_no_encode(const char *file, uint32_t line, struct pdu_data *pdu,
|
|||
|
||||
rx = malloc(PDU_RX_NODE_SIZE);
|
||||
zassert_not_null(rx, "Out of memory.\nCalled at %s:%d\n", file, line);
|
||||
|
||||
/* Remember RX node to allow for correct release */
|
||||
rx_malloc_store = rx;
|
||||
|
||||
memcpy((struct pdu_data *)rx->pdu, pdu, sizeof(struct pdu_data));
|
||||
sys_slist_append(<_tx_q, (sys_snode_t *)rx);
|
||||
}
|
||||
|
||||
void release_ntf(struct node_rx_pdu *ntf)
|
||||
{
|
||||
if (ntf == rx_malloc_store) {
|
||||
free(ntf);
|
||||
return;
|
||||
}
|
||||
|
||||
ntf->hdr.next = NULL;
|
||||
ll_rx_mem_release((void **)&ntf);
|
||||
}
|
||||
|
||||
void lt_rx_real(const char *file, uint32_t line, enum helper_pdu_opcode opcode,
|
||||
struct ll_conn *conn, struct node_tx **tx_ref, void *param)
|
||||
{
|
||||
|
|
|
@ -153,7 +153,7 @@ ZTEST(cis_create, test_cc_create_periph_rem_host_accept)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
|
||||
/* Accept request */
|
||||
ull_cp_cc_accept(&conn, 0U);
|
||||
|
@ -195,7 +195,15 @@ ZTEST(cis_create, test_cc_create_periph_rem_host_accept)
|
|||
ut_rx_q_is_empty();
|
||||
}
|
||||
|
||||
/* Prepare */
|
||||
event_prepare(&conn);
|
||||
|
||||
/* Done */
|
||||
event_done(&conn);
|
||||
|
||||
/* Emulate CIS becoming established */
|
||||
ull_cp_cc_established(&conn, 0);
|
||||
|
||||
/* Prepare */
|
||||
event_prepare(&conn);
|
||||
|
||||
|
@ -273,6 +281,9 @@ ZTEST(cis_create, test_cc_create_periph_rem_host_reject)
|
|||
ut_rx_node(NODE_CIS_REQUEST, &ntf, &cis_req);
|
||||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
release_ntf(ntf);
|
||||
|
||||
/* Decline request */
|
||||
ull_cp_cc_reject(&conn, ERROR_CODE);
|
||||
|
||||
|
@ -327,6 +338,10 @@ ZTEST(cis_create, test_cc_create_periph_rem_host_accept_to)
|
|||
.error_code = BT_HCI_ERR_CONN_ACCEPT_TIMEOUT,
|
||||
.reject_opcode = PDU_DATA_LLCTRL_TYPE_CIS_REQ
|
||||
};
|
||||
struct node_rx_conn_iso_estab cis_estab = {
|
||||
.cis_handle = 0x00,
|
||||
.status = BT_HCI_ERR_CONN_ACCEPT_TIMEOUT
|
||||
};
|
||||
|
||||
/* Role */
|
||||
test_set_role(&conn, BT_HCI_ROLE_PERIPHERAL);
|
||||
|
@ -347,6 +362,9 @@ ZTEST(cis_create, test_cc_create_periph_rem_host_accept_to)
|
|||
ut_rx_node(NODE_CIS_REQUEST, &ntf, &cis_req);
|
||||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
release_ntf(ntf);
|
||||
|
||||
/* Emulate that time passes real fast re. timeout */
|
||||
conn.connect_accept_to = 0;
|
||||
|
||||
|
@ -366,6 +384,13 @@ ZTEST(cis_create, test_cc_create_periph_rem_host_accept_to)
|
|||
/* Done */
|
||||
event_done(&conn);
|
||||
|
||||
/* There should be exactly one host notification */
|
||||
ut_rx_node(NODE_CIS_ESTABLISHED, &ntf, &cis_estab);
|
||||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
release_ntf(ntf);
|
||||
|
||||
zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
|
||||
"Free CTX buffers %d", llcp_ctx_buffers_free());
|
||||
}
|
||||
|
|
|
@ -309,7 +309,7 @@ ZTEST(collision, test_phy_update_central_loc_collision)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
|
||||
zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
|
||||
"Free CTX buffers %d", llcp_ctx_buffers_free());
|
||||
|
@ -430,7 +430,7 @@ ZTEST(collision, test_phy_update_central_rem_collision)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
|
||||
/* Prepare */
|
||||
event_prepare(&conn);
|
||||
|
@ -481,7 +481,7 @@ ZTEST(collision, test_phy_update_central_rem_collision)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
|
||||
zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
|
||||
"Free CTX buffers %d", llcp_ctx_buffers_free());
|
||||
|
@ -551,13 +551,19 @@ ZTEST(collision, test_phy_update_periph_loc_collision)
|
|||
/* Done */
|
||||
event_done(&conn);
|
||||
|
||||
/* Prepare */
|
||||
event_prepare(&conn);
|
||||
|
||||
/* Done */
|
||||
event_done(&conn);
|
||||
|
||||
/* There should be one host notification */
|
||||
pu.status = BT_HCI_ERR_LL_PROC_COLLISION;
|
||||
ut_rx_node(NODE_PHY_UPDATE, &ntf, &pu);
|
||||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
|
||||
/* Prepare */
|
||||
event_prepare(&conn);
|
||||
|
@ -605,7 +611,7 @@ ZTEST(collision, test_phy_update_periph_loc_collision)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
|
||||
zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
|
||||
"Free CTX buffers %d", llcp_ctx_buffers_free());
|
||||
|
@ -744,7 +750,7 @@ ZTEST(collision, test_phy_conn_update_central_loc_collision)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
|
||||
"Free CTX buffers %d", llcp_ctx_buffers_free());
|
||||
}
|
||||
|
|
|
@ -323,7 +323,7 @@ ZTEST(central_loc, test_conn_update_central_loc_accept)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
|
||||
"Free CTX buffers %d", llcp_ctx_buffers_free());
|
||||
}
|
||||
|
@ -581,7 +581,7 @@ ZTEST(central_loc, test_conn_update_central_loc_accept_reject_2nd_cpr)
|
|||
ull_cp_release_tx(&conn_3rd, tx);
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
|
||||
/* One less CTXs as the conn_3rd CPR is still 'running' */
|
||||
zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt()-1,
|
||||
|
@ -794,7 +794,7 @@ ZTEST(central_loc, test_conn_update_central_loc_reject)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
|
||||
"Free CTX buffers %d", llcp_ctx_buffers_free());
|
||||
}
|
||||
|
@ -919,7 +919,7 @@ ZTEST(central_loc, test_conn_update_central_loc_remote_legacy)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
|
||||
"Free CTX buffers %d", llcp_ctx_buffers_free());
|
||||
}
|
||||
|
@ -1044,7 +1044,7 @@ ZTEST(central_loc, test_conn_update_central_loc_unsupp_wo_feat_exch)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
|
||||
"Free CTX buffers %d", llcp_ctx_buffers_free());
|
||||
}
|
||||
|
@ -1140,7 +1140,7 @@ ZTEST(central_loc, test_conn_update_central_loc_unsupp_w_feat_exch)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
|
||||
"Free CTX buffers %d", llcp_ctx_buffers_free());
|
||||
}
|
||||
|
@ -1307,7 +1307,7 @@ ZTEST(central_loc, test_conn_update_central_loc_collision)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
|
||||
"Free CTX buffers %d", llcp_ctx_buffers_free());
|
||||
}
|
||||
|
@ -1372,7 +1372,7 @@ ZTEST(central_rem, test_conn_update_central_rem_accept)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
|
||||
/*******************/
|
||||
|
||||
|
@ -1427,7 +1427,7 @@ ZTEST(central_rem, test_conn_update_central_rem_accept)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
|
||||
"Free CTX buffers %d", llcp_ctx_buffers_free());
|
||||
}
|
||||
|
@ -1544,7 +1544,7 @@ ZTEST(central_rem, test_conn_update_central_rem_reject)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
|
||||
/*******************/
|
||||
|
||||
|
@ -1683,7 +1683,7 @@ ZTEST(central_rem, test_conn_update_central_rem_collision)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
|
||||
/*******************/
|
||||
|
||||
|
@ -1744,7 +1744,7 @@ ZTEST(central_rem, test_conn_update_central_rem_collision)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
|
||||
/* Prepare */
|
||||
event_prepare(&conn);
|
||||
|
@ -1802,7 +1802,7 @@ ZTEST(central_rem, test_conn_update_central_rem_collision)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
|
||||
"Free CTX buffers %d", llcp_ctx_buffers_free());
|
||||
}
|
||||
|
@ -1906,7 +1906,7 @@ ZTEST(periph_loc, test_conn_update_periph_loc_accept)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
|
||||
"Free CTX buffers %d", llcp_ctx_buffers_free());
|
||||
}
|
||||
|
@ -1986,7 +1986,7 @@ ZTEST(periph_loc, test_conn_update_periph_loc_reject)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
|
||||
"Free CTX buffers %d", llcp_ctx_buffers_free());
|
||||
}
|
||||
|
@ -2066,7 +2066,7 @@ ZTEST(periph_loc, test_conn_update_periph_loc_unsupp_feat_wo_feat_exch)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
|
||||
"Free CTX buffers %d", llcp_ctx_buffers_free());
|
||||
}
|
||||
|
@ -2234,7 +2234,7 @@ ZTEST(periph_loc, test_conn_update_periph_loc_collision)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
|
||||
/*******************/
|
||||
|
||||
|
@ -2262,7 +2262,7 @@ ZTEST(periph_loc, test_conn_update_periph_loc_collision)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
|
||||
/* Prepare */
|
||||
event_prepare(&conn);
|
||||
|
@ -2306,7 +2306,7 @@ ZTEST(periph_loc, test_conn_update_periph_loc_collision)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
|
||||
"Free CTX buffers %d", llcp_ctx_buffers_free());
|
||||
}
|
||||
|
@ -2376,7 +2376,7 @@ ZTEST(periph_rem, test_conn_update_periph_rem_accept)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
|
||||
/*******************/
|
||||
|
||||
|
@ -2437,7 +2437,7 @@ ZTEST(periph_rem, test_conn_update_periph_rem_accept)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
|
||||
"Free CTX buffers %d", llcp_ctx_buffers_free());
|
||||
}
|
||||
|
@ -3213,7 +3213,7 @@ ZTEST(periph_loc, test_conn_update_periph_loc_collision_reject_2nd_cpr)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
|
||||
/*******************/
|
||||
|
||||
|
@ -3241,7 +3241,7 @@ ZTEST(periph_loc, test_conn_update_periph_loc_collision_reject_2nd_cpr)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
|
||||
{
|
||||
/* Initiate a parallel local Connection Parameter Request Procedure */
|
||||
|
@ -3326,7 +3326,7 @@ ZTEST(periph_loc, test_conn_update_periph_loc_collision_reject_2nd_cpr)
|
|||
}
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
|
||||
/* One less CTXs as the conn_2nd CPR is still 'running' */
|
||||
zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt()-1,
|
||||
|
@ -3487,7 +3487,7 @@ ZTEST(periph_rem, test_conn_update_periph_rem_accept_reject_2nd_cpr)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
|
||||
/*******************/
|
||||
|
||||
|
@ -3587,7 +3587,7 @@ ZTEST(periph_rem, test_conn_update_periph_rem_accept_reject_2nd_cpr)
|
|||
}
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
|
||||
/* One less CTXs as the conn_2nd CPR is still 'running' */
|
||||
zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt()-1,
|
||||
|
@ -3720,7 +3720,7 @@ ZTEST(periph_rem, test_conn_update_periph_rem_invalid_ind)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
|
||||
/*******************/
|
||||
|
||||
|
@ -3781,7 +3781,7 @@ ZTEST(periph_rem, test_conn_update_periph_rem_invalid_ind)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
|
||||
/*******************/
|
||||
|
||||
|
@ -3843,7 +3843,7 @@ ZTEST(periph_rem, test_conn_update_periph_rem_invalid_ind)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
|
||||
/*******************/
|
||||
|
||||
|
@ -3942,7 +3942,7 @@ ZTEST(periph_rem, test_conn_update_periph_rem_reject)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
|
||||
/*******************/
|
||||
|
||||
|
@ -4081,7 +4081,7 @@ ZTEST(periph_rem, test_conn_update_periph_rem_collision)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
|
||||
/*******************/
|
||||
|
||||
|
@ -4146,7 +4146,7 @@ ZTEST(periph_rem, test_conn_update_periph_rem_collision)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
|
||||
/* Prepare */
|
||||
event_prepare(&conn);
|
||||
|
@ -4190,7 +4190,7 @@ ZTEST(periph_rem, test_conn_update_periph_rem_collision)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
|
||||
"Free CTX buffers %d", llcp_ctx_buffers_free());
|
||||
}
|
||||
|
@ -4296,7 +4296,7 @@ ZTEST(central_loc_no_param_req, test_conn_update_central_loc_accept_no_param_req
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
}
|
||||
} while (parameters_changed-- > 0U);
|
||||
|
||||
|
@ -4539,7 +4539,7 @@ ZTEST(periph_rem_no_param_req, test_conn_update_periph_rem_accept_no_param_req)
|
|||
ut_rx_q_is_empty();
|
||||
|
||||
/* Release Ntf */
|
||||
ull_cp_release_ntf(ntf);
|
||||
release_ntf(ntf);
|
||||
}
|
||||
} while (parameters_changed-- > 0U);
|
||||
|
||||
|
|
|
@ -4539,7 +4539,7 @@ ZTEST(periph_rem_no_param_req, test_conn_update_periph_rem_accept_no_param_req)
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);
}
} while (parameters_changed-- > 0U);

@ -133,7 +133,7 @@ ZTEST(cte_req_after_fex, test_cte_req_central_local)
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);

/* Release tx node */
ull_cp_release_tx(&conn, tx);

@ -208,7 +208,7 @@ ZTEST(cte_req_after_fex, test_cte_req_peripheral_local)
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);

/* Release tx node */
ull_cp_release_tx(&conn, tx);

@ -427,7 +427,7 @@ ZTEST(cte_req_after_fex, test_cte_req_rejected_inv_ll_param_central_local)
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);

/* Release tx node */
ull_cp_release_tx(&conn, tx);

@ -506,7 +506,7 @@ ZTEST(cte_req_after_fex, test_cte_req_rejected_inv_ll_param_peripheral_local)
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);

/* Release tx node */
ull_cp_release_tx(&conn, tx);

@ -726,7 +726,7 @@ static void test_cte_req_ll_unknown_rsp_local(uint8_t role)
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);

/* Release tx node */
ull_cp_release_tx(&conn, tx);

@ -861,7 +861,7 @@ static void run_local_cte_req(struct pdu_data_llctrl_cte_req *cte_req)
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);

/* Release tx node */
ull_cp_release_tx(&conn, tx);

@ -918,7 +918,7 @@ static void check_phy_update(bool is_local, struct pdu_data_llctrl_phy_req *phy_
}

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);

/* The RX queue should be empty now */
ut_rx_q_is_empty();

@ -97,13 +97,6 @@ ZTEST(dle_central, test_data_length_update_central_loc)
ull_conn_default_tx_time_set(2120);
ull_dle_init(&conn, PHY_1M);

/* Steal all ntf buffers, so as to check that the wait_ntf mechanism works */
while (ll_pdu_rx_alloc_peek(1)) {
ntf = ll_pdu_rx_alloc();
/* Make sure we use a correct type or the release won't work */
ntf->hdr.type = NODE_RX_TYPE_DC_PDU;
}

/* Initiate a Data Length Update Procedure */
err = ull_cp_data_length_update(&conn, 211, 1800);
zassert_equal(err, BT_HCI_ERR_SUCCESS);

@ -121,19 +114,14 @@ ZTEST(dle_central, test_data_length_update_central_loc)

event_done(&conn);

ut_rx_q_is_empty();

/* Release Ntf, so next cycle will generate NTF and complete procedure */
ull_cp_release_ntf(ntf);

event_prepare(&conn);
event_done(&conn);

/* There should be one host notification */
ut_rx_pdu(LL_LENGTH_RSP, &ntf, &length_ntf);
ut_rx_q_is_empty();
zassert_equal(conn.lll.event_counter, 2, "Wrong event-count %d\n",
zassert_equal(conn.lll.event_counter, 1, "Wrong event-count %d\n",
conn.lll.event_counter);

zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
"Free CTX buffers %d", llcp_ctx_buffers_free());
}

/*

@ -613,13 +601,6 @@ ZTEST(dle_periph, test_data_length_update_periph_rem)
ull_conn_default_tx_time_set(1800);
ull_dle_init(&conn, PHY_1M);

/* Steal all ntf buffers, so as to check that the wait_ntf mechanism works */
while (ll_pdu_rx_alloc_peek(1)) {
ntf = ll_pdu_rx_alloc();
/* Make sure we use a correct type or the release won't work */
ntf->hdr.type = NODE_RX_TYPE_DC_PDU;
}

event_prepare(&conn);

/* Tx Queue should have one LL Control PDU */

@ -636,13 +617,6 @@ ZTEST(dle_periph, test_data_length_update_periph_rem)
/* TX Ack */
event_tx_ack(&conn, tx);

event_done(&conn);
ut_rx_q_is_empty();

/* Release Ntf, so next cycle will generate NTF and complete procedure */
ull_cp_release_ntf(ntf);

event_prepare(&conn);
event_done(&conn);

ut_rx_pdu(LL_LENGTH_RSP, &ntf, &length_ntf);

@ -290,7 +290,7 @@ ZTEST(encryption_start, test_encryption_start_central_loc)
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);

zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
"Free CTX buffers %d", llcp_ctx_buffers_free());

@ -392,13 +392,6 @@ ZTEST(encryption_start, test_encryption_start_central_loc_limited_memory)
/* Dummy remove, as above loop might queue up ctx */
llcp_tx_alloc_unpeek(ctx);

/* Steal all ntf buffers */
while (ll_pdu_rx_alloc_peek(1)) {
ntf = ll_pdu_rx_alloc();
/* Make sure we use a correct type or the release won't work */
ntf->hdr.type = NODE_RX_TYPE_DC_PDU;
}

/* Initiate an Encryption Start Procedure */
err = ull_cp_encryption_start(&conn, rand, ediv, ltk);
zassert_equal(err, BT_HCI_ERR_SUCCESS);

@ -485,32 +478,12 @@ ZTEST(encryption_start, test_encryption_start_central_loc_limited_memory)
CHECK_RX_PE_STATE(conn, RESUMED, ENCRYPTED); /* Rx enc. */
CHECK_TX_PE_STATE(conn, RESUMED, ENCRYPTED); /* Tx enc. */

/* There should be no host notifications */
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);

/* Prepare */
event_prepare(&conn);

/* Check state */
CHECK_RX_PE_STATE(conn, RESUMED, ENCRYPTED); /* Rx enc. */
CHECK_TX_PE_STATE(conn, RESUMED, ENCRYPTED); /* Tx enc. */

/* Done */
event_done(&conn);

/* Check state */
CHECK_RX_PE_STATE(conn, RESUMED, ENCRYPTED); /* Rx enc. */
CHECK_TX_PE_STATE(conn, RESUMED, ENCRYPTED); /* Tx enc. */

/* There should be one host notification */
ut_rx_pdu(LL_START_ENC_RSP, &ntf, NULL);
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);

/* Tx Encryption should be enabled */
zassert_equal(conn.lll.enc_tx, 1U);

@ -621,7 +594,7 @@ ZTEST(encryption_start, test_encryption_start_central_loc_reject_ext)
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);

zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
"Free CTX buffers %d", llcp_ctx_buffers_free());

@ -718,7 +691,7 @@ ZTEST(encryption_start, test_encryption_start_central_loc_reject)
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);

zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
"Free CTX buffers %d", llcp_ctx_buffers_free());

@ -829,7 +802,7 @@ ZTEST(encryption_start, test_encryption_start_central_loc_no_ltk)
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);

zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
"Free CTX buffers %d", llcp_ctx_buffers_free());

@ -935,7 +908,7 @@ ZTEST(encryption_start, test_encryption_start_central_loc_no_ltk_2)
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);

zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
"Free CTX buffers %d", llcp_ctx_buffers_free());

@ -1197,7 +1170,7 @@ ZTEST(encryption_start, test_encryption_start_periph_rem)
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);

/* LTK request reply */
ull_cp_ltk_req_reply(&conn, ltk);

@ -1379,13 +1352,6 @@ ZTEST(encryption_start, test_encryption_start_periph_rem_limited_memory)
/* Dummy remove, as above loop might queue up ctx */
llcp_tx_alloc_unpeek(ctx);

/* Steal all ntf buffers */
while (ll_pdu_rx_alloc_peek(1)) {
ntf = ll_pdu_rx_alloc();
/* Make sure we use a correct type or the release won't work */
ntf->hdr.type = NODE_RX_TYPE_DC_PDU;
}

/* Check state */
CHECK_RX_PE_STATE(conn, RESUMED, UNENCRYPTED); /* Rx unenc. */
CHECK_TX_PE_STATE(conn, RESUMED, UNENCRYPTED); /* Tx unenc. */

@ -1424,30 +1390,13 @@ ZTEST(encryption_start, test_encryption_start_periph_rem_limited_memory)
CHECK_RX_PE_STATE(conn, PAUSED, UNENCRYPTED); /* Rx paused & unenc. */
CHECK_TX_PE_STATE(conn, PAUSED, UNENCRYPTED); /* Tx paused & unenc. */

/* Done */
event_done(&conn);

/* Check state */
CHECK_RX_PE_STATE(conn, PAUSED, UNENCRYPTED); /* Rx paused & unenc. */
CHECK_TX_PE_STATE(conn, PAUSED, UNENCRYPTED); /* Tx paused & unenc. */

/* There should not be a host notification */
ut_rx_q_is_empty();

/* Release ntf */
ull_cp_release_ntf(ntf);

/* Prepare */
event_prepare(&conn);

/* Check state */
CHECK_RX_PE_STATE(conn, PAUSED, UNENCRYPTED); /* Rx paused & unenc. */
CHECK_TX_PE_STATE(conn, PAUSED, UNENCRYPTED); /* Tx paused & unenc. */

/* There should be one host notification */
ut_rx_pdu(LL_ENC_REQ, &ntf, &enc_req);
ut_rx_q_is_empty();

/* Release ntf */
release_ntf(ntf);

/* Done */
event_done(&conn);

@ -1523,19 +1472,13 @@ ZTEST(encryption_start, test_encryption_start_periph_rem_limited_memory)
CHECK_RX_PE_STATE(conn, PAUSED, ENCRYPTED); /* Rx paused & enc. */
CHECK_TX_PE_STATE(conn, PAUSED, UNENCRYPTED); /* Tx paused & unenc. */

/* There should not be a host notification */
ut_rx_q_is_empty();

/* Release ntf */
ull_cp_release_ntf(ntf);

/* Prepare */
event_prepare(&conn);

/* There should be one host notification */
ut_rx_pdu(LL_START_ENC_RSP, &ntf, NULL);
ut_rx_q_is_empty();

/* Prepare */
event_prepare(&conn);

/* Tx Queue should not have a LL Control PDU */
lt_rx_q_is_empty(&conn);

@ -1687,7 +1630,7 @@ ZTEST(encryption_start, test_encryption_start_periph_rem_no_ltk)
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);

/* LTK request reply */
ull_cp_ltk_req_neq_reply(&conn);

@ -1827,7 +1770,7 @@ ZTEST(encryption_start, test_encryption_start_periph_rem_mic)
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);

/* Prepare */
event_prepare(&conn);

@ -2012,7 +1955,7 @@ ZTEST(encryption_pause, test_encryption_pause_central_loc)
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);

/* Tx Encryption should be enabled */
zassert_equal(conn.lll.enc_tx, 1U);

@ -2129,7 +2072,7 @@ ZTEST(encryption_pause, test_encryption_pause_periph_rem)
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);

/* LTK request reply */
ull_cp_ltk_req_reply(&conn, ltk);

@ -129,7 +129,7 @@ ZTEST(fex_central, test_feat_exchange_central_loc)
ut_rx_q_is_empty();

ull_cp_release_tx(&conn, tx);
ull_cp_release_ntf(ntf);
release_ntf(ntf);
}

/* Test that host enabled feature makes it into feature exchange */

@ -159,7 +159,7 @@ ZTEST(fex_central, test_feat_exchange_central_loc)
ut_rx_q_is_empty();

ull_cp_release_tx(&conn, tx);
ull_cp_release_ntf(ntf);
release_ntf(ntf);

/* Remove host feature bit again */
ll_set_host_feature(BT_LE_FEAT_BIT_ISO_CHANNELS, 0);

@ -432,7 +432,7 @@ ZTEST(fex_central, test_feat_exchange_central_rem_2)
lt_rx_q_is_empty(&conn);

ull_cp_release_tx(&conn, tx);
ull_cp_release_ntf(ntf);
release_ntf(ntf);
}

zassert_equal(conn.lll.event_counter, CENTRAL_NR_OF_EVENTS * (feat_to_test),

@ -460,13 +460,6 @@ ZTEST(fex_periph, test_peripheral_feat_exchange_periph_loc)
/* Connect */
ull_cp_state_set(&conn, ULL_CP_CONNECTED);

/* Steal all ntf buffers, so as to check that the wait_ntf mechanism works */
while (ll_pdu_rx_alloc_peek(1)) {
ntf = ll_pdu_rx_alloc();
/* Make sure we use a correct type or the release won't work */
ntf->hdr.type = NODE_RX_TYPE_DC_PDU;
}

/* Initiate a Feature Exchange Procedure */
err = ull_cp_feature_exchange(&conn, 1U);
zassert_equal(err, BT_HCI_ERR_SUCCESS);

@ -481,19 +474,11 @@ ZTEST(fex_periph, test_peripheral_feat_exchange_periph_loc)

event_done(&conn);

ut_rx_q_is_empty();

/* Release Ntf, so next cycle will generate NTF and complete procedure */
ull_cp_release_ntf(ntf);

event_prepare(&conn);
event_done(&conn);

/* There should be one host notification */
ut_rx_pdu(LL_FEATURE_RSP, &ntf, &remote_feature_rsp);
ut_rx_q_is_empty();
zassert_equal(conn.lll.event_counter, 2, "Wrong event-count %d\n",
zassert_equal(conn.lll.event_counter, 1, "Wrong event-count %d\n",
conn.lll.event_counter);
zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
"Free CTX buffers %d", llcp_ctx_buffers_free());

@ -519,15 +504,7 @@ ZTEST(fex_periph, test_feat_exchange_periph_loc_unknown_rsp)

ull_cp_state_set(&conn, ULL_CP_CONNECTED);

/* Steal all ntf buffers, so as to check that the wait_ntf mechanism works */
while (ll_pdu_rx_alloc_peek(1)) {
ntf = ll_pdu_rx_alloc();
/* Make sure we use a correct type or the release won't work */
ntf->hdr.type = NODE_RX_TYPE_DC_PDU;
}

/* Initiate a Feature Exchange Procedure */

event_prepare(&conn);
err = ull_cp_feature_exchange(&conn, 1U);
zassert_equal(err, BT_HCI_ERR_SUCCESS);

@ -545,17 +522,9 @@ ZTEST(fex_periph, test_feat_exchange_periph_loc_unknown_rsp)

event_done(&conn);

ut_rx_q_is_empty();

/* Release Ntf, so next cycle will generate NTF and complete procedure */
ull_cp_release_ntf(ntf);

event_prepare(&conn);
event_done(&conn);

ut_rx_pdu(LL_UNKNOWN_RSP, &ntf, &unknown_rsp);
ut_rx_q_is_empty();
zassert_equal(conn.lll.event_counter, 3, "Wrong event-count %d\n",
zassert_equal(conn.lll.event_counter, 2, "Wrong event-count %d\n",
conn.lll.event_counter);
zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
"Free CTX buffers %d", llcp_ctx_buffers_free());

@ -136,7 +136,7 @@ ZTEST(hci_fex, test_hci_feat_exchange_central_loc)
"Wrong event count %d\n", conn_from_pool->lll.event_counter);

ull_cp_release_tx(conn_from_pool, tx);
ull_cp_release_ntf(ntf);
release_ntf(ntf);

ll_conn_release(conn_from_pool);
}

@ -120,7 +120,7 @@ ZTEST(hci_fex, test_hci_feature_exchange)
zassert_equal(conn_from_pool->lll.event_counter, 1, "Wrong event count %d\n",
conn_from_pool->lll.event_counter);
ull_cp_release_tx(conn_from_pool, tx);
ull_cp_release_ntf(ntf);
release_ntf(ntf);

ll_conn_release(conn_from_pool);
}

@ -186,7 +186,7 @@ ZTEST(hci_version, test_hci_version_ind)
zassert_equal(conn_from_pool->lll.event_counter, 1, "Wrong event count %d\n",
conn_from_pool->lll.event_counter);
ull_cp_release_tx(conn_from_pool, tx);
ull_cp_release_ntf(ntf);
release_ntf(ntf);

ll_conn_release(conn_from_pool);
}

@ -37,6 +37,7 @@
#include "ull_iso_types.h"
#include "ull_conn_iso_types.h"

#include "ull_internal.h"
#include "ull_conn_types.h"
#include "ull_llcp.h"
#include "ull_conn_internal.h"

@ -120,7 +121,7 @@ ZTEST(phy_central, test_phy_update_central_loc)
struct pdu_data_llctrl_phy_req req = { .rx_phys = PHY_2M, .tx_phys = PHY_2M };
struct pdu_data_llctrl_phy_req rsp = { .rx_phys = PHY_1M | PHY_2M,
.tx_phys = PHY_1M | PHY_2M };
struct pdu_data_llctrl_phy_upd_ind ind = { .instant = 7,
struct pdu_data_llctrl_phy_upd_ind ind = { .instant = 8,
.c_to_p_phy = PHY_2M,
.p_to_c_phy = PHY_2M };
struct pdu_data_llctrl_length_rsp length_ntf = {

@ -144,6 +145,13 @@ ZTEST(phy_central, test_phy_update_central_loc)
err = ull_cp_phy_update(&conn, PHY_2M, PREFER_S8_CODING, PHY_2M, HOST_INITIATED);
zassert_equal(err, BT_HCI_ERR_SUCCESS);

/* Steal all ntf buffers, to trigger TX stall on non avail of NTF buffer for DLE */
while (ll_pdu_rx_alloc_peek(1)) {
ntf = ll_pdu_rx_alloc();
/* Make sure we use a correct type or the release won't work */
ntf->hdr.type = NODE_RX_TYPE_DC_PDU;
}

/* Prepare */
event_prepare(&conn);

@ -169,6 +177,18 @@ ZTEST(phy_central, test_phy_update_central_loc)
/* Prepare */
event_prepare(&conn);

/* No TX yet as unable to pre-allocate NTF buffer for DLE */
lt_rx_q_is_empty(&conn);

/* Done */
event_done(&conn);

/* Release RX node to now allow pre-alloc for DLE NTF */
release_ntf(ntf);

/* Prepare */
event_prepare(&conn);

/* Tx Queue should have one LL Control PDU */
lt_rx(LL_PHY_UPDATE_IND, &conn, &tx, &ind);
lt_rx_q_is_empty(&conn);

@ -217,11 +237,15 @@ ZTEST(phy_central, test_phy_update_central_loc)

/* There should be two host notifications, one pu and one dle */
ut_rx_node(NODE_PHY_UPDATE, &ntf, &pu);
/* Release Ntf */
release_ntf(ntf);

ut_rx_pdu(LL_LENGTH_RSP, &ntf, &length_ntf);
/* Release Ntf */
release_ntf(ntf);

ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);

CHECK_CURRENT_PHY_STATE(conn, PHY_2M, PREFER_S8_CODING, PHY_2M);
CHECK_PREF_PHY_STATE(conn, PHY_2M, PHY_2M);

@ -318,12 +342,18 @@ ZTEST(phy_central, test_phy_update_central_loc_unsupp_feat)
/* Release Tx */
ull_cp_release_tx(&conn, tx);

/* Prepare */
event_prepare(&conn);

/* Done */
event_done(&conn);

/* There should be one host notification */
ut_rx_node(NODE_PHY_UPDATE, &ntf, &pu);
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);

zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
"Free CTX buffers %d", llcp_ctx_buffers_free());

@ -335,7 +365,7 @@ ZTEST(phy_central, test_phy_update_central_rem)
struct node_rx_pdu *ntf;
struct pdu_data *pdu;
struct pdu_data_llctrl_phy_req req = { .rx_phys = PHY_1M, .tx_phys = PHY_2M };
struct pdu_data_llctrl_phy_upd_ind ind = { .instant = 7,
struct pdu_data_llctrl_phy_upd_ind ind = { .instant = 8,
.c_to_p_phy = 0,
.p_to_c_phy = PHY_2M };
uint16_t instant;

@ -348,6 +378,13 @@ ZTEST(phy_central, test_phy_update_central_rem)
/* Connect */
ull_cp_state_set(&conn, ULL_CP_CONNECTED);

/* Steal all ntf buffers, to trigger TX stall on non avail of NTF buffer for DLE */
while (ll_pdu_rx_alloc_peek(1)) {
ntf = ll_pdu_rx_alloc();
/* Make sure we use a correct type or the release won't work */
ntf->hdr.type = NODE_RX_TYPE_DC_PDU;
}

/* Prepare */
event_prepare(&conn);

@ -360,6 +397,18 @@ ZTEST(phy_central, test_phy_update_central_rem)
/* Done */
event_done(&conn);

/* Prepare */
event_prepare(&conn);

/* No TX yet as unable to pre-allocate NTF buffer for DLE */
lt_rx_q_is_empty(&conn);

/* Done */
event_done(&conn);

/* Release RX node to now allow pre-alloc for DLE NTF */
release_ntf(ntf);

/* Check that data tx was paused */
zassert_equal(conn.tx_q.pause_data, 1U, "Data tx is not paused");

@ -415,7 +464,7 @@ ZTEST(phy_central, test_phy_update_central_rem)
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);
CHECK_CURRENT_PHY_STATE(conn, PHY_1M, PREFER_S8_CODING, PHY_2M);
CHECK_PREF_PHY_STATE(conn, PHY_1M | PHY_2M | PHY_CODED, PHY_1M | PHY_2M | PHY_CODED);

@ -504,7 +553,7 @@ ZTEST(phy_periph, test_phy_update_periph_loc)
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);
CHECK_CURRENT_PHY_STATE(conn, PHY_2M, PREFER_S8_CODING, PHY_2M);
CHECK_PREF_PHY_STATE(conn, PHY_2M, PHY_2M);

@ -602,7 +651,7 @@ ZTEST(phy_periph, test_phy_update_periph_rem)
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);

CHECK_CURRENT_PHY_STATE(conn, PHY_2M, PREFER_S8_CODING, PHY_1M);
CHECK_PREF_PHY_STATE(conn, PHY_1M | PHY_2M | PHY_CODED, PHY_1M | PHY_2M | PHY_CODED);

@ -651,12 +700,18 @@ ZTEST(phy_periph, test_phy_update_periph_loc_unsupp_feat)
/* Release Tx */
ull_cp_release_tx(&conn, tx);

/* Prepare */
event_prepare(&conn);

/* Done */
event_done(&conn);

/* There should be one host notification */
ut_rx_node(NODE_PHY_UPDATE, &ntf, &pu);
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);

zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
"Free CTX buffers %d", llcp_ctx_buffers_free());

@ -881,7 +936,7 @@ ZTEST(phy_central, test_phy_update_central_loc_collision)
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);

zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
"Free CTX buffers %d", llcp_ctx_buffers_free());

@ -1007,7 +1062,7 @@ ZTEST(phy_central, test_phy_update_central_rem_collision)
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);

/* Prepare */
event_prepare(&conn);

@ -1058,7 +1113,7 @@ ZTEST(phy_central, test_phy_update_central_rem_collision)
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);

zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
"Free CTX buffers %d", llcp_ctx_buffers_free());

@ -1128,13 +1183,19 @@ ZTEST(phy_periph, test_phy_update_periph_loc_collision)
/* Done */
event_done(&conn);

/* Prepare */
event_prepare(&conn);

/* Done */
event_done(&conn);

/* There should be one host notification */
pu.status = BT_HCI_ERR_LL_PROC_COLLISION;
ut_rx_node(NODE_PHY_UPDATE, &ntf, &pu);
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);

/* Prepare */
event_prepare(&conn);

@ -1182,7 +1243,7 @@ ZTEST(phy_periph, test_phy_update_periph_loc_collision)
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);

zassert_equal(llcp_ctx_buffers_free(), test_ctx_buffers_cnt(),
"Free CTX buffers %d", llcp_ctx_buffers_free());

@ -1265,7 +1326,7 @@ ZTEST(phy_central, test_phy_update_central_loc_no_act_change)
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);

CHECK_CURRENT_PHY_STATE(conn, PHY_1M, PREFER_S8_CODING, PHY_1M);
CHECK_PREF_PHY_STATE(conn, PHY_1M, PHY_1M);

@ -1388,12 +1449,18 @@ ZTEST(phy_periph, test_phy_update_periph_loc_no_actual_change)
/* Done */
event_done(&conn);

/* Prepare */
event_prepare(&conn);

/* Done */
event_done(&conn);

/* There should be one notification due to Host initiated PHY UPD */
ut_rx_node(NODE_PHY_UPDATE, &ntf, &pu);
ut_rx_q_is_empty();

/* Release Ntf */
ull_cp_release_ntf(ntf);
release_ntf(ntf);

CHECK_CURRENT_PHY_STATE(conn, PHY_1M, PREFER_S8_CODING, PHY_1M);
CHECK_PREF_PHY_STATE(conn, PHY_1M, PHY_1M);

@ -1459,6 +1526,12 @@ ZTEST(phy_periph, test_phy_update_periph_rem_no_actual_change)
/* There should be no host notification */
ut_rx_q_is_empty();

/* Prepare */
event_prepare(&conn);

/* Done */
event_done(&conn);

CHECK_CURRENT_PHY_STATE(conn, PHY_1M, PREFER_S8_CODING, PHY_1M);
CHECK_PREF_PHY_STATE(conn, PHY_1M | PHY_2M | PHY_CODED, PHY_1M | PHY_2M | PHY_CODED);

@ -137,6 +137,7 @@ void ll_rx_mem_release(void **node_rx)
case NODE_RX_TYPE_ENC_REFRESH:
case NODE_RX_TYPE_PHY_UPDATE:
case NODE_RX_TYPE_CIS_REQUEST:
case NODE_RX_TYPE_CIS_ESTABLISHED:

ll_rx_link_inc_quota(1);
mem_release(rx_free, &mem_pdu_rx.free);

@ -179,7 +180,10 @@ void ll_rx_release(void *node_rx)

void ll_rx_put(memq_link_t *link, void *rx)
{
sys_slist_append(&ut_rx_q, (sys_snode_t *)rx);
if (((struct node_rx_hdr *)rx)->type != NODE_RX_TYPE_RELEASE) {
/* Only put/sched if node was not marked for release */
sys_slist_append(&ut_rx_q, (sys_snode_t *)rx);
}
}

void ll_rx_sched(void)