Bluetooth: Controller: Fix Periodic Sync Terminate race condition

Fix the Periodic Sync Terminate implementation for race
conditions with ULL scheduling, using a flag to stop any
new ULL scheduling to receive chain PDUs.

Signed-off-by: Vinayak Kariappa Chettimada <vich@nordicsemi.no>
Author:    Vinayak Kariappa Chettimada <vich@nordicsemi.no>
Date:      2021-12-11 19:51:38 +05:30
Committer: Carles Cufí
parent     87bd890dc3
commit     32e812c944

5 changed files with 122 additions and 41 deletions
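
At the core of the fix is a stop flag (is_stop) that the thread-context
terminate path sets, followed by a data memory barrier, before tearing the
sync context down; the ULL/LLL paths check the flag and refuse to set up any
new chain PDU reception. Below is a minimal standalone sketch of that
pattern; the struct and function names are simplified stand-ins, not the
controller's actual structures or API, and __sync_synchronize() stands in
for cpu_dmb().

    #include <stdbool.h>

    struct sync_ctx {
            volatile bool is_stop; /* set once terminate is requested */
    };

    /* Thread context: request terminate before tearing the context down */
    static void sync_terminate_request(struct sync_ctx *sync)
    {
            sync->is_stop = true;
            __sync_synchronize(); /* stand-in for cpu_dmb(): order the flag
                                   * write before subsequent teardown steps
                                   */
    }

    /* ULL context: refuse to set up new chain PDU reception once stopping */
    static bool sync_may_schedule(const struct sync_ctx *sync)
    {
            return !sync->is_stop;
    }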


@@ -51,7 +51,7 @@ struct lll_sync {
 	uint32_t window_size_event_us;
 
 	/* used to store lll_aux when chain is being scanned */
-	struct lll_scan_aux *lll_aux;
+	struct lll_scan_aux *volatile lll_aux;
 
 #if defined(CONFIG_BT_CTLR_DF_SCAN_CTE_RX)
 	struct lll_df_sync df_cfg;
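
The qualifier placement above is deliberate: `struct lll_scan_aux *volatile`
makes the pointer itself volatile (it can be rewritten concurrently by
ISR-level LLL code), not the object it points to, so every access reloads
the pointer. A small illustrative sketch with hypothetical names:

    struct obj {
            int v;
    };

    struct holder {
            struct obj *volatile ptr; /* the POINTER is volatile and is
                                       * reloaded on every access; the
                                       * pointed-to object is not
                                       */
    };

    static int holder_read(struct holder *h)
    {
            struct obj *snap = h->ptr; /* take one snapshot of the pointer */

            return snap ? snap->v : 0;
    }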


@@ -937,6 +937,9 @@ static void isr_done(void *param)
 	lll_isr_status_reset();
 
+	/* Generate incomplete data status and release aux context when
+	 * sync event is using LLL scheduling.
+	 */
 	lll = param;
 
 	/* LLL scheduling used for chain PDU reception is aborted/preempted */


@@ -52,11 +52,8 @@ static inline struct ll_sync_iso_set *
 	sync_iso_create_get(struct ll_sync_set *sync);
 static void done_disabled_cb(void *param);
 static void flush(void *param);
-#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
 static void rx_release_put(struct node_rx_hdr *rx);
-#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
-
 static void aux_sync_incomplete(void *param);
 static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
 		      uint32_t remainder, uint16_t lazy, uint8_t force,
 		      void *param);
@@ -557,10 +554,23 @@ void ull_scan_aux_setup(memq_link_t *link, struct node_rx_hdr *rx)
 	if (!IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) || lll) {
 		lll->lll_aux = NULL;
 	} else {
+		struct ll_sync_set *sync;
+
 		LL_ASSERT(sync_lll &&
 			  (!sync_lll->lll_aux || sync_lll->lll_aux == lll_aux));
 
+		/* Do not ULL schedule if sync terminate requested */
+		sync = HDR_LLL2ULL(sync_lll);
+		if (unlikely(sync->is_stop)) {
+			goto ull_scan_aux_rx_flush;
+		}
+
+		/* Associate the auxiliary context with sync context */
 		sync_lll->lll_aux = lll_aux;
+
+		/* Backup the node rx to be dispatch on successfully ULL
+		 * scheduling setup.
+		 */
+		aux->rx_head = rx;
 	}
@@ -663,6 +673,7 @@ ull_scan_aux_rx_flush:
 	if (aux) {
 		struct ull_hdr *hdr;
+		uint8_t ref;
 
 		/* Enqueue last rx in aux context if possible, otherwise send
 		 * immediately since we are in sync context.
@@ -687,12 +698,17 @@ ull_scan_aux_rx_flush:
 		 * ull_hdr before done event was processed.
 		 */
 		hdr = &aux->ull;
-		LL_ASSERT(ull_ref_get(hdr) < 2);
-		if (ull_ref_get(hdr) == 0) {
+		ref = ull_ref_get(hdr);
+		if (ref == 0) {
 			flush(aux);
 		} else {
-			LL_ASSERT(!hdr->disabled_cb);
+			/* A specific single shot scheduled aux context cannot
+			 * overlap, i.e. ULL reference count shall be less than
+			 * 2.
+			 */
+			LL_ASSERT(ref < 2);
+			LL_ASSERT(!hdr->disabled_cb);
+
 			hdr->disabled_param = aux;
 			hdr->disabled_cb = done_disabled_cb;
 		}
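
The hunk above captures the reference count once and then either flushes
immediately (no event holds the aux context) or defers the flush to the
context's disabled callback. A hedged sketch of that flush-now-or-defer
pattern, with simplified stand-ins for the ull_hdr bookkeeping rather than
the controller's real structures:

    struct hdr {
            unsigned int ref;            /* events currently using the context */
            void (*disabled_cb)(void *); /* invoked when the last user drops */
            void *disabled_param;
    };

    static void ctx_flush(void *param)
    {
            (void)param; /* release queued rx nodes, free the context, ... */
    }

    static void ctx_release_or_defer(struct hdr *hdr, void *ctx)
    {
            if (hdr->ref == 0U) {
                    ctx_flush(ctx); /* nothing in flight: flush now */
            } else {
                    /* an event still references the context: flush at the
                     * moment the reference is dropped
                     */
                    hdr->disabled_param = ctx;
                    hdr->disabled_cb = ctx_flush;
            }
    }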
@@ -702,11 +718,9 @@ ull_scan_aux_rx_flush:
 		ll_rx_put(link, rx);
 
-#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
-	if (rx_incomplete) {
+	if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && rx_incomplete) {
 		rx_release_put(rx_incomplete);
 	}
-#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
 
 	ll_rx_sched();
 }
@@ -727,7 +741,7 @@ void ull_scan_aux_done(struct node_rx_event_done *done)
 		LL_ASSERT(ull_sync_is_valid_get(sync));
 		hdr = &sync->ull;
 
-		if (!sync->lll.lll_aux) {
+		if (unlikely(sync->is_stop) || !sync->lll.lll_aux) {
 			return;
 		}
@@ -832,21 +846,61 @@ void ull_scan_aux_release(memq_link_t *link, struct node_rx_hdr *rx)
 	if (lll_aux) {
 		struct ll_scan_aux_set *aux;
+		struct ll_scan_set *scan;
+		struct lll_scan *lll;
 		struct ull_hdr *hdr;
+		uint8_t is_stop;
 
 		aux = HDR_LLL2ULL(lll_aux);
-		hdr = &aux->ull;
-		LL_ASSERT(ull_ref_get(hdr) < 2);
 
-		/* Flush from here of from done event, if one is pending */
-		if (ull_ref_get(hdr) == 0) {
-			flush(aux);
+		lll = aux->parent;
+		scan = HDR_LLL2ULL(lll);
+		scan = ull_scan_is_valid_get(scan);
+		if (scan) {
+			is_stop = 0U;
 		} else {
-			LL_ASSERT(!hdr->disabled_cb);
+			struct lll_sync *sync_lll;
+			struct ll_sync_set *sync;
 
-			hdr->disabled_param = aux;
-			hdr->disabled_cb = done_disabled_cb;
+			sync_lll = (void *)lll;
+			sync = HDR_LLL2ULL(sync_lll);
+			is_stop = sync->is_stop;
 		}
+
+		if (!is_stop) {
+			uint8_t ref;
+
+			/* Flush from here or from done event, if one is
+			 * pending.
+			 */
+			hdr = &aux->ull;
+			ref = ull_ref_get(hdr);
+			if (ref == 0) {
+				flush(aux);
+			} else {
+				/* A specific single shot scheduled aux context
+				 * cannot overlap, i.e. ULL reference count
+				 * shall be less than 2.
+				 */
+				LL_ASSERT(ref < 2);
+				LL_ASSERT(!hdr->disabled_cb);
+
+				hdr->disabled_param = aux;
+				hdr->disabled_cb = done_disabled_cb;
+			}
+		} else {
+			/* Sync terminate requested, enqueue node rx that will
+			 * be flushed by the disabled_cb setup by the
+			 * terminate.
+			 */
+			rx->link = link;
+			if (aux->rx_last) {
+				aux->rx_last->rx_ftr.extra = rx;
+			} else {
+				aux->rx_head = rx;
+			}
+			aux->rx_last = rx;
+
+			return;
+		}
 	}
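
When a terminate is pending, the node rx is not dispatched; it is appended
to the aux context's rx_head/rx_last list so that the flush triggered by the
terminate's disabled_cb can release everything in one place. A simplified
sketch of that enqueue, with stand-in types rather than node_rx_hdr and
ll_scan_aux_set (the `next` field mimics the rx_ftr.extra chaining):

    struct rx_node {
            struct rx_node *next; /* stand-in for rx_ftr.extra chaining */
    };

    struct aux_ctx {
            struct rx_node *rx_head;
            struct rx_node *rx_last;
    };

    static void aux_rx_enqueue(struct aux_ctx *aux, struct rx_node *rx)
    {
            rx->next = NULL;
            if (aux->rx_last) {
                    aux->rx_last->next = rx; /* append behind current tail */
            } else {
                    aux->rx_head = rx;       /* first queued node */
            }
            aux->rx_last = rx;
    }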
@@ -857,7 +911,7 @@ void ull_scan_aux_release(memq_link_t *link, struct node_rx_hdr *rx)
 int ull_scan_aux_stop(struct ll_scan_aux_set *aux)
 {
 	static memq_link_t link;
-	static struct mayfly mfy = {0, 0, &link, NULL, flush};
+	static struct mayfly mfy = {0, 0, &link, NULL, NULL};
 	uint8_t aux_handle;
 	uint32_t ret;
 	int err;
@@ -876,6 +930,11 @@ int ull_scan_aux_stop(struct ll_scan_aux_set *aux)
 		if (ret) {
 			return -EBUSY;
 		}
+
+		mfy.fp = flush;
+	} else {
+		/* ULL scheduling stopped before prepare */
+		mfy.fp = aux_sync_incomplete;
 	}
 
 	/* Release auxiliary context in ULL execution context */
@@ -970,17 +1029,18 @@ static void flush(void *param)
 		lll->lll_aux = NULL;
 	} else {
 		struct lll_sync *sync_lll;
+		struct ll_sync_set *sync;
 
 		sync_lll = aux->parent;
-		LL_ASSERT(sync_lll->lll_aux);
+		sync = HDR_LLL2ULL(sync_lll);
+		LL_ASSERT(sync->is_stop || sync_lll->lll_aux);
 		sync_lll->lll_aux = NULL;
 	}
 
 	aux_release(aux);
 }
 
-#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
 static void rx_release_put(struct node_rx_hdr *rx)
 {
 	rx->type = NODE_RX_TYPE_RELEASE;
@@ -1005,9 +1065,14 @@ static void aux_sync_partial(void *param)
 static void aux_sync_incomplete(void *param)
 {
+#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
 	struct ll_scan_aux_set *aux;
 
 	aux = param;
 
+	/* ULL scheduling succeeded hence no backup node rx present, use the
+	 * extra node rx reserved for incomplete data status generation.
+	 */
 	if (!aux->rx_head) {
 		struct ll_sync_set *sync;
 		struct node_rx_hdr *rx;
@@ -1042,8 +1107,8 @@ static void aux_sync_incomplete(void *param)
 	}
 
 	flush(aux);
-}
 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
+}
 
 static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
 		      uint32_t remainder, uint16_t lazy, uint8_t force,
@@ -1098,24 +1163,14 @@ static void ticker_op_cb(uint32_t status, void *param)
 	}
 
 	if (status == TICKER_STATUS_SUCCESS) {
-		if (0) {
-#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
-		} else if (sync) {
+		if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync) {
 			mfy.fp = aux_sync_partial;
-#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
 		} else {
 			return;
 		}
 	} else {
-		if (0) {
-#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
-		} else if (sync) {
+		if (IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC) && sync) {
 			mfy.fp = aux_sync_incomplete;
-#endif /* CONFIG_BT_CTLR_SYNC_PERIODIC */
 		} else {
 			mfy.fp = flush;
 		}
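
The hunk above replaces `#if`-guarded `else if` chains with the IS_ENABLED()
idiom: the Kconfig test becomes a compile-time-constant condition, so the
code still type-checks in every configuration and the disabled branch is
removed by the optimizer. A minimal standalone illustration, where the macro
value stands in for IS_ENABLED(CONFIG_BT_CTLR_SYNC_PERIODIC):

    #define OPTION_SYNC_ENABLED 0 /* stand-in for IS_ENABLED(CONFIG_...) */

    static void handle_sync(void *param)    { (void)param; }
    static void handle_default(void *param) { (void)param; }

    /* Returns the handler to run; the sync branch compiles in every build
     * but is eliminated as dead code when the option is off.
     */
    static void (*pick_handler(int have_sync))(void *)
    {
            if (OPTION_SYNC_ENABLED && have_sync) {
                    return handle_sync;
            }

            return handle_default;
    }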


@@ -190,6 +190,7 @@ uint8_t ll_sync_create(uint8_t options, uint8_t sid, uint8_t adv_addr_type,
 			1U : 0U;
 #endif
 	sync->skip = skip;
+	sync->is_stop = 0U;
 
 	/* NOTE: Use timeout not zero to represent sync context used for sync
 	 * create.
@@ -223,6 +224,7 @@ uint8_t ll_sync_create(uint8_t options, uint8_t sid, uint8_t adv_addr_type,
 	/* Initialize sync LLL context */
 	lll_sync = &sync->lll;
+	lll_sync->lll_aux = NULL;
 	lll_sync->is_rx_enabled = sync->rx_enable;
 	lll_sync->skip_prepare = 0U;
 	lll_sync->skip_event = 0U;
@@ -344,6 +346,11 @@ uint8_t ll_sync_terminate(uint16_t handle)
 		return BT_HCI_ERR_UNKNOWN_ADV_IDENTIFIER;
 	}
 
+	/* Request terminate, no new ULL scheduling to be setup */
+	sync->is_stop = 1U;
+	cpu_dmb();
+
+	/* Stop periodic sync ticker timeouts */
 	err = ull_ticker_stop_with_mark(TICKER_ID_SCAN_SYNC_BASE + handle,
 					sync, &sync->lll);
 	LL_ASSERT(err == 0 || err == -EALREADY);
@@ -351,6 +358,7 @@ uint8_t ll_sync_terminate(uint16_t handle)
 		return BT_HCI_ERR_CMD_DISALLOWED;
 	}
 
+	/* Check and stop any auxiliary PDU receptions */
 	lll_aux = sync->lll.lll_aux;
 	if (lll_aux) {
 		struct ll_scan_aux_set *aux;
@@ -845,14 +853,17 @@ void ull_sync_done(struct node_rx_event_done *done)
 	uint32_t ticks_drift_plus;
 	struct ll_sync_set *sync;
 	uint16_t elapsed_event;
-	struct lll_sync *lll;
 	uint16_t skip_event;
 	uint16_t lazy;
 	uint8_t force;
 
 	/* Get reference to ULL context */
 	sync = CONTAINER_OF(done->param, struct ll_sync_set, ull);
-	lll = &sync->lll;
+
+	/* Do nothing if local terminate requested */
+	if (unlikely(sync->is_stop)) {
+		return;
+	}
 
 #if defined(CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING)
 #if defined(CONFIG_BT_CTLR_CTEINLINE_SUPPORT)
@@ -865,6 +876,10 @@ void ull_sync_done(struct node_rx_event_done *done)
 	} else
 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING */
 	{
+		struct lll_sync *lll;
+
+		lll = &sync->lll;
+
 		/* Events elapsed used in timeout checks below */
 		skip_event = lll->skip_event;
 		elapsed_event = skip_event + 1;
@@ -1202,7 +1217,14 @@ static void ticker_stop_sync_lost_op_cb(uint32_t status, void *param)
 	static memq_link_t link;
 	static struct mayfly mfy = {0, 0, &link, NULL, sync_lost};
 
-	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
+	/* When in race between terminate requested in thread context and
+	 * sync lost scenario, do not generate the sync lost node rx from here
+	 */
+	if (status != TICKER_STATUS_SUCCESS) {
+		LL_ASSERT(param == ull_disable_mark_get());
+
+		return;
+	}
 
 	mfy.param = param;
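
ticker_stop_sync_lost_op_cb() previously asserted that the ticker stop
succeeded; with the fix a failure is tolerated when it is explained by a
concurrent thread-context terminate (the stop finds the context marked via
ull_disable_mark), and no sync-lost report is generated from here. A hedged
sketch of that tolerate-the-race shape, with illustrative names rather than
the Zephyr ticker API:

    #include <assert.h>

    #define STATUS_SUCCESS 0  /* illustrative, not the ticker API */

    static void *terminate_mark; /* what ull_disable_mark_get() would return */

    static void stop_op_cb(int status, void *param)
    {
            if (status != STATUS_SUCCESS) {
                    /* only a concurrent terminate may fail the stop; it
                     * owns the context and performs the cleanup itself
                     */
                    assert(param == terminate_mark);
                    return;
            }

            (void)param; /* ... enqueue the sync_lost() work here ... */
    }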


@@ -43,6 +43,7 @@ struct ll_sync_set {
 	uint8_t is_term:1;
 #endif /* CONFIG_BT_CTLR_SYNC_PERIODIC_CTE_TYPE_FILTERING && !CONFIG_BT_CTLR_CTEINLINE_SUPPORT */
+	uint8_t is_stop:1; /* sync terminate requested */
 	uint8_t sync_expire:3; /* countdown of 6 before fail to establish */
 
 #if defined(CONFIG_BT_CTLR_CHECK_SAME_PEER_SYNC)