kernel: fix error in synchronous work cancellation return value

The return value is documented to be true if the work was pending, but
the implementation returned true only if the work was actually running
(i.e. the caller had to wait).  It should also return true if
scheduled or submitted work was cancelled.

Note that this means the return value cannot be used to determine
whether the call slept.

Signed-off-by: Peter Bigot <peter.bigot@nordicsemi.no>
Peter Bigot, 2021-04-16 11:48:50 -05:00, committed by Anas Nashif
commit 707dc22fb0, parent bcd43ff494
3 changed files with 28 additions and 16 deletions
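
To make the corrected contract concrete, here is a minimal usage sketch (the handler and names are hypothetical and not part of this commit; <kernel.h> was the include path of this era, current trees use <zephyr/kernel.h>):

#include <kernel.h>

static void my_handler(struct k_work *work)
{
	/* deferred processing goes here */
}

static K_WORK_DEFINE(my_work, my_handler);

void cancel_example(void)
{
	struct k_work_sync sync;

	(void)k_work_submit(&my_work);

	/* With this fix the result is true whenever something was
	 * cancelled (queued, scheduled, or running work) and false only
	 * if the work was already idle.  It no longer reveals whether
	 * the call had to sleep waiting for a running handler.
	 */
	if (k_work_cancel_sync(&my_work, &sync)) {
		/* work was pending and is now cancelled or completed */
	}
}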

@@ -3020,8 +3020,9 @@ int k_work_cancel(struct k_work *work);
  * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
  * must be allocated in coherent memory.
  *
- * @retval true if work was not idle (call had to wait for cancellation to
- * complete);
+ * @retval true if work was pending (call had to wait for cancellation of a
+ * running handler to complete, or scheduled or submitted operations were
+ * cancelled);
  * @retval false otherwise
  */
 bool k_work_cancel_sync(struct k_work *work, struct k_work_sync *sync);
@@ -3357,8 +3358,9 @@ int k_work_cancel_delayable(struct k_work_delayable *dwork);
  * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
  * must be allocated in coherent memory.
  *
- * @retval true if work was not idle (call had to wait for cancellation to
- * complete);
+ * @retval true if work was pending (call had to wait for cancellation of a
+ * running handler to complete, or scheduled or submitted operations were
+ * cancelled);
  * @retval false otherwise
  */
 bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
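
The delayable variant follows the same contract. A sketch with a scheduled item (again with hypothetical names, not part of this commit):

#include <kernel.h>
#include <sys/printk.h>

static void my_handler(struct k_work *work)
{
	/* deferred processing goes here */
}

static K_WORK_DELAYABLE_DEFINE(my_dwork, my_handler);

void delayable_cancel_example(void)
{
	struct k_work_sync sync;

	(void)k_work_schedule(&my_dwork, K_MSEC(100));

	/* true: the scheduled submission was still pending and is
	 * cancelled before the handler ever runs.
	 */
	bool first = k_work_cancel_delayable_sync(&my_dwork, &sync);

	/* false: the work is now idle, so there is nothing to cancel. */
	bool second = k_work_cancel_delayable_sync(&my_dwork, &sync);

	printk("first=%d second=%d\n", first, second);
}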

@@ -524,10 +524,13 @@ bool k_work_cancel_sync(struct k_work *work,
 	struct z_work_canceller *canceller = &sync->canceller;
 	k_spinlock_key_t key = k_spin_lock(&lock);
+	bool pending = (work_busy_get_locked(work) != 0U);
+	bool need_wait = false;
 
-	(void)cancel_async_locked(work);
-	bool need_wait = cancel_sync_locked(work, canceller);
+	if (pending) {
+		(void)cancel_async_locked(work);
+		need_wait = cancel_sync_locked(work, canceller);
+	}
 
 	k_spin_unlock(&lock, key);
@@ -535,7 +538,7 @@ bool k_work_cancel_sync(struct k_work *work,
 		k_sem_take(&canceller->sem, K_FOREVER);
 	}
 
-	return need_wait;
+	return pending;
 }
 
 /* Work has been dequeued and is about to be invoked by the work
@@ -983,10 +986,13 @@ bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
 	struct z_work_canceller *canceller = &sync->canceller;
 	k_spinlock_key_t key = k_spin_lock(&lock);
+	bool pending = (work_delayable_busy_get_locked(dwork) != 0U);
+	bool need_wait = false;
 
-	(void)cancel_delayable_async_locked(dwork);
-	bool need_wait = cancel_sync_locked(&dwork->work, canceller);
+	if (pending) {
+		(void)cancel_delayable_async_locked(dwork);
+		need_wait = cancel_sync_locked(&dwork->work, canceller);
+	}
 
 	k_spin_unlock(&lock, key);
@@ -994,7 +1000,7 @@ bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
 		k_sem_take(&canceller->sem, K_FOREVER);
 	}
 
-	return need_wait;
+	return pending;
 }
 
 bool k_work_flush_delayable(struct k_work_delayable *dwork,
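
The behavioral change amounts to which subset of the busy states the return value reports. A simplified model using the public k_work_busy_get() flags (a sketch only; the implementation uses the locked helpers shown above):

#include <kernel.h>

static bool cancel_sync_return_model(struct k_work *work)
{
	int busy = k_work_busy_get(work);

	/* Old return value: true only when a handler was running (or
	 * already being cancelled), i.e. the caller had to wait.
	 */
	bool need_wait = (busy & (K_WORK_RUNNING | K_WORK_CANCELING)) != 0;

	/* New return value: true whenever the work was not idle,
	 * including queued or delayed submissions that are simply
	 * removed without any waiting.
	 */
	bool pending = (busy != 0);

	ARG_UNUSED(need_wait);
	return pending;
}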

@@ -537,8 +537,10 @@ static void test_1cpu_queued_cancel_sync(void)
 	zassert_equal(rc, 1, NULL);
 	zassert_equal(coophi_counter(), 0, NULL);
 
-	/* Cancellation should complete immediately. */
-	zassert_false(k_work_cancel_sync(&work, &work_sync), NULL);
+	/* Cancellation should complete immediately, indicating that
+	 * work was pending.
+	 */
+	zassert_true(k_work_cancel_sync(&work, &work_sync), NULL);
 
 	/* Shouldn't have run. */
 	zassert_equal(coophi_counter(), 0, NULL);
@@ -582,8 +584,10 @@ static void test_1cpu_delayed_cancel_sync(void)
 	zassert_equal(rc, 1, NULL);
 	zassert_equal(coophi_counter(), 0, NULL);
 
-	/* Cancellation should complete immediately. */
-	zassert_false(k_work_cancel_delayable_sync(&dwork, &work_sync), NULL);
+	/* Cancellation should complete immediately, indicating that
+	 * work was pending.
+	 */
+	zassert_true(k_work_cancel_delayable_sync(&dwork, &work_sync), NULL);
 
 	/* Shouldn't have run. */
 	zassert_equal(coophi_counter(), 0, NULL);
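
For contrast with the updated assertions, cancelling work that is already idle still returns false; a sketch in the same style (not part of this commit):

	/* Nothing submitted or scheduled: both synchronous cancels
	 * report that nothing was pending.
	 */
	zassert_false(k_work_cancel_sync(&work, &work_sync), NULL);
	zassert_false(k_work_cancel_delayable_sync(&dwork, &work_sync), NULL);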