unified: Ensure delays do not time out prematurely
Ensures that all APIs which accept a timeout value wait for at least the
specified amount of time, and do not time out prematurely.

* The kernel now waits for the next system clock tick to occur before the
  timeout interval is considered to have started. (That is, the only way
  to ensure a delay of N tick intervals is to wait for N+1 ticks to occur.)

* Gets rid of the ticks -> milliseconds -> ticks conversion in the
  task_sleep() and fiber_sleep() legacy APIs, since that conversion
  introduces rounding which -- coupled with the previous change -- can
  alter the number of ticks being requested during the sleep operation.

* Corrects the work queue API documentation, which incorrectly stated that
  the delay is measured in ticks rather than milliseconds.

Change-Id: I8b04467237b24fb0364c8f344d872457418c18da
Signed-off-by: Allan Stephens <allan.stephens@windriver.com>
This commit is contained in:
parent
811d97c320
commit
6c98c4d378
|
@ -273,6 +273,9 @@ extern void *k_thread_custom_data_get(void);
|
|||
|
||||
/* private internal time manipulation (users should never play with ticks) */
|
||||
|
||||
/* added tick needed to account for tick in progress */
|
||||
#define _TICK_ALIGN 1
|
||||
|
||||
static int64_t __ticks_to_ms(int64_t ticks)
|
||||
{
|
||||
#if CONFIG_SYS_CLOCK_EXISTS
|
||||
|
@ -694,15 +697,15 @@ extern void k_delayed_work_init(struct k_delayed_work *work,
|
|||
* mutual exclusion mechanism. Such usage is not recommended and if necessary,
|
||||
* it should be explicitly done between the submitter and the handler.
|
||||
*
|
||||
* @param work_q to schedule the work item
|
||||
* @param work_q Workqueue to schedule the work item
|
||||
* @param work Delayed work item
|
||||
* @param ticks Ticks to wait before scheduling the work item
|
||||
* @param delay Delay before scheduling the work item (in milliseconds)
|
||||
*
|
||||
* @return 0 in case of success or negative value in case of error.
|
||||
*/
|
||||
extern int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
|
||||
struct k_delayed_work *work,
|
||||
int32_t ticks);
|
||||
int32_t delay);
|
||||
|
||||
/**
|
||||
* @brief Cancel a delayed work item
|
||||
|
@ -749,9 +752,9 @@ static inline void k_work_submit(struct k_work *work)
|
|||
* unexpected behavior.
|
||||
*/
|
||||
static inline int k_delayed_work_submit(struct k_delayed_work *work,
|
||||
int ticks)
|
||||
int32_t delay)
|
||||
{
|
||||
return k_delayed_work_submit_to_queue(&k_sys_work_q, work, ticks);
|
||||
return k_delayed_work_submit_to_queue(&k_sys_work_q, work, delay);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_SYS_CLOCK_EXISTS */
|
||||
|
|
|
@ -138,17 +138,16 @@ fiber_delayed_start(char *stack, unsigned int stack_size_in_bytes,
|
|||
|
||||
#define fiber_yield k_yield
|
||||
#define fiber_abort() k_thread_abort(k_current_get())
|
||||
static inline void fiber_sleep(int32_t timeout)
|
||||
{
|
||||
k_sleep(_ticks_to_ms(timeout));
|
||||
}
|
||||
|
||||
extern void _legacy_sleep(int32_t ticks);
|
||||
#define fiber_sleep _legacy_sleep
|
||||
#define task_sleep _legacy_sleep
|
||||
|
||||
#define fiber_wakeup k_wakeup
|
||||
#define isr_fiber_wakeup k_wakeup
|
||||
#define fiber_fiber_wakeup k_wakeup
|
||||
#define task_fiber_wakeup k_wakeup
|
||||
|
||||
#define task_sleep fiber_sleep
|
||||
#define task_yield k_yield
|
||||
#define task_priority_set(task, prio) k_thread_priority_set(task, (int)prio)
|
||||
#define task_entry_set(task, entry) \
|
||||
|
|
|
@ -17,8 +17,29 @@
|
|||
#include <kernel.h>
|
||||
#include <init.h>
|
||||
#include <ksched.h>
|
||||
#include <wait_q.h>
|
||||
#include <misc/__assert.h>
|
||||
#include <misc/util.h>
|
||||
|
||||
/**
 * @brief Put the current thread to sleep for @a ticks system clock ticks.
 *
 * Back-end for the legacy task_sleep()/fiber_sleep() APIs. Operates
 * directly in ticks, avoiding the ticks -> milliseconds -> ticks
 * round-trip of the k_sleep() path, whose rounding could alter the
 * number of ticks actually requested.
 *
 * Must not be called from an ISR, and an unlimited (TICKS_UNLIMITED)
 * wait is not permitted — both are enforced by assertions only.
 *
 * @param ticks Number of ticks to sleep; a value <= 0 merely yields.
 */
void _legacy_sleep(int32_t ticks)
{
	__ASSERT(!_is_in_isr(), "");
	__ASSERT(ticks != TICKS_UNLIMITED, "");

	/* a non-positive delay degenerates to giving up the CPU once */
	if (ticks <= 0) {
		k_yield();
		return;
	}

	int key = irq_lock();

	/*
	 * Remove the thread from the ready queue and register the tick-based
	 * timeout (no wait queue), then context-switch away while interrupts
	 * are still locked; _Swap() releases the lock on resumption.
	 */
	_mark_thread_as_timing(_current);
	_remove_thread_from_ready_q(_current);
	_add_thread_timeout(_current, NULL, ticks);

	_Swap(key);
}
|
||||
|
||||
#if (CONFIG_NUM_DYNAMIC_TIMERS > 0)
|
||||
|
||||
static struct k_timer dynamic_timers[CONFIG_NUM_DYNAMIC_TIMERS];
|
||||
|
|
|
@ -152,7 +152,8 @@ void _pend_thread(struct k_thread *thread, _wait_q_t *wait_q, int32_t timeout)
|
|||
|
||||
if (timeout != K_FOREVER) {
|
||||
_mark_thread_as_timing(thread);
|
||||
_add_thread_timeout(thread, wait_q, _ms_to_ticks(timeout));
|
||||
_add_thread_timeout(thread, wait_q,
|
||||
_TICK_ALIGN + _ms_to_ticks(timeout));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -296,7 +297,8 @@ void k_sleep(int32_t duration)
|
|||
|
||||
_mark_thread_as_timing(_current);
|
||||
_remove_thread_from_ready_q(_current);
|
||||
_add_thread_timeout(_current, NULL, _ms_to_ticks(duration));
|
||||
_add_thread_timeout(_current, NULL,
|
||||
_TICK_ALIGN + _ms_to_ticks(duration));
|
||||
|
||||
_Swap(key);
|
||||
}
|
||||
|
|
|
@ -271,7 +271,8 @@ static void schedule_new_thread(struct k_thread *thread, int32_t delay)
|
|||
start_thread(thread);
|
||||
} else {
|
||||
_mark_thread_as_timing(thread);
|
||||
_add_thread_timeout(thread, NULL, _ms_to_ticks(delay));
|
||||
_add_thread_timeout(thread, NULL,
|
||||
_TICK_ALIGN + _ms_to_ticks(delay));
|
||||
}
|
||||
#else
|
||||
ARG_UNUSED(delay);
|
||||
|
|
|
@ -31,7 +31,10 @@ static void timer_expiration_handler(struct _timeout *t)
|
|||
struct k_timer *timer = CONTAINER_OF(t, struct k_timer, timeout);
|
||||
struct k_thread *pending_thread;
|
||||
|
||||
/* if the time is periodic, start it again */
|
||||
/*
|
||||
* if the timer is periodic, start it again; don't add _TICK_ALIGN
|
||||
* since we're already aligned to a tick boundary
|
||||
*/
|
||||
if (timer->period > 0) {
|
||||
_add_timeout(NULL, &timer->timeout, &timer->wait_q,
|
||||
timer->period);
|
||||
|
@ -87,7 +90,7 @@ void k_timer_start(struct k_timer *timer, int32_t duration, int32_t period)
|
|||
|
||||
timer->period = _ms_to_ticks(period);
|
||||
_add_timeout(NULL, &timer->timeout, &timer->wait_q,
|
||||
_ms_to_ticks(duration));
|
||||
_TICK_ALIGN + _ms_to_ticks(duration));
|
||||
timer->status = 0;
|
||||
irq_unlock(key);
|
||||
}
|
||||
|
|
|
@ -81,7 +81,7 @@ void k_delayed_work_init(struct k_delayed_work *work, k_work_handler_t handler)
|
|||
|
||||
int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
|
||||
struct k_delayed_work *work,
|
||||
int32_t timeout)
|
||||
int32_t delay)
|
||||
{
|
||||
int key = irq_lock();
|
||||
int err;
|
||||
|
@ -103,12 +103,13 @@ int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
|
|||
/* Attach workqueue so the timeout callback can submit it */
|
||||
work->work_q = work_q;
|
||||
|
||||
if (!timeout) {
|
||||
if (!delay) {
|
||||
/* Submit work if no ticks is 0 */
|
||||
k_work_submit_to_queue(work_q, &work->work);
|
||||
} else {
|
||||
/* Add timeout */
|
||||
_add_timeout(NULL, &work->timeout, NULL, _ms_to_ticks(timeout));
|
||||
_add_timeout(NULL, &work->timeout, NULL,
|
||||
_TICK_ALIGN + _ms_to_ticks(delay));
|
||||
}
|
||||
|
||||
err = 0;
|
||||
|
|
Loading…
Reference in a new issue