zephyr/kernel/timeout.c
Andy Ross 987c0e5fc1 kernel: New timeout implementation
Now that the API has been fixed up, replace the existing timeout queue
with a much smaller version.  The basic algorithm is unchanged:
timeouts are stored in a sorted dlist with each node holding a delta
time from the previous node in the list; the announce call just walks
this list pulling off the heads as needed.  Advantages:

* Properly spinlocked and SMP-aware.  The earlier timer implementation
  relied on only CPU 0 doing timeout work, and on an irq_lock() being
  taken before entry (something that was violated in a few spots).
  Now any CPU can wake up for an event (or all of them) and everything
  works correctly.

* The *_thread_timeout() API is now expressible as clean wrappers (just
  one-liners, sketched briefly after this list) around the lower-level
  interface based on function pointer callbacks.  As a result the
  timeout objects no longer need to store backpointers to the thread
  and wait_q and have shrunk by 33%.

* MUCH smaller, to the tune of hundreds of lines of code removed.

* Future proof, in that all operations on the queue are now fronted by
  just two entry points (_add_timeout() and z_clock_announce()) which
  can easily be augmented with fancier data structures.
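
For illustration, the thread-level wrapper now boils down to roughly the
following (the names, signature and field layout here are an approximate
sketch, not the literal contents of timeout_q.h):

    static inline void _add_thread_timeout(struct k_thread *thread,
                                           s32_t ticks)
    {
            /* Hypothetical sketch: _thread_timeout() stands in for the
             * callback that wakes the thread when its timeout expires.
             */
            _add_timeout(&thread->base.timeout, _thread_timeout, ticks);
    }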

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
2018-10-16 15:03:10 -04:00

/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <timeout_q.h>
#include <drivers/system_timer.h>
#include <sys_clock.h>
#include <spinlock.h>
#include <ksched.h>
#include <syscall_handler.h>
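
/* Convenience macro: executes the statement (or block) that follows it
 * exactly once with the given spinlock held.  The dummy __i key is used
 * only as a flag to terminate the for loop after the first pass.
 */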
#define LOCKED(lck) for (k_spinlock_key_t __i = {},                    \
                                          __key = k_spin_lock(lck);    \
                         !__i.key;                                     \
                         k_spin_unlock(lck, __key), __i.key = 1)
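
/* Total number of ticks announced via z_clock_announce() so far.  Ticks
 * that have elapsed in hardware but not yet been announced are reported
 * separately by z_clock_elapsed().
 */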
static u64_t curr_tick;
static sys_dlist_t timeout_list = SYS_DLIST_STATIC_INIT(&timeout_list);
static struct k_spinlock timeout_lock;
static bool can_wait_forever;
/* During a call to z_clock_announce(), the "current" time is "ahead"
 * of the reference used by timeout_list by this amount.
 */
static int announce_advance;
#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int z_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
#endif
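
/* Return the head of the timeout list, or NULL if nothing is queued */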
static struct _timeout *first(void)
{
        sys_dnode_t *t = sys_dlist_peek_head(&timeout_list);

        return t == NULL ? NULL : CONTAINER_OF(t, struct _timeout, node);
}
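
/* Return the timeout following t in the list, or NULL if t is the tail */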
static struct _timeout *next(struct _timeout *t)
{
        sys_dnode_t *n = sys_dlist_peek_next(&timeout_list, &t->node);

        return n == NULL ? NULL : CONTAINER_OF(n, struct _timeout, node);
}
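
/* Unlink t and mark it inactive.  Its delta is folded into its successor
 * (if any) so the absolute expiry times of the remaining nodes are
 * unchanged.
 */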
static void remove(struct _timeout *t)
{
        if (next(t) != NULL) {
                next(t)->dticks += t->dticks;
        }

        sys_dlist_remove(&t->node);
        t->dticks = _INACTIVE;
}
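
/* Subtract the ticks that have already elapsed in hardware
 * (z_clock_elapsed()) from the requested tick count, clamping at zero.
 */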
static s32_t adjust_elapsed(s32_t ticks)
{
        ticks -= z_clock_elapsed();
        return ticks < 0 ? 0 : ticks;
}
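
/* Queue a timeout: store its expiry as a delta from the nodes ahead of
 * it, walking the sorted list and subtracting each earlier delta until
 * the insertion point is found (or appending at the tail), then
 * reprogram the timer driver for the new earliest expiry.
 */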
void _add_timeout(struct _timeout *to, _timeout_func_t fn, s32_t ticks)
{
        __ASSERT(to->dticks < 0, "");
        to->fn = fn;

        LOCKED(&timeout_lock) {
                struct _timeout *t;

                to->dticks = adjust_elapsed(ticks) + announce_advance;
                for (t = first(); t != NULL; t = next(t)) {
                        __ASSERT(t->dticks >= 0, "");

                        if (t->dticks > to->dticks) {
                                t->dticks -= to->dticks;
                                sys_dlist_insert_before(&timeout_list,
                                                        &t->node, &to->node);
                                break;
                        }
                        to->dticks -= t->dticks;
                }

                if (t == NULL) {
                        sys_dlist_append(&timeout_list, &to->node);
                }
        }

        z_clock_set_timeout(_get_next_timeout_expiry(), false);
}
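
/* Remove a queued timeout before it expires.  Returns 0 on success or
 * _INACTIVE if the timeout was not queued.
 */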
int _abort_timeout(struct _timeout *to)
{
        int ret = _INACTIVE;

        LOCKED(&timeout_lock) {
                if (to->dticks != _INACTIVE) {
                        remove(to);
                        ret = 0;
                }
        }

        return ret;
}
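
/* Return the number of ticks until 'to' expires, computed by summing
 * the deltas from the head of the list up to and including 'to'.
 */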
s32_t z_timeout_remaining(struct _timeout *to)
{
        s32_t ticks = 0;

        if (to->dticks == _INACTIVE) {
                return 0;
        }

        LOCKED(&timeout_lock) {
                for (struct _timeout *t = first(); t != NULL; t = next(t)) {
                        ticks += t->dticks;
                        if (to == t) {
                                break;
                        }
                }
        }

        return ticks;
}
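
/* Driver hook: announce that 'ticks' ticks have elapsed.  Repeatedly
 * pops expired heads off the timeout list (invoking each callback with
 * the lock released), then reprograms the timer for the next expiry.
 */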
void z_clock_announce(s32_t ticks)
{
        struct _timeout *t = NULL;

#ifdef CONFIG_TIMESLICING
        z_time_slice(ticks);
#endif

        LOCKED(&timeout_lock) {
                curr_tick += ticks;
                announce_advance = ticks;
        }

        while (true) {
                LOCKED(&timeout_lock) {
                        t = first();
                        if (t != NULL) {
                                if (t->dticks <= announce_advance) {
                                        announce_advance -= t->dticks;
                                        t->dticks = 0;
                                        remove(t);
                                } else {
                                        t->dticks -= announce_advance;
                                        t = NULL;
                                }
                        }
                }

                if (t == NULL) {
                        break;
                }

                t->fn(t);
        }

        announce_advance = 0;
        z_clock_set_timeout(_get_next_timeout_expiry(), false);
}
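
/* Return the number of ticks until the next timeout expires, adjusted
 * for ticks already elapsed in hardware.  When nothing is queued this
 * is K_FOREVER if the clock may idle indefinitely and INT_MAX otherwise,
 * and it is capped by the current time slice when timeslicing is on.
 */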
s32_t _get_next_timeout_expiry(void)
{
        s32_t ret = 0;
        int max = can_wait_forever ? K_FOREVER : INT_MAX;

        LOCKED(&timeout_lock) {
                struct _timeout *to = first();

                ret = to == NULL ? max : adjust_elapsed(to->dticks);
        }

#ifdef CONFIG_TIMESLICING
        if (_current_cpu->slice_ticks && _current_cpu->slice_ticks < ret) {
                ret = _current_cpu->slice_ticks;
        }
#endif

        return ret;
}
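
/* Control whether the kernel may program an indefinite (K_FOREVER)
 * timeout when no timeouts are queued, i.e. whether the system clock
 * may stop while idle.
 */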
int k_enable_sys_clock_always_on(void)
{
        int ret = !can_wait_forever;

        can_wait_forever = 0;
        return ret;
}

void k_disable_sys_clock_always_on(void)
{
        can_wait_forever = 1;
}
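
/* Current time in ticks: announced ticks plus ticks elapsed in hardware
 * since the last announcement.
 */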
s64_t z_tick_get(void)
{
        u64_t t = 0;

        LOCKED(&timeout_lock) {
                t = curr_tick + z_clock_elapsed();
        }

        return t;
}

u32_t z_tick_get_32(void)
{
        /* Returning just the low word doesn't require locking as the
         * API is by definition at risk of overflow
         */
        return z_clock_elapsed() + (u32_t)curr_tick;
}
u32_t _impl_k_uptime_get_32(void)
{
        return __ticks_to_ms(z_tick_get_32());
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_uptime_get_32)
{
        return _impl_k_uptime_get_32();
}
#endif

s64_t _impl_k_uptime_get(void)
{
        return __ticks_to_ms(z_tick_get());
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_uptime_get, ret_p)
{
        u64_t *ret = (u64_t *)ret_p;

        Z_OOPS(Z_SYSCALL_MEMORY_WRITE(ret, sizeof(*ret)));
        *ret = _impl_k_uptime_get();
        return 0;
}
#endif