/*
 * Copyright (c) 2016-2017 Nordic Semiconductor ASA
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <soc.h>
#include <drivers/clock_control.h>
#include <drivers/clock_control/nrf_clock_control.h>
#include <drivers/timer/system_timer.h>
#include <sys_clock.h>
#include <hal/nrf_rtc.h>
#include <spinlock.h>

#define RTC NRF_RTC1
|
2017-05-08 07:59:37 +02:00
|
|
|
|
2019-03-07 23:18:17 +01:00
|
|
|
#define COUNTER_MAX 0x00ffffff
|
2019-04-23 15:08:00 +02:00
|
|
|
#define CYC_PER_TICK (sys_clock_hw_cycles_per_sec() \
|
2018-10-15 18:04:21 +02:00
|
|
|
/ CONFIG_SYS_CLOCK_TICKS_PER_SEC)
|
2019-03-07 23:18:17 +01:00
|
|
|
#define MAX_TICKS ((COUNTER_MAX - CYC_PER_TICK) / CYC_PER_TICK)
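
/* For scale, with the nRF's 32768 Hz RTC clock and the common
 * CONFIG_SYS_CLOCK_TICKS_PER_SEC=100 (illustrative values only),
 * CYC_PER_TICK works out to 327 counter cycles (~10 ms) and MAX_TICKS
 * to about 51300 ticks: the longest timeout that still fits in the
 * 24-bit counter with a full tick of headroom.
 */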

static struct k_spinlock lock;

/* 24-bit counter value at the last tick boundary announced to the
 * kernel via z_clock_announce(); elapsed time is always computed as
 * a masked difference against it.
 */
static u32_t last_count;
|
2016-11-22 17:03:32 +01:00
|
|
|
|
2019-03-07 23:18:17 +01:00
|
|
|
static u32_t counter_sub(u32_t a, u32_t b)
|
2017-05-08 07:59:37 +02:00
|
|
|
{
|
2018-10-15 18:04:21 +02:00
|
|
|
return (a - b) & COUNTER_MAX;
|
2017-05-08 07:59:37 +02:00
|
|
|
}
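
/* Note that counter_sub() needs no explicit wraparound handling: the
 * unsigned subtraction is modulo 2^32 and the mask reduces it modulo
 * 2^24, so e.g. counter_sub(0x000005, 0xfffffa) == 0x00000b even
 * though the counter wrapped between the two samples.
 */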

static void set_comparator(u32_t cyc)
{
	nrf_rtc_cc_set(RTC, 0, cyc & COUNTER_MAX);
}
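
/* Like the counter itself, the CC compare registers are only 24 bits
 * wide, hence the mask before writing.
 */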

static u32_t counter(void)
{
	return nrf_rtc_counter_get(RTC);
}

/* Note: this function has public linkage, and MUST have this
 * particular name.  The platform architecture itself doesn't care,
 * but there is a test (tests/arch/arm_irq_vector_table) that needs
 * to find it so it can set it in a custom vector table.  Should
 * probably better abstract that at some point (e.g. query and reset
 * it by pointer at runtime, maybe?) so we don't have this leaky
 * symbol.
 */
void rtc1_nrf_isr(void *arg)
{
	ARG_UNUSED(arg);
	RTC->EVENTS_COMPARE[0] = 0;

	k_spinlock_key_t key = k_spin_lock(&lock);
	u32_t t = counter();
	u32_t dticks = counter_sub(t, last_count) / CYC_PER_TICK;

	last_count += dticks * CYC_PER_TICK;

	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		u32_t next = last_count + CYC_PER_TICK;

		/* As below: we're guaranteed to get an interrupt as
		 * long as it's set two or more cycles in the future
		 */
		if (counter_sub(next, t) < 3) {
			next += CYC_PER_TICK;
		}
		set_comparator(next);
	}

	k_spin_unlock(&lock, key);
	z_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ? dticks : 1);
}
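
/* Bookkeeping note: the ISR advances last_count by a whole number of
 * ticks rather than snapping it to the raw counter, so a partial tick
 * elapsed at interrupt time is carried forward and credited to the
 * next announcement instead of being lost.
 */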

int z_clock_driver_init(struct device *device)
{
	struct device *clock;

	ARG_UNUSED(device);

	clock = device_get_binding(DT_INST_0_NORDIC_NRF_CLOCK_LABEL "_32K");
	if (!clock) {
		return -1;
	}

	clock_control_on(clock, NULL);

	/* TODO: replace with counter driver to access RTC */
	nrf_rtc_prescaler_set(RTC, 0);
	nrf_rtc_cc_set(RTC, 0, CYC_PER_TICK);
	nrf_rtc_int_enable(RTC, RTC_INTENSET_COMPARE0_Msk);

	/* Clear the event flag and possible pending interrupt */
	nrf_rtc_event_clear(RTC, NRF_RTC_EVENT_COMPARE_0);
	NVIC_ClearPendingIRQ(RTC1_IRQn);

	IRQ_CONNECT(RTC1_IRQn, 1, rtc1_nrf_isr, 0, 0);
	irq_enable(RTC1_IRQn);

	nrf_rtc_task_trigger(RTC, NRF_RTC_TASK_CLEAR);
	nrf_rtc_task_trigger(RTC, NRF_RTC_TASK_START);

	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		set_comparator(counter() + CYC_PER_TICK);
	}

	return 0;
}
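
/* With the prescaler left at zero the RTC counts at the full LFCLK
 * rate (nominally 32768 Hz), which is the rate CYC_PER_TICK assumes
 * via sys_clock_hw_cycles_per_sec().
 */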

void z_clock_set_timeout(s32_t ticks, bool idle)
{
	ARG_UNUSED(idle);

#ifdef CONFIG_TICKLESS_KERNEL
	ticks = (ticks == K_FOREVER) ? MAX_TICKS : ticks;
	ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0);

	k_spinlock_key_t key = k_spin_lock(&lock);
	u32_t cyc, dt, t = counter();
	bool zli_fixup = IS_ENABLED(CONFIG_ZERO_LATENCY_IRQS);

	/* Round up to next tick boundary */
	cyc = ticks * CYC_PER_TICK + 1 + counter_sub(t, last_count);
	cyc += (CYC_PER_TICK - 1);
	cyc = (cyc / CYC_PER_TICK) * CYC_PER_TICK;
	cyc += last_count;
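
	/* Worked instance of the rounding above, with illustrative
	 * numbers: CYC_PER_TICK=327, ticks=2, counter 100 cycles past
	 * last_count.  Then cyc = 654 + 1 + 100 = 755, which rounds up
	 * to 981 = 3*CYC_PER_TICK: the comparator always lands on a
	 * tick boundary strictly after the current counter value.
	 */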

	/* Per NRF docs, the RTC is guaranteed to trigger a compare
	 * event if the comparator value to be set is at least two
	 * cycles later than the current value of the counter.  So if
	 * we're three or more cycles out, we can set it blindly.  If
	 * not, check the time again immediately after setting: it's
	 * possible we "just missed it" and can flag an immediate
	 * interrupt.  Or it could be exactly two cycles out, which
	 * will have worked.  Otherwise, there's no way to get an
	 * interrupt at the right time and we have to slip the event
	 * by one clock cycle (or we could spin, but this is a slow
	 * clock and spinning for a whole cycle can be thousands of
	 * instructions!)
	 *
	 * You might ask: why not set the comparator first and then
	 * check the timer synchronously to see if we missed it, which
	 * would avoid the need for a slipped cycle.  That doesn't
	 * work; the states overlap inside the counter hardware.  It's
	 * possible to set a comparator value of "N", issue a DSB
	 * instruction to flush the pipeline, and then immediately
	 * read a counter value of "N-1" (i.e. the comparator is still
	 * in the future), and yet still not receive an interrupt, at
	 * least on nRF52.  Some experimentation on nrf52840 shows
	 * that you need to be early by about 400 processor cycles
	 * (about 1/5th of an RTC cycle) in order to reliably get the
	 * interrupt.  The docs say two cycles; they mean two cycles.
	 */
	if (counter_sub(cyc, t) > 2) {
		set_comparator(cyc);
	} else {
		set_comparator(cyc);
		dt = counter_sub(cyc, counter());
		if (dt == 0 || dt > 0x7fffff) {
			/* Missed it! */
			NVIC_SetPendingIRQ(RTC1_IRQn);
			if (IS_ENABLED(CONFIG_ZERO_LATENCY_IRQS)) {
				zli_fixup = false;
			}
		} else if (dt == 1) {
			/* Too soon, interrupt won't arrive. */
			set_comparator(cyc + 2);
		}
		/* Otherwise it was two cycles out, we're fine */
	}
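
	/* On the dt tests above: counter_sub() returns a 24-bit
	 * unsigned delta, so any value above 0x7fffff (half the
	 * counter space) is read as "cyc is already in the past";
	 * the ZLI failsafe below uses the same convention.
	 */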

#ifdef CONFIG_ZERO_LATENCY_IRQS
	/* Failsafe.  ZLIs can preempt us even though interrupts are
	 * masked, blowing up the sensitive timing above.  If the
	 * feature is enabled and we haven't recorded the presence of
	 * a pending interrupt then we need a final check (in a loop!
	 * because this too can be interrupted) to confirm that the
	 * comparator is still in the future.  Don't bother being
	 * fancy with cycle counting here, just set an interrupt
	 * "soon" that we know will get the timer back to a known
	 * state.  This handles (via some hairy modular expressions)
	 * the wraparound cases where we are preempted for as much as
	 * half the counter space.
	 */
	if (zli_fixup && counter_sub(cyc, counter()) <= 0x7fffff) {
		while (counter_sub(cyc, counter() + 2) > 0x7fffff) {
			cyc = counter() + 3;
			set_comparator(cyc);
		}
	}
#endif

	k_spin_unlock(&lock, key);
#endif /* CONFIG_TICKLESS_KERNEL */
}

u32_t z_clock_elapsed(void)
{
	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		return 0;
	}

	k_spinlock_key_t key = k_spin_lock(&lock);
	u32_t ret = counter_sub(counter(), last_count) / CYC_PER_TICK;

	k_spin_unlock(&lock, key);
	return ret;
}
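
/* z_clock_elapsed() reports whole ticks that have passed since the
 * last announcement but have not yet been delivered to the kernel.
 * In ticked mode it is always zero, since the ISR announces each
 * tick as it occurs.
 */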

u32_t z_timer_cycle_get_32(void)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	u32_t ret = counter_sub(counter(), last_count) + last_count;

	k_spin_unlock(&lock, key);
	return ret;
}
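
/* z_timer_cycle_get_32() intentionally avoids returning the raw
 * counter: last_count is a free-running 32-bit cycle total, so adding
 * the masked 24-bit delta on top of it extends the hardware's 24-bit
 * counter into a monotonic 32-bit cycle count, valid as long as a
 * tick is announced at least once per counter wrap.
 */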