/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <drivers/timer/system_timer.h>
#include <sys_clock.h>
#include <spinlock.h>
#include <arch/arm/aarch32/cortex_m/cmsis.h>

void z_arm_exc_exit(void);

#define COUNTER_MAX 0x00ffffff
#define TIMER_STOPPED 0xff000000

#define CYC_PER_TICK (sys_clock_hw_cycles_per_sec() \
		      / CONFIG_SYS_CLOCK_TICKS_PER_SEC)
#define MAX_TICKS ((COUNTER_MAX / CYC_PER_TICK) - 1)
#define MAX_CYCLES (MAX_TICKS * CYC_PER_TICK)

/* Minimum cycles in the future to try to program. Note that this is
 * NOT simply "enough cycles to get the counter read and reprogrammed
 * reliably" -- it becomes the minimum value of the LOAD register, and
 * thus reflects how much time we can reliably see expire between
 * calls to elapsed() to read the COUNTFLAG bit. So it needs to be
 * set to be larger than the maximum time the interrupt might be
 * masked. Choosing a fraction of a tick is probably a good enough
 * default, with an absolute minimum of 1k cyc.
 */
#define MIN_DELAY MAX(1024, (CYC_PER_TICK/16))
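
/* Illustrative arithmetic, with hypothetical numbers not taken from
 * any particular board: at a 48 MHz core clock with
 * CONFIG_SYS_CLOCK_TICKS_PER_SEC=100, CYC_PER_TICK is 480000, so
 * MIN_DELAY = MAX(1024, 30000) = 30000 cycles, i.e. interrupts may be
 * masked for up to 1/16 of a tick without risking a missed overflow.
 */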

#define TICKLESS (IS_ENABLED(CONFIG_TICKLESS_KERNEL))

static struct k_spinlock lock;

static u32_t last_load;

/*
 * This local variable holds the number of SysTick HW cycles elapsed;
 * it is updated in z_clock_isr() and z_clock_set_timeout().
 *
 * Note:
 * At an arbitrary point in time the "current" value of the SysTick
 * HW timer is calculated as:
 *
 * t = cycle_count + elapsed();
 */
static u32_t cycle_count;

/*
 * This local variable holds the number of elapsed SysTick HW cycles
 * that have been announced to the kernel.
 */
static u32_t announced_cycles;

/*
 * This local variable holds the number of elapsed HW cycles due to
 * SysTick timer wraps ('overflows'). It is used in the calculation
 * in the elapsed() function, as well as in the updates to
 * cycle_count.
 *
 * Note:
 * Each time cycle_count is updated with the value from overflow_cyc,
 * overflow_cyc must be reset to zero.
 */
static volatile u32_t overflow_cyc;

/* This internal function calculates the number of HW cycles that have
 * elapsed since the last time the absolute HW cycles counter was
 * updated. 'cycle_count' may be updated either by the ISR, or when we
 * re-program the SysTick.LOAD register, in z_clock_set_timeout().
 *
 * Additionally, the function updates the 'overflow_cyc' counter, which
 * holds the number of elapsed HW cycles due to (possibly) multiple
 * timer wraps (overflows).
 *
 * Prerequisites:
 * - reprogramming of SysTick.LOAD must be clearing the SysTick.COUNTER
 *   register and the 'overflow_cyc' counter.
 * - ISR must be clearing the 'overflow_cyc' counter.
 * - no more than one counter-wrap has occurred between
 *     - the timer reset or the last time the function was called
 *     - and until the current call of the function is completed.
 * - the function is invoked with interrupts disabled.
 */
static u32_t elapsed(void)
{
	u32_t val1 = SysTick->VAL;	/* A */
	u32_t ctrl = SysTick->CTRL;	/* B */
	u32_t val2 = SysTick->VAL;	/* C */

	/* SysTick behavior: The counter wraps at zero automatically,
	 * setting the COUNTFLAG field of the CTRL register when it
	 * does. Reading the control register automatically clears
	 * that field.
	 *
	 * If the count wrapped...
	 * 1) Before A then COUNTFLAG will be set and val1 >= val2
	 * 2) Between A and B then COUNTFLAG will be set and val1 < val2
	 * 3) Between B and C then COUNTFLAG will be clear and val1 < val2
	 * 4) After C we'll see it next time
	 *
	 * So the count in val2 is post-wrap and last_load needs to be
	 * added if and only if COUNTFLAG is set or val1 < val2.
	 */
	if ((ctrl & SysTick_CTRL_COUNTFLAG_Msk)
	    || (val1 < val2)) {
		overflow_cyc += last_load;

		/* We know there was a wrap, but we might not have
		 * seen it in CTRL, so clear it.
		 */
		(void)SysTick->CTRL;
	}

	return (last_load - val2) + overflow_cyc;
}
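
/* Worked example of the computation above (hypothetical values): with
 * last_load == 1000, a wrap between reads A and B might give
 * val1 == 2 (pre-wrap) and val2 == 995 (post-wrap). Since
 * val1 < val2, last_load is added to overflow_cyc, and elapsed()
 * returns (1000 - 995) + 1000 = 1005 cycles since cycle_count was
 * last updated (assuming overflow_cyc started at zero).
 */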

/* Called out of platform assembly, not hooked via IRQ_CONNECT... */
void z_clock_isr(void *arg)
{
	ARG_UNUSED(arg);
	u32_t dticks;

	/* Update overflow_cyc and clear COUNTFLAG by invoking elapsed() */
	elapsed();

	/* Increment the number of HW cycles elapsed (complete counter
	 * cycles) and announce the progress to the kernel.
	 */
	cycle_count += overflow_cyc;
	overflow_cyc = 0;

	if (TICKLESS) {
		/* In TICKLESS mode, the SysTick.LOAD is re-programmed
		 * in z_clock_set_timeout(), followed by resetting of
		 * the counter (VAL = 0).
		 *
		 * If a timer wrap occurs right when we re-program LOAD,
		 * the ISR is triggered immediately after
		 * z_clock_set_timeout() returns; in that case we shall
		 * not increment cycle_count, because the value was
		 * already updated before the LOAD re-program.
		 *
		 * We can assess if this is the case by inspecting
		 * COUNTFLAG.
		 */

		dticks = (cycle_count - announced_cycles) / CYC_PER_TICK;
		announced_cycles += dticks * CYC_PER_TICK;
		z_clock_announce(dticks);
	} else {
		z_clock_announce(1);
	}
	z_arm_exc_exit();
}
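
/* Illustrative arithmetic for the TICKLESS branch (hypothetical
 * numbers): if CYC_PER_TICK is 480000 and
 * cycle_count - announced_cycles is 1450000, then dticks is 3 and
 * announced_cycles advances by exactly 1440000, so the 10000-cycle
 * remainder is carried forward rather than lost.
 */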

int z_clock_driver_init(struct device *device)
{
	NVIC_SetPriority(SysTick_IRQn, _IRQ_PRIO_OFFSET);
	last_load = CYC_PER_TICK - 1;
	overflow_cyc = 0U;
	SysTick->LOAD = last_load;
	SysTick->VAL = 0; /* resets timer to last_load */
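	/* ENABLE starts the counter, TICKINT enables the SysTick
	 * exception, and CLKSOURCE selects the processor clock as the
	 * timebase.
	 */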
	SysTick->CTRL |= (SysTick_CTRL_ENABLE_Msk |
			  SysTick_CTRL_TICKINT_Msk |
			  SysTick_CTRL_CLKSOURCE_Msk);
	return 0;
}

void z_clock_set_timeout(s32_t ticks, bool idle)
{
	/* Fast CPUs and a 24 bit counter mean that even idle systems
	 * need to wake up multiple times per second. If the kernel
	 * allows us to miss tick announcements in idle, then shut off
	 * the counter. (Note: we can assume if idle==true that
	 * interrupts are already disabled)
	 */
	if (IS_ENABLED(CONFIG_TICKLESS_IDLE) && idle && ticks == K_FOREVER) {
		SysTick->CTRL &= ~SysTick_CTRL_ENABLE_Msk;
		last_load = TIMER_STOPPED;
		return;
	}

#if defined(CONFIG_TICKLESS_KERNEL)
	u32_t delay;

	ticks = (ticks == K_FOREVER) ? MAX_TICKS : ticks;
	ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0);

	k_spinlock_key_t key = k_spin_lock(&lock);

	u32_t pending = elapsed();

	cycle_count += pending;
	overflow_cyc = 0U;

	u32_t unannounced = cycle_count - announced_cycles;

	if ((s32_t)unannounced < 0) {
		/* We haven't announced for more than half the 32-bit
		 * wrap duration, because new timeouts keep being set
		 * before the existing one fires. Force an announce
		 * to avoid loss of a wrap event, making sure the
		 * delay is at least the minimum delay possible.
		 */
		last_load = MIN_DELAY;
	} else {
		/* Desired delay in the future */
		delay = ticks * CYC_PER_TICK;

		/* Round delay up to next tick boundary */
		delay += unannounced;
		delay = ((delay + CYC_PER_TICK - 1) / CYC_PER_TICK)
			* CYC_PER_TICK;
		delay -= unannounced;
		delay = MAX(delay, MIN_DELAY);
		if (delay > MAX_CYCLES) {
			last_load = MAX_CYCLES;
		} else {
			last_load = delay;
		}
	}
	SysTick->LOAD = last_load - 1;
	SysTick->VAL = 0; /* resets timer to last_load */

	k_spin_unlock(&lock, key);
#endif
}
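
/* Rounding example for the else-branch above (hypothetical numbers):
 * with CYC_PER_TICK == 480000, unannounced == 100000 and ticks == 2,
 * the desired delay of 960000 cycles becomes 1060000 after adding
 * unannounced, rounds up to 1440000, and drops back to 1340000 once
 * unannounced is subtracted, so the timeout fires exactly on a tick
 * boundary.
 */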

u32_t z_clock_elapsed(void)
{
	if (!TICKLESS) {
		return 0;
	}

	k_spinlock_key_t key = k_spin_lock(&lock);
	u32_t cyc = elapsed() + cycle_count - announced_cycles;

	k_spin_unlock(&lock, key);
	return cyc / CYC_PER_TICK;
}
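
/* For example (hypothetical): if two and a half ticks' worth of
 * cycles are pending, this returns 2; the half-tick remainder stays
 * unannounced until the rest of that tick elapses.
 */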

u32_t z_timer_cycle_get_32(void)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	u32_t ret = elapsed() + cycle_count;

	k_spin_unlock(&lock, key);
	return ret;
}

void z_clock_idle_exit(void)
{
	if (last_load == TIMER_STOPPED) {
		SysTick->CTRL |= SysTick_CTRL_ENABLE_Msk;
	}
}

void sys_clock_disable(void)
{
	SysTick->CTRL &= ~SysTick_CTRL_ENABLE_Msk;
}