/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <drivers/system_timer.h>
#include <sys_clock.h>
#include <spinlock.h>
#include <arch/arm/cortex_m/cmsis.h>

void z_ExcExit(void);

#define COUNTER_MAX 0x00ffffff
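
/* Sentinel stored in last_load while the timer is disabled; it lies
 * outside the 24-bit counter range, so it can never collide with a
 * real LOAD value.
 */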
#define TIMER_STOPPED 0xff000000

#define CYC_PER_TICK (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC	\
		      / CONFIG_SYS_CLOCK_TICKS_PER_SEC)
#define MAX_TICKS ((COUNTER_MAX / CYC_PER_TICK) - 1)
#define MAX_CYCLES (MAX_TICKS * CYC_PER_TICK)
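
/* Worked example with hypothetical numbers: a 72 MHz core clock and
 * CONFIG_SYS_CLOCK_TICKS_PER_SEC=100 give CYC_PER_TICK = 720000, so
 * MAX_TICKS = (0xffffff / 720000) - 1 = 22 and the 24-bit counter
 * must be serviced at least every ~220 ms even when idle.
 */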

/* Minimum cycles in the future to try to program. Note that this is
 * NOT simply "enough cycles to get the counter read and reprogrammed
 * reliably" -- it becomes the minimum value of the LOAD register, and
 * thus reflects how much time we can reliably see expire between
 * calls to elapsed() to read the COUNTFLAG bit. So it needs to be
 * set to be larger than the maximum time the interrupt might be
 * masked. Choosing a fraction of a tick is probably a good enough
 * default, with an absolute minimum of 1k cyc.
 */
#define MIN_DELAY MAX(1024, (CYC_PER_TICK/16))
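
/* With the same hypothetical 72 MHz / 100 Hz configuration,
 * CYC_PER_TICK/16 = 45000 cycles (~625 us) wins over the 1024-cycle
 * floor; the floor only matters once CYC_PER_TICK drops below 16384,
 * i.e. at tick rates above roughly 4.4 kHz on that clock.
 */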

#define TICKLESS (IS_ENABLED(CONFIG_TICKLESS_KERNEL) &&		\
		  !IS_ENABLED(CONFIG_QEMU_TICKLESS_WORKAROUND))

/* VAL value above which we assume that a subsequent COUNTFLAG
 * overflow seen in CTRL is real and not an artifact of wraparound
 * timing.
 */
#define VAL_ABOUT_TO_WRAP 8
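
/* Spinlock guarding the cycle-accounting state below */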
static struct k_spinlock lock;
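
/* Value most recently programmed into SysTick->LOAD, i.e. the length
 * of the current counting cycle (TIMER_STOPPED while disabled)
 */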
static u32_t last_load;
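
/* Running total of hardware cycles accounted for so far; advanced by
 * last_load in the ISR and by elapsed() when reprogramming
 */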
static u32_t cycle_count;
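
/* Portion of cycle_count already reported to z_clock_announce();
 * always a whole number of ticks
 */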
static u32_t announced_cycles;
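
/* Cycles contributed by COUNTFLAG wraps observed in elapsed() since
 * cycle_count was last brought up to date; volatile because the ISR
 * resets it
 */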
static volatile u32_t overflow_cyc;

static u32_t elapsed(void)
{
	u32_t val, ctrl1, ctrl2;

	/* SysTick is infuriatingly racy. The counter wraps at zero
	 * automatically, setting a 1 in the COUNTFLAG bit of the CTRL
	 * register when it does. But reading the control register
	 * automatically resets that bit, so we need to save it for
	 * future calls. And ordering is critical and race-prone: if
	 * we read CTRL first, then it is possible for VAL to wrap
	 * after that read but before we read VAL and we'll miss the
	 * overflow. If we read VAL first, then it can wrap after we
	 * read it and we'll see an "extra" overflow in CTRL. And we
	 * want to handle multiple overflows, so we effectively must
	 * read CTRL first, otherwise there will be no way to detect
	 * the double-overflow if called at the end of a cycle. There
	 * is no safe algorithm here, so we split the difference by
	 * reading CTRL twice, suppressing the second overflow bit if
	 * VAL was "about to overflow".
	 */
	ctrl1 = SysTick->CTRL;
	val = SysTick->VAL & COUNTER_MAX;
	ctrl2 = SysTick->CTRL;

	overflow_cyc += (ctrl1 & SysTick_CTRL_COUNTFLAG_Msk) ? last_load : 0;
	if (val > VAL_ABOUT_TO_WRAP) {
		int wrap = ctrl2 & SysTick_CTRL_COUNTFLAG_Msk;

		overflow_cyc += (wrap != 0) ? last_load : 0;
	}
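
	/* VAL counts down from last_load, so (last_load - val) is the
	 * progress through the current counting cycle, while
	 * overflow_cyc holds the cycles from wraps already seen.
	 */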
	return (last_load - val) + overflow_cyc;
}

/* Called out of platform assembly, not hooked via IRQ_CONNECT... */
void z_clock_isr(void *arg)
{
	ARG_UNUSED(arg);
	u32_t dticks;

	cycle_count += last_load;
	dticks = (cycle_count - announced_cycles) / CYC_PER_TICK;
	announced_cycles += dticks * CYC_PER_TICK;
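
	/* Reading SysTick->CTRL is what clears COUNTFLAG (a read side
	 * effect in hardware); the value itself is thrown away by the
	 * assignment that follows.
	 */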
	overflow_cyc = SysTick->CTRL;	/* Reset overflow flag */
	overflow_cyc = 0U;

	z_clock_announce(TICKLESS ? dticks : 1);
	z_ExcExit();
}

int z_clock_driver_init(struct device *device)
{
	NVIC_SetPriority(SysTick_IRQn, _IRQ_PRIO_OFFSET);
	last_load = CYC_PER_TICK - 1;
	overflow_cyc = 0U;
	SysTick->LOAD = last_load;
	SysTick->VAL = 0; /* resets timer to last_load */
	SysTick->CTRL |= (SysTick_CTRL_ENABLE_Msk |
			  SysTick_CTRL_TICKINT_Msk |
			  SysTick_CTRL_CLKSOURCE_Msk);

	return 0;
}

void z_clock_set_timeout(s32_t ticks, bool idle)
{
	/* Fast CPUs and a 24 bit counter mean that even idle systems
	 * need to wake up multiple times per second. If the kernel
	 * allows us to miss tick announcements in idle, then shut off
	 * the counter. (Note: we can assume if idle==true that
	 * interrupts are already disabled)
	 */
	if (IS_ENABLED(CONFIG_TICKLESS_IDLE) && idle && ticks == K_FOREVER) {
		SysTick->CTRL &= ~SysTick_CTRL_ENABLE_Msk;
		last_load = TIMER_STOPPED;
		return;
	}

#if defined(CONFIG_TICKLESS_KERNEL) && !defined(CONFIG_QEMU_TICKLESS_WORKAROUND)
	u32_t delay;

	ticks = MIN(MAX_TICKS, MAX(ticks - 1, 0));

	/* Desired delay in the future */
	delay = (ticks == 0) ? MIN_DELAY : ticks * CYC_PER_TICK;

	k_spinlock_key_t key = k_spin_lock(&lock);

	cycle_count += elapsed();

	/* Round delay up to next tick boundary */
	delay = delay + (cycle_count - announced_cycles);
	delay = ((delay + CYC_PER_TICK - 1) / CYC_PER_TICK) * CYC_PER_TICK;
	last_load = delay - (cycle_count - announced_cycles);
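
	/* Worked example with hypothetical numbers: if CYC_PER_TICK
	 * is 1000, announced_cycles trails cycle_count by 400 and the
	 * desired delay is 1000, then delay becomes 1400, rounds up
	 * to 2000, and last_load = 2000 - 400 = 1600, so the next
	 * interrupt lands exactly on a tick boundary.
	 */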

	overflow_cyc = 0U;
	SysTick->LOAD = last_load;
	SysTick->VAL = 0; /* resets timer to last_load */

	k_spin_unlock(&lock, key);
#endif
}

u32_t z_clock_elapsed(void)
{
	if (!TICKLESS) {
		return 0;
	}

	k_spinlock_key_t key = k_spin_lock(&lock);
	u32_t cyc = elapsed() + cycle_count - announced_cycles;

	k_spin_unlock(&lock, key);
	return cyc / CYC_PER_TICK;
}

u32_t z_timer_cycle_get_32(void)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	u32_t ret = elapsed() + cycle_count;

	k_spin_unlock(&lock, key);
	return ret;
}

void z_clock_idle_exit(void)
{
	if (last_load == TIMER_STOPPED) {
		SysTick->CTRL |= SysTick_CTRL_ENABLE_Msk;
	}
}

void sys_clock_disable(void)
{
	SysTick->CTRL &= ~SysTick_CTRL_ENABLE_Msk;
}