/*
 * Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <drivers/timer/arm_arch_timer.h>
#include <drivers/timer/system_timer.h>
#include <sys_clock.h>
#include <spinlock.h>
#include <arch/cpu.h>

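/* Timer cycles per kernel tick, derived from the configured system clock
 * frequency and tick rate.
 */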
#define CYC_PER_TICK ((uint64_t)sys_clock_hw_cycles_per_sec() \
		      / (uint64_t)CONFIG_SYS_CLOCK_TICKS_PER_SEC)
#define MAX_TICKS INT32_MAX
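/* Minimum number of cycles in the future a compare value must lie:
 * values programmed closer than this risk being passed by the counter
 * before the write takes effect.
 */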
#define MIN_DELAY (1000)
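
/* Serializes access to last_cycle and the timer compare register */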
static struct k_spinlock lock;
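/* Cycle count at the last tick boundary announced to the kernel */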
static uint64_t last_cycle;
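
/*
 * Compare interrupt handler: announces elapsed ticks to the kernel and,
 * in non-tickless mode, programs the compare register for the next tick.
 */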
static void arm_arch_timer_compare_isr(const void *arg)
{
	ARG_UNUSED(arg);

	k_spinlock_key_t key = k_spin_lock(&lock);

#ifdef CONFIG_ARM_ARCH_TIMER_ERRATUM_740657
	/*
	 * Workaround required for Cortex-A9 MPCore erratum 740657;
	 * see ARM Cortex-A9 processors Software Developers Errata Notice,
	 * ARM document ID032315.
	 */

	if (!arm_arch_timer_get_int_status()) {
		/*
		 * If the event flag is not set, this is a spurious interrupt.
		 * DO NOT modify the compare register's value, DO NOT announce
		 * elapsed ticks!
		 */
		k_spin_unlock(&lock, key);
		return;
	}
#endif /* CONFIG_ARM_ARCH_TIMER_ERRATUM_740657 */

	uint64_t curr_cycle = arm_arch_timer_count();
	uint32_t delta_ticks = (uint32_t)((curr_cycle - last_cycle) / CYC_PER_TICK);
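
	/* Advance last_cycle in whole ticks only: the sub-tick remainder
	 * carries over and is accounted for at the next announcement.
	 */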
	last_cycle += delta_ticks * CYC_PER_TICK;

	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		uint64_t next_cycle = last_cycle + CYC_PER_TICK;

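		/* If the next tick boundary is already closer than MIN_DELAY
		 * cycles, it cannot be programmed reliably; skip one tick
		 * ahead.
		 */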
		if ((uint64_t)(next_cycle - curr_cycle) < MIN_DELAY) {
			next_cycle += CYC_PER_TICK;
		}
		arm_arch_timer_set_compare(next_cycle);
		arm_arch_timer_set_irq_mask(false);
	} else {
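		/* Tickless: the next timeout is programmed on demand by
		 * sys_clock_set_timeout(); keep the IRQ masked until then.
		 */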
		arm_arch_timer_set_irq_mask(true);
#ifdef CONFIG_ARM_ARCH_TIMER_ERRATUM_740657
		/*
		 * In tickless mode, the compare register is normally not
		 * updated from within the ISR. Yet, to work around the timer's
		 * erratum, a new value *must* be written while the interrupt
		 * is being processed, before the interrupt is acknowledged
		 * to the handling interrupt controller.
		 */
		arm_arch_timer_set_compare(~0ULL);
	}

	/*
	 * Clear the event flag so that if the erratum strikes (the timer's
	 * vector will still be indicated as pending by the GIC's pending
	 * register after this ISR has been executed), the error is detected
	 * by the check performed upon entry of the ISR: the event flag is
	 * not set, therefore no actual hardware interrupt has occurred.
	 */
	arm_arch_timer_clear_int_status();
#else
	}
#endif /* CONFIG_ARM_ARCH_TIMER_ERRATUM_740657 */

	k_spin_unlock(&lock, key);

	sys_clock_announce(delta_ticks);
}
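
/*
 * One-time driver initialization: hook up the compare ISR, start the
 * timer, and program the first tick's compare value.
 */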
int sys_clock_driver_init(const struct device *dev)
{
	ARG_UNUSED(dev);

	IRQ_CONNECT(ARM_ARCH_TIMER_IRQ, ARM_ARCH_TIMER_PRIO,
		    arm_arch_timer_compare_isr, NULL, ARM_ARCH_TIMER_FLAGS);
	arm_arch_timer_init();
	arm_arch_timer_set_compare(arm_arch_timer_count() + CYC_PER_TICK);
	arm_arch_timer_enable(true);
	irq_enable(ARM_ARCH_TIMER_IRQ);
	arm_arch_timer_set_irq_mask(false);

	return 0;
}
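
/*
 * Tickless only: program the compare register so the next interrupt
 * fires after the requested number of ticks.
 */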
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
#if defined(CONFIG_TICKLESS_KERNEL)

	if (ticks == K_TICKS_FOREVER && idle) {
		return;
	}

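	/* Clamp the request to the supported range; the ticks - 1 offsets
	 * the round-up to the next tick boundary applied below.
	 */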
	ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS :
		MIN(MAX_TICKS, MAX(ticks - 1, 0));

	k_spinlock_key_t key = k_spin_lock(&lock);
	uint64_t curr_cycle = arm_arch_timer_count();
	uint64_t req_cycle = ticks * CYC_PER_TICK;

	/* Round up to next tick boundary */
	req_cycle += (curr_cycle - last_cycle) + (CYC_PER_TICK - 1);
	req_cycle = (req_cycle / CYC_PER_TICK) * CYC_PER_TICK;

	if ((req_cycle + last_cycle - curr_cycle) < MIN_DELAY) {
		req_cycle += CYC_PER_TICK;
	}

	arm_arch_timer_set_compare(req_cycle + last_cycle);
	arm_arch_timer_set_irq_mask(false);
	k_spin_unlock(&lock, key);

#else /* CONFIG_TICKLESS_KERNEL */
	ARG_UNUSED(ticks);
	ARG_UNUSED(idle);
#endif
}
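
/* Ticks fully elapsed since the last sys_clock_announce() call */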
uint32_t sys_clock_elapsed(void)
{
	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		return 0;
	}

	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t ret = (uint32_t)((arm_arch_timer_count() - last_cycle)
				  / CYC_PER_TICK);

	k_spin_unlock(&lock, key);
	return ret;
}
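
/* Free-running counter value truncated to 32 bits, for the kernel's
 * cycle API.
 */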
uint32_t sys_clock_cycle_get_32(void)
{
	return (uint32_t)arm_arch_timer_count();
}

#ifdef CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT
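/*
 * Busy-wait loop built on the timer's free-running counter; overrides
 * the default k_busy_wait() implementation.
 */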
void arch_busy_wait(uint32_t usec_to_wait)
{
	if (usec_to_wait == 0) {
		return;
	}

	uint64_t start_cycles = arm_arch_timer_count();

	uint64_t cycles_to_wait = sys_clock_hw_cycles_per_sec() / USEC_PER_SEC * usec_to_wait;

	for (;;) {
		uint64_t current_cycles = arm_arch_timer_count();

		/* the unsigned subtraction handles a counter rollover */
		if ((current_cycles - start_cycles) >= cycles_to_wait) {
			break;
		}
	}
}
#endif

#ifdef CONFIG_SMP
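/* Per-CPU timer bring-up for secondary cores started under SMP */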
void smp_timer_init(void)
{
	/*
	 * Set the initial state of the per-core timer on each secondary core.
	 */
	arm_arch_timer_set_compare(arm_arch_timer_count() + CYC_PER_TICK);
	arm_arch_timer_enable(true);
	irq_enable(ARM_ARCH_TIMER_IRQ);
	arm_arch_timer_set_irq_mask(false);
}
#endif