/*
 * Copyright (c) 2014-2015 Wind River Systems, Inc.
 * Copyright (c) 2018 Synopsys, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
|
2023-08-28 13:15:43 +02:00
|
|
|
#include <zephyr/init.h>
|
2022-05-06 10:25:46 +02:00
|
|
|
#include <zephyr/drivers/timer/system_timer.h>
|
|
|
|
#include <zephyr/sys_clock.h>
|
|
|
|
#include <zephyr/spinlock.h>
|
|
|
|
#include <zephyr/arch/arc/v2/aux_regs.h>
|
2022-10-17 10:24:11 +02:00
|
|
|
#include <zephyr/irq.h>

/*
 * Note: This implementation assumes Timer0 is present. Be sure
 * to build the ARC CPU with Timer0.
 *
 * If SecureShield is present and secure firmware is configured,
 * the secure Timer0 is used.
 */

#ifdef CONFIG_ARC_SECURE_FIRMWARE

#undef _ARC_V2_TMR0_COUNT
#undef _ARC_V2_TMR0_CONTROL
#undef _ARC_V2_TMR0_LIMIT

#define _ARC_V2_TMR0_COUNT _ARC_V2_S_TMR0_COUNT
#define _ARC_V2_TMR0_CONTROL _ARC_V2_S_TMR0_CONTROL
#define _ARC_V2_TMR0_LIMIT _ARC_V2_S_TMR0_LIMIT
#define IRQ_TIMER0 DT_IRQN(DT_NODELABEL(sectimer0))

#else
#define IRQ_TIMER0 DT_IRQN(DT_NODELABEL(timer0))
#endif
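
/* IRQ_TIMER0 above is resolved from devicetree. For reference, a timer0
 * node typically looks roughly like the sketch below; the interrupt number
 * and priority shown are illustrative assumptions, as they vary per SoC:
 *
 *     timer0: timer0 {
 *             compatible = "snps,arc-timer";
 *             interrupts = <16 1>;
 *     };
 */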

#define _ARC_V2_TMR_CTRL_IE 0x1 /* interrupt enable */
#define _ARC_V2_TMR_CTRL_NH 0x2 /* count only while not halted */
#define _ARC_V2_TMR_CTRL_W 0x4 /* watchdog mode enable */
#define _ARC_V2_TMR_CTRL_IP 0x8 /* interrupt pending flag */

/* Minimum cycles in the future to try to program. */
#define MIN_DELAY 1024
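/* Note: 1024 is a conservative margin so that a LIMIT programmed "very
 * soon" is not missed because COUNT passes it before the write takes
 * effect; it is not derived from any measured hardware bound.
 */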
/* The ARC timer is 32-bit; only 31 bits are used here to avoid possible
 * overflow, e.g. 0xffffffff plus any value would wrap around.
 */
#define COUNTER_MAX 0x7fffffff
#define TIMER_STOPPED 0x0
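/* TIMER_STOPPED is stored in last_load as a sentinel when the counter is
 * shut off for idle (see sys_clock_set_timeout()).
 */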
#define CYC_PER_TICK (sys_clock_hw_cycles_per_sec() \
		      / CONFIG_SYS_CLOCK_TICKS_PER_SEC)

#define MAX_TICKS ((COUNTER_MAX / CYC_PER_TICK) - 1)
#define MAX_CYCLES (MAX_TICKS * CYC_PER_TICK)
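
/* Worked example (illustrative numbers, not from any specific SoC): with a
 * 25 MHz timer clock and CONFIG_SYS_CLOCK_TICKS_PER_SEC = 100,
 * CYC_PER_TICK = 250000, so MAX_TICKS = (0x7fffffff / 250000) - 1 = 8588
 * ticks (~85.9 s) is the longest timeout programmable in one shot.
 */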

#define TICKLESS (IS_ENABLED(CONFIG_TICKLESS_KERNEL))

#define SMP_TIMER_DRIVER (CONFIG_SMP && CONFIG_MP_MAX_NUM_CPUS > 1)
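/* When SMP_TIMER_DRIVER is in effect, the 64-bit ARConnect GFRC serves as
 * the wall clock and Timer0 merely paces the tick interrupt; on single-core
 * builds Timer0 itself is the cycle source, and its 32-bit wraps are
 * tracked through overflow_cycles below.
 */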

#if defined(CONFIG_TEST)
const int32_t z_sys_timer_irq_for_test = IRQ_TIMER0;
#endif
static struct k_spinlock lock;

#if SMP_TIMER_DRIVER
volatile static uint64_t last_time;
volatile static uint64_t start_time;

#else
static uint32_t last_load;

/*
 * This local variable holds the amount of timer cycles elapsed
 * and it is updated in timer_int_handler and sys_clock_set_timeout().
 *
 * Note:
 * At an arbitrary point in time the "current" value of the
 * HW timer is calculated as:
 *
 * t = cycle_count + elapsed();
 */
static uint32_t cycle_count;

/*
 * This local variable holds the amount of elapsed HW cycles
 * that have been announced to the kernel.
 */
static uint32_t announced_cycles;

/*
 * This local variable holds the amount of elapsed HW cycles due to
 * timer wraps ('overflows') and is used in the calculation
 * in the elapsed() function, as well as in the updates to cycle_count.
 *
 * Note:
 * Each time cycle_count is updated with the value from overflow_cycles,
 * overflow_cycles must be reset to zero.
 */
static volatile uint32_t overflow_cycles;
#endif

/**
 * @brief Get contents of Timer0 count register
 *
 * @return Current Timer0 count
 */
static ALWAYS_INLINE uint32_t timer0_count_register_get(void)
{
	return z_arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT);
}

/**
 * @brief Set Timer0 count register to the specified value
 */
static ALWAYS_INLINE void timer0_count_register_set(uint32_t value)
{
	z_arc_v2_aux_reg_write(_ARC_V2_TMR0_COUNT, value);
}

/**
 * @brief Get contents of Timer0 control register
 *
 * @return Contents of Timer0 control register.
 */
static ALWAYS_INLINE uint32_t timer0_control_register_get(void)
{
	return z_arc_v2_aux_reg_read(_ARC_V2_TMR0_CONTROL);
}

/**
 * @brief Set Timer0 control register to the specified value
 */
static ALWAYS_INLINE void timer0_control_register_set(uint32_t value)
{
	z_arc_v2_aux_reg_write(_ARC_V2_TMR0_CONTROL, value);
}

/**
 * @brief Get contents of Timer0 limit register
 *
 * @return Contents of Timer0 limit register.
 */
static ALWAYS_INLINE uint32_t timer0_limit_register_get(void)
{
	return z_arc_v2_aux_reg_read(_ARC_V2_TMR0_LIMIT);
}

/**
 * @brief Set Timer0 limit register to the specified value
 */
static ALWAYS_INLINE void timer0_limit_register_set(uint32_t count)
{
	z_arc_v2_aux_reg_write(_ARC_V2_TMR0_LIMIT, count);
}

#if !SMP_TIMER_DRIVER
/* This internal function calculates the amount of HW cycles that have
 * elapsed since the last time the absolute HW cycles counter was
 * updated. 'cycle_count' may be updated either by the ISR, or
 * in sys_clock_set_timeout().
 *
 * Additionally, the function updates the 'overflow_cycles' counter, which
 * holds the amount of elapsed HW cycles due to (possibly) multiple
 * timer wraps (overflows).
 *
 * Prerequisites:
 * - reprogramming of LIMIT must be clearing the COUNT
 * - the ISR must be clearing the 'overflow_cycles' counter.
 * - no more than one counter wrap has occurred between the timer reset
 *   (or the last call of this function) and the completion of the
 *   current call.
 * - the function is invoked with interrupts disabled.
 */
static uint32_t elapsed(void)
{
	uint32_t val, ctrl;

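	/* Read COUNT and CONTROL as a consistent pair: if COUNT wraps
	 * between the two reads, the re-read of COUNT comes back smaller
	 * than 'val' and the loop retries, so the IP flag in 'ctrl'
	 * matches the window in which 'val' was sampled.
	 */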
	do {
		val = timer0_count_register_get();
		ctrl = timer0_control_register_get();
	} while (timer0_count_register_get() < val);

	if (ctrl & _ARC_V2_TMR_CTRL_IP) {
		overflow_cycles += last_load;
		/* clear the IP bit of the control register */
		timer0_control_register_set(_ARC_V2_TMR_CTRL_NH |
					    _ARC_V2_TMR_CTRL_IE);
		/* Use a SW-triggered IRQ to remember the timer IRQ request,
		 * which may have been cleared by the operation above. When
		 * elapsed() is called from timer_int_handler, this is not
		 * needed.
		 */
		if (!z_arc_v2_irq_unit_is_in_isr() ||
		    z_arc_v2_aux_reg_read(_ARC_V2_ICAUSE) != IRQ_TIMER0) {
			z_arc_v2_aux_reg_write(_ARC_V2_AUX_IRQ_HINT,
					       IRQ_TIMER0);
		}
	}

	return val + overflow_cycles;
}
#endif

/**
 * @brief System clock periodic tick handler
 *
 * This routine handles the system clock tick interrupt. It always
 * announces one tick when TICKLESS is not enabled, or multiple ticks
 * when TICKLESS is enabled.
 */
static void timer_int_handler(const void *unused)
{
	ARG_UNUSED(unused);
	uint32_t dticks;

#if SMP_TIMER_DRIVER
	uint64_t curr_time;
	k_spinlock_key_t key;

	/* clear the IP bit of the control register */
	timer0_control_register_set(_ARC_V2_TMR_CTRL_NH |
				    _ARC_V2_TMR_CTRL_IE);
	key = k_spin_lock(&lock);
	/* gfrc is the wall clock */
	curr_time = z_arc_connect_gfrc_read();

	dticks = (curr_time - last_time) / CYC_PER_TICK;
	/* last_time should be aligned to ticks */
	last_time += dticks * CYC_PER_TICK;

	k_spin_unlock(&lock, key);

	sys_clock_announce(dticks);
#else
	/* timer_int_handler may be triggered either by the timer IRQ or
	 * by the software helper IRQ
	 */

	/* an IRQ with higher priority may call sys_clock_set_timeout(),
	 * so a lock is needed here
	 */
	uint32_t key;

	key = arch_irq_lock();

	elapsed();
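	/* elapsed() has latched any pending wrap into overflow_cycles;
	 * fold it into the absolute cycle count and reset it, per the
	 * contract documented on overflow_cycles above.
	 */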
	cycle_count += overflow_cycles;
	overflow_cycles = 0;

	arch_irq_unlock(key);

	dticks = (cycle_count - announced_cycles) / CYC_PER_TICK;
	announced_cycles += dticks * CYC_PER_TICK;
	sys_clock_announce(TICKLESS ? dticks : 1);
#endif
}

void sys_clock_set_timeout(int32_t ticks, bool idle)
{
	/* If the kernel allows us to miss tick announcements in idle,
	 * then shut off the counter. (Note: we can assume if idle==true
	 * that interrupts are already disabled)
	 */
#if SMP_TIMER_DRIVER
	/* Since the 64-bit GFRC is used as the wall clock, it is safe to
	 * ignore 'idle' here: no systick will be missed. On single-core
	 * builds, however, 'idle' cannot be ignored, because the 32-bit
	 * ARC timer would overflow before long.
	 */
	if (IS_ENABLED(CONFIG_TICKLESS_KERNEL) && ticks == K_TICKS_FOREVER) {
		timer0_control_register_set(0);
		timer0_count_register_set(0);
		timer0_limit_register_set(0);
		return;
	}

#if defined(CONFIG_TICKLESS_KERNEL)
	uint32_t delay;
	uint32_t key;

	ticks = MIN(MAX_TICKS, ticks);

	/* Desired delay in the future.
	 * Using MIN_DELAY here lets the timer IRQ fire sooner; there is
	 * no need to wait a full CYC_PER_TICK.
	 */
	delay = MAX(ticks * CYC_PER_TICK, MIN_DELAY);

	key = arch_irq_lock();

	timer0_limit_register_set(delay - 1);
	timer0_count_register_set(0);
	timer0_control_register_set(_ARC_V2_TMR_CTRL_NH |
				    _ARC_V2_TMR_CTRL_IE);

	arch_irq_unlock(key);
#endif

#else
	if (IS_ENABLED(CONFIG_TICKLESS_KERNEL) && idle && ticks == K_TICKS_FOREVER) {
		timer0_control_register_set(0);
		timer0_count_register_set(0);
		timer0_limit_register_set(0);
		last_load = TIMER_STOPPED;
		return;
	}

#if defined(CONFIG_TICKLESS_KERNEL)
	uint32_t delay;
	uint32_t unannounced;

	ticks = MIN(MAX_TICKS, (uint32_t)(MAX((int32_t)(ticks - 1), 0)));

	k_spinlock_key_t key = k_spin_lock(&lock);

	cycle_count += elapsed();
	/* Clear the counter as early as possible to minimize cycle loss;
	 * a few cycles may still slip between sampling cycle_count and
	 * clearing COUNT to 0.
	 */
	timer0_count_register_set(0);
	overflow_cycles = 0U;

	/* normal case */
	unannounced = cycle_count - announced_cycles;

	if ((int32_t)unannounced < 0) {
		/* We haven't announced for more than half the 32-bit
		 * wrap duration, because new timeouts keep being set
		 * before the existing one fires. Force an announce
		 * to avoid loss of a wrap event, making sure the
		 * delay is at least the minimum delay possible.
		 */
		last_load = MIN_DELAY;
	} else {
		/* Desired delay in the future */
		delay = ticks * CYC_PER_TICK;

		/* Round delay up to next tick boundary */
		delay += unannounced;
		delay = DIV_ROUND_UP(delay, CYC_PER_TICK) * CYC_PER_TICK;

		delay -= unannounced;
		delay = MAX(delay, MIN_DELAY);
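
		/* Worked example (illustrative numbers): with
		 * CYC_PER_TICK = 10000, unannounced = 3000 and ticks = 2,
		 * delay = 20000 + 3000 = 23000, rounded up to 30000, then
		 * minus 3000 = 27000 cycles, so the interrupt lands exactly
		 * on a tick boundary relative to announced_cycles.
		 */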

		last_load = MIN(delay, MAX_CYCLES);
	}

	timer0_limit_register_set(last_load - 1);
	timer0_control_register_set(_ARC_V2_TMR_CTRL_NH | _ARC_V2_TMR_CTRL_IE);

	k_spin_unlock(&lock, key);
#endif
#endif
}

uint32_t sys_clock_elapsed(void)
{
	if (!TICKLESS) {
		return 0;
	}

	uint32_t cyc;
	k_spinlock_key_t key = k_spin_lock(&lock);

#if SMP_TIMER_DRIVER
	cyc = (z_arc_connect_gfrc_read() - last_time);
#else
	cyc = elapsed() + cycle_count - announced_cycles;
#endif

	k_spin_unlock(&lock, key);

	return cyc / CYC_PER_TICK;
}

uint32_t sys_clock_cycle_get_32(void)
{
#if SMP_TIMER_DRIVER
	return z_arc_connect_gfrc_read() - start_time;
#else
	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t ret = elapsed() + cycle_count;

	k_spin_unlock(&lock, key);
	return ret;
#endif
}

#if SMP_TIMER_DRIVER
void smp_timer_init(void)
{
	/* set the initial state of Timer0 on each secondary core */
	timer0_control_register_set(0);
	timer0_count_register_set(0);
	timer0_limit_register_set(0);

	z_irq_priority_set(IRQ_TIMER0, CONFIG_ARCV2_TIMER_IRQ_PRIORITY, 0);
	irq_enable(IRQ_TIMER0);
}
#endif

/**
 * @brief Initialize and enable the system clock
 *
 * This routine programs the ARCv2 timer to deliver interrupts at the
 * rate specified by CYC_PER_TICK.
 *
 * @return 0
 */
static int sys_clock_driver_init(void)
{
	/* ensure that the timer will not generate interrupts */
	timer0_control_register_set(0);

#if SMP_TIMER_DRIVER
	IRQ_CONNECT(IRQ_TIMER0, CONFIG_ARCV2_TIMER_IRQ_PRIORITY,
		    timer_int_handler, NULL, 0);

	timer0_limit_register_set(CYC_PER_TICK - 1);
	last_time = z_arc_connect_gfrc_read();
	start_time = last_time;
#else
	last_load = CYC_PER_TICK;
	overflow_cycles = 0;
	announced_cycles = 0;

	IRQ_CONNECT(IRQ_TIMER0, CONFIG_ARCV2_TIMER_IRQ_PRIORITY,
		    timer_int_handler, NULL, 0);

	timer0_limit_register_set(last_load - 1);
#endif
	timer0_count_register_set(0);
	timer0_control_register_set(_ARC_V2_TMR_CTRL_NH | _ARC_V2_TMR_CTRL_IE);

	/* everything has been configured: safe to enable the interrupt */
	irq_enable(IRQ_TIMER0);

	return 0;
}

SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2,
	 CONFIG_SYSTEM_CLOCK_INIT_PRIORITY);
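
/* For reference, application code does not call this driver directly; it
 * is reached through the kernel timing APIs. A minimal sketch (standard
 * kernel calls, nothing driver-specific):
 *
 *     uint32_t t0 = k_cycle_get_32();  // served by sys_clock_cycle_get_32()
 *     k_busy_wait(10);                 // spin for 10 microseconds
 *     uint32_t dt = k_cycle_get_32() - t0;
 */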