/*
 * Copyright (c) 2018-2023 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <limits.h>

#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/sys_clock.h>
#include <zephyr/spinlock.h>
#include <zephyr/irq.h>

/* andestech,machine-timer */
#if DT_HAS_COMPAT_STATUS_OKAY(andestech_machine_timer)
#define DT_DRV_COMPAT andestech_machine_timer

#define MTIME_REG	DT_INST_REG_ADDR(0)
#define MTIMECMP_REG	(DT_INST_REG_ADDR(0) + 8)
#define TIMER_IRQN	DT_INST_IRQN(0)
/* neorv32-machine-timer */
#elif DT_HAS_COMPAT_STATUS_OKAY(neorv32_machine_timer)
#define DT_DRV_COMPAT neorv32_machine_timer

#define MTIME_REG	DT_INST_REG_ADDR(0)
#define MTIMECMP_REG	(DT_INST_REG_ADDR(0) + 8)
#define TIMER_IRQN	DT_INST_IRQN(0)
/* nuclei,systimer */
#elif DT_HAS_COMPAT_STATUS_OKAY(nuclei_systimer)
#define DT_DRV_COMPAT nuclei_systimer

#define MTIME_REG	DT_INST_REG_ADDR(0)
#define MTIMECMP_REG	(DT_INST_REG_ADDR(0) + 8)
#define TIMER_IRQN	DT_INST_IRQ_BY_IDX(0, 1, irq)
/* sifive,clint0 */
#elif DT_HAS_COMPAT_STATUS_OKAY(sifive_clint0)
#define DT_DRV_COMPAT sifive_clint0

#define MTIME_REG	(DT_INST_REG_ADDR(0) + 0xbff8U)
#define MTIMECMP_REG	(DT_INST_REG_ADDR(0) + 0x4000U)
#define TIMER_IRQN	DT_INST_IRQ_BY_IDX(0, 1, irq)
/* telink,machine-timer */
#elif DT_HAS_COMPAT_STATUS_OKAY(telink_machine_timer)
#define DT_DRV_COMPAT telink_machine_timer

#define MTIME_REG	DT_INST_REG_ADDR(0)
#define MTIMECMP_REG	(DT_INST_REG_ADDR(0) + 8)
#define TIMER_IRQN	DT_INST_IRQN(0)
/* lowrisc,machine-timer */
#elif DT_HAS_COMPAT_STATUS_OKAY(lowrisc_machine_timer)
#define DT_DRV_COMPAT lowrisc_machine_timer

#define MTIME_REG	(DT_INST_REG_ADDR(0) + 0x110)
#define MTIMECMP_REG	(DT_INST_REG_ADDR(0) + 0x118)
#define TIMER_IRQN	DT_INST_IRQN(0)
/* niosv-machine-timer */
#elif DT_HAS_COMPAT_STATUS_OKAY(niosv_machine_timer)
#define DT_DRV_COMPAT niosv_machine_timer

#define MTIMECMP_REG	DT_INST_REG_ADDR(0)
#define MTIME_REG	(DT_INST_REG_ADDR(0) + 8)
#define TIMER_IRQN	DT_INST_IRQN(0)
#endif
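
/*
 * Whichever compatible matched above, the rest of the driver relies only
 * on these three symbols: MTIME_REG (the free-running machine timer
 * counter), MTIMECMP_REG (the base address of the per-hart compare
 * registers) and TIMER_IRQN (the machine timer interrupt line).
 */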

#define CYC_PER_TICK (uint32_t)(sys_clock_hw_cycles_per_sec() \
				/ CONFIG_SYS_CLOCK_TICKS_PER_SEC)
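/* For example, a 60 MHz machine timer clock with a 10 kHz tick rate
 * (CONFIG_SYS_CLOCK_TICKS_PER_SEC=10000) gives CYC_PER_TICK == 6000.
 */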

/* the unsigned long cast limits divisions to native CPU register width */
#define cycle_diff_t unsigned long

static struct k_spinlock lock;
static uint64_t last_count;
static uint64_t last_ticks;
static uint32_t last_elapsed;

#if defined(CONFIG_TEST)
const int32_t z_sys_timer_irq_for_test = TIMER_IRQN;
#endif
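
/*
 * Each hart has its own 8-byte MTIMECMP register, laid out contiguously
 * from MTIMECMP_REG, hence the arch_proc_id() * 8 offset.
 */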
static uintptr_t get_hart_mtimecmp(void)
{
	return MTIMECMP_REG + (arch_proc_id() * 8);
}
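
/* Program this hart's MTIMECMP register with an absolute cycle count. */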
static void set_mtimecmp(uint64_t time)
{
#ifdef CONFIG_64BIT
	*(volatile uint64_t *)get_hart_mtimecmp() = time;
#else
	volatile uint32_t *r = (uint32_t *)get_hart_mtimecmp();

	/* Per spec, the RISC-V MTIME/MTIMECMP registers are 64 bit,
	 * but are NOT internally latched for multiword transfers. So
	 * we have to be careful about sequencing to avoid triggering
	 * spurious interrupts: always set the high word to a max
	 * value first.
	 */
	r[1] = 0xffffffff;
	r[0] = (uint32_t)time;
	r[1] = (uint32_t)(time >> 32);
#endif
}
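
/* Read the full 64-bit MTIME counter, coping with 32-bit-only accesses. */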
static uint64_t mtime(void)
{
#ifdef CONFIG_64BIT
	return *(volatile uint64_t *)MTIME_REG;
#else
	volatile uint32_t *r = (uint32_t *)MTIME_REG;
	uint32_t lo, hi;

	/* Likewise, must guard against rollover when reading */
	do {
		hi = r[1];
		lo = r[0];
	} while (r[1] != hi);

	return (((uint64_t)hi) << 32) | lo;
#endif
}
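
/*
 * Machine timer interrupt handler: convert the cycles elapsed since
 * last_count into whole ticks, advance last_count by exactly that many
 * ticks' worth of cycles so no fraction of a tick is lost, and announce
 * the ticks to the kernel.
 */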
static void timer_isr(const void *arg)
{
	ARG_UNUSED(arg);

	k_spinlock_key_t key = k_spin_lock(&lock);

	uint64_t now = mtime();
	uint64_t dcycles = now - last_count;
	uint32_t dticks = (cycle_diff_t)dcycles / CYC_PER_TICK;

	last_count += (cycle_diff_t)dticks * CYC_PER_TICK;
	last_ticks += dticks;
	last_elapsed = 0;

	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		uint64_t next = last_count + CYC_PER_TICK;

		set_mtimecmp(next);
	}

	k_spin_unlock(&lock, key);
	sys_clock_announce(dticks);
}
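
/*
 * Program the next timeout. The compare value is computed from the last
 * announced tick boundary (last_ticks + last_elapsed) rather than from
 * the current time, so IRQ servicing latency does not accumulate as drift.
 */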
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
	ARG_UNUSED(idle);

	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		return;
	}

	if (ticks == K_TICKS_FOREVER) {
		set_mtimecmp(UINT64_MAX);
		return;
	}

	/*
	 * Clamp the max period length to a number of cycles that can fit
	 * in half the range of a cycle_diff_t for native width divisions
	 * to be usable elsewhere. Also clamp it to half the range of an
	 * int32_t as this is the type used for elapsed tick announcements.
	 * The half range gives us extra room to cope with the unavoidable IRQ
	 * servicing latency. The compiler should optimize away the least
	 * restrictive of those tests automatically.
	 */
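	/*
	 * For instance, on a 32-bit build (cycle_diff_t is a 32-bit
	 * unsigned long) with CYC_PER_TICK == 6000, the first clamp caps
	 * ticks at 0xffffffff / 2 / 6000 == 357913, i.e. about 35 seconds
	 * at a 10 kHz tick rate.
	 */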
	ticks = CLAMP(ticks, 0, (cycle_diff_t)-1 / 2 / CYC_PER_TICK);
	ticks = CLAMP(ticks, 0, INT32_MAX / 2);

	k_spinlock_key_t key = k_spin_lock(&lock);
	uint64_t cyc = (last_ticks + last_elapsed + ticks) * CYC_PER_TICK;

	set_mtimecmp(cyc);
	k_spin_unlock(&lock, key);
}
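
/*
 * Return the number of whole ticks elapsed since the last tick
 * announcement, and cache it in last_elapsed so sys_clock_set_timeout()
 * measures its delay from the same reference point.
 */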
uint32_t sys_clock_elapsed(void)
{
	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		return 0;
	}

	k_spinlock_key_t key = k_spin_lock(&lock);
	uint64_t now = mtime();
	uint64_t dcycles = now - last_count;
	uint32_t dticks = (cycle_diff_t)dcycles / CYC_PER_TICK;

	last_elapsed = dticks;
	k_spin_unlock(&lock, key);
	return dticks;
}
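
/*
 * MTIME may be clocked at the system clock divided by a power of two
 * (CONFIG_RISCV_MACHINE_TIMER_SYSTEM_CLOCK_DIVIDER); the left shift
 * scales the raw count back up to system clock cycles for the kernel's
 * cycle counters.
 */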
uint32_t sys_clock_cycle_get_32(void)
{
	return ((uint32_t)mtime()) << CONFIG_RISCV_MACHINE_TIMER_SYSTEM_CLOCK_DIVIDER;
}

uint64_t sys_clock_cycle_get_64(void)
{
	return mtime() << CONFIG_RISCV_MACHINE_TIMER_SYSTEM_CLOCK_DIVIDER;
}
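
/*
 * Connect the machine timer IRQ, then invoke the handler once by hand to
 * prime the tick accounting (last_count/last_ticks) before enabling the
 * interrupt.
 */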
static int sys_clock_driver_init(const struct device *dev)
{
	ARG_UNUSED(dev);

	IRQ_CONNECT(TIMER_IRQN, 0, timer_isr, NULL, 0);
	timer_isr(NULL); /* prime it */
	irq_enable(TIMER_IRQN);
	return 0;
}
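
/* Secondary harts arm their own MTIMECMP and enable their timer IRQ here. */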
#ifdef CONFIG_SMP
void smp_timer_init(void)
{
	set_mtimecmp(last_count + CYC_PER_TICK);
	irq_enable(TIMER_IRQN);
}
#endif

SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2,
	 CONFIG_SYSTEM_CLOCK_INIT_PRIORITY);