4dcfb5531c
The goal of this patch is to replace the 'void *' parameter with
'const struct device *' where the ISR actually uses a device instance,
or with 'const void *' otherwise, on all relevant ISRs.

This avoids not-so-nice const qualifier tweaks once device instances
become constant.

Note that only ISRs passed to IRQ_CONNECT are of interest here.

In order to do so, the script fix_isr.py below is necessary:

#!/usr/bin/env python3
from pathlib import Path
import subprocess
import pickle
import mmap
import sys
import re
import os

cocci_template = """
@r_fix_isr_0
@
type ret_type;
identifier P;
identifier D;
@@
-ret_type <!fn!>(void *P)
+ret_type <!fn!>(const struct device *P)
{
 ...
(
 const struct device *D = (const struct device *)P;
|
 const struct device *D = P;
)
 ...
}

@r_fix_isr_1
@
type ret_type;
identifier P;
identifier D;
@@
-ret_type <!fn!>(void *P)
+ret_type <!fn!>(const struct device *P)
{
 ...
 const struct device *D;
 ...
(
 D = (const struct device *)P;
|
 D = P;
)
 ...
}

@r_fix_isr_2
@
type ret_type;
identifier A;
@@
-ret_type <!fn!>(void *A)
+ret_type <!fn!>(const void *A)
{
 ...
}

@r_fix_isr_3
@
const struct device *D;
@@
-<!fn!>((void *)D);
+<!fn!>(D);

@r_fix_isr_4
@
type ret_type;
identifier D;
identifier P;
@@
-ret_type <!fn!>(const struct device *P)
+ret_type <!fn!>(const struct device *D)
{
 ...
(
-const struct device *D = (const struct device *)P;
|
-const struct device *D = P;
)
 ...
}

@r_fix_isr_5
@
type ret_type;
identifier D;
identifier P;
@@
-ret_type <!fn!>(const struct device *P)
+ret_type <!fn!>(const struct device *D)
{
 ...
-const struct device *D;
 ...
(
-D = (const struct device *)P;
|
-D = P;
)
 ...
}
"""

def find_isr(fn):
    db = []
    data = None
    start = 0

    try:
        with open(fn, 'r+') as f:
            # str() of a bytes object: newlines/tabs survive as literal
            # two-character '\n'/'\t' sequences, hence the strip set below.
            data = str(mmap.mmap(f.fileno(), 0).read())
    except Exception:
        return db

    while True:
        isr = ""
        irq = data.find('IRQ_CONNECT', start)
        while irq > -1:
            p = 1
            arg = 1
            p_o = data.find('(', irq)
            if p_o < 0:
                irq = -1
                break

            pos = p_o + 1
            while p > 0:
                if data[pos] == ')':
                    p -= 1
                elif data[pos] == '(':
                    p += 1
                elif data[pos] == ',' and p == 1:
                    arg += 1

                # Collect the characters of the third argument (the ISR).
                if arg == 3:
                    isr += data[pos]

                pos += 1

            isr = isr.strip(',\\n\\t ')
            if isr not in db and len(isr) > 0:
                db.append(isr)

            start = pos
            break

        if irq < 0:
            break

    return db

def patch_isr(fn, isr_list):
    if len(isr_list) <= 0:
        return

    for isr in isr_list:
        tmplt = cocci_template.replace('<!fn!>', isr)
        with open('/tmp/isr_fix.cocci', 'w') as f:
            f.write(tmplt)

        cmd = ['spatch', '--sp-file', '/tmp/isr_fix.cocci', '--in-place', fn]
        subprocess.run(cmd)

def process_files(path):
    if path.is_file() and path.suffix in ['.h', '.c']:
        p = str(path.parent) + '/' + path.name
        isr_list = find_isr(p)
        patch_isr(p, isr_list)
    elif path.is_dir():
        for p in path.iterdir():
            process_files(p)

if len(sys.argv) < 2:
    print("You need to provide a dir/file path")
    sys.exit(1)

process_files(Path(sys.argv[1]))

It is run as:

./fix_isr.py <zephyr root directory>

Finally, some files needed manual fixes.

Fixes #27399

Signed-off-by: Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>
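To make the transformation concrete, here is the kind of rewrite the Coccinelle rules above perform, on a hypothetical driver (names invented for illustration, not code from the patch):

/* Before: the ISR takes 'void *' and casts it back to the device. */
static void my_uart_isr(void *arg)
{
	const struct device *dev = (const struct device *)arg;

	/* ... service the interrupt through dev ... */
}

/* After r_fix_isr_0 (parameter retyped) and r_fix_isr_4 (redundant
 * local dropped, parameter renamed):
 */
static void my_uart_isr(const struct device *dev)
{
	/* ... service the interrupt through dev ... */
}

/* An ISR that never touches its argument matches r_fix_isr_2 instead: */
static void my_timer_isr(const void *arg);

The RISC-V machine timer driver below shows the r_fix_isr_2 form in its final state.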
/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <drivers/timer/system_timer.h>
#include <sys_clock.h>
#include <spinlock.h>
#include <soc.h>

#define CYC_PER_TICK ((uint32_t)((uint64_t)sys_clock_hw_cycles_per_sec() \
			/ (uint64_t)CONFIG_SYS_CLOCK_TICKS_PER_SEC))
#define MAX_CYC 0xffffffffu
#define MAX_TICKS ((MAX_CYC - CYC_PER_TICK) / CYC_PER_TICK)
#define MIN_DELAY 1000

#define TICKLESS IS_ENABLED(CONFIG_TICKLESS_KERNEL)
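/* Worked example with assumed values (illustrative, not from any
 * particular SoC config): with a 10 MHz mtime clock and
 * CONFIG_SYS_CLOCK_TICKS_PER_SEC=100,
 *
 *	CYC_PER_TICK = 10000000 / 100                 = 100000 cycles
 *	MAX_TICKS    = (0xffffffff - 100000) / 100000 = 42948 ticks
 *	MIN_DELAY    = 1000 cycles = 100us of headroom for reprogramming
 *	               the comparator before the deadline passes
 */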
static struct k_spinlock lock;
static uint64_t last_count;
static void set_mtimecmp(uint64_t time)
{
#ifdef CONFIG_64BIT
	*(volatile uint64_t *)RISCV_MTIMECMP_BASE = time;
#else
	volatile uint32_t *r = (uint32_t *)RISCV_MTIMECMP_BASE;

	/* Per spec, the RISC-V MTIME/MTIMECMP registers are 64 bit,
	 * but are NOT internally latched for multiword transfers. So
	 * we have to be careful about sequencing to avoid triggering
	 * spurious interrupts: always set the high word to a max
	 * value first.
	 */
	r[1] = 0xffffffff;
	r[0] = (uint32_t)time;
	r[1] = (uint32_t)(time >> 32);
#endif
}
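/* To see why the high word is parked at 0xffffffff first, consider a
 * naive store order (a sketch of the bug, not code from this driver).
 * Say mtime is around 0x00000001_80000000 and the new deadline is
 * 0x00000002_10000000:
 *
 *	r[0] = (uint32_t)time;          mtimecmp == 0x00000001_10000000
 *	r[1] = (uint32_t)(time >> 32);  mtimecmp == 0x00000002_10000000
 *
 * The intermediate value 0x00000001_10000000 is already in the past,
 * so the interrupt can fire spuriously between the two stores. Raising
 * r[1] to the maximum first keeps every intermediate value safely in
 * the future.
 */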
static uint64_t mtime(void)
{
#ifdef CONFIG_64BIT
	return *(volatile uint64_t *)RISCV_MTIME_BASE;
#else
	volatile uint32_t *r = (uint32_t *)RISCV_MTIME_BASE;
	uint32_t lo, hi;

	/* Likewise, must guard against rollover when reading */
	do {
		hi = r[1];
		lo = r[0];
	} while (r[1] != hi);

	return (((uint64_t)hi) << 32) | lo;
#endif
}
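/* An example of the race the loop above guards against (illustrative
 * numbers): suppose the counter sits at 0x00000001_ffffffff.
 *
 *	hi = r[1];   reads 0x00000001
 *	             counter carries to 0x00000002_00000000
 *	lo = r[0];   reads 0x00000000
 *
 * Stitching those together would return 0x00000001_00000000, nearly
 * 2^32 cycles in the past. Re-reading r[1] detects the carry and
 * retries the pair of loads.
 */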
static void timer_isr(const void *arg)
{
	ARG_UNUSED(arg);

	k_spinlock_key_t key = k_spin_lock(&lock);
	uint64_t now = mtime();
	uint32_t dticks = (uint32_t)((now - last_count) / CYC_PER_TICK);

	last_count += dticks * CYC_PER_TICK;

	if (!TICKLESS) {
		uint64_t next = last_count + CYC_PER_TICK;

		if ((int64_t)(next - now) < MIN_DELAY) {
			next += CYC_PER_TICK;
		}
		set_mtimecmp(next);
	}

	k_spin_unlock(&lock, key);
	z_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ? dticks : 1);
}
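/* Worked example, reusing the assumed CYC_PER_TICK of 100000 from
 * above: if the ISR runs late and now - last_count == 250000, then
 * dticks == 2 and last_count advances by exactly 200000 cycles. The
 * 50000-cycle remainder stays accounted for in the next delta rather
 * than being silently dropped.
 */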
int z_clock_driver_init(const struct device *device)
{
	ARG_UNUSED(device);

	IRQ_CONNECT(RISCV_MACHINE_TIMER_IRQ, 0, timer_isr, NULL, 0);
	last_count = mtime();
	set_mtimecmp(last_count + CYC_PER_TICK);
	irq_enable(RISCV_MACHINE_TIMER_IRQ);
	return 0;
}
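/* This file is the 'const void *' case from the patch described above:
 * timer_isr() never dereferences its argument, so IRQ_CONNECT() passes
 * NULL. The fix_isr.py helper finds this IRQ_CONNECT() call, extracts
 * its third argument ("timer_isr") by counting commas at parenthesis
 * depth 1, and hands it to spatch; rule r_fix_isr_2 is what gave
 * timer_isr() the 'const void *' signature seen above.
 */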
void z_clock_set_timeout(int32_t ticks, bool idle)
{
	ARG_UNUSED(idle);

#if defined(CONFIG_TICKLESS_KERNEL)
	/* RISCV has no idle handler yet, so if we try to spin on the
	 * logic below to reset the comparator, we'll always bump it
	 * forward to the "next tick" due to MIN_DELAY handling and
	 * the interrupt will never fire! Just rely on the fact that
	 * the OS gave us the proper timeout already.
	 */
	if (idle) {
		return;
	}

	ticks = ticks == K_TICKS_FOREVER ? MAX_TICKS : ticks;
	ticks = MAX(MIN(ticks - 1, (int32_t)MAX_TICKS), 0);

	k_spinlock_key_t key = k_spin_lock(&lock);
	uint64_t now = mtime();
	uint32_t adj, cyc = ticks * CYC_PER_TICK;

	/* Round up to next tick boundary. */
	adj = (uint32_t)(now - last_count) + (CYC_PER_TICK - 1);
	if (cyc <= MAX_CYC - adj) {
		cyc += adj;
	} else {
		cyc = MAX_CYC;
	}
	cyc = (cyc / CYC_PER_TICK) * CYC_PER_TICK;

	if ((int32_t)(cyc + last_count - now) < MIN_DELAY) {
		cyc += CYC_PER_TICK;
	}

	set_mtimecmp(cyc + last_count);
	k_spin_unlock(&lock, key);
#endif
}
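/* Worked example of the rounding above (assumed CYC_PER_TICK = 100000):
 * the kernel asks for ticks = 2 when we are 30000 cycles past the last
 * tick boundary (now - last_count == 30000).
 *
 *	ticks = MAX(MIN(2 - 1, MAX_TICKS), 0)  = 1
 *	cyc   = 1 * 100000                     = 100000
 *	adj   = 30000 + 99999                  = 129999
 *	cyc  += adj                            = 229999
 *	cyc   = (229999 / 100000) * 100000     = 200000
 *
 * mtimecmp = last_count + 200000 = now + 170000, i.e. exactly the
 * second tick boundary after 'now', and 170000 >= MIN_DELAY so no
 * extra bump is needed.
 */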
uint32_t z_clock_elapsed(void)
{
	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		return 0;
	}

	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t ret = ((uint32_t)mtime() - (uint32_t)last_count) / CYC_PER_TICK;

	k_spin_unlock(&lock, key);
	return ret;
}
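/* The casts above rely on unsigned wraparound: even if the low word of
 * mtime has rolled over since last_count was sampled, e.g.
 *
 *	(uint32_t)mtime()    == 0x00001000
 *	(uint32_t)last_count == 0xfffff000
 *
 * the 32-bit subtraction yields 0x2000, the true cycle delta, provided
 * fewer than 2^32 cycles (MAX_TICKS worth) have elapsed.
 */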
uint32_t z_timer_cycle_get_32(void)
{
	return (uint32_t)mtime();
}