/*
 * Copyright (c) 2019 Intel Corporation
 * Copyright (c) 2019 Microchip Technology Incorporated
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT microchip_xec_rtos_timer

#include <zephyr/init.h>
#include <zephyr/devicetree.h>
#include <soc.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/sys_clock.h>
#include <zephyr/spinlock.h>
#include <cmsis_core.h>
#include <zephyr/irq.h>

BUILD_ASSERT(!IS_ENABLED(CONFIG_SMP), "XEC RTOS timer doesn't support SMP");

BUILD_ASSERT(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC == 32768,
             "XEC RTOS timer HW frequency is fixed at 32768");

#define DEBUG_RTOS_TIMER 0

#if DEBUG_RTOS_TIMER != 0
/* Enable feature to halt timer on JTAG/SWD CPU halt */
#define TIMER_START_VAL (MCHP_RTMR_CTRL_BLK_EN | MCHP_RTMR_CTRL_START \
                         | MCHP_RTMR_CTRL_HW_HALT_EN)
#else
#define TIMER_START_VAL (MCHP_RTMR_CTRL_BLK_EN | MCHP_RTMR_CTRL_START)
#endif

/*
 * Overview:
 *
 * This driver enables the Microchip XEC 32KHz based RTOS timer as the Zephyr
 * system timer. It supports both legacy ("tickful") mode as well as
 * TICKLESS_KERNEL. The XEC RTOS timer is a down counter with a fixed
 * frequency of 32768 Hz. The driver is based upon the Intel local APIC
 * timer driver.
 *
 * Configuration:
 *
 * CONFIG_MCHP_XEC_RTOS_TIMER=y
 *
 * CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC=<hz> must be set to 32768.
 *
 * To reduce truncation errors from accumulating due to conversion
 * to/from time, ticks, and HW cycles, set ticks per second equal to
 * the hardware frequency. With tickless kernel mode enabled the kernel
 * will not program a periodic timer at this fast rate.
 * CONFIG_SYS_CLOCK_TICKS_PER_SEC=32768
 */

#define CYCLES_PER_TICK \
        (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / CONFIG_SYS_CLOCK_TICKS_PER_SEC)

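/*
 * Example: with the recommended CONFIG_SYS_CLOCK_TICKS_PER_SEC=32768 the
 * ratio is 32768 / 32768, so CYCLES_PER_TICK evaluates to 1 and MAX_TICKS
 * below equals TIMER_MAX (roughly 2.3 hours of 32768 Hz cycles).
 */
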
#define TIMER_REGS \
        ((struct rtmr_regs *)DT_INST_REG_ADDR(0))

#define ECIA_XEC_REGS \
        ((struct ecia_regs *)DT_REG_ADDR(DT_NODELABEL(ecia)))

#ifdef CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT
#define PCR_XEC_REGS \
        ((struct pcr_regs *)DT_REG_ADDR(DT_NODELABEL(pcr)))

/*
 * pcrs property at index 0 is register index into array of 32-bit PCR SLP_EN,
 * CLK_REQ, or RST_EN registers. Property at index 1 is the bit position.
 */
#define BTMR32_0_PCR_REG_IDX (DT_PROP_BY_IDX(DT_NODELABEL(timer4), pcrs, 0))
#define BTMR32_0_PCR_BITPOS  (DT_PROP_BY_IDX(DT_NODELABEL(timer4), pcrs, 1))

#define BTMR32_0_REGS \
        ((struct btmr_regs *)(DT_REG_ADDR(DT_NODELABEL(timer4))))
#endif

/* Mask off bits[31:28] of 32-bit count */
#define TIMER_MAX        0x0fffffffu
#define TIMER_COUNT_MASK 0x0fffffffu
#define TIMER_STOPPED    0xf0000000u

/* Adjust cycle count programmed into timer for HW restart latency */
#define TIMER_ADJUST_LIMIT  2
#define TIMER_ADJUST_CYCLES 1

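/*
 * sys_clock_set_timeout() subtracts TIMER_ADJUST_CYCLES from any programmed
 * count larger than TIMER_ADJUST_LIMIT so the extra 32KHz clock consumed by
 * the restart sequence does not lengthen the timeout.
 */
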
/* max number of ticks we can load into the timer in one shot */
#define MAX_TICKS (TIMER_MAX / CYCLES_PER_TICK)

#define TIMER_GIRQ      DT_INST_PROP_BY_IDX(0, girqs, 0)
#define TIMER_GIRQ_POS  DT_INST_PROP_BY_IDX(0, girqs, 1)
#define TIMER_NVIC_NO   DT_INST_IRQN(0)
#define TIMER_NVIC_PRIO DT_INST_IRQ(0, priority)

/*
 * The spinlock protects all access to the RTMR registers, as well as
 * 'total_cycles', 'last_announcement', and 'cached_icr'.
 *
 * One important invariant that must be observed: `total_cycles` + `cached_icr`
 * is always an integral multiple of CYCLES_PER_TICK; that is, timer interrupts
 * are only ever scheduled to occur at tick boundaries.
 */

static struct k_spinlock lock;
static uint32_t total_cycles;
static uint32_t cached_icr = CYCLES_PER_TICK;

/*
 * NOTE: using inline for speed instead of call to external SoC function.
 * MEC GIRQ numbers are documented as 8 to 26, check and convert to zero
 * based index.
 */
static inline void girq_src_clr(int girq, int bitpos)
{
        if ((girq < 8) || (girq > 26)) {
                return;
        }

        ECIA_XEC_REGS->GIRQ[girq - 8].SRC = BIT(bitpos);
}

static inline void girq_src_en(int girq, int bitpos)
{
        if ((girq < 8) || (girq > 26)) {
                return;
        }

        ECIA_XEC_REGS->GIRQ[girq - 8].EN_SET = BIT(bitpos);
}

static inline void girq_src_dis(int girq, int bitpos)
{
        if ((girq < 8) || (girq > 26)) {
                return;
        }

        ECIA_XEC_REGS->GIRQ[girq - 8].EN_CLR = BIT(bitpos);
}

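/*
 * Stop the timer, re-enable the block, program a new preload count, and
 * start it. Starting transfers the preload value into the read-only count
 * register, which then counts down on the 32KHz clock.
 */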
static void timer_restart(uint32_t countdown)
{
        TIMER_REGS->CTRL = 0U;
        TIMER_REGS->CTRL = MCHP_RTMR_CTRL_BLK_EN;
        TIMER_REGS->PRLD = countdown;
        TIMER_REGS->CTRL = TIMER_START_VAL;
}

/*
 * Read the RTOS timer counter handling the case where the timer
 * has been reloaded within 1 32KHz clock of reading its count register.
 * The RTOS timer hardware must synchronize the write to its control register
 * on the AHB clock domain with the 32KHz clock domain of its internal logic.
 * This synchronization can take from nearly 0 time up to 1 32KHz clock as it
 * depends upon which 48MHz AHB clock with a 32KHz period the register write
 * was on. We detect the timer is in the load state by checking the read-only
 * count register and the START bit in the control register. If count register
 * is 0 and the START bit is set then the timer has been started and is in the
 * process of moving the preload register value into the count register.
 */
static inline uint32_t timer_count(void)
{
        uint32_t ccr = TIMER_REGS->CNT;

        if ((ccr == 0) && (TIMER_REGS->CTRL & MCHP_RTMR_CTRL_START)) {
                ccr = cached_icr;
        }

        return ccr;
}

#ifdef CONFIG_TICKLESS_KERNEL

static uint32_t last_announcement; /* last time we called sys_clock_announce() */

/*
 * Request a timeout n Zephyr ticks in the future from now.
 * Requested number of ticks in the future of n <= 1 means the kernel wants
 * the tick announced as soon as possible, ideally no more than one tick
 * in the future.
 *
 * Per comment below we don't clear RTMR pending interrupt.
 * RTMR counter register is read-only and is loaded from the preload
 * register by a 0->1 transition of the control register start bit.
 * Writing a new value to preload only takes effect once the count
 * register reaches 0.
 */
void sys_clock_set_timeout(int32_t n, bool idle)
{
        ARG_UNUSED(idle);

        uint32_t ccr, temp;
        int full_ticks;          /* number of complete ticks we'll wait */
        uint32_t full_cycles;    /* full_ticks represented as cycles */
        uint32_t partial_cycles; /* number of cycles to first tick boundary */

        if (idle && (n == K_TICKS_FOREVER)) {
                /*
                 * We are not in a locked section. Are writes to two
                 * global objects safe from pre-emption?
                 */
                TIMER_REGS->CTRL = 0U; /* stop timer */
                cached_icr = TIMER_STOPPED;
                return;
        }

        if (n < 1) {
                full_ticks = 0;
        } else if ((n == K_TICKS_FOREVER) || (n > MAX_TICKS)) {
                full_ticks = MAX_TICKS - 1;
        } else {
                full_ticks = n - 1;
        }

        full_cycles = full_ticks * CYCLES_PER_TICK;

        k_spinlock_key_t key = k_spin_lock(&lock);

        ccr = timer_count();

        /* turn off to clear any pending interrupt status */
        TIMER_REGS->CTRL = 0u;
        girq_src_clr(TIMER_GIRQ, TIMER_GIRQ_POS);
        NVIC_ClearPendingIRQ(TIMER_NVIC_NO);

        temp = total_cycles;
        temp += (cached_icr - ccr);
        temp &= TIMER_COUNT_MASK;
        total_cycles = temp;

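        /*
         * partial_cycles rounds the requested timeout up to the next tick
         * boundary, preserving the invariant that total_cycles + cached_icr
         * remains a multiple of CYCLES_PER_TICK.
         */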
        partial_cycles = CYCLES_PER_TICK - (total_cycles % CYCLES_PER_TICK);
        cached_icr = full_cycles + partial_cycles;
        /* adjust for up to one 32KHz cycle startup time */
        temp = cached_icr;
        if (temp > TIMER_ADJUST_LIMIT) {
                temp -= TIMER_ADJUST_CYCLES;
        }

        timer_restart(temp);

        k_spin_unlock(&lock, key);
}

/*
 * Return the number of Zephyr ticks elapsed from last call to
 * sys_clock_announce in the ISR. The caller casts uint32_t to int32_t.
 * We must make sure bit[31] is 0 in the return value.
 */
uint32_t sys_clock_elapsed(void)
{
        uint32_t ccr;
        uint32_t ticks;
        int32_t elapsed;

        k_spinlock_key_t key = k_spin_lock(&lock);

        ccr = timer_count();

        /* It may not look efficient but the compiler does a good job */
        elapsed = (int32_t)total_cycles - (int32_t)last_announcement;
        if (elapsed < 0) {
                elapsed = -1 * elapsed;
        }
        ticks = (uint32_t)elapsed;
        ticks += cached_icr - ccr;
        ticks /= CYCLES_PER_TICK;
        ticks &= TIMER_COUNT_MASK;

        k_spin_unlock(&lock, key);

        return ticks;
}

static void xec_rtos_timer_isr(const void *arg)
{
        ARG_UNUSED(arg);

        uint32_t cycles;
        int32_t ticks;

        k_spinlock_key_t key = k_spin_lock(&lock);

        girq_src_clr(TIMER_GIRQ, TIMER_GIRQ_POS);

        /* Restart the timer as early as possible to minimize drift... */
        timer_restart(MAX_TICKS * CYCLES_PER_TICK);

        cycles = cached_icr;
        cached_icr = MAX_TICKS * CYCLES_PER_TICK;

        total_cycles += cycles;
        total_cycles &= TIMER_COUNT_MASK;

        /* handle wrap by using (power of 2) - 1 mask */
        ticks = total_cycles - last_announcement;
        ticks &= TIMER_COUNT_MASK;
        ticks /= CYCLES_PER_TICK;

        last_announcement = total_cycles;

        k_spin_unlock(&lock, key);
        sys_clock_announce(ticks);
}

#else

/* Non-tickless kernel build. */

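/*
 * The timer is always reloaded with one tick's worth of cycles and each
 * interrupt announces exactly one elapsed tick to the kernel.
 */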
static void xec_rtos_timer_isr(const void *arg)
{
        ARG_UNUSED(arg);

        k_spinlock_key_t key = k_spin_lock(&lock);

        girq_src_clr(TIMER_GIRQ, TIMER_GIRQ_POS);

        /* Restart the timer as early as possible to minimize drift... */
        timer_restart(cached_icr);

        uint32_t temp = total_cycles + CYCLES_PER_TICK;

        total_cycles = temp & TIMER_COUNT_MASK;
        k_spin_unlock(&lock, key);

        sys_clock_announce(1);
}

uint32_t sys_clock_elapsed(void)
{
        return 0U;
}

#endif /* CONFIG_TICKLESS_KERNEL */

/*
 * Warning: the RTOS timer resolution is 30.5 us.
 * This is called by two code paths:
 * 1. Kernel call to k_cycle_get_32() -> arch_k_cycle_get_32() -> here.
 *    The kernel is casting return to (int) and using it uncasted in math
 *    expressions with int types. Expression result is stored in an int.
 * 2. If CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT is not defined then
 *    z_impl_k_busy_wait calls here. This code path uses the value as uint32_t.
 */
uint32_t sys_clock_cycle_get_32(void)
{
        uint32_t ret;
        uint32_t ccr;

        k_spinlock_key_t key = k_spin_lock(&lock);

        ccr = timer_count();
        ret = (total_cycles + (cached_icr - ccr)) & TIMER_COUNT_MASK;

        k_spin_unlock(&lock, key);

        return ret;
}

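/*
 * Called by the kernel on exit from idle. If the timer was stopped for a
 * K_TICKS_FOREVER idle request (marked by TIMER_STOPPED) restart it with a
 * one tick countdown.
 */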
void sys_clock_idle_exit(void)
{
        if (cached_icr == TIMER_STOPPED) {
                cached_icr = CYCLES_PER_TICK;
                timer_restart(cached_icr);
        }
}

void sys_clock_disable(void)
{
        TIMER_REGS->CTRL = 0U;
}

#ifdef CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT

/*
 * We implement custom busy wait using a MEC1501 basic timer running on
 * the 48MHz clock domain. This code is here for future power management
 * save/restore of the timer context.
 */

/*
 * 32-bit basic timer 0 configured for 1MHz count up, auto-reload,
 * and no interrupt generation.
 */
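/*
 * sys_clock_driver_init() programs a prescale value of 47 so the 48MHz
 * input clock is divided down to the 1MHz count rate, i.e. one count per
 * microsecond of busy wait.
 */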
void arch_busy_wait(uint32_t usec_to_wait)
{
        if (usec_to_wait == 0) {
                return;
        }

        uint32_t start = BTMR32_0_REGS->CNT;

        for (;;) {
                uint32_t curr = BTMR32_0_REGS->CNT;

                if ((curr - start) >= usec_to_wait) {
                        break;
                }
        }
}
#endif

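/*
 * Initialize the RTOS timer as the system timer and, when the custom
 * busy-wait is enabled, the 32-bit basic timer used by arch_busy_wait().
 */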
static int sys_clock_driver_init(void)
{

#ifdef CONFIG_TICKLESS_KERNEL
        cached_icr = MAX_TICKS;
#endif

        TIMER_REGS->CTRL = 0u;
        girq_src_clr(TIMER_GIRQ, TIMER_GIRQ_POS);
        girq_src_dis(TIMER_GIRQ, TIMER_GIRQ_POS);
        NVIC_ClearPendingIRQ(TIMER_NVIC_NO);

        IRQ_CONNECT(TIMER_NVIC_NO, TIMER_NVIC_PRIO, xec_rtos_timer_isr, 0, 0);
        irq_enable(TIMER_NVIC_NO);
        girq_src_en(TIMER_GIRQ, TIMER_GIRQ_POS);

#ifdef CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT
        uint32_t btmr_ctrl = (MCHP_BTMR_CTRL_ENABLE
                              | MCHP_BTMR_CTRL_AUTO_RESTART
                              | MCHP_BTMR_CTRL_COUNT_UP
                              | (47UL << MCHP_BTMR_CTRL_PRESCALE_POS));

#if CONFIG_SOC_SERIES_MEC1501X
        mchp_pcr_periph_slp_ctrl(PCR_B32TMR0, 0);
#else
        PCR_XEC_REGS->SLP_EN[BTMR32_0_PCR_REG_IDX] &= ~BIT(BTMR32_0_PCR_BITPOS);
#endif

        BTMR32_0_REGS->CTRL = MCHP_BTMR_CTRL_SOFT_RESET;
        BTMR32_0_REGS->CTRL = btmr_ctrl;
        BTMR32_0_REGS->PRLD = UINT32_MAX;
        btmr_ctrl |= MCHP_BTMR_CTRL_START;

        timer_restart(cached_icr);
        /* wait for RTOS timer to load count register from preload */
        while (TIMER_REGS->CNT == 0) {
                ;
        }

        BTMR32_0_REGS->CTRL = btmr_ctrl;
#else
        timer_restart(cached_icr);
#endif

        return 0;
}

SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2,
         CONFIG_SYSTEM_CLOCK_INIT_PRIORITY);