drivers/timer: Add x86 APIC TSC_DEADLINE driver

Modern hardware all supports a TSC_DEADLINE mode for the APIC timer,
where the same GHz-scale 64 bit TSC used for performance monitoring
becomes the free-running counter used for cpu-local timer interrupts.
Being a free running counter that does not need to be reset, it will
not lose time in an interrupt.  Being 64 bit, it needs no rollover or
clamping logic in the driver when presented with a 32 bit tick count.
Being a proper comparator, it will correctly trigger interrupts for
times set "in the past" and thus needs no minimum/clamping logic.  The
counter is synchronized across the system architecturally (modulo one
burp where firmware likes to change the adjustment value) so usage is
SMP-safe by default.  Access to the 64 bit counter and comparator
value are single-instruction atomics even on 32 bit systems, so it
beats even the RISC-V machine timer in complexity (which was our
reigning champ for "simplest timer driver").

Really this is just ideal for Zephyr.  So rather than try to add
support for it to the existing APIC driver and increase complexity,
make this a new standalone driver instead.  All modern hardware has
what it needs.  The sole gotcha is that it's not easily emulatable
(qemu supports it only under kvm where they can freeload on the host
TSC) so it can be exercised only on hardware platforms right now.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
This commit is contained in:
Andy Ross 2021-05-05 10:37:39 -07:00 committed by Anas Nashif
parent d5e0e8b0f3
commit 662b0bf765
4 changed files with 232 additions and 7 deletions

View file

@ -304,6 +304,7 @@
/drivers/spi/ @tbursztyka
/drivers/spi/spi_rv32m1_lpspi* @karstenkoenig
/drivers/timer/apic_timer.c @dcpleung @nashif
/drivers/timer/apic_tsc.c @andyross
/drivers/timer/arm_arch_timer.c @carlocaione
/drivers/timer/cortex_m_systick.c @ioannisg
/drivers/timer/altera_avalon_timer_hal.c @nashif

View file

@ -24,3 +24,4 @@ zephyr_sources_ifdef(CONFIG_LEON_GPTIMER leon_gptimer.c)
zephyr_sources_ifdef(CONFIG_NPCX_ITIM_TIMER npcx_itim_timer.c)
zephyr_sources_ifdef(CONFIG_MCUX_OS_TIMER mcux_os_timer.c)
zephyr_sources_ifdef(CONFIG_RCAR_CMT_TIMER rcar_cmt_timer.c)
zephyr_sources_ifdef(CONFIG_APIC_TSC_DEADLINE_TIMER apic_tsc.c)

View file

@ -13,7 +13,11 @@ menuconfig APIC_TIMER
depends on LOAPIC
select TICKLESS_CAPABLE
help
Use the x86 local APIC as the system time source.
Use the x86 local APIC in one-shot mode as the system time
source. NOTE: this probably isn't what you want except on
older or idiosyncratic hardware (or environments like qemu
without complete APIC emulation). Modern hardware will work
better with CONFIG_APIC_TSC_DEADLINE_TIMER.
if APIC_TIMER
@ -22,12 +26,11 @@ config APIC_TIMER_IRQ
default 24
help
This option specifies the IRQ used by the local APIC timer.
config APIC_TIMER_IRQ_PRIORITY
int "Local APIC timer IRQ priority"
default 4
help
This option specifies the IRQ priority used by the local APIC timer.
Note: this MUST be set to the index immediately after the
last IO-APIC IRQ (the timer is the first entry in the APIC
local vector table). This footgun is not intended to be
user-configurable and almost certainly should be managed via
a different mechanism.
config APIC_TIMER_TSC
bool "Use invariant TSC for sys_clock_cycle_get_32()"
@ -51,6 +54,29 @@ endif # APIC_TIMER_TSC
endif # APIC_TIMER
config APIC_TIMER_IRQ_PRIORITY
int "Local APIC timer interrupt priority"
default 4
help
This option specifies the interrupt priority used by the
local APIC timer.
config APIC_TSC_DEADLINE_TIMER
bool "Even newer APIC timer using TSC deadline mode"
depends on X86
select LOAPIC
select TICKLESS_CAPABLE
help
Extremely simple timer driver based on the local APIC TSC
deadline capability. The use of a free-running 64 bit
counter with comparator eliminates almost all edge cases
from the handling, and the near-instruction-cycle resolution
permits effectively unlimited precision where needed (the
limit becomes the CPU time taken to execute the timing
logic). SMP-safe and very fast, this should be the obvious
choice for any x86 device with invariant TSC and TSC
deadline capability.
config HPET_TIMER
bool "HPET timer"
depends on X86

197
drivers/timer/apic_tsc.c Normal file
View file

@ -0,0 +1,197 @@
/*
* Copyright (c) 2021 Intel Corporation
* SPDX-License-Identifier: Apache-2.0
*/
#include <drivers/timer/system_timer.h>
#include <sys_clock.h>
#include <spinlock.h>
#include <drivers/interrupt_controller/loapic.h>
#define IA32_TSC_DEADLINE_MSR 0x6e0
#define IA32_TSC_ADJUST_MSR 0x03b
#define CYC_PER_TICK (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC \
/ (uint64_t) CONFIG_SYS_CLOCK_TICKS_PER_SEC)
struct apic_timer_lvt {
uint8_t vector : 8;
uint8_t unused0 : 8;
uint8_t masked : 1;
enum { ONE_SHOT, PERIODIC, TSC_DEADLINE } mode: 2;
uint32_t unused2 : 13;
};
static struct k_spinlock lock;
static uint64_t last_announce;
static union { uint32_t val; struct apic_timer_lvt lvt; } lvt_reg;
static ALWAYS_INLINE uint64_t rdtsc(void)
{
uint32_t hi, lo;
__asm__ volatile("rdtsc" : "=d"(hi), "=a"(lo));
return lo + (((uint64_t)hi) << 32);
}
static void isr(const void *arg)
{
ARG_UNUSED(arg);
k_spinlock_key_t key = k_spin_lock(&lock);
uint32_t ticks = (rdtsc() - last_announce) / CYC_PER_TICK;
last_announce += ticks * CYC_PER_TICK;
k_spin_unlock(&lock, key);
sys_clock_announce(ticks);
if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
sys_clock_set_timeout(1, false);
}
}
static inline void wrmsr(int32_t msr, uint64_t val)
{
uint32_t hi = (uint32_t) (val >> 32);
uint32_t lo = (uint32_t) val;
__asm__ volatile("wrmsr" :: "d"(hi), "a"(lo), "c"(msr));
}
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
ARG_UNUSED(idle);
uint64_t now = rdtsc();
k_spinlock_key_t key = k_spin_lock(&lock);
uint64_t expires = now + MAX(ticks - 1, 0) * CYC_PER_TICK;
expires = last_announce + (((expires - last_announce + CYC_PER_TICK - 1)
/ CYC_PER_TICK) * CYC_PER_TICK);
/* The second condition is to catch the wraparound.
* Interpreted strictly, the IA SDM description of the
* TSC_DEADLINE MSR implies that it will trigger an immediate
* interrupt if we try to set an expiration across the 64 bit
* rollover. Unfortunately there's no way to test that as on
* real hardware it requires more than a century of uptime,
* but this is cheap and safe.
*/
if (ticks == K_TICKS_FOREVER || expires < last_announce) {
expires = UINT64_MAX;
}
wrmsr(IA32_TSC_DEADLINE_MSR, expires);
k_spin_unlock(&lock, key);
}
uint32_t sys_clock_elapsed(void)
{
k_spinlock_key_t key = k_spin_lock(&lock);
uint32_t ret = (rdtsc() - last_announce) / CYC_PER_TICK;
k_spin_unlock(&lock, key);
return ret;
}
uint32_t sys_clock_cycle_get_32(void)
{
return (uint32_t) rdtsc();
}
static inline uint32_t timer_irq(void)
{
/* The Zephyr APIC API is... idiosyncratic. The timer is a
* "local vector table" interrupt. These aren't system IRQs
* presented to the IO-APIC, they're indices into a register
* array in the local APIC. By Zephyr convention they come
* after all the external IO-APIC interrupts, but that number
* changes depending on device configuration so we have to
* fetch it at runtime. The timer happens to be the first
* entry in the table.
*/
return z_loapic_irq_base();
}
/* The TSC_ADJUST MSR implements a synchronized offset such that
* multiple CPUs (within a socket, anyway) can synchronize exactly, or
* implement managed timing spaces for guests in a recoverable way,
* etc... We set it to zero on all cores for simplicity, because
* firmware often leaves it in an inconsistent state between cores.
*/
static void clear_tsc_adjust(void)
{
/* But don't touch it on ACRN, where an hypervisor bug
* confuses the APIC emulation and deadline interrupts don't
* arrive.
*/
#ifndef CONFIG_BOARD_ACRN
wrmsr(IA32_TSC_ADJUST_MSR, 0);
#endif
}
void smp_timer_init(void)
{
/* Copy the LVT configuration from CPU0, because IRQ_CONNECT()
* doesn't know how to manage LVT interrupts for anything
* other than the calling/initial CPU. Same fence needed to
* prevent later MSR writes from reordering before the APIC
* configuration write.
*/
x86_write_loapic(LOAPIC_TIMER, lvt_reg.val);
__asm__ volatile("mfence" ::: "memory");
clear_tsc_adjust();
irq_enable(timer_irq());
}
static inline void cpuid(uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
__asm__ volatile("cpuid"
: "=b"(*ebx), "=c"(*ecx), "=d"(*edx)
: "a"(*eax), "c"(*ecx));
}
int sys_clock_driver_init(const struct device *dev)
{
#ifdef CONFIG_ASSERT
uint32_t eax, ebx, ecx, edx;
eax = 1; ecx = 0;
cpuid(&eax, &ebx, &ecx, &edx);
__ASSERT((ecx & BIT(24)) != 0, "No TSC Deadline support");
eax = 0x80000007; ecx = 0;
cpuid(&eax, &ebx, &ecx, &edx);
__ASSERT((edx & BIT(8)) != 0, "No Invariant TSC support");
eax = 7; ecx = 0;
cpuid(&eax, &ebx, &ecx, &edx);
__ASSERT((ebx & BIT(1)) != 0, "No TSC_ADJUST MSR support");
#endif
clear_tsc_adjust();
/* Timer interrupt number is runtime-fetched, so can't use
* static IRQ_CONNECT()
*/
irq_connect_dynamic(timer_irq(), CONFIG_APIC_TIMER_IRQ_PRIORITY, isr, 0, 0);
lvt_reg.val = x86_read_loapic(LOAPIC_TIMER);
lvt_reg.lvt.mode = TSC_DEADLINE;
lvt_reg.lvt.masked = 0;
x86_write_loapic(LOAPIC_TIMER, lvt_reg.val);
/* Per the SDM, the TSC_DEADLINE MSR is not serializing, so
* this fence is needed to be sure that an upcoming MSR write
* (i.e. a timeout we're about to set) cannot possibly reorder
* around the initialization we just did.
*/
__asm__ volatile("mfence" ::: "memory");
last_announce = rdtsc();
irq_enable(timer_irq());
if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
sys_clock_set_timeout(1, false);
}
return 0;
}