arch/x86: Use NMI stack for NMIs

An NMI can be triggered at any time, even in the middle of a stack
switch. Use a dedicated IST stack for it so the handler always runs
on a known-good stack.

Signed-off-by: Andrei Emeltchenko <andrei.emeltchenko@intel.com>
This commit is contained in:
Andrei Emeltchenko 2020-10-21 17:58:18 +03:00 committed by Johan Hedberg
parent 8db06aee69
commit 85db42883f
3 changed files with 30 additions and 2 deletions

View file

@ -28,6 +28,11 @@ extern uint8_t _exception_stack1[];
extern uint8_t _exception_stack2[];
extern uint8_t _exception_stack3[];
extern uint8_t _nmi_stack[];
extern uint8_t _nmi_stack1[];
extern uint8_t _nmi_stack2[];
extern uint8_t _nmi_stack3[];
#ifdef CONFIG_X86_KPTI
extern uint8_t z_x86_trampoline_stack[];
extern uint8_t z_x86_trampoline_stack1[];
@ -40,6 +45,7 @@ struct x86_tss64 tss0 = {
#ifdef CONFIG_X86_KPTI
.ist2 = (uint64_t) z_x86_trampoline_stack + Z_X86_TRAMPOLINE_STACK_SIZE,
#endif
.ist6 = (uint64_t) _nmi_stack + CONFIG_X86_EXCEPTION_STACK_SIZE,
.ist7 = (uint64_t) _exception_stack + CONFIG_X86_EXCEPTION_STACK_SIZE,
.iomapb = 0xFFFF,
.cpu = &(_kernel.cpus[0])
@ -51,6 +57,7 @@ struct x86_tss64 tss1 = {
#ifdef CONFIG_X86_KPTI
.ist2 = (uint64_t) z_x86_trampoline_stack1 + Z_X86_TRAMPOLINE_STACK_SIZE,
#endif
.ist6 = (uint64_t) _nmi_stack1 + CONFIG_X86_EXCEPTION_STACK_SIZE,
.ist7 = (uint64_t) _exception_stack1 + CONFIG_X86_EXCEPTION_STACK_SIZE,
.iomapb = 0xFFFF,
.cpu = &(_kernel.cpus[1])
@ -63,6 +70,7 @@ struct x86_tss64 tss2 = {
#ifdef CONFIG_X86_KPTI
.ist2 = (uint64_t) z_x86_trampoline_stack2 + Z_X86_TRAMPOLINE_STACK_SIZE,
#endif
.ist6 = (uint64_t) _nmi_stack2 + CONFIG_X86_EXCEPTION_STACK_SIZE,
.ist7 = (uint64_t) _exception_stack2 + CONFIG_X86_EXCEPTION_STACK_SIZE,
.iomapb = 0xFFFF,
.cpu = &(_kernel.cpus[2])
@ -75,6 +83,7 @@ struct x86_tss64 tss3 = {
#ifdef CONFIG_X86_KPTI
.ist2 = (uint64_t) z_x86_trampoline_stack3 + Z_X86_TRAMPOLINE_STACK_SIZE,
#endif
.ist6 = (uint64_t) _nmi_stack3 + CONFIG_X86_EXCEPTION_STACK_SIZE,
.ist7 = (uint64_t) _exception_stack3 + CONFIG_X86_EXCEPTION_STACK_SIZE,
.iomapb = 0xFFFF,
.cpu = &(_kernel.cpus[3])

View file

@ -821,16 +821,18 @@ IRQ(248); IRQ(249); IRQ(250); IRQ(251); IRQ(252); IRQ(253); IRQ(254); IRQ(255)
#define IRQ_STACK 2
#define EXC_STACK 2
#define BAD_STACK 2
#define NMI_STACK 2
#else
#define IRQ_STACK 1
#define NMI_STACK 6 /* NMI stack */
#define EXC_STACK 7
#define BAD_STACK 7 /* Horrible things: NMIs, double faults, MCEs */
#define BAD_STACK 7 /* Horrible things: double faults, MCEs */
#endif
.align 16
idt:
IDT( 0, TRAP, EXC_STACK); IDT( 1, TRAP, EXC_STACK)
IDT( 2, TRAP, BAD_STACK); IDT( 3, TRAP, EXC_STACK)
IDT( 2, TRAP, NMI_STACK); IDT( 3, TRAP, EXC_STACK)
IDT( 4, TRAP, EXC_STACK); IDT( 5, TRAP, EXC_STACK)
IDT( 6, TRAP, EXC_STACK); IDT( 7, TRAP, EXC_STACK)
IDT( 8, TRAP, BAD_STACK); IDT( 9, TRAP, EXC_STACK)
@ -1041,12 +1043,20 @@ gdt80: /* LGDT descriptor for long mode */
.align 16
_exception_stack:
.fill CONFIG_X86_EXCEPTION_STACK_SIZE, 1, 0xAA
.global _nmi_stack
.align 16
_nmi_stack:
.fill CONFIG_X86_EXCEPTION_STACK_SIZE, 1, 0xAA
#if CONFIG_MP_NUM_CPUS > 1
.global _exception_stack1
.align 16
_exception_stack1:
.fill CONFIG_X86_EXCEPTION_STACK_SIZE, 1, 0xAA
.global _nmi_stack1
.align 16
_nmi_stack1:
.fill CONFIG_X86_EXCEPTION_STACK_SIZE, 1, 0xAA
#endif
#if CONFIG_MP_NUM_CPUS > 2
@ -1054,6 +1064,10 @@ _exception_stack1:
.align 16
_exception_stack2:
.fill CONFIG_X86_EXCEPTION_STACK_SIZE, 1, 0xAA
.global _nmi_stack2
.align 16
_nmi_stack2:
.fill CONFIG_X86_EXCEPTION_STACK_SIZE, 1, 0xAA
#endif
#if CONFIG_MP_NUM_CPUS > 3
@ -1061,6 +1075,10 @@ _exception_stack2:
.align 16
_exception_stack3:
.fill CONFIG_X86_EXCEPTION_STACK_SIZE, 1, 0xAA
.global _nmi_stack3
.align 16
_nmi_stack3:
.fill CONFIG_X86_EXCEPTION_STACK_SIZE, 1, 0xAA
#endif
#ifdef CONFIG_X86_KPTI

View file

@ -37,6 +37,7 @@ GEN_OFFSET_SYM(_thread_arch_t, ptables);
GEN_OFFSET_SYM(x86_tss64_t, ist1);
GEN_OFFSET_SYM(x86_tss64_t, ist2);
GEN_OFFSET_SYM(x86_tss64_t, ist6);
GEN_OFFSET_SYM(x86_tss64_t, ist7);
GEN_OFFSET_SYM(x86_tss64_t, cpu);
#ifdef CONFIG_USERSPACE