x86: enable stack overflow detection on 64-bit
CONFIG_HW_STACK_PROTECTION is now available.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
commit 7d1ae023f8
parent 9fb89ccf32
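Note: a minimal sketch of how the new protection can be exercised, assuming a 64-bit x86 target built with CONFIG_HW_STACK_PROTECTION=y; the sample below is illustrative and not part of this commit. A thread that recurses past the bottom of its stack buffer should fault on the read-only guard page and be reported as K_ERR_STACK_CHK_FAIL instead of silently corrupting adjacent memory.

#include <zephyr.h>

#define VICTIM_STACK_SIZE 1024

K_THREAD_STACK_DEFINE(victim_stack, VICTIM_STACK_SIZE);
static struct k_thread victim_thread;

/* Recurse until RSP walks off the bottom of the stack buffer and into
 * the 4K guard page that precedes it.
 */
static int blow_stack(int depth)
{
	volatile char filler[128];

	filler[0] = (char)depth;
	return blow_stack(depth + 1) + filler[0];
}

static void victim_entry(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	blow_stack(0);
}

void main(void)
{
	k_thread_create(&victim_thread, victim_stack,
			K_THREAD_STACK_SIZEOF(victim_stack),
			victim_entry, NULL, NULL, NULL,
			K_PRIO_PREEMPT(0), 0, K_NO_WAIT);
}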
@@ -16,14 +16,14 @@ config ARCH
 config CPU_ATOM
 	bool # hidden
 	select CPU_HAS_FPU
-	select ARCH_HAS_STACK_PROTECTION if X86_MMU && !X86_64
+	select ARCH_HAS_STACK_PROTECTION if X86_MMU
 	select ARCH_HAS_USERSPACE if X86_MMU && !X86_64
 	help
 	  This option signifies the use of a CPU from the Atom family.
 
 config CPU_MINUTEIA
 	bool # hidden
-	select ARCH_HAS_STACK_PROTECTION if X86_MMU && !X86_64
+	select ARCH_HAS_STACK_PROTECTION if X86_MMU
 	select ARCH_HAS_USERSPACE if X86_MMU && !X86_64
 	help
 	  This option signifies the use of a CPU from the Minute IA family.
@@ -31,7 +31,7 @@ config CPU_MINUTEIA
 config CPU_APOLLO_LAKE
 	bool # hidden
 	select CPU_HAS_FPU
-	select ARCH_HAS_STACK_PROTECTION if X86_MMU && !X86_64
+	select ARCH_HAS_STACK_PROTECTION if X86_MMU
 	select ARCH_HAS_USERSPACE if X86_MMU && !X86_64
 	help
 	  This option signifies the use of a CPU from the Apollo Lake family.
@@ -58,4 +58,13 @@ config ISR_SUBSTACK_SIZE
 	  CONFIG_ISR_SUBSTACK_SIZE * CONFIG_ISR_DEPTH must be equal to
 	  CONFIG_ISR_STACK_SIZE.
 
+config X86_STACK_PROTECTION
+	bool
+	default y if HW_STACK_PROTECTION
+	select THREAD_STACK_INFO
+	help
+	  This option leverages the MMU to cause a system fatal error if the
+	  bounds of the current process stack are overflowed. This is done
+	  by preceding all stack areas with a 4K guard page.
+
 endif # X86_64
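The help text above is the whole design in one sentence: each thread stack object is preceded by a 4K guard page, and remapping that page read-only turns a stack overflow into a page fault. A rough sketch of the implied layout follows; the names are illustrative (only guard_page corresponds to a field of the real struct z_x86_thread_stack_header, which appears in the thread code further down).

/* Illustrative layout only; apart from guard_page the names are not
 * taken from the Zephyr sources.
 */
#define GUARD_PAGE_SIZE 4096

struct stack_with_guard {
	/* Remapped read-only when X86_STACK_PROTECTION is enabled; any
	 * write here (i.e. a stack overflow) triggers a page fault.
	 */
	char guard_page[GUARD_PAGE_SIZE]
		__attribute__((aligned(GUARD_PAGE_SIZE)));

	/* Usable stack buffer; the stack grows downward toward the
	 * guard page.
	 */
	char buffer[];
};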
@@ -15,5 +15,11 @@ void z_x86_exception(const z_arch_esf_t *esf)
 	LOG_ERR("** CPU Exception %ld (code %ld/0x%lx) **",
 		esf->vector, esf->code, esf->code);
 
+#ifdef CONFIG_THREAD_STACK_INFO
+	if (z_x86_check_stack_bounds(esf->rsp, 0, esf->cs)) {
+		z_x86_fatal_error(K_ERR_STACK_CHK_FAIL, esf);
+	}
+#endif
+
 	z_x86_fatal_error(K_ERR_CPU_EXCEPTION, esf);
 }
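The call to z_x86_check_stack_bounds() is why the new Kconfig option selects THREAD_STACK_INFO: the kernel needs to know where each thread's stack buffer starts and how large it is. Below is a simplified sketch of that kind of check, not the actual implementation (which also has to account for interrupt stacks and for user mode via the cs argument); it assumes CONFIG_THREAD_STACK_INFO is enabled so that k_thread carries stack_info.

#include <stdbool.h>
#include <stdint.h>
#include <kernel.h>

/* True if the faulting stack pointer lies outside the thread's stack
 * buffer, e.g. because it walked down into the guard page.
 */
static bool stack_rsp_out_of_bounds(const struct k_thread *thread,
				    uintptr_t rsp)
{
	uintptr_t start = thread->stack_info.start;
	uintptr_t end = start + thread->stack_info.size;

	return rsp < start || rsp >= end;
}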
@@ -15,10 +15,22 @@ void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 		       void *parameter1, void *parameter2, void *parameter3,
 		       int priority, unsigned int options)
 {
+#if defined(CONFIG_X86_USERSPACE) || defined(CONFIG_X86_STACK_PROTECTION)
+	struct z_x86_thread_stack_header *header =
+		(struct z_x86_thread_stack_header *)stack;
+#endif
+
 	Z_ASSERT_VALID_PRIO(priority, entry);
 	z_new_thread_init(thread, Z_THREAD_STACK_BUFFER(stack),
 			  stack_size, priority, options);
 
+#if CONFIG_X86_STACK_PROTECTION
+	/* Set guard area to read-only to catch stack overflows */
+	z_x86_mmu_set_flags(&z_x86_kernel_ptables, &header->guard_page,
+			    MMU_PAGE_SIZE, MMU_ENTRY_READ, Z_X86_MMU_RW,
+			    true);
+#endif
+
 	thread->callee_saved.rsp = (long) Z_THREAD_STACK_BUFFER(stack);
 	thread->callee_saved.rsp += (stack_size - 8); /* fake RIP for ABI */
 	thread->callee_saved.rip = (long) z_thread_entry;
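One way to confirm the end-to-end behavior is through the overridable fatal-error hook available in this era of the tree (k_sys_fatal_error_handler); the sketch below is a test aid under that assumption, not part of the commit. When a thread hits its guard page, the exception path above should hand K_ERR_STACK_CHK_FAIL to the hook rather than a generic K_ERR_CPU_EXCEPTION.

#include <zephyr.h>
#include <fatal.h>
#include <sys/printk.h>

/* Overrides the __weak default handler so a test can observe the
 * reason code reported for a stack overflow.
 */
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *esf)
{
	ARG_UNUSED(esf);

	if (reason == K_ERR_STACK_CHK_FAIL) {
		printk("stack overflow caught by the guard page\n");
	}

	k_fatal_halt(reason);
}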