x86: add support for memory mapped stack for threads
This adds the necessary bits to enable memory mapping thread stacks on both x86 and x86_64. Note that this currently does not support multi-level mappings (e.g. demand paging, or running in a virtual address space, as on the qemu_x86/atom/virt board), as the mapped stacks require actual physical addresses.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
commit 027a1c30cd
parent 414250d013
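For background on what "memory mapped" means here: instead of the stack buffer living at a fixed location inside the kernel image, the stack is given a fresh virtual mapping surrounded by unmapped guard pages. A minimal conceptual sketch using Zephyr's existing k_mem_map() API follows; it illustrates the idea only and is not the allocation path added by this commit (the header that declares k_mem_map() varies across Zephyr versions).

#include <zephyr/kernel.h>
#include <zephyr/sys/mem_manage.h> /* k_mem_map(); header location varies by Zephyr version */

/* Conceptual sketch: obtain a page-aligned, read/write virtual
 * mapping big enough for a stack. k_mem_map() backs the region with
 * anonymous physical pages and leaves unmapped guard pages on both
 * sides, so running off either end of the region faults immediately
 * instead of silently corrupting neighboring data.
 */
static void *map_stack_region(size_t size)
{
	return k_mem_map(ROUND_UP(size, CONFIG_MMU_PAGE_SIZE), K_MEM_PERM_RW);
}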
@@ -91,6 +91,7 @@ config X86
 	       && !BOARD_HAS_TIMING_FUNCTIONS \
 	       && !SOC_HAS_TIMING_FUNCTIONS
 	select ARCH_HAS_STACK_CANARIES_TLS
+	select ARCH_SUPPORTS_MEM_MAPPED_STACKS if X86_MMU && !DEMAND_PAGING
 	help
 	  x86 architecture
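With ARCH_SUPPORTS_MEM_MAPPED_STACKS selected, an application on an MMU-enabled, non-demand-paged x86 target can opt into mapped stacks through the generic kernel option; a minimal prj.conf sketch (assuming the target already enables X86_MMU):

CONFIG_THREAD_STACK_MEM_MAPPED=y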
@@ -67,6 +67,7 @@ config X86_STACK_PROTECTION
 	select SET_GDT
 	select GDT_DYNAMIC
 	select X86_ENABLE_TSS
+	imply THREAD_STACK_MEM_MAPPED if !DEMAND_PAGING
 	help
 	  This option leverages the MMU to cause a system fatal error if the
 	  bounds of the current process stack are overflowed. This is done
@@ -73,6 +73,7 @@ config X86_STACK_PROTECTION
 	bool
 	default y if HW_STACK_PROTECTION
 	select THREAD_STACK_INFO
+	imply THREAD_STACK_MEM_MAPPED
 	help
 	  This option leverages the MMU to cause a system fatal error if the
 	  bounds of the current process stack are overflowed. This is done
@@ -86,6 +86,40 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs)
 }
 #endif
 
+#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
+/**
+ * Check if the fault is in the guard pages.
+ *
+ * @param addr Address to be tested.
+ *
+ * @return True if the address is in a guard page, false otherwise.
+ */
+__pinned_func
+bool z_x86_check_guard_page(uintptr_t addr)
+{
+	struct k_thread *thread = _current;
+	uintptr_t start, end;
+
+	/* Front guard page - before the thread stack area */
+	start = (uintptr_t)thread->stack_info.mapped.addr - CONFIG_MMU_PAGE_SIZE;
+	end = (uintptr_t)thread->stack_info.mapped.addr;
+
+	if ((addr >= start) && (addr < end)) {
+		return true;
+	}
+
+	/* Rear guard page - after the thread stack area */
+	start = (uintptr_t)thread->stack_info.mapped.addr + thread->stack_info.mapped.sz;
+	end = start + CONFIG_MMU_PAGE_SIZE;
+
+	if ((addr >= start) && (addr < end)) {
+		return true;
+	}
+
+	return false;
+}
+#endif /* CONFIG_THREAD_STACK_MEM_MAPPED */
+
 #ifdef CONFIG_EXCEPTION_DEBUG
 
 static inline uintptr_t esf_get_code(const z_arch_esf_t *esf)
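The new z_x86_check_guard_page() gives the page fault handler a way to classify a fault as landing in one of the two guard pages, i.e. a stack overflow or underflow. A hypothetical smoke test is sketched below; the thread, stack size, and helper names are illustrative and not part of this commit. With CONFIG_THREAD_STACK_MEM_MAPPED enabled, the runaway recursion should be reported as K_ERR_STACK_CHK_FAIL instead of corrupting adjacent memory.

#include <zephyr/kernel.h>

K_THREAD_STACK_DEFINE(overflow_stack, 4096);
static struct k_thread overflow_thread;

static volatile int sink;

/* Recurse until the stack pointer runs off the mapped stack and into
 * the front guard page, triggering a page fault in the guard region.
 */
static int recurse(int depth)
{
	volatile int pad[64];

	pad[0] = depth;
	sink = pad[0];
	return recurse(depth + 1) + pad[0];
}

static void overflow_entry(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);
	(void)recurse(0);
}

void run_overflow_test(void)
{
	k_thread_create(&overflow_thread, overflow_stack,
			K_THREAD_STACK_SIZEOF(overflow_stack),
			overflow_entry, NULL, NULL, NULL,
			K_PRIO_PREEMPT(0), 0, K_NO_WAIT);
}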
@@ -441,6 +475,14 @@ void z_x86_page_fault_handler(z_arch_esf_t *esf)
 		z_x86_fatal_error(K_ERR_STACK_CHK_FAIL, esf);
 	}
 #endif
+#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
+	void *fault_addr = z_x86_cr2_get();
+
+	if (z_x86_check_guard_page((uintptr_t)fault_addr)) {
+		z_x86_fatal_error(K_ERR_STACK_CHK_FAIL, esf);
+	}
+#endif
+
 	z_x86_fatal_error(K_ERR_CPU_EXCEPTION, esf);
 	CODE_UNREACHABLE;
 }
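For reference, on x86 the CPU loads the CR2 register with the faulting linear address whenever a page fault (#PF) is raised, which is what makes the guard-page classification above possible. A minimal sketch of what a helper like z_x86_cr2_get() presumably does (the commit's actual implementation is not shown in this hunk):

#include <stdint.h>

/* Read CR2, which holds the linear address that caused the most
 * recent page fault. Works for both 32-bit and 64-bit x86.
 */
static inline uintptr_t cr2_read(void)
{
	uintptr_t value;

	__asm__ volatile("mov %%cr2, %0" : "=r"(value));
	return value;
}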
@@ -79,7 +79,10 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	void *swap_entry;
 	struct _x86_initial_frame *initial_frame;
 
-#if CONFIG_X86_STACK_PROTECTION
+#if defined(CONFIG_X86_STACK_PROTECTION) && !defined(CONFIG_THREAD_STACK_MEM_MAPPED)
+	/* This unconditionally sets the first page of the stack as a guard
+	 * page, which is only needed if the stack is not memory mapped.
+	 */
 	z_x86_set_stack_guard(stack);
 #endif
 
@@ -32,7 +32,10 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	void *switch_entry;
 	struct x86_initial_frame *iframe;
 
-#if CONFIG_X86_STACK_PROTECTION
+#if defined(CONFIG_X86_STACK_PROTECTION) && !defined(CONFIG_THREAD_STACK_MEM_MAPPED)
+	/* This unconditionally sets the first page of the stack as a guard
+	 * page, which is only needed if the stack is not memory mapped.
+	 */
 	z_x86_set_stack_guard(stack);
 #endif
 #ifdef CONFIG_USERSPACE
@@ -69,8 +69,13 @@ void z_x86_swap_update_page_tables(struct k_thread *incoming)
 void *z_x86_userspace_prepare_thread(struct k_thread *thread)
 {
 	void *initial_entry;
+
 	struct z_x86_thread_stack_header *header =
+#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
+		(struct z_x86_thread_stack_header *)thread->stack_info.mapped.addr;
+#else
 		(struct z_x86_thread_stack_header *)thread->stack_obj;
+#endif /* CONFIG_THREAD_STACK_MEM_MAPPED */
 
 	thread->arch.psp =
 		header->privilege_stack + sizeof(header->privilege_stack);
@@ -14,16 +14,20 @@
 #define ARCH_STACK_PTR_ALIGN 4UL
 #endif
 
-#if defined(CONFIG_X86_STACK_PROTECTION) || defined(CONFIG_USERSPACE)
+#if defined(CONFIG_X86_STACK_PROTECTION) || defined(CONFIG_USERSPACE) \
+	|| defined(CONFIG_THREAD_STACK_MEM_MAPPED)
 #define Z_X86_STACK_BASE_ALIGN CONFIG_MMU_PAGE_SIZE
 #else
 #define Z_X86_STACK_BASE_ALIGN ARCH_STACK_PTR_ALIGN
 #endif
 
-#ifdef CONFIG_USERSPACE
+#if defined(CONFIG_USERSPACE) || defined(CONFIG_THREAD_STACK_MEM_MAPPED)
 /* If user mode enabled, expand any stack size to fill a page since that is
  * the access control granularity and we don't want other kernel data to
  * unintentionally fall in the latter part of the page
+ *
+ * This is also true when memory mapped stacks are used, since
+ * access control applies to one page at a time.
  */
 #define Z_X86_STACK_SIZE_ALIGN CONFIG_MMU_PAGE_SIZE
 #else
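The practical effect of Z_X86_STACK_SIZE_ALIGN being CONFIG_MMU_PAGE_SIZE is that every declared stack size is rounded up to a whole number of pages, since the MMU can only apply access permissions page by page. A small self-contained illustration (PAGE_SIZE is a stand-in for CONFIG_MMU_PAGE_SIZE; Z_X86_STACK_SIZE_ALIGN itself is internal to the arch headers):

#include <zephyr/kernel.h> /* ROUND_UP(), BUILD_ASSERT() */

#define PAGE_SIZE 4096 /* stand-in for CONFIG_MMU_PAGE_SIZE */

/* A 1000-byte request occupies one full page; 5000 bytes take two. */
BUILD_ASSERT(ROUND_UP(1000, PAGE_SIZE) == 4096);
BUILD_ASSERT(ROUND_UP(5000, PAGE_SIZE) == 2 * 4096);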
@@ -34,17 +38,38 @@
 /* With both hardware stack protection and userspace enabled, stacks are
  * arranged as follows:
  *
+ * --- Without stack being memory mapped:
  * High memory addresses
  * +-----------------------------------------+
  * | Thread stack (varies)                   |
  * +-----------------------------------------+
  * | Privilege elevation stack               |
- * | (4096 bytes)                            |
+ * | (CONFIG_PRIVILEGED_STACK_SIZE)          |
  * +-----------------------------------------+
  * | Guard page (4096 bytes)                 |
+ * |   - 'guard_page' in struct              |
+ * |     z_x86_thread_stack_header           |
  * +-----------------------------------------+
  * Low Memory addresses
  *
+ * --- With stack being memory mapped:
+ * High memory addresses
+ * +-----------------------------------------+
+ * | Guard page (empty page)                 |
+ * +-----------------------------------------+
+ * | Thread stack (varies)                   |
+ * +-----------------------------------------+
+ * | Privilege elevation stack               |
+ * | (CONFIG_PRIVILEGED_STACK_SIZE)          |
+ * +-----------------------------------------+
+ * | Guard page (empty page)                 |
+ * +-----------------------------------------+
+ * Low Memory addresses
+ *
+ * Without memory mapped stacks, the guard page is actually allocated
+ * as part of the stack struct, which takes up physical memory during
+ * linking.
+ *
  * Privilege elevation stacks are fixed-size. All the pages containing the
  * thread stack are marked as user-accessible. The guard page is marked
  * read-only to catch stack overflows in supervisor mode.
@@ -62,7 +87,7 @@
  * privileged mode stack.
  */
 struct z_x86_thread_stack_header {
-#ifdef CONFIG_X86_STACK_PROTECTION
+#if defined(CONFIG_X86_STACK_PROTECTION) && !defined(CONFIG_THREAD_STACK_MEM_MAPPED)
 	char guard_page[CONFIG_MMU_PAGE_SIZE];
 #endif
 #ifdef CONFIG_USERSPACE