aarch64: userspace: Add support for page tables swapping

Introduce the routines needed to map the user thread stack correctly,
plus the functions to swap page tables on context switch.

Signed-off-by: Carlo Caione <ccaione@baylibre.com>
Author: Carlo Caione <ccaione@baylibre.com>
Date: 2020-11-26 12:57:36 +01:00
Committed-by: Anas Nashif
Parent: b1eefc0c26
Commit: ec70b2bc7a

7 changed files with 86 additions and 1 deletion
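Taken together, the hunks below drive the page table swap from three places: the context switch path in switch.S, arch_user_mode_enter() when a thread first drops to EL0, and arch_mem_domain_thread_add() when the currently running thread is added to a new memory domain.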

@@ -2,4 +2,4 @@
 zephyr_library()
-zephyr_library_sources(arm_mmu.c)
+zephyr_library_sources(arm_mmu.c low_level.S)

@@ -10,6 +10,7 @@
 #include <device.h>
 #include <init.h>
 #include <kernel.h>
+#include <kernel_arch_func.h>
 #include <kernel_arch_interface.h>
 #include <kernel_internal.h>
 #include <logging/log.h>
@@ -993,6 +994,10 @@ void arch_mem_domain_thread_add(struct k_thread *thread)
 		reset_map(old_ptables, __func__, thread->stack_info.start,
 			  thread->stack_info.size);
 	}
+
+	if (thread == _current && !is_ptable_active(domain_ptables)) {
+		z_arm64_swap_ptables(thread);
+	}
 }
 
 void arch_mem_domain_thread_remove(struct k_thread *thread)
@@ -1015,4 +1020,30 @@ void arch_mem_domain_thread_remove(struct k_thread *thread)
 			  thread->stack_info.size);
 }
 
+void z_arm64_swap_ptables(struct k_thread *incoming)
+{
+	struct arm_mmu_ptables *ptables = incoming->arch.ptables;
+
+	if (!is_ptable_active(ptables)) {
+		z_arm64_set_ttbr0((uintptr_t)ptables->base_xlat_table);
+	} else {
+		invalidate_tlb_all();
+	}
+}
+
+void z_arm64_thread_pt_init(struct k_thread *incoming)
+{
+	struct arm_mmu_ptables *ptables;
+
+	if ((incoming->base.user_options & K_USER) == 0)
+		return;
+
+	ptables = incoming->arch.ptables;
+
+	/* Map the thread stack */
+	map_thread_stack(incoming, ptables);
+
+	z_arm64_swap_ptables(incoming);
+}
+
 #endif /* CONFIG_USERSPACE */
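Two helpers the new arm_mmu.c code leans on, is_ptable_active() and invalidate_tlb_all(), are not part of this diff. A plausible sketch of their shape, inferred from how they are used above (names and bodies assumed here, not taken from the commit; struct arm_mmu_ptables is the one declared in arm_mmu.h):

#include <stdbool.h>
#include <stdint.h>

/* Assumed helper: the tables are "live" when TTBR0_EL1 already points
 * at their base translation table (no ASIDs are in use, so the
 * register holds just the table address).
 */
static bool is_ptable_active(struct arm_mmu_ptables *ptables)
{
	uint64_t ttbr0;

	__asm__ volatile ("mrs %0, ttbr0_el1" : "=r" (ttbr0));
	return ttbr0 == (uint64_t)ptables->base_xlat_table;
}

/* Assumed helper: drop all stage 1 EL1&0 TLB entries so the walker
 * re-reads tables that were modified in place.
 */
static inline void invalidate_tlb_all(void)
{
	__asm__ volatile ("tlbi vmalle1; dsb sy; isb" : : : "memory");
}

This reading also explains the else branch in z_arm64_swap_ptables(): when the incoming thread's tables are already installed, map_thread_stack() may still have edited them in place, so stale TLB entries must be flushed even though TTBR0_EL1 keeps its value.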

@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2021 Carlo Caione <ccaione@baylibre.com>
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include <toolchain.h>
+#include <linker/sections.h>
+#include <arch/cpu.h>
+
+_ASM_FILE_PROLOGUE
+
+/*
+ * Switch TTBR0
+ */
+
+GTEXT(z_arm64_set_ttbr0)
+SECTION_FUNC(TEXT, z_arm64_set_ttbr0)
+
+	/* Disable all the caches */
+	mrs	x2, sctlr_el1
+	mov_imm	x1, (SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
+	bic	x1, x2, x1
+	msr	sctlr_el1, x1
+	isb
+
+	/* Invalidate the TLBs */
+	tlbi	vmalle1
+	dsb	sy
+	isb
+
+	/* Switch the TTBR0 */
+	msr	ttbr0_el1, x0
+	isb
+
+	/* Restore the saved SCTLR_EL1 */
+	msr	sctlr_el1, x2
+	isb
+
+	ret
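Why the SCTLR_EL1 dance above: clearing M, C and I ensures no cached instruction fetch, cached data access, or stage 1 translation is in flight while TTBR0_EL1 is mid-switch; tlbi vmalle1 drops entries that belong to the outgoing tables (this port does not tag TLB entries with ASIDs); and each msr is followed by an isb so the context-changing write is synchronized before the next instruction executes. Running briefly with the MMU off is safe as long as this code executes from an identity-mapped region, which the flat kernel mapping of the arm64 port provides. For reference, the bits involved, restated from the Arm architecture (the named constants themselves come from Zephyr's arch headers):

/* SCTLR_EL1 bits cleared across the TTBR0_EL1 switch */
#define SCTLR_M_BIT (1UL << 0)   /* stage 1 MMU enable, EL1&0 */
#define SCTLR_C_BIT (1UL << 2)   /* data/unified cache enable */
#define SCTLR_I_BIT (1UL << 12)  /* instruction cache enable */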

@@ -76,6 +76,12 @@ SECTION_FUNC(TEXT, z_arm64_context_switch)
 	mov	sp, x1
 
+#ifdef CONFIG_USERSPACE
+	stp	xzr, x30, [sp, #-16]!
+	bl	z_arm64_swap_ptables
+	ldp	xzr, x30, [sp], #16
+#endif
+
 #ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
 	stp	xzr, x30, [sp, #-16]!
 	bl	z_thread_mark_switched_in
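The stp xzr, x30, [sp, #-16]! / ldp pairs bracketing the bl are needed because the call clobbers x30 (the link register) and AAPCS64 requires SP to stay 16-byte aligned at a call; xzr is stored purely as padding, mirroring the existing CONFIG_INSTRUMENT_THREAD_SWITCHING sequence. z_arm64_swap_ptables() takes the incoming thread as its argument, which the switch path is expected to still hold in x0 at this point.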

@@ -83,6 +83,9 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
 	z_arch_esf_t *pInitCtx;
 	uintptr_t stack_ptr;
 
+	/* Map the thread stack */
+	z_arm64_thread_pt_init(_current);
+
 	/* Setup the private stack */
 	_current->arch.priv_stack_start = (uint64_t)(_current->stack_obj);
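z_arm64_thread_pt_init() runs before anything else here so that, by the time arch_user_mode_enter() drops to EL0, the thread's stack is mapped in the thread's own page tables and those tables are already installed in TTBR0_EL1.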

@@ -41,6 +41,7 @@ static inline void arch_switch(void *switch_to, void **switched_from)
 extern void z_arm64_fatal_error(z_arch_esf_t *esf, unsigned int reason);
 extern void z_arm64_userspace_enter(z_arch_esf_t *esf);
+extern void z_arm64_set_ttbr0(uintptr_t ttbr0);
 
 #endif /* _ASMLANGUAGE */

@@ -194,7 +194,11 @@ struct arm_mmu_ptables {
  */
 extern const struct arm_mmu_config mmu_config;
 
+struct k_thread;
+
 void z_arm64_mmu_init(void);
+void z_arm64_thread_pt_init(struct k_thread *thread);
+void z_arm64_swap_ptables(struct k_thread *thread);
 
 #endif /* _ASMLANGUAGE */
 #endif /* ZEPHYR_INCLUDE_ARCH_ARM64_MMU_ARM_MMU_H_ */