x86: refactor mmustructs.h
The struct definitions for PDPT, PD, and PT entries have been removed:

- Bitfield ordering in a struct is implementation-defined; it can be
  right-to-left or left-to-right.
- The two different structures for page directory entries were not being
  used consistently, or were used when the type of the PDE was unknown.
- Anonymous structs/unions are GCC extensions.

These entries are now u64_t, with bitwise operations used to get/set
fields. A new set of inline functions for fetching the various page table
structures has been implemented, replacing the older macros.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
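As a sketch of the pitfall motivating the first point (the macro names are from this commit; the surrounding code is illustrative, not part of the tree): a compiler is free to place a 'p' bitfield at either end of a 64-bit entry, whereas an explicit mask always lands on the bit the hardware manual specifies.

    #include <stdint.h>
    #include <stdbool.h>

    /* Bit positions as defined by this commit (Intel page-table layout) */
    #define Z_X86_MMU_P  (1ULL << 0)   /* Present */
    #define Z_X86_MMU_RW (1ULL << 1)   /* Read-Write */

    /* Old style (removed): pte->p = 1; placement decided by bitfield ABI.
     * New style: bitwise operations on a plain 64-bit value.
     */
    static bool pte_is_writable(uint64_t pte)
    {
        return (pte & (Z_X86_MMU_P | Z_X86_MMU_RW)) ==
               (Z_X86_MMU_P | Z_X86_MMU_RW);
    }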
parent ab4d647e6d
commit 31620b90e2
@@ -120,7 +120,7 @@ FUNC_NORETURN void z_arch_user_mode_enter(k_thread_entry_t user_entry,

     /* Set up the kernel stack used during privilege elevation */
     z_x86_mmu_set_flags(&z_x86_kernel_ptables, &header->privilege_stack,
-                        MMU_PAGE_SIZE, MMU_ENTRY_WRITE, MMU_PTE_RW_MASK,
+                        MMU_PAGE_SIZE, MMU_ENTRY_WRITE, Z_X86_MMU_RW,
                         true);

     /* Initialize per-thread page tables, since that wasn't done when
@@ -199,13 +199,13 @@ void z_arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
     z_x86_mmu_set_flags(&z_x86_kernel_ptables, &header->privilege_stack,
                         MMU_PAGE_SIZE,
                         ((options & K_USER) == 0U) ? MMU_ENTRY_READ : MMU_ENTRY_WRITE,
-                        MMU_PTE_RW_MASK, true);
+                        Z_X86_MMU_RW, true);
 #endif /* CONFIG_X86_USERSPACE */

 #if CONFIG_X86_STACK_PROTECTION
     /* Set guard area to read-only to catch stack overflows */
     z_x86_mmu_set_flags(&z_x86_kernel_ptables, &header->guard_page,
-                        MMU_PAGE_SIZE, MMU_ENTRY_READ, MMU_PTE_RW_MASK,
+                        MMU_PAGE_SIZE, MMU_ENTRY_READ, Z_X86_MMU_RW,
                         true);
 #endif
@@ -27,19 +27,19 @@ BUILD_ASSERT(DT_PHYS_RAM_ADDR + (DT_RAM_SIZE * 1024ULL) - 1ULL <=
  * Userspace may read all text and rodata.
  */
 MMU_BOOT_REGION((u32_t)&_image_text_start, (u32_t)&_image_text_size,
-                MMU_ENTRY_READ | MMU_ENTRY_USER);
+                Z_X86_MMU_US);

 MMU_BOOT_REGION((u32_t)&_image_rodata_start, (u32_t)&_image_rodata_size,
-                MMU_ENTRY_READ | MMU_ENTRY_USER | MMU_ENTRY_EXECUTE_DISABLE);
+                Z_X86_MMU_US | Z_X86_MMU_XD);

 #ifdef CONFIG_USERSPACE
 MMU_BOOT_REGION((u32_t)&_app_smem_start, (u32_t)&_app_smem_size,
-                MMU_ENTRY_WRITE | MMU_ENTRY_EXECUTE_DISABLE);
+                Z_X86_MMU_RW | Z_X86_MMU_XD);
 #endif

 #ifdef CONFIG_COVERAGE_GCOV
 MMU_BOOT_REGION((u32_t)&__gcov_bss_start, (u32_t)&__gcov_bss_size,
-                MMU_ENTRY_WRITE | MMU_ENTRY_USER | MMU_ENTRY_EXECUTE_DISABLE);
+                Z_X86_MMU_RW | Z_X86_MMU_US | Z_X86_MMU_XD);
 #endif

 /* __kernel_ram_size includes all unused memory, which is used for heaps.
@@ -47,7 +47,39 @@ MMU_BOOT_REGION((u32_t)&__gcov_bss_start, (u32_t)&__gcov_bss_size,
  * automatically for stacks.
  */
 MMU_BOOT_REGION((u32_t)&__kernel_ram_start, (u32_t)&__kernel_ram_size,
-                MMU_ENTRY_WRITE | MMU_ENTRY_EXECUTE_DISABLE);
+                Z_X86_MMU_RW | Z_X86_MMU_XD);
+
+/*
+ * Inline functions for setting memory addresses in page table structures
+ */
+
+static inline void pdpte_update_pd(u64_t *pdpte, struct x86_mmu_pd *pd)
+{
+    uintptr_t pd_addr = (uintptr_t)pd;
+
+    *pdpte = ((*pdpte & ~Z_X86_MMU_PDPTE_PD_MASK) |
+              (pd_addr & Z_X86_MMU_PDPTE_PD_MASK));
+}
+
+static inline void pde_update_pt(u64_t *pde, struct x86_mmu_pt *pt)
+{
+    uintptr_t pt_addr = (uintptr_t)pt;
+
+    __ASSERT((*pde & Z_X86_MMU_PS) == 0, "pde is for 2MB page");
+
+    *pde = ((*pde & ~Z_X86_MMU_PDE_PT_MASK) |
+            (pt_addr & Z_X86_MMU_PDE_PT_MASK));
+}
+
+static inline void pte_update_addr(u64_t *pte, uintptr_t addr)
+{
+    *pte = ((*pte & ~Z_X86_MMU_PTE_ADDR_MASK) |
+            (addr & Z_X86_MMU_PTE_ADDR_MASK));
+}
+
+/*
+ * Functions for dumping page tables to console
+ */

 /* Works for PDPT, PD, PT entries, the bits we check here are all the same.
  *
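The three helpers above share one masked read-modify-write idiom: clear the address field, then OR in the new, suitably aligned address, leaving every flag bit untouched. A worked example with illustrative values (not taken from the commit):

    /* PDE holds a table at 0xABC000 with P|RW set (0x0000000000ABC003).
     * Point it at a new 4KB-aligned table at 0x123000:
     */
    uint64_t pde = 0x0000000000ABC003ULL;
    uintptr_t new_pt = 0x00123000;

    pde = (pde & ~0x00000000FFFFF000ULL) |          /* drop old address bits */
          ((uint64_t)new_pt & 0x00000000FFFFF000ULL);
    /* pde == 0x0000000000123003: new address, P|RW preserved */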
@@ -58,12 +90,12 @@ static char get_entry_code(u64_t value)
 {
     char ret;

-    if ((value & MMU_ENTRY_PRESENT) == 0) {
+    if ((value & Z_X86_MMU_P) == 0) {
         ret = '.';
     } else {
-        if ((value & MMU_ENTRY_WRITE) != 0) {
+        if ((value & Z_X86_MMU_RW) != 0) {
             /* Writable page */
-            if ((value & MMU_ENTRY_EXECUTE_DISABLE) != 0) {
+            if ((value & Z_X86_MMU_XD) != 0) {
                 /* RW */
                 ret = 'w';
             } else {
@@ -71,7 +103,7 @@ static char get_entry_code(u64_t value)
                 ret = 'a';
             }
         } else {
-            if ((value & MMU_ENTRY_EXECUTE_DISABLE) != 0) {
+            if ((value & Z_X86_MMU_XD) != 0) {
                 /* R */
                 ret = 'r';
             } else {
@@ -80,7 +112,7 @@ static char get_entry_code(u64_t value)
         }
     }

-    if ((value & MMU_ENTRY_USER) != 0) {
+    if ((value & Z_X86_MMU_US) != 0) {
         /* Uppercase indicates user mode access */
         ret = toupper(ret);
     }
@@ -97,7 +129,7 @@ static void z_x86_dump_pt(struct x86_mmu_pt *pt, uintptr_t base, int index)
            index, base, base + Z_X86_PT_AREA - 1, pt);

     for (int i = 0; i < Z_X86_NUM_PT_ENTRIES; i++) {
-        printk("%c", get_entry_code(pt->entry[i].value));
+        printk("%c", get_entry_code(pt->entry[i]));

         column++;
         if (column == 64) {
@@ -115,7 +147,7 @@ static void z_x86_dump_pd(struct x86_mmu_pd *pd, uintptr_t base, int index)
            index, base, base + Z_X86_PD_AREA - 1, pd);

     for (int i = 0; i < Z_X86_NUM_PD_ENTRIES; i++) {
-        printk("%c", get_entry_code(pd->entry[i].pt.value));
+        printk("%c", get_entry_code(pd->entry[i]));

         column++;
         if (column == 64) {
@@ -126,14 +158,14 @@ static void z_x86_dump_pd(struct x86_mmu_pd *pd, uintptr_t base, int index)

     for (int i = 0; i < Z_X86_NUM_PD_ENTRIES; i++) {
         struct x86_mmu_pt *pt;
-        union x86_mmu_pde_pt *pde = &pd->entry[i].pt;
+        u64_t pde = pd->entry[i];

-        if (pde->p == 0 || pde->ps == 1) {
+        if (((pde & Z_X86_MMU_P) == 0) || ((pde & Z_X86_MMU_PS) != 0)) {
             /* Skip non-present, or 2MB directory entries, there's
              * no page table to examine */
             continue;
         }
-        pt = (struct x86_mmu_pt *)(pde->pt << MMU_PAGE_SHIFT);
+        pt = z_x86_pde_get_pt(pde);

         z_x86_dump_pt(pt, base + (i * Z_X86_PT_AREA), i);
     }
@@ -146,35 +178,35 @@ static void z_x86_dump_pdpt(struct x86_mmu_pdpt *pdpt, uintptr_t base,
            index, base, base + Z_X86_PDPT_AREA - 1, pdpt);

     for (int i = 0; i < Z_X86_NUM_PDPT_ENTRIES; i++) {
-        printk("%c", get_entry_code(pdpt->entry[i].value));
+        printk("%c", get_entry_code(pdpt->entry[i]));
     }
     printk("\n");
     for (int i = 0; i < Z_X86_NUM_PDPT_ENTRIES; i++) {
         struct x86_mmu_pd *pd;
+        u64_t pdpte = pdpt->entry[i];

-        if (pdpt->entry[i].p == 0) {
+        if ((pdpte & Z_X86_MMU_P) == 0) {
             continue;
         }
-        pd = (struct x86_mmu_pd *)(pdpt->entry[i].pd << MMU_PAGE_SHIFT);
+
+        pd = z_x86_pdpte_get_pd(pdpte);
         z_x86_dump_pd(pd, base + (i * Z_X86_PD_AREA), i);
     }
 }

 void z_x86_dump_page_tables(struct x86_page_tables *ptables)
 {
-    z_x86_dump_pdpt(X86_MMU_GET_PDPT(ptables, 0), 0, 0);
+    z_x86_dump_pdpt(z_x86_get_pdpt(ptables, 0), 0, 0);
 }

 void z_x86_mmu_get_flags(struct x86_page_tables *ptables, void *addr,
                          u64_t *pde_flags, u64_t *pte_flags)
 {
-    *pde_flags = X86_MMU_GET_PDE(ptables, addr)->value &
-        ~MMU_PDE_PAGE_TABLE_MASK;
+    *pde_flags = *z_x86_get_pde(ptables, (uintptr_t)addr) &
+        ~Z_X86_MMU_PDE_PT_MASK;

-    if ((*pde_flags & MMU_ENTRY_PRESENT) != 0) {
-        *pte_flags = X86_MMU_GET_PTE(ptables, addr)->value &
-            ~MMU_PTE_PAGE_MASK;
+    if ((*pde_flags & Z_X86_MMU_P) != 0) {
+        *pte_flags = *z_x86_get_pte(ptables, (uintptr_t)addr) &
+            ~Z_X86_MMU_PTE_ADDR_MASK;
     } else {
         *pte_flags = 0;
     }
@@ -209,12 +241,10 @@ static int x86_mmu_validate_pt(struct x86_mmu_pt *pt, uintptr_t addr,
     int ret = 0;

     while (true) {
-        union x86_mmu_pte *pte = &pt->entry[MMU_PAGE_NUM(addr)];
+        u64_t pte = *z_x86_pt_get_pte(pt, pos);

-        if (pte->p == 0 || pte->us == 0 || (write && pte->rw == 0)) {
-            /* Either non-present, non user-accessible, or
-             * we want to write and write flag is not set
-             */
+        if ((pte & Z_X86_MMU_P) == 0 || (pte & Z_X86_MMU_US) == 0 ||
+            (write && (pte & Z_X86_MMU_RW) == 0)) {
             ret = -1;
             break;
         }
@@ -240,26 +270,23 @@ static int x86_mmu_validate_pd(struct x86_mmu_pd *pd, uintptr_t addr,
     size_t to_examine;

     while (remaining) {
-        union x86_mmu_pde *pde = &pd->entry[MMU_PDE_NUM(pos)];
+        u64_t pde = *z_x86_pd_get_pde(pd, pos);

-        if (pde->pt.p == 0 || pde->pt.us == 0 ||
-            (write && pde->pt.rw == 0)) {
-            /* Either non-present, non user-accessible, or
-             * we want to write and write flag is not set
-             */
+        if ((pde & Z_X86_MMU_P) == 0 || (pde & Z_X86_MMU_US) == 0 ||
+            (write && (pde & Z_X86_MMU_RW) == 0)) {
             ret = -1;
             break;
         }

         to_examine = get_table_max(pos, remaining, Z_X86_PT_AREA);

-        if (pde->pt.ps == 0) {
+        if ((pde & Z_X86_MMU_PS) == 0) {
             /* Not a 2MB PDE. Need to check all the linked
              * tables for this entry
              */
-            struct x86_mmu_pt *pt = (struct x86_mmu_pt *)
-                (pde->pt.pt << MMU_PAGE_SHIFT);
+            struct x86_mmu_pt *pt;
+
+            pt = z_x86_pde_get_pt(pde);
             ret = x86_mmu_validate_pt(pt, pos, to_examine, write);
             if (ret != 0) {
                 break;
@@ -285,16 +312,16 @@ static int x86_mmu_validate_pdpt(struct x86_mmu_pdpt *pdpt, uintptr_t addr,
     size_t to_examine;

     while (remaining) {
-        union x86_mmu_pdpte *pdpte = &pdpt->entry[MMU_PDPTE_NUM(pos)];
+        u64_t pdpte = *z_x86_pdpt_get_pdpte(pdpt, pos);
         struct x86_mmu_pd *pd;

-        if (pdpte->p == 0) {
+        if ((pdpte & Z_X86_MMU_P) == 0) {
             /* Non-present */
             ret = -1;
             break;
         }

-        pd = (struct x86_mmu_pd *)(pdpte->pd << MMU_PAGE_SHIFT);
+        pd = z_x86_pdpte_get_pd(pdpte);
         to_examine = get_table_max(pos, remaining, Z_X86_PD_AREA);

         ret = x86_mmu_validate_pd(pd, pos, to_examine, write);
@@ -313,7 +340,7 @@ int z_x86_mmu_validate(struct x86_page_tables *ptables, void *addr, size_t size,
 {
     int ret;
     /* 32-bit just has one PDPT that covers the entire address space */
-    struct x86_mmu_pdpt *pdpt = X86_MMU_GET_PDPT(ptables, addr);
+    struct x86_mmu_pdpt *pdpt = z_x86_get_pdpt(ptables, (uintptr_t)addr);

     ret = x86_mmu_validate_pdpt(pdpt, (uintptr_t)addr, size, write);

@@ -334,14 +361,14 @@ static inline void tlb_flush_page(void *addr)
     __asm__ ("invlpg %0" :: "m" (*page));
 }

-#define PDPTE_FLAGS_MASK    MMU_ENTRY_PRESENT
+#define PDPTE_FLAGS_MASK    Z_X86_MMU_P

-#define PDE_FLAGS_MASK      (MMU_ENTRY_WRITE | MMU_ENTRY_USER | \
+#define PDE_FLAGS_MASK      (Z_X86_MMU_RW | Z_X86_MMU_US | \
                              PDPTE_FLAGS_MASK)

-#define PTE_FLAGS_MASK      (PDE_FLAGS_MASK | MMU_ENTRY_EXECUTE_DISABLE | \
-                             MMU_ENTRY_WRITE_THROUGH | \
-                             MMU_ENTRY_CACHING_DISABLE)
+#define PTE_FLAGS_MASK      (PDE_FLAGS_MASK | Z_X86_MMU_XD | \
+                             Z_X86_MMU_PWT | \
+                             Z_X86_MMU_PCD)

 void z_x86_mmu_set_flags(struct x86_page_tables *ptables, void *ptr,
                          size_t size, u64_t flags, u64_t mask, bool flush)
@@ -355,41 +382,45 @@ void z_x86_mmu_set_flags(struct x86_page_tables *ptables, void *ptr,
      * zeroed. Expand the mask to include address bits if we are changing
      * the present bit.
      */
-    if ((mask & MMU_PTE_P_MASK) != 0) {
-        mask |= MMU_PTE_PAGE_MASK;
+    if ((mask & Z_X86_MMU_P) != 0) {
+        mask |= Z_X86_MMU_PTE_ADDR_MASK;
     }

     while (size != 0) {
-        union x86_mmu_pte *pte;
-        union x86_mmu_pde_pt *pde;
-        union x86_mmu_pdpte *pdpte;
+        u64_t *pte;
+        u64_t *pde;
+        u64_t *pdpte;
         u64_t cur_flags = flags;

-        pdpte = X86_MMU_GET_PDPTE(ptables, addr);
-        __ASSERT(pdpte->p == 1, "set flags on non-present PDPTE");
-        pdpte->value |= (flags & PDPTE_FLAGS_MASK);
+        pdpte = z_x86_pdpt_get_pdpte(z_x86_get_pdpt(ptables, addr),
+                                     addr);
+        __ASSERT((*pdpte & Z_X86_MMU_P) != 0,
+                 "set flags on non-present PDPTE");
+        *pdpte |= (flags & PDPTE_FLAGS_MASK);
+
+        pde = z_x86_pd_get_pde(z_x86_pdpte_get_pd(*pdpte), addr);
+        __ASSERT((*pde & Z_X86_MMU_P) != 0,
+                 "set flags on non-present PDE");
+        *pde |= (flags & PDE_FLAGS_MASK);

-        pde = X86_MMU_GET_PDE(ptables, addr);
-        __ASSERT(pde->p == 1, "set flags on non-present PDE");
-        pde->value |= (flags & PDE_FLAGS_MASK);
         /* If any flags enable execution, clear execute disable at the
          * page directory level
          */
-        if ((flags & MMU_ENTRY_EXECUTE_DISABLE) == 0) {
-            pde->value &= ~MMU_ENTRY_EXECUTE_DISABLE;
+        if ((flags & Z_X86_MMU_XD) == 0) {
+            *pde &= ~Z_X86_MMU_XD;
         }

-        pte = X86_MMU_GET_PTE(ptables, addr);
+        pte = z_x86_pt_get_pte(z_x86_pde_get_pt(*pde), addr);

         /* If we're setting the present bit, restore the address
          * field. If we're clearing it, then the address field
          * will be zeroed instead, mapping the PTE to the NULL page.
          */
-        if (((mask & MMU_PTE_P_MASK) != 0) &&
-            ((flags & MMU_ENTRY_PRESENT) != 0)) {
+        if ((mask & Z_X86_MMU_P) != 0 && ((flags & Z_X86_MMU_P) != 0)) {
             cur_flags |= addr;
         }

-        pte->value = (pte->value & ~mask) | cur_flags;
+        *pte = (*pte & ~mask) | cur_flags;
         if (flush) {
             tlb_flush_page((void *)addr);
         }
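For orientation, the mask/flags split in z_x86_mmu_set_flags works the same way as the guard-page call earlier in this diff: 'mask' selects which bits may change, 'flags' supplies their new values, and present-bit changes additionally rewrite the address field. A hedged sketch (page_addr is a hypothetical, page-aligned address, not from the commit):

    /* Make one page read-only; only the RW bit is eligible to change. */
    z_x86_mmu_set_flags(&z_x86_kernel_ptables, page_addr, MMU_PAGE_SIZE,
                        MMU_ENTRY_READ,   /* new value: RW cleared */
                        Z_X86_MMU_RW,     /* mask: touch only RW */
                        true);            /* flush the TLB entry */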
@@ -426,15 +457,17 @@ static inline bool is_within_system_ram(uintptr_t addr)
         (addr < (DT_PHYS_RAM_ADDR + (DT_RAM_SIZE * 1024U)));
 }

+#define PDE_IGNORED BIT64(11)
+
 static void add_mmu_region_page(struct x86_page_tables *ptables,
                                 uintptr_t addr, u64_t flags, bool user_table)
 {
     struct x86_mmu_pdpt *pdpt;
-    union x86_mmu_pdpte *pdpte;
+    u64_t *pdpte;
     struct x86_mmu_pd *pd;
-    union x86_mmu_pde_pt *pde;
+    u64_t *pde;
     struct x86_mmu_pt *pt;
-    union x86_mmu_pte *pte;
+    u64_t *pte;

 #ifdef CONFIG_X86_KPTI
     /* If we are generating a page table for user mode, and this address
@@ -443,55 +476,54 @@ static void add_mmu_region_page(struct x86_page_tables *ptables,
      * we will never need them later as memory domains are limited to
      * regions within system RAM.
      */
-    if (user_table && (flags & MMU_ENTRY_USER) == 0 &&
+    if (user_table && (flags & Z_X86_MMU_US) == 0 &&
         !is_within_system_ram(addr)) {
         return;
     }
 #endif

-    pdpt = X86_MMU_GET_PDPT(ptables, addr);
+    pdpt = z_x86_get_pdpt(ptables, addr);

     /* Setup the PDPTE entry for the address, creating a page directory
      * if one didn't exist
      */
-    pdpte = &pdpt->entry[MMU_PDPTE_NUM(addr)];
-    if (pdpte->p == 0) {
+    pdpte = z_x86_pdpt_get_pdpte(pdpt, addr);
+    if ((*pdpte & Z_X86_MMU_P) == 0) {
         pd = get_page();
-        pdpte->pd = ((uintptr_t)pd) >> MMU_PAGE_SHIFT;
+        pdpte_update_pd(pdpte, pd);
     } else {
-        pd = (struct x86_mmu_pd *)(pdpte->pd << MMU_PAGE_SHIFT);
+        pd = z_x86_pdpte_get_pd(*pdpte);
     }
-    pdpte->value |= (flags & PDPTE_FLAGS_MASK);
+    *pdpte |= (flags & PDPTE_FLAGS_MASK);

     /* Setup the PDE entry for the address, creating a page table
      * if necessary
      */
-    pde = &pd->entry[MMU_PDE_NUM(addr)].pt;
-    if (pde->p == 0) {
+    pde = z_x86_pd_get_pde(pd, addr);
+    if ((*pde & Z_X86_MMU_P) == 0) {
         pt = get_page();
-        pde->pt = ((uintptr_t)pt) >> MMU_PAGE_SHIFT;
+        pde_update_pt(pde, pt);
     } else {
-        pt = (struct x86_mmu_pt *)(pde->pt << MMU_PAGE_SHIFT);
+        pt = z_x86_pde_get_pt(*pde);
     }
-    pde->value |= (flags & PDE_FLAGS_MASK);
+    *pde |= (flags & PDE_FLAGS_MASK);

-    /* Execute disable bit needs special handling, we should only set it
-     * at the page directory level if ALL pages have XD set (instead of
-     * just one).
+    /* Execute disable bit needs special handling, we should only set it at
+     * the page directory level if ALL pages have XD set (instead of just
+     * one).
      *
-     * Use the 'ignored2' field to store a marker on whether any
-     * configured region allows execution, the CPU never looks at
-     * or modifies it.
+     * Use an ignored bit position in the PDE to store a marker on whether
+     * any configured region allows execution.
      */
-    if ((flags & MMU_ENTRY_EXECUTE_DISABLE) == 0) {
-        pde->ignored2 = 1;
-        pde->value &= ~MMU_ENTRY_EXECUTE_DISABLE;
-    } else if (pde->ignored2 == 0) {
-        pde->value |= MMU_ENTRY_EXECUTE_DISABLE;
+    if ((flags & Z_X86_MMU_XD) == 0) {
+        *pde |= PDE_IGNORED;
+        *pde &= ~Z_X86_MMU_XD;
+    } else if ((*pde & PDE_IGNORED) == 0) {
+        *pde |= Z_X86_MMU_XD;
     }

 #ifdef CONFIG_X86_KPTI
-    if (user_table && (flags & MMU_ENTRY_USER) == 0 &&
+    if (user_table && (flags & Z_X86_MMU_US) == 0 &&
         addr != (uintptr_t)(&z_shared_kernel_page_start)) {
         /* All non-user accessible pages except the shared page
          * are marked non-present in the page table.
@@ -503,9 +535,9 @@ static void add_mmu_region_page(struct x86_page_tables *ptables,
 #endif

     /* Finally set up the page table entry */
-    pte = &pt->entry[MMU_PAGE_NUM(addr)];
-    pte->page = addr >> MMU_PAGE_SHIFT;
-    pte->value |= (flags & PTE_FLAGS_MASK);
+    pte = z_x86_pt_get_pte(pt, addr);
+    pte_update_addr(pte, addr);
+    *pte |= (flags & PTE_FLAGS_MASK);
 }

 static void add_mmu_region(struct x86_page_tables *ptables,
@@ -522,11 +554,7 @@ static void add_mmu_region(struct x86_page_tables *ptables,
          "unaligned size provided");

     addr = rgn->address;
-
-    /* Add the present flag, and filter out 'runtime user' since this
-     * has no meaning to the actual MMU
-     */
-    flags = rgn->flags | MMU_ENTRY_PRESENT;
+    flags = rgn->flags | Z_X86_MMU_P;

     /* Iterate through the region a page at a time, creating entries as
      * necessary.
@@ -564,6 +592,7 @@ void z_x86_paging_init(void)
     }

     z_x86_enable_paging();
+
 }

 #ifdef CONFIG_X86_USERSPACE
@@ -580,13 +609,13 @@ static uintptr_t thread_pd_create(uintptr_t pages,
     uintptr_t pos = pages, phys_addr = Z_X86_PD_START;

     for (int i = 0; i < Z_X86_NUM_PD; i++, phys_addr += Z_X86_PD_AREA) {
-        union x86_mmu_pdpte *pdpte;
+        u64_t *pdpte;
         struct x86_mmu_pd *master_pd, *dest_pd;

         /* Obtain PD in master tables for the address range and copy
          * into the per-thread PD for this range
          */
-        master_pd = X86_MMU_GET_PD_ADDR(master_ptables, phys_addr);
+        master_pd = z_x86_get_pd(master_ptables, phys_addr);
         dest_pd = (struct x86_mmu_pd *)pos;

         (void)memcpy(dest_pd, master_pd, sizeof(struct x86_mmu_pd));
@@ -594,8 +623,8 @@ static uintptr_t thread_pd_create(uintptr_t pages,
         /* Update pointer in per-thread pdpt to point to the per-thread
          * directory we just copied
          */
-        pdpte = X86_MMU_GET_PDPTE(thread_ptables, phys_addr);
-        pdpte->pd = pos >> MMU_PAGE_SHIFT;
+        pdpte = z_x86_get_pdpte(thread_ptables, phys_addr);
+        pdpte_update_pd(pdpte, dest_pd);
         pos += MMU_PAGE_SIZE;
     }

@@ -610,22 +639,22 @@ static uintptr_t thread_pt_create(uintptr_t pages,
     uintptr_t pos = pages, phys_addr = Z_X86_PT_START;

     for (int i = 0; i < Z_X86_NUM_PT; i++, phys_addr += Z_X86_PT_AREA) {
-        union x86_mmu_pde_pt *pde;
+        u64_t *pde;
         struct x86_mmu_pt *master_pt, *dest_pt;

         /* Same as we did with the directories, obtain PT in master
          * tables for the address range and copy into per-thread PT
          * for this range
          */
-        master_pt = X86_MMU_GET_PT_ADDR(master_ptables, phys_addr);
+        master_pt = z_x86_get_pt(master_ptables, phys_addr);
         dest_pt = (struct x86_mmu_pt *)pos;
-        (void)memcpy(dest_pt, master_pt, sizeof(struct x86_mmu_pd));
+        (void)memcpy(dest_pt, master_pt, sizeof(struct x86_mmu_pt));

         /* And then wire this up to the relevant per-thread
          * page directory entry
          */
-        pde = X86_MMU_GET_PDE(thread_ptables, phys_addr);
-        pde->pt = pos >> MMU_PAGE_SHIFT;
+        pde = z_x86_get_pde(thread_ptables, phys_addr);
+        pde_update_pt(pde, dest_pt);
         pos += MMU_PAGE_SIZE;
     }

@@ -700,12 +729,12 @@ static void reset_mem_partition(struct x86_page_tables *thread_ptables,
     __ASSERT((size & MMU_PAGE_MASK) == 0U, "unaligned size provided");

     while (size != 0) {
-        union x86_mmu_pte *thread_pte, *master_pte;
+        u64_t *thread_pte, *master_pte;

-        thread_pte = X86_MMU_GET_PTE(thread_ptables, addr);
-        master_pte = X86_MMU_GET_PTE(&USER_PTABLES, addr);
+        thread_pte = z_x86_get_pte(thread_ptables, addr);
+        master_pte = z_x86_get_pte(&USER_PTABLES, addr);

-        (void)memcpy(thread_pte, master_pte, sizeof(union x86_mmu_pte));
+        *thread_pte = *master_pte;

         size -= MMU_PAGE_SIZE;
         addr += MMU_PAGE_SIZE;
@@ -719,8 +748,8 @@ static void apply_mem_partition(struct x86_page_tables *ptables,
     u64_t mask;

     if (IS_ENABLED(CONFIG_X86_KPTI)) {
-        x86_attr = partition->attr | MMU_ENTRY_PRESENT;
-        mask = K_MEM_PARTITION_PERM_MASK | MMU_PTE_P_MASK;
+        x86_attr = partition->attr | Z_X86_MMU_P;
+        mask = K_MEM_PARTITION_PERM_MASK | Z_X86_MMU_P;
     } else {
         x86_attr = partition->attr;
         mask = K_MEM_PARTITION_PERM_MASK;
@@ -780,8 +809,8 @@ void z_x86_thread_pt_init(struct k_thread *thread)
     /* Enable access to the thread's own stack buffer */
     z_x86_mmu_set_flags(ptables, (void *)thread->stack_info.start,
                         ROUND_UP(thread->stack_info.size, MMU_PAGE_SIZE),
-                        MMU_ENTRY_PRESENT | K_MEM_PARTITION_P_RW_U_RW,
-                        MMU_PTE_P_MASK | K_MEM_PARTITION_PERM_MASK,
+                        Z_X86_MMU_P | K_MEM_PARTITION_P_RW_U_RW,
+                        Z_X86_MMU_P | K_MEM_PARTITION_PERM_MASK,
                         false);
 }

@@ -38,7 +38,7 @@ FUNC_NORETURN void z_x86_prep_c(int unused, struct multiboot_info *info)

 #if CONFIG_X86_STACK_PROTECTION
     z_x86_mmu_set_flags(&z_x86_kernel_ptables, _interrupt_stack,
-                        MMU_PAGE_SIZE, MMU_ENTRY_READ, MMU_PTE_RW_MASK,
+                        MMU_PAGE_SIZE, MMU_ENTRY_READ, Z_X86_MMU_RW,
                         true);
 #endif

@@ -8,6 +8,8 @@
 #ifndef ZEPHYR_INCLUDE_ARCH_X86_MMUSTRUCTS_H_
 #define ZEPHYR_INCLUDE_ARCH_X86_MMUSTRUCTS_H_

+#include <sys/util.h>
+
 #define MMU_PAGE_SIZE 4096U
 #define MMU_PAGE_MASK 0xfffU
 #define MMU_PAGE_SHIFT 12U
@@ -17,145 +19,67 @@
 #define MMU_IS_ON_PAGE_BOUNDARY(a) (!((u32_t)(a) & MMU_PAGE_MASK))

 /*
- * The following bitmasks correspond to the bit-fields in the
- * x86_mmu_pde_pt structure.
+ * Common flags in the same bit position regardless of which structure level,
+ * although not every flag is supported at every level, and some may be
+ * ignored depending on the state of other bits (such as P or PS)
+ *
+ * These flags indicate bit position, and can be used for setting flags or
+ * masks as needed.
  */

-#define MMU_PDE_P_MASK          0x00000001ULL
-#define MMU_PDE_RW_MASK         0x00000002ULL
-#define MMU_PDE_US_MASK         0x00000004ULL
-#define MMU_PDE_PWT_MASK        0x00000008ULL
-#define MMU_PDE_PCD_MASK        0x00000010ULL
-#define MMU_PDE_A_MASK          0x00000020ULL
-#define MMU_PDE_PS_MASK         0x00000080ULL
-#define MMU_PDE_IGNORED_MASK    0x00000F40ULL
-
-#define MMU_PDE_XD_MASK         0x8000000000000000ULL
-#define MMU_PDE_PAGE_TABLE_MASK 0x00000000fffff000ULL
-#define MMU_PDE_NUM_SHIFT       21U
-#define MMU_PDE_NUM(v)          (((u32_t)(v) >> MMU_PDE_NUM_SHIFT) & 0x1ffU)
-#define MMU_ENTRIES_PER_PGT     512U
-#define MMU_PDPTE_NUM_SHIFT     30U
-#define MMU_PDPTE_NUM(v)        (((u32_t)(v) >> MMU_PDPTE_NUM_SHIFT) & 0x3U)
+#define Z_X86_MMU_P     BIT64(0)    /** Present */
+#define Z_X86_MMU_RW    BIT64(1)    /** Read-Write */
+#define Z_X86_MMU_US    BIT64(2)    /** User-Supervisor */
+#define Z_X86_MMU_PWT   BIT64(3)    /** Page Write Through */
+#define Z_X86_MMU_PCD   BIT64(4)    /** Page Cache Disable */
+#define Z_X86_MMU_A     BIT64(5)    /** Accessed */
+#define Z_X86_MMU_D     BIT64(6)    /** Dirty */
+#define Z_X86_MMU_PS    BIT64(7)    /** Page Size */
+#define Z_X86_MMU_G     BIT64(8)    /** Global */
+#define Z_X86_MMU_XD    BIT64(63)   /** Execute Disable */

 /*
- * The following bitmasks correspond to the bit-fields in the
- * x86_mmu_pde_2mb structure.
+ * Structure-specific flags / masks
  */

-#define MMU_2MB_PDE_P_MASK          0x00000001ULL
-#define MMU_2MB_PDE_RW_MASK         0x00000002ULL
-#define MMU_2MB_PDE_US_MASK         0x00000004ULL
-#define MMU_2MB_PDE_PWT_MASK        0x00000008ULL
-#define MMU_2MB_PDE_PCD_MASK        0x00000010ULL
-#define MMU_2MB_PDE_A_MASK          0x00000020ULL
-#define MMU_2MB_PDE_D_MASK          0x00000040ULL
-#define MMU_2MB_PDE_PS_MASK         0x00000080ULL
-#define MMU_2MB_PDE_G_MASK          0x00000100ULL
-#define MMU_2MB_PDE_IGNORED_MASK    0x00380e00ULL
-#define MMU_2MB_PDE_PAT_MASK        0x00001000ULL
-#define MMU_2MB_PDE_PAGE_TABLE_MASK 0x0007e000ULL
-#define MMU_2MB_PDE_PAGE_MASK       0xffc00000ULL
-#define MMU_2MB_PDE_CLEAR_PS        0x00000000ULL
-#define MMU_2MB_PDE_SET_PS          0x00000080ULL
+#define Z_X86_MMU_PDE_PAT   BIT64(12)
+#define Z_X86_MMU_PTE_PAT   BIT64(7)    /** Page Attribute Table */
+
+#define Z_X86_MMU_PDPTE_PD_MASK     0x00000000FFFFF000ULL
+#define Z_X86_MMU_PDE_PT_MASK       0x00000000FFFFF000ULL
+#define Z_X86_MMU_PDE_2MB_MASK      0x00000000FFC00000ULL
+#define Z_X86_MMU_PTE_ADDR_MASK     0x00000000FFFFF000ULL

 /*
- * The following bitmasks correspond to the bit-fields in the
- * x86_mmu_pte structure.
+ * These flags indicate intention when setting access properties.
  */

-#define MMU_PTE_P_MASK      0x00000001ULL
-#define MMU_PTE_RW_MASK     0x00000002ULL
-#define MMU_PTE_US_MASK     0x00000004ULL
-#define MMU_PTE_PWT_MASK    0x00000008ULL
-#define MMU_PTE_PCD_MASK    0x00000010ULL
-#define MMU_PTE_A_MASK      0x00000020ULL
-#define MMU_PTE_D_MASK      0x00000040ULL
-#define MMU_PTE_PAT_MASK    0x00000080ULL
-#define MMU_PTE_G_MASK      0x00000100ULL
-#define MMU_PTE_ALLOC_MASK  0x00000200ULL
-#define MMU_PTE_CUSTOM_MASK 0x00000c00ULL
-#define MMU_PTE_XD_MASK     0x8000000000000000ULL
-#define MMU_PTE_PAGE_MASK   0x00000000fffff000ULL
-#define MMU_PTE_MASK_ALL    0xffffffffffffffffULL
-#define MMU_PAGE_NUM(v)     (((u32_t)(v) >> MMU_PAGE_NUM_SHIFT) & 0x1ffU)
-#define MMU_PAGE_NUM_SHIFT  12
-
-/*
- * The following values are to are to be OR'ed together to mark the use or
- * unuse of various options in a PTE or PDE as appropriate.
- */
-
-#define MMU_ENTRY_NOT_PRESENT       0x00000000ULL
-#define MMU_ENTRY_PRESENT           0x00000001ULL
-
-#define MMU_ENTRY_READ              0x00000000ULL
-#define MMU_ENTRY_WRITE             0x00000002ULL
-
-#define MMU_ENTRY_SUPERVISOR        0x00000000ULL
-#define MMU_ENTRY_USER              0x00000004ULL
-
-#define MMU_ENTRY_WRITE_BACK        0x00000000ULL
-#define MMU_ENTRY_WRITE_THROUGH     0x00000008ULL
-
-#define MMU_ENTRY_CACHING_ENABLE    0x00000000ULL
-#define MMU_ENTRY_CACHING_DISABLE   0x00000010ULL
-
-#define MMU_ENTRY_NOT_ACCESSED      0x00000000ULL
-#define MMU_ENTRY_ACCESSED          0x00000020ULL
-
-#define MMU_ENTRY_NOT_DIRTY         0x00000000ULL
-#define MMU_ENTRY_DIRTY             0x00000040ULL
-
-#define MMU_ENTRY_NOT_GLOBAL        0x00000000ULL
-#define MMU_ENTRY_GLOBAL            0x00000100ULL
-
-#define MMU_ENTRY_NOT_ALLOC         0x00000000ULL
-#define MMU_ENTRY_ALLOC             0x00000200ULL
-
-#define MMU_ENTRY_EXECUTE_DISABLE   0x8000000000000000ULL
-
-/* Helper macros to ease the usage of the MMU page table structures.
- */
-
-/*
- * Returns the page table entry for the addr
- * use the union to extract page entry related information.
- */
-#define X86_MMU_GET_PTE(ptables, addr)\
-    ((union x86_mmu_pte *)\
-     (&X86_MMU_GET_PT_ADDR(ptables, addr)->entry[MMU_PAGE_NUM(addr)]))
-
-/*
- * Returns the Page table address for the particular address.
- * Page Table address(returned value) is always 4KBytes aligned.
- */
-#define X86_MMU_GET_PT_ADDR(ptables, addr) \
-    ((struct x86_mmu_pt *)\
-     (X86_MMU_GET_PDE(ptables, addr)->pt << MMU_PAGE_SHIFT))
-
-/* Returns the page directory entry for the addr
- * use the union to extract page directory entry related information.
- */
-#define X86_MMU_GET_PDE(ptables, addr)\
-    ((union x86_mmu_pde_pt *) \
-     (&X86_MMU_GET_PD_ADDR(ptables, addr)->entry[MMU_PDE_NUM(addr)]))
-
-/* Returns the page directory entry for the addr
- * use the union to extract page directory entry related information.
- */
-#define X86_MMU_GET_PD_ADDR(ptables, addr) \
-    ((struct x86_mmu_pd *) \
-     (X86_MMU_GET_PDPTE(ptables, addr)->pd << MMU_PAGE_SHIFT))
-
-/* Returns the page directory pointer entry */
-#define X86_MMU_GET_PDPTE(ptables, addr) \
-    (&X86_MMU_GET_PDPT(ptables, addr)->entry[MMU_PDPTE_NUM(addr)])
-
-/* Returns the page directory pointer table corresponding to the address */
-#define X86_MMU_GET_PDPT(ptables, addr) \
-    (&((ptables)->pdpt))
+#define MMU_ENTRY_NOT_PRESENT       0ULL
+#define MMU_ENTRY_PRESENT           Z_X86_MMU_P
+
+#define MMU_ENTRY_READ              0ULL
+#define MMU_ENTRY_WRITE             Z_X86_MMU_RW
+
+#define MMU_ENTRY_SUPERVISOR        0ULL
+#define MMU_ENTRY_USER              Z_X86_MMU_US
+
+#define MMU_ENTRY_WRITE_BACK        0ULL
+#define MMU_ENTRY_WRITE_THROUGH     Z_X86_MMU_PWT
+
+#define MMU_ENTRY_CACHING_ENABLE    0ULL
+#define MMU_ENTRY_CACHING_DISABLE   Z_X86_MMU_PCD
+
+#define MMU_ENTRY_NOT_ACCESSED      0ULL
+#define MMU_ENTRY_ACCESSED          Z_X86_MMU_A
+
+#define MMU_ENTRY_NOT_DIRTY         0ULL
+#define MMU_ENTRY_DIRTY             Z_X86_MMU_D
+
+#define MMU_ENTRY_NOT_GLOBAL        0ULL
+#define MMU_ENTRY_GLOBAL            Z_X86_MMU_G
+
+#define MMU_ENTRY_EXECUTE_DISABLE   Z_X86_MMU_XD
+#define MMU_ENTRY_EXECUTE_ENABLE    0ULL

 /* memory partition arch/soc independent attribute */
 #define K_MEM_PARTITION_P_RW_U_RW   (MMU_ENTRY_WRITE | \
@@ -185,11 +109,11 @@

 /* memory partition access permission mask */
-#define K_MEM_PARTITION_PERM_MASK   (MMU_PTE_RW_MASK |\
-                                     MMU_PTE_US_MASK |\
-                                     MMU_PTE_XD_MASK)
+#define K_MEM_PARTITION_PERM_MASK   (Z_X86_MMU_RW | Z_X86_MMU_US | \
+                                     Z_X86_MMU_XD)

 #ifndef _ASMLANGUAGE
 #include <sys/__assert.h>
 #include <zephyr/types.h>

 /* Structure used by gen_mmu.py to create page directories and page tables.
@@ -225,225 +149,6 @@ struct mmu_region {
 #define MMU_BOOT_REGION(addr, region_size, permission_flags) \
     Z_MMU_BOOT_REGION(__COUNTER__, addr, region_size, permission_flags)

-/*
- * The following defines the format of a 64-bit page directory pointer entry
- * that references a page directory table
- */
-union x86_mmu_pdpte {
-    /** access Page directory entry through use of bitmasks */
-    u64_t value;
-    struct {
-        /** present: must be 1 to reference a page table */
-        u64_t p:1;
-
-        u64_t reserved:2;
-
-        /** page-level write-through: determines the memory type used
-         * to access the page table referenced by this entry
-         */
-        u64_t pwt:1;
-
-        /** page-level cache disable: determines the memory
-         * type used to access the page table referenced by
-         * this entry
-         */
-        u64_t pcd:1;
-
-        u64_t ignored1:7;
-
-        /** page table: physical address of page table */
-        u64_t pd:20;
-
-        u64_t ignored3:32;
-    };
-};
-
-/*
- * The following defines the format of a 32-bit page directory entry
- * that references a page table (as opposed to a 2 Mb page).
- */
-union x86_mmu_pde_pt {
-    /** access Page directory entry through use of bitmasks */
-    u64_t value;
-    struct {
-        /** present: must be 1 to reference a page table */
-        u64_t p:1;
-
-        /** read/write: if 0, writes may not be allowed to the region
-         * controlled by this entry
-         */
-        u64_t rw:1;
-
-        /** user/supervisor: if 0, accesses with CPL=3 are not allowed
-         * to the region controlled by this entry
-         */
-        u64_t us:1;
-
-        /** page-level write-through: determines the memory type used
-         * to access the page table referenced by this entry
-         */
-        u64_t pwt:1;
-
-        /** page-level cache disable: determines the memory
-         * type used to access the page table referenced by
-         * this entry
-         */
-        u64_t pcd:1;
-
-        /** accessed: if 1 -> entry has been used to translate
-         */
-        u64_t a:1;
-
-        u64_t ignored1:1;
-
-        /** page size: ignored when CR4.PSE=0 */
-        u64_t ps:1;
-
-        u64_t ignored2:4;
-
-        /** page table: physical address of page table */
-        u64_t pt:20;
-
-        u64_t ignored3:31;
-
-        /* Execute disable */
-        u64_t xd:1;
-    };
-};
-
-/*
- * The following defines the format of a 64-bit page directory entry
- * that references a 2 Mb page (as opposed to a page table).
- */
-union x86_mmu_pde_2mb {
-    u32_t value;
-    struct {
-        /** present: must be 1 to map a 4 Mb page */
-        u64_t p:1;
-
-        /** read/write: if 0, writes may not be allowed to the 4 Mb
-         * page referenced by this entry
-         */
-        u64_t rw:1;
-
-        /** user/supervisor: if 0, accesses with CPL=3 are not allowed
-         * to the 4 Mb page referenced by this entry
-         */
-        u64_t us:1;
-
-        /** page-level write-through: determines the memory type used
-         * to access the 4 Mb page referenced by
-         * this entry
-         */
-        u64_t pwt:1;
-
-        /** page-level cache disable: determines the memory type used
-         * to access the 4 Mb page referenced by this entry
-         */
-        u64_t pcd:1;
-
-        /** accessed: if 1 -> entry has been used to translate */
-        u64_t a:1;
-
-        /** dirty: indicates whether software has written to the 4 Mb
-         * page referenced by this entry
-         */
-        u64_t d:1;
-
-        /** page size: must be 1 otherwise this entry references a page
-         * table entry
-         */
-        u64_t ps:1;
-
-        /** global: if CR4.PGE=1, then determines whether this
-         * translation is global, i.e. used regardless of PCID
-         */
-        u64_t g:1;
-
-        u64_t ignored1:3;
-
-        /** If PAT is supported, indirectly determines the memory type
-         * used to access the 4 Mb page, otherwise must be 0
-         */
-        u64_t pat:1;
-
-        u64_t reserved1:8;
-
-        /** page table: physical address of page table */
-        u64_t pt:11;
-
-        u64_t reserved2:31;
-
-        /** execute disable */
-        u64_t xd:1;
-    };
-};
-
-/*
- * The following defines the format of a 64-bit page table entry that maps
- * a 4 Kb page.
- */
-union x86_mmu_pte {
-    u64_t value;
-
-    struct {
-        /** present: must be 1 to map a 4 Kb page */
-        u64_t p:1;
-
-        /** read/write: if 0, writes may not be allowed to the 4 Kb
-         * page controlled by this entry
-         */
-        u64_t rw:1;
-
-        /** user/supervisor: if 0, accesses with CPL=3 are not allowed
-         * to the 4 Kb page controlled by this entry
-         */
-        u64_t us:1;
-
-        /** page-level write-through: determines the memory type used
-         * to access the 4 Kb page referenced by this entry
-         */
-        u64_t pwt:1;
-
-        /** page-level cache disable: determines the memory type used
-         * to access the 4 Kb page referenced by this entry
-         */
-        u64_t pcd:1;
-
-        /** accessed: if 1 -> 4 Kb page has been referenced */
-        u64_t a:1;
-
-        /** dirty: if 1 -> 4 Kb page has been written to */
-        u64_t d:1;
-
-        /** If PAT is supported, indirectly determines the memory type
-         * used to access the 4 Kb page, otherwise must be 0
-         */
-        u64_t pat:1;
-
-        /** global: if CR4.PGE=1, then determines whether this
-         * translation is global, i.e. used regardless of PCID
-         */
-        u64_t g:1;
-
-        /** allocated: if 1 -> this PTE has been allocated/ reserved;
-         * this is only used by software, i.e. this bit is ignored by
-         * the MMU
-         */
-        u64_t ignore1:3;
-
-        /** page: physical address of the 4 Kb page */
-        u64_t page:20;
-
-        u64_t ignore2:31;
-
-        /* Execute disable */
-        u64_t xd:1;
-    };
-};
-
 #define Z_X86_NUM_PDPT_ENTRIES  4
 #define Z_X86_NUM_PD_ENTRIES    512
 #define Z_X86_NUM_PT_ENTRIES    512
@@ -456,26 +161,105 @@ union x86_mmu_pte
 typedef u64_t k_mem_partition_attr_t;

 struct x86_mmu_pdpt {
-    union x86_mmu_pdpte entry[Z_X86_NUM_PDPT_ENTRIES];
-};
-
-union x86_mmu_pde {
-    union x86_mmu_pde_pt pt;
-    union x86_mmu_pde_2mb twomb;
+    u64_t entry[Z_X86_NUM_PDPT_ENTRIES];
 };

 struct x86_mmu_pd {
-    union x86_mmu_pde entry[Z_X86_NUM_PD_ENTRIES];
+    u64_t entry[Z_X86_NUM_PD_ENTRIES];
 };

 struct x86_mmu_pt {
-    union x86_mmu_pte entry[Z_X86_NUM_PT_ENTRIES];
+    u64_t entry[Z_X86_NUM_PT_ENTRIES];
 };

 struct x86_page_tables {
     struct x86_mmu_pdpt pdpt;
 };

+/*
+ * Inline functions for getting the next linked structure
+ */
+
+static inline u64_t *z_x86_pdpt_get_pdpte(struct x86_mmu_pdpt *pdpt,
+                                          uintptr_t addr)
+{
+    int index = (addr >> 30U) & (Z_X86_NUM_PDPT_ENTRIES - 1);
+
+    return &pdpt->entry[index];
+}
+
+static inline struct x86_mmu_pd *z_x86_pdpte_get_pd(u64_t pdpte)
+{
+    uintptr_t addr = pdpte & Z_X86_MMU_PDPTE_PD_MASK;
+
+    return (struct x86_mmu_pd *)addr;
+}
+
+static inline u64_t *z_x86_pd_get_pde(struct x86_mmu_pd *pd, uintptr_t addr)
+{
+    int index = (addr >> 21U) & (Z_X86_NUM_PD_ENTRIES - 1);
+
+    return &pd->entry[index];
+}
+
+static inline struct x86_mmu_pt *z_x86_pde_get_pt(u64_t pde)
+{
+    uintptr_t addr = pde & Z_X86_MMU_PDE_PT_MASK;
+
+    __ASSERT((pde & Z_X86_MMU_PS) == 0, "pde is for 2MB page");
+
+    return (struct x86_mmu_pt *)addr;
+}
+
+static inline u64_t *z_x86_pt_get_pte(struct x86_mmu_pt *pt, uintptr_t addr)
+{
+    int index = (addr >> 12U) & (Z_X86_NUM_PT_ENTRIES - 1);
+
+    return &pt->entry[index];
+}
+
+/*
+ * Inline functions for obtaining page table structures from the top-level
+ */
+
+static inline struct x86_mmu_pdpt *
+z_x86_get_pdpt(struct x86_page_tables *ptables, uintptr_t addr)
+{
+    ARG_UNUSED(addr);
+
+    return &ptables->pdpt;
+}
+
+static inline u64_t *z_x86_get_pdpte(struct x86_page_tables *ptables,
+                                     uintptr_t addr)
+{
+    return z_x86_pdpt_get_pdpte(z_x86_get_pdpt(ptables, addr), addr);
+}
+
+static inline struct x86_mmu_pd *
+z_x86_get_pd(struct x86_page_tables *ptables, uintptr_t addr)
+{
+    return z_x86_pdpte_get_pd(*z_x86_get_pdpte(ptables, addr));
+}
+
+static inline u64_t *z_x86_get_pde(struct x86_page_tables *ptables,
+                                   uintptr_t addr)
+{
+    return z_x86_pd_get_pde(z_x86_get_pd(ptables, addr), addr);
+}
+
+static inline struct x86_mmu_pt *
+z_x86_get_pt(struct x86_page_tables *ptables, uintptr_t addr)
+{
+    return z_x86_pde_get_pt(*z_x86_get_pde(ptables, addr));
+}
+
+static inline u64_t *z_x86_get_pte(struct x86_page_tables *ptables,
+                                   uintptr_t addr)
+{
+    return z_x86_pt_get_pte(z_x86_get_pt(ptables, addr), addr);
+}
+
 /**
  * Debug function for dumping out page tables
  *
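A short usage sketch of the new accessors (an assumption, not code from the commit: the tables covering 'addr' are already populated, since z_x86_get_pte() ultimately calls z_x86_pde_get_pt(), which asserts on a 2MB PDE and does not handle non-present directories):

    static bool page_is_user_writable(struct x86_page_tables *ptables,
                                      uintptr_t addr)
    {
        u64_t pde = *z_x86_get_pde(ptables, addr);

        if ((pde & Z_X86_MMU_P) == 0 || (pde & Z_X86_MMU_PS) != 0) {
            return false; /* not present, or a 2MB mapping */
        }

        return (*z_x86_get_pte(ptables, addr) &
                (Z_X86_MMU_P | Z_X86_MMU_US | Z_X86_MMU_RW)) ==
               (Z_X86_MMU_P | Z_X86_MMU_US | Z_X86_MMU_RW);
    }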
@@ -214,6 +214,9 @@ u8_t u8_to_dec(char *buf, u8_t buflen, u8_t value);
 #endif
 #endif

+/** 64-bit unsigned integer with bit position _n set */
+#define BIT64(_n) (1ULL << (_n))
+
 /**
  * @brief Macro sets or clears bit depending on boolean value
  *
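A quick sanity check of what BIT64 expands to, tying the new names back to the hex masks removed from mmustructs.h (static asserts only; safe to drop into any C11 translation unit):

    #include <stdint.h>
    #define BIT64(_n) (1ULL << (_n))

    /* Z_X86_MMU_P is BIT64(0), the old MMU_PTE_P_MASK 0x0000000000000001ULL;
     * Z_X86_MMU_XD is BIT64(63), the old MMU_PTE_XD_MASK 0x8000000000000000ULL.
     */
    _Static_assert(BIT64(0) == 0x0000000000000001ULL, "P bit");
    _Static_assert(BIT64(63) == 0x8000000000000000ULL, "XD bit");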
@@ -17,22 +17,17 @@ MMU_BOOT_REGION(START_ADDR_RANGE2, ADDR_SIZE, REGION_PERM);
 MMU_BOOT_REGION(START_ADDR_RANGE3, ADDR_SIZE, REGION_PERM);
 MMU_BOOT_REGION(START_ADDR_RANGE4, ADDR_SIZE, REGION_PERM);

-static int check_param(union x86_mmu_pte *value, uint32_t perm)
+static int check_param(u64_t value, u64_t perm)
 {
-    u32_t status = (value->rw == ((perm & MMU_PTE_RW_MASK) >> 0x1));
-
-    status &= (value->us == ((perm & MMU_PTE_US_MASK) >> 0x2));
-    status &= value->p;
-    return status;
+    return ((value & REGION_PERM) == REGION_PERM);
 }

-static int check_param_nonset_region(union x86_mmu_pte *value,
-                                     uint32_t perm)
+static int check_param_nonset_region(u64_t value, u64_t perm)
 {
-    u32_t status = (value->rw == 0);
+    u32_t status = ((value & Z_X86_MMU_RW) == 0);

-    status &= (value->us == 0);
-    status &= (value->p == 0);
+    status &= ((value & Z_X86_MMU_US) == 0);
+    status &= ((value & Z_X86_MMU_P) == 0);
     return status;
 }

@@ -40,12 +35,12 @@ static void starting_addr_range(u32_t start_addr_range)
 {

     u32_t addr_range, status = true;
-    union x86_mmu_pte *value;
+    u64_t value;

     for (addr_range = start_addr_range; addr_range <=
          (start_addr_range + STARTING_ADDR_RANGE_LMT);
          addr_range += 0x1000) {
-        value = X86_MMU_GET_PTE(&z_x86_kernel_ptables, addr_range);
+        value = *z_x86_get_pte(&z_x86_kernel_ptables, addr_range);
         status &= check_param(value, REGION_PERM);
         zassert_false((status == 0U), "error at %d permissions %d\n",
                       addr_range, REGION_PERM);
@@ -55,12 +50,12 @@ static void starting_addr_range(u32_t start_addr_range)
 static void before_start_addr_range(u32_t start_addr_range)
 {
     u32_t addr_range, status = true;
-    union x86_mmu_pte *value;
+    u64_t value;

     for (addr_range = start_addr_range - 0x7000;
          addr_range < (start_addr_range); addr_range += 0x1000) {

-        value = X86_MMU_GET_PTE(&z_x86_kernel_ptables, addr_range);
+        value = *z_x86_get_pte(&z_x86_kernel_ptables, addr_range);
         status &= check_param_nonset_region(value, REGION_PERM);

         zassert_false((status == 0U), "error at %d permissions %d\n",
@@ -71,12 +66,12 @@ static void before_start_addr_range(u32_t start_addr_range)
 static void ending_start_addr_range(u32_t start_addr_range)
 {
     u32_t addr_range, status = true;
-    union x86_mmu_pte *value;
+    u64_t value;

     for (addr_range = start_addr_range + ADDR_SIZE; addr_range <
          (start_addr_range + ADDR_SIZE + 0x10000);
          addr_range += 0x1000) {
-        value = X86_MMU_GET_PTE(&z_x86_kernel_ptables, addr_range);
+        value = *z_x86_get_pte(&z_x86_kernel_ptables, addr_range);
         status &= check_param_nonset_region(value, REGION_PERM);
         zassert_false((status == 0U), "error at %d permissions %d\n",
                       addr_range, REGION_PERM);
@@ -6,28 +6,6 @@
 #ifndef __BOOT_PAGE_TABLE_H__
 #define __BOOT_PAGE_TABLE_H__

-#ifndef X86_MMU_GET_PT_ADDR
-
-/* Helper macros to ease the usage of the MMU page table structures.
- * Returns the Page table address for the particular address.
- * Page Table address(returned value) is always 4KBytes aligned.
- */
-#define X86_MMU_GET_PT_ADDR(addr) \
-    ((struct x86_mmu_page_table *) \
-     (X86_MMU_GET_PDE(addr)->page_table << MMU_PAGE_SHIFT))
-
-#endif
-
-#ifndef X86_MMU_GET_PTE
-/* Returns the page table entry for the addr
- * use the union to extract page entry related information.
- */
-
-#define X86_MMU_GET_PTE(addr) \
-    ((union x86_mmu_pae_pte *) \
-     (&X86_MMU_GET_PT_ADDR(addr)->entry[MMU_PAGE_NUM(addr)]))
-#endif
-
 #define MMU_READ 0x00
 #define MMU_WRITE 0x01
 #define MMU_READ_WRITE (MMU_READ | MMU_WRITE)
@@ -29,9 +29,10 @@ void reset_multi_pde_flag(void);

 #define ADDR_PAGE_1 ((u8_t *)__bss_start + SKIP_SIZE * MMU_PAGE_SIZE)
 #define ADDR_PAGE_2 ((u8_t *)__bss_start + (SKIP_SIZE + 1) * MMU_PAGE_SIZE)
-#define PRESET_PAGE_1_VALUE (X86_MMU_GET_PTE(PTABLES, ADDR_PAGE_1)->p = 1)
-#define PRESET_PAGE_2_VALUE (X86_MMU_GET_PTE(PTABLES, ADDR_PAGE_2)->p = 1)
+
+#define PRESET_PAGE_1_VALUE set_flags(ADDR_PAGE_1, MMU_PAGE_SIZE, \
+                                      MMU_ENTRY_PRESENT, Z_X86_MMU_P)
+#define PRESET_PAGE_2_VALUE set_flags(ADDR_PAGE_2, MMU_PAGE_SIZE, \
+                                      MMU_ENTRY_PRESENT, Z_X86_MMU_P)

 static void set_flags(void *ptr, size_t size, u64_t flags,
                       u64_t mask)
@@ -59,7 +60,7 @@ static int buffer_rw_read(void)
     set_flags(ADDR_PAGE_1,
               MMU_PAGE_SIZE,
               MMU_ENTRY_READ,
-              MMU_PDE_RW_MASK);
+              Z_X86_MMU_RW);

     status = buffer_validate(ADDR_PAGE_1, BUFF_SIZE, BUFF_WRITEABLE);

@@ -78,7 +79,7 @@ static int buffer_writeable_write(void)
     set_flags(ADDR_PAGE_1,
               MMU_PAGE_SIZE,
               MMU_ENTRY_WRITE,
-              MMU_PDE_RW_MASK);
+              Z_X86_MMU_RW);

     status = buffer_validate(ADDR_PAGE_1, BUFF_SIZE, BUFF_WRITEABLE);
     if (status != 0) {
@@ -96,7 +97,7 @@ static int buffer_readable_read(void)
     set_flags(ADDR_PAGE_1,
               MMU_PAGE_SIZE,
               MMU_ENTRY_READ,
-              MMU_PDE_RW_MASK);
+              Z_X86_MMU_RW);

     status = buffer_validate(ADDR_PAGE_1, BUFF_SIZE, BUFF_READABLE);
     if (status != 0) {
@@ -114,7 +115,7 @@ static int buffer_readable_write(void)
     set_flags(ADDR_PAGE_1,
               MMU_PAGE_SIZE,
               MMU_ENTRY_WRITE,
-              MMU_PDE_RW_MASK);
+              Z_X86_MMU_RW);

     status = buffer_validate(ADDR_PAGE_1, BUFF_SIZE, BUFF_READABLE);
     if (status != 0) {
@@ -133,7 +134,7 @@ static int buffer_supervisor_rw(void)
     set_flags(ADDR_PAGE_1,
               MMU_PAGE_SIZE,
               MMU_ENTRY_WRITE | MMU_ENTRY_SUPERVISOR,
-              MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
+              Z_X86_MMU_RW | Z_X86_MMU_US);

     status = buffer_validate(ADDR_PAGE_1, BUFF_SIZE, BUFF_READABLE |
                              BUFF_USER);
@@ -152,7 +153,7 @@ static int buffer_supervisor_w(void)
     set_flags(ADDR_PAGE_1,
               MMU_PAGE_SIZE,
               MMU_ENTRY_WRITE | MMU_ENTRY_SUPERVISOR,
-              MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
+              Z_X86_MMU_RW | Z_X86_MMU_US);

     status = buffer_validate(ADDR_PAGE_1, BUFF_SIZE, BUFF_WRITEABLE);
     if (status != -EPERM) {
@@ -170,7 +171,7 @@ static int buffer_user_rw_user(void)
     set_flags(ADDR_PAGE_1,
               MMU_PAGE_SIZE,
               MMU_ENTRY_WRITE | MMU_ENTRY_USER,
-              MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
+              Z_X86_MMU_RW | Z_X86_MMU_US);
     status = buffer_validate(ADDR_PAGE_1, BUFF_SIZE, BUFF_WRITEABLE |
                              BUFF_USER);
     if (status != 0) {
@@ -188,7 +189,7 @@ static int buffer_user_rw_supervisor(void)
     set_flags(ADDR_PAGE_1,
               MMU_PAGE_SIZE,
               MMU_ENTRY_WRITE | MMU_ENTRY_SUPERVISOR,
-              MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
+              Z_X86_MMU_RW | Z_X86_MMU_US);
     status = buffer_validate(ADDR_PAGE_1, BUFF_SIZE, BUFF_WRITEABLE |
                              BUFF_USER);
     if (status != -EPERM) {
@@ -208,12 +209,12 @@ static int multi_page_buffer_user(void)
     set_flags(ADDR_PAGE_1,
               MMU_PAGE_SIZE,
               MMU_ENTRY_WRITE | MMU_ENTRY_SUPERVISOR,
-              MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
+              Z_X86_MMU_RW | Z_X86_MMU_US);

     set_flags(ADDR_PAGE_2,
               MMU_PAGE_SIZE,
               MMU_ENTRY_WRITE | MMU_ENTRY_SUPERVISOR,
-              MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
+              Z_X86_MMU_RW | Z_X86_MMU_US);

     status = buffer_validate(ADDR_PAGE_1,
                              2 * MMU_PAGE_SIZE,
@@ -234,12 +235,12 @@ static int multi_page_buffer_write_user(void)
     set_flags(ADDR_PAGE_1,
               MMU_PAGE_SIZE,
               MMU_ENTRY_WRITE | MMU_ENTRY_SUPERVISOR,
-              MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
+              Z_X86_MMU_RW | Z_X86_MMU_US);

     set_flags(ADDR_PAGE_2,
               MMU_PAGE_SIZE,
               MMU_ENTRY_WRITE | MMU_ENTRY_SUPERVISOR,
-              MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
+              Z_X86_MMU_RW | Z_X86_MMU_US);

     status = buffer_validate(ADDR_PAGE_1, 2 * MMU_PAGE_SIZE,
                              BUFF_WRITEABLE);
@@ -259,12 +260,12 @@ static int multi_page_buffer_read_user(void)
     set_flags(ADDR_PAGE_1,
               MMU_PAGE_SIZE,
               MMU_ENTRY_READ | MMU_ENTRY_SUPERVISOR,
-              MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
+              Z_X86_MMU_RW | Z_X86_MMU_US);

     set_flags(ADDR_PAGE_2,
               MMU_PAGE_SIZE,
               MMU_ENTRY_READ | MMU_ENTRY_SUPERVISOR,
-              MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
+              Z_X86_MMU_RW | Z_X86_MMU_US);

     status = buffer_validate(ADDR_PAGE_1, 2 * MMU_PAGE_SIZE, BUFF_READABLE
                              | BUFF_USER);
@@ -284,12 +285,12 @@ static int multi_page_buffer_read(void)
     set_flags(ADDR_PAGE_1,
               MMU_PAGE_SIZE,
               MMU_ENTRY_READ | MMU_ENTRY_SUPERVISOR,
-              MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
+              Z_X86_MMU_RW | Z_X86_MMU_US);

     set_flags(ADDR_PAGE_2,
               MMU_PAGE_SIZE,
               MMU_ENTRY_READ | MMU_ENTRY_SUPERVISOR,
-              MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
+              Z_X86_MMU_RW | Z_X86_MMU_US);

     status = buffer_validate(ADDR_PAGE_1, 2 * MMU_PAGE_SIZE,
                              BUFF_WRITEABLE);
@@ -309,12 +310,12 @@ static int multi_pde_buffer_rw(void)
     set_flags(ADDR_PAGE_1,
               MMU_PAGE_SIZE,
               MMU_ENTRY_READ,
-              MMU_PDE_RW_MASK);
+              Z_X86_MMU_RW);

     set_flags(ADDR_PAGE_2,
               MMU_PAGE_SIZE,
               MMU_ENTRY_READ,
-              MMU_PDE_RW_MASK);
+              Z_X86_MMU_RW);

     status = buffer_validate(ADDR_PAGE_1, 2 * MMU_PAGE_SIZE,
                              BUFF_WRITEABLE);
@@ -334,12 +335,12 @@ static int multi_pde_buffer_writeable_write(void)
     set_flags(ADDR_PAGE_1,
               MMU_PAGE_SIZE,
               MMU_ENTRY_WRITE,
-              MMU_PDE_RW_MASK);
+              Z_X86_MMU_RW);

     set_flags(ADDR_PAGE_2,
               MMU_PAGE_SIZE,
               MMU_ENTRY_WRITE,
-              MMU_PDE_RW_MASK);
+              Z_X86_MMU_RW);

     status = buffer_validate(ADDR_PAGE_1, 2 * MMU_PAGE_SIZE,
                              BUFF_WRITEABLE);
@@ -359,12 +360,12 @@ static int multi_pde_buffer_readable_read(void)
     set_flags(ADDR_PAGE_1,
               MMU_PAGE_SIZE,
               MMU_ENTRY_READ,
-              MMU_PDE_RW_MASK);
+              Z_X86_MMU_RW);

     set_flags(ADDR_PAGE_2,
               MMU_PAGE_SIZE,
               MMU_ENTRY_READ,
-              MMU_PDE_RW_MASK);
+              Z_X86_MMU_RW);

     status = buffer_validate(ADDR_PAGE_1, 2 * MMU_PAGE_SIZE,
                              BUFF_READABLE);
@@ -384,12 +385,12 @@ static int multi_pde_buffer_readable_write(void)
     set_flags(ADDR_PAGE_1,
               MMU_PAGE_SIZE,
               MMU_ENTRY_WRITE,
-              MMU_PDE_RW_MASK);
+              Z_X86_MMU_RW);

     set_flags(ADDR_PAGE_2,
               MMU_PAGE_SIZE,
               MMU_ENTRY_WRITE,
-              MMU_PDE_RW_MASK);
+              Z_X86_MMU_RW);

     status = buffer_validate(ADDR_PAGE_1, 2 * MMU_PAGE_SIZE,
                              BUFF_READABLE);
@@ -406,7 +407,7 @@ void reset_flag(void)
     set_flags(ADDR_PAGE_1,
               MMU_PAGE_SIZE,
               MMU_ENTRY_WRITE | MMU_ENTRY_USER,
-              MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
+              Z_X86_MMU_RW | Z_X86_MMU_US);
 }

 void reset_multi_pte_page_flag(void)
@@ -414,12 +415,12 @@ void reset_multi_pte_page_flag(void)
     set_flags(ADDR_PAGE_1,
               MMU_PAGE_SIZE,
               MMU_ENTRY_WRITE | MMU_ENTRY_USER,
-              MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
+              Z_X86_MMU_RW | Z_X86_MMU_US);

     set_flags(ADDR_PAGE_2,
               MMU_PAGE_SIZE,
               MMU_ENTRY_WRITE | MMU_ENTRY_USER,
-              MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
+              Z_X86_MMU_RW | Z_X86_MMU_US);
 }

 void reset_multi_pde_flag(void)
@@ -427,12 +428,12 @@ void reset_multi_pde_flag(void)
     set_flags(ADDR_PAGE_1,
               MMU_PAGE_SIZE,
               MMU_ENTRY_WRITE | MMU_ENTRY_USER,
-              MMU_PDE_RW_MASK | MMU_PDE_US_MASK);
+              Z_X86_MMU_RW | Z_X86_MMU_US);

     set_flags(ADDR_PAGE_2,
               MMU_PAGE_SIZE,
               MMU_ENTRY_WRITE | MMU_ENTRY_USER,
-              MMU_PDE_RW_MASK | MMU_PDE_US_MASK);
+              Z_X86_MMU_RW | Z_X86_MMU_US);
 }

 /**