x86: remove support for non-PAE page tables

PAE tables introduce the NX bit, which is very desirable
from a security perspective; PAE itself has been available
on x86 hardware since 1995.

PAE tables are larger, but we are not targeting x86 memory
protection for RAM-constrained devices.

Remove the old-style 32-bit tables to make the x86 port
easier to maintain.

Rename some verbosely named data structures, and fix an
incorrect number of entries for the page directory
pointer table.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Authored by Andrew Boie on 2019-02-05 13:32:49 -08:00, committed by Andrew Boie
commit 2d9bbdf5f3 (parent a463625b76)
12 changed files with 83 additions and 869 deletions


@@ -66,20 +66,9 @@ config X86_MMU
bool "Enable Memory Management Unit"
select MEMORY_PROTECTION
help
This options enables the memory management unit present in x86. Enabling
this will create boot time page table structure.
config X86_PAE_MODE
bool "Enable PAE page tables"
depends on X86_MMU
help
When selected the Page address extension mode is enabled. The PAE
page tables provides a mechanism to selectively disable execution.
So any Page Table Entry (PTE) that sets the XD bit will have all
instruction fetches disabled in that 4KB region. The amount of RAM
needed for PAE tables is more than twice that of 32-Bit paging
because each PAE entry is 64bits wide.
Note: Do not enable in RAM constrained devices.
This options enables the memory management unit present in x86
and creates a set of page tables at build time. Requires an MMU
which supports PAE page tables.
config X86_NO_MELTDOWN
bool


@@ -357,12 +357,7 @@ __csSet:
movl $__mmu_tables_start, %eax
movl %eax, %cr3
#ifndef CONFIG_X86_PAE_MODE
/*Set CR4.PAE = 0 (5th bit in CR4*/
movl %cr4, %eax
andl $CR4_PAE_DISABLE, %eax
movl %eax, %cr4
#else
/* Enable PAE */
movl %cr4, %eax
orl $CR4_PAE_ENABLE, %eax
movl %eax, %cr4
@@ -372,7 +367,7 @@ __csSet:
rdmsr
orl $0x800, %eax
wrmsr
#endif
/* Enable paging (CR0.PG, bit 31) / write protect (CR0.WP, bit 16) */
movl %cr0, %eax
orl $CR0_PG_WP_ENABLE, %eax

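The MSR touched in the hunk above is IA32_EFER (0xc0000080), and the 0x800 bit is EFER.NXE, which must be set before the XD bit in PAE entries has any effect. A minimal C rendering of the enable sequence, assuming GCC-style inline assembly (a sketch, not the commit's code):

#include <stdint.h>

static inline void enable_pae_and_nx(void)
{
	uint32_t cr4, lo, hi;

	/* CR4.PAE is bit 5 */
	__asm__ volatile("movl %%cr4, %0" : "=r"(cr4));
	cr4 |= 1u << 5;
	__asm__ volatile("movl %0, %%cr4" : : "r"(cr4));

	/* IA32_EFER is MSR 0xc0000080; NXE is bit 11 (0x800) */
	__asm__ volatile("rdmsr" : "=a"(lo), "=d"(hi) : "c"(0xc0000080u));
	lo |= 1u << 11;
	__asm__ volatile("wrmsr" : : "a"(lo), "d"(hi), "c"(0xc0000080u));
}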

@@ -289,7 +289,6 @@ EXC_FUNC_NOCODE(IV_MACHINE_CHECK);
#ifdef CONFIG_X86_MMU
static void dump_entry_flags(x86_page_entry_data_t flags)
{
#ifdef CONFIG_X86_PAE_MODE
printk("0x%x%x %s, %s, %s, %s\n", (u32_t)(flags>>32),
(u32_t)(flags),
flags & (x86_page_entry_data_t)MMU_ENTRY_PRESENT ?
@@ -300,15 +299,6 @@ static void dump_entry_flags(x86_page_entry_data_t flags)
"User" : "Supervisor",
flags & (x86_page_entry_data_t)MMU_ENTRY_EXECUTE_DISABLE ?
"Execute Disable" : "Execute Enabled");
#else
printk("0x%03x %s, %s, %s\n", flags,
flags & (x86_page_entry_data_t)MMU_ENTRY_PRESENT ?
"Present" : "Non-present",
flags & (x86_page_entry_data_t)MMU_ENTRY_WRITE ?
"Writable" : "Read-only",
flags & (x86_page_entry_data_t)MMU_ENTRY_USER ?
"User" : "Supervisor");
#endif /* CONFIG_X86_PAE_MODE */
}
static void dump_mmu_flags(void *addr)
@@ -400,11 +390,7 @@ struct task_state_segment _df_tss = {
.es = DATA_SEG,
.ss = DATA_SEG,
.eip = (u32_t)_df_handler_top,
#ifdef CONFIG_X86_PAE_MODE
.cr3 = (u32_t)X86_MMU_PDPT
#else
.cr3 = (u32_t)X86_MMU_PD
#endif
};
static FUNC_NORETURN __used void _df_handler_bottom(void)
@@ -459,11 +445,7 @@ static FUNC_NORETURN __used void _df_handler_top(void)
_main_tss.es = DATA_SEG;
_main_tss.ss = DATA_SEG;
_main_tss.eip = (u32_t)_df_handler_bottom;
#ifdef CONFIG_X86_PAE_MODE
_main_tss.cr3 = (u32_t)X86_MMU_PDPT;
#else
_main_tss.cr3 = (u32_t)X86_MMU_PD;
#endif
/* NT bit is set in EFLAGS so we will task switch back to _main_tss
* and run _df_handler_bottom

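Both the double-fault TSS and the main TSS now unconditionally use X86_MMU_PDPT for CR3: under PAE, CR3 holds the physical address of the 32-byte-aligned, 4-entry PDPT rather than of a page directory. A hypothetical helper showing such a CR3 load, assuming identity-mapped tables as during early boot (illustrative only):

static inline void set_cr3(struct x86_mmu_pdpt *pdpt)
{
	/* pdpt is a placeholder; physical == virtual is assumed here */
	__asm__ volatile("movl %0, %%cr3" : : "r"((uint32_t)pdpt) : "memory");
}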

@@ -70,22 +70,16 @@ int _arch_buffer_validate(void *addr, size_t size, int write)
u32_t ending_pte_num;
u32_t pde;
u32_t pte;
#ifdef CONFIG_X86_PAE_MODE
union x86_mmu_pae_pte pte_value;
union x86_mmu_pte pte_value;
u32_t start_pdpte_num = MMU_PDPTE_NUM(addr);
u32_t end_pdpte_num = MMU_PDPTE_NUM((char *)addr + size - 1);
u32_t pdpte;
#else
union x86_mmu_pte pte_value;
#endif
struct x86_mmu_page_table *pte_address;
struct x86_mmu_pt *pte_address;
start_pde_num = MMU_PDE_NUM(addr);
end_pde_num = MMU_PDE_NUM((char *)addr + size - 1);
starting_pte_num = MMU_PAGE_NUM((char *)addr);
#ifdef CONFIG_X86_PAE_MODE
for (pdpte = start_pdpte_num; pdpte <= end_pdpte_num; pdpte++) {
if (pdpte != start_pdpte_num) {
start_pde_num = 0U;
@@ -97,21 +91,16 @@ int _arch_buffer_validate(void *addr, size_t size, int write)
end_pde_num = MMU_PDE_NUM((char *)addr + size - 1);
}
struct x86_mmu_page_directory *pd_address =
struct x86_mmu_pd *pd_address =
X86_MMU_GET_PD_ADDR_INDEX(pdpte);
#endif
/* Iterate for all the pde's the buffer might take up.
* (depends on the size of the buffer and start address
* of the buff)
*/
for (pde = start_pde_num; pde <= end_pde_num; pde++) {
#ifdef CONFIG_X86_PAE_MODE
union x86_mmu_pae_pde pde_value =
pd_address->entry[pde];
#else
union x86_mmu_pde_pt pde_value =
X86_MMU_PD->entry[pde].pt;
#endif
pd_address->entry[pde].pt;
if (!pde_value.p ||
!pde_value.us ||
@@ -119,8 +108,8 @@ int _arch_buffer_validate(void *addr, size_t size, int write)
return -EPERM;
}
pte_address = (struct x86_mmu_page_table *)
(pde_value.page_table << MMU_PAGE_SHIFT);
pte_address = (struct x86_mmu_pt *)
(pde_value.pt << MMU_PAGE_SHIFT);
/* loop over all the possible page tables for the
* required size. If the pde is not the last one
@@ -163,9 +152,7 @@ int _arch_buffer_validate(void *addr, size_t size, int write)
return -EPERM;
}
}
#ifdef CONFIG_X86_PAE_MODE
}
#endif
return 0;
}
@@ -186,11 +173,7 @@ void _x86_mmu_set_flags(void *ptr,
x86_page_entry_data_t flags,
x86_page_entry_data_t mask)
{
#ifdef CONFIG_X86_PAE_MODE
union x86_mmu_pae_pte *pte;
#else
union x86_mmu_pte *pte;
#endif
u32_t addr = (u32_t)ptr;
@@ -199,13 +182,8 @@ void _x86_mmu_set_flags(void *ptr,
while (size != 0) {
#ifdef CONFIG_X86_PAE_MODE
/* TODO we're not generating 2MB entries at the moment */
__ASSERT(X86_MMU_GET_PDE(addr)->ps != 1, "2MB PDE found");
#else
/* TODO we're not generating 4MB entries at the moment */
__ASSERT(X86_MMU_GET_4MB_PDE(addr)->ps != 1, "4MB PDE found");
#endif
pte = X86_MMU_GET_PTE(addr);
pte->value = (pte->value & ~mask) | flags;

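_x86_mmu_set_flags() read-modify-writes every 4 KB PTE in the given range. As a usage sketch (the size parameter is truncated out of the hunk above, so the exact signature is an assumption), marking a buffer execute-disabled could look like:

	/* buf and size are placeholders for a caller-owned region */
	_x86_mmu_set_flags(buf, size,
			   MMU_ENTRY_EXECUTE_DISABLE,
			   MMU_PTE_XD_MASK);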

@@ -30,42 +30,34 @@
#define MMU_PDE_PS_MASK 0x00000080
#define MMU_PDE_IGNORED_MASK 0x00000F40
#ifdef CONFIG_X86_PAE_MODE
#define MMU_PDE_XD_MASK 0x8000000000000000
#define MMU_PDE_PAGE_TABLE_MASK 0x00000000fffff000
#define MMU_PDE_XD_MASK 0x8000000000000000ULL
#define MMU_PDE_PAGE_TABLE_MASK 0x00000000fffff000ULL
#define MMU_PDE_NUM_SHIFT 21
#define MMU_PDE_NUM(v) (((u32_t)(v) >> MMU_PDE_NUM_SHIFT) & 0x1ff)
#define MMU_ENTRIES_PER_PGT 512
#define MMU_PDPTE_NUM_SHIFT 30
#define MMU_PDPTE_NUM(v) (((u32_t)(v) >> MMU_PDPTE_NUM_SHIFT) & 0x3)
#else
#define MMU_PDE_PAGE_TABLE_MASK 0xfffff000
#define MMU_PDE_NUM_SHIFT 22
#define MMU_PDE_NUM(v) ((u32_t)(v) >> MMU_PDE_NUM_SHIFT)
#define MMU_ENTRIES_PER_PGT 1024
#endif
/*
* The following bitmasks correspond to the bit-fields in the
* x86_mmu_pde_4mb structure.
* x86_mmu_pde_2mb structure.
*/
#define MMU_4MB_PDE_P_MASK 0x00000001
#define MMU_4MB_PDE_RW_MASK 0x00000002
#define MMU_4MB_PDE_US_MASK 0x00000004
#define MMU_4MB_PDE_PWT_MASK 0x00000008
#define MMU_4MB_PDE_PCD_MASK 0x00000010
#define MMU_4MB_PDE_A_MASK 0x00000020
#define MMU_4MB_PDE_D_MASK 0x00000040
#define MMU_4MB_PDE_PS_MASK 0x00000080
#define MMU_4MB_PDE_G_MASK 0x00000100
#define MMU_4MB_PDE_IGNORED_MASK 0x00380e00
#define MMU_4MB_PDE_PAT_MASK 0x00001000
#define MMU_4MB_PDE_PAGE_TABLE_MASK 0x0007e000
#define MMU_4MB_PDE_PAGE_MASK 0xffc00000
#define MMU_4MB_PDE_CLEAR_PS 0x00000000
#define MMU_4MB_PDE_SET_PS 0x00000080
#define MMU_2MB_PDE_P_MASK 0x00000001
#define MMU_2MB_PDE_RW_MASK 0x00000002
#define MMU_2MB_PDE_US_MASK 0x00000004
#define MMU_2MB_PDE_PWT_MASK 0x00000008
#define MMU_2MB_PDE_PCD_MASK 0x00000010
#define MMU_2MB_PDE_A_MASK 0x00000020
#define MMU_2MB_PDE_D_MASK 0x00000040
#define MMU_2MB_PDE_PS_MASK 0x00000080
#define MMU_2MB_PDE_G_MASK 0x00000100
#define MMU_2MB_PDE_IGNORED_MASK 0x00380e00
#define MMU_2MB_PDE_PAT_MASK 0x00001000
#define MMU_2MB_PDE_PAGE_TABLE_MASK 0x0007e000
#define MMU_2MB_PDE_PAGE_MASK 0xffc00000
#define MMU_2MB_PDE_CLEAR_PS 0x00000000
#define MMU_2MB_PDE_SET_PS 0x00000080
/*
@@ -84,18 +76,10 @@
#define MMU_PTE_G_MASK 0x00000100
#define MMU_PTE_ALLOC_MASK 0x00000200
#define MMU_PTE_CUSTOM_MASK 0x00000c00
#ifdef CONFIG_X86_PAE_MODE
#define MMU_PTE_XD_MASK 0x8000000000000000
#define MMU_PTE_PAGE_MASK 0x00000000fffff000
#define MMU_PTE_MASK_ALL 0xffffffffffffffff
#define MMU_PTE_XD_MASK 0x8000000000000000ULL
#define MMU_PTE_PAGE_MASK 0x00000000fffff000ULL
#define MMU_PTE_MASK_ALL 0xffffffffffffffffULL
#define MMU_PAGE_NUM(v) (((u32_t)(v) >> MMU_PAGE_NUM_SHIFT) & 0x1ff)
#else
#define MMU_PTE_PAGE_MASK 0xfffff000
#define MMU_PTE_MASK_ALL 0xffffffff
#define MMU_PAGE_NUM(v) (((u32_t)(v) >> MMU_PAGE_NUM_SHIFT) & 0x3ff)
#endif
#define MMU_PAGE_NUM_SHIFT 12
/*
@@ -130,11 +114,7 @@
#define MMU_ENTRY_NOT_ALLOC 0x00000000
#define MMU_ENTRY_ALLOC 0x00000200
#ifdef CONFIG_X86_PAE_MODE
#define MMU_ENTRY_EXECUTE_DISABLE 0x8000000000000000
#else
#define MMU_ENTRY_EXECUTE_DISABLE 0x0
#endif
#define MMU_ENTRY_EXECUTE_DISABLE 0x8000000000000000ULL
/* Special flag argument for MMU_BOOT region invocations */
@@ -161,14 +141,13 @@
/* Helper macros to ease the usage of the MMU page table structures.
*/
#ifdef CONFIG_X86_PAE_MODE
/*
* Returns the page table entry for the addr
* use the union to extract page entry related information.
*/
#define X86_MMU_GET_PTE(addr)\
((union x86_mmu_pae_pte *)\
((union x86_mmu_pte *)\
(&X86_MMU_GET_PT_ADDR(addr)->entry[MMU_PAGE_NUM(addr)]))
/*
@@ -176,77 +155,40 @@
* Page Table address(returned value) is always 4KBytes aligned.
*/
#define X86_MMU_GET_PT_ADDR(addr) \
((struct x86_mmu_page_table *)\
(X86_MMU_GET_PDE(addr)->page_table << MMU_PAGE_SHIFT))
((struct x86_mmu_pt *)\
(X86_MMU_GET_PDE(addr)->pt << MMU_PAGE_SHIFT))
/* Returns the page directory entry for the addr
* use the union to extract page directory entry related information.
*/
#define X86_MMU_GET_PDE(addr)\
((union x86_mmu_pae_pde *) \
((union x86_mmu_pde_pt *) \
(&X86_MMU_GET_PD_ADDR(addr)->entry[MMU_PDE_NUM(addr)]))
/* Returns the page directory entry for the addr
* use the union to extract page directory entry related information.
*/
#define X86_MMU_GET_PD_ADDR(addr) \
((struct x86_mmu_page_directory *) \
(X86_MMU_GET_PDPTE(addr)->page_directory << MMU_PAGE_SHIFT))
((struct x86_mmu_pd *) \
(X86_MMU_GET_PDPTE(addr)->pd << MMU_PAGE_SHIFT))
/* Returns the page directory pointer entry */
#define X86_MMU_GET_PDPTE(addr) \
((union x86_mmu_pae_pdpte *) \
((union x86_mmu_pdpte *) \
(&X86_MMU_PDPT->entry[MMU_PDPTE_NUM(addr)]))
/* Return the Page directory address.
* input is the entry number
*/
#define X86_MMU_GET_PD_ADDR_INDEX(index) \
((struct x86_mmu_page_directory *) \
(X86_MMU_GET_PDPTE_INDEX(index)->page_directory << MMU_PAGE_SHIFT))
((struct x86_mmu_pd *) \
(X86_MMU_GET_PDPTE_INDEX(index)->pd << MMU_PAGE_SHIFT))
/* Returns the page directory pointer entry.
* Input is the entry number
*/
#define X86_MMU_GET_PDPTE_INDEX(index) \
((union x86_mmu_pae_pdpte *)(&X86_MMU_PDPT->entry[index]))
#else
/* Normal 32-Bit paging */
#define X86_MMU_GET_PT_ADDR(addr) \
((struct x86_mmu_page_table *)\
(X86_MMU_PD->entry[MMU_PDE_NUM(addr)].pt.page_table \
<< MMU_PAGE_SHIFT))
/* Returns the page table entry for the addr
* use the union to extract page entry related information.
*/
#define X86_MMU_GET_PTE(addr)\
((union x86_mmu_pte *)\
(&X86_MMU_GET_PT_ADDR(addr)->entry[MMU_PAGE_NUM(addr)]))
/* Returns the page directory entry for the addr
* use the union to extract page directory entry related information.
*/
#define X86_MMU_GET_PDE(addr)\
((union x86_mmu_pde_pt *)\
(&X86_MMU_PD->entry[MMU_PDE_NUM(addr)].pt))
#define X86_MMU_GET_PD_ADDR(addr) (X86_MMU_PD)
/* Returns the 4 MB page directory entry for the addr
* use the union to extract page directory entry related information.
*/
#define X86_MMU_GET_4MB_PDE(addr)\
((union x86_mmu_pde_4mb *)\
(&X86_MMU_PD->entry[MMU_PDE_NUM(addr)].fourmb))
#endif /* CONFIG_X86_PAE_MODE */
#ifdef CONFIG_X86_MMU
/* Flags which are only available for PAE mode page tables */
#ifdef CONFIG_X86_PAE_MODE
((union x86_mmu_pdpte *)(&X86_MMU_PDPT->entry[index]))
/* memory partition arch/soc independent attribute */
#define K_MEM_PARTITION_P_RW_U_RW (MMU_ENTRY_WRITE | \
@@ -280,25 +222,6 @@
MMU_PTE_US_MASK |\
MMU_PTE_XD_MASK)
#else /* 32-bit paging mode enabled */
/* memory partition arch/soc independent attribute */
#define K_MEM_PARTITION_P_RW_U_RW (MMU_ENTRY_WRITE | MMU_ENTRY_USER)
#define K_MEM_PARTITION_P_RW_U_NA (MMU_ENTRY_WRITE | MMU_ENTRY_SUPERVISOR)
#define K_MEM_PARTITION_P_RO_U_RO (MMU_ENTRY_READ | MMU_ENTRY_USER)
#define K_MEM_PARTITION_P_RO_U_NA (MMU_ENTRY_READ | MMU_ENTRY_SUPERVISOR)
/* memory partition access permission mask */
#define K_MEM_PARTITION_PERM_MASK (MMU_PTE_RW_MASK | MMU_PTE_US_MASK)
#endif /* CONFIG_X86_PAE_MODE */
#endif /* CONFIG_X86_MMU */
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
@@ -335,190 +258,11 @@ struct mmu_region {
#define MMU_BOOT_REGION(addr, region_size, permission_flags) \
_MMU_BOOT_REGION(__COUNTER__, addr, region_size, permission_flags)
/*
* The following defines the format of a 32-bit page directory entry
* that references a page table (as opposed to a 4 Mb page).
*/
union x86_mmu_pde_pt {
/** access PT entry through use of bitmasks */
u32_t value;
struct {
/** present: must be 1 to reference a page table */
u32_t p:1;
/** read/write: if 0, writes may not be allowed to the region
* controlled by this entry
*/
u32_t rw:1;
/** user/supervisor: if 0, accesses with CPL=3 are not allowed
* to the region controlled by this entry
*/
u32_t us:1;
/** page-level write-through: determines the memory type used
* to access the page table referenced by this entry
*/
u32_t pwt:1;
/** page-level cache disable: determines the memory
* type used to access the page table referenced by
* this entry
*/
u32_t pcd:1;
/** accessed: if 1 -> entry has been used to translate
*/
u32_t a:1;
u32_t ignored1:1;
/** page size: ignored when CR4.PSE=0 */
u32_t ps:1;
u32_t ignored2:4;
/** page table: physical address of page table */
u32_t page_table:20;
};
};
/*
* The following defines the format of a 32-bit page directory entry
* that references a 4 Mb page (as opposed to a page table).
*/
union x86_mmu_pde_4mb {
u32_t value;
struct {
/** present: must be 1 to map a 4 Mb page */
u32_t p:1;
/** read/write: if 0, writes may not be allowed to the 4 Mb
* page referenced by this entry
*/
u32_t rw:1;
/** user/supervisor: if 0, accesses with CPL=3 are not allowed
* to the 4 Mb page referenced by this entry
*/
u32_t us:1;
/** page-level write-through: determines the memory type used
* to access the 4 Mb page referenced by
* this entry
*/
u32_t pwt:1;
/** page-level cache disable: determines the memory type used
* to access the 4 Mb page referenced by this entry
*/
u32_t pcd:1;
/** accessed: if 1 -> entry has been used to translate */
u32_t a:1;
/** dirty: indicates whether software has written to the 4 Mb
* page referenced by this entry
*/
u32_t d:1;
/** page size: must be 1 otherwise this entry references a page
* table entry
*/
u32_t ps:1;
/** global: if CR4.PGE=1, then determines whether this
* translation is global, i.e. used regardless of PCID
*/
u32_t g:1;
u32_t ignored1:3;
/** If PAT is supported, indirectly determines the memory type
* used to access the 4 Mb page, otherwise must be 0
*/
u32_t pat:1;
/** page table: physical address of page table */
u32_t page_table:6;
u32_t ignored2:3;
/** page: physical address of the 4 Mb page */
u32_t page:10;
};
};
/*
* The following defines the format of a 32-bit page table entry that maps
* a 4 Kb page.
*/
union x86_mmu_pte {
u32_t value;
struct {
/** present: must be 1 to map a 4 Kb page */
u32_t p:1;
/** read/write: if 0, writes may not be allowed to the 4 Kb
* page controlled by this entry
*/
u32_t rw:1;
/** user/supervisor: if 0, accesses with CPL=3 are not allowed
* to the 4 Kb page controlled by this entry
*/
u32_t us:1;
/** page-level write-through: determines the memory type used
* to access the 4 Kb page referenced by this entry
*/
u32_t pwt:1;
/** page-level cache disable: determines the memory type used
* to access the 4 Kb page referenced by this entry
*/
u32_t pcd:1;
/** accessed: if 1 -> 4 Kb page has been referenced */
u32_t a:1;
/** dirty: if 1 -> 4 Kb page has been written to */
u32_t d:1;
/** If PAT is supported, indirectly determines the memory type
* used to access the 4 Kb page, otherwise must be 0
*/
u32_t pat:1;
/** global: if CR4.PGE=1, then determines whether this
* translation is global, i.e. used regardless of PCID
*/
u32_t g:1;
/** allocated: if 1 -> this PTE has been allocated/ reserved;
* this is only used by software, i.e. this bit is ignored by
* the MMU
*/
u32_t alloc:1;
/** Ignored by h/w, available for use by s/w */
u32_t custom:2;
/** page: physical address of the 4 Kb page */
u32_t page:20;
};
};
/* PAE paging mode structures and unions */
/*
* The following defines the format of a 64-bit page directory pointer entry
* that references a page directory table
*/
union x86_mmu_pae_pdpte {
union x86_mmu_pdpte {
/** access Page directory entry through use of bitmasks */
u64_t value;
struct {
@@ -541,7 +285,7 @@ union x86_mmu_pae_pdpte {
u64_t ignored1:7;
/** page table: physical address of page table */
u64_t page_directory:20;
u64_t pd:20;
u64_t ignored3:32;
};
@@ -549,9 +293,9 @@ union x86_mmu_pae_pdpte {
/*
* The following defines the format of a 32-bit page directory entry
* that references a page table (as opposed to a 4 Mb page).
* that references a page table (as opposed to a 2 Mb page).
*/
union x86_mmu_pae_pde {
union x86_mmu_pde_pt {
/** access Page directory entry through use of bitmasks */
u64_t value;
struct {
@@ -591,7 +335,7 @@ union x86_mmu_pae_pde {
u64_t ignored2:4;
/** page table: physical address of page table */
u64_t page_table:20;
u64_t pt:20;
u64_t ignored3:31;
@@ -606,7 +350,7 @@ union x86_mmu_pae_pde {
* that references a 2 Mb page (as opposed to a page table).
*/
union x86_mmu_pae_pde_2mb {
union x86_mmu_pde_2mb {
u32_t value;
struct {
/** present: must be 1 to map a 4 Mb page */
@@ -661,7 +405,7 @@ union x86_mmu_pae_pde_2mb {
u64_t reserved1:8;
/** page table: physical address of page table */
u64_t page_table:11;
u64_t pt:11;
u64_t reserved2:31;
@@ -674,7 +418,7 @@ union x86_mmu_pae_pde_2mb {
* The following defines the format of a 64-bit page table entry that maps
* a 4 Kb page.
*/
union x86_mmu_pae_pte {
union x86_mmu_pte {
u64_t value;
struct {
@@ -734,46 +478,25 @@ union x86_mmu_pae_pte {
};
#ifdef CONFIG_X86_PAE_MODE
typedef u64_t x86_page_entry_data_t;
#else
typedef u32_t x86_page_entry_data_t;
#endif
typedef x86_page_entry_data_t k_mem_partition_attr_t;
#ifdef CONFIG_X86_PAE_MODE
struct x86_mmu_page_directory_pointer {
union x86_mmu_pae_pdpte entry[512];
struct x86_mmu_pdpt {
union x86_mmu_pdpte entry[4];
};
#endif
union x86_mmu_pde {
#ifndef CONFIG_X86_PAE_MODE
union x86_mmu_pde_pt pt;
union x86_mmu_pde_4mb fourmb;
#else
union x86_mmu_pae_pde pt;
union x86_mmu_pae_pde_2mb twomb;
#endif
union x86_mmu_pde_2mb twomb;
};
/** Page Directory structure for 32-bit/PAE paging mode */
struct x86_mmu_page_directory {
#ifndef CONFIG_X86_PAE_MODE
union x86_mmu_pde entry[1024];
#else
union x86_mmu_pae_pde entry[512];
#endif
struct x86_mmu_pd {
union x86_mmu_pde entry[512];
};
/** Page Table structure for 32-bit/PAE paging mode */
struct x86_mmu_page_table {
#ifndef CONFIG_X86_PAE_MODE
union x86_mmu_pte entry[1024];
#else
union x86_mmu_pae_pte entry[512];
#endif
struct x86_mmu_pt {
union x86_mmu_pte entry[512];
};
#endif /* _ASMLANGUAGE */

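Putting the renamed helpers together, a walk equivalent to what _arch_buffer_validate() does for a single address might read as follows (a sketch assuming the macros and unions above; the real function also iterates over address ranges and PDPT entries):

static int is_user_accessible(void *addr, int write)
{
	union x86_mmu_pde_pt *pde = X86_MMU_GET_PDE(addr);

	/* The directory entry must be present and user-accessible,
	 * and writable if a write is requested.
	 */
	if (!pde->p || !pde->us || (write && !pde->rw)) {
		return 0;
	}

	union x86_mmu_pte *pte = X86_MMU_GET_PTE(addr);

	return pte->p && pte->us && (!write || pte->rw);
}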

@@ -15,7 +15,6 @@ CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC=25000000
CONFIG_TEST_RANDOM_GENERATOR=y
CONFIG_XIP=y
CONFIG_X86_MMU=y
CONFIG_X86_PAE_MODE=y
CONFIG_DEBUG_INFO=y
CONFIG_SCHED_SCALABLE=y
CONFIG_WAITQ_SCALABLE=y


@@ -17,5 +17,4 @@ CONFIG_UART_CONSOLE=y
CONFIG_TEST_RANDOM_GENERATOR=y
CONFIG_XIP=n
CONFIG_X86_MMU=y
CONFIG_X86_PAE_MODE=y
CONFIG_REALMODE=y


@@ -649,17 +649,9 @@ extern const NANO_ESF _default_esf;
#ifdef CONFIG_X86_MMU
/* Linker variable. It is needed to access the start of the Page directory */
#ifdef CONFIG_X86_PAE_MODE
extern u64_t __mmu_tables_start;
#define X86_MMU_PDPT ((struct x86_mmu_page_directory_pointer *)\
#define X86_MMU_PDPT ((struct x86_mmu_pdpt *)\
(u32_t *)(void *)&__mmu_tables_start)
#else
extern u32_t __mmu_tables_start;
#define X86_MMU_PD ((struct x86_mmu_page_directory *)\
(void *)&__mmu_tables_start)
#endif
/**
* @brief Fetch page table flags for a particular page


@@ -54,410 +54,6 @@ PAGE_ENTRY_ALLOC = 1 << 9
PAGE_ENTRY_CUSTOM = 0 << 10
#############
#*****************************************************************************#
# class for 4Kb Mode
class PageMode_4kb:
total_pages = 1023
write_page_entry_bin = "I"
size_addressed_per_pde = (1024 * 4096) # 4MB In Bytes
# return the page directory number for the give address
def get_pde_number(self, value):
return (value >> 22) & 0x3FF
# return the page table number for the given address
def get_pte_number(self, value):
return (value >> 12) & 0x3FF
# get the total number of pd available
def get_number_of_pd(self):
return len(list_of_pde.keys())
# the return value will have the page address and it is assumed
# to be a 4096 boundary
# hence the output of this API will be a 20bit address of the page table
def address_of_page_table(self, page_table_number):
global pd_start_addr
# location from where the Page tables will be written
PT_start_addr = pd_start_addr + 4096
return ((PT_start_addr +
(page_tables_list.index(page_table_number) * 4096) >> 12))
# union x86_mmu_pde_pt {
# u32_t value;
# struct {
# u32_t p:1;
# u32_t rw:1;
# u32_t us:1;
# u32_t pwt:1;
# u32_t pcd:1;
# u32_t a:1;
# u32_t ignored1:1;
# u32_t ps:1;
# u32_t ignored2:4;
# u32_t page_table:20;
# };
# };
def get_binary_pde_value(self, value):
perms = value.page_entries_info[0].permissions
present = PAGE_ENTRY_PRESENT
read_write = check_bits(perms, [1, 29]) << 1
user_mode = check_bits(perms, [2, 28]) << 2
pwt = PAGE_ENTRY_PWT
pcd = PAGE_ENTRY_PCD
a = PAGE_ENTRY_ACCESSED
ps = 0 << 7 # this is a read only field
page_table = self.address_of_page_table(value.pde_index) << 12
return (present |
read_write |
user_mode |
pwt |
pcd |
a |
ps |
page_table)
# union x86_mmu_pte {
# u32_t value;
# struct {
# u32_t p:1;
# u32_t rw:1;
# u32_t us:1;
# u32_t pwt:1;
# u32_t pcd:1;
# u32_t a:1;
# u32_t d:1;
# u32_t pat:1;
# u32_t g:1;
# u32_t alloc:1;
# u32_t custom:2;
# u32_t page:20;
# };
# };
def get_binary_pte_value(self, value, pte, perm_for_pte):
present = PAGE_ENTRY_PRESENT
read_write = ((perm_for_pte >> 1) & 0x1) << 1
user_mode = ((perm_for_pte >> 2) & 0x1) << 2
pwt = PAGE_ENTRY_PWT
pcd = PAGE_ENTRY_PCD
a = PAGE_ENTRY_ACCESSED
d = PAGE_ENTRY_DIRTY
pat = PAGE_ENTRY_PAT
g = PAGE_ENTRY_GLOBAL
alloc = PAGE_ENTRY_ALLOC
custom = PAGE_ENTRY_CUSTOM
# This points to the actual memory in the HW
# totally 20 bits to rep the phy address
# first 10 is the number got from pde and next 10 is pte
page_table = ((value.pde_index << 10) | pte) << 12
binary_value = (present | read_write | user_mode |
pwt | pcd | a | d | pat | g | alloc | custom |
page_table)
return binary_value
def populate_required_structs(self):
for region in raw_info:
pde_index = self.get_pde_number(region[0])
pte_valid_addr_start = self.get_pte_number(region[0])
# Get the end of the page table entries
# Since a memory region can take up only a few entries in the Page
# table, this helps us get the last valid PTE.
pte_valid_addr_end = self.get_pte_number(region[0] +
region[1] - 1)
mem_size = region[1]
# In-case the start address aligns with a page table entry other
# than zero and the mem_size is greater than (1024*4096) i.e 4MB
# in case where it overflows the currenty PDE's range then limit the
# PTE to 1024 and so make the mem_size reflect the actual size taken
# up in the current PDE
if (region[1] + (pte_valid_addr_start * 4096)) >= \
(self.size_addressed_per_pde):
pte_valid_addr_end = self.total_pages
mem_size = (((self.total_pages + 1) -
pte_valid_addr_start) * 4096)
self.set_pde_pte_values(pde_index, region[0], mem_size,
pte_valid_addr_start,
pte_valid_addr_end,
region[2])
if pde_index not in page_tables_list:
page_tables_list.append(pde_index)
# IF the current pde couldn't fit the entire requested region size
# then there is a need to create new PDEs to match the size.
# Here the overflow_size represents the size that couldn't be fit
# inside the current PDE, this is will now to used to create a
# new PDE/PDEs so the size remaining will be
# requested size - allocated size(in the current PDE)
overflow_size = region[1] - mem_size
# create all the extra PDEs needed to fit the requested size
# this loop starts from the current pde till the last pde that is
# needed the last pde is calcualted as the (start_addr + size) >>
# 22
if overflow_size != 0:
for extra_pde in range(pde_index + 1, self.get_pde_number(
region[0] + region[1]) + 1):
# new pde's start address
# each page directory entry has a addr range of (1024 *4096)
# thus the new PDE start address is a multiple of that
# number
extra_pde_start_address = (extra_pde *
(self.size_addressed_per_pde))
# the start address of and extra pde will always be 0
# and the end address is calculated with the new pde's start
# address and the overflow_size
extra_pte_valid_addr_end = self.get_pte_number(
extra_pde_start_address + overflow_size - 1)
# if the overflow_size couldn't be fit inside this new pde
# then need another pde and so we now need to limit the end
# of the PTE to 1024 and set the size of this new region to
# the max possible
extra_region_size = overflow_size
if overflow_size >= (self.size_addressed_per_pde):
extra_region_size = self.size_addressed_per_pde
extra_pte_valid_addr_end = self.total_pages
# load the new PDE's details
self.set_pde_pte_values(extra_pde,
extra_pde_start_address,
extra_region_size,
0,
extra_pte_valid_addr_end,
region[2])
# for the next iteration of the loop the size needs to
# decreased.
overflow_size -= extra_region_size
# print(hex_32(overflow_size),extra_pde)
if extra_pde not in page_tables_list:
page_tables_list.append(extra_pde)
if overflow_size == 0:
break
page_tables_list.sort()
# update the tuple values for the memory regions needed
def set_pde_pte_values(self, pde_index, address, mem_size,
pte_valid_addr_start, pte_valid_addr_end, perm):
pages_tuple = valid_pages_inside_pde(
start_addr=address,
size=mem_size,
pte_valid_addr_start=pte_valid_addr_start,
pte_valid_addr_end=pte_valid_addr_end,
permissions=perm)
mem_region_values = mmu_region_details(pde_index=pde_index,
page_entries_info=[])
mem_region_values.page_entries_info.append(pages_tuple)
if pde_index in list_of_pde.keys():
# this step adds the new page info to the exsisting pages info
list_of_pde[pde_index].page_entries_info.append(pages_tuple)
else:
list_of_pde[pde_index] = mem_region_values
def page_directory_create_binary_file(self):
global output_buffer
global output_offset
for pde in range(self.total_pages + 1):
binary_value = 0 # the page directory entry is not valid
# if i have a valid entry to populate
if pde in sorted(list_of_pde.keys()):
value = list_of_pde[pde]
binary_value = self.get_binary_pde_value(value)
self.pde_verbose_output(pde, binary_value)
struct.pack_into(self.write_page_entry_bin,
output_buffer,
output_offset,
binary_value)
output_offset += struct.calcsize(self.write_page_entry_bin)
def page_table_create_binary_file(self):
global output_buffer
global output_offset
for key, value in sorted(list_of_pde.items()):
for pte in range(self.total_pages + 1):
binary_value = 0 # the page directory entry is not valid
valid_pte = 0
for i in value.page_entries_info:
temp_value = ((pte >= i.pte_valid_addr_start) and
(pte <= i.pte_valid_addr_end))
if temp_value:
perm_for_pte = i.permissions
valid_pte |= temp_value
# if i have a valid entry to populate
if valid_pte:
binary_value = self.get_binary_pte_value(value,
pte,
perm_for_pte)
self.pte_verbose_output(key, pte, binary_value)
struct.pack_into(self.write_page_entry_bin,
output_buffer,
output_offset,
binary_value)
output_offset += struct.calcsize(self.write_page_entry_bin)
# To populate the binary file the module struct needs a buffer of the
# excat size. This returns the size needed for the given set of page
# tables.
def set_binary_file_size(self):
binary_size = ctypes.create_string_buffer((4096) +
(len(list_of_pde.keys()) *
4096))
return binary_size
# prints the details of the pde
def verbose_output(self):
print("\nTotal Page directory entries " + str(self.get_number_of_pd()))
count = 0
for key, value in list_of_pde.items():
for i in value.page_entries_info:
count += 1
print("In Page directory entry " +
format_string(value.pde_index) +
": valid start address = " +
hex_32(i.start_addr) + ", end address = " +
hex_32((i.pte_valid_addr_end + 1) * 4096 - 1 +
(value.pde_index * (FourMB))))
# print all the tables for a given page table mode
def print_all_page_table_info(self):
self.pde_print_elements()
self.pte_print_elements()
def pde_verbose_output(self, pde, binary_value):
if args.verbose < 2:
return
global print_string_pde_list
present = format_string(binary_value & 0x1)
read_write = format_string((binary_value >> 1) & 0x1)
user_mode = format_string((binary_value >> 2) & 0x1)
pwt = format_string((binary_value >> 3) & 0x1)
pcd = format_string((binary_value >> 4) & 0x1)
a = format_string((binary_value >> 5) & 0x1)
ignored1 = format_string(0)
ps = format_string((binary_value >> 7) & 0x1)
ignored2 = format_string(0000)
page_table_addr = format_string(hex((binary_value >> 12) & 0xFFFFF))
print_string_pde_list += (format_string(str(pde)) +
" | " +
(present) +
" | " +
(read_write) + " | " +
(user_mode) + " | " +
(pwt) + " | " +
(pcd) + " | " +
(a) + " | " +
(ps) + " | " +
page_table_addr + "\n"
)
def pde_print_elements(self):
global print_string_pde_list
print("PAGE DIRECTORY ")
print(format_string("PDE") + " | " +
format_string('P') + " | " +
format_string('rw') + " | " +
format_string('us') + " | " +
format_string('pwt') + " | " +
format_string('pcd') + " | " +
format_string('a') + " | " +
format_string('ps') + " | " +
format_string('Addr page table'))
print(print_string_pde_list)
print("END OF PAGE DIRECTORY")
def pte_verbose_output(self, pde, pte, binary_value):
global pde_pte_string
present = format_string((binary_value >> 0) & 0x1)
read_write = format_string((binary_value >> 1) & 0x1)
user_mode = format_string((binary_value >> 2) & 0x1)
pwt = format_string((binary_value >> 3) & 0x1)
pcd = format_string((binary_value >> 4) & 0x1)
a = format_string((binary_value >> 5) & 0x1)
d = format_string((binary_value >> 6) & 0x1)
pat = format_string((binary_value >> 7) & 0x1)
g = format_string((binary_value >> 8) & 0x1)
alloc = format_string((binary_value >> 9) & 0x1)
custom = format_string((binary_value >> 10) & 0x3)
page_table_addr = hex_20((binary_value >> 12) & 0xFFFFF)
print_string_list = (format_string(str(pte)) + " | " +
(present) + " | " +
(read_write) + " | " +
(user_mode) + " | " +
(pwt) + " | " +
(pcd) + " | " +
(a) + " | " +
(d) + " | " +
(pat) + " | " +
(g) + " | " +
(alloc) + " | " +
(custom) + " | " +
page_table_addr + "\n"
)
if pde in pde_pte_string.keys():
pde_pte_string[pde] += (print_string_list)
else:
pde_pte_string[pde] = print_string_list
def pte_print_elements(self):
global pde_pte_string
for pde, print_string in sorted(pde_pte_string.items()):
print("\nPAGE TABLE " + str(pde))
print(format_string("PTE") + " | " +
format_string('P') + " | " +
format_string('rw') + " | " +
format_string('us') + " | " +
format_string('pwt') + " | " +
format_string('pcd') + " | " +
format_string('a') + " | " +
format_string('d') + " | " +
format_string('pat') + " | " +
format_string('g') + " | " +
format_string('alloc') + " | " +
format_string('custom') + " | " +
format_string('page addr'))
print(print_string)
print("END OF PAGE TABLE " + str(pde))
#*****************************************************************************#
# class for PAE 4KB Mode
@@ -1008,7 +604,7 @@ def print_list_of_pde(list_of_pde):
# size of the region - so page tables entries will be created with this
# read write permissions
def read_mmu_list_marshal_param(page_mode):
def read_mmu_list_marshal_param(page_table):
global read_buff
global page_tables_list
@@ -1053,7 +649,7 @@ def read_mmu_list_marshal_param(page_mode):
validation_issue_memory_overlap = [
True,
start_location,
page_mode.get_pde_number(start_location)]
page_table.get_pde_number(start_location)]
return
# add the retrived info another list
@@ -1120,7 +716,7 @@ def parse_args():
# the format for writing in the binary file would be decided by the
# endian selected
def set_struct_endian_format(page_mode):
def set_struct_endian_format(page_table):
endian_string = "<"
if args.big_endian is True:
endian_string = ">"
@@ -1129,8 +725,8 @@ def set_struct_endian_format(page_mode):
struct_mmu_regions_format = endian_string + "IIQ"
header_values_format = endian_string + "II"
page_mode.write_page_entry_bin = (endian_string +
page_mode.write_page_entry_bin)
page_table.write_page_entry_bin = (endian_string +
page_table.write_page_entry_bin)
def format_string(input_str):
@@ -1152,7 +748,7 @@ def hex_20(input_value):
return output_value
def verbose_output(page_mode):
def verbose_output(page_table):
if args.verbose == 0:
return
@@ -1162,10 +758,10 @@ def verbose_output(page_mode):
", Memory size = " + hex_32(info[1]) +
", Permission = " + hex(info[2]))
page_mode.verbose_output()
page_table.verbose_output()
if args.verbose > 1:
page_mode.print_all_page_table_info()
page_table.print_all_page_table_info()
# build sym table
@@ -1180,7 +776,7 @@ def get_symbols(obj):
# determine which paging mode was selected
def get_page_mode():
def get_page_table():
with open(args.kernel, "rb") as fp:
kernel = ELFFile(fp)
sym = get_symbols(kernel)
@@ -1195,12 +791,9 @@ def main():
parse_args()
# select the page table needed
if get_page_mode():
page_mode = PageMode_PAE()
else:
page_mode = PageMode_4kb()
page_table = PageMode_PAE()
set_struct_endian_format(page_mode)
set_struct_endian_format(page_table)
global input_file
input_file = open(args.input, 'rb')
@@ -1209,31 +802,31 @@ def main():
binary_output_file = open(args.output, 'wb')
# inputfile= file_name
read_mmu_list_marshal_param(page_mode)
read_mmu_list_marshal_param(page_table)
# populate the required structs
page_mode.populate_required_structs()
page_table.populate_required_structs()
# validate the inputs
validate_pde_regions()
# The size of the output buffer has to match the number of bytes we write
# this corresponds to the number of page tables gets created.
output_buffer = page_mode.set_binary_file_size()
output_buffer = page_table.set_binary_file_size()
try:
page_mode.pdpte_create_binary_file()
page_table.pdpte_create_binary_file()
except BaseException:
pass
page_mode.page_directory_create_binary_file()
page_mode.page_table_create_binary_file()
page_table.page_directory_create_binary_file()
page_table.page_table_create_binary_file()
# write the binary data into the file
binary_output_file.write(output_buffer)
binary_output_file.close()
# verbose output needed by the build system
verbose_output(page_mode)
verbose_output(page_table)
if __name__ == "__main__":


@@ -6,4 +6,3 @@ config SOC_ATOM
select BOOTLOADER_UNKNOWN
select X86_MMU
select ARCH_HAS_USERSPACE
select X86_PAE_MODE


@@ -17,7 +17,7 @@ MMU_BOOT_REGION(START_ADDR_RANGE2, ADDR_SIZE, REGION_PERM);
MMU_BOOT_REGION(START_ADDR_RANGE3, ADDR_SIZE, REGION_PERM);
MMU_BOOT_REGION(START_ADDR_RANGE4, ADDR_SIZE, REGION_PERM);
static int check_param(union x86_mmu_pae_pte *value, uint32_t perm)
static int check_param(union x86_mmu_pte *value, uint32_t perm)
{
u32_t status = (value->rw == ((perm & MMU_PTE_RW_MASK) >> 0x1));
@@ -26,7 +26,7 @@ static int check_param(union x86_mmu_pae_pte *value, uint32_t perm)
return status;
}
static int check_param_nonset_region(union x86_mmu_pae_pte *value,
static int check_param_nonset_region(union x86_mmu_pte *value,
uint32_t perm)
{
u32_t status = (value->rw == 0);
@@ -40,7 +40,7 @@ static void starting_addr_range(u32_t start_addr_range)
{
u32_t addr_range, status = true;
union x86_mmu_pae_pte *value;
union x86_mmu_pte *value;
for (addr_range = start_addr_range; addr_range <=
(start_addr_range + STARTING_ADDR_RANGE_LMT);
@@ -55,7 +55,7 @@ static void starting_addr_range(u32_t start_addr_range)
static void before_start_addr_range(u32_t start_addr_range)
{
u32_t addr_range, status = true;
union x86_mmu_pae_pte *value;
union x86_mmu_pte *value;
for (addr_range = start_addr_range - 0x7000;
addr_range < (start_addr_range); addr_range += 0x1000) {
@@ -71,7 +71,7 @@ static void before_start_addr_range(u32_t start_addr_range)
static void ending_start_addr_range(u32_t start_addr_range)
{
u32_t addr_range, status = true;
union x86_mmu_pae_pte *value;
union x86_mmu_pte *value;
for (addr_range = start_addr_range + ADDR_SIZE; addr_range <
(start_addr_range + ADDR_SIZE + 0x10000);


@@ -48,26 +48,11 @@ void _SysFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf)
#define DO_BARRIERS() do { } while (0)
#endif
#if defined(CONFIG_ARM)
#define NO_EXECUTE_SUPPORT 1
#elif defined(CONFIG_ARC)
#define NO_EXECUTE_SUPPORT 1
#elif defined(CONFIG_X86)
#if defined(CONFIG_X86_PAE_MODE)
#define NO_EXECUTE_SUPPORT 1
#else
/* 32-bit paging mode in x86 doesn't support execute disable capability.*/
#endif /* x86 */
#else
#error "Architecture not supported"
#endif
static int __attribute__((noinline)) add_one(int i)
{
return (i + 1);
}
#ifdef NO_EXECUTE_SUPPORT
static void execute_from_buffer(u8_t *dst)
{
void *src = FUNC_TO_PTR(add_one);
@@ -94,7 +79,6 @@ static void execute_from_buffer(u8_t *dst)
INFO("Did not get expected return value!\n");
}
}
#endif
/**
* @brief Test write to read only section
@@ -161,7 +145,6 @@ static void write_text(void)
*
* @ingroup kernel_memprotect_tests
*/
#ifdef NO_EXECUTE_SUPPORT
static void exec_data(void)
{
execute_from_buffer(data_buf);
@@ -202,24 +185,6 @@ static void exec_heap(void)
}
#endif
#else
static void exec_data(void)
{
ztest_test_skip();
}
static void exec_stack(void)
{
ztest_test_skip();
}
static void exec_heap(void)
{
ztest_test_skip();
}
#endif /* NO_EXECUTE_SUPPORT */
void test_main(void)
{
ztest_test_suite(protection,