mmu: support only identity RAM mapping
We no longer plan to support a split address space with the kernel in
high memory and per-process address spaces. Because of this, we can
simplify some things. System RAM is now always identity-mapped at boot.

We no longer require any virtual-to-physical translation for page
tables, and can remove the dual-mapping logic from the page table
generation script since we won't need to transition the instruction
pointer off of physical addresses.

CONFIG_KERNEL_VM_BASE and CONFIG_KERNEL_VM_LIMIT have been removed.
The kernel's address space always starts at CONFIG_SRAM_BASE_ADDRESS,
with a fixed size specified by CONFIG_KERNEL_VM_SIZE.

Driver MMIOs and other uses of k_mem_map() are still virtually mapped,
and the later introduction of demand paging will result in only a
subset of system RAM having a fixed identity mapping, instead of all
of it.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
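To illustrate the simplification: with RAM identity-mapped, the old virtual/physical conversion helpers degenerate into plain casts. A minimal sketch, assuming illustrative helper names (phys_to_virt()/virt_to_phys() are not Zephyr APIs):

    #include <stdint.h>

    /* With an identity mapping, a RAM address is the same number in both
     * address spaces, so there is no offset to add or subtract.
     */
    static inline void *phys_to_virt(uintptr_t phys)
    {
            return (void *)phys;
    }

    static inline uintptr_t virt_to_phys(void *virt)
    {
            return (uintptr_t)virt;
    }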
parent ce29baa0ef, commit 7d32e9f9a5
arch/Kconfig | 42
@@ -534,43 +534,17 @@ config SRAM_REGION_PERMISSIONS
 	  If not enabled, all SRAM mappings will allow supervisor mode to
 	  read, write, and execute. User mode support requires this.
 
-config KERNEL_VM_BASE
-	hex "Base virtual address for the kernel"
-	default SRAM_BASE_ADDRESS if X86 # TODO remove once x86 is linked properly
-	default 0xFFFF800000000000 if 64BIT
+config KERNEL_VM_SIZE
+	hex "Size of kernel address space in bytes"
+	default 0xC0000000
 	help
-	  Define the base virtual memory address for the core kernel. There will
-	  be a permemant mapping of all RAM starting at this virtual address,
-	  with any unused space up to the KERNEL_VM_LIMIT available for memory
-	  mappings. This denotes the start of the RAM mapping and may not be
-	  the base address of the kernel itself, but the offset of the kernel here
-	  will be the same as the offset from the beginning of physical memory
-	  where it was loaded.
+	  Size of the kernel's address space. Constraining this helps control
+	  how much total memory can be used for page tables.
 
-	  The value here must have similar alignment to the base physical address,
-	  such that the same page tables can be mapped to the top-level paging
-	  structure entries for the virtual and physical address.
-
-	  It is valid to set this to SRAM_BASE_ADDRESS, in which case RAM will
-	  be identity-mapped. Otherwise, the SRAM and KERNEL_VM regions must
-	  not overlap.
-
-	  The default for 64-bit presumes a 48-bit canonical address space.
-
-config KERNEL_VM_LIMIT
-	hex "Upper bound on kernel address space"
-	default 0xFFFFFFFFFFFFFFFF if 64BIT
-	default 0xFFFFFFFF
-	help
-	  Inclusive upper bound on the virtual memory area reserved for the kernel.
-	  No mappings will be made past this point. Constraining this helps control
-	  how much memory is used for page tables.
-
-	  The area defined by KERNEL_VM_BASE to KERNEL_VM_LIMIT must have enough
-	  room to map system RAM, plus any driver mappings. Further mappings
-	  may be made at runtime depending on configuration options (such as
-	  memory-mapping stacks, VDSO pages, etc).
+	  The area defined by SRAM_BASE_ADDRESS to SRAM_BASE_ADDRESS +
+	  KERNEL_VM_SIZE must have enough room to map system RAM, plus any driver
+	  mappings. Further mappings may be made at runtime depending on
+	  configuration options (such as memory-mapping stacks, VDSO pages, etc).
 
 endif # MMU
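The new constraint is easy to sanity-check at build time; a minimal sketch, assuming KB() from sys/util.h and the single-argument BUILD_ASSERT() form (this check is illustrative, not part of the commit):

    #include <toolchain.h>
    #include <sys/util.h>

    /* Illustrative: the kernel address space must at least cover system RAM,
     * or no room is left above it for k_mem_map() virtual mappings.
     */
    BUILD_ASSERT((size_t)CONFIG_KERNEL_VM_SIZE >= KB((size_t)CONFIG_SRAM_SIZE));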
@@ -173,7 +173,7 @@ static inline uintptr_t get_cr3(const z_arch_esf_t *esf)
 
 static inline pentry_t *get_ptables(const z_arch_esf_t *esf)
 {
-	return z_mem_virt_addr(get_cr3(esf));
+	return (pentry_t *)get_cr3(esf);
 }
 
 #ifdef CONFIG_X86_64
@@ -16,7 +16,6 @@
 #include <kernel_arch_data.h>
 #include <arch/cpu.h>
 #include <arch/x86/multiboot.h>
-#include <sys/mem_manage.h>
 
 /* exports (private APIs) */
@@ -42,55 +41,7 @@
 GTEXT(_sys_resume_from_deep_sleep)
 #endif
 
-.macro install_page_tables
-#ifdef CONFIG_X86_MMU
-	/* Enable paging. If virtual memory is enabled, the instruction pointer
-	 * is currently at a physical address. There is an identity mapping
-	 * for all RAM, plus a virtual mapping of RAM starting at
-	 * CONFIG_KERNEL_VM_BASE using the same paging structures.
-	 *
-	 * Until we enable these page tables, only physical memory addresses
-	 * work.
-	 */
-	movl $Z_MEM_PHYS_ADDR(z_x86_kernel_ptables), %eax
-	movl %eax, %cr3
-
-#ifdef CONFIG_X86_PAE
-	/* Enable PAE */
-	movl %cr4, %eax
-	orl $CR4_PAE, %eax
-	movl %eax, %cr4
-
-	/* IA32_EFER NXE bit set */
-	movl $0xC0000080, %ecx
-	rdmsr
-	orl $0x800, %eax
-	wrmsr
-#endif /* CONFIG_X86_PAE */
-
-	/* Enable paging (CR0.PG, bit 31) / write protect (CR0.WP, bit 16) */
-	movl %cr0, %eax
-	orl $(CR0_PG | CR0_WP), %eax
-	movl %eax, %cr0
-
-#if CONFIG_KERNEL_VM_BASE != CONFIG_SRAM_BASE_ADDRESS
-	/* Jump to a virtual address, which works because the identity and
-	 * virtual mappings both are to the same physical address.
-	 */
-	lea vm_enter, %eax
-	jmp *%eax
-vm_enter:
-	/* We are now executing in virtual memory. We'll un-map the identity
-	 * mappings later once we are in the C domain
-	 */
-#endif /* CONFIG_KERNEL_VM_BASE != CONFIG_SRAM_BASE_ADDRESS */
-#endif /* CONFIG_X86_MMU */
-.endm
-
 SECTION_FUNC(TEXT_START, __start)
-#ifndef CONFIG_XIP
-	install_page_tables
-#endif /* CONFIG_XIP */
-
 #include "../common.S"
@@ -249,11 +200,6 @@ __csSet:
 
 	call _x86_data_copy
 #endif /* CONFIG_USERSPACE */
 
-	/* Have to do this here, the page tables aren't loaded into RAM
-	 * until after the data copy
-	 */
-	install_page_tables
-
 #endif /* CONFIG_XIP */
 
 /*
@@ -283,6 +229,30 @@ __csSet:
 #endif
 	lidt z_x86_idt /* load 32-bit operand size IDT */
 
+#ifdef CONFIG_X86_MMU
+	/* Install page tables */
+	movl $z_x86_kernel_ptables, %eax
+	movl %eax, %cr3
+
+#ifdef CONFIG_X86_PAE
+	/* Enable PAE */
+	movl %cr4, %eax
+	orl $CR4_PAE, %eax
+	movl %eax, %cr4
+
+	/* IA32_EFER NXE bit set */
+	movl $0xC0000080, %ecx
+	rdmsr
+	orl $0x800, %eax
+	wrmsr
+#endif /* CONFIG_X86_PAE */
+
+	/* Enable paging (CR0.PG, bit 31) / write protect (CR0.WP, bit 16) */
+	movl %cr0, %eax
+	orl $(CR0_PG | CR0_WP), %eax
+	movl %eax, %cr0
+#endif /* CONFIG_X86_MMU */
+
 #ifdef CONFIG_LOAPIC
 	/* For BSP, cpu_number is 0 */
 	xorl %eax, %eax
@@ -151,7 +151,7 @@ struct task_state_segment _df_tss = {
 	.es = DATA_SEG,
 	.ss = DATA_SEG,
 	.eip = (uint32_t)df_handler_top,
-	.cr3 = Z_MEM_PHYS_ADDR((uint32_t)&z_x86_kernel_ptables)
+	.cr3 = (uint32_t)&z_x86_kernel_ptables
 };
 
 static __used void df_handler_bottom(void)
@@ -199,7 +199,7 @@ static FUNC_NORETURN __used void df_handler_top(void)
 	_main_tss.es = DATA_SEG;
 	_main_tss.ss = DATA_SEG;
 	_main_tss.eip = (uint32_t)df_handler_bottom;
-	_main_tss.cr3 = z_mem_phys_addr(&z_x86_kernel_ptables);
+	_main_tss.cr3 = (uint32_t)(&z_x86_kernel_ptables);
 	_main_tss.eflags = 0U;
 
 	/* NT bit is set in EFLAGS so we will task switch back to _main_tss
@@ -50,7 +50,7 @@ SECTION_FUNC(TEXT, z_x86_trampoline_to_kernel)
 	pushl %edi
 
 	/* Switch to kernel page table */
-	movl $Z_MEM_PHYS_ADDR(z_x86_kernel_ptables), %esi
+	movl $z_x86_kernel_ptables, %esi
 	movl %esi, %cr3
 
 	/* Save old trampoline stack pointer in %edi */
@@ -155,7 +155,7 @@ SECTION_FUNC(TEXT, z_x86_syscall_entry_stub)
 	pushl %edi
 
 	/* Switch to kernel page table */
-	movl $Z_MEM_PHYS_ADDR(z_x86_kernel_ptables), %esi
+	movl $z_x86_kernel_ptables, %esi
 	movl %esi, %cr3
 
 	/* Save old trampoline stack pointer in %edi */
@@ -49,7 +49,7 @@
 	clts
 
 	/* Page tables created at build time by gen_mmu.py */
-	movl $Z_MEM_PHYS_ADDR(z_x86_kernel_ptables), %eax
+	movl $z_x86_kernel_ptables, %eax
 	movl %eax, %cr3
 
 	set_efer
@@ -70,7 +70,7 @@
 	movq %rax, %cr4
 	clts
 
-	movq $Z_MEM_PHYS_ADDR(z_x86_kernel_ptables), %rax
+	movq $z_x86_kernel_ptables, %rax
 	movq %rax, %cr3
 
 	set_efer
@@ -84,7 +84,7 @@ z_x86_syscall_entry_stub:
 
 	/* Load kernel's page table */
 	pushq %rax
-	movq $Z_MEM_PHYS_ADDR(z_x86_kernel_ptables), %rax
+	movq $z_x86_kernel_ptables, %rax
 	movq %rax, %cr3
 	popq %rax
 	movq $0, -8(%rsp) /* Delete stashed RAX data */
@@ -26,10 +26,6 @@ FUNC_NORETURN void z_x86_prep_c(void *arg)
 	z_x86_early_serial_init();
 #endif
 
-#ifdef CONFIG_MMU
-	z_x86_mmu_init();
-#endif
-
 #ifdef CONFIG_X86_64
 	x86_64_irq_init();
 #endif
@@ -100,7 +100,7 @@ void *z_x86_userspace_prepare_thread(struct k_thread *thread)
 		z_x86_thread_pt_init(thread);
 		initial_entry = drop_to_user;
 	} else {
-		thread->arch.ptables = z_mem_phys_addr(&z_x86_kernel_ptables);
+		thread->arch.ptables = (uintptr_t)&z_x86_kernel_ptables;
 		initial_entry = z_thread_entry;
 	}
 
@@ -141,14 +141,6 @@ static const struct paging_level paging_levels[] = {
  * Utility functions
  */
 
-/* For a physical address, return its permanent virtual mapping in the kernel's
- * address space
- */
-static inline void *ram_phys_to_virt(uintptr_t phys)
-{
-	return (void *)(phys + Z_MEM_VM_OFFSET);
-}
-
 /* For a table at a particular level, get the entry index that corresponds to
  * the provided virtual address
  */
@@ -177,7 +169,7 @@ static inline uintptr_t get_entry_phys(pentry_t entry, int level)
 /* Return the virtual address of a linked table stored in the provided entry */
 static inline pentry_t *next_table(pentry_t entry, int level)
 {
-	return ram_phys_to_virt(get_entry_phys(entry, level));
+	return (pentry_t *)(get_entry_phys(entry, level));
 }
 
 /* 4K for everything except PAE PDPTs */
@@ -324,12 +316,9 @@ static void print_entries(pentry_t entries_array[], uint8_t *base, int level,
 		if (phys == virt) {
 			/* Identity mappings */
 			COLOR(YELLOW);
-		} else if (phys + Z_MEM_VM_OFFSET == virt) {
-			/* Permanent ram mappings */
-			COLOR(GREEN);
 		} else {
-			/* general mapped pages */
-			COLOR(CYAN);
+			/* Other mappings */
+			COLOR(GREEN);
 		}
 	} else {
 		COLOR(MAGENTA);
@@ -364,8 +353,7 @@ static void dump_ptables(pentry_t *table, uint8_t *base, int level)
 	}
 #endif
 
-	printk("%s at %p (0x%" PRIxPTR ") ", info->name, table,
-	       z_mem_phys_addr(table));
+	printk("%s at %p: ", info->name, table);
 	if (level == 0) {
 		printk("entire address space\n");
 	} else {
@@ -598,7 +586,7 @@ static int page_map_set(pentry_t *ptables, void *virt, pentry_t entry_val,
 		if (new_table == NULL) {
 			return -ENOMEM;
 		}
-		*entryp = z_mem_phys_addr(new_table) | INT_FLAGS;
+		*entryp = ((uintptr_t)new_table) | INT_FLAGS;
 		table = new_table;
 	} else {
 		/* We fail an assertion here due to no support for
@@ -694,42 +682,6 @@ int arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
 	return 0;
 }
 
-static void identity_map_remove(void)
-{
-#if CONFIG_SRAM_BASE_ADDRESS != CONFIG_KERNEL_VM_BASE
-	size_t size, scope = get_entry_scope(0);
-	uint8_t *pos;
-
-	k_mem_region_align((uintptr_t *)&pos, &size,
-			   (uintptr_t)CONFIG_SRAM_BASE_ADDRESS,
-			   (size_t)CONFIG_SRAM_SIZE * 1024U, scope);
-
-	/* We booted with RAM mapped both to its identity and virtual
-	 * mapping starting at CONFIG_KERNEL_VM_BASE. This was done by
-	 * double-linking the relevant tables in the top-level table.
-	 * At this point we don't need the identity mapping(s) any more,
-	 * zero the top-level table entries corresponding to the
-	 * physical mapping.
-	 */
-	while (size) {
-		pentry_t *entry = get_entry_ptr(&z_x86_kernel_ptables, pos, 0);
-
-		/* set_pte */
-		*entry = 0;
-		pos += scope;
-		size -= scope;
-	}
-#endif
-}
-
-/* Invoked to remove the identity mappings in the page tables,
- * they were only needed to tranisition the instruction pointer at early boot
- */
-void z_x86_mmu_init(void)
-{
-	identity_map_remove();
-}
-
 #if CONFIG_X86_STACK_PROTECTION
 /* Legacy stack guard function. This will eventually be replaced in favor
  * of memory-mapping stacks (with a non-present mapping immediately below each
@@ -737,7 +689,7 @@
  */
 static void stack_guard_set(void *guard_page)
 {
-	pentry_t pte = z_mem_phys_addr(guard_page) | MMU_P | MMU_XD;
+	pentry_t pte = ((uintptr_t)guard_page) | MMU_P | MMU_XD;
 	int ret;
 
 	assert_virt_addr_aligned(guard_page);
@@ -859,7 +811,7 @@ static void *thread_page_pool_get(void *context)
 	return ret;
 }
 
-#define RAM_BASE ((uintptr_t)CONFIG_KERNEL_VM_BASE)
+#define RAM_BASE ((uintptr_t)CONFIG_SRAM_BASE_ADDRESS)
 #define RAM_END (RAM_BASE + (CONFIG_SRAM_SIZE * 1024UL))
 
 /* Establish a mapping in the thread's page tables */
@@ -889,7 +841,7 @@ static void thread_map(struct k_thread *thread, void *ptr, size_t size,
 		/* L1TF */
 		pte = 0U;
 	} else {
-		pte = z_mem_phys_addr(pos) | flags;
+		pte = ((uintptr_t)pos) | flags;
 	}
 
 	ret = page_map_set(ptables, pos, pte, thread_page_pool_get,
@@ -1043,7 +995,7 @@ static void setup_thread_tables(struct k_thread *thread,
 			(void)memcpy(user_table, master_table,
 				     table_size(level));
 
-			*link = z_mem_phys_addr(user_table) | INT_FLAGS;
+			*link = ((pentry_t)user_table) | INT_FLAGS;
 		}
 	}
 }
@@ -1080,7 +1032,7 @@ void z_x86_thread_pt_init(struct k_thread *thread)
 
 	ptables = (pentry_t *)&header->kernel_data.ptables;
 #endif
-	thread->arch.ptables = z_mem_phys_addr(ptables);
+	thread->arch.ptables = ((uintptr_t)ptables);
 
 	setup_thread_tables(thread, ptables);
 
@@ -29,25 +29,6 @@ vary:
 - On x86_64, the _locore region will have Present set and
   the _lorodata region will have Present and Execute Disable set.
 
-This script will establish a dual mapping at the address defined by
-CONFIG_KERNEL_VM_BASE if it is not the same as CONFIG_SRAM_BASE_ADDRESS.
-
- - The double-mapping is used to transition the
-   instruction pointer from a physical address at early boot to the
-   virtual address where the kernel is actually linked.
-
- - The mapping is always double-mapped at the top-level paging structure
-   and the physical/virtual base addresses must have the same alignment
-   with respect to the scope of top-level paging structure entries.
-   This allows the same second-level paging structure(s) to be used for
-   both memory bases.
-
- - The double-mapping is needed so that we can still fetch instructions
-   from identity-mapped physical addresses after we program this table
-   into the MMU, then jump to the equivalent virtual address.
-   The kernel then unlinks the identity mapping before continuing,
-   the address space is purely virtual after that.
-
 Because the set of page tables are linked together by physical address,
 we must know a priori the physical address of each table. The linker
 script must define a z_x86_pagetables_start symbol where the page
@@ -72,7 +53,6 @@ import argparse
 import os
 import struct
 import elftools
-import math
 from distutils.version import LooseVersion
 from elftools.elf.elffile import ELFFile
 from elftools.elf.sections import SymbolTableSection
@@ -330,61 +310,21 @@ class PtableSet(object):
         # Set up entry in leaf page table
         table.map(virt_addr, phys_addr, flags)
 
-    def map(self, phys_base, virt_base, size, flags):
+    def map(self, phys_base, size, flags):
         """Identity map an address range in the page tables, with provided
         access flags.
-
-        If the virt_base argument is not the same address as phys_base,
-        the same memory will be double mapped to the virt_base address.
         """
         debug("Identity-mapping 0x%x (%d): %s" %
               (phys_base, size, dump_flags(flags)))
 
-        skip_vm_map = virt_base is None or virt_base == phys_base
-
         align_check(phys_base, size)
         for addr in range(phys_base, phys_base + size, 4096):
-            if addr == 0 and skip_vm_map:
+            if addr == 0:
                 # Never map the NULL page
                 continue
 
             self.map_page(addr, addr, flags)
 
-        if skip_vm_map:
-            return
-
-        # Find how much VM a top-level entry covers
-        scope = 1 << self.toplevel.addr_shift
-        debug("Double map %s entries with scope 0x%x" %
-              (self.toplevel.__class__.__name__, scope))
-
-        # Round bases down to the entry granularity
-        pd_virt_base = math.floor(virt_base / scope) * scope
-        pd_phys_base = math.floor(phys_base / scope) * scope
-        size = size + (phys_base - pd_phys_base)
-
-        # The base addresses have to line up such that they can be mapped
-        # by the same second-level table
-        if phys_base - pd_phys_base != virt_base - pd_virt_base:
-            error("mis-aligned virtual 0x%x and physical base addresses 0x%x" %
-                  (virt_base, phys_base))
-
-        # Round size up to entry granularity
-        size = math.ceil(size / scope) * scope
-
-        for offset in range(0, size, scope):
-            cur_virt = pd_virt_base + offset
-            cur_phys = pd_phys_base + offset
-
-            # Get the physical address of the second-level table that identity
-            # maps the current chunk of physical memory
-            table_link_phys = self.toplevel.lookup(cur_phys)
-
-            debug("copy mappings 0x%x - 0x%x to 0x%x, using table 0x%x" %
-                  (cur_phys, cur_phys + scope - 1, cur_virt, table_link_phys))
-
-            # Link that to the entry for the virtual mapping
-            self.toplevel.map(cur_virt, table_link_phys, INT_FLAGS)
-
     def set_region_perms(self, name, flags):
         """Set access permissions for a named region that is already mapped
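The simplified map() now just walks the range a page at a time. Here is the same loop sketched in C; map_page() is a self-contained stand-in for the script's PtableSet.map_page(), and the 4 KiB page size matches the script's:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096U

    /* Stand-in for the script's map_page(): just log the identity PTE */
    static void map_page(uintptr_t virt, uintptr_t phys, unsigned int flags)
    {
            printf("map 0x%lx -> 0x%lx (flags 0x%x)\n",
                   (unsigned long)virt, (unsigned long)phys, flags);
    }

    /* Identity-map [base, base + size), skipping the NULL page, exactly
     * as the remaining Python loop does.
     */
    static void identity_map(uintptr_t base, size_t size, unsigned int flags)
    {
            for (uintptr_t addr = base; addr < base + size; addr += PAGE_SIZE) {
                    if (addr == 0U) {
                            continue; /* never map the NULL page */
                    }
                    map_page(addr, addr, flags);
            }
    }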
@@ -488,17 +428,13 @@ def main():
     debug("building %s" % pclass.__name__)
 
     ram_base = syms["CONFIG_SRAM_BASE_ADDRESS"]
-    virt_base = syms["CONFIG_KERNEL_VM_BASE"]
     ram_size = syms["CONFIG_SRAM_SIZE"] * 1024
-    ptables_virt = syms["z_x86_pagetables_start"]
+    ptables_phys = syms["z_x86_pagetables_start"]
 
-    debug("Base addresses: physical 0x%x virtual 0x%x size %d" %
-          (ram_base, virt_base, ram_size))
+    debug("Base addresses: physical 0x%x size %d" % (ram_base, ram_size))
 
     is_perm_regions = isdef("CONFIG_SRAM_REGION_PERMISSIONS")
 
-    ptables_phys = ptables_virt - (virt_base - ram_base)
-
     if is_perm_regions:
         # Don't allow execution by default for any pages. We'll adjust this
         # in later calls to pt.set_region_perms()
@@ -507,14 +443,11 @@ def main():
         map_flags = FLAG_P
 
     pt = pclass(ptables_phys)
-    pt.map(ram_base, virt_base, ram_size, map_flags | FLAG_RW)
+    pt.map(ram_base, ram_size, map_flags | FLAG_RW)
 
     if isdef("CONFIG_XIP"):
-        if virt_base != ram_base:
-            error("XIP and virtual memory are currently incompatible")
-
         # Additionally identity-map all ROM as read-only
-        pt.map(syms["CONFIG_FLASH_BASE_ADDRESS"], None,
+        pt.map(syms["CONFIG_FLASH_BASE_ADDRESS"],
                syms["CONFIG_FLASH_SIZE"] * 1024, map_flags)
 
     # Adjust mapped region permissions if configured
@@ -141,7 +141,7 @@ static inline uintptr_t z_x86_cr3_get(void)
 /* Return the virtual address of the page tables installed in this CPU in CR3 */
 static inline pentry_t *z_x86_page_tables_get(void)
 {
-	return z_mem_virt_addr(z_x86_cr3_get());
+	return (pentry_t *)z_x86_cr3_get();
 }
 
 /* Kernel's page table. This is in CR3 for all supervisor threads.
@@ -153,12 +153,9 @@ extern pentry_t z_x86_kernel_ptables;
 static inline pentry_t *z_x86_thread_page_tables_get(struct k_thread *thread)
 {
 #ifdef CONFIG_USERSPACE
-	return z_mem_virt_addr(thread->arch.ptables);
+	return (pentry_t *)(thread->arch.ptables);
 #else
 	return &z_x86_kernel_ptables;
 #endif
 }
 
-/* Early-boot paging setup tasks, called from prep_c */
-void z_x86_mmu_init(void);
-
 #endif /* ZEPHYR_ARCH_X86_INCLUDE_X86_MMU_H */
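With translation gone, the value stored in thread->arch.ptables can be fed straight to CR3. A minimal sketch of that idea in C with inline assembly; install_ptables() is an illustrative helper, not a Zephyr API, and the real loads happen in the assembly paths shown earlier in this diff:

    #include <stdint.h>

    /* Illustrative: install page tables whose address is directly usable
     * under the identity mapping, with no physical-address translation.
     */
    static inline void install_ptables(uintptr_t ptables)
    {
            __asm__ volatile("mov %0, %%cr3" :: "r" (ptables) : "memory");
    }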
@@ -38,26 +38,6 @@
 /** Region will be accessible to user mode (normally supervisor-only) */
 #define K_MEM_PERM_USER BIT(5)
 
-/*
- * This is the offset to subtract from a virtual address mapped in the
- * kernel's permanent mapping of RAM, to obtain its physical address.
- *
- *     virt_addr - Z_VM_OFFSET = phys_addr
- *
- * This only works for virtual addresses within the interval
- * [CONFIG_KERNEL_VM_BASE, CONFIG_KERNEL_VM_BASE + (CONFIG_SRAM_SIZE * 1024)).
- *
- * These macros are intended for assembly, linker code, and static initializers.
- * Use with care.
- */
-#ifdef CONFIG_MMU
-#define Z_MEM_VM_OFFSET (CONFIG_KERNEL_VM_BASE - CONFIG_SRAM_BASE_ADDRESS)
-#else
-#define Z_MEM_VM_OFFSET 0
-#endif
-#define Z_MEM_PHYS_ADDR(virt) ((virt) - Z_MEM_VM_OFFSET)
-#define Z_MEM_VIRT_ADDR(phys) ((phys) + Z_MEM_VM_OFFSET)
-
 #ifndef _ASMLANGUAGE
 #include <stdint.h>
 #include <stddef.h>
@@ -123,38 +103,6 @@ void k_mem_map(uint8_t **linear_addr, uintptr_t phys_addr, size_t size,
 size_t k_mem_region_align(uintptr_t *aligned_addr, size_t *aligned_size,
 			  uintptr_t addr, size_t size, size_t align);
 
-/* Just like Z_MEM_PHYS_ADDR() but with type safety and assertions */
-static inline uintptr_t z_mem_phys_addr(void *virt)
-{
-	uintptr_t addr = (uintptr_t)virt;
-
-#ifdef CONFIG_MMU
-	__ASSERT((addr >= CONFIG_KERNEL_VM_BASE) &&
-		 (addr < (CONFIG_KERNEL_VM_BASE + (CONFIG_SRAM_SIZE * 1024UL))),
-		 "address %p not in permanent mappings", virt);
-#else
-	/* Should be identity-mapped */
-	__ASSERT((addr >= CONFIG_SRAM_BASE_ADDRESS) &&
-		 (addr < (CONFIG_SRAM_BASE_ADDRESS +
-			  (CONFIG_SRAM_SIZE * 1024UL))),
-		 "physical address 0x%lx not in RAM",
-		 (unsigned long)addr);
-#endif /* CONFIG_MMU */
-
-	return Z_MEM_PHYS_ADDR(addr);
-}
-
-/* Just like Z_MEM_VIRT_ADDR() but with type safety and assertions */
-static inline void *z_mem_virt_addr(uintptr_t phys)
-{
-	__ASSERT((phys >= CONFIG_SRAM_BASE_ADDRESS) &&
-		 (phys < (CONFIG_SRAM_BASE_ADDRESS +
-			  (CONFIG_SRAM_SIZE * 1024UL))),
-		 "physical address 0x%lx not in RAM", (unsigned long)phys);
-
-	return (void *)Z_MEM_VIRT_ADDR(phys);
-}
-
 #ifdef __cplusplus
 }
 #endif
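Per the commit message, driver MMIO and similar regions still get virtual mappings through k_mem_map(), whose signature appears in the hunk header above. A minimal usage sketch; the physical base, size, and register offset are made-up example values, and cache-control flags are omitted for brevity:

    #include <sys/mem_manage.h>
    #include <sys/sys_io.h>

    static void example_map_mmio(void)
    {
            uint8_t *regs;

            /* Map 4 KiB of device registers at a hypothetical physical base */
            k_mem_map(&regs, 0xFED00000UL, 0x1000, K_MEM_PERM_RW);

            /* Poke a register through the returned virtual address */
            sys_write32(1U, (mem_addr_t)(uintptr_t)(regs + 0x10));
    }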
kernel/mmu.c | 25
@@ -20,15 +20,15 @@ LOG_MODULE_DECLARE(os);
 static struct k_spinlock mm_lock;
 
 /*
- * Overall virtual memory map:
+ * Overall virtual memory map. System RAM is identity-mapped:
  *
- * +--------------+ <- CONFIG_KERNEL_VM_BASE
+ * +--------------+ <- CONFIG_SRAM_BASE_ADDRESS
  * | Mapping for  |
  * | all RAM      |
  * |              |
  * |              |
- * +--------------+ <- mapping_limit
- * | Available    |
+ * +--------------+ <- CONFIG_SRAM_BASE_ADDRESS + CONFIG_SRAM_SIZE
+ * | Available    | also the mapping limit as mappings grown downward
  * | virtual mem  |
  * |              |
  * |..............| <- mapping_pos (grows downward as more mappings are made)
@@ -39,7 +39,7 @@ static struct k_spinlock mm_lock;
  * | ...          |
  * +--------------+
  * | Mapping      |
- * +--------------+ <- CONFIG_KERNEL_VM_LIMIT
+ * +--------------+ <- CONFIG_SRAM_BASE_ADDRESS + CONFIG_KERNEL_VM_SIZE
  *
  * At the moment we just have one area for mappings and they are permanent.
  * This is under heavy development and may change.
@@ -50,16 +50,21 @@ static struct k_spinlock mm_lock;
  * k_mem_map() mappings start at the end of the address space, and grow
  * downward.
  *
- * The Kconfig value is inclusive so add one, even if it wraps around to 0.
+ * TODO: If we ever encounter a board with RAM in high enough memory
+ * such that there isn't room in the address space, define mapping_pos
+ * and mapping_limit such that we have mappings grow downward from the
+ * beginning of system RAM.
  */
 static uint8_t *mapping_pos =
-	(uint8_t *)((uintptr_t)CONFIG_KERNEL_VM_LIMIT + 1UL);
+	(uint8_t *)((uintptr_t)(CONFIG_SRAM_BASE_ADDRESS +
+				CONFIG_KERNEL_VM_SIZE));
 
 /* Lower-limit of virtual address mapping. Immediately below this is the
- * permanent mapping for all SRAM.
+ * permanent identity mapping for all SRAM.
  */
-static uint8_t *mapping_limit = (uint8_t *)((uintptr_t)CONFIG_KERNEL_VM_BASE +
-					    KB((size_t)CONFIG_SRAM_SIZE));
+static uint8_t *mapping_limit =
+	(uint8_t *)((uintptr_t)CONFIG_SRAM_BASE_ADDRESS +
+		    KB((size_t)CONFIG_SRAM_SIZE));
 
 size_t k_mem_region_align(uintptr_t *aligned_addr, size_t *aligned_size,
 			  uintptr_t phys_addr, size_t size, size_t align)
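To make the new arithmetic concrete, here is a small, self-contained sketch using made-up example values (0x10000000 base, 256 MB RAM, the default 3 GB CONFIG_KERNEL_VM_SIZE); the real values come from Kconfig:

    #include <stdio.h>
    #include <stdint.h>

    /* Example values only; real ones come from Kconfig */
    #define SRAM_BASE_ADDRESS 0x10000000UL
    #define SRAM_SIZE_KB      (256UL * 1024UL)   /* 256 MB of RAM */
    #define KERNEL_VM_SIZE    0xC0000000UL       /* 3 GB address space */

    int main(void)
    {
            uintptr_t mapping_limit = SRAM_BASE_ADDRESS + SRAM_SIZE_KB * 1024UL;
            uintptr_t mapping_pos   = SRAM_BASE_ADDRESS + KERNEL_VM_SIZE;

            /* Identity map covers [SRAM_BASE, mapping_limit); the k_mem_map()
             * arena grows downward from mapping_pos toward mapping_limit.
             */
            printf("identity map: 0x%lx..0x%lx\n",
                   (unsigned long)SRAM_BASE_ADDRESS,
                   (unsigned long)mapping_limit);
            printf("virtual mappings grow down from 0x%lx\n",
                   (unsigned long)mapping_pos);
            return 0;
    }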
@@ -16,7 +16,7 @@
 #include <x86_mmu.h>
 #include <linker/linker-defs.h>
 
-#define VM_BASE ((uint8_t *)CONFIG_KERNEL_VM_BASE)
+#define VM_BASE ((uint8_t *)CONFIG_SRAM_BASE_ADDRESS)
 #define VM_LIMIT (VM_BASE + KB((size_t)CONFIG_SRAM_SIZE))
 
 #ifdef CONFIG_X86_64
@@ -51,7 +51,7 @@ void test_k_mem_map_rw(void)
 	expect_fault = false;
 
 	/* Map in a page that allows writes */
-	k_mem_map(&mapped_rw, Z_MEM_PHYS_ADDR((uintptr_t)buf),
+	k_mem_map(&mapped_rw, (uintptr_t)buf,
 		  BUF_SIZE, BASE_FLAGS | K_MEM_PERM_RW);
 
 	/* Initialize buf with some bytes */
@@ -60,7 +60,7 @@ void test_k_mem_map_rw(void)
 	}
 
 	/* Map again this time only allowing reads */
-	k_mem_map(&mapped_ro, Z_MEM_PHYS_ADDR((uintptr_t)buf),
+	k_mem_map(&mapped_ro, (uintptr_t)buf,
 		  BUF_SIZE, BASE_FLAGS);
 
 	/* Check that the mapped area contains the expected data. */
@@ -97,13 +97,13 @@ void test_k_mem_map_exec(void)
 	expect_fault = false;
 
 	/* Map with write permissions and copy the function into the page */
-	k_mem_map(&mapped_rw, Z_MEM_PHYS_ADDR((uintptr_t)test_page),
+	k_mem_map(&mapped_rw, (uintptr_t)test_page,
 		  sizeof(test_page), BASE_FLAGS | K_MEM_PERM_RW);
 
 	memcpy(mapped_rw, &transplanted_function, CONFIG_MMU_PAGE_SIZE);
 
 	/* Now map with execution enabled and try to run the copied fn */
-	k_mem_map(&mapped_exec, Z_MEM_PHYS_ADDR((uintptr_t)test_page),
+	k_mem_map(&mapped_exec, (uintptr_t)test_page,
 		  sizeof(test_page), BASE_FLAGS | K_MEM_PERM_EXEC);
 
 	func = (void (*)(bool *executed))mapped_exec;
@@ -111,7 +111,7 @@ void test_k_mem_map_exec(void)
 	zassert_true(executed, "function did not execute");
 
 	/* Now map without execution and execution should now fail */
-	k_mem_map(&mapped_ro, Z_MEM_PHYS_ADDR((uintptr_t)test_page),
+	k_mem_map(&mapped_ro, (uintptr_t)test_page,
 		  sizeof(test_page), BASE_FLAGS);
 
 	func = (void (*)(bool *executed))mapped_ro;
@@ -143,7 +143,7 @@ void test_k_mem_map_side_effect(void)
 	 * Show that by mapping test_page to an RO region, we can still
 	 * modify test_page.
 	 */
-	k_mem_map(&mapped, Z_MEM_PHYS_ADDR((uintptr_t)test_page),
+	k_mem_map(&mapped, (uintptr_t)test_page,
 		  sizeof(test_page), BASE_FLAGS);
 
 	/* Should NOT fault */
@@ -16,7 +16,7 @@
 #define FAULTY_ADDRESS 0x0FFFFFFF
 #elif CONFIG_MMU
 /* Just past the permanent RAM mapping should be a non-present page */
-#define FAULTY_ADDRESS (CONFIG_KERNEL_VM_BASE + (CONFIG_SRAM_SIZE * 1024UL))
+#define FAULTY_ADDRESS (CONFIG_SRAM_BASE_ADDRESS + (CONFIG_SRAM_SIZE * 1024UL))
 #else
 #define FAULTY_ADDRESS 0xFFFFFFF0
 #endif