kernel: mm: introduce k_mem_phys_map()/_unmap()
This is similar to k_mem_map()/_unmap(), but instead of using anonymous memory, the provided physical region is mapped into the virtual address space. In addition to simply mapping physical to virtual addresses, the mapping also adds a guard page before and after the virtual region to catch buffer under-/over-flows.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
commit 04c5632bd4
parent 378131c266
@@ -205,6 +205,47 @@ void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size,
  */
 void z_phys_unmap(uint8_t *virt, size_t size);
 
+/**
+ * Map memory into virtual address space with guard pages.
+ *
+ * This maps memory into virtual address space with a preceding and
+ * a succeeding guard page.
+ *
+ * @see k_mem_map() for additional information when called via that function.
+ *
+ * @see k_mem_phys_map() for additional information when called via that function.
+ *
+ * @param phys Physical address base of the memory region if not requesting
+ *             anonymous memory. Must be page-aligned.
+ * @param size Size of the memory mapping. This must be page-aligned.
+ * @param flags K_MEM_PERM_*, K_MEM_MAP_* control flags.
+ * @param is_anon True if requesting mapping with anonymous memory.
+ *
+ * @return The mapped memory location, or NULL if insufficient virtual address
+ *         space, insufficient physical memory to establish the mapping,
+ *         or insufficient memory for paging structures.
+ */
+void *k_mem_map_impl(uintptr_t phys, size_t size, uint32_t flags, bool is_anon);
+
+/**
+ * Un-map mapped memory
+ *
+ * This removes the memory mappings for the provided page-aligned region,
+ * and the two guard pages surrounding the region.
+ *
+ * @see k_mem_unmap() for additional information when called via that function.
+ *
+ * @see k_mem_phys_unmap() for additional information when called via that function.
+ *
+ * @note Calling this function on a region which was not mapped to begin
+ *       with is undefined behavior.
+ *
+ * @param addr Page-aligned memory region base virtual address
+ * @param size Page-aligned memory region size
+ * @param is_anon True if the mapped memory is from anonymous memory.
+ */
+void k_mem_unmap_impl(void *addr, size_t size, bool is_anon);
+
 #ifdef __cplusplus
 }
 #endif
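To make the guard-page layout concrete, here is a small editorial sketch (not part of the commit) of the address arithmetic these comments describe; only CONFIG_MMU_PAGE_SIZE is a real symbol, the helper functions are invented for illustration:

#include <stddef.h>
#include <stdint.h>

/* Editorial sketch of the reserved virtual region described above:
 *
 *   base                                          base + total
 *   | guard page |       usable region       | guard page |
 *
 * Not from the commit; illustrative only.
 */
static inline size_t total_region_size(size_t size)
{
	/* requested size plus one guard page on each side */
	return size + (CONFIG_MMU_PAGE_SIZE * 2U);
}

static inline uint8_t *usable_base(uint8_t *base)
{
	/* the pointer handed back to the caller skips the "before" guard
	 * page, so the usable region is [virt, virt + size) with unmapped
	 * guard pages on either side
	 */
	return base + CONFIG_MMU_PAGE_SIZE;
}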
@@ -168,7 +168,54 @@ size_t k_mem_free_get(void);
  * space, insufficient physical memory to establish the mapping,
  * or insufficient memory for paging structures.
  */
-void *k_mem_map(size_t size, uint32_t flags);
+static inline void *k_mem_map(size_t size, uint32_t flags)
+{
+	return k_mem_map_impl((uintptr_t)NULL, size, flags, true);
+}
+
+/**
+ * Map a physical memory region into the kernel's virtual address space with guard pages.
+ *
+ * This function maps a contiguous physical memory region into the kernel's
+ * virtual address space. Given a physical address and a size, it returns a
+ * linear address representing the base of where the physical region is mapped
+ * in the virtual address space for the Zephyr kernel.
+ *
+ * This function alters the active page tables in the area reserved
+ * for the kernel. This function will choose the virtual address
+ * and return it to the caller.
+ *
+ * If user thread access control needs to be managed in any way, do not enable
+ * K_MEM_PERM_USER flags here; instead manage the region's permissions
+ * with memory domain APIs after the mapping has been established. Setting
+ * K_MEM_PERM_USER here will allow all user threads to access this memory,
+ * which is usually undesirable.
+ *
+ * Unless K_MEM_MAP_UNINIT is used, the returned memory will be zeroed.
+ *
+ * The returned virtual memory pointer will be page-aligned. The size
+ * parameter, and any base address for re-mapping purposes, must be
+ * page-aligned.
+ *
+ * Note that the allocation includes two guard pages immediately before
+ * and after the requested region. The total size of the allocation will be
+ * the requested size plus the size of these two guard pages.
+ *
+ * Many K_MEM_MAP_* flags have been implemented to alter the behavior of this
+ * function, with details in the documentation for these flags.
+ *
+ * @param phys Physical address base of the memory region.
+ *             This must be page-aligned.
+ * @param size Size of the memory mapping. This must be page-aligned.
+ * @param flags K_MEM_PERM_*, K_MEM_MAP_* control flags.
+ *
+ * @return The mapped memory location, or NULL if insufficient virtual address
+ *         space or insufficient memory for paging structures.
+ */
+static inline void *k_mem_phys_map(uintptr_t phys, size_t size, uint32_t flags)
+{
+	return k_mem_map_impl(phys, size, flags, false);
+}
+
 /**
  * Un-map mapped memory
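As a usage illustration (an editorial addition, not part of the commit): a hypothetical driver mapping its MMIO registers through the new API. The physical base, size, and register offset are invented, and the header paths are assumptions based on the Zephyr tree of this era. K_MEM_MAP_UNINIT is passed deliberately: per the documentation above, the mapping is otherwise zeroed, which would write to the device's registers. Note that K_MEM_CACHE_* flags are not accepted on this path (see the __ASSERT in k_mem_map_impl() below).

/* Hypothetical usage sketch, not from the commit. */
#include <zephyr.h>
#include <sys/mem_manage.h>
#include <sys/sys_io.h>

#define EXAMPLE_DEV_PHYS  0xA0000000UL                /* invented MMIO base */
#define EXAMPLE_DEV_SIZE  (CONFIG_MMU_PAGE_SIZE * 2U) /* page-aligned size  */

static mem_addr_t example_regs;

void example_map_device(void)
{
	/* K_MEM_MAP_UNINIT keeps the kernel from memset()ing the freshly
	 * mapped region to zero, which would clobber device state.
	 */
	uint8_t *virt = k_mem_phys_map(EXAMPLE_DEV_PHYS, EXAMPLE_DEV_SIZE,
				       K_MEM_PERM_RW | K_MEM_MAP_UNINIT);

	if (virt == NULL) {
		/* out of virtual address space or paging structures */
		return;
	}

	/* virt points at the base of the mapped region; unmapped guard
	 * pages sit immediately before virt and after virt + size.
	 */
	example_regs = (mem_addr_t)virt;
	sys_write32(0x1U, example_regs + 0x10); /* hypothetical enable reg */
}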
@@ -183,7 +230,33 @@ void *k_mem_map(size_t size, uint32_t flags);
  * @param addr Page-aligned memory region base virtual address
  * @param size Page-aligned memory region size
  */
-void k_mem_unmap(void *addr, size_t size);
+static inline void k_mem_unmap(void *addr, size_t size)
+{
+	k_mem_unmap_impl(addr, size, true);
+}
+
+/**
+ * Un-map memory mapped via k_mem_phys_map().
+ *
+ * This unmaps a virtual memory region from the kernel's virtual address space.
+ *
+ * This function alters the active page tables in the area reserved
+ * for the kernel.
+ *
+ * This removes a memory mapping for the provided page-aligned region
+ * and the guard pages. The kernel may re-use the associated virtual address
+ * region later.
+ *
+ * @note Calling this function on a region which was not mapped via
+ *       k_mem_phys_map() to begin with is undefined behavior.
+ *
+ * @param addr Page-aligned memory region base virtual address
+ * @param size Page-aligned memory region size
+ */
+static inline void k_mem_phys_unmap(void *addr, size_t size)
+{
+	k_mem_unmap_impl(addr, size, false);
+}
+
 /**
  * Given an arbitrary region, provide an aligned region that covers it
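And the matching teardown for the hypothetical sketch above, once the device is no longer needed; the size must match the original page-aligned mapping size, while the guard pages are accounted for internally, not by the caller:

void example_unmap_device(void)
{
	k_mem_phys_unmap((void *)example_regs, EXAMPLE_DEV_SIZE);
	example_regs = 0;
}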
kernel/mmu.c (135 changed lines)
@@ -513,7 +513,6 @@ static int map_anon_page(void *addr, uint32_t flags)
 	struct z_page_frame *pf;
 	uintptr_t phys;
 	bool lock = (flags & K_MEM_MAP_LOCK) != 0U;
-	bool uninit = (flags & K_MEM_MAP_UNINIT) != 0U;
 
 	pf = free_page_frame_list_get();
 	if (pf == NULL) {
@@ -549,23 +548,17 @@ static int map_anon_page(void *addr, uint32_t flags)
 
 	LOG_DBG("memory mapping anon page %p -> 0x%lx", addr, phys);
 
-	if (!uninit) {
-		/* If we later implement mappings to a copy-on-write
-		 * zero page, won't need this step
-		 */
-		memset(addr, 0, CONFIG_MMU_PAGE_SIZE);
-	}
-
 	return 0;
 }
 
-void *k_mem_map(size_t size, uint32_t flags)
+void *k_mem_map_impl(uintptr_t phys, size_t size, uint32_t flags, bool is_anon)
 {
 	uint8_t *dst;
 	size_t total_size;
 	int ret;
 	k_spinlock_key_t key;
 	uint8_t *pos;
+	bool uninit = (flags & K_MEM_MAP_UNINIT) != 0U;
 
 	__ASSERT(!(((flags & K_MEM_PERM_USER) != 0U) &&
 		   ((flags & K_MEM_MAP_UNINIT) != 0U)),
@@ -573,7 +566,8 @@ void *k_mem_map(size_t size, uint32_t flags)
 	__ASSERT(size % CONFIG_MMU_PAGE_SIZE == 0U,
 		 "unaligned size %zu passed to %s", size, __func__);
 	__ASSERT(size != 0, "zero sized memory mapping");
-	__ASSERT(page_frames_initialized, "%s called too early", __func__);
+	__ASSERT(!is_anon || (is_anon && page_frames_initialized),
+		 "%s called too early", __func__);
 	__ASSERT((flags & K_MEM_CACHE_MASK) == 0U,
 		 "%s does not support explicit cache settings", __func__);
 
@@ -605,24 +599,43 @@ void *k_mem_map(size_t size, uint32_t flags)
 	/* Skip over the "before" guard page in returned address. */
 	dst += CONFIG_MMU_PAGE_SIZE;
 
-	VIRT_FOREACH(dst, size, pos) {
-		ret = map_anon_page(pos, flags);
+	if (is_anon) {
+		/* Mapping from anonymous memory */
+		VIRT_FOREACH(dst, size, pos) {
+			ret = map_anon_page(pos, flags);
 
-		if (ret != 0) {
-			/* TODO: call k_mem_unmap(dst, pos - dst) when
-			 * implemented in #28990 and release any guard virtual
-			 * page as well.
-			 */
-			dst = NULL;
-			goto out;
+			if (ret != 0) {
+				/* TODO: call k_mem_unmap(dst, pos - dst) when
+				 * implemented in #28990 and release any guard virtual
+				 * page as well.
+				 */
+				dst = NULL;
+				goto out;
+			}
 		}
-	}
+	} else {
+		/* Mapping known physical memory.
+		 *
+		 * arch_mem_map() is a void function and does not return
+		 * anything. Arch code usually uses ASSERT() to catch
+		 * mapping errors. Assume this works correctly for now.
+		 */
+		arch_mem_map(dst, phys, size, flags);
+	}
+
+	if (!uninit) {
+		/* If we later implement mappings to a copy-on-write
+		 * zero page, won't need this step
+		 */
+		memset(dst, 0, size);
+	}
 
 out:
 	k_spin_unlock(&z_mm_lock, key);
 	return dst;
 }
 
-void k_mem_unmap(void *addr, size_t size)
+void k_mem_unmap_impl(void *addr, size_t size, bool is_anon)
 {
 	uintptr_t phys;
 	uint8_t *pos;
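An editorial aside on VIRT_FOREACH(), which both paths above lean on: the usage implies a macro that walks a cursor across the region one page at a time. A sketch of that shape, inferred from how it is used here rather than quoted from Zephyr's source:

/* Assumed shape of a page-stepping iteration macro: iterate _pos over
 * every page in [_base, _base + _size). Inferred from usage, not
 * Zephyr's actual definition.
 */
#define VIRT_FOREACH(_base, _size, _pos) \
	for ((_pos) = (_base); \
	     (_pos) < ((uint8_t *)(_base) + (_size)); \
	     (_pos) += CONFIG_MMU_PAGE_SIZE)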
@@ -663,43 +676,55 @@ void k_mem_unmap(void *addr, size_t size)
 		goto out;
 	}
 
-	VIRT_FOREACH(addr, size, pos) {
-		ret = arch_page_phys_get(pos, &phys);
+	if (is_anon) {
+		/* Unmapping anonymous memory */
+		VIRT_FOREACH(addr, size, pos) {
+			ret = arch_page_phys_get(pos, &phys);
 
-		__ASSERT(ret == 0,
-			 "%s: cannot unmap an unmapped address %p",
-			 __func__, pos);
-		if (ret != 0) {
-			/* Found an address not mapped. Do not continue. */
-			goto out;
-		}
+			__ASSERT(ret == 0,
+				 "%s: cannot unmap an unmapped address %p",
+				 __func__, pos);
+			if (ret != 0) {
+				/* Found an address not mapped. Do not continue. */
+				goto out;
+			}
 
-		__ASSERT(z_is_page_frame(phys),
-			 "%s: 0x%lx is not a page frame", __func__, phys);
-		if (!z_is_page_frame(phys)) {
-			/* Physical address has no corresponding page frame
-			 * description in the page frame array.
-			 * This should not happen. Do not continue.
-			 */
-			goto out;
-		}
+			__ASSERT(z_is_page_frame(phys),
+				 "%s: 0x%lx is not a page frame", __func__, phys);
+			if (!z_is_page_frame(phys)) {
+				/* Physical address has no corresponding page frame
+				 * description in the page frame array.
+				 * This should not happen. Do not continue.
+				 */
+				goto out;
+			}
 
-		/* Grab the corresponding page frame from physical address */
-		pf = z_phys_to_page_frame(phys);
+			/* Grab the corresponding page frame from physical address */
+			pf = z_phys_to_page_frame(phys);
 
-		__ASSERT(z_page_frame_is_mapped(pf),
-			 "%s: 0x%lx is not a mapped page frame", __func__, phys);
-		if (!z_page_frame_is_mapped(pf)) {
-			/* Page frame is not marked mapped.
-			 * This should not happen. Do not continue.
-			 */
-			goto out;
-		}
+			__ASSERT(z_page_frame_is_mapped(pf),
+				 "%s: 0x%lx is not a mapped page frame", __func__, phys);
+			if (!z_page_frame_is_mapped(pf)) {
+				/* Page frame is not marked mapped.
+				 * This should not happen. Do not continue.
+				 */
+				goto out;
+			}
 
-		arch_mem_unmap(pos, CONFIG_MMU_PAGE_SIZE);
+			arch_mem_unmap(pos, CONFIG_MMU_PAGE_SIZE);
 
-		/* Put the page frame back into free list */
-		page_frame_free_locked(pf);
+			/* Put the page frame back into free list */
+			page_frame_free_locked(pf);
+		}
+	} else {
+		/*
+		 * Unmapping previously mapped memory with a specific physical
+		 * address.
+		 *
+		 * Note that we don't have to unmap the guard pages, as they
+		 * were never mapped in the first place. We just need to unmap
+		 * the in-between region [addr, (addr + size)).
+		 */
+		arch_mem_unmap(addr, size);
 	}
 
 	/* There are guard pages just before and after the mapped
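Closing with a hypothetical sketch (editorial, not from the commit) of what the guard pages buy in practice: a stray access just outside a mapped region lands on an unmapped guard page and faults at the offending instruction, instead of silently corrupting a neighboring allocation.

void example_guard_pages(void)
{
	uint8_t *buf = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);

	if (buf == NULL) {
		return;
	}

	buf[0] = 0xAA;                        /* fine: first mapped byte */
	buf[CONFIG_MMU_PAGE_SIZE - 1] = 0xBB; /* fine: last mapped byte  */

	/* Either of the following would land in an unmapped guard page
	 * and trigger a page fault right at the bug:
	 *
	 * buf[CONFIG_MMU_PAGE_SIZE] = 0xCC;  // overflow into "after" guard
	 * buf[-1] = 0xDD;                    // underflow into "before" guard
	 */

	k_mem_unmap(buf, CONFIG_MMU_PAGE_SIZE);
}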