kernel: add k_mem_map() interface

Allows applications to increase the data space available to Zephyr
via anonymous memory mappings. Loosely based on mmap().

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Andrew Boie 2020-12-16 18:56:02 -08:00 committed by Anas Nashif
parent 2ca5fb7e06
commit 69d39af5e6
2 changed files with 178 additions and 4 deletions


@@ -14,13 +14,13 @@
*/
/** No caching. Most drivers want this. */
-#define K_MEM_CACHE_NONE 0
+#define K_MEM_CACHE_NONE 2
/** Write-through caching. Used by certain drivers. */
#define K_MEM_CACHE_WT 1
/** Full write-back caching. Any RAM mapped wants this. */
-#define K_MEM_CACHE_WB 2
+#define K_MEM_CACHE_WB 0
/** Reserved bits for cache modes in k_map() flags argument */
#define K_MEM_CACHE_MASK (BIT(3) - 1)
@@ -94,6 +94,87 @@ extern "C" {
void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size,
uint32_t flags);
/*
* k_mem_map() control flags
*/
/**
* @def K_MEM_MAP_UNINIT
*
* @brief The mapped region is not guaranteed to be zeroed.
*
* This may improve performance. The associated page frames may contain
* indeterminate data, zeroes, or even sensitive information.
*
* This may not be used with K_MEM_PERM_USER as there are no circumstances
* where this is safe.
*/
#define K_MEM_MAP_UNINIT BIT(16)
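
As an illustration only (not part of this commit), a caller that is about to overwrite the whole region anyway could skip the zero-fill pass. This sketch assumes the declarations above are reachable via <sys/mem_manage.h> and uses the pre-existing K_MEM_PERM_RW flag and CONFIG_MMU_PAGE_SIZE option:

#include <sys/mem_manage.h>

/* The page will be fully overwritten by the caller (e.g. used as a
 * staging buffer), so the kernel's zero-fill would be redundant. Until
 * it is written, its contents are indeterminate and must never be
 * exposed to user mode (K_MEM_PERM_USER is forbidden with this flag).
 */
void *alloc_staging_page(void)
{
	return k_mem_map(CONFIG_MMU_PAGE_SIZE,
			 K_MEM_PERM_RW | K_MEM_MAP_UNINIT);
}
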
/**
* @def K_MEM_MAP_LOCK
*
* Region will be pinned in memory and never paged out.
*
* Such memory is guaranteed to never produce a page fault due to page-outs
* or copy-on-write once the mapping call has returned. Physical page frames
* will be pre-fetched as necessary and pinned.
*/
#define K_MEM_MAP_LOCK BIT(17)
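
For example (a sketch, not part of this commit), a subsystem whose buffer must never fault once demand paging exists could pin it at init time; K_MEM_PERM_RW, CONFIG_MMU_PAGE_SIZE and -ENOMEM come from existing Zephyr headers, while the pool itself is hypothetical:

#include <errno.h>
#include <stdint.h>
#include <sys/mem_manage.h>

#define POOL_SIZE (4 * CONFIG_MMU_PAGE_SIZE)	/* page-aligned by construction */

static uint8_t *pool;

/* Map a zeroed, pinned pool; after this returns, accesses to it cannot
 * fault due to page-outs or copy-on-write.
 */
int pool_init(void)
{
	pool = k_mem_map(POOL_SIZE, K_MEM_PERM_RW | K_MEM_MAP_LOCK);

	return (pool == NULL) ? -ENOMEM : 0;
}
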
/**
* @def K_MEM_MAP_GUARD
*
* An un-mapped virtual guard page will be placed in memory immediately preceding
* the mapped region. This page will still be noted as being used by the
* virtual memory manager. The total size of the allocation will be the
* requested size plus the size of this guard page. The returned address
* pointer will not include the guard page immediately below it. The typical
* use-case is downward-growing thread stacks.
*
* Zephyr treats page faults on this guard page as a fatal K_ERR_STACK_CHK_FAIL
* if it determines that the page immediately precedes a stack buffer; this
* check is implemented in the architecture layer.
*/
#define K_MEM_MAP_GUARD BIT(18)
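
To make the layout concrete, here is an illustrative sketch (not part of this commit): the kernel reserves the requested size plus one page of virtual space, leaves the lowest page unmapped, and returns the address just above it.

#include <stddef.h>
#include <stdint.h>
#include <sys/mem_manage.h>

uint8_t *map_guarded(size_t size)
{
	uint8_t *buf = k_mem_map(size, K_MEM_PERM_RW | K_MEM_MAP_GUARD);

	/* On success:
	 *   [buf, buf + size)                  usable, zeroed region
	 *   [buf - CONFIG_MMU_PAGE_SIZE, buf)  reserved but unmapped guard;
	 *                                      any access to it faults
	 */
	return buf;
}
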
/**
* Map anonymous memory into Zephyr's address space
*
* This function effectively increases the data space available to Zephyr.
* The kernel will choose a base virtual address and return it to the caller.
* The memory will have access permissions for all contexts set per the
* provided flags argument.
*
* If user thread access control needs to be managed in any way, do not enable
* K_MEM_PERM_USER here; instead manage the region's permissions with the
* memory domain APIs after the mapping has been established. Setting
* K_MEM_PERM_USER here allows all user threads to access this memory, which
* is usually undesirable.
*
* Unless K_MEM_MAP_UNINIT is used, the returned memory will be zeroed.
*
* The mapped region is not guaranteed to be physically contiguous in memory.
* Physically contiguous buffers should be allocated statically and pinned
* at build time.
*
* Pages mapped in this way have write-back cache settings.
*
* The returned virtual memory pointer will be page-aligned. The size
* parameter, and any base address given for re-mapping purposes, must be
* page-aligned.
*
* Several K_MEM_MAP_* flags alter the behavior of this function; details are
* given in the documentation for each flag.
*
* @param size Size of the memory mapping. This must be page-aligned.
* @param flags K_MEM_PERM_*, K_MEM_MAP_* control flags.
* @return The mapped memory location, or NULL if insufficient virtual address
* space, insufficient physical memory to establish the mapping,
* or insufficient memory for paging structures.
*/
void *k_mem_map(size_t size, uint32_t flags);
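
A basic usage sketch (not part of this commit), assuming the declaration above is reached through <sys/mem_manage.h> and using the pre-existing K_MEM_PERM_RW permission flag:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mem_manage.h>

/* Grow the kernel's data space by a number of pages with the default
 * behavior: zero-filled, write-back cached, supervisor-only access.
 */
int get_work_area(size_t pages, uint8_t **out)
{
	uint8_t *buf = k_mem_map(pages * CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);

	if (buf == NULL) {
		/* No virtual space, page frames, or paging structures left */
		return -ENOMEM;
	}
	*out = buf;

	return 0;
}
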
/**
* Given an arbitrary region, provide an aligned region that covers it
*


@@ -252,7 +252,7 @@ static void free_page_frame_list_init(void)
* local ontology (and do some assertions while we're at it)
*/
static void frame_mapped_set(struct z_page_frame *pf, void *addr)
{
	PF_ASSERT(pf, !z_page_frame_is_reserved(pf),
		  "attempted to map a reserved page frame");
@@ -266,9 +266,102 @@ static void frame_mapped_set(struct z_page_frame *pf, void *addr)
	pf->flags |= Z_PAGE_FRAME_MAPPED;
	pf->addr = addr;
	pf->refcount++;
}
/* Allocate a free page frame, and map it to a specified virtual address
*
* TODO: Add optional support for copy-on-write mappings to a zero page instead
* of allocating, in which case page frames will be allocated lazily as
* the mappings to the zero page get touched.
*/
static int map_anon_page(void *addr, uint32_t flags)
{
	int ret;
	struct z_page_frame *pf;
	uintptr_t phys;
	bool lock = (flags & K_MEM_MAP_LOCK) != 0;

	pf = free_page_frame_list_get();
	if (pf == NULL) {
		return -ENOMEM;
	}

	phys = z_page_frame_to_phys(pf);
	ret = arch_mem_map(addr, phys, CONFIG_MMU_PAGE_SIZE,
			   flags | K_MEM_CACHE_WB);
	if (ret != 0) {
		free_page_frame_list_put(pf);
		return -ENOMEM;
	}

	if (lock) {
		pf->flags |= Z_PAGE_FRAME_PINNED;
	}
	frame_mapped_set(pf, addr);

	return 0;
}
void *k_mem_map(size_t size, uint32_t flags)
{
	uint8_t *dst;
	size_t total_size = size;
	int ret;
	k_spinlock_key_t key;
	bool uninit = (flags & K_MEM_MAP_UNINIT) != 0;
	bool guard = (flags & K_MEM_MAP_GUARD) != 0;
	uint8_t *pos;

	__ASSERT(!(((flags & K_MEM_PERM_USER) != 0) && uninit),
		 "user access to anonymous uninitialized pages is forbidden");
	__ASSERT(size % CONFIG_MMU_PAGE_SIZE == 0,
		 "unaligned size %zu passed to %s", size, __func__);
	__ASSERT(size != 0, "zero sized memory mapping");
	__ASSERT(page_frames_initialized, "%s called too early", __func__);
	__ASSERT((flags & K_MEM_CACHE_MASK) == 0,
		 "%s does not support explicit cache settings", __func__);

	key = k_spin_lock(&z_mm_lock);

	if (guard) {
		/* Need an extra virtual page for the guard, which we
		 * won't map.
		 */
		total_size += CONFIG_MMU_PAGE_SIZE;
	}

	dst = virt_region_get(total_size);
	if (dst == NULL) {
		/* Address space has no free region */
		goto out;
	}
	if (guard) {
		/* Skip over the guard page in the returned address. */
		dst += CONFIG_MMU_PAGE_SIZE;
	}

	VIRT_FOREACH(dst, size, pos) {
		ret = map_anon_page(pos, flags);

		if (ret != 0) {
			/* TODO: call k_mem_unmap(dst, pos - dst) when
			 * implemented in #28990 and release any guard virtual
			 * page as well.
			 */
			dst = NULL;
			goto out;
		}
	}

	if (!uninit) {
		/* If we later implement mappings to a copy-on-write zero
		 * page, we won't need this step.
		 */
		memset(dst, 0, size);
	}
out:
	k_spin_unlock(&z_mm_lock, key);

	return dst;
}
/* This may be called from arch early boot code before z_cstart() is invoked.
* Data will be copied and BSS zeroed, but this must not rely on any