mmu: backing stores reserve page fault room

If we evict enough pages to completely fill the backing store,
through APIs like k_mem_map(), z_page_frame_evict(), or
z_mem_page_out(), the next page fault will crash: servicing it
requires paging out a victim data page, and there is no free
storage location left to receive it.

The backing store now always reserves one free storage location
for actual page faults; if the page_fault parameter is false,
location requests fail with -ENOMEM once only that last location
remains.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Andrew Boie, 2021-01-15 12:07:45 -08:00; committed by Anas Nashif
parent cad944e259
commit c7be5dddda
4 changed files with 62 additions and 11 deletions
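
The fix, in short: the storage-location allocator must never hand its
last free location to anything but the page fault handler. Below is a
minimal standalone model of that policy (a sketch; N_SLOTS, slot_get(),
and slot_put() are illustrative names, not Zephyr APIs):

    /* Toy model of the reservation policy over N_SLOTS storage
     * locations; illustrative only.
     */
    #include <stdbool.h>

    #define N_SLOTS 16

    static unsigned int free_slots = N_SLOTS;

    /* Hand out one location, or fail with -1. Only a page fault may
     * take the last free location.
     */
    static int slot_get(bool page_fault)
    {
        if (free_slots == 0 || (!page_fault && free_slots == 1)) {
            return -1; /* would leave page faults with no room */
        }
        free_slots--;
        return 0;
    }

    static void slot_put(void)
    {
        free_slots++;
    }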

kernel/include/mmu.h

@@ -268,14 +268,21 @@ void z_eviction_init(void);
  * may simply generate location tokens purely as a function of pf->addr with no
  * other management necessary.
  *
+ * This function distinguishes whether it was called on behalf of a page
+ * fault. A free backing store location must always be reserved in order for
+ * page faults to succeed. If the page_fault parameter is not set, this
+ * function should return -ENOMEM even if one location is available.
+ *
  * This function is invoked with interrupts locked.
  *
- * @param addr Virtual address to obtain a storage location
+ * @param pf Page frame to obtain a storage location for
  * @param [out] location storage location token
+ * @param page_fault Whether this request was for a page fault
  * @return 0 Success
  * @return -ENOMEM Backing store is full
  */
-int z_backing_store_location_get(struct z_page_frame *pf, uintptr_t *location);
+int z_backing_store_location_get(struct z_page_frame *pf, uintptr_t *location,
+				 bool page_fault);
 
 /**
  * Free a backing store location
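
As the comment block above notes, a large sparse backing store can
derive location tokens purely from pf->addr; such a store never runs
out of locations and can ignore the page_fault flag. A hedged sketch
of that degenerate implementation (illustrative, not in-tree code):

    /* Sparse-store sketch: the location token is simply the data
     * page's virtual address, so every request succeeds and no
     * reservation is needed. Illustrative only.
     */
    int z_backing_store_location_get(struct z_page_frame *pf,
                                     uintptr_t *location, bool page_fault)
    {
        ARG_UNUSED(page_fault); /* cannot run out of locations */

        *location = (uintptr_t)pf->addr;
        return 0;
    }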

kernel/mmu.c

@@ -607,7 +607,7 @@ static void page_frame_free_locked(struct z_page_frame *pf)
  * Returns -ENOMEM if the backing store is full
  */
 static int page_frame_prepare_locked(struct z_page_frame *pf, bool *dirty_ptr,
-				     bool page_in, uintptr_t *location_ptr)
+				     bool page_fault, uintptr_t *location_ptr)
 {
 	uintptr_t phys;
 	int ret;
@@ -632,12 +632,13 @@ static int page_frame_prepare_locked(struct z_page_frame *pf, bool *dirty_ptr,
 		dirty = dirty || !z_page_frame_is_backed(pf);
 	}
 
-	if (dirty || page_in) {
+	if (dirty || page_fault) {
 		arch_mem_scratch(phys);
 	}
 
 	if (z_page_frame_is_mapped(pf)) {
-		ret = z_backing_store_location_get(pf, location_ptr);
+		ret = z_backing_store_location_get(pf, location_ptr,
+						   page_fault);
 		if (ret != 0) {
 			LOG_ERR("out of backing store memory");
 			return -ENOMEM;
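
The renamed page_fault parameter simply flows through to the backing
store, distinguishing the two kinds of callers: eviction-driven paths
(k_mem_map() backfill, z_mem_page_out()) pass false and may now see
-ENOMEM one location early, while the page fault handler passes true.
A reduced sketch of the two call shapes (simplified; not the exact
in-tree call sites):

    /* Eviction path: refused once only the reserved location is left */
    static int evict_frame_sketch(struct z_page_frame *pf)
    {
        uintptr_t location;
        bool dirty;

        return page_frame_prepare_locked(pf, &dirty, false, &location);
    }

    /* Fault path: permitted to consume the reserved last location */
    static int fault_frame_sketch(struct z_page_frame *pf)
    {
        uintptr_t location;
        bool dirty;

        return page_frame_prepare_locked(pf, &dirty, true, &location);
    }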

subsys/demand_paging/backing_store/ram.c

@@ -51,6 +51,7 @@
 static char backing_store[CONFIG_MMU_PAGE_SIZE *
 			  CONFIG_BACKING_STORE_RAM_PAGES];
 static struct k_mem_slab backing_slabs;
+static unsigned int free_slabs;
 
 static void *location_to_slab(uintptr_t location)
 {
@@ -78,17 +79,21 @@ static uintptr_t slab_to_location(void *slab)
 	return offset;
 }
 
-int z_backing_store_location_get(struct z_page_frame *pf,
-				 uintptr_t *location)
+int z_backing_store_location_get(struct z_page_frame *pf, uintptr_t *location,
+				 bool page_fault)
 {
 	int ret;
 	void *slab;
 
-	ret = k_mem_slab_alloc(&backing_slabs, &slab, K_NO_WAIT);
-	if (ret != 0) {
+	if ((!page_fault && free_slabs == 1) || free_slabs == 0) {
 		return -ENOMEM;
 	}
+
+	ret = k_mem_slab_alloc(&backing_slabs, &slab, K_NO_WAIT);
+	__ASSERT(ret == 0, "slab count mismatch");
+	(void)ret;
 	*location = slab_to_location(slab);
+	free_slabs--;
 	return 0;
 }
 
@@ -98,6 +103,7 @@ void z_backing_store_location_free(uintptr_t location)
 	void *slab = location_to_slab(location);
 
 	k_mem_slab_free(&backing_slabs, &slab);
+	free_slabs++;
 }
 
 void z_backing_store_page_out(uintptr_t location)
@@ -121,4 +127,5 @@ void z_backing_store_init(void)
 {
 	k_mem_slab_init(&backing_slabs, backing_store, CONFIG_MMU_PAGE_SIZE,
 			CONFIG_BACKING_STORE_RAM_PAGES);
+	free_slabs = CONFIG_BACKING_STORE_RAM_PAGES;
 }
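
With this bookkeeping, a store of CONFIG_BACKING_STORE_RAM_PAGES slabs
exposes at most CONFIG_BACKING_STORE_RAM_PAGES - 1 locations to
non-fault callers. A behavior sketch with a two-page store (test-style
fragment assuming some valid pf, which the RAM store does not inspect
when allocating; not an in-tree test):

    uintptr_t loc_a, loc_b;

    /* free_slabs == 2: a non-fault (eviction) request gets a slot */
    zassert_equal(z_backing_store_location_get(pf, &loc_a, false), 0,
                  "first location should be granted");

    /* free_slabs == 1: the last slot is withheld from eviction */
    zassert_equal(z_backing_store_location_get(pf, &loc_b, false),
                  -ENOMEM, "last location should stay reserved");

    /* ...but a genuine page fault may still claim it */
    zassert_equal(z_backing_store_location_get(pf, &loc_b, true), 0,
                  "page fault should get the reserved location");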

tests/kernel/mem_protect/demand_paging/src/main.c

@@ -9,7 +9,7 @@
 #include <mmu.h>
 
 #ifdef CONFIG_BACKING_STORE_RAM_PAGES
-#define EXTRA_PAGES	CONFIG_BACKING_STORE_RAM_PAGES
+#define EXTRA_PAGES	(CONFIG_BACKING_STORE_RAM_PAGES - 1)
 #else
 #error "Unsupported configuration"
 #endif
@@ -182,6 +182,41 @@ void test_z_mem_unpin(void)
 	test_z_mem_page_out();
 }
 
+/* Show that even if we map enough anonymous memory to fill the backing
+ * store, we can still handle pagefaults.
+ * This eats up memory so should be last in the suite.
+ */
+void test_backing_store_capacity(void)
+{
+	char *mem, *ret;
+	int key;
+	unsigned long faults;
+	size_t size = (((CONFIG_BACKING_STORE_RAM_PAGES - 1) - HALF_PAGES) *
+		       CONFIG_MMU_PAGE_SIZE);
+
+	/* Consume the rest of memory */
+	mem = k_mem_map(size, K_MEM_PERM_RW);
+	zassert_not_null(mem, "k_mem_map failed");
+
+	/* Show no memory is left */
+	ret = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);
+	zassert_is_null(ret, "k_mem_map shouldn't have succeeded");
+
+	key = irq_lock();
+	faults = z_num_pagefaults_get();
+	/* Poke all anonymous memory */
+	for (size_t i = 0; i < HALF_BYTES; i++) {
+		arena[i] = nums[i % 10];
+	}
+	for (size_t i = 0; i < size; i++) {
+		mem[i] = nums[i % 10];
+	}
+	faults = z_num_pagefaults_get() - faults;
+	irq_unlock(key);
+
+	zassert_not_equal(faults, 0, "should have had some pagefaults");
+}
+
 /* ztest main entry*/
 void test_main(void)
 {
@@ -191,7 +226,8 @@ void test_main(void)
 		ztest_unit_test(test_z_mem_page_out),
 		ztest_unit_test(test_z_mem_page_in),
 		ztest_unit_test(test_z_mem_pin),
-		ztest_unit_test(test_z_mem_unpin)
-		);
+		ztest_unit_test(test_z_mem_unpin),
+		ztest_unit_test(test_backing_store_capacity)
+		);
 	ztest_run_test_suite(test_demand_paging);
 }
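
A note on the test arithmetic: with one location now reserved, only
CONFIG_BACKING_STORE_RAM_PAGES - 1 backing store pages are usable for
eviction, which is why EXTRA_PAGES shrinks by one. The new test then
maps (CONFIG_BACKING_STORE_RAM_PAGES - 1) - HALF_PAGES further pages so
that, together with the HALF_PAGES of arena the earlier tests page
around, every usable location is consumed; the write loops still
complete because the reserved location keeps each page fault servable.
For example, with the hypothetical values
CONFIG_BACKING_STORE_RAM_PAGES == 16 and HALF_PAGES == 8, the test maps
15 - 8 = 7 extra pages, and the sixteenth location remains free for the
fault handler.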