diff --git a/subsys/demand_paging/backing_store/CMakeLists.txt b/subsys/demand_paging/backing_store/CMakeLists.txt
index bbeb332083..61cabe62b6 100644
--- a/subsys/demand_paging/backing_store/CMakeLists.txt
+++ b/subsys/demand_paging/backing_store/CMakeLists.txt
@@ -11,4 +11,9 @@ include_directories(
 if(NOT DEFINED CONFIG_BACKING_STORE_CUSTOM)
   zephyr_library()
   zephyr_library_sources_ifdef(CONFIG_BACKING_STORE_RAM ram.c)
+
+  zephyr_library_sources_ifdef(
+    CONFIG_BACKING_STORE_QEMU_X86_TINY_FLASH
+    backing_store_qemu_x86_tiny.c
+  )
 endif()
diff --git a/subsys/demand_paging/backing_store/Kconfig b/subsys/demand_paging/backing_store/Kconfig
index bdbc4c9d3e..16781dbd11 100644
--- a/subsys/demand_paging/backing_store/Kconfig
+++ b/subsys/demand_paging/backing_store/Kconfig
@@ -18,6 +18,16 @@ config BACKING_STORE_RAM
 	  This implements a backing store using physical RAM pages that the
 	  Zephyr kernel is otherwise unaware of. It is intended for
 	  demonstration and testing of the demand paging feature.
+
+config BACKING_STORE_QEMU_X86_TINY_FLASH
+	bool "Flash-based backing store on qemu_x86_tiny"
+	depends on BOARD_QEMU_X86_TINY
+	help
+	  This uses the "flash" memory area (in DTS) as the backing store
+	  for demand paging. The qemu_x86_tiny.ld linker script puts
+	  the symbols outside of boot and pinned sections into the flash
+	  area, allowing testing of the demand paging mechanism on
+	  code and data.
 endchoice
 
 if BACKING_STORE_RAM
diff --git a/subsys/demand_paging/backing_store/backing_store_qemu_x86_tiny.c b/subsys/demand_paging/backing_store/backing_store_qemu_x86_tiny.c
new file mode 100644
index 0000000000..bd75ef2de2
--- /dev/null
+++ b/subsys/demand_paging/backing_store/backing_store_qemu_x86_tiny.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2021 Intel Corporation
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+/**
+ * @file
+ * @brief Backing store on qemu_x86_tiny for testing
+ *
+ * This uses the "flash" memory area (in DTS) as the backing store
+ * for demand paging. The qemu_x86_tiny.ld linker script puts
+ * the symbols outside of boot and pinned sections into the flash
+ * area, allowing testing of the demand paging mechanism on
+ * code and data.
+ */
+
+#include <zephyr.h>
+#include <string.h>
+#include <sys/__assert.h>
+#include <sys/util.h>
+#include <mmu.h>
+
+void *location_to_flash(uintptr_t location)
+{
+	uintptr_t ptr = location;
+
+	/* Offset from start of virtual address space */
+	ptr -= CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_OFFSET;
+
+	/* Translate the offset into address to flash */
+	ptr += CONFIG_FLASH_BASE_ADDRESS;
+
+	__ASSERT_NO_MSG(ptr >= CONFIG_FLASH_BASE_ADDRESS);
+	__ASSERT_NO_MSG(ptr < (CONFIG_FLASH_BASE_ADDRESS +
+			       KB(CONFIG_FLASH_SIZE)
+			       - CONFIG_MMU_PAGE_SIZE));
+
+	return UINT_TO_POINTER(ptr);
+}
+
+int k_mem_paging_backing_store_location_get(struct z_page_frame *pf,
+					    uintptr_t *location,
+					    bool page_fault)
+{
+	/* Simply returns the virtual address */
+	*location = POINTER_TO_UINT(pf->addr);
+
+	return 0;
+}
+
+void k_mem_paging_backing_store_location_free(uintptr_t location)
+{
+	/* Nothing to do */
+}
+
+void k_mem_paging_backing_store_page_out(uintptr_t location)
+{
+	(void)memcpy(location_to_flash(location), Z_SCRATCH_PAGE,
+		     CONFIG_MMU_PAGE_SIZE);
+}
+
+void k_mem_paging_backing_store_page_in(uintptr_t location)
+{
+	(void)memcpy(Z_SCRATCH_PAGE, location_to_flash(location),
+		     CONFIG_MMU_PAGE_SIZE);
+}
+
+void k_mem_paging_backing_store_page_finalize(struct z_page_frame *pf,
+					      uintptr_t location)
+{
+	/* Nothing to do */
+}
+
+void k_mem_paging_backing_store_init(void)
+{
+	/* Nothing to do */
+}
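
Note on the patch above: location_to_flash() maps an evicted page's virtual address onto the flash region by taking its offset from the start of the kernel's virtual address space and rebasing that offset onto CONFIG_FLASH_BASE_ADDRESS. The following host-side C sketch (not part of the patch) mirrors that arithmetic; the EXAMPLE_* constants are hypothetical stand-ins for the board's CONFIG_KERNEL_VM_BASE, CONFIG_KERNEL_VM_OFFSET and CONFIG_FLASH_BASE_ADDRESS values, which on real hardware come from Kconfig/DTS.

/*
 * Standalone illustration of the virtual-address-to-flash translation.
 * The constants are invented for the example and will differ per board.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_VM_BASE    0x00100000UL /* stand-in for CONFIG_KERNEL_VM_BASE */
#define EXAMPLE_VM_OFFSET  0x00000000UL /* stand-in for CONFIG_KERNEL_VM_OFFSET */
#define EXAMPLE_FLASH_BASE 0x00200000UL /* stand-in for CONFIG_FLASH_BASE_ADDRESS */

/* Same arithmetic as location_to_flash(): rebase the VM offset onto flash */
static uintptr_t example_location_to_flash(uintptr_t location)
{
	/* Offset of the page from the start of the virtual address space */
	uintptr_t offset = location - (EXAMPLE_VM_BASE + EXAMPLE_VM_OFFSET);

	/* The same offset, relative to the base of the flash region */
	return EXAMPLE_FLASH_BASE + offset;
}

int main(void)
{
	uintptr_t vaddr = EXAMPLE_VM_BASE + 0x3000; /* some paged-out page */

	printf("virtual 0x%" PRIxPTR " -> flash 0x%" PRIxPTR "\n",
	       vaddr, example_location_to_flash(vaddr));
	return 0;
}

Because the backing-store "location" handed out by k_mem_paging_backing_store_location_get() is simply the page's virtual address, this one-to-one translation is all that page_out() and page_in() need in order to copy a page between the scratch page and its fixed slot in flash.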