kernel: mmu: z_backing_store* to k_mem_paging_backing_store*

These functions are the ones that need to be implemented by
the backing store outside the kernel. Promote them from z_* so
they can be included in the documentation.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
Daniel Leung 2021-05-13 11:57:54 -07:00 committed by Anas Nashif
parent 31c362d966
commit dfa4b7e375
7 changed files with 168 additions and 151 deletions

View file

@@ -743,8 +743,9 @@ config DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS
gathering execution timing information for demand paging.
This requires k_mem_paging_eviction_histogram_bounds[] and
z_backing_store_histogram_bounds[] to define the upper bounds
for each bin. See kernel/statistics.c for information.
k_mem_paging_backing_store_histogram_bounds[] to define
the upper bounds for each bin. See kernel/statistics.c for
information.
endif # DEMAND_PAGING
endif # MMU
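For reference, the bounds arrays named in the help text above are ordinary C symbols: kernel/statistics.c provides weak defaults (shown further down in this diff), and a platform or test can override them with its own values. A minimal sketch, assuming CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS is configured as 4 and using placeholder cycle counts:

/* Illustrative override of the weak defaults; values are hardware cycles
 * (placeholders here) and the number of initializers must match
 * CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS.
 */
unsigned long
k_mem_paging_backing_store_histogram_bounds[
	CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS] = {
	1000,
	10000,
	50000,
	200000,
};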

View file

@@ -537,6 +537,130 @@ void k_mem_paging_eviction_init(void);
/** @} */
/**
* Backing store APIs
*
* @defgroup mem-demand-paging-backing-store Backing Store APIs
* @{
*/
/**
* Reserve or fetch a storage location for a data page loaded into a page frame
*
* The returned location token must be unique to the mapped virtual address.
* This location will be used in the backing store to page out data page
* contents for later retrieval. The location value must be page-aligned.
*
* This function may be called multiple times on the same data page. If its
* page frame has its Z_PAGE_FRAME_BACKED bit set, it is expected to return
* the previous backing store location for the data page containing a cached
* clean copy. This clean copy may be updated on page-out, or used to
* discard clean pages without needing to write out their contents.
*
* If the backing store is full, some other backing store location which caches
* a loaded data page may be selected, in which case its associated page frame
* will have the Z_PAGE_FRAME_BACKED bit cleared (as it is no longer cached).
*
* pf->addr will indicate the virtual address the page is currently mapped to.
* Large, sparse backing stores which can contain the entire address space
* may simply generate location tokens purely as a function of pf->addr with no
* other management necessary.
*
* This function distinguishes whether it was called on behalf of a page
* fault. A free backing store location must always be reserved in order for
* page faults to succeed. If the page_fault parameter is not set, this
* function should return -ENOMEM even if one location is available.
*
* This function is invoked with interrupts locked.
*
* @param pf Page frame containing the data page
* @param [out] location Storage location token
* @param page_fault Whether this request was for a page fault
* @return 0 Success
* @return -ENOMEM Backing store is full
*/
int k_mem_paging_backing_store_location_get(struct z_page_frame *pf,
uintptr_t *location,
bool page_fault);
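To make the -ENOMEM rule above concrete, here is a sketch of a slab-backed implementation that keeps one location in reserve for page faults. It borrows the backing_slabs, free_slabs and slab_to_location() names from the RAM backing store shown later in this diff, but the body is a simplified reconstruction under those assumptions, not that sample's exact code:

int k_mem_paging_backing_store_location_get(struct z_page_frame *pf,
					    uintptr_t *location,
					    bool page_fault)
{
	void *slab;

	ARG_UNUSED(pf);

	/* Hold the last free slab back so a page fault can always be
	 * satisfied; plain evictions must not take it.
	 */
	if ((free_slabs == 0U) || (!page_fault && (free_slabs == 1U))) {
		return -ENOMEM;
	}

	if (k_mem_slab_alloc(&backing_slabs, &slab, K_NO_WAIT) != 0) {
		return -ENOMEM;
	}

	*location = slab_to_location(slab);
	free_slabs--;

	return 0;
}

A large, sparse store that can hold the entire address space could instead derive *location directly from pf->addr, as noted above, and never run out of locations.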
/**
* Free a backing store location
*
* Any stored data may be discarded, and the location token associated with
* this address may be re-used for some other data page.
*
* This function is invoked with interrupts locked.
*
* @param location Location token to free
*/
void k_mem_paging_backing_store_location_free(uintptr_t location);
/**
* Copy a data page from Z_SCRATCH_PAGE to the specified location
*
* Immediately before this is called, Z_SCRATCH_PAGE will be mapped read-write
* to the intended source page frame for the calling context.
*
* Calls to this and k_mem_paging_backing_store_page_in() will always be
* serialized, but interrupts may be enabled.
*
* @param location Location token for the data page, for later retrieval
*/
void k_mem_paging_backing_store_page_out(uintptr_t location);
/**
* Copy a data page from the provided location to Z_SCRATCH_PAGE.
*
* Immediately before this is called, Z_SCRATCH_PAGE will be mapped read-write
* to the intended destination page frame for the calling context.
*
* Calls to this and k_mem_paging_backing_store_page_out() will always be
* serialized, but interrupts may be enabled.
*
* @param location Location token for the data page
*/
void k_mem_paging_backing_store_page_in(uintptr_t location);
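Both copy hooks reduce to a memcpy() against Z_SCRATCH_PAGE in the RAM-backed sample later in this diff; condensed here for reference, with location_to_slab() being that sample's own helper:

void k_mem_paging_backing_store_page_out(uintptr_t location)
{
	/* Z_SCRATCH_PAGE is currently mapped to the source page frame */
	(void)memcpy(location_to_slab(location), Z_SCRATCH_PAGE,
		     CONFIG_MMU_PAGE_SIZE);
}

void k_mem_paging_backing_store_page_in(uintptr_t location)
{
	/* Z_SCRATCH_PAGE is currently mapped to the destination page frame */
	(void)memcpy(Z_SCRATCH_PAGE, location_to_slab(location),
		     CONFIG_MMU_PAGE_SIZE);
}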
/**
* Update internal accounting after a page-in
*
* This is invoked after k_mem_paging_backing_store_page_in() and interrupts
* have been re-locked, making it safe to access the z_page_frame data.
* The location value will be the same passed to
* k_mem_paging_backing_store_page_in().
*
* The primary use-case for this is to update custom fields for the backing
* store in the page frame, to reflect where the data should be evicted to
* if it is paged out again. This may be a no-op in some implementations.
*
* If the backing store caches paged-in data pages, this is the appropriate
* time to set the Z_PAGE_FRAME_BACKED bit. The kernel only skips paging
* out clean data pages if they are noted as clean in the page tables and the
* Z_PAGE_FRAME_BACKED bit is set in their associated page frame.
*
* @param pf Page frame that was loaded in
* @param location Location of where the loaded data page was retrieved
*/
void k_mem_paging_backing_store_page_finalize(struct z_page_frame *pf,
uintptr_t location);
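For a backing store that does cache clean copies, the finalize hook is where that caching would be recorded. A hedged sketch: the backing_location field is hypothetical (a custom field an implementation would add for its own bookkeeping); only the Z_PAGE_FRAME_BACKED flag comes from the API described above:

void k_mem_paging_backing_store_page_finalize(struct z_page_frame *pf,
					      uintptr_t location)
{
	/* Hypothetical custom field: remember where the clean copy lives
	 * so a later location_get() can hand the same token back.
	 */
	pf->backing_location = location;

	/* Tell the kernel this frame has a clean cached copy, so clean
	 * pages can be discarded without a fresh page-out.
	 */
	pf->flags |= Z_PAGE_FRAME_BACKED;
}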
/**
* Backing store initialization function.
*
* The implementation may expect to receive page in/out calls as soon as this
* returns, but not before that. Called at POST_KERNEL.
*
* This function is expected to do two things:
* - Initialize any internal data structures and accounting for the backing
* store.
* - If the backing store already contains all or some loaded kernel data pages
* at boot time, Z_PAGE_FRAME_BACKED should be appropriately set for their
* associated page frames, and any internal accounting set up appropriately.
*/
void k_mem_paging_backing_store_init(void);
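As a reference point, the RAM-backed sample at the end of this diff reduces initialization to setting up its slab allocator. A sketch along those lines; resetting free_slabs is an assumption about that sample's accounting rather than code shown in this diff:

void k_mem_paging_backing_store_init(void)
{
	/* Carve the reserved RAM region into page-sized backing slabs */
	k_mem_slab_init(&backing_slabs, backing_store, CONFIG_MMU_PAGE_SIZE,
			CONFIG_BACKING_STORE_RAM_PAGES);

	/* Assumed accounting: every slab starts out free */
	free_slabs = CONFIG_BACKING_STORE_RAM_PAGES;
}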
/** @} */
#ifdef __cplusplus
}
#endif

View file

@@ -230,122 +230,6 @@ extern size_t z_free_page_count;
#endif
#ifdef CONFIG_DEMAND_PAGING
/*
* Backing store APIs
*/
/**
* Reserve or fetch a storage location for a data page loaded into a page frame
*
* The returned location token must be unique to the mapped virtual address.
* This location will be used in the backing store to page out data page
* contents for later retrieval. The location value must be page-aligned.
*
* This function may be called multiple times on the same data page. If its
* page frame has its Z_PAGE_FRAME_BACKED bit set, it is expected to return
* the previous backing store location for the data page containing a cached
* clean copy. This clean copy may be updated on page-out, or used to
* discard clean pages without needing to write out their contents.
*
* If the backing store is full, some other backing store location which caches
* a loaded data page may be selected, in which case its associated page frame
* will have the Z_PAGE_FRAME_BACKED bit cleared (as it is no longer cached).
*
* pf->addr will indicate the virtual address the page is currently mapped to.
* Large, sparse backing stores which can contain the entire address space
* may simply generate location tokens purely as a function of pf->addr with no
* other management necessary.
*
* This function distinguishes whether it was called on behalf of a page
* fault. A free backing store location must always be reserved in order for
* page faults to succeed. If the page_fault parameter is not set, this
* function should return -ENOMEM even if one location is available.
*
* This function is invoked with interrupts locked.
*
* @param pf Page frame containing the data page
* @param [out] location Storage location token
* @param page_fault Whether this request was for a page fault
* @return 0 Success
* @return -ENOMEM Backing store is full
*/
int z_backing_store_location_get(struct z_page_frame *pf, uintptr_t *location,
bool page_fault);
/**
* Free a backing store location
*
* Any stored data may be discarded, and the location token associated with
* this address may be re-used for some other data page.
*
* This function is invoked with interrupts locked.
*
* @param location Location token to free
*/
void z_backing_store_location_free(uintptr_t location);
/**
* Copy a data page from Z_SCRATCH_PAGE to the specified location
*
* Immediately before this is called, Z_SCRATCH_PAGE will be mapped read-write
* to the intended source page frame for the calling context.
*
* Calls to this and z_backing_store_page_in() will always be serialized,
* but interrupts may be enabled.
*
* @param location Location token for the data page, for later retrieval
*/
void z_backing_store_page_out(uintptr_t location);
/**
* Copy a data page from the provided location to Z_SCRATCH_PAGE.
*
* Immediately before this is called, Z_SCRATCH_PAGE will be mapped read-write
* to the intended destination page frame for the calling context.
*
* Calls to this and z_backing_store_page_out() will always be serialized,
* but interrupts may be enabled.
*
* @param location Location token for the data page
*/
void z_backing_store_page_in(uintptr_t location);
/**
* Update internal accounting after a page-in
*
* This is invoked after z_backing_store_page_in() and interrupts have been
* re-locked, making it safe to access the z_page_frame data. The location
* value will be the same passed to z_backing_store_page_in().
*
* The primary use-case for this is to update custom fields for the backing
* store in the page frame, to reflect where the data should be evicted to
* if it is paged out again. This may be a no-op in some implementations.
*
* If the backing store caches paged-in data pages, this is the appropriate
* time to set the Z_PAGE_FRAME_BACKED bit. The kernel only skips paging
* out clean data pages if they are noted as clean in the page tables and the
* Z_PAGE_FRAME_BACKED bit is set in their associated page frame.
*
* @param pf Page frame that was loaded in
* @param location Location of where the loaded data page was retrieved
*/
void z_backing_store_page_finalize(struct z_page_frame *pf, uintptr_t location);
/**
* Backing store initialization function.
*
* The implementation may expect to receive page in/out calls as soon as this
* returns, but not before that. Called at POST_KERNEL.
*
* This function is expected to do two things:
* - Initialize any internal data structures and accounting for the backing
* store.
* - If the backing store already contains all or some loaded kernel data pages
* at boot time, Z_PAGE_FRAME_BACKED should be appropriately set for their
* associated page frames, and any internal accounting set up appropriately.
*/
void z_backing_store_init(void);
/*
* Core kernel demand paging APIs
*/

View file

@@ -790,7 +790,7 @@ void z_mem_manage_init(void)
#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
z_paging_histogram_init();
#endif
z_backing_store_init();
k_mem_paging_backing_store_init();
k_mem_paging_eviction_init();
#endif
#if __ASSERT_ON
@@ -824,7 +824,7 @@ static inline void do_backing_store_page_in(uintptr_t location)
#endif /* CONFIG_DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS */
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
z_backing_store_page_in(location);
k_mem_paging_backing_store_page_in(location);
#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
#ifdef CONFIG_DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS
@@ -855,7 +855,7 @@ static inline void do_backing_store_page_out(uintptr_t location)
#endif /* CONFIG_DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS */
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
z_backing_store_page_out(location);
k_mem_paging_backing_store_page_out(location);
#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
#ifdef CONFIG_DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS
@@ -892,7 +892,8 @@ static void virt_region_foreach(void *addr, size_t size,
/*
* Perform some preparatory steps before paging out. The provided page frame
* must be evicted to the backing store immediately after this is called
* with a call to z_backing_store_page_out() if it contains a data page.
* with a call to k_mem_paging_backing_store_page_out() if it contains
* a data page.
*
* - Map page frame to scratch area if requested. This is always true if we're
* doing a page fault, but is only set on manual evictions if the page is
@@ -935,8 +936,8 @@ static int page_frame_prepare_locked(struct z_page_frame *pf, bool *dirty_ptr,
}
if (z_page_frame_is_mapped(pf)) {
ret = z_backing_store_location_get(pf, location_ptr,
page_fault);
ret = k_mem_paging_backing_store_location_get(pf, location_ptr,
page_fault);
if (ret != 0) {
LOG_ERR("out of backing store memory");
return -ENOMEM;
@@ -1216,14 +1217,14 @@ static bool do_page_fault(void *addr, bool pin)
* entire operation. This is far worse for system interrupt latency
* but requires fewer pinned pages, and ISRs may also take page faults.
*
* Support for allowing z_backing_store_page_out() and
* z_backing_store_page_in() to also sleep and allow other threads to
* run (such as in the case where the transfer is async DMA) is not
* implemented. Even if limited to thread context, arbitrary memory
* access triggering exceptions that put a thread to sleep on a
* contended page fault operation will break scheduling assumptions of
* cooperative threads or threads that implement critical sections with
* spinlocks or disabling IRQs.
* Support for allowing k_mem_paging_backing_store_page_out() and
* k_mem_paging_backing_store_page_in() to also sleep and allow
* other threads to run (such as in the case where the transfer is
* async DMA) is not implemented. Even if limited to thread context,
* arbitrary memory access triggering exceptions that put a thread to
* sleep on a contended page fault operation will break scheduling
* assumptions of cooperative threads or threads that implement
* critical sections with spinlocks or disabling IRQs.
*/
k_sched_lock();
__ASSERT(!k_is_in_isr(), "ISR page faults are forbidden");
@@ -1289,7 +1290,7 @@ static bool do_page_fault(void *addr, bool pin)
pf->flags |= Z_PAGE_FRAME_MAPPED;
pf->addr = addr;
arch_mem_page_in(addr, z_page_frame_to_phys(pf));
z_backing_store_page_finalize(pf, page_in_location);
k_mem_paging_backing_store_page_finalize(pf, page_in_location);
out:
irq_unlock(key);
#ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ

View file

@@ -32,7 +32,8 @@ k_mem_paging_eviction_histogram_bounds[
CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS];
extern unsigned long
z_backing_store_histogram_bounds[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS];
k_mem_paging_backing_store_histogram_bounds[
CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS];
#else
#define NS_TO_CYC(ns) (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / 1000000U * ns)
@@ -59,7 +60,8 @@ k_mem_paging_eviction_histogram_bounds[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM
* (both page-in and page-out).
*/
__weak unsigned long
z_backing_store_histogram_bounds[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS] = {
k_mem_paging_backing_store_histogram_bounds[
CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS] = {
NS_TO_CYC(10),
NS_TO_CYC(100),
NS_TO_CYC(125),
@@ -150,13 +152,13 @@ void z_paging_histogram_init(void)
memset(&z_paging_histogram_backing_store_page_in, 0,
sizeof(z_paging_histogram_backing_store_page_in));
memcpy(z_paging_histogram_backing_store_page_in.bounds,
z_backing_store_histogram_bounds,
k_mem_paging_backing_store_histogram_bounds,
sizeof(z_paging_histogram_backing_store_page_in.bounds));
memset(&z_paging_histogram_backing_store_page_out, 0,
sizeof(z_paging_histogram_backing_store_page_out));
memcpy(z_paging_histogram_backing_store_page_out.bounds,
z_backing_store_histogram_bounds,
k_mem_paging_backing_store_histogram_bounds,
sizeof(z_paging_histogram_backing_store_page_out.bounds));
}

View file

@@ -26,16 +26,18 @@
* large to hold clean copies of all mapped memory.
*
* This backing store is an example of the latter case. However, locations
* are freed as soon as pages are paged in, in z_backing_store_page_finalize().
* are freed as soon as pages are paged in, in
* k_mem_paging_backing_store_page_finalize().
* This implies that all data pages are treated as dirty, as
* Z_PAGE_FRAME_BACKED is never set, even if the data page was paged out before
* and not modified since then.
*
* An optimization a real backing store will want is to have
* z_backing_store_page_finalize() note the storage location of a paged-in
* data page in a custom field of its associated z_page_frame, and set the
* Z_PAGE_FRAME_BACKED bit. Invocations of z_backing_store_location_get() will
* have logic to return the previous clean page location instead of allocating
* k_mem_paging_backing_store_page_finalize() note the storage location of
* a paged-in data page in a custom field of its associated z_page_frame, and
* set the Z_PAGE_FRAME_BACKED bit. Invocations of
* k_mem_paging_backing_store_location_get() will have logic to return
* the previous clean page location instead of allocating
* a new one if Z_PAGE_FRAME_BACKED is set.
*
* This will, however, require the implementation of a clean page
@@ -79,8 +81,9 @@ static uintptr_t slab_to_location(void *slab)
return offset;
}
int z_backing_store_location_get(struct z_page_frame *pf, uintptr_t *location,
bool page_fault)
int k_mem_paging_backing_store_location_get(struct z_page_frame *pf,
uintptr_t *location,
bool page_fault)
{
int ret;
void *slab;
@@ -98,7 +101,7 @@ int z_backing_store_location_get(struct z_page_frame *pf, uintptr_t *location,
return 0;
}
void z_backing_store_location_free(uintptr_t location)
void k_mem_paging_backing_store_location_free(uintptr_t location)
{
void *slab = location_to_slab(location);
@@ -106,24 +109,25 @@ void z_backing_store_location_free(uintptr_t location)
free_slabs++;
}
void z_backing_store_page_out(uintptr_t location)
void k_mem_paging_backing_store_page_out(uintptr_t location)
{
(void)memcpy(location_to_slab(location), Z_SCRATCH_PAGE,
CONFIG_MMU_PAGE_SIZE);
}
void z_backing_store_page_in(uintptr_t location)
void k_mem_paging_backing_store_page_in(uintptr_t location)
{
(void)memcpy(Z_SCRATCH_PAGE, location_to_slab(location),
CONFIG_MMU_PAGE_SIZE);
}
void z_backing_store_page_finalize(struct z_page_frame *pf, uintptr_t location)
void k_mem_paging_backing_store_page_finalize(struct z_page_frame *pf,
uintptr_t location)
{
z_backing_store_location_free(location);
k_mem_paging_backing_store_location_free(location);
}
void z_backing_store_init(void)
void k_mem_paging_backing_store_init(void)
{
k_mem_slab_init(&backing_slabs, backing_store, CONFIG_MMU_PAGE_SIZE,
CONFIG_BACKING_STORE_RAM_PAGES);

View file

@@ -35,7 +35,8 @@ k_mem_paging_eviction_histogram_bounds[
};
unsigned long
z_backing_store_histogram_bounds[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS] = {
k_mem_paging_backing_store_histogram_bounds[
CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS] = {
10000,
50000,
100000,