kernel: mmu: always use before/after guard pages for k_mem_map()

When we start allowing unmapping of memory regions, there is no
reliable way to know whether k_mem_map() was called with the
guard page option specified or not. So just unconditionally
enable guard pages on both sides of the memory region to
hopefully catch access violations.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
Daniel Leung authored on 2021-04-14 11:55:47 -07:00; committed by Anas Nashif
Parent: cb0e3ede11
Commit: fe48f5a920
2 changed files with 23 additions and 13 deletions
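For illustration only (not part of the commit), here is a minimal sketch of what the change means for callers, assuming a Zephyr target with CONFIG_MMU enabled and the usual <zephyr.h> umbrella header; the function name is hypothetical. Mapping `size` bytes now consumes `size` plus two guard pages of virtual address space, and touching either guard page faults.

/* Hypothetical demo, not from the Zephyr tree. */
#include <zephyr.h>

void guard_page_demo(void)
{
	size_t size = 4 * CONFIG_MMU_PAGE_SIZE;
	uint8_t *buf = k_mem_map(size, K_MEM_PERM_RW);

	if (buf == NULL) {
		return; /* address space has no free region */
	}

	buf[0] = 0xaa;        /* OK: first mapped byte */
	buf[size - 1] = 0xbb; /* OK: last mapped byte  */

	/* Both of the following would now fault, because the pages at
	 * buf - CONFIG_MMU_PAGE_SIZE and buf + size are the unmapped guards:
	 *
	 *   buf[-1]   = 0xcc;
	 *   buf[size] = 0xdd;
	 */
}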


@@ -278,8 +278,11 @@ void z_phys_unmap(uint8_t *virt, size_t size);
  * Zephyr treats page faults on this guard page as a fatal K_ERR_STACK_CHK_FAIL
  * if it determines it immediately precedes a stack buffer, this is
  * implemented in the architecture layer.
+ *
+ * DEPRECATED: k_mem_map() will always allocate guard pages, so this bit
+ * no longer has any effect.
  */
-#define K_MEM_MAP_GUARD BIT(18)
+#define K_MEM_MAP_GUARD __DEPRECATED_MACRO BIT(18)
 
 /**
  * Return the amount of free memory available
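In practice the deprecation is source-compatible. A hedged sketch of what existing callers see (illustrative, not from the tree; the function name is hypothetical):

#include <zephyr.h>

static void guard_flag_is_now_a_no_op(void)
{
	/* Both calls produce the same mapping, since guard pages are always
	 * reserved; the second is additionally expected to emit a deprecation
	 * diagnostic through __DEPRECATED_MACRO.
	 */
	void *a = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);
	void *b = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW | K_MEM_MAP_GUARD);

	ARG_UNUSED(a);
	ARG_UNUSED(b);
}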
@@ -320,6 +323,10 @@ size_t k_mem_free_get(void);
  * parameter, and any base address for re-mapping purposes must be page-
  * aligned.
  *
+ * Note that the allocation includes two guard pages immediately before
+ * and after the requested region. The total size of the allocation will be
+ * the requested size plus the size of these two guard pages.
+ *
  * Many K_MEM_MAP_* flags have been implemented to alter the behavior of this
  * function, with details in the documentation for these flags.
  *
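A worked example of the size accounting documented above (hypothetical helper, not part of the commit): with 4 KiB pages, a 16 KiB request occupies 24 KiB of virtual address space, of which only the middle 16 KiB is backed by page frames.

#include <zephyr.h>

/* Illustrative only: virtual address space consumed by a k_mem_map() request
 * of `size` bytes after this change (requested region plus one guard page on
 * each side).
 */
static size_t k_mem_map_virt_footprint(size_t size)
{
	return size + 2 * CONFIG_MMU_PAGE_SIZE;
}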


@@ -348,10 +348,9 @@ static int map_anon_page(void *addr, uint32_t flags)
 void *k_mem_map(size_t size, uint32_t flags)
 {
 	uint8_t *dst;
-	size_t total_size = size;
+	size_t total_size;
 	int ret;
 	k_spinlock_key_t key;
-	bool guard = (flags & K_MEM_MAP_GUARD) != 0U;
 	uint8_t *pos;
 
 	__ASSERT(!(((flags & K_MEM_PERM_USER) != 0U) &&
@@ -366,22 +365,26 @@ void *k_mem_map(size_t size, uint32_t flags)
 	key = k_spin_lock(&z_mm_lock);
 
-	if (guard) {
-		/* Need extra virtual page for the guard which we
-		 * won't map
-		 */
-		total_size += CONFIG_MMU_PAGE_SIZE;
-	}
+	/* Need extra for the guard pages (before and after) which we
+	 * won't map.
+	 */
+	total_size = size + CONFIG_MMU_PAGE_SIZE * 2;
 
 	dst = virt_region_get(total_size);
 	if (dst == NULL) {
 		/* Address space has no free region */
 		goto out;
 	}
 
-	if (guard) {
-		/* Skip over the guard page in returned address. */
-		dst += CONFIG_MMU_PAGE_SIZE;
-	}
+	/* Unmap both guard pages to make sure accessing them
+	 * will generate fault.
+	 */
+	arch_mem_unmap(dst, CONFIG_MMU_PAGE_SIZE);
+	arch_mem_unmap(dst + CONFIG_MMU_PAGE_SIZE + size,
+		       CONFIG_MMU_PAGE_SIZE);
+
+	/* Skip over the "before" guard page in returned address. */
+	dst += CONFIG_MMU_PAGE_SIZE;
 
 	VIRT_FOREACH(dst, size, pos) {
 		ret = map_anon_page(pos, flags);
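Putting the new code path together: virt_region_get() hands back the base of a region sized `size` plus two pages, the first and last page are explicitly unmapped, and the caller receives the base plus one page. A standalone sketch of that arithmetic (hypothetical names and struct, not code from the tree):

#include <stddef.h>
#include <stdint.h>

#define PAGE 4096u /* stand-in for CONFIG_MMU_PAGE_SIZE */

struct mapping_layout {
	uintptr_t guard_before; /* unmapped: [base, base + PAGE)               */
	uintptr_t user_start;   /* mapped:   [base + PAGE, base + PAGE + size) */
	uintptr_t guard_after;  /* unmapped: [base + PAGE + size, + PAGE)      */
};

static struct mapping_layout layout_for(uintptr_t base, size_t size)
{
	struct mapping_layout l = {
		.guard_before = base,               /* arch_mem_unmap(dst, PAGE)             */
		.user_start   = base + PAGE,        /* dst += PAGE, then mapped page by page */
		.guard_after  = base + PAGE + size, /* arch_mem_unmap(dst + PAGE + size, PAGE) */
	};

	return l;
}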