diff --git a/include/sys/mem_manage.h b/include/sys/mem_manage.h
index 71143cce4c..c16b1647a6 100644
--- a/include/sys/mem_manage.h
+++ b/include/sys/mem_manage.h
@@ -278,8 +278,11 @@ void z_phys_unmap(uint8_t *virt, size_t size);
  * Zephyr treats page faults on this guard page as a fatal K_ERR_STACK_CHK_FAIL
  * if it determines it immediately precedes a stack buffer, this is
  * implemented in the architecture layer.
+ *
+ * DEPRECATED: k_mem_map() will always allocate guard pages, so this bit
+ * no longer has any effect.
  */
-#define K_MEM_MAP_GUARD BIT(18)
+#define K_MEM_MAP_GUARD __DEPRECATED_MACRO BIT(18)
 
 /**
  * Return the amount of free memory available
@@ -320,6 +323,10 @@ size_t k_mem_free_get(void);
  * parameter, and any base address for re-mapping purposes must be page-
  * aligned.
  *
+ * Note that the allocation includes two guard pages immediately before
+ * and after the requested region. The total size of the allocation will be
+ * the requested size plus the size of these two guard pages.
+ *
  * Many K_MEM_MAP_* flags have been implemented to alter the behavior of this
  * function, with details in the documentation for these flags.
  *
diff --git a/kernel/mmu.c b/kernel/mmu.c
index 2b96b3b7e5..3425581fd6 100644
--- a/kernel/mmu.c
+++ b/kernel/mmu.c
@@ -348,10 +348,9 @@ static int map_anon_page(void *addr, uint32_t flags)
 void *k_mem_map(size_t size, uint32_t flags)
 {
 	uint8_t *dst;
-	size_t total_size = size;
+	size_t total_size;
 	int ret;
 	k_spinlock_key_t key;
-	bool guard = (flags & K_MEM_MAP_GUARD) != 0U;
 	uint8_t *pos;
 
 	__ASSERT(!(((flags & K_MEM_PERM_USER) != 0U) &&
@@ -366,22 +365,26 @@ void *k_mem_map(size_t size, uint32_t flags)
 
 	key = k_spin_lock(&z_mm_lock);
 
-	if (guard) {
-		/* Need extra virtual page for the guard which we
-		 * won't map
-		 */
-		total_size += CONFIG_MMU_PAGE_SIZE;
-	}
+	/* Need extra for the guard pages (before and after) which we
+	 * won't map.
+	 */
+	total_size = size + CONFIG_MMU_PAGE_SIZE * 2;
 
 	dst = virt_region_get(total_size);
 	if (dst == NULL) {
 		/* Address space has no free region */
 		goto out;
 	}
-	if (guard) {
-		/* Skip over the guard page in returned address. */
-		dst += CONFIG_MMU_PAGE_SIZE;
-	}
+
+	/* Unmap both guard pages to make sure accessing them
+	 * will generate fault.
+	 */
+	arch_mem_unmap(dst, CONFIG_MMU_PAGE_SIZE);
+	arch_mem_unmap(dst + CONFIG_MMU_PAGE_SIZE + size,
+		       CONFIG_MMU_PAGE_SIZE);
+
+	/* Skip over the "before" guard page in returned address. */
+	dst += CONFIG_MMU_PAGE_SIZE;
 
 	VIRT_FOREACH(dst, size, pos) {
 		ret = map_anon_page(pos, flags);
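
For reference, here is a minimal caller sketch of what the new behavior means in practice. It is not part of the patch; the function name guard_page_demo, the two-page request size, and the use of K_MEM_PERM_RW are illustrative assumptions only.

/* Hypothetical caller, not part of this patch. Assumes CONFIG_MMU is
 * enabled; the request size and permission flag are illustrative.
 */
#include <zephyr.h>
#include <sys/mem_manage.h>

void guard_page_demo(void)
{
	size_t size = 2 * CONFIG_MMU_PAGE_SIZE;

	/* With this change the kernel reserves size plus two extra pages of
	 * virtual address space and leaves both extra pages unmapped.
	 */
	uint8_t *buf = k_mem_map(size, K_MEM_PERM_RW);

	if (buf == NULL) {
		return; /* no free virtual region or page frames */
	}

	/* The returned pointer already skips the "before" guard page, so
	 * every byte of the requested region is usable as normal...
	 */
	buf[0] = 0xaa;
	buf[size - 1] = 0x55;

	/* ...whereas touching buf[-1] or buf[size] would land on an unmapped
	 * guard page and generate a page fault.
	 */
}

Apart from the deprecation of K_MEM_MAP_GUARD, the caller-visible contract is unchanged: the returned pointer still points at the first usable byte of the requested region, while the guard pages before and after it are now always reserved and left unmapped.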