arch: arm: cortex_r: Add MPU and USERSPACE support

Use Cortex-M code as a basis for adding MPU support for the Cortex-R.

Signed-off-by: Phil Erwin <phil.erwin@lexmark.com>
Phil Erwin <phil.erwin@lexmark.com> authored 2020-05-03 23:30:12 -04:00; committed by Christopher Friedt
parent eb0060cd7b
commit e0bed3b989
13 changed files with 753 additions and 147 deletions

View file

@@ -30,6 +30,7 @@ config CPU_CORTEX_R
select CPU_CORTEX
select HAS_CMSIS_CORE
select HAS_FLASH_LOAD_OFFSET
select ARCH_HAS_USERSPACE if ARM_MPU
select ARCH_HAS_EXTRA_EXCEPTION_INFO
help
This option signifies the use of a CPU of the Cortex-R family.
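
With the new select in place, enabling the MPU on a Cortex-R target also
makes userspace available. A minimal application configuration might look
like the following (a sketch; it assumes the target SoC already selects
CPU_HAS_ARM_MPU and provides an MPU region table):

    # prj.conf (illustrative)
    CONFIG_ARM_MPU=y
    CONFIG_USERSPACE=y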

View file

@@ -5,3 +5,11 @@ zephyr_library()
zephyr_library_sources(arm_core_mpu.c)
zephyr_library_sources_ifdef(CONFIG_CPU_HAS_ARM_MPU arm_mpu.c)
zephyr_library_sources_ifdef(CONFIG_CPU_HAS_NXP_MPU nxp_mpu.c)
if (CONFIG_CPU_CORTEX_R)
zephyr_library_include_directories(cortex_a_r)
elseif (CONFIG_CPU_CORTEX_M)
zephyr_library_include_directories(cortex_m)
else ()
message(FATAL_ERROR "CPU is not Cortex-A/R/M")
endif ()

View file

@@ -39,36 +39,12 @@ LOG_MODULE_DECLARE(mpu);
*/
static uint8_t static_regions_num;
/**
* Get the number of supported MPU regions.
*/
static inline uint8_t get_num_regions(void)
{
#if defined(CONFIG_CPU_CORTEX_M0PLUS) || \
defined(CONFIG_CPU_CORTEX_M3) || \
defined(CONFIG_CPU_CORTEX_M4)
/* Cortex-M0+, Cortex-M3, and Cortex-M4 MCUs may
* have a fixed number of 8 MPU regions.
*/
return 8;
#elif defined(NUM_MPU_REGIONS)
/* Retrieve the number of regions from DTS configuration. */
return NUM_MPU_REGIONS;
#else
uint32_t type = MPU->TYPE;
type = (type & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos;
return (uint8_t)type;
#endif /* CPU_CORTEX_M0PLUS | CPU_CORTEX_M3 | CPU_CORTEX_M4 */
}
/* Include architecture-specific internal headers. */
#if defined(CONFIG_CPU_CORTEX_M0PLUS) || \
defined(CONFIG_CPU_CORTEX_M3) || \
defined(CONFIG_CPU_CORTEX_M4) || \
- defined(CONFIG_CPU_CORTEX_M7)
+ defined(CONFIG_CPU_CORTEX_M7) || \
+ defined(CONFIG_CPU_CORTEX_R)
#include "arm_mpu_v7_internal.h"
#elif defined(CONFIG_CPU_CORTEX_M23) || \
defined(CONFIG_CPU_CORTEX_M33) || \
@@ -109,6 +85,9 @@ static int mpu_configure_region(const uint8_t index,
/* Populate internal ARM MPU region configuration structure. */
region_conf.base = new_region->start;
#if defined(CONFIG_CPU_CORTEX_R)
region_conf.size = size_to_mpu_rasr_size(new_region->size);
#endif
get_region_attr_from_mpu_partition_info(&region_conf.attr,
&new_region->attr, new_region->start, new_region->size);
@@ -158,6 +137,38 @@ static int mpu_configure_regions(const struct z_arm_mpu_partition
/* ARM Core MPU Driver API Implementation for ARM MPU */
#if defined(CONFIG_CPU_CORTEX_R)
/**
* @brief Enable the MPU by setting the enable bit in the SCTRL register
*/
void arm_core_mpu_enable(void)
{
uint32_t val;
__asm__ volatile ("mrc p15, 0, %0, c1, c0, 0" : "=r" (val) ::);
val |= SCTRL_MPU_ENABLE;
/* Make sure that all the registers are set before proceeding */
__asm__ volatile ("dsb");
__asm__ volatile ("mcr p15, 0, %0, c1, c0, 0" :: "r" (val) :);
__asm__ volatile ("isb");
}
/**
* @brief Disable the MPU by clearing the enable bit in the SCTRL register
*/
void arm_core_mpu_disable(void)
{
uint32_t val;
__asm__ volatile ("mrc p15, 0, %0, c1, c0, 0" : "=r" (val) ::);
val &= ~SCTRL_MPU_ENABLE;
/* Force any outstanding transfers to complete before disabling MPU */
__asm__ volatile ("dsb");
__asm__ volatile ("mcr p15, 0, %0, c1, c0, 0" :: "r" (val) :);
__asm__ volatile ("isb");
}
#else
/**
* @brief enable the MPU
*/
@@ -184,6 +195,7 @@ void arm_core_mpu_disable(void)
/* Disable MPU */
MPU->CTRL = 0;
}
#endif
#if defined(CONFIG_USERSPACE)
/**

View file

@@ -10,6 +10,7 @@
#include <sys/math_extras.h>
#include <arm_mpu_internal.h>
#define LOG_LEVEL CONFIG_MPU_LOG_LEVEL
#include <logging/log.h>
@@ -29,13 +30,26 @@ static void region_init(const uint32_t index,
const struct arm_mpu_region *region_conf)
{
/* Select the region you want to access */
- MPU->RNR = index;
+ set_region_number(index);
/* Configure the region */
#if defined(CONFIG_CPU_CORTEX_R)
/*
* Clear the size register first; this disables the entry so that the
* region cannot be live while we reconfigure it.
*/
set_region_size(0);
set_region_base_address(region_conf->base & MPU_RBAR_ADDR_Msk);
set_region_attributes(region_conf->attr.rasr);
set_region_size(region_conf->size | MPU_RASR_ENABLE_Msk);
#else
MPU->RBAR = (region_conf->base & MPU_RBAR_ADDR_Msk)
| MPU_RBAR_VALID_Msk | index;
MPU->RASR = region_conf->attr.rasr | MPU_RASR_ENABLE_Msk;
LOG_DBG("[%d] 0x%08x 0x%08x",
index, region_conf->base, region_conf->attr.rasr);
#endif
}
/* @brief Partition sanity check
@@ -105,7 +119,13 @@ static inline void get_region_attr_from_mpu_partition_info(
*/
(void) base;
#if defined(CONFIG_CPU_CORTEX_R)
(void) size;
p_attr->rasr = attr->rasr_attr;
#else
p_attr->rasr = attr->rasr_attr | size_to_mpu_rasr_size(size);
#endif
}
#if defined(CONFIG_USERSPACE)
@@ -122,115 +142,11 @@ static inline int get_dyn_region_min_index(void)
return static_regions_num;
}
/**
* This internal function converts the SIZE field value of MPU_RASR
* to the region size (in bytes).
*/
static inline uint32_t mpu_rasr_size_to_size(uint32_t rasr_size)
{
return 1 << (rasr_size + 1U);
}
static inline uint32_t mpu_region_get_base(uint32_t index)
{
MPU->RNR = index;
return MPU->RBAR & MPU_RBAR_ADDR_Msk;
}
static inline uint32_t mpu_region_get_size(uint32_t index)
{
MPU->RNR = index;
uint32_t rasr_size = (MPU->RASR & MPU_RASR_SIZE_Msk) >> MPU_RASR_SIZE_Pos;
return mpu_rasr_size_to_size(rasr_size);
}
/**
* This internal function checks if region is enabled or not.
*
* Note:
* The caller must provide a valid region number.
*/
static inline int is_enabled_region(uint32_t index)
{
/* Lock IRQs to ensure RNR value is correct when reading RASR. */
unsigned int key;
uint32_t rasr;
key = irq_lock();
MPU->RNR = index;
rasr = MPU->RASR;
irq_unlock(key);
return (rasr & MPU_RASR_ENABLE_Msk) ? 1 : 0;
}
/* Only a single bit is set for all user accessible permissions.
* In ARMv7-M MPU this is bit AP[1].
*/
#define MPU_USER_READ_ACCESSIBLE_Msk (P_RW_U_RO & P_RW_U_RW & P_RO_U_RO & RO)
/**
* This internal function returns the access permissions of an MPU region
* specified by its region index.
*
* Note:
* The caller must provide a valid region number.
*/
static inline uint32_t get_region_ap(uint32_t r_index)
{
/* Lock IRQs to ensure RNR value is correct when reading RASR. */
unsigned int key;
uint32_t rasr;
key = irq_lock();
MPU->RNR = r_index;
rasr = MPU->RASR;
irq_unlock(key);
return (rasr & MPU_RASR_AP_Msk) >> MPU_RASR_AP_Pos;
}
/**
* This internal function checks if the given buffer is in the region.
*
* Note:
* The caller must provide a valid region number.
*/
static inline int is_in_region(uint32_t r_index, uint32_t start, uint32_t size)
{
uint32_t r_addr_start;
uint32_t r_size_lshift;
uint32_t r_addr_end;
uint32_t end;
/* Lock IRQs to ensure RNR value is correct when reading RBAR, RASR. */
unsigned int key;
uint32_t rbar, rasr;
key = irq_lock();
MPU->RNR = r_index;
rbar = MPU->RBAR;
rasr = MPU->RASR;
irq_unlock(key);
r_addr_start = rbar & MPU_RBAR_ADDR_Msk;
r_size_lshift = ((rasr & MPU_RASR_SIZE_Msk) >>
MPU_RASR_SIZE_Pos) + 1U;
r_addr_end = r_addr_start + (1UL << r_size_lshift) - 1UL;
size = size == 0U ? 0U : size - 1U;
if (u32_add_overflow(start, size, &end)) {
return 0;
}
if ((start >= r_addr_start) && (end <= r_addr_end)) {
return 1;
}
return 0;
}
/**
* This internal function checks if the region is user accessible or not.
*

View file

@@ -530,6 +530,23 @@ static int mpu_mark_areas_for_dynamic_regions(
return 0;
}
/**
* Get the number of supported MPU regions.
*/
static inline uint8_t get_num_regions(void)
{
#if defined(NUM_MPU_REGIONS)
/* Retrieve the number of regions from DTS configuration. */
return NUM_MPU_REGIONS;
#else
uint32_t type = MPU->TYPE;
type = (type & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos;
return (uint8_t)type;
#endif /* NUM_MPU_REGIONS */
}
/* This internal function programs the dynamic MPU regions.
*
* It returns the number of MPU region indices configured.

View file

@@ -0,0 +1,163 @@
/* SPDX-License-Identifier: Apache-2.0
*
* Copyright (c) 2019 Lexmark International, Inc.
*/
#include <sys/math_extras.h>
/**
* Get the number of supported MPU regions.
*/
static inline uint8_t get_num_regions(void)
{
#if defined(NUM_MPU_REGIONS)
/* Retrieve the number of regions from DTS configuration. */
return NUM_MPU_REGIONS;
#else
uint32_t type;
__asm__ volatile("mrc p15, 0, %0, c0, c0, 4" : "=r" (type) ::);
type = (type & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos;
return (uint8_t)type;
#endif /* NUM_MPU_REGIONS */
}
static inline uint32_t get_region_attributes(void)
{
uint32_t attr;
__asm__ volatile("mrc p15, 0, %0, c6, c1, 4" : "=r" (attr) ::);
return attr;
}
static inline uint32_t get_region_base_address(void)
{
uint32_t addr;
__asm__ volatile("mrc p15, 0, %0, c6, c1, 0" : "=r" (addr) ::);
return addr;
}
static inline uint32_t get_region_size(void)
{
uint32_t size;
__asm__ volatile("mrc p15, 0, %0, c6, c1, 2" : "=r" (size) ::);
return size;
}
static inline void set_region_attributes(uint32_t attr)
{
__asm__ volatile("mcr p15, 0, %0, c6, c1, 4" :: "r" (attr) :);
}
static inline void set_region_base_address(uint32_t addr)
{
__asm__ volatile("mcr p15, 0, %0, c6, c1, 0" :: "r" (addr) :);
}
static inline void set_region_number(uint32_t index)
{
__asm__ volatile("mcr p15, 0, %0, c6, c2, 0" :: "r" (index) :);
}
static inline uint32_t mpu_region_get_base(uint32_t index)
{
set_region_number(index);
return get_region_base_address() & MPU_RBAR_ADDR_Msk;
}
/**
* This internal function converts the SIZE field value of MPU_RASR
* to the region size (in bytes).
*/
static inline uint32_t mpu_rasr_size_to_size(uint32_t rasr_size)
{
return 1 << (rasr_size + 1U);
}
static inline void set_region_size(uint32_t size)
{
__asm__ volatile("mcr p15, 0, %0, c6, c1, 2" :: "r" (size) :);
}
static inline void ARM_MPU_ClrRegion(uint32_t rnr)
{
set_region_number(rnr);
/* clear size field, which contains enable bit */
set_region_size(0);
}
/**
* This internal function checks if region is enabled or not.
*
* Note:
* The caller must provide a valid region number.
*/
static inline int is_enabled_region(uint32_t index)
{
set_region_number(index);
return (get_region_size() & MPU_RASR_ENABLE_Msk) ? 1 : 0;
}
/**
* This internal function returns the access permissions of an MPU region
* specified by its region index.
*
* Note:
* The caller must provide a valid region number.
*/
static inline uint32_t get_region_ap(uint32_t r_index)
{
set_region_number(r_index);
return (get_region_attributes() & MPU_RASR_AP_Msk) >> MPU_RASR_AP_Pos;
}
/**
* This internal function checks if the given buffer is in the region.
*
* Note:
* The caller must provide a valid region number.
*/
static inline int is_in_region(uint32_t r_index, uint32_t start, uint32_t size)
{
uint32_t r_addr_start;
uint32_t r_size_lshift;
uint32_t r_addr_end;
uint32_t end;
set_region_number(r_index);
r_addr_start = get_region_base_address() & MPU_RBAR_ADDR_Msk;
r_size_lshift = ((get_region_size() & MPU_RASR_SIZE_Msk) >>
MPU_RASR_SIZE_Pos) + 1;
r_addr_end = r_addr_start + (1UL << r_size_lshift) - 1;
size = size == 0 ? 0 : size - 1;
if (u32_add_overflow(start, size, &end)) {
return 0;
}
if ((start >= r_addr_start) && (end <= r_addr_end)) {
return 1;
}
return 0;
}
static inline uint32_t mpu_region_get_size(uint32_t index)
{
set_region_number(index);
uint32_t rasr_size =
(get_region_size() & MPU_RASR_SIZE_Msk) >> MPU_RASR_SIZE_Pos;
return mpu_rasr_size_to_size(rasr_size);
}
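
Note: mpu_configure_region() in arm_mpu.c calls size_to_mpu_rasr_size(),
which does not appear in this diff. A minimal sketch of such a helper,
assuming the 2^(SIZE+1) encoding used by mpu_rasr_size_to_size() above and
a power-of-two input (an illustration, not the committed code):

    /* Sketch: inverse of mpu_rasr_size_to_size(); result is already
     * shifted into the RASR SIZE field position.
     */
    static inline uint32_t size_to_mpu_rasr_size(uint32_t size)
    {
            /* log2(size); the SIZE field encodes log2(size) - 1 */
            uint32_t log2_size = 31U - (uint32_t)__builtin_clz(size);

            return ((log2_size - 1U) << MPU_RASR_SIZE_Pos) &
                   MPU_RASR_SIZE_Msk;
    }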

View file

@@ -0,0 +1,140 @@
/* SPDX-License-Identifier: Apache-2.0
*
* Copyright (c) 2019 Lexmark International, Inc.
*/
#include <sys/math_extras.h>
/**
* Get the number of supported MPU regions.
*/
static inline uint8_t get_num_regions(void)
{
#if defined(CONFIG_CPU_CORTEX_M0PLUS) || \
defined(CONFIG_CPU_CORTEX_M3) || \
defined(CONFIG_CPU_CORTEX_M4)
/* Cortex-M0+, Cortex-M3, and Cortex-M4 MCUs may
* have a fixed number of 8 MPU regions.
*/
return 8;
#elif defined(NUM_MPU_REGIONS)
/* Retrieve the number of regions from DTS configuration. */
return NUM_MPU_REGIONS;
#else
uint32_t type = MPU->TYPE;
type = (type & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos;
return (uint8_t)type;
#endif /* CPU_CORTEX_M0PLUS | CPU_CORTEX_M3 | CPU_CORTEX_M4 */
}
static inline void set_region_number(uint32_t index)
{
MPU->RNR = index;
}
static inline uint32_t mpu_region_get_base(uint32_t index)
{
MPU->RNR = index;
return MPU->RBAR & MPU_RBAR_ADDR_Msk;
}
/**
* This internal function converts the SIZE field value of MPU_RASR
* to the region size (in bytes).
*/
static inline uint32_t mpu_rasr_size_to_size(uint32_t rasr_size)
{
return 1 << (rasr_size + 1U);
}
/**
* This internal function checks if region is enabled or not.
*
* Note:
* The caller must provide a valid region number.
*/
static inline int is_enabled_region(uint32_t index)
{
/* Lock IRQs to ensure RNR value is correct when reading RASR. */
unsigned int key;
uint32_t rasr;
key = irq_lock();
MPU->RNR = index;
rasr = MPU->RASR;
irq_unlock(key);
return (rasr & MPU_RASR_ENABLE_Msk) ? 1 : 0;
}
/**
* This internal function returns the access permissions of an MPU region
* specified by its region index.
*
* Note:
* The caller must provide a valid region number.
*/
static inline uint32_t get_region_ap(uint32_t r_index)
{
/* Lock IRQs to ensure RNR value is correct when reading RASR. */
unsigned int key;
uint32_t rasr;
key = irq_lock();
MPU->RNR = r_index;
rasr = MPU->RASR;
irq_unlock(key);
return (rasr & MPU_RASR_AP_Msk) >> MPU_RASR_AP_Pos;
}
/**
* This internal function checks if the given buffer is in the region.
*
* Note:
* The caller must provide a valid region number.
*/
static inline int is_in_region(uint32_t r_index, uint32_t start, uint32_t size)
{
uint32_t r_addr_start;
uint32_t r_size_lshift;
uint32_t r_addr_end;
uint32_t end;
/* Lock IRQs to ensure RNR value is correct when reading RBAR, RASR. */
unsigned int key;
uint32_t rbar, rasr;
key = irq_lock();
MPU->RNR = r_index;
rbar = MPU->RBAR;
rasr = MPU->RASR;
irq_unlock(key);
r_addr_start = rbar & MPU_RBAR_ADDR_Msk;
r_size_lshift = ((rasr & MPU_RASR_SIZE_Msk) >>
MPU_RASR_SIZE_Pos) + 1U;
r_addr_end = r_addr_start + (1UL << r_size_lshift) - 1UL;
size = size == 0U ? 0U : size - 1U;
if (u32_add_overflow(start, size, &end)) {
return 0;
}
if ((start >= r_addr_start) && (end <= r_addr_end)) {
return 1;
}
return 0;
}
static inline uint32_t mpu_region_get_size(uint32_t index)
{
MPU->RNR = index;
uint32_t rasr_size =
(MPU->RASR & MPU_RASR_SIZE_Msk) >> MPU_RASR_SIZE_Pos;
return mpu_rasr_size_to_size(rasr_size);
}

View file

@@ -356,6 +356,15 @@ _thread_irq_disabled:
cps #MODE_SYS
ldm r0, {r4-r11, sp}
cps #MODE_SVC
#if defined (CONFIG_ARM_MPU)
/* r2 contains k_thread */
mov r0, r2
/* Re-program dynamic memory map */
push {r2, lr}
bl z_arm_configure_dynamic_mpu_regions
pop {r2, lr}
#endif
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
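
For reference, z_arm_configure_dynamic_mpu_regions() is the existing arch
API that programs the per-thread regions (user thread stack, memory domain
partitions) for the incoming thread; the call above re-runs it on every
context switch. Its C declaration, as used elsewhere in the arch code:

    /* Re-program the dynamic MPU regions for the given thread. */
    void z_arm_configure_dynamic_mpu_regions(struct k_thread *thread);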
@@ -642,17 +651,22 @@ SECTION_FUNC(TEXT, z_arm_svc)
beq demux
ldr r1, [lr, #-2]
bic r1, #0xff00
and r1, #0xff
/*
* grab service call number:
* 0: context switch
* 1: irq_offload (if configured)
* 2: kernel panic or oops (software generated fatal exception)
- * Planned implementation of system calls for memory protection will
- * expand this case.
+ * 3: system calls for memory protection
*/
demux:
#if defined(CONFIG_USERSPACE)
cmp r1, #_SVC_CALL_SYSTEM_CALL
beq _do_syscall
#endif
cmp r1, #_SVC_CALL_CONTEXT_SWITCH
beq _context_switch
@@ -679,6 +693,128 @@ _oops:
cpsie i
movs pc, lr
#if defined(CONFIG_USERSPACE)
/*
* System call will set up a jump to the z_arm_do_syscall function
* when the SVC returns via the bx lr.
*
* There is some trickery involved here because we have to preserve
* the original PC value so that we can return to the caller of
* the SVC.
*
* On SVC exception, the USER/SYSTEM stack looks like the following:
*
* sp+0: r0
* sp+4: r1
* sp+8: r2
* sp+12: r3
* sp+16: r12
* sp+20: LR_svc (address of opcode just following SVC opcode)
*
* Registers look like:
* r0 - arg1
* r1 - arg2
* r2 - arg3
* r3 - arg4
* r4 - arg5
* r5 - arg6
* r6 - call_id
* r8 - saved link register
*/
_do_syscall:
/* validate syscall limit, only set priv mode if valid */
ldr ip, =K_SYSCALL_LIMIT
cmp r6, ip
blo valid_syscall_id
/* bad syscall id. Set arg0 to bad id and set call_id to SYSCALL_BAD */
cps #MODE_SYS
str r6, [sp]
cps #MODE_SVC
ldr r6, =K_SYSCALL_BAD
valid_syscall_id:
push {r0, r1}
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
ldr r1, [r0, #_thread_offset_to_mode]
bic r1, #1
/* Store (privileged) mode in thread's mode state variable */
str r1, [r0, #_thread_offset_to_mode]
dsb
/* ISB is not strictly necessary here (stack pointer is not being
* touched), but it's recommended to avoid executing pre-fetched
* instructions with the previous privilege.
*/
isb
/*
* restore r0-r3 from supervisor stack before changing to system mode.
* r0,r1 saved just after valid_syscall_id
* r2,r3 saved just after z_arm_svc
*/
pop {r0-r3}
add sp, sp, r3 /* undo stack pointer alignment to double-word boundary */
/*
* We need to store the spsr_svc onto the user stack before going off to
* the system call dispatcher. This is needed since the system call may
* call _swap() which will invoke another SVC, overwriting this register.
*/
mrs r0, spsr
/* Switch to system mode */
cps #MODE_SYS
/*
* save spsr_svc to the user stack to be restored after the system call
* completes
*/
push {r0}
/*
* Restore the nested level. The thread that is doing the system call may
* be put to sleep, as in the case of waiting in k_msgq_get() with
* K_FOREVER, so we don't want the nesting level to stay elevated for
* that entire time.
*/
ldr r2, =_kernel
ldr r1, [r2, #_kernel_offset_to_nested]
sub r1, r1, #1
str r1, [r2, #_kernel_offset_to_nested]
/*
* restore r0-r3 from stack since we've used them above during demux
*/
ldr r0, [sp, #4]
ldr r1, [sp, #8]
ldr r2, [sp, #12]
ldr r3, [sp, #16]
/*
* grab return address from USER/SYSTEM stack frame
* (just past the SVC opcode)
*/
ldr r8, [sp, #24]
/*
* User stack left with:
*
* sp+0: spsr_svc
* sp+4: r0
* sp+8: r1
* sp+12: r2
* sp+16: r3
* sp+20: r12
* sp+24: LR_svc (address of opcode just following SVC opcode)
*/
/* branch to z_arm_do_syscall. We will not return here. */
b z_arm_do_syscall
#endif
GTEXT(z_arm_cortex_r_svc)
SECTION_FUNC(TEXT, z_arm_cortex_r_svc)
svc #_SVC_CALL_CONTEXT_SWITCH
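
The register convention documented at _do_syscall above is what user-side
syscall stubs must follow: arguments in r0-r5, call ID in r6, return value
back in r0. A hedged C sketch of a zero-argument invocation (Zephyr's real
stubs are generated from syscall metadata; the name below is illustrative):

    /* Sketch: user-mode trap following the documented convention. */
    static inline uintptr_t syscall_invoke0_sketch(uintptr_t call_id)
    {
            register uintptr_t ret __asm__("r0");
            register uintptr_t id __asm__("r6") = call_id;

            __asm__ volatile ("svc %[svid]"
                              : "=r" (ret)
                              : [svid] "i" (_SVC_CALL_SYSTEM_CALL), "r" (id)
                              : "memory");

            return ret;
    }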

View file

@@ -14,6 +14,10 @@
#include <arch/arm/aarch32/exc.h>
#if defined(CONFIG_CPU_CORTEX_R)
#include <arch/cpu.h>
#endif
_ASM_FILE_PROLOGUE
GTEXT(z_arm_userspace_enter)
@@ -55,7 +59,8 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
add r0, r0, r1
/* Restore r1 from ip */
mov r1, ip
- #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
+ #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
+ || defined(CONFIG_CPU_CORTEX_R)
ldr r0, [r0, #_thread_offset_to_priv_stack_start] /* priv stack ptr */
ldr ip, =CONFIG_PRIVILEGED_STACK_SIZE
add r0, r0, ip
@@ -67,6 +72,9 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
*/
mov ip, sp
#if defined(CONFIG_CPU_CORTEX_R)
mov sp, r0
#else
/* set stack to privileged stack
*
* Note [applies only when CONFIG_BUILTIN_STACK_GUARD is enabled]:
@@ -76,6 +84,7 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
* located in memory higher than the default (user) thread stack.
*/
msr PSP, r0
#endif
#if defined(CONFIG_BUILTIN_STACK_GUARD)
/* At this point the privileged stack is not yet protected by PSPLIM.
@@ -96,7 +105,8 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
mov r1, ip
push {r0,r1}
- #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
+ #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
+ || defined(CONFIG_CPU_CORTEX_R)
push {r0,ip}
#endif
@@ -127,7 +137,8 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
mov ip, r3
push {r0,r3}
- #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
+ #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
+ || defined(CONFIG_CPU_CORTEX_R)
pop {r0,ip}
/* load up stack info from user stack */
@@ -143,14 +154,15 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
#ifdef CONFIG_INIT_STACKS
ldr r1,=0xaaaaaaaa
#else
- eors.n r1, r1
+ eors r1, r1
#endif
bl memset
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
pop {r0, r1}
mov ip, r1
- #elif (defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE))
+ #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
+ || defined(CONFIG_CPU_CORTEX_R)
pop {r0,ip}
#endif
@@ -164,10 +176,18 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
pop {r1,r2,r3,r4}
mov lr, r4
mov r4, ip
- #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
+ #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
+ || defined(CONFIG_CPU_CORTEX_R)
pop {r1,r2,r3,lr}
#endif
#if defined(CONFIG_CPU_CORTEX_R)
/*
* set stack to user stack. We are in SYSTEM state, so r13 and r14 are
* shared with USER state
*/
mov sp, r0
#else
#if defined(CONFIG_BUILTIN_STACK_GUARD)
/*
* Guard the default (user) stack until thread drops privileges.
@@ -206,6 +226,7 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
/* set stack to user stack */
msr PSP, r0
#endif
#if defined(CONFIG_BUILTIN_STACK_GUARD)
/* Restore interrupt lock status */
@@ -216,6 +237,10 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
/* restore r0 */
mov r0, lr
#if defined(CONFIG_CPU_CORTEX_R)
/* change processor mode to unprivileged, with all interrupts enabled. */
msr CPSR_c, #MODE_USR
#else
/* change processor mode to unprivileged */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
push {r0, r1, r2, r3}
@@ -244,6 +269,7 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
#endif
dsb
msr CONTROL, ip
#endif
/* ISB is not strictly necessary here (stack pointer is not being
* touched), but it's recommended to avoid executing pre-fetched
@@ -262,7 +288,8 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
ldr r0, =z_thread_entry
mov ip, r0
pop {r0, r1}
- #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
+ #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
+ || defined(CONFIG_CPU_CORTEX_R)
ldr ip, =z_thread_entry
#endif
bx ip
@@ -317,8 +344,8 @@ SECTION_FUNC(TEXT, z_arm_do_syscall)
/* Restore user stack and original r0, r1 */
pop {r0, r1}
- #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
+ #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
+ || defined(CONFIG_CPU_CORTEX_R)
/* setup privileged stack */
ldr ip, =_kernel
ldr ip, [ip, #_kernel_offset_to_current]
@@ -332,7 +359,11 @@ SECTION_FUNC(TEXT, z_arm_do_syscall)
#endif
/* switch to privileged stack */
#if defined(CONFIG_CPU_CORTEX_R)
mov sp, ip
#else
msr PSP, ip
#endif
/* Note (applies when using stack limit checking):
* We do not need to lock IRQs after switching PSP to the privileged stack;
@@ -404,7 +435,8 @@ dispatch_syscall:
/* Restore r0 */
mov r0, ip
- #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
+ #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
+ || defined(CONFIG_CPU_CORTEX_R)
ldr ip, =K_SYSCALL_BAD
cmp r6, ip
bne valid_syscall
@@ -425,9 +457,29 @@ dispatch_syscall:
lsl r6, #2
add ip, r6
ldr ip, [ip] /* load table address */
#if defined(CONFIG_CPU_CORTEX_R)
/*
* We can only be in this system call handling code if interrupts were
* enabled. This is because we would only come down this path if we were
* actively running in user state, and user state CANNOT disable external
* interrupts via irq_lock(). We want external interrupts enabled while
* running the system call handler, so we can blindly enable them now, and
* disable them afterwards.
*/
cpsie i
#endif
/* execute function from dispatch table */
blx ip
#if defined(CONFIG_CPU_CORTEX_R)
/*
* For the same reasoning as above, we now disable external interrupts.
*/
cpsid i
#endif
/* restore LR */
ldr lr, [sp,#16]
#endif
@@ -475,9 +527,15 @@ dispatch_syscall:
/* Restore r0 */
mov r0, ip
- #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
+ #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
+ || defined(CONFIG_CPU_CORTEX_R)
/* set stack back to unprivileged stack */
ldr ip, [sp,#12]
#endif
#if defined(CONFIG_CPU_CORTEX_R)
mov sp, ip
#else
msr PSP, ip
#endif
@@ -525,6 +583,31 @@ dispatch_syscall:
isb
pop {r0, r1}
#if defined(CONFIG_CPU_CORTEX_R)
/*
* Re-load items from user stack that were saved in swap_helper.S. We
* don't need the values of R0-R3, so just adjust the stack pointer. We
* do need the old r12 and lr values.
*/
pop {r1} /* r1 = spsr_svc */
add sp, sp, #(4*4)
ldmia sp!, {r2-r3}
cps #MODE_SVC
/*
* Restore lr_svc stored into the SVC mode stack by the mode entry
* function. This ensures that the return address of the interrupted
* context is preserved in case of interrupt nesting.
*/
pop {lr}
/* restore the spsr_svc register */
msr spsr_fsxc,r1
mov r12, r2
mov lr, r3
#endif
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* Zero out volatile (caller-saved) registers so as to not leak state from
* kernel mode. The C calling convention for the syscall handler will
@@ -568,7 +651,26 @@ dispatch_syscall:
orrs ip, ip, #1
#endif
#if defined(CONFIG_CPU_CORTEX_R)
/* Zero out volatile (caller-saved) registers so as to not leak state from
* kernel mode. The C calling convention for the syscall handler will
* restore the others to original values.
*/
mov r1, #0
mov r2, #0
mov r3, #0
/*
* return from SVC state to user state. SRSDB was used to save state
* in swap_helper.S. Change to sys mode so that we can recover those
* values from the user stack
*/
cps #MODE_SYS
rfeia sp!
#else
bx ip
#endif
/*
@@ -578,7 +680,8 @@ SECTION_FUNC(TEXT, arch_user_string_nlen)
push {r0, r1, r2, r4, r5, lr}
/* sp+4 is error value, init to -1 */
- #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
+ #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) \
+ || defined(CONFIG_ARMV7_R)
ldr r3, =-1
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
mov.w r3, #-1
@@ -594,6 +697,16 @@ z_arm_user_string_nlen_fault_start:
ldrb r5, [r0, r3]
z_arm_user_string_nlen_fault_end:
#if defined(CONFIG_CPU_CORTEX_R)
cmp r5, #0
beq strlen_done
cmp r3, r1
beq strlen_done
adds r3, #1
b strlen_loop
#else
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
cmp r5, #0
beq strlen_done
@@ -605,6 +718,7 @@ z_arm_user_string_nlen_fault_end:
adds r3, #1
b.n strlen_loop
#endif
strlen_done:
/* Move length calculation from r3 to r0 (return value register) */

View file

@@ -7,6 +7,15 @@
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_AARCH32_CORTEX_A_R_CPU_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_AARCH32_CORTEX_A_R_CPU_H_
#if defined(CONFIG_ARM_MPU)
#include <arch/arm/aarch32/cortex_a_r/mpu.h>
#endif
/*
* SCTRL register bit assignments
*/
#define SCTRL_MPU_ENABLE (1 << 0)
#define MODE_USR 0x10
#define MODE_FIQ 0x11
#define MODE_IRQ 0x12
@@ -31,4 +40,10 @@
#define FPEXC_EN (1 << 30)
#define DFSR_DOMAIN_SHIFT (4)
#define DFSR_DOMAIN_MASK (0xf)
#define DFSR_FAULT_4_MASK (1 << 10)
#define DFSR_WRITE_MASK (1 << 11)
#define DFSR_AXI_SLAVE_MASK (1 << 12)
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_AARCH32_CORTEX_A_R_CPU_H_ */
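
The new DFSR masks support data-abort decoding in the Cortex-R fault
handler. A hedged sketch of how they combine, assuming the ARMv7-R layout
in which the fault status is split across FS[3:0] (bits 3:0) and FS[4]
(bit 10), with the write/not-read bit at bit 11:

    /* Sketch: extract the 5-bit fault status and access type from DFSR. */
    static inline uint32_t dfsr_fault_status_sketch(uint32_t dfsr)
    {
            uint32_t fs = dfsr & 0xf;                 /* FS[3:0] */

            if (dfsr & DFSR_FAULT_4_MASK) {
                    fs |= (1U << 4);                  /* FS[4] */
            }
            return fs;
    }

    static inline int dfsr_is_write_sketch(uint32_t dfsr)
    {
            return (dfsr & DFSR_WRITE_MASK) != 0U;    /* WnR */
    }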

View file

@@ -0,0 +1,67 @@
/* SPDX-License-Identifier: Apache-2.0
*
* Copyright (c) 2019 Lexmark International, Inc.
*/
#ifndef ARCH_ARM_CORTEX_R_MPU_H
#define ARCH_ARM_CORTEX_R_MPU_H 1
#define MPU_RBAR_ADDR_Msk (~0x1f)
#define MPU_RASR_ENABLE_Msk (1)
#define MPU_RASR_SIZE_Pos 1U
#define MPU_RASR_SIZE_Msk (0x1FUL << MPU_RASR_SIZE_Pos)
#define MPU_TYPE_DREGION_Pos 8U
#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos)
#define MPU_RASR_XN_Pos 12
#define MPU_RASR_XN_Msk (1UL << MPU_RASR_XN_Pos)
#define MPU_RASR_AP_Pos 8
#define MPU_RASR_AP_Msk (0x7UL << MPU_RASR_AP_Pos)
#define MPU_RASR_TEX_Pos 3
#define MPU_RASR_TEX_Msk (0x7UL << MPU_RASR_TEX_Pos)
#define MPU_RASR_S_Pos 2
#define MPU_RASR_S_Msk (1UL << MPU_RASR_S_Pos)
#define MPU_RASR_C_Pos 1
#define MPU_RASR_C_Msk (1UL << MPU_RASR_C_Pos)
#define MPU_RASR_B_Pos 0
#define MPU_RASR_B_Msk (1UL << MPU_RASR_B_Pos)
#if defined(CONFIG_CPU_CORTEX_R4) || defined(CONFIG_CPU_CORTEX_R5)
#define ARM_MPU_REGION_SIZE_32B ((uint8_t)0x04U)
#define ARM_MPU_REGION_SIZE_64B ((uint8_t)0x05U)
#define ARM_MPU_REGION_SIZE_128B ((uint8_t)0x06U)
#endif
#define ARM_MPU_REGION_SIZE_256B ((uint8_t)0x07U)
#define ARM_MPU_REGION_SIZE_512B ((uint8_t)0x08U)
#define ARM_MPU_REGION_SIZE_1KB ((uint8_t)0x09U)
#define ARM_MPU_REGION_SIZE_2KB ((uint8_t)0x0aU)
#define ARM_MPU_REGION_SIZE_4KB ((uint8_t)0x0bU)
#define ARM_MPU_REGION_SIZE_8KB ((uint8_t)0x0cU)
#define ARM_MPU_REGION_SIZE_16KB ((uint8_t)0x0dU)
#define ARM_MPU_REGION_SIZE_32KB ((uint8_t)0x0eU)
#define ARM_MPU_REGION_SIZE_64KB ((uint8_t)0x0fU)
#define ARM_MPU_REGION_SIZE_128KB ((uint8_t)0x10U)
#define ARM_MPU_REGION_SIZE_256KB ((uint8_t)0x11U)
#define ARM_MPU_REGION_SIZE_512KB ((uint8_t)0x12U)
#define ARM_MPU_REGION_SIZE_1MB ((uint8_t)0x13U)
#define ARM_MPU_REGION_SIZE_2MB ((uint8_t)0x14U)
#define ARM_MPU_REGION_SIZE_4MB ((uint8_t)0x15U)
#define ARM_MPU_REGION_SIZE_8MB ((uint8_t)0x16U)
#define ARM_MPU_REGION_SIZE_16MB ((uint8_t)0x17U)
#define ARM_MPU_REGION_SIZE_32MB ((uint8_t)0x18U)
#define ARM_MPU_REGION_SIZE_64MB ((uint8_t)0x19U)
#define ARM_MPU_REGION_SIZE_128MB ((uint8_t)0x1aU)
#define ARM_MPU_REGION_SIZE_256MB ((uint8_t)0x1bU)
#define ARM_MPU_REGION_SIZE_512MB ((uint8_t)0x1cU)
#define ARM_MPU_REGION_SIZE_1GB ((uint8_t)0x1dU)
#define ARM_MPU_REGION_SIZE_2GB ((uint8_t)0x1eU)
#define ARM_MPU_REGION_SIZE_4GB ((uint8_t)0x1fU)
#endif
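
Putting the field definitions together, a hedged example of a complete
RASR value for a 1 MB, full-access, non-executable, cacheable region
(the attribute choices are illustrative, not from any board):

    uint32_t rasr_example =
            ((uint32_t)ARM_MPU_REGION_SIZE_1MB << MPU_RASR_SIZE_Pos) |
            (0x3UL << MPU_RASR_AP_Pos) |    /* AP = 0b011: full access */
            MPU_RASR_XN_Msk |               /* no instruction fetch */
            MPU_RASR_C_Msk | MPU_RASR_B_Msk |
            MPU_RASR_ENABLE_Msk;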

View file

@@ -9,7 +9,8 @@
#if defined(CONFIG_CPU_CORTEX_M0PLUS) || \
defined(CONFIG_CPU_CORTEX_M3) || \
defined(CONFIG_CPU_CORTEX_M4) || \
- defined(CONFIG_CPU_CORTEX_M7)
+ defined(CONFIG_CPU_CORTEX_M7) || \
+ defined(CONFIG_CPU_CORTEX_R)
#include <arch/arm/aarch32/mpu/arm_mpu_v7m.h>
#elif defined(CONFIG_CPU_CORTEX_M23) || \
defined(CONFIG_CPU_CORTEX_M33) || \
@@ -27,6 +28,10 @@ struct arm_mpu_region {
uint32_t base;
/* Region Name */
const char *name;
#if defined(CONFIG_CPU_CORTEX_R)
/* Region Size */
uint32_t size;
#endif
/* Region Attributes */
arm_mpu_region_attr_t attr;
};
@@ -39,12 +44,22 @@ struct arm_mpu_config {
const struct arm_mpu_region *mpu_regions;
};
#if defined(CONFIG_CPU_CORTEX_R)
#define MPU_REGION_ENTRY(_name, _base, _size, _attr) \
{\
.name = _name, \
.base = _base, \
.size = _size, \
.attr = _attr, \
}
#else
#define MPU_REGION_ENTRY(_name, _base, _attr) \
{\
.name = _name, \
.base = _base, \
.attr = _attr, \
}
#endif
/* Reference to the MPU configuration.
*
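
A hedged usage sketch of the two MPU_REGION_ENTRY shapes above: on Cortex-R
the entry carries the RASR-encoded size, already shifted into the SIZE
field so that region_init() can OR in the enable bit, while Cortex-M
entries fold the size into the attributes. Values below are placeholders,
not from any real board:

    #if defined(CONFIG_CPU_CORTEX_R)
    static const struct arm_mpu_region mpu_regions[] = {
            MPU_REGION_ENTRY("SRAM_0", 0x00000000,
                             ((uint32_t)ARM_MPU_REGION_SIZE_1MB <<
                              MPU_RASR_SIZE_Pos),
                             { .rasr = (0x3UL << MPU_RASR_AP_Pos) |
                                       MPU_RASR_C_Msk | MPU_RASR_B_Msk }),
    };
    #endif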

View file

@@ -7,7 +7,9 @@
#ifndef _ASMLANGUAGE
#if defined(CONFIG_CPU_CORTEX_M)
#include <arch/arm/aarch32/cortex_m/cmsis.h>
#endif
/* Convenience macros to represent the ARMv7-M-specific
* configuration for memory access permission and