arch: arm: core: aarch32: Fix the syscall design for Cortex-R

When calling a syscall, the SVC routine will now elevate the thread to
privileged mode and exit the SVC, setting the return address to the
syscall handler.  When the thread is swapped back in, it will be running
z_arm_do_syscall in system mode.  That function will run the syscall and
then automatically return the thread to user mode.
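
For context, here is a minimal C sketch of a user-side stub that enters this
path. This is not the actual Zephyr stub: the stub shape and the
_SVC_CALL_SYSTEM_CALL value are assumptions for illustration. It follows the
register layout documented in the handler below, with arg1 in r0 and the call
ID in r6:

#include <stdint.h>

#define _SVC_CALL_SYSTEM_CALL 3 /* assumed SVC immediate for system calls */

/* Hypothetical one-argument invocation stub. The call ID travels in r6
 * (checked against K_SYSCALL_LIMIT by _do_syscall) and arg1 in r0; r8 is
 * listed as clobbered because the handler uses it for the saved return
 * address. */
static inline uintptr_t syscall_invoke1(uintptr_t arg1, uintptr_t call_id)
{
	register uintptr_t r0 __asm__("r0") = arg1;
	register uintptr_t r6 __asm__("r6") = call_id;

	__asm__ volatile("svc %[svid]"
			 : "+r" (r0)
			 : [svid] "i" (_SVC_CALL_SYSTEM_CALL), "r" (r6)
			 : "r8", "memory");

	return r0; /* syscall return value comes back in r0 */
}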

This allows the syscall to run in system mode on the thread itself, so
syscalls that sleep can be used without unnatural workarounds.  The
previous implementation enabled interrupts while still in the SVC call
and mishandled the nesting count: an interrupt could arrive while the
syscall was still in the exception state, after the nesting count had
already been decremented.  Correctness of the nesting count is important
for future floating point unit work.
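
The canonical example of such a sleeping syscall, also cited in the removed
comment below, is k_msgq_get() with K_FOREVER. A minimal sketch of the case
the new design handles cleanly (header path, queue parameters, and names are
illustrative):

#include <zephyr/kernel.h>

K_MSGQ_DEFINE(rx_msgq, sizeof(uint32_t), 4, 4);

/* User-mode thread entry point. With the new design the thread pends
 * inside the syscall in system mode, with no SVC exception state or
 * nesting-count adjustment held across the sleep. */
static void consumer(void *p1, void *p2, void *p3)
{
	uint32_t msg;

	for (;;) {
		k_msgq_get(&rx_msgq, &msg, K_FOREVER); /* may sleep indefinitely */
	}
}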

The Cortex-R behavior now matches that of Cortex-M.

Signed-off-by: Bradley Bolen <bbolen@lexmark.com>
Author: Bradley Bolen <bbolen@lexmark.com>
Date: 2021-10-28 16:25:36 -04:00
Committed by: Anas Nashif
Commit: 2a357e5dfd (parent 771c177909)
2 changed files with 56 additions and 137 deletions

Changed file 1 of 2:

@@ -646,6 +646,7 @@ svc_system_thread:
srsdb #MODE_SYS!
cps #MODE_SYS
push {r0-r3, r12, lr}
mov ip, sp
cps #MODE_SVC
/*
@@ -723,20 +724,14 @@ _oops:
#if defined(CONFIG_USERSPACE)
/*
* System call will setup a jump to the _do_arm_syscall function
* when the SVC returns via the bx lr.
* running in system mode when returning from the exception.
*
* There is some trickery involved here because we have to preserve
* the original PC value so that we can return back to the caller of
* the SVC.
*
* On SVC exception, the USER/SYSTEM stack looks like the following:
*
* sp+0: r0
* sp+4: r1
* sp+8: r2
* sp+12: r3
* sp+16: r12
* sp+20: LR_svc (address of opcode just following SVC opcode )
* r0 - r1 - r2 - r3 - r12 - LR - { possible FPU space } - PC - SPSR
*
* Registers look like:
* r0 - arg1
@@ -749,6 +744,26 @@ _oops:
* r8 - saved link register
*/
_do_syscall:
ldr r8, [ip, #24] /* grab address of LR from stack frame */
/* Make the exception return to system state */
ldr r1, [ip, #28]
/* If leaving thumb mode, set the return address to thumb mode */
tst r1, #T_BIT
orrne r8, #1
bic r1, #(MODE_MASK | T_BIT)
orr r1, r1, #MODE_SYS
str r1, [ip, #28]
/*
* Store the address of z_arm_do_syscall for the exit so the exception
* return goes there in system state.
*/
ldr r1, =z_arm_do_syscall
str r1, [ip, #24] /* overwrite the LR to point to z_arm_do_syscall */
/* validate syscall limit, only set priv mode if valid */
ldr ip, =K_SYSCALL_LIMIT
cmp r6, ip
@@ -761,7 +776,6 @@ _do_syscall:
ldr r6, =K_SYSCALL_BAD
valid_syscall_id:
push {r0, r1}
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
ldr r1, [r0, #_thread_offset_to_mode]
@@ -776,56 +790,8 @@ valid_syscall_id:
*/
isb
/*
* restore r0-r3 from supervisor stack before changing to system mode.
* r0,r1 saved just after valid_syscall_id
* r2,r3 saved just after z_arm_svc
*/
pop {r0-r3}
add sp,sp,r3 /* un-do stack pointer alignment to double-word boundary */
/* Switch to system mode */
cps #MODE_SYS
/*
* Restore the nested level. The thread that is doing the system call may
* be put to sleep, as in the case of waiting in k_msgq_get() with
* K_FOREVER, so we don't want the nesting level to be elevated during
* that complete time.
*/
ldr r2, =_kernel
ldr r1, [r2, #_kernel_offset_to_nested]
sub r1, r1, #1
str r1, [r2, #_kernel_offset_to_nested]
/*
* restore r0-r3 from stack since we've used them above during demux
*/
ldr r0, [sp, #0]
ldr r1, [sp, #4]
ldr r2, [sp, #8]
ldr r3, [sp, #12]
/*
* grab return address from USER/SYSTEM stack frame
* (just past the SVC opcode)
*/
ldr r8, [sp, #20]
/*
* User stack left with:
*
* sp: r0
* sp+4: r1
* sp+8: r2
* sp+12: r3
* sp+16: r12
* sp+20: LR_svc (address of opcode just following SVC opcode )
*/
/* branch to _arm_do_syscall. We will not return here. */
b z_arm_do_syscall
/* Return to _arm_do_syscall in system state. */
b z_arm_int_exit
#endif
GTEXT(z_arm_cortex_r_svc)
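
To make the [ip, #24] and [ip, #28] accesses in _do_syscall above easier to
follow, here is a hypothetical C view of the exception frame that
svc_system_thread builds with srsdb #MODE_SYS! plus push {r0-r3, r12, lr}.
The struct and helper names are illustrative, the constants are the
architectural CPSR encodings, and the offsets assume no FPU space is saved:

#include <stdint.h>

#define MODE_MASK 0x1f /* CPSR mode field M[4:0] */
#define MODE_SYS  0x1f /* System mode encoding */
#define T_BIT     0x20 /* Thumb state bit in the (S)PSR */

extern void z_arm_do_syscall(void);

/* Frame laid out on the SYS stack; ip points at r0, so pc sits at
 * offset 24 and spsr at offset 28. */
struct svc_frame {
	uint32_t r0, r1, r2, r3, r12;
	uint32_t lr;   /* caller's LR */
	uint32_t pc;   /* return address: the instruction after the SVC */
	uint32_t spsr; /* saved program status of the caller */
};

/* What the assembly does: remember the real return address (keeping the
 * Thumb bit for the later bx), then retarget the exception return so it
 * resumes in z_arm_do_syscall in system mode. */
static uint32_t redirect_to_syscall_handler(struct svc_frame *f)
{
	uint32_t ret = f->pc;

	if (f->spsr & T_BIT) {
		ret |= 1; /* bx to an odd address re-enters Thumb state */
	}
	f->spsr = (f->spsr & ~(MODE_MASK | T_BIT)) | MODE_SYS;
	f->pc = (uint32_t)z_arm_do_syscall;

	return ret; /* the assembly keeps this in r8 */
}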

Changed file 2 of 2:

@@ -351,7 +351,7 @@ SECTION_FUNC(TEXT, z_arm_do_syscall)
/* Restore user stack and original r0, r1 */
pop {r0, r1}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
/* setup privileged stack */
ldr ip, =_kernel
ldr ip, [ip, #_kernel_offset_to_current]
@@ -362,16 +362,19 @@ SECTION_FUNC(TEXT, z_arm_do_syscall)
subs ip, #8
str sp, [ip, #0]
str lr, [ip, #4]
#elif defined(CONFIG_CPU_CORTEX_R)
/* Store current LR at the beginning of the priv stack */
push {lr}
#elif defined(CONFIG_ARMV7_R)
/*
* The SVC handler has already switched to the privileged stack.
* Store the user SP and LR at the beginning of the priv stack.
*/
ldr ip, =_kernel
ldr ip, [ip, #_kernel_offset_to_current]
ldr ip, [ip, #_thread_offset_to_sp_usr]
push {ip, lr}
#endif
#if !defined(CONFIG_CPU_CORTEX_R)
/*
* switch to privileged stack
* The stack switch happens on exception entry for Cortex-R
*/
#if !defined(CONFIG_ARMV7_R)
/* switch to privileged stack */
msr PSP, ip
#endif
@@ -446,7 +449,7 @@ dispatch_syscall:
mov r0, ip
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
|| defined(CONFIG_CPU_CORTEX_R)
|| defined(CONFIG_ARMV7_R)
ldr ip, =K_SYSCALL_BAD
cmp r6, ip
bne valid_syscall
@@ -454,11 +457,7 @@ dispatch_syscall:
/* BAD SYSCALL path */
/* fixup stack frame on the privileged stack, adding ssf */
mov ip, sp
#if defined(CONFIG_CPU_CORTEX_R)
push {r4,r5,ip}
#else
push {r4,r5,ip,lr}
#endif
b dispatch_syscall
valid_syscall:
@@ -472,36 +471,13 @@ dispatch_syscall:
add ip, r6
ldr ip, [ip] /* load table address */
#if defined(CONFIG_CPU_CORTEX_R)
/*
* We can only be in this system call handling code if interrupts were
* enabled. This is because we would only come down this path if we were
* actively running in user state, and user state CANNOT disable external
* interrupts via irq_lock(). We want external interrupts enabled while
* running the system call handler, so we can blindly enable them now, and
* disable them afterwards.
*/
cpsie i
#endif
/* execute function from dispatch table */
blx ip
#if defined(CONFIG_CPU_CORTEX_R)
/*
* for same reasoning as above: we now disable external interrupts.
*/
cpsid i
/* restore LR */
ldr lr, [sp,#12]
#else
/* restore LR */
ldr lr, [sp,#16]
#endif
#endif
#if defined(CONFIG_BUILTIN_STACK_GUARD)
/*
@@ -545,9 +521,12 @@ dispatch_syscall:
/* Restore r0 */
mov r0, ip
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
/* set stack back to unprivileged stack */
ldr ip, [sp,#12]
#endif
#if !defined(CONFIG_ARMV7_R)
msr PSP, ip
#endif
@@ -574,7 +553,8 @@ dispatch_syscall:
orrs r2, r2, r3
msr CONTROL, r2
pop {r2, r3}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
|| defined(CONFIG_CPU_CORTEX_R)
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
ldr r1, [r0, #_thread_offset_to_mode]
@@ -582,10 +562,12 @@ dispatch_syscall:
/* Store (unprivileged) mode in thread's mode state variable */
str r1, [r0, #_thread_offset_to_mode]
dsb
#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
/* drop privileges by setting bit 0 in CONTROL */
mrs ip, CONTROL
orrs ip, ip, #1
msr CONTROL, ip
#endif
#endif
/* ISB is not strictly necessary here (stack pointer is not being
@@ -636,42 +618,10 @@ dispatch_syscall:
*/
mov ip, r8
orrs ip, ip, #1
#endif
#if defined(CONFIG_CPU_CORTEX_R)
/*
* The stack contains (from top)
* spsr lr lr_svc r12 r3 r2 r1 r0 lr sp r5 r4
* Unwind everything except the return state that will be used for rfeia.
*/
add sp, sp, #(8*4)
ldmia sp!, {r12,lr}
pop {r2, r3}
cps #MODE_SVC
/*
* Restore lr_svc stored into the SVC mode stack by the mode entry
* function. This ensures that the return address of the interrupted
* context is preserved in case of interrupt nesting.
*/
pop {lr}
/*
* Move the return state from the privileged stack to the service
* stack. We need to put the user stack back in $sp, but we cannot
* trust the user stack. Therefore, put the return state on the svc
* stack and return from there.
*/
push {r2, r3}
#elif defined(CONFIG_ARMV7_R)
/* Restore user stack pointer */
ldr r1, =_kernel
ldr r1, [r1, #_kernel_offset_to_current]
cps #MODE_SYS
ldr sp, [r1, #_thread_offset_to_sp_usr] /* sp_usr */
cps #MODE_SVC
ldr ip, [sp,#12]
mov sp, ip
/* Zero out volatile (caller-saved) registers so as to not leak state from
* kernel mode. The C calling convention for the syscall handler will
@@ -681,12 +631,15 @@ dispatch_syscall:
mov r2, #0
mov r3, #0
/* return from SVC state to user state. */
rfeia sp!
#else
bx ip
/*
* return back to original function that called SVC
*/
mov ip, r8
cps #MODE_USR
#endif
bx ip
/*
* size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg)