arch: arm: cortex_a_r: introduce USE_SWITCH

This commit introduces the 'USE_SWITCH' feature to the Cortex-A/R
(AArch32) architecture.

To introduce USE_SWITCH, exception entry and exit are unified via
`z_arm_cortex_ar_enter_exc` and `z_arm_cortex_ar_exit_exc`. All
exceptions, including interrupts, enter and exit their handlers through
this common path.

Exception depth and interrupt depth are now tracked separately: a
context switch is allowed when the exception depth is greater than 1,
but not when the interrupt depth is greater than 1.
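
A minimal C sketch of this policy (illustrative only; `nested` and
`exc_depth` are the per-CPU counters added by this patch):

```c
#include <stdbool.h>
#include <stdint.h>

/* Illustrative mirror of the checks the assembly below performs. */
struct cpu_counters {
	uint32_t nested;  /* interrupt depth (___cpu_t_nested_OFFSET) */
	int8_t exc_depth; /* exception depth (_cpu_offset_to_exc_depth) */
};

static bool may_context_switch(const struct cpu_counters *cpu)
{
	/* Threads always switch from inside an exception, so an
	 * exception depth greater than 1 is acceptable; a nested
	 * interrupt must fully unwind before any switch happens.
	 */
	return cpu->nested <= 1;
}
```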

Currently, USE_SWITCH doesn't support FPU_SHARING or USERSPACE.

Signed-off-by: Huifeng Zhang <Huifeng.Zhang@arm.com>
Huifeng Zhang 2023-07-18 13:22:52 +08:00 committed by Maureen Helm
parent 87dd43766d
commit abde709b5e
17 changed files with 504 additions and 9 deletions


@@ -34,11 +34,12 @@ config CPU_AARCH32_CORTEX_R
select HAS_CMSIS_CORE
select ARCH_HAS_NESTED_EXCEPTION_DETECTION
select HAS_FLASH_LOAD_OFFSET
select ARCH_HAS_USERSPACE if ARM_MPU
select ARCH_HAS_EXTRA_EXCEPTION_INFO
select ARCH_HAS_USERSPACE if ARM_MPU && !USE_SWITCH
select ARCH_HAS_EXTRA_EXCEPTION_INFO if !USE_SWITCH
select ARCH_HAS_CODE_DATA_RELOCATION
select ARCH_HAS_NOCACHE_MEMORY_SUPPORT if ARM_MPU && CPU_HAS_ARM_MPU && CPU_HAS_DCACHE
select ARCH_SUPPORTS_ROM_START
select USE_SWITCH_SUPPORTED
help
This option signifies the use of a CPU of the Cortex-R family.
@@ -54,8 +55,9 @@ config CPU_AARCH32_CORTEX_A
select CPU_HAS_MMU
select HAS_CMSIS_CORE
select HAS_FLASH_LOAD_OFFSET
select ARCH_HAS_EXTRA_EXCEPTION_INFO
select ARCH_HAS_EXTRA_EXCEPTION_INFO if !USE_SWITCH
select ARCH_HAS_NOCACHE_MEMORY_SUPPORT
select USE_SWITCH_SUPPORTED
help
This option signifies the use of a CPU of the Cortex-A family.


@@ -4,7 +4,6 @@ zephyr_library()
zephyr_library_sources(
exc.S
exc_exit.S
fault.c
irq_init.c
reboot.c
@@ -12,8 +11,6 @@ zephyr_library_sources(
stacks.c
tcm.c
vector_table.S
swap.c
swap_helper.S
irq_manage.c
prep_c.c
thread.c
@@ -25,3 +22,5 @@ zephyr_library_sources_ifdef(CONFIG_USERSPACE thread.c)
zephyr_library_sources_ifdef(CONFIG_SEMIHOST semihost.c)
zephyr_library_sources_ifdef(CONFIG_THREAD_LOCAL_STORAGE __aeabi_read_tp.S)
zephyr_library_sources_ifdef(CONFIG_ARCH_CACHE cache.c)
zephyr_library_sources_ifdef(CONFIG_USE_SWITCH switch.S)
zephyr_library_sources_ifndef(CONFIG_USE_SWITCH swap.c swap_helper.S exc_exit.S)


@@ -99,7 +99,7 @@ config CPU_CORTEX_R52
select AARCH32_ARMV8_R
select CPU_HAS_ICACHE
select CPU_HAS_DCACHE
select VFP_SP_D16
select VFP_SP_D16 if !USE_SWITCH
help
This option signifies the use of a Cortex-R52 CPU


@@ -42,6 +42,8 @@ GTEXT(z_arm_undef_instruction)
GTEXT(z_arm_prefetch_abort)
GTEXT(z_arm_data_abort)
#ifndef CONFIG_USE_SWITCH
.macro exception_entry mode
/*
* Store r0-r3, r12, lr, lr_und and spsr_und into the stack to
@@ -233,3 +235,59 @@ SECTION_SUBSEC_FUNC(TEXT, __exc, z_arm_data_abort)
streq r1, [sp, #24 + FPU_SF_SIZE]
b z_arm_exc_exit
#else
/**
* @brief Undefined instruction exception handler
*
* An undefined instruction (UNDEF) exception is generated when an undefined
* instruction, or a VFP instruction when the VFP is not enabled, is
* encountered.
*/
SECTION_SUBSEC_FUNC(TEXT, __exc, z_arm_undef_instruction)
/*
* The undefined instruction address is offset by 2 if the previous
* mode is Thumb; otherwise, it is offset by 4.
*/
push {r0}
mrs r0, spsr
tst r0, #T_BIT
subeq lr, #4 /* ARM (!T_BIT) */
subne lr, #2 /* Thumb (T_BIT) */
pop {r0}
z_arm_cortex_ar_enter_exc
bl z_arm_fault_undef_instruction
b z_arm_cortex_ar_exit_exc
/**
* @brief Prefetch abort exception handler
*
* A prefetch abort (PABT) exception is generated when the processor marks the
* prefetched instruction as invalid and the instruction is executed.
*/
SECTION_SUBSEC_FUNC(TEXT, __exc, z_arm_prefetch_abort)
/*
* The faulting instruction address is always offset by 4 for the
* prefetch abort exceptions.
*/
sub lr, #4
z_arm_cortex_ar_enter_exc
bl z_arm_fault_prefetch
b z_arm_cortex_ar_exit_exc
/**
* @brief Data abort exception handler
*
* A data abort (DABT) exception is generated when an error occurs on a data
* memory access. This exception can be either synchronous or asynchronous,
* depending on the type of fault that caused it.
*/
SECTION_SUBSEC_FUNC(TEXT, __exc, z_arm_data_abort)
sub lr, #8
z_arm_cortex_ar_enter_exc
bl z_arm_fault_data
b z_arm_cortex_ar_exit_exc
#endif
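
For reference, the `lr` adjustments in these handlers compute the
faulting address as in this C sketch (hypothetical helper; `T_BIT` is
the Thumb state bit the UNDEF handler tests in the SPSR):

```c
#include <stdint.h>

#define T_BIT (1U << 5) /* Thumb state bit in the SPSR */

/* Hypothetical helper: how far lr sits past the faulting instruction. */
static uint32_t fault_address(uint32_t lr, uint32_t spsr, int exception)
{
	switch (exception) {
	case 0:  /* UNDEF: 2 in Thumb state, 4 in ARM state */
		return (spsr & T_BIT) ? (lr - 2) : (lr - 4);
	case 1:  /* prefetch abort: always 4 */
		return lr - 4;
	default: /* data abort: always 8 */
		return lr - 8;
	}
}
```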


@@ -32,6 +32,7 @@ GDATA(_sw_isr_table)
GTEXT(_isr_wrapper)
GTEXT(z_arm_int_exit)
#ifndef CONFIG_USE_SWITCH
/**
*
* @brief Wrapper around ISRs when inserted in software ISR table
@@ -228,3 +229,145 @@ spurious_continue:
* z_arm_int_exit() */
ldr r1, =z_arm_int_exit
bx r1
#else
/**
*
* @brief Wrapper around ISRs when inserted in software ISR table
*
* When inserted in the vector table, _isr_wrapper() demuxes the ISR table
* using the running interrupt number as the index, and invokes the registered
* ISR with its corresponding argument. When returning from the ISR, it
* determines if a context switch needs to happen and invokes the arch_switch
* function if so.
*
*/
SECTION_FUNC(TEXT, _isr_wrapper)
sub lr, #4
z_arm_cortex_ar_enter_exc
/* Increment interrupt nesting count */
get_cpu r2
ldr r0, [r2, #___cpu_t_nested_OFFSET]
add r0, #1
str r0, [r2, #___cpu_t_nested_OFFSET]
/* If not nested: switch to IRQ stack and save current sp on it. */
cmp r0, #1
bhi 1f
mov r0, sp
cps #MODE_IRQ
push {r0}
1:
#ifdef CONFIG_TRACING_ISR
bl sys_trace_isr_enter
#endif /* CONFIG_TRACING_ISR */
#ifdef CONFIG_PM
/*
* All interrupts are disabled when handling idle wakeup. For tickless
* idle, this ensures that the calculation and programming of the
* device for the next timer deadline is not interrupted. For
* non-tickless idle, this ensures that the clearing of the kernel idle
* state is not interrupted. In each case, z_pm_save_idle_exit
* is called with interrupts disabled.
*/
/* is this a wakeup from idle ? */
ldr r2, =_kernel
/* requested idle duration, in ticks */
ldr r0, [r2, #_kernel_offset_to_idle]
cmp r0, #0
beq _idle_state_cleared
movs r1, #0
/* clear kernel idle state */
str r1, [r2, #_kernel_offset_to_idle]
bl z_pm_save_idle_exit
_idle_state_cleared:
#endif /* CONFIG_PM */
/* Get active IRQ number from the interrupt controller */
#if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
bl arm_gic_get_active
#else
bl z_soc_irq_get_active
#endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */
push {r0, r1}
lsl r0, r0, #3 /* table is 8-byte wide */
/*
* Skip calling the isr if it is a spurious interrupt.
*/
mov r1, #CONFIG_NUM_IRQS
lsl r1, r1, #3
cmp r0, r1
bge spurious_continue
ldr r1, =_sw_isr_table
add r1, r1, r0 /* table entry: ISRs must have their LSB set to stay
* in thumb mode */
ldm r1!,{r0,r3} /* arg in r0, ISR in r3 */
/*
* Re-enable interrupts around the ISR call so that interrupts can
* nest, then disable them again before exception handling continues.
*/
cpsie i
blx r3 /* call ISR */
cpsid i
spurious_continue:
/* Signal end-of-interrupt */
pop {r0, r1}
#if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
bl arm_gic_eoi
#else
bl z_soc_irq_eoi
#endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */
#ifdef CONFIG_TRACING_ISR
bl sys_trace_isr_exit
#endif
GTEXT(z_arm_cortex_ar_irq_done)
z_arm_cortex_ar_irq_done:
/* Decrement interrupt nesting count */
get_cpu r2
ldr r0, [r2, #___cpu_t_nested_OFFSET]
sub r0, r0, #1
str r0, [r2, #___cpu_t_nested_OFFSET]
/* Do not context switch if exiting a nested interrupt */
cmp r0, #0
bhi __EXIT_INT
/* retrieve pointer to the current thread */
pop {r0}
cps #MODE_SYS
mov sp, r0
ldr r1, [r2, #___cpu_t_current_OFFSET]
push {r1}
mov r0, #0
bl z_get_next_switch_handle
pop {r1}
cmp r0, #0
beq __EXIT_INT
/*
* Switch thread
* r0: new thread
* r1: old thread
*/
bl z_arm_context_switch
__EXIT_INT:
#ifdef CONFIG_STACK_SENTINEL
bl z_check_stack_sentinel
#endif /* CONFIG_STACK_SENTINEL */
b z_arm_cortex_ar_exit_exc
#endif
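
The table demux performed above corresponds to this C sketch: entries
in `_sw_isr_table` are 8 bytes wide (hence the `lsl r0, r0, #3`),
holding the ISR argument followed by the handler address:

```c
#include <stdint.h>

/* Entry layout assumed by the 8-byte table indexing above. */
struct _isr_table_entry {
	const void *arg;
	void (*isr)(const void *arg);
};

extern struct _isr_table_entry _sw_isr_table[];

static void demux_isr(uint32_t irq)
{
	if (irq < CONFIG_NUM_IRQS) { /* otherwise spurious: skip the call */
		_sw_isr_table[irq].isr(_sw_isr_table[irq].arg);
	}
}
```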


@@ -16,4 +16,24 @@
and \rreg0, #TPIDRURO_CURR_CPU
.endm
.macro z_arm_cortex_ar_enter_exc
/*
* Store r0-r3, r12, lr into the stack to construct an exception
* stack frame.
*/
srsdb sp!, #MODE_SYS
cps #MODE_SYS
stmdb sp, {r0-r3, r12, lr}^
sub sp, #24
/* TODO: EXTRA_EXCEPTION_INFO */
mov r0, sp
/* increment exception depth */
get_cpu r2
ldrb r1, [r2, #_cpu_offset_to_exc_depth]
add r1, r1, #1
strb r1, [r2, #_cpu_offset_to_exc_depth]
.endm
#endif /* _MACRO_PRIV_INC_ */
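
The frame that `z_arm_cortex_ar_enter_exc` builds can be pictured as
the following C struct (a sketch, lowest address first): `srsdb` first
pushes the exception return state onto the SYS-mode stack, then
`stmdb`/`sub sp, #24` place the scratch registers below it.
`z_arm_cortex_ar_exit_exc` pops the same frame in reverse.

```c
#include <stdint.h>

/* Sketch of the stack frame at the point where r0 (= sp) is handed
 * to the C fault handlers.
 */
struct exc_frame {
	uint32_t r0, r1, r2, r3, r12, lr; /* stmdb sp, {r0-r3, r12, lr}^ */
	uint32_t pc;                      /* return address, pushed by srsdb */
	uint32_t spsr;                    /* saved status, pushed by srsdb */
};
```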


@@ -0,0 +1,165 @@
/*
* Copyright (c) 2023 Arm Limited (or its affiliates). All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Thread context switching for ARM Cortex-A and Cortex-R (AArch32)
*
* This module implements the routines necessary for thread context switching
* on ARM Cortex-A and Cortex-R CPUs.
*/
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/arch/cpu.h>
#include <offsets_short.h>
#include <zephyr/kernel.h>
#include "macro_priv.inc"
_ASM_FILE_PROLOGUE
GTEXT(z_arm_svc)
GTEXT(z_arm_context_switch)
GTEXT(z_do_kernel_oops)
GTEXT(z_arm_do_syscall)
/*
* Routine to handle context switches
*
* This function is directly called either by _isr_wrapper() in case of
* preemption, or arch_switch() in case of cooperative switching.
*
* void z_arm_context_switch(struct k_thread *new, struct k_thread *old);
*/
SECTION_FUNC(TEXT, z_arm_context_switch)
ldr r2, =_thread_offset_to_callee_saved
add r2, r1, r2
stm r2, {r4-r11, sp, lr}
/* save current thread's exception depth */
get_cpu r2
ldrb r3, [r2, #_cpu_offset_to_exc_depth]
strb r3, [r1, #_thread_offset_to_exception_depth]
/* retrieve next thread's exception depth */
ldrb r3, [r0, #_thread_offset_to_exception_depth]
strb r3, [r2, #_cpu_offset_to_exc_depth]
/* save old thread into switch handle which is required by
* z_sched_switch_spin().
*
* Note that this step must be done after all relevant state is
* saved.
*/
dsb
str r1, [r1, #___thread_t_switch_handle_OFFSET]
#if defined(CONFIG_THREAD_LOCAL_STORAGE)
/* Grab the TLS pointer */
ldr r3, [r0, #_thread_offset_to_tls]
/* Store TLS pointer in the "Process ID" register.
* This register is used as a base pointer to all
* thread variables with offsets added by toolchain.
*/
mcr 15, 0, r3, c13, c0, 2
#endif
ldr r2, =_thread_offset_to_callee_saved
add r2, r0, r2
ldm r2, {r4-r11, sp, lr}
#if defined (CONFIG_ARM_MPU)
/* Re-program dynamic memory map */
push {r0, lr}
bl z_arm_configure_dynamic_mpu_regions
pop {r0, lr}
#endif
#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
push {lr}
bl z_thread_mark_switched_in
pop {lr}
#endif
bx lr
/**
*
* @brief Service call handler
*
* The service call (svc) is used in the following occasions:
* - Cooperative context switching
* - IRQ offloading
* - Kernel run-time exceptions
*
*/
SECTION_FUNC(TEXT, z_arm_svc)
z_arm_cortex_ar_enter_exc
/* Get SVC number */
cps #MODE_SVC
mrs r0, spsr
tst r0, #0x20
ldreq r1, [lr, #-4]
biceq r1, #0xff000000
beq demux
ldr r1, [lr, #-2]
and r1, #0xff
/*
* grab service call number:
* TODO 0: context switch
* 1: irq_offload (if configured)
* 2: kernel panic or oops (software generated fatal exception)
* TODO 3: system calls for memory protection
*/
demux:
cps #MODE_SYS
cmp r1, #_SVC_CALL_RUNTIME_EXCEPT
beq _oops
#ifdef CONFIG_IRQ_OFFLOAD
cmp r1, #_SVC_CALL_IRQ_OFFLOAD
beq offload
b inv
offload:
get_cpu r2
ldr r3, [r2, #___cpu_t_nested_OFFSET]
add r3, r3, #1
str r3, [r2, #___cpu_t_nested_OFFSET]
/* If not nested: switch to IRQ stack and save current sp on it. */
cmp r3, #1
bhi 1f
mov r0, sp
cps #MODE_IRQ
push {r0}
1:
blx z_irq_do_offload
b z_arm_cortex_ar_irq_done
#endif
b inv
_oops:
/*
* Pass the exception frame to z_do_kernel_oops. r0 contains the
* exception reason.
*/
mov r0, sp
bl z_do_kernel_oops
inv:
mov r0, #0 /* K_ERR_CPU_EXCEPTION */
mov r1, sp
bl z_arm_fatal_error
/* Return here only in case of recoverable error */
b z_arm_cortex_ar_exit_exc
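
The SVC-number extraction at the top of `z_arm_svc` reads the immediate
back out of the trapping instruction, roughly as in this C sketch (bit
5 of the SPSR, tested via `#0x20` above, is the Thumb state bit):

```c
#include <stdint.h>

/* Sketch of the ldr/bic/and decoding in z_arm_svc. */
static uint32_t svc_number(uint32_t spsr, uintptr_t ret_addr)
{
	if (spsr & 0x20U) {
		/* Thumb: 16-bit 'svc #imm8' just before the return address */
		return *(const uint16_t *)(ret_addr - 2) & 0xffU;
	}
	/* ARM: 32-bit 'svc #imm24' just before the return address */
	return *(const uint32_t *)(ret_addr - 4) & 0x00ffffffU;
}
```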


@@ -125,6 +125,13 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
* initial values in all other registers/thread entries are
* irrelevant.
*/
#if defined(CONFIG_USE_SWITCH)
extern void z_arm_cortex_ar_exit_exc(void);
thread->switch_handle = thread;
/* thread birth happens through the exception return path */
thread->arch.exception_depth = 1;
thread->callee_saved.lr = (uint32_t)z_arm_cortex_ar_exit_exc;
#endif
}
#if defined(CONFIG_MPU_STACK_GUARD) && defined(CONFIG_FPU) \


@@ -13,6 +13,8 @@
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include "vector_table.h"
#include "offsets_short.h"
#include "macro_priv.inc"
_ASM_FILE_PROLOGUE
@@ -28,4 +30,28 @@ SECTION_SUBSEC_FUNC(exc_vector_table,_vector_table_section,_vector_table)
#else
ldr pc, =z_irq_spurious
#endif
#ifndef CONFIG_USE_SWITCH
ldr pc, =z_arm_nmi /* FIQ offset 0x1c */
#else
ldr pc, =z_irq_spurious
#endif
#ifdef CONFIG_USE_SWITCH
GTEXT(z_arm_cortex_ar_exit_exc)
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_arm_cortex_ar_exit_exc)
/* decrement exception depth */
get_cpu r2
ldrb r1, [r2, #_cpu_offset_to_exc_depth]
sub r1, r1, #1
strb r1, [r2, #_cpu_offset_to_exc_depth]
/*
* Restore r0-r3, r12 and lr from the exception stack, then return to
* the interrupted context via rfeia, which restores the pc and spsr
* saved on exception entry.
*/
ldmia sp, {r0-r3, r12, lr}^
add sp, #24
rfeia sp!
#endif


@@ -32,6 +32,11 @@
GEN_OFFSET_SYM(_thread_arch_t, basepri);
GEN_OFFSET_SYM(_thread_arch_t, swap_return_value);
#if defined(CONFIG_CPU_AARCH32_CORTEX_A) || defined(CONFIG_CPU_AARCH32_CORTEX_R)
GEN_OFFSET_SYM(_thread_arch_t, exception_depth);
GEN_OFFSET_SYM(_cpu_arch_t, exc_depth);
#endif
#if defined(CONFIG_ARM_STORE_EXC_RETURN) || defined(CONFIG_USERSPACE)
GEN_OFFSET_SYM(_thread_arch_t, mode);
#endif


@@ -40,7 +40,7 @@ static ALWAYS_INLINE bool arch_is_in_isr(void)
static ALWAYS_INLINE bool arch_is_in_nested_exception(const z_arch_esf_t *esf)
{
return (arch_curr_cpu()->nested > 1U) ? (true) : (false);
return (arch_curr_cpu()->arch.exc_depth > 1U) ? (true) : (false);
}
#if defined(CONFIG_USERSPACE)
@@ -54,7 +54,9 @@ static ALWAYS_INLINE bool z_arm_preempted_thread_in_user_mode(const z_arch_esf_t
}
#endif
#ifndef CONFIG_USE_SWITCH
extern void z_arm_cortex_r_svc(void);
#endif
#ifdef __cplusplus
}


@@ -30,12 +30,30 @@ static ALWAYS_INLINE void arch_kernel_init(void)
{
}
#ifndef CONFIG_USE_SWITCH
static ALWAYS_INLINE void
arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
{
thread->arch.swap_return_value = value;
}
#else
static ALWAYS_INLINE void arch_switch(void *switch_to, void **switched_from)
{
extern void z_arm_context_switch(struct k_thread *new,
struct k_thread *old);
struct k_thread *new = switch_to;
struct k_thread *old = CONTAINER_OF(switched_from, struct k_thread,
switch_handle);
z_arm_context_switch(new, old);
}
#endif
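
For context, the kernel's switch path drives this hook roughly as
follows (a simplified sketch of the generic USE_SWITCH contract, not
code from this patch):

```c
/* Simplified sketch: cooperative switch as driven by the scheduler. */
static inline void swap_to_next(struct k_thread *old_thread)
{
	void *new_handle = z_get_next_switch_handle(old_thread->switch_handle);

	if (new_handle != NULL) {
		/* Passing &old_thread->switch_handle lets the outgoing
		 * thread's handle be published only after its context is
		 * fully saved (see z_arm_context_switch above).
		 */
		arch_switch(new_handle, &old_thread->switch_handle);
	}
}
```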
extern FUNC_NORETURN void z_arm_userspace_enter(k_thread_entry_t user_entry,
void *p1, void *p2, void *p3,
uint32_t stack_end,


@@ -23,6 +23,14 @@
#define _thread_offset_to_preempt_float \
(___thread_t_arch_OFFSET + ___thread_arch_t_preempt_float_OFFSET)
#if defined(CONFIG_CPU_AARCH32_CORTEX_A) || defined(CONFIG_CPU_AARCH32_CORTEX_R)
#define _thread_offset_to_exception_depth \
(___thread_t_arch_OFFSET + ___thread_arch_t_exception_depth_OFFSET)
#define _cpu_offset_to_exc_depth \
(___cpu_t_arch_OFFSET + ___cpu_arch_t_exc_depth_OFFSET)
#endif
#if defined(CONFIG_USERSPACE) || defined(CONFIG_FPU_SHARING)
#define _thread_offset_to_mode \
(___thread_t_arch_OFFSET + ___thread_arch_t_mode_OFFSET)


@@ -0,0 +1,33 @@
/*
* Copyright (c) 2023 Arm Limited (or its affiliates). All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_INCLUDE_ARM_STRUCTS_H_
#define ZEPHYR_INCLUDE_ARM_STRUCTS_H_
#include <zephyr/types.h>
#if defined(CONFIG_CPU_AARCH32_CORTEX_A) || defined(CONFIG_CPU_AARCH32_CORTEX_R)
/* Per CPU architecture specifics */
struct _cpu_arch {
int8_t exc_depth;
};
#else
/* Default definitions when no architecture specific definitions exist. */
/* Per CPU architecture specifics (empty) */
struct _cpu_arch {
#ifdef __cplusplus
/* This struct will have a size 0 in C which is not allowed in C++ (it'll have a size 1). To
* prevent this, we add a 1 byte dummy variable.
*/
uint8_t dummy;
#endif
};
#endif
#endif /* ZEPHYR_INCLUDE_ARM_STRUCTS_H_ */


@@ -32,6 +32,9 @@ struct _callee_saved {
uint32_t v7; /* r10 */
uint32_t v8; /* r11 */
uint32_t psp; /* r13 */
#ifdef CONFIG_USE_SWITCH
uint32_t lr; /* lr */
#endif
};
typedef struct _callee_saved _callee_saved_t;
@@ -74,6 +77,10 @@ struct _thread_arch {
struct _preempt_float preempt_float;
#endif
#if defined(CONFIG_CPU_AARCH32_CORTEX_A) || defined(CONFIG_CPU_AARCH32_CORTEX_R)
int8_t exception_depth;
#endif
#if defined(CONFIG_ARM_STORE_EXC_RETURN) || defined(CONFIG_USERSPACE)
/*
* Status variable holding several thread status flags


@@ -27,6 +27,8 @@
#include <zephyr/arch/arm64/structs.h>
#elif defined(CONFIG_RISCV)
#include <zephyr/arch/riscv/structs.h>
#elif defined(CONFIG_ARM)
#include <zephyr/arch/arm/structs.h>
#else
/* Default definitions when no architecture specific definitions exist. */


@@ -10,7 +10,7 @@ config SOC_FVP_AEMV8R_AARCH32
select CPU_CORTEX_R52
select CPU_HAS_ARM_MPU
select CPU_HAS_MPU
select VFP_DP_D32_FP16_FMAC
select VFP_DP_D32_FP16_FMAC if !USE_SWITCH
select GIC_V3
select GIC_SINGLE_SECURITY_STATE
select PLATFORM_SPECIFIC_INIT