arch: arm: cortex_a_r: Hold curr_cpu instance in TPIDRURO

Store the address of the current CPU's struct _cpu instance in
TPIDRURO so that a core can retrieve its own struct _cpu simply by
reading that register. This is useful on SMP systems, where per-CPU
data can no longer be reached through a hard-coded _kernel.cpus[0].

Signed-off-by: Huifeng Zhang <Huifeng.Zhang@arm.com>
Huifeng Zhang 2023-08-02 14:30:41 +08:00 committed by Maureen Helm
parent c3b857c434
commit 87dd43766d
11 changed files with 112 additions and 67 deletions
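
The mechanism the hunks below implement can be sketched in a few lines of C. This is an illustrative summary, not code from the commit: the CP15 encoding (c13/c0/3, i.e. TPIDRURO) and the TPIDRURO_CURR_CPU mask come from the diffs that follow, while the set_curr_cpu()/get_curr_cpu() helper names are hypothetical.

#include <stdint.h>

struct _cpu;    /* Zephyr's per-CPU bookkeeping structure */

/* The pointer is at least 4-byte aligned, so the low two bits are
 * masked off on read (they stay free for other uses; see the new
 * tpidruro.h header at the end of this commit). */
#define TPIDRURO_CURR_CPU 0xFFFFFFFCUL

/* Early in boot, a core stores the address of its struct _cpu
 * instance in TPIDRURO (hypothetical helper; the commit does this
 * in z_arm_prep_c() via write_tpidruro()). */
static inline void set_curr_cpu(struct _cpu *cpu)
{
        __asm__ volatile("mcr p15, 0, %0, c13, c0, 3"
                         : : "r" ((uintptr_t)cpu) : "memory");
}

/* Any later code on that core recovers its struct _cpu with a single
 * register read instead of hard-coding _kernel.cpus[0]. */
static inline struct _cpu *get_curr_cpu(void)
{
        uintptr_t val;

        __asm__ volatile("mrc p15, 0, %0, c13, c0, 3" : "=r" (val));
        return (struct _cpu *)(val & TPIDRURO_CURR_CPU);
}

TPIDRURO is writable only from privileged modes but readable from user mode, which makes it a safe place to publish the per-CPU pointer. The assembly-side equivalent of get_curr_cpu() is the new get_cpu macro in macro_priv.inc below.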

View file

@@ -27,6 +27,7 @@
#include <zephyr/linker/sections.h>
#include <offsets_short.h>
#include <zephyr/arch/cpu.h>
#include "macro_priv.inc"
_ASM_FILE_PROLOGUE
@@ -86,10 +87,10 @@ GTEXT(z_arm_data_abort)
#endif
/* Increment exception nesting count */
ldr r2, =_kernel
ldr r1, [r2, #_kernel_offset_to_nested]
get_cpu r2
ldr r1, [r2, #___cpu_t_nested_OFFSET]
add r1, r1, #1
str r1, [r2, #_kernel_offset_to_nested]
str r1, [r2, #___cpu_t_nested_OFFSET]
.endm
.macro exception_exit
@@ -128,10 +129,10 @@ SECTION_SUBSEC_FUNC(TEXT, __exc, z_arm_undef_instruction)
sub sp, #24
/* Increment exception nesting count */
ldr r2, =_kernel
ldr r1, [r2, #_kernel_offset_to_nested]
get_cpu r2
ldr r1, [r2, #___cpu_t_nested_OFFSET]
add r1, r1, #1
str r1, [r2, #_kernel_offset_to_nested]
str r1, [r2, #___cpu_t_nested_OFFSET]
#if defined(CONFIG_FPU_SHARING)
sub sp, #___fpu_t_SIZEOF

View file

@@ -18,6 +18,7 @@
#include <zephyr/linker/sections.h>
#include <offsets_short.h>
#include <zephyr/arch/cpu.h>
#include "macro_priv.inc"
_ASM_FILE_PROLOGUE
@@ -52,8 +53,8 @@ GDATA(_kernel)
bne system_thread_exit\@
/* Restore user stack pointer */
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
get_cpu r0
ldr r0, [r0, #___cpu_t_current_OFFSET]
cps #MODE_SYS
ldr sp, [r0, #_thread_offset_to_sp_usr] /* sp_usr */
cps #MODE_SVC
@@ -68,8 +69,8 @@ system_thread_exit\@:
* If the floating point context pointer is null, then a context was
* saved so restore the float context from the exception stack frame.
*/
ldr r2, =_kernel
ldr r1, [r2, #_kernel_offset_to_fp_ctx]
get_cpu r2
ldr r1, [r2, #___cpu_t_fp_ctx_OFFSET]
cmp r1, #0
beq vfp_restore\@
@@ -79,7 +80,7 @@ system_thread_exit\@:
*/
cmp r0, #0
moveq r1, #0
streq r1, [r2, #_kernel_offset_to_fp_ctx]
streq r1, [r2, #___cpu_t_fp_ctx_OFFSET]
b vfp_exit\@
vfp_restore\@:
@@ -140,23 +141,24 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_arm_int_exit)
#ifdef CONFIG_PREEMPT_ENABLED
/* Do not context switch if exiting a nested interrupt */
ldr r3, =_kernel
ldr r0, [r3, #_kernel_offset_to_nested]
get_cpu r3
ldr r0, [r3, #___cpu_t_nested_OFFSET]
cmp r0, #1
bhi __EXIT_INT
ldr r1, [r3, #_kernel_offset_to_current]
ldr r0, [r3, #_kernel_offset_to_ready_q_cache]
ldr r1, [r3, #___cpu_t_current_OFFSET]
ldr r2, =_kernel
ldr r0, [r2, #_kernel_offset_to_ready_q_cache]
cmp r0, r1
blne z_arm_do_swap
__EXIT_INT:
#endif /* CONFIG_PREEMPT_ENABLED */
/* Decrement interrupt nesting count */
ldr r2, =_kernel
ldr r0, [r2, #_kernel_offset_to_nested]
get_cpu r2
ldr r0, [r2, #___cpu_t_nested_OFFSET]
sub r0, r0, #1
str r0, [r2, #_kernel_offset_to_nested]
str r0, [r2, #___cpu_t_nested_OFFSET]
/* Restore previous stack pointer */
pop {r2, r3}
@@ -207,8 +209,8 @@ __EXIT_INT:
*/
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_arm_exc_exit)
/* Do not context switch if exiting a nested exception */
ldr r3, =_kernel
ldr r1, [r3, #_kernel_offset_to_nested]
get_cpu r3
ldr r1, [r3, #___cpu_t_nested_OFFSET]
cmp r1, #1
bhi __EXIT_EXC
@@ -239,10 +241,10 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_arm_exc_exit)
bl z_arm_do_swap
/* Decrement exception nesting count */
ldr r3, =_kernel
ldr r0, [r3, #_kernel_offset_to_nested]
get_cpu r3
ldr r0, [r3, #___cpu_t_nested_OFFSET]
sub r0, r0, #1
str r0, [r3, #_kernel_offset_to_nested]
str r0, [r3, #___cpu_t_nested_OFFSET]
/* Return to the switched thread */
cps #MODE_SYS
@@ -255,9 +257,9 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_arm_exc_exit)
__EXIT_EXC:
/* Decrement exception nesting count */
ldr r0, [r3, #_kernel_offset_to_nested]
ldr r0, [r3, #___cpu_t_nested_OFFSET]
sub r0, r0, #1
str r0, [r3, #_kernel_offset_to_nested]
str r0, [r3, #___cpu_t_nested_OFFSET]
#if defined(CONFIG_FPU_SHARING)
add sp, sp, #___fpu_t_SIZEOF

View file

@@ -147,7 +147,7 @@ bool z_arm_fault_undef_instruction_fp(void)
__set_FPEXC(FPEXC_EN);
if (_kernel.cpus[0].nested > 1) {
if (_current_cpu->nested > 1) {
/*
* If the nested count is greater than 1, the undefined
* instruction exception came from an irq/svc context. (The
@@ -155,12 +155,12 @@ bool z_arm_fault_undef_instruction_fp(void)
* the undef exception would increment it to 2).
*/
struct __fpu_sf *spill_esf =
(struct __fpu_sf *)_kernel.cpus[0].fp_ctx;
(struct __fpu_sf *)_current_cpu->fp_ctx;
if (spill_esf == NULL)
return false;
_kernel.cpus[0].fp_ctx = NULL;
_current_cpu->fp_ctx = NULL;
/*
* If the nested count is 2 and the current thread has used the
@@ -170,9 +170,9 @@ bool z_arm_fault_undef_instruction_fp(void)
* saved exception stack frame, then save the floating point
* context because it is about to be overwritten.
*/
if (((_kernel.cpus[0].nested == 2)
if (((_current_cpu->nested == 2)
&& (_current->base.user_options & K_FP_REGS))
|| ((_kernel.cpus[0].nested > 2)
|| ((_current_cpu->nested > 2)
&& (spill_esf->undefined & FPEXC_EN))) {
/*
* Spill VFP registers to specified exception stack

View file

@@ -22,6 +22,7 @@
#include <offsets_short.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/sw_isr_table.h>
#include "macro_priv.inc"
_ASM_FILE_PROLOGUE
@@ -57,8 +58,8 @@ SECTION_FUNC(TEXT, _isr_wrapper)
cmp r0, #MODE_USR
bne isr_system_thread
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
get_cpu r0
ldr r0, [r0, #___cpu_t_current_OFFSET]
/* Save away user stack pointer */
cps #MODE_SYS
@@ -108,10 +109,10 @@ _vfp_not_enabled:
* Mark where to store the floating context for the undefined
* instruction handler
*/
ldr r2, =_kernel
ldr r0, [r2, #_kernel_offset_to_fp_ctx]
get_cpu r2
ldr r0, [r2, #___cpu_t_fp_ctx_OFFSET]
cmp r0, #0
streq sp, [r2, #_kernel_offset_to_fp_ctx]
streq sp, [r2, #___cpu_t_fp_ctx_OFFSET]
#endif /* CONFIG_FPU_SHARING */
/*
@@ -139,10 +140,10 @@ _vfp_not_enabled:
push {r2, r3}
/* Increment interrupt nesting count */
ldr r2, =_kernel
ldr r0, [r2, #_kernel_offset_to_nested]
get_cpu r2
ldr r0, [r2, #___cpu_t_nested_OFFSET]
add r0, r0, #1
str r0, [r2, #_kernel_offset_to_nested]
str r0, [r2, #___cpu_t_nested_OFFSET]
#ifdef CONFIG_TRACING_ISR
bl sys_trace_isr_enter

View file

@@ -0,0 +1,19 @@
/*
* Copyright (c) 2023 Arm Limited (or its affiliates). All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _MACRO_PRIV_INC_
#define _MACRO_PRIV_INC_
#include <zephyr/arch/arm/cortex_a_r/tpidruro.h>
.macro get_cpu rreg0
/*
* Get CPU pointer.
*/
mrc p15, 0, \rreg0, c13, c0, 3
and \rreg0, #TPIDRURO_CURR_CPU
.endm
#endif /* _MACRO_PRIV_INC_ */

View file

@@ -147,6 +147,9 @@ extern FUNC_NORETURN void z_cstart(void);
*/
void z_arm_prep_c(void)
{
/* Initialize tpidruro with our struct _cpu instance address */
write_tpidruro((uintptr_t)&_kernel.cpus[0]);
relocate_vector_table();
#if defined(CONFIG_CPU_HAS_FPU)
z_arm_floating_point_init();

View file

@@ -20,6 +20,7 @@
#include <zephyr/arch/cpu.h>
#include <zephyr/syscall.h>
#include <zephyr/kernel.h>
#include "macro_priv.inc"
_ASM_FILE_PROLOGUE
@@ -49,9 +50,9 @@ SECTION_FUNC(TEXT, z_arm_do_swap)
pop {r0, lr}
#endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */
/* load _kernel into r1 and current k_thread into r2 */
ldr r1, =_kernel
ldr r2, [r1, #_kernel_offset_to_current]
/* load current _cpu into r1 and current k_thread into r2 */
get_cpu r1
ldr r2, [r1, #___cpu_t_current_OFFSET]
#if defined(CONFIG_ARM_STORE_EXC_RETURN)
/* Store LSB of LR (EXC_RETURN) to the thread's 'mode' word. */
@@ -81,7 +82,7 @@ SECTION_FUNC(TEXT, z_arm_do_swap)
* float registers have not been saved away, so write them to the
* exception stack frame.
*/
ldr r0, [r1, #_kernel_offset_to_fp_ctx]
ldr r0, [r1, #___cpu_t_fp_ctx_OFFSET]
cmp r0, #0
beq out_store_thread_context
@@ -106,13 +107,14 @@ out_fp_inactive:
* frame, so zero out the global pointer to note this.
*/
mov r0, #0
str r0, [r1, #_kernel_offset_to_fp_ctx]
str r0, [r1, #___cpu_t_fp_ctx_OFFSET]
#endif /* CONFIG_FPU_SHARING */
/* fetch the thread to run from the ready queue cache */
ldr r2, [r1, #_kernel_offset_to_ready_q_cache]
ldr r3, =_kernel
ldr r2, [r3, #_kernel_offset_to_ready_q_cache]
str r2, [r1, #_kernel_offset_to_current]
str r2, [r1, #___cpu_t_current_OFFSET]
#if defined(CONFIG_THREAD_LOCAL_STORAGE)
/* Grab the TLS pointer */
@@ -139,11 +141,6 @@ out_fp_inactive:
movs r3, #0
str r3, [r2, #_thread_offset_to_basepri]
_thread_irq_disabled:
/* load _kernel into r1 and current k_thread into r2 */
ldr r1, =_kernel
ldr r2, [r1, #_kernel_offset_to_current]
/* addr of callee-saved regs in thread in r0 */
ldr r0, =_thread_offset_to_callee_saved
add r0, r2
@@ -175,9 +172,9 @@ in_fp_inactive:
/* r2 contains k_thread */
mov r0, r2
/* Re-program dynamic memory map */
push {r2, lr}
push {r0, lr}
bl z_arm_configure_dynamic_mpu_regions
pop {r2, lr}
pop {r0, lr}
#endif
#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
@@ -217,8 +214,8 @@ SECTION_FUNC(TEXT, z_arm_svc)
cmp r0, #MODE_USR
bne svc_system_thread
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
get_cpu r0
ldr r0, [r0, #___cpu_t_current_OFFSET]
/* Save away user stack pointer */
cps #MODE_SYS
@@ -265,10 +262,10 @@ _vfp_not_enabled:
* Mark where to store the floating context for the undefined
* instruction handler
*/
ldr r2, =_kernel
ldr r0, [r2, #_kernel_offset_to_fp_ctx]
get_cpu r2
ldr r0, [r2, #___cpu_t_fp_ctx_OFFSET]
cmp r0, #0
streq sp, [r2, #_kernel_offset_to_fp_ctx]
streq sp, [r2, #___cpu_t_fp_ctx_OFFSET]
#endif /* CONFIG_FPU_SHARING */
mov ip, sp
@@ -282,15 +279,16 @@ _vfp_not_enabled:
push {lr}
/* Align stack at double-word boundary */
/* TODO: Question, why push {r2, r3} here */
and r3, sp, #4
sub sp, sp, r3
push {r2, r3}
/* Increment interrupt nesting count */
ldr r2, =_kernel
ldr r0, [r2, #_kernel_offset_to_nested]
get_cpu r2
ldr r0, [r2, #___cpu_t_nested_OFFSET]
add r0, r0, #1
str r0, [r2, #_kernel_offset_to_nested]
str r0, [r2, #___cpu_t_nested_OFFSET]
/* Get SVC number */
mrs r0, spsr
@@ -403,8 +401,8 @@ _do_syscall:
ldr r6, =K_SYSCALL_BAD
valid_syscall_id:
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
get_cpu r0
ldr r0, [r0, #___cpu_t_current_OFFSET]
ldr r1, [r0, #_thread_offset_to_mode]
bic r1, #1
/* Store (privileged) mode in thread's mode state variable */

View file

@@ -35,12 +35,12 @@ extern volatile irq_offload_routine_t offload_routine;
/* Check the CPSR mode bits to see if we are in IRQ or FIQ mode */
static ALWAYS_INLINE bool arch_is_in_isr(void)
{
return (_kernel.cpus[0].nested != 0U);
return (arch_curr_cpu()->nested != 0U);
}
static ALWAYS_INLINE bool arch_is_in_nested_exception(const z_arch_esf_t *esf)
{
return (_kernel.cpus[0].nested > 1U) ? (true) : (false);
return (arch_curr_cpu()->nested > 1U) ? (true) : (false);
}
#if defined(CONFIG_USERSPACE)

View file

@@ -8,14 +8,14 @@
#define ZEPHYR_INCLUDE_ARCH_ARM_AARCH32_ARCH_INLINES_H
#include <zephyr/kernel_structs.h>
#include <zephyr/arch/arm/cortex_a_r/lib_helpers.h>
#include <zephyr/arch/arm/cortex_a_r/tpidruro.h>
#ifndef CONFIG_SMP
static ALWAYS_INLINE _cpu_t *arch_curr_cpu(void)
{
/* Dummy implementation always return the first cpu */
return &_kernel.cpus[0];
return (_cpu_t *)(read_tpidruro() & TPIDRURO_CURR_CPU);
}
#endif
static ALWAYS_INLINE uint32_t arch_proc_id(void)
{

View file

@@ -72,6 +72,7 @@ MAKE_REG_HELPER(mair0, 0, 10, 2, 0);
MAKE_REG_HELPER(vbar, 0, 12, 0, 0);
MAKE_REG_HELPER(cntv_ctl, 0, 14, 3, 1);
MAKE_REG_HELPER(ctr, 0, 0, 0, 1);
MAKE_REG_HELPER(tpidruro, 0, 13, 0, 3);
MAKE_REG64_HELPER(ICC_SGI1R, 0, 12);
MAKE_REG64_HELPER(cntvct, 1, 14);
MAKE_REG64_HELPER(cntv_cval, 3, 14);

View file

@@ -0,0 +1,20 @@
/*
* Copyright (c) 2023 Arm Limited (or its affiliates). All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief tpidruro bits allocation
*
* Among other things, the tpidruro register holds the address of the
* current CPU's struct _cpu instance. Such a pointer is at least
* 4-byte aligned, which leaves the two low bits free for other purposes.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_A_R_TPIDRURO_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_A_R_TPIDRURO_H_
#define TPIDRURO_CURR_CPU 0xFFFFFFFCUL
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_CORTEX_A_R_TPIDRURO_H_ */
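
Since the struct _cpu pointer occupies bits [31:2], any code that ever places flags in the two free bits must strip them before dereferencing. A minimal sketch of that convention, assuming a hypothetical flag in bit 0 (the commit itself defines only the TPIDRURO_CURR_CPU mask):

#include <stdbool.h>
#include <stdint.h>

struct _cpu;

#define TPIDRURO_FLAG_EXAMPLE 0x1UL  /* hypothetical flag in the free bits [1:0] */

static inline struct _cpu *tpidruro_to_cpu(uint32_t val)
{
        /* Drop the low flag bits to recover the aligned pointer. */
        return (struct _cpu *)(val & TPIDRURO_CURR_CPU);
}

static inline bool tpidruro_flag_set(uint32_t val)
{
        return (val & TPIDRURO_FLAG_EXAMPLE) != 0;
}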