arch: arm64: Use _arch_switch() API

Switch to the _arch_switch() API, which is required for an SMP-aware
scheduler, instead of using the old arch_swap mechanism.

SMP is not supported yet, but this is a necessary step in that direction.

Signed-off-by: Carlo Caione <ccaione@baylibre.com>
Authored by Carlo Caione on 2020-08-04 12:52:16 +02:00, committed by Ioannis Glaropoulos
parent fd15524912
commit df4aa230c8
11 changed files with 49 additions and 55 deletions
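
For context, the contract behind the switch API can be modelled in plain C as follows. This is an illustrative user-space sketch, not Zephyr code: arch_switch(switch_to, switched_from) publishes a handle for the outgoing thread through *switched_from and resumes the thread identified by switch_to, and on arm64 the handle is simply the thread pointer itself (see thread->switch_handle = thread in the thread.c hunk below).

/* Illustrative user-space model of the arch_switch() handle exchange
 * (not Zephyr code; names and layout are simplified for the example).
 */
#include <stdio.h>

struct thread {
	const char *name;
	void *switch_handle;	/* opaque handle; on arm64 it is the thread itself */
};

static struct thread *current;

/* Publish the outgoing thread's handle through *switched_from, then
 * "resume" the thread identified by switch_to.
 */
static void model_arch_switch(void *switch_to, void **switched_from)
{
	*switched_from = current;	/* old thread is now eligible to be picked up */
	current = switch_to;		/* the real code restores registers and SP here */
}

int main(void)
{
	struct thread a = { "A", &a };
	struct thread b = { "B", &b };

	current = &a;
	model_arch_switch(b.switch_handle, &a.switch_handle);
	printf("now running: %s\n", current->name);	/* prints: now running: B */
	return 0;
}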


@@ -14,8 +14,7 @@ zephyr_library_sources(
irq_manage.c
prep_c.c
reset.S
swap.c
swap_helper.S
switch.S
thread.c
vector_table.S
)


@@ -7,6 +7,8 @@ config CPU_CORTEX_A
bool
select CPU_CORTEX
select HAS_FLASH_LOAD_OFFSET
select USE_SWITCH
select USE_SWITCH_SUPPORTED
help
This option signifies the use of a CPU of the Cortex-A family.


@@ -88,10 +88,24 @@ SECTION_FUNC(TEXT, _isr_wrapper)
cmp x1, #0
bne exit
/* Check if we need to context switch */
ldr x1, [x0, #_kernel_offset_to_current]
ldr x2, [x0, #_kernel_offset_to_ready_q_cache]
cmp x1, x2
/*
* z_arch_get_next_switch_handle() returns:
*
* - The next thread to schedule in x0
* - The current thread in x1. This value is returned using the
* **old_thread parameter, so we need to make space on the stack for
* that.
*/
stp x1, xzr, [sp, #-16]!
mov x0, sp
bl z_arch_get_next_switch_handle
ldp x1, xzr, [sp], #16
/*
* x0: 1st thread in the ready queue
* x1: _current thread
*/
cmp x0, x1
beq exit
/* Switch thread */
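
In C terms, the sequence added above amounts to the sketch below. This is an illustration only: z_arch_get_next_switch_handle() is the wrapper added in this commit, while the prototype shown for z_arm64_context_switch is just a C rendering of the assembly routine's register interface (x0 = next handle, x1 = old thread), not a declaration from the tree.

/* C-shaped sketch of the reschedule check at ISR exit (illustration only). */
struct k_thread;

void *z_arch_get_next_switch_handle(struct k_thread **old_thread);	/* added in this commit */
void z_arm64_context_switch(void *next, struct k_thread *old_thread);	/* assembly routine */

static void isr_exit_check(void)
{
	struct k_thread *old_thread;	/* the stp/mov pair reserves stack space for this */
	void *next = z_arch_get_next_switch_handle(&old_thread);

	if (next != (void *)old_thread) {	/* "cmp x0, x1 / beq exit" */
		z_arm64_context_switch(next, old_thread);
	}
}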


@@ -1,23 +0,0 @@
/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <kernel.h>
#include <kernel_internal.h>
extern const int _k_neg_eagain;
int arch_swap(unsigned int key)
{
_current->arch.swap_return_value = _k_neg_eagain;
z_arm64_call_svc();
irq_unlock(key);
/* Context switch is performed here. Returning implies the
* thread has been context-switched-in again.
*/
return _current->arch.swap_return_value;
}


@@ -22,7 +22,6 @@
_ASM_FILE_PROLOGUE
GDATA(_kernel)
GDATA(_k_neg_eagain)
/**
* @brief Routine to handle context switches
@@ -34,13 +33,12 @@ GDATA(_k_neg_eagain)
GTEXT(z_arm64_context_switch)
SECTION_FUNC(TEXT, z_arm64_context_switch)
#ifdef CONFIG_TRACING
stp x0, x1, [sp, #-16]!
stp xzr, x30, [sp, #-16]!
bl sys_trace_thread_switched_out
ldp xzr, x30, [sp], #16
ldp x0, x1, [sp], #16
#endif
/* load _kernel into x0 and current k_thread into x1 */
ldr x0, =_kernel
ldr x1, [x0, #_kernel_offset_to_current]
/* addr of callee-saved regs in thread in x2 */
ldr x2, =_thread_offset_to_callee_saved
@@ -58,13 +56,9 @@ SECTION_FUNC(TEXT, z_arm64_context_switch)
mov x1, sp
str x1, [x2]
/* fetch the thread to run from the ready queue cache */
ldr x1, [x0, #_kernel_offset_to_ready_q_cache]
str x1, [x0, #_kernel_offset_to_current]
/* addr of callee-saved regs in thread in x2 */
ldr x2, =_thread_offset_to_callee_saved
add x2, x2, x1
add x2, x2, x0
/* Restore x19-x29 plus x30 */
ldp x19, x20, [x2], #16
@@ -152,7 +146,7 @@ SECTION_FUNC(TEXT, z_thread_entry_wrapper)
GTEXT(z_arm64_svc)
SECTION_FUNC(TEXT, z_arm64_svc)
z_arm64_enter_exc x0, x1, x2
z_arm64_enter_exc x2, x3, x4
switch_el x1, 3f, 2f, 1f
3:
@@ -197,12 +191,15 @@ offload:
b inv
context_switch:
/* Check if we need to context switch */
ldr x0, =_kernel
ldr x1, [x0, #_kernel_offset_to_current]
ldr x2, [x0, #_kernel_offset_to_ready_q_cache]
cmp x1, x2
beq exit
/*
* Retrieve x0 and x1 from the stack:
* - x0 = new_thread->switch_handle = switch_to thread
* - x1 = &old_thread->switch_handle = current thread
*/
ldp x0, x1, [sp, #(16 * 10)]
/* Get old thread from x1 */
sub x1, x1, ___thread_t_switch_handle_OFFSET
/* Switch thread */
bl z_arm64_context_switch
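
The subtraction of ___thread_t_switch_handle_OFFSET recovers the thread that contains the switch_handle field from the &old_thread->switch_handle pointer received in x1. In C this is the usual container-of arithmetic; below is a sketch using offsetof() on a simplified stand-in struct (the real code uses the generated offset symbol, and struct k_thread has many more members).

/* Container-of sketch for the ___thread_t_switch_handle_OFFSET step
 * (illustrative stand-in struct, not the real struct k_thread layout).
 */
#include <stddef.h>

struct thread_stub {
	long other_fields[4];	/* placeholder for the members before switch_handle */
	void *switch_handle;
};

/* switched_from is &old_thread->switch_handle; back up by the member
 * offset to get the thread itself.
 */
static struct thread_stub *thread_from_switched_from(void **switched_from)
{
	return (struct thread_stub *)((char *)switched_from
		- offsetof(struct thread_stub, switch_handle));
}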


@@ -77,4 +77,13 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
thread->callee_saved.sp = (uint64_t)pInitCtx;
thread->callee_saved.x30 = (uint64_t)z_thread_entry_wrapper;
thread->switch_handle = thread;
}
void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
{
*old_thread = _current;
return z_get_next_switch_handle(*old_thread);
}


@@ -29,6 +29,4 @@
#include <kernel_arch_data.h>
#include <kernel_offsets.h>
GEN_OFFSET_SYM(_thread_arch_t, swap_return_value);
#endif /* _ARM_OFFSETS_INC_ */


@@ -38,7 +38,7 @@ static ALWAYS_INLINE bool arch_is_in_isr(void)
}
extern void z_arm64_call_svc(void);
extern void z_arm64_call_svc(void *switch_to, void **switched_from);
#ifdef __cplusplus
}


@@ -32,10 +32,11 @@ static ALWAYS_INLINE void arch_kernel_init(void)
{
}
static ALWAYS_INLINE void
arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
static inline void arch_switch(void *switch_to, void **switched_from)
{
thread->arch.swap_return_value = value;
z_arm64_call_svc(switch_to, switched_from);
return;
}
extern void z_arm64_fatal_error(const z_arch_esf_t *esf, unsigned int reason);
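
For completeness, the caller side pairs the two arguments as sketched below: the incoming thread contributes its handle, the outgoing thread contributes the address of its switch_handle slot, which is exactly the pointer the SVC handler later converts back with ___thread_t_switch_handle_OFFSET. This is illustrative only; it assumes the arch_switch() defined above is in scope, and thread_min is not the real struct k_thread.

/* Caller-side sketch: hand arch_switch() the incoming thread's handle and
 * the address where the outgoing thread's handle must be stored.
 */
struct thread_min {
	void *switch_handle;	/* seeded to the thread itself in arch_new_thread() */
};

static inline void switch_threads(struct thread_min *new_thread,
				  struct thread_min *old_thread)
{
	/* assumes the arch_switch() shown above is visible here */
	arch_switch(new_thread->switch_handle, &old_thread->switch_handle);
}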


@@ -9,7 +9,4 @@
#include <offsets.h>
#define _thread_offset_to_swap_return_value \
(___thread_t_arch_OFFSET + ___thread_arch_t_swap_return_value_OFFSET)
#endif /* ZEPHYR_ARCH_ARM_INCLUDE_AARCH64_OFFSETS_SHORT_ARCH_H_ */


@@ -41,7 +41,7 @@ struct _callee_saved {
typedef struct _callee_saved _callee_saved_t;
struct _thread_arch {
uint32_t swap_return_value;
/* empty */
};
typedef struct _thread_arch _thread_arch_t;