arch: arc: overhaul the thread switch code in epilogue of irq and exception

overhaul the thread switch code in the epilogue of irq and
exception handling:
* add z_arch_get_next_switch_handle to call z_get_next_switch_handle
  and let the scheduler decide which thread to switch to. This also
  covers the SMP case.

* put the common thread-switch code into macros to improve
  maintainability and readability.

* clean up some labels to make the code easier to understand

Signed-off-by: Wayne Ren <wei.ren@synopsys.com>
Wayne Ren 2020-02-03 21:07:19 +08:00 committed by Anas Nashif
parent 34b033d41d
commit 3f88ddd549
7 changed files with 182 additions and 291 deletions
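
At a high level, the reworked epilogue asks the scheduler which thread should run next and only performs a context switch when the answer differs from _current. A minimal C sketch of that decision, assuming the kernel internals _current and z_get_next_switch_handle() of this era; the wrapper function and variable names are illustrative only:

static void irq_epilogue_sketch(void)
{
	struct k_thread *old_thread = _current;
	void *next = z_get_next_switch_handle(old_thread);

	if (next == (void *)old_thread) {
		/* no switch: restore the interrupted context and rtie */
		return;
	}

	/* switch: save old_thread's callee regs, then load the next
	 * thread's context and return through the path recorded in its
	 * relinquish cause
	 */
}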


@@ -95,37 +95,6 @@ static void sched_ipi_handler(void *unused)
z_sched_ipi();
}
/**
* @brief Check whether a thread switch is needed in ISR context
*
* @details u64_t is used to make the compiler use (r0, r1) as the return
* registers: r0 holds the new thread, r1 holds the old thread. If r0 == 0,
* no thread switch is needed.
*/
u64_t z_arc_smp_switch_in_isr(void)
{
u64_t ret = 0;
u32_t new_thread;
u32_t old_thread;
old_thread = (u32_t)_current;
new_thread = (u32_t)z_get_next_ready_thread();
if (new_thread != old_thread) {
#ifdef CONFIG_TIMESLICING
z_reset_time_slice();
#endif
_current_cpu->swap_ok = 0;
((struct k_thread *)new_thread)->base.cpu =
arch_curr_cpu()->id;
_current_cpu->current = (struct k_thread *) new_thread;
ret = new_thread | ((u64_t)(old_thread) << 32);
}
return ret;
}
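
On ARCv2 a u64_t is returned in the register pair (r0, r1), so the assembly got the new thread in r0 and the old thread in r1 for free. A sketch of how the packed value unpacks on the C side (the wrapper function and variable names are illustrative):

static void smp_isr_switch_sketch(void)
{
	u64_t ret = z_arc_smp_switch_in_isr();

	if ((u32_t)ret != 0U) {	/* low word (r0) == 0 means no switch */
		struct k_thread *new_thread =
			(struct k_thread *)(u32_t)ret;
		struct k_thread *old_thread =
			(struct k_thread *)(u32_t)(ret >> 32);

		/* save old_thread's context, then load new_thread's */
	}
}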
/* arch implementation of sched_ipi */
void arch_sched_ipi(void)
{


@@ -149,30 +149,25 @@ SECTION_FUNC(TEXT, _firq_exit)
_check_nest_int_by_irq_act r0, r1
jne _firq_no_reschedule
jne _firq_no_switch
#ifdef CONFIG_STACK_SENTINEL
bl z_check_stack_sentinel
#endif
/* sp is the struct k_thread **old argument of z_arch_get_next_switch_handle,
* which is a wrapper of z_get_next_switch_handle. On return, r0 contains
* the first thread in the ready queue; if it equals _current (r2), no
* switch is needed, otherwise switch to it.
*/
_get_next_switch_handle
#ifdef CONFIG_SMP
bl z_arc_smp_switch_in_isr
/* r0 points to new thread, r1 points to old thread */
brne r0, 0, _firq_reschedule
#else
mov_s r1, _kernel
ld_s r2, [r1, _kernel_offset_to_current]
/* Check if the current thread (in r2) is the cached thread */
ld_s r0, [r1, _kernel_offset_to_ready_q_cache]
brne r0, r2, _firq_reschedule
#endif
/* fall through to the no-reschedule path */
.balign 4
_firq_no_reschedule:
/* restore the interrupted context's sp */
pop sp
cmp r0, r2
bne _firq_switch
/* fall through to the no-switch path */
.balign 4
_firq_no_switch:
/*
* Keeping this code block close to the code that uses it allows using a
* brxx instruction instead of a cmp/bxx pair
@@ -183,19 +178,17 @@ _firq_no_reschedule:
rtie
.balign 4
_firq_reschedule:
pop sp
_firq_switch:
#if CONFIG_RGF_NUM_BANKS != 1
#ifdef CONFIG_SMP
/*
* save r0, r1 on the irq stack for a while, as they will be changed by the register
* save r0, r2 on the irq stack for a while, as they will be changed by the register
* bank switch
*/
_get_curr_cpu_irq_stack r2
st r0, [r2, -4]
st r1, [r2, -8]
#endif
_get_curr_cpu_irq_stack r1
st r0, [r1, -4]
st r2, [r1, -8]
/*
* We know there is no interrupted interrupt of lower priority at this
* point, so when switching back to register bank 0, it will contain the
@@ -243,79 +236,35 @@ _firq_create_irq_stack_frame:
st_s r0, [sp, ___isf_t_status32_OFFSET]
st ilink, [sp, ___isf_t_pc_OFFSET] /* ilink into pc */
#ifdef CONFIG_SMP
/*
* load r0, r1 from the irq stack
* load r0, r2 from the irq stack
*/
_get_curr_cpu_irq_stack r2
ld r0, [r2, -4]
ld r1, [r2, -8]
_get_curr_cpu_irq_stack r1
ld r0, [r1, -4]
ld r2, [r1, -8]
#endif
#endif
#if defined(CONFIG_USERSPACE)
/*
* need to remember the user/kernel status of the interrupted thread; it
* will be restored when the thread is switched back in
*/
lr r3, [_ARC_V2_AUX_IRQ_ACT]
and r3, r3, 0x80000000
push_s r3
#endif
#ifdef CONFIG_SMP
mov_s r2, r1
#else
mov_s r1, _kernel
ld_s r2, [r1, _kernel_offset_to_current]
#endif
_save_callee_saved_regs
/* r2 is old thread */
_irq_store_old_thread_callee_regs
st _CAUSE_FIRQ, [r2, _thread_offset_to_relinquish_cause]
#ifdef CONFIG_SMP
mov_s r2, r0
#else
ld_s r2, [r1, _kernel_offset_to_ready_q_cache]
st_s r2, [r1, _kernel_offset_to_current]
#endif
/* mov new thread (r0) to r2 */
#ifdef CONFIG_ARC_STACK_CHECKING
_load_stack_check_regs
#endif
/*
* _load_callee_saved_regs expects incoming thread in r2.
* _load_callee_saved_regs restores the stack pointer.
*/
_load_callee_saved_regs
mov r2, r0
_load_new_thread_callee_regs
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
push_s r2
mov_s r0, r2
bl configure_mpu_thread
pop_s r2
#endif
#if defined(CONFIG_USERSPACE)
/*
* see comments in regular_irq.S
*/
lr r0, [_ARC_V2_AUX_IRQ_ACT]
bclr r0, r0, 31
sr r0, [_ARC_V2_AUX_IRQ_ACT]
#endif
ld r3, [r2, _thread_offset_to_relinquish_cause]
breq r3, _CAUSE_RIRQ, _firq_return_from_rirq
breq r3, _CAUSE_RIRQ, _firq_switch_from_rirq
nop_s
breq r3, _CAUSE_FIRQ, _firq_return_from_firq
breq r3, _CAUSE_FIRQ, _firq_switch_from_firq
nop_s
/* fall through */
.balign 4
_firq_return_from_coop:
_firq_switch_from_coop:
_set_misc_regs_irq_switch_from_coop
/* pc into ilink */
pop_s r0
mov_s ilink, r0
@@ -326,18 +275,10 @@ _firq_return_from_coop:
rtie
.balign 4
_firq_return_from_rirq:
_firq_return_from_firq:
_firq_switch_from_rirq:
_firq_switch_from_firq:
#if defined(CONFIG_USERSPACE)
/*
* need to recover the user/kernel status of the interrupted thread
*/
pop_s r3
lr r2, [_ARC_V2_AUX_IRQ_ACT]
or r2, r2, r3
sr r2, [_ARC_V2_AUX_IRQ_ACT]
#endif
_set_misc_regs_irq_switch_from_irq
_pop_irq_stack_frame


@@ -114,21 +114,11 @@ _exc_return:
* exception comes out: thread context? irq context? nested irq context?
*/
#ifdef CONFIG_SMP
bl z_arc_smp_switch_in_isr
breq r0, 0, _exc_return_from_exc
mov_s r2, r0
#else
mov_s r1, _kernel
ld_s r2, [r1, _kernel_offset_to_current]
_get_next_switch_handle
/* check if the current thread needs to be rescheduled */
ld_s r0, [r1, _kernel_offset_to_ready_q_cache]
breq r0, r2, _exc_return_from_exc
ld_s r2, [r1, _kernel_offset_to_ready_q_cache]
st_s r2, [r1, _kernel_offset_to_current]
#endif
mov_s r2, r0
#ifdef CONFIG_ARC_SECURE_FIRMWARE
/*
@@ -184,8 +174,8 @@ _exc_return:
mov r2, ilink
#endif
/* Assumption: r2 has current thread */
b _rirq_common_interrupt_swap
/* Assumption: r2 has next thread */
b _rirq_newthread_switch
_exc_return_from_exc:
ld_s r0, [sp, ___isf_t_pc_OFFSET]


@@ -23,7 +23,7 @@
GTEXT(_rirq_enter)
GTEXT(_rirq_exit)
GTEXT(_rirq_common_interrupt_swap)
GTEXT(_rirq_newthread_switch)
#if 0 /* TODO: when FIRQ is not present, all would be regular */
@@ -260,41 +260,17 @@ SECTION_FUNC(TEXT, _rirq_exit)
_check_nest_int_by_irq_act r0, r1
jne _rirq_no_reschedule
jne _rirq_no_switch
#ifdef CONFIG_STACK_SENTINEL
bl z_check_stack_sentinel
#endif
#ifdef CONFIG_SMP
bl z_arc_smp_switch_in_isr
/* r0 points to new thread, r1 points to old thread */
cmp_s r0, 0
beq _rirq_no_reschedule
mov_s r2, r1
#else
mov_s r1, _kernel
ld_s r2, [r1, _kernel_offset_to_current]
/*
* Both (a)reschedule and (b)non-reschedule cases need to load the
* current thread's stack, but don't have to use it until the decision
* is taken: load the delay slots with the 'load stack pointer'
* instruction.
*
* a) needs to load it to save outgoing context.
* b) needs to load it to restore the interrupted context.
/* sp is the struct k_thread **old argument of z_arch_get_next_switch_handle,
* which is a wrapper of z_get_next_switch_handle. On return, r0 contains
* the first thread in the ready queue; if it equals _current (r2), no
* switch is needed, otherwise switch to it.
*/
_get_next_switch_handle
/* check if the current thread needs to be rescheduled */
ld_s r0, [r1, _kernel_offset_to_ready_q_cache]
cmp_s r0, r2
beq _rirq_no_reschedule
/* cached thread to run is in r0, fall through */
#endif
.balign 4
_rirq_reschedule:
cmp r0, r2
beq _rirq_no_switch
#ifdef CONFIG_ARC_SECURE_FIRMWARE
/* we need to remember the SEC_STAT.IRM bit here */
@@ -302,81 +278,31 @@ _rirq_reschedule:
push_s r3
#endif
#if defined(CONFIG_USERSPACE)
/*
* need to remember the user/kernel status of the interrupted thread
*/
lr r3, [_ARC_V2_AUX_IRQ_ACT]
and r3, r3, 0x80000000
push_s r3
#endif
/* _save_callee_saved_regs expects outgoing thread in r2 */
_save_callee_saved_regs
/* r2 is old thread */
_irq_store_old_thread_callee_regs
st _CAUSE_RIRQ, [r2, _thread_offset_to_relinquish_cause]
#ifdef CONFIG_SMP
mov_s r2, r0
#else
/* incoming thread is in r0: it becomes the new 'current' */
mov_s r2, r0
st_s r2, [r1, _kernel_offset_to_current]
#endif
/* mov new thread (r0) to r2 */
mov r2, r0
/* _rirq_newthread_switch is required by exception handling */
.balign 4
_rirq_common_interrupt_swap:
/* r2 contains pointer to new thread */
_rirq_newthread_switch:
#ifdef CONFIG_ARC_STACK_CHECKING
_load_stack_check_regs
#endif
/*
* _load_callee_saved_regs expects incoming thread in r2.
* _load_callee_saved_regs restores the stack pointer.
*/
_load_callee_saved_regs
_load_new_thread_callee_regs
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
push_s r2
mov_s r0, r2
bl configure_mpu_thread
pop_s r2
#endif
#if defined(CONFIG_USERSPACE)
/*
* when USERSPACE is enabled, according to the ARCv2 ISA, SP is switched
* when an interrupt arrives in user mode, and this is recorded in bit 31
* (the U bit) of IRQ_ACT. When the interrupt exits, SP is switched back
* according to the U bit.
*
* In the case where the context switches inside an interrupt, the target sp
* must be the thread's kernel stack, so no hardware sp switch is needed and
* the U bit should be cleared.
*/
lr r0, [_ARC_V2_AUX_IRQ_ACT]
bclr r0, r0, 31
sr r0, [_ARC_V2_AUX_IRQ_ACT]
#endif
ld r3, [r2, _thread_offset_to_relinquish_cause]
breq r3, _CAUSE_RIRQ, _rirq_return_from_rirq
breq r3, _CAUSE_RIRQ, _rirq_switch_from_rirq
nop_s
breq r3, _CAUSE_FIRQ, _rirq_return_from_firq
breq r3, _CAUSE_FIRQ, _rirq_switch_from_firq
nop_s
/* fall through */
.balign 4
_rirq_return_from_coop:
_rirq_switch_from_coop:
#ifdef CONFIG_ARC_SECURE_FIRMWARE
/* must return to secure mode, so set IRM bit to 1 */
lr r0, [_ARC_V2_SEC_STAT]
bset r0, r0, _ARC_V2_SEC_STAT_IRM_BIT
sflag r0
#endif
_set_misc_regs_irq_switch_from_coop
/*
* See verbose explanation of
@@ -403,22 +329,10 @@ _rirq_return_from_coop:
rtie
.balign 4
_rirq_return_from_firq:
_rirq_return_from_rirq:
#if defined(CONFIG_USERSPACE)
/*
* need to recover the user/kernel status of the interrupted thread
*/
pop_s r3
lr r2, [_ARC_V2_AUX_IRQ_ACT]
or r2, r2, r3
sr r2, [_ARC_V2_AUX_IRQ_ACT]
#endif
#ifdef CONFIG_ARC_SECURE_FIRMWARE
/* here need to recover SEC_STAT.IRM bit */
pop_s r3
sflag r3
#endif
_rirq_no_reschedule:
_rirq_switch_from_firq:
_rirq_switch_from_rirq:
_set_misc_regs_irq_switch_from_irq
_rirq_no_switch:
rtie
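
The breq chain above dispatches on how the incoming thread last gave up the CPU, recorded in its relinquish cause when it was switched out. Roughly, in C terms — the arch.relinquish_cause spelling follows the _thread_offset_to_relinquish_cause offset name, but this rendering is a sketch, not kernel code:

static void resume_path_sketch(struct k_thread *thread)
{
	switch (thread->arch.relinquish_cause) {
	case _CAUSE_RIRQ:	/* switched out by a regular interrupt */
		/* return through _rirq_switch_from_rirq */
		break;
	case _CAUSE_FIRQ:	/* switched out by a fast interrupt */
		/* return through _rirq_switch_from_firq */
		break;
	default:		/* cooperative: switched out via arch_switch() */
		/* return through _rirq_switch_from_coop */
		break;
	}
}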


@@ -94,14 +94,7 @@ SECTION_FUNC(TEXT, arch_switch)
push_s blink
_save_callee_saved_regs
#ifdef CONFIG_SMP
/* save the old thread into its switch handle, which is required by
* wait_for_switch
*/
st r2, [r2, ___thread_t_switch_handle_OFFSET]
#endif
_store_old_thread_callee_regs
#ifdef CONFIG_ARC_STACK_CHECKING
/* disable stack checking here, as sp will be changed to target
@@ -118,20 +111,7 @@ SECTION_FUNC(TEXT, arch_switch)
mov_s r2, r0
/* entering here, r2 contains the new current thread */
#ifdef CONFIG_ARC_STACK_CHECKING
_load_stack_check_regs
#endif
_load_callee_saved_regs
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
push_s r2
bl configure_mpu_thread
pop_s r2
#endif
ld r3, [r2, _thread_offset_to_relinquish_cause]
_load_new_thread_callee_regs
breq r3, _CAUSE_RIRQ, _switch_return_from_rirq
nop_s
@@ -164,24 +144,14 @@ return_loc:
_switch_return_from_rirq:
_switch_return_from_firq:
#if defined(CONFIG_USERSPACE)
/*
* need to recover the user/kernel status of the interrupted thread
*/
pop_s r3
lr r2, [_ARC_V2_AUX_IRQ_ACT]
or r2, r2, r3
sr r2, [_ARC_V2_AUX_IRQ_ACT]
#endif
_set_misc_regs_irq_switch_from_irq
#ifdef CONFIG_ARC_SECURE_FIRMWARE
/* here need to recover SEC_STAT.IRM bit */
pop_s r3
sflag r3
#endif
/* use the lowest interrupt priority to simulate
* an interrupt return that loads the remaining
* regs of the new thread
*/
lr r3, [_ARC_V2_AUX_IRQ_ACT]
/* use the lowest interrupt priority */
#ifdef CONFIG_ARC_SECURE_FIRMWARE
or r3, r3, (1 << (ARC_N_IRQ_START_LEVEL - 1))
#else


@@ -224,6 +224,12 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
/* initial values in all other regs/k_thread entries are irrelevant */
}
void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
{
*old_thread = _current;
return z_get_next_switch_handle(*old_thread);
}
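
From a C caller's point of view, the contract that the _get_next_switch_handle assembly macro (see swap_macros.h below) builds on looks like this sketch; the wrapper function and variable names are illustrative:

static void next_handle_usage_sketch(void)
{
	struct k_thread *old;	/* written by the callee: old == _current */
	void *next = z_arch_get_next_switch_handle(&old);

	if (next != (void *)old) {
		/* a context switch is needed */
	}

	/* In the assembly macro, the pushed copy of r2 is reused as the
	 * &old slot, so "pop_s r2" leaves _current in r2 while r0 holds
	 * next.
	 */
}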
#ifdef CONFIG_USERSPACE


@@ -377,6 +377,107 @@
sr \reg, [\aux]
.endm
/* macro to store the old thread's callee regs */
.macro _store_old_thread_callee_regs
_save_callee_saved_regs
#ifdef CONFIG_SMP
/* save the old thread into its switch handle, which is required by
* wait_for_switch
*/
st r2, [r2, ___thread_t_switch_handle_OFFSET]
#endif
.endm
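
Storing the old thread's pointer into its own switch_handle marks the context as fully saved; under SMP, the CPU that picked this thread as its next one spins on that field before resuming it. A sketch of that wait, assuming a helper along the lines of the kernel's wait_for_switch (the real one lives in kswap.h and may differ):

static inline void wait_for_switch_sketch(struct k_thread *thread)
{
	volatile void **shp = (volatile void **)&thread->switch_handle;

	while (*shp == NULL) {
		/* spin: the owning CPU has not finished saving the
		 * outgoing thread's callee-saved context yet
		 */
	}
}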
/* macro to store the old thread's callee regs in interrupt context */
.macro _irq_store_old_thread_callee_regs
#if defined(CONFIG_USERSPACE)
/*
* need to remember the user/kernel status of the interrupted thread; it
* will be restored when the thread is switched back in
*/
lr r3, [_ARC_V2_AUX_IRQ_ACT]
and r3, r3, 0x80000000
push_s r3
#endif
_store_old_thread_callee_regs
.endm
/* macro to load the new thread's callee regs */
.macro _load_new_thread_callee_regs
#ifdef CONFIG_ARC_STACK_CHECKING
_load_stack_check_regs
#endif
/*
* _load_callee_saved_regs expects incoming thread in r2.
* _load_callee_saved_regs restores the stack pointer.
*/
_load_callee_saved_regs
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
push_s r2
bl configure_mpu_thread
pop_s r2
#endif
ld r3, [r2, _thread_offset_to_relinquish_cause]
.endm
/* when the switch to a thread is caused by a cooperative swap, some status regs need to be set */
.macro _set_misc_regs_irq_switch_from_coop
#if defined(CONFIG_USERSPACE)
/*
* when USERSPACE is enabled, according to the ARCv2 ISA, SP is switched
* when an interrupt arrives in user mode, and this is recorded in bit 31
* (the U bit) of IRQ_ACT. When the interrupt exits, SP is switched back
* according to the U bit.
*
* In the case where the context switches inside an interrupt, the target sp
* must be the thread's kernel stack, so no hardware sp switch is needed and
* the U bit should be cleared.
*/
lr r0, [_ARC_V2_AUX_IRQ_ACT]
bclr r0, r0, 31
sr r0, [_ARC_V2_AUX_IRQ_ACT]
#endif
#ifdef CONFIG_ARC_SECURE_FIRMWARE
/* must return to secure mode, so set IRM bit to 1 */
lr r0, [_ARC_V2_SEC_STAT]
bset r0, r0, _ARC_V2_SEC_STAT_IRM_BIT
sflag r0
#endif
.endm
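
For USERSPACE, this macro boils down to clearing bit 31 of IRQ_ACT. In C, using the ARC aux-register helpers that exist in this port (the wrapper function is illustrative):

static void clear_u_bit_sketch(void)
{
	u32_t act = z_arc_v2_aux_reg_read(_ARC_V2_AUX_IRQ_ACT);

	/* the target sp is the thread's kernel stack, so no hardware sp
	 * switch must happen on interrupt exit: clear the U bit
	 */
	act &= ~BIT(31);
	z_arc_v2_aux_reg_write(_ARC_V2_AUX_IRQ_ACT, act);
}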
/* when the switch to a thread is caused by an irq, some status regs need to be set */
.macro _set_misc_regs_irq_switch_from_irq
#if defined(CONFIG_USERSPACE)
/*
* need to recover the user/kernel status of the interrupted thread
*/
pop_s r3
lr r2, [_ARC_V2_AUX_IRQ_ACT]
or r2, r2, r3
sr r2, [_ARC_V2_AUX_IRQ_ACT]
#endif
#ifdef CONFIG_ARC_SECURE_FIRMWARE
/* we need to recover the SEC_STAT.IRM bit here */
pop_s r3
sflag r3
#endif
.endm
/* macro to get the next switch handle in assembly */
.macro _get_next_switch_handle
push_s r2
mov r0, sp
bl z_arch_get_next_switch_handle
pop_s r2
.endm
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_ARCH_ARC_INCLUDE_SWAP_MACROS_H_ */