/*
 * Copyright (c) 2014-2015 Wind River Systems, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @file
 * @brief Thread context switching
 *
 * This module implements the routines necessary for thread context switching
 * on ARCv2 CPUs.
 *
 * See isr_wrapper.S for details.
 */
2015-04-11 01:44:37 +02:00
|
|
|
|
|
|
|
#define _ASMLANGUAGE

#include <kernel_structs.h>
#include <offsets_short.h>
#include <toolchain.h>
#include <arch/cpu.h>
#include <v2/irq.h>
#include <swap_macros.h>

GTEXT(_Swap)
GDATA(_k_neg_eagain)
GDATA(_kernel)
|
2015-04-11 01:44:37 +02:00
|
|
|
|
/**
 *
 * @brief Initiate a cooperative context switch
 *
 * The _Swap() routine is invoked by various kernel services to effect
 * a cooperative context switch. Prior to invoking _Swap(), the caller
 * disables interrupts via irq_lock() and the return 'key' is passed as a
 * parameter to _Swap(). The key is in fact the value stored in the register
 * operand of a CLRI instruction.
 *
 * It stores the intlock key parameter into current->intlock_key.
 *
 * Given that _Swap() is called to effect a cooperative context switch,
 * the caller-saved integer registers are saved on the stack by the function
 * call preamble to _Swap(). This creates a custom stack frame that will be
 * popped when returning from _Swap(), but is not suitable for handling a return
 * from an exception. Thus, the fact that the thread is pending because of a
 * cooperative call to _Swap() has to be recorded via the _CAUSE_COOP code in
 * the relinquish_cause of the thread's k_thread structure. The
 * _IrqExit()/_FirqExit() code will take care of doing the right thing to
 * restore the thread status.
 *
 * When _Swap() is invoked, we know the decision to perform a context switch or
 * not has already been taken and a context switch must happen.
 *
 * @return may contain a return value setup by a call to
 * _set_thread_return_value()
 *
 * C function prototype:
 *
 * unsigned int _Swap (unsigned int key);
 */
2015-04-11 01:44:37 +02:00
|
|
|
|
|
|
|
SECTION_FUNC(TEXT, _Swap)

	/* interrupts are locked, interrupt key is in r0 */

	mov r1, _kernel
	ld_s r2, [r1, _kernel_offset_to_current]

	/* save intlock key */
	st_s r0, [r2, _thread_offset_to_intlock_key]
	st _CAUSE_COOP, [r2, _thread_offset_to_relinquish_cause]

	/*
	 * Carve space for the return value. Setting it to a default of
	 * -EAGAIN eliminates the need for the timeout code to set it.
	 * If another value is ever needed, it can be modified with
	 * _set_thread_return_value().
	 */
	ld r3, [_k_neg_eagain]
	st_s r3, [r2, _thread_offset_to_return_value]

	/*
	 * Save status32 and blink on the stack before the callee-saved registers.
	 * This is the same layout as the start of an IRQ stack frame.
	 */
	lr r3, [_ARC_V2_STATUS32]
	push_s r3
#ifdef CONFIG_ARC_STACK_CHECKING
	/* disable stack checking during swap */
	bclr r3, r3, _ARC_V2_STATUS32_SC_BIT
	kflag r3
#endif
	push_s blink

	_save_callee_saved_regs

	/* get the cached thread to run */
	ld_s r2, [r1, _kernel_offset_to_ready_q_cache]

	/* entering here, r2 contains the new current thread */
#ifdef CONFIG_ARC_STACK_CHECKING
	/* Use stack top and down registers from restored context */
	add r3, r2, _K_THREAD_NO_FLOAT_SIZEOF
	sr r3, [_ARC_V2_KSTACK_TOP]
	ld_s r3, [r2, _thread_offset_to_stack_top]
	sr r3, [_ARC_V2_KSTACK_BASE]
#endif
	/* XXX - can be moved to delay slot of _CAUSE_RIRQ ? */
	st_s r2, [r1, _kernel_offset_to_current]

	_load_callee_saved_regs

	ld_s r3, [r2, _thread_offset_to_relinquish_cause]

	breq r3, _CAUSE_RIRQ, _swap_return_from_rirq
	nop
	breq r3, _CAUSE_FIRQ, _swap_return_from_firq
	nop

	/* fall through to _swap_return_from_coop */

.balign 4
_swap_return_from_coop:

	ld_s r1, [r2, _thread_offset_to_intlock_key]
	st 0, [r2, _thread_offset_to_intlock_key]
	ld_s r0, [r2, _thread_offset_to_return_value]

	lr ilink, [_ARC_V2_STATUS32]
	bbit1 ilink, _ARC_V2_STATUS32_AE_BIT, _return_from_exc

	pop_s blink /* pc into blink */
	pop_s r3    /* status32 into r3 */
	kflag r3    /* write status32 */

	j_s.d [blink] /* always execute delay slot */
	seti r1       /* delay slot */

.balign 4
_swap_return_from_rirq:
_swap_return_from_firq:

	lr r3, [_ARC_V2_STATUS32]
	bbit1 r3, _ARC_V2_STATUS32_AE_BIT, _return_from_exc_irq

	/* pretend interrupt happened to use rtie instruction */
	lr r3, [_ARC_V2_AUX_IRQ_ACT]
	brne r3, 0, _swap_already_in_irq

	or r3, r3, (1 << (CONFIG_NUM_IRQ_PRIO_LEVELS - 1)) /* use lowest */
	sr r3, [_ARC_V2_AUX_IRQ_ACT]

_swap_already_in_irq:
	rtie

.balign 4
_return_from_exc_irq:
	_pop_irq_stack_frame
	sub_s sp, sp, 8

_return_from_exc:
	/* put the return address to eret */
	ld ilink, [sp] /* pc into ilink */
	sr ilink, [_ARC_V2_ERET]

	/* put status32 into estatus */
	ld ilink, [sp, 4] /* status32 into ilink */
	sr ilink, [_ARC_V2_ERSTATUS]
	add_s sp, sp, 8
	rtie
|