/*
 * Copyright (c) 2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Handling of transitions to-and-from regular IRQs (RIRQ)
 *
 * This module implements the code for handling entry to and exit from regular
 * IRQs.
 *
 * See isr_wrapper.S for details.
 */

#include <kernel_structs.h>
#include <offsets_short.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <arch/cpu.h>
#include <swap_macros.h>

GTEXT(_rirq_enter)
GTEXT(_rirq_exit)
GTEXT(_rirq_common_interrupt_swap)

#if 0 /* TODO: when FIRQ is not present, all would be regular */
#define NUM_REGULAR_IRQ_PRIO_LEVELS CONFIG_NUM_IRQ_PRIO_LEVELS
#else
#define NUM_REGULAR_IRQ_PRIO_LEVELS (CONFIG_NUM_IRQ_PRIO_LEVELS-1)
#endif
/* note: the above define assumes that prio 0 IRQ is for FIRQ, and
 * that all others are regular interrupts.
 * TODO: Revisit this if FIRQ becomes configurable.
 */

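/* For example, with CONFIG_NUM_IRQ_PRIO_LEVELS set to 2, FIRQ owns priority 0
 * and NUM_REGULAR_IRQ_PRIO_LEVELS evaluates to 1: only priority 1 interrupts
 * take the regular IRQ path implemented in this file.
 */
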
/*
 ===========================================================
          RETURN FROM INTERRUPT TO COOPERATIVE THREAD
 ===========================================================

 That's a special case because:

 1. We return from the IRQ handler to a cooperative thread
 2. A context switch happened during IRQ handling
 3. We're returning to a thread which previously gave control
    to another thread because of:
    - Calling k_sleep()
    - Explicitly yielding
    - Bumping into a locked sync primitive, etc.

 What (3) means is that, before passing control to another thread, the thread
 in question:
 a. Stashed all precious caller-saved registers on its stack
 b. Pushed the return address to the top of the stack as well

 That's how the thread's stack looks right before jumping to another thread:

 ----------------------------->8---------------------------------
                     PRE-CONTEXT-SWITCH STACK

 lower_addr, let's say: 0x1000

          --------------------------------------
 SP ->    | Return address; PC (Program Counter), in fact value taken from
          | BLINK register in arch_switch()
          --------------------------------------
          | STATUS32 value, we explicitly save it here for later usage, read-on
          --------------------------------------
          | Caller-saved registers: some of R0-R12
          --------------------------------------
          |...
          |...

 higher_addr, let's say: 0x2000
 ----------------------------->8---------------------------------

 When the context gets switched, the kernel saves callee-saved registers on the
 thread's stack right on top of the pre-switch contents, so that's what we have:

 ----------------------------->8---------------------------------
                     POST-CONTEXT-SWITCH STACK

 lower_addr, let's say: 0x1000

          --------------------------------------
 SP ->    | Callee-saved registers: see struct _callee_saved_stack{}
          | |- R13
          | |- R14
          | | ...
          | \- FP
          | ...
          --------------------------------------
          | Return address; PC (Program Counter)
          --------------------------------------
          | STATUS32 value
          --------------------------------------
          | Caller-saved registers: some of R0-R12
          --------------------------------------
          |...
          |...

 higher_addr, let's say: 0x2000
 ----------------------------->8---------------------------------

 So how do we return in such a complex scenario?

 First we restore the callee-saved regs with the help of
 _load_callee_saved_regs(). Now we're back to the PRE-CONTEXT-SWITCH STACK
 (see above).

 Logically our next step is to load the return address from the top of the
 stack and jump to that address to continue execution of the desired thread,
 but we're still in interrupt handling mode and the only way to return to
 normal execution mode is to execute the "rtie" instruction. And here we need
 to deal with the peculiarities of returning from an IRQ on ARCv2 cores.

 Instead of a simple jump to the return address stored at the top of the
 thread's stack (with subsequent interrupt enable), the ARCv2 core additionally
 and automatically restores some registers from the stack. The most important
 ones are PC ("Program Counter"), which holds the address of the next
 instruction to execute, and STATUS32, which holds important flags including
 global interrupt enable, zero, carry etc.

 To make things worse, depending on the ARC core configuration and the run-time
 setup of certain features, a different set of registers will be restored.

 Typically those same registers are automatically saved on the stack on entry
 to an interrupt, but remember we're returning to a thread which was not
 interrupted, so there are no automatically saved registers on its stack;
 still, register restoration will inevitably happen on RTIE execution.
 So if we do nothing special we'll end up with this:

 ----------------------------->8---------------------------------
 lower_addr, let's say: 0x1000

           --------------------------------------
  #        | Return address; PC (Program Counter)
  |        --------------------------------------
  |        | STATUS32 value
  |        --------------------------------------
  |
 sizeof(_irq_stack_frame)
  |
  |        | Caller-saved registers: R0-R12
  V        --------------------------------------
           |...
 SP ->     | < Some data on thread's stack>
           |...

 higher_addr, let's say: 0x2000
 ----------------------------->8---------------------------------

 I.e. we'll go much deeper down the stack, past the needed return address, read
 some value from an unexpected location on the stack and try to jump there.
 Nobody knows where we'd end up then.

 To work around that problem we need to mimic the existence of an IRQ stack
 frame, of which we really only need the return address, obviously to return
 where we need to. For that we just shift SP so that it points
 sizeof(_irq_stack_frame) above, like this:

 ----------------------------->8---------------------------------
 lower_addr, let's say: 0x1000

 SP ->     |
  A        | < Some unrelated data >
  |        |
  |
 sizeof(_irq_stack_frame)
  |
  |        --------------------------------------
  |        | Return address; PC (Program Counter)
  |        --------------------------------------
  #        | STATUS32 value
           --------------------------------------
           | Caller-saved registers: R0-R12
           --------------------------------------
           |...
           | < Some data on thread's stack>
           |...

 higher_addr, let's say: 0x2000
 ----------------------------->8---------------------------------

 Indeed, R0-R13 "restored" from the IRQ stack frame will contain garbage, but
 it makes no difference because we're returning to execution of code as if
 we're returning from yet another function call, and so we will restore all
 needed registers from the stack.

 One other important remark here is R13.

 The CPU hardware automatically saves/restores registers in pairs, and since
 we wanted to save/restore R12 in the IRQ stack frame as a caller-saved
 register we just happen to do that for R13 as well. But given that the
 compiler treats it as a callee-saved register, we also save/restore it
 separately in the _callee_saved_stack structure. And when we restore
 callee-saved registers from the stack we recover R13 among other registers.
 But later, on return from the IRQ with the RTIE instruction, R13 will be
 "restored" again from the fake IRQ stack frame, and if we don't copy the
 correct R13 value into the fake IRQ stack frame, R13 will be corrupted.
 */

/**
 *
 * @brief Work to be done before handing control to an IRQ ISR
 *
 * The processor automatically pushes all registers that need to be saved.
 * However, since the processor always runs at kernel privilege there is no
 * automatic switch to the IRQ stack: this must be done in software.
 *
 * Assumption by _isr_demux: r3 is untouched by _rirq_enter.
 *
 * @return N/A
 */

SECTION_FUNC(TEXT, _rirq_enter)

#ifdef CONFIG_ARC_STACK_CHECKING
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	lr r2, [_ARC_V2_SEC_STAT]
	bclr r2, r2, _ARC_V2_SEC_STAT_SSC_BIT
	sflag r2
#else
	/* disable stack checking */
	lr r2, [_ARC_V2_STATUS32]
	bclr r2, r2, _ARC_V2_STATUS32_SC_BIT
	kflag r2
#endif
#endif
	clri

	/* check whether irq stack is used */
	_check_and_inc_int_nest_counter r0, r1

	bne.d rirq_nest
	mov_s r0, sp

	_get_curr_cpu_irq_stack sp
rirq_nest:
	push_s r0
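	/* Note: r0 still holds the interrupted SP (loaded in the bne.d delay
	 * slot above), so the push above records it on whichever stack is now
	 * active; _rirq_exit later does "pop sp" to restore it.
	 */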

	seti
	j _isr_demux

/**
 *
 * @brief Work to be done exiting an IRQ
 *
 * @return N/A
 */

SECTION_FUNC(TEXT, _rirq_exit)
	clri

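	/* restore the stack pointer saved by _rirq_enter: the interrupted
	 * thread's SP, or the outer interrupt's SP when nesting
	 */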
	pop sp

	_dec_int_nest_counter r0, r1

	_check_nest_int_by_irq_act r0, r1
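	/* if we are still nested inside another interrupt there is nothing to
	 * reschedule: return straight to the interrupted interrupt context
	 */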
	jne _rirq_no_reschedule

#ifdef CONFIG_STACK_SENTINEL
	bl z_check_stack_sentinel
#endif

#ifdef CONFIG_PREEMPT_ENABLED

#ifdef CONFIG_SMP
	bl z_arc_smp_switch_in_isr
	/* r0 points to new thread, r1 points to old thread */
	cmp_s r0, 0
	beq _rirq_no_reschedule
	mov_s r2, r1
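	/* a context switch is needed: keep the outgoing (old) thread in r2,
	 * as _save_callee_saved_regs in _rirq_reschedule below expects
	 */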
#else
	mov_s r1, _kernel
	ld_s r2, [r1, _kernel_offset_to_current]

	/*
	 * Both (a) reschedule and (b) non-reschedule cases need to load the
	 * current thread's stack, but don't have to use it until the decision
	 * is taken: load the delay slots with the 'load stack pointer'
	 * instruction.
	 *
	 * a) needs to load it to save outgoing context.
	 * b) needs to load it to restore the interrupted context.
	 */

	/* check if the current thread needs to be rescheduled */
	ld_s r0, [r1, _kernel_offset_to_ready_q_cache]
	cmp_s r0, r2
	beq _rirq_no_reschedule

	/* cached thread to run is in r0, fall through */
#endif

.balign 4
_rirq_reschedule:

#ifdef CONFIG_ARC_SECURE_FIRMWARE
	/* here we need to remember the SEC_STAT.IRM bit */
	lr r3, [_ARC_V2_SEC_STAT]
	push_s r3
#endif

	/* _save_callee_saved_regs expects outgoing thread in r2 */
	_save_callee_saved_regs

	st _CAUSE_RIRQ, [r2, _thread_offset_to_relinquish_cause]
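	/* record why the outgoing thread lost the CPU; the dispatch on
	 * _thread_offset_to_relinquish_cause in _rirq_common_interrupt_swap
	 * below uses it to pick the matching resume path
	 */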

#ifdef CONFIG_SMP
	mov_s r2, r0
#else
	/* incoming thread is in r0: it becomes the new 'current' */
	mov_s r2, r0
	st_s r2, [r1, _kernel_offset_to_current]
#endif

.balign 4
_rirq_common_interrupt_swap:
	/* r2 contains pointer to new thread */

#ifdef CONFIG_ARC_STACK_CHECKING
	_load_stack_check_regs
#endif

	/*
	 * _load_callee_saved_regs expects incoming thread in r2.
	 * _load_callee_saved_regs restores the stack pointer.
	 */
	_load_callee_saved_regs

#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
	push_s r2
	mov_s r0, r2
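	/* configure_mpu_thread reprograms the MPU regions for the incoming
	 * thread passed in r0; r2 is saved/restored around the call because
	 * it still holds the thread pointer needed below
	 */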
	bl configure_mpu_thread
	pop_s r2
#endif

#if defined(CONFIG_USERSPACE)
	/*
	 * When USERSPACE is enabled, according to the ARCv2 ISA, the SP is
	 * switched if the interrupt is taken in user mode, and this is
	 * recorded in bit 31 (the U bit) of IRQ_ACT. When the interrupt
	 * exits, the SP is switched back according to the U bit.
	 *
	 * When the context switch happens inside the interrupt, the target SP
	 * must be the thread's kernel stack and no hardware SP switch is
	 * needed, so the U bit must be cleared.
	 */
	lr r0, [_ARC_V2_AUX_IRQ_ACT]
	bclr r0, r0, 31
	sr r0, [_ARC_V2_AUX_IRQ_ACT]
#endif

	ld r3, [r2, _thread_offset_to_relinquish_cause]

	breq r3, _CAUSE_RIRQ, _rirq_return_from_rirq
	nop_s
	breq r3, _CAUSE_FIRQ, _rirq_return_from_firq
	nop_s

	/* fall through */
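	/* neither _CAUSE_RIRQ nor _CAUSE_FIRQ matched: the incoming thread
	 * gave up the CPU cooperatively, so fall through to
	 * _rirq_return_from_coop below
	 */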

.balign 4
_rirq_return_from_coop:

#ifdef CONFIG_ARC_SECURE_FIRMWARE
	/* must return to secure mode, so set IRM bit to 1 */
	lr r0, [_ARC_V2_SEC_STAT]
	bset r0, r0, _ARC_V2_SEC_STAT_IRM_BIT
	sflag r0
#endif

	/*
	 * See the verbose explanation of
	 * RETURN FROM INTERRUPT TO COOPERATIVE THREAD above
	 */

	/* carve fake stack */
	sub sp, sp, ___isf_t_pc_OFFSET
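	/* after the shift, [sp + ___isf_t_pc_OFFSET] lands on the return
	 * address pushed in arch_switch() and [sp + ___isf_t_status32_OFFSET]
	 * on the STATUS32 word saved next to it, i.e. the "fake" IRQ stack
	 * frame described in the big comment above
	 */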

	/* reset zero-overhead loops */
	st 0, [sp, ___isf_t_lp_end_OFFSET]

	/*
	 * r13 is part of both the callee and caller-saved register sets
	 * because the processor is only able to save registers in pairs in
	 * the regular IRQ prologue. r13 thus has to be set to its correct
	 * value in the IRQ stack frame.
	 */
	st_s r13, [sp, ___isf_t_r13_OFFSET]

	/* stack now has the IRQ stack frame layout, pointing to sp */
	/* rtie will pop the rest from the stack */
	rtie

#endif /* CONFIG_PREEMPT_ENABLED */

.balign 4
_rirq_return_from_firq:
_rirq_return_from_rirq:
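	/* the incoming thread was last switched out from interrupt context
	 * (see the relinquish cause dispatch above), so its stack already
	 * holds an IRQ stack frame: restore SEC_STAT.IRM if needed and rtie
	 */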

#ifdef CONFIG_ARC_SECURE_FIRMWARE
	/* here we need to recover the SEC_STAT.IRM bit */
	pop_s r3
	sflag r3
#endif

_rirq_no_reschedule:
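	/* the interrupted context (thread or outer interrupt) still has the
	 * IRQ stack frame that was saved automatically on interrupt entry, so
	 * rtie restores it directly
	 */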
	rtie