riscv: make core code 64-bit compatible

There are two aspects to this: CPU registers are twice as big, and the
load and store instructions must use the 'd' suffix instead of the 'w'
one. To abstract the register size difference, we simply use ulong_t
instead of u32_t, given that RISC-V is either ILP32 or LP64. The
relevant lw/sw instructions are replaced by LR/SR (load/store register)
macros that are defined as either lw/sw or ld/sd. Finally, a few
constants to deal with register offsets are also provided.

Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
Author: Nicolas Pitre, 2019-07-24 16:21:58 -04:00 (committed by Andrew Boie)
Commit: 0440a815a9, parent: 1f4b5ddd0f
10 changed files with 234 additions and 212 deletions
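The whole abstraction rests on a handful of definitions added by this
commit, collected here as a sketch for reference (see the architecture
header hunk further down for the actual context):

#ifdef CONFIG_64BIT
#define LR		ld	/* register-sized load  */
#define SR		sd	/* register-sized store */
#define RV_REGSIZE	8	/* bytes per register   */
#define RV_REGSHIFT	3	/* log2(RV_REGSIZE)     */
#else
#define LR		lw
#define SR		sw
#define RV_REGSIZE	4
#define RV_REGSHIFT	2
#endif

/* Register-sized C type: 32 bits on ILP32, 64 bits on LP64. */
typedef unsigned long ulong_t;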

@@ -13,17 +13,17 @@ FUNC_NORETURN void z_riscv_fatal_error(unsigned int reason,
const z_arch_esf_t *esf)
{
if (esf != NULL) {
z_fatal_print("Faulting instruction address = 0x%08x",
z_fatal_print("Faulting instruction address = 0x%08lx",
esf->mepc);
z_fatal_print(" ra: 0x%08x gp: 0x%08x tp: 0x%08x t0: 0x%08x",
z_fatal_print(" ra: 0x%08lx gp: 0x%08lx tp: 0x%08lx t0: 0x%08lx",
esf->ra, esf->gp, esf->tp, esf->t0);
z_fatal_print(" t1: 0x%08x t2: 0x%08x t3: 0x%08x t4: 0x%08x",
z_fatal_print(" t1: 0x%08lx t2: 0x%08lx t3: 0x%08lx t4: 0x%08lx",
esf->t1, esf->t2, esf->t3, esf->t4);
z_fatal_print(" t5: 0x%08x t6: 0x%08x a0: 0x%08x a1: 0x%08x",
z_fatal_print(" t5: 0x%08lx t6: 0x%08lx a0: 0x%08lx a1: 0x%08lx",
esf->t5, esf->t6, esf->a0, esf->a1);
z_fatal_print(" a2: 0x%08x a3: 0x%08x a4: 0x%08x a5: 0x%08x",
z_fatal_print(" a2: 0x%08lx a3: 0x%08lx a4: 0x%08lx a5: 0x%08lx",
esf->a2, esf->a3, esf->a4, esf->a5);
z_fatal_print(" a6: 0x%08x a7: 0x%08x\n",
z_fatal_print(" a6: 0x%08lx a7: 0x%08lx\n",
esf->a6, esf->a7);
}
@@ -31,7 +31,7 @@ FUNC_NORETURN void z_riscv_fatal_error(unsigned int reason,
CODE_UNREACHABLE;
}
static char *cause_str(u32_t cause)
static char *cause_str(ulong_t cause)
{
switch (cause) {
case 0:
@@ -53,13 +53,12 @@ static char *cause_str(u32_t cause)
FUNC_NORETURN void _Fault(const z_arch_esf_t *esf)
{
u32_t mcause;
ulong_t mcause;
__asm__ volatile("csrr %0, mcause" : "=r" (mcause));
mcause &= SOC_MCAUSE_EXP_MASK;
z_fatal_print("Exception cause %s (%d)", cause_str(mcause),
(int)mcause);
z_fatal_print("Exception cause %s (%ld)", cause_str(mcause), mcause);
z_riscv_fatal_error(K_ERR_CPU_EXCEPTION, esf);
}

@@ -9,7 +9,7 @@
FUNC_NORETURN void z_irq_spurious(void *unused)
{
u32_t mcause;
ulong_t mcause;
ARG_UNUSED(unused);
@@ -17,7 +17,7 @@ FUNC_NORETURN void z_irq_spurious(void *unused)
mcause &= SOC_MCAUSE_EXP_MASK;
z_fatal_print("Spurious interrupt detected! IRQ: %d", (int)mcause);
z_fatal_print("Spurious interrupt detected! IRQ: %ld", mcause);
#if defined(CONFIG_RISCV_HAS_PLIC)
if (mcause == RISCV_MACHINE_EXT_IRQ) {
z_fatal_print("PLIC interrupt line causing the IRQ: %d",

@@ -74,32 +74,32 @@ SECTION_FUNC(exception.entry, __irq_wrapper)
* floating-point registers should be accounted for when corresponding
* config variable is set
*/
sw ra, __z_arch_esf_t_ra_OFFSET(sp)
sw gp, __z_arch_esf_t_gp_OFFSET(sp)
sw tp, __z_arch_esf_t_tp_OFFSET(sp)
sw t0, __z_arch_esf_t_t0_OFFSET(sp)
sw t1, __z_arch_esf_t_t1_OFFSET(sp)
sw t2, __z_arch_esf_t_t2_OFFSET(sp)
sw t3, __z_arch_esf_t_t3_OFFSET(sp)
sw t4, __z_arch_esf_t_t4_OFFSET(sp)
sw t5, __z_arch_esf_t_t5_OFFSET(sp)
sw t6, __z_arch_esf_t_t6_OFFSET(sp)
sw a0, __z_arch_esf_t_a0_OFFSET(sp)
sw a1, __z_arch_esf_t_a1_OFFSET(sp)
sw a2, __z_arch_esf_t_a2_OFFSET(sp)
sw a3, __z_arch_esf_t_a3_OFFSET(sp)
sw a4, __z_arch_esf_t_a4_OFFSET(sp)
sw a5, __z_arch_esf_t_a5_OFFSET(sp)
sw a6, __z_arch_esf_t_a6_OFFSET(sp)
sw a7, __z_arch_esf_t_a7_OFFSET(sp)
SR ra, __z_arch_esf_t_ra_OFFSET(sp)
SR gp, __z_arch_esf_t_gp_OFFSET(sp)
SR tp, __z_arch_esf_t_tp_OFFSET(sp)
SR t0, __z_arch_esf_t_t0_OFFSET(sp)
SR t1, __z_arch_esf_t_t1_OFFSET(sp)
SR t2, __z_arch_esf_t_t2_OFFSET(sp)
SR t3, __z_arch_esf_t_t3_OFFSET(sp)
SR t4, __z_arch_esf_t_t4_OFFSET(sp)
SR t5, __z_arch_esf_t_t5_OFFSET(sp)
SR t6, __z_arch_esf_t_t6_OFFSET(sp)
SR a0, __z_arch_esf_t_a0_OFFSET(sp)
SR a1, __z_arch_esf_t_a1_OFFSET(sp)
SR a2, __z_arch_esf_t_a2_OFFSET(sp)
SR a3, __z_arch_esf_t_a3_OFFSET(sp)
SR a4, __z_arch_esf_t_a4_OFFSET(sp)
SR a5, __z_arch_esf_t_a5_OFFSET(sp)
SR a6, __z_arch_esf_t_a6_OFFSET(sp)
SR a7, __z_arch_esf_t_a7_OFFSET(sp)
/* Save MEPC register */
csrr t0, mepc
sw t0, __z_arch_esf_t_mepc_OFFSET(sp)
SR t0, __z_arch_esf_t_mepc_OFFSET(sp)
/* Save SOC-specific MSTATUS register */
csrr t0, SOC_MSTATUS_REG
sw t0, __z_arch_esf_t_mstatus_OFFSET(sp)
SR t0, __z_arch_esf_t_mstatus_OFFSET(sp)
#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
/* Handle context saving at SOC level. */
@@ -164,9 +164,9 @@ is_syscall:
* It's safe to always increment by 4, even with compressed
* instructions, because the ecall instruction is always 4 bytes.
*/
lw t0, __z_arch_esf_t_mepc_OFFSET(sp)
LR t0, __z_arch_esf_t_mepc_OFFSET(sp)
addi t0, t0, 4
sw t0, __z_arch_esf_t_mepc_OFFSET(sp)
SR t0, __z_arch_esf_t_mepc_OFFSET(sp)
#ifdef CONFIG_IRQ_OFFLOAD
/*
@@ -176,7 +176,7 @@ is_syscall:
* jump to is_interrupt to handle the IRQ offload.
*/
la t0, _offload_routine
lw t1, 0x00(t0)
LR t1, 0x00(t0)
bnez t1, is_interrupt
#endif
@@ -196,14 +196,14 @@ is_interrupt:
/* Switch to interrupt stack */
la t2, _kernel
lw sp, _kernel_offset_to_irq_stack(t2)
LR sp, _kernel_offset_to_irq_stack(t2)
/*
* Save thread stack pointer on interrupt stack
* In RISC-V, stack pointer needs to be 16-byte aligned
*/
addi sp, sp, -16
sw t0, 0x00(sp)
SR t0, 0x00(sp)
on_irq_stack:
/* Increment _kernel.nested variable */
@@ -243,25 +243,25 @@ call_irq:
/*
* Call corresponding registered function in _sw_isr_table.
* (table is 8-bytes wide, we should shift index by 3)
* (table is 2-word wide, we should shift index accordingly)
*/
la t0, _sw_isr_table
slli a0, a0, 3
slli a0, a0, (RV_REGSHIFT + 1)
add t0, t0, a0
/* Load argument in a0 register */
lw a0, 0x00(t0)
LR a0, 0x00(t0)
/* Load ISR function address in register t1 */
lw t1, 0x04(t0)
lw t1, RV_REGSIZE(t0)
#ifdef CONFIG_EXECUTION_BENCHMARKING
addi sp, sp, -16
sw a0, 0x00(sp)
sw t1, 0x04(sp)
SR a0, 0x00(sp)
SR t1, RV_REGSIZE(sp)
call read_timer_end_of_isr
lw t1, 0x04(sp)
lw a0, 0x00(sp)
LR t1, RV_REGSIZE(sp)
LR a0, 0x00(sp)
addi sp, sp, 16
#endif
/* Call ISR function */
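The indexing above works because each software ISR table entry is two
register-sized words: the argument pointer first, then the handler
pointer. A minimal C sketch of that layout (matching the entries of
Zephyr's _sw_isr_table; shown here only to explain the RV_REGSHIFT + 1
shift and the RV_REGSIZE load offset):

struct _isr_table_entry {
	void *arg;            /* loaded into a0, at offset 0          */
	void (*isr)(void *);  /* loaded into t1, at offset RV_REGSIZE */
};
/* byte offset of entry N = N * sizeof(struct _isr_table_entry)
 *                        = N << (RV_REGSHIFT + 1)                 */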
@@ -277,7 +277,7 @@ on_thread_stack:
sw t2, _kernel_offset_to_nested(t1)
/* Restore thread stack pointer */
lw t0, 0x00(sp)
LR t0, 0x00(sp)
addi sp, t0, 0
#ifdef CONFIG_STACK_SENTINEL
@@ -291,13 +291,13 @@ on_thread_stack:
*/
/* Get pointer to _kernel.current */
lw t2, _kernel_offset_to_current(t1)
LR t2, _kernel_offset_to_current(t1)
/*
* Check if next thread to schedule is current thread.
* If yes do not perform a reschedule
*/
lw t3, _kernel_offset_to_ready_q_cache(t1)
LR t3, _kernel_offset_to_ready_q_cache(t1)
beq t3, t2, no_reschedule
#else
j no_reschedule
@@ -311,101 +311,101 @@ reschedule:
la t0, _kernel
/* Get pointer to _kernel.current */
lw t1, _kernel_offset_to_current(t0)
LR t1, _kernel_offset_to_current(t0)
/*
* Save callee-saved registers of current thread
* prior to handle context-switching
*/
sw s0, _thread_offset_to_s0(t1)
sw s1, _thread_offset_to_s1(t1)
sw s2, _thread_offset_to_s2(t1)
sw s3, _thread_offset_to_s3(t1)
sw s4, _thread_offset_to_s4(t1)
sw s5, _thread_offset_to_s5(t1)
sw s6, _thread_offset_to_s6(t1)
sw s7, _thread_offset_to_s7(t1)
sw s8, _thread_offset_to_s8(t1)
sw s9, _thread_offset_to_s9(t1)
sw s10, _thread_offset_to_s10(t1)
sw s11, _thread_offset_to_s11(t1)
SR s0, _thread_offset_to_s0(t1)
SR s1, _thread_offset_to_s1(t1)
SR s2, _thread_offset_to_s2(t1)
SR s3, _thread_offset_to_s3(t1)
SR s4, _thread_offset_to_s4(t1)
SR s5, _thread_offset_to_s5(t1)
SR s6, _thread_offset_to_s6(t1)
SR s7, _thread_offset_to_s7(t1)
SR s8, _thread_offset_to_s8(t1)
SR s9, _thread_offset_to_s9(t1)
SR s10, _thread_offset_to_s10(t1)
SR s11, _thread_offset_to_s11(t1)
/*
* Save stack pointer of current thread and set the default return value
* of z_swap to _k_neg_eagain for the thread.
*/
sw sp, _thread_offset_to_sp(t1)
SR sp, _thread_offset_to_sp(t1)
la t2, _k_neg_eagain
lw t3, 0x00(t2)
sw t3, _thread_offset_to_swap_return_value(t1)
/* Get next thread to schedule. */
lw t1, _kernel_offset_to_ready_q_cache(t0)
LR t1, _kernel_offset_to_ready_q_cache(t0)
/*
* Set _kernel.current to new thread loaded in t1
*/
sw t1, _kernel_offset_to_current(t0)
SR t1, _kernel_offset_to_current(t0)
/* Switch to new thread stack */
lw sp, _thread_offset_to_sp(t1)
LR sp, _thread_offset_to_sp(t1)
/* Restore callee-saved registers of new thread */
lw s0, _thread_offset_to_s0(t1)
lw s1, _thread_offset_to_s1(t1)
lw s2, _thread_offset_to_s2(t1)
lw s3, _thread_offset_to_s3(t1)
lw s4, _thread_offset_to_s4(t1)
lw s5, _thread_offset_to_s5(t1)
lw s6, _thread_offset_to_s6(t1)
lw s7, _thread_offset_to_s7(t1)
lw s8, _thread_offset_to_s8(t1)
lw s9, _thread_offset_to_s9(t1)
lw s10, _thread_offset_to_s10(t1)
lw s11, _thread_offset_to_s11(t1)
LR s0, _thread_offset_to_s0(t1)
LR s1, _thread_offset_to_s1(t1)
LR s2, _thread_offset_to_s2(t1)
LR s3, _thread_offset_to_s3(t1)
LR s4, _thread_offset_to_s4(t1)
LR s5, _thread_offset_to_s5(t1)
LR s6, _thread_offset_to_s6(t1)
LR s7, _thread_offset_to_s7(t1)
LR s8, _thread_offset_to_s8(t1)
LR s9, _thread_offset_to_s9(t1)
LR s10, _thread_offset_to_s10(t1)
LR s11, _thread_offset_to_s11(t1)
#ifdef CONFIG_EXECUTION_BENCHMARKING
addi sp, sp, -__z_arch_esf_t_SIZEOF
sw ra, __z_arch_esf_t_ra_OFFSET(sp)
sw gp, __z_arch_esf_t_gp_OFFSET(sp)
sw tp, __z_arch_esf_t_tp_OFFSET(sp)
sw t0, __z_arch_esf_t_t0_OFFSET(sp)
sw t1, __z_arch_esf_t_t1_OFFSET(sp)
sw t2, __z_arch_esf_t_t2_OFFSET(sp)
sw t3, __z_arch_esf_t_t3_OFFSET(sp)
sw t4, __z_arch_esf_t_t4_OFFSET(sp)
sw t5, __z_arch_esf_t_t5_OFFSET(sp)
sw t6, __z_arch_esf_t_t6_OFFSET(sp)
sw a0, __z_arch_esf_t_a0_OFFSET(sp)
sw a1, __z_arch_esf_t_a1_OFFSET(sp)
sw a2, __z_arch_esf_t_a2_OFFSET(sp)
sw a3, __z_arch_esf_t_a3_OFFSET(sp)
sw a4, __z_arch_esf_t_a4_OFFSET(sp)
sw a5, __z_arch_esf_t_a5_OFFSET(sp)
sw a6, __z_arch_esf_t_a6_OFFSET(sp)
sw a7, __z_arch_esf_t_a7_OFFSET(sp)
SR ra, __z_arch_esf_t_ra_OFFSET(sp)
SR gp, __z_arch_esf_t_gp_OFFSET(sp)
SR tp, __z_arch_esf_t_tp_OFFSET(sp)
SR t0, __z_arch_esf_t_t0_OFFSET(sp)
SR t1, __z_arch_esf_t_t1_OFFSET(sp)
SR t2, __z_arch_esf_t_t2_OFFSET(sp)
SR t3, __z_arch_esf_t_t3_OFFSET(sp)
SR t4, __z_arch_esf_t_t4_OFFSET(sp)
SR t5, __z_arch_esf_t_t5_OFFSET(sp)
SR t6, __z_arch_esf_t_t6_OFFSET(sp)
SR a0, __z_arch_esf_t_a0_OFFSET(sp)
SR a1, __z_arch_esf_t_a1_OFFSET(sp)
SR a2, __z_arch_esf_t_a2_OFFSET(sp)
SR a3, __z_arch_esf_t_a3_OFFSET(sp)
SR a4, __z_arch_esf_t_a4_OFFSET(sp)
SR a5, __z_arch_esf_t_a5_OFFSET(sp)
SR a6, __z_arch_esf_t_a6_OFFSET(sp)
SR a7, __z_arch_esf_t_a7_OFFSET(sp)
call read_timer_end_of_swap
lw ra, __z_arch_esf_t_ra_OFFSET(sp)
lw gp, __z_arch_esf_t_gp_OFFSET(sp)
lw tp, __z_arch_esf_t_tp_OFFSET(sp)
lw t0, __z_arch_esf_t_t0_OFFSET(sp)
lw t1, __z_arch_esf_t_t1_OFFSET(sp)
lw t2, __z_arch_esf_t_t2_OFFSET(sp)
lw t3, __z_arch_esf_t_t3_OFFSET(sp)
lw t4, __z_arch_esf_t_t4_OFFSET(sp)
lw t5, __z_arch_esf_t_t5_OFFSET(sp)
lw t6, __z_arch_esf_t_t6_OFFSET(sp)
lw a0, __z_arch_esf_t_a0_OFFSET(sp)
lw a1, __z_arch_esf_t_a1_OFFSET(sp)
lw a2, __z_arch_esf_t_a2_OFFSET(sp)
lw a3, __z_arch_esf_t_a3_OFFSET(sp)
lw a4, __z_arch_esf_t_a4_OFFSET(sp)
lw a5, __z_arch_esf_t_a5_OFFSET(sp)
lw a6, __z_arch_esf_t_a6_OFFSET(sp)
lw a7, __z_arch_esf_t_a7_OFFSET(sp)
LR ra, __z_arch_esf_t_ra_OFFSET(sp)
LR gp, __z_arch_esf_t_gp_OFFSET(sp)
LR tp, __z_arch_esf_t_tp_OFFSET(sp)
LR t0, __z_arch_esf_t_t0_OFFSET(sp)
LR t1, __z_arch_esf_t_t1_OFFSET(sp)
LR t2, __z_arch_esf_t_t2_OFFSET(sp)
LR t3, __z_arch_esf_t_t3_OFFSET(sp)
LR t4, __z_arch_esf_t_t4_OFFSET(sp)
LR t5, __z_arch_esf_t_t5_OFFSET(sp)
LR t6, __z_arch_esf_t_t6_OFFSET(sp)
LR a0, __z_arch_esf_t_a0_OFFSET(sp)
LR a1, __z_arch_esf_t_a1_OFFSET(sp)
LR a2, __z_arch_esf_t_a2_OFFSET(sp)
LR a3, __z_arch_esf_t_a3_OFFSET(sp)
LR a4, __z_arch_esf_t_a4_OFFSET(sp)
LR a5, __z_arch_esf_t_a5_OFFSET(sp)
LR a6, __z_arch_esf_t_a6_OFFSET(sp)
LR a7, __z_arch_esf_t_a7_OFFSET(sp)
/* Release stack space */
addi sp, sp, __z_arch_esf_t_SIZEOF
@@ -419,32 +419,32 @@ no_reschedule:
#endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */
/* Restore MEPC register */
lw t0, __z_arch_esf_t_mepc_OFFSET(sp)
LR t0, __z_arch_esf_t_mepc_OFFSET(sp)
csrw mepc, t0
/* Restore SOC-specific MSTATUS register */
lw t0, __z_arch_esf_t_mstatus_OFFSET(sp)
LR t0, __z_arch_esf_t_mstatus_OFFSET(sp)
csrw SOC_MSTATUS_REG, t0
/* Restore caller-saved registers from thread stack */
lw ra, __z_arch_esf_t_ra_OFFSET(sp)
lw gp, __z_arch_esf_t_gp_OFFSET(sp)
lw tp, __z_arch_esf_t_tp_OFFSET(sp)
lw t0, __z_arch_esf_t_t0_OFFSET(sp)
lw t1, __z_arch_esf_t_t1_OFFSET(sp)
lw t2, __z_arch_esf_t_t2_OFFSET(sp)
lw t3, __z_arch_esf_t_t3_OFFSET(sp)
lw t4, __z_arch_esf_t_t4_OFFSET(sp)
lw t5, __z_arch_esf_t_t5_OFFSET(sp)
lw t6, __z_arch_esf_t_t6_OFFSET(sp)
lw a0, __z_arch_esf_t_a0_OFFSET(sp)
lw a1, __z_arch_esf_t_a1_OFFSET(sp)
lw a2, __z_arch_esf_t_a2_OFFSET(sp)
lw a3, __z_arch_esf_t_a3_OFFSET(sp)
lw a4, __z_arch_esf_t_a4_OFFSET(sp)
lw a5, __z_arch_esf_t_a5_OFFSET(sp)
lw a6, __z_arch_esf_t_a6_OFFSET(sp)
lw a7, __z_arch_esf_t_a7_OFFSET(sp)
LR ra, __z_arch_esf_t_ra_OFFSET(sp)
LR gp, __z_arch_esf_t_gp_OFFSET(sp)
LR tp, __z_arch_esf_t_tp_OFFSET(sp)
LR t0, __z_arch_esf_t_t0_OFFSET(sp)
LR t1, __z_arch_esf_t_t1_OFFSET(sp)
LR t2, __z_arch_esf_t_t2_OFFSET(sp)
LR t3, __z_arch_esf_t_t3_OFFSET(sp)
LR t4, __z_arch_esf_t_t4_OFFSET(sp)
LR t5, __z_arch_esf_t_t5_OFFSET(sp)
LR t6, __z_arch_esf_t_t6_OFFSET(sp)
LR a0, __z_arch_esf_t_a0_OFFSET(sp)
LR a1, __z_arch_esf_t_a1_OFFSET(sp)
LR a2, __z_arch_esf_t_a2_OFFSET(sp)
LR a3, __z_arch_esf_t_a3_OFFSET(sp)
LR a4, __z_arch_esf_t_a4_OFFSET(sp)
LR a5, __z_arch_esf_t_a5_OFFSET(sp)
LR a6, __z_arch_esf_t_a6_OFFSET(sp)
LR a7, __z_arch_esf_t_a7_OFFSET(sp)
/* Release stack space */
addi sp, sp, __z_arch_esf_t_SIZEOF

@@ -26,45 +26,45 @@ SECTION_FUNC(exception.other, __swap)
#ifdef CONFIG_EXECUTION_BENCHMARKING
addi sp, sp, -__z_arch_esf_t_SIZEOF
sw ra, __z_arch_esf_t_ra_OFFSET(sp)
sw gp, __z_arch_esf_t_gp_OFFSET(sp)
sw tp, __z_arch_esf_t_tp_OFFSET(sp)
sw t0, __z_arch_esf_t_t0_OFFSET(sp)
sw t1, __z_arch_esf_t_t1_OFFSET(sp)
sw t2, __z_arch_esf_t_t2_OFFSET(sp)
sw t3, __z_arch_esf_t_t3_OFFSET(sp)
sw t4, __z_arch_esf_t_t4_OFFSET(sp)
sw t5, __z_arch_esf_t_t5_OFFSET(sp)
sw t6, __z_arch_esf_t_t6_OFFSET(sp)
sw a0, __z_arch_esf_t_a0_OFFSET(sp)
sw a1, __z_arch_esf_t_a1_OFFSET(sp)
sw a2, __z_arch_esf_t_a2_OFFSET(sp)
sw a3, __z_arch_esf_t_a3_OFFSET(sp)
sw a4, __z_arch_esf_t_a4_OFFSET(sp)
sw a5, __z_arch_esf_t_a5_OFFSET(sp)
sw a6, __z_arch_esf_t_a6_OFFSET(sp)
sw a7, __z_arch_esf_t_a7_OFFSET(sp)
SR ra, __z_arch_esf_t_ra_OFFSET(sp)
SR gp, __z_arch_esf_t_gp_OFFSET(sp)
SR tp, __z_arch_esf_t_tp_OFFSET(sp)
SR t0, __z_arch_esf_t_t0_OFFSET(sp)
SR t1, __z_arch_esf_t_t1_OFFSET(sp)
SR t2, __z_arch_esf_t_t2_OFFSET(sp)
SR t3, __z_arch_esf_t_t3_OFFSET(sp)
SR t4, __z_arch_esf_t_t4_OFFSET(sp)
SR t5, __z_arch_esf_t_t5_OFFSET(sp)
SR t6, __z_arch_esf_t_t6_OFFSET(sp)
SR a0, __z_arch_esf_t_a0_OFFSET(sp)
SR a1, __z_arch_esf_t_a1_OFFSET(sp)
SR a2, __z_arch_esf_t_a2_OFFSET(sp)
SR a3, __z_arch_esf_t_a3_OFFSET(sp)
SR a4, __z_arch_esf_t_a4_OFFSET(sp)
SR a5, __z_arch_esf_t_a5_OFFSET(sp)
SR a6, __z_arch_esf_t_a6_OFFSET(sp)
SR a7, __z_arch_esf_t_a7_OFFSET(sp)
call read_timer_start_of_swap
lw ra, __z_arch_esf_t_ra_OFFSET(sp)
lw gp, __z_arch_esf_t_gp_OFFSET(sp)
lw tp, __z_arch_esf_t_tp_OFFSET(sp)
lw t0, __z_arch_esf_t_t0_OFFSET(sp)
lw t1, __z_arch_esf_t_t1_OFFSET(sp)
lw t2, __z_arch_esf_t_t2_OFFSET(sp)
lw t3, __z_arch_esf_t_t3_OFFSET(sp)
lw t4, __z_arch_esf_t_t4_OFFSET(sp)
lw t5, __z_arch_esf_t_t5_OFFSET(sp)
lw t6, __z_arch_esf_t_t6_OFFSET(sp)
lw a0, __z_arch_esf_t_a0_OFFSET(sp)
lw a1, __z_arch_esf_t_a1_OFFSET(sp)
lw a2, __z_arch_esf_t_a2_OFFSET(sp)
lw a3, __z_arch_esf_t_a3_OFFSET(sp)
lw a4, __z_arch_esf_t_a4_OFFSET(sp)
lw a5, __z_arch_esf_t_a5_OFFSET(sp)
lw a6, __z_arch_esf_t_a6_OFFSET(sp)
lw a7, __z_arch_esf_t_a7_OFFSET(sp)
LR ra, __z_arch_esf_t_ra_OFFSET(sp)
LR gp, __z_arch_esf_t_gp_OFFSET(sp)
LR tp, __z_arch_esf_t_tp_OFFSET(sp)
LR t0, __z_arch_esf_t_t0_OFFSET(sp)
LR t1, __z_arch_esf_t_t1_OFFSET(sp)
LR t2, __z_arch_esf_t_t2_OFFSET(sp)
LR t3, __z_arch_esf_t_t3_OFFSET(sp)
LR t4, __z_arch_esf_t_t4_OFFSET(sp)
LR t5, __z_arch_esf_t_t5_OFFSET(sp)
LR t6, __z_arch_esf_t_t6_OFFSET(sp)
LR a0, __z_arch_esf_t_a0_OFFSET(sp)
LR a1, __z_arch_esf_t_a1_OFFSET(sp)
LR a2, __z_arch_esf_t_a2_OFFSET(sp)
LR a3, __z_arch_esf_t_a3_OFFSET(sp)
LR a4, __z_arch_esf_t_a4_OFFSET(sp)
LR a5, __z_arch_esf_t_a5_OFFSET(sp)
LR a6, __z_arch_esf_t_a6_OFFSET(sp)
LR a7, __z_arch_esf_t_a7_OFFSET(sp)
/* Release stack space */
addi sp, sp, __z_arch_esf_t_SIZEOF
@@ -83,7 +83,7 @@ SECTION_FUNC(exception.other, __swap)
la t0, _kernel
/* Get pointer to _kernel.current */
lw t1, _kernel_offset_to_current(t0)
LR t1, _kernel_offset_to_current(t0)
/* Load return value of __swap function in temp register t2 */
lw t2, _thread_offset_to_swap_return_value(t1)

@@ -33,10 +33,10 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
stack_size - sizeof(struct __esf));
/* Setup the initial stack frame */
stack_init->a0 = (u32_t)thread_func;
stack_init->a1 = (u32_t)arg1;
stack_init->a2 = (u32_t)arg2;
stack_init->a3 = (u32_t)arg3;
stack_init->a0 = (ulong_t)thread_func;
stack_init->a1 = (ulong_t)arg1;
stack_init->a2 = (ulong_t)arg2;
stack_init->a3 = (ulong_t)arg3;
/*
* Following the RISC-V architecture,
* the MSTATUS register (used to globally enable/disable interrupt),
@@ -47,7 +47,7 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
* This shall allow to handle nested interrupts.
*
* Given that context switching is performed via a system call exception
* within the RISCV32 architecture implementation, initially set:
* within the RISCV architecture implementation, initially set:
* 1) MSTATUS to SOC_MSTATUS_DEF_RESTORE in the thread stack to enable
* interrupts when the newly created thread will be scheduled;
* 2) MEPC to the address of the z_thread_entry_wrapper in the thread
@@ -61,7 +61,7 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
* thread stack.
*/
stack_init->mstatus = SOC_MSTATUS_DEF_RESTORE;
stack_init->mepc = (u32_t)z_thread_entry_wrapper;
stack_init->mepc = (ulong_t)z_thread_entry_wrapper;
thread->callee_saved.sp = (u32_t)stack_init;
thread->callee_saved.sp = (ulong_t)stack_init;
}

@@ -27,20 +27,20 @@
* saved/restored when a cooperative context switch occurs.
*/
struct _callee_saved {
u32_t sp; /* Stack pointer, (x2 register) */
ulong_t sp; /* Stack pointer, (x2 register) */
u32_t s0; /* saved register/frame pointer */
u32_t s1; /* saved register */
u32_t s2; /* saved register */
u32_t s3; /* saved register */
u32_t s4; /* saved register */
u32_t s5; /* saved register */
u32_t s6; /* saved register */
u32_t s7; /* saved register */
u32_t s8; /* saved register */
u32_t s9; /* saved register */
u32_t s10; /* saved register */
u32_t s11; /* saved register */
ulong_t s0; /* saved register/frame pointer */
ulong_t s1; /* saved register */
ulong_t s2; /* saved register */
ulong_t s3; /* saved register */
ulong_t s4; /* saved register */
ulong_t s5; /* saved register */
ulong_t s6; /* saved register */
ulong_t s7; /* saved register */
ulong_t s8; /* saved register */
ulong_t s9; /* saved register */
ulong_t s10; /* saved register */
ulong_t s11; /* saved register */
};
typedef struct _callee_saved _callee_saved_t;

@@ -31,6 +31,18 @@ extern "C" {
/* stacks, for RISCV architecture stack should be 16byte-aligned */
#define STACK_ALIGN 16
#ifdef CONFIG_64BIT
#define LR ld
#define SR sd
#define RV_REGSIZE 8
#define RV_REGSHIFT 3
#else
#define LR lw
#define SR sw
#define RV_REGSIZE 4
#define RV_REGSHIFT 2
#endif
#ifndef _ASMLANGUAGE
#include <sys/util.h>
@@ -91,7 +103,8 @@ void z_irq_spurious(void *unused);
*/
static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
{
unsigned int key, mstatus;
unsigned int key;
ulong_t mstatus;
__asm__ volatile ("csrrc %0, mstatus, %1"
: "=r" (mstatus)
@@ -108,7 +121,7 @@ static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
*/
static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
{
unsigned int mstatus;
ulong_t mstatus;
__asm__ volatile ("csrrs %0, mstatus, %1"
: "=r" (mstatus)

@@ -42,29 +42,29 @@ struct soc_esf {
#endif
struct __esf {
u32_t ra; /* return address */
u32_t gp; /* global pointer */
u32_t tp; /* thread pointer */
ulong_t ra; /* return address */
ulong_t gp; /* global pointer */
ulong_t tp; /* thread pointer */
u32_t t0; /* Caller-saved temporary register */
u32_t t1; /* Caller-saved temporary register */
u32_t t2; /* Caller-saved temporary register */
u32_t t3; /* Caller-saved temporary register */
u32_t t4; /* Caller-saved temporary register */
u32_t t5; /* Caller-saved temporary register */
u32_t t6; /* Caller-saved temporary register */
ulong_t t0; /* Caller-saved temporary register */
ulong_t t1; /* Caller-saved temporary register */
ulong_t t2; /* Caller-saved temporary register */
ulong_t t3; /* Caller-saved temporary register */
ulong_t t4; /* Caller-saved temporary register */
ulong_t t5; /* Caller-saved temporary register */
ulong_t t6; /* Caller-saved temporary register */
u32_t a0; /* function argument/return value */
u32_t a1; /* function argument */
u32_t a2; /* function argument */
u32_t a3; /* function argument */
u32_t a4; /* function argument */
u32_t a5; /* function argument */
u32_t a6; /* function argument */
u32_t a7; /* function argument */
ulong_t a0; /* function argument/return value */
ulong_t a1; /* function argument */
ulong_t a2; /* function argument */
ulong_t a3; /* function argument */
ulong_t a4; /* function argument */
ulong_t a5; /* function argument */
ulong_t a6; /* function argument */
ulong_t a7; /* function argument */
u32_t mepc; /* machine exception program counter */
u32_t mstatus; /* machine status register */
ulong_t mepc; /* machine exception program counter */
ulong_t mstatus; /* machine status register */
#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
struct soc_esf soc_context;

@@ -23,6 +23,9 @@ typedef unsigned short u16_t;
typedef unsigned int u32_t;
typedef unsigned long long u64_t;
/* 32 bits on ILP32 builds, 64 bits on LP64 builds */
typedef unsigned long ulong_t;
#ifdef __cplusplus
}
#endif
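Because ulong_t is plain unsigned long, it is register-sized on both
ABIs that RISC-V uses; a hypothetical compile-time check of that
assumption, for illustration only:

/* ILP32 and LP64 both give 'unsigned long' the same width as a pointer,
 * i.e. the machine register width. */
_Static_assert(sizeof(unsigned long) == sizeof(void *),
	       "ulong_t must match the native register width");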

@@ -43,10 +43,17 @@
/* SOC-specific MCAUSE bitfields */
#ifdef CONFIG_64BIT
/* Interrupt Mask */
#define SOC_MCAUSE_IRQ_MASK (1 << 63)
/* Exception code Mask */
#define SOC_MCAUSE_EXP_MASK 0x7FFFFFFFFFFFFFFF
#else
/* Interrupt Mask */
#define SOC_MCAUSE_IRQ_MASK (1 << 31)
/* Exception code Mask */
#define SOC_MCAUSE_EXP_MASK 0x7FFFFFFF
#endif
/* ECALL exception number */
#define SOC_MCAUSE_ECALL_EXP RISCV_MACHINE_ECALL_EXP