kernel: rename NANO_ESF

NANO_ESF is now called z_arch_esf_t, conforming to our naming
convention.

This needs to remain a typedef due to how our offset generation
header mechanism works: the type name is token-pasted into the
generated symbol names, which requires a single identifier.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Authored by Andrew Boie on 2019-07-16 15:21:19 -07:00; committed by Andrew Boie
parent c9a4bd47a7
commit 96571a8c40
44 changed files with 295 additions and 295 deletions
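
A simplified sketch of that mechanism, with stand-in macros and a
stand-in struct rather than the exact gen_offset.h definitions: the
offset macros build assembler symbols such as __z_arch_esf_t_ra_OFFSET
by pasting the type name into the symbol, and pasting only works on a
single identifier, never on a two-token name like "struct __esf".

    #include <stddef.h>

    /* Stand-in for the real arch ESF; the typedef name is what gets
     * pasted into the generated symbol names below.
     */
    typedef struct __esf {
        unsigned int ra;
        unsigned int r1;
    } z_arch_esf_t;

    /* Emit an absolute assembler symbol carrying a compile-time value */
    #define GEN_ABSOLUTE_SYM(name, value) \
        __asm__(".globl " #name "\n\t.equ " #name ", %c0" : : "n"(value))

    /* __##type##_##member##_OFFSET only pastes if "type" is one token */
    #define GEN_OFFSET_SYM(type, member) \
        GEN_ABSOLUTE_SYM(__##type##_##member##_OFFSET, offsetof(type, member))

    void gen_offsets(void)
    {
        GEN_OFFSET_SYM(z_arch_esf_t, ra);  /* -> __z_arch_esf_t_ra_OFFSET */
        GEN_OFFSET_SYM(z_arch_esf_t, r1);  /* -> __z_arch_esf_t_r1_OFFSET */
        GEN_ABSOLUTE_SYM(__z_arch_esf_t_SIZEOF, sizeof(z_arch_esf_t));
    }

The build extracts those absolute symbols from the object file into a
generated header that the assembly sources include, which is why the
.S hunks below can refer to __z_arch_esf_t_ra_OFFSET and friends.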

@ -18,7 +18,7 @@
#include <arch/cpu.h>
#include <logging/log_ctrl.h>
void z_arc_fatal_error(unsigned int reason, const NANO_ESF *esf)
void z_arc_fatal_error(unsigned int reason, const z_arch_esf_t *esf)
{
if (reason == K_ERR_CPU_EXCEPTION) {
z_fatal_print("Faulting instruction address = 0x%lx",

@ -363,7 +363,7 @@ static void dump_exception_info(u32_t vector, u32_t cause, u32_t parameter)
* invokes the user provided routine k_sys_fatal_error_handler() which is
* responsible for implementing the error handling policy.
*/
void _Fault(NANO_ESF *esf)
void _Fault(z_arch_esf_t *esf)
{
u32_t vector, cause, parameter;
u32_t exc_addr = z_arc_v2_aux_reg_read(_ARC_V2_EFA);

@ -61,7 +61,7 @@ extern void z_arc_userspace_enter(k_thread_entry_t user_entry, void *p1,
extern void z_arch_switch(void *switch_to, void **switched_from);
extern void z_arc_fatal_error(unsigned int reason, const NANO_ESF *esf);
extern void z_arc_fatal_error(unsigned int reason, const z_arch_esf_t *esf);
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus

@ -19,7 +19,7 @@
#include <kernel_structs.h>
#include <logging/log_ctrl.h>
static void esf_dump(const NANO_ESF *esf)
static void esf_dump(const z_arch_esf_t *esf)
{
z_fatal_print("r0/a1: 0x%08x r1/a2: 0x%08x r2/a3: 0x%08x",
esf->basic.a1, esf->basic.a2, esf->basic.a3);
@ -41,7 +41,7 @@ static void esf_dump(const NANO_ESF *esf)
esf->basic.pc);
}
void z_arm_fatal_error(unsigned int reason, const NANO_ESF *esf)
void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf)
{
if (esf != NULL) {
@ -50,7 +50,7 @@ void z_arm_fatal_error(unsigned int reason, const NANO_ESF *esf)
z_fatal_error(reason, esf);
}
void z_do_kernel_oops(const NANO_ESF *esf)
void z_do_kernel_oops(const z_arch_esf_t *esf)
{
z_arm_fatal_error(esf->basic.r0, esf);
}
@ -58,7 +58,7 @@ void z_do_kernel_oops(const NANO_ESF *esf)
FUNC_NORETURN void z_arch_syscall_oops(void *ssf_ptr)
{
u32_t *ssf_contents = ssf_ptr;
NANO_ESF oops_esf = { 0 };
z_arch_esf_t oops_esf = { 0 };
/* TODO: Copy the rest of the register set out of ssf_ptr */
oops_esf.basic.pc = ssf_contents[3];

@ -136,7 +136,7 @@
*/
#if (CONFIG_FAULT_DUMP == 1)
static void FaultShow(const NANO_ESF *esf, int fault)
static void FaultShow(const z_arch_esf_t *esf, int fault)
{
PR_EXC("Fault! EXC #%d", fault);
@ -155,7 +155,7 @@ static void FaultShow(const NANO_ESF *esf, int fault)
*
* For Dump level 0, no information needs to be generated.
*/
static void FaultShow(const NANO_ESF *esf, int fault)
static void FaultShow(const z_arch_esf_t *esf, int fault)
{
(void)esf;
(void)fault;
@ -175,7 +175,7 @@ static const struct z_exc_handle exceptions[] = {
*
* @return true if error is recoverable, otherwise return false.
*/
static bool memory_fault_recoverable(NANO_ESF *esf)
static bool memory_fault_recoverable(z_arch_esf_t *esf)
{
#ifdef CONFIG_USERSPACE
for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
@ -210,7 +210,7 @@ u32_t z_check_thread_stack_fail(const u32_t fault_addr,
*
* @return error code to identify the fatal error reason
*/
static u32_t MpuFault(NANO_ESF *esf, int fromHardFault, bool *recoverable)
static u32_t MpuFault(z_arch_esf_t *esf, int fromHardFault, bool *recoverable)
{
u32_t reason = K_ERR_CPU_EXCEPTION;
u32_t mmfar = -EINVAL;
@ -333,7 +333,7 @@ static u32_t MpuFault(NANO_ESF *esf, int fromHardFault, bool *recoverable)
*
* @return N/A
*/
static int BusFault(NANO_ESF *esf, int fromHardFault, bool *recoverable)
static int BusFault(z_arch_esf_t *esf, int fromHardFault, bool *recoverable)
{
u32_t reason = K_ERR_CPU_EXCEPTION;
@ -487,7 +487,7 @@ static int BusFault(NANO_ESF *esf, int fromHardFault, bool *recoverable)
*
* @return error code to identify the fatal error reason
*/
static u32_t UsageFault(const NANO_ESF *esf)
static u32_t UsageFault(const z_arch_esf_t *esf)
{
u32_t reason = K_ERR_CPU_EXCEPTION;
@ -543,7 +543,7 @@ static u32_t UsageFault(const NANO_ESF *esf)
*
* @return N/A
*/
static void SecureFault(const NANO_ESF *esf)
static void SecureFault(const z_arch_esf_t *esf)
{
PR_FAULT_INFO("***** SECURE FAULT *****");
@ -582,7 +582,7 @@ static void SecureFault(const NANO_ESF *esf)
*
* @return N/A
*/
static void DebugMonitor(const NANO_ESF *esf)
static void DebugMonitor(const z_arch_esf_t *esf)
{
ARG_UNUSED(esf);
@ -602,7 +602,7 @@ static void DebugMonitor(const NANO_ESF *esf)
*
* @return error code to identify the fatal error reason
*/
static u32_t HardFault(NANO_ESF *esf, bool *recoverable)
static u32_t HardFault(z_arch_esf_t *esf, bool *recoverable)
{
u32_t reason = K_ERR_CPU_EXCEPTION;
@ -644,7 +644,7 @@ static u32_t HardFault(NANO_ESF *esf, bool *recoverable)
*
* @return N/A
*/
static void ReservedException(const NANO_ESF *esf, int fault)
static void ReservedException(const z_arch_esf_t *esf, int fault)
{
ARG_UNUSED(esf);
@ -654,7 +654,7 @@ static void ReservedException(const NANO_ESF *esf, int fault)
}
/* Handler function for ARM fault conditions. */
static u32_t FaultHandle(NANO_ESF *esf, int fault, bool *recoverable)
static u32_t FaultHandle(z_arch_esf_t *esf, int fault, bool *recoverable)
{
u32_t reason = K_ERR_CPU_EXCEPTION;
@ -708,7 +708,7 @@ static u32_t FaultHandle(NANO_ESF *esf, int fault, bool *recoverable)
*
* @param secure_esf Pointer to the secure stack frame.
*/
static void SecureStackDump(const NANO_ESF *secure_esf)
static void SecureStackDump(const z_arch_esf_t *secure_esf)
{
/*
* In case a Non-Secure exception interrupted the Secure
@ -733,7 +733,7 @@ static void SecureStackDump(const NANO_ESF *secure_esf)
* Non-Secure exception entry.
*/
top_of_sec_stack += ADDITIONAL_STATE_CONTEXT_WORDS;
secure_esf = (const NANO_ESF *)top_of_sec_stack;
secure_esf = (const z_arch_esf_t *)top_of_sec_stack;
sec_ret_addr = secure_esf->basic.pc;
} else {
/* Exception during Non-Secure function call.
@ -780,7 +780,7 @@ static void SecureStackDump(const NANO_ESF *secure_esf)
* Note: exc_return argument shall only be used by the Fault handler if we are
* running a Secure Firmware.
*/
void _Fault(NANO_ESF *esf, u32_t exc_return)
void _Fault(z_arch_esf_t *esf, u32_t exc_return)
{
u32_t reason = K_ERR_CPU_EXCEPTION;
int fault = SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk;
@ -815,13 +815,13 @@ void _Fault(NANO_ESF *esf, u32_t exc_return)
* and supply it to the fault handing function.
*/
if (exc_return & EXC_RETURN_MODE_THREAD) {
esf = (NANO_ESF *)__TZ_get_PSP_NS();
esf = (z_arch_esf_t *)__TZ_get_PSP_NS();
if ((SCB->ICSR & SCB_ICSR_RETTOBASE_Msk) == 0) {
PR_EXC("RETTOBASE does not match EXC_RETURN");
goto _exit_fatal;
}
} else {
esf = (NANO_ESF *)__TZ_get_MSP_NS();
esf = (z_arch_esf_t *)__TZ_get_MSP_NS();
if ((SCB->ICSR & SCB_ICSR_RETTOBASE_Msk) != 0) {
PR_EXC("RETTOBASE does not match EXC_RETURN");
goto _exit_fatal;

@ -146,7 +146,7 @@ extern FUNC_NORETURN void z_arm_userspace_enter(k_thread_entry_t user_entry,
u32_t stack_end,
u32_t stack_start);
extern void z_arm_fatal_error(unsigned int reason, const NANO_ESF *esf);
extern void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf);
#endif /* _ASMLANGUAGE */

@ -34,35 +34,35 @@ GTEXT(_offload_routine)
*/
SECTION_FUNC(exception.entry, _exception)
/* Reserve thread stack space for saving context */
subi sp, sp, __NANO_ESF_SIZEOF
subi sp, sp, __z_arch_esf_t_SIZEOF
/* Preserve all caller-saved registers onto the thread's stack */
stw ra, __NANO_ESF_ra_OFFSET(sp)
stw r1, __NANO_ESF_r1_OFFSET(sp)
stw r2, __NANO_ESF_r2_OFFSET(sp)
stw r3, __NANO_ESF_r3_OFFSET(sp)
stw r4, __NANO_ESF_r4_OFFSET(sp)
stw r5, __NANO_ESF_r5_OFFSET(sp)
stw r6, __NANO_ESF_r6_OFFSET(sp)
stw r7, __NANO_ESF_r7_OFFSET(sp)
stw r8, __NANO_ESF_r8_OFFSET(sp)
stw r9, __NANO_ESF_r9_OFFSET(sp)
stw r10, __NANO_ESF_r10_OFFSET(sp)
stw r11, __NANO_ESF_r11_OFFSET(sp)
stw r12, __NANO_ESF_r12_OFFSET(sp)
stw r13, __NANO_ESF_r13_OFFSET(sp)
stw r14, __NANO_ESF_r14_OFFSET(sp)
stw r15, __NANO_ESF_r15_OFFSET(sp)
stw ra, __z_arch_esf_t_ra_OFFSET(sp)
stw r1, __z_arch_esf_t_r1_OFFSET(sp)
stw r2, __z_arch_esf_t_r2_OFFSET(sp)
stw r3, __z_arch_esf_t_r3_OFFSET(sp)
stw r4, __z_arch_esf_t_r4_OFFSET(sp)
stw r5, __z_arch_esf_t_r5_OFFSET(sp)
stw r6, __z_arch_esf_t_r6_OFFSET(sp)
stw r7, __z_arch_esf_t_r7_OFFSET(sp)
stw r8, __z_arch_esf_t_r8_OFFSET(sp)
stw r9, __z_arch_esf_t_r9_OFFSET(sp)
stw r10, __z_arch_esf_t_r10_OFFSET(sp)
stw r11, __z_arch_esf_t_r11_OFFSET(sp)
stw r12, __z_arch_esf_t_r12_OFFSET(sp)
stw r13, __z_arch_esf_t_r13_OFFSET(sp)
stw r14, __z_arch_esf_t_r14_OFFSET(sp)
stw r15, __z_arch_esf_t_r15_OFFSET(sp)
/* Store value of estatus control register */
rdctl et, estatus
stw et, __NANO_ESF_estatus_OFFSET(sp)
stw et, __z_arch_esf_t_estatus_OFFSET(sp)
/* ea-4 is the address of the instruction when the exception happened,
* put this in the stack frame as well
*/
addi r15, ea, -4
stw r15, __NANO_ESF_instr_OFFSET(sp)
stw r15, __z_arch_esf_t_instr_OFFSET(sp)
/* Figure out whether we are here because of an interrupt or an
* exception. If an interrupt, switch stacks and enter IRQ handling
@ -156,7 +156,7 @@ not_interrupt:
*
* We earlier put ea - 4 in the stack frame, replace it with just ea
*/
stw ea, __NANO_ESF_instr_OFFSET(sp)
stw ea, __z_arch_esf_t_instr_OFFSET(sp)
#ifdef CONFIG_IRQ_OFFLOAD
/* Check the contents of _offload_routine. If non-NULL, jump into
@ -192,35 +192,35 @@ _exception_exit:
* and return to the interrupted context */
/* Return address from the exception */
ldw ea, __NANO_ESF_instr_OFFSET(sp)
ldw ea, __z_arch_esf_t_instr_OFFSET(sp)
/* Restore estatus
* XXX is this right??? */
ldw r5, __NANO_ESF_estatus_OFFSET(sp)
ldw r5, __z_arch_esf_t_estatus_OFFSET(sp)
wrctl estatus, r5
/* Restore caller-saved registers */
ldw ra, __NANO_ESF_ra_OFFSET(sp)
ldw r1, __NANO_ESF_r1_OFFSET(sp)
ldw r2, __NANO_ESF_r2_OFFSET(sp)
ldw r3, __NANO_ESF_r3_OFFSET(sp)
ldw r4, __NANO_ESF_r4_OFFSET(sp)
ldw r5, __NANO_ESF_r5_OFFSET(sp)
ldw r6, __NANO_ESF_r6_OFFSET(sp)
ldw r7, __NANO_ESF_r7_OFFSET(sp)
ldw r8, __NANO_ESF_r8_OFFSET(sp)
ldw r9, __NANO_ESF_r9_OFFSET(sp)
ldw r10, __NANO_ESF_r10_OFFSET(sp)
ldw r11, __NANO_ESF_r11_OFFSET(sp)
ldw r12, __NANO_ESF_r12_OFFSET(sp)
ldw r13, __NANO_ESF_r13_OFFSET(sp)
ldw r14, __NANO_ESF_r14_OFFSET(sp)
ldw r15, __NANO_ESF_r15_OFFSET(sp)
ldw ra, __z_arch_esf_t_ra_OFFSET(sp)
ldw r1, __z_arch_esf_t_r1_OFFSET(sp)
ldw r2, __z_arch_esf_t_r2_OFFSET(sp)
ldw r3, __z_arch_esf_t_r3_OFFSET(sp)
ldw r4, __z_arch_esf_t_r4_OFFSET(sp)
ldw r5, __z_arch_esf_t_r5_OFFSET(sp)
ldw r6, __z_arch_esf_t_r6_OFFSET(sp)
ldw r7, __z_arch_esf_t_r7_OFFSET(sp)
ldw r8, __z_arch_esf_t_r8_OFFSET(sp)
ldw r9, __z_arch_esf_t_r9_OFFSET(sp)
ldw r10, __z_arch_esf_t_r10_OFFSET(sp)
ldw r11, __z_arch_esf_t_r11_OFFSET(sp)
ldw r12, __z_arch_esf_t_r12_OFFSET(sp)
ldw r13, __z_arch_esf_t_r13_OFFSET(sp)
ldw r14, __z_arch_esf_t_r14_OFFSET(sp)
ldw r15, __z_arch_esf_t_r15_OFFSET(sp)
/* Put the stack pointer back where it was when we entered
* exception state
*/
addi sp, sp, __NANO_ESF_SIZEOF
addi sp, sp, __z_arch_esf_t_SIZEOF
/* All done, copy estatus into status and transfer to ea */
eret

@ -11,7 +11,7 @@
#include <logging/log_ctrl.h>
FUNC_NORETURN void z_nios2_fatal_error(unsigned int reason,
const NANO_ESF *esf)
const z_arch_esf_t *esf)
{
if (esf != NULL) {
/* Subtract 4 from EA since we added 4 earlier so that the
@ -99,7 +99,7 @@ static char *cause_str(u32_t cause_code)
}
#endif
FUNC_NORETURN void _Fault(const NANO_ESF *esf)
FUNC_NORETURN void _Fault(const z_arch_esf_t *esf)
{
#if defined(CONFIG_PRINTK) || defined(CONFIG_LOG)
/* Unfortunately, completely unavailable on Nios II/e cores */

@ -43,25 +43,25 @@ GEN_OFFSET_SYM(_callee_saved_t, sp);
GEN_OFFSET_SYM(_callee_saved_t, key);
GEN_OFFSET_SYM(_callee_saved_t, retval);
GEN_OFFSET_SYM(NANO_ESF, ra);
GEN_OFFSET_SYM(NANO_ESF, r1);
GEN_OFFSET_SYM(NANO_ESF, r2);
GEN_OFFSET_SYM(NANO_ESF, r3);
GEN_OFFSET_SYM(NANO_ESF, r4);
GEN_OFFSET_SYM(NANO_ESF, r5);
GEN_OFFSET_SYM(NANO_ESF, r6);
GEN_OFFSET_SYM(NANO_ESF, r7);
GEN_OFFSET_SYM(NANO_ESF, r8);
GEN_OFFSET_SYM(NANO_ESF, r9);
GEN_OFFSET_SYM(NANO_ESF, r10);
GEN_OFFSET_SYM(NANO_ESF, r11);
GEN_OFFSET_SYM(NANO_ESF, r12);
GEN_OFFSET_SYM(NANO_ESF, r13);
GEN_OFFSET_SYM(NANO_ESF, r14);
GEN_OFFSET_SYM(NANO_ESF, r15);
GEN_OFFSET_SYM(NANO_ESF, estatus);
GEN_OFFSET_SYM(NANO_ESF, instr);
GEN_ABSOLUTE_SYM(__NANO_ESF_SIZEOF, sizeof(NANO_ESF));
GEN_OFFSET_SYM(z_arch_esf_t, ra);
GEN_OFFSET_SYM(z_arch_esf_t, r1);
GEN_OFFSET_SYM(z_arch_esf_t, r2);
GEN_OFFSET_SYM(z_arch_esf_t, r3);
GEN_OFFSET_SYM(z_arch_esf_t, r4);
GEN_OFFSET_SYM(z_arch_esf_t, r5);
GEN_OFFSET_SYM(z_arch_esf_t, r6);
GEN_OFFSET_SYM(z_arch_esf_t, r7);
GEN_OFFSET_SYM(z_arch_esf_t, r8);
GEN_OFFSET_SYM(z_arch_esf_t, r9);
GEN_OFFSET_SYM(z_arch_esf_t, r10);
GEN_OFFSET_SYM(z_arch_esf_t, r11);
GEN_OFFSET_SYM(z_arch_esf_t, r12);
GEN_OFFSET_SYM(z_arch_esf_t, r13);
GEN_OFFSET_SYM(z_arch_esf_t, r14);
GEN_OFFSET_SYM(z_arch_esf_t, r15);
GEN_OFFSET_SYM(z_arch_esf_t, estatus);
GEN_OFFSET_SYM(z_arch_esf_t, instr);
GEN_ABSOLUTE_SYM(__z_arch_esf_t_SIZEOF, sizeof(z_arch_esf_t));
/*
* size of the struct k_thread structure sans save area for floating
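
The net effect of the GEN_OFFSET_SYM() and GEN_ABSOLUTE_SYM() lines
above is a set of absolute symbols that the build turns into a
generated header of plain #defines for the assembly entry code. A
hypothetical excerpt, with illustrative values only (the real numbers
depend on the target's struct layout):

    #define __z_arch_esf_t_ra_OFFSET 0x0
    #define __z_arch_esf_t_r1_OFFSET 0x4
    #define __z_arch_esf_t_r2_OFFSET 0x8
    /* ... r3 through r15 ... */
    #define __z_arch_esf_t_estatus_OFFSET 0x40
    #define __z_arch_esf_t_instr_OFFSET 0x44
    #define __z_arch_esf_t_SIZEOF 0x48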

@ -42,7 +42,7 @@ z_set_thread_return_value(struct k_thread *thread, unsigned int value)
}
FUNC_NORETURN void z_nios2_fatal_error(unsigned int reason,
const NANO_ESF *esf);
const z_arch_esf_t *esf);
#define z_is_in_isr() (_kernel.nested != 0U)

@ -10,7 +10,7 @@
#include <logging/log_ctrl.h>
FUNC_NORETURN void z_riscv32_fatal_error(unsigned int reason,
const NANO_ESF *esf)
const z_arch_esf_t *esf)
{
if (esf != NULL) {
z_fatal_print("Faulting instruction address = 0x%08x",
@ -51,7 +51,7 @@ static char *cause_str(u32_t cause)
}
}
FUNC_NORETURN void _Fault(const NANO_ESF *esf)
FUNC_NORETURN void _Fault(const z_arch_esf_t *esf)
{
u32_t mcause;

@ -66,7 +66,7 @@ GTEXT(__irq_wrapper)
*/
SECTION_FUNC(exception.entry, __irq_wrapper)
/* Allocate space on thread stack to save registers */
addi sp, sp, -__NANO_ESF_SIZEOF
addi sp, sp, -__z_arch_esf_t_SIZEOF
/*
* Save caller-saved registers on current thread stack.
@ -74,36 +74,36 @@ SECTION_FUNC(exception.entry, __irq_wrapper)
* floating-point registers should be accounted for when corresponding
* config variable is set
*/
sw ra, __NANO_ESF_ra_OFFSET(sp)
sw gp, __NANO_ESF_gp_OFFSET(sp)
sw tp, __NANO_ESF_tp_OFFSET(sp)
sw t0, __NANO_ESF_t0_OFFSET(sp)
sw t1, __NANO_ESF_t1_OFFSET(sp)
sw t2, __NANO_ESF_t2_OFFSET(sp)
sw t3, __NANO_ESF_t3_OFFSET(sp)
sw t4, __NANO_ESF_t4_OFFSET(sp)
sw t5, __NANO_ESF_t5_OFFSET(sp)
sw t6, __NANO_ESF_t6_OFFSET(sp)
sw a0, __NANO_ESF_a0_OFFSET(sp)
sw a1, __NANO_ESF_a1_OFFSET(sp)
sw a2, __NANO_ESF_a2_OFFSET(sp)
sw a3, __NANO_ESF_a3_OFFSET(sp)
sw a4, __NANO_ESF_a4_OFFSET(sp)
sw a5, __NANO_ESF_a5_OFFSET(sp)
sw a6, __NANO_ESF_a6_OFFSET(sp)
sw a7, __NANO_ESF_a7_OFFSET(sp)
sw ra, __z_arch_esf_t_ra_OFFSET(sp)
sw gp, __z_arch_esf_t_gp_OFFSET(sp)
sw tp, __z_arch_esf_t_tp_OFFSET(sp)
sw t0, __z_arch_esf_t_t0_OFFSET(sp)
sw t1, __z_arch_esf_t_t1_OFFSET(sp)
sw t2, __z_arch_esf_t_t2_OFFSET(sp)
sw t3, __z_arch_esf_t_t3_OFFSET(sp)
sw t4, __z_arch_esf_t_t4_OFFSET(sp)
sw t5, __z_arch_esf_t_t5_OFFSET(sp)
sw t6, __z_arch_esf_t_t6_OFFSET(sp)
sw a0, __z_arch_esf_t_a0_OFFSET(sp)
sw a1, __z_arch_esf_t_a1_OFFSET(sp)
sw a2, __z_arch_esf_t_a2_OFFSET(sp)
sw a3, __z_arch_esf_t_a3_OFFSET(sp)
sw a4, __z_arch_esf_t_a4_OFFSET(sp)
sw a5, __z_arch_esf_t_a5_OFFSET(sp)
sw a6, __z_arch_esf_t_a6_OFFSET(sp)
sw a7, __z_arch_esf_t_a7_OFFSET(sp)
/* Save MEPC register */
csrr t0, mepc
sw t0, __NANO_ESF_mepc_OFFSET(sp)
sw t0, __z_arch_esf_t_mepc_OFFSET(sp)
/* Save SOC-specific MSTATUS register */
csrr t0, SOC_MSTATUS_REG
sw t0, __NANO_ESF_mstatus_OFFSET(sp)
sw t0, __z_arch_esf_t_mstatus_OFFSET(sp)
#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
/* Handle context saving at SOC level. */
addi a0, sp, __NANO_ESF_soc_context_OFFSET
addi a0, sp, __z_arch_esf_t_soc_context_OFFSET
jal ra, __soc_save_context
#endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */
@ -145,7 +145,7 @@ SECTION_FUNC(exception.entry, __irq_wrapper)
/*
* Call _Fault to handle exception.
* Stack pointer is pointing to a NANO_ESF structure, pass it
* Stack pointer is pointing to a z_arch_esf_t structure, pass it
* to _Fault (via register a0).
* If _Fault shall return, set return address to no_reschedule
* to restore stack.
@ -164,9 +164,9 @@ is_syscall:
* It's safe to always increment by 4, even with compressed
* instructions, because the ecall instruction is always 4 bytes.
*/
lw t0, __NANO_ESF_mepc_OFFSET(sp)
lw t0, __z_arch_esf_t_mepc_OFFSET(sp)
addi t0, t0, 4
sw t0, __NANO_ESF_mepc_OFFSET(sp)
sw t0, __z_arch_esf_t_mepc_OFFSET(sp)
#ifdef CONFIG_IRQ_OFFLOAD
/*
@ -365,89 +365,89 @@ reschedule:
lw s11, _thread_offset_to_s11(t1)
#ifdef CONFIG_EXECUTION_BENCHMARKING
addi sp, sp, -__NANO_ESF_SIZEOF
addi sp, sp, -__z_arch_esf_t_SIZEOF
sw ra, __NANO_ESF_ra_OFFSET(sp)
sw gp, __NANO_ESF_gp_OFFSET(sp)
sw tp, __NANO_ESF_tp_OFFSET(sp)
sw t0, __NANO_ESF_t0_OFFSET(sp)
sw t1, __NANO_ESF_t1_OFFSET(sp)
sw t2, __NANO_ESF_t2_OFFSET(sp)
sw t3, __NANO_ESF_t3_OFFSET(sp)
sw t4, __NANO_ESF_t4_OFFSET(sp)
sw t5, __NANO_ESF_t5_OFFSET(sp)
sw t6, __NANO_ESF_t6_OFFSET(sp)
sw a0, __NANO_ESF_a0_OFFSET(sp)
sw a1, __NANO_ESF_a1_OFFSET(sp)
sw a2, __NANO_ESF_a2_OFFSET(sp)
sw a3, __NANO_ESF_a3_OFFSET(sp)
sw a4, __NANO_ESF_a4_OFFSET(sp)
sw a5, __NANO_ESF_a5_OFFSET(sp)
sw a6, __NANO_ESF_a6_OFFSET(sp)
sw a7, __NANO_ESF_a7_OFFSET(sp)
sw ra, __z_arch_esf_t_ra_OFFSET(sp)
sw gp, __z_arch_esf_t_gp_OFFSET(sp)
sw tp, __z_arch_esf_t_tp_OFFSET(sp)
sw t0, __z_arch_esf_t_t0_OFFSET(sp)
sw t1, __z_arch_esf_t_t1_OFFSET(sp)
sw t2, __z_arch_esf_t_t2_OFFSET(sp)
sw t3, __z_arch_esf_t_t3_OFFSET(sp)
sw t4, __z_arch_esf_t_t4_OFFSET(sp)
sw t5, __z_arch_esf_t_t5_OFFSET(sp)
sw t6, __z_arch_esf_t_t6_OFFSET(sp)
sw a0, __z_arch_esf_t_a0_OFFSET(sp)
sw a1, __z_arch_esf_t_a1_OFFSET(sp)
sw a2, __z_arch_esf_t_a2_OFFSET(sp)
sw a3, __z_arch_esf_t_a3_OFFSET(sp)
sw a4, __z_arch_esf_t_a4_OFFSET(sp)
sw a5, __z_arch_esf_t_a5_OFFSET(sp)
sw a6, __z_arch_esf_t_a6_OFFSET(sp)
sw a7, __z_arch_esf_t_a7_OFFSET(sp)
call read_timer_end_of_swap
lw ra, __NANO_ESF_ra_OFFSET(sp)
lw gp, __NANO_ESF_gp_OFFSET(sp)
lw tp, __NANO_ESF_tp_OFFSET(sp)
lw t0, __NANO_ESF_t0_OFFSET(sp)
lw t1, __NANO_ESF_t1_OFFSET(sp)
lw t2, __NANO_ESF_t2_OFFSET(sp)
lw t3, __NANO_ESF_t3_OFFSET(sp)
lw t4, __NANO_ESF_t4_OFFSET(sp)
lw t5, __NANO_ESF_t5_OFFSET(sp)
lw t6, __NANO_ESF_t6_OFFSET(sp)
lw a0, __NANO_ESF_a0_OFFSET(sp)
lw a1, __NANO_ESF_a1_OFFSET(sp)
lw a2, __NANO_ESF_a2_OFFSET(sp)
lw a3, __NANO_ESF_a3_OFFSET(sp)
lw a4, __NANO_ESF_a4_OFFSET(sp)
lw a5, __NANO_ESF_a5_OFFSET(sp)
lw a6, __NANO_ESF_a6_OFFSET(sp)
lw a7, __NANO_ESF_a7_OFFSET(sp)
lw ra, __z_arch_esf_t_ra_OFFSET(sp)
lw gp, __z_arch_esf_t_gp_OFFSET(sp)
lw tp, __z_arch_esf_t_tp_OFFSET(sp)
lw t0, __z_arch_esf_t_t0_OFFSET(sp)
lw t1, __z_arch_esf_t_t1_OFFSET(sp)
lw t2, __z_arch_esf_t_t2_OFFSET(sp)
lw t3, __z_arch_esf_t_t3_OFFSET(sp)
lw t4, __z_arch_esf_t_t4_OFFSET(sp)
lw t5, __z_arch_esf_t_t5_OFFSET(sp)
lw t6, __z_arch_esf_t_t6_OFFSET(sp)
lw a0, __z_arch_esf_t_a0_OFFSET(sp)
lw a1, __z_arch_esf_t_a1_OFFSET(sp)
lw a2, __z_arch_esf_t_a2_OFFSET(sp)
lw a3, __z_arch_esf_t_a3_OFFSET(sp)
lw a4, __z_arch_esf_t_a4_OFFSET(sp)
lw a5, __z_arch_esf_t_a5_OFFSET(sp)
lw a6, __z_arch_esf_t_a6_OFFSET(sp)
lw a7, __z_arch_esf_t_a7_OFFSET(sp)
/* Release stack space */
addi sp, sp, __NANO_ESF_SIZEOF
addi sp, sp, __z_arch_esf_t_SIZEOF
#endif
no_reschedule:
#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
/* Restore context at SOC level */
addi a0, sp, __NANO_ESF_soc_context_OFFSET
addi a0, sp, __z_arch_esf_t_soc_context_OFFSET
jal ra, __soc_restore_context
#endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */
/* Restore MEPC register */
lw t0, __NANO_ESF_mepc_OFFSET(sp)
lw t0, __z_arch_esf_t_mepc_OFFSET(sp)
csrw mepc, t0
/* Restore SOC-specific MSTATUS register */
lw t0, __NANO_ESF_mstatus_OFFSET(sp)
lw t0, __z_arch_esf_t_mstatus_OFFSET(sp)
csrw SOC_MSTATUS_REG, t0
/* Restore caller-saved registers from thread stack */
lw ra, __NANO_ESF_ra_OFFSET(sp)
lw gp, __NANO_ESF_gp_OFFSET(sp)
lw tp, __NANO_ESF_tp_OFFSET(sp)
lw t0, __NANO_ESF_t0_OFFSET(sp)
lw t1, __NANO_ESF_t1_OFFSET(sp)
lw t2, __NANO_ESF_t2_OFFSET(sp)
lw t3, __NANO_ESF_t3_OFFSET(sp)
lw t4, __NANO_ESF_t4_OFFSET(sp)
lw t5, __NANO_ESF_t5_OFFSET(sp)
lw t6, __NANO_ESF_t6_OFFSET(sp)
lw a0, __NANO_ESF_a0_OFFSET(sp)
lw a1, __NANO_ESF_a1_OFFSET(sp)
lw a2, __NANO_ESF_a2_OFFSET(sp)
lw a3, __NANO_ESF_a3_OFFSET(sp)
lw a4, __NANO_ESF_a4_OFFSET(sp)
lw a5, __NANO_ESF_a5_OFFSET(sp)
lw a6, __NANO_ESF_a6_OFFSET(sp)
lw a7, __NANO_ESF_a7_OFFSET(sp)
lw ra, __z_arch_esf_t_ra_OFFSET(sp)
lw gp, __z_arch_esf_t_gp_OFFSET(sp)
lw tp, __z_arch_esf_t_tp_OFFSET(sp)
lw t0, __z_arch_esf_t_t0_OFFSET(sp)
lw t1, __z_arch_esf_t_t1_OFFSET(sp)
lw t2, __z_arch_esf_t_t2_OFFSET(sp)
lw t3, __z_arch_esf_t_t3_OFFSET(sp)
lw t4, __z_arch_esf_t_t4_OFFSET(sp)
lw t5, __z_arch_esf_t_t5_OFFSET(sp)
lw t6, __z_arch_esf_t_t6_OFFSET(sp)
lw a0, __z_arch_esf_t_a0_OFFSET(sp)
lw a1, __z_arch_esf_t_a1_OFFSET(sp)
lw a2, __z_arch_esf_t_a2_OFFSET(sp)
lw a3, __z_arch_esf_t_a3_OFFSET(sp)
lw a4, __z_arch_esf_t_a4_OFFSET(sp)
lw a5, __z_arch_esf_t_a5_OFFSET(sp)
lw a6, __z_arch_esf_t_a6_OFFSET(sp)
lw a7, __z_arch_esf_t_a7_OFFSET(sp)
/* Release stack space */
addi sp, sp, __NANO_ESF_SIZEOF
addi sp, sp, __z_arch_esf_t_SIZEOF
/* Call SOC_ERET to exit ISR */
SOC_ERET

@ -43,30 +43,30 @@ GEN_OFFSET_SYM(_callee_saved_t, s10);
GEN_OFFSET_SYM(_callee_saved_t, s11);
/* esf member offsets */
GEN_OFFSET_SYM(NANO_ESF, ra);
GEN_OFFSET_SYM(NANO_ESF, gp);
GEN_OFFSET_SYM(NANO_ESF, tp);
GEN_OFFSET_SYM(NANO_ESF, t0);
GEN_OFFSET_SYM(NANO_ESF, t1);
GEN_OFFSET_SYM(NANO_ESF, t2);
GEN_OFFSET_SYM(NANO_ESF, t3);
GEN_OFFSET_SYM(NANO_ESF, t4);
GEN_OFFSET_SYM(NANO_ESF, t5);
GEN_OFFSET_SYM(NANO_ESF, t6);
GEN_OFFSET_SYM(NANO_ESF, a0);
GEN_OFFSET_SYM(NANO_ESF, a1);
GEN_OFFSET_SYM(NANO_ESF, a2);
GEN_OFFSET_SYM(NANO_ESF, a3);
GEN_OFFSET_SYM(NANO_ESF, a4);
GEN_OFFSET_SYM(NANO_ESF, a5);
GEN_OFFSET_SYM(NANO_ESF, a6);
GEN_OFFSET_SYM(NANO_ESF, a7);
GEN_OFFSET_SYM(z_arch_esf_t, ra);
GEN_OFFSET_SYM(z_arch_esf_t, gp);
GEN_OFFSET_SYM(z_arch_esf_t, tp);
GEN_OFFSET_SYM(z_arch_esf_t, t0);
GEN_OFFSET_SYM(z_arch_esf_t, t1);
GEN_OFFSET_SYM(z_arch_esf_t, t2);
GEN_OFFSET_SYM(z_arch_esf_t, t3);
GEN_OFFSET_SYM(z_arch_esf_t, t4);
GEN_OFFSET_SYM(z_arch_esf_t, t5);
GEN_OFFSET_SYM(z_arch_esf_t, t6);
GEN_OFFSET_SYM(z_arch_esf_t, a0);
GEN_OFFSET_SYM(z_arch_esf_t, a1);
GEN_OFFSET_SYM(z_arch_esf_t, a2);
GEN_OFFSET_SYM(z_arch_esf_t, a3);
GEN_OFFSET_SYM(z_arch_esf_t, a4);
GEN_OFFSET_SYM(z_arch_esf_t, a5);
GEN_OFFSET_SYM(z_arch_esf_t, a6);
GEN_OFFSET_SYM(z_arch_esf_t, a7);
GEN_OFFSET_SYM(NANO_ESF, mepc);
GEN_OFFSET_SYM(NANO_ESF, mstatus);
GEN_OFFSET_SYM(z_arch_esf_t, mepc);
GEN_OFFSET_SYM(z_arch_esf_t, mstatus);
#if defined(CONFIG_RISCV_SOC_CONTEXT_SAVE)
GEN_OFFSET_SYM(NANO_ESF, soc_context);
GEN_OFFSET_SYM(z_arch_esf_t, soc_context);
#endif
#if defined(CONFIG_RISCV_SOC_OFFSETS)
GEN_SOC_OFFSET_SYMS();
@ -75,10 +75,10 @@ GEN_SOC_OFFSET_SYMS();
/*
* RISC-V requires the stack to be 16-bytes aligned, hence SP needs to grow or
* shrink by a size, which follows the RISC-V stack alignment requirements
* Hence, ensure that __NANO_ESF_SIZEOF and _K_THREAD_NO_FLOAT_SIZEOF sizes
* Hence, ensure that __z_arch_esf_t_SIZEOF and _K_THREAD_NO_FLOAT_SIZEOF sizes
* are aligned accordingly.
*/
GEN_ABSOLUTE_SYM(__NANO_ESF_SIZEOF, STACK_ROUND_UP(sizeof(NANO_ESF)));
GEN_ABSOLUTE_SYM(__z_arch_esf_t_SIZEOF, STACK_ROUND_UP(sizeof(z_arch_esf_t)));
/*
* size of the struct k_thread structure sans save area for floating
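
Rounding the frame size up to that 16-byte boundary is plain bit
masking; an illustrative equivalent of what STACK_ROUND_UP()
accomplishes here (not its actual definition):

    /* round x up to the next multiple of 16 */
    #define ROUND_UP_16(x) (((x) + 15) & ~15)

    /* e.g. a 76-byte frame rounds to 80, so SP always moves by a
     * 16-byte-aligned amount
     */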

@ -24,50 +24,50 @@ SECTION_FUNC(exception.other, __swap)
/* Make a system call to perform context switch */
#ifdef CONFIG_EXECUTION_BENCHMARKING
addi sp, sp, -__NANO_ESF_SIZEOF
addi sp, sp, -__z_arch_esf_t_SIZEOF
sw ra, __NANO_ESF_ra_OFFSET(sp)
sw gp, __NANO_ESF_gp_OFFSET(sp)
sw tp, __NANO_ESF_tp_OFFSET(sp)
sw t0, __NANO_ESF_t0_OFFSET(sp)
sw t1, __NANO_ESF_t1_OFFSET(sp)
sw t2, __NANO_ESF_t2_OFFSET(sp)
sw t3, __NANO_ESF_t3_OFFSET(sp)
sw t4, __NANO_ESF_t4_OFFSET(sp)
sw t5, __NANO_ESF_t5_OFFSET(sp)
sw t6, __NANO_ESF_t6_OFFSET(sp)
sw a0, __NANO_ESF_a0_OFFSET(sp)
sw a1, __NANO_ESF_a1_OFFSET(sp)
sw a2, __NANO_ESF_a2_OFFSET(sp)
sw a3, __NANO_ESF_a3_OFFSET(sp)
sw a4, __NANO_ESF_a4_OFFSET(sp)
sw a5, __NANO_ESF_a5_OFFSET(sp)
sw a6, __NANO_ESF_a6_OFFSET(sp)
sw a7, __NANO_ESF_a7_OFFSET(sp)
sw ra, __z_arch_esf_t_ra_OFFSET(sp)
sw gp, __z_arch_esf_t_gp_OFFSET(sp)
sw tp, __z_arch_esf_t_tp_OFFSET(sp)
sw t0, __z_arch_esf_t_t0_OFFSET(sp)
sw t1, __z_arch_esf_t_t1_OFFSET(sp)
sw t2, __z_arch_esf_t_t2_OFFSET(sp)
sw t3, __z_arch_esf_t_t3_OFFSET(sp)
sw t4, __z_arch_esf_t_t4_OFFSET(sp)
sw t5, __z_arch_esf_t_t5_OFFSET(sp)
sw t6, __z_arch_esf_t_t6_OFFSET(sp)
sw a0, __z_arch_esf_t_a0_OFFSET(sp)
sw a1, __z_arch_esf_t_a1_OFFSET(sp)
sw a2, __z_arch_esf_t_a2_OFFSET(sp)
sw a3, __z_arch_esf_t_a3_OFFSET(sp)
sw a4, __z_arch_esf_t_a4_OFFSET(sp)
sw a5, __z_arch_esf_t_a5_OFFSET(sp)
sw a6, __z_arch_esf_t_a6_OFFSET(sp)
sw a7, __z_arch_esf_t_a7_OFFSET(sp)
call read_timer_start_of_swap
lw ra, __NANO_ESF_ra_OFFSET(sp)
lw gp, __NANO_ESF_gp_OFFSET(sp)
lw tp, __NANO_ESF_tp_OFFSET(sp)
lw t0, __NANO_ESF_t0_OFFSET(sp)
lw t1, __NANO_ESF_t1_OFFSET(sp)
lw t2, __NANO_ESF_t2_OFFSET(sp)
lw t3, __NANO_ESF_t3_OFFSET(sp)
lw t4, __NANO_ESF_t4_OFFSET(sp)
lw t5, __NANO_ESF_t5_OFFSET(sp)
lw t6, __NANO_ESF_t6_OFFSET(sp)
lw a0, __NANO_ESF_a0_OFFSET(sp)
lw a1, __NANO_ESF_a1_OFFSET(sp)
lw a2, __NANO_ESF_a2_OFFSET(sp)
lw a3, __NANO_ESF_a3_OFFSET(sp)
lw a4, __NANO_ESF_a4_OFFSET(sp)
lw a5, __NANO_ESF_a5_OFFSET(sp)
lw a6, __NANO_ESF_a6_OFFSET(sp)
lw a7, __NANO_ESF_a7_OFFSET(sp)
lw ra, __z_arch_esf_t_ra_OFFSET(sp)
lw gp, __z_arch_esf_t_gp_OFFSET(sp)
lw tp, __z_arch_esf_t_tp_OFFSET(sp)
lw t0, __z_arch_esf_t_t0_OFFSET(sp)
lw t1, __z_arch_esf_t_t1_OFFSET(sp)
lw t2, __z_arch_esf_t_t2_OFFSET(sp)
lw t3, __z_arch_esf_t_t3_OFFSET(sp)
lw t4, __z_arch_esf_t_t4_OFFSET(sp)
lw t5, __z_arch_esf_t_t5_OFFSET(sp)
lw t6, __z_arch_esf_t_t6_OFFSET(sp)
lw a0, __z_arch_esf_t_a0_OFFSET(sp)
lw a1, __z_arch_esf_t_a1_OFFSET(sp)
lw a2, __z_arch_esf_t_a2_OFFSET(sp)
lw a3, __z_arch_esf_t_a3_OFFSET(sp)
lw a4, __z_arch_esf_t_a4_OFFSET(sp)
lw a5, __z_arch_esf_t_a5_OFFSET(sp)
lw a6, __z_arch_esf_t_a6_OFFSET(sp)
lw a7, __z_arch_esf_t_a7_OFFSET(sp)
/* Release stack space */
addi sp, sp, __NANO_ESF_SIZEOF
addi sp, sp, __z_arch_esf_t_SIZEOF
#endif
ecall

@ -38,7 +38,7 @@ z_set_thread_return_value(struct k_thread *thread, unsigned int value)
}
FUNC_NORETURN void z_riscv32_fatal_error(unsigned int reason,
const NANO_ESF *esf);
const z_arch_esf_t *esf);
#define z_is_in_isr() (_kernel.nested != 0U)

@ -156,15 +156,15 @@ SECTION_FUNC(TEXT, _exception_enter)
/* ESP is still pointing to the ESF at this point */
testl $0x200, __NANO_ESF_eflags_OFFSET(%esp)
testl $0x200, __z_arch_esf_t_eflags_OFFSET(%esp)
je allDone
sti
allDone:
#if CONFIG_X86_IAMCU
movl %esp, %eax /* NANO_ESF * parameter */
movl %esp, %eax /* z_arch_esf_t * parameter */
#else
pushl %esp /* push NANO_ESF * parameter */
pushl %esp /* push z_arch_esf_t * parameter */
#endif
INDIRECT_CALL(%ecx) /* call exception handler */

@ -21,7 +21,7 @@
#include <exc_handle.h>
#include <logging/log_ctrl.h>
__weak void z_debug_fatal_hook(const NANO_ESF *esf) { ARG_UNUSED(esf); }
__weak void z_debug_fatal_hook(const z_arch_esf_t *esf) { ARG_UNUSED(esf); }
#ifdef CONFIG_THREAD_STACK_INFO
/**
@ -129,7 +129,7 @@ FUNC_NORETURN void z_arch_system_halt(unsigned int reason)
}
#endif
FUNC_NORETURN void z_x86_fatal_error(unsigned int reason, const NANO_ESF *esf)
FUNC_NORETURN void z_x86_fatal_error(unsigned int reason, const z_arch_esf_t *esf)
{
if (esf != NULL) {
z_fatal_print("eax: 0x%08x, ebx: 0x%08x, ecx: 0x%08x, edx: 0x%08x",
@ -152,7 +152,7 @@ FUNC_NORETURN void z_x86_fatal_error(unsigned int reason, const NANO_ESF *esf)
CODE_UNREACHABLE;
}
void z_x86_spurious_irq(const NANO_ESF *esf)
void z_x86_spurious_irq(const z_arch_esf_t *esf)
{
int vector = z_irq_controller_isr_vector_get();
@ -167,7 +167,7 @@ void z_arch_syscall_oops(void *ssf_ptr)
{
struct _x86_syscall_stack_frame *ssf =
(struct _x86_syscall_stack_frame *)ssf_ptr;
NANO_ESF oops = {
z_arch_esf_t oops = {
.eip = ssf->eip,
.cs = ssf->cs,
.eflags = ssf->eflags
@ -181,7 +181,7 @@ void z_arch_syscall_oops(void *ssf_ptr)
}
#ifdef CONFIG_X86_KERNEL_OOPS
void z_do_kernel_oops(const NANO_ESF *esf)
void z_do_kernel_oops(const z_arch_esf_t *esf)
{
u32_t *stack_ptr = (u32_t *)esf->esp;
u32_t reason = *stack_ptr;
@ -208,7 +208,7 @@ NANO_CPU_INT_REGISTER(_kernel_oops_handler, NANO_SOFT_IRQ,
#if CONFIG_EXCEPTION_DEBUG
FUNC_NORETURN static void generic_exc_handle(unsigned int vector,
const NANO_ESF *pEsf)
const z_arch_esf_t *pEsf)
{
switch (vector) {
case IV_GENERAL_PROTECTION:
@ -228,7 +228,7 @@ FUNC_NORETURN static void generic_exc_handle(unsigned int vector,
}
#define _EXC_FUNC(vector) \
FUNC_NORETURN void handle_exc_##vector(const NANO_ESF *pEsf) \
FUNC_NORETURN void handle_exc_##vector(const z_arch_esf_t *pEsf) \
{ \
generic_exc_handle(vector, pEsf); \
}
@ -302,7 +302,7 @@ static void dump_mmu_flags(struct x86_mmu_pdpt *pdpt, void *addr)
}
#endif /* CONFIG_X86_MMU */
static void dump_page_fault(NANO_ESF *esf)
static void dump_page_fault(z_arch_esf_t *esf)
{
u32_t err, cr2;
@ -338,7 +338,7 @@ static const struct z_exc_handle exceptions[] = {
};
#endif
void page_fault_handler(NANO_ESF *esf)
void page_fault_handler(z_arch_esf_t *esf)
{
#ifdef CONFIG_USERSPACE
int i;
@ -365,7 +365,7 @@ void page_fault_handler(NANO_ESF *esf)
_EXCEPTION_CONNECT_CODE(page_fault_handler, IV_PAGE_FAULT);
#ifdef CONFIG_X86_ENABLE_TSS
static __noinit volatile NANO_ESF _df_esf;
static __noinit volatile z_arch_esf_t _df_esf;
/* Very tiny stack; just enough for the bogus error code pushed by the CPU
* and a frame pointer push by the compiler. All df_handler_top does is
@ -418,13 +418,13 @@ static __used void df_handler_bottom(void)
reason = K_ERR_STACK_CHK_FAIL;
}
#endif
z_x86_fatal_error(reason, (NANO_ESF *)&_df_esf);
z_x86_fatal_error(reason, (z_arch_esf_t *)&_df_esf);
}
static FUNC_NORETURN __used void df_handler_top(void)
{
/* State of the system when the double-fault forced a task switch
* will be in _main_tss. Set up a NANO_ESF and copy system state into
* will be in _main_tss. Set up a z_arch_esf_t and copy system state into
* it
*/
_df_esf.esp = _main_tss.esp;

@ -314,7 +314,7 @@ int z_float_disable(struct k_thread *thread)
* instruction is executed while CR0[TS]=1. The handler then enables the
* current thread to use all supported floating point registers.
*/
void _FpNotAvailableExcHandler(NANO_ESF *pEsf)
void _FpNotAvailableExcHandler(z_arch_esf_t *pEsf)
{
ARG_UNUSED(pEsf);

@ -48,20 +48,20 @@ GEN_ABSOLUTE_SYM(_K_THREAD_NO_FLOAT_SIZEOF,
GEN_OFFSET_SYM(_callee_saved_t, esp);
/* NANO_ESF structure member offsets */
/* z_arch_esf_t structure member offsets */
GEN_OFFSET_SYM(NANO_ESF, esp);
GEN_OFFSET_SYM(NANO_ESF, ebp);
GEN_OFFSET_SYM(NANO_ESF, ebx);
GEN_OFFSET_SYM(NANO_ESF, esi);
GEN_OFFSET_SYM(NANO_ESF, edi);
GEN_OFFSET_SYM(NANO_ESF, edx);
GEN_OFFSET_SYM(NANO_ESF, ecx);
GEN_OFFSET_SYM(NANO_ESF, eax);
GEN_OFFSET_SYM(NANO_ESF, errorCode);
GEN_OFFSET_SYM(NANO_ESF, eip);
GEN_OFFSET_SYM(NANO_ESF, cs);
GEN_OFFSET_SYM(NANO_ESF, eflags);
GEN_OFFSET_SYM(z_arch_esf_t, esp);
GEN_OFFSET_SYM(z_arch_esf_t, ebp);
GEN_OFFSET_SYM(z_arch_esf_t, ebx);
GEN_OFFSET_SYM(z_arch_esf_t, esi);
GEN_OFFSET_SYM(z_arch_esf_t, edi);
GEN_OFFSET_SYM(z_arch_esf_t, edx);
GEN_OFFSET_SYM(z_arch_esf_t, ecx);
GEN_OFFSET_SYM(z_arch_esf_t, eax);
GEN_OFFSET_SYM(z_arch_esf_t, errorCode);
GEN_OFFSET_SYM(z_arch_esf_t, eip);
GEN_OFFSET_SYM(z_arch_esf_t, cs);
GEN_OFFSET_SYM(z_arch_esf_t, eflags);
/* tTaskStateSegment structure member offsets */

@ -62,7 +62,7 @@
* Assign an exception handler to a particular vector in the IDT.
*
* @param handler A handler function of the prototype
* void handler(const NANO_ESF *esf)
* void handler(const z_arch_esf_t *esf)
* @param vector Vector index in the IDT
*/
#define _EXCEPTION_CONNECT_NOCODE(handler, vector) \
@ -75,7 +75,7 @@
* The error code will be accessible in esf->errorCode
*
* @param handler A handler function of the prototype
* void handler(const NANO_ESF *esf)
* void handler(const z_arch_esf_t *esf)
* @param vector Vector index in the IDT
*/
#define _EXCEPTION_CONNECT_CODE(handler, vector) \
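
For reference, the page fault wiring earlier in this commit shows the
intended usage of the error-code variant:

    void page_fault_handler(z_arch_esf_t *esf);
    _EXCEPTION_CONNECT_CODE(page_fault_handler, IV_PAGE_FAULT);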

@ -18,7 +18,7 @@
struct device;
struct NANO_ESF {
struct z_arch_esf_t {
};
void z_new_thread(struct k_thread *t, k_thread_stack_t *stack,
@ -59,7 +59,7 @@ void z_unhandled_vector(int vector, int err, struct xuk_entry_frame *f)
z_fatal_print("*** R8 0x%llx R9 0x%llx R10 0x%llx R11 0x%llx",
f->r8, f->r9, f->r10, f->r11);
/* FIXME: Why isn't xuk_entry_frame a NANO_ESF? */
/* FIXME: Why isn't xuk_entry_frame a z_arch_esf_t? */
z_fatal_error(x86_64_except_reason, NULL);
}

@ -115,7 +115,7 @@ static void dump_exc_state(void)
}
XTENSA_ERR_NORET void z_xtensa_fatal_error(unsigned int reason,
const NANO_ESF *esf)
const z_arch_esf_t *esf)
{
dump_exc_state();

@ -201,7 +201,7 @@ void *xtensa_excint1_c(int *interrupted_stack)
* as these are software errors. Should clean this
* up.
*
* FIXME: interrupted_stack and NANO_ESF ought to be the same
* FIXME: interrupted_stack and z_arch_esf_t ought to be the same
*/
z_xtensa_fatal_error(K_ERR_CPU_EXCEPTION, NULL);
}

@ -34,7 +34,7 @@ extern "C" {
extern void FatalErrorHandler(void);
extern void ReservedInterruptHandler(unsigned int intNo);
extern void z_xtensa_fatal_error(unsigned int reason, const NANO_ESF *esf);
extern void z_xtensa_fatal_error(unsigned int reason, const z_arch_esf_t *esf);
/* Defined in xtensa_context.S */
extern void z_xt_coproc_init(void);

@ -20,7 +20,7 @@ extern "C" {
#ifdef _ASMLANGUAGE
#else
typedef struct _irq_stack_frame NANO_ESF;
typedef struct _irq_stack_frame z_arch_esf_t;
#endif
#ifdef __cplusplus

@ -62,7 +62,7 @@ struct __esf {
#endif
};
typedef struct __esf NANO_ESF;
typedef struct __esf z_arch_esf_t;
extern void z_ExcExit(void);

@ -150,13 +150,13 @@ struct __esf {
u32_t instr; /* Instruction being executed when exc occurred */
};
typedef struct __esf NANO_ESF;
typedef struct __esf z_arch_esf_t;
FUNC_NORETURN void z_SysFatalErrorHandler(unsigned int reason,
const NANO_ESF *esf);
const z_arch_esf_t *esf);
FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
const NANO_ESF *esf);
const z_arch_esf_t *esf);
enum nios2_exception_cause {
NIOS2_EXCEPTION_UNKNOWN = -1,

@ -37,7 +37,7 @@ struct __esf {
u32_t dummy; /*maybe we will want to add something someday*/
};
typedef struct __esf NANO_ESF;
typedef struct __esf z_arch_esf_t;
extern u32_t z_timer_cycle_get_32(void);
#define z_arch_k_cycle_get_32() z_timer_cycle_get_32()

@ -71,7 +71,7 @@ struct __esf {
#endif
};
typedef struct __esf NANO_ESF;
typedef struct __esf z_arch_esf_t;
#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
typedef struct soc_esf soc_esf_t;
#endif

@ -301,7 +301,7 @@ typedef struct nanoEsf {
unsigned int eip;
unsigned int cs;
unsigned int eflags;
} NANO_ESF;
} z_arch_esf_t;
struct _x86_syscall_stack_frame {

@ -12,5 +12,5 @@
#define STACK_ALIGN 8
typedef struct NANO_ESF NANO_ESF;
typedef struct z_arch_esf_t z_arch_esf_t;
#endif /* _X86_64_ARCH_H */

@ -32,7 +32,7 @@ struct __esf {
u32_t pc;
};
typedef struct __esf NANO_ESF;
typedef struct __esf z_arch_esf_t;
#endif
#ifdef __cplusplus

@ -60,7 +60,7 @@ enum k_fatal_error_reason {
* @param esf Exception context, with details and partial or full register
* state when the error occurred. May in some cases be NULL.
*/
void k_sys_fatal_error_handler(unsigned int reason, const NANO_ESF *esf);
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *esf);
/**
* Called by architecture code upon a fatal error.
@ -76,7 +76,7 @@ void k_sys_fatal_error_handler(unsigned int reason, const NANO_ESF *esf);
* @param esf Exception context, with details and partial or full register
* state when the error occurred. May in some cases be NULL.
*/
void z_fatal_error(unsigned int reason, const NANO_ESF *esf);
void z_fatal_error(unsigned int reason, const z_arch_esf_t *esf);
/**
* Print messages related to an exception
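
Since the default k_sys_fatal_error_handler() is defined __weak (see
the kernel fatal.c hunk below), an application can override it with
its own policy. A minimal sketch against the signature above, assuming
<kernel.h> pulls in the reason codes:

    #include <kernel.h>

    void k_sys_fatal_error_handler(unsigned int reason,
                                   const z_arch_esf_t *esf)
    {
        ARG_UNUSED(esf);

        if (reason == K_ERR_STACK_CHK_FAIL) {
            /* treat stack overflows as unrecoverable system-wide */
            z_arch_system_halt(reason);
        }

        /* returning leaves the kernel's default action in place:
         * aborting the offending thread
         */
    }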

@ -33,7 +33,7 @@ FUNC_NORETURN __weak void z_arch_system_halt(unsigned int reason)
/* LCOV_EXCL_START */
__weak void k_sys_fatal_error_handler(unsigned int reason,
const NANO_ESF *esf)
const z_arch_esf_t *esf)
{
ARG_UNUSED(esf);
@ -95,7 +95,7 @@ void z_fatal_print(const char *fmt, ...)
}
#endif /* CONFIG_LOG || CONFIG_PRINTK */
void z_fatal_error(unsigned int reason, const NANO_ESF *esf)
void z_fatal_error(unsigned int reason, const z_arch_esf_t *esf)
{
struct k_thread *thread = k_current_get();

@ -1,2 +1,2 @@
mbedtls_pk_context
NANO_ESF
z_arch_esf_t

@ -45,7 +45,7 @@ extern "C" {
#define CONFIG_X86 1
#define CONFIG_PRINTK 1
struct esf;
typedef struct esf NANO_ESF;
typedef struct esf z_arch_esf_t;
#endif
#include <sys/printk.h>

@ -48,7 +48,7 @@ static volatile int int_handler_executed;
/* Assume the spurious interrupt handler will execute and abort the task */
static volatile int spur_handler_aborted_thread = 1;
void k_sys_fatal_error_handler(unsigned int reason, const NANO_ESF *esf)
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *esf)
{
zassert_equal(reason, K_ERR_SPURIOUS_IRQ, "wrong error reason");
zassert_equal(k_current_get(), &my_thread, "wrong thread crashed");
@ -87,7 +87,7 @@ void isr_handler(void)
* @return N/A
*/
void exc_divide_error_handler(NANO_ESF *p_esf)
void exc_divide_error_handler(z_arch_esf_t *p_esf)
{
p_esf->eip += 2;
/* provide evidence that the handler executed */

@ -44,7 +44,7 @@ volatile int rv;
static volatile int crash_reason;
void k_sys_fatal_error_handler(unsigned int reason, const NANO_ESF *pEsf)
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf)
{
TC_PRINT("Caught system error -- reason %d\n", reason);
crash_reason = reason;

@ -19,7 +19,7 @@ K_SEM_DEFINE(barrier_sem,
ZTEST_BMEM bool valid_fault;
void k_sys_fatal_error_handler(unsigned int reason, const NANO_ESF *pEsf)
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf)
{
printk("Caught system error -- reason %d %d\n", reason, valid_fault);
if (valid_fault) {

@ -25,7 +25,7 @@
#if !(defined(CONFIG_ARM) || defined(CONFIG_ARC))
FUNC_NORETURN
#endif
void k_sys_fatal_error_handler(unsigned int reason, const NANO_ESF *pEsf)
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf)
{
INFO("Caught system error -- reason %d\n", reason);
ztest_test_pass();

@ -15,7 +15,7 @@
ZTEST_BMEM static int count;
ZTEST_BMEM static int ret = TC_PASS;
void k_sys_fatal_error_handler(unsigned int reason, const NANO_ESF *esf)
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *esf)
{
zassert_equal(reason, K_ERR_STACK_CHK_FAIL, "wrong error type");
}

@ -74,7 +74,7 @@ K_APP_BMEM(part0) static volatile unsigned int expected_reason;
#if !(defined(CONFIG_ARM) || defined(CONFIG_ARC))
FUNC_NORETURN
#endif
void k_sys_fatal_error_handler(unsigned int reason, const NANO_ESF *pEsf)
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf)
{
INFO("Caught system error -- reason %d\n", reason);
/*

@ -675,7 +675,7 @@ void pipe_put_get_timeout(void)
/******************************************************************************/
ZTEST_BMEM bool valid_fault;
void k_sys_fatal_error_handler(unsigned int reason, const NANO_ESF *pEsf)
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf)
{
printk("Caught system error -- reason %d\n", reason);
if (valid_fault) {

@ -15,7 +15,7 @@ static K_SEM_DEFINE(start_sem, 0, 1);
static K_SEM_DEFINE(end_sem, 0, 1);
static ZTEST_BMEM struct k_thread *dyn_thread;
void k_sys_fatal_error_handler(unsigned int reason, const NANO_ESF *esf)
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *esf)
{
zassert_equal(reason, K_ERR_KERNEL_OOPS, "wrong error reason");
zassert_equal(k_current_get(), dyn_thread, "wrong thread crashed");