2015-04-11 01:44:37 +02:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2013-2014 Wind River Systems, Inc.
|
|
|
|
*
|
2017-01-19 02:01:01 +01:00
|
|
|
* SPDX-License-Identifier: Apache-2.0
|
2015-04-11 01:44:37 +02:00
|
|
|
*/
|
|
|
|
|
2015-12-04 16:09:39 +01:00
|
|
|
/**
|
|
|
|
* @file
|
|
|
|
* @brief Exception/interrupt context helpers for Cortex-M CPUs
|
|
|
|
*
|
2015-10-20 18:42:33 +02:00
|
|
|
* Exception/interrupt context helpers.
|
2015-07-01 23:22:39 +02:00
|
|
|
*/
|
2015-04-11 01:44:37 +02:00
|
|
|
|
2023-12-08 14:53:19 +01:00
|
|
|
#ifndef ZEPHYR_ARCH_ARM_INCLUDE_CORTEX_M_EXCEPTION_H_
|
|
|
|
#define ZEPHYR_ARCH_ARM_INCLUDE_CORTEX_M_EXCEPTION_H_
|
2015-04-11 01:44:37 +02:00
|
|
|
|
2022-05-06 10:49:15 +02:00
|
|
|
#include <zephyr/arch/cpu.h>
|
2015-04-11 01:44:37 +02:00
|
|
|
|
|
|
|
#ifdef _ASMLANGUAGE
|
|
|
|
|
|
|
|
/* nothing */
|
|
|
|
|
|
|
|
#else
|
|
|
|
|
2023-06-29 18:22:37 +02:00
|
|
|
#include <cmsis_core.h>
|
2023-12-08 14:58:12 +01:00
|
|
|
#include <zephyr/arch/arm/exception.h>
|
2022-05-06 10:49:15 +02:00
|
|
|
#include <zephyr/irq_offload.h>
|
2017-04-19 21:53:48 +02:00
|
|
|
|
2019-08-12 19:52:55 +02:00
|
|
|
#ifdef __cplusplus
|
|
|
|
extern "C" {
|
|
|
|
#endif
|
|
|
|
|
2017-04-19 21:53:48 +02:00
|
|
|
#ifdef CONFIG_IRQ_OFFLOAD
|
|
|
|
extern volatile irq_offload_routine_t offload_routine;
|
|
|
|
#endif
|
2017-01-18 19:28:52 +01:00
|
|
|
|
2018-07-09 11:47:18 +02:00
|
|
|
/* Writes to the AIRCR must be accompanied by a write of the value 0x05FA
|
|
|
|
* to the Vector Key field, otherwise the writes are ignored.
|
|
|
|
*/
|
|
|
|
#define AIRCR_VECT_KEY_PERMIT_WRITE 0x05FAUL
|
2019-10-07 10:16:56 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The current executing vector is found in the IPSR register. All
|
|
|
|
* IRQs and system exceptions are considered as interrupt context.
|
2015-07-01 23:22:39 +02:00
|
|
|
*/
|
2019-11-07 21:43:29 +01:00
|
|
|
static ALWAYS_INLINE bool arch_is_in_isr(void)
|
2015-04-11 01:44:37 +02:00
|
|
|
{
|
2019-10-07 10:16:56 +02:00
|
|
|
return (__get_IPSR()) ? (true) : (false);
|
2015-04-11 01:44:37 +02:00
|
|
|
}
|
|
|
|
|
2019-10-07 11:03:18 +02:00
|
|
|
/**
|
|
|
|
* @brief Find out if we were in ISR context
|
|
|
|
* before the current exception occurred.
|
|
|
|
*
|
|
|
|
* A function that determines, based on inspecting the current
|
|
|
|
* ESF, whether the processor was in handler mode before entering
|
|
|
|
* the current exception state (i.e. nested exception) or not.
|
|
|
|
*
|
|
|
|
* Notes:
|
|
|
|
* - The function shall only be called from ISR context.
|
|
|
|
* - We do not use ARM processor state flags to determine
|
|
|
|
* whether we are in a nested exception; we rely on the
|
|
|
|
* RETPSR value stacked on the ESF. Hence, the function
|
|
|
|
* assumes that the ESF stack frame has a valid RETPSR
|
|
|
|
* value.
|
|
|
|
*
|
|
|
|
* @param esf the exception stack frame (cannot be NULL)
|
|
|
|
* @return true if execution state was in handler mode, before
|
|
|
|
* the current exception occurred, otherwise false.
|
|
|
|
*/
|
2019-11-07 21:43:29 +01:00
|
|
|
static ALWAYS_INLINE bool arch_is_in_nested_exception(const z_arch_esf_t *esf)
|
2019-10-07 11:03:18 +02:00
|
|
|
{
|
|
|
|
return (esf->basic.xpsr & IPSR_ISR_Msk) ? (true) : (false);
|
|
|
|
}
|
|
|
|
|
2021-06-23 16:05:41 +02:00
|
|
|
#if defined(CONFIG_USERSPACE)
/**
 * @brief Is the thread in unprivileged mode
 *
 * Thin wrapper around z_arm_thread_is_in_user_mode(); the ESF is not
 * consulted on this architecture.
 *
 * @param esf the exception stack frame (unused)
 * @return true if the current thread was in unprivileged mode
 */
static ALWAYS_INLINE bool z_arm_preempted_thread_in_user_mode(const z_arch_esf_t *esf)
{
	/* Explicitly discard the unused parameter so warning-clean builds
	 * (-Wunused-parameter) do not flag this inline helper.
	 */
	(void)esf;

	return z_arm_thread_is_in_user_mode();
}
#endif /* CONFIG_USERSPACE */
|
|
|
|
|
2015-07-01 23:22:39 +02:00
|
|
|
/**
 * @brief Setup system exceptions
 *
 * Set exception priorities to conform with the BASEPRI locking mechanism.
 * Set PendSV priority to lowest possible.
 *
 * Enable fault exceptions.
 */
static ALWAYS_INLINE void z_arm_exc_setup(void)
{
	/* PendSV is set to lowest priority, regardless of it being used.
	 * This is done as the IRQ is always enabled.
	 */
	NVIC_SetPriority(PendSV_IRQn, _EXC_PENDSV_PRIO);

#ifdef CONFIG_CPU_CORTEX_M_HAS_BASEPRI
	/* Note: SVCall IRQ priority level is left to default (0)
	 * for Cortex-M variants without BASEPRI (e.g. ARMv6-M).
	 */
	NVIC_SetPriority(SVCall_IRQn, _EXC_SVC_PRIO);
#endif

#ifdef CONFIG_CPU_CORTEX_M_HAS_PROGRAMMABLE_FAULT_PRIOS
	/* Group the configurable fault handlers at the same priority level
	 * so none of them can preempt another fault handler.
	 */
	NVIC_SetPriority(MemoryManagement_IRQn, _EXC_FAULT_PRIO);
	NVIC_SetPriority(BusFault_IRQn, _EXC_FAULT_PRIO);
	NVIC_SetPriority(UsageFault_IRQn, _EXC_FAULT_PRIO);
#if defined(CONFIG_CORTEX_M_DEBUG_MONITOR_HOOK)
	/* With a custom debug-monitor hook, run DebugMonitor at the lowest
	 * priority so it never interferes with kernel-level interrupts.
	 */
	NVIC_SetPriority(DebugMonitor_IRQn, IRQ_PRIO_LOWEST);
#elif defined(CONFIG_CPU_CORTEX_M_HAS_DWT)
	NVIC_SetPriority(DebugMonitor_IRQn, _EXC_FAULT_PRIO);
#endif
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
	NVIC_SetPriority(SecureFault_IRQn, _EXC_FAULT_PRIO);
#endif /* CONFIG_ARM_SECURE_FIRMWARE */

	/* Enable Usage, Mem, & Bus Faults */
	SCB->SHCSR |= SCB_SHCSR_USGFAULTENA_Msk | SCB_SHCSR_MEMFAULTENA_Msk |
		      SCB_SHCSR_BUSFAULTENA_Msk;
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
	/* Enable Secure Fault */
	SCB->SHCSR |= SCB_SHCSR_SECUREFAULTENA_Msk;
	/* Clear BFAR before setting BusFaults to target Non-Secure state. */
	SCB->BFAR = 0;
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
#endif /* CONFIG_CPU_CORTEX_M_HAS_PROGRAMMABLE_FAULT_PRIOS */

#if defined(CONFIG_ARM_SECURE_FIRMWARE) && \
	!defined(CONFIG_ARM_SECURE_BUSFAULT_HARDFAULT_NMI)
	/* Set NMI, Hard, and Bus Faults as Non-Secure.
	 * NMI and Bus Faults targeting the Secure state will
	 * escalate to a SecureFault or SecureHardFault.
	 *
	 * The read-modify-write preserves all other AIRCR fields; the write
	 * only takes effect because the VECTKEY field carries the permit
	 * value (see AIRCR_VECT_KEY_PERMIT_WRITE).
	 */
	SCB->AIRCR =
		(SCB->AIRCR & (~(SCB_AIRCR_VECTKEY_Msk)))
		| SCB_AIRCR_BFHFNMINS_Msk
		| ((AIRCR_VECT_KEY_PERMIT_WRITE << SCB_AIRCR_VECTKEY_Pos) &
			SCB_AIRCR_VECTKEY_Msk);
	/* Note: Fault conditions that would generate a SecureFault
	 * in a PE with the Main Extension instead generate a
	 * SecureHardFault in a PE without the Main Extension.
	 */
#endif /* ARM_SECURE_FIRMWARE && !ARM_SECURE_BUSFAULT_HARDFAULT_NMI */

#if defined(CONFIG_CPU_CORTEX_M_HAS_SYSTICK) && \
	!defined(CONFIG_CORTEX_M_SYSTICK)
	/* SoC implements SysTick, but the system does not use it
	 * as driver for system timing. However, the SysTick IRQ is
	 * always enabled, so we must ensure the interrupt priority
	 * is set to a level lower than the kernel interrupts (for
	 * the assert mechanism to work properly) in case the SysTick
	 * interrupt is accidentally raised.
	 */
	NVIC_SetPriority(SysTick_IRQn, _EXC_IRQ_DEFAULT_PRIO);
#endif /* CPU_CORTEX_M_HAS_SYSTICK && ! CORTEX_M_SYSTICK */

}
|
|
|
|
|
2017-01-25 16:12:00 +01:00
|
|
|
/**
 * @brief Clear Fault exceptions
 *
 * Clear out exceptions for Mem, Bus, Usage and Hard Faults
 */
static ALWAYS_INLINE void z_arm_clear_faults(void)
{
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	/* Intentionally empty: Baseline variants are handled with no
	 * register writes here (presumably because the configurable fault
	 * status registers written below are a Mainline feature --
	 * TODO confirm against the ARMv6-M/ARMv8-M Baseline ARM).
	 */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	/* Reset all faults */
	SCB->CFSR = SCB_CFSR_USGFAULTSR_Msk |
		    SCB_CFSR_MEMFAULTSR_Msk |
		    SCB_CFSR_BUSFAULTSR_Msk;

	/* Clear all Hard Faults - HFSR is write-one-to-clear */
	SCB->HFSR = 0xffffffff;
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
}
|
|
|
|
|
2021-01-19 18:48:39 +01:00
|
|
|
/**
|
|
|
|
* @brief Assess whether a debug monitor event should be treated as an error
|
|
|
|
*
|
2021-01-20 11:09:50 +01:00
|
|
|
* This routine checks the status of a debug_monitor() exception, and
|
2021-01-19 18:48:39 +01:00
|
|
|
* evaluates whether this needs to be considered as a processor error.
|
|
|
|
*
|
|
|
|
* @return true if the DM exception is a processor error, otherwise false
|
|
|
|
*/
|
|
|
|
bool z_arm_debug_monitor_event_error_check(void);
|
|
|
|
|
2016-01-22 18:38:49 +01:00
|
|
|
#ifdef __cplusplus
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2019-08-12 19:52:55 +02:00
|
|
|
#endif /* _ASMLANGUAGE */
|
2016-01-22 18:38:49 +01:00
|
|
|
|
2023-12-08 14:53:19 +01:00
|
|
|
#endif /* ZEPHYR_ARCH_ARM_INCLUDE_CORTEX_M_EXCEPTION_H_ */
|