88ba97fea4
This adds lazy floating point context switching. On svc/irq entrance, the VFP is disabled and a pointer to the exception stack frame is saved away. If the esf pointer is still valid on exception exit, then no other context used the VFP, so the floating point context is still valid and nothing needs to be restored. If the esf pointer is NULL on exception exit, then some other context used the VFP and the floating point context is restored from the esf.

The undefined instruction handler is responsible for saving away the floating point context if needed. If the handler is in the first irq/svc context and the current thread uses the VFP, then the float context needs to be saved. Also, if the handler is in a nested context and the previous context was using the VFP, save the float context.

Signed-off-by: Bradley Bolen <bbolen@lexmark.com>
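A rough C sketch of the flow described above may help; the real entry/exit paths and the undefined instruction handler are assembly, and the struct esf layout and the vfp_disable/vfp_enable/vfp_save/vfp_restore helpers here are hypothetical stubs, not Zephyr APIs. The fp_ctx and esf pointers mirror the state this commit adds offsets for below (_kernel_offset_to_fp_ctx and _thread_offset_to_esf); nesting details are omitted.

/*
 * Hypothetical model of the lazy VFP hand-off described in the commit
 * message.  Helper names and struct layouts are illustrative stubs only,
 * and nested-exception bookkeeping is left out for brevity.
 */
#include <stddef.h>

struct esf {
	unsigned long long fp_regs[32];	/* stand-in for the saved VFP register area */
};

static struct esf *fp_ctx;		/* per-CPU: who may still own the live VFP state */

static void vfp_disable(void) { /* clear the VFP enable bit (stub) */ }
static void vfp_enable(void)  { /* set the VFP enable bit (stub) */ }
static void vfp_save(struct esf *to) { (void)to; /* store the VFP registers (stub) */ }
static void vfp_restore(const struct esf *from) { (void)from; /* reload the VFP registers (stub) */ }

/* svc/irq entrance: disable the VFP and remember where the interrupted
 * context's registers would have to be spilled if another context uses it.
 */
static void exc_entry(struct esf *esf)
{
	vfp_disable();
	fp_ctx = esf;
}

/* Undefined instruction trap: the running context touched the VFP while it
 * was disabled.  If the interrupted context still owns the live registers,
 * spill them into its esf first (the real handler additionally checks that
 * the interrupted context was actually using the VFP, per the commit text).
 */
static void undef_handler(void)
{
	if (fp_ctx != NULL) {
		vfp_save(fp_ctx);
		fp_ctx = NULL;		/* ownership of the live registers moves on */
	}
	vfp_enable();			/* the faulting instruction can now be retried */
}

/* svc/irq exit: if fp_ctx still points at our esf, nobody used the VFP and
 * the live registers are untouched; if it was cleared, reload from the esf.
 */
static void exc_exit(struct esf *esf)
{
	if (fp_ctx == esf) {
		fp_ctx = NULL;		/* still valid: nothing to restore */
	} else {
		vfp_restore(esf);	/* went NULL: some other context used the VFP */
	}
}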
/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_KERNEL_INCLUDE_OFFSETS_SHORT_H_
#define ZEPHYR_KERNEL_INCLUDE_OFFSETS_SHORT_H_

#include <offsets.h>
#include <offsets_short_arch.h>

/* kernel */

/* main */
#ifndef CONFIG_SMP
/* Relies on _kernel.cpu being the first member of _kernel and having 1 element
 */
#define _kernel_offset_to_nested \
	(___cpu_t_nested_OFFSET)

#define _kernel_offset_to_irq_stack \
	(___cpu_t_irq_stack_OFFSET)

#define _kernel_offset_to_current \
	(___cpu_t_current_OFFSET)

#if defined(CONFIG_FPU_SHARING)
#define _kernel_offset_to_fp_ctx \
	(___cpu_t_fp_ctx_OFFSET)
#endif /* CONFIG_FPU_SHARING */
#endif /* CONFIG_SMP */

#define _kernel_offset_to_idle \
	(___kernel_t_idle_OFFSET)

#define _kernel_offset_to_current_fp \
	(___kernel_t_current_fp_OFFSET)

#define _kernel_offset_to_ready_q_cache \
	(___kernel_t_ready_q_OFFSET + ___ready_q_t_cache_OFFSET)

/* end - kernel */

/* threads */

/* main */

#define _thread_offset_to_callee_saved \
	(___thread_t_callee_saved_OFFSET)

#ifdef CONFIG_THREAD_LOCAL_STORAGE
#define _thread_offset_to_tls \
	(___thread_t_tls_OFFSET)
#endif /* CONFIG_THREAD_LOCAL_STORAGE */

/* base */

#define _thread_offset_to_thread_state \
	(___thread_t_base_OFFSET + ___thread_base_t_thread_state_OFFSET)

#define _thread_offset_to_user_options \
	(___thread_t_base_OFFSET + ___thread_base_t_user_options_OFFSET)

#define _thread_offset_to_prio \
	(___thread_t_base_OFFSET + ___thread_base_t_prio_OFFSET)

#define _thread_offset_to_sched_locked \
	(___thread_t_base_OFFSET + ___thread_base_t_sched_locked_OFFSET)

#define _thread_offset_to_preempt \
	(___thread_t_base_OFFSET + ___thread_base_t_preempt_OFFSET)

#define _thread_offset_to_esf \
	(___thread_t_arch_OFFSET + ___thread_arch_t_esf_OFFSET)

#define _thread_offset_to_stack_start \
	(___thread_t_stack_info_OFFSET + ___thread_stack_info_t_start_OFFSET)
/* end - threads */

#endif /* ZEPHYR_KERNEL_INCLUDE_OFFSETS_SHORT_H_ */
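The _kernel_offset_to_* and _thread_offset_to_* macros exist so that assembly sources, which cannot evaluate C struct member expressions, can reach kernel and thread fields as plain numeric offsets from a base register. The comment above the non-SMP block notes that applying per-CPU offsets directly to _kernel only works because the CPU array is the first member of _kernel and has a single element; the compile-time check below illustrates that assumption with hypothetical cut-down layouts, not the real struct _cpu / z_kernel definitions.

#include <stddef.h>

/* Hypothetical, trimmed-down stand-ins for the per-CPU and kernel structs. */
struct cpu_stub {
	unsigned int nested;
	char *irq_stack;
	void *current;
	void *fp_ctx;
};

struct kernel_stub {
	struct cpu_stub cpus[1];	/* first member, exactly one element */
	void *idle;
};

/* With cpus[] at offset 0 and a single element, an offset measured inside
 * the per-CPU struct can be added directly to the address of the kernel
 * object itself, which is what the non-SMP _kernel_offset_to_* macros do.
 */
_Static_assert(offsetof(struct kernel_stub, cpus[0].current) ==
	       offsetof(struct cpu_stub, current),
	       "per-CPU offsets are only valid against _kernel if cpus[] comes first");

On SMP builds the per-CPU base has to be computed at run time rather than folded into a constant, which is presumably why these shortcuts are guarded by #ifndef CONFIG_SMP.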