arch: smp: make flush_fpu_ipi a common, optional interface

The interface for flushing the FPU is not unique to one architecture;
make it a generic, optional interface that can be implemented (and
overridden) by a platform.
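
For context, the resulting shape of the interface is the same on every
architecture: under CONFIG_FPU_SHARING, each SMP-capable port provides the
two entry points below, so common code can call them without arch-specific
names. A minimal sketch of the shared declarations (the prototypes match the
diffs that follow; the comments and the idea of a single common header are
illustrative, not part of this commit):

    #ifdef CONFIG_FPU_SHARING
    /* Flush the FPU context held by the current CPU back to its owner
     * and disable FPU access; callers must have IRQs disabled.
     */
    void arch_flush_local_fpu(void);

    /* Send an IPI asking CPU "cpu" to flush its FPU context. */
    void arch_flush_fpu_ipi(unsigned int cpu);
    #endif

Each architecture keeps its own implementation (arm64 and riscv below); only
the names, and therefore the call sites, become portable.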

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
Authored by Anas Nashif on 2023-12-13 16:04:15 -05:00; committed by Carles Cufí
parent 3f75373584
commit 7d3b6c6a40
7 changed files with 23 additions and 23 deletions

@@ -250,7 +250,7 @@ static bool z_arm64_stack_corruption_check(z_arch_esf_t *esf, uint64_t esr, uint
 	 * so flush the fpu context to its owner, and then set no fpu trap to avoid
 	 * a new nested exception triggered by FPU accessing (var_args).
 	 */
-	z_arm64_flush_local_fpu();
+	arch_flush_local_fpu();
 	write_cpacr_el1(read_cpacr_el1() | CPACR_EL1_FPEN_NOTRAP);
 #endif
 	arch_curr_cpu()->arch.corrupted_sp = 0UL;

@@ -64,7 +64,7 @@ static inline void DBG(char *msg, struct k_thread *t) { }
  * Flush FPU content and disable access.
  * This is called locally and also from flush_fpu_ipi_handler().
  */
-void z_arm64_flush_local_fpu(void)
+void arch_flush_local_fpu(void)
 {
 	__ASSERT(read_daif() & DAIF_IRQ_BIT, "must be called with IRQs disabled");

@@ -107,10 +107,10 @@ static void flush_owned_fpu(struct k_thread *thread)
 		}
 		/* we found it live on CPU i */
 		if (i == _current_cpu->id) {
-			z_arm64_flush_local_fpu();
+			arch_flush_local_fpu();
 		} else {
 			/* the FPU context is live on another CPU */
-			z_arm64_flush_fpu_ipi(i);
+			arch_flush_fpu_ipi(i);

 			/*
 			 * Wait for it only if this is about the thread
@@ -126,7 +126,7 @@ static void flush_owned_fpu(struct k_thread *thread)
 			 * two CPUs want to pull each other's FPU context.
 			 */
 			if (thread == _current) {
-				z_arm64_flush_local_fpu();
+				arch_flush_local_fpu();
 				while (atomic_ptr_get(&_kernel.cpus[i].arch.fpu_owner) == thread) {
 					barrier_dsync_fence_full();
 				}
@@ -334,7 +334,7 @@ int arch_float_disable(struct k_thread *thread)
 	flush_owned_fpu(thread);
 #else
 	if (thread == atomic_ptr_get(&_current_cpu->arch.fpu_owner)) {
-		z_arm64_flush_local_fpu();
+		arch_flush_local_fpu();
 	}
 #endif


@@ -242,11 +242,11 @@ void flush_fpu_ipi_handler(const void *unused)
 	ARG_UNUSED(unused);

 	disable_irq();
-	z_arm64_flush_local_fpu();
+	arch_flush_local_fpu();
 	/* no need to re-enable IRQs here */
 }

-void z_arm64_flush_fpu_ipi(unsigned int cpu)
+void arch_flush_fpu_ipi(unsigned int cpu)
 {
 	const uint64_t mpidr = cpu_map[cpu];
 	uint8_t aff0;
@@ -272,7 +272,7 @@ void arch_spin_relax(void)
 		arm_gic_irq_clear_pending(SGI_FPU_IPI);
 		/*
 		 * We may not be in IRQ context here hence cannot use
-		 * z_arm64_flush_local_fpu() directly.
+		 * arch_flush_local_fpu() directly.
 		 */
 		arch_float_disable(_current_cpu->arch.fpu_owner);
 	}

@@ -48,8 +48,8 @@ extern void z_arm64_set_ttbr0(uint64_t ttbr0);
 extern void z_arm64_mem_cfg_ipi(void);

 #ifdef CONFIG_FPU_SHARING
-void z_arm64_flush_local_fpu(void);
-void z_arm64_flush_fpu_ipi(unsigned int cpu);
+void arch_flush_local_fpu(void);
+void arch_flush_fpu_ipi(unsigned int cpu);
 #endif

 #ifdef CONFIG_ARM64_SAFE_EXCEPTION_STACK

@@ -98,7 +98,7 @@ static void z_riscv_fpu_load(void)
  *
  * This is called locally and also from flush_fpu_ipi_handler().
  */
-void z_riscv_flush_local_fpu(void)
+void arch_flush_local_fpu(void)
 {
 	__ASSERT((csr_read(mstatus) & MSTATUS_IEN) == 0,
 		 "must be called with IRQs disabled");
@@ -149,11 +149,11 @@ static void flush_owned_fpu(struct k_thread *thread)
 		/* we found it live on CPU i */
 		if (i == _current_cpu->id) {
 			z_riscv_fpu_disable();
-			z_riscv_flush_local_fpu();
+			arch_flush_local_fpu();
 			break;
 		}
 		/* the FPU context is live on another CPU */
-		z_riscv_flush_fpu_ipi(i);
+		arch_flush_fpu_ipi(i);

 		/*
 		 * Wait for it only if this is about the thread
@@ -170,7 +170,7 @@ static void flush_owned_fpu(struct k_thread *thread)
 		 */
 		if (thread == _current) {
 			z_riscv_fpu_disable();
-			z_riscv_flush_local_fpu();
+			arch_flush_local_fpu();
 			do {
 				arch_nop();
 				owner = atomic_ptr_get(&_kernel.cpus[i].arch.fpu_owner);
@@ -211,7 +211,7 @@ void z_riscv_fpu_trap(z_arch_esf_t *esf)
 		 "called despite FPU being accessible");

 	/* save current owner's content if any */
-	z_riscv_flush_local_fpu();
+	arch_flush_local_fpu();

 	if (_current->arch.exception_depth > 0) {
 		/*
@@ -271,7 +271,7 @@ static bool fpu_access_allowed(unsigned int exc_update_level)
 		 * to come otherwise.
 		 */
 		z_riscv_fpu_disable();
-		z_riscv_flush_local_fpu();
+		arch_flush_local_fpu();
 #ifdef CONFIG_SMP
 		flush_owned_fpu(_current);
 #endif
@@ -329,7 +329,7 @@ int arch_float_disable(struct k_thread *thread)
 #else
 	if (thread == _current_cpu->arch.fpu_owner) {
 		z_riscv_fpu_disable();
-		z_riscv_flush_local_fpu();
+		arch_flush_local_fpu();
 	}
 #endif


@@ -97,7 +97,7 @@ void arch_sched_ipi(void)
 }

 #ifdef CONFIG_FPU_SHARING
-void z_riscv_flush_fpu_ipi(unsigned int cpu)
+void arch_flush_fpu_ipi(unsigned int cpu)
 {
 	atomic_set_bit(&cpu_pending_ipi[cpu], IPI_FPU_FLUSH);
 	MSIP(_kernel.cpus[cpu].arch.hartid) = 1;
@@ -120,7 +120,7 @@ static void sched_ipi_handler(const void *unused)
 			/* disable IRQs */
 			csr_clear(mstatus, MSTATUS_IEN);
 			/* perform the flush */
-			z_riscv_flush_local_fpu();
+			arch_flush_local_fpu();
 			/*
 			 * No need to re-enable IRQs here as long as
 			 * this remains the last case.
@@ -144,7 +144,7 @@ void arch_spin_relax(void)
 	if (atomic_test_and_clear_bit(pending_ipi, IPI_FPU_FLUSH)) {
 		/*
 		 * We may not be in IRQ context here hence cannot use
-		 * z_riscv_flush_local_fpu() directly.
+		 * arch_flush_local_fpu() directly.
 		 */
 		arch_float_disable(_current_cpu->arch.fpu_owner);
 	}

@@ -95,8 +95,8 @@ int z_irq_do_offload(void);
 #endif

 #ifdef CONFIG_FPU_SHARING
-void z_riscv_flush_local_fpu(void);
-void z_riscv_flush_fpu_ipi(unsigned int cpu);
+void arch_flush_local_fpu(void);
+void arch_flush_fpu_ipi(unsigned int cpu);
 #endif

 #ifndef CONFIG_MULTITHREADING