/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/pm/pm.h>
#include <stdbool.h>
#include <zephyr/logging/log.h>

/* private kernel APIs */
#include <ksched.h>
#include <kswap.h>
#include <wait_q.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

void z_pm_save_idle_exit(void)
{
#ifdef CONFIG_PM
	/* Some CPU low power states require notification at the ISR
	 * level to allow any operations that need to be done before the
	 * kernel switches tasks or processes nested interrupts.
	 * This can simply be ignored if not required.
	 */
	pm_system_resume();
#endif /* CONFIG_PM */
#ifdef CONFIG_SYS_CLOCK_EXISTS
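	/* Let the system timer driver know the kernel is leaving idle so
	 * it can re-enable tick announcements / reprogram itself as needed.
	 */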
	sys_clock_idle_exit();
#endif /* CONFIG_SYS_CLOCK_EXISTS */
}

void idle(void *unused1, void *unused2, void *unused3)
{
	ARG_UNUSED(unused1);
	ARG_UNUSED(unused2);
	ARG_UNUSED(unused3);
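
	/* The idle thread is expected to run at a preemptible
	 * (non-negative) priority so any other ready thread can displace it.
	 */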
	__ASSERT_NO_MSG(_current->base.prio >= 0);

	while (true) {
		/* SMP systems without a working IPI can't actually
		 * enter an idle state, because they can't be notified
		 * of scheduler changes (i.e. threads they should
		 * run). They just spin instead, with a minimal
		 * relaxation loop to prevent hammering the scheduler
		 * lock and/or timer driver. This is intended as a
		 * fallback configuration for new platform bringup.
		 */
		if (IS_ENABLED(CONFIG_SMP) && !IS_ENABLED(CONFIG_SCHED_IPI_SUPPORTED)) {
			for (volatile int i = 0; i < 100000; i++) {
				/* Empty loop */
			}
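			/* Yield so any thread made ready while we were
			 * spinning gets a chance to run.
			 */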
			z_swap_unlocked();
		}

		/* Note weird API: k_cpu_idle() is called with local
		 * CPU interrupts masked, and returns with them
		 * unmasked. It does not take a spinlock or other
		 * higher level construct.
		 */
		(void) arch_irq_lock();

#ifdef CONFIG_PM
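		/* Record the ticks remaining until the next timeout so the
		 * PM subsystem can pick a suitably deep power state.
		 */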
		_kernel.idle = z_get_next_timeout_expiry();

		/*
		 * Call the suspend hook function of the SoC interface
		 * to allow entry into a low power state. The function
		 * returns false if a low power state was not entered, in
		 * which case the kernel does normal idle processing.
		 *
		 * This function is entered with interrupts disabled.
		 * If a low power state was entered, then the hook
		 * function should enable interrupts before exiting.
		 * This is because the kernel does not do its own idle
		 * processing in those cases, i.e. it skips k_cpu_idle().
		 * The kernel's idle processing re-enables interrupts,
		 * which is essential for the kernel's scheduling
		 * logic.
		 */
		if (k_is_pre_kernel() || !pm_system_suspend(_kernel.idle)) {
			k_cpu_idle();
		}
#else
		k_cpu_idle();
#endif /* CONFIG_PM */

#if !defined(CONFIG_PREEMPT_ENABLED)
# if !defined(CONFIG_USE_SWITCH) || defined(CONFIG_SPARC)
		/* A legacy mess: the idle thread is by definition
		 * preemptible as far as the modern scheduler is
		 * concerned, but older platforms use
		 * CONFIG_PREEMPT_ENABLED=n as an optimization hint
		 * that interrupt exit always returns to the
		 * interrupted context. So in that setup we need to
		 * explicitly yield in the idle thread, otherwise
		 * nothing else will run once it starts.
		 */
		if (_kernel.ready_q.cache != _current) {
			z_swap_unlocked();
		}
# endif /* !defined(CONFIG_USE_SWITCH) || defined(CONFIG_SPARC) */
#endif /* !defined(CONFIG_PREEMPT_ENABLED) */
	}
}
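
/* Weak default for the spin-relax hint used in spinlock busy-wait
 * loops: just a no-op here, but architectures may override it with a
 * pause/yield style instruction to spin more politely.
 */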
void __weak arch_spin_relax(void)
{
	__ASSERT(!arch_irq_unlocked(arch_irq_lock()),
		 "this is meant to be called with IRQs disabled");

	arch_nop();
}