kernel/idle: Use normal idle in SMP when IPI is available

Now that we have a working IPI framework, there's no reason for the
default spin loop for the SMP idle thread.  Just use the default
platform idle and send an IPI when a new thread is readied.

Long term, this can be optimized if necessary (e.g. only send the IPI
to idling CPUs, or check priorities, etc...), but for a 2-CPU system
this is a very reasonable default.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
Authored by Andy Ross on 2019-08-19 14:29:21 -07:00; committed by Anas Nashif.
parent 6c283ca3d0
commit 11bd67db53
2 changed files with 10 additions and 9 deletions

View file

@ -19,6 +19,10 @@
#define IDLE_THRESH 1
#endif
/* Fallback idle spin loop for SMP platforms without a working IPI.
 *
 * Evaluate the configuration test once here and define SMP_FALLBACK
 * to a literal 0/1.  Expanding defined() inside a macro body that is
 * later tested by #if is undefined behavior (C11 6.10.1p4) and is
 * flagged by GCC/Clang's -Wexpansion-to-defined.
 */
#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_IPI_SUPPORTED)
#define SMP_FALLBACK 1
#else
#define SMP_FALLBACK 0
#endif
#ifdef CONFIG_SYS_POWER_MANAGEMENT
/*
* Used to allow _sys_suspend() implementation to control notification
@ -56,7 +60,7 @@ void __attribute__((weak)) _sys_resume_from_deep_sleep(void)
*
* @return N/A
*/
#ifndef CONFIG_SMP
#if !SMP_FALLBACK
static void set_kernel_idle_time_in_ticks(s32_t ticks)
{
#ifdef CONFIG_SYS_POWER_MANAGEMENT
@ -145,18 +149,12 @@ void idle(void *unused1, void *unused2, void *unused3)
__idle_time_stamp = k_cycle_get_32();
#endif
#ifdef CONFIG_SMP
/* Simplified idle for SMP CPUs pending driver support. The
* busy waiting is needed to prevent lock contention. Long
* term we need to wake up idle CPUs with an IPI.
*/
while (true) {
#if SMP_FALLBACK
k_busy_wait(100);
k_yield();
}
#else
for (;;) {
(void)irq_lock();
(void)z_arch_irq_lock();
sys_power_save_idle();
IDLE_YIELD_IF_COOP();

View file

@ -336,6 +336,9 @@ void z_add_thread_to_ready_q(struct k_thread *thread)
_priq_run_add(&_kernel.ready_q.runq, thread);
z_mark_thread_as_queued(thread);
update_cache(0);
#ifdef CONFIG_SCHED_IPI_SUPPORTED
z_arch_sched_ipi();
#endif
}
}