kernel/sched: Refactor/cleanup z_thread_halt()

The big change is to factor out a thread_halt_spin() utility to manage
the core complexity of this code: the situation where an ISR is asked
to abort a thread already running on another SMP CPU.
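
For context, that tricky case looks roughly like the sketch below,
built on the public Zephyr API (the thread and ISR names here are
hypothetical, not part of this commit):

	#include <zephyr/kernel.h>

	/* Hypothetical worker thread object, assumed to be currently
	 * executing on a different CPU than the one taking the IRQ.
	 */
	extern struct k_thread worker;

	/* An ISR that asks the kernel to kill the worker.  Because the
	 * worker is mid-execution elsewhere, the kernel must flag it as
	 * aborting, send an IPI, and then spin (an ISR cannot block)
	 * until the other CPU actually switches it out.  That spin is
	 * what the new thread_halt_spin() utility encapsulates.
	 */
	void shutdown_isr(const void *arg)
	{
		ARG_UNUSED(arg);

		k_thread_abort(&worker); /* reaches z_thread_halt() internally */
	}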

With that factored out, things can be cleaned up quite a bit: early
returns are removed, most of the "#ifdef CONFIG_SMP" guards go away
(they were superfluous; the dead paths optimize out anyway), and the
comments are unified and cleaned up.
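
(The "optimize out" point is ordinary dead-code elimination, no
preprocessor needed. A hedged sketch of the pattern, not the kernel's
exact code:)

	#include <zephyr/sys/util_macro.h>

	/* IS_ENABLED(CONFIG_SMP) is a compile-time 0 on uniprocessor
	 * builds, so the compiler deletes the cross-CPU branch entirely,
	 * while the code is still parsed and type-checked everywhere.
	 */
	void halt_path_sketch(bool active_elsewhere)
	{
		if (IS_ENABLED(CONFIG_SMP) && active_elsewhere) {
			/* cross-CPU path: flag target, IPI, wait (elided) */
		} else {
			/* local path: halt the thread directly (elided) */
		}
	}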

No behavioral changes (hopefully), just refactoring.

Signed-off-by: Andy Ross <andyross@google.com>
Andy Ross 2024-04-06 07:44:47 -07:00 committed by Carles Cufí
parent 2a7adae6c1
commit 5fa2b6f377

--- a/kernel/sched.c
+++ b/kernel/sched.c

@@ -160,7 +160,6 @@ static ALWAYS_INLINE void dequeue_thread(struct k_thread *thread)
 	}
 }
 
-#ifdef CONFIG_SMP
 /* Called out of z_swap() when CONFIG_SMP. The current thread can
  * never live in the run queue until we are inexorably on the context
  * switch path on SMP, otherwise there is a deadlock condition where a
@@ -187,7 +186,6 @@ static inline bool is_halting(struct k_thread *thread)
 	return (thread->base.thread_state &
 		(_THREAD_ABORTING | _THREAD_SUSPENDING)) != 0U;
 }
-#endif /* CONFIG_SMP */
 
 /* Clear the halting bits (_THREAD_ABORTING and _THREAD_SUSPENDING) */
 static inline void clear_halting(struct k_thread *thread)
@@ -421,84 +419,70 @@ void z_sched_start(struct k_thread *thread)
 	z_reschedule(&_sched_spinlock, key);
 }
 
-/**
- * @brief Halt a thread
- *
- * If the target thread is running on another CPU, flag it as needing to
- * abort and send an IPI (if supported) to force a schedule point and wait
- * until the target thread is switched out (ISRs will spin to wait and threads
- * will block to wait). If the target thread is not running on another CPU,
- * then it is safe to act immediately.
- *
- * Upon entry to this routine, the scheduler lock is already held. It is
- * released before this routine returns.
- *
- * @param thread Thread to suspend or abort
- * @param key Current key for _sched_spinlock
- * @param terminate True if aborting thread, false if suspending thread
+/* Spins in ISR context, waiting for a thread known to be running on
+ * another CPU to catch the IPI we sent and halt. Note that we check
+ * for ourselves being asynchronously halted first to prevent simple
+ * deadlocks (but not complex ones involving cycles of 3+ threads!).
+ */
+static k_spinlock_key_t thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key)
+{
+	if (is_halting(_current)) {
+		halt_thread(_current,
+			    is_aborting(_current) ? _THREAD_DEAD : _THREAD_SUSPENDED);
+	}
+	k_spin_unlock(&_sched_spinlock, key);
+	while (is_halting(thread)) {
+	}
+	key = k_spin_lock(&_sched_spinlock);
+	z_sched_switch_spin(thread);
+	return key;
+}
+
+/* Shared handler for k_thread_{suspend,abort}(). Called with the
+ * scheduler lock held and the key passed (which it may
+ * release/reacquire!) which will be released before a possible return
+ * (aborting _current will not return, obviously), which may be after
+ * a context switch.
  */
 static void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,
 			  bool terminate)
 {
-	_wait_q_t *wq = &thread->join_queue;
-#ifdef CONFIG_SMP
-	if (is_halting(_current) && arch_is_in_isr()) {
-		/* Another CPU (in an ISR) or thread is waiting for the
-		 * current thread to halt. Halt it now to help avoid a
-		 * potential deadlock.
-		 */
-		halt_thread(_current,
-			    is_aborting(_current) ? _THREAD_DEAD
-			    : _THREAD_SUSPENDED);
-	}
-
-	wq = terminate ? wq : &thread->halt_queue;
-#endif
-
-	bool active = thread_active_elsewhere(thread);
-
-	if (active) {
-		/* It's running somewhere else, flag and poke */
+	/* If the target is a thread running on another CPU, flag and
+	 * poke (note that we might spin to wait, so a true
+	 * synchronous IPI is needed here, not deferred!), it will
+	 * halt itself in the IPI. Otherwise it's unscheduled, so we
+	 * can clean it up directly.
+	 */
+	if (thread_active_elsewhere(thread)) {
 		thread->base.thread_state |= (terminate ? _THREAD_ABORTING
 					      : _THREAD_SUSPENDING);
-
-		/* We might spin to wait, so a true synchronous IPI is needed
-		 * here, not deferred!
-		 */
-#ifdef CONFIG_SCHED_IPI_SUPPORTED
+#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
 		arch_sched_ipi();
-#endif /* CONFIG_SCHED_IPI_SUPPORTED */
-	}
-
-#ifdef CONFIG_SMP
-	if (is_halting(thread) && (thread != _current)) {
+#endif
 		if (arch_is_in_isr()) {
-			/* ISRs can only spin waiting another CPU */
+			key = thread_halt_spin(thread, key);
 			k_spin_unlock(&_sched_spinlock, key);
-			while (is_halting(thread)) {
-			}
-
-			/* Now we know it's halting, but not necessarily
-			 * halted (suspended or aborted). Wait for the switch
-			 * to happen!
-			 */
-			key = k_spin_lock(&_sched_spinlock);
-			z_sched_switch_spin(thread);
-			k_spin_unlock(&_sched_spinlock, key);
-		} else if (active) {
-			/* Threads can wait on a queue */
-			add_to_waitq_locked(_current, wq);
+		} else {
+			add_to_waitq_locked(_current, terminate ?
					    &thread->join_queue :
+					    &thread->halt_queue);
 			z_swap(&_sched_spinlock, key);
 		}
-		return; /* lock has been released */
-	}
-#endif /* CONFIG_SMP */
-	halt_thread(thread, terminate ? _THREAD_DEAD : _THREAD_SUSPENDED);
-	if ((thread == _current) && !arch_is_in_isr()) {
-		z_swap(&_sched_spinlock, key);
-		__ASSERT(!terminate, "aborted _current back from dead");
 	} else {
-		k_spin_unlock(&_sched_spinlock, key);
+		halt_thread(thread, terminate ? _THREAD_DEAD : _THREAD_SUSPENDED);
+		if ((thread == _current) && !arch_is_in_isr()) {
+			z_swap(&_sched_spinlock, key);
+			__ASSERT(!terminate, "aborted _current back from dead");
+		} else {
+			k_spin_unlock(&_sched_spinlock, key);
+		}
 	}
 }
 
 void z_impl_k_thread_suspend(struct k_thread *thread)
 {
 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, suspend, thread);
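
A note on the deadlock that the new thread_halt_spin() comment calls
out: the "simple" case is two CPUs, each in an ISR, aborting the
thread running on the other at the same moment. A hedged illustration
of that shape (hypothetical thread/ISR names; real API):

	#include <zephyr/kernel.h>

	extern struct k_thread thr_a; /* assumed running on CPU 0 */
	extern struct k_thread thr_b; /* assumed running on CPU 1 */

	/* If an ISR on CPU 0 aborts thr_b while an ISR on CPU 1 aborts
	 * thr_a, each side would spin forever waiting for the other to
	 * halt.  The is_halting(_current) check at the top of
	 * thread_halt_spin() breaks the cycle: before spinning, each
	 * CPU first halts its own thread if someone else has already
	 * flagged it.  Cycles of 3+ CPUs remain unhandled, per the
	 * comment.
	 */
	void abort_b_isr(const void *arg) /* runs on CPU 0 */
	{
		ARG_UNUSED(arg);
		k_thread_abort(&thr_b);
	}

	void abort_a_isr(const void *arg) /* runs on CPU 1 */
	{
		ARG_UNUSED(arg);
		k_thread_abort(&thr_a);
	}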