kernel/sched: Refactor/cleanup z_thread_halt()
Big change is to factor out a thread_halt_spin() utility to manage the core complexity of this code: the situation where an ISR is asked to abort a thread already running on another SMP CPU.

With that gone, things can be cleaned up quite a bit: the early returns are removed, most of the "#if CONFIG_SMP" usage (which was superfluous and will optimize out) is dropped, and the comments are unified and cleaned up.

No behavioral changes (hopefully), just refactoring.

Signed-off-by: Andy Ross <andyross@google.com>
parent 2a7adae6c1
commit 5fa2b6f377
 kernel/sched.c | 112
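For orientation before reading the diff: the sketch below condenses the refactored z_thread_halt() control flow as it appears in the new code. It is a reading aid, not the literal patch text; the wait-queue selection, the SMP/IPI #ifdef guards, and the __ASSERT are elided, and the surrounding kernel context (locks, _current, halt_thread(), etc.) is assumed from the file being patched.

	/* Condensed sketch of the refactored flow (see the full diff below) */
	if (thread_active_elsewhere(thread)) {
		/* Target is running on another CPU: flag it and send a
		 * synchronous IPI, then wait for it to halt itself.
		 */
		thread->base.thread_state |= (terminate ? _THREAD_ABORTING
					      : _THREAD_SUSPENDING);
		arch_sched_ipi();
		if (arch_is_in_isr()) {
			/* ISRs cannot block: spin via the new helper */
			key = thread_halt_spin(thread, key);
			k_spin_unlock(&_sched_spinlock, key);
		} else {
			/* Threads block on the join/halt wait queue */
			add_to_waitq_locked(_current, wq);
			z_swap(&_sched_spinlock, key);
		}
	} else {
		/* Not running elsewhere: halt it directly, then either
		 * context-switch away or simply drop the lock.
		 */
		halt_thread(thread, terminate ? _THREAD_DEAD : _THREAD_SUSPENDED);
		if ((thread == _current) && !arch_is_in_isr()) {
			z_swap(&_sched_spinlock, key);
		} else {
			k_spin_unlock(&_sched_spinlock, key);
		}
	}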
diff --git a/kernel/sched.c b/kernel/sched.c
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -160,7 +160,6 @@ static ALWAYS_INLINE void dequeue_thread(struct k_thread *thread)
 	}
 }
 
-#ifdef CONFIG_SMP
 /* Called out of z_swap() when CONFIG_SMP.  The current thread can
  * never live in the run queue until we are inexorably on the context
  * switch path on SMP, otherwise there is a deadlock condition where a
@@ -187,7 +186,6 @@ static inline bool is_halting(struct k_thread *thread)
 	return (thread->base.thread_state &
 		(_THREAD_ABORTING | _THREAD_SUSPENDING)) != 0U;
 }
-#endif /* CONFIG_SMP */
 
 /* Clear the halting bits (_THREAD_ABORTING and _THREAD_SUSPENDING) */
 static inline void clear_halting(struct k_thread *thread)
@@ -421,84 +419,70 @@ void z_sched_start(struct k_thread *thread)
 	z_reschedule(&_sched_spinlock, key);
 }
 
-/**
- * @brief Halt a thread
- *
- * If the target thread is running on another CPU, flag it as needing to
- * abort and send an IPI (if supported) to force a schedule point and wait
- * until the target thread is switched out (ISRs will spin to wait and threads
- * will block to wait). If the target thread is not running on another CPU,
- * then it is safe to act immediately.
- *
- * Upon entry to this routine, the scheduler lock is already held. It is
- * released before this routine returns.
- *
- * @param thread Thread to suspend or abort
- * @param key Current key for _sched_spinlock
- * @param terminate True if aborting thread, false if suspending thread
+/* Spins in ISR context, waiting for a thread known to be running on
+ * another CPU to catch the IPI we sent and halt.  Note that we check
+ * for ourselves being asynchronously halted first to prevent simple
+ * deadlocks (but not complex ones involving cycles of 3+ threads!).
+ */
+static k_spinlock_key_t thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key)
+{
+	if (is_halting(_current)) {
+		halt_thread(_current,
+			    is_aborting(_current) ? _THREAD_DEAD : _THREAD_SUSPENDED);
+	}
+	k_spin_unlock(&_sched_spinlock, key);
+	while (is_halting(thread)) {
+	}
+	key = k_spin_lock(&_sched_spinlock);
+	z_sched_switch_spin(thread);
+	return key;
+}
+
+/* Shared handler for k_thread_{suspend,abort}().  Called with the
+ * scheduler lock held and the key passed (which it may
+ * release/reacquire!) which will be released before a possible return
+ * (aborting _current will not return, obviously), which may be after
+ * a context switch.
  */
 static void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,
 			  bool terminate)
 {
+	_wait_q_t *wq = &thread->join_queue;
 #ifdef CONFIG_SMP
-	if (is_halting(_current) && arch_is_in_isr()) {
-		/* Another CPU (in an ISR) or thread is waiting for the
-		 * current thread to halt. Halt it now to help avoid a
-		 * potential deadlock.
-		 */
-		halt_thread(_current,
-			    is_aborting(_current) ? _THREAD_DEAD
-						  : _THREAD_SUSPENDED);
-	}
-
-	bool active = thread_active_elsewhere(thread);
-
-	if (active) {
-		/* It's running somewhere else, flag and poke */
+	wq = terminate ? wq : &thread->halt_queue;
+#endif
+
+	/* If the target is a thread running on another CPU, flag and
+	 * poke (note that we might spin to wait, so a true
+	 * synchronous IPI is needed here, not deferred!), it will
+	 * halt itself in the IPI.  Otherwise it's unscheduled, so we
+	 * can clean it up directly.
+	 */
+	if (thread_active_elsewhere(thread)) {
 		thread->base.thread_state |= (terminate ? _THREAD_ABORTING
 					      : _THREAD_SUSPENDING);
-
-		/* We might spin to wait, so a true synchronous IPI is needed
-		 * here, not deferred!
-		 */
-#ifdef CONFIG_SCHED_IPI_SUPPORTED
+#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
 		arch_sched_ipi();
-#endif /* CONFIG_SCHED_IPI_SUPPORTED */
-	}
-
-	if (is_halting(thread) && (thread != _current)) {
+#endif
 		if (arch_is_in_isr()) {
-			/* ISRs can only spin waiting another CPU */
+			key = thread_halt_spin(thread, key);
 			k_spin_unlock(&_sched_spinlock, key);
-			while (is_halting(thread)) {
-			}
-
-			/* Now we know it's halting, but not necessarily
-			 * halted (suspended or aborted). Wait for the switch
-			 * to happen!
-			 */
-			key = k_spin_lock(&_sched_spinlock);
-			z_sched_switch_spin(thread);
-			k_spin_unlock(&_sched_spinlock, key);
-		} else if (active) {
-			/* Threads can wait on a queue */
-			add_to_waitq_locked(_current, terminate ?
-					    &thread->join_queue :
-					    &thread->halt_queue);
+		} else {
+			add_to_waitq_locked(_current, wq);
 			z_swap(&_sched_spinlock, key);
 		}
-		return; /* lock has been released */
-	}
-#endif /* CONFIG_SMP */
-	halt_thread(thread, terminate ? _THREAD_DEAD : _THREAD_SUSPENDED);
-	if ((thread == _current) && !arch_is_in_isr()) {
-		z_swap(&_sched_spinlock, key);
-		__ASSERT(!terminate, "aborted _current back from dead");
 	} else {
-		k_spin_unlock(&_sched_spinlock, key);
+		halt_thread(thread, terminate ? _THREAD_DEAD : _THREAD_SUSPENDED);
+		if ((thread == _current) && !arch_is_in_isr()) {
+			z_swap(&_sched_spinlock, key);
+			__ASSERT(!terminate, "aborted _current back from dead");
+		} else {
+			k_spin_unlock(&_sched_spinlock, key);
+		}
 	}
 }
 
 void z_impl_k_thread_suspend(struct k_thread *thread)
 {
 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, suspend, thread);