Revert "kernel/sched: Fix free-memory write when ISRs abort _current"

This reverts commit 61c70626a5.

This PR introduced two regressions in main CI:
71977 & 71978
Let's revert it for now to get main's CI passing again.

Signed-off-by: Alberto Escolar Piedras <alberto.escolar.piedras@nordicsemi.no>
Alberto Escolar Piedras 2024-04-26 09:37:58 +02:00 committed by Fabio Baltieri
parent ab41d3ab99
commit ea26bcf8d3
4 changed files with 14 additions and 42 deletions


@@ -37,8 +37,6 @@ BUILD_ASSERT(K_LOWEST_APPLICATION_THREAD_PRIO
#define Z_ASSERT_VALID_PRIO(prio, entry_point) __ASSERT((prio) == -1, "")
#endif /* CONFIG_MULTITHREADING */
extern struct k_thread _thread_dummies[CONFIG_MP_MAX_NUM_CPUS];
void z_sched_init(void);
void z_move_thread_to_end_of_prio_q(struct k_thread *thread);
void z_unpend_thread_no_timeout(struct k_thread *thread);


@@ -650,7 +650,12 @@ FUNC_NORETURN void z_cstart(void)
LOG_CORE_INIT();
#if defined(CONFIG_MULTITHREADING)
z_dummy_thread_init(&_thread_dummies[0]);
/* Note: The z_ready_thread() call in prepare_multithreading() requires
* a dummy thread even if CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN=y
*/
struct k_thread dummy_thread;
z_dummy_thread_init(&dummy_thread);
#endif /* CONFIG_MULTITHREADING */
/* do any necessary initialization of static devices */
z_device_state_init();
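For context, the stack-local dummy thread restored in this hunk is only a placeholder for early boot: as the restored comment says, z_ready_thread() in prepare_multithreading() needs a dummy thread even with CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN=y, because the scheduler must have some valid _current before main's real thread exists. A minimal sketch of the idea with made-up names (the real helper is z_dummy_thread_init(); its actual fields are not shown here):

```c
#include <stddef.h>
#include <stdint.h>

/* Illustrative stand-ins; not the kernel's real types or helper. */
struct fake_thread {
	uint8_t state;      /* a "_THREAD_DUMMY"-style marker */
	int prio;           /* never competes with real threads */
	void *swap_data;
};

static struct fake_thread *fake_current;  /* stand-in for the CPU's _current */

static void fake_dummy_thread_init(struct fake_thread *dummy)
{
	dummy->state = 0x01;   /* marked as dummy: never made runnable */
	dummy->prio = 0;
	dummy->swap_data = NULL;

	/* The whole point: give the CPU *something* to call "current" so the
	 * first real context switch has a place to save outgoing state.
	 */
	fake_current = dummy;
}
```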


@@ -30,11 +30,6 @@ extern struct k_thread *pending_current;
struct k_spinlock _sched_spinlock;
/* Storage to "complete" the context switch from an invalid/incomplete thread
* context (ex: exiting an ISR that aborted _current)
*/
__incoherent struct k_thread _thread_dummies[CONFIG_MP_MAX_NUM_CPUS];
static void update_cache(int preempt_ok);
static void halt_thread(struct k_thread *thread, uint8_t new_state);
static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q);
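The __incoherent per-CPU array deleted in this hunk was the scratch storage the reverted fix relied on: one spare thread context per CPU, used only to "complete" a context switch out of a thread context that is no longer valid (for example after an ISR aborted _current). A rough sketch of its shape, with illustrative names and CPU count rather than the kernel's:

```c
#include <stdint.h>

#define FAKE_MAX_CPUS 4   /* stand-in for CONFIG_MP_MAX_NUM_CPUS */

struct fake_thread {
	uint8_t state;
	/* ... the arch register save area would live here ... */
};

/* One throwaway context per CPU; only ever the *outgoing* side of a
 * context switch, never scheduled or run as a real thread.
 */
static struct fake_thread fake_thread_dummies[FAKE_MAX_CPUS];

static struct fake_thread *fake_dummy_for(unsigned int cpu_id)
{
	return &fake_thread_dummies[cpu_id];
}
```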
@@ -428,9 +423,8 @@ void z_sched_start(struct k_thread *thread)
* another CPU to catch the IPI we sent and halt. Note that we check
* for ourselves being asynchronously halted first to prevent simple
* deadlocks (but not complex ones involving cycles of 3+ threads!).
* Acts to release the provided lock before returning.
*/
static void thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key)
static k_spinlock_key_t thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key)
{
if (is_halting(_current)) {
halt_thread(_current,
@@ -438,11 +432,10 @@ static void thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key)
}
k_spin_unlock(&_sched_spinlock, key);
while (is_halting(thread)) {
unsigned int k = arch_irq_lock();
arch_spin_relax(); /* Requires interrupts be masked */
arch_irq_unlock(k);
}
key = k_spin_lock(&_sched_spinlock);
z_sched_switch_spin(thread);
return key;
}
/* Shared handler for k_thread_{suspend,abort}(). Called with the
@@ -472,7 +465,8 @@ static void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,
arch_sched_ipi();
#endif
if (arch_is_in_isr()) {
thread_halt_spin(thread, key);
key = thread_halt_spin(thread, key);
k_spin_unlock(&_sched_spinlock, key);
} else {
add_to_waitq_locked(_current, wq);
z_swap(&_sched_spinlock, key);
@@ -486,10 +480,6 @@ static void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,
k_spin_unlock(&_sched_spinlock, key);
}
}
/* NOTE: the scheduler lock has been released. Don't put
* logic here, it's likely to be racy/deadlocky even if you
* re-take the lock!
*/
}
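Taken together, the hunks above restore the older locking contract: thread_halt_spin() drops the scheduler lock while it spins, re-takes it, and returns the key, and the caller in z_thread_halt() does the final unlock; the reverted fix instead had the helper release the lock itself and return nothing. A compressed sketch of the restored call pattern using toy locks (none of these names are the kernel's, and the arch/IRQ details are elided):

```c
#include <stdatomic.h>
#include <stdbool.h>

static atomic_flag toy_sched_lock = ATOMIC_FLAG_INIT;
static volatile bool toy_halting;   /* cleared by the CPU that commits the halt */

static int toy_lock(void)
{
	while (atomic_flag_test_and_set(&toy_sched_lock)) {
		/* spin */
	}
	return 0;                   /* stands in for the k_spinlock "key" */
}

static void toy_unlock(int key)
{
	(void)key;
	atomic_flag_clear(&toy_sched_lock);
}

/* Restored contract: drop the lock while spinning, re-take it, and hand
 * the key back so the caller decides when it is finally released.
 */
static int toy_halt_spin(int key)
{
	toy_unlock(key);            /* let the target CPU process the halt */
	while (toy_halting) {
		/* in the kernel: arch_spin_relax() with interrupts masked */
	}
	return toy_lock();
}

static void toy_isr_halt_path(void)
{
	int key = toy_lock();

	key = toy_halt_spin(key);
	toy_unlock(key);            /* final unlock is the caller's job again */
}
```

Returning the key keeps lock ownership explicit at the call site, which is the behaviour the pre-fix code (and now the reverted tree) relies on.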
@@ -1289,8 +1279,6 @@ extern void thread_abort_hook(struct k_thread *thread);
*/
static void halt_thread(struct k_thread *thread, uint8_t new_state)
{
bool dummify = false;
/* We hold the lock, and the thread is known not to be running
* anywhere.
*/
@@ -1307,16 +1295,6 @@ static void halt_thread(struct k_thread *thread, uint8_t new_state)
}
(void)z_abort_thread_timeout(thread);
unpend_all(&thread->join_queue);
/* Edge case: aborting _current from within an
* ISR that preempted it requires clearing the
* _current pointer so the upcoming context
* switch doesn't clobber the now-freed
* memory
*/
if (thread == _current && arch_is_in_isr()) {
dummify = true;
}
}
#ifdef CONFIG_SMP
unpend_all(&thread->halt_queue);
@@ -1355,16 +1333,6 @@ static void halt_thread(struct k_thread *thread, uint8_t new_state)
#ifdef CONFIG_THREAD_ABORT_NEED_CLEANUP
k_thread_abort_cleanup(thread);
#endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */
/* Do this "set _current to dummy" step last so that
* subsystems above can rely on _current being
* unchanged. Disabled for posix as that arch
* continues to use the _current pointer in its swap
* code.
*/
if (dummify && !IS_ENABLED(CONFIG_ARCH_POSIX)) {
z_dummy_thread_init(&_thread_dummies[_current_cpu->id]);
}
}
}
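The deletions in these last hunks take back out the edge case the reverted commit title refers to: an ISR aborts the very thread it interrupted, after which the thread's memory may be reclaimed, yet the interrupted context switch would still write the outgoing register state into it. The reverted fix parked _current on the per-CPU dummy so that write landed in scratch storage instead. A schematic sketch of the hazard and the now-removed mitigation, with made-up names:

```c
#include <stdbool.h>

struct fake_thread {
	int state;
	/* ... register save area that an outgoing context switch writes ... */
};

static struct fake_thread *fake_current;       /* what the CPU saves into */
static struct fake_thread fake_cpu_dummy;      /* per-CPU scratch context */

static bool fake_in_isr(void) { return true; } /* assume: like arch_is_in_isr() */

static void fake_halt_thread(struct fake_thread *thread)
{
	thread->state = -1;     /* abort committed: 'thread' memory may now be freed */

	if (thread == fake_current && fake_in_isr()) {
		/* The step removed by the revert: park _current on scratch
		 * storage so the ISR's outgoing context switch cannot write
		 * into the possibly-freed thread object.
		 */
		fake_current = &fake_cpu_dummy;
	}
}
```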


@@ -108,6 +108,7 @@ static void wait_for_start_signal(atomic_t *start_flag)
static inline void smp_init_top(void *arg)
{
struct k_thread dummy_thread;
struct cpu_start_cb csc = arg ? *(struct cpu_start_cb *)arg : (struct cpu_start_cb){0};
/* Let start_cpu() know that this CPU has powered up. */
@@ -122,7 +123,7 @@ static inline void smp_init_top(void *arg)
/* Initialize the dummy thread struct so that
* the scheduler can schedule actual threads to run.
*/
z_dummy_thread_init(&_thread_dummies[arch_curr_cpu()->id]);
z_dummy_thread_init(&dummy_thread);
}
#ifdef CONFIG_SYS_CLOCK_EXISTS
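The same substitution is undone for secondary CPUs: after the revert, smp_init_top() again builds its placeholder context on the CPU's own startup stack instead of using the shared _thread_dummies slot. A compressed sketch of the bring-up order this function follows, with illustrative stubs rather than the real SMP API:

```c
struct fake_thread { int state; };

static struct fake_thread *fake_current;

static void fake_signal_cpu_up(void)   { /* tell start_cpu() this CPU is alive */ }
static void fake_wait_for_go(void)     { /* block until the boot CPU says go */ }
static void fake_start_cpu_timer(void) { /* per-CPU tick source, if any */ }
static void fake_run_scheduler(void)   { for (;;) { /* schedule real threads */ } }

static void fake_smp_init_top(void)
{
	struct fake_thread dummy;       /* lives on this CPU's startup stack */

	fake_signal_cpu_up();
	fake_wait_for_go();

	dummy.state = 0;                /* minimal placeholder "current" */
	fake_current = &dummy;

	fake_start_cpu_timer();
	fake_run_scheduler();           /* never returns; dummy never resumes */
}
```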