kernel: add option to clean up after aborting the current thread

This adds a mechanism to perform cleanup after k_thread_abort()
is called on the current thread. It is mainly used for cleanup
that cannot be done while the thread is still running, e.g.,
tearing down the thread stack.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
Daniel Leung 2024-03-26 11:54:31 -07:00 committed by Anas Nashif
parent 94997a026f
commit 378131c266
5 changed files with 127 additions and 0 deletions
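
For context, here is a minimal sketch (not part of this commit) of the situation the change targets: a thread that aborts itself cannot have its own stack torn down while the abort path is still executing on that stack. The worker function below is illustrative only.

#include <zephyr/kernel.h>

/* Illustrative only: a thread that aborts itself. Its stack cannot be
 * reclaimed at this point, because the abort path is still running on
 * it; CONFIG_THREAD_ABORT_NEED_CLEANUP defers that reclamation to
 * another context.
 */
static void worker(void *p1, void *p2, void *p3)
{
        ARG_UNUSED(p1);
        ARG_UNUSED(p2);
        ARG_UNUSED(p3);

        /* ... do some work ... */

        k_thread_abort(k_current_get()); /* i.e. aborting _current */
}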


@@ -197,6 +197,13 @@ config THREAD_ABORT_HOOK
        help
          Used by portability layers to modify locally managed status mask.

config THREAD_ABORT_NEED_CLEANUP
        bool
        help
          This option enables the bits to clean up the current thread if
          k_thread_abort(_current) is called, as the cleanup cannot run
          on the current thread's stack.

config THREAD_CUSTOM_DATA
        bool "Thread custom data"
        help
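
The new symbol is a hidden bool with no prompt, so it is meant to be selected by an architecture or another kernel option rather than enabled directly by users. As a hedged illustration (nothing below is in this commit, and arch_example_stack_teardown is a made-up name), code keyed off the generated macro could look like this:

#include <zephyr/kernel.h>

#ifdef CONFIG_THREAD_ABORT_NEED_CLEANUP
/* Hypothetical helper: teardown that is only safe once the aborted
 * thread can no longer be running on its own stack.
 */
void arch_example_stack_teardown(struct k_thread *thread)
{
        ARG_UNUSED(thread);

        /* e.g. unmap the pages of an on-demand-mapped thread stack */
}
#endif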


@@ -297,6 +297,35 @@ int z_kernel_stats_raw(struct k_obj_core *obj_core, void *stats);
int z_kernel_stats_query(struct k_obj_core *obj_core, void *stats);
#endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */

#if defined(CONFIG_THREAD_ABORT_NEED_CLEANUP)
/**
 * Perform cleanup at the end of k_thread_abort().
 *
 * This performs additional cleanup steps at the end of k_thread_abort()
 * which require that the thread is no longer running.
 * If the target thread is not the current running thread, the cleanup
 * steps are performed immediately. However, if the target thread is
 * the current running thread (e.g. k_thread_abort(_current)), the
 * cleanup steps are deferred and finished later in another context.
 *
 * @param thread Pointer to thread to be cleaned up.
 */
void k_thread_abort_cleanup(struct k_thread *thread);

/**
 * Check if a thread is the same as the one waiting for cleanup.
 *
 * This is used to guard against reusing the same thread object
 * before the previous cleanup has finished. It performs any
 * pending cleanup so that the thread object can be reused.
 * It should mainly be used during thread creation.
 *
 * @param thread Pointer to thread to be checked.
 */
void k_thread_abort_cleanup_check_reuse(struct k_thread *thread);
#endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */

#ifdef __cplusplus
}
#endif
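
Taken together, the two declarations split the work between abort time and reuse time. Below is a sketch of the reuse scenario the second hook covers, assuming an application that recycles a statically allocated thread object; restart_worker and the sizes are illustrative only.

#include <zephyr/kernel.h>

#define WORKER_STACK_SIZE 1024

K_THREAD_STACK_DEFINE(worker_stack, WORKER_STACK_SIZE);
static struct k_thread worker_thread;

extern void worker(void *p1, void *p2, void *p3);

void restart_worker(void)
{
        k_thread_abort(&worker_thread);

        /* If worker_thread previously aborted itself, its cleanup may
         * still be pending here. z_setup_new_thread() calls
         * k_thread_abort_cleanup_check_reuse() so that pending work is
         * flushed before the object is overwritten.
         */
        k_thread_create(&worker_thread, worker_stack,
                        K_THREAD_STACK_SIZEOF(worker_stack),
                        worker, NULL, NULL, NULL,
                        K_PRIO_PREEMPT(1), 0, K_NO_WAIT);
}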


@@ -59,6 +59,7 @@ void z_sched_abort(struct k_thread *thread);
void z_sched_ipi(void);
void z_sched_start(struct k_thread *thread);
void z_ready_thread(struct k_thread *thread);
void z_ready_thread_locked(struct k_thread *thread);
void z_requeue_current(struct k_thread *curr);
struct k_thread *z_swap_next_thread(void);
void z_thread_abort(struct k_thread *thread);
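
The new z_ready_thread_locked() serves callers that already hold _sched_spinlock, where z_ready_thread() would otherwise attempt to take the lock again. A hypothetical internal caller, sketched under that assumption (hypothetical_wake is not a real kernel function):

/* Assumes kernel-internal context with ksched.h available. */
static void hypothetical_wake(struct k_thread *thread)
{
        K_SPINLOCK(&_sched_spinlock) {
                /* ... other bookkeeping done under the lock ... */
                z_ready_thread_locked(thread);
        }
}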


@@ -417,6 +417,13 @@ static void ready_thread(struct k_thread *thread)
        }
}

void z_ready_thread_locked(struct k_thread *thread)
{
        if (!thread_active_elsewhere(thread)) {
                ready_thread(thread);
        }
}

void z_ready_thread(struct k_thread *thread)
{
        K_SPINLOCK(&_sched_spinlock) {

@@ -1371,6 +1378,10 @@ static void halt_thread(struct k_thread *thread, uint8_t new_state)
                k_object_uninit(thread->stack_obj);
                k_object_uninit(thread);
#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_THREAD_ABORT_NEED_CLEANUP
                k_thread_abort_cleanup(thread);
#endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */
        }
}
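
For orientation, the call chain that now ends in the cleanup hook can be summarized as follows (paraphrased as comments, not verbatim kernel code):

/*
 * k_thread_abort(thread)
 *   -> z_thread_abort(thread)
 *     -> halt_thread(thread, _THREAD_DEAD)
 *       -> ... existing teardown (object uninit, etc.) ...
 *       -> k_thread_abort_cleanup(thread)   <- new final step
 */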


@@ -487,6 +487,10 @@ char *z_setup_new_thread(struct k_thread *new_thread,
        Z_ASSERT_VALID_PRIO(prio, entry);

#ifdef CONFIG_THREAD_ABORT_NEED_CLEANUP
        k_thread_abort_cleanup_check_reuse(new_thread);
#endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */

#ifdef CONFIG_OBJ_CORE_THREAD
        k_obj_core_init_and_link(K_OBJ_CORE(new_thread), &obj_type_thread);
#ifdef CONFIG_OBJ_CORE_STATS_THREAD

@@ -950,3 +954,78 @@ int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats)
        return 0;
}

#ifdef CONFIG_THREAD_ABORT_NEED_CLEANUP
/** Pointer to thread which needs to be cleaned up. */
static struct k_thread *thread_to_cleanup;

/** Spinlock for thread abort cleanup. */
static struct k_spinlock thread_cleanup_lock;

void defer_thread_cleanup(struct k_thread *thread)
{
        /* Note when adding new deferred cleanup steps:
         * - The thread object may have been overwritten by the time
         *   the actual cleanup is being done (e.g. thread object
         *   allocated on a stack). So stash any necessary data here
         *   that will be used in the actual cleanup steps.
         */
        thread_to_cleanup = thread;
}

void do_thread_cleanup(struct k_thread *thread)
{
        /* Note when adding new actual cleanup steps:
         * - The thread object may have been overwritten when this is
         *   called. So avoid using any data from the thread object.
         */
        ARG_UNUSED(thread);
}

void k_thread_abort_cleanup(struct k_thread *thread)
{
        K_SPINLOCK(&thread_cleanup_lock) {
                if (thread_to_cleanup != NULL) {
                        /* Finish the pending one first. */
                        do_thread_cleanup(thread_to_cleanup);
                        thread_to_cleanup = NULL;
                }

                if (thread == _current) {
                        /* The cleanup must be deferred for the current
                         * running thread, as it might result in an
                         * exception. The actual cleanup is done the next
                         * time k_thread_abort() is called, or at thread
                         * creation if the same thread object is being
                         * reused. This ensures the cleanup code no longer
                         * needs this thread's stack. It is not exactly
                         * ideal, as the stack may remain memory-mapped
                         * for a while. However, it is a simple solution
                         * that a) does not need to work around the
                         * scheduler lock during k_thread_abort(),
                         * b) does not create another thread to perform
                         * the cleanup, and c) does not require
                         * architecture support (e.g. via exception).
                         */
                        defer_thread_cleanup(thread);
                } else {
                        /* Not the current running thread, so we are safe
                         * to do cleanups.
                         */
                        do_thread_cleanup(thread);
                }
        }
}

void k_thread_abort_cleanup_check_reuse(struct k_thread *thread)
{
        K_SPINLOCK(&thread_cleanup_lock) {
                /* This guards reuse of the same thread object: any
                 * cleanup still pending for it must be finished before
                 * the thread object can be reused.
                 */
                if (thread_to_cleanup == thread) {
                        do_thread_cleanup(thread_to_cleanup);
                        thread_to_cleanup = NULL;
                }
        }
}
#endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */
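
To tie the pieces together, here is a timeline of the deferred path, written as comments; it is an illustrative summary of the code above, not additional kernel code:

/*
 * T0:  thread A calls k_thread_abort(_current).
 *      halt_thread() -> k_thread_abort_cleanup(A); A == _current, so
 *      the work is stashed via defer_thread_cleanup(A) and A never
 *      returns.
 *
 * T1:  the next k_thread_abort(B) from any other context first flushes
 *      the pending do_thread_cleanup(A), then handles B (immediately
 *      if B != _current, otherwise deferring B in turn).
 *
 * T1': alternatively, the same object A is recycled by
 *      k_thread_create(); z_setup_new_thread() calls
 *      k_thread_abort_cleanup_check_reuse(A), which flushes the
 *      pending cleanup before A is overwritten.
 */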