diff --git a/kernel/CMakeLists.txt b/kernel/CMakeLists.txt
index 6a39731868..760f46b1bd 100644
--- a/kernel/CMakeLists.txt
+++ b/kernel/CMakeLists.txt
@@ -101,6 +101,13 @@ list(APPEND kernel_files
 )
 endif()
 
+
+if(CONFIG_THREAD_MONITOR)
+list(APPEND kernel_files
+  thread_monitor.c)
+endif()
+
+
 if(CONFIG_XIP)
 list(APPEND kernel_files
   xip.c)
diff --git a/kernel/Kconfig b/kernel/Kconfig
index db3990ee9a..4cb1a17b70 100644
--- a/kernel/Kconfig
+++ b/kernel/Kconfig
@@ -220,6 +220,7 @@ config DYNAMIC_THREAD
 	select EXPERIMENTAL
 	depends on THREAD_STACK_INFO
 	select DYNAMIC_OBJECTS if USERSPACE
+	select THREAD_MONITOR
 	help
 	  Enable support for dynamic threads and stacks.
 
diff --git a/kernel/include/kernel_internal.h b/kernel/include/kernel_internal.h
index a204cd9b83..34ff4c0f88 100644
--- a/kernel/include/kernel_internal.h
+++ b/kernel/include/kernel_internal.h
@@ -110,15 +110,6 @@ static inline void *z_thread_malloc(size_t size)
 	return z_thread_aligned_alloc(0, size);
 }
 
-/* clean up when a thread is aborted */
-
-#if defined(CONFIG_THREAD_MONITOR)
-extern void z_thread_monitor_exit(struct k_thread *thread);
-#else
-#define z_thread_monitor_exit(thread) \
-	do {/* nothing */ \
-	} while (false)
-#endif /* CONFIG_THREAD_MONITOR */
 
 #ifdef CONFIG_USE_SWITCH
 /* This is a arch function traditionally, but when the switch-based
diff --git a/kernel/include/kthread.h b/kernel/include/kthread.h
index 8cdec848f2..96a41d394a 100644
--- a/kernel/include/kthread.h
+++ b/kernel/include/kthread.h
@@ -11,6 +11,25 @@
 #include
 #include
 
+#ifdef CONFIG_THREAD_MONITOR
+/* This lock protects the linked list of active threads; i.e. the
+ * initial _kernel.threads pointer and the linked list made up of
+ * thread->next_thread (until NULL)
+ */
+extern struct k_spinlock z_thread_monitor_lock;
+#endif
+
+/* clean up when a thread is aborted */
+
+#if defined(CONFIG_THREAD_MONITOR)
+void z_thread_monitor_exit(struct k_thread *thread);
+#else
+#define z_thread_monitor_exit(thread) \
+	do {/* nothing */ \
+	} while (false)
+#endif /* CONFIG_THREAD_MONITOR */
+
+
 #ifdef CONFIG_MULTITHREADING
 static inline void thread_schedule_new(struct k_thread *thread, k_timeout_t delay)
 {
diff --git a/kernel/sched.c b/kernel/sched.c
index 703b6adc6a..408b7ed08c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7,6 +7,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/kernel/thread.c b/kernel/thread.c
index c94cc302fa..105a753df8 100644
--- a/kernel/thread.c
+++ b/kernel/thread.c
@@ -69,75 +69,10 @@ SYS_INIT(init_thread_obj_core_list, PRE_KERNEL_1,
 	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
 #endif
 
-#ifdef CONFIG_THREAD_MONITOR
-/* This lock protects the linked list of active threads; i.e. the
- * initial _kernel.threads pointer and the linked list made up of
- * thread->next_thread (until NULL)
- */
-static struct k_spinlock z_thread_monitor_lock;
-#endif /* CONFIG_THREAD_MONITOR */
 
 #define _FOREACH_STATIC_THREAD(thread_data) \
 	STRUCT_SECTION_FOREACH(_static_thread_data, thread_data)
 
-void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data)
-{
-#if defined(CONFIG_THREAD_MONITOR)
-	struct k_thread *thread;
-	k_spinlock_key_t key;
-
-	__ASSERT(user_cb != NULL, "user_cb can not be NULL");
-
-	/*
-	 * Lock is needed to make sure that the _kernel.threads is not being
-	 * modified by the user_cb either directly or indirectly.
-	 * The indirect ways are through calling k_thread_create and
-	 * k_thread_abort from user_cb.
-	 */
-	key = k_spin_lock(&z_thread_monitor_lock);
-
-	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach);
-
-	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
-		user_cb(thread, user_data);
-	}
-
-	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach);
-
-	k_spin_unlock(&z_thread_monitor_lock, key);
-#else
-	ARG_UNUSED(user_cb);
-	ARG_UNUSED(user_data);
-#endif
-}
-
-void k_thread_foreach_unlocked(k_thread_user_cb_t user_cb, void *user_data)
-{
-#if defined(CONFIG_THREAD_MONITOR)
-	struct k_thread *thread;
-	k_spinlock_key_t key;
-
-	__ASSERT(user_cb != NULL, "user_cb can not be NULL");
-
-	key = k_spin_lock(&z_thread_monitor_lock);
-
-	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach_unlocked);
-
-	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
-		k_spin_unlock(&z_thread_monitor_lock, key);
-		user_cb(thread, user_data);
-		key = k_spin_lock(&z_thread_monitor_lock);
-	}
-
-	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach_unlocked);
-
-	k_spin_unlock(&z_thread_monitor_lock, key);
-#else
-	ARG_UNUSED(user_cb);
-	ARG_UNUSED(user_data);
-#endif
-}
-
 bool k_is_in_isr(void)
 {
 	return arch_is_in_isr();
@@ -173,33 +108,6 @@ static inline void *z_vrfy_k_thread_custom_data_get(void)
 #endif /* CONFIG_USERSPACE */
 #endif /* CONFIG_THREAD_CUSTOM_DATA */
 
-#if defined(CONFIG_THREAD_MONITOR)
-/*
- * Remove a thread from the kernel's list of active threads.
- */
-void z_thread_monitor_exit(struct k_thread *thread)
-{
-	k_spinlock_key_t key = k_spin_lock(&z_thread_monitor_lock);
-
-	if (thread == _kernel.threads) {
-		_kernel.threads = _kernel.threads->next_thread;
-	} else {
-		struct k_thread *prev_thread;
-
-		prev_thread = _kernel.threads;
-		while ((prev_thread != NULL) &&
-			(thread != prev_thread->next_thread)) {
-			prev_thread = prev_thread->next_thread;
-		}
-		if (prev_thread != NULL) {
-			prev_thread->next_thread = thread->next_thread;
-		}
-	}
-
-	k_spin_unlock(&z_thread_monitor_lock, key);
-}
-#endif
-
 int z_impl_k_thread_name_set(struct k_thread *thread, const char *value)
 {
 #ifdef CONFIG_THREAD_NAME
diff --git a/kernel/thread_monitor.c b/kernel/thread_monitor.c
new file mode 100644
index 0000000000..8861529b88
--- /dev/null
+++ b/kernel/thread_monitor.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2010-2014 Wind River Systems, Inc.
+ * Copyright (c) 2024 Intel Corporation
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include
+#include
+
+struct k_spinlock z_thread_monitor_lock;
+/*
+ * Remove a thread from the kernel's list of active threads.
+ */
+void z_thread_monitor_exit(struct k_thread *thread)
+{
+	k_spinlock_key_t key = k_spin_lock(&z_thread_monitor_lock);
+
+	if (thread == _kernel.threads) {
+		_kernel.threads = _kernel.threads->next_thread;
+	} else {
+		struct k_thread *prev_thread;
+
+		prev_thread = _kernel.threads;
+		while ((prev_thread != NULL) &&
+			(thread != prev_thread->next_thread)) {
+			prev_thread = prev_thread->next_thread;
+		}
+		if (prev_thread != NULL) {
+			prev_thread->next_thread = thread->next_thread;
+		}
+	}
+
+	k_spin_unlock(&z_thread_monitor_lock, key);
+}
+
+
+void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data)
+{
+	struct k_thread *thread;
+	k_spinlock_key_t key;
+
+	__ASSERT(user_cb != NULL, "user_cb can not be NULL");
+
+	/*
+	 * Lock is needed to make sure that the _kernel.threads is not being
+	 * modified by the user_cb either directly or indirectly.
+	 * The indirect ways are through calling k_thread_create and
+	 * k_thread_abort from user_cb.
+ */ + key = k_spin_lock(&z_thread_monitor_lock); + + SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach); + + for (thread = _kernel.threads; thread; thread = thread->next_thread) { + user_cb(thread, user_data); + } + + SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach); + + k_spin_unlock(&z_thread_monitor_lock, key); +} + +void k_thread_foreach_unlocked(k_thread_user_cb_t user_cb, void *user_data) +{ + struct k_thread *thread; + k_spinlock_key_t key; + + __ASSERT(user_cb != NULL, "user_cb can not be NULL"); + + key = k_spin_lock(&z_thread_monitor_lock); + + SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach_unlocked); + + for (thread = _kernel.threads; thread; thread = thread->next_thread) { + k_spin_unlock(&z_thread_monitor_lock, key); + user_cb(thread, user_data); + key = k_spin_lock(&z_thread_monitor_lock); + } + + SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach_unlocked); + + k_spin_unlock(&z_thread_monitor_lock, key); + +}