kernel: split thread monitor

Move the thread monitor related functions, which are not enabled in most
configurations, out of thread.c into their own file, and clean up the headers.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>

Anas Nashif, 2024-02-26 17:03:35 -05:00
commit 9e83413542, parent e10665531f
7 changed files with 113 additions and 101 deletions
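
For context, a minimal usage sketch (hypothetical, not part of this commit) of the
API being relocated: k_thread_foreach() walks the _kernel.threads list that
thread_monitor.c now owns, and it is only built when CONFIG_THREAD_MONITOR=y. The
count_cb and active_thread_count names below are illustrative only.

/*
 * Illustrative only: count the active threads via the thread monitor.
 * Requires CONFIG_THREAD_MONITOR=y; the callback runs with
 * z_thread_monitor_lock held, so it must not block or create/abort threads.
 */
#include <zephyr/kernel.h>

static void count_cb(const struct k_thread *thread, void *user_data)
{
	size_t *count = user_data;

	ARG_UNUSED(thread);
	(*count)++;
}

size_t active_thread_count(void)
{
	size_t count = 0;

	k_thread_foreach(count_cb, &count);

	return count;
}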

@@ -101,6 +101,13 @@ list(APPEND kernel_files
  )
endif()

if(CONFIG_THREAD_MONITOR)
  list(APPEND kernel_files
    thread_monitor.c)
endif()

if(CONFIG_XIP)
  list(APPEND kernel_files
    xip.c)

@@ -220,6 +220,7 @@ config DYNAMIC_THREAD
	select EXPERIMENTAL
	depends on THREAD_STACK_INFO
	select DYNAMIC_OBJECTS if USERSPACE
	select THREAD_MONITOR
	help
	  Enable support for dynamic threads and stacks.
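
DYNAMIC_THREAD now selects THREAD_MONITOR, so thread_monitor.c is pulled in
automatically for dynamic-thread users. An application that only wants the
thread list can still enable the option directly; an illustrative prj.conf
fragment (not part of this commit):

# Track active threads so k_thread_foreach() and thread-aware tools work.
CONFIG_THREAD_MONITOR=y
# Optional: give threads readable names for the walk.
CONFIG_THREAD_NAME=y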

@@ -110,15 +110,6 @@ static inline void *z_thread_malloc(size_t size)
	return z_thread_aligned_alloc(0, size);
}

/* clean up when a thread is aborted */
#if defined(CONFIG_THREAD_MONITOR)
extern void z_thread_monitor_exit(struct k_thread *thread);
#else
#define z_thread_monitor_exit(thread) \
	do {/* nothing */ \
	} while (false)
#endif /* CONFIG_THREAD_MONITOR */

#ifdef CONFIG_USE_SWITCH
/* This is a arch function traditionally, but when the switch-based

@@ -11,6 +11,25 @@
#include <zephyr/kernel.h>
#include <timeout_q.h>

#ifdef CONFIG_THREAD_MONITOR
/* This lock protects the linked list of active threads; i.e. the
 * initial _kernel.threads pointer and the linked list made up of
 * thread->next_thread (until NULL)
 */
extern struct k_spinlock z_thread_monitor_lock;
#endif

/* clean up when a thread is aborted */
#if defined(CONFIG_THREAD_MONITOR)
void z_thread_monitor_exit(struct k_thread *thread);
#else
#define z_thread_monitor_exit(thread) \
	do {/* nothing */ \
	} while (false)
#endif /* CONFIG_THREAD_MONITOR */

#ifdef CONFIG_MULTITHREADING
static inline void thread_schedule_new(struct k_thread *thread, k_timeout_t delay)
{

@@ -7,6 +7,7 @@
#include <ksched.h>
#include <zephyr/spinlock.h>
#include <wait_q.h>
#include <kthread.h>
#include <priority_q.h>
#include <kswap.h>
#include <kernel_arch_func.h>

@@ -69,75 +69,10 @@ SYS_INIT(init_thread_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif

#ifdef CONFIG_THREAD_MONITOR
/* This lock protects the linked list of active threads; i.e. the
 * initial _kernel.threads pointer and the linked list made up of
 * thread->next_thread (until NULL)
 */
static struct k_spinlock z_thread_monitor_lock;
#endif /* CONFIG_THREAD_MONITOR */

#define _FOREACH_STATIC_THREAD(thread_data) \
	STRUCT_SECTION_FOREACH(_static_thread_data, thread_data)

void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data)
{
#if defined(CONFIG_THREAD_MONITOR)
	struct k_thread *thread;
	k_spinlock_key_t key;

	__ASSERT(user_cb != NULL, "user_cb can not be NULL");

	/*
	 * Lock is needed to make sure that the _kernel.threads is not being
	 * modified by the user_cb either directly or indirectly.
	 * The indirect ways are through calling k_thread_create and
	 * k_thread_abort from user_cb.
	 */
	key = k_spin_lock(&z_thread_monitor_lock);

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach);

	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
		user_cb(thread, user_data);
	}

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach);

	k_spin_unlock(&z_thread_monitor_lock, key);
#else
	ARG_UNUSED(user_cb);
	ARG_UNUSED(user_data);
#endif
}

void k_thread_foreach_unlocked(k_thread_user_cb_t user_cb, void *user_data)
{
#if defined(CONFIG_THREAD_MONITOR)
	struct k_thread *thread;
	k_spinlock_key_t key;

	__ASSERT(user_cb != NULL, "user_cb can not be NULL");

	key = k_spin_lock(&z_thread_monitor_lock);

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach_unlocked);

	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
		k_spin_unlock(&z_thread_monitor_lock, key);
		user_cb(thread, user_data);
		key = k_spin_lock(&z_thread_monitor_lock);
	}

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach_unlocked);

	k_spin_unlock(&z_thread_monitor_lock, key);
#else
	ARG_UNUSED(user_cb);
	ARG_UNUSED(user_data);
#endif
}

bool k_is_in_isr(void)
{
	return arch_is_in_isr();
@@ -173,33 +108,6 @@ static inline void *z_vrfy_k_thread_custom_data_get(void)
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_THREAD_CUSTOM_DATA */

#if defined(CONFIG_THREAD_MONITOR)
/*
 * Remove a thread from the kernel's list of active threads.
 */
void z_thread_monitor_exit(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&z_thread_monitor_lock);

	if (thread == _kernel.threads) {
		_kernel.threads = _kernel.threads->next_thread;
	} else {
		struct k_thread *prev_thread;

		prev_thread = _kernel.threads;
		while ((prev_thread != NULL) &&
			(thread != prev_thread->next_thread)) {
			prev_thread = prev_thread->next_thread;
		}
		if (prev_thread != NULL) {
			prev_thread->next_thread = thread->next_thread;
		}
	}

	k_spin_unlock(&z_thread_monitor_lock, key);
}
#endif

int z_impl_k_thread_name_set(struct k_thread *thread, const char *value)
{
#ifdef CONFIG_THREAD_NAME

kernel/thread_monitor.c (new file, 85 lines)

@@ -0,0 +1,85 @@
/*
 * Copyright (c) 2010-2014 Wind River Systems, Inc.
 * Copyright (c) 2024 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <kthread.h>

struct k_spinlock z_thread_monitor_lock;

/*
 * Remove a thread from the kernel's list of active threads.
 */
void z_thread_monitor_exit(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&z_thread_monitor_lock);

	if (thread == _kernel.threads) {
		_kernel.threads = _kernel.threads->next_thread;
	} else {
		struct k_thread *prev_thread;

		prev_thread = _kernel.threads;
		while ((prev_thread != NULL) &&
			(thread != prev_thread->next_thread)) {
			prev_thread = prev_thread->next_thread;
		}
		if (prev_thread != NULL) {
			prev_thread->next_thread = thread->next_thread;
		}
	}

	k_spin_unlock(&z_thread_monitor_lock, key);
}

void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data)
{
	struct k_thread *thread;
	k_spinlock_key_t key;

	__ASSERT(user_cb != NULL, "user_cb can not be NULL");

	/*
	 * Lock is needed to make sure that the _kernel.threads is not being
	 * modified by the user_cb either directly or indirectly.
	 * The indirect ways are through calling k_thread_create and
	 * k_thread_abort from user_cb.
	 */
	key = k_spin_lock(&z_thread_monitor_lock);

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach);

	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
		user_cb(thread, user_data);
	}

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach);

	k_spin_unlock(&z_thread_monitor_lock, key);
}

void k_thread_foreach_unlocked(k_thread_user_cb_t user_cb, void *user_data)
{
	struct k_thread *thread;
	k_spinlock_key_t key;

	__ASSERT(user_cb != NULL, "user_cb can not be NULL");

	key = k_spin_lock(&z_thread_monitor_lock);

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach_unlocked);

	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
		k_spin_unlock(&z_thread_monitor_lock, key);
		user_cb(thread, user_data);
		key = k_spin_lock(&z_thread_monitor_lock);
	}

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach_unlocked);

	k_spin_unlock(&z_thread_monitor_lock, key);
}
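
Unlike k_thread_foreach(), the unlocked variant above releases z_thread_monitor_lock
around each callback invocation, so the callback is allowed to block or call thread
APIs. A hypothetical caller (not part of this commit; the dump_cb and dump_threads
names are illustrative):

/*
 * Illustrative only: print every active thread's name.
 * Potentially blocking code is acceptable here because
 * k_thread_foreach_unlocked() drops the monitor lock around the callback.
 */
#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

static void dump_cb(const struct k_thread *thread, void *user_data)
{
	ARG_UNUSED(user_data);

	/* k_thread_name_get() takes a non-const k_tid_t, hence the cast;
	 * it returns NULL when CONFIG_THREAD_NAME is disabled.
	 */
	const char *name = k_thread_name_get((k_tid_t)thread);

	printk("thread %p: %s\n", (void *)thread, (name == NULL) ? "<unnamed>" : name);
}

void dump_threads(void)
{
	k_thread_foreach_unlocked(dump_cb, NULL);
}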