kernel: fixup thread monitor locking

The lock in kernel/thread.c was pulling double duty: it protected the
thread monitor linked list and also serialized access to the
k_thread_suspend()/k_thread_resume() functions. The monitor list now
has its own dedicated lock. The object tracing test has been updated
to use k_thread_foreach().

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
parent 18a30e9184
commit c4fbc08347
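As background for the diff below, the pattern being introduced is a lock dedicated to exactly one data structure: a spinlock that guards only the monitor's singly linked list. The following is a minimal sketch of that pattern, not the kernel source; struct node, list_head, list_lock, and the two functions are invented for illustration, while k_spinlock, k_spin_lock(), and k_spin_unlock() are the real Zephyr primitives used in the diff.

#include <spinlock.h>

struct node {
	struct node *next;
};

/* Dedicated lock: protects list_head and every node's ->next pointer,
 * and nothing else, mirroring z_thread_monitor_lock vs. _kernel.threads.
 */
static struct node *list_head;
static struct k_spinlock list_lock;

/* Insert at head, like z_setup_new_thread() does for new threads */
static void list_push(struct node *n)
{
	k_spinlock_key_t key = k_spin_lock(&list_lock);

	n->next = list_head;
	list_head = n;
	k_spin_unlock(&list_lock, key);
}

/* Unlink an arbitrary node, like z_thread_monitor_exit() */
static void list_remove(struct node *n)
{
	k_spinlock_key_t key = k_spin_lock(&list_lock);

	if (n == list_head) {
		list_head = n->next;
	} else {
		for (struct node *p = list_head; p != NULL; p = p->next) {
			if (p->next == n) {
				p->next = n->next;
				break;
			}
		}
	}
	k_spin_unlock(&list_lock, key);
}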
kernel/thread.c:

@@ -30,8 +30,12 @@
 #include <sys/check.h>
 
 #ifdef CONFIG_THREAD_MONITOR
-static struct k_spinlock lock;
-#endif
+/* This lock protects the linked list of active threads; i.e. the
+ * initial _kernel.threads pointer and the linked list made up of
+ * thread->next_thread (until NULL)
+ */
+static struct k_spinlock z_thread_monitor_lock;
+#endif /* CONFIG_THREAD_MONITOR */
 
 #define _FOREACH_STATIC_THREAD(thread_data) \
 	Z_STRUCT_SECTION_FOREACH(_static_thread_data, thread_data)
@@ -50,11 +54,11 @@ void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data)
 	 * The indirect ways are through calling k_thread_create and
 	 * k_thread_abort from user_cb.
 	 */
-	key = k_spin_lock(&lock);
+	key = k_spin_lock(&z_thread_monitor_lock);
 	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
 		user_cb(thread, user_data);
 	}
-	k_spin_unlock(&lock, key);
+	k_spin_unlock(&z_thread_monitor_lock, key);
 #endif
 }
 
@@ -66,13 +70,13 @@ void k_thread_foreach_unlocked(k_thread_user_cb_t user_cb, void *user_data)
 
 	__ASSERT(user_cb != NULL, "user_cb can not be NULL");
 
-	key = k_spin_lock(&lock);
+	key = k_spin_lock(&z_thread_monitor_lock);
 	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
-		k_spin_unlock(&lock, key);
+		k_spin_unlock(&z_thread_monitor_lock, key);
 		user_cb(thread, user_data);
-		key = k_spin_lock(&lock);
+		key = k_spin_lock(&z_thread_monitor_lock);
 	}
-	k_spin_unlock(&lock, key);
+	k_spin_unlock(&z_thread_monitor_lock, key);
 #endif
 }
 
@@ -179,7 +183,7 @@ static inline void *z_vrfy_k_thread_custom_data_get(void)
  */
 void z_thread_monitor_exit(struct k_thread *thread)
 {
-	k_spinlock_key_t key = k_spin_lock(&lock);
+	k_spinlock_key_t key = k_spin_lock(&z_thread_monitor_lock);
 
 	if (thread == _kernel.threads) {
 		_kernel.threads = _kernel.threads->next_thread;
@@ -196,7 +200,7 @@ void z_thread_monitor_exit(struct k_thread *thread)
 		}
 	}
 
-	k_spin_unlock(&lock, key);
+	k_spin_unlock(&z_thread_monitor_lock, key);
 }
 #endif
 
@@ -545,11 +549,11 @@ void z_setup_new_thread(struct k_thread *new_thread,
 	new_thread->entry.parameter2 = p2;
 	new_thread->entry.parameter3 = p3;
 
-	k_spinlock_key_t key = k_spin_lock(&lock);
+	k_spinlock_key_t key = k_spin_lock(&z_thread_monitor_lock);
 
 	new_thread->next_thread = _kernel.threads;
 	_kernel.threads = new_thread;
-	k_spin_unlock(&lock, key);
+	k_spin_unlock(&z_thread_monitor_lock, key);
 #endif
 #ifdef CONFIG_THREAD_NAME
 	if (name != NULL) {
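Worth noting about the two iterators above: k_thread_foreach() holds z_thread_monitor_lock for the entire walk, so its callback must not directly or indirectly acquire the same lock (the comment in the diff calls out k_thread_create() and k_thread_abort() as indirect paths); k_thread_foreach_unlocked() instead drops and re-takes the lock around each callback, permitting such calls at the cost of a fully consistent snapshot of the list. A minimal usage sketch, with abort_by_prio_cb and the priority filter invented for illustration:

#include <kernel.h>

/* Invented example: abort every thread at a given priority. This is
 * only safe with the _unlocked iterator, since k_thread_abort() ends
 * up in z_thread_monitor_exit(), which takes z_thread_monitor_lock.
 */
static void abort_by_prio_cb(const struct k_thread *thread, void *user_data)
{
	int *prio = user_data;

	if (thread->base.prio == *prio) {
		k_thread_abort((struct k_thread *)thread);
	}
}

void abort_all_at_prio(int prio)
{
	k_thread_foreach_unlocked(abort_by_prio_cb, &prio);
}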
Object tracing test:

@@ -53,32 +53,31 @@ K_SEM_DEFINE(f3, -5, 1);
  * @{
  * @}
  */
+static inline void thread_list_cb(const struct k_thread *thread, void *data)
+{
+	int *ctr = data;
+
+	if (thread->base.prio == -1) {
+		TC_PRINT("PREMPT: %p OPTIONS: 0x%02x, STATE: 0x%02x\n",
+			 thread,
+			 thread->base.user_options,
+			 thread->base.thread_state);
+	} else {
+		TC_PRINT("COOP: %p OPTIONS: 0x%02x, STATE: 0x%02x\n",
+			 thread,
+			 thread->base.user_options,
+			 thread->base.thread_state);
+	}
+	(*ctr)++;
+}
+
 static inline int thread_monitor(void)
 {
 	int obj_counter = 0;
-	struct k_thread *thread_list = NULL;
-
-	/* wait a bit to allow any initialization-only threads to terminate */
-
-	thread_list = (struct k_thread *)SYS_THREAD_MONITOR_HEAD;
-	while (thread_list != NULL) {
-		if (thread_list->base.prio == -1) {
-			TC_PRINT("PREMPT: %p OPTIONS: 0x%02x, STATE: 0x%02x\n",
-				 thread_list,
-				 thread_list->base.user_options,
-				 thread_list->base.thread_state);
-		} else {
-			TC_PRINT("COOP: %p OPTIONS: 0x%02x, STATE: 0x%02x\n",
-				 thread_list,
-				 thread_list->base.user_options,
-				 thread_list->base.thread_state);
-		}
-		thread_list =
-			(struct k_thread *)SYS_THREAD_MONITOR_NEXT(thread_list);
-		obj_counter++;
-	}
+	k_thread_foreach(thread_list_cb, &obj_counter);
 	TC_PRINT("THREAD QUANTITY: %d\n", obj_counter);
 
 	return obj_counter;
 }
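And a possible usage sketch for the refactored helper, assuming the ztest framework the object tracing test already runs under; test_thread_count and its assertion are invented for illustration:

#include <ztest.h>

static void test_thread_count(void)
{
	/* thread_monitor() now counts threads via k_thread_foreach() */
	int count = thread_monitor();

	zassert_true(count > 0, "expected at least one live thread");
}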