From 1be7bca3333d0636f48a867148b9d2e44c0d6270 Mon Sep 17 00:00:00 2001
From: Allan Stephens
Date: Tue, 25 Oct 2016 10:57:52 -0500
Subject: [PATCH] kernel: Add interrupt locking to thread monitoring exit API

This is needed because some thread termination paths can be invoked
with no guarantee that thread preemption won't happen. (It also aligns
with the approach taken by the thread monitoring initialization code.)

Change-Id: I28a384e051775390eb047498cb23fed22910e4df
Signed-off-by: Allan Stephens
---
 kernel/nanokernel/nano_context.c | 9 ++++-----
 kernel/unified/thread.c          | 9 ++++-----
 2 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/kernel/nanokernel/nano_context.c b/kernel/nanokernel/nano_context.c
index 6088cba54b..ca59fe834a 100644
--- a/kernel/nanokernel/nano_context.c
+++ b/kernel/nanokernel/nano_context.c
@@ -148,14 +148,11 @@ void *sys_thread_custom_data_get(void)
 #if defined(CONFIG_THREAD_MONITOR)
 /*
  * Remove a thread from the kernel's list of active threads.
- *
- * On entry the current thread must be in a non-preemptible state to ensure
- * the list of threads does not change in mid-operation. (That is, it must
- * be a fiber or interrupts must be locked.) This routine cannot be called
- * from an ISR context.
  */
 void _thread_monitor_exit(struct tcs *thread)
 {
+	unsigned int key = irq_lock();
+
 	if (thread == _nanokernel.threads) {
 		_nanokernel.threads = _nanokernel.threads->next_thread;
 	} else {
@@ -167,6 +164,8 @@ void _thread_monitor_exit(struct tcs *thread)
 		}
 		prev_thread->next_thread = thread->next_thread;
 	}
+
+	irq_unlock(key);
 }
 #endif /* CONFIG_THREAD_MONITOR */
 
diff --git a/kernel/unified/thread.c b/kernel/unified/thread.c
index 0aa23b43c4..01b344b416 100644
--- a/kernel/unified/thread.c
+++ b/kernel/unified/thread.c
@@ -167,14 +167,11 @@ void *k_thread_custom_data_get(void)
 #if defined(CONFIG_THREAD_MONITOR)
 /*
  * Remove a thread from the kernel's list of active threads.
- *
- * On entry the current thread must be in a non-preemptible state to ensure
- * the list of threads does not change in mid-operation. (That is, it must
- * be non-preemptible or have locked the scheduler, or interrupts must be
- * locked.) This routine cannot be called from an ISR context.
 */
 void _thread_monitor_exit(struct k_thread *thread)
 {
+	unsigned int key = irq_lock();
+
 	if (thread == _nanokernel.threads) {
 		_nanokernel.threads = _nanokernel.threads->next_thread;
 	} else {
@@ -186,6 +183,8 @@ void _thread_monitor_exit(struct k_thread *thread)
 		}
 		prev_thread->next_thread = thread->next_thread;
 	}
+
+	irq_unlock(key);
 }
 #endif /* CONFIG_THREAD_MONITOR */
 