kernel: move thread-related helper functions to kthread.h

Move some helper functions to the internal kthread.h header to offload
the crowded sched.c

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
Anas Nashif 2024-03-07 18:00:45 -05:00
parent 49be50c8dd
commit ebb503ff7b
7 changed files with 198 additions and 184 deletions
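
Most of what this commit moves are one-line inline predicates and mutators over the thread_state bit field in struct _thread_base. As a hedged, self-contained sketch of that pattern (all names and flag values below are illustrative, not Zephyr's):

/* Minimal sketch of the bit-flag pattern the moved helpers follow.
 * Every identifier and flag value here is hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

#define FAKE_THREAD_PENDING   (1U << 1)  /* waiting on a wait queue */
#define FAKE_THREAD_SUSPENDED (1U << 4)  /* explicitly suspended */

struct fake_thread {
	uint8_t thread_state;
};

/* predicate: test a state flag */
static inline bool fake_is_pending(const struct fake_thread *t)
{
	return (t->thread_state & FAKE_THREAD_PENDING) != 0U;
}

/* mutators: set and clear a state flag */
static inline void fake_mark_suspended(struct fake_thread *t)
{
	t->thread_state |= FAKE_THREAD_SUSPENDED;
}

static inline void fake_mark_not_suspended(struct fake_thread *t)
{
	t->thread_state &= (uint8_t)~FAKE_THREAD_SUSPENDED;
}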


@@ -10,6 +10,7 @@
#include <zephyr/kernel_structs.h>
#include <kernel_internal.h>
#include <timeout_q.h>
#include <kthread.h>
#include <zephyr/tracing/tracing.h>
#include <stdbool.h>
@@ -52,7 +53,7 @@ void z_unpend_thread(struct k_thread *thread);
int z_unpend_all(_wait_q_t *wait_q);
bool z_thread_prio_set(struct k_thread *thread, int prio);
void *z_get_next_switch_handle(void *interrupted);
void idle(void *unused1, void *unused2, void *unused3);
void z_time_slice(void);
void z_reset_time_slice(struct k_thread *curr);
void z_sched_abort(struct k_thread *thread);
@@ -68,127 +69,6 @@ static inline void z_reschedule_unlocked(void)
(void) z_reschedule_irqlock(arch_irq_lock());
}
static inline bool z_is_idle_thread_entry(void *entry_point)
{
return entry_point == idle;
}
static inline bool z_is_idle_thread_object(struct k_thread *thread)
{
#ifdef CONFIG_MULTITHREADING
#ifdef CONFIG_SMP
return thread->base.is_idle;
#else
return thread == &z_idle_threads[0];
#endif /* CONFIG_SMP */
#else
return false;
#endif /* CONFIG_MULTITHREADING */
}
static inline bool z_is_thread_suspended(struct k_thread *thread)
{
return (thread->base.thread_state & _THREAD_SUSPENDED) != 0U;
}
static inline bool z_is_thread_pending(struct k_thread *thread)
{
return (thread->base.thread_state & _THREAD_PENDING) != 0U;
}
static inline bool z_is_thread_prevented_from_running(struct k_thread *thread)
{
uint8_t state = thread->base.thread_state;
return (state & (_THREAD_PENDING | _THREAD_PRESTART | _THREAD_DEAD |
_THREAD_DUMMY | _THREAD_SUSPENDED)) != 0U;
}
static inline bool z_is_thread_timeout_active(struct k_thread *thread)
{
return !z_is_inactive_timeout(&thread->base.timeout);
}
static inline bool z_is_thread_ready(struct k_thread *thread)
{
return !((z_is_thread_prevented_from_running(thread)) != 0U ||
z_is_thread_timeout_active(thread));
}
static inline bool z_has_thread_started(struct k_thread *thread)
{
return (thread->base.thread_state & _THREAD_PRESTART) == 0U;
}
static inline bool z_is_thread_state_set(struct k_thread *thread, uint32_t state)
{
return (thread->base.thread_state & state) != 0U;
}
static inline bool z_is_thread_queued(struct k_thread *thread)
{
return z_is_thread_state_set(thread, _THREAD_QUEUED);
}
static inline void z_mark_thread_as_suspended(struct k_thread *thread)
{
thread->base.thread_state |= _THREAD_SUSPENDED;
SYS_PORT_TRACING_FUNC(k_thread, sched_suspend, thread);
}
static inline void z_mark_thread_as_not_suspended(struct k_thread *thread)
{
thread->base.thread_state &= ~_THREAD_SUSPENDED;
SYS_PORT_TRACING_FUNC(k_thread, sched_resume, thread);
}
static inline void z_mark_thread_as_started(struct k_thread *thread)
{
thread->base.thread_state &= ~_THREAD_PRESTART;
}
static inline void z_mark_thread_as_pending(struct k_thread *thread)
{
thread->base.thread_state |= _THREAD_PENDING;
}
static inline void z_mark_thread_as_not_pending(struct k_thread *thread)
{
thread->base.thread_state &= ~_THREAD_PENDING;
}
/*
* This function tags the current thread as essential to system operation.
* Exceptions raised by this thread will be treated as a fatal system error.
*/
static inline void z_thread_essential_set(struct k_thread *thread)
{
thread->base.user_options |= K_ESSENTIAL;
}
/*
* This function tags the current thread as not essential to system operation.
* Exceptions raised by this thread may be recoverable.
* (This is the default tag for a thread.)
*/
static inline void z_thread_essential_clear(struct k_thread *thread)
{
thread->base.user_options &= ~K_ESSENTIAL;
}
/*
* This routine indicates if the current thread is an essential system thread.
*
* Returns true if current thread is essential, false if it is not.
*/
static inline bool z_is_thread_essential(struct k_thread *thread)
{
return (thread->base.user_options & K_ESSENTIAL) == K_ESSENTIAL;
}
static inline bool z_is_under_prio_ceiling(int prio)
{
return prio >= CONFIG_PRIORITY_CEILING;
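
The essential-thread helpers removed above carry their own explanation in the comments. As a hedged usage sketch (the wrapper function and its caller are hypothetical; only z_thread_essential_set(), z_thread_essential_clear() and _current come from the code above), kernel code could make faults during a must-not-fail step fatal like this:

#include <zephyr/kernel.h>
#include <kthread.h>	/* internal header; the helpers now live here */

/* Hypothetical wrapper: any exception raised while init_fn() runs is
 * treated as a fatal system error, because the current thread is tagged
 * essential for the duration of the call.
 */
static void run_essential_step(void (*init_fn)(void))
{
	z_thread_essential_set(_current);
	init_fn();
	z_thread_essential_clear(_current);	/* back to the recoverable default */
}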


@@ -9,6 +9,7 @@
#define ZEPHYR_KERNEL_INCLUDE_THREAD_H_
#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <timeout_q.h>
#ifdef CONFIG_THREAD_MONITOR
@@ -19,6 +20,8 @@
extern struct k_spinlock z_thread_monitor_lock;
#endif /* CONFIG_THREAD_MONITOR */
void idle(void *unused1, void *unused2, void *unused3);
/* clean up when a thread is aborted */
#if defined(CONFIG_THREAD_MONITOR)
@@ -46,4 +49,191 @@ static inline void thread_schedule_new(struct k_thread *thread, k_timeout_t dela
}
#endif /* CONFIG_MULTITHREADING */
static inline int is_preempt(struct k_thread *thread)
{
/* explanation in kernel_structs.h */
return thread->base.preempt <= _PREEMPT_THRESHOLD;
}
static inline int is_metairq(struct k_thread *thread)
{
#if CONFIG_NUM_METAIRQ_PRIORITIES > 0
return (thread->base.prio - K_HIGHEST_THREAD_PRIO)
< CONFIG_NUM_METAIRQ_PRIORITIES;
#else
ARG_UNUSED(thread);
return 0;
#endif /* CONFIG_NUM_METAIRQ_PRIORITIES */
}
#if CONFIG_ASSERT
static inline bool is_thread_dummy(struct k_thread *thread)
{
return (thread->base.thread_state & _THREAD_DUMMY) != 0U;
}
#endif /* CONFIG_ASSERT */
static inline bool z_is_thread_suspended(struct k_thread *thread)
{
return (thread->base.thread_state & _THREAD_SUSPENDED) != 0U;
}
static inline bool z_is_thread_pending(struct k_thread *thread)
{
return (thread->base.thread_state & _THREAD_PENDING) != 0U;
}
static inline bool z_is_thread_prevented_from_running(struct k_thread *thread)
{
uint8_t state = thread->base.thread_state;
return (state & (_THREAD_PENDING | _THREAD_PRESTART | _THREAD_DEAD |
_THREAD_DUMMY | _THREAD_SUSPENDED)) != 0U;
}
static inline bool z_is_thread_timeout_active(struct k_thread *thread)
{
return !z_is_inactive_timeout(&thread->base.timeout);
}
static inline bool z_is_thread_ready(struct k_thread *thread)
{
return !((z_is_thread_prevented_from_running(thread)) != 0U ||
z_is_thread_timeout_active(thread));
}
static inline bool z_has_thread_started(struct k_thread *thread)
{
return (thread->base.thread_state & _THREAD_PRESTART) == 0U;
}
static inline bool z_is_thread_state_set(struct k_thread *thread, uint32_t state)
{
return (thread->base.thread_state & state) != 0U;
}
static inline bool z_is_thread_queued(struct k_thread *thread)
{
return z_is_thread_state_set(thread, _THREAD_QUEUED);
}
static inline void z_mark_thread_as_suspended(struct k_thread *thread)
{
thread->base.thread_state |= _THREAD_SUSPENDED;
SYS_PORT_TRACING_FUNC(k_thread, sched_suspend, thread);
}
static inline void z_mark_thread_as_not_suspended(struct k_thread *thread)
{
thread->base.thread_state &= ~_THREAD_SUSPENDED;
SYS_PORT_TRACING_FUNC(k_thread, sched_resume, thread);
}
static inline void z_mark_thread_as_started(struct k_thread *thread)
{
thread->base.thread_state &= ~_THREAD_PRESTART;
}
static inline void z_mark_thread_as_pending(struct k_thread *thread)
{
thread->base.thread_state |= _THREAD_PENDING;
}
static inline void z_mark_thread_as_not_pending(struct k_thread *thread)
{
thread->base.thread_state &= ~_THREAD_PENDING;
}
/*
* This function tags the current thread as essential to system operation.
* Exceptions raised by this thread will be treated as a fatal system error.
*/
static inline void z_thread_essential_set(struct k_thread *thread)
{
thread->base.user_options |= K_ESSENTIAL;
}
/*
* This function tags the current thread as not essential to system operation.
* Exceptions raised by this thread may be recoverable.
* (This is the default tag for a thread.)
*/
static inline void z_thread_essential_clear(struct k_thread *thread)
{
thread->base.user_options &= ~K_ESSENTIAL;
}
/*
* This routine indicates if the current thread is an essential system thread.
*
* Returns true if current thread is essential, false if it is not.
*/
static inline bool z_is_thread_essential(struct k_thread *thread)
{
return (thread->base.user_options & K_ESSENTIAL) == K_ESSENTIAL;
}
static ALWAYS_INLINE bool should_preempt(struct k_thread *thread,
int preempt_ok)
{
/* Preemption is OK if it's being explicitly allowed by
* software state (e.g. the thread called k_yield())
*/
if (preempt_ok != 0) {
return true;
}
__ASSERT(_current != NULL, "");
/* Or if we're pended/suspended/dummy (duh) */
if (z_is_thread_prevented_from_running(_current)) {
return true;
}
/* Edge case on ARM where a thread can be pended out of an
* interrupt handler before the "synchronous" swap starts
* context switching. Platforms with atomic swap can never
* hit this.
*/
if (IS_ENABLED(CONFIG_SWAP_NONATOMIC)
&& z_is_thread_timeout_active(thread)) {
return true;
}
/* Otherwise we have to be running a preemptible thread or
* switching to a metairq
*/
if (is_preempt(_current) || is_metairq(thread)) {
return true;
}
return false;
}
static inline bool z_is_idle_thread_entry(void *entry_point)
{
return entry_point == idle;
}
static inline bool z_is_idle_thread_object(struct k_thread *thread)
{
#ifdef CONFIG_MULTITHREADING
#ifdef CONFIG_SMP
return thread->base.is_idle;
#else
return thread == &z_idle_threads[0];
#endif /* CONFIG_SMP */
#else
return false;
#endif /* CONFIG_MULTITHREADING */
}
#endif /* ZEPHYR_KERNEL_INCLUDE_THREAD_H_ */
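
is_metairq() in the new header leans on the convention that cooperative priorities are negative and that K_HIGHEST_THREAD_PRIO is -CONFIG_NUM_COOP_PRIORITIES (an assumption about the Kconfig definitions, not stated in this diff). A small worked sketch of the arithmetic with made-up values:

/* Worked sketch of the is_metairq() check: 16 cooperative priorities and
 * 2 meta-IRQ priorities, so the highest thread priority is -16 and only
 * -16 and -15 count as meta-IRQ.
 */
#include <assert.h>
#include <stdbool.h>

#define NUM_COOP_PRIORITIES	16
#define NUM_METAIRQ_PRIORITIES	2
#define HIGHEST_THREAD_PRIO	(-NUM_COOP_PRIORITIES)

static bool sketch_is_metairq(int prio)
{
	return (prio - HIGHEST_THREAD_PRIO) < NUM_METAIRQ_PRIORITIES;
}

int main(void)
{
	assert(sketch_is_metairq(-16));		/* -16 - (-16) = 0, < 2 */
	assert(sketch_is_metairq(-15));		/* -15 - (-16) = 1, < 2 */
	assert(!sketch_is_metairq(-14));	/* -14 - (-16) = 2, not < 2 */
	return 0;
}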


@@ -18,6 +18,7 @@
#include <zephyr/init.h>
/* private kernel APIs */
#include <ksched.h>
#include <kthread.h>
#include <wait_q.h>
#ifdef CONFIG_OBJ_CORE_MAILBOX


@@ -30,6 +30,7 @@
#include <zephyr/kernel_structs.h>
#include <zephyr/toolchain.h>
#include <ksched.h>
#include <kthread.h>
#include <wait_q.h>
#include <errno.h>
#include <zephyr/init.h>


@@ -30,36 +30,11 @@ static void halt_thread(struct k_thread *thread, uint8_t new_state);
static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q);
static inline int is_preempt(struct k_thread *thread)
{
/* explanation in kernel_structs.h */
return thread->base.preempt <= _PREEMPT_THRESHOLD;
}
BUILD_ASSERT(CONFIG_NUM_COOP_PRIORITIES >= CONFIG_NUM_METAIRQ_PRIORITIES,
"You need to provide at least as many CONFIG_NUM_COOP_PRIORITIES as "
"CONFIG_NUM_METAIRQ_PRIORITIES as Meta IRQs are just a special class of cooperative "
"threads.");
static inline int is_metairq(struct k_thread *thread)
{
#if CONFIG_NUM_METAIRQ_PRIORITIES > 0
return (thread->base.prio - K_HIGHEST_THREAD_PRIO)
< CONFIG_NUM_METAIRQ_PRIORITIES;
#else
ARG_UNUSED(thread);
return 0;
#endif /* CONFIG_NUM_METAIRQ_PRIORITIES */
}
#if CONFIG_ASSERT
static inline bool is_thread_dummy(struct k_thread *thread)
{
return (thread->base.thread_state & _THREAD_DUMMY) != 0U;
}
#endif /* CONFIG_ASSERT */
/*
* Return value same as e.g. memcmp
* > 0 -> thread 1 priority > thread 2 priority
@@ -102,43 +77,6 @@ int32_t z_sched_prio_cmp(struct k_thread *thread_1,
return 0;
}
static ALWAYS_INLINE bool should_preempt(struct k_thread *thread,
int preempt_ok)
{
/* Preemption is OK if it's being explicitly allowed by
* software state (e.g. the thread called k_yield())
*/
if (preempt_ok != 0) {
return true;
}
__ASSERT(_current != NULL, "");
/* Or if we're pended/suspended/dummy (duh) */
if (z_is_thread_prevented_from_running(_current)) {
return true;
}
/* Edge case on ARM where a thread can be pended out of an
* interrupt handler before the "synchronous" swap starts
* context switching. Platforms with atomic swap can never
* hit this.
*/
if (IS_ENABLED(CONFIG_SWAP_NONATOMIC)
&& z_is_thread_timeout_active(thread)) {
return true;
}
/* Otherwise we have to be running a preemptible thread or
* switching to a metairq
*/
if (is_preempt(_current) || is_metairq(thread)) {
return true;
}
return false;
}
#ifdef CONFIG_SCHED_CPU_MASK
static ALWAYS_INLINE struct k_thread *_priq_dumb_mask_best(sys_dlist_t *pq)
{
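
The sched.c hunks above keep the memcmp-style comment for z_sched_prio_cmp(). A hedged sketch of that comparison convention, using Zephyr's rule that a numerically lower prio value means higher priority (the real function also breaks ties on EDF deadlines, omitted here):

/* Sketch only: > 0 means the first priority outranks the second,
 * mirroring the memcmp-style contract described above.
 */
static int prio_cmp_sketch(int prio_1, int prio_2)
{
	return prio_2 - prio_1;
}

/* e.g. prio_cmp_sketch(-3, 5) > 0: a cooperative thread at -3 outranks
 * a preemptible thread at 5.
 */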


@@ -18,7 +18,10 @@
#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <string.h>
/* internal kernel APIs */
#include <ksched.h>
#include <kthread.h>
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(test);


@@ -10,6 +10,7 @@
/* Internal APIs */
#include <kernel_internal.h>
#include <ksched.h>
#include <kthread.h>
struct k_thread kthread_thread;
struct k_thread kthread_thread1;