diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
index 74e4bc9f11..253d588aff 100644
--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include <kthread.h>
 #include
 #include
@@ -52,7 +53,7 @@ void z_unpend_thread(struct k_thread *thread);
 int z_unpend_all(_wait_q_t *wait_q);
 bool z_thread_prio_set(struct k_thread *thread, int prio);
 void *z_get_next_switch_handle(void *interrupted);
-void idle(void *unused1, void *unused2, void *unused3);
+
 void z_time_slice(void);
 void z_reset_time_slice(struct k_thread *curr);
 void z_sched_abort(struct k_thread *thread);
@@ -68,127 +69,6 @@ static inline void z_reschedule_unlocked(void)
 	(void) z_reschedule_irqlock(arch_irq_lock());
 }

-static inline bool z_is_idle_thread_entry(void *entry_point)
-{
-	return entry_point == idle;
-}
-
-static inline bool z_is_idle_thread_object(struct k_thread *thread)
-{
-#ifdef CONFIG_MULTITHREADING
-#ifdef CONFIG_SMP
-	return thread->base.is_idle;
-#else
-	return thread == &z_idle_threads[0];
-#endif /* CONFIG_SMP */
-#else
-	return false;
-#endif /* CONFIG_MULTITHREADING */
-}
-
-static inline bool z_is_thread_suspended(struct k_thread *thread)
-{
-	return (thread->base.thread_state & _THREAD_SUSPENDED) != 0U;
-}
-
-static inline bool z_is_thread_pending(struct k_thread *thread)
-{
-	return (thread->base.thread_state & _THREAD_PENDING) != 0U;
-}
-
-static inline bool z_is_thread_prevented_from_running(struct k_thread *thread)
-{
-	uint8_t state = thread->base.thread_state;
-
-	return (state & (_THREAD_PENDING | _THREAD_PRESTART | _THREAD_DEAD |
-			 _THREAD_DUMMY | _THREAD_SUSPENDED)) != 0U;
-
-}
-
-static inline bool z_is_thread_timeout_active(struct k_thread *thread)
-{
-	return !z_is_inactive_timeout(&thread->base.timeout);
-}
-
-static inline bool z_is_thread_ready(struct k_thread *thread)
-{
-	return !((z_is_thread_prevented_from_running(thread)) != 0U ||
-		 z_is_thread_timeout_active(thread));
-}
-
-static inline bool z_has_thread_started(struct k_thread *thread)
-{
-	return (thread->base.thread_state & _THREAD_PRESTART) == 0U;
-}
-
-static inline bool z_is_thread_state_set(struct k_thread *thread, uint32_t state)
-{
-	return (thread->base.thread_state & state) != 0U;
-}
-
-static inline bool z_is_thread_queued(struct k_thread *thread)
-{
-	return z_is_thread_state_set(thread, _THREAD_QUEUED);
-}
-
-static inline void z_mark_thread_as_suspended(struct k_thread *thread)
-{
-	thread->base.thread_state |= _THREAD_SUSPENDED;
-
-	SYS_PORT_TRACING_FUNC(k_thread, sched_suspend, thread);
-}
-
-static inline void z_mark_thread_as_not_suspended(struct k_thread *thread)
-{
-	thread->base.thread_state &= ~_THREAD_SUSPENDED;
-
-	SYS_PORT_TRACING_FUNC(k_thread, sched_resume, thread);
-}
-
-static inline void z_mark_thread_as_started(struct k_thread *thread)
-{
-	thread->base.thread_state &= ~_THREAD_PRESTART;
-}
-
-static inline void z_mark_thread_as_pending(struct k_thread *thread)
-{
-	thread->base.thread_state |= _THREAD_PENDING;
-}
-
-static inline void z_mark_thread_as_not_pending(struct k_thread *thread)
-{
-	thread->base.thread_state &= ~_THREAD_PENDING;
-}
-
-/*
- * This function tags the current thread as essential to system operation.
- * Exceptions raised by this thread will be treated as a fatal system error.
- */
-static inline void z_thread_essential_set(struct k_thread *thread)
-{
-	thread->base.user_options |= K_ESSENTIAL;
-}
-
-/*
- * This function tags the current thread as not essential to system operation.
- * Exceptions raised by this thread may be recoverable.
- * (This is the default tag for a thread.)
- */
-static inline void z_thread_essential_clear(struct k_thread *thread)
-{
-	thread->base.user_options &= ~K_ESSENTIAL;
-}
-
-/*
- * This routine indicates if the current thread is an essential system thread.
- *
- * Returns true if current thread is essential, false if it is not.
- */
-static inline bool z_is_thread_essential(struct k_thread *thread)
-{
-	return (thread->base.user_options & K_ESSENTIAL) == K_ESSENTIAL;
-}
-
 static inline bool z_is_under_prio_ceiling(int prio)
 {
 	return prio >= CONFIG_PRIORITY_CEILING;
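None of the helpers deleted above are dropped; they reappear verbatim in kthread.h below. They are all thin wrappers over single-bit tests and updates on thread_state. As a standalone illustration of that bit-flag pattern, here is a minimal sketch that compiles on its own; the demo_* struct and flag values are stand-ins invented for this example, not Zephyr definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in flag values; Zephyr's real _THREAD_* bits live in kernel_structs.h */
#define DEMO_THREAD_PENDING   0x02U
#define DEMO_THREAD_SUSPENDED 0x10U

struct demo_thread {
	uint8_t thread_state; /* bitfield of DEMO_THREAD_* flags */
};

/* Same shape as z_mark_thread_as_suspended(): OR the flag in */
static inline void demo_mark_suspended(struct demo_thread *t)
{
	t->thread_state |= DEMO_THREAD_SUSPENDED;
}

/* Same shape as z_mark_thread_as_not_suspended(): AND the flag out */
static inline void demo_mark_not_suspended(struct demo_thread *t)
{
	t->thread_state &= ~DEMO_THREAD_SUSPENDED;
}

/* Same shape as z_is_thread_suspended(): test a single bit */
static inline bool demo_is_suspended(const struct demo_thread *t)
{
	return (t->thread_state & DEMO_THREAD_SUSPENDED) != 0U;
}

int main(void)
{
	struct demo_thread t = { .thread_state = DEMO_THREAD_PENDING };

	demo_mark_suspended(&t);
	printf("suspended: %d\n", demo_is_suspended(&t)); /* prints 1 */
	demo_mark_not_suspended(&t);
	printf("suspended: %d\n", demo_is_suspended(&t)); /* prints 0 */
	return 0;
}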
diff --git a/kernel/include/kthread.h b/kernel/include/kthread.h
index 423e6989fd..57192cfd90 100644
--- a/kernel/include/kthread.h
+++ b/kernel/include/kthread.h
@@ -9,6 +9,7 @@
 #define ZEPHYR_KERNEL_INCLUDE_THREAD_H_

 #include
+#include
 #include

 #ifdef CONFIG_THREAD_MONITOR
@@ -19,6 +20,8 @@
 extern struct k_spinlock z_thread_monitor_lock;
 #endif /* CONFIG_THREAD_MONITOR */

+void idle(void *unused1, void *unused2, void *unused3);
+
 /* clean up when a thread is aborted */

 #if defined(CONFIG_THREAD_MONITOR)
@@ -46,4 +49,191 @@ static inline void thread_schedule_new(struct k_thread *thread, k_timeout_t delay)
 }
 #endif /* CONFIG_MULTITHREADING */

+static inline int is_preempt(struct k_thread *thread)
+{
+	/* explanation in kernel_struct.h */
+	return thread->base.preempt <= _PREEMPT_THRESHOLD;
+}
+
+
+static inline int is_metairq(struct k_thread *thread)
+{
+#if CONFIG_NUM_METAIRQ_PRIORITIES > 0
+	return (thread->base.prio - K_HIGHEST_THREAD_PRIO)
+		< CONFIG_NUM_METAIRQ_PRIORITIES;
+#else
+	ARG_UNUSED(thread);
+	return 0;
+#endif /* CONFIG_NUM_METAIRQ_PRIORITIES */
+}
+
+#if CONFIG_ASSERT
+static inline bool is_thread_dummy(struct k_thread *thread)
+{
+	return (thread->base.thread_state & _THREAD_DUMMY) != 0U;
+}
+#endif /* CONFIG_ASSERT */
+
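is_metairq() classifies a thread as meta-IRQ when its priority falls within the CONFIG_NUM_METAIRQ_PRIORITIES highest (numerically lowest) thread priorities. A runnable sketch of the same arithmetic, assuming an illustrative configuration of 16 cooperative priorities with 2 reserved for meta-IRQs (these constants are stand-ins, not values from this patch):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for one possible configuration:
 * cooperative priorities -16..-1, the two highest reserved as meta-IRQ.
 */
#define DEMO_HIGHEST_THREAD_PRIO    (-16)
#define DEMO_NUM_METAIRQ_PRIORITIES 2

/* Same arithmetic as is_metairq(): a thread is meta-IRQ when its
 * priority sits within the first N slots below the highest priority.
 */
static bool demo_is_metairq(int prio)
{
	return (prio - DEMO_HIGHEST_THREAD_PRIO) < DEMO_NUM_METAIRQ_PRIORITIES;
}

int main(void)
{
	printf("%d\n", demo_is_metairq(-16)); /* 1: offset 0 < 2 */
	printf("%d\n", demo_is_metairq(-15)); /* 1: offset 1 < 2 */
	printf("%d\n", demo_is_metairq(-14)); /* 0: offset 2, not meta-IRQ */
	return 0;
}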
+static inline bool z_is_thread_suspended(struct k_thread *thread)
+{
+	return (thread->base.thread_state & _THREAD_SUSPENDED) != 0U;
+}
+
+static inline bool z_is_thread_pending(struct k_thread *thread)
+{
+	return (thread->base.thread_state & _THREAD_PENDING) != 0U;
+}
+
+static inline bool z_is_thread_prevented_from_running(struct k_thread *thread)
+{
+	uint8_t state = thread->base.thread_state;
+
+	return (state & (_THREAD_PENDING | _THREAD_PRESTART | _THREAD_DEAD |
+			 _THREAD_DUMMY | _THREAD_SUSPENDED)) != 0U;
+
+}
+
+static inline bool z_is_thread_timeout_active(struct k_thread *thread)
+{
+	return !z_is_inactive_timeout(&thread->base.timeout);
+}
+
+static inline bool z_is_thread_ready(struct k_thread *thread)
+{
+	return !((z_is_thread_prevented_from_running(thread)) != 0U ||
+		 z_is_thread_timeout_active(thread));
+}
+
+static inline bool z_has_thread_started(struct k_thread *thread)
+{
+	return (thread->base.thread_state & _THREAD_PRESTART) == 0U;
+}
+
+static inline bool z_is_thread_state_set(struct k_thread *thread, uint32_t state)
+{
+	return (thread->base.thread_state & state) != 0U;
+}
+
+static inline bool z_is_thread_queued(struct k_thread *thread)
+{
+	return z_is_thread_state_set(thread, _THREAD_QUEUED);
+}
+
+static inline void z_mark_thread_as_suspended(struct k_thread *thread)
+{
+	thread->base.thread_state |= _THREAD_SUSPENDED;
+
+	SYS_PORT_TRACING_FUNC(k_thread, sched_suspend, thread);
+}
+
+static inline void z_mark_thread_as_not_suspended(struct k_thread *thread)
+{
+	thread->base.thread_state &= ~_THREAD_SUSPENDED;
+
+	SYS_PORT_TRACING_FUNC(k_thread, sched_resume, thread);
+}
+
+static inline void z_mark_thread_as_started(struct k_thread *thread)
+{
+	thread->base.thread_state &= ~_THREAD_PRESTART;
+}
+
+static inline void z_mark_thread_as_pending(struct k_thread *thread)
+{
+	thread->base.thread_state |= _THREAD_PENDING;
+}
+
+static inline void z_mark_thread_as_not_pending(struct k_thread *thread)
+{
+	thread->base.thread_state &= ~_THREAD_PENDING;
+}
+
+/*
+ * This function tags the current thread as essential to system operation.
+ * Exceptions raised by this thread will be treated as a fatal system error.
+ */
+static inline void z_thread_essential_set(struct k_thread *thread)
+{
+	thread->base.user_options |= K_ESSENTIAL;
+}
+
+/*
+ * This function tags the current thread as not essential to system operation.
+ * Exceptions raised by this thread may be recoverable.
+ * (This is the default tag for a thread.)
+ */
+static inline void z_thread_essential_clear(struct k_thread *thread)
+{
+	thread->base.user_options &= ~K_ESSENTIAL;
+}
+
+/*
+ * This routine indicates if the current thread is an essential system thread.
+ *
+ * Returns true if current thread is essential, false if it is not.
+ */
+static inline bool z_is_thread_essential(struct k_thread *thread)
+{
+	return (thread->base.user_options & K_ESSENTIAL) == K_ESSENTIAL;
+}
+
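z_thread_essential_set()/z_thread_essential_clear() toggle the K_ESSENTIAL bit in user_options, and z_is_thread_essential() tests it; the kernel's fatal-error path uses that test to decide whether a fault can be contained to the offending thread. A self-contained sketch of the same option-bit round trip (the demo_* names and flag value are hypothetical, not Zephyr's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_ESSENTIAL 0x01U /* stand-in for Zephyr's K_ESSENTIAL option bit */

struct demo_thread {
	uint8_t user_options;
};

static void demo_essential_set(struct demo_thread *t)
{
	t->user_options |= DEMO_ESSENTIAL;
}

static void demo_essential_clear(struct demo_thread *t)
{
	t->user_options &= ~DEMO_ESSENTIAL;
}

static bool demo_is_essential(const struct demo_thread *t)
{
	return (t->user_options & DEMO_ESSENTIAL) == DEMO_ESSENTIAL;
}

int main(void)
{
	struct demo_thread t = { 0 }; /* default: not essential */

	demo_essential_set(&t);
	if (demo_is_essential(&t)) {
		/* in the kernel, a fault here would escalate to a fatal error */
		printf("essential thread: treat faults as fatal\n");
	}
	demo_essential_clear(&t);
	return 0;
}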
+
+static ALWAYS_INLINE bool should_preempt(struct k_thread *thread,
+					 int preempt_ok)
+{
+	/* Preemption is OK if it's being explicitly allowed by
+	 * software state (e.g. the thread called k_yield())
+	 */
+	if (preempt_ok != 0) {
+		return true;
+	}
+
+	__ASSERT(_current != NULL, "");
+
+	/* Or if we're pended/suspended/dummy (duh) */
+	if (z_is_thread_prevented_from_running(_current)) {
+		return true;
+	}
+
+	/* Edge case on ARM where a thread can be pended out of an
+	 * interrupt handler before the "synchronous" swap starts
+	 * context switching. Platforms with atomic swap can never
+	 * hit this.
+	 */
+	if (IS_ENABLED(CONFIG_SWAP_NONATOMIC)
+	    && z_is_thread_timeout_active(thread)) {
+		return true;
+	}
+
+	/* Otherwise we have to be running a preemptible thread or
+	 * switching to a metairq
+	 */
+	if (is_preempt(_current) || is_metairq(thread)) {
+		return true;
+	}
+
+	return false;
+}
+
+
+static inline bool z_is_idle_thread_entry(void *entry_point)
+{
+	return entry_point == idle;
+}
+
+static inline bool z_is_idle_thread_object(struct k_thread *thread)
+{
+#ifdef CONFIG_MULTITHREADING
+#ifdef CONFIG_SMP
+	return thread->base.is_idle;
+#else
+	return thread == &z_idle_threads[0];
+#endif /* CONFIG_SMP */
+#else
+	return false;
+#endif /* CONFIG_MULTITHREADING */
+}
+
+
 #endif /* ZEPHYR_KERNEL_INCLUDE_THREAD_H_ */
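should_preempt() is an ordered cascade: explicit permission first (preempt_ok), then "the current thread cannot keep running anyway", then the CONFIG_SWAP_NONATOMIC edge case, and only then the generic preemptible-current/meta-IRQ-incoming test. A runnable approximation of that decision order, with the platform-specific SWAP_NONATOMIC branch omitted and all demo_* names invented for the sketch:

#include <stdbool.h>
#include <stdio.h>

struct demo_thread {
	bool prevented_from_running; /* pended/suspended/dummy/dead */
	bool preemptible;            /* stand-in for is_preempt() */
	bool metairq;                /* stand-in for is_metairq() */
};

/* Mirrors the decision order of should_preempt() above; the
 * CONFIG_SWAP_NONATOMIC branch is left out as platform-specific.
 */
static bool demo_should_preempt(const struct demo_thread *current,
				const struct demo_thread *incoming,
				int preempt_ok)
{
	if (preempt_ok != 0) {           /* e.g. current called k_yield() */
		return true;
	}
	if (current->prevented_from_running) {
		return true;             /* current can't keep running anyway */
	}
	return current->preemptible || incoming->metairq;
}

int main(void)
{
	struct demo_thread coop = { .preemptible = false };
	struct demo_thread preemptible = { .preemptible = true };
	struct demo_thread mirq = { .metairq = true };

	/* cooperative current thread: only a meta-IRQ may preempt it */
	printf("%d\n", demo_should_preempt(&coop, &preemptible, 0)); /* 0 */
	printf("%d\n", demo_should_preempt(&coop, &mirq, 0));        /* 1 */
	/* preemptible current thread: any chosen thread may preempt */
	printf("%d\n", demo_should_preempt(&preemptible, &coop, 0)); /* 1 */
	return 0;
}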
diff --git a/kernel/mailbox.c b/kernel/mailbox.c
index 0d690768c2..84c79b1993 100644
--- a/kernel/mailbox.c
+++ b/kernel/mailbox.c
@@ -18,6 +18,7 @@
 #include

 /* private kernel APIs */
 #include
+#include <kthread.h>
 #include

 #ifdef CONFIG_OBJ_CORE_MAILBOX
diff --git a/kernel/mutex.c b/kernel/mutex.c
index e5ead48b9c..3635e7624c 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -30,6 +30,7 @@
 #include
 #include
 #include
+#include <kthread.h>
 #include
 #include
 #include
diff --git a/kernel/sched.c b/kernel/sched.c
index bcc80d63bb..4aa2251a7b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -30,36 +30,11 @@ static void halt_thread(struct k_thread *thread, uint8_t new_state);
 static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q);

-
-static inline int is_preempt(struct k_thread *thread)
-{
-	/* explanation in kernel_struct.h */
-	return thread->base.preempt <= _PREEMPT_THRESHOLD;
-}
-
 BUILD_ASSERT(CONFIG_NUM_COOP_PRIORITIES >= CONFIG_NUM_METAIRQ_PRIORITIES,
	     "You need to provide at least as many CONFIG_NUM_COOP_PRIORITIES as "
	     "CONFIG_NUM_METAIRQ_PRIORITIES as Meta IRQs are just a special class of cooperative "
	     "threads.");

-static inline int is_metairq(struct k_thread *thread)
-{
-#if CONFIG_NUM_METAIRQ_PRIORITIES > 0
-	return (thread->base.prio - K_HIGHEST_THREAD_PRIO)
-		< CONFIG_NUM_METAIRQ_PRIORITIES;
-#else
-	ARG_UNUSED(thread);
-	return 0;
-#endif /* CONFIG_NUM_METAIRQ_PRIORITIES */
-}
-
-#if CONFIG_ASSERT
-static inline bool is_thread_dummy(struct k_thread *thread)
-{
-	return (thread->base.thread_state & _THREAD_DUMMY) != 0U;
-}
-#endif /* CONFIG_ASSERT */
-
 /*
  * Return value same as e.g. memcmp
  * > 0 -> thread 1 priority > thread 2 priority
@@ -102,43 +77,6 @@ int32_t z_sched_prio_cmp(struct k_thread *thread_1,
	return 0;
 }

-static ALWAYS_INLINE bool should_preempt(struct k_thread *thread,
-					 int preempt_ok)
-{
-	/* Preemption is OK if it's being explicitly allowed by
-	 * software state (e.g. the thread called k_yield())
-	 */
-	if (preempt_ok != 0) {
-		return true;
-	}
-
-	__ASSERT(_current != NULL, "");
-
-	/* Or if we're pended/suspended/dummy (duh) */
-	if (z_is_thread_prevented_from_running(_current)) {
-		return true;
-	}
-
-	/* Edge case on ARM where a thread can be pended out of an
-	 * interrupt handler before the "synchronous" swap starts
-	 * context switching. Platforms with atomic swap can never
-	 * hit this.
-	 */
-	if (IS_ENABLED(CONFIG_SWAP_NONATOMIC)
-	    && z_is_thread_timeout_active(thread)) {
-		return true;
-	}
-
-	/* Otherwise we have to be running a preemptible thread or
-	 * switching to a metairq
-	 */
-	if (is_preempt(_current) || is_metairq(thread)) {
-		return true;
-	}
-
-	return false;
-}
-
 #ifdef CONFIG_SCHED_CPU_MASK
 static ALWAYS_INLINE struct k_thread *_priq_dumb_mask_best(sys_dlist_t *pq)
 {
diff --git a/tests/kernel/threads/thread_apis/src/main.c b/tests/kernel/threads/thread_apis/src/main.c
index 95a970d579..28e1bc29d0 100644
--- a/tests/kernel/threads/thread_apis/src/main.c
+++ b/tests/kernel/threads/thread_apis/src/main.c
@@ -18,7 +18,10 @@
 #include
 #include
 #include
+
+/* internal kernel APIs */
 #include
+#include <kthread.h>
 #include

 LOG_MODULE_REGISTER(test);
diff --git a/tests/kernel/threads/thread_apis/src/test_essential_thread.c b/tests/kernel/threads/thread_apis/src/test_essential_thread.c
index 0d2829b2b3..1814346932 100644
--- a/tests/kernel/threads/thread_apis/src/test_essential_thread.c
+++ b/tests/kernel/threads/thread_apis/src/test_essential_thread.c
@@ -10,6 +10,7 @@
 /* Internal APIs */
 #include
 #include
+#include <kthread.h>

 struct k_thread kthread_thread;
 struct k_thread kthread_thread1;
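With kthread.h included directly, test code keeps access to the internal helpers it previously received transitively through ksched.h. A hedged sketch of a ztest case exercising the relocated essential-thread helpers (the suite and test names are illustrative, not from this patch, and assume a ZTEST_SUITE registration elsewhere in the test project):

#include <zephyr/kernel.h>
#include <zephyr/ztest.h>

/* Internal kernel APIs, as in the test sources above */
#include <kthread.h>

/* Illustrative test: toggle the essential bit on the running thread
 * and observe it through z_is_thread_essential().
 */
ZTEST(thread_apis_demo, test_essential_flag_roundtrip)
{
	struct k_thread *self = k_current_get();

	zassert_false(z_is_thread_essential(self),
		      "thread should not start essential");

	z_thread_essential_set(self);
	zassert_true(z_is_thread_essential(self), "essential bit not set");

	z_thread_essential_clear(self);
	zassert_false(z_is_thread_essential(self), "essential bit not cleared");
}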