kernel: Cleanup, unify _add_thread_to_ready_q() and _ready_thread()

The scheduler exposed two APIs to do the same thing:
_add_thread_to_ready_q() was a low-level primitive that in most cases
was wrapped by _ready_thread(), which also (1) checks that the thread
_is_ready(), returning without enqueueing it otherwise, (2) flags the
thread as "started" to handle the case of a thread running for the
first time out of a waitq timeout, and (3) signals a logger event.

As it turns out, all existing usage was already performing the case #1
check itself.  Case #2 is better handled once, in the timeout resume
path, than on every call.  And case #3 was probably wrong to skip in
the first place: there were paths that could make a thread runnable
without logging it.
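
To make the end state concrete, the wrapper after this change reduces
to just the readiness check (reconstructed from the
kernel/include/ksched.h hunk below):

	/* Post-patch _ready_thread(), as left by the first hunk below:
	 * only the case #1 readiness check remains; the asserts move to
	 * the out-of-line _add_thread_to_ready_q() and the "started"
	 * flag moves to the timeout expiry path.
	 */
	static inline void _ready_thread(struct k_thread *thread)
	{
		if (_is_thread_ready(thread)) {
			_add_thread_to_ready_q(thread);
		}
	}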

Now _add_thread_to_ready_q() is an internal scheduler API, as it
probably always should have been.

This also moves the asserts from the inline _ready_thread() wrapper
into the underlying out-of-line function, for code size reasons:
otherwise the extra inline expansions added by this patch blow past
the code size limits on Quark D2000.
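
To illustrate the size argument (a hypothetical, simplified check, not
the kernel's actual condition, which also carves out an exception for
the idle thread): with CONFIG_ASSERT=y, every expansion of an inline
duplicates the test and its message string at each call site, while
the same check inside the one out-of-line definition is emitted once.

	/* Hypothetical sketch of why the asserts moved: */
	static inline void ready_thread_checked(struct k_thread *thread)
	{
		/* Inlined at every caller: each call site carries its
		 * own copy of this comparison and format string.
		 */
		__ASSERT(thread->base.prio >= K_HIGHEST_THREAD_PRIO &&
			 thread->base.prio <= K_LOWEST_THREAD_PRIO,
			 "thread %p prio %d out of range",
			 thread, thread->base.prio);
		_add_thread_to_ready_q(thread);
	}
	/* The identical __ASSERT() placed inside the non-inline
	 * _add_thread_to_ready_q() exists exactly once in the image,
	 * however many callers the wrapper gains.
	 */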

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
Author:     Andy Ross <andrew.j.ross@intel.com>, 2018-03-09 12:17:45 -08:00
Committed:  Anas Nashif
Commit:     85bc0a3fe6 (parent c7ceef6751)

6 changed files with 19 additions and 32 deletions

kernel/include/ksched.h

@@ -418,21 +418,6 @@ static inline void _mark_thread_as_started(struct k_thread *thread)
  */
 static inline void _ready_thread(struct k_thread *thread)
 {
-	__ASSERT(_is_prio_higher(thread->base.prio, K_LOWEST_THREAD_PRIO) ||
-		 ((thread->base.prio == K_LOWEST_THREAD_PRIO) &&
-		  (thread == _idle_thread)),
-		 "thread %p prio too low (is %d, cannot be lower than %d)",
-		 thread, thread->base.prio,
-		 thread == _idle_thread ? K_LOWEST_THREAD_PRIO :
-					  K_LOWEST_APPLICATION_THREAD_PRIO);
-
-	__ASSERT(!_is_prio_higher(thread->base.prio, K_HIGHEST_THREAD_PRIO),
-		 "thread %p prio too high (id %d, cannot be higher than %d)",
-		 thread, thread->base.prio, K_HIGHEST_THREAD_PRIO);
-
-	/* needed to handle the start-with-delay case */
-	_mark_thread_as_started(thread);
-
 	if (_is_thread_ready(thread)) {
 		_add_thread_to_ready_q(thread);
 	}

kernel/include/timeout_q.h

@@ -90,6 +90,7 @@ static inline void _handle_one_expired_timeout(struct _timeout *timeout)
 	K_DEBUG("timeout %p\n", timeout);
 	if (thread) {
 		_unpend_thread_timing_out(thread, timeout);
+		_mark_thread_as_started(thread);
 		_ready_thread(thread);
 		irq_unlock(key);
 	} else {
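
For context, the usual way a not-yet-started thread reaches this
expiry handler is a delayed start.  A minimal sketch, assuming the
k_thread_create() API of this era (start delay in milliseconds) and
hypothetical names (my_thread, my_stack, my_entry):

	K_THREAD_STACK_DEFINE(my_stack, 1024);
	static struct k_thread my_thread;

	static void my_entry(void *p1, void *p2, void *p3)
	{
		/* runs only after the 500 ms delay expires */
	}

	static void start_with_delay(void)
	{
		/* The thread waits on the timeout queue without the
		 * "started" flag; _handle_one_expired_timeout() above
		 * now sets the flag (case #2) just before readying it.
		 */
		k_thread_create(&my_thread, my_stack,
				K_THREAD_STACK_SIZEOF(my_stack),
				my_entry, NULL, NULL, NULL,
				K_PRIO_PREEMPT(7), 0, 500);
	}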

kernel/init.c

@@ -274,7 +274,7 @@ static void init_idle_thread(struct k_thread *thr, k_thread_stack_t *stack)
 		       IDLE_STACK_SIZE, idle, NULL, NULL, NULL,
 		       K_LOWEST_THREAD_PRIO, K_ESSENTIAL);
 	_mark_thread_as_started(thr);
-	_add_thread_to_ready_q(thr);
+	_ready_thread(thr);
 }
 #endif

@@ -352,7 +352,7 @@ static void prepare_multithreading(struct k_thread *dummy_thread)
 		       NULL, NULL, NULL,
 		       CONFIG_MAIN_THREAD_PRIORITY, K_ESSENTIAL);
 	_mark_thread_as_started(_main_thread);
-	_add_thread_to_ready_q(_main_thread);
+	_ready_thread(_main_thread);

 #ifdef CONFIG_MULTITHREADING
 	init_idle_thread(_idle_thread, _idle_stack);

kernel/poll.c

@@ -299,7 +299,7 @@ static int signal_poll_event(struct k_poll_event *event, u32_t state,
 		goto ready_event;
 	}

-	_add_thread_to_ready_q(thread);
+	_ready_thread(thread);
 	*must_reschedule = !_is_in_isr() && _must_switch_threads();

 ready_event:

kernel/sched.c

@@ -70,6 +70,18 @@ static struct k_thread *get_ready_q_head(void)

 void _add_thread_to_ready_q(struct k_thread *thread)
 {
+	__ASSERT(_is_prio_higher(thread->base.prio, K_LOWEST_THREAD_PRIO) ||
+		 ((thread->base.prio == K_LOWEST_THREAD_PRIO) &&
+		  (thread == _idle_thread)),
+		 "thread %p prio too low (is %d, cannot be lower than %d)",
+		 thread, thread->base.prio,
+		 thread == _idle_thread ? K_LOWEST_THREAD_PRIO :
+					  K_LOWEST_APPLICATION_THREAD_PRIO);
+
+	__ASSERT(!_is_prio_higher(thread->base.prio, K_HIGHEST_THREAD_PRIO),
+		 "thread %p prio too high (id %d, cannot be higher than %d)",
+		 thread, thread->base.prio, K_HIGHEST_THREAD_PRIO);
+
 #ifdef CONFIG_MULTITHREADING
 	int q_index = _get_ready_q_q_index(thread->base.prio);
 	sys_dlist_t *q = &_ready_q.q[q_index];

kernel/thread.c

@@ -227,16 +227,8 @@ void _impl_k_thread_start(struct k_thread *thread)
 	}

 	_mark_thread_as_started(thread);
-
-	if (_is_thread_ready(thread)) {
-		_add_thread_to_ready_q(thread);
-		if (_must_switch_threads()) {
-			_Swap(key);
-			return;
-		}
-	}
-
-	irq_unlock(key);
+	_ready_thread(thread);
+	_reschedule_threads(key);
 }

 #ifdef CONFIG_USERSPACE
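
The _reschedule_threads() call above stands in for the deleted
swap-or-unlock sequence; a sketch of the equivalent logic, inferred
from the replaced lines rather than taken from the helper's actual
body:

	/* Hypothetical name; mirrors what the removed code did and
	 * what _reschedule_threads(key) is relied on to do here.
	 */
	static void reschedule_or_unlock(int key)
	{
		if (_must_switch_threads()) {
			_Swap(key);		/* switch away; returns later */
		} else {
			irq_unlock(key);	/* no better thread to run */
		}
	}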
@@ -462,10 +454,7 @@ _SYSCALL_HANDLER1_SIMPLE_VOID(k_thread_suspend, K_OBJ_THREAD, k_tid_t);
 void _k_thread_single_resume(struct k_thread *thread)
 {
 	_mark_thread_as_not_suspended(thread);
-
-	if (_is_thread_ready(thread)) {
-		_add_thread_to_ready_q(thread);
-	}
+	_ready_thread(thread);
 }

 void _impl_k_thread_resume(struct k_thread *thread)