kernel: Add the old "multi queue" scheduler algorithm as an option

Zephyr 1.12 removed the old scheduler and replaced it with the choice
of a "dumb" list or a balanced tree.  But the old multi-queue
algorithm is still useful in the space between these two (applications
with large-ish numbers of runnable threads, but that don't need fancy
features like EDF or SMP affinity).  So add it as a
CONFIG_SCHED_MULTIQ option.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
Authored by Andy Ross on 2018-06-28 10:38:14 -07:00, committed by Anas Nashif
parent 225c74bbdf
commit 9f06a35450
5 changed files with 106 additions and 19 deletions

@@ -44,4 +44,20 @@ void _priq_rb_add(struct _priq_rb *pq, struct k_thread *thread);
void _priq_rb_remove(struct _priq_rb *pq, struct k_thread *thread);
struct k_thread *_priq_rb_best(struct _priq_rb *pq);
/* Traditional/textbook "multi-queue" structure. Separate lists for a
 * small number (max 32 here) of fixed priorities. This corresponds
 * to the original Zephyr scheduler. RAM requirements are
 * comparatively high, but performance is very fast. Won't work with
 * features like deadline scheduling which need large priority spaces
 * to represent their requirements.
 */
struct _priq_mq {
	sys_dlist_t queues[32];
	unsigned int bitmask; /* bit 1<<i set if queues[i] is non-empty */
};
void _priq_mq_add(struct _priq_mq *pq, struct k_thread *thread);
void _priq_mq_remove(struct _priq_mq *pq, struct k_thread *thread);
struct k_thread *_priq_mq_best(struct _priq_mq *pq);
#endif /* _sched_priq__h_ */
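
The "comparatively high" RAM cost called out in the comment above is easy to quantify: the ready queue alone carries 32 list heads plus a bitmask word, versus a single list head under the "dumb" scheduler. A minimal standalone sketch (the two-pointer sys_dlist_t stand-in is this sketch's assumption, mirroring Zephyr's doubly-linked list node, not part of the commit):

#include <stdio.h>

/* Stand-in for Zephyr's sys_dlist_t: assumed head/tail pointer pair. */
typedef struct { void *head, *tail; } sys_dlist_t;

/* Mirror of the struct _priq_mq added by this commit. */
struct _priq_mq {
	sys_dlist_t queues[32];
	unsigned int bitmask;
};

int main(void)
{
	/* Prints 260 on a typical 32-bit target (32 * 8 + 4 bytes),
	 * 520 on a 64-bit one; CONFIG_SCHED_DUMB needs only 8 or 16
	 * bytes respectively for its single sys_dlist_t.
	 */
	printf("%zu bytes\n", sizeof(struct _priq_mq));
	return 0;
}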

@@ -225,8 +225,27 @@ config SCHED_SCALABLE
	  are not otherwise using the rbtree somewhere) requires an
	  extra ~2kb of code. But the resulting behavior will scale
	  cleanly and quickly into the many thousands of threads. Use
-	  this on platforms where you may have MANY threads marked as
-	  runnable at a given time. Most applications don't want this.
+	  this on platforms where you may have many threads (very
+	  roughly: more than 20 or so) marked as runnable at a given
+	  time. Most applications don't want this.
config SCHED_MULTIQ
	bool "Traditional multi-queue ready queue"
	depends on !SCHED_DEADLINE
	help
	  When selected, the scheduler ready queue will be implemented
	  as the classic/textbook array of lists, one per priority
	  (max 32 priorities). This corresponds to the scheduler
	  algorithm used in Zephyr versions prior to 1.12. It incurs
	  only a tiny code size overhead vs. the "dumb" scheduler and
	  runs in O(1) time in almost all circumstances with a very
	  low constant factor. But it requires a fairly large RAM
	  budget to store those list heads, and it is incompatible
	  with features like deadline scheduling, which needs to sort
	  threads more finely, and SMP affinity, which needs to
	  traverse the list of threads. Typical applications with
	  small numbers of runnable threads probably want the DUMB
	  scheduler.
endchoice # SCHED_ALGORITHM
@@ -234,9 +253,10 @@ choice WAITQ_ALGORITHM
	prompt "Wait queue priority algorithm"
	default WAITQ_DUMB
	help
-	  The wait_q abstraction used in IPC primitives to pend threads
-	  for later wakeup shares the same backend data structure
-	  choices as the scheduler, and can use the same options.
+	  The wait_q abstraction used in IPC primitives to pend
+	  threads for later wakeup shares the same backend data
+	  structure choices as the scheduler, and can use the same
+	  options.
config WAITQ_SCALABLE
	bool

@@ -71,10 +71,12 @@ struct _ready_q {
	struct k_thread *cache;
#endif
-#ifdef CONFIG_SCHED_DUMB
+#if defined(CONFIG_SCHED_DUMB)
	sys_dlist_t runq;
-#else
+#elif defined(CONFIG_SCHED_SCALABLE)
	struct _priq_rb runq;
+#elif defined(CONFIG_SCHED_MULTIQ)
+	struct _priq_mq runq;
#endif
};

@@ -20,6 +20,10 @@
#define _priq_run_add _priq_rb_add
#define _priq_run_remove _priq_rb_remove
#define _priq_run_best _priq_rb_best
#elif defined(CONFIG_SCHED_MULTIQ)
#define _priq_run_add _priq_mq_add
#define _priq_run_remove _priq_mq_remove
#define _priq_run_best _priq_mq_best
#endif
#if defined(CONFIG_WAITQ_SCALABLE)
@@ -559,6 +563,42 @@ struct k_thread *_priq_rb_best(struct _priq_rb *pq)
	return CONTAINER_OF(n, struct k_thread, base.qnode_rb);
}
#ifdef CONFIG_SCHED_MULTIQ
# if (K_LOWEST_THREAD_PRIO - K_HIGHEST_THREAD_PRIO) > 31
# error Too many priorities for multiqueue scheduler (max 32)
# endif
#endif
void _priq_mq_add(struct _priq_mq *pq, struct k_thread *thread)
{
	int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO;

	sys_dlist_append(&pq->queues[priority_bit], &thread->base.qnode_dlist);
	pq->bitmask |= (1u << priority_bit);
}

void _priq_mq_remove(struct _priq_mq *pq, struct k_thread *thread)
{
	int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO;

	sys_dlist_remove(&thread->base.qnode_dlist);

	/* Clear the bit only when the last runnable thread at this
	 * priority has left its queue.
	 */
	if (sys_dlist_is_empty(&pq->queues[priority_bit])) {
		pq->bitmask &= ~(1u << priority_bit);
	}
}
struct k_thread *_priq_mq_best(struct _priq_mq *pq)
{
	if (!pq->bitmask) {
		return NULL;
	}

	/* The lowest set bit is the lowest-numbered (i.e. best)
	 * non-empty priority; __builtin_ctz() is undefined for zero,
	 * but the bitmask was checked above.
	 */
	sys_dlist_t *l = &pq->queues[__builtin_ctz(pq->bitmask)];

	return CONTAINER_OF(sys_dlist_peek_head(l),
			    struct k_thread, base.qnode_dlist);
}
#ifdef CONFIG_TIMESLICING
extern s32_t _time_slice_duration; /* Measured in ms */
extern s32_t _time_slice_elapsed; /* Measured in ms */
@@ -644,13 +684,21 @@ void _sched_init(void)
{
#ifdef CONFIG_SCHED_DUMB
	sys_dlist_init(&_kernel.ready_q.runq);
-#else
+#endif

+#ifdef CONFIG_SCHED_SCALABLE
	_kernel.ready_q.runq = (struct _priq_rb) {
		.tree = {
			.lessthan_fn = _priq_rb_lessthan,
		}
	};
#endif

#ifdef CONFIG_SCHED_MULTIQ
	for (int i = 0; i < ARRAY_SIZE(_kernel.ready_q.runq.queues); i++) {
		sys_dlist_init(&_kernel.ready_q.runq.queues[i]);
	}
#endif
}
int _impl_k_thread_priority_get(k_tid_t thread)
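
The O(1) claim in the Kconfig help comes down to that bitmask: _priq_mq_best() finds the best runnable priority with a single count-trailing-zeros instruction, no matter how many threads are queued. The lowest bit index wins because Zephyr treats numerically lower priorities as more urgent, which is also why _priq_mq_add() biases prio by K_HIGHEST_THREAD_PRIO to get a non-negative bit index. A standalone sketch of the same trick outside the kernel (the helper names here are this sketch's own, not the commit's):

#include <stdio.h>

static unsigned int bitmask; /* bit i set => priority level i has runnable threads */

static void mark_runnable(int prio) { bitmask |= 1u << prio; }
static void mark_empty(int prio)    { bitmask &= ~(1u << prio); }

static int best_prio(void)
{
	/* __builtin_ctz() is undefined for zero, so check first */
	return bitmask ? __builtin_ctz(bitmask) : -1;
}

int main(void)
{
	mark_runnable(7);
	mark_runnable(3);
	printf("%d\n", best_prio()); /* 3: lowest index = best priority */
	mark_empty(3);
	printf("%d\n", best_prio()); /* 7 */
	return 0;
}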

@@ -1 +1,2 @@
# nothing here
+CONFIG_SCHED_MULTIQ=y