zephyr/kernel/priority_queues.c
TaiJu Wu 1f5f0cf838 sched: Remove multi-level queue priority limit
Change the bitmask to a bitmask array so the multi-level queue no
longer has a 32-priority limit.

We scan the bitmask array to find which queue has a ready thread.

Only the number of queues is needed for the priority range, because
the priority is checked in create_thread.

Signed-off-by: TaiJu Wu <tjwu1217@gmail.com>
2024-03-12 19:37:40 -04:00
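
The enqueue/dequeue helpers that set and clear bits in the bitmask array are
not part of this file. As a rough, self-contained sketch of the technique the
commit message describes (the names `demo_mq`, `demo_mq_set`, `demo_mq_clear`,
`demo_mq_best`, and `NUM_PRIOS` are illustrative only, not Zephyr APIs, and
`__builtin_ctz` is a GCC/Clang builtin standing in for the kernel's
count-trailing-zeros helpers):

#include <stdint.h>
#include <stdio.h>

#define NUM_PRIOS   40                      /* any count; no 32-priority limit */
#define WORD_BITS   32
#define BITMAP_SIZE ((NUM_PRIOS + WORD_BITS - 1) / WORD_BITS)

struct demo_mq {
	uint32_t bitmask[BITMAP_SIZE];      /* bit i*32+j set => queue nonempty */
};

/* Mark the queue for 'prio' as having a ready thread. */
static void demo_mq_set(struct demo_mq *q, unsigned int prio)
{
	q->bitmask[prio / WORD_BITS] |= 1u << (prio % WORD_BITS);
}

/* Clear the bit when the queue for 'prio' drains. */
static void demo_mq_clear(struct demo_mq *q, unsigned int prio)
{
	q->bitmask[prio / WORD_BITS] &= ~(1u << (prio % WORD_BITS));
}

/* Scan word by word for the lowest-numbered nonempty queue,
 * mirroring the loop in z_priq_mq_best() below.
 */
static int demo_mq_best(const struct demo_mq *q)
{
	for (int i = 0; i < BITMAP_SIZE; ++i) {
		if (q->bitmask[i]) {
			return i * WORD_BITS + __builtin_ctz(q->bitmask[i]);
		}
	}
	return -1;                          /* no ready thread anywhere */
}

int main(void)
{
	struct demo_mq q = { 0 };

	demo_mq_set(&q, 35);                /* would not fit in one 32-bit mask */
	demo_mq_set(&q, 7);
	printf("best prio: %d\n", demo_mq_best(&q));  /* prints 7 */
	demo_mq_clear(&q, 7);
	printf("best prio: %d\n", demo_mq_best(&q));  /* prints 35 */
	return 0;
}

Scanning from the lowest word and lowest bit first means numerically lower
priorities win, which matches the ordering z_priq_mq_best() relies on.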


/*
 * Copyright (c) 2018,2024 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/kernel.h>
#include <ksched.h>
#include <zephyr/sys/math_extras.h>

void z_priq_dumb_remove(sys_dlist_t *pq, struct k_thread *thread)
{
	ARG_UNUSED(pq);

	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));

	sys_dlist_remove(&thread->base.qnode_dlist);
}

struct k_thread *z_priq_dumb_best(sys_dlist_t *pq)
{
	struct k_thread *thread = NULL;
	sys_dnode_t *n = sys_dlist_peek_head(pq);

	if (n != NULL) {
		thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
	}
	return thread;
}

bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b)
{
	struct k_thread *thread_a, *thread_b;
	int32_t cmp;

	thread_a = CONTAINER_OF(a, struct k_thread, base.qnode_rb);
	thread_b = CONTAINER_OF(b, struct k_thread, base.qnode_rb);

	cmp = z_sched_prio_cmp(thread_a, thread_b);
	if (cmp > 0) {
		return true;
	} else if (cmp < 0) {
		return false;
	} else {
		/* Equal priority: fall back to FIFO order via the
		 * monotonically assigned order_key.
		 */
		return thread_a->base.order_key < thread_b->base.order_key;
	}
}

void z_priq_rb_add(struct _priq_rb *pq, struct k_thread *thread)
{
	struct k_thread *t;

	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));

	thread->base.order_key = pq->next_order_key++;

	/* Renumber at wraparound. This is tiny code, and in practice
	 * will almost never be hit on real systems. BUT on very
	 * long-running systems where a priq never completely empties
	 * AND that contains very large numbers of threads, it can be
	 * a latency glitch to loop over all the threads like this.
	 */
	if (!pq->next_order_key) {
		RB_FOR_EACH_CONTAINER(&pq->tree, t, base.qnode_rb) {
			t->base.order_key = pq->next_order_key++;
		}
	}

	rb_insert(&pq->tree, &thread->base.qnode_rb);
}

void z_priq_rb_remove(struct _priq_rb *pq, struct k_thread *thread)
{
	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));

	rb_remove(&pq->tree, &thread->base.qnode_rb);

	if (!pq->tree.root) {
		pq->next_order_key = 0;
	}
}

struct k_thread *z_priq_rb_best(struct _priq_rb *pq)
{
	struct k_thread *thread = NULL;
	struct rbnode *n = rb_get_min(&pq->tree);

	if (n != NULL) {
		thread = CONTAINER_OF(n, struct k_thread, base.qnode_rb);
	}
	return thread;
}

struct k_thread *z_priq_mq_best(struct _priq_mq *pq)
{
	struct k_thread *thread = NULL;

	/* Scan the bitmask array word by word; a set bit marks a
	 * per-priority queue with at least one ready thread.
	 */
	for (int i = 0; i < PRIQ_BITMAP_SIZE; ++i) {
		if (!pq->bitmask[i]) {
			continue;
		}

#ifdef CONFIG_64BIT
		sys_dlist_t *l = &pq->queues[i * 64 + u64_count_trailing_zeros(pq->bitmask[i])];
#else
		sys_dlist_t *l = &pq->queues[i * 32 + u32_count_trailing_zeros(pq->bitmask[i])];
#endif
		sys_dnode_t *n = sys_dlist_peek_head(l);

		if (n != NULL) {
			thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
			break;
		}
	}
	return thread;
}