kernel/queue: Spinlockify

Straightforward port.  Each struct k_queue object gets a spinlock to
control obvious data ownership.

Note that this port actually discovered a preexisting bug: the -ENOMEM
case in queue_insert() was failing to release the lock.  But because
the tests that hit that path didn't rely on other threads being
scheduled, they ran to successful completion even with interrupts
disabled.  The spinlock API detects that as a recursive lock when
asserts are enabled.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
Author:       Andy Ross <andrew.j.ross@intel.com>
Committed by: Anas Nashif
Date:         2018-07-25 13:01:54 -07:00
Commit:       603ea42764 (parent f6521a360d)
2 changed files with 18 additions and 20 deletions
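
To make the -ENOMEM bug concrete, here is a hypothetical, simplified sketch of
the queue_insert() flow after this patch -- not the real function.  It assumes
only the identifiers visible in the diff below, plus the struct alloc_node type
from the surrounding kernel/queue.c code, and shows why the allocation-failure
branch must drop the per-queue spinlock before returning: a lock still held at
return gets re-acquired on the next queue call, which the spinlock validation
asserts report as a recursive lock.

    /* Hypothetical sketch only; list bookkeeping and the CONFIG_POLL paths
     * are elided.
     */
    static s32_t queue_insert_sketch(struct k_queue *queue, void *data)
    {
            k_spinlock_key_t key = k_spin_lock(&queue->lock);
            struct alloc_node *anode = z_thread_malloc(sizeof(*anode));

            if (anode == NULL) {
                    /* The preexisting bug: returning here without this unlock
                     * left queue->lock held, so the next k_spin_lock() on the
                     * same lock tripped the recursive-lock assertion.
                     */
                    k_spin_unlock(&queue->lock, key);
                    return -ENOMEM;
            }

            anode->data = data;
            /* ...anode->node is initialized and linked into queue->data_q... */

            _reschedule(&queue->lock, key); /* hands off and releases the lock */
            return 0;
    }

On the success path the lock is not dropped explicitly; as in the diff, it is
passed to _reschedule(), which releases it after making its scheduling decision.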

include/kernel.h

@@ -1728,6 +1728,7 @@ static inline u32_t k_uptime_delta_32(s64_t *reftime)
 struct k_queue {
         sys_sflist_t data_q;
+        struct k_spinlock lock;
         union {
                 _wait_q_t wait_q;

kernel/queue.c

@@ -85,6 +85,7 @@ SYS_INIT(init_queue_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
 void _impl_k_queue_init(struct k_queue *queue)
 {
         sys_sflist_init(&queue->data_q);
+        queue->lock = (struct k_spinlock) {};
         _waitq_init(&queue->wait_q);
 #if defined(CONFIG_POLL)
         sys_dlist_init(&queue->poll_events);
@@ -122,7 +123,7 @@ static inline void handle_poll_events(struct k_queue *queue, u32_t state)

 void _impl_k_queue_cancel_wait(struct k_queue *queue)
 {
-        u32_t key = irq_lock();
+        k_spinlock_key_t key = k_spin_lock(&queue->lock);

 #if !defined(CONFIG_POLL)
         struct k_thread *first_pending_thread;
@@ -135,7 +136,7 @@ void _impl_k_queue_cancel_wait(struct k_queue *queue)
         handle_poll_events(queue, K_POLL_STATE_CANCELLED);
 #endif /* !CONFIG_POLL */

-        _reschedule_irqlock(key);
+        _reschedule(&queue->lock, key);
 }

 #ifdef CONFIG_USERSPACE
@@ -146,7 +147,7 @@ Z_SYSCALL_HANDLER1_SIMPLE_VOID(k_queue_cancel_wait, K_OBJ_QUEUE,
 static s32_t queue_insert(struct k_queue *queue, void *prev, void *data,
                           bool alloc)
 {
-        u32_t key = irq_lock();
+        k_spinlock_key_t key = k_spin_lock(&queue->lock);

 #if !defined(CONFIG_POLL)
         struct k_thread *first_pending_thread;
@@ -154,7 +155,7 @@ static s32_t queue_insert(struct k_queue *queue, void *prev, void *data,

         if (first_pending_thread != NULL) {
                 prepare_thread_to_run(first_pending_thread, data);
-                _reschedule_irqlock(key);
+                _reschedule(&queue->lock, key);
                 return 0;
         }
 #endif /* !CONFIG_POLL */
@@ -165,6 +166,7 @@ static s32_t queue_insert(struct k_queue *queue, void *prev, void *data,

                 anode = z_thread_malloc(sizeof(*anode));
                 if (anode == NULL) {
+                        k_spin_unlock(&queue->lock, key);
                         return -ENOMEM;
                 }
                 anode->data = data;
@@ -179,7 +181,7 @@ static s32_t queue_insert(struct k_queue *queue, void *prev, void *data,
         handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
 #endif /* CONFIG_POLL */

-        _reschedule_irqlock(key);
+        _reschedule(&queue->lock, key);
         return 0;
 }
@@ -234,7 +236,7 @@ void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
 {
         __ASSERT(head && tail, "invalid head or tail");

-        unsigned int key = irq_lock();
+        k_spinlock_key_t key = k_spin_lock(&queue->lock);

 #if !defined(CONFIG_POLL)
         struct k_thread *thread = NULL;
@@ -257,7 +259,7 @@ void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
         handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
 #endif /* !CONFIG_POLL */

-        _reschedule_irqlock(key);
+        _reschedule(&queue->lock, key);
 }

 void k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list)
@@ -282,7 +284,7 @@ static void *k_queue_poll(struct k_queue *queue, s32_t timeout)
 {
         struct k_poll_event event;
         int err, elapsed = 0, done = 0;
-        unsigned int key;
+        k_spinlock_key_t key;
         void *val;
         u32_t start;
@@ -302,12 +304,9 @@ static void *k_queue_poll(struct k_queue *queue, s32_t timeout)
                         return NULL;
                 }

-                /* sys_sflist_* aren't threadsafe, so must be always protected
-                 * by irq_lock.
-                 */
-                key = irq_lock();
+                key = k_spin_lock(&queue->lock);
                 val = z_queue_node_peek(sys_sflist_get(&queue->data_q), true);
-                irq_unlock(key);
+                k_spin_unlock(&queue->lock, key);

                 if ((val == NULL) && (timeout != K_FOREVER)) {
                         elapsed = k_uptime_get_32() - start;
@@ -321,32 +320,30 @@ static void *k_queue_poll(struct k_queue *queue, s32_t timeout)

 void *_impl_k_queue_get(struct k_queue *queue, s32_t timeout)
 {
-        unsigned int key;
+        k_spinlock_key_t key = k_spin_lock(&queue->lock);
         void *data;

-        key = irq_lock();
-
         if (likely(!sys_sflist_is_empty(&queue->data_q))) {
                 sys_sfnode_t *node;

                 node = sys_sflist_get_not_empty(&queue->data_q);
                 data = z_queue_node_peek(node, true);
-                irq_unlock(key);
+                k_spin_unlock(&queue->lock, key);
                 return data;
         }

         if (timeout == K_NO_WAIT) {
-                irq_unlock(key);
+                k_spin_unlock(&queue->lock, key);
                 return NULL;
         }

 #if defined(CONFIG_POLL)
-        irq_unlock(key);
+        k_spin_unlock(&queue->lock, key);

         return k_queue_poll(queue, timeout);

 #else
-        int ret = _pend_curr_irqlock(key, &queue->wait_q, timeout);
+        int ret = _pend_curr(&queue->lock, key, &queue->wait_q, timeout);

         return (ret != 0) ? NULL : _current->base.swap_data;
 #endif /* CONFIG_POLL */