kernel: msgq: avoid single char variables for msgq
Use msgq instead of the short and confusing q. Signed-off-by: Anas Nashif <anas.nashif@intel.com>
This commit is contained in:
parent
e001c9aadc
commit
7bde81f467
143
kernel/msg_q.c
143
kernel/msg_q.c
|
@ -44,25 +44,26 @@ SYS_INIT(init_msgq_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
|
|||
|
||||
#endif /* CONFIG_OBJECT_TRACING */
|
||||
|
||||
void k_msgq_init(struct k_msgq *q, char *buffer, size_t msg_size,
|
||||
void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size,
|
||||
u32_t max_msgs)
|
||||
{
|
||||
q->msg_size = msg_size;
|
||||
q->max_msgs = max_msgs;
|
||||
q->buffer_start = buffer;
|
||||
q->buffer_end = buffer + (max_msgs * msg_size);
|
||||
q->read_ptr = buffer;
|
||||
q->write_ptr = buffer;
|
||||
q->used_msgs = 0;
|
||||
q->flags = 0;
|
||||
z_waitq_init(&q->wait_q);
|
||||
q->lock = (struct k_spinlock) {};
|
||||
SYS_TRACING_OBJ_INIT(k_msgq, q);
|
||||
msgq->msg_size = msg_size;
|
||||
msgq->max_msgs = max_msgs;
|
||||
msgq->buffer_start = buffer;
|
||||
msgq->buffer_end = buffer + (max_msgs * msg_size);
|
||||
msgq->read_ptr = buffer;
|
||||
msgq->write_ptr = buffer;
|
||||
msgq->used_msgs = 0;
|
||||
msgq->flags = 0;
|
||||
z_waitq_init(&msgq->wait_q);
|
||||
msgq->lock = (struct k_spinlock) {};
|
||||
|
||||
z_object_init(q);
|
||||
SYS_TRACING_OBJ_INIT(k_msgq, msgq);
|
||||
|
||||
z_object_init(msgq);
|
||||
}
|
||||
|
||||
int z_impl_k_msgq_alloc_init(struct k_msgq *q, size_t msg_size,
|
||||
int z_impl_k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size,
|
||||
u32_t max_msgs)
|
||||
{
|
||||
void *buffer;
|
||||
|
@ -74,8 +75,8 @@ int z_impl_k_msgq_alloc_init(struct k_msgq *q, size_t msg_size,
|
|||
} else {
|
||||
buffer = z_thread_malloc(total_size);
|
||||
if (buffer != NULL) {
|
||||
k_msgq_init(q, buffer, msg_size, max_msgs);
|
||||
q->flags = K_MSGQ_FLAG_ALLOC;
|
||||
k_msgq_init(msgq, buffer, msg_size, max_msgs);
|
||||
msgq->flags = K_MSGQ_FLAG_ALLOC;
|
||||
ret = 0;
|
||||
} else {
|
||||
ret = -ENOMEM;
|
||||
|
@ -94,45 +95,47 @@ Z_SYSCALL_HANDLER(k_msgq_alloc_init, q, msg_size, max_msgs)
|
|||
}
|
||||
#endif
|
||||
|
||||
void k_msgq_cleanup(struct k_msgq *q)
|
||||
void k_msgq_cleanup(struct k_msgq *msgq)
|
||||
{
|
||||
__ASSERT_NO_MSG(z_waitq_head(&q->wait_q) == NULL);
|
||||
__ASSERT_NO_MSG(z_waitq_head(&msgq->wait_q) == NULL);
|
||||
|
||||
if ((q->flags & K_MSGQ_FLAG_ALLOC) != 0) {
|
||||
k_free(q->buffer_start);
|
||||
q->flags &= ~K_MSGQ_FLAG_ALLOC;
|
||||
if ((msgq->flags & K_MSGQ_FLAG_ALLOC) != 0) {
|
||||
k_free(msgq->buffer_start);
|
||||
msgq->flags &= ~K_MSGQ_FLAG_ALLOC;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int z_impl_k_msgq_put(struct k_msgq *q, void *data, s32_t timeout)
|
||||
int z_impl_k_msgq_put(struct k_msgq *msgq, void *data, s32_t timeout)
|
||||
{
|
||||
__ASSERT(!z_is_in_isr() || timeout == K_NO_WAIT, "");
|
||||
|
||||
k_spinlock_key_t key = k_spin_lock(&q->lock);
|
||||
struct k_thread *pending_thread;
|
||||
k_spinlock_key_t key;
|
||||
int result;
|
||||
|
||||
if (q->used_msgs < q->max_msgs) {
|
||||
key = k_spin_lock(&msgq->lock);
|
||||
|
||||
if (msgq->used_msgs < msgq->max_msgs) {
|
||||
/* message queue isn't full */
|
||||
pending_thread = z_unpend_first_thread(&q->wait_q);
|
||||
pending_thread = z_unpend_first_thread(&msgq->wait_q);
|
||||
if (pending_thread != NULL) {
|
||||
/* give message to waiting thread */
|
||||
(void)memcpy(pending_thread->base.swap_data, data,
|
||||
q->msg_size);
|
||||
msgq->msg_size);
|
||||
/* wake up waiting thread */
|
||||
z_set_thread_return_value(pending_thread, 0);
|
||||
z_ready_thread(pending_thread);
|
||||
z_reschedule(&q->lock, key);
|
||||
z_reschedule(&msgq->lock, key);
|
||||
return 0;
|
||||
} else {
|
||||
/* put message in queue */
|
||||
(void)memcpy(q->write_ptr, data, q->msg_size);
|
||||
q->write_ptr += q->msg_size;
|
||||
if (q->write_ptr == q->buffer_end) {
|
||||
q->write_ptr = q->buffer_start;
|
||||
(void)memcpy(msgq->write_ptr, data, msgq->msg_size);
|
||||
msgq->write_ptr += msgq->msg_size;
|
||||
if (msgq->write_ptr == msgq->buffer_end) {
|
||||
msgq->write_ptr = msgq->buffer_start;
|
||||
}
|
||||
q->used_msgs++;
|
||||
msgq->used_msgs++;
|
||||
}
|
||||
result = 0;
|
||||
} else if (timeout == K_NO_WAIT) {
|
||||
|
@ -141,10 +144,10 @@ int z_impl_k_msgq_put(struct k_msgq *q, void *data, s32_t timeout)
|
|||
} else {
|
||||
/* wait for put message success, failure, or timeout */
|
||||
_current->base.swap_data = data;
|
||||
return z_pend_curr(&q->lock, key, &q->wait_q, timeout);
|
||||
return z_pend_curr(&msgq->lock, key, &msgq->wait_q, timeout);
|
||||
}
|
||||
|
||||
k_spin_unlock(&q->lock, key);
|
||||
k_spin_unlock(&msgq->lock, key);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
@ -161,11 +164,11 @@ Z_SYSCALL_HANDLER(k_msgq_put, msgq_p, data, timeout)
|
|||
}
|
||||
#endif
|
||||
|
||||
void z_impl_k_msgq_get_attrs(struct k_msgq *q, struct k_msgq_attrs *attrs)
|
||||
void z_impl_k_msgq_get_attrs(struct k_msgq *msgq, struct k_msgq_attrs *attrs)
|
||||
{
|
||||
attrs->msg_size = q->msg_size;
|
||||
attrs->max_msgs = q->max_msgs;
|
||||
attrs->used_msgs = q->used_msgs;
|
||||
attrs->msg_size = msgq->msg_size;
|
||||
attrs->max_msgs = msgq->max_msgs;
|
||||
attrs->used_msgs = msgq->used_msgs;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
|
@ -180,39 +183,41 @@ Z_SYSCALL_HANDLER(k_msgq_get_attrs, msgq_p, attrs)
|
|||
}
|
||||
#endif
|
||||
|
||||
int z_impl_k_msgq_get(struct k_msgq *q, void *data, s32_t timeout)
|
||||
int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, s32_t timeout)
|
||||
{
|
||||
__ASSERT(!z_is_in_isr() || timeout == K_NO_WAIT, "");
|
||||
|
||||
k_spinlock_key_t key = k_spin_lock(&q->lock);
|
||||
k_spinlock_key_t key;
|
||||
struct k_thread *pending_thread;
|
||||
int result;
|
||||
|
||||
if (q->used_msgs > 0) {
|
||||
key = k_spin_lock(&msgq->lock);
|
||||
|
||||
if (msgq->used_msgs > 0) {
|
||||
/* take first available message from queue */
|
||||
(void)memcpy(data, q->read_ptr, q->msg_size);
|
||||
q->read_ptr += q->msg_size;
|
||||
if (q->read_ptr == q->buffer_end) {
|
||||
q->read_ptr = q->buffer_start;
|
||||
(void)memcpy(data, msgq->read_ptr, msgq->msg_size);
|
||||
msgq->read_ptr += msgq->msg_size;
|
||||
if (msgq->read_ptr == msgq->buffer_end) {
|
||||
msgq->read_ptr = msgq->buffer_start;
|
||||
}
|
||||
q->used_msgs--;
|
||||
msgq->used_msgs--;
|
||||
|
||||
/* handle first thread waiting to write (if any) */
|
||||
pending_thread = z_unpend_first_thread(&q->wait_q);
|
||||
pending_thread = z_unpend_first_thread(&msgq->wait_q);
|
||||
if (pending_thread != NULL) {
|
||||
/* add thread's message to queue */
|
||||
(void)memcpy(q->write_ptr, pending_thread->base.swap_data,
|
||||
q->msg_size);
|
||||
q->write_ptr += q->msg_size;
|
||||
if (q->write_ptr == q->buffer_end) {
|
||||
q->write_ptr = q->buffer_start;
|
||||
(void)memcpy(msgq->write_ptr, pending_thread->base.swap_data,
|
||||
msgq->msg_size);
|
||||
msgq->write_ptr += msgq->msg_size;
|
||||
if (msgq->write_ptr == msgq->buffer_end) {
|
||||
msgq->write_ptr = msgq->buffer_start;
|
||||
}
|
||||
q->used_msgs++;
|
||||
msgq->used_msgs++;
|
||||
|
||||
/* wake up waiting thread */
|
||||
z_set_thread_return_value(pending_thread, 0);
|
||||
z_ready_thread(pending_thread);
|
||||
z_reschedule(&q->lock, key);
|
||||
z_reschedule(&msgq->lock, key);
|
||||
return 0;
|
||||
}
|
||||
result = 0;
|
||||
|
@ -222,10 +227,10 @@ int z_impl_k_msgq_get(struct k_msgq *q, void *data, s32_t timeout)
|
|||
} else {
|
||||
/* wait for get message success or timeout */
|
||||
_current->base.swap_data = data;
|
||||
return z_pend_curr(&q->lock, key, &q->wait_q, timeout);
|
||||
return z_pend_curr(&msgq->lock, key, &msgq->wait_q, timeout);
|
||||
}
|
||||
|
||||
k_spin_unlock(&q->lock, key);
|
||||
k_spin_unlock(&msgq->lock, key);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
@ -242,21 +247,23 @@ Z_SYSCALL_HANDLER(k_msgq_get, msgq_p, data, timeout)
|
|||
}
|
||||
#endif
|
||||
|
||||
int z_impl_k_msgq_peek(struct k_msgq *q, void *data)
|
||||
int z_impl_k_msgq_peek(struct k_msgq *msgq, void *data)
|
||||
{
|
||||
k_spinlock_key_t key = k_spin_lock(&q->lock);
|
||||
k_spinlock_key_t key;
|
||||
int result;
|
||||
|
||||
if (q->used_msgs > 0) {
|
||||
key = k_spin_lock(&msgq->lock);
|
||||
|
||||
if (msgq->used_msgs > 0) {
|
||||
/* take first available message from queue */
|
||||
(void)memcpy(data, q->read_ptr, q->msg_size);
|
||||
(void)memcpy(data, msgq->read_ptr, msgq->msg_size);
|
||||
result = 0;
|
||||
} else {
|
||||
/* don't wait for a message to become available */
|
||||
result = -ENOMSG;
|
||||
}
|
||||
|
||||
k_spin_unlock(&q->lock, key);
|
||||
k_spin_unlock(&msgq->lock, key);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
@ -273,21 +280,23 @@ Z_SYSCALL_HANDLER(k_msgq_peek, msgq_p, data)
|
|||
}
|
||||
#endif
|
||||
|
||||
void z_impl_k_msgq_purge(struct k_msgq *q)
|
||||
void z_impl_k_msgq_purge(struct k_msgq *msgq)
|
||||
{
|
||||
k_spinlock_key_t key = k_spin_lock(&q->lock);
|
||||
k_spinlock_key_t key;
|
||||
struct k_thread *pending_thread;
|
||||
|
||||
key = k_spin_lock(&msgq->lock);
|
||||
|
||||
/* wake up any threads that are waiting to write */
|
||||
while ((pending_thread = z_unpend_first_thread(&q->wait_q)) != NULL) {
|
||||
while ((pending_thread = z_unpend_first_thread(&msgq->wait_q)) != NULL) {
|
||||
z_set_thread_return_value(pending_thread, -ENOMSG);
|
||||
z_ready_thread(pending_thread);
|
||||
}
|
||||
|
||||
q->used_msgs = 0;
|
||||
q->read_ptr = q->write_ptr;
|
||||
msgq->used_msgs = 0;
|
||||
msgq->read_ptr = msgq->write_ptr;
|
||||
|
||||
z_reschedule(&q->lock, key);
|
||||
z_reschedule(&msgq->lock, key);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
|
|
Loading…
Reference in a new issue