945af95f42
All system calls made from userspace which involve pointers to kernel objects
(including device drivers) will need to have those pointers validated;
userspace should never be able to crash the kernel by passing it garbage.
The actual validation with _k_object_validate() will be in the system call
receiver code, which doesn't exist yet.

- CONFIG_USERSPACE introduced. We are somewhat far away from having an
  end-to-end implementation, but at least need a Kconfig symbol to guard the
  incoming code with. Formal documentation doesn't exist yet either, but will
  appear later down the road once the implementation is mostly finalized.

- In the memory region for RAM, the data section has been moved last, past
  bss and noinit. This ensures that inserting generated tables with addresses
  of kernel objects does not change the addresses of those objects (which
  would make the table invalid).

- The DWARF debug information in the generated ELF binary is parsed to fetch
  the locations of all kernel objects, and this is passed to gperf to create
  a perfect hash table of their memory addresses.

- The generated gperf code doesn't know that we are working exclusively with
  memory addresses and uses memory inefficiently. A post-processing script,
  process_gperf.py, adjusts the generated code before it is compiled to work
  with pointer values directly rather than strings containing them.

- _k_object_init() calls inserted into the init functions for the set of
  kernel object types we are going to support so far.

Issue: ZEP-2187
Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
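To make the intent of the receiver-side check concrete, here is a minimal
sketch of how a system call handler might reject garbage pointers before
touching them. The receiver code does not exist yet, so the handler name, the
_k_object_validate() signature, and the K_OBJ_MSGQ type tag below are all
assumptions for illustration, not the eventual API.

/* Hypothetical sketch only: the syscall receiver described above is not
 * implemented yet. The handler name, the _k_object_validate() signature,
 * and the K_OBJ_MSGQ type tag are placeholders for illustration.
 */
static int _handler_k_msgq_put(struct k_msgq *q, void *data, s32_t timeout)
{
	/* Refuse to dereference anything that is not a registered,
	 * initialized kernel object of the expected type.
	 */
	if (_k_object_validate(q, K_OBJ_MSGQ) != 0) {
		return -EINVAL;	/* bad pointer from userspace; don't crash */
	}

	return k_msgq_put(q, data, timeout);
}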
/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Message queues.
 */


#include <kernel.h>
#include <kernel_structs.h>
#include <debug/object_tracing_common.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <string.h>
#include <wait_q.h>
#include <misc/dlist.h>
#include <init.h>

extern struct k_msgq _k_msgq_list_start[];
extern struct k_msgq _k_msgq_list_end[];

#ifdef CONFIG_OBJECT_TRACING

struct k_msgq *_trace_list_k_msgq;

/*
 * Complete initialization of statically defined message queues.
 */
static int init_msgq_module(struct device *dev)
{
	ARG_UNUSED(dev);

	struct k_msgq *msgq;

	for (msgq = _k_msgq_list_start; msgq < _k_msgq_list_end; msgq++) {
		SYS_TRACING_OBJ_INIT(k_msgq, msgq);
	}
	return 0;
}

SYS_INIT(init_msgq_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);

#endif /* CONFIG_OBJECT_TRACING */

void k_msgq_init(struct k_msgq *q, char *buffer,
		 size_t msg_size, u32_t max_msgs)
{
	q->msg_size = msg_size;
	q->max_msgs = max_msgs;
	q->buffer_start = buffer;
	q->buffer_end = buffer + (max_msgs * msg_size);
	q->read_ptr = buffer;
	q->write_ptr = buffer;
	q->used_msgs = 0;
	sys_dlist_init(&q->wait_q);
	SYS_TRACING_OBJ_INIT(k_msgq, q);

	_k_object_init(q);
}

int k_msgq_put(struct k_msgq *q, void *data, s32_t timeout)
{
	__ASSERT(!_is_in_isr() || timeout == K_NO_WAIT, "");

	unsigned int key = irq_lock();
	struct k_thread *pending_thread;
	int result;

	if (q->used_msgs < q->max_msgs) {
		/* message queue isn't full */
		pending_thread = _unpend_first_thread(&q->wait_q);
		if (pending_thread) {
			/* give message to waiting thread */
			memcpy(pending_thread->base.swap_data, data,
			       q->msg_size);
			/* wake up waiting thread */
			_set_thread_return_value(pending_thread, 0);
			_abort_thread_timeout(pending_thread);
			_ready_thread(pending_thread);
			if (!_is_in_isr() && _must_switch_threads()) {
				_Swap(key);
				return 0;
			}
		} else {
			/* put message in queue */
			memcpy(q->write_ptr, data, q->msg_size);
			q->write_ptr += q->msg_size;
			if (q->write_ptr == q->buffer_end) {
				q->write_ptr = q->buffer_start;
			}
			q->used_msgs++;
		}
		result = 0;
	} else if (timeout == K_NO_WAIT) {
		/* don't wait for message space to become available */
		result = -ENOMSG;
	} else {
		/* wait for put message success, failure, or timeout */
		_pend_current_thread(&q->wait_q, timeout);
		_current->base.swap_data = data;
		return _Swap(key);
	}

	irq_unlock(key);

	return result;
}

int k_msgq_get(struct k_msgq *q, void *data, s32_t timeout)
{
	__ASSERT(!_is_in_isr() || timeout == K_NO_WAIT, "");

	unsigned int key = irq_lock();
	struct k_thread *pending_thread;
	int result;

	if (q->used_msgs > 0) {
		/* take first available message from queue */
		memcpy(data, q->read_ptr, q->msg_size);
		q->read_ptr += q->msg_size;
		if (q->read_ptr == q->buffer_end) {
			q->read_ptr = q->buffer_start;
		}
		q->used_msgs--;

		/* handle first thread waiting to write (if any) */
		pending_thread = _unpend_first_thread(&q->wait_q);
		if (pending_thread) {
			/* add thread's message to queue */
			memcpy(q->write_ptr, pending_thread->base.swap_data,
			       q->msg_size);
			q->write_ptr += q->msg_size;
			if (q->write_ptr == q->buffer_end) {
				q->write_ptr = q->buffer_start;
			}
			q->used_msgs++;

			/* wake up waiting thread */
			_set_thread_return_value(pending_thread, 0);
			_abort_thread_timeout(pending_thread);
			_ready_thread(pending_thread);
			if (!_is_in_isr() && _must_switch_threads()) {
				_Swap(key);
				return 0;
			}
		}
		result = 0;
	} else if (timeout == K_NO_WAIT) {
		/* don't wait for a message to become available */
		result = -ENOMSG;
	} else {
		/* wait for get message success or timeout */
		_pend_current_thread(&q->wait_q, timeout);
		_current->base.swap_data = data;
		return _Swap(key);
	}

	irq_unlock(key);

	return result;
}

void k_msgq_purge(struct k_msgq *q)
{
	unsigned int key = irq_lock();
	struct k_thread *pending_thread;

	/* wake up any threads that are waiting to write */
	while ((pending_thread = _unpend_first_thread(&q->wait_q)) != NULL) {
		_set_thread_return_value(pending_thread, -ENOMSG);
		_abort_thread_timeout(pending_thread);
		_ready_thread(pending_thread);
	}

	q->used_msgs = 0;
	q->read_ptr = q->write_ptr;

	_reschedule_threads(key);
}
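For context, a brief caller-side sketch of the message queue API implemented
above, using only the signatures visible in this file plus the standard
K_NO_WAIT/K_FOREVER timeout values. The item type, queue depth, buffer name,
and example function are illustrative, not part of this file.

/* Illustrative usage of the API above; names here are examples only. */
struct item {
	u32_t id;
	u32_t value;
};

static char msgq_buffer[10 * sizeof(struct item)];
static struct k_msgq my_msgq;

void msgq_usage_example(void)
{
	struct item tx = { .id = 1, .value = 42 };
	struct item rx;

	/* backing storage holds up to 10 messages of sizeof(struct item) */
	k_msgq_init(&my_msgq, msgq_buffer, sizeof(struct item), 10);

	/* enqueue without blocking; returns -ENOMSG if the queue is full */
	if (k_msgq_put(&my_msgq, &tx, K_NO_WAIT) != 0) {
		/* queue full, message not enqueued */
	}

	/* block until a message is available, then copy it out */
	k_msgq_get(&my_msgq, &rx, K_FOREVER);
}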