zephyr/kernel/stack.c
Andrew Boie 945af95f42 kernel: introduce object validation mechanism
All system calls made from userspace which involve pointers to kernel
objects (including device drivers) will need to have those pointers
validated; userspace should never be able to crash the kernel by passing
it garbage.

The actual validation with _k_object_validate() will be in the system
call receiver code, which doesn't exist yet.

- CONFIG_USERSPACE introduced. We are somewhat far away from having an
  end-to-end implementation, but at least need a Kconfig symbol to
  guard the incoming code with. Formal documentation doesn't exist yet
  either, but will appear later down the road once the implementation is
  mostly finalized.

- In the memory region for RAM, the data section has been moved last,
  past bss and noinit. This ensures that inserting generated tables
  with addresses of kernel objects does not change the addresses of
  those objects (which would make the table invalid)

- The DWARF debug information in the generated ELF binary is parsed to
  fetch the locations of all kernel objects and pass this to gperf to
  create a perfect hash table of their memory addresses.

- The generated gperf code doesn't know that we are exclusively working
with memory addresses and uses memory inefficiently. A post-processing
  script process_gperf.py adjusts the generated code before it is
  compiled to work with pointer values directly and not strings
  containing them.

- _k_object_init() calls inserted into the init functions for the set of
  kernel object types we are going to support so far

Issue: ZEP-2187
Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
2017-09-07 16:33:33 -07:00

114 lines
2.2 KiB
C

/*
* Copyright (c) 2010-2016 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @brief fixed-size stack object
*/
#include <kernel.h>
#include <kernel_structs.h>
#include <debug/object_tracing_common.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <ksched.h>
#include <wait_q.h>
#include <misc/__assert.h>
#include <init.h>
extern struct k_stack _k_stack_list_start[];
extern struct k_stack _k_stack_list_end[];
#ifdef CONFIG_OBJECT_TRACING
struct k_stack *_trace_list_k_stack;
/*
* Complete initialization of statically defined stacks.
*/
/*
 * Walk the linker-generated span of statically defined stacks and
 * register each one with the object tracing subsystem.
 */
static int init_stack_module(struct device *dev)
{
	struct k_stack *pos;

	ARG_UNUSED(dev);

	for (pos = _k_stack_list_start; pos < _k_stack_list_end; pos++) {
		SYS_TRACING_OBJ_INIT(k_stack, pos);
	}

	return 0;
}
SYS_INIT(init_stack_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif /* CONFIG_OBJECT_TRACING */
/**
 * @brief Initialize a stack object at runtime.
 *
 * @param stack Address of the stack object
 * @param buffer Storage area for stacked values
 * @param num_entries Capacity of the buffer, in 32-bit words
 */
void k_stack_init(struct k_stack *stack, u32_t *buffer, int num_entries)
{
	sys_dlist_init(&stack->wait_q);

	/* Empty stack: the next slot to write is the base of the buffer */
	stack->base = buffer;
	stack->next = buffer;
	stack->top = buffer + num_entries;

	SYS_TRACING_OBJ_INIT(k_stack, stack);
	_k_object_init(stack);
}
/**
 * @brief Push a value onto the stack.
 *
 * If a thread is blocked waiting in k_stack_pop(), the value is handed
 * directly to the oldest waiter (bypassing the buffer) and that thread is
 * made ready; otherwise the value is stored in the buffer.
 *
 * @param stack Address of the stack object
 * @param data Value to push
 *
 * Caller contract: the stack must not be full (asserted below).
 */
void k_stack_push(struct k_stack *stack, u32_t data)
{
struct k_thread *first_pending_thread;
unsigned int key;
__ASSERT(stack->next != stack->top, "stack is full");
key = irq_lock();
first_pending_thread = _unpend_first_thread(&stack->wait_q);
if (first_pending_thread) {
/* A consumer is waiting: deliver the value as its k_stack_pop()
 * return data instead of going through the buffer.
 */
_abort_thread_timeout(first_pending_thread);
_ready_thread(first_pending_thread);
_set_thread_return_value_with_data(first_pending_thread,
0, (void *)data);
/* Reschedule immediately if the woken thread should preempt us.
 * Note: _Swap() releases the irq lock for us, hence the early
 * return without irq_unlock().
 */
if (!_is_in_isr() && _must_switch_threads()) {
(void)_Swap(key);
return;
}
} else {
/* No waiters: store the value in the buffer */
*(stack->next) = data;
stack->next++;
}
irq_unlock(key);
}
/**
 * @brief Pop a value from the stack, waiting up to @a timeout if empty.
 *
 * @param stack Address of the stack object
 * @param data Where to store the popped value on success
 * @param timeout K_NO_WAIT, K_FOREVER, or a wait limit in ms
 *
 * @return 0 on success (*data written), -EBUSY if the stack is empty and
 *         @a timeout is K_NO_WAIT, otherwise the _Swap() result (non-zero
 *         on timeout).
 */
int k_stack_pop(struct k_stack *stack, u32_t *data, s32_t timeout)
{
unsigned int key;
int result;
key = irq_lock();
/* Fast path: buffer is non-empty, take the most recently pushed value */
if (likely(stack->next > stack->base)) {
stack->next--;
*data = *(stack->next);
irq_unlock(key);
return 0;
}
if (timeout == K_NO_WAIT) {
irq_unlock(key);
return -EBUSY;
}
/* Block until a pusher hands us a value or the timeout expires.
 * _Swap() releases the irq lock; its return value is 0 only if a
 * k_stack_push() call woke us.
 */
_pend_current_thread(&stack->wait_q, timeout);
result = _Swap(key);
if (result == 0) {
/* Value delivered directly by k_stack_push() via swap_data */
*data = (u32_t)_current->base.swap_data;
}
return result;
}