7832738ae9
Add a k_timeout_t type, and use it everywhere that kernel API functions were accepting a millisecond timeout argument. Instead of forcing milliseconds everywhere (which are often not integrally representable as system ticks), do the conversion to ticks at the point where the timeout is created. This avoids an extra unit conversion in some application code, and allows us to express the timeout in units other than milliseconds to achieve greater precision.

The existing K_MSEC() et al. macros now return initializers for a k_timeout_t. The K_NO_WAIT and K_FOREVER constants have now become k_timeout_t values, which means they cannot be operated on as integers. Applications which have their own APIs that need to inspect these vs. user-provided timeouts can now use a K_TIMEOUT_EQ() predicate to test for equality.

Timer drivers, which receive an integer tick count in their z_clock_set_timeout() functions, now use the integer-valued K_TICKS_FOREVER constant instead of K_FOREVER.

For the initial release, to preserve source compatibility, a CONFIG_LEGACY_TIMEOUT_API kconfig is provided. When true, k_timeout_t remains a compatible 32 bit value that will work with any legacy Zephyr application.

Some subsystems present timeout (or timeout-like) values to their own users as APIs that re-use the kernel's own constants and conventions. These will require some minor design work to adapt to the new scheme (in most cases just using k_timeout_t directly in their own API), and they have not been changed in this patch; instead they select CONFIG_LEGACY_TIMEOUT_API via kconfig. These subsystems include: CAN Bus, the Microbit display driver, I2S, LoRa modem drivers, the UART Async API, Video hardware drivers, the console subsystem, and the network buffer abstraction.

k_sleep() now takes a k_timeout_t argument, with a k_msleep() variant provided that works identically to the original API.

Most of the changes here are just type/configuration management and documentation, but there are logic changes in mempool, where a loop that used a timeout numerically has been reworked using a new z_timeout_end_calc() predicate. Also in queue.c, when POLL was enabled, a similar loop was needlessly used to retry the k_poll() call after a spurious failure. But k_poll() does not fail spuriously, so the loop was removed.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
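The snippet below is a minimal sketch (not part of this patch) of how the new timeout conventions look from application code. The stack object "demo_stack" and the 100 ms budget are made up for illustration; K_MSEC(), K_SECONDS(), K_NO_WAIT, K_TIMEOUT_EQ(), k_sleep() and k_msleep() are the kernel symbols described above.

/* Sketch only: illustrates the k_timeout_t conventions described in the
 * commit message.  "demo_stack" and the timeout values are hypothetical.
 */
#include <kernel.h>

K_STACK_DEFINE(demo_stack, 8);

void consumer(void)
{
	stack_data_t item;

	/* Timeouts are built at the call site; K_MSEC() converts to ticks
	 * when the timeout is created, not inside the kernel.
	 */
	if (k_stack_pop(&demo_stack, &item, K_MSEC(100)) == -EAGAIN) {
		/* timed out waiting for an entry */
	}

	/* K_NO_WAIT and K_FOREVER are k_timeout_t values, so equality is
	 * tested with the K_TIMEOUT_EQ() predicate rather than '=='.
	 */
	k_timeout_t t = K_NO_WAIT;

	if (K_TIMEOUT_EQ(t, K_NO_WAIT)) {
		/* non-blocking path */
	}

	/* k_sleep() now takes a k_timeout_t; k_msleep() keeps the old
	 * integer-millisecond convention.
	 */
	k_sleep(K_SECONDS(1));
	k_msleep(1000);
}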
/*
 * Copyright (c) 2010-2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @brief fixed-size stack object
 */

#include <kernel.h>
#include <kernel_structs.h>
#include <debug/object_tracing_common.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <ksched.h>
#include <wait_q.h>
#include <sys/check.h>
#include <init.h>
#include <syscall_handler.h>
#include <kernel_internal.h>

#ifdef CONFIG_OBJECT_TRACING

struct k_stack *_trace_list_k_stack;

/*
 * Complete initialization of statically defined stacks.
 */
static int init_stack_module(struct device *dev)
{
	ARG_UNUSED(dev);

	Z_STRUCT_SECTION_FOREACH(k_stack, stack) {
		SYS_TRACING_OBJ_INIT(k_stack, stack);
	}
	return 0;
}

SYS_INIT(init_stack_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);

#endif /* CONFIG_OBJECT_TRACING */

void k_stack_init(struct k_stack *stack, stack_data_t *buffer,
		  u32_t num_entries)
{
	z_waitq_init(&stack->wait_q);
	stack->lock = (struct k_spinlock) {};
	stack->next = stack->base = buffer;
	stack->top = stack->base + num_entries;

	SYS_TRACING_OBJ_INIT(k_stack, stack);
	z_object_init(stack);
}

s32_t z_impl_k_stack_alloc_init(struct k_stack *stack, u32_t num_entries)
{
	void *buffer;
	s32_t ret;

	buffer = z_thread_malloc(num_entries * sizeof(stack_data_t));
	if (buffer != NULL) {
		k_stack_init(stack, buffer, num_entries);
		stack->flags = K_STACK_FLAG_ALLOC;
		ret = (s32_t)0;
	} else {
		ret = -ENOMEM;
	}

	return ret;
}

#ifdef CONFIG_USERSPACE
static inline s32_t z_vrfy_k_stack_alloc_init(struct k_stack *stack,
					      u32_t num_entries)
{
	Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(stack, K_OBJ_STACK));
	Z_OOPS(Z_SYSCALL_VERIFY(num_entries > 0));
	return z_impl_k_stack_alloc_init(stack, num_entries);
}
#include <syscalls/k_stack_alloc_init_mrsh.c>
#endif

int k_stack_cleanup(struct k_stack *stack)
{
	CHECKIF(z_waitq_head(&stack->wait_q) != NULL) {
		return -EAGAIN;
	}

	if ((stack->flags & K_STACK_FLAG_ALLOC) != (u8_t)0) {
		k_free(stack->base);
		stack->base = NULL;
		stack->flags &= ~K_STACK_FLAG_ALLOC;
	}
	return 0;
}

int z_impl_k_stack_push(struct k_stack *stack, stack_data_t data)
{
	struct k_thread *first_pending_thread;
	k_spinlock_key_t key;

	CHECKIF(stack->next == stack->top) {
		return -ENOMEM;
	}

	key = k_spin_lock(&stack->lock);

	first_pending_thread = z_unpend_first_thread(&stack->wait_q);

	if (first_pending_thread != NULL) {
		z_ready_thread(first_pending_thread);

		z_thread_return_value_set_with_data(first_pending_thread,
						    0, (void *)data);
		z_reschedule(&stack->lock, key);
	} else {
		*(stack->next) = data;
		stack->next++;
		k_spin_unlock(&stack->lock, key);
	}

	return 0;
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_stack_push(struct k_stack *stack, stack_data_t data)
{
	Z_OOPS(Z_SYSCALL_OBJ(stack, K_OBJ_STACK));

	return z_impl_k_stack_push(stack, data);
}
#include <syscalls/k_stack_push_mrsh.c>
#endif

int z_impl_k_stack_pop(struct k_stack *stack, stack_data_t *data,
		       k_timeout_t timeout)
{
	k_spinlock_key_t key;
	int result;

	key = k_spin_lock(&stack->lock);

	if (likely(stack->next > stack->base)) {
		stack->next--;
		*data = *(stack->next);
		k_spin_unlock(&stack->lock, key);
		return 0;
	}

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		k_spin_unlock(&stack->lock, key);
		return -EBUSY;
	}

	result = z_pend_curr(&stack->lock, key, &stack->wait_q, timeout);
	if (result == -EAGAIN) {
		return -EAGAIN;
	}

	*data = (stack_data_t)_current->base.swap_data;
	return 0;
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_stack_pop(struct k_stack *stack,
				     stack_data_t *data, k_timeout_t timeout)
{
	Z_OOPS(Z_SYSCALL_OBJ(stack, K_OBJ_STACK));
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(data, sizeof(stack_data_t)));
	return z_impl_k_stack_pop(stack, data, timeout);
}
#include <syscalls/k_stack_pop_mrsh.c>
#endif