/*
 * Copyright (c) 2017 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <string.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/sys/rb.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/sys/sys_io.h>
#include <ksched.h>
#include <zephyr/syscall.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <stdbool.h>
#include <zephyr/app_memory/app_memdomain.h>
#include <zephyr/sys/libc-hooks.h>
#include <zephyr/sys/mutex.h>
#include <inttypes.h>
#include <zephyr/linker/linker-defs.h>

#ifdef Z_LIBC_PARTITION_EXISTS
K_APPMEM_PARTITION_DEFINE(z_libc_partition);
#endif /* Z_LIBC_PARTITION_EXISTS */

/* TODO: Find a better place to put this. Since we pull the entire
 * lib..__modules__crypto__mbedtls.a globals into app shared memory
 * section, we can't put this in zephyr_init.c of the mbedtls module.
 */
#ifdef CONFIG_MBEDTLS
K_APPMEM_PARTITION_DEFINE(k_mbedtls_partition);
#endif /* CONFIG_MBEDTLS */

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

/* The original synchronization strategy made heavy use of recursive
 * irq_locking, which ports poorly to spinlocks which are
 * non-recursive. Rather than try to redesign as part of
 * spinlockification, this uses multiple locks to preserve the
 * original semantics exactly. The locks are named for the data they
 * protect where possible, or just for the code that uses them where
 * not.
 */
#ifdef CONFIG_DYNAMIC_OBJECTS
static struct k_spinlock lists_lock;   /* kobj dlist */
static struct k_spinlock objfree_lock; /* k_object_free */

#ifdef CONFIG_GEN_PRIV_STACKS
/* On ARM & ARC MPU we may have two different alignment requirements
 * when dynamically allocating thread stacks, one for the privileged
 * stack and the other for the user stack, so we need to account for
 * the worst alignment scenario and reserve space for that.
 */
#if defined(CONFIG_ARM_MPU) || defined(CONFIG_ARC_MPU)
#define STACK_ELEMENT_DATA_SIZE(size) \
	(sizeof(struct z_stack_data) + CONFIG_PRIVILEGED_STACK_SIZE + \
	Z_THREAD_STACK_OBJ_ALIGN(size) + K_THREAD_STACK_LEN(size))
#else
#define STACK_ELEMENT_DATA_SIZE(size) (sizeof(struct z_stack_data) + \
	K_THREAD_STACK_LEN(size))
#endif /* CONFIG_ARM_MPU || CONFIG_ARC_MPU */
#else
#define STACK_ELEMENT_DATA_SIZE(size) K_THREAD_STACK_LEN(size)
#endif /* CONFIG_GEN_PRIV_STACKS */

#endif /* CONFIG_DYNAMIC_OBJECTS */
static struct k_spinlock obj_lock;     /* kobj struct data */

#define MAX_THREAD_BITS (CONFIG_MAX_THREAD_BYTES * 8)
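
/* Each thread that may own kernel objects is assigned a small integer
 * index (see thread_index_get()). Every kernel object tracks which
 * threads have been granted access in its 'perms' bitfield of
 * CONFIG_MAX_THREAD_BYTES bytes, i.e. MAX_THREAD_BITS bits, one bit
 * per thread index.
 */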

#ifdef CONFIG_DYNAMIC_OBJECTS
extern uint8_t _thread_idx_map[CONFIG_MAX_THREAD_BYTES];
#endif /* CONFIG_DYNAMIC_OBJECTS */

static void clear_perms_cb(struct k_object *ko, void *ctx_ptr);

const char *otype_to_str(enum k_objects otype)
{
	const char *ret;

	/* -fdata-sections doesn't work right except in very very recent
	 * GCC and these literal strings would appear in the binary even if
	 * otype_to_str was omitted by the linker
	 */
#ifdef CONFIG_LOG
	switch (otype) {
	/* otype-to-str.h is generated automatically during build by
	 * gen_kobject_list.py
	 */
	case K_OBJ_ANY:
		ret = "generic";
		break;
#include <otype-to-str.h>
	default:
		ret = "?";
		break;
	}
#else
	ARG_UNUSED(otype);
	ret = NULL;
#endif /* CONFIG_LOG */
	return ret;
}

struct perm_ctx {
	int parent_id;
	int child_id;
	struct k_thread *parent;
};

#ifdef CONFIG_GEN_PRIV_STACKS
/* See write_gperf_table() in scripts/build/gen_kobject_list.py. The privilege
 * mode stacks are allocated as an array. The base of the array is
 * aligned to Z_PRIVILEGE_STACK_ALIGN, and all members must be as well.
 */
uint8_t *z_priv_stack_find(k_thread_stack_t *stack)
{
	struct k_object *obj = k_object_find(stack);

	__ASSERT(obj != NULL, "stack object not found");
	__ASSERT(obj->type == K_OBJ_THREAD_STACK_ELEMENT,
		 "bad stack object");

	return obj->data.stack_data->priv;
}
#endif /* CONFIG_GEN_PRIV_STACKS */

#ifdef CONFIG_DYNAMIC_OBJECTS

/*
 * Note that dyn_obj->data is where the kernel object resides
 * so it is the one that actually needs to be aligned.
 * Due to the need to get the fields inside struct dyn_obj
 * from kernel object pointers (i.e. from data[]), the offset
 * from data[] needs to be fixed at build time. Therefore,
 * data[] is declared with __aligned(), such that when dyn_obj
 * is allocated with alignment, data[] is also aligned.
 * Due to this requirement, data[] needs to be aligned with
 * the maximum alignment needed for all kernel objects
 * (hence the following DYN_OBJ_DATA_ALIGN).
 */
#ifdef ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT
#define DYN_OBJ_DATA_ALIGN_K_THREAD (ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT)
#else
#define DYN_OBJ_DATA_ALIGN_K_THREAD (sizeof(void *))
#endif /* ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT */

#ifdef CONFIG_DYNAMIC_THREAD_STACK_SIZE
#ifndef CONFIG_MPU_STACK_GUARD
#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
	Z_THREAD_STACK_OBJ_ALIGN(CONFIG_PRIVILEGED_STACK_SIZE)
#else
#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
	Z_THREAD_STACK_OBJ_ALIGN(CONFIG_DYNAMIC_THREAD_STACK_SIZE)
#endif /* !CONFIG_MPU_STACK_GUARD */
#else
#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
	Z_THREAD_STACK_OBJ_ALIGN(ARCH_STACK_PTR_ALIGN)
#endif /* CONFIG_DYNAMIC_THREAD_STACK_SIZE */

#define DYN_OBJ_DATA_ALIGN \
	MAX(DYN_OBJ_DATA_ALIGN_K_THREAD, (sizeof(void *)))

struct dyn_obj {
	struct k_object kobj;
	sys_dnode_t dobj_list;

	/* The object itself */
	void *data;
};

extern struct k_object *z_object_gperf_find(const void *obj);
extern void z_object_gperf_wordlist_foreach(_wordlist_cb_func_t func,
					     void *context);

/*
 * Linked list of allocated kernel objects, for iteration over all allocated
 * objects (and potentially deleting them during iteration).
 */
static sys_dlist_t obj_list = SYS_DLIST_STATIC_INIT(&obj_list);

/*
 * TODO: Write some hash table code that will replace obj_list.
 */

static size_t obj_size_get(enum k_objects otype)
{
	size_t ret;

	switch (otype) {
#include <otype-to-size.h>
	default:
		ret = sizeof(const struct device);
		break;
	}

	return ret;
}

static size_t obj_align_get(enum k_objects otype)
{
	size_t ret;

	switch (otype) {
	case K_OBJ_THREAD:
#ifdef ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT
		ret = ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT;
#else
		ret = __alignof(struct dyn_obj);
#endif /* ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT */
		break;
	default:
		ret = __alignof(struct dyn_obj);
		break;
	}

	return ret;
}

static struct dyn_obj *dyn_object_find(void *obj)
{
	struct dyn_obj *node;
	k_spinlock_key_t key;

	/* For any dynamically allocated kernel object, the object
	 * pointer is just a member of the containing struct dyn_obj,
	 * so just a little arithmetic is necessary to locate the
	 * corresponding struct dyn_obj
	 */
	key = k_spin_lock(&lists_lock);

	SYS_DLIST_FOR_EACH_CONTAINER(&obj_list, node, dobj_list) {
		if (node->kobj.name == obj) {
			goto end;
		}
	}

	/* No object found */
	node = NULL;

end:
	k_spin_unlock(&lists_lock, key);

	return node;
}
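
/* _thread_idx_map is a bitmap of *free* thread indexes: a set bit means
 * that index is still available. thread_idx_alloc() below clears the bit
 * when it hands an index out, and thread_idx_free() sets it again.
 */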

/**
 * @internal
 *
 * @brief Allocate a new thread index for a new thread.
 *
 * This finds an unused thread index that can be assigned to a new
 * thread. If too many threads have been allocated, the kernel will
 * run out of indexes and this function will fail.
 *
 * Note that if an unused index is found, that index will be marked as
 * used after return of this function.
 *
 * @param tidx The new thread index if successful
 *
 * @return true if successful, false if failed
 **/
static bool thread_idx_alloc(uintptr_t *tidx)
{
	int i;
	int idx;
	int base;

	base = 0;
	for (i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) {
		idx = find_lsb_set(_thread_idx_map[i]);

		if (idx != 0) {
			*tidx = base + (idx - 1);

			sys_bitfield_clear_bit((mem_addr_t)_thread_idx_map,
					       *tidx);

			/* Clear permission from all objects */
			k_object_wordlist_foreach(clear_perms_cb,
						  (void *)*tidx);

			return true;
		}

		base += 8;
	}

	return false;
}

/**
 * @internal
 *
 * @brief Free a thread index.
 *
 * This frees a thread index so it can be used by another
 * thread.
 *
 * @param tidx The thread index to be freed
 **/
static void thread_idx_free(uintptr_t tidx)
{
	/* To prevent leaked permission when index is recycled */
	k_object_wordlist_foreach(clear_perms_cb, (void *)tidx);

	sys_bitfield_set_bit((mem_addr_t)_thread_idx_map, tidx);
}
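
/* For K_OBJ_THREAD_STACK_ELEMENT, dynamic_object_create() below allocates
 * one buffer of STACK_ELEMENT_DATA_SIZE(size) bytes and carves it up:
 * with CONFIG_GEN_PRIV_STACKS on ARM/ARC MPU the privileged stack sits at
 * the start, the aligned user stack follows, and a struct z_stack_data
 * trailer at the end records the privileged stack pointer and the size.
 * (Descriptive summary of the code below, not a normative layout.)
 */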
static struct k_object *dynamic_object_create(enum k_objects otype, size_t align,
					      size_t size)
{
	struct dyn_obj *dyn;

	dyn = z_thread_aligned_alloc(align, sizeof(struct dyn_obj));
	if (dyn == NULL) {
		return NULL;
	}

	if (otype == K_OBJ_THREAD_STACK_ELEMENT) {
		size_t adjusted_size;

		if (size == 0) {
			k_free(dyn);
			return NULL;
		}

		adjusted_size = STACK_ELEMENT_DATA_SIZE(size);
		dyn->data = z_thread_aligned_alloc(DYN_OBJ_DATA_ALIGN_K_THREAD_STACK,
						   adjusted_size);
		if (dyn->data == NULL) {
			k_free(dyn);
			return NULL;
		}

#ifdef CONFIG_GEN_PRIV_STACKS
		struct z_stack_data *stack_data = (struct z_stack_data *)
			((uint8_t *)dyn->data + adjusted_size - sizeof(*stack_data));
		stack_data->priv = (uint8_t *)dyn->data;
		stack_data->size = adjusted_size;
		dyn->kobj.data.stack_data = stack_data;
#if defined(CONFIG_ARM_MPU) || defined(CONFIG_ARC_MPU)
		dyn->kobj.name = (void *)ROUND_UP(
			((uint8_t *)dyn->data + CONFIG_PRIVILEGED_STACK_SIZE),
			Z_THREAD_STACK_OBJ_ALIGN(size));
#else
		dyn->kobj.name = dyn->data;
#endif /* CONFIG_ARM_MPU || CONFIG_ARC_MPU */
#else
		dyn->kobj.name = dyn->data;
		dyn->kobj.data.stack_size = adjusted_size;
#endif /* CONFIG_GEN_PRIV_STACKS */
	} else {
		dyn->data = z_thread_aligned_alloc(align, obj_size_get(otype) + size);
		if (dyn->data == NULL) {
			/* Free the container, not the failed allocation */
			k_free(dyn);
			return NULL;
		}
		dyn->kobj.name = dyn->data;
	}

	dyn->kobj.type = otype;
	dyn->kobj.flags = 0;
	(void)memset(dyn->kobj.perms, 0, CONFIG_MAX_THREAD_BYTES);

	k_spinlock_key_t key = k_spin_lock(&lists_lock);

	sys_dlist_append(&obj_list, &dyn->dobj_list);
	k_spin_unlock(&lists_lock, key);

	return &dyn->kobj;
}

struct k_object *k_object_create_dynamic_aligned(size_t align, size_t size)
{
	struct k_object *obj = dynamic_object_create(K_OBJ_ANY, align, size);

	if (obj == NULL) {
		LOG_ERR("could not allocate kernel object, out of memory");
	}

	return obj;
}

static void *z_object_alloc(enum k_objects otype, size_t size)
{
	struct k_object *zo;
	uintptr_t tidx = 0;

	if (otype <= K_OBJ_ANY || otype >= K_OBJ_LAST) {
		LOG_ERR("bad object type %d requested", otype);
		return NULL;
	}

	switch (otype) {
	case K_OBJ_THREAD:
		if (!thread_idx_alloc(&tidx)) {
			LOG_ERR("out of free thread indexes");
			return NULL;
		}
		break;
	/* The following are currently not allowed at all */
	case K_OBJ_FUTEX:      /* Lives in user memory */
	case K_OBJ_SYS_MUTEX:  /* Lives in user memory */
	case K_OBJ_NET_SOCKET: /* Indeterminate size */
		LOG_ERR("forbidden object type '%s' requested",
			otype_to_str(otype));
		return NULL;
	default:
		/* Remainder within bounds are permitted */
		break;
	}

	zo = dynamic_object_create(otype, obj_align_get(otype), size);
	if (zo == NULL) {
		if (otype == K_OBJ_THREAD) {
			thread_idx_free(tidx);
		}
		return NULL;
	}

	if (otype == K_OBJ_THREAD) {
		zo->data.thread_id = tidx;
	}

	/* The allocating thread implicitly gets permission on kernel objects
	 * that it allocates
	 */
	k_thread_perms_set(zo, _current);

	/* Activates reference counting logic for automatic disposal when
	 * all permissions have been revoked
	 */
	zo->flags |= K_OBJ_FLAG_ALLOC;

	return zo->name;
}

void *z_impl_k_object_alloc(enum k_objects otype)
{
	return z_object_alloc(otype, 0);
}

void *z_impl_k_object_alloc_size(enum k_objects otype, size_t size)
{
	return z_object_alloc(otype, size);
}
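
/* Usage sketch (supervisor-mode caller; the names here are illustrative,
 * not from this file): allocate an object, initialize it, then grant a
 * user thread access before it makes system calls on it:
 *
 *	struct k_sem *sem = k_object_alloc(K_OBJ_SEM);
 *
 *	k_sem_init(sem, 0, 1);
 *	k_object_access_grant(sem, user_thread);
 */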

void k_object_free(void *obj)
{
	struct dyn_obj *dyn;

	/* This function is intentionally not exposed to user mode.
	 * There's currently no robust way to track that an object isn't
	 * being used by some other thread
	 */

	k_spinlock_key_t key = k_spin_lock(&objfree_lock);

	dyn = dyn_object_find(obj);
	if (dyn != NULL) {
		sys_dlist_remove(&dyn->dobj_list);

		if (dyn->kobj.type == K_OBJ_THREAD) {
			thread_idx_free(dyn->kobj.data.thread_id);
		}
	}
	k_spin_unlock(&objfree_lock, key);

	if (dyn != NULL) {
		k_free(dyn->data);
		k_free(dyn);
	}
}
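
/* Look up the struct k_object metadata for 'obj': first in the gperf
 * table generated for statically declared objects, then, failing that,
 * in the runtime list of dynamically allocated objects.
 */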
struct k_object *k_object_find(const void *obj)
{
	struct k_object *ret;

	ret = z_object_gperf_find(obj);

	if (ret == NULL) {
		struct dyn_obj *dyn;

		/* The cast to pointer-to-non-const violates MISRA
		 * 11.8 but is justified since we know dynamic objects
		 * were not declared with a const qualifier.
		 */
		dyn = dyn_object_find((void *)obj);
		if (dyn != NULL) {
			ret = &dyn->kobj;
		}
	}

	return ret;
}

void k_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
{
	struct dyn_obj *obj, *next;

	z_object_gperf_wordlist_foreach(func, context);

	k_spinlock_key_t key = k_spin_lock(&lists_lock);

	SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&obj_list, obj, next, dobj_list) {
		func(&obj->kobj, context);
	}
	k_spin_unlock(&lists_lock, key);
}
#endif /* CONFIG_DYNAMIC_OBJECTS */

static unsigned int thread_index_get(struct k_thread *thread)
{
	struct k_object *ko;

	ko = k_object_find(thread);

	if (ko == NULL) {
		return -1;
	}

	return ko->data.thread_id;
}
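
/* Clear permission bit 'index' on 'ko'; if that was the last bit set and
 * the object was dynamically allocated (K_OBJ_FLAG_ALLOC), run any
 * type-specific cleanup, unlink it from obj_list and free it.
 */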
static void unref_check(struct k_object *ko, uintptr_t index)
{
	k_spinlock_key_t key = k_spin_lock(&obj_lock);

	sys_bitfield_clear_bit((mem_addr_t)&ko->perms, index);

#ifdef CONFIG_DYNAMIC_OBJECTS
	if ((ko->flags & K_OBJ_FLAG_ALLOC) == 0U) {
		/* skip unref check for static kernel object */
		goto out;
	}

	void *vko = ko;

	struct dyn_obj *dyn = CONTAINER_OF(vko, struct dyn_obj, kobj);

	__ASSERT(IS_PTR_ALIGNED(dyn, struct dyn_obj), "unaligned z_object");

	for (int i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) {
		if (ko->perms[i] != 0U) {
			goto out;
		}
	}

	/* This object has no more references. Some objects may have
	 * dynamically allocated resources, require cleanup, or need to be
	 * marked as uninitialized when all references are gone. What
	 * specifically needs to happen depends on the object type.
	 */
	switch (ko->type) {
#ifdef CONFIG_PIPES
	case K_OBJ_PIPE:
		k_pipe_cleanup((struct k_pipe *)ko->name);
		break;
#endif /* CONFIG_PIPES */
	case K_OBJ_MSGQ:
		k_msgq_cleanup((struct k_msgq *)ko->name);
		break;
	case K_OBJ_STACK:
		k_stack_cleanup((struct k_stack *)ko->name);
		break;
	default:
		/* Nothing to do */
		break;
	}

	sys_dlist_remove(&dyn->dobj_list);
	k_free(dyn->data);
	k_free(dyn);
out:
#endif /* CONFIG_DYNAMIC_OBJECTS */
	k_spin_unlock(&obj_lock, key);
}

static void wordlist_cb(struct k_object *ko, void *ctx_ptr)
{
	struct perm_ctx *ctx = (struct perm_ctx *)ctx_ptr;

	if (sys_bitfield_test_bit((mem_addr_t)&ko->perms, ctx->parent_id) &&
	    (struct k_thread *)ko->name != ctx->parent) {
		sys_bitfield_set_bit((mem_addr_t)&ko->perms, ctx->child_id);
	}
}
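
/* Give the child thread access to every object its parent can access,
 * except for the parent's own thread object (wordlist_cb() above only
 * sets the child_id bit where the parent_id bit is already set).
 */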
void k_thread_perms_inherit(struct k_thread *parent, struct k_thread *child)
{
	struct perm_ctx ctx = {
		thread_index_get(parent),
		thread_index_get(child),
		parent
	};

	if ((ctx.parent_id != -1) && (ctx.child_id != -1)) {
		k_object_wordlist_foreach(wordlist_cb, &ctx);
	}
}

void k_thread_perms_set(struct k_object *ko, struct k_thread *thread)
{
	int index = thread_index_get(thread);

	if (index != -1) {
		sys_bitfield_set_bit((mem_addr_t)&ko->perms, index);
	}
}

void k_thread_perms_clear(struct k_object *ko, struct k_thread *thread)
{
	int index = thread_index_get(thread);

	if (index != -1) {
		sys_bitfield_clear_bit((mem_addr_t)&ko->perms, index);
		unref_check(ko, index);
	}
}

static void clear_perms_cb(struct k_object *ko, void *ctx_ptr)
{
	uintptr_t id = (uintptr_t)ctx_ptr;

	unref_check(ko, id);
}

void k_thread_perms_all_clear(struct k_thread *thread)
{
	uintptr_t index = thread_index_get(thread);

	if ((int)index != -1) {
		k_object_wordlist_foreach(clear_perms_cb, (void *)index);
	}
}

static int thread_perms_test(struct k_object *ko)
{
	int index;

	if ((ko->flags & K_OBJ_FLAG_PUBLIC) != 0U) {
		return 1;
	}

	index = thread_index_get(_current);
	if (index != -1) {
		return sys_bitfield_test_bit((mem_addr_t)&ko->perms, index);
	}
	return 0;
}

static void dump_permission_error(struct k_object *ko)
{
	int index = thread_index_get(_current);

	LOG_ERR("thread %p (%d) does not have permission on %s %p",
		_current, index,
		otype_to_str(ko->type), ko->name);
	LOG_HEXDUMP_ERR(ko->perms, sizeof(ko->perms), "permission bitmap");
}

void k_object_dump_error(int retval, const void *obj, struct k_object *ko,
			 enum k_objects otype)
{
	switch (retval) {
	case -EBADF:
		LOG_ERR("%p is not a valid %s", obj, otype_to_str(otype));
		if (ko == NULL) {
			LOG_ERR("address is not a known kernel object");
		} else {
			LOG_ERR("address is actually a %s",
				otype_to_str(ko->type));
		}
		break;
	case -EPERM:
		dump_permission_error(ko);
		break;
	case -EINVAL:
		LOG_ERR("%p used before initialization", obj);
		break;
	case -EADDRINUSE:
		LOG_ERR("%p %s in use", obj, otype_to_str(otype));
		break;
	default:
		/* Not handled error */
		break;
	}
}

void z_impl_k_object_access_grant(const void *object, struct k_thread *thread)
{
	struct k_object *ko = k_object_find(object);

	if (ko != NULL) {
		k_thread_perms_set(ko, thread);
	}
}

void k_object_access_revoke(const void *object, struct k_thread *thread)
{
	struct k_object *ko = k_object_find(object);

	if (ko != NULL) {
		k_thread_perms_clear(ko, thread);
	}
}

void z_impl_k_object_release(const void *object)
{
	k_object_access_revoke(object, _current);
}

void k_object_access_all_grant(const void *object)
{
	struct k_object *ko = k_object_find(object);

	if (ko != NULL) {
		ko->flags |= K_OBJ_FLAG_PUBLIC;
	}
}

int k_object_validate(struct k_object *ko, enum k_objects otype,
		      enum _obj_init_check init)
{
	if (unlikely((ko == NULL) ||
		     (otype != K_OBJ_ANY && ko->type != otype))) {
		return -EBADF;
	}

	/* Manipulation of any kernel objects by a user thread requires that
	 * thread be granted access first, even for uninitialized objects
	 */
	if (unlikely(thread_perms_test(ko) == 0)) {
		return -EPERM;
	}

	/* Initialization state checks. _OBJ_INIT_ANY, we don't care */
	if (likely(init == _OBJ_INIT_TRUE)) {
		/* Object MUST be initialized */
		if (unlikely((ko->flags & K_OBJ_FLAG_INITIALIZED) == 0U)) {
			return -EINVAL;
		}
	} else if (init == _OBJ_INIT_FALSE) { /* _OBJ_INIT_FALSE case */
		/* Object MUST NOT be initialized */
		if (unlikely((ko->flags & K_OBJ_FLAG_INITIALIZED) != 0U)) {
			return -EADDRINUSE;
		}
	} else {
		/* _OBJ_INIT_ANY */
	}

	return 0;
}

void k_object_init(const void *obj)
{
	struct k_object *ko;

	/* By the time we get here, if the caller was from userspace, all the
	 * necessary checks have been done in k_object_validate(), which takes
	 * place before the object is initialized.
	 *
	 * This function runs after the object has been initialized and
	 * finalizes it
	 */

	ko = k_object_find(obj);
	if (ko == NULL) {
		/* Supervisor threads can ignore rules about kernel objects
		 * and may declare them on stacks, etc. Such objects will never
		 * be usable from userspace, but we shouldn't explode.
		 */
		return;
	}

	/* Allows non-initialization system calls to be made on this object */
	ko->flags |= K_OBJ_FLAG_INITIALIZED;
}

void k_object_recycle(const void *obj)
{
	struct k_object *ko = k_object_find(obj);

	if (ko != NULL) {
		(void)memset(ko->perms, 0, sizeof(ko->perms));
		k_thread_perms_set(ko, _current);
		ko->flags |= K_OBJ_FLAG_INITIALIZED;
	}
}

void k_object_uninit(const void *obj)
{
	struct k_object *ko;

	/* See comments in k_object_init() */
	ko = k_object_find(obj);
	if (ko == NULL) {
		return;
	}

	ko->flags &= ~K_OBJ_FLAG_INITIALIZED;
}

/*
 * Copy to/from helper functions used in syscall handlers
 */
void *k_usermode_alloc_from_copy(const void *src, size_t size)
{
	void *dst = NULL;

	/* Does the caller in user mode have access to read this memory? */
	if (K_SYSCALL_MEMORY_READ(src, size)) {
		goto out_err;
	}

	dst = z_thread_malloc(size);
	if (dst == NULL) {
		LOG_ERR("out of thread resource pool memory (%zu)", size);
		goto out_err;
	}

	(void)memcpy(dst, src, size);
out_err:
	return dst;
}

static int user_copy(void *dst, const void *src, size_t size, bool to_user)
{
	int ret = EFAULT;

	/* Does the caller in user mode have access to this memory? */
	if (to_user ? K_SYSCALL_MEMORY_WRITE(dst, size) :
		      K_SYSCALL_MEMORY_READ(src, size)) {
		goto out_err;
	}

	(void)memcpy(dst, src, size);
	ret = 0;
out_err:
	return ret;
}
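
/* These copy helpers return 0 on success and a positive errno value
 * (EFAULT above, EFAULT or EINVAL in the string helpers below) on
 * failure.
 */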
int k_usermode_from_copy(void *dst, const void *src, size_t size)
{
	return user_copy(dst, src, size, false);
}

int k_usermode_to_copy(void *dst, const void *src, size_t size)
{
	return user_copy(dst, src, size, true);
}
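
/* Usage sketch (hypothetical handler code, not part of this file): copy a
 * user-supplied string into kernel memory before parsing it, then release
 * the buffer when done:
 *
 *	char *name = k_usermode_string_alloc_copy(user_name, 32);
 *
 *	if (name == NULL) {
 *		return -EFAULT;
 *	}
 *	...
 *	k_free(name);
 */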

char *k_usermode_string_alloc_copy(const char *src, size_t maxlen)
{
	size_t actual_len;
	int err;
	char *ret = NULL;

	actual_len = k_usermode_string_nlen(src, maxlen, &err);
	if (err != 0) {
		goto out;
	}
	if (actual_len == maxlen) {
		/* Not NULL terminated */
		LOG_ERR("string too long %p (%zu)", src, actual_len);
		goto out;
	}
	if (size_add_overflow(actual_len, 1, &actual_len)) {
		LOG_ERR("overflow");
		goto out;
	}

	ret = k_usermode_alloc_from_copy(src, actual_len);

	/* Someone may have modified the source string during the above
	 * checks. Ensure what we actually copied is still terminated
	 * properly.
	 */
	if (ret != NULL) {
		ret[actual_len - 1U] = '\0';
	}
out:
	return ret;
}

int k_usermode_string_copy(char *dst, const char *src, size_t maxlen)
{
	size_t actual_len;
	int ret, err;

	actual_len = k_usermode_string_nlen(src, maxlen, &err);
	if (err != 0) {
		ret = EFAULT;
		goto out;
	}
	if (actual_len == maxlen) {
		/* Not NULL terminated */
		LOG_ERR("string too long %p (%zu)", src, actual_len);
		ret = EINVAL;
		goto out;
	}
	if (size_add_overflow(actual_len, 1, &actual_len)) {
		LOG_ERR("overflow");
		ret = EINVAL;
		goto out;
	}

	ret = k_usermode_from_copy(dst, src, actual_len);

	/* See comment above in k_usermode_string_alloc_copy() */
	dst[actual_len - 1] = '\0';
out:
	return ret;
}

/*
 * Application memory region initialization
 */

extern char __app_shmem_regions_start[];
extern char __app_shmem_regions_end[];

static int app_shmem_bss_zero(void)
{
	struct z_app_region *region, *end;

	end = (struct z_app_region *)&__app_shmem_regions_end;
	region = (struct z_app_region *)&__app_shmem_regions_start;

	for ( ; region < end; region++) {
#if defined(CONFIG_DEMAND_PAGING) && !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
		/* When BSS sections are not present at boot, we need to wait for
		 * paging mechanism to be initialized before we can zero out BSS.
		 */
		extern bool z_sys_post_kernel;
		bool do_clear = z_sys_post_kernel;

		/* During pre-kernel init, z_sys_post_kernel == false, but
		 * with pinned rodata region, so clear. Otherwise skip.
		 * In post-kernel init, z_sys_post_kernel == true,
		 * skip those in pinned rodata region as they have already
		 * been cleared and possibly already in use. Otherwise clear.
		 */
		if (((uint8_t *)region->bss_start >= (uint8_t *)_app_smem_pinned_start) &&
		    ((uint8_t *)region->bss_start < (uint8_t *)_app_smem_pinned_end)) {
			do_clear = !do_clear;
		}

		if (do_clear)
#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */
		{
			(void)memset(region->bss_start, 0, region->bss_size);
		}
	}

	return 0;
}

SYS_INIT_NAMED(app_shmem_bss_zero_pre, app_shmem_bss_zero,
	       PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);

#if defined(CONFIG_DEMAND_PAGING) && !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
/* When BSS sections are not present at boot, we need to wait for
 * paging mechanism to be initialized before we can zero out BSS.
 */
SYS_INIT_NAMED(app_shmem_bss_zero_post, app_shmem_bss_zero,
	       POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */

/*
 * Default handlers if otherwise unimplemented
 */

static uintptr_t handler_bad_syscall(uintptr_t bad_id, uintptr_t arg2,
				     uintptr_t arg3, uintptr_t arg4,
				     uintptr_t arg5, uintptr_t arg6,
				     void *ssf)
{
	LOG_ERR("Bad system call id %" PRIuPTR " invoked", bad_id);
	arch_syscall_oops(ssf);
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

static uintptr_t handler_no_syscall(uintptr_t arg1, uintptr_t arg2,
				    uintptr_t arg3, uintptr_t arg4,
				    uintptr_t arg5, uintptr_t arg6, void *ssf)
{
	LOG_ERR("Unimplemented system call");
	arch_syscall_oops(ssf);
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

#include <syscall_dispatch.c>