2016-10-05 19:01:54 +02:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2015-2016 Intel Corporation.
|
|
|
|
*
|
2017-01-19 02:01:01 +01:00
|
|
|
* SPDX-License-Identifier: Apache-2.0
|
2016-10-05 19:01:54 +02:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include <string.h>
|
|
|
|
#include <device.h>
|
2019-06-25 18:25:32 +02:00
|
|
|
#include <sys/atomic.h>
|
2018-11-12 19:25:12 +01:00
|
|
|
#include <syscall_handler.h>
|
2016-10-05 19:01:54 +02:00
|
|
|
|
2020-03-09 11:02:20 +01:00
|
|
|
extern const struct init_entry __init_start[];
|
|
|
|
extern const struct init_entry __init_PRE_KERNEL_1_start[];
|
|
|
|
extern const struct init_entry __init_PRE_KERNEL_2_start[];
|
|
|
|
extern const struct init_entry __init_POST_KERNEL_start[];
|
|
|
|
extern const struct init_entry __init_APPLICATION_start[];
|
|
|
|
extern const struct init_entry __init_end[];
|
2016-10-05 19:01:54 +02:00
|
|
|
|
2020-01-15 17:57:29 +01:00
|
|
|
#ifdef CONFIG_SMP
|
2020-03-09 11:02:20 +01:00
|
|
|
extern const struct init_entry __init_SMP_start[];
|
2020-01-15 17:57:29 +01:00
|
|
|
#endif
|
2016-10-05 19:01:54 +02:00
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
extern const struct device __device_start[];
|
|
|
|
extern const struct device __device_end[];
|
2020-03-09 11:02:20 +01:00
|
|
|
|
2020-04-30 11:49:39 +02:00
|
|
|
extern uint32_t __device_init_status_start[];
|
|
|
|
|
2020-09-02 00:31:40 +02:00
|
|
|
#ifdef CONFIG_PM_DEVICE
|
2020-05-27 18:26:57 +02:00
|
|
|
extern uint32_t __device_busy_start[];
|
|
|
|
extern uint32_t __device_busy_end[];
|
2016-10-05 19:01:54 +02:00
|
|
|
#define DEVICE_BUSY_SIZE (__device_busy_end - __device_busy_start)
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/**
|
2020-03-09 11:02:20 +01:00
|
|
|
* @brief Execute all the init entry initialization functions at a given level
|
2016-10-05 19:01:54 +02:00
|
|
|
*
|
2020-03-09 11:02:20 +01:00
|
|
|
* @details Invokes the initialization routine for each init entry object
|
|
|
|
* created by the INIT_ENTRY_DEFINE() macro using the specified level.
|
|
|
|
* The linker script places the init entry objects in memory in the order
|
2016-10-05 19:01:54 +02:00
|
|
|
* they need to be invoked, with symbols indicating where one level leaves
|
|
|
|
* off and the next one begins.
|
|
|
|
*
|
|
|
|
* @param level init level to run.
|
|
|
|
*/
|
2020-05-27 18:26:57 +02:00
|
|
|
void z_sys_init_run_level(int32_t level)
{
	/* Start-of-level symbols emitted by the linker script, in
	 * execution order.  The entries for level N occupy the range
	 * [levels[N], levels[N + 1]), so the table is terminated with
	 * the overall __init_end marker rather than a per-level end
	 * symbol.
	 */
	static const struct init_entry *levels[] = {
		__init_PRE_KERNEL_1_start,
		__init_PRE_KERNEL_2_start,
		__init_POST_KERNEL_start,
		__init_APPLICATION_start,
#ifdef CONFIG_SMP
		__init_SMP_start,
#endif
		/* End marker */
		__init_end,
	};
	const struct init_entry *entry;

	for (entry = levels[level]; entry < levels[level+1]; entry++) {
		/* dev is NULL for plain SYS_INIT()-style entries that
		 * are not associated with a device object.
		 */
		const struct device *dev = entry->dev;

		if (dev != NULL) {
			/* NOTE(review): presumably initializes the
			 * device's kernel-object metadata so it can be
			 * referenced from user mode — confirm against
			 * z_object_init().
			 */
			z_object_init(dev);
		}

		/* The init function always runs; the failure bit is only
		 * recorded for device-backed entries.
		 */
		if ((entry->init(dev) != 0) && (dev != NULL)) {
			/* Initialization failed.
			 * Set the init status bit so device is not declared ready.
			 */
			sys_bitfield_set_bit(
				(mem_addr_t) __device_init_status_start,
				(dev - __device_start));
		}
	}
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
const struct device *z_impl_device_get_binding(const char *name)
|
2016-10-05 19:01:54 +02:00
|
|
|
{
|
2020-04-30 20:33:38 +02:00
|
|
|
const struct device *dev;
|
2016-10-05 19:01:54 +02:00
|
|
|
|
2018-02-14 23:47:11 +01:00
|
|
|
/* Split the search into two loops: in the common scenario, where
|
|
|
|
* device names are stored in ROM (and are referenced by the user
|
|
|
|
* with CONFIG_* macros), only cheap pointer comparisons will be
|
2020-03-09 11:02:20 +01:00
|
|
|
* performed. Reserve string comparisons for a fallback.
|
2018-02-14 23:47:11 +01:00
|
|
|
*/
|
2020-03-09 11:02:20 +01:00
|
|
|
for (dev = __device_start; dev != __device_end; dev++) {
|
2020-06-22 17:01:39 +02:00
|
|
|
if (z_device_ready(dev) && (dev->name == name)) {
|
2020-03-09 11:02:20 +01:00
|
|
|
return dev;
|
2018-02-14 23:47:11 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-03-09 11:02:20 +01:00
|
|
|
for (dev = __device_start; dev != __device_end; dev++) {
|
2020-06-22 17:01:39 +02:00
|
|
|
if (z_device_ready(dev) && (strcmp(name, dev->name) == 0)) {
|
2020-03-09 11:02:20 +01:00
|
|
|
return dev;
|
2016-10-05 19:01:54 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2018-11-12 19:25:12 +01:00
|
|
|
#ifdef CONFIG_USERSPACE
|
2020-04-30 20:33:38 +02:00
|
|
|
static inline const struct device *z_vrfy_device_get_binding(const char *name)
|
2018-11-12 19:25:12 +01:00
|
|
|
{
|
|
|
|
char name_copy[Z_DEVICE_MAX_NAME_LEN];
|
|
|
|
|
|
|
|
if (z_user_string_copy(name_copy, (char *)name, sizeof(name_copy))
|
|
|
|
!= 0) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
userspace: Support for split 64 bit arguments
System call arguments, at the arch layer, are single words. So
passing wider values requires splitting them into two registers at
call time. This gets even more complicated for values (e.g
k_timeout_t) that may have different sizes depending on configuration.
This patch adds a feature to gen_syscalls.py to detect functions with
wide arguments and automatically generates code to split/unsplit them.
Unfortunately the current scheme of Z_SYSCALL_DECLARE_* macros won't
work with functions like this, because for N arguments (our current
maximum N is 10) there are 2^N possible configurations of argument
widths. So this generates the complete functions for each handler and
wrapper, effectively doing in python what was originally done in the
preprocessor.
Another complexity is that traditional the z_hdlr_*() function for a
system call has taken the raw list of word arguments, which does not
work when some of those arguments must be 64 bit types. So instead of
using a single Z_SYSCALL_HANDLER macro, this splits the job of
z_hdlr_*() into two steps: An automatically-generated unmarshalling
function, z_mrsh_*(), which then calls a user-supplied verification
function z_vrfy_*(). The verification function is typesafe, and is a
simple C function with exactly the same argument and return signature
as the syscall impl function. It is also not responsible for
validating the pointers to the extra parameter array or a wide return
value, that code gets automatically generated.
This commit includes new vrfy/msrh handling for all syscalls invoked
during CI runs. Future commits will port the less testable code.
Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
2019-08-06 22:34:31 +02:00
|
|
|
return z_impl_device_get_binding(name_copy);
|
2018-11-12 19:25:12 +01:00
|
|
|
}
|
userspace: Support for split 64 bit arguments
System call arguments, at the arch layer, are single words. So
passing wider values requires splitting them into two registers at
call time. This gets even more complicated for values (e.g
k_timeout_t) that may have different sizes depending on configuration.
This patch adds a feature to gen_syscalls.py to detect functions with
wide arguments and automatically generates code to split/unsplit them.
Unfortunately the current scheme of Z_SYSCALL_DECLARE_* macros won't
work with functions like this, because for N arguments (our current
maximum N is 10) there are 2^N possible configurations of argument
widths. So this generates the complete functions for each handler and
wrapper, effectively doing in python what was originally done in the
preprocessor.
Another complexity is that traditional the z_hdlr_*() function for a
system call has taken the raw list of word arguments, which does not
work when some of those arguments must be 64 bit types. So instead of
using a single Z_SYSCALL_HANDLER macro, this splits the job of
z_hdlr_*() into two steps: An automatically-generated unmarshalling
function, z_mrsh_*(), which then calls a user-supplied verification
function z_vrfy_*(). The verification function is typesafe, and is a
simple C function with exactly the same argument and return signature
as the syscall impl function. It is also not responsible for
validating the pointers to the extra parameter array or a wide return
value, that code gets automatically generated.
This commit includes new vrfy/msrh handling for all syscalls invoked
during CI runs. Future commits will port the less testable code.
Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
2019-08-06 22:34:31 +02:00
|
|
|
#include <syscalls/device_get_binding_mrsh.c>
|
2018-11-12 19:25:12 +01:00
|
|
|
#endif /* CONFIG_USERSPACE */
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
size_t z_device_get_all_static(struct device const **devices)
|
2020-06-22 15:55:37 +02:00
|
|
|
{
|
|
|
|
*devices = __device_start;
|
|
|
|
return __device_end - __device_start;
|
2020-04-30 11:49:39 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
bool z_device_ready(const struct device *dev)
|
|
|
|
{
|
2020-09-14 17:51:44 +02:00
|
|
|
/* Set bit indicates device failed initialization */
|
|
|
|
return !(sys_bitfield_test_bit((mem_addr_t)__device_init_status_start,
|
2020-04-30 11:49:39 +02:00
|
|
|
(dev - __device_start)));
|
2020-06-22 15:55:37 +02:00
|
|
|
}
|
|
|
|
|
2020-09-02 00:31:40 +02:00
|
|
|
#ifdef CONFIG_PM_DEVICE
|
2020-04-30 20:33:38 +02:00
|
|
|
int device_pm_control_nop(const struct device *unused_device,
|
|
|
|
uint32_t unused_ctrl_command,
|
|
|
|
void *unused_context,
|
|
|
|
device_pm_cb cb,
|
|
|
|
void *unused_arg)
|
2016-10-05 19:01:54 +02:00
|
|
|
{
|
2020-05-20 15:58:56 +02:00
|
|
|
return -ENOTSUP;
|
2016-10-05 19:01:54 +02:00
|
|
|
}
|
2016-10-08 02:07:04 +02:00
|
|
|
|
2016-10-05 19:01:54 +02:00
|
|
|
int device_any_busy_check(void)
|
|
|
|
{
|
|
|
|
int i = 0;
|
|
|
|
|
|
|
|
for (i = 0; i < DEVICE_BUSY_SIZE; i++) {
|
2019-03-27 02:57:45 +01:00
|
|
|
if (__device_busy_start[i] != 0U) {
|
2016-10-05 19:01:54 +02:00
|
|
|
return -EBUSY;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
int device_busy_check(const struct device *chk_dev)
|
2016-10-05 19:01:54 +02:00
|
|
|
{
|
|
|
|
if (atomic_test_bit((const atomic_t *)__device_busy_start,
|
2020-03-09 11:02:20 +01:00
|
|
|
(chk_dev - __device_start))) {
|
2016-10-05 19:01:54 +02:00
|
|
|
return -EBUSY;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
/* Mark a device as busy.
 *
 * Atomically sets the device's bit in the busy bitfield; the bit
 * position is the device's index within the static device array.
 * No-op when CONFIG_PM_DEVICE is disabled.
 */
void device_busy_set(const struct device *busy_dev)
{
#ifdef CONFIG_PM_DEVICE
	atomic_t *busy_bits = (atomic_t *)__device_busy_start;

	atomic_set_bit(busy_bits, busy_dev - __device_start);
#else
	ARG_UNUSED(busy_dev);
#endif
}
|
|
|
|
|
2020-04-30 20:33:38 +02:00
|
|
|
/* Mark a device as no longer busy.
 *
 * Atomically clears the device's bit in the busy bitfield; the bit
 * position is the device's index within the static device array.
 * No-op when CONFIG_PM_DEVICE is disabled.
 */
void device_busy_clear(const struct device *busy_dev)
{
#ifdef CONFIG_PM_DEVICE
	atomic_t *busy_bits = (atomic_t *)__device_busy_start;

	atomic_clear_bit(busy_bits, busy_dev - __device_start);
#else
	ARG_UNUSED(busy_dev);
#endif
}
|