kernel: add closing comments to config endifs

Add a closing comment to each endif, naming the configuration
option to which the endif belongs, to make the code clearer
when the configs need adaptation.

Signed-off-by: Simon Hein <Shein@baumer.com>
Author:    Simon Hein
Date:      2024-03-08 12:00:10 +01:00
Committer: Anas Nashif
Parent:    6266dc11a9
Commit:    bcd1d19322
49 changed files with 437 additions and 431 deletions
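
The change is mechanical throughout: every bare #endif that closes a Kconfig-guarded block gains a trailing comment repeating the guarding condition, so the reader no longer has to scroll back to the matching #if. As a minimal illustration of the convention (not taken from the diff; CONFIG_FOO is a hypothetical option):

/* Before: nothing says which conditional this #endif closes. */
#ifdef CONFIG_FOO
extern int foo_count;   /* only declared when CONFIG_FOO is enabled */
#endif

/* After: the closing comment names the option guarding the block. */
#ifdef CONFIG_FOO
extern int foo_count;   /* only declared when CONFIG_FOO is enabled */
#endif /* CONFIG_FOO */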

View file

@@ -39,7 +39,7 @@
 (CONFIG_SRAM_BASE_ADDRESS + CONFIG_SRAM_OFFSET))
 #else
 #define Z_MEM_VM_OFFSET 0
-#endif
+#endif /* CONFIG_MMU */
 #define Z_MEM_PHYS_ADDR(virt) ((virt) - Z_MEM_VM_OFFSET)
 #define Z_MEM_VIRT_ADDR(phys) ((phys) + Z_MEM_VM_OFFSET)
@@ -70,26 +70,26 @@ static inline uintptr_t z_mem_phys_addr(void *virt)
 __ASSERT(
 #if CONFIG_KERNEL_VM_BASE != 0
 (addr >= CONFIG_KERNEL_VM_BASE) &&
-#endif
+#endif /* CONFIG_KERNEL_VM_BASE != 0 */
 #if (CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE) != 0
 (addr < (CONFIG_KERNEL_VM_BASE +
 (CONFIG_KERNEL_VM_SIZE))),
 #else
 false,
-#endif
+#endif /* CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE != 0 */
 "address %p not in permanent mappings", virt);
 #else
 /* Should be identity-mapped */
 __ASSERT(
 #if CONFIG_SRAM_BASE_ADDRESS != 0
 (addr >= CONFIG_SRAM_BASE_ADDRESS) &&
-#endif
+#endif /* CONFIG_SRAM_BASE_ADDRESS != 0 */
 #if (CONFIG_SRAM_BASE_ADDRESS + (CONFIG_SRAM_SIZE * 1024UL)) != 0
 (addr < (CONFIG_SRAM_BASE_ADDRESS +
 (CONFIG_SRAM_SIZE * 1024UL))),
 #else
 false,
-#endif
+#endif /* (CONFIG_SRAM_BASE_ADDRESS + (CONFIG_SRAM_SIZE * 1024UL)) != 0 */
 "physical address 0x%lx not in RAM",
 (unsigned long)addr);
 #endif /* CONFIG_MMU */
@@ -111,15 +111,15 @@ static inline void *z_mem_virt_addr(uintptr_t phys)
 __ASSERT(
 #if CONFIG_SRAM_BASE_ADDRESS != 0
 (phys >= CONFIG_SRAM_BASE_ADDRESS) &&
-#endif
+#endif /* CONFIG_SRAM_BASE_ADDRESS != 0 */
 #if (CONFIG_SRAM_BASE_ADDRESS + (CONFIG_SRAM_SIZE * 1024UL)) != 0
 (phys < (CONFIG_SRAM_BASE_ADDRESS +
 (CONFIG_SRAM_SIZE * 1024UL))),
 #else
 false,
-#endif
+#endif /* (CONFIG_SRAM_BASE_ADDRESS + (CONFIG_SRAM_SIZE * 1024UL)) != 0 */
 "physical address 0x%lx not in RAM", (unsigned long)phys);
-#endif
+#endif /* CONFIG_KERNEL_VM_USE_CUSTOM_MEM_RANGE_CHECK */
 /* TODO add assertion that this page frame is pinned to boot mapping,
 * the above check won't be sufficient with demand paging

View file

@@ -8,4 +8,4 @@
 void z_sched_ipi(void);
-#endif
+#endif /* ZEPHYR_INCLUDE_KERNEL_INTERNAL_SMP_H_ */

View file

@@ -11,7 +11,7 @@
 #include <zephyr/toolchain.h>
 #if defined(CONFIG_ARM_MMU) && defined(CONFIG_ARM64)
 #include <zephyr/arch/arm64/arm_mem.h>
-#endif
+#endif /* CONFIG_ARM_MMU && CONFIG_ARM64 */
 #include <zephyr/kernel/internal/mm.h>

View file

@@ -47,7 +47,7 @@ struct k_mem_paging_stats_t {
 #if !defined(CONFIG_DEMAND_PAGING_ALLOW_IRQ) || defined(__DOXYGEN__)
 /** Number of page faults while in ISR */
 unsigned long in_isr;
-#endif
+#endif /* !CONFIG_DEMAND_PAGING_ALLOW_IRQ */
 } pagefaults;
 struct {

View file

@@ -76,7 +76,7 @@ struct k_obj_core;
 #else
 #define K_OBJ_CORE_INIT(objp, type) do { } while (0)
 #define K_OBJ_CORE_LINK(objp) do { } while (0)
-#endif
+#endif /* CONFIG_OBJ_CORE */
 /**
 * INTERNAL_HIDDEN @endcond
@@ -114,7 +114,7 @@ struct k_obj_type {
 #ifdef CONFIG_OBJ_CORE_STATS
 /** Pointer to object core statistics descriptor */
 struct k_obj_core_stats_desc *stats_desc;
-#endif
+#endif /* CONFIG_OBJ_CORE_STATS */
 };
 /** Object core structure */
@@ -123,7 +123,7 @@ struct k_obj_core {
 struct k_obj_type *type; /**< Object type to which object belongs */
 #ifdef CONFIG_OBJ_CORE_STATS
 void *stats; /**< Pointer to kernel object's stats */
-#endif
+#endif /* CONFIG_OBJ_CORE_STATS */
 };
 /**
@@ -280,7 +280,7 @@ static inline void k_obj_core_stats_init(struct k_obj_core *obj_core,
 {
 obj_core->stats = stats;
 }
-#endif
+#endif /* CONFIG_OBJ_CORE_STATS */
 /**
 * @brief Register kernel object for gathering statistics

View file

@@ -26,8 +26,8 @@ struct k_cycle_stats {
 uint64_t longest; /**< \# of cycles in longest usage window */
 uint32_t num_windows; /**< \# of usage windows */
 /** @} */
-#endif
+#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
 bool track_usage; /**< true if gathering usage stats */
 };
-#endif
+#endif /* ZEPHYR_INCLUDE_KERNEL_STATS_H_ */

View file

@@ -9,7 +9,7 @@
 #ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
 #include <zephyr/kernel/mm/demand_paging.h>
-#endif
+#endif /* CONFIG_DEMAND_PAGING_THREAD_STATS */
 #include <zephyr/kernel/stats.h>
 #include <zephyr/arch/arch_interface.h>
@@ -38,7 +38,7 @@ struct __thread_entry {
 void *parameter2;
 void *parameter3;
 };
-#endif
+#endif /* CONFIG_THREAD_MONITOR */
 struct k_thread;
@@ -96,14 +96,14 @@ struct _thread_base {
 #else /* Little Endian */
 int8_t prio;
 uint8_t sched_locked;
-#endif
+#endif /* CONFIG_BIG_ENDIAN */
 };
 uint16_t preempt;
 };
 #ifdef CONFIG_SCHED_DEADLINE
 int prio_deadline;
-#endif
+#endif /* CONFIG_SCHED_DEADLINE */
 uint32_t order_key;
@@ -117,7 +117,7 @@ struct _thread_base {
 /* Recursive count of irq_lock() calls */
 uint8_t global_lock_count;
-#endif
+#endif /* CONFIG_SMP */
 #ifdef CONFIG_SCHED_CPU_MASK
 /* "May run on" bits for each CPU */
@@ -125,7 +125,7 @@ struct _thread_base {
 uint8_t cpu_mask;
 #else
 uint16_t cpu_mask;
-#endif
+#endif /* CONFIG_MP_MAX_NUM_CPUS */
 #endif /* CONFIG_SCHED_CPU_MASK */
 /* data returned by APIs */
@@ -134,17 +134,17 @@ struct _thread_base {
 #ifdef CONFIG_SYS_CLOCK_EXISTS
 /* this thread's entry in a timeout queue */
 struct _timeout timeout;
-#endif
+#endif /* CONFIG_SYS_CLOCK_EXISTS */
 #ifdef CONFIG_TIMESLICE_PER_THREAD
 int32_t slice_ticks;
 k_thread_timeslice_fn_t slice_expired;
 void *slice_data;
-#endif
+#endif /* CONFIG_TIMESLICE_PER_THREAD */
 #ifdef CONFIG_SCHED_THREAD_USAGE
 struct k_cycle_stats usage; /* Track thread usage statistics */
-#endif
+#endif /* CONFIG_SCHED_THREAD_USAGE */
 };
 typedef struct _thread_base _thread_base_t;
@@ -190,9 +190,9 @@ struct _mem_domain_info {
 struct _thread_userspace_local_data {
 #if defined(CONFIG_ERRNO) && !defined(CONFIG_ERRNO_IN_TLS) && !defined(CONFIG_LIBC_ERRNO)
 int errno_var;
-#endif
+#endif /* CONFIG_ERRNO && !CONFIG_ERRNO_IN_TLS && !CONFIG_LIBC_ERRNO */
 };
-#endif
+#endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */
 typedef struct k_thread_runtime_stats {
 #ifdef CONFIG_SCHED_THREAD_USAGE
@@ -203,7 +203,7 @@ typedef struct k_thread_runtime_stats {
 * as the total # of non-idle cycles. In the context of CPU statistics,
 * it refers to the sum of non-idle + idle cycles.
 */
-#endif
+#endif /* CONFIG_SCHED_THREAD_USAGE */
 #ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
 /*
@@ -216,7 +216,7 @@ typedef struct k_thread_runtime_stats {
 uint64_t current_cycles; /* current # of non-idle cycles */
 uint64_t peak_cycles; /* peak # of non-idle cycles */
 uint64_t average_cycles; /* average # of non-idle cycles */
-#endif
+#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
 #ifdef CONFIG_SCHED_THREAD_USAGE_ALL
 /*
@@ -226,7 +226,7 @@ typedef struct k_thread_runtime_stats {
 */
 uint64_t idle_cycles;
-#endif
+#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */
 #if defined(__cplusplus) && !defined(CONFIG_SCHED_THREAD_USAGE) && \
 !defined(CONFIG_SCHED_THREAD_USAGE_ANALYSIS) && !defined(CONFIG_SCHED_THREAD_USAGE_ALL)
@@ -262,7 +262,7 @@ struct k_thread {
 #if defined(CONFIG_POLL)
 struct z_poller poller;
-#endif
+#endif /* CONFIG_POLL */
 #if defined(CONFIG_EVENTS)
 struct k_thread *next_event_link;
@@ -272,7 +272,7 @@ struct k_thread {
 /** true if timeout should not wake the thread */
 bool no_wake_on_timeout;
-#endif
+#endif /* CONFIG_EVENTS */
 #if defined(CONFIG_THREAD_MONITOR)
 /** thread entry and parameters description */
@@ -280,28 +280,28 @@ struct k_thread {
 /** next item in list of all threads */
 struct k_thread *next_thread;
-#endif
+#endif /* CONFIG_THREAD_MONITOR */
 #if defined(CONFIG_THREAD_NAME)
 /** Thread name */
 char name[CONFIG_THREAD_MAX_NAME_LEN];
-#endif
+#endif /* CONFIG_THREAD_NAME */
 #ifdef CONFIG_THREAD_CUSTOM_DATA
 /** crude thread-local storage */
 void *custom_data;
-#endif
+#endif /* CONFIG_THREAD_CUSTOM_DATA */
 #ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
 struct _thread_userspace_local_data *userspace_local_data;
-#endif
+#endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */
 #if defined(CONFIG_ERRNO) && !defined(CONFIG_ERRNO_IN_TLS) && !defined(CONFIG_LIBC_ERRNO)
 #ifndef CONFIG_USERSPACE
 /** per-thread errno variable */
 int errno_var;
-#endif
+#endif /* CONFIG_USERSPACE */
-#endif
+#endif /* CONFIG_ERRNO && !CONFIG_ERRNO_IN_TLS && !CONFIG_LIBC_ERRNO */
 #if defined(CONFIG_THREAD_STACK_INFO)
 /** Stack Info */
@@ -328,7 +328,7 @@ struct k_thread {
 /** Context handle returned via arch_switch() */
 void *switch_handle;
-#endif
+#endif /* CONFIG_USE_SWITCH */
 /** resource pool */
 struct k_heap *resource_pool;
@@ -340,21 +340,21 @@ struct k_thread {
 #ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
 /** Paging statistics */
 struct k_mem_paging_stats_t paging_stats;
-#endif
+#endif /* CONFIG_DEMAND_PAGING_THREAD_STATS */
 #ifdef CONFIG_PIPES
 /** Pipe descriptor used with blocking k_pipe operations */
 struct _pipe_desc pipe_desc;
-#endif
+#endif /* CONFIG_PIPES */
 #ifdef CONFIG_OBJ_CORE_THREAD
 struct k_obj_core obj_core;
-#endif
+#endif /* CONFIG_OBJ_CORE_THREAD */
 #ifdef CONFIG_SMP
 /** threads waiting in k_thread_suspend() */
 _wait_q_t halt_queue;
-#endif
+#endif /* CONFIG_SMP */
 /** arch-specifics: must always be at the end */
 struct _thread_arch arch;
@@ -363,4 +363,4 @@ struct k_thread {
 typedef struct k_thread _thread_t;
 typedef struct k_thread *k_tid_t;
-#endif
+#endif /* ZEPHYR_INCLUDE_KERNEL_THREAD_H_ */

View file

@@ -92,7 +92,7 @@ static inline char *z_stack_ptr_align(char *ptr)
 #define K_KERNEL_STACK_RESERVED ((size_t)ARCH_KERNEL_STACK_RESERVED)
 #else
 #define K_KERNEL_STACK_RESERVED ((size_t)0)
-#endif
+#endif /* ARCH_KERNEL_STACK_RESERVED */
 #define Z_KERNEL_STACK_SIZE_ADJUST(size) (ROUND_UP(size, \
 ARCH_STACK_PTR_ALIGN) + \
@@ -102,7 +102,7 @@ static inline char *z_stack_ptr_align(char *ptr)
 #define Z_KERNEL_STACK_OBJ_ALIGN ARCH_KERNEL_STACK_OBJ_ALIGN
 #else
 #define Z_KERNEL_STACK_OBJ_ALIGN ARCH_STACK_PTR_ALIGN
-#endif
+#endif /* ARCH_KERNEL_STACK_OBJ_ALIGN */
 #define Z_KERNEL_STACK_LEN(size) \
 ROUND_UP(Z_KERNEL_STACK_SIZE_ADJUST(size), Z_KERNEL_STACK_OBJ_ALIGN)
@@ -232,7 +232,7 @@ static inline char *z_stack_ptr_align(char *ptr)
 #else
 #define K_KERNEL_PINNED_STACK_DEFINE(sym, size) \
 Z_KERNEL_STACK_DEFINE_IN(sym, size, __kstackmem)
-#endif
+#endif /* CONFIG_LINKER_USE_PINNED_SECTION */
 /**
 * @brief Define a toplevel array of kernel stack memory regions
@@ -265,7 +265,7 @@ static inline char *z_stack_ptr_align(char *ptr)
 #else
 #define K_KERNEL_PINNED_STACK_ARRAY_DEFINE(sym, nmemb, size) \
 Z_KERNEL_STACK_ARRAY_DEFINE_IN(sym, nmemb, size, __kstackmem)
-#endif
+#endif /* CONFIG_LINKER_USE_PINNED_SECTION */
 /**
 * @brief Define an embedded stack memory region
@@ -320,7 +320,7 @@ static inline char *Z_KERNEL_STACK_BUFFER(k_thread_stack_t *sym)
 #define K_THREAD_STACK_RESERVED ((size_t)(ARCH_THREAD_STACK_RESERVED))
 #else
 #define K_THREAD_STACK_RESERVED ((size_t)0U)
-#endif
+#endif /* ARCH_THREAD_STACK_RESERVED */
 /**
 * @brief Properly align the lowest address of a stack object
@@ -553,7 +553,7 @@ static inline char *Z_KERNEL_STACK_BUFFER(k_thread_stack_t *sym)
 #else
 #define K_THREAD_PINNED_STACK_DEFINE(sym, size) \
 K_THREAD_STACK_DEFINE(sym, size)
-#endif
+#endif /* CONFIG_LINKER_USE_PINNED_SECTION */
 /**
 * @brief Calculate size of stacks to be allocated in a stack array
@@ -611,7 +611,7 @@ static inline char *Z_KERNEL_STACK_BUFFER(k_thread_stack_t *sym)
 #else
 #define K_THREAD_PINNED_STACK_ARRAY_DEFINE(sym, nmemb, size) \
 K_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size)
-#endif
+#endif /* CONFIG_LINKER_USE_PINNED_SECTION */
 /**
 * @brief Define an embedded stack memory region

View file

@@ -56,7 +56,7 @@ static struct k_spinlock lock;
 #else
 #define ATOMIC_SYSCALL_HANDLER_TARGET(name)
 #define ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(name)
-#endif
+#endif /* CONFIG_USERSPACE */
 /**
 *
@@ -411,4 +411,4 @@ ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_nand);
 #include <syscalls/atomic_xor_mrsh.c>
 #include <syscalls/atomic_and_mrsh.c>
 #include <syscalls/atomic_nand_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */

View file

@@ -14,15 +14,15 @@
 #define BANNER_POSTFIX " (delayed boot " DELAY_STR "ms)"
 #else
 #define BANNER_POSTFIX ""
-#endif
+#endif /* defined(CONFIG_BOOT_DELAY) && (CONFIG_BOOT_DELAY > 0) */
 #ifndef BANNER_VERSION
 #ifdef BUILD_VERSION
 #define BANNER_VERSION STRINGIFY(BUILD_VERSION)
 #else
 #define BANNER_VERSION KERNEL_VERSION_STRING
-#endif
+#endif /* BUILD_VERSION */
-#endif
+#endif /* !BANNER_VERSION */
 void boot_banner(void)
 {

View file

@@ -14,7 +14,7 @@
 #ifdef CONFIG_OBJ_CORE_CONDVAR
 static struct k_obj_type obj_type_condvar;
-#endif
+#endif /* CONFIG_OBJ_CORE_CONDVAR */
 static struct k_spinlock lock;
@@ -25,7 +25,7 @@ int z_impl_k_condvar_init(struct k_condvar *condvar)
 #ifdef CONFIG_OBJ_CORE_CONDVAR
 k_obj_core_init_and_link(K_OBJ_CORE(condvar), &obj_type_condvar);
-#endif
+#endif /* CONFIG_OBJ_CORE_CONDVAR */
 SYS_PORT_TRACING_OBJ_INIT(k_condvar, condvar, 0);
@@ -39,7 +39,7 @@ int z_vrfy_k_condvar_init(struct k_condvar *condvar)
 return z_impl_k_condvar_init(condvar);
 }
 #include <syscalls/k_condvar_init_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 int z_impl_k_condvar_signal(struct k_condvar *condvar)
 {
@@ -71,7 +71,7 @@ int z_vrfy_k_condvar_signal(struct k_condvar *condvar)
 return z_impl_k_condvar_signal(condvar);
 }
 #include <syscalls/k_condvar_signal_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 int z_impl_k_condvar_broadcast(struct k_condvar *condvar)
 {
@@ -104,7 +104,7 @@ int z_vrfy_k_condvar_broadcast(struct k_condvar *condvar)
 return z_impl_k_condvar_broadcast(condvar);
 }
 #include <syscalls/k_condvar_broadcast_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 int z_impl_k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
 k_timeout_t timeout)
@@ -133,7 +133,7 @@ int z_vrfy_k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
 return z_impl_k_condvar_wait(condvar, mutex, timeout);
 }
 #include <syscalls/k_condvar_wait_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 #ifdef CONFIG_OBJ_CORE_CONDVAR
 static int init_condvar_obj_core_list(void)
@@ -155,4 +155,4 @@ static int init_condvar_obj_core_list(void)
 SYS_INIT(init_condvar_obj_core_list, PRE_KERNEL_1,
 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
-#endif
+#endif /* CONFIG_OBJ_CORE_CONDVAR */

View file

@@ -13,7 +13,7 @@ extern struct k_spinlock _sched_spinlock;
 # ifdef CONFIG_SMP
 /* Right now we use a two byte for this mask */
 BUILD_ASSERT(CONFIG_MP_MAX_NUM_CPUS <= 16, "Too many CPUs for mask word");
-# endif
+# endif /* CONFIG_SMP */
 static int cpu_mask_mod(k_tid_t thread, uint32_t enable_mask, uint32_t disable_mask)
@@ -23,7 +23,7 @@ static int cpu_mask_mod(k_tid_t thread, uint32_t enable_mask, uint32_t disable_m
 #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
 __ASSERT(z_is_thread_prevented_from_running(thread),
 "Running threads cannot change CPU pin");
-#endif
+#endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
 K_SPINLOCK(&_sched_spinlock) {
 if (z_is_thread_prevented_from_running(thread)) {
@@ -39,7 +39,7 @@ static int cpu_mask_mod(k_tid_t thread, uint32_t enable_mask, uint32_t disable_m
 __ASSERT((m == 0) || ((m & (m - 1)) == 0),
 "Only one CPU allowed in mask when PIN_ONLY");
-#endif
+#endif /* defined(CONFIG_ASSERT) && defined(CONFIG_SCHED_CPU_MASK_PIN_ONLY) */
 return ret;
 }

View file

@@ -20,7 +20,7 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
 #define BA_SIZE CONFIG_DYNAMIC_THREAD_POOL_SIZE
 #else
 #define BA_SIZE 1
-#endif
+#endif /* CONFIG_DYNAMIC_THREAD_POOL_SIZE > 0 */
 struct dyn_cb_data {
 k_tid_t tid;
@@ -71,7 +71,7 @@ static k_thread_stack_t *stack_alloc_dyn(size_t size, int flags)
 * enabled we can't proceed.
 */
 return NULL;
-#endif
+#endif /* CONFIG_DYNAMIC_OBJECTS */
 }
 return z_thread_stack_alloc_dyn(Z_KERNEL_STACK_OBJ_ALIGN,
@@ -106,7 +106,7 @@ static inline k_thread_stack_t *z_vrfy_k_thread_stack_alloc(size_t size, int fla
 return z_impl_k_thread_stack_alloc(size, flags);
 }
 #include <syscalls/k_thread_stack_alloc_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 static void dyn_cb(const struct k_thread *thread, void *user_data)
 {
@@ -154,7 +154,7 @@ int z_impl_k_thread_stack_free(k_thread_stack_t *stack)
 }
 #else
 k_free(stack);
-#endif
+#endif /* CONFIG_USERSPACE */
 } else {
 LOG_DBG("Invalid stack %p", stack);
 return -EINVAL;
@@ -169,4 +169,4 @@ static inline int z_vrfy_k_thread_stack_free(k_thread_stack_t *stack)
 return z_impl_k_thread_stack_free(stack);
 }
 #include <syscalls/k_thread_stack_free_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */

View file

@@ -47,7 +47,7 @@ struct event_walk_data {
 #ifdef CONFIG_OBJ_CORE_EVENT
 static struct k_obj_type obj_type_event;
-#endif
+#endif /* CONFIG_OBJ_CORE_EVENT */
 void z_impl_k_event_init(struct k_event *event)
 {
@@ -62,7 +62,7 @@ void z_impl_k_event_init(struct k_event *event)
 #ifdef CONFIG_OBJ_CORE_EVENT
 k_obj_core_init_and_link(K_OBJ_CORE(event), &obj_type_event);
-#endif
+#endif /* CONFIG_OBJ_CORE_EVENT */
 }
 #ifdef CONFIG_USERSPACE
@@ -72,7 +72,7 @@ void z_vrfy_k_event_init(struct k_event *event)
 z_impl_k_event_init(event);
 }
 #include <syscalls/k_event_init_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 /**
 * @brief determine if desired set of events been satisfied
@@ -191,7 +191,7 @@ uint32_t z_vrfy_k_event_post(struct k_event *event, uint32_t events)
 return z_impl_k_event_post(event, events);
 }
 #include <syscalls/k_event_post_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 uint32_t z_impl_k_event_set(struct k_event *event, uint32_t events)
 {
@@ -205,7 +205,7 @@ uint32_t z_vrfy_k_event_set(struct k_event *event, uint32_t events)
 return z_impl_k_event_set(event, events);
 }
 #include <syscalls/k_event_set_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 uint32_t z_impl_k_event_set_masked(struct k_event *event, uint32_t events,
 uint32_t events_mask)
@@ -221,7 +221,7 @@ uint32_t z_vrfy_k_event_set_masked(struct k_event *event, uint32_t events,
 return z_impl_k_event_set_masked(event, events, events_mask);
 }
 #include <syscalls/k_event_set_masked_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 uint32_t z_impl_k_event_clear(struct k_event *event, uint32_t events)
 {
@@ -235,7 +235,7 @@ uint32_t z_vrfy_k_event_clear(struct k_event *event, uint32_t events)
 return z_impl_k_event_clear(event, events);
 }
 #include <syscalls/k_event_clear_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 static uint32_t k_event_wait_internal(struct k_event *event, uint32_t events,
 unsigned int options, k_timeout_t timeout)
@@ -321,7 +321,7 @@ uint32_t z_vrfy_k_event_wait(struct k_event *event, uint32_t events,
 return z_impl_k_event_wait(event, events, reset, timeout);
 }
 #include <syscalls/k_event_wait_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 /**
 * Wait for all of the specified events
@@ -343,7 +343,7 @@ uint32_t z_vrfy_k_event_wait_all(struct k_event *event, uint32_t events,
 return z_impl_k_event_wait_all(event, events, reset, timeout);
 }
 #include <syscalls/k_event_wait_all_mrsh.c>
-#endif
+#endif /* CONFIG_USERSPACE */
 #ifdef CONFIG_OBJ_CORE_EVENT
 static int init_event_obj_core_list(void)
@@ -364,4 +364,4 @@ static int init_event_obj_core_list(void)
 SYS_INIT(init_event_obj_core_list, PRE_KERNEL_1,
 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
-#endif
+#endif /* CONFIG_OBJ_CORE_EVENT */

View file

@@ -108,7 +108,7 @@ void z_fatal_error(unsigned int reason, const z_arch_esf_t *esf)
 if ((esf != NULL) && arch_is_in_nested_exception(esf)) {
 LOG_ERR("Fault during interrupt handling\n");
 }
-#endif
+#endif /* CONFIG_ARCH_HAS_NESTED_EXCEPTION_DETECTION */
 LOG_ERR("Current thread: %p (%s)", thread,
 thread_name_get(thread));

View file

@@ -30,7 +30,7 @@ void z_pm_save_idle_exit(void)
 #endif /* CONFIG_PM */
 #ifdef CONFIG_SYS_CLOCK_EXISTS
 sys_clock_idle_exit();
-#endif
+#endif /* CONFIG_SYS_CLOCK_EXISTS */
 }
 void idle(void *unused1, void *unused2, void *unused3)
@@ -87,7 +87,7 @@ void idle(void *unused1, void *unused2, void *unused3)
 }
 #else
 k_cpu_idle();
-#endif
+#endif /* CONFIG_PM */
 #if !defined(CONFIG_PREEMPT_ENABLED)
 # if !defined(CONFIG_USE_SWITCH) || defined(CONFIG_SPARC)
@@ -103,8 +103,8 @@ void idle(void *unused1, void *unused2, void *unused3)
 if (_kernel.ready_q.cache != _current) {
 z_swap_unlocked();
 }
-# endif
+# endif /* !defined(CONFIG_USE_SWITCH) || defined(CONFIG_SPARC) */
-#endif
+#endif /* !defined(CONFIG_PREEMPT_ENABLED) */
 }
 }

View file

@@ -37,7 +37,7 @@ extern "C" {
 * @param usec_to_wait Wait period, in microseconds
 */
 void arch_busy_wait(uint32_t usec_to_wait);
-#endif
+#endif /* CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT */
 /** @} */
@@ -154,7 +154,7 @@ int arch_swap(unsigned int key);
 */
 static ALWAYS_INLINE void
 arch_thread_return_value_set(struct k_thread *thread, unsigned int value);
-#endif /* CONFIG_USE_SWITCH i*/
+#endif /* CONFIG_USE_SWITCH */
 #ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
 /**

View file

@@ -43,7 +43,7 @@ static inline void z_data_copy(void)
 {
 /* Do nothing */
 }
-#endif
+#endif /* CONFIG_XIP */
 #ifdef CONFIG_LINKER_USE_BOOT_SECTION
 void z_bss_zero_boot(void);
@@ -52,7 +52,7 @@ static inline void z_bss_zero_boot(void)
 {
 /* Do nothing */
 }
-#endif
+#endif /* CONFIG_LINKER_USE_BOOT_SECTION */
 #ifdef CONFIG_LINKER_USE_PINNED_SECTION
 void z_bss_zero_pinned(void);
@@ -61,7 +61,7 @@ static inline void z_bss_zero_pinned(void)
 {
 /* Do nothing */
 }
-#endif
+#endif /* CONFIG_LINKER_USE_PINNED_SECTION */
 FUNC_NORETURN void z_cstart(void);
@@ -135,27 +135,27 @@ z_thread_return_value_set_with_data(struct k_thread *thread,
 extern void z_smp_init(void);
 #ifdef CONFIG_SYS_CLOCK_EXISTS
 extern void smp_timer_init(void);
-#endif
+#endif /* CONFIG_SYS_CLOCK_EXISTS */
-#endif
+#endif /* CONFIG_SMP */
 extern void z_early_rand_get(uint8_t *buf, size_t length);
 #if CONFIG_STACK_POINTER_RANDOM
 extern int z_stack_adjust_initialized;
-#endif
+#endif /* CONFIG_STACK_POINTER_RANDOM */
 extern struct k_thread z_main_thread;
 #ifdef CONFIG_MULTITHREADING
 extern struct k_thread z_idle_threads[CONFIG_MP_MAX_NUM_CPUS];
-#endif
+#endif /* CONFIG_MULTITHREADING */
 K_KERNEL_PINNED_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_MAX_NUM_CPUS,
 CONFIG_ISR_STACK_SIZE);
 #ifdef CONFIG_GEN_PRIV_STACKS
 extern uint8_t *z_priv_stack_find(k_thread_stack_t *stack);
-#endif
+#endif /* CONFIG_GEN_PRIV_STACKS */
 /* Calculate stack usage. */
 int z_stack_space_get(const uint8_t *stack_start, size_t size, size_t *unused_ptr);
@@ -189,7 +189,7 @@ struct gdb_ctx;
 * and synchronously communicate with gdb on host.
 */
 extern int z_gdb_main_loop(struct gdb_ctx *ctx);
-#endif
+#endif /* CONFIG_GDBSTUB */
 #ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
 void z_thread_mark_switched_in(void);
@@ -263,7 +263,7 @@ bool pm_system_suspend(int32_t ticks);
 */
 void pm_system_resume(void);
-#endif
+#endif /* CONFIG_PM */
 #ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
 /**
@@ -287,7 +287,7 @@ int z_thread_stats_query(struct k_obj_core *obj_core, void *stats);
 int z_thread_stats_reset(struct k_obj_core *obj_core);
 int z_thread_stats_disable(struct k_obj_core *obj_core);
 int z_thread_stats_enable(struct k_obj_core *obj_core);
-#endif
+#endif /* CONFIG_OBJ_CORE_STATS_THREAD */
 #ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
 int z_cpu_stats_raw(struct k_obj_core *obj_core, void *stats);
@@ -295,7 +295,7 @@ int z_cpu_stats_query(struct k_obj_core *obj_core, void *stats);
 int z_kernel_stats_raw(struct k_obj_core *obj_core, void *stats);
 int z_kernel_stats_query(struct k_obj_core *obj_core, void *stats);
-#endif
+#endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */
 #ifdef __cplusplus
 }

View file

@@ -34,23 +34,23 @@ GEN_OFFSET_SYM(_kernel_t, cpus);
 #if defined(CONFIG_FPU_SHARING)
 GEN_OFFSET_SYM(_cpu_t, fp_ctx);
-#endif
+#endif /* CONFIG_FPU_SHARING */
 #ifdef CONFIG_PM
 GEN_OFFSET_SYM(_kernel_t, idle);
-#endif
+#endif /* CONFIG_PM */
 #ifndef CONFIG_SCHED_CPU_MASK_PIN_ONLY
 GEN_OFFSET_SYM(_kernel_t, ready_q);
-#endif
+#endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
 #ifndef CONFIG_SMP
 GEN_OFFSET_SYM(_ready_q_t, cache);
-#endif
+#endif /* CONFIG_SMP */
 #ifdef CONFIG_FPU_SHARING
 GEN_OFFSET_SYM(_kernel_t, current_fp);
-#endif
+#endif /* CONFIG_FPU_SHARING */
 GEN_OFFSET_SYM(_thread_base_t, user_options);
@@ -60,15 +60,15 @@ GEN_OFFSET_SYM(_thread_t, arch);
 #ifdef CONFIG_USE_SWITCH
 GEN_OFFSET_SYM(_thread_t, switch_handle);
-#endif
+#endif /* CONFIG_USE_SWITCH */
 #ifdef CONFIG_THREAD_STACK_INFO
 GEN_OFFSET_SYM(_thread_t, stack_info);
-#endif
+#endif /* CONFIG_THREAD_STACK_INFO */
 #ifdef CONFIG_THREAD_LOCAL_STORAGE
 GEN_OFFSET_SYM(_thread_t, tls);
-#endif
+#endif /* CONFIG_THREAD_LOCAL_STORAGE */
 GEN_ABSOLUTE_SYM(__z_interrupt_stack_SIZEOF, sizeof(z_interrupt_stacks[0]));
@@ -76,12 +76,12 @@ GEN_ABSOLUTE_SYM(__z_interrupt_stack_SIZEOF, sizeof(z_interrupt_stacks[0]));
 #ifdef CONFIG_DEVICE_DEPS
 GEN_ABSOLUTE_SYM(_DEVICE_STRUCT_HANDLES_OFFSET,
 offsetof(struct device, deps));
-#endif
+#endif /* CONFIG_DEVICE_DEPS */
 #ifdef CONFIG_PM_DEVICE
 GEN_ABSOLUTE_SYM(_DEVICE_STRUCT_PM_OFFSET,
 offsetof(struct device, pm));
-#endif
+#endif /* CONFIG_PM_DEVICE */
 /* member offsets in the pm_device structure. Used in image post-processing */

View file

@@ -34,7 +34,7 @@ BUILD_ASSERT(K_LOWEST_APPLICATION_THREAD_PRIO
 #else
 #define Z_VALID_PRIO(prio, entry_point) ((prio) == -1)
 #define Z_ASSERT_VALID_PRIO(prio, entry_point) __ASSERT((prio) == -1, "")
-#endif
+#endif /* CONFIG_MULTITHREADING */
 void z_sched_init(void);
 void z_move_thread_to_end_of_prio_q(struct k_thread *thread);
@@ -80,7 +80,7 @@ static inline bool z_is_idle_thread_object(struct k_thread *thread)
 return thread->base.is_idle;
 #else
 return thread == &z_idle_threads[0];
-#endif
+#endif /* CONFIG_SMP */
 #else
 return false;
 #endif /* CONFIG_MULTITHREADING */
@@ -417,7 +417,7 @@ static inline void z_sched_usage_switch(struct k_thread *thread)
 #ifdef CONFIG_SCHED_THREAD_USAGE
 z_sched_usage_stop();
 z_sched_usage_start(thread);
-#endif
+#endif /* CONFIG_SCHED_THREAD_USAGE */
 }
 #endif /* ZEPHYR_KERNEL_INCLUDE_KSCHED_H_ */

View file

@@ -15,7 +15,7 @@
 extern void z_check_stack_sentinel(void);
 #else
 #define z_check_stack_sentinel() /**/
-#endif
+#endif /* CONFIG_STACK_SENTINEL */
 extern struct k_spinlock _sched_spinlock;
@@ -63,7 +63,7 @@ static inline void z_sched_switch_spin(struct k_thread *thread)
 * non-null.
 */
 barrier_dmem_fence_full();
-#endif
+#endif /* CONFIG_SMP */
 }
 /* New style context switching. arch_switch() is a lower level
@@ -99,8 +99,8 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
 __ASSERT(arch_irq_unlocked(key) ||
 _current->base.thread_state & (_THREAD_DUMMY | _THREAD_DEAD),
 "Context switching while holding lock!");
-# endif
+# endif /* CONFIG_ARM64 */
-#endif
+#endif /* CONFIG_SPIN_VALIDATE */
 old_thread = _current;
@@ -131,18 +131,18 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
 if (!is_spinlock) {
 z_smp_release_global_lock(new_thread);
 }
-#endif
+#endif /* CONFIG_SMP */
 z_thread_mark_switched_out();
 z_sched_switch_spin(new_thread);
 _current_cpu->current = new_thread;
 #ifdef CONFIG_TIMESLICING
 z_reset_time_slice(new_thread);
-#endif
+#endif /* CONFIG_TIMESLICING */
 #ifdef CONFIG_SPIN_VALIDATE
 z_spin_lock_set_owner(&_sched_spinlock);
-#endif
+#endif /* CONFIG_SPIN_VALIDATE */
 arch_cohere_stacks(old_thread, NULL, new_thread);
@@ -152,7 +152,7 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
 * time. See z_sched_switch_spin().
 */
 z_requeue_current(old_thread);
-#endif
+#endif /* CONFIG_SMP */
 void *newsh = new_thread->switch_handle;
 if (IS_ENABLED(CONFIG_SMP)) {
@@ -241,24 +241,24 @@ static inline void z_dummy_thread_init(struct k_thread *dummy_thread)
 dummy_thread->base.thread_state = _THREAD_DUMMY;
 #ifdef CONFIG_SCHED_CPU_MASK
 dummy_thread->base.cpu_mask = -1;
-#endif
+#endif /* CONFIG_SCHED_CPU_MASK */
 dummy_thread->base.user_options = K_ESSENTIAL;
 #ifdef CONFIG_THREAD_STACK_INFO
 dummy_thread->stack_info.start = 0U;
 dummy_thread->stack_info.size = 0U;
-#endif
+#endif /* CONFIG_THREAD_STACK_INFO */
 #ifdef CONFIG_USERSPACE
 dummy_thread->mem_domain_info.mem_domain = &k_mem_domain_default;
-#endif
+#endif /* CONFIG_USERSPACE */
 #if (K_HEAP_MEM_POOL_SIZE > 0)
 k_thread_system_pool_assign(dummy_thread);
 #else
 dummy_thread->resource_pool = NULL;
-#endif
+#endif /* K_HEAP_MEM_POOL_SIZE */
 #ifdef CONFIG_TIMESLICE_PER_THREAD
 dummy_thread->base.slice_ticks = 0;
-#endif
+#endif /* CONFIG_TIMESLICE_PER_THREAD */
 _current_cpu->current = dummy_thread;
 }

View file

@@ -17,7 +17,7 @@
 * thread->next_thread (until NULL)
 */
 extern struct k_spinlock z_thread_monitor_lock;
-#endif
+#endif /* CONFIG_THREAD_MONITOR */
 /* clean up when a thread is aborted */
@@ -42,8 +42,8 @@ static inline void thread_schedule_new(struct k_thread *thread, k_timeout_t dela
 #else
 ARG_UNUSED(delay);
 k_thread_start(thread);
-#endif
+#endif /* CONFIG_SYS_CLOCK_EXISTS */
 }
-#endif
+#endif /* CONFIG_MULTITHREADING */
 #endif /* ZEPHYR_KERNEL_INCLUDE_THREAD_H_ */

View file

@@ -54,7 +54,7 @@
 #define Z_FREE_VM_START Z_BOOT_PHYS_TO_VIRT(Z_PHYS_RAM_END)
 #else
 #define Z_FREE_VM_START Z_KERNEL_VIRT_END
-#endif
+#endif /* CONFIG_ARCH_MAPS_ALL_RAM */
 /*
 * Macros and data structures for physical page frame accounting,
@@ -121,7 +121,7 @@ struct z_page_frame {
 } __aligned(4);
 #else
 } __packed;
-#endif
+#endif /* CONFIG_XTENSA */
 static inline bool z_page_frame_is_pinned(struct z_page_frame *pf)
 {
@@ -237,7 +237,7 @@ void z_page_frames_dump(void);
 CONFIG_MMU_PAGE_SIZE))
 #else
 #define Z_VM_RESERVED 0
-#endif
+#endif /* CONFIG_DEMAND_PAGING */
 #ifdef CONFIG_DEMAND_PAGING
 /*

View file

@@ -16,7 +16,7 @@
 # define _priq_run_best _priq_dumb_mask_best
 # else
 # define _priq_run_best z_priq_dumb_best
-# endif
+# endif /* CONFIG_SCHED_CPU_MASK */
 /* Scalable Scheduling */
 #elif defined(CONFIG_SCHED_SCALABLE)
 #define _priq_run_add z_priq_rb_add

View file

@@ -73,7 +73,7 @@ static inline void z_add_thread_timeout(struct k_thread *thread, k_timeout_t tic
 ARG_UNUSED(ticks);
 }
-#endif
+#endif /* CONFIG_SYS_CLOCK_EXISTS */
 #ifdef __cplusplus
 }

View file

@@ -85,7 +85,7 @@ static void z_init_static_threads(void)
 pos->thread);
 }
 }
-#endif
+#endif /* CONFIG_USERSPACE */
 /*
 * Non-legacy static threads may be started immediately or
@@ -128,12 +128,12 @@ enum init_level {
 INIT_LEVEL_APPLICATION,
 #ifdef CONFIG_SMP
 INIT_LEVEL_SMP,
-#endif
+#endif /* CONFIG_SMP */
 };
 #ifdef CONFIG_SMP
 extern const struct init_entry __init_SMP_start[];
-#endif
+#endif /* CONFIG_SMP */
 /*
 * storage space for the interrupt stack
@@ -173,8 +173,8 @@ static struct k_obj_core_stats_desc kernel_stats_desc = {
 .disable = NULL,
 .enable = NULL,
 };
-#endif
+#endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */
-#endif
+#endif /* CONFIG_OBJ_CORE_SYSTEM */
 /* LCOV_EXCL_START
 *
@@ -245,7 +245,7 @@ void z_bss_zero(void)
 #ifdef CONFIG_COVERAGE_GCOV
 z_early_memset(&__gcov_bss_start, 0,
 ((uintptr_t) &__gcov_bss_end - (uintptr_t) &__gcov_bss_start));
-#endif
+#endif /* CONFIG_COVERAGE_GCOV */
 }
 #ifdef CONFIG_LINKER_USE_BOOT_SECTION
@@ -279,7 +279,7 @@ void z_bss_zero_boot(void)
 __boot_func
 #else
 __pinned_func
-#endif
+#endif /* CONFIG_LINKER_USE_BOOT_SECTION */
 void z_bss_zero_pinned(void)
 {
 z_early_memset(&lnkr_pinned_bss_start, 0,
@@ -293,7 +293,7 @@ void z_bss_zero_pinned(void)
 extern __thread volatile uintptr_t __stack_chk_guard;
 #else
 extern volatile uintptr_t __stack_chk_guard;
-#endif
+#endif /* CONFIG_STACK_CANARIES_TLS */
 #endif /* CONFIG_STACK_CANARIES */
 /* LCOV_EXCL_STOP */
@@ -322,7 +322,7 @@ static void z_sys_init_run_level(enum init_level level)
 __init_APPLICATION_start,
 #ifdef CONFIG_SMP
 __init_SMP_start,
-#endif
+#endif /* CONFIG_SMP */
 /* End marker */
 __init_end,
 };
@@ -391,13 +391,13 @@ static void bg_thread_main(void *unused1, void *unused2, void *unused3)
 z_sys_init_run_level(INIT_LEVEL_POST_KERNEL);
 #if CONFIG_STACK_POINTER_RANDOM
 z_stack_adjust_initialized = 1;
-#endif
+#endif /* CONFIG_STACK_POINTER_RANDOM */
 boot_banner();
 #if defined(CONFIG_CPP)
 void z_cpp_init_static(void);
 z_cpp_init_static();
-#endif
+#endif /* CONFIG_CPP */
 /* Final init level before app starts */
 z_sys_init_run_level(INIT_LEVEL_APPLICATION);
@@ -406,14 +406,14 @@ static void bg_thread_main(void *unused1, void *unused2, void *unused3)
 #ifdef CONFIG_KERNEL_COHERENCE
 __ASSERT_NO_MSG(arch_mem_coherent(&_kernel));
-#endif
+#endif /* CONFIG_KERNEL_COHERENCE */
 #ifdef CONFIG_SMP
 if (!IS_ENABLED(CONFIG_SMP_BOOT_DELAY)) {
 z_smp_init();
 }
 z_sys_init_run_level(INIT_LEVEL_SMP);
-#endif
+#endif /* CONFIG_SMP */
 #ifdef CONFIG_MMU
 z_mem_manage_boot_finish();
@@ -429,7 +429,7 @@ static void bg_thread_main(void *unused1, void *unused2, void *unused3)
 #ifdef CONFIG_COVERAGE_DUMP
 /* Dump coverage data once the main() has exited. */
 gcov_coverage_dump();
-#endif
+#endif /* CONFIG_COVERAGE_DUMP */
 } /* LCOV_EXCL_LINE ... because we just dumped final coverage data */
 #if defined(CONFIG_MULTITHREADING)
@@ -446,7 +446,7 @@ static void init_idle_thread(int i)
 snprintk(tname, 8, "idle %02d", i);
 #else
 char *tname = "idle";
-#endif
+#endif /* CONFIG_MP_MAX_NUM_CPUS */
 #else
 char *tname = NULL;
@@ -460,7 +460,7 @@ static void init_idle_thread(int i)
 #ifdef CONFIG_SMP
 thread->base.is_idle = 1U;
-#endif
+#endif /* CONFIG_SMP */
 }
 void z_init_cpu(int id)
@@ -524,7 +524,7 @@ static char *prepare_multithreading(void)
 * to work as intended
 */
 _kernel.ready_q.cache = &z_main_thread;
-#endif
+#endif /* CONFIG_SMP */
 stack_ptr = z_setup_new_thread(&z_main_thread, z_main_stack,
 CONFIG_MAIN_STACK_SIZE, bg_thread_main,
 NULL, NULL, NULL,
@@ -551,7 +551,7 @@ static FUNC_NORETURN void switch_to_main_thread(char *stack_ptr)
 * will never be rescheduled in.
 */
 z_swap_unlocked();
-#endif
+#endif /* CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN */
 CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
 }
 #endif /* CONFIG_MULTITHREADING */
@@ -573,7 +573,7 @@ void __weak z_early_rand_get(uint8_t *buf, size_t length)
 buf += rc;
 }
 }
-#endif
+#endif /* CONFIG_ENTROPY_HAS_DRIVER */
 while (length > 0) {
 uint32_t val;
@@ -621,7 +621,7 @@ FUNC_NORETURN void z_cstart(void)
 struct k_thread dummy_thread;
 z_dummy_thread_init(&dummy_thread);
-#endif
+#endif /* CONFIG_MULTITHREADING */
 /* do any necessary initialization of static devices */
 z_device_state_init();
@@ -640,7 +640,7 @@ FUNC_NORETURN void z_cstart(void)
 #ifdef CONFIG_TIMING_FUNCTIONS_NEED_AT_BOOT
 timing_init();
 timing_start();
-#endif
+#endif /* CONFIG_TIMING_FUNCTIONS_NEED_AT_BOOT */
 #ifdef CONFIG_MULTITHREADING
 switch_to_main_thread(prepare_multithreading());
@@ -661,7 +661,7 @@ FUNC_NORETURN void z_cstart(void)
 while (true) {
 }
 /* LCOV_EXCL_STOP */
-#endif
+#endif /* ARCH_SWITCH_TO_MAIN_NO_MULTITHREADING */
 #endif /* CONFIG_MULTITHREADING */
 /*
@@ -683,7 +683,7 @@ static int init_cpu_obj_core_list(void)
 #ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
 k_obj_type_stats_init(&obj_type_cpu, &cpu_stats_desc);
-#endif
+#endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */
 return 0;
 }
@@ -697,13 +697,13 @@ static int init_kernel_obj_core_list(void)
 #ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
 k_obj_type_stats_init(&obj_type_kernel, &kernel_stats_desc);
-#endif
+#endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */
 k_obj_core_init_and_link(K_OBJ_CORE(&_kernel), &obj_type_kernel);
 #ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
 k_obj_core_stats_register(K_OBJ_CORE(&_kernel), _kernel.usage,
 sizeof(_kernel.usage));
-#endif
+#endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */
 return 0;
 }
@@ -713,4 +713,4 @@ SYS_INIT(init_cpu_obj_core_list, PRE_KERNEL_1,
 SYS_INIT(init_kernel_obj_core_list, PRE_KERNEL_1,
 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
-#endif
+#endif /* CONFIG_OBJ_CORE_SYSTEM */

View file

@@ -19,5 +19,5 @@ void irq_offload(irq_offload_routine_t routine, const void *parameter)
 k_sem_take(&offload_sem, K_FOREVER);
 arch_irq_offload(routine, parameter);
 k_sem_give(&offload_sem);
-#endif
+#endif /* CONFIG_IRQ_OFFLOAD_NESTED */
 }

View file

@@ -22,7 +22,7 @@
 #ifdef CONFIG_OBJ_CORE_MAILBOX
 static struct k_obj_type obj_type_mailbox;
-#endif
+#endif /* CONFIG_OBJ_CORE_MAILBOX */
 #if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
@@ -97,7 +97,7 @@ void k_mbox_init(struct k_mbox *mbox)
 #ifdef CONFIG_OBJ_CORE_MAILBOX
 k_obj_core_init_and_link(K_OBJ_CORE(mbox), &obj_type_mailbox);
-#endif
+#endif /* CONFIG_OBJ_CORE_MAILBOX */
 SYS_PORT_TRACING_OBJ_INIT(k_mbox, mbox);
 }
@@ -189,7 +189,7 @@ static void mbox_message_dispose(struct k_mbox_msg *rx_msg)
 }
 return;
 }
-#endif
+#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS */
 /* synchronous send: wake up sending thread */
 arch_thread_return_value_set(sending_thread, 0);
@@ -256,7 +256,7 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
 z_reschedule(&mbox->lock, key);
 return 0;
 }
-#endif
+#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS */
 SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mbox, message_put, mbox, timeout);
 /*
@@ -286,7 +286,7 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
 k_spin_unlock(&mbox->lock, key);
 return 0;
 }
-#endif
+#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS */
 SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mbox, message_put, mbox, timeout);
 /* synchronous send: sender waits on tx queue for receiver or timeout */
@@ -335,7 +335,7 @@ void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
 (void)mbox_message_put(mbox, &async->tx_msg, K_FOREVER);
 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, async_put, mbox, sem);
 }
-#endif
+#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS */
 void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer)
 {
@@ -463,4 +463,4 @@ static int init_mailbox_obj_core_list(void)
 SYS_INIT(init_mailbox_obj_core_list, PRE_KERNEL_1,
 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
-#endif
+#endif /* CONFIG_OBJ_CORE_MAILBOX */


@ -43,7 +43,7 @@ static bool check_add_partition(struct k_mem_domain *domain,
part->start); part->start);
return false; return false;
} }
#endif #endif /* CONFIG_EXECUTE_XOR_WRITE */
if (part->size == 0U) { if (part->size == 0U) {
LOG_ERR("zero sized partition at %p with base 0x%lx", LOG_ERR("zero sized partition at %p with base 0x%lx",
@ -124,7 +124,7 @@ int k_mem_domain_init(struct k_mem_domain *domain, uint8_t num_parts,
ret = -ENOMEM; ret = -ENOMEM;
goto unlock_out; goto unlock_out;
} }
#endif #endif /* CONFIG_ARCH_MEM_DOMAIN_DATA */
if (num_parts != 0U) { if (num_parts != 0U) {
uint32_t i; uint32_t i;
@ -145,7 +145,7 @@ int k_mem_domain_init(struct k_mem_domain *domain, uint8_t num_parts,
CHECKIF(ret2 != 0) { CHECKIF(ret2 != 0) {
ret = ret2; ret = ret2;
} }
#endif #endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */
} }
} }
@ -200,7 +200,7 @@ int k_mem_domain_add_partition(struct k_mem_domain *domain,
#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API #ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
ret = arch_mem_domain_partition_add(domain, p_idx); ret = arch_mem_domain_partition_add(domain, p_idx);
#endif #endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */
unlock_out: unlock_out:
k_spin_unlock(&z_mem_domain_lock, key); k_spin_unlock(&z_mem_domain_lock, key);
@ -242,7 +242,7 @@ int k_mem_domain_remove_partition(struct k_mem_domain *domain,
#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API #ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
ret = arch_mem_domain_partition_remove(domain, p_idx); ret = arch_mem_domain_partition_remove(domain, p_idx);
#endif #endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */
/* A zero-sized partition denotes it's a free partition */ /* A zero-sized partition denotes it's a free partition */
domain->partitions[p_idx].size = 0U; domain->partitions[p_idx].size = 0U;
@ -271,7 +271,7 @@ static int add_thread_locked(struct k_mem_domain *domain,
#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API #ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
ret = arch_mem_domain_thread_add(thread); ret = arch_mem_domain_thread_add(thread);
#endif #endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */
return ret; return ret;
} }
@ -287,7 +287,7 @@ static int remove_thread_locked(struct k_thread *thread)
#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API #ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
ret = arch_mem_domain_thread_remove(thread); ret = arch_mem_domain_thread_remove(thread);
#endif #endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */
return ret; return ret;
} }


@ -55,7 +55,7 @@ static int k_mem_slab_stats_query(struct k_obj_core *obj_core, void *stats)
ptr->max_allocated_bytes = slab->info.max_used * slab->info.block_size; ptr->max_allocated_bytes = slab->info.max_used * slab->info.block_size;
#else #else
ptr->max_allocated_bytes = 0; ptr->max_allocated_bytes = 0;
#endif #endif /* CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION */
k_spin_unlock(&slab->lock, key); k_spin_unlock(&slab->lock, key);
return 0; return 0;
@ -73,7 +73,7 @@ static int k_mem_slab_stats_reset(struct k_obj_core *obj_core)
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
slab->info.max_used = slab->info.num_used; slab->info.max_used = slab->info.num_used;
#endif #endif /* CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION */
k_spin_unlock(&slab->lock, key); k_spin_unlock(&slab->lock, key);
@ -89,8 +89,8 @@ static struct k_obj_core_stats_desc mem_slab_stats_desc = {
.disable = NULL, .disable = NULL,
.enable = NULL, .enable = NULL,
}; };
#endif #endif /* CONFIG_OBJ_CORE_STATS_MEM_SLAB */
#endif #endif /* CONFIG_OBJ_CORE_MEM_SLAB */
/** /**
* @brief Initialize kernel memory slab subsystem. * @brief Initialize kernel memory slab subsystem.
@ -141,8 +141,8 @@ static int init_mem_slab_obj_core_list(void)
offsetof(struct k_mem_slab, obj_core)); offsetof(struct k_mem_slab, obj_core));
#ifdef CONFIG_OBJ_CORE_STATS_MEM_SLAB #ifdef CONFIG_OBJ_CORE_STATS_MEM_SLAB
k_obj_type_stats_init(&obj_type_mem_slab, &mem_slab_stats_desc); k_obj_type_stats_init(&obj_type_mem_slab, &mem_slab_stats_desc);
#endif #endif /* CONFIG_OBJ_CORE_STATS_MEM_SLAB */
#endif #endif /* CONFIG_OBJ_CORE_MEM_SLAB */
/* Initialize statically defined mem_slabs */ /* Initialize statically defined mem_slabs */
@ -158,8 +158,8 @@ static int init_mem_slab_obj_core_list(void)
#ifdef CONFIG_OBJ_CORE_STATS_MEM_SLAB #ifdef CONFIG_OBJ_CORE_STATS_MEM_SLAB
k_obj_core_stats_register(K_OBJ_CORE(slab), &slab->info, k_obj_core_stats_register(K_OBJ_CORE(slab), &slab->info,
sizeof(struct k_mem_slab_info)); sizeof(struct k_mem_slab_info));
#endif #endif /* CONFIG_OBJ_CORE_STATS_MEM_SLAB */
#endif #endif /* CONFIG_OBJ_CORE_MEM_SLAB */
} }
out: out:
@ -182,7 +182,7 @@ int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
slab->info.max_used = 0U; slab->info.max_used = 0U;
#endif #endif /* CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION */
rc = create_free_list(slab); rc = create_free_list(slab);
if (rc < 0) { if (rc < 0) {
@ -191,11 +191,11 @@ int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
#ifdef CONFIG_OBJ_CORE_MEM_SLAB #ifdef CONFIG_OBJ_CORE_MEM_SLAB
k_obj_core_init_and_link(K_OBJ_CORE(slab), &obj_type_mem_slab); k_obj_core_init_and_link(K_OBJ_CORE(slab), &obj_type_mem_slab);
#endif #endif /* CONFIG_OBJ_CORE_MEM_SLAB */
#ifdef CONFIG_OBJ_CORE_STATS_MEM_SLAB #ifdef CONFIG_OBJ_CORE_STATS_MEM_SLAB
k_obj_core_stats_register(K_OBJ_CORE(slab), &slab->info, k_obj_core_stats_register(K_OBJ_CORE(slab), &slab->info,
sizeof(struct k_mem_slab_info)); sizeof(struct k_mem_slab_info));
#endif #endif /* CONFIG_OBJ_CORE_STATS_MEM_SLAB */
z_waitq_init(&slab->wait_q); z_waitq_init(&slab->wait_q);
k_object_init(slab); k_object_init(slab);
@ -221,7 +221,7 @@ int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
slab->info.max_used = MAX(slab->info.num_used, slab->info.max_used = MAX(slab->info.num_used,
slab->info.max_used); slab->info.max_used);
#endif #endif /* CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION */
result = 0; result = 0;
} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT) || } else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT) ||
@ -298,7 +298,7 @@ int k_mem_slab_runtime_stats_get(struct k_mem_slab *slab, struct sys_memory_stat
slab->info.block_size; slab->info.block_size;
#else #else
stats->max_allocated_bytes = 0; stats->max_allocated_bytes = 0;
#endif #endif /* CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION */
k_spin_unlock(&slab->lock, key); k_spin_unlock(&slab->lock, key);
@ -320,4 +320,4 @@ int k_mem_slab_runtime_stats_reset_max(struct k_mem_slab *slab)
return 0; return 0;
} }
#endif #endif /* CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION */


@ -119,7 +119,7 @@ void k_thread_system_pool_assign(struct k_thread *thread)
} }
#else #else
#define _SYSTEM_HEAP NULL #define _SYSTEM_HEAP NULL
#endif #endif /* K_HEAP_MEM_POOL_SIZE */
void *z_thread_aligned_alloc(size_t align, size_t size) void *z_thread_aligned_alloc(size_t align, size_t size)
{ {


@ -24,7 +24,7 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
#ifdef CONFIG_DEMAND_PAGING #ifdef CONFIG_DEMAND_PAGING
#include <zephyr/kernel/mm/demand_paging.h> #include <zephyr/kernel/mm/demand_paging.h>
#endif #endif /* CONFIG_DEMAND_PAGING */
/* /*
* General terminology: * General terminology:
@ -76,7 +76,7 @@ static bool page_frames_initialized;
#define COLOR(x) printk(_CONCAT(ANSI_, x)) #define COLOR(x) printk(_CONCAT(ANSI_, x))
#else #else
#define COLOR(x) do { } while (false) #define COLOR(x) do { } while (false)
#endif #endif /* COLOR_PAGE_FRAMES */
/* LCOV_EXCL_START */ /* LCOV_EXCL_START */
static void page_frame_dump(struct z_page_frame *pf) static void page_frame_dump(struct z_page_frame *pf)
@ -729,7 +729,7 @@ size_t k_mem_free_get(void)
} }
#else #else
ret = z_free_page_count; ret = z_free_page_count;
#endif #endif /* CONFIG_DEMAND_PAGING */
k_spin_unlock(&z_mm_lock, key); k_spin_unlock(&z_mm_lock, key);
return ret * (size_t)CONFIG_MMU_PAGE_SIZE; return ret * (size_t)CONFIG_MMU_PAGE_SIZE;
@ -767,7 +767,7 @@ void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size, uint32_t flags)
#ifndef CONFIG_KERNEL_DIRECT_MAP #ifndef CONFIG_KERNEL_DIRECT_MAP
__ASSERT(!(flags & K_MEM_DIRECT_MAP), "The direct-map is not enabled"); __ASSERT(!(flags & K_MEM_DIRECT_MAP), "The direct-map is not enabled");
#endif #endif /* CONFIG_KERNEL_DIRECT_MAP */
addr_offset = k_mem_region_align(&aligned_phys, &aligned_size, addr_offset = k_mem_region_align(&aligned_phys, &aligned_size,
phys, size, phys, size,
CONFIG_MMU_PAGE_SIZE); CONFIG_MMU_PAGE_SIZE);
@ -959,12 +959,12 @@ void z_mem_manage_init(void)
* boot process. Will be un-pinned once boot process completes. * boot process. Will be un-pinned once boot process completes.
*/ */
mark_linker_section_pinned(lnkr_boot_start, lnkr_boot_end, true); mark_linker_section_pinned(lnkr_boot_start, lnkr_boot_end, true);
#endif #endif /* CONFIG_LINKER_USE_BOOT_SECTION */
#ifdef CONFIG_LINKER_USE_PINNED_SECTION #ifdef CONFIG_LINKER_USE_PINNED_SECTION
/* Pin the page frames corresponding to the pinned symbols */ /* Pin the page frames corresponding to the pinned symbols */
mark_linker_section_pinned(lnkr_pinned_start, lnkr_pinned_end, true); mark_linker_section_pinned(lnkr_pinned_start, lnkr_pinned_end, true);
#endif #endif /* CONFIG_LINKER_USE_PINNED_SECTION */
/* Any remaining pages that aren't mapped, reserved, or pinned get /* Any remaining pages that aren't mapped, reserved, or pinned get
* added to the free pages list * added to the free pages list
@ -979,10 +979,10 @@ void z_mem_manage_init(void)
#ifdef CONFIG_DEMAND_PAGING #ifdef CONFIG_DEMAND_PAGING
#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM #ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
z_paging_histogram_init(); z_paging_histogram_init();
#endif #endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
k_mem_paging_backing_store_init(); k_mem_paging_backing_store_init();
k_mem_paging_eviction_init(); k_mem_paging_eviction_init();
#endif #endif /* CONFIG_DEMAND_PAGING */
#if __ASSERT_ON #if __ASSERT_ON
page_frames_initialized = true; page_frames_initialized = true;
#endif #endif
@ -996,7 +996,7 @@ void z_mem_manage_init(void)
* memory to be cleared. * memory to be cleared.
*/ */
z_bss_zero(); z_bss_zero();
#endif #endif /* CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */
} }
void z_mem_manage_boot_finish(void) void z_mem_manage_boot_finish(void)
@ -1006,7 +1006,7 @@ void z_mem_manage_boot_finish(void)
* as they don't need to be in memory all the time anymore. * as they don't need to be in memory all the time anymore.
*/ */
mark_linker_section_pinned(lnkr_boot_start, lnkr_boot_end, false); mark_linker_section_pinned(lnkr_boot_start, lnkr_boot_end, false);
#endif #endif /* CONFIG_LINKER_USE_BOOT_SECTION */
} }
#ifdef CONFIG_DEMAND_PAGING #ifdef CONFIG_DEMAND_PAGING
@ -1016,7 +1016,7 @@ struct k_mem_paging_stats_t paging_stats;
extern struct k_mem_paging_histogram_t z_paging_histogram_eviction; extern struct k_mem_paging_histogram_t z_paging_histogram_eviction;
extern struct k_mem_paging_histogram_t z_paging_histogram_backing_store_page_in; extern struct k_mem_paging_histogram_t z_paging_histogram_backing_store_page_in;
extern struct k_mem_paging_histogram_t z_paging_histogram_backing_store_page_out; extern struct k_mem_paging_histogram_t z_paging_histogram_backing_store_page_out;
#endif #endif /* CONFIG_DEMAND_PAGING_STATS */
static inline void do_backing_store_page_in(uintptr_t location) static inline void do_backing_store_page_in(uintptr_t location)
{ {
@ -1162,7 +1162,7 @@ static int page_frame_prepare_locked(struct z_page_frame *pf, bool *dirty_ptr,
__ASSERT(!z_page_frame_is_busy(pf), "page frame 0x%lx is already busy", __ASSERT(!z_page_frame_is_busy(pf), "page frame 0x%lx is already busy",
phys); phys);
pf->flags |= Z_PAGE_FRAME_BUSY; pf->flags |= Z_PAGE_FRAME_BUSY;
#endif #endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */
/* Update dirty parameter, since we set to true if it wasn't backed /* Update dirty parameter, since we set to true if it wasn't backed
* even if otherwise clean * even if otherwise clean
*/ */
@ -1320,7 +1320,7 @@ static inline void paging_stats_faults_inc(struct k_thread *faulting_thread,
} }
#else #else
ARG_UNUSED(faulting_thread); ARG_UNUSED(faulting_thread);
#endif #endif /* CONFIG_DEMAND_PAGING_THREAD_STATS */
#ifndef CONFIG_DEMAND_PAGING_ALLOW_IRQ #ifndef CONFIG_DEMAND_PAGING_ALLOW_IRQ
if (k_is_in_isr()) { if (k_is_in_isr()) {
@ -1328,7 +1328,7 @@ static inline void paging_stats_faults_inc(struct k_thread *faulting_thread,
#ifdef CONFIG_DEMAND_PAGING_THREAD_STATS #ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
faulting_thread->paging_stats.pagefaults.in_isr++; faulting_thread->paging_stats.pagefaults.in_isr++;
#endif #endif /* CONFIG_DEMAND_PAGING_THREAD_STATS */
} }
#endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */ #endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */
#endif /* CONFIG_DEMAND_PAGING_STATS */ #endif /* CONFIG_DEMAND_PAGING_STATS */


@ -27,7 +27,7 @@
#ifdef CONFIG_OBJ_CORE_MSGQ #ifdef CONFIG_OBJ_CORE_MSGQ
static struct k_obj_type obj_type_msgq; static struct k_obj_type obj_type_msgq;
#endif #endif /* CONFIG_OBJ_CORE_MSGQ */
#ifdef CONFIG_POLL #ifdef CONFIG_POLL
static inline void handle_poll_events(struct k_msgq *msgq, uint32_t state) static inline void handle_poll_events(struct k_msgq *msgq, uint32_t state)
@ -55,7 +55,7 @@ void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size,
#ifdef CONFIG_OBJ_CORE_MSGQ #ifdef CONFIG_OBJ_CORE_MSGQ
k_obj_core_init_and_link(K_OBJ_CORE(msgq), &obj_type_msgq); k_obj_core_init_and_link(K_OBJ_CORE(msgq), &obj_type_msgq);
#endif #endif /* CONFIG_OBJ_CORE_MSGQ */
SYS_PORT_TRACING_OBJ_INIT(k_msgq, msgq); SYS_PORT_TRACING_OBJ_INIT(k_msgq, msgq);
@ -98,7 +98,7 @@ int z_vrfy_k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size,
return z_impl_k_msgq_alloc_init(msgq, msg_size, max_msgs); return z_impl_k_msgq_alloc_init(msgq, msg_size, max_msgs);
} }
#include <syscalls/k_msgq_alloc_init_mrsh.c> #include <syscalls/k_msgq_alloc_init_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
int k_msgq_cleanup(struct k_msgq *msgq) int k_msgq_cleanup(struct k_msgq *msgq)
{ {
@ -193,7 +193,7 @@ static inline int z_vrfy_k_msgq_put(struct k_msgq *msgq, const void *data,
return z_impl_k_msgq_put(msgq, data, timeout); return z_impl_k_msgq_put(msgq, data, timeout);
} }
#include <syscalls/k_msgq_put_mrsh.c> #include <syscalls/k_msgq_put_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
void z_impl_k_msgq_get_attrs(struct k_msgq *msgq, struct k_msgq_attrs *attrs) void z_impl_k_msgq_get_attrs(struct k_msgq *msgq, struct k_msgq_attrs *attrs)
{ {
@ -211,7 +211,7 @@ static inline void z_vrfy_k_msgq_get_attrs(struct k_msgq *msgq,
z_impl_k_msgq_get_attrs(msgq, attrs); z_impl_k_msgq_get_attrs(msgq, attrs);
} }
#include <syscalls/k_msgq_get_attrs_mrsh.c> #include <syscalls/k_msgq_get_attrs_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout) int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout)
{ {
@ -291,7 +291,7 @@ static inline int z_vrfy_k_msgq_get(struct k_msgq *msgq, void *data,
return z_impl_k_msgq_get(msgq, data, timeout); return z_impl_k_msgq_get(msgq, data, timeout);
} }
#include <syscalls/k_msgq_get_mrsh.c> #include <syscalls/k_msgq_get_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
int z_impl_k_msgq_peek(struct k_msgq *msgq, void *data) int z_impl_k_msgq_peek(struct k_msgq *msgq, void *data)
{ {
@ -325,7 +325,7 @@ static inline int z_vrfy_k_msgq_peek(struct k_msgq *msgq, void *data)
return z_impl_k_msgq_peek(msgq, data); return z_impl_k_msgq_peek(msgq, data);
} }
#include <syscalls/k_msgq_peek_mrsh.c> #include <syscalls/k_msgq_peek_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
int z_impl_k_msgq_peek_at(struct k_msgq *msgq, void *data, uint32_t idx) int z_impl_k_msgq_peek_at(struct k_msgq *msgq, void *data, uint32_t idx)
{ {
@ -371,7 +371,7 @@ static inline int z_vrfy_k_msgq_peek_at(struct k_msgq *msgq, void *data, uint32_
return z_impl_k_msgq_peek_at(msgq, data, idx); return z_impl_k_msgq_peek_at(msgq, data, idx);
} }
#include <syscalls/k_msgq_peek_at_mrsh.c> #include <syscalls/k_msgq_peek_at_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
void z_impl_k_msgq_purge(struct k_msgq *msgq) void z_impl_k_msgq_purge(struct k_msgq *msgq)
{ {
@ -416,7 +416,7 @@ static inline uint32_t z_vrfy_k_msgq_num_used_get(struct k_msgq *msgq)
} }
#include <syscalls/k_msgq_num_used_get_mrsh.c> #include <syscalls/k_msgq_num_used_get_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
#ifdef CONFIG_OBJ_CORE_MSGQ #ifdef CONFIG_OBJ_CORE_MSGQ
static int init_msgq_obj_core_list(void) static int init_msgq_obj_core_list(void)
@ -438,4 +438,4 @@ static int init_msgq_obj_core_list(void)
SYS_INIT(init_msgq_obj_core_list, PRE_KERNEL_1, SYS_INIT(init_msgq_obj_core_list, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_OBJECTS); CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif #endif /* CONFIG_OBJ_CORE_MSGQ */


@ -49,7 +49,7 @@ static struct k_spinlock lock;
#ifdef CONFIG_OBJ_CORE_MUTEX #ifdef CONFIG_OBJ_CORE_MUTEX
static struct k_obj_type obj_type_mutex; static struct k_obj_type obj_type_mutex;
#endif #endif /* CONFIG_OBJ_CORE_MUTEX */
int z_impl_k_mutex_init(struct k_mutex *mutex) int z_impl_k_mutex_init(struct k_mutex *mutex)
{ {
@ -62,7 +62,7 @@ int z_impl_k_mutex_init(struct k_mutex *mutex)
#ifdef CONFIG_OBJ_CORE_MUTEX #ifdef CONFIG_OBJ_CORE_MUTEX
k_obj_core_init_and_link(K_OBJ_CORE(mutex), &obj_type_mutex); k_obj_core_init_and_link(K_OBJ_CORE(mutex), &obj_type_mutex);
#endif #endif /* CONFIG_OBJ_CORE_MUTEX */
SYS_PORT_TRACING_OBJ_INIT(k_mutex, mutex, 0); SYS_PORT_TRACING_OBJ_INIT(k_mutex, mutex, 0);
@ -76,7 +76,7 @@ static inline int z_vrfy_k_mutex_init(struct k_mutex *mutex)
return z_impl_k_mutex_init(mutex); return z_impl_k_mutex_init(mutex);
} }
#include <syscalls/k_mutex_init_mrsh.c> #include <syscalls/k_mutex_init_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
static int32_t new_prio_for_inheritance(int32_t target, int32_t limit) static int32_t new_prio_for_inheritance(int32_t target, int32_t limit)
{ {
@ -205,7 +205,7 @@ static inline int z_vrfy_k_mutex_lock(struct k_mutex *mutex,
return z_impl_k_mutex_lock(mutex, timeout); return z_impl_k_mutex_lock(mutex, timeout);
} }
#include <syscalls/k_mutex_lock_mrsh.c> #include <syscalls/k_mutex_lock_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
int z_impl_k_mutex_unlock(struct k_mutex *mutex) int z_impl_k_mutex_unlock(struct k_mutex *mutex)
{ {
@ -289,7 +289,7 @@ static inline int z_vrfy_k_mutex_unlock(struct k_mutex *mutex)
return z_impl_k_mutex_unlock(mutex); return z_impl_k_mutex_unlock(mutex);
} }
#include <syscalls/k_mutex_unlock_mrsh.c> #include <syscalls/k_mutex_unlock_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
#ifdef CONFIG_OBJ_CORE_MUTEX #ifdef CONFIG_OBJ_CORE_MUTEX
static int init_mutex_obj_core_list(void) static int init_mutex_obj_core_list(void)
@ -310,4 +310,4 @@ static int init_mutex_obj_core_list(void)
SYS_INIT(init_mutex_obj_core_list, PRE_KERNEL_1, SYS_INIT(init_mutex_obj_core_list, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_OBJECTS); CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif #endif /* CONFIG_OBJ_CORE_MUTEX */


@ -28,7 +28,7 @@ void k_obj_core_init(struct k_obj_core *obj_core, struct k_obj_type *type)
obj_core->type = type; obj_core->type = type;
#ifdef CONFIG_OBJ_CORE_STATS #ifdef CONFIG_OBJ_CORE_STATS
obj_core->stats = NULL; obj_core->stats = NULL;
#endif #endif /* CONFIG_OBJ_CORE_STATS */
} }
void k_obj_core_link(struct k_obj_core *obj_core) void k_obj_core_link(struct k_obj_core *obj_core)
@ -324,4 +324,4 @@ int k_obj_core_stats_enable(struct k_obj_core *obj_core)
return rv; return rv;
} }
#endif #endif /* CONFIG_OBJ_CORE_STATS */


@ -33,7 +33,7 @@ static int pipe_get_internal(k_spinlock_key_t key, struct k_pipe *pipe,
k_timeout_t timeout); k_timeout_t timeout);
#ifdef CONFIG_OBJ_CORE_PIPE #ifdef CONFIG_OBJ_CORE_PIPE
static struct k_obj_type obj_type_pipe; static struct k_obj_type obj_type_pipe;
#endif #endif /* CONFIG_OBJ_CORE_PIPE */
void k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size) void k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size)
@ -52,12 +52,12 @@ void k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size)
#if defined(CONFIG_POLL) #if defined(CONFIG_POLL)
sys_dlist_init(&pipe->poll_events); sys_dlist_init(&pipe->poll_events);
#endif #endif /* CONFIG_POLL */
k_object_init(pipe); k_object_init(pipe);
#ifdef CONFIG_OBJ_CORE_PIPE #ifdef CONFIG_OBJ_CORE_PIPE
k_obj_core_init_and_link(K_OBJ_CORE(pipe), &obj_type_pipe); k_obj_core_init_and_link(K_OBJ_CORE(pipe), &obj_type_pipe);
#endif #endif /* CONFIG_OBJ_CORE_PIPE */
} }
int z_impl_k_pipe_alloc_init(struct k_pipe *pipe, size_t size) int z_impl_k_pipe_alloc_init(struct k_pipe *pipe, size_t size)
@ -94,7 +94,7 @@ static inline int z_vrfy_k_pipe_alloc_init(struct k_pipe *pipe, size_t size)
return z_impl_k_pipe_alloc_init(pipe, size); return z_impl_k_pipe_alloc_init(pipe, size);
} }
#include <syscalls/k_pipe_alloc_init_mrsh.c> #include <syscalls/k_pipe_alloc_init_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
static inline void handle_poll_events(struct k_pipe *pipe) static inline void handle_poll_events(struct k_pipe *pipe)
{ {
@ -102,7 +102,7 @@ static inline void handle_poll_events(struct k_pipe *pipe)
z_handle_obj_poll_events(&pipe->poll_events, K_POLL_STATE_PIPE_DATA_AVAILABLE); z_handle_obj_poll_events(&pipe->poll_events, K_POLL_STATE_PIPE_DATA_AVAILABLE);
#else #else
ARG_UNUSED(pipe); ARG_UNUSED(pipe);
#endif #endif /* CONFIG_POLL */
} }
void z_impl_k_pipe_flush(struct k_pipe *pipe) void z_impl_k_pipe_flush(struct k_pipe *pipe)
@ -127,7 +127,7 @@ void z_vrfy_k_pipe_flush(struct k_pipe *pipe)
z_impl_k_pipe_flush(pipe); z_impl_k_pipe_flush(pipe);
} }
#include <syscalls/k_pipe_flush_mrsh.c> #include <syscalls/k_pipe_flush_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
void z_impl_k_pipe_buffer_flush(struct k_pipe *pipe) void z_impl_k_pipe_buffer_flush(struct k_pipe *pipe)
{ {
@ -154,7 +154,7 @@ void z_vrfy_k_pipe_buffer_flush(struct k_pipe *pipe)
z_impl_k_pipe_buffer_flush(pipe); z_impl_k_pipe_buffer_flush(pipe);
} }
#endif #endif /* CONFIG_USERSPACE */
int k_pipe_cleanup(struct k_pipe *pipe) int k_pipe_cleanup(struct k_pipe *pipe)
{ {
@ -526,7 +526,7 @@ int z_vrfy_k_pipe_put(struct k_pipe *pipe, const void *data,
timeout); timeout);
} }
#include <syscalls/k_pipe_put_mrsh.c> #include <syscalls/k_pipe_put_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
static int pipe_get_internal(k_spinlock_key_t key, struct k_pipe *pipe, static int pipe_get_internal(k_spinlock_key_t key, struct k_pipe *pipe,
void *data, size_t bytes_to_read, void *data, size_t bytes_to_read,
@ -734,7 +734,7 @@ int z_vrfy_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
timeout); timeout);
} }
#include <syscalls/k_pipe_get_mrsh.c> #include <syscalls/k_pipe_get_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
size_t z_impl_k_pipe_read_avail(struct k_pipe *pipe) size_t z_impl_k_pipe_read_avail(struct k_pipe *pipe)
{ {
@ -771,7 +771,7 @@ size_t z_vrfy_k_pipe_read_avail(struct k_pipe *pipe)
return z_impl_k_pipe_read_avail(pipe); return z_impl_k_pipe_read_avail(pipe);
} }
#include <syscalls/k_pipe_read_avail_mrsh.c> #include <syscalls/k_pipe_read_avail_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
size_t z_impl_k_pipe_write_avail(struct k_pipe *pipe) size_t z_impl_k_pipe_write_avail(struct k_pipe *pipe)
{ {
@ -808,7 +808,7 @@ size_t z_vrfy_k_pipe_write_avail(struct k_pipe *pipe)
return z_impl_k_pipe_write_avail(pipe); return z_impl_k_pipe_write_avail(pipe);
} }
#include <syscalls/k_pipe_write_avail_mrsh.c> #include <syscalls/k_pipe_write_avail_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
#ifdef CONFIG_OBJ_CORE_PIPE #ifdef CONFIG_OBJ_CORE_PIPE
static int init_pipe_obj_core_list(void) static int init_pipe_obj_core_list(void)
@ -829,4 +829,4 @@ static int init_pipe_obj_core_list(void)
SYS_INIT(init_pipe_obj_core_list, PRE_KERNEL_1, SYS_INIT(init_pipe_obj_core_list, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_OBJECTS); CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif #endif /* CONFIG_OBJ_CORE_PIPE */


@ -93,7 +93,7 @@ static inline bool is_condition_met(struct k_poll_event *event, uint32_t *state)
*state = K_POLL_STATE_PIPE_DATA_AVAILABLE; *state = K_POLL_STATE_PIPE_DATA_AVAILABLE;
return true; return true;
} }
#endif #endif /* CONFIG_PIPES */
case K_POLL_TYPE_IGNORE: case K_POLL_TYPE_IGNORE:
break; break;
default: default:
@ -159,7 +159,7 @@ static inline void register_event(struct k_poll_event *event,
__ASSERT(event->pipe != NULL, "invalid pipe\n"); __ASSERT(event->pipe != NULL, "invalid pipe\n");
add_event(&event->pipe->poll_events, event, poller); add_event(&event->pipe->poll_events, event, poller);
break; break;
#endif #endif /* CONFIG_PIPES */
case K_POLL_TYPE_IGNORE: case K_POLL_TYPE_IGNORE:
/* nothing to do */ /* nothing to do */
break; break;
@ -200,7 +200,7 @@ static inline void clear_event_registration(struct k_poll_event *event)
__ASSERT(event->pipe != NULL, "invalid pipe\n"); __ASSERT(event->pipe != NULL, "invalid pipe\n");
remove_event = true; remove_event = true;
break; break;
#endif #endif /* CONFIG_PIPES */
case K_POLL_TYPE_IGNORE: case K_POLL_TYPE_IGNORE:
/* nothing to do */ /* nothing to do */
break; break;
@ -417,7 +417,7 @@ static inline int z_vrfy_k_poll(struct k_poll_event *events,
case K_POLL_TYPE_PIPE_DATA_AVAILABLE: case K_POLL_TYPE_PIPE_DATA_AVAILABLE:
K_OOPS(K_SYSCALL_OBJ(e->pipe, K_OBJ_PIPE)); K_OOPS(K_SYSCALL_OBJ(e->pipe, K_OBJ_PIPE));
break; break;
#endif #endif /* CONFIG_PIPES */
default: default:
ret = -EINVAL; ret = -EINVAL;
goto out_free; goto out_free;
@ -435,7 +435,7 @@ oops_free:
K_OOPS(1); K_OOPS(1);
} }
#include <syscalls/k_poll_mrsh.c> #include <syscalls/k_poll_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
/* must be called with interrupts locked */ /* must be called with interrupts locked */
static int signal_poll_event(struct k_poll_event *event, uint32_t state) static int signal_poll_event(struct k_poll_event *event, uint32_t state)
@ -494,7 +494,7 @@ static inline void z_vrfy_k_poll_signal_init(struct k_poll_signal *sig)
z_impl_k_poll_signal_init(sig); z_impl_k_poll_signal_init(sig);
} }
#include <syscalls/k_poll_signal_init_mrsh.c> #include <syscalls/k_poll_signal_init_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
void z_impl_k_poll_signal_reset(struct k_poll_signal *sig) void z_impl_k_poll_signal_reset(struct k_poll_signal *sig)
{ {
@ -522,7 +522,7 @@ void z_vrfy_k_poll_signal_check(struct k_poll_signal *sig,
z_impl_k_poll_signal_check(sig, signaled, result); z_impl_k_poll_signal_check(sig, signaled, result);
} }
#include <syscalls/k_poll_signal_check_mrsh.c> #include <syscalls/k_poll_signal_check_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
int z_impl_k_poll_signal_raise(struct k_poll_signal *sig, int result) int z_impl_k_poll_signal_raise(struct k_poll_signal *sig, int result)
{ {
@ -565,7 +565,7 @@ static inline void z_vrfy_k_poll_signal_reset(struct k_poll_signal *sig)
} }
#include <syscalls/k_poll_signal_reset_mrsh.c> #include <syscalls/k_poll_signal_reset_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
static void triggered_work_handler(struct k_work *work) static void triggered_work_handler(struct k_work *work)
{ {


@ -76,7 +76,7 @@ static inline void z_vrfy_k_queue_init(struct k_queue *queue)
z_impl_k_queue_init(queue); z_impl_k_queue_init(queue);
} }
#include <syscalls/k_queue_init_mrsh.c> #include <syscalls/k_queue_init_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
static void prepare_thread_to_run(struct k_thread *thread, void *data) static void prepare_thread_to_run(struct k_thread *thread, void *data)
{ {
@ -91,7 +91,7 @@ static inline void handle_poll_events(struct k_queue *queue, uint32_t state)
#else #else
ARG_UNUSED(queue); ARG_UNUSED(queue);
ARG_UNUSED(state); ARG_UNUSED(state);
#endif #endif /* CONFIG_POLL */
} }
void z_impl_k_queue_cancel_wait(struct k_queue *queue) void z_impl_k_queue_cancel_wait(struct k_queue *queue)
@ -118,7 +118,7 @@ static inline void z_vrfy_k_queue_cancel_wait(struct k_queue *queue)
z_impl_k_queue_cancel_wait(queue); z_impl_k_queue_cancel_wait(queue);
} }
#include <syscalls/k_queue_cancel_wait_mrsh.c> #include <syscalls/k_queue_cancel_wait_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
static int32_t queue_insert(struct k_queue *queue, void *prev, void *data, static int32_t queue_insert(struct k_queue *queue, void *prev, void *data,
bool alloc, bool is_append) bool alloc, bool is_append)
@ -221,7 +221,7 @@ static inline int32_t z_vrfy_k_queue_alloc_append(struct k_queue *queue,
return z_impl_k_queue_alloc_append(queue, data); return z_impl_k_queue_alloc_append(queue, data);
} }
#include <syscalls/k_queue_alloc_append_mrsh.c> #include <syscalls/k_queue_alloc_append_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
int32_t z_impl_k_queue_alloc_prepend(struct k_queue *queue, void *data) int32_t z_impl_k_queue_alloc_prepend(struct k_queue *queue, void *data)
{ {
@ -242,7 +242,7 @@ static inline int32_t z_vrfy_k_queue_alloc_prepend(struct k_queue *queue,
return z_impl_k_queue_alloc_prepend(queue, data); return z_impl_k_queue_alloc_prepend(queue, data);
} }
#include <syscalls/k_queue_alloc_prepend_mrsh.c> #include <syscalls/k_queue_alloc_prepend_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
int k_queue_append_list(struct k_queue *queue, void *head, void *tail) int k_queue_append_list(struct k_queue *queue, void *head, void *tail)
{ {
@ -454,7 +454,7 @@ static int init_fifo_obj_core_list(void)
SYS_INIT(init_fifo_obj_core_list, PRE_KERNEL_1, SYS_INIT(init_fifo_obj_core_list, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_OBJECTS); CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif #endif /* CONFIG_OBJ_CORE_FIFO */
#ifdef CONFIG_OBJ_CORE_LIFO #ifdef CONFIG_OBJ_CORE_LIFO
struct k_obj_type _obj_type_lifo; struct k_obj_type _obj_type_lifo;
@ -477,4 +477,4 @@ static int init_lifo_obj_core_list(void)
SYS_INIT(init_lifo_obj_core_list, PRE_KERNEL_1, SYS_INIT(init_lifo_obj_core_list, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_OBJECTS); CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif #endif /* CONFIG_OBJ_CORE_LIFO */


@ -50,7 +50,7 @@ static inline int is_metairq(struct k_thread *thread)
#else #else
ARG_UNUSED(thread); ARG_UNUSED(thread);
return 0; return 0;
#endif #endif /* CONFIG_NUM_METAIRQ_PRIORITIES */
} }
#if CONFIG_ASSERT #if CONFIG_ASSERT
@ -58,7 +58,7 @@ static inline bool is_thread_dummy(struct k_thread *thread)
{ {
return (thread->base.thread_state & _THREAD_DUMMY) != 0U; return (thread->base.thread_state & _THREAD_DUMMY) != 0U;
} }
#endif #endif /* CONFIG_ASSERT */
/* /*
* Return value same as e.g. memcmp * Return value same as e.g. memcmp
@ -98,7 +98,7 @@ int32_t z_sched_prio_cmp(struct k_thread *thread_1,
*/ */
return (int32_t) (d2 - d1); return (int32_t) (d2 - d1);
} }
#endif #endif /* CONFIG_SCHED_DEADLINE */
return 0; return 0;
} }
@ -154,7 +154,7 @@ static ALWAYS_INLINE struct k_thread *_priq_dumb_mask_best(sys_dlist_t *pq)
} }
return NULL; return NULL;
} }
#endif #endif /* CONFIG_SCHED_CPU_MASK */
#if defined(CONFIG_SCHED_DUMB) || defined(CONFIG_WAITQ_DUMB) #if defined(CONFIG_SCHED_DUMB) || defined(CONFIG_WAITQ_DUMB)
static ALWAYS_INLINE void z_priq_dumb_add(sys_dlist_t *pq, static ALWAYS_INLINE void z_priq_dumb_add(sys_dlist_t *pq,
@ -174,7 +174,7 @@ static ALWAYS_INLINE void z_priq_dumb_add(sys_dlist_t *pq,
sys_dlist_append(pq, &thread->base.qnode_dlist); sys_dlist_append(pq, &thread->base.qnode_dlist);
} }
#endif #endif /* CONFIG_SCHED_DUMB || CONFIG_WAITQ_DUMB */
static ALWAYS_INLINE void *thread_runq(struct k_thread *thread) static ALWAYS_INLINE void *thread_runq(struct k_thread *thread)
{ {
@ -193,7 +193,7 @@ static ALWAYS_INLINE void *thread_runq(struct k_thread *thread)
#else #else
ARG_UNUSED(thread); ARG_UNUSED(thread);
return &_kernel.ready_q.runq; return &_kernel.ready_q.runq;
#endif #endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
} }
static ALWAYS_INLINE void *curr_cpu_runq(void) static ALWAYS_INLINE void *curr_cpu_runq(void)
@ -202,7 +202,7 @@ static ALWAYS_INLINE void *curr_cpu_runq(void)
return &arch_curr_cpu()->ready_q.runq; return &arch_curr_cpu()->ready_q.runq;
#else #else
return &_kernel.ready_q.runq; return &_kernel.ready_q.runq;
#endif #endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
} }
static ALWAYS_INLINE void runq_add(struct k_thread *thread) static ALWAYS_INLINE void runq_add(struct k_thread *thread)
@ -239,7 +239,7 @@ static ALWAYS_INLINE void queue_thread(struct k_thread *thread)
/* add current to end of queue means "yield" */ /* add current to end of queue means "yield" */
_current_cpu->swap_ok = true; _current_cpu->swap_ok = true;
} }
#endif #endif /* CONFIG_SMP */
} }
static ALWAYS_INLINE void dequeue_thread(struct k_thread *thread) static ALWAYS_INLINE void dequeue_thread(struct k_thread *thread)
@ -266,7 +266,7 @@ static void signal_pending_ipi(void)
arch_sched_ipi(); arch_sched_ipi();
} }
} }
#endif #endif /* CONFIG_SMP && CONFIG_SCHED_IPI_SUPPORTED */
} }
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
@ -296,7 +296,7 @@ static inline bool is_halting(struct k_thread *thread)
return (thread->base.thread_state & return (thread->base.thread_state &
(_THREAD_ABORTING | _THREAD_SUSPENDING)) != 0U; (_THREAD_ABORTING | _THREAD_SUSPENDING)) != 0U;
} }
#endif #endif /* CONFIG_SMP */
/* Clear the halting bits (_THREAD_ABORTING and _THREAD_SUSPENDING) */ /* Clear the halting bits (_THREAD_ABORTING and _THREAD_SUSPENDING) */
static inline void clear_halting(struct k_thread *thread) static inline void clear_halting(struct k_thread *thread)
@ -311,7 +311,7 @@ static ALWAYS_INLINE struct k_thread *next_up(void)
halt_thread(_current, is_aborting(_current) ? halt_thread(_current, is_aborting(_current) ?
_THREAD_DEAD : _THREAD_SUSPENDED); _THREAD_DEAD : _THREAD_SUSPENDED);
} }
#endif #endif /* CONFIG_SMP */
struct k_thread *thread = runq_best(); struct k_thread *thread = runq_best();
@ -332,6 +332,9 @@ static ALWAYS_INLINE struct k_thread *next_up(void)
} }
} }
#endif #endif
/* CONFIG_NUM_METAIRQ_PRIORITIES > 0 &&
* CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES
*/
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
/* In uniprocessor mode, we can leave the current thread in /* In uniprocessor mode, we can leave the current thread in
@ -386,7 +389,7 @@ static ALWAYS_INLINE struct k_thread *next_up(void)
_current_cpu->swap_ok = false; _current_cpu->swap_ok = false;
return thread; return thread;
#endif #endif /* CONFIG_SMP */
} }
static void move_thread_to_end_of_prio_q(struct k_thread *thread) static void move_thread_to_end_of_prio_q(struct k_thread *thread)
@ -404,7 +407,7 @@ static void flag_ipi(void)
if (arch_num_cpus() > 1) { if (arch_num_cpus() > 1) {
_kernel.pending_ipi = true; _kernel.pending_ipi = true;
} }
#endif #endif /* CONFIG_SMP && CONFIG_SCHED_IPI_SUPPORTED */
} }
#ifdef CONFIG_TIMESLICING #ifdef CONFIG_TIMESLICING
@ -421,7 +424,7 @@ static bool slice_expired[CONFIG_MP_MAX_NUM_CPUS];
* a noop condition in z_time_slice(). * a noop condition in z_time_slice().
*/ */
static struct k_thread *pending_current; static struct k_thread *pending_current;
#endif #endif /* CONFIG_SWAP_NONATOMIC */
static inline int slice_time(struct k_thread *thread) static inline int slice_time(struct k_thread *thread)
{ {
@ -433,7 +436,7 @@ static inline int slice_time(struct k_thread *thread)
} }
#else #else
ARG_UNUSED(thread); ARG_UNUSED(thread);
#endif #endif /* CONFIG_TIMESLICE_PER_THREAD */
return ret; return ret;
} }
@ -447,7 +450,7 @@ static inline bool sliceable(struct k_thread *thread)
#ifdef CONFIG_TIMESLICE_PER_THREAD #ifdef CONFIG_TIMESLICE_PER_THREAD
ret |= thread->base.slice_ticks != 0; ret |= thread->base.slice_ticks != 0;
#endif #endif /* CONFIG_TIMESLICE_PER_THREAD */
return ret; return ret;
} }
@ -498,7 +501,7 @@ void k_thread_time_slice_set(struct k_thread *thread, int32_t thread_slice_ticks
thread->base.slice_data = data; thread->base.slice_data = data;
} }
} }
#endif #endif /* CONFIG_TIMESLICE_PER_THREAD */
/* Called out of each timer interrupt */ /* Called out of each timer interrupt */
void z_time_slice(void) void z_time_slice(void)
@ -513,7 +516,7 @@ void z_time_slice(void)
return; return;
} }
pending_current = NULL; pending_current = NULL;
#endif #endif /* CONFIG_SWAP_NONATOMIC */
if (slice_expired[_current_cpu->id] && sliceable(curr)) { if (slice_expired[_current_cpu->id] && sliceable(curr)) {
#ifdef CONFIG_TIMESLICE_PER_THREAD #ifdef CONFIG_TIMESLICE_PER_THREAD
@ -522,7 +525,7 @@ void z_time_slice(void)
curr->base.slice_expired(curr, curr->base.slice_data); curr->base.slice_expired(curr, curr->base.slice_data);
key = k_spin_lock(&_sched_spinlock); key = k_spin_lock(&_sched_spinlock);
} }
#endif #endif /* CONFIG_TIMESLICE_PER_THREAD */
if (!z_is_thread_prevented_from_running(curr)) { if (!z_is_thread_prevented_from_running(curr)) {
move_thread_to_end_of_prio_q(curr); move_thread_to_end_of_prio_q(curr);
} }
@ -530,7 +533,7 @@ void z_time_slice(void)
} }
k_spin_unlock(&_sched_spinlock, key); k_spin_unlock(&_sched_spinlock, key);
} }
#endif #endif /* CONFIG_TIMESLICING */
/* Track cooperative threads preempted by metairqs so we can return to /* Track cooperative threads preempted by metairqs so we can return to
* them specifically. Called at the moment a new thread has been * them specifically. Called at the moment a new thread has been
@ -551,6 +554,9 @@ static void update_metairq_preempt(struct k_thread *thread)
#else #else
ARG_UNUSED(thread); ARG_UNUSED(thread);
#endif #endif
/* CONFIG_NUM_METAIRQ_PRIORITIES > 0 &&
* CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES
*/
} }
static void update_cache(int preempt_ok) static void update_cache(int preempt_ok)
@ -563,7 +569,7 @@ static void update_cache(int preempt_ok)
if (thread != _current) { if (thread != _current) {
z_reset_time_slice(thread); z_reset_time_slice(thread);
} }
#endif #endif /* CONFIG_TIMESLICING */
update_metairq_preempt(thread); update_metairq_preempt(thread);
_kernel.ready_q.cache = thread; _kernel.ready_q.cache = thread;
} else { } else {
@ -578,7 +584,7 @@ static void update_cache(int preempt_ok)
* reason the scheduler will make the same decision anyway. * reason the scheduler will make the same decision anyway.
*/ */
_current_cpu->swap_ok = preempt_ok; _current_cpu->swap_ok = preempt_ok;
#endif #endif /* CONFIG_SMP */
} }
static bool thread_active_elsewhere(struct k_thread *thread) static bool thread_active_elsewhere(struct k_thread *thread)
@ -598,7 +604,7 @@ static bool thread_active_elsewhere(struct k_thread *thread)
return true; return true;
} }
} }
#endif #endif /* CONFIG_SMP */
ARG_UNUSED(thread); ARG_UNUSED(thread);
return false; return false;
} }
@ -607,7 +613,7 @@ static void ready_thread(struct k_thread *thread)
{ {
#ifdef CONFIG_KERNEL_COHERENCE #ifdef CONFIG_KERNEL_COHERENCE
__ASSERT_NO_MSG(arch_mem_coherent(thread)); __ASSERT_NO_MSG(arch_mem_coherent(thread));
#endif #endif /* CONFIG_KERNEL_COHERENCE */
/* If thread is queued already, do not try and add it to the /* If thread is queued already, do not try and add it to the
* run queue again * run queue again
@ -693,7 +699,7 @@ static void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,
*/ */
#ifdef CONFIG_SCHED_IPI_SUPPORTED #ifdef CONFIG_SCHED_IPI_SUPPORTED
arch_sched_ipi(); arch_sched_ipi();
#endif #endif /* CONFIG_SCHED_IPI_SUPPORTED */
} }
if (is_halting(thread) && (thread != _current)) { if (is_halting(thread) && (thread != _current)) {
@ -719,7 +725,7 @@ static void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,
} }
return; /* lock has been released */ return; /* lock has been released */
} }
#endif #endif /* CONFIG_SMP */
halt_thread(thread, terminate ? _THREAD_DEAD : _THREAD_SUSPENDED); halt_thread(thread, terminate ? _THREAD_DEAD : _THREAD_SUSPENDED);
if ((thread == _current) && !arch_is_in_isr()) { if ((thread == _current) && !arch_is_in_isr()) {
z_swap(&_sched_spinlock, key); z_swap(&_sched_spinlock, key);
@ -757,7 +763,7 @@ static inline void z_vrfy_k_thread_suspend(struct k_thread *thread)
z_impl_k_thread_suspend(thread); z_impl_k_thread_suspend(thread);
} }
#include <syscalls/k_thread_suspend_mrsh.c> #include <syscalls/k_thread_suspend_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
void z_impl_k_thread_resume(struct k_thread *thread) void z_impl_k_thread_resume(struct k_thread *thread)
{ {
@ -786,7 +792,7 @@ static inline void z_vrfy_k_thread_resume(struct k_thread *thread)
z_impl_k_thread_resume(thread); z_impl_k_thread_resume(thread);
} }
#include <syscalls/k_thread_resume_mrsh.c> #include <syscalls/k_thread_resume_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
static _wait_q_t *pended_on_thread(struct k_thread *thread) static _wait_q_t *pended_on_thread(struct k_thread *thread)
{ {
@ -829,7 +835,7 @@ static void pend_locked(struct k_thread *thread, _wait_q_t *wait_q,
{ {
#ifdef CONFIG_KERNEL_COHERENCE #ifdef CONFIG_KERNEL_COHERENCE
__ASSERT_NO_MSG(wait_q == NULL || arch_mem_coherent(wait_q)); __ASSERT_NO_MSG(wait_q == NULL || arch_mem_coherent(wait_q));
#endif #endif /* CONFIG_KERNEL_COHERENCE */
add_to_waitq_locked(thread, wait_q); add_to_waitq_locked(thread, wait_q);
add_thread_timeout(thread, timeout); add_thread_timeout(thread, timeout);
} }
@ -873,7 +879,7 @@ void z_sched_wake_thread(struct k_thread *thread, bool is_timeout)
if (do_nothing) { if (do_nothing) {
continue; continue;
} }
#endif #endif /* CONFIG_EVENTS */
if (!killed) { if (!killed) {
/* The thread is not being killed */ /* The thread is not being killed */
@ -899,14 +905,14 @@ void z_thread_timeout(struct _timeout *timeout)
z_sched_wake_thread(thread, true); z_sched_wake_thread(thread, true);
} }
#endif #endif /* CONFIG_SYS_CLOCK_EXISTS */
int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key, int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
_wait_q_t *wait_q, k_timeout_t timeout) _wait_q_t *wait_q, k_timeout_t timeout)
{ {
#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC) #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
pending_current = _current; pending_current = _current;
#endif #endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
__ASSERT_NO_MSG(sizeof(_sched_spinlock) == 0 || lock != &_sched_spinlock); __ASSERT_NO_MSG(sizeof(_sched_spinlock) == 0 || lock != &_sched_spinlock);
/* We do a "lock swap" prior to calling z_swap(), such that /* We do a "lock swap" prior to calling z_swap(), such that
@ -994,7 +1000,7 @@ static inline bool resched(uint32_t key)
{ {
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
_current_cpu->swap_ok = 0; _current_cpu->swap_ok = 0;
#endif #endif /* CONFIG_SMP */
return arch_irq_unlocked(key) && !arch_is_in_isr(); return arch_irq_unlocked(key) && !arch_is_in_isr();
} }
@ -1014,7 +1020,7 @@ static inline bool need_swap(void)
/* Check if the next ready thread is the same as the current thread */ /* Check if the next ready thread is the same as the current thread */
new_thread = _kernel.ready_q.cache; new_thread = _kernel.ready_q.cache;
return new_thread != _current; return new_thread != _current;
#endif #endif /* CONFIG_SMP */
} }
void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key) void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key)
@ -1079,7 +1085,7 @@ struct k_thread *z_swap_next_thread(void)
return ret; return ret;
#else #else
return _kernel.ready_q.cache; return _kernel.ready_q.cache;
#endif #endif /* CONFIG_SMP */
} }
#ifdef CONFIG_USE_SWITCH #ifdef CONFIG_USE_SWITCH
@ -1148,7 +1154,7 @@ void *z_get_next_switch_handle(void *interrupted)
#ifdef CONFIG_TIMESLICING #ifdef CONFIG_TIMESLICING
z_reset_time_slice(new_thread); z_reset_time_slice(new_thread);
#endif #endif /* CONFIG_TIMESLICING */
#ifdef CONFIG_SPIN_VALIDATE #ifdef CONFIG_SPIN_VALIDATE
/* Changed _current! Update the spinlock /* Changed _current! Update the spinlock
@ -1157,7 +1163,7 @@ void *z_get_next_switch_handle(void *interrupted)
* release the lock. * release the lock.
*/ */
z_spin_lock_set_owner(&_sched_spinlock); z_spin_lock_set_owner(&_sched_spinlock);
#endif #endif /* CONFIG_SPIN_VALIDATE */
/* A queued (runnable) old/current thread /* A queued (runnable) old/current thread
* needs to be added back to the run queue * needs to be added back to the run queue
@ -1183,9 +1189,9 @@ void *z_get_next_switch_handle(void *interrupted)
_current->switch_handle = interrupted; _current->switch_handle = interrupted;
set_current(_kernel.ready_q.cache); set_current(_kernel.ready_q.cache);
return _current->switch_handle; return _current->switch_handle;
#endif #endif /* CONFIG_SMP */
} }
#endif #endif /* CONFIG_USE_SWITCH */
int z_unpend_all(_wait_q_t *wait_q) int z_unpend_all(_wait_q_t *wait_q)
{ {
@ -1226,7 +1232,7 @@ void z_sched_init(void)
} }
#else #else
init_ready_q(&_kernel.ready_q); init_ready_q(&_kernel.ready_q);
#endif #endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
} }
int z_impl_k_thread_priority_get(k_tid_t thread) int z_impl_k_thread_priority_get(k_tid_t thread)
@ -1241,7 +1247,7 @@ static inline int z_vrfy_k_thread_priority_get(k_tid_t thread)
return z_impl_k_thread_priority_get(thread); return z_impl_k_thread_priority_get(thread);
} }
#include <syscalls/k_thread_priority_get_mrsh.c> #include <syscalls/k_thread_priority_get_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
void z_impl_k_thread_priority_set(k_tid_t thread, int prio) void z_impl_k_thread_priority_set(k_tid_t thread, int prio)
{ {
@ -1270,11 +1276,11 @@ static inline void z_vrfy_k_thread_priority_set(k_tid_t thread, int prio)
K_OOPS(K_SYSCALL_VERIFY_MSG((int8_t)prio >= thread->base.prio, K_OOPS(K_SYSCALL_VERIFY_MSG((int8_t)prio >= thread->base.prio,
"thread priority may only be downgraded (%d < %d)", "thread priority may only be downgraded (%d < %d)",
prio, thread->base.prio)); prio, thread->base.prio));
#endif #endif /* CONFIG_USERSPACE_THREAD_MAY_RAISE_PRIORITY */
z_impl_k_thread_priority_set(thread, prio); z_impl_k_thread_priority_set(thread, prio);
} }
#include <syscalls/k_thread_priority_set_mrsh.c> #include <syscalls/k_thread_priority_set_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
#ifdef CONFIG_SCHED_DEADLINE #ifdef CONFIG_SCHED_DEADLINE
void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline) void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline)
@ -1312,8 +1318,8 @@ static inline void z_vrfy_k_thread_deadline_set(k_tid_t tid, int deadline)
z_impl_k_thread_deadline_set((k_tid_t)thread, deadline); z_impl_k_thread_deadline_set((k_tid_t)thread, deadline);
} }
#include <syscalls/k_thread_deadline_set_mrsh.c> #include <syscalls/k_thread_deadline_set_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
#endif #endif /* CONFIG_SCHED_DEADLINE */
bool k_can_yield(void) bool k_can_yield(void)
{ {
@ -1344,7 +1350,7 @@ static inline void z_vrfy_k_yield(void)
z_impl_k_yield(); z_impl_k_yield();
} }
#include <syscalls/k_yield_mrsh.c> #include <syscalls/k_yield_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
static int32_t z_tick_sleep(k_ticks_t ticks) static int32_t z_tick_sleep(k_ticks_t ticks)
{ {
@ -1360,7 +1366,7 @@ static int32_t z_tick_sleep(k_ticks_t ticks)
k_yield(); k_yield();
return 0; return 0;
} }
#endif #endif /* CONFIG_MULTITHREADING */
if (Z_TICK_ABS(ticks) <= 0) { if (Z_TICK_ABS(ticks) <= 0) {
expected_wakeup_ticks = ticks + sys_clock_tick_get_32(); expected_wakeup_ticks = ticks + sys_clock_tick_get_32();
@ -1374,7 +1380,7 @@ static int32_t z_tick_sleep(k_ticks_t ticks)
#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC) #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
pending_current = _current; pending_current = _current;
#endif #endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
unready_thread(_current); unready_thread(_current);
z_add_thread_timeout(_current, timeout); z_add_thread_timeout(_current, timeout);
z_mark_thread_as_suspended(_current); z_mark_thread_as_suspended(_current);
@ -1390,7 +1396,7 @@ static int32_t z_tick_sleep(k_ticks_t ticks)
#else #else
/* busy wait to be time coherent since subsystems may depend on it */ /* busy wait to be time coherent since subsystems may depend on it */
z_impl_k_busy_wait(k_ticks_to_us_ceil32(expected_wakeup_ticks)); z_impl_k_busy_wait(k_ticks_to_us_ceil32(expected_wakeup_ticks));
#endif #endif /* CONFIG_MULTITHREADING */
return 0; return 0;
} }
@ -1410,7 +1416,7 @@ int32_t z_impl_k_sleep(k_timeout_t timeout)
#else #else
/* In Single Thread, just wait for an interrupt saving power */ /* In Single Thread, just wait for an interrupt saving power */
k_cpu_idle(); k_cpu_idle();
#endif #endif /* CONFIG_MULTITHREADING */
SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, (int32_t) K_TICKS_FOREVER); SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, (int32_t) K_TICKS_FOREVER);
return (int32_t) K_TICKS_FOREVER; return (int32_t) K_TICKS_FOREVER;
@ -1433,7 +1439,7 @@ static inline int32_t z_vrfy_k_sleep(k_timeout_t timeout)
return z_impl_k_sleep(timeout); return z_impl_k_sleep(timeout);
} }
#include <syscalls/k_sleep_mrsh.c> #include <syscalls/k_sleep_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
int32_t z_impl_k_usleep(int us) int32_t z_impl_k_usleep(int us)
{ {
@ -1457,7 +1463,7 @@ static inline int32_t z_vrfy_k_usleep(int us)
return z_impl_k_usleep(us); return z_impl_k_usleep(us);
} }
#include <syscalls/k_usleep_mrsh.c> #include <syscalls/k_usleep_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
void z_impl_k_wakeup(k_tid_t thread) void z_impl_k_wakeup(k_tid_t thread)
{ {
@ -1491,7 +1497,7 @@ void z_impl_k_wakeup(k_tid_t thread)
#ifdef CONFIG_TRACE_SCHED_IPI #ifdef CONFIG_TRACE_SCHED_IPI
extern void z_trace_sched_ipi(void); extern void z_trace_sched_ipi(void);
#endif #endif /* CONFIG_TRACE_SCHED_IPI */
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
void z_sched_ipi(void) void z_sched_ipi(void)
@ -1501,15 +1507,15 @@ void z_sched_ipi(void)
*/ */
#ifdef CONFIG_TRACE_SCHED_IPI #ifdef CONFIG_TRACE_SCHED_IPI
z_trace_sched_ipi(); z_trace_sched_ipi();
#endif #endif /* CONFIG_TRACE_SCHED_IPI */
#ifdef CONFIG_TIMESLICING #ifdef CONFIG_TIMESLICING
if (sliceable(_current)) { if (sliceable(_current)) {
z_time_slice(); z_time_slice();
} }
#endif #endif /* CONFIG_TIMESLICING */
} }
#endif #endif /* CONFIG_SMP */
#ifdef CONFIG_USERSPACE #ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_wakeup(k_tid_t thread) static inline void z_vrfy_k_wakeup(k_tid_t thread)
@ -1518,7 +1524,7 @@ static inline void z_vrfy_k_wakeup(k_tid_t thread)
z_impl_k_wakeup(thread); z_impl_k_wakeup(thread);
} }
#include <syscalls/k_wakeup_mrsh.c> #include <syscalls/k_wakeup_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
k_tid_t z_impl_k_sched_current_thread_query(void) k_tid_t z_impl_k_sched_current_thread_query(void)
{ {
@ -1528,13 +1534,13 @@ k_tid_t z_impl_k_sched_current_thread_query(void)
* local interrupts when reading it. * local interrupts when reading it.
*/ */
unsigned int k = arch_irq_lock(); unsigned int k = arch_irq_lock();
#endif #endif /* CONFIG_SMP */
k_tid_t ret = _current_cpu->current; k_tid_t ret = _current_cpu->current;
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
arch_irq_unlock(k); arch_irq_unlock(k);
#endif #endif /* CONFIG_SMP */
return ret; return ret;
} }
@ -1544,7 +1550,7 @@ static inline k_tid_t z_vrfy_k_sched_current_thread_query(void)
return z_impl_k_sched_current_thread_query(); return z_impl_k_sched_current_thread_query();
} }
#include <syscalls/k_sched_current_thread_query_mrsh.c> #include <syscalls/k_sched_current_thread_query_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
int z_impl_k_is_preempt_thread(void) int z_impl_k_is_preempt_thread(void)
{ {
@ -1557,7 +1563,7 @@ static inline int z_vrfy_k_is_preempt_thread(void)
return z_impl_k_is_preempt_thread(); return z_impl_k_is_preempt_thread();
} }
#include <syscalls/k_is_preempt_thread_mrsh.c> #include <syscalls/k_is_preempt_thread_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
static inline void unpend_all(_wait_q_t *wait_q) static inline void unpend_all(_wait_q_t *wait_q)
{ {
@ -1573,7 +1579,7 @@ static inline void unpend_all(_wait_q_t *wait_q)
#ifdef CONFIG_THREAD_ABORT_HOOK #ifdef CONFIG_THREAD_ABORT_HOOK
extern void thread_abort_hook(struct k_thread *thread); extern void thread_abort_hook(struct k_thread *thread);
#endif #endif /* CONFIG_THREAD_ABORT_HOOK */
/** /**
* @brief Dequeues the specified thread * @brief Dequeues the specified thread
@ -1604,7 +1610,7 @@ static void halt_thread(struct k_thread *thread, uint8_t new_state)
} }
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
unpend_all(&thread->halt_queue); unpend_all(&thread->halt_queue);
#endif #endif /* CONFIG_SMP */
update_cache(1); update_cache(1);
if (new_state == _THREAD_SUSPENDED) { if (new_state == _THREAD_SUSPENDED) {
@ -1613,28 +1619,28 @@ static void halt_thread(struct k_thread *thread, uint8_t new_state)
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
arch_float_disable(thread); arch_float_disable(thread);
#endif #endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
SYS_PORT_TRACING_FUNC(k_thread, sched_abort, thread); SYS_PORT_TRACING_FUNC(k_thread, sched_abort, thread);
z_thread_monitor_exit(thread); z_thread_monitor_exit(thread);
#ifdef CONFIG_THREAD_ABORT_HOOK #ifdef CONFIG_THREAD_ABORT_HOOK
thread_abort_hook(thread); thread_abort_hook(thread);
#endif #endif /* CONFIG_THREAD_ABORT_HOOK */
#ifdef CONFIG_OBJ_CORE_THREAD #ifdef CONFIG_OBJ_CORE_THREAD
#ifdef CONFIG_OBJ_CORE_STATS_THREAD #ifdef CONFIG_OBJ_CORE_STATS_THREAD
k_obj_core_stats_deregister(K_OBJ_CORE(thread)); k_obj_core_stats_deregister(K_OBJ_CORE(thread));
#endif #endif /* CONFIG_OBJ_CORE_STATS_THREAD */
k_obj_core_unlink(K_OBJ_CORE(thread)); k_obj_core_unlink(K_OBJ_CORE(thread));
#endif #endif /* CONFIG_OBJ_CORE_THREAD */
#ifdef CONFIG_USERSPACE #ifdef CONFIG_USERSPACE
z_mem_domain_exit_thread(thread); z_mem_domain_exit_thread(thread);
k_thread_perms_all_clear(thread); k_thread_perms_all_clear(thread);
k_object_uninit(thread->stack_obj); k_object_uninit(thread->stack_obj);
k_object_uninit(thread); k_object_uninit(thread);
#endif #endif /* CONFIG_USERSPACE */
} }
} }
@ -1666,7 +1672,7 @@ void z_impl_k_thread_abort(struct k_thread *thread)
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, abort, thread); SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, abort, thread);
} }
#endif #endif /* !CONFIG_ARCH_HAS_THREAD_ABORT */
int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout) int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
{ {
@ -1723,7 +1729,7 @@ static bool thread_obj_validate(struct k_thread *thread)
default: default:
#ifdef CONFIG_LOG #ifdef CONFIG_LOG
k_object_dump_error(ret, thread, ko, K_OBJ_THREAD); k_object_dump_error(ret, thread, ko, K_OBJ_THREAD);
#endif #endif /* CONFIG_LOG */
K_OOPS(K_SYSCALL_VERIFY_MSG(ret, "access denied")); K_OOPS(K_SYSCALL_VERIFY_MSG(ret, "access denied"));
} }
CODE_UNREACHABLE; /* LCOV_EXCL_LINE */ CODE_UNREACHABLE; /* LCOV_EXCL_LINE */


@ -40,7 +40,7 @@ static struct k_spinlock lock;
#ifdef CONFIG_OBJ_CORE_SEM #ifdef CONFIG_OBJ_CORE_SEM
static struct k_obj_type obj_type_sem; static struct k_obj_type obj_type_sem;
#endif #endif /* CONFIG_OBJ_CORE_SEM */
int z_impl_k_sem_init(struct k_sem *sem, unsigned int initial_count, int z_impl_k_sem_init(struct k_sem *sem, unsigned int initial_count,
unsigned int limit) unsigned int limit)
@ -62,12 +62,12 @@ int z_impl_k_sem_init(struct k_sem *sem, unsigned int initial_count,
z_waitq_init(&sem->wait_q); z_waitq_init(&sem->wait_q);
#if defined(CONFIG_POLL) #if defined(CONFIG_POLL)
sys_dlist_init(&sem->poll_events); sys_dlist_init(&sem->poll_events);
#endif #endif /* CONFIG_POLL */
k_object_init(sem); k_object_init(sem);
#ifdef CONFIG_OBJ_CORE_SEM #ifdef CONFIG_OBJ_CORE_SEM
k_obj_core_init_and_link(K_OBJ_CORE(sem), &obj_type_sem); k_obj_core_init_and_link(K_OBJ_CORE(sem), &obj_type_sem);
#endif #endif /* CONFIG_OBJ_CORE_SEM */
return 0; return 0;
} }
@ -80,7 +80,7 @@ int z_vrfy_k_sem_init(struct k_sem *sem, unsigned int initial_count,
return z_impl_k_sem_init(sem, initial_count, limit); return z_impl_k_sem_init(sem, initial_count, limit);
} }
#include <syscalls/k_sem_init_mrsh.c> #include <syscalls/k_sem_init_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
static inline bool handle_poll_events(struct k_sem *sem) static inline bool handle_poll_events(struct k_sem *sem)
{ {
@ -90,7 +90,7 @@ static inline bool handle_poll_events(struct k_sem *sem)
#else #else
ARG_UNUSED(sem); ARG_UNUSED(sem);
return false; return false;
#endif #endif /* CONFIG_POLL */
} }
void z_impl_k_sem_give(struct k_sem *sem) void z_impl_k_sem_give(struct k_sem *sem)
@ -127,7 +127,7 @@ static inline void z_vrfy_k_sem_give(struct k_sem *sem)
z_impl_k_sem_give(sem); z_impl_k_sem_give(sem);
} }
#include <syscalls/k_sem_give_mrsh.c> #include <syscalls/k_sem_give_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
int z_impl_k_sem_take(struct k_sem *sem, k_timeout_t timeout) int z_impl_k_sem_take(struct k_sem *sem, k_timeout_t timeout)
{ {
@ -207,7 +207,7 @@ static inline unsigned int z_vrfy_k_sem_count_get(struct k_sem *sem)
} }
#include <syscalls/k_sem_count_get_mrsh.c> #include <syscalls/k_sem_count_get_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
#ifdef CONFIG_OBJ_CORE_SEM #ifdef CONFIG_OBJ_CORE_SEM
static int init_sem_obj_core_list(void) static int init_sem_obj_core_list(void)
@ -228,4 +228,4 @@ static int init_sem_obj_core_list(void)
SYS_INIT(init_sem_obj_core_list, PRE_KERNEL_1, SYS_INIT(init_sem_obj_core_list, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_OBJECTS); CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif #endif /* CONFIG_OBJ_CORE_SEM */
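Only the #endif comments change in this file; for context, a minimal usage sketch of the semaphore API whose init and verification paths appear above (my_sem and the counts are illustrative, not part of this change):

#include <zephyr/kernel.h>

static struct k_sem my_sem;

void sem_example(void)
{
	k_sem_init(&my_sem, 0, 1);              /* initial count 0, limit 1 */
	k_sem_give(&my_sem);                    /* count becomes 1 */
	if (k_sem_take(&my_sem, K_MSEC(100)) == 0) {
		/* semaphore obtained within 100 ms */
	}
}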
View file
@ -49,7 +49,7 @@ static struct cpu_start_cb {
#ifdef CONFIG_SYS_CLOCK_EXISTS #ifdef CONFIG_SYS_CLOCK_EXISTS
/** True if smp_timer_init() needs to be called. */ /** True if smp_timer_init() needs to be called. */
bool reinit_timer; bool reinit_timer;
#endif #endif /* CONFIG_SYS_CLOCK_EXISTS */
} cpu_start_fn; } cpu_start_fn;
static struct k_spinlock cpu_start_lock; static struct k_spinlock cpu_start_lock;
@ -130,7 +130,7 @@ static inline void smp_init_top(void *arg)
if ((arg == NULL) || csc.reinit_timer) { if ((arg == NULL) || csc.reinit_timer) {
smp_timer_init(); smp_timer_init();
} }
#endif #endif /* CONFIG_SYS_CLOCK_EXISTS */
/* Do additional initialization steps if needed. */ /* Do additional initialization steps if needed. */
if (csc.fn != NULL) { if (csc.fn != NULL) {
@ -177,7 +177,7 @@ void k_smp_cpu_start(int id, smp_init_fn fn, void *arg)
#ifdef CONFIG_SYS_CLOCK_EXISTS #ifdef CONFIG_SYS_CLOCK_EXISTS
cpu_start_fn.reinit_timer = true; cpu_start_fn.reinit_timer = true;
#endif #endif /* CONFIG_SYS_CLOCK_EXISTS */
/* We are only starting one CPU so we do not need to synchronize /* We are only starting one CPU so we do not need to synchronize
* across all CPUs using the start_flag. So just set it to 1. * across all CPUs using the start_flag. So just set it to 1.
@ -206,7 +206,7 @@ void k_smp_cpu_resume(int id, smp_init_fn fn, void *arg,
cpu_start_fn.reinit_timer = reinit_timer; cpu_start_fn.reinit_timer = reinit_timer;
#else #else
ARG_UNUSED(reinit_timer); ARG_UNUSED(reinit_timer);
#endif #endif /* CONFIG_SYS_CLOCK_EXISTS */
/* We are only starting one CPU so we do not need to synchronize /* We are only starting one CPU so we do not need to synchronize
* across all CPUs using the start_flag. So just set it to 1. * across all CPUs using the start_flag. So just set it to 1.
View file
@ -21,7 +21,7 @@
#ifdef CONFIG_OBJ_CORE_STACK #ifdef CONFIG_OBJ_CORE_STACK
static struct k_obj_type obj_type_stack; static struct k_obj_type obj_type_stack;
#endif #endif /* CONFIG_OBJ_CORE_STACK */
void k_stack_init(struct k_stack *stack, stack_data_t *buffer, void k_stack_init(struct k_stack *stack, stack_data_t *buffer,
uint32_t num_entries) uint32_t num_entries)
@ -36,7 +36,7 @@ void k_stack_init(struct k_stack *stack, stack_data_t *buffer,
#ifdef CONFIG_OBJ_CORE_STACK #ifdef CONFIG_OBJ_CORE_STACK
k_obj_core_init_and_link(K_OBJ_CORE(stack), &obj_type_stack); k_obj_core_init_and_link(K_OBJ_CORE(stack), &obj_type_stack);
#endif #endif /* CONFIG_OBJ_CORE_STACK */
} }
int32_t z_impl_k_stack_alloc_init(struct k_stack *stack, uint32_t num_entries) int32_t z_impl_k_stack_alloc_init(struct k_stack *stack, uint32_t num_entries)
@ -69,7 +69,7 @@ static inline int32_t z_vrfy_k_stack_alloc_init(struct k_stack *stack,
return z_impl_k_stack_alloc_init(stack, num_entries); return z_impl_k_stack_alloc_init(stack, num_entries);
} }
#include <syscalls/k_stack_alloc_init_mrsh.c> #include <syscalls/k_stack_alloc_init_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
int k_stack_cleanup(struct k_stack *stack) int k_stack_cleanup(struct k_stack *stack)
{ {
@ -137,7 +137,7 @@ static inline int z_vrfy_k_stack_push(struct k_stack *stack, stack_data_t data)
return z_impl_k_stack_push(stack, data); return z_impl_k_stack_push(stack, data);
} }
#include <syscalls/k_stack_push_mrsh.c> #include <syscalls/k_stack_push_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
int z_impl_k_stack_pop(struct k_stack *stack, stack_data_t *data, int z_impl_k_stack_pop(struct k_stack *stack, stack_data_t *data,
k_timeout_t timeout) k_timeout_t timeout)
@ -192,7 +192,7 @@ static inline int z_vrfy_k_stack_pop(struct k_stack *stack,
return z_impl_k_stack_pop(stack, data, timeout); return z_impl_k_stack_pop(stack, data, timeout);
} }
#include <syscalls/k_stack_pop_mrsh.c> #include <syscalls/k_stack_pop_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
#ifdef CONFIG_OBJ_CORE_STACK #ifdef CONFIG_OBJ_CORE_STACK
static int init_stack_obj_core_list(void) static int init_stack_obj_core_list(void)
@ -213,4 +213,4 @@ static int init_stack_obj_core_list(void)
SYS_INIT(init_stack_obj_core_list, PRE_KERNEL_1, SYS_INIT(init_stack_obj_core_list, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_OBJECTS); CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif #endif /* CONFIG_OBJ_CORE_STACK */
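As with the semaphore file, the hunks above only add closing comments; a minimal sketch of the k_stack API they touch (buffer size and names are illustrative):

#include <zephyr/kernel.h>

static stack_data_t stack_buf[4];
static struct k_stack my_stack;

void stack_example(void)
{
	stack_data_t value;

	k_stack_init(&my_stack, stack_buf, 4);
	(void)k_stack_push(&my_stack, (stack_data_t)42);
	if (k_stack_pop(&my_stack, &value, K_NO_WAIT) == 0) {
		/* value == 42 */
	}
}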
View file
@ -47,7 +47,7 @@ static struct k_obj_core_stats_desc thread_stats_desc = {
.disable = z_thread_stats_disable, .disable = z_thread_stats_disable,
.enable = z_thread_stats_enable, .enable = z_thread_stats_enable,
}; };
#endif #endif /* CONFIG_OBJ_CORE_STATS_THREAD */
static int init_thread_obj_core_list(void) static int init_thread_obj_core_list(void)
{ {
@ -56,18 +56,18 @@ static int init_thread_obj_core_list(void)
#ifdef CONFIG_OBJ_CORE_THREAD #ifdef CONFIG_OBJ_CORE_THREAD
z_obj_type_init(&obj_type_thread, K_OBJ_TYPE_THREAD_ID, z_obj_type_init(&obj_type_thread, K_OBJ_TYPE_THREAD_ID,
offsetof(struct k_thread, obj_core)); offsetof(struct k_thread, obj_core));
#endif #endif /* CONFIG_OBJ_CORE_THREAD */
#ifdef CONFIG_OBJ_CORE_STATS_THREAD #ifdef CONFIG_OBJ_CORE_STATS_THREAD
k_obj_type_stats_init(&obj_type_thread, &thread_stats_desc); k_obj_type_stats_init(&obj_type_thread, &thread_stats_desc);
#endif #endif /* CONFIG_OBJ_CORE_STATS_THREAD */
return 0; return 0;
} }
SYS_INIT(init_thread_obj_core_list, PRE_KERNEL_1, SYS_INIT(init_thread_obj_core_list, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_OBJECTS); CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif #endif /* CONFIG_OBJ_CORE_THREAD */
#define _FOREACH_STATIC_THREAD(thread_data) \ #define _FOREACH_STATIC_THREAD(thread_data) \
@ -91,7 +91,7 @@ static inline void z_vrfy_k_thread_custom_data_set(void *data)
z_impl_k_thread_custom_data_set(data); z_impl_k_thread_custom_data_set(data);
} }
#include <syscalls/k_thread_custom_data_set_mrsh.c> #include <syscalls/k_thread_custom_data_set_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
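The custom-data accessors wrapped above are only compiled with CONFIG_THREAD_CUSTOM_DATA=y; a sketch of their use, with an illustrative per-thread context struct that is not part of this change:

#include <zephyr/kernel.h>

struct per_thread_ctx {
	int last_error;                         /* illustrative field */
};

void ctx_store(struct per_thread_ctx *ctx)
{
	k_thread_custom_data_set(ctx);          /* value is opaque to the kernel */
}

struct per_thread_ctx *ctx_load(void)
{
	return k_thread_custom_data_get();      /* whatever this thread last stored */
}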
void *z_impl_k_thread_custom_data_get(void) void *z_impl_k_thread_custom_data_get(void)
{ {
@ -338,8 +338,8 @@ static inline void z_vrfy_k_thread_start(struct k_thread *thread)
return z_impl_k_thread_start(thread); return z_impl_k_thread_start(thread);
} }
#include <syscalls/k_thread_start_mrsh.c> #include <syscalls/k_thread_start_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
#endif #endif /* CONFIG_MULTITHREADING */
#if CONFIG_STACK_POINTER_RANDOM #if CONFIG_STACK_POINTER_RANDOM
@ -388,7 +388,7 @@ static char *setup_thread_stack(struct k_thread *new_thread,
stack_buf_start = Z_THREAD_STACK_BUFFER(stack); stack_buf_start = Z_THREAD_STACK_BUFFER(stack);
stack_buf_size = stack_obj_size - K_THREAD_STACK_RESERVED; stack_buf_size = stack_obj_size - K_THREAD_STACK_RESERVED;
} else } else
#endif #endif /* CONFIG_USERSPACE */
{ {
/* Object cannot host a user mode thread */ /* Object cannot host a user mode thread */
stack_obj_size = Z_KERNEL_STACK_SIZE_ADJUST(stack_size); stack_obj_size = Z_KERNEL_STACK_SIZE_ADJUST(stack_size);
@ -417,7 +417,7 @@ static char *setup_thread_stack(struct k_thread *new_thread,
#ifdef CONFIG_INIT_STACKS #ifdef CONFIG_INIT_STACKS
memset(stack_buf_start, 0xaa, stack_buf_size); memset(stack_buf_start, 0xaa, stack_buf_size);
#endif #endif /* CONFIG_INIT_STACKS */
#ifdef CONFIG_STACK_SENTINEL #ifdef CONFIG_STACK_SENTINEL
/* Put the stack sentinel at the lowest 4 bytes of the stack area. /* Put the stack sentinel at the lowest 4 bytes of the stack area.
* We periodically check that it's still present and kill the thread * We periodically check that it's still present and kill the thread
@ -436,10 +436,10 @@ static char *setup_thread_stack(struct k_thread *new_thread,
delta += tls_size; delta += tls_size;
new_thread->userspace_local_data = new_thread->userspace_local_data =
(struct _thread_userspace_local_data *)(stack_ptr - delta); (struct _thread_userspace_local_data *)(stack_ptr - delta);
#endif #endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */
#if CONFIG_STACK_POINTER_RANDOM #if CONFIG_STACK_POINTER_RANDOM
delta += random_offset(stack_buf_size); delta += random_offset(stack_buf_size);
#endif #endif /* CONFIG_STACK_POINTER_RANDOM */
delta = ROUND_UP(delta, ARCH_STACK_PTR_ALIGN); delta = ROUND_UP(delta, ARCH_STACK_PTR_ALIGN);
#ifdef CONFIG_THREAD_STACK_INFO #ifdef CONFIG_THREAD_STACK_INFO
/* Initial values. Arches which implement MPU guards that "borrow" /* Initial values. Arches which implement MPU guards that "borrow"
@ -452,7 +452,7 @@ static char *setup_thread_stack(struct k_thread *new_thread,
new_thread->stack_info.start = (uintptr_t)stack_buf_start; new_thread->stack_info.start = (uintptr_t)stack_buf_start;
new_thread->stack_info.size = stack_buf_size; new_thread->stack_info.size = stack_buf_size;
new_thread->stack_info.delta = delta; new_thread->stack_info.delta = delta;
#endif #endif /* CONFIG_THREAD_STACK_INFO */
stack_ptr -= delta; stack_ptr -= delta;
return stack_ptr; return stack_ptr;
@ -479,8 +479,8 @@ char *z_setup_new_thread(struct k_thread *new_thread,
k_obj_core_stats_register(K_OBJ_CORE(new_thread), k_obj_core_stats_register(K_OBJ_CORE(new_thread),
&new_thread->base.usage, &new_thread->base.usage,
sizeof(new_thread->base.usage)); sizeof(new_thread->base.usage));
#endif #endif /* CONFIG_OBJ_CORE_STATS_THREAD */
#endif #endif /* CONFIG_OBJ_CORE_THREAD */
#ifdef CONFIG_USERSPACE #ifdef CONFIG_USERSPACE
__ASSERT((options & K_USER) == 0U || z_stack_is_user_capable(stack), __ASSERT((options & K_USER) == 0U || z_stack_is_user_capable(stack),
@ -493,7 +493,7 @@ char *z_setup_new_thread(struct k_thread *new_thread,
/* Any given thread has access to itself */ /* Any given thread has access to itself */
k_object_access_grant(new_thread, new_thread); k_object_access_grant(new_thread, new_thread);
#endif #endif /* CONFIG_USERSPACE */
z_waitq_init(&new_thread->join_queue); z_waitq_init(&new_thread->join_queue);
/* Initialize various struct k_thread members */ /* Initialize various struct k_thread members */
@ -513,7 +513,7 @@ char *z_setup_new_thread(struct k_thread *new_thread,
__ASSERT_NO_MSG(!arch_mem_coherent(stack)); __ASSERT_NO_MSG(!arch_mem_coherent(stack));
#endif /* CONFIG_DYNAMIC_THREAD */ #endif /* CONFIG_DYNAMIC_THREAD */
#endif #endif /* CONFIG_KERNEL_COHERENCE */
arch_new_thread(new_thread, stack, stack_ptr, entry, p1, p2, p3); arch_new_thread(new_thread, stack, stack_ptr, entry, p1, p2, p3);
@ -527,14 +527,14 @@ char *z_setup_new_thread(struct k_thread *new_thread,
*/ */
__ASSERT(new_thread->switch_handle != NULL, __ASSERT(new_thread->switch_handle != NULL,
"arch layer failed to initialize switch_handle"); "arch layer failed to initialize switch_handle");
#endif #endif /* CONFIG_USE_SWITCH */
#ifdef CONFIG_THREAD_CUSTOM_DATA #ifdef CONFIG_THREAD_CUSTOM_DATA
/* Initialize custom data field (value is opaque to kernel) */ /* Initialize custom data field (value is opaque to kernel) */
new_thread->custom_data = NULL; new_thread->custom_data = NULL;
#endif #endif /* CONFIG_THREAD_CUSTOM_DATA */
#ifdef CONFIG_EVENTS #ifdef CONFIG_EVENTS
new_thread->no_wake_on_timeout = false; new_thread->no_wake_on_timeout = false;
#endif #endif /* CONFIG_EVENTS */
#ifdef CONFIG_THREAD_MONITOR #ifdef CONFIG_THREAD_MONITOR
new_thread->entry.pEntry = entry; new_thread->entry.pEntry = entry;
new_thread->entry.parameter1 = p1; new_thread->entry.parameter1 = p1;
@ -546,7 +546,7 @@ char *z_setup_new_thread(struct k_thread *new_thread,
new_thread->next_thread = _kernel.threads; new_thread->next_thread = _kernel.threads;
_kernel.threads = new_thread; _kernel.threads = new_thread;
k_spin_unlock(&z_thread_monitor_lock, key); k_spin_unlock(&z_thread_monitor_lock, key);
#endif #endif /* CONFIG_THREAD_MONITOR */
#ifdef CONFIG_THREAD_NAME #ifdef CONFIG_THREAD_NAME
if (name != NULL) { if (name != NULL) {
strncpy(new_thread->name, name, strncpy(new_thread->name, name,
@ -556,42 +556,42 @@ char *z_setup_new_thread(struct k_thread *new_thread,
} else { } else {
new_thread->name[0] = '\0'; new_thread->name[0] = '\0';
} }
#endif #endif /* CONFIG_THREAD_NAME */
#ifdef CONFIG_SCHED_CPU_MASK #ifdef CONFIG_SCHED_CPU_MASK
if (IS_ENABLED(CONFIG_SCHED_CPU_MASK_PIN_ONLY)) { if (IS_ENABLED(CONFIG_SCHED_CPU_MASK_PIN_ONLY)) {
new_thread->base.cpu_mask = 1; /* must specify only one cpu */ new_thread->base.cpu_mask = 1; /* must specify only one cpu */
} else { } else {
new_thread->base.cpu_mask = -1; /* allow all cpus */ new_thread->base.cpu_mask = -1; /* allow all cpus */
} }
#endif #endif /* CONFIG_SCHED_CPU_MASK */
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN #ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
/* _current may be null if the dummy thread is not used */ /* _current may be null if the dummy thread is not used */
if (!_current) { if (!_current) {
new_thread->resource_pool = NULL; new_thread->resource_pool = NULL;
return stack_ptr; return stack_ptr;
} }
#endif #endif /* CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN */
#ifdef CONFIG_USERSPACE #ifdef CONFIG_USERSPACE
z_mem_domain_init_thread(new_thread); z_mem_domain_init_thread(new_thread);
if ((options & K_INHERIT_PERMS) != 0U) { if ((options & K_INHERIT_PERMS) != 0U) {
k_thread_perms_inherit(_current, new_thread); k_thread_perms_inherit(_current, new_thread);
} }
#endif #endif /* CONFIG_USERSPACE */
#ifdef CONFIG_SCHED_DEADLINE #ifdef CONFIG_SCHED_DEADLINE
new_thread->base.prio_deadline = 0; new_thread->base.prio_deadline = 0;
#endif #endif /* CONFIG_SCHED_DEADLINE */
new_thread->resource_pool = _current->resource_pool; new_thread->resource_pool = _current->resource_pool;
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
z_waitq_init(&new_thread->halt_queue); z_waitq_init(&new_thread->halt_queue);
#endif #endif /* CONFIG_SMP */
#ifdef CONFIG_SCHED_THREAD_USAGE #ifdef CONFIG_SCHED_THREAD_USAGE
new_thread->base.usage = (struct k_cycle_stats) {}; new_thread->base.usage = (struct k_cycle_stats) {};
new_thread->base.usage.track_usage = new_thread->base.usage.track_usage =
CONFIG_SCHED_THREAD_USAGE_AUTO_ENABLE; CONFIG_SCHED_THREAD_USAGE_AUTO_ENABLE;
#endif #endif /* CONFIG_SCHED_THREAD_USAGE */
SYS_PORT_TRACING_OBJ_FUNC(k_thread, create, new_thread); SYS_PORT_TRACING_OBJ_FUNC(k_thread, create, new_thread);
@ -661,7 +661,7 @@ k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread,
stack_obj_size = stack_object->data.stack_data->size; stack_obj_size = stack_object->data.stack_data->size;
#else #else
stack_obj_size = stack_object->data.stack_size; stack_obj_size = stack_object->data.stack_size;
#endif #endif /* CONFIG_GEN_PRIV_STACKS */
K_OOPS(K_SYSCALL_VERIFY_MSG(total_size <= stack_obj_size, K_OOPS(K_SYSCALL_VERIFY_MSG(total_size <= stack_obj_size,
"stack size %zu is too big, max is %zu", "stack size %zu is too big, max is %zu",
total_size, stack_obj_size)); total_size, stack_obj_size));
@ -707,12 +707,12 @@ void z_init_thread_base(struct _thread_base *thread_base, int priority,
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
thread_base->is_idle = 0; thread_base->is_idle = 0;
#endif #endif /* CONFIG_SMP */
#ifdef CONFIG_TIMESLICE_PER_THREAD #ifdef CONFIG_TIMESLICE_PER_THREAD
thread_base->slice_ticks = 0; thread_base->slice_ticks = 0;
thread_base->slice_expired = NULL; thread_base->slice_expired = NULL;
#endif #endif /* CONFIG_TIMESLICE_PER_THREAD */
/* swap_data does not need to be initialized */ /* swap_data does not need to be initialized */
@ -731,30 +731,30 @@ FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
_current->entry.parameter1 = p1; _current->entry.parameter1 = p1;
_current->entry.parameter2 = p2; _current->entry.parameter2 = p2;
_current->entry.parameter3 = p3; _current->entry.parameter3 = p3;
#endif #endif /* CONFIG_THREAD_MONITOR */
#ifdef CONFIG_USERSPACE #ifdef CONFIG_USERSPACE
__ASSERT(z_stack_is_user_capable(_current->stack_obj), __ASSERT(z_stack_is_user_capable(_current->stack_obj),
"dropping to user mode with kernel-only stack object"); "dropping to user mode with kernel-only stack object");
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA #ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
memset(_current->userspace_local_data, 0, memset(_current->userspace_local_data, 0,
sizeof(struct _thread_userspace_local_data)); sizeof(struct _thread_userspace_local_data));
#endif #endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */
#ifdef CONFIG_THREAD_LOCAL_STORAGE #ifdef CONFIG_THREAD_LOCAL_STORAGE
arch_tls_stack_setup(_current, arch_tls_stack_setup(_current,
(char *)(_current->stack_info.start + (char *)(_current->stack_info.start +
_current->stack_info.size)); _current->stack_info.size));
#endif #endif /* CONFIG_THREAD_LOCAL_STORAGE */
arch_user_mode_enter(entry, p1, p2, p3); arch_user_mode_enter(entry, p1, p2, p3);
#else #else
/* XXX In this case we do not reset the stack */ /* XXX In this case we do not reset the stack */
z_thread_entry(entry, p1, p2, p3); z_thread_entry(entry, p1, p2, p3);
#endif #endif /* CONFIG_USERSPACE */
} }
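k_thread_user_mode_enter(), closed out above, is the one-way drop from supervisor to user mode; a sketch of the usual call pattern (the entry functions and parameters are illustrative):

#include <zephyr/kernel.h>

static void user_entry(void *p1, void *p2, void *p3)
{
	/* runs unprivileged from here on */
}

static void supervisor_entry(void *p1, void *p2, void *p3)
{
	/* privileged setup, e.g. granting kernel object access, goes here */
	k_thread_user_mode_enter(user_entry, p1, p2, p3);   /* never returns */
}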
#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO) #if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
#ifdef CONFIG_STACK_GROWS_UP #ifdef CONFIG_STACK_GROWS_UP
#error "Unsupported configuration for stack analysis" #error "Unsupported configuration for stack analysis"
#endif #endif /* CONFIG_STACK_GROWS_UP */
int z_stack_space_get(const uint8_t *stack_start, size_t size, size_t *unused_ptr) int z_stack_space_get(const uint8_t *stack_start, size_t size, size_t *unused_ptr)
{ {
@ -858,25 +858,25 @@ static inline k_ticks_t z_vrfy_k_thread_timeout_expires_ticks(
return z_impl_k_thread_timeout_expires_ticks(thread); return z_impl_k_thread_timeout_expires_ticks(thread);
} }
#include <syscalls/k_thread_timeout_expires_ticks_mrsh.c> #include <syscalls/k_thread_timeout_expires_ticks_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING #ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
void z_thread_mark_switched_in(void) void z_thread_mark_switched_in(void)
{ {
#if defined(CONFIG_SCHED_THREAD_USAGE) && !defined(CONFIG_USE_SWITCH) #if defined(CONFIG_SCHED_THREAD_USAGE) && !defined(CONFIG_USE_SWITCH)
z_sched_usage_start(_current); z_sched_usage_start(_current);
#endif #endif /* CONFIG_SCHED_THREAD_USAGE && !CONFIG_USE_SWITCH */
#ifdef CONFIG_TRACING #ifdef CONFIG_TRACING
SYS_PORT_TRACING_FUNC(k_thread, switched_in); SYS_PORT_TRACING_FUNC(k_thread, switched_in);
#endif #endif /* CONFIG_TRACING */
} }
void z_thread_mark_switched_out(void) void z_thread_mark_switched_out(void)
{ {
#if defined(CONFIG_SCHED_THREAD_USAGE) && !defined(CONFIG_USE_SWITCH) #if defined(CONFIG_SCHED_THREAD_USAGE) && !defined(CONFIG_USE_SWITCH)
z_sched_usage_stop(); z_sched_usage_stop();
#endif #endif /* CONFIG_SCHED_THREAD_USAGE && !CONFIG_USE_SWITCH */
#ifdef CONFIG_TRACING #ifdef CONFIG_TRACING
#ifdef CONFIG_THREAD_LOCAL_STORAGE #ifdef CONFIG_THREAD_LOCAL_STORAGE
@ -884,9 +884,9 @@ void z_thread_mark_switched_out(void)
if (!_current_cpu->current || if (!_current_cpu->current ||
(_current_cpu->current->base.thread_state & _THREAD_DUMMY) != 0) (_current_cpu->current->base.thread_state & _THREAD_DUMMY) != 0)
return; return;
#endif #endif /* CONFIG_THREAD_LOCAL_STORAGE */
SYS_PORT_TRACING_FUNC(k_thread, switched_out); SYS_PORT_TRACING_FUNC(k_thread, switched_out);
#endif #endif /* CONFIG_TRACING */
} }
#endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */ #endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */
@ -901,7 +901,7 @@ int k_thread_runtime_stats_get(k_tid_t thread,
z_sched_thread_usage(thread, stats); z_sched_thread_usage(thread, stats);
#else #else
*stats = (k_thread_runtime_stats_t) {}; *stats = (k_thread_runtime_stats_t) {};
#endif #endif /* CONFIG_SCHED_THREAD_USAGE */
return 0; return 0;
} }
@ -910,7 +910,7 @@ int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats)
{ {
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL #ifdef CONFIG_SCHED_THREAD_USAGE_ALL
k_thread_runtime_stats_t tmp_stats; k_thread_runtime_stats_t tmp_stats;
#endif #endif /* CONFIG_SCHED_THREAD_USAGE_ALL */
if (stats == NULL) { if (stats == NULL) {
return -EINVAL; return -EINVAL;
@ -932,10 +932,10 @@ int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats)
stats->current_cycles += tmp_stats.current_cycles; stats->current_cycles += tmp_stats.current_cycles;
stats->peak_cycles += tmp_stats.peak_cycles; stats->peak_cycles += tmp_stats.peak_cycles;
stats->average_cycles += tmp_stats.average_cycles; stats->average_cycles += tmp_stats.average_cycles;
#endif #endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
stats->idle_cycles += tmp_stats.idle_cycles; stats->idle_cycles += tmp_stats.idle_cycles;
} }
#endif #endif /* CONFIG_SCHED_THREAD_USAGE_ALL */
return 0; return 0;
} }
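The two stats getters above keep their signatures; a minimal sketch of reading them (field use assumes CONFIG_SCHED_THREAD_USAGE, as in the hunks):

#include <zephyr/kernel.h>

void stats_example(k_tid_t tid)
{
	k_thread_runtime_stats_t per_thread, all;

	if (k_thread_runtime_stats_get(tid, &per_thread) == 0) {
		/* per_thread.execution_cycles: cycles charged to this thread */
	}
	if (k_thread_runtime_stats_all_get(&all) == 0) {
		/* all.execution_cycles: cycles summed across all threads */
	}
}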
View file
@ -105,7 +105,7 @@ void z_add_timeout(struct _timeout *to, _timeout_func_t fn,
#ifdef CONFIG_KERNEL_COHERENCE #ifdef CONFIG_KERNEL_COHERENCE
__ASSERT_NO_MSG(arch_mem_coherent(to)); __ASSERT_NO_MSG(arch_mem_coherent(to));
#endif #endif /* CONFIG_KERNEL_COHERENCE */
__ASSERT(!sys_dnode_is_linked(&to->node), ""); __ASSERT(!sys_dnode_is_linked(&to->node), "");
to->fn = fn; to->fn = fn;
@ -255,7 +255,7 @@ void sys_clock_announce(int32_t ticks)
#ifdef CONFIG_TIMESLICING #ifdef CONFIG_TIMESLICING
z_time_slice(); z_time_slice();
#endif #endif /* CONFIG_TIMESLICING */
} }
int64_t sys_clock_tick_get(void) int64_t sys_clock_tick_get(void)
@ -274,7 +274,7 @@ uint32_t sys_clock_tick_get_32(void)
return (uint32_t)sys_clock_tick_get(); return (uint32_t)sys_clock_tick_get();
#else #else
return (uint32_t)curr_tick; return (uint32_t)curr_tick;
#endif #endif /* CONFIG_TICKLESS_KERNEL */
} }
int64_t z_impl_k_uptime_ticks(void) int64_t z_impl_k_uptime_ticks(void)
@ -288,7 +288,7 @@ static inline int64_t z_vrfy_k_uptime_ticks(void)
return z_impl_k_uptime_ticks(); return z_impl_k_uptime_ticks();
} }
#include <syscalls/k_uptime_ticks_mrsh.c> #include <syscalls/k_uptime_ticks_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
k_timepoint_t sys_timepoint_calc(k_timeout_t timeout) k_timepoint_t sys_timepoint_calc(k_timeout_t timeout)
{ {
@ -337,4 +337,4 @@ void z_vrfy_sys_clock_tick_set(uint64_t tick)
{ {
z_impl_sys_clock_tick_set(tick); z_impl_sys_clock_tick_set(tick);
} }
#endif #endif /* CONFIG_ZTEST */
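The tick accessors in this file are unchanged apart from the comments; for reference, a sketch of measuring elapsed time with them (the busy-wait length is arbitrary):

#include <zephyr/kernel.h>

void elapsed_example(void)
{
	int64_t start = k_uptime_ticks();

	k_busy_wait(1000);                               /* 1 ms of busy work */

	int64_t elapsed_us = k_ticks_to_us_floor64(k_uptime_ticks() - start);
	(void)elapsed_us;
}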
View file
@ -17,7 +17,7 @@ static struct k_spinlock lock;
#ifdef CONFIG_OBJ_CORE_TIMER #ifdef CONFIG_OBJ_CORE_TIMER
static struct k_obj_type obj_type_timer; static struct k_obj_type obj_type_timer;
#endif #endif /* CONFIG_OBJ_CORE_TIMER */
/** /**
* @brief Handle expiration of a kernel timer object. * @brief Handle expiration of a kernel timer object.
@ -72,7 +72,7 @@ void z_timer_expiration_handler(struct _timeout *t)
* down" behavior on timeout addition). * down" behavior on timeout addition).
*/ */
next = K_TIMEOUT_ABS_TICKS(k_uptime_ticks() + 1 + next.ticks); next = K_TIMEOUT_ABS_TICKS(k_uptime_ticks() + 1 + next.ticks);
#endif #endif /* CONFIG_TIMEOUT_64BIT */
z_add_timeout(&timer->timeout, z_timer_expiration_handler, z_add_timeout(&timer->timeout, z_timer_expiration_handler,
next); next);
} }
@ -132,7 +132,7 @@ void k_timer_init(struct k_timer *timer,
#ifdef CONFIG_OBJ_CORE_TIMER #ifdef CONFIG_OBJ_CORE_TIMER
k_obj_core_init_and_link(K_OBJ_CORE(timer), &obj_type_timer); k_obj_core_init_and_link(K_OBJ_CORE(timer), &obj_type_timer);
#endif #endif /* CONFIG_OBJ_CORE_TIMER */
} }
@ -189,7 +189,7 @@ static inline void z_vrfy_k_timer_start(struct k_timer *timer,
z_impl_k_timer_start(timer, duration, period); z_impl_k_timer_start(timer, duration, period);
} }
#include <syscalls/k_timer_start_mrsh.c> #include <syscalls/k_timer_start_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
void z_impl_k_timer_stop(struct k_timer *timer) void z_impl_k_timer_stop(struct k_timer *timer)
{ {
@ -222,7 +222,7 @@ static inline void z_vrfy_k_timer_stop(struct k_timer *timer)
z_impl_k_timer_stop(timer); z_impl_k_timer_stop(timer);
} }
#include <syscalls/k_timer_stop_mrsh.c> #include <syscalls/k_timer_stop_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
uint32_t z_impl_k_timer_status_get(struct k_timer *timer) uint32_t z_impl_k_timer_status_get(struct k_timer *timer)
{ {
@ -242,7 +242,7 @@ static inline uint32_t z_vrfy_k_timer_status_get(struct k_timer *timer)
return z_impl_k_timer_status_get(timer); return z_impl_k_timer_status_get(timer);
} }
#include <syscalls/k_timer_status_get_mrsh.c> #include <syscalls/k_timer_status_get_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
uint32_t z_impl_k_timer_status_sync(struct k_timer *timer) uint32_t z_impl_k_timer_status_sync(struct k_timer *timer)
{ {
@ -342,7 +342,7 @@ static inline void z_vrfy_k_timer_user_data_set(struct k_timer *timer,
} }
#include <syscalls/k_timer_user_data_set_mrsh.c> #include <syscalls/k_timer_user_data_set_mrsh.c>
#endif #endif /* CONFIG_USERSPACE */
#ifdef CONFIG_OBJ_CORE_TIMER #ifdef CONFIG_OBJ_CORE_TIMER
static int init_timer_obj_core_list(void) static int init_timer_obj_core_list(void)
@ -362,4 +362,4 @@ static int init_timer_obj_core_list(void)
} }
SYS_INIT(init_timer_obj_core_list, PRE_KERNEL_1, SYS_INIT(init_timer_obj_core_list, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_OBJECTS); CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif #endif /* CONFIG_OBJ_CORE_TIMER */
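The timer syscall wrappers above only gain #endif comments; a minimal usage sketch of the same API (the timer name and periods are illustrative):

#include <zephyr/kernel.h>

static void expiry_fn(struct k_timer *timer)
{
	/* runs from the system timeout handler on each expiry */
}

K_TIMER_DEFINE(my_timer, expiry_fn, NULL);

void timer_example(void)
{
	k_timer_start(&my_timer, K_MSEC(50), K_MSEC(100));   /* first in 50 ms, then every 100 ms */
	(void)k_timer_status_sync(&my_timer);                /* block until the next expiry */
	k_timer_stop(&my_timer);
}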
View file
@ -14,7 +14,7 @@
/* Need one of these for this to work */ /* Need one of these for this to work */
#if !defined(CONFIG_USE_SWITCH) && !defined(CONFIG_INSTRUMENT_THREAD_SWITCHING) #if !defined(CONFIG_USE_SWITCH) && !defined(CONFIG_INSTRUMENT_THREAD_SWITCHING)
#error "No data backend configured for CONFIG_SCHED_THREAD_USAGE" #error "No data backend configured for CONFIG_SCHED_THREAD_USAGE"
#endif #endif /* !CONFIG_USE_SWITCH && !CONFIG_INSTRUMENT_THREAD_SWITCHING */
static struct k_spinlock usage_lock; static struct k_spinlock usage_lock;
@ -26,7 +26,7 @@ static uint32_t usage_now(void)
now = (uint32_t)timing_counter_get(); now = (uint32_t)timing_counter_get();
#else #else
now = k_cycle_get_32(); now = k_cycle_get_32();
#endif #endif /* CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS */
/* Edge case: we use a zero as a null ("stop() already called") */ /* Edge case: we use a zero as a null ("stop() already called") */
return (now == 0) ? 1 : now; return (now == 0) ? 1 : now;
@ -51,12 +51,12 @@ static void sched_cpu_update_usage(struct _cpu *cpu, uint32_t cycles)
} else { } else {
cpu->usage->current = 0; cpu->usage->current = 0;
cpu->usage->num_windows++; cpu->usage->num_windows++;
#endif #endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
} }
} }
#else #else
#define sched_cpu_update_usage(cpu, cycles) do { } while (0) #define sched_cpu_update_usage(cpu, cycles) do { } while (0)
#endif #endif /* CONFIG_SCHED_THREAD_USAGE_ALL */
static void sched_thread_update_usage(struct k_thread *thread, uint32_t cycles) static void sched_thread_update_usage(struct k_thread *thread, uint32_t cycles)
{ {
@ -68,7 +68,7 @@ static void sched_thread_update_usage(struct k_thread *thread, uint32_t cycles)
if (thread->base.usage.longest < thread->base.usage.current) { if (thread->base.usage.longest < thread->base.usage.current) {
thread->base.usage.longest = thread->base.usage.current; thread->base.usage.longest = thread->base.usage.current;
} }
#endif #endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
} }
void z_sched_usage_start(struct k_thread *thread) void z_sched_usage_start(struct k_thread *thread)
@ -93,7 +93,7 @@ void z_sched_usage_start(struct k_thread *thread)
*/ */
_current_cpu->usage0 = usage_now(); _current_cpu->usage0 = usage_now();
#endif #endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
} }
void z_sched_usage_stop(void) void z_sched_usage_stop(void)
@ -159,7 +159,7 @@ void z_sched_cpu_usage(uint8_t cpu_id, struct k_thread_runtime_stats *stats)
stats->average_cycles = stats->total_cycles / stats->average_cycles = stats->total_cycles /
cpu->usage->num_windows; cpu->usage->num_windows;
} }
#endif #endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
stats->idle_cycles = stats->idle_cycles =
_kernel.cpus[cpu_id].idle_thread->base.usage.total; _kernel.cpus[cpu_id].idle_thread->base.usage.total;
@ -168,7 +168,7 @@ void z_sched_cpu_usage(uint8_t cpu_id, struct k_thread_runtime_stats *stats)
k_spin_unlock(&usage_lock, key); k_spin_unlock(&usage_lock, key);
} }
#endif #endif /* CONFIG_SCHED_THREAD_USAGE_ALL */
void z_sched_thread_usage(struct k_thread *thread, void z_sched_thread_usage(struct k_thread *thread,
struct k_thread_runtime_stats *stats) struct k_thread_runtime_stats *stats)
@ -215,11 +215,11 @@ void z_sched_thread_usage(struct k_thread *thread,
stats->average_cycles = stats->total_cycles / stats->average_cycles = stats->total_cycles /
thread->base.usage.num_windows; thread->base.usage.num_windows;
} }
#endif #endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL #ifdef CONFIG_SCHED_THREAD_USAGE_ALL
stats->idle_cycles = 0; stats->idle_cycles = 0;
#endif #endif /* CONFIG_SCHED_THREAD_USAGE_ALL */
stats->execution_cycles = thread->base.usage.total; stats->execution_cycles = thread->base.usage.total;
k_spin_unlock(&usage_lock, key); k_spin_unlock(&usage_lock, key);
@ -273,7 +273,7 @@ int k_thread_runtime_stats_disable(k_tid_t thread)
return 0; return 0;
} }
#endif #endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL #ifdef CONFIG_SCHED_THREAD_USAGE_ALL
void k_sys_runtime_stats_enable(void) void k_sys_runtime_stats_enable(void)
@ -303,7 +303,7 @@ void k_sys_runtime_stats_enable(void)
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS #ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
_kernel.cpus[i].usage->num_windows++; _kernel.cpus[i].usage->num_windows++;
_kernel.cpus[i].usage->current = 0; _kernel.cpus[i].usage->current = 0;
#endif #endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
} }
k_spin_unlock(&usage_lock, key); k_spin_unlock(&usage_lock, key);
@ -342,7 +342,7 @@ void k_sys_runtime_stats_disable(void)
k_spin_unlock(&usage_lock, key); k_spin_unlock(&usage_lock, key);
} }
#endif #endif /* CONFIG_SCHED_THREAD_USAGE_ALL */
#ifdef CONFIG_OBJ_CORE_STATS_THREAD #ifdef CONFIG_OBJ_CORE_STATS_THREAD
int z_thread_stats_raw(struct k_obj_core *obj_core, void *stats) int z_thread_stats_raw(struct k_obj_core *obj_core, void *stats)
@ -382,7 +382,7 @@ int z_thread_stats_reset(struct k_obj_core *obj_core)
stats->current = 0ULL; stats->current = 0ULL;
stats->longest = 0ULL; stats->longest = 0ULL;
stats->num_windows = (thread->base.usage.track_usage) ? 1U : 0U; stats->num_windows = (thread->base.usage.track_usage) ? 1U : 0U;
#endif #endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
if (thread != _current_cpu->current) { if (thread != _current_cpu->current) {
@ -423,7 +423,7 @@ int z_thread_stats_disable(struct k_obj_core *obj_core)
return k_thread_runtime_stats_disable(thread); return k_thread_runtime_stats_disable(thread);
#else #else
return -ENOTSUP; return -ENOTSUP;
#endif #endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
} }
int z_thread_stats_enable(struct k_obj_core *obj_core) int z_thread_stats_enable(struct k_obj_core *obj_core)
@ -436,9 +436,9 @@ int z_thread_stats_enable(struct k_obj_core *obj_core)
return k_thread_runtime_stats_enable(thread); return k_thread_runtime_stats_enable(thread);
#else #else
return -ENOTSUP; return -ENOTSUP;
#endif #endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
} }
#endif #endif /* CONFIG_OBJ_CORE_STATS_THREAD */
#ifdef CONFIG_OBJ_CORE_STATS_SYSTEM #ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
int z_cpu_stats_raw(struct k_obj_core *obj_core, void *stats) int z_cpu_stats_raw(struct k_obj_core *obj_core, void *stats)
@ -462,7 +462,7 @@ int z_cpu_stats_query(struct k_obj_core *obj_core, void *stats)
return 0; return 0;
} }
#endif #endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */
#ifdef CONFIG_OBJ_CORE_STATS_SYSTEM #ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
int z_kernel_stats_raw(struct k_obj_core *obj_core, void *stats) int z_kernel_stats_raw(struct k_obj_core *obj_core, void *stats)
@ -483,4 +483,4 @@ int z_kernel_stats_query(struct k_obj_core *obj_core, void *stats)
return k_thread_runtime_stats_all_get(stats); return k_thread_runtime_stats_all_get(stats);
} }
#endif #endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */
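k_sys_runtime_stats_enable() and k_sys_runtime_stats_disable(), defined earlier in this file under CONFIG_SCHED_THREAD_USAGE_ALL, form the global on/off switch for usage accounting; a sketch of pausing it around a region (requires that option):

#include <zephyr/kernel.h>

void stats_pause_example(void)
{
	k_sys_runtime_stats_disable();          /* stop charging cycles to threads and CPUs */

	/* ... region excluded from runtime statistics ... */

	k_sys_runtime_stats_enable();           /* resume accounting */
}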
View file
@ -25,7 +25,7 @@
#ifdef Z_LIBC_PARTITION_EXISTS #ifdef Z_LIBC_PARTITION_EXISTS
K_APPMEM_PARTITION_DEFINE(z_libc_partition); K_APPMEM_PARTITION_DEFINE(z_libc_partition);
#endif #endif /* Z_LIBC_PARTITION_EXISTS */
/* TODO: Find a better place to put this. Since we pull the entire /* TODO: Find a better place to put this. Since we pull the entire
* lib..__modules__crypto__mbedtls.a globals into app shared memory * lib..__modules__crypto__mbedtls.a globals into app shared memory
@ -33,7 +33,7 @@ K_APPMEM_PARTITION_DEFINE(z_libc_partition);
*/ */
#ifdef CONFIG_MBEDTLS #ifdef CONFIG_MBEDTLS
K_APPMEM_PARTITION_DEFINE(k_mbedtls_partition); K_APPMEM_PARTITION_DEFINE(k_mbedtls_partition);
#endif #endif /* CONFIG_MBEDTLS */
#include <zephyr/logging/log.h> #include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
@ -68,14 +68,14 @@ static struct k_spinlock objfree_lock; /* k_object_free */
#define STACK_ELEMENT_DATA_SIZE(size) Z_THREAD_STACK_SIZE_ADJUST(size) #define STACK_ELEMENT_DATA_SIZE(size) Z_THREAD_STACK_SIZE_ADJUST(size)
#endif /* CONFIG_GEN_PRIV_STACKS */ #endif /* CONFIG_GEN_PRIV_STACKS */
#endif #endif /* CONFIG_DYNAMIC_OBJECTS */
static struct k_spinlock obj_lock; /* kobj struct data */ static struct k_spinlock obj_lock; /* kobj struct data */
#define MAX_THREAD_BITS (CONFIG_MAX_THREAD_BYTES * 8) #define MAX_THREAD_BITS (CONFIG_MAX_THREAD_BYTES * 8)
#ifdef CONFIG_DYNAMIC_OBJECTS #ifdef CONFIG_DYNAMIC_OBJECTS
extern uint8_t _thread_idx_map[CONFIG_MAX_THREAD_BYTES]; extern uint8_t _thread_idx_map[CONFIG_MAX_THREAD_BYTES];
#endif #endif /* CONFIG_DYNAMIC_OBJECTS */
static void clear_perms_cb(struct k_object *ko, void *ctx_ptr); static void clear_perms_cb(struct k_object *ko, void *ctx_ptr);
@ -102,7 +102,7 @@ const char *otype_to_str(enum k_objects otype)
#else #else
ARG_UNUSED(otype); ARG_UNUSED(otype);
ret = NULL; ret = NULL;
#endif #endif /* CONFIG_LOG */
return ret; return ret;
} }
@ -147,7 +147,7 @@ uint8_t *z_priv_stack_find(k_thread_stack_t *stack)
#define DYN_OBJ_DATA_ALIGN_K_THREAD (ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT) #define DYN_OBJ_DATA_ALIGN_K_THREAD (ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT)
#else #else
#define DYN_OBJ_DATA_ALIGN_K_THREAD (sizeof(void *)) #define DYN_OBJ_DATA_ALIGN_K_THREAD (sizeof(void *))
#endif #endif /* ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT */
#ifdef CONFIG_DYNAMIC_THREAD_STACK_SIZE #ifdef CONFIG_DYNAMIC_THREAD_STACK_SIZE
#ifndef CONFIG_MPU_STACK_GUARD #ifndef CONFIG_MPU_STACK_GUARD
@ -211,7 +211,7 @@ static size_t obj_align_get(enum k_objects otype)
ret = ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT; ret = ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT;
#else #else
ret = __alignof(struct dyn_obj); ret = __alignof(struct dyn_obj);
#endif #endif /* ARCH_DYNAMIC_OBJ_K_THREAD_ALIGNMENT */
break; break;
default: default:
ret = __alignof(struct dyn_obj); ret = __alignof(struct dyn_obj);
@ -349,11 +349,11 @@ static struct k_object *dynamic_object_create(enum k_objects otype, size_t align
Z_THREAD_STACK_OBJ_ALIGN(size)); Z_THREAD_STACK_OBJ_ALIGN(size));
#else #else
dyn->kobj.name = dyn->data; dyn->kobj.name = dyn->data;
#endif #endif /* CONFIG_ARM_MPU || CONFIG_ARC_MPU */
#else #else
dyn->kobj.name = dyn->data; dyn->kobj.name = dyn->data;
dyn->kobj.data.stack_size = adjusted_size; dyn->kobj.data.stack_size = adjusted_size;
#endif #endif /* CONFIG_GEN_PRIV_STACKS */
} else { } else {
dyn->data = z_thread_aligned_alloc(align, obj_size_get(otype) + size); dyn->data = z_thread_aligned_alloc(align, obj_size_get(otype) + size);
if (dyn->data == NULL) { if (dyn->data == NULL) {
@ -561,7 +561,7 @@ static void unref_check(struct k_object *ko, uintptr_t index)
case K_OBJ_PIPE: case K_OBJ_PIPE:
k_pipe_cleanup((struct k_pipe *)ko->name); k_pipe_cleanup((struct k_pipe *)ko->name);
break; break;
#endif #endif /* CONFIG_PIPES */
case K_OBJ_MSGQ: case K_OBJ_MSGQ:
k_msgq_cleanup((struct k_msgq *)ko->name); k_msgq_cleanup((struct k_msgq *)ko->name);
break; break;
@ -577,7 +577,7 @@ static void unref_check(struct k_object *ko, uintptr_t index)
k_free(dyn->data); k_free(dyn->data);
k_free(dyn); k_free(dyn);
out: out:
#endif #endif /* CONFIG_DYNAMIC_OBJECTS */
k_spin_unlock(&obj_lock, key); k_spin_unlock(&obj_lock, key);
} }
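unref_check() above is the reap path for dynamically allocated kernel objects once their references are dropped; a sketch of the allocation side it pairs with (requires CONFIG_DYNAMIC_OBJECTS):

#include <zephyr/kernel.h>

void dyn_obj_example(void)
{
	struct k_sem *sem = k_object_alloc(K_OBJ_SEM);   /* heap-backed kernel object */

	if (sem != NULL) {
		k_sem_init(sem, 0, 1);
		/* ... use it or grant it to user threads ... */
		k_object_free(sem);                      /* release the dynamic object */
	}
}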
View file
@ -25,7 +25,7 @@ static struct k_object *validate_kernel_object(const void *obj,
if (ret != 0) { if (ret != 0) {
#ifdef CONFIG_LOG #ifdef CONFIG_LOG
k_object_dump_error(ret, obj, ko, otype); k_object_dump_error(ret, obj, ko, otype);
#endif #endif /* CONFIG_LOG */
return NULL; return NULL;
} }
View file
@ -471,7 +471,7 @@ bool k_work_flush(struct k_work *work,
__ASSERT_NO_MSG(sync != NULL); __ASSERT_NO_MSG(sync != NULL);
#ifdef CONFIG_KERNEL_COHERENCE #ifdef CONFIG_KERNEL_COHERENCE
__ASSERT_NO_MSG(arch_mem_coherent(sync)); __ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif #endif /* CONFIG_KERNEL_COHERENCE */
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, flush, work); SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, flush, work);
@ -583,7 +583,7 @@ bool k_work_cancel_sync(struct k_work *work,
__ASSERT_NO_MSG(!k_is_in_isr()); __ASSERT_NO_MSG(!k_is_in_isr());
#ifdef CONFIG_KERNEL_COHERENCE #ifdef CONFIG_KERNEL_COHERENCE
__ASSERT_NO_MSG(arch_mem_coherent(sync)); __ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif #endif /* CONFIG_KERNEL_COHERENCE */
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_sync, work, sync); SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_sync, work, sync);
@ -1066,7 +1066,7 @@ bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
__ASSERT_NO_MSG(!k_is_in_isr()); __ASSERT_NO_MSG(!k_is_in_isr());
#ifdef CONFIG_KERNEL_COHERENCE #ifdef CONFIG_KERNEL_COHERENCE
__ASSERT_NO_MSG(arch_mem_coherent(sync)); __ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif #endif /* CONFIG_KERNEL_COHERENCE */
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_delayable_sync, dwork, sync); SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_delayable_sync, dwork, sync);
@ -1098,7 +1098,7 @@ bool k_work_flush_delayable(struct k_work_delayable *dwork,
__ASSERT_NO_MSG(!k_is_in_isr()); __ASSERT_NO_MSG(!k_is_in_isr());
#ifdef CONFIG_KERNEL_COHERENCE #ifdef CONFIG_KERNEL_COHERENCE
__ASSERT_NO_MSG(arch_mem_coherent(sync)); __ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif #endif /* CONFIG_KERNEL_COHERENCE */
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, flush_delayable, dwork, sync); SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, flush_delayable, dwork, sync);