diff --git a/arch/arm/core/cpu_idle.S b/arch/arm/core/cpu_idle.S
index 622a3f4b0e..dc36136efd 100644
--- a/arch/arm/core/cpu_idle.S
+++ b/arch/arm/core/cpu_idle.S
@@ -113,9 +113,9 @@ SECTION_FUNC(TEXT, _NanoIdleValClear)
  */
 SECTION_FUNC(TEXT, k_cpu_idle)
-#ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
+#ifdef CONFIG_TRACING
 	push {lr}
-	bl _sys_k_event_logger_enter_sleep
+	bl sys_trace_idle
 	pop {r0}
 	mov lr, r0
 #endif
@@ -157,9 +157,9 @@ SECTION_FUNC(TEXT, k_cpu_idle)
  */
 SECTION_FUNC(TEXT, k_cpu_atomic_idle)
-#ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
+#ifdef CONFIG_TRACING
 	push {lr}
-	bl _sys_k_event_logger_enter_sleep
+	bl sys_trace_idle
 	pop {r1}
 	mov lr, r1
 #endif
diff --git a/arch/arm/core/irq_manage.c b/arch/arm/core/irq_manage.c
index c0bbd6379c..f4bfdabe2a 100644
--- a/arch/arm/core/irq_manage.c
+++ b/arch/arm/core/irq_manage.c
@@ -23,7 +23,7 @@
 #include
 #include
 #include
-#include <logging/kernel_event_logger.h>
+#include <tracing.h>
 
 extern void __reserved(void);
 
@@ -137,7 +137,6 @@ void _irq_spurious(void *unused)
 * arch/cpu.h and kernel_structs.h; the inline functions typically need to
 * perform operations on _kernel.  For now, leave as regular functions, a
 * future iteration will resolve this.
- * We have a similar issue with the k_event_logger functions.
 *
 * See https://github.com/zephyrproject-rtos/zephyr/issues/3056
 */
@@ -178,14 +177,10 @@ void _arch_isr_direct_pm(void)
 }
 #endif
 
-#if defined(CONFIG_KERNEL_EVENT_LOGGER_SLEEP) || \
-	defined(CONFIG_KERNEL_EVENT_LOGGER_INTERRUPT)
 void _arch_isr_direct_header(void)
 {
-	_sys_k_event_logger_interrupt();
-	_sys_k_event_logger_exit_sleep();
+	sys_trace_isr_enter();
 }
-#endif
 
 #if defined(CONFIG_ARM_SECURE_FIRMWARE)
 /**
diff --git a/arch/arm/core/isr_wrapper.S b/arch/arm/core/isr_wrapper.S
index b1563df2f7..95fbe5cd06 100644
--- a/arch/arm/core/isr_wrapper.S
+++ b/arch/arm/core/isr_wrapper.S
@@ -47,12 +47,8 @@ SECTION_FUNC(TEXT, _isr_wrapper)
 	bl read_timer_start_of_isr
 #endif
 
-#ifdef CONFIG_KERNEL_EVENT_LOGGER_INTERRUPT
-	bl _sys_k_event_logger_interrupt
-#endif
-
-#ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
-	bl _sys_k_event_logger_exit_sleep
+#ifdef CONFIG_TRACING
+	bl sys_trace_isr_enter
 #endif
 
 #ifdef CONFIG_SYS_POWER_MANAGEMENT
diff --git a/arch/arm/core/swap_helper.S b/arch/arm/core/swap_helper.S
index e89764f73e..3afdae2c35 100644
--- a/arch/arm/core/swap_helper.S
+++ b/arch/arm/core/swap_helper.S
@@ -43,10 +43,10 @@ GDATA(_kernel)
 
 SECTION_FUNC(TEXT, __pendsv)
-#ifdef CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
+#ifdef CONFIG_TRACING
 	/* Register the context switch */
 	push {lr}
-	bl _sys_k_event_logger_context_switch
+	bl sys_trace_thread_switched_in
 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
 	pop {r0}
 	mov lr, r0
diff --git a/arch/x86/core/irq_manage.c b/arch/x86/core/irq_manage.c
index e4e509affe..b9c4e0f4da 100644
--- a/arch/x86/core/irq_manage.c
+++ b/arch/x86/core/irq_manage.c
@@ -63,6 +63,7 @@ void _arch_irq_direct_pm(void)
 void _arch_isr_direct_header(void)
 {
 	_int_latency_start();
+	sys_trace_isr_enter();
 	_sys_k_event_logger_interrupt();
 	_sys_k_event_logger_exit_sleep();
 
@@ -76,6 +77,7 @@ void _arch_isr_direct_footer(int swap)
 {
 	_irq_controller_eoi();
 	_int_latency_stop();
+	sys_trace_isr_exit();
 	--_kernel.nested;
 
 	/* Call swap if all the following is true:
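Note how the x86 hunks above route direct ISRs through sys_trace_isr_enter()/sys_trace_isr_exit(): the hooks sit in _arch_isr_direct_header()/_arch_isr_direct_footer(), which bracket any handler declared with ISR_DIRECT_DECLARE(). A minimal sketch of such a handler, for context only (my_direct_isr and do_device_work are hypothetical names, not part of this patch):

/* A direct ISR picks up the new hooks automatically:
 * ISR_DIRECT_DECLARE() invokes _arch_isr_direct_header() on entry
 * (which now emits sys_trace_isr_enter()) and _arch_isr_direct_footer()
 * on exit (which now emits sys_trace_isr_exit()).
 */
ISR_DIRECT_DECLARE(my_direct_isr)       /* hypothetical handler */
{
	do_device_work();               /* hypothetical device handling */

	return 1;   /* non-zero: ask the footer to check for a reschedule */
}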
diff --git a/drivers/timer/cortex_m_systick.c b/drivers/timer/cortex_m_systick.c
index c86e0beae2..48643f29c4 100644
--- a/drivers/timer/cortex_m_systick.c
+++ b/drivers/timer/cortex_m_systick.c
@@ -224,6 +224,7 @@ void _timer_int_handler(void *unused)
 	extern void _sys_k_event_logger_interrupt(void);
 	_sys_k_event_logger_interrupt();
 #endif
+	sys_trace_isr_enter();
 
 #ifdef CONFIG_SYS_POWER_MANAGEMENT
 	s32_t numIdleTicks;
diff --git a/drivers/timer/nrf_rtc_timer.c b/drivers/timer/nrf_rtc_timer.c
index a1e73ed738..02d27a01ec 100644
--- a/drivers/timer/nrf_rtc_timer.c
+++ b/drivers/timer/nrf_rtc_timer.c
@@ -472,6 +472,7 @@ void rtc1_nrf5_isr(void *arg)
 	extern void read_timer_start_of_tick_handler(void);
 	read_timer_start_of_tick_handler();
 #endif
+	sys_trace_isr_enter();
 
 #ifdef CONFIG_TICKLESS_KERNEL
 	if (!expected_sys_ticks) {
@@ -500,6 +501,7 @@ void rtc1_nrf5_isr(void *arg)
 	extern void read_timer_end_of_tick_handler(void);
 	read_timer_end_of_tick_handler();
 #endif
+	sys_trace_isr_exit();
 }
diff --git a/include/arch/arm/cortex_m/irq.h b/include/arch/arm/cortex_m/irq.h
index f858ef6336..26a2a89465 100644
--- a/include/arch/arm/cortex_m/irq.h
+++ b/include/arch/arm/cortex_m/irq.h
@@ -107,22 +107,25 @@ extern void _arch_isr_direct_pm(void);
 #define _ARCH_ISR_DIRECT_PM() do { } while (0)
 #endif
 
-#if defined(CONFIG_KERNEL_EVENT_LOGGER_SLEEP) || \
-	defined(CONFIG_KERNEL_EVENT_LOGGER_INTERRUPT)
 #define _ARCH_ISR_DIRECT_HEADER() _arch_isr_direct_header()
 extern void _arch_isr_direct_header(void);
-#else
-#define _ARCH_ISR_DIRECT_HEADER() do { } while (0)
-#endif
 
 #define _ARCH_ISR_DIRECT_FOOTER(swap) _arch_isr_direct_footer(swap)
 
 /* arch/arm/core/exc_exit.S */
 extern void _IntExit(void);
 
+#ifdef CONFIG_TRACING
+extern void sys_trace_isr_exit_to_scheduler(void);
+#endif
+
 static inline void _arch_isr_direct_footer(int maybe_swap)
 {
 	if (maybe_swap) {
+
+#ifdef CONFIG_TRACING
+		sys_trace_isr_exit_to_scheduler();
+#endif
 		_IntExit();
 	}
 }
diff --git a/include/kernel.h b/include/kernel.h
index da8ed7243a..040c7c0025 100644
--- a/include/kernel.h
+++ b/include/kernel.h
@@ -4967,6 +4967,7 @@ inline void *operator new[](size_t size, void *ptr)
 
 #endif /* defined(CONFIG_CPLUSPLUS) && defined(__cplusplus) */
 
+#include <tracing.h>
 #include
 
 #endif /* !_ASMLANGUAGE */
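The new include/tracing.h below defines every hook as an empty macro, so with tracing disabled the calls sprinkled through the kernel compile away to nothing. The SYS_TRACE_ID_* comment hints at the intended first consumer; purely as an illustrative sketch (not part of this patch, and it would have to displace the no-op definitions, e.g. via a conditional include), a SEGGER SystemView backend could map the hooks along these lines:

/* Hypothetical backend header standing in for the no-op definitions
 * in tracing.h; the SEGGER_SYSVIEW_* calls are from SEGGER's
 * SystemView API.
 */
#include <SEGGER_SYSVIEW.h>

#define sys_trace_isr_enter()              SEGGER_SYSVIEW_RecordEnterISR()
#define sys_trace_isr_exit()               SEGGER_SYSVIEW_RecordExitISR()
#define sys_trace_isr_exit_to_scheduler()  SEGGER_SYSVIEW_RecordExitISRToScheduler()
#define sys_trace_idle()                   SEGGER_SYSVIEW_OnIdle()
#define sys_trace_void(id)                 SEGGER_SYSVIEW_RecordVoid(id)
#define sys_trace_end_call(id)             SEGGER_SYSVIEW_RecordEndCall(id)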
diff --git a/include/tracing.h b/include/tracing.h
new file mode 100644
index 0000000000..987e41563b
--- /dev/null
+++ b/include/tracing.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2018 Intel Corporation
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+#ifndef _KERNEL_TRACE_H
+#define _KERNEL_TRACE_H
+
+#include <kernel.h>
+
+/* Below IDs are used with systemview, not final to the zephyr tracing API */
+#define SYS_TRACE_ID_OFFSET (32u)
+
+#define SYS_TRACE_ID_MUTEX_INIT (1u + SYS_TRACE_ID_OFFSET)
+#define SYS_TRACE_ID_MUTEX_UNLOCK (2u + SYS_TRACE_ID_OFFSET)
+#define SYS_TRACE_ID_MUTEX_LOCK (3u + SYS_TRACE_ID_OFFSET)
+#define SYS_TRACE_ID_SEMA_INIT (4u + SYS_TRACE_ID_OFFSET)
+#define SYS_TRACE_ID_SEMA_GIVE (5u + SYS_TRACE_ID_OFFSET)
+#define SYS_TRACE_ID_SEMA_TAKE (6u + SYS_TRACE_ID_OFFSET)
+
+#if CONFIG_TRACING
+void z_sys_trace_idle(void);
+void z_sys_trace_isr_enter(void);
+void z_sys_trace_isr_exit_to_scheduler(void);
+void z_sys_trace_thread_switched_in(void);
+#endif
+
+/**
+ * @brief Called before a thread has been selected to run
+ */
+#define sys_trace_thread_switched_out()
+
+/**
+ * @brief Called after a thread has been selected to run
+ */
+#define sys_trace_thread_switched_in()
+
+/**
+ * @brief Called when setting priority of a thread
+ */
+#define sys_trace_thread_priority_set(thread)
+
+/**
+ * @brief Called when a thread is being created
+ */
+#define sys_trace_thread_create(thread)
+
+/**
+ * @brief Called when a thread is being aborted
+ *
+ */
+#define sys_trace_thread_abort(thread)
+
+/**
+ * @brief Called when a thread is being suspended
+ * @param thread Thread structure
+ */
+#define sys_trace_thread_suspend(thread)
+
+/**
+ * @brief Called when a thread is being resumed from suspension
+ * @param thread Thread structure
+ */
+#define sys_trace_thread_resume(thread)
+
+/**
+ * @brief Called when a thread is ready to run
+ * @param thread Thread structure
+ */
+#define sys_trace_thread_ready(thread)
+
+/**
+ * @brief Called when a thread is pending
+ * @param thread Thread structure
+ */
+#define sys_trace_thread_pend(thread)
+
+/**
+ * @brief Provide information about specific thread
+ * @param thread Thread structure
+ */
+#define sys_trace_thread_info(thread)
+
+/**
+ * @brief Called when entering an ISR
+ */
+#define sys_trace_isr_enter()
+
+/**
+ * @brief Called when exiting an ISR
+ */
+#define sys_trace_isr_exit()
+
+/**
+ * @brief Called when exiting an ISR and switching to scheduler
+ */
+#define sys_trace_isr_exit_to_scheduler()
+
+/**
+ * @brief Can be called with any id signifying a new call
+ * @param id ID of the operation that was started
+ */
+#define sys_trace_void(id)
+
+/**
+ * @brief Can be called with any id signifying ending a call
+ * @param id ID of the operation that was completed
+ */
+#define sys_trace_end_call(id)
+
+
+#define z_sys_trace_idle()
+
+#define z_sys_trace_isr_enter()
+
+#define z_sys_trace_isr_exit_to_scheduler()
+
+#define z_sys_trace_thread_switched_in()
+
+#endif
diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
index 5ce2cf848b..411409ee1e 100644
--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h
@@ -12,6 +12,7 @@
 #ifdef CONFIG_KERNEL_EVENT_LOGGER
 #include <logging/kernel_event_logger.h>
 #endif /* CONFIG_KERNEL_EVENT_LOGGER */
+#include <tracing.h>
 
 #ifdef CONFIG_MULTITHREADING
 #define _VALID_PRIO(prio, entry_point) \
@@ -225,6 +226,8 @@ static inline void _ready_thread(struct k_thread *thread)
 		_add_thread_to_ready_q(thread);
 	}
 
+	sys_trace_thread_ready(thread);
+
 #ifdef CONFIG_KERNEL_EVENT_LOGGER_THREAD
 	_sys_k_event_logger_thread_ready(thread);
 #endif
diff --git a/kernel/include/kswap.h b/kernel/include/kswap.h
index e2e2cb9491..bbe04ef20b 100644
--- a/kernel/include/kswap.h
+++ b/kernel/include/kswap.h
@@ -33,6 +33,10 @@ extern void _sys_k_event_logger_context_switch(void);
 void _smp_reacquire_global_lock(struct k_thread *thread);
 void _smp_release_global_lock(struct k_thread *thread);
 
+#ifdef CONFIG_TRACING
+extern void sys_trace_thread_switched_out(void);
+#endif
+
 /* context switching and scheduling-related routines */
 
 #ifdef CONFIG_USE_SWITCH
@@ -56,6 +60,10 @@ static inline unsigned int _Swap(unsigned int key)
 	_check_stack_sentinel();
 	_update_time_slice_before_swap();
 
+#ifdef CONFIG_TRACING
+	sys_trace_thread_switched_out();
+#endif
+
 #ifdef CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
 	_sys_k_event_logger_context_switch();
 #endif
@@ -80,6 +88,10 @@ static inline unsigned int _Swap(unsigned int key)
 		ret = _current->swap_retval;
 	}
 
+#ifdef CONFIG_TRACING
+	sys_trace_thread_switched_in();
+#endif
+
 	irq_unlock(key);
 
 	return ret;
@@ -91,10 +103,19 @@ extern unsigned int __swap(unsigned int key);
 
 static inline unsigned int _Swap(unsigned int key)
 {
+	unsigned int ret;
 	_check_stack_sentinel();
 	_update_time_slice_before_swap();
 
-	return __swap(key);
+#ifdef CONFIG_TRACING
+	sys_trace_thread_switched_out();
+#endif
+	ret = __swap(key);
+#ifdef CONFIG_TRACING
+	sys_trace_thread_switched_in();
+#endif
+
+	return ret;
 }
 #endif
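One subtlety worth noting: the ARM assembly earlier in this patch branches to sys_trace_idle, sys_trace_isr_enter and sys_trace_thread_switched_in directly (bl ...), and irq.h/kswap.h declare further hooks as extern functions under CONFIG_TRACING. The preprocessor never rewrites those references, so when CONFIG_TRACING is enabled a backend must export real symbols with these names or the link fails. A do-nothing reference backend might look like the following sketch (illustrative, not part of the patch; such a translation unit must not pick up the same-named no-op macros from tracing.h):

/* Minimal stub backend providing the symbols referenced from
 * assembly and from the extern declarations under CONFIG_TRACING.
 */
void sys_trace_idle(void)
{
	/* CPU is about to enter idle (called from k_cpu_idle) */
}

void sys_trace_isr_enter(void)
{
	/* ISR entry (called from _isr_wrapper and the timer ISRs) */
}

void sys_trace_isr_exit_to_scheduler(void)
{
	/* ISR exit that continues into a context switch */
}

void sys_trace_thread_switched_in(void)
{
	/* newly scheduled thread (called from __pendsv) */
}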
"BUILD: " __DATE__ " " __TIME__ @@ -334,12 +335,16 @@ static void prepare_multithreading(struct k_thread *dummy_thread) MAIN_STACK_SIZE, bg_thread_main, NULL, NULL, NULL, CONFIG_MAIN_THREAD_PRIORITY, K_ESSENTIAL); + + sys_trace_thread_create(_main_thread); + _mark_thread_as_started(_main_thread); _ready_thread(_main_thread); #ifdef CONFIG_MULTITHREADING init_idle_thread(_idle_thread, _idle_stack); _kernel.cpus[0].idle_thread = _idle_thread; + sys_trace_thread_create(_idle_thread); #endif #if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 1 diff --git a/kernel/mutex.c b/kernel/mutex.c index e505b8be40..e3f3584ba8 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -36,6 +36,7 @@ #include #include #include +#include #define RECORD_STATE_CHANGE(mutex) do { } while ((0)) #define RECORD_CONFLICT(mutex) do { } while ((0)) @@ -72,13 +73,17 @@ void _impl_k_mutex_init(struct k_mutex *mutex) mutex->owner = NULL; mutex->lock_count = 0; + sys_trace_void(SYS_TRACE_ID_MUTEX_INIT); + /* initialized upon first use */ /* mutex->owner_orig_prio = 0; */ _waitq_init(&mutex->wait_q); + SYS_TRACING_OBJ_INIT(k_mutex, mutex); _k_object_init(mutex); + sys_trace_end_call(SYS_TRACE_ID_MUTEX_INIT); } #ifdef CONFIG_USERSPACE @@ -118,6 +123,7 @@ int _impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout) int new_prio; unsigned int key; + sys_trace_void(SYS_TRACE_ID_MUTEX_LOCK); _sched_lock(); if (likely(mutex->lock_count == 0 || mutex->owner == _current)) { @@ -136,6 +142,7 @@ int _impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout) mutex->owner_orig_prio); k_sched_unlock(); + sys_trace_end_call(SYS_TRACE_ID_MUTEX_LOCK); return 0; } @@ -144,6 +151,7 @@ int _impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout) if (unlikely(timeout == K_NO_WAIT)) { k_sched_unlock(); + sys_trace_end_call(SYS_TRACE_ID_MUTEX_LOCK); return -EBUSY; } @@ -167,6 +175,7 @@ int _impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout) if (got_mutex == 0) { k_sched_unlock(); + sys_trace_end_call(SYS_TRACE_ID_MUTEX_LOCK); return 0; } @@ -188,6 +197,7 @@ int _impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout) k_sched_unlock(); + sys_trace_end_call(SYS_TRACE_ID_MUTEX_LOCK); return -EAGAIN; } @@ -206,6 +216,7 @@ void _impl_k_mutex_unlock(struct k_mutex *mutex) __ASSERT(mutex->lock_count > 0, ""); __ASSERT(mutex->owner == _current, ""); + sys_trace_void(SYS_TRACE_ID_MUTEX_UNLOCK); _sched_lock(); RECORD_STATE_CHANGE(); diff --git a/kernel/sched.c b/kernel/sched.c index 8c4cebe349..108f5102ad 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -371,6 +371,7 @@ void _thread_priority_set(struct k_thread *thread, int prio) thread->base.prio = prio; } } + sys_trace_thread_priority_set(thread); if (need_sched) { _reschedule(irq_lock()); diff --git a/kernel/sem.c b/kernel/sem.c index bdd88d108e..98488e6fbd 100644 --- a/kernel/sem.c +++ b/kernel/sem.c @@ -27,6 +27,7 @@ #include #include #include +#include extern struct k_sem _k_sem_list_start[]; extern struct k_sem _k_sem_list_end[]; @@ -60,6 +61,7 @@ void _impl_k_sem_init(struct k_sem *sem, unsigned int initial_count, __ASSERT(limit != 0, "limit cannot be zero"); __ASSERT(initial_count <= limit, "count cannot be greater than limit"); + sys_trace_void(SYS_TRACE_ID_SEMA_INIT); sem->count = initial_count; sem->limit = limit; _waitq_init(&sem->wait_q); @@ -70,6 +72,7 @@ void _impl_k_sem_init(struct k_sem *sem, unsigned int initial_count, SYS_TRACING_OBJ_INIT(k_sem, sem); _k_object_init(sem); + sys_trace_end_call(SYS_TRACE_ID_SEMA_INIT); } #ifdef CONFIG_USERSPACE @@ -134,7 +137,9 @@ void 
diff --git a/kernel/thread.c b/kernel/thread.c
index 78a8296f7e..f8f5002193 100644
--- a/kernel/thread.c
+++ b/kernel/thread.c
@@ -27,6 +27,7 @@
 #include
 #include
 #include
+#include <tracing.h>
 
 extern struct _static_thread_data _static_thread_data_list_start[];
 extern struct _static_thread_data _static_thread_data_list_end[];
@@ -358,6 +359,7 @@ void _setup_new_thread(struct k_thread *new_thread,
 	new_thread->base.prio_deadline = 0;
 #endif
 	new_thread->resource_pool = _current->resource_pool;
+	sys_trace_thread_create(new_thread);
 }
 
 #ifdef CONFIG_MULTITHREADING
@@ -368,12 +370,14 @@ k_tid_t _impl_k_thread_create(struct k_thread *new_thread,
 			      int prio, u32_t options, s32_t delay)
 {
 	__ASSERT(!_is_in_isr(), "Threads may not be created in ISRs");
+
 	_setup_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
 			  prio, options);
 
 	if (delay != K_FOREVER) {
 		schedule_new_thread(new_thread, delay);
 	}
+
 	return new_thread;
 }
 
@@ -503,6 +507,8 @@ void _impl_k_thread_suspend(struct k_thread *thread)
 
 	_k_thread_single_suspend(thread);
 
+	sys_trace_thread_suspend(thread);
+
 	if (thread == _current) {
 		_Swap(key);
 	} else {
@@ -526,6 +532,7 @@ void _impl_k_thread_resume(struct k_thread *thread)
 
 	_k_thread_single_resume(thread);
 
+	sys_trace_thread_resume(thread);
 	_reschedule(key);
 }
 
@@ -551,6 +558,8 @@ void _k_thread_single_abort(struct k_thread *thread)
 	}
 
 	thread->base.thread_state |= _THREAD_DEAD;
+
+	sys_trace_thread_abort(thread);
 #ifdef CONFIG_KERNEL_EVENT_LOGGER_THREAD
 	_sys_k_event_logger_thread_exit(thread);
 #endif