From 8892406c1de21bd5de5877f39099e3663a5f3af1 Mon Sep 17 00:00:00 2001 From: Andy Ross Date: Thu, 3 Oct 2019 11:43:10 -0700 Subject: [PATCH] kernel/sys_clock.h: Deprecate and convert uses of old conversions Mark the old time conversion APIs deprecated, leave compatibility macros in place, and replace all usage with the new API. Signed-off-by: Andy Ross --- arch/arc/core/timestamp.c | 2 +- doc/reference/kernel/timing/clocks.rst | 2 +- drivers/timer/altera_avalon_timer_hal.c | 8 +++---- drivers/timer/litex_timer.c | 6 ++--- drivers/timer/loapic_timer.c | 2 +- drivers/timer/xlnx_psttc_timer.c | 2 +- include/kernel.h | 4 ++-- include/sys_clock.h | 23 ++++++++++++------- kernel/poll.c | 2 +- kernel/sched.c | 12 +++++----- kernel/thread.c | 2 +- kernel/timeout.c | 2 +- kernel/timer.c | 4 ++-- kernel/work_q.c | 2 +- lib/cmsis_rtos_v1/cmsis_signal.c | 2 +- lib/cmsis_rtos_v2/event_flags.c | 4 ++-- lib/cmsis_rtos_v2/kernel.c | 4 ++-- lib/cmsis_rtos_v2/mempool.c | 2 +- lib/cmsis_rtos_v2/msgq.c | 4 ++-- lib/cmsis_rtos_v2/mutex.c | 2 +- lib/cmsis_rtos_v2/semaphore.c | 2 +- lib/cmsis_rtos_v2/thread_flags.c | 4 ++-- lib/cmsis_rtos_v2/timer.c | 2 +- samples/smp/pi/src/main.c | 2 +- subsys/bluetooth/shell/gatt.c | 2 +- subsys/bluetooth/shell/l2cap.c | 2 +- subsys/debug/tracing/cpu_stats.c | 6 ++--- subsys/net/ip/net_shell.c | 8 +++---- subsys/net/ip/net_stats.h | 4 ++-- subsys/net/l2/canbus/6locan.c | 16 ++++++------- subsys/net/l2/ppp/ppp_l2.c | 2 +- subsys/testsuite/include/timestamp.h | 2 +- .../latency_measure/src/int_to_thread.c | 2 +- .../latency_measure/src/int_to_thread_evt.c | 2 +- tests/benchmarks/latency_measure/src/utils.h | 2 +- tests/benchmarks/mbedtls/src/benchmark.c | 2 +- .../benchmarks/timing_info/src/timing_info.h | 2 +- tests/kernel/common/src/boot_delay.c | 3 +-- tests/kernel/common/src/clock.c | 4 ++-- tests/kernel/context/src/main.c | 4 ++-- tests/kernel/early_sleep/src/main.c | 8 +++---- tests/kernel/fifo/fifo_timeout/src/main.c | 4 ++-- tests/kernel/lifo/lifo_usage/src/main.c | 2 +- .../src/test_sched_timeslice_reset.c | 10 ++++---- .../schedule_api/src/test_slice_scheduling.c | 4 ++-- tests/kernel/sleep/src/main.c | 2 +- .../tickless/tickless_concept/src/main.c | 8 +++---- tests/kernel/timer/timer_api/src/main.c | 6 ++--- tests/kernel/timer/timer_monotonic/src/main.c | 4 ++-- tests/kernel/workq/work_queue/src/main.c | 2 +- tests/kernel/workq/work_queue_api/src/main.c | 6 ++--- .../cmsis_rtos_v1/src/kernel_apis.c | 2 +- .../cmsis_rtos_v2/src/thread_apis.c | 14 +++++------ 53 files changed, 122 insertions(+), 114 deletions(-) diff --git a/arch/arc/core/timestamp.c b/arch/arc/core/timestamp.c index 81e3ea7914..1eee914a4b 100644 --- a/arch/arc/core/timestamp.c +++ b/arch/arc/core/timestamp.c @@ -33,7 +33,7 @@ u64_t z_tsc_read(void) t = (u64_t)z_tick_get(); count = z_arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT); irq_unlock(key); - t *= (u64_t)sys_clock_hw_cycles_per_tick(); + t *= k_ticks_to_cyc_floor64(1); t += (u64_t)count; return t; } diff --git a/doc/reference/kernel/timing/clocks.rst b/doc/reference/kernel/timing/clocks.rst index 70f355be96..afeb335ee0 100644 --- a/doc/reference/kernel/timing/clocks.rst +++ b/doc/reference/kernel/timing/clocks.rst @@ -129,7 +129,7 @@ between two points in time. 
/* compute how long the work took (assumes no counter rollover) */ cycles_spent = stop_time - start_time; - nanoseconds_spent = SYS_CLOCK_HW_CYCLES_TO_NS(cycles_spent); + nanoseconds_spent = (u32_t)k_cyc_to_ns_floor64(cycles_spent); Suggested Uses ************** diff --git a/drivers/timer/altera_avalon_timer_hal.c b/drivers/timer/altera_avalon_timer_hal.c index 5a97b7efac..d8cd8a99bf 100644 --- a/drivers/timer/altera_avalon_timer_hal.c +++ b/drivers/timer/altera_avalon_timer_hal.c @@ -28,7 +28,7 @@ static void timer_irq_handler(void *unused) read_timer_start_of_tick_handler(); #endif - accumulated_cycle_count += sys_clock_hw_cycles_per_tick(); + accumulated_cycle_count += k_ticks_to_cyc_floor32(1); /* Clear the interrupt */ alt_handle_irq((void *)TIMER_0_BASE, TIMER_0_IRQ); @@ -46,15 +46,15 @@ int z_clock_driver_init(struct device *device) ARG_UNUSED(device); IOWR_ALTERA_AVALON_TIMER_PERIODL(TIMER_0_BASE, - sys_clock_hw_cycles_per_tick() & 0xFFFF); + k_ticks_to_cyc_floor32(1) & 0xFFFF); IOWR_ALTERA_AVALON_TIMER_PERIODH(TIMER_0_BASE, - (sys_clock_hw_cycles_per_tick() >> 16) & 0xFFFF); + (k_ticks_to_cyc_floor32(1) >> 16) & 0xFFFF); IRQ_CONNECT(TIMER_0_IRQ, 0, timer_irq_handler, NULL, 0); irq_enable(TIMER_0_IRQ); alt_avalon_timer_sc_init((void *)TIMER_0_BASE, 0, - TIMER_0_IRQ, sys_clock_hw_cycles_per_tick()); + TIMER_0_IRQ, k_ticks_to_cyc_floor32(1)); return 0; } diff --git a/drivers/timer/litex_timer.c b/drivers/timer/litex_timer.c index d1bc201ce5..48d0aece89 100644 --- a/drivers/timer/litex_timer.c +++ b/drivers/timer/litex_timer.c @@ -30,7 +30,7 @@ static void litex_timer_irq_handler(void *device) int key = irq_lock(); sys_write8(TIMER_EV, TIMER_EV_PENDING_ADDR); - accumulated_cycle_count += sys_clock_hw_cycles_per_tick(); + accumulated_cycle_count += k_ticks_to_cyc_floor32(1); z_clock_announce(1); irq_unlock(key); @@ -57,9 +57,9 @@ int z_clock_driver_init(struct device *device) sys_write8(TIMER_DISABLE, TIMER_EN_ADDR); for (int i = 0; i < 4; i++) { - sys_write8(sys_clock_hw_cycles_per_tick() >> (24 - i * 8), + sys_write8(k_ticks_to_cyc_floor32(1) >> (24 - i * 8), TIMER_RELOAD_ADDR + i * 0x4); - sys_write8(sys_clock_hw_cycles_per_tick() >> (24 - i * 8), + sys_write8(k_ticks_to_cyc_floor32(1) >> (24 - i * 8), TIMER_LOAD_ADDR + i * 0x4); } diff --git a/drivers/timer/loapic_timer.c b/drivers/timer/loapic_timer.c index ed795d4415..c2275d2e8a 100644 --- a/drivers/timer/loapic_timer.c +++ b/drivers/timer/loapic_timer.c @@ -574,7 +574,7 @@ int z_clock_driver_init(struct device *device) /* determine the timer counter value (in timer clock cycles/system tick) */ - cycles_per_tick = sys_clock_hw_cycles_per_tick(); + cycles_per_tick = k_ticks_to_cyc_floor32(1); tickless_idle_init(); diff --git a/drivers/timer/xlnx_psttc_timer.c b/drivers/timer/xlnx_psttc_timer.c index 5ec46ab3e2..d1abeb45e9 100644 --- a/drivers/timer/xlnx_psttc_timer.c +++ b/drivers/timer/xlnx_psttc_timer.c @@ -111,7 +111,7 @@ void _timer_int_handler(void *unused) u32_t regval; regval = sys_read32(TIMER_BASEADDR + XTTCPS_ISR_OFFSET); - accumulated_cycles += sys_clock_hw_cycles_per_tick(); + accumulated_cycles += k_ticks_to_cyc_floor32(1); z_clock_announce(_sys_idle_elapsed_ticks); } diff --git a/include/kernel.h b/include/kernel.h index d2258306e7..1c98827d86 100644 --- a/include/kernel.h +++ b/include/kernel.h @@ -1637,7 +1637,7 @@ __syscall u32_t k_timer_remaining_get(struct k_timer *timer); static inline u32_t z_impl_k_timer_remaining_get(struct k_timer *timer) { const s32_t ticks = z_timeout_remaining(&timer->timeout); - return 
(ticks > 0) ? (u32_t)__ticks_to_ms(ticks) : 0U; + return (ticks > 0) ? (u32_t)k_ticks_to_ms_floor64(ticks) : 0U; } /** @@ -3077,7 +3077,7 @@ static inline int k_delayed_work_submit(struct k_delayed_work *work, */ static inline s32_t k_delayed_work_remaining_get(struct k_delayed_work *work) { - return __ticks_to_ms(z_timeout_remaining(&work->timeout)); + return k_ticks_to_ms_floor64(z_timeout_remaining(&work->timeout)); } /** diff --git a/include/sys_clock.h b/include/sys_clock.h index 6396e47826..08d87f764a 100644 --- a/include/sys_clock.h +++ b/include/sys_clock.h @@ -72,13 +72,20 @@ extern void z_enable_sys_clock(void); #endif -#define __ticks_to_ms(t) k_ticks_to_ms_floor64(t) -#define z_ms_to_ticks(t) k_ms_to_ticks_ceil32(t) -#define __ticks_to_us(t) k_ticks_to_us_floor64(t) -#define z_us_to_ticks(t) k_us_to_ticks_ceil64(t) -#define sys_clock_hw_cycles_per_tick() k_ticks_to_cyc_floor32(1) -#define SYS_CLOCK_HW_CYCLES_TO_NS64(t) (1000 * k_cyc_to_us_floor64(t)) -#define SYS_CLOCK_HW_CYCLES_TO_NS(t) ((u32_t)(1000 * k_cyc_to_us_floor64(t))) +#define __ticks_to_ms(t) __DEPRECATED_MACRO \ + k_ticks_to_ms_floor64(t) +#define z_ms_to_ticks(t) \ + k_ms_to_ticks_ceil32(t) +#define __ticks_to_us(t) __DEPRECATED_MACRO \ + k_ticks_to_us_floor64(t) +#define z_us_to_ticks(t) __DEPRECATED_MACRO \ + k_us_to_ticks_ceil64(t) +#define sys_clock_hw_cycles_per_tick() __DEPRECATED_MACRO \ + k_ticks_to_cyc_floor32(1) +#define SYS_CLOCK_HW_CYCLES_TO_NS64(t) __DEPRECATED_MACRO \ + k_cyc_to_ns_floor64(t) +#define SYS_CLOCK_HW_CYCLES_TO_NS(t) __DEPRECATED_MACRO \ + ((u32_t)k_cyc_to_ns_floor64(t)) /* added tick needed to account for tick in progress */ #define _TICK_ALIGN 1 @@ -88,7 +95,7 @@ extern void z_enable_sys_clock(void); * and calculates the average cycle time */ #define SYS_CLOCK_HW_CYCLES_TO_NS_AVG(X, NCYCLES) \ - (u32_t)(SYS_CLOCK_HW_CYCLES_TO_NS64(X) / NCYCLES) + (u32_t)(k_cyc_to_ns_floor64(X) / NCYCLES) /** * @defgroup clock_apis Kernel Clock APIs diff --git a/kernel/poll.c b/kernel/poll.c index 7fba360ebd..a63f11d7a2 100644 --- a/kernel/poll.c +++ b/kernel/poll.c @@ -638,7 +638,7 @@ int k_work_poll_submit_to_queue(struct k_work_q *work_q, if (timeout != K_FOREVER) { z_add_timeout(&work->timeout, triggered_work_expiration_handler, - z_ms_to_ticks(timeout)); + k_ms_to_ticks_ceil32(timeout)); } /* From now, any event will result in submitted work. 
*/ diff --git a/kernel/sched.c b/kernel/sched.c index e3f4e32bab..d4726a97dc 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -257,7 +257,7 @@ void k_sched_time_slice_set(s32_t slice, int prio) { LOCKED(&sched_spinlock) { _current_cpu->slice_ticks = 0; - slice_time = z_ms_to_ticks(slice); + slice_time = k_ms_to_ticks_ceil32(slice); slice_max_prio = prio; z_reset_time_slice(); } @@ -368,7 +368,7 @@ static void pend(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout) } if (timeout != K_FOREVER) { - s32_t ticks = _TICK_ALIGN + z_ms_to_ticks(timeout); + s32_t ticks = _TICK_ALIGN + k_ms_to_ticks_ceil32(timeout); z_add_thread_timeout(thread, ticks); } @@ -975,9 +975,9 @@ s32_t z_impl_k_sleep(int ms) { s32_t ticks; - ticks = z_ms_to_ticks(ms); + ticks = k_ms_to_ticks_ceil32(ms); ticks = z_tick_sleep(ticks); - return __ticks_to_ms(ticks); + return k_ticks_to_ms_floor64(ticks); } #ifdef CONFIG_USERSPACE @@ -992,9 +992,9 @@ s32_t z_impl_k_usleep(int us) { s32_t ticks; - ticks = z_us_to_ticks(us); + ticks = k_us_to_ticks_ceil64(us); ticks = z_tick_sleep(ticks); - return __ticks_to_us(ticks); + return k_ticks_to_us_floor64(ticks); } #ifdef CONFIG_USERSPACE diff --git a/kernel/thread.c b/kernel/thread.c index f2ad1c953e..b163d48979 100644 --- a/kernel/thread.c +++ b/kernel/thread.c @@ -378,7 +378,7 @@ static void schedule_new_thread(struct k_thread *thread, s32_t delay) if (delay == 0) { k_thread_start(thread); } else { - s32_t ticks = _TICK_ALIGN + z_ms_to_ticks(delay); + s32_t ticks = _TICK_ALIGN + k_ms_to_ticks_ceil32(delay); z_add_thread_timeout(thread, ticks); } diff --git a/kernel/timeout.c b/kernel/timeout.c index 9694653d69..262366742d 100644 --- a/kernel/timeout.c +++ b/kernel/timeout.c @@ -238,7 +238,7 @@ u32_t z_tick_get_32(void) s64_t z_impl_k_uptime_get(void) { - return __ticks_to_ms(z_tick_get()); + return k_ticks_to_ms_floor64(z_tick_get()); } #ifdef CONFIG_USERSPACE diff --git a/kernel/timer.c b/kernel/timer.c index f767d78526..47c56394e1 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -112,8 +112,8 @@ void z_impl_k_timer_start(struct k_timer *timer, s32_t duration, s32_t period) volatile s32_t period_in_ticks, duration_in_ticks; - period_in_ticks = z_ms_to_ticks(period); - duration_in_ticks = z_ms_to_ticks(duration); + period_in_ticks = k_ms_to_ticks_ceil32(period); + duration_in_ticks = k_ms_to_ticks_ceil32(duration); (void)z_abort_timeout(&timer->timeout); timer->period = period_in_ticks; diff --git a/kernel/work_q.c b/kernel/work_q.c index f050a437ab..1fb18f3ac8 100644 --- a/kernel/work_q.c +++ b/kernel/work_q.c @@ -108,7 +108,7 @@ int k_delayed_work_submit_to_queue(struct k_work_q *work_q, /* Add timeout */ z_add_timeout(&work->timeout, work_timeout, - _TICK_ALIGN + z_ms_to_ticks(delay)); + _TICK_ALIGN + k_ms_to_ticks_ceil32(delay)); done: k_spin_unlock(&lock, key); diff --git a/lib/cmsis_rtos_v1/cmsis_signal.c b/lib/cmsis_rtos_v1/cmsis_signal.c index 02a262d33e..151e6d8cd3 100644 --- a/lib/cmsis_rtos_v1/cmsis_signal.c +++ b/lib/cmsis_rtos_v1/cmsis_signal.c @@ -142,7 +142,7 @@ osEvent osSignalWait(int32_t signals, uint32_t millisec) */ hwclk_cycles_delta = (u64_t)k_cycle_get_32() - time_stamp_start; time_delta_ns = - (u32_t)SYS_CLOCK_HW_CYCLES_TO_NS(hwclk_cycles_delta); + (u32_t)k_cyc_to_ns_floor64(hwclk_cycles_delta); time_delta_ms = (u32_t)time_delta_ns/NSEC_PER_MSEC; if (timeout > time_delta_ms) { diff --git a/lib/cmsis_rtos_v2/event_flags.c b/lib/cmsis_rtos_v2/event_flags.c index 93e751ed6c..20b99c810f 100644 --- a/lib/cmsis_rtos_v2/event_flags.c +++ 
b/lib/cmsis_rtos_v2/event_flags.c @@ -109,7 +109,7 @@ uint32_t osEventFlagsWait(osEventFlagsId_t ef_id, uint32_t flags, struct cv2_event_flags *events = (struct cv2_event_flags *)ef_id; int retval, key; u32_t sig; - u32_t time_delta_ms, timeout_ms = __ticks_to_ms(timeout); + u32_t time_delta_ms, timeout_ms = k_ticks_to_ms_floor64(timeout); u64_t time_stamp_start, hwclk_cycles_delta, time_delta_ns; /* Can be called from ISRs only if timeout is set to 0 */ @@ -172,7 +172,7 @@ uint32_t osEventFlagsWait(osEventFlagsId_t ef_id, uint32_t flags, (u64_t)k_cycle_get_32() - time_stamp_start; time_delta_ns = - (u32_t)SYS_CLOCK_HW_CYCLES_TO_NS(hwclk_cycles_delta); + (u32_t)k_cyc_to_ns_floor64(hwclk_cycles_delta); time_delta_ms = (u32_t)time_delta_ns / NSEC_PER_MSEC; diff --git a/lib/cmsis_rtos_v2/kernel.c b/lib/cmsis_rtos_v2/kernel.c index 06285911fc..e58bef5e7d 100644 --- a/lib/cmsis_rtos_v2/kernel.c +++ b/lib/cmsis_rtos_v2/kernel.c @@ -132,7 +132,7 @@ osStatus_t osDelay(uint32_t ticks) return osErrorISR; } - k_sleep(__ticks_to_ms(ticks)); + k_sleep(k_ticks_to_ms_floor64(ticks)); return osOK; } @@ -149,7 +149,7 @@ osStatus_t osDelayUntil(uint32_t ticks) } ticks_elapsed = osKernelGetTickCount(); - k_sleep(__ticks_to_ms(ticks - ticks_elapsed)); + k_sleep(k_ticks_to_ms_floor64(ticks - ticks_elapsed)); return osOK; } diff --git a/lib/cmsis_rtos_v2/mempool.c b/lib/cmsis_rtos_v2/mempool.c index 128b8068a6..1ff49ed115 100644 --- a/lib/cmsis_rtos_v2/mempool.c +++ b/lib/cmsis_rtos_v2/mempool.c @@ -109,7 +109,7 @@ void *osMemoryPoolAlloc(osMemoryPoolId_t mp_id, uint32_t timeout) } else { retval = k_mem_slab_alloc( (struct k_mem_slab *)(&mslab->z_mslab), - (void **)&ptr, __ticks_to_ms(timeout)); + (void **)&ptr, k_ticks_to_ms_floor64(timeout)); } if (retval == 0) { diff --git a/lib/cmsis_rtos_v2/msgq.c b/lib/cmsis_rtos_v2/msgq.c index d4f4de9f3e..930ec50e3d 100644 --- a/lib/cmsis_rtos_v2/msgq.c +++ b/lib/cmsis_rtos_v2/msgq.c @@ -104,7 +104,7 @@ osStatus_t osMessageQueuePut(osMessageQueueId_t msgq_id, const void *msg_ptr, retval = k_msgq_put(&msgq->z_msgq, (void *)msg_ptr, K_FOREVER); } else { retval = k_msgq_put(&msgq->z_msgq, (void *)msg_ptr, - __ticks_to_ms(timeout)); + k_ticks_to_ms_floor64(timeout)); } if (retval == 0) { @@ -142,7 +142,7 @@ osStatus_t osMessageQueueGet(osMessageQueueId_t msgq_id, void *msg_ptr, retval = k_msgq_get(&msgq->z_msgq, msg_ptr, K_FOREVER); } else { retval = k_msgq_get(&msgq->z_msgq, msg_ptr, - __ticks_to_ms(timeout)); + k_ticks_to_ms_floor64(timeout)); } if (retval == 0) { diff --git a/lib/cmsis_rtos_v2/mutex.c b/lib/cmsis_rtos_v2/mutex.c index 27c8ae1308..8127941f5d 100644 --- a/lib/cmsis_rtos_v2/mutex.c +++ b/lib/cmsis_rtos_v2/mutex.c @@ -94,7 +94,7 @@ osStatus_t osMutexAcquire(osMutexId_t mutex_id, uint32_t timeout) status = k_mutex_lock(&mutex->z_mutex, K_NO_WAIT); } else { status = k_mutex_lock(&mutex->z_mutex, - __ticks_to_ms(timeout)); + k_ticks_to_ms_floor64(timeout)); } if (status == -EBUSY) { diff --git a/lib/cmsis_rtos_v2/semaphore.c b/lib/cmsis_rtos_v2/semaphore.c index 938401fd4e..d77dea51e7 100644 --- a/lib/cmsis_rtos_v2/semaphore.c +++ b/lib/cmsis_rtos_v2/semaphore.c @@ -77,7 +77,7 @@ osStatus_t osSemaphoreAcquire(osSemaphoreId_t semaphore_id, uint32_t timeout) status = k_sem_take(&semaphore->z_semaphore, K_NO_WAIT); } else { status = k_sem_take(&semaphore->z_semaphore, - __ticks_to_ms(timeout)); + k_ticks_to_ms_floor64(timeout)); } if (status == -EBUSY) { diff --git a/lib/cmsis_rtos_v2/thread_flags.c b/lib/cmsis_rtos_v2/thread_flags.c index d7f769391d..ecb51caae4 
100644 --- a/lib/cmsis_rtos_v2/thread_flags.c +++ b/lib/cmsis_rtos_v2/thread_flags.c @@ -89,7 +89,7 @@ uint32_t osThreadFlagsWait(uint32_t flags, uint32_t options, uint32_t timeout) struct cv2_thread *tid; int retval, key; u32_t sig; - u32_t time_delta_ms, timeout_ms = __ticks_to_ms(timeout); + u32_t time_delta_ms, timeout_ms = k_ticks_to_ms_floor64(timeout); u64_t time_stamp_start, hwclk_cycles_delta, time_delta_ns; if (k_is_in_isr()) { @@ -155,7 +155,7 @@ uint32_t osThreadFlagsWait(uint32_t flags, uint32_t options, uint32_t timeout) (u64_t)k_cycle_get_32() - time_stamp_start; time_delta_ns = - (u32_t)SYS_CLOCK_HW_CYCLES_TO_NS(hwclk_cycles_delta); + (u32_t)k_cyc_to_ns_floor64(hwclk_cycles_delta); time_delta_ms = (u32_t)time_delta_ns / NSEC_PER_MSEC; diff --git a/lib/cmsis_rtos_v2/timer.c b/lib/cmsis_rtos_v2/timer.c index 2508e3ec7c..e202618894 100644 --- a/lib/cmsis_rtos_v2/timer.c +++ b/lib/cmsis_rtos_v2/timer.c @@ -80,7 +80,7 @@ osTimerId_t osTimerNew(osTimerFunc_t func, osTimerType_t type, osStatus_t osTimerStart(osTimerId_t timer_id, uint32_t ticks) { struct cv2_timer *timer = (struct cv2_timer *)timer_id; - u32_t millisec = __ticks_to_ms(ticks); + u32_t millisec = k_ticks_to_ms_floor64(ticks); if (timer == NULL) { return osErrorParameter; diff --git a/samples/smp/pi/src/main.c b/samples/smp/pi/src/main.c index f100ae7818..eb92d66ae5 100644 --- a/samples/smp/pi/src/main.c +++ b/samples/smp/pi/src/main.c @@ -102,7 +102,7 @@ void main(void) stop_time = k_cycle_get_32(); cycles_spent = stop_time - start_time; - nanoseconds_spent = SYS_CLOCK_HW_CYCLES_TO_NS(cycles_spent); + nanoseconds_spent = (u32_t)k_cyc_to_ns_floor64(cycles_spent); for (i = 0; i < THREADS_NUM; i++) printk("Pi value calculated by thread #%d: %s\n", i, buffer[i]); diff --git a/subsys/bluetooth/shell/gatt.c b/subsys/bluetooth/shell/gatt.c index 0ca75a6118..f9682234bf 100644 --- a/subsys/bluetooth/shell/gatt.c +++ b/subsys/bluetooth/shell/gatt.c @@ -860,7 +860,7 @@ static ssize_t write_met(struct bt_conn *conn, const struct bt_gatt_attr *attr, memcpy(value + offset, buf, len); delta = k_cycle_get_32() - cycle_stamp; - delta = SYS_CLOCK_HW_CYCLES_TO_NS(delta); + delta = (u32_t)k_cyc_to_ns_floor64(delta); /* if last data rx-ed was greater than 1 second in the past, * reset the metrics. diff --git a/subsys/bluetooth/shell/l2cap.c b/subsys/bluetooth/shell/l2cap.c index 0a7a47a969..4132edbb36 100644 --- a/subsys/bluetooth/shell/l2cap.c +++ b/subsys/bluetooth/shell/l2cap.c @@ -62,7 +62,7 @@ static int l2cap_recv_metrics(struct bt_l2cap_chan *chan, struct net_buf *buf) u32_t delta; delta = k_cycle_get_32() - cycle_stamp; - delta = SYS_CLOCK_HW_CYCLES_TO_NS(delta); + delta = (u32_t)k_cyc_to_ns_floor64(delta); /* if last data rx-ed was greater than 1 second in the past, * reset the metrics. 
diff --git a/subsys/debug/tracing/cpu_stats.c b/subsys/debug/tracing/cpu_stats.c index c5c0306cf0..4ea14595da 100644 --- a/subsys/debug/tracing/cpu_stats.c +++ b/subsys/debug/tracing/cpu_stats.c @@ -62,10 +62,10 @@ void cpu_stats_get_ns(struct cpu_stats *cpu_stats_ns) int key = irq_lock(); cpu_stats_update_counters(); - cpu_stats_ns->idle = SYS_CLOCK_HW_CYCLES_TO_NS(stats_hw_tick.idle); - cpu_stats_ns->non_idle = SYS_CLOCK_HW_CYCLES_TO_NS( + cpu_stats_ns->idle = (u32_t)k_cyc_to_ns_floor64(stats_hw_tick.idle); + cpu_stats_ns->non_idle = (u32_t)k_cyc_to_ns_floor64( stats_hw_tick.non_idle); - cpu_stats_ns->sched = SYS_CLOCK_HW_CYCLES_TO_NS(stats_hw_tick.sched); + cpu_stats_ns->sched = (u32_t)k_cyc_to_ns_floor64(stats_hw_tick.sched); irq_unlock(key); } diff --git a/subsys/net/ip/net_shell.c b/subsys/net/ip/net_shell.c index b7781d37d5..bc3a065711 100644 --- a/subsys/net/ip/net_shell.c +++ b/subsys/net/ip/net_shell.c @@ -2887,9 +2887,9 @@ static enum net_verdict handle_ipv6_echo_reply(struct net_pkt *pkt, net_pkt_ieee802154_rssi(pkt), #endif #ifdef CONFIG_FLOAT - (SYS_CLOCK_HW_CYCLES_TO_NS(cycles) / 1000000.f)); + ((u32_t)k_cyc_to_ns_floor64(cycles) / 1000000.f)); #else - (SYS_CLOCK_HW_CYCLES_TO_NS(cycles) / 1000000)); + ((u32_t)k_cyc_to_ns_floor64(cycles) / 1000000)); #endif k_sem_give(&ping_timeout); @@ -3011,9 +3011,9 @@ static enum net_verdict handle_ipv4_echo_reply(struct net_pkt *pkt, ntohs(icmp_echo->sequence), ip_hdr->ttl, #ifdef CONFIG_FLOAT - (SYS_CLOCK_HW_CYCLES_TO_NS(cycles) / 1000000.f)); + ((u32_t)k_cyc_to_ns_floor64(cycles) / 1000000.f)); #else - (SYS_CLOCK_HW_CYCLES_TO_NS(cycles) / 1000000)); + ((u32_t)k_cyc_to_ns_floor64(cycles) / 1000000)); #endif k_sem_give(&ping_timeout); diff --git a/subsys/net/ip/net_stats.h b/subsys/net/ip/net_stats.h index 3684ec8004..aa54477a58 100644 --- a/subsys/net/ip/net_stats.h +++ b/subsys/net/ip/net_stats.h @@ -327,7 +327,7 @@ static inline void net_stats_update_tx_time(struct net_if *iface, u32_t diff = end_time - start_time; UPDATE_STAT(iface, stats.tx_time.sum += - SYS_CLOCK_HW_CYCLES_TO_NS64(diff) / NSEC_PER_USEC); + k_cyc_to_ns_floor64(diff) / 1000); UPDATE_STAT(iface, stats.tx_time.count += 1); } #else @@ -379,7 +379,7 @@ static inline void net_stats_update_tc_tx_time(struct net_if *iface, u32_t diff = end_time - start_time; UPDATE_STAT(iface, stats.tc.sent[tc].tx_time.sum += - SYS_CLOCK_HW_CYCLES_TO_NS64(diff) / NSEC_PER_USEC); + k_cyc_to_ns_floor64(diff) / 1000); UPDATE_STAT(iface, stats.tc.sent[tc].tx_time.count += 1); net_stats_update_tx_time(iface, start_time, end_time); diff --git a/subsys/net/l2/canbus/6locan.c b/subsys/net/l2/canbus/6locan.c index 4f4e424f69..f85dd8d25a 100644 --- a/subsys/net/l2/canbus/6locan.c +++ b/subsys/net/l2/canbus/6locan.c @@ -160,7 +160,7 @@ static s32_t canbus_stmin_to_ticks(u8_t stmin) time_ms = stmin; } - return z_ms_to_ticks(time_ms); + return k_ms_to_ticks_ceil32(time_ms); } static u16_t canbus_get_lladdr(struct net_linkaddr *net_lladdr) @@ -533,7 +533,7 @@ static enum net_verdict canbus_process_cf(struct net_pkt *pkt) } } else { z_add_timeout(&rx_ctx->timeout, canbus_rx_timeout, - z_ms_to_ticks(NET_CAN_BS_TIME)); + k_ms_to_ticks_ceil32(NET_CAN_BS_TIME)); if (NET_CAN_BS != 0 && !mcast) { rx_ctx->act_block_nr++; @@ -637,7 +637,7 @@ static enum net_verdict canbus_process_ff(struct net_pkt *pkt) /* At this point we expect to get Consecutive frames directly */ z_add_timeout(&rx_ctx->timeout, canbus_rx_timeout, - z_ms_to_ticks(NET_CAN_BS_TIME)); + k_ms_to_ticks_ceil32(NET_CAN_BS_TIME)); rx_ctx->state = 
NET_CAN_RX_STATE_CF; @@ -764,7 +764,7 @@ static void canbus_tx_work(struct net_pkt *pkt) ctx); ctx->state = NET_CAN_TX_STATE_WAIT_FC; z_add_timeout(&ctx->timeout, canbus_tx_timeout, - z_ms_to_ticks(NET_CAN_BS_TIME)); + k_ms_to_ticks_ceil32(NET_CAN_BS_TIME)); break; } else if (ctx->opts.stmin) { ctx->state = NET_CAN_TX_STATE_WAIT_ST; @@ -777,7 +777,7 @@ static void canbus_tx_work(struct net_pkt *pkt) case NET_CAN_TX_STATE_WAIT_ST: NET_DBG("SM wait ST. CTX: %p", ctx); z_add_timeout(&ctx->timeout, canbus_st_min_timeout, - z_ms_to_ticks(canbus_stmin_to_ticks(ctx->opts.stmin))); + k_ms_to_ticks_ceil32(canbus_stmin_to_ticks(ctx->opts.stmin))); ctx->state = NET_CAN_TX_STATE_SEND_CF; break; @@ -833,7 +833,7 @@ static enum net_verdict canbus_process_fc_data(struct canbus_isotp_tx_ctx *ctx, NET_DBG("Got WAIT frame. CTX: %p", ctx); z_abort_timeout(&ctx->timeout); z_add_timeout(&ctx->timeout, canbus_tx_timeout, - z_ms_to_ticks(NET_CAN_BS_TIME)); + k_ms_to_ticks_ceil32(NET_CAN_BS_TIME)); if (ctx->wft >= NET_CAN_WFTMAX) { NET_INFO("Got to many wait frames. CTX: %p", ctx); ctx->state = NET_CAN_TX_STATE_ERR; @@ -1023,12 +1023,12 @@ static int canbus_send_multiple_frames(struct net_pkt *pkt, size_t len, if (!mcast) { z_add_timeout(&tx_ctx->timeout, canbus_tx_timeout, - z_ms_to_ticks(NET_CAN_BS_TIME)); + k_ms_to_ticks_ceil32(NET_CAN_BS_TIME)); tx_ctx->state = NET_CAN_TX_STATE_WAIT_FC; } else { tx_ctx->state = NET_CAN_TX_STATE_SEND_CF; z_add_timeout(&tx_ctx->timeout, canbus_start_sending_cf, - z_ms_to_ticks(NET_CAN_FF_CF_TIME)); + k_ms_to_ticks_ceil32(NET_CAN_FF_CF_TIME)); } return 0; diff --git a/subsys/net/l2/ppp/ppp_l2.c b/subsys/net/l2/ppp/ppp_l2.c index 4250db6f90..1f10ea562c 100644 --- a/subsys/net/l2/ppp/ppp_l2.c +++ b/subsys/net/l2/ppp/ppp_l2.c @@ -345,7 +345,7 @@ static void echo_reply_handler(void *user_data, size_t user_data_len) time_diff = abs(end_time - ctx->shell.echo_req_data); ctx->shell.echo_req_data = - SYS_CLOCK_HW_CYCLES_TO_NS64(time_diff) / 1000; + k_cyc_to_ns_floor64(time_diff) / 1000; k_sem_give(&ctx->shell.wait_echo_reply); } diff --git a/subsys/testsuite/include/timestamp.h b/subsys/testsuite/include/timestamp.h index d4e29a10be..7dbfc9d1f6 100644 --- a/subsys/testsuite/include/timestamp.h +++ b/subsys/testsuite/include/timestamp.h @@ -104,7 +104,7 @@ static inline int high_timer_overflow(void) /* Check if the time elapsed in msec is sufficient to trigger an * overflow of the high precision timer */ - if (tCheck >= (SYS_CLOCK_HW_CYCLES_TO_NS64(UINT_MAX) / + if (tCheck >= (k_cyc_to_ns_floor64(UINT_MAX) / (NSEC_PER_USEC * USEC_PER_MSEC))) { return -1; } diff --git a/tests/benchmarks/latency_measure/src/int_to_thread.c b/tests/benchmarks/latency_measure/src/int_to_thread.c index bad34a0bb8..b58b0dc09e 100644 --- a/tests/benchmarks/latency_measure/src/int_to_thread.c +++ b/tests/benchmarks/latency_measure/src/int_to_thread.c @@ -74,7 +74,7 @@ int int_to_thread(void) make_int(); if (flag_var == 1) { PRINT_FORMAT(" switching time is %u tcs = %u nsec", - timestamp, SYS_CLOCK_HW_CYCLES_TO_NS(timestamp)); + timestamp, (u32_t)k_cyc_to_ns_floor64(timestamp)); } return 0; } diff --git a/tests/benchmarks/latency_measure/src/int_to_thread_evt.c b/tests/benchmarks/latency_measure/src/int_to_thread_evt.c index 1cc91c750b..96e041c2f3 100644 --- a/tests/benchmarks/latency_measure/src/int_to_thread_evt.c +++ b/tests/benchmarks/latency_measure/src/int_to_thread_evt.c @@ -92,6 +92,6 @@ int int_to_thread_evt(void) k_sem_take(&WORKSEMA, K_FOREVER); PRINT_FORMAT(" switch time is %u tcs = %u nsec", - 
timestamp, SYS_CLOCK_HW_CYCLES_TO_NS(timestamp)); + timestamp, (u32_t)k_cyc_to_ns_floor64(timestamp)); return 0; } diff --git a/tests/benchmarks/latency_measure/src/utils.h b/tests/benchmarks/latency_measure/src/utils.h index c3e0814597..42802db3e6 100644 --- a/tests/benchmarks/latency_measure/src/utils.h +++ b/tests/benchmarks/latency_measure/src/utils.h @@ -65,7 +65,7 @@ static inline void print_dash_line(void) #define PRINT_TIME_BANNER() \ do { \ PRINT_FORMAT(" tcs = timer clock cycles: 1 tcs is %u nsec", \ - SYS_CLOCK_HW_CYCLES_TO_NS(1)); \ + (u32_t)k_cyc_to_ns_floor64(1)); \ print_dash_line(); \ } while (0) diff --git a/tests/benchmarks/mbedtls/src/benchmark.c b/tests/benchmarks/mbedtls/src/benchmark.c index 56a63bc7a3..360a95d248 100644 --- a/tests/benchmarks/mbedtls/src/benchmark.c +++ b/tests/benchmarks/mbedtls/src/benchmark.c @@ -174,7 +174,7 @@ do { \ } \ \ delta = k_cycle_get_32() - tsc; \ - delta = SYS_CLOCK_HW_CYCLES_TO_NS64(delta); \ + delta = k_cyc_to_ns_floor64(delta); \ \ mbedtls_printf("%9lu KiB/s, %9lu ns/byte\n", \ ii * BUFSIZE / 1024, \ diff --git a/tests/benchmarks/timing_info/src/timing_info.h b/tests/benchmarks/timing_info/src/timing_info.h index 1da40e08cf..17eb0d09ad 100644 --- a/tests/benchmarks/timing_info/src/timing_info.h +++ b/tests/benchmarks/timing_info/src/timing_info.h @@ -133,7 +133,7 @@ static inline void benchmark_timer_init(void) { } static inline void benchmark_timer_stop(void) { } static inline void benchmark_timer_start(void) { } -#define CYCLES_TO_NS(x) SYS_CLOCK_HW_CYCLES_TO_NS(x) +#define CYCLES_TO_NS(x) (u32_t)k_cyc_to_ns_floor64(x) /* Get Core Frequency in MHz */ static inline u32_t get_core_freq_MHz(void) diff --git a/tests/kernel/common/src/boot_delay.c b/tests/kernel/common/src/boot_delay.c index 2a62199186..3e47ab6a09 100644 --- a/tests/kernel/common/src/boot_delay.c +++ b/tests/kernel/common/src/boot_delay.c @@ -16,14 +16,13 @@ /** * @brief This module verifies the delay specified during boot. 
- * @see k_cycle_get_32, #SYS_CLOCK_HW_CYCLES_TO_NS64(X) */ void test_verify_bootdelay(void) { u32_t current_cycles = k_cycle_get_32(); /* compare this with the boot delay specified */ - zassert_true(SYS_CLOCK_HW_CYCLES_TO_NS64(current_cycles) >= + zassert_true(k_cyc_to_ns_floor64(current_cycles) >= (NSEC_PER_MSEC * CONFIG_BOOT_DELAY), "boot delay not executed"); } diff --git a/tests/kernel/common/src/clock.c b/tests/kernel/common/src/clock.c index 1101912dba..6e7dac943a 100644 --- a/tests/kernel/common/src/clock.c +++ b/tests/kernel/common/src/clock.c @@ -95,7 +95,7 @@ void test_clock_cycle(void) c32 = k_cycle_get_32(); /*break if cycle counter wrap around*/ while (k_cycle_get_32() > c32 && - k_cycle_get_32() < (c32 + sys_clock_hw_cycles_per_tick())) { + k_cycle_get_32() < (c32 + k_ticks_to_cyc_floor32(1))) { #if defined(CONFIG_ARCH_POSIX) k_busy_wait(50); #endif @@ -119,7 +119,7 @@ void test_clock_cycle(void) (sys_clock_hw_cycles_per_sec() / MSEC_PER_SEC), NULL); /* delta NS should be greater than 1 milli-second */ - zassert_true(SYS_CLOCK_HW_CYCLES_TO_NS(c1 - c0) > + zassert_true((u32_t)k_cyc_to_ns_floor64(c1 - c0) > (NSEC_PER_SEC / MSEC_PER_SEC), NULL); } } diff --git a/tests/kernel/context/src/main.c b/tests/kernel/context/src/main.c index b35802f00d..31f865122c 100644 --- a/tests/kernel/context/src/main.c +++ b/tests/kernel/context/src/main.c @@ -260,7 +260,7 @@ static void _test_kernel_cpu_idle(int atomic) k_cpu_idle(); } /* calculating milliseconds per tick*/ - tms += __ticks_to_ms(1); + tms += k_ticks_to_ms_floor64(1); tms2 = k_uptime_get_32(); zassert_false(tms2 < tms, "Bad ms per tick value computed," "got %d which is less than %d\n", @@ -699,7 +699,7 @@ static void thread_sleep(void *delta, void *arg2, void *arg3) timestamp = k_uptime_get() - timestamp; TC_PRINT(" thread back from sleep\n"); - int slop = MAX(__ticks_to_ms(2), 1); + int slop = MAX(k_ticks_to_ms_floor64(2), 1); if (timestamp < timeout || timestamp > timeout + slop) { TC_ERROR("timestamp out of range, got %d\n", (int)timestamp); diff --git a/tests/kernel/early_sleep/src/main.c b/tests/kernel/early_sleep/src/main.c index 7db8befb77..7466259ee9 100644 --- a/tests/kernel/early_sleep/src/main.c +++ b/tests/kernel/early_sleep/src/main.c @@ -58,10 +58,10 @@ static int ticks_to_sleep(int ticks) u32_t stop_time; start_time = k_cycle_get_32(); - k_sleep(__ticks_to_ms(ticks)); + k_sleep(k_ticks_to_ms_floor64(ticks)); stop_time = k_cycle_get_32(); - return (stop_time - start_time) / sys_clock_hw_cycles_per_tick(); + return (stop_time - start_time) / k_ticks_to_cyc_floor32(1); } @@ -103,8 +103,8 @@ static void test_early_sleep(void) k_thread_priority_set(k_current_get(), 0); TC_PRINT("msec per tick: %lld.%03lld, ticks to sleep: %d\n", - __ticks_to_ms(1000) / 1000U, - __ticks_to_ms(1000) % 1000, + k_ticks_to_ms_floor64(1000) / 1000U, + k_ticks_to_ms_floor64(1000) % 1000, TEST_TICKS_TO_SLEEP); /* Create a lower priority thread */ diff --git a/tests/kernel/fifo/fifo_timeout/src/main.c b/tests/kernel/fifo/fifo_timeout/src/main.c index a6baec355f..644c3f8df3 100644 --- a/tests/kernel/fifo/fifo_timeout/src/main.c +++ b/tests/kernel/fifo/fifo_timeout/src/main.c @@ -101,7 +101,7 @@ static bool is_timeout_in_range(u32_t start_time, u32_t timeout) u32_t stop_time, diff; stop_time = k_cycle_get_32(); - diff = SYS_CLOCK_HW_CYCLES_TO_NS(stop_time - + diff = (u32_t)k_cyc_to_ns_floor64(stop_time - start_time) / NSEC_PER_USEC; diff = diff / USEC_PER_MSEC; return timeout <= diff; @@ -177,7 +177,7 @@ static int 
test_multiple_threads_pending(struct timeout_order_data *test_data, diff_ms = test_data[j].timeout - data->timeout; } - if (z_ms_to_ticks(diff_ms) == 1) { + if (k_ms_to_ticks_ceil32(diff_ms) == 1) { TC_PRINT( " thread (q order: %d, t/o: %d, fifo %p)\n", data->q_order, data->timeout, data->fifo); diff --git a/tests/kernel/lifo/lifo_usage/src/main.c b/tests/kernel/lifo/lifo_usage/src/main.c index a2c354dbfc..c88b124ac7 100644 --- a/tests/kernel/lifo/lifo_usage/src/main.c +++ b/tests/kernel/lifo/lifo_usage/src/main.c @@ -110,7 +110,7 @@ static bool is_timeout_in_range(u32_t start_time, u32_t timeout) u32_t stop_time, diff; stop_time = k_cycle_get_32(); - diff = SYS_CLOCK_HW_CYCLES_TO_NS(stop_time - + diff = (u32_t)k_cyc_to_ns_floor64(stop_time - start_time) / NSEC_PER_USEC; diff = diff / USEC_PER_MSEC; return timeout <= diff; diff --git a/tests/kernel/sched/schedule_api/src/test_sched_timeslice_reset.c b/tests/kernel/sched/schedule_api/src/test_sched_timeslice_reset.c index bbfb8df78b..101694c39d 100644 --- a/tests/kernel/sched/schedule_api/src/test_sched_timeslice_reset.c +++ b/tests/kernel/sched/schedule_api/src/test_sched_timeslice_reset.c @@ -69,11 +69,13 @@ static void thread_time_slice(void *p1, void *p2, void *p3) * also expecting task switch below the switching tolerance. */ expected_slice_min = - (z_ms_to_ticks(SLICE_SIZE) - TASK_SWITCH_TOLERANCE) * - sys_clock_hw_cycles_per_tick(); + (k_ms_to_ticks_ceil32(SLICE_SIZE) + - TASK_SWITCH_TOLERANCE) + * k_ticks_to_cyc_floor32(1); expected_slice_max = - (z_ms_to_ticks(SLICE_SIZE) + TASK_SWITCH_TOLERANCE) * - sys_clock_hw_cycles_per_tick(); + (k_ms_to_ticks_ceil32(SLICE_SIZE) + + TASK_SWITCH_TOLERANCE) + * k_ticks_to_cyc_floor32(1); } #ifdef CONFIG_DEBUG diff --git a/tests/kernel/sched/schedule_api/src/test_slice_scheduling.c b/tests/kernel/sched/schedule_api/src/test_slice_scheduling.c index 5042999115..0026e23bd4 100644 --- a/tests/kernel/sched/schedule_api/src/test_slice_scheduling.c +++ b/tests/kernel/sched/schedule_api/src/test_slice_scheduling.c @@ -41,8 +41,8 @@ static void thread_tslice(void *p1, void *p2, void *p3) int thread_parameter = (idx == (NUM_THREAD - 1)) ? '\n' : (idx + 'A'); - s64_t expected_slice_min = __ticks_to_ms(z_ms_to_ticks(SLICE_SIZE)); - s64_t expected_slice_max = __ticks_to_ms(z_ms_to_ticks(SLICE_SIZE) + 1); + s64_t expected_slice_min = k_ticks_to_ms_floor64(k_ms_to_ticks_ceil32(SLICE_SIZE)); + s64_t expected_slice_max = k_ticks_to_ms_floor64(k_ms_to_ticks_ceil32(SLICE_SIZE) + 1); /* Clumsy, but need to handle the precision loss with * submillisecond ticks. 
It's always possible to alias and diff --git a/tests/kernel/sleep/src/main.c b/tests/kernel/sleep/src/main.c index 09de55fa9c..ec52804921 100644 --- a/tests/kernel/sleep/src/main.c +++ b/tests/kernel/sleep/src/main.c @@ -22,7 +22,7 @@ #define ONE_SECOND (MSEC_PER_SEC) #define ONE_SECOND_ALIGNED \ - (u32_t)(__ticks_to_ms(z_ms_to_ticks(ONE_SECOND) + _TICK_ALIGN)) + (u32_t)(k_ticks_to_ms_floor64(k_ms_to_ticks_ceil32(ONE_SECOND) + _TICK_ALIGN)) static struct k_sem test_thread_sem; static struct k_sem helper_thread_sem; diff --git a/tests/kernel/tickless/tickless_concept/src/main.c b/tests/kernel/tickless/tickless_concept/src/main.c index 0bef530cf3..c0137ea213 100644 --- a/tests/kernel/tickless/tickless_concept/src/main.c +++ b/tests/kernel/tickless/tickless_concept/src/main.c @@ -16,16 +16,16 @@ static struct k_thread tdata[NUM_THREAD]; #define CONFIG_TICKLESS_IDLE_THRESH 20 #endif /*sleep duration tickless*/ -#define SLEEP_TICKLESS __ticks_to_ms(CONFIG_TICKLESS_IDLE_THRESH) +#define SLEEP_TICKLESS k_ticks_to_ms_floor64(CONFIG_TICKLESS_IDLE_THRESH) /*sleep duration with tick*/ -#define SLEEP_TICKFUL __ticks_to_ms(CONFIG_TICKLESS_IDLE_THRESH - 1) +#define SLEEP_TICKFUL k_ticks_to_ms_floor64(CONFIG_TICKLESS_IDLE_THRESH - 1) /*slice size is set as half of the sleep duration*/ -#define SLICE_SIZE __ticks_to_ms(CONFIG_TICKLESS_IDLE_THRESH >> 1) +#define SLICE_SIZE k_ticks_to_ms_floor64(CONFIG_TICKLESS_IDLE_THRESH >> 1) /*maximum slice duration accepted by the test*/ -#define SLICE_SIZE_LIMIT __ticks_to_ms((CONFIG_TICKLESS_IDLE_THRESH >> 1) + 1) +#define SLICE_SIZE_LIMIT k_ticks_to_ms_floor64((CONFIG_TICKLESS_IDLE_THRESH >> 1) + 1) /*align to millisecond boundary*/ #if defined(CONFIG_ARCH_POSIX) diff --git a/tests/kernel/timer/timer_api/src/main.c b/tests/kernel/timer/timer_api/src/main.c index 2ffb81aad2..8f7f2ce393 100644 --- a/tests/kernel/timer/timer_api/src/main.c +++ b/tests/kernel/timer/timer_api/src/main.c @@ -269,10 +269,10 @@ void test_timer_periodicity(void) * Please note, that expected firing time is not the * one requested, as the kernel uses the ticks to manage * time. The actual perioid will be equal to [tick time] - * multiplied by z_ms_to_ticks(PERIOD). + * multiplied by k_ms_to_ticks_ceil32(PERIOD). */ TIMER_ASSERT(WITHIN_ERROR(delta, - __ticks_to_ms(z_ms_to_ticks(PERIOD)), 1), + k_ticks_to_ms_floor64(k_ms_to_ticks_ceil32(PERIOD)), 1), &periodicity_timer); } @@ -532,7 +532,7 @@ void test_timer_remaining_get(void) * the value obtained through k_timer_remaining_get() could be larger * than actual remaining time with maximum error equal to one tick. 
*/ - zassert_true(remaining <= (DURATION / 2) + __ticks_to_ms(1), NULL); + zassert_true(remaining <= (DURATION / 2) + k_ticks_to_ms_floor64(1), NULL); } static void timer_init(struct k_timer *timer, k_timer_expiry_t expiry_fn, diff --git a/tests/kernel/timer/timer_monotonic/src/main.c b/tests/kernel/timer/timer_monotonic/src/main.c index 393724da28..d674face25 100644 --- a/tests/kernel/timer/timer_monotonic/src/main.c +++ b/tests/kernel/timer/timer_monotonic/src/main.c @@ -54,8 +54,8 @@ void test_timer(void) errors = 0U; - TC_PRINT("sys_clock_hw_cycles_per_tick() = %d\n", - sys_clock_hw_cycles_per_tick()); + TC_PRINT("k_ticks_to_cyc_floor32(1) = %d\n", + k_ticks_to_cyc_floor32(1)); TC_PRINT("sys_clock_hw_cycles_per_sec() = %d\n", sys_clock_hw_cycles_per_sec()); diff --git a/tests/kernel/workq/work_queue/src/main.c b/tests/kernel/workq/work_queue/src/main.c index 412956caf0..af2cf59cc7 100644 --- a/tests/kernel/workq/work_queue/src/main.c +++ b/tests/kernel/workq/work_queue/src/main.c @@ -17,7 +17,7 @@ /* In fact, each work item could take up to this value */ #define WORK_ITEM_WAIT_ALIGNED \ - __ticks_to_ms(z_ms_to_ticks(WORK_ITEM_WAIT) + _TICK_ALIGN) + k_ticks_to_ms_floor64(k_ms_to_ticks_ceil32(WORK_ITEM_WAIT) + _TICK_ALIGN) /* * Wait 50ms between work submissions, to ensure co-op and prempt diff --git a/tests/kernel/workq/work_queue_api/src/main.c b/tests/kernel/workq/work_queue_api/src/main.c index b9d905fb66..c28f6ab2c5 100644 --- a/tests/kernel/workq/work_queue_api/src/main.c +++ b/tests/kernel/workq/work_queue_api/src/main.c @@ -141,10 +141,10 @@ static void tdelayed_work_submit(void *data) /**TESTPOINT: check remaining timeout after submit */ zassert_true( - time_remaining <= __ticks_to_ms(z_ms_to_ticks(TIMEOUT) + time_remaining <= k_ticks_to_ms_floor64(k_ms_to_ticks_ceil32(TIMEOUT) + _TICK_ALIGN) && - time_remaining >= __ticks_to_ms(z_ms_to_ticks(TIMEOUT) - - z_ms_to_ticks(15)), NULL); + time_remaining >= k_ticks_to_ms_floor64(k_ms_to_ticks_ceil32(TIMEOUT) - + k_ms_to_ticks_ceil32(15)), NULL); /**TESTPOINT: check pending after delayed work submit*/ zassert_true(k_work_pending((struct k_work *)&delayed_work[i]) == 0, NULL); diff --git a/tests/portability/cmsis_rtos_v1/src/kernel_apis.c b/tests/portability/cmsis_rtos_v1/src/kernel_apis.c index e4f77bc9e5..56cf9e45e1 100644 --- a/tests/portability/cmsis_rtos_v1/src/kernel_apis.c +++ b/tests/portability/cmsis_rtos_v1/src/kernel_apis.c @@ -45,7 +45,7 @@ void test_kernel_systick(void) k_busy_wait(WAIT_TIME_US); stop_time = osKernelSysTick(); - diff = SYS_CLOCK_HW_CYCLES_TO_NS(stop_time - + diff = (u32_t)k_cyc_to_ns_floor64(stop_time - start_time) / NSEC_PER_USEC; /* Check that it's within 1%. 
On some Zephyr platforms diff --git a/tests/portability/cmsis_rtos_v2/src/thread_apis.c b/tests/portability/cmsis_rtos_v2/src/thread_apis.c index 5ed478847e..2d7abd247b 100644 --- a/tests/portability/cmsis_rtos_v2/src/thread_apis.c +++ b/tests/portability/cmsis_rtos_v2/src/thread_apis.c @@ -252,7 +252,7 @@ void test_thread_prio(void) static void thread5(void *argument) { printk(" * Thread B started.\n"); - osDelay(z_ms_to_ticks(DELAY_MS)); + osDelay(k_ms_to_ticks_ceil32(DELAY_MS)); printk(" * Thread B joining...\n"); } @@ -317,13 +317,13 @@ void test_thread_detached(void) thread = osThreadNew(thread5, NULL, NULL); /* osThreadDetached */ zassert_not_null(thread, "Failed to create thread with osThreadNew!"); - osDelay(z_ms_to_ticks(DELAY_MS - DELTA_MS)); + osDelay(k_ms_to_ticks_ceil32(DELAY_MS - DELTA_MS)); status = osThreadJoin(thread); zassert_equal(status, osErrorResource, "Incorrect status returned from osThreadJoin!"); - osDelay(z_ms_to_ticks(DELTA_MS)); + osDelay(k_ms_to_ticks_ceil32(DELTA_MS)); } void thread6(void *argument) @@ -350,12 +350,12 @@ void test_thread_joinable_detach(void) tB = osThreadNew(thread6, tA, &attr); zassert_not_null(tB, "Failed to create thread with osThreadNew!"); - osDelay(z_ms_to_ticks(DELAY_MS - DELTA_MS)); + osDelay(k_ms_to_ticks_ceil32(DELAY_MS - DELTA_MS)); status = osThreadDetach(tA); zassert_equal(status, osOK, "osThreadDetach failed."); - osDelay(z_ms_to_ticks(DELTA_MS)); + osDelay(k_ms_to_ticks_ceil32(DELTA_MS)); } void test_thread_joinable_terminate(void) @@ -372,10 +372,10 @@ void test_thread_joinable_terminate(void) tB = osThreadNew(thread6, tA, &attr); zassert_not_null(tB, "Failed to create thread with osThreadNew!"); - osDelay(z_ms_to_ticks(DELAY_MS - DELTA_MS)); + osDelay(k_ms_to_ticks_ceil32(DELAY_MS - DELTA_MS)); status = osThreadTerminate(tA); zassert_equal(status, osOK, "osThreadTerminate failed."); - osDelay(z_ms_to_ticks(DELTA_MS)); + osDelay(k_ms_to_ticks_ceil32(DELTA_MS)); }
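
Note (not part of the patch): a minimal sketch of the conversion pattern applied throughout the hunks above, for anyone updating out-of-tree code. The function name measure_something() is made up for illustration; the k_*-prefixed conversion calls, k_cycle_get_32(), k_busy_wait() and printk() are the existing Zephyr APIs already used in this diff.

#include <zephyr.h>
#include <sys/printk.h>

void measure_something(void)
{
	/* Snapshot the hardware cycle counter, do some work, then
	 * convert the elapsed cycles with the new API.
	 */
	u32_t start = k_cycle_get_32();

	k_busy_wait(1000);	/* spin for roughly 1 ms of work */

	u32_t cycles = k_cycle_get_32() - start;

	/* Old (now deprecated): SYS_CLOCK_HW_CYCLES_TO_NS(cycles)
	 * New:
	 */
	u32_t ns = (u32_t)k_cyc_to_ns_floor64(cycles);

	/* Old: sys_clock_hw_cycles_per_tick()
	 * New:
	 */
	u32_t cycles_per_tick = k_ticks_to_cyc_floor32(1);

	/* Likewise: __ticks_to_ms(t) -> k_ticks_to_ms_floor64(t),
	 *           z_ms_to_ticks(t) -> k_ms_to_ticks_ceil32(t),
	 *           z_us_to_ticks(t) -> k_us_to_ticks_ceil64(t).
	 */
	printk("%u cycles = %u ns (%u cycles per tick)\n",
	       cycles, ns, cycles_per_tick);
}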