kernel/sys_clock.h: Deprecate and convert uses of old conversions

Mark the old time conversion APIs deprecated, leave compatibility
macros in place, and replace all usage with the new API.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
This commit is contained in:
Andy Ross 2019-10-03 11:43:10 -07:00 committed by Carles Cufí
parent f2b75fd644
commit 8892406c1d
53 changed files with 122 additions and 114 deletions

View file

@@ -33,7 +33,7 @@ u64_t z_tsc_read(void)
t = (u64_t)z_tick_get(); t = (u64_t)z_tick_get();
count = z_arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT); count = z_arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT);
irq_unlock(key); irq_unlock(key);
t *= (u64_t)sys_clock_hw_cycles_per_tick(); t *= k_ticks_to_cyc_floor64(1);
t += (u64_t)count; t += (u64_t)count;
return t; return t;
} }

View file

@@ -129,7 +129,7 @@ between two points in time.
/* compute how long the work took (assumes no counter rollover) */ /* compute how long the work took (assumes no counter rollover) */
cycles_spent = stop_time - start_time; cycles_spent = stop_time - start_time;
nanoseconds_spent = SYS_CLOCK_HW_CYCLES_TO_NS(cycles_spent); nanoseconds_spent = (u32_t)k_cyc_to_ns_floor64(cycles_spent);
Suggested Uses Suggested Uses
************** **************

View file

@@ -28,7 +28,7 @@ static void timer_irq_handler(void *unused)
read_timer_start_of_tick_handler(); read_timer_start_of_tick_handler();
#endif #endif
accumulated_cycle_count += sys_clock_hw_cycles_per_tick(); accumulated_cycle_count += k_ticks_to_cyc_floor32(1);
/* Clear the interrupt */ /* Clear the interrupt */
alt_handle_irq((void *)TIMER_0_BASE, TIMER_0_IRQ); alt_handle_irq((void *)TIMER_0_BASE, TIMER_0_IRQ);
@@ -46,15 +46,15 @@ int z_clock_driver_init(struct device *device)
ARG_UNUSED(device); ARG_UNUSED(device);
IOWR_ALTERA_AVALON_TIMER_PERIODL(TIMER_0_BASE, IOWR_ALTERA_AVALON_TIMER_PERIODL(TIMER_0_BASE,
sys_clock_hw_cycles_per_tick() & 0xFFFF); k_ticks_to_cyc_floor32(1) & 0xFFFF);
IOWR_ALTERA_AVALON_TIMER_PERIODH(TIMER_0_BASE, IOWR_ALTERA_AVALON_TIMER_PERIODH(TIMER_0_BASE,
(sys_clock_hw_cycles_per_tick() >> 16) & 0xFFFF); (k_ticks_to_cyc_floor32(1) >> 16) & 0xFFFF);
IRQ_CONNECT(TIMER_0_IRQ, 0, timer_irq_handler, NULL, 0); IRQ_CONNECT(TIMER_0_IRQ, 0, timer_irq_handler, NULL, 0);
irq_enable(TIMER_0_IRQ); irq_enable(TIMER_0_IRQ);
alt_avalon_timer_sc_init((void *)TIMER_0_BASE, 0, alt_avalon_timer_sc_init((void *)TIMER_0_BASE, 0,
TIMER_0_IRQ, sys_clock_hw_cycles_per_tick()); TIMER_0_IRQ, k_ticks_to_cyc_floor32(1));
return 0; return 0;
} }

View file

@@ -30,7 +30,7 @@ static void litex_timer_irq_handler(void *device)
int key = irq_lock(); int key = irq_lock();
sys_write8(TIMER_EV, TIMER_EV_PENDING_ADDR); sys_write8(TIMER_EV, TIMER_EV_PENDING_ADDR);
accumulated_cycle_count += sys_clock_hw_cycles_per_tick(); accumulated_cycle_count += k_ticks_to_cyc_floor32(1);
z_clock_announce(1); z_clock_announce(1);
irq_unlock(key); irq_unlock(key);
@@ -57,9 +57,9 @@ int z_clock_driver_init(struct device *device)
sys_write8(TIMER_DISABLE, TIMER_EN_ADDR); sys_write8(TIMER_DISABLE, TIMER_EN_ADDR);
for (int i = 0; i < 4; i++) { for (int i = 0; i < 4; i++) {
sys_write8(sys_clock_hw_cycles_per_tick() >> (24 - i * 8), sys_write8(k_ticks_to_cyc_floor32(1) >> (24 - i * 8),
TIMER_RELOAD_ADDR + i * 0x4); TIMER_RELOAD_ADDR + i * 0x4);
sys_write8(sys_clock_hw_cycles_per_tick() >> (24 - i * 8), sys_write8(k_ticks_to_cyc_floor32(1) >> (24 - i * 8),
TIMER_LOAD_ADDR + i * 0x4); TIMER_LOAD_ADDR + i * 0x4);
} }

View file

@@ -574,7 +574,7 @@ int z_clock_driver_init(struct device *device)
/* determine the timer counter value (in timer clock cycles/system tick) /* determine the timer counter value (in timer clock cycles/system tick)
*/ */
cycles_per_tick = sys_clock_hw_cycles_per_tick(); cycles_per_tick = k_ticks_to_cyc_floor32(1);
tickless_idle_init(); tickless_idle_init();

View file

@@ -111,7 +111,7 @@ void _timer_int_handler(void *unused)
u32_t regval; u32_t regval;
regval = sys_read32(TIMER_BASEADDR + XTTCPS_ISR_OFFSET); regval = sys_read32(TIMER_BASEADDR + XTTCPS_ISR_OFFSET);
accumulated_cycles += sys_clock_hw_cycles_per_tick(); accumulated_cycles += k_ticks_to_cyc_floor32(1);
z_clock_announce(_sys_idle_elapsed_ticks); z_clock_announce(_sys_idle_elapsed_ticks);
} }

View file

@@ -1637,7 +1637,7 @@ __syscall u32_t k_timer_remaining_get(struct k_timer *timer);
static inline u32_t z_impl_k_timer_remaining_get(struct k_timer *timer) static inline u32_t z_impl_k_timer_remaining_get(struct k_timer *timer)
{ {
const s32_t ticks = z_timeout_remaining(&timer->timeout); const s32_t ticks = z_timeout_remaining(&timer->timeout);
return (ticks > 0) ? (u32_t)__ticks_to_ms(ticks) : 0U; return (ticks > 0) ? (u32_t)k_ticks_to_ms_floor64(ticks) : 0U;
} }
/** /**
@@ -3077,7 +3077,7 @@ static inline int k_delayed_work_submit(struct k_delayed_work *work,
*/ */
static inline s32_t k_delayed_work_remaining_get(struct k_delayed_work *work) static inline s32_t k_delayed_work_remaining_get(struct k_delayed_work *work)
{ {
return __ticks_to_ms(z_timeout_remaining(&work->timeout)); return k_ticks_to_ms_floor64(z_timeout_remaining(&work->timeout));
} }
/** /**

View file

@@ -72,13 +72,20 @@ extern void z_enable_sys_clock(void);
#endif #endif
#define __ticks_to_ms(t) k_ticks_to_ms_floor64(t) #define __ticks_to_ms(t) __DEPRECATED_MACRO \
#define z_ms_to_ticks(t) k_ms_to_ticks_ceil32(t) k_ticks_to_ms_floor64(t)
#define __ticks_to_us(t) k_ticks_to_us_floor64(t) #define z_ms_to_ticks(t) \
#define z_us_to_ticks(t) k_us_to_ticks_ceil64(t) k_ms_to_ticks_ceil32(t)
#define sys_clock_hw_cycles_per_tick() k_ticks_to_cyc_floor32(1) #define __ticks_to_us(t) __DEPRECATED_MACRO \
#define SYS_CLOCK_HW_CYCLES_TO_NS64(t) (1000 * k_cyc_to_us_floor64(t)) k_ticks_to_us_floor64(t)
#define SYS_CLOCK_HW_CYCLES_TO_NS(t) ((u32_t)(1000 * k_cyc_to_us_floor64(t))) #define z_us_to_ticks(t) __DEPRECATED_MACRO \
k_us_to_ticks_ceil64(t)
#define sys_clock_hw_cycles_per_tick() __DEPRECATED_MACRO \
k_ticks_to_cyc_floor32(1)
#define SYS_CLOCK_HW_CYCLES_TO_NS64(t) __DEPRECATED_MACRO \
k_cyc_to_ns_floor64(t)
#define SYS_CLOCK_HW_CYCLES_TO_NS(t) __DEPRECATED_MACRO \
((u32_t)k_cyc_to_ns_floor64(t))
/* added tick needed to account for tick in progress */ /* added tick needed to account for tick in progress */
#define _TICK_ALIGN 1 #define _TICK_ALIGN 1
@@ -88,7 +95,7 @@ extern void z_enable_sys_clock(void);
* and calculates the average cycle time * and calculates the average cycle time
*/ */
#define SYS_CLOCK_HW_CYCLES_TO_NS_AVG(X, NCYCLES) \ #define SYS_CLOCK_HW_CYCLES_TO_NS_AVG(X, NCYCLES) \
(u32_t)(SYS_CLOCK_HW_CYCLES_TO_NS64(X) / NCYCLES) (u32_t)(k_cyc_to_ns_floor64(X) / NCYCLES)
/** /**
* @defgroup clock_apis Kernel Clock APIs * @defgroup clock_apis Kernel Clock APIs

View file

@@ -638,7 +638,7 @@ int k_work_poll_submit_to_queue(struct k_work_q *work_q,
if (timeout != K_FOREVER) { if (timeout != K_FOREVER) {
z_add_timeout(&work->timeout, z_add_timeout(&work->timeout,
triggered_work_expiration_handler, triggered_work_expiration_handler,
z_ms_to_ticks(timeout)); k_ms_to_ticks_ceil32(timeout));
} }
/* From now, any event will result in submitted work. */ /* From now, any event will result in submitted work. */

View file

@@ -257,7 +257,7 @@ void k_sched_time_slice_set(s32_t slice, int prio)
{ {
LOCKED(&sched_spinlock) { LOCKED(&sched_spinlock) {
_current_cpu->slice_ticks = 0; _current_cpu->slice_ticks = 0;
slice_time = z_ms_to_ticks(slice); slice_time = k_ms_to_ticks_ceil32(slice);
slice_max_prio = prio; slice_max_prio = prio;
z_reset_time_slice(); z_reset_time_slice();
} }
@@ -368,7 +368,7 @@ static void pend(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout)
} }
if (timeout != K_FOREVER) { if (timeout != K_FOREVER) {
s32_t ticks = _TICK_ALIGN + z_ms_to_ticks(timeout); s32_t ticks = _TICK_ALIGN + k_ms_to_ticks_ceil32(timeout);
z_add_thread_timeout(thread, ticks); z_add_thread_timeout(thread, ticks);
} }
@@ -975,9 +975,9 @@ s32_t z_impl_k_sleep(int ms)
{ {
s32_t ticks; s32_t ticks;
ticks = z_ms_to_ticks(ms); ticks = k_ms_to_ticks_ceil32(ms);
ticks = z_tick_sleep(ticks); ticks = z_tick_sleep(ticks);
return __ticks_to_ms(ticks); return k_ticks_to_ms_floor64(ticks);
} }
#ifdef CONFIG_USERSPACE #ifdef CONFIG_USERSPACE
@@ -992,9 +992,9 @@ s32_t z_impl_k_usleep(int us)
{ {
s32_t ticks; s32_t ticks;
ticks = z_us_to_ticks(us); ticks = k_us_to_ticks_ceil64(us);
ticks = z_tick_sleep(ticks); ticks = z_tick_sleep(ticks);
return __ticks_to_us(ticks); return k_ticks_to_us_floor64(ticks);
} }
#ifdef CONFIG_USERSPACE #ifdef CONFIG_USERSPACE

View file

@@ -378,7 +378,7 @@ static void schedule_new_thread(struct k_thread *thread, s32_t delay)
if (delay == 0) { if (delay == 0) {
k_thread_start(thread); k_thread_start(thread);
} else { } else {
s32_t ticks = _TICK_ALIGN + z_ms_to_ticks(delay); s32_t ticks = _TICK_ALIGN + k_ms_to_ticks_ceil32(delay);
z_add_thread_timeout(thread, ticks); z_add_thread_timeout(thread, ticks);
} }

View file

@@ -238,7 +238,7 @@ u32_t z_tick_get_32(void)
s64_t z_impl_k_uptime_get(void) s64_t z_impl_k_uptime_get(void)
{ {
return __ticks_to_ms(z_tick_get()); return k_ticks_to_ms_floor64(z_tick_get());
} }
#ifdef CONFIG_USERSPACE #ifdef CONFIG_USERSPACE

View file

@@ -112,8 +112,8 @@ void z_impl_k_timer_start(struct k_timer *timer, s32_t duration, s32_t period)
volatile s32_t period_in_ticks, duration_in_ticks; volatile s32_t period_in_ticks, duration_in_ticks;
period_in_ticks = z_ms_to_ticks(period); period_in_ticks = k_ms_to_ticks_ceil32(period);
duration_in_ticks = z_ms_to_ticks(duration); duration_in_ticks = k_ms_to_ticks_ceil32(duration);
(void)z_abort_timeout(&timer->timeout); (void)z_abort_timeout(&timer->timeout);
timer->period = period_in_ticks; timer->period = period_in_ticks;

View file

@@ -108,7 +108,7 @@ int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
/* Add timeout */ /* Add timeout */
z_add_timeout(&work->timeout, work_timeout, z_add_timeout(&work->timeout, work_timeout,
_TICK_ALIGN + z_ms_to_ticks(delay)); _TICK_ALIGN + k_ms_to_ticks_ceil32(delay));
done: done:
k_spin_unlock(&lock, key); k_spin_unlock(&lock, key);

View file

@@ -142,7 +142,7 @@ osEvent osSignalWait(int32_t signals, uint32_t millisec)
*/ */
hwclk_cycles_delta = (u64_t)k_cycle_get_32() - time_stamp_start; hwclk_cycles_delta = (u64_t)k_cycle_get_32() - time_stamp_start;
time_delta_ns = time_delta_ns =
(u32_t)SYS_CLOCK_HW_CYCLES_TO_NS(hwclk_cycles_delta); (u32_t)k_cyc_to_ns_floor64(hwclk_cycles_delta);
time_delta_ms = (u32_t)time_delta_ns/NSEC_PER_MSEC; time_delta_ms = (u32_t)time_delta_ns/NSEC_PER_MSEC;
if (timeout > time_delta_ms) { if (timeout > time_delta_ms) {

View file

@@ -109,7 +109,7 @@ uint32_t osEventFlagsWait(osEventFlagsId_t ef_id, uint32_t flags,
struct cv2_event_flags *events = (struct cv2_event_flags *)ef_id; struct cv2_event_flags *events = (struct cv2_event_flags *)ef_id;
int retval, key; int retval, key;
u32_t sig; u32_t sig;
u32_t time_delta_ms, timeout_ms = __ticks_to_ms(timeout); u32_t time_delta_ms, timeout_ms = k_ticks_to_ms_floor64(timeout);
u64_t time_stamp_start, hwclk_cycles_delta, time_delta_ns; u64_t time_stamp_start, hwclk_cycles_delta, time_delta_ns;
/* Can be called from ISRs only if timeout is set to 0 */ /* Can be called from ISRs only if timeout is set to 0 */
@@ -172,7 +172,7 @@ uint32_t osEventFlagsWait(osEventFlagsId_t ef_id, uint32_t flags,
(u64_t)k_cycle_get_32() - time_stamp_start; (u64_t)k_cycle_get_32() - time_stamp_start;
time_delta_ns = time_delta_ns =
(u32_t)SYS_CLOCK_HW_CYCLES_TO_NS(hwclk_cycles_delta); (u32_t)k_cyc_to_ns_floor64(hwclk_cycles_delta);
time_delta_ms = (u32_t)time_delta_ns / NSEC_PER_MSEC; time_delta_ms = (u32_t)time_delta_ns / NSEC_PER_MSEC;

View file

@@ -132,7 +132,7 @@ osStatus_t osDelay(uint32_t ticks)
return osErrorISR; return osErrorISR;
} }
k_sleep(__ticks_to_ms(ticks)); k_sleep(k_ticks_to_ms_floor64(ticks));
return osOK; return osOK;
} }
@@ -149,7 +149,7 @@ osStatus_t osDelayUntil(uint32_t ticks)
} }
ticks_elapsed = osKernelGetTickCount(); ticks_elapsed = osKernelGetTickCount();
k_sleep(__ticks_to_ms(ticks - ticks_elapsed)); k_sleep(k_ticks_to_ms_floor64(ticks - ticks_elapsed));
return osOK; return osOK;
} }

View file

@@ -109,7 +109,7 @@ void *osMemoryPoolAlloc(osMemoryPoolId_t mp_id, uint32_t timeout)
} else { } else {
retval = k_mem_slab_alloc( retval = k_mem_slab_alloc(
(struct k_mem_slab *)(&mslab->z_mslab), (struct k_mem_slab *)(&mslab->z_mslab),
(void **)&ptr, __ticks_to_ms(timeout)); (void **)&ptr, k_ticks_to_ms_floor64(timeout));
} }
if (retval == 0) { if (retval == 0) {

View file

@@ -104,7 +104,7 @@ osStatus_t osMessageQueuePut(osMessageQueueId_t msgq_id, const void *msg_ptr,
retval = k_msgq_put(&msgq->z_msgq, (void *)msg_ptr, K_FOREVER); retval = k_msgq_put(&msgq->z_msgq, (void *)msg_ptr, K_FOREVER);
} else { } else {
retval = k_msgq_put(&msgq->z_msgq, (void *)msg_ptr, retval = k_msgq_put(&msgq->z_msgq, (void *)msg_ptr,
__ticks_to_ms(timeout)); k_ticks_to_ms_floor64(timeout));
} }
if (retval == 0) { if (retval == 0) {
@@ -142,7 +142,7 @@ osStatus_t osMessageQueueGet(osMessageQueueId_t msgq_id, void *msg_ptr,
retval = k_msgq_get(&msgq->z_msgq, msg_ptr, K_FOREVER); retval = k_msgq_get(&msgq->z_msgq, msg_ptr, K_FOREVER);
} else { } else {
retval = k_msgq_get(&msgq->z_msgq, msg_ptr, retval = k_msgq_get(&msgq->z_msgq, msg_ptr,
__ticks_to_ms(timeout)); k_ticks_to_ms_floor64(timeout));
} }
if (retval == 0) { if (retval == 0) {

View file

@@ -94,7 +94,7 @@ osStatus_t osMutexAcquire(osMutexId_t mutex_id, uint32_t timeout)
status = k_mutex_lock(&mutex->z_mutex, K_NO_WAIT); status = k_mutex_lock(&mutex->z_mutex, K_NO_WAIT);
} else { } else {
status = k_mutex_lock(&mutex->z_mutex, status = k_mutex_lock(&mutex->z_mutex,
__ticks_to_ms(timeout)); k_ticks_to_ms_floor64(timeout));
} }
if (status == -EBUSY) { if (status == -EBUSY) {

View file

@@ -77,7 +77,7 @@ osStatus_t osSemaphoreAcquire(osSemaphoreId_t semaphore_id, uint32_t timeout)
status = k_sem_take(&semaphore->z_semaphore, K_NO_WAIT); status = k_sem_take(&semaphore->z_semaphore, K_NO_WAIT);
} else { } else {
status = k_sem_take(&semaphore->z_semaphore, status = k_sem_take(&semaphore->z_semaphore,
__ticks_to_ms(timeout)); k_ticks_to_ms_floor64(timeout));
} }
if (status == -EBUSY) { if (status == -EBUSY) {

View file

@@ -89,7 +89,7 @@ uint32_t osThreadFlagsWait(uint32_t flags, uint32_t options, uint32_t timeout)
struct cv2_thread *tid; struct cv2_thread *tid;
int retval, key; int retval, key;
u32_t sig; u32_t sig;
u32_t time_delta_ms, timeout_ms = __ticks_to_ms(timeout); u32_t time_delta_ms, timeout_ms = k_ticks_to_ms_floor64(timeout);
u64_t time_stamp_start, hwclk_cycles_delta, time_delta_ns; u64_t time_stamp_start, hwclk_cycles_delta, time_delta_ns;
if (k_is_in_isr()) { if (k_is_in_isr()) {
@@ -155,7 +155,7 @@ uint32_t osThreadFlagsWait(uint32_t flags, uint32_t options, uint32_t timeout)
(u64_t)k_cycle_get_32() - time_stamp_start; (u64_t)k_cycle_get_32() - time_stamp_start;
time_delta_ns = time_delta_ns =
(u32_t)SYS_CLOCK_HW_CYCLES_TO_NS(hwclk_cycles_delta); (u32_t)k_cyc_to_ns_floor64(hwclk_cycles_delta);
time_delta_ms = (u32_t)time_delta_ns / NSEC_PER_MSEC; time_delta_ms = (u32_t)time_delta_ns / NSEC_PER_MSEC;

View file

@@ -80,7 +80,7 @@ osTimerId_t osTimerNew(osTimerFunc_t func, osTimerType_t type,
osStatus_t osTimerStart(osTimerId_t timer_id, uint32_t ticks) osStatus_t osTimerStart(osTimerId_t timer_id, uint32_t ticks)
{ {
struct cv2_timer *timer = (struct cv2_timer *)timer_id; struct cv2_timer *timer = (struct cv2_timer *)timer_id;
u32_t millisec = __ticks_to_ms(ticks); u32_t millisec = k_ticks_to_ms_floor64(ticks);
if (timer == NULL) { if (timer == NULL) {
return osErrorParameter; return osErrorParameter;

View file

@@ -102,7 +102,7 @@ void main(void)
stop_time = k_cycle_get_32(); stop_time = k_cycle_get_32();
cycles_spent = stop_time - start_time; cycles_spent = stop_time - start_time;
nanoseconds_spent = SYS_CLOCK_HW_CYCLES_TO_NS(cycles_spent); nanoseconds_spent = (u32_t)k_cyc_to_ns_floor64(cycles_spent);
for (i = 0; i < THREADS_NUM; i++) for (i = 0; i < THREADS_NUM; i++)
printk("Pi value calculated by thread #%d: %s\n", i, buffer[i]); printk("Pi value calculated by thread #%d: %s\n", i, buffer[i]);

View file

@@ -860,7 +860,7 @@ static ssize_t write_met(struct bt_conn *conn, const struct bt_gatt_attr *attr,
memcpy(value + offset, buf, len); memcpy(value + offset, buf, len);
delta = k_cycle_get_32() - cycle_stamp; delta = k_cycle_get_32() - cycle_stamp;
delta = SYS_CLOCK_HW_CYCLES_TO_NS(delta); delta = (u32_t)k_cyc_to_ns_floor64(delta);
/* if last data rx-ed was greater than 1 second in the past, /* if last data rx-ed was greater than 1 second in the past,
* reset the metrics. * reset the metrics.

View file

@@ -62,7 +62,7 @@ static int l2cap_recv_metrics(struct bt_l2cap_chan *chan, struct net_buf *buf)
u32_t delta; u32_t delta;
delta = k_cycle_get_32() - cycle_stamp; delta = k_cycle_get_32() - cycle_stamp;
delta = SYS_CLOCK_HW_CYCLES_TO_NS(delta); delta = (u32_t)k_cyc_to_ns_floor64(delta);
/* if last data rx-ed was greater than 1 second in the past, /* if last data rx-ed was greater than 1 second in the past,
* reset the metrics. * reset the metrics.

View file

@@ -62,10 +62,10 @@ void cpu_stats_get_ns(struct cpu_stats *cpu_stats_ns)
int key = irq_lock(); int key = irq_lock();
cpu_stats_update_counters(); cpu_stats_update_counters();
cpu_stats_ns->idle = SYS_CLOCK_HW_CYCLES_TO_NS(stats_hw_tick.idle); cpu_stats_ns->idle = (u32_t)k_cyc_to_ns_floor64(stats_hw_tick.idle);
cpu_stats_ns->non_idle = SYS_CLOCK_HW_CYCLES_TO_NS( cpu_stats_ns->non_idle = (u32_t)k_cyc_to_ns_floor64(
stats_hw_tick.non_idle); stats_hw_tick.non_idle);
cpu_stats_ns->sched = SYS_CLOCK_HW_CYCLES_TO_NS(stats_hw_tick.sched); cpu_stats_ns->sched = (u32_t)k_cyc_to_ns_floor64(stats_hw_tick.sched);
irq_unlock(key); irq_unlock(key);
} }

View file

@@ -2887,9 +2887,9 @@ static enum net_verdict handle_ipv6_echo_reply(struct net_pkt *pkt,
net_pkt_ieee802154_rssi(pkt), net_pkt_ieee802154_rssi(pkt),
#endif #endif
#ifdef CONFIG_FLOAT #ifdef CONFIG_FLOAT
(SYS_CLOCK_HW_CYCLES_TO_NS(cycles) / 1000000.f)); ((u32_t)k_cyc_to_ns_floor64(cycles) / 1000000.f));
#else #else
(SYS_CLOCK_HW_CYCLES_TO_NS(cycles) / 1000000)); ((u32_t)k_cyc_to_ns_floor64(cycles) / 1000000));
#endif #endif
k_sem_give(&ping_timeout); k_sem_give(&ping_timeout);
@@ -3011,9 +3011,9 @@ static enum net_verdict handle_ipv4_echo_reply(struct net_pkt *pkt,
ntohs(icmp_echo->sequence), ntohs(icmp_echo->sequence),
ip_hdr->ttl, ip_hdr->ttl,
#ifdef CONFIG_FLOAT #ifdef CONFIG_FLOAT
(SYS_CLOCK_HW_CYCLES_TO_NS(cycles) / 1000000.f)); ((u32_t)k_cyc_to_ns_floor64(cycles) / 1000000.f));
#else #else
(SYS_CLOCK_HW_CYCLES_TO_NS(cycles) / 1000000)); ((u32_t)k_cyc_to_ns_floor64(cycles) / 1000000));
#endif #endif
k_sem_give(&ping_timeout); k_sem_give(&ping_timeout);

View file

@@ -327,7 +327,7 @@ static inline void net_stats_update_tx_time(struct net_if *iface,
u32_t diff = end_time - start_time; u32_t diff = end_time - start_time;
UPDATE_STAT(iface, stats.tx_time.sum += UPDATE_STAT(iface, stats.tx_time.sum +=
SYS_CLOCK_HW_CYCLES_TO_NS64(diff) / NSEC_PER_USEC); k_cyc_to_ns_floor64(diff) / 1000);
UPDATE_STAT(iface, stats.tx_time.count += 1); UPDATE_STAT(iface, stats.tx_time.count += 1);
} }
#else #else
@@ -379,7 +379,7 @@ static inline void net_stats_update_tc_tx_time(struct net_if *iface,
u32_t diff = end_time - start_time; u32_t diff = end_time - start_time;
UPDATE_STAT(iface, stats.tc.sent[tc].tx_time.sum += UPDATE_STAT(iface, stats.tc.sent[tc].tx_time.sum +=
SYS_CLOCK_HW_CYCLES_TO_NS64(diff) / NSEC_PER_USEC); k_cyc_to_ns_floor64(diff) / 1000);
UPDATE_STAT(iface, stats.tc.sent[tc].tx_time.count += 1); UPDATE_STAT(iface, stats.tc.sent[tc].tx_time.count += 1);
net_stats_update_tx_time(iface, start_time, end_time); net_stats_update_tx_time(iface, start_time, end_time);

View file

@@ -160,7 +160,7 @@ static s32_t canbus_stmin_to_ticks(u8_t stmin)
time_ms = stmin; time_ms = stmin;
} }
return z_ms_to_ticks(time_ms); return k_ms_to_ticks_ceil32(time_ms);
} }
static u16_t canbus_get_lladdr(struct net_linkaddr *net_lladdr) static u16_t canbus_get_lladdr(struct net_linkaddr *net_lladdr)
@@ -533,7 +533,7 @@ static enum net_verdict canbus_process_cf(struct net_pkt *pkt)
} }
} else { } else {
z_add_timeout(&rx_ctx->timeout, canbus_rx_timeout, z_add_timeout(&rx_ctx->timeout, canbus_rx_timeout,
z_ms_to_ticks(NET_CAN_BS_TIME)); k_ms_to_ticks_ceil32(NET_CAN_BS_TIME));
if (NET_CAN_BS != 0 && !mcast) { if (NET_CAN_BS != 0 && !mcast) {
rx_ctx->act_block_nr++; rx_ctx->act_block_nr++;
@@ -637,7 +637,7 @@ static enum net_verdict canbus_process_ff(struct net_pkt *pkt)
/* At this point we expect to get Consecutive frames directly */ /* At this point we expect to get Consecutive frames directly */
z_add_timeout(&rx_ctx->timeout, canbus_rx_timeout, z_add_timeout(&rx_ctx->timeout, canbus_rx_timeout,
z_ms_to_ticks(NET_CAN_BS_TIME)); k_ms_to_ticks_ceil32(NET_CAN_BS_TIME));
rx_ctx->state = NET_CAN_RX_STATE_CF; rx_ctx->state = NET_CAN_RX_STATE_CF;
@@ -764,7 +764,7 @@ static void canbus_tx_work(struct net_pkt *pkt)
ctx); ctx);
ctx->state = NET_CAN_TX_STATE_WAIT_FC; ctx->state = NET_CAN_TX_STATE_WAIT_FC;
z_add_timeout(&ctx->timeout, canbus_tx_timeout, z_add_timeout(&ctx->timeout, canbus_tx_timeout,
z_ms_to_ticks(NET_CAN_BS_TIME)); k_ms_to_ticks_ceil32(NET_CAN_BS_TIME));
break; break;
} else if (ctx->opts.stmin) { } else if (ctx->opts.stmin) {
ctx->state = NET_CAN_TX_STATE_WAIT_ST; ctx->state = NET_CAN_TX_STATE_WAIT_ST;
@@ -777,7 +777,7 @@ static void canbus_tx_work(struct net_pkt *pkt)
case NET_CAN_TX_STATE_WAIT_ST: case NET_CAN_TX_STATE_WAIT_ST:
NET_DBG("SM wait ST. CTX: %p", ctx); NET_DBG("SM wait ST. CTX: %p", ctx);
z_add_timeout(&ctx->timeout, canbus_st_min_timeout, z_add_timeout(&ctx->timeout, canbus_st_min_timeout,
z_ms_to_ticks(canbus_stmin_to_ticks(ctx->opts.stmin))); k_ms_to_ticks_ceil32(canbus_stmin_to_ticks(ctx->opts.stmin)));
ctx->state = NET_CAN_TX_STATE_SEND_CF; ctx->state = NET_CAN_TX_STATE_SEND_CF;
break; break;
@@ -833,7 +833,7 @@ static enum net_verdict canbus_process_fc_data(struct canbus_isotp_tx_ctx *ctx,
NET_DBG("Got WAIT frame. CTX: %p", ctx); NET_DBG("Got WAIT frame. CTX: %p", ctx);
z_abort_timeout(&ctx->timeout); z_abort_timeout(&ctx->timeout);
z_add_timeout(&ctx->timeout, canbus_tx_timeout, z_add_timeout(&ctx->timeout, canbus_tx_timeout,
z_ms_to_ticks(NET_CAN_BS_TIME)); k_ms_to_ticks_ceil32(NET_CAN_BS_TIME));
if (ctx->wft >= NET_CAN_WFTMAX) { if (ctx->wft >= NET_CAN_WFTMAX) {
NET_INFO("Got to many wait frames. CTX: %p", ctx); NET_INFO("Got to many wait frames. CTX: %p", ctx);
ctx->state = NET_CAN_TX_STATE_ERR; ctx->state = NET_CAN_TX_STATE_ERR;
@@ -1023,12 +1023,12 @@ static int canbus_send_multiple_frames(struct net_pkt *pkt, size_t len,
if (!mcast) { if (!mcast) {
z_add_timeout(&tx_ctx->timeout, canbus_tx_timeout, z_add_timeout(&tx_ctx->timeout, canbus_tx_timeout,
z_ms_to_ticks(NET_CAN_BS_TIME)); k_ms_to_ticks_ceil32(NET_CAN_BS_TIME));
tx_ctx->state = NET_CAN_TX_STATE_WAIT_FC; tx_ctx->state = NET_CAN_TX_STATE_WAIT_FC;
} else { } else {
tx_ctx->state = NET_CAN_TX_STATE_SEND_CF; tx_ctx->state = NET_CAN_TX_STATE_SEND_CF;
z_add_timeout(&tx_ctx->timeout, canbus_start_sending_cf, z_add_timeout(&tx_ctx->timeout, canbus_start_sending_cf,
z_ms_to_ticks(NET_CAN_FF_CF_TIME)); k_ms_to_ticks_ceil32(NET_CAN_FF_CF_TIME));
} }
return 0; return 0;

View file

@@ -345,7 +345,7 @@ static void echo_reply_handler(void *user_data, size_t user_data_len)
time_diff = abs(end_time - ctx->shell.echo_req_data); time_diff = abs(end_time - ctx->shell.echo_req_data);
ctx->shell.echo_req_data = ctx->shell.echo_req_data =
SYS_CLOCK_HW_CYCLES_TO_NS64(time_diff) / 1000; k_cyc_to_ns_floor64(time_diff) / 1000;
k_sem_give(&ctx->shell.wait_echo_reply); k_sem_give(&ctx->shell.wait_echo_reply);
} }

View file

@@ -104,7 +104,7 @@ static inline int high_timer_overflow(void)
/* Check if the time elapsed in msec is sufficient to trigger an /* Check if the time elapsed in msec is sufficient to trigger an
* overflow of the high precision timer * overflow of the high precision timer
*/ */
if (tCheck >= (SYS_CLOCK_HW_CYCLES_TO_NS64(UINT_MAX) / if (tCheck >= (k_cyc_to_ns_floor64(UINT_MAX) /
(NSEC_PER_USEC * USEC_PER_MSEC))) { (NSEC_PER_USEC * USEC_PER_MSEC))) {
return -1; return -1;
} }

View file

@@ -74,7 +74,7 @@ int int_to_thread(void)
make_int(); make_int();
if (flag_var == 1) { if (flag_var == 1) {
PRINT_FORMAT(" switching time is %u tcs = %u nsec", PRINT_FORMAT(" switching time is %u tcs = %u nsec",
timestamp, SYS_CLOCK_HW_CYCLES_TO_NS(timestamp)); timestamp, (u32_t)k_cyc_to_ns_floor64(timestamp));
} }
return 0; return 0;
} }

View file

@@ -92,6 +92,6 @@ int int_to_thread_evt(void)
k_sem_take(&WORKSEMA, K_FOREVER); k_sem_take(&WORKSEMA, K_FOREVER);
PRINT_FORMAT(" switch time is %u tcs = %u nsec", PRINT_FORMAT(" switch time is %u tcs = %u nsec",
timestamp, SYS_CLOCK_HW_CYCLES_TO_NS(timestamp)); timestamp, (u32_t)k_cyc_to_ns_floor64(timestamp));
return 0; return 0;
} }

View file

@@ -65,7 +65,7 @@ static inline void print_dash_line(void)
#define PRINT_TIME_BANNER() \ #define PRINT_TIME_BANNER() \
do { \ do { \
PRINT_FORMAT(" tcs = timer clock cycles: 1 tcs is %u nsec", \ PRINT_FORMAT(" tcs = timer clock cycles: 1 tcs is %u nsec", \
SYS_CLOCK_HW_CYCLES_TO_NS(1)); \ (u32_t)k_cyc_to_ns_floor64(1)); \
print_dash_line(); \ print_dash_line(); \
} while (0) } while (0)

View file

@@ -174,7 +174,7 @@ do { \
} \ } \
\ \
delta = k_cycle_get_32() - tsc; \ delta = k_cycle_get_32() - tsc; \
delta = SYS_CLOCK_HW_CYCLES_TO_NS64(delta); \ delta = k_cyc_to_ns_floor64(delta); \
\ \
mbedtls_printf("%9lu KiB/s, %9lu ns/byte\n", \ mbedtls_printf("%9lu KiB/s, %9lu ns/byte\n", \
ii * BUFSIZE / 1024, \ ii * BUFSIZE / 1024, \

View file

@@ -133,7 +133,7 @@ static inline void benchmark_timer_init(void) { }
static inline void benchmark_timer_stop(void) { } static inline void benchmark_timer_stop(void) { }
static inline void benchmark_timer_start(void) { } static inline void benchmark_timer_start(void) { }
#define CYCLES_TO_NS(x) SYS_CLOCK_HW_CYCLES_TO_NS(x) #define CYCLES_TO_NS(x) (u32_t)k_cyc_to_ns_floor64(x)
/* Get Core Frequency in MHz */ /* Get Core Frequency in MHz */
static inline u32_t get_core_freq_MHz(void) static inline u32_t get_core_freq_MHz(void)

View file

@@ -16,14 +16,13 @@
/** /**
* @brief This module verifies the delay specified during boot. * @brief This module verifies the delay specified during boot.
* @see k_cycle_get_32, #SYS_CLOCK_HW_CYCLES_TO_NS64(X)
*/ */
void test_verify_bootdelay(void) void test_verify_bootdelay(void)
{ {
u32_t current_cycles = k_cycle_get_32(); u32_t current_cycles = k_cycle_get_32();
/* compare this with the boot delay specified */ /* compare this with the boot delay specified */
zassert_true(SYS_CLOCK_HW_CYCLES_TO_NS64(current_cycles) >= zassert_true(k_cyc_to_ns_floor64(current_cycles) >=
(NSEC_PER_MSEC * CONFIG_BOOT_DELAY), (NSEC_PER_MSEC * CONFIG_BOOT_DELAY),
"boot delay not executed"); "boot delay not executed");
} }

View file

@@ -95,7 +95,7 @@ void test_clock_cycle(void)
c32 = k_cycle_get_32(); c32 = k_cycle_get_32();
/*break if cycle counter wrap around*/ /*break if cycle counter wrap around*/
while (k_cycle_get_32() > c32 && while (k_cycle_get_32() > c32 &&
k_cycle_get_32() < (c32 + sys_clock_hw_cycles_per_tick())) { k_cycle_get_32() < (c32 + k_ticks_to_cyc_floor32(1))) {
#if defined(CONFIG_ARCH_POSIX) #if defined(CONFIG_ARCH_POSIX)
k_busy_wait(50); k_busy_wait(50);
#endif #endif
@@ -119,7 +119,7 @@ void test_clock_cycle(void)
(sys_clock_hw_cycles_per_sec() / MSEC_PER_SEC), (sys_clock_hw_cycles_per_sec() / MSEC_PER_SEC),
NULL); NULL);
/* delta NS should be greater than 1 milli-second */ /* delta NS should be greater than 1 milli-second */
zassert_true(SYS_CLOCK_HW_CYCLES_TO_NS(c1 - c0) > zassert_true((u32_t)k_cyc_to_ns_floor64(c1 - c0) >
(NSEC_PER_SEC / MSEC_PER_SEC), NULL); (NSEC_PER_SEC / MSEC_PER_SEC), NULL);
} }
} }

View file

@@ -260,7 +260,7 @@ static void _test_kernel_cpu_idle(int atomic)
k_cpu_idle(); k_cpu_idle();
} }
/* calculating milliseconds per tick*/ /* calculating milliseconds per tick*/
tms += __ticks_to_ms(1); tms += k_ticks_to_ms_floor64(1);
tms2 = k_uptime_get_32(); tms2 = k_uptime_get_32();
zassert_false(tms2 < tms, "Bad ms per tick value computed," zassert_false(tms2 < tms, "Bad ms per tick value computed,"
"got %d which is less than %d\n", "got %d which is less than %d\n",
@@ -699,7 +699,7 @@ static void thread_sleep(void *delta, void *arg2, void *arg3)
timestamp = k_uptime_get() - timestamp; timestamp = k_uptime_get() - timestamp;
TC_PRINT(" thread back from sleep\n"); TC_PRINT(" thread back from sleep\n");
int slop = MAX(__ticks_to_ms(2), 1); int slop = MAX(k_ticks_to_ms_floor64(2), 1);
if (timestamp < timeout || timestamp > timeout + slop) { if (timestamp < timeout || timestamp > timeout + slop) {
TC_ERROR("timestamp out of range, got %d\n", (int)timestamp); TC_ERROR("timestamp out of range, got %d\n", (int)timestamp);

View file

@@ -58,10 +58,10 @@ static int ticks_to_sleep(int ticks)
u32_t stop_time; u32_t stop_time;
start_time = k_cycle_get_32(); start_time = k_cycle_get_32();
k_sleep(__ticks_to_ms(ticks)); k_sleep(k_ticks_to_ms_floor64(ticks));
stop_time = k_cycle_get_32(); stop_time = k_cycle_get_32();
return (stop_time - start_time) / sys_clock_hw_cycles_per_tick(); return (stop_time - start_time) / k_ticks_to_cyc_floor32(1);
} }
@@ -103,8 +103,8 @@ static void test_early_sleep(void)
k_thread_priority_set(k_current_get(), 0); k_thread_priority_set(k_current_get(), 0);
TC_PRINT("msec per tick: %lld.%03lld, ticks to sleep: %d\n", TC_PRINT("msec per tick: %lld.%03lld, ticks to sleep: %d\n",
__ticks_to_ms(1000) / 1000U, k_ticks_to_ms_floor64(1000) / 1000U,
__ticks_to_ms(1000) % 1000, k_ticks_to_ms_floor64(1000) % 1000,
TEST_TICKS_TO_SLEEP); TEST_TICKS_TO_SLEEP);
/* Create a lower priority thread */ /* Create a lower priority thread */

View file

@@ -101,7 +101,7 @@ static bool is_timeout_in_range(u32_t start_time, u32_t timeout)
u32_t stop_time, diff; u32_t stop_time, diff;
stop_time = k_cycle_get_32(); stop_time = k_cycle_get_32();
diff = SYS_CLOCK_HW_CYCLES_TO_NS(stop_time - diff = (u32_t)k_cyc_to_ns_floor64(stop_time -
start_time) / NSEC_PER_USEC; start_time) / NSEC_PER_USEC;
diff = diff / USEC_PER_MSEC; diff = diff / USEC_PER_MSEC;
return timeout <= diff; return timeout <= diff;
@@ -177,7 +177,7 @@ static int test_multiple_threads_pending(struct timeout_order_data *test_data,
diff_ms = test_data[j].timeout - data->timeout; diff_ms = test_data[j].timeout - data->timeout;
} }
if (z_ms_to_ticks(diff_ms) == 1) { if (k_ms_to_ticks_ceil32(diff_ms) == 1) {
TC_PRINT( TC_PRINT(
" thread (q order: %d, t/o: %d, fifo %p)\n", " thread (q order: %d, t/o: %d, fifo %p)\n",
data->q_order, data->timeout, data->fifo); data->q_order, data->timeout, data->fifo);

View file

@@ -110,7 +110,7 @@ static bool is_timeout_in_range(u32_t start_time, u32_t timeout)
u32_t stop_time, diff; u32_t stop_time, diff;
stop_time = k_cycle_get_32(); stop_time = k_cycle_get_32();
diff = SYS_CLOCK_HW_CYCLES_TO_NS(stop_time - diff = (u32_t)k_cyc_to_ns_floor64(stop_time -
start_time) / NSEC_PER_USEC; start_time) / NSEC_PER_USEC;
diff = diff / USEC_PER_MSEC; diff = diff / USEC_PER_MSEC;
return timeout <= diff; return timeout <= diff;

View file

@@ -69,11 +69,13 @@ static void thread_time_slice(void *p1, void *p2, void *p3)
* also expecting task switch below the switching tolerance. * also expecting task switch below the switching tolerance.
*/ */
expected_slice_min = expected_slice_min =
(z_ms_to_ticks(SLICE_SIZE) - TASK_SWITCH_TOLERANCE) * (k_ms_to_ticks_ceil32(SLICE_SIZE)
sys_clock_hw_cycles_per_tick(); - TASK_SWITCH_TOLERANCE)
* k_ticks_to_cyc_floor32(1);
expected_slice_max = expected_slice_max =
(z_ms_to_ticks(SLICE_SIZE) + TASK_SWITCH_TOLERANCE) * (k_ms_to_ticks_ceil32(SLICE_SIZE)
sys_clock_hw_cycles_per_tick(); + TASK_SWITCH_TOLERANCE)
* k_ticks_to_cyc_floor32(1);
} }
#ifdef CONFIG_DEBUG #ifdef CONFIG_DEBUG

View file

@@ -41,8 +41,8 @@ static void thread_tslice(void *p1, void *p2, void *p3)
int thread_parameter = (idx == (NUM_THREAD - 1)) ? '\n' : int thread_parameter = (idx == (NUM_THREAD - 1)) ? '\n' :
(idx + 'A'); (idx + 'A');
s64_t expected_slice_min = __ticks_to_ms(z_ms_to_ticks(SLICE_SIZE)); s64_t expected_slice_min = k_ticks_to_ms_floor64(k_ms_to_ticks_ceil32(SLICE_SIZE));
s64_t expected_slice_max = __ticks_to_ms(z_ms_to_ticks(SLICE_SIZE) + 1); s64_t expected_slice_max = k_ticks_to_ms_floor64(k_ms_to_ticks_ceil32(SLICE_SIZE) + 1);
/* Clumsy, but need to handle the precision loss with /* Clumsy, but need to handle the precision loss with
* submillisecond ticks. It's always possible to alias and * submillisecond ticks. It's always possible to alias and

View file

@ -22,7 +22,7 @@
#define ONE_SECOND (MSEC_PER_SEC) #define ONE_SECOND (MSEC_PER_SEC)
#define ONE_SECOND_ALIGNED \ #define ONE_SECOND_ALIGNED \
(u32_t)(__ticks_to_ms(z_ms_to_ticks(ONE_SECOND) + _TICK_ALIGN)) (u32_t)(k_ticks_to_ms_floor64(k_ms_to_ticks_ceil32(ONE_SECOND) + _TICK_ALIGN))
static struct k_sem test_thread_sem; static struct k_sem test_thread_sem;
static struct k_sem helper_thread_sem; static struct k_sem helper_thread_sem;

View file

@ -16,16 +16,16 @@ static struct k_thread tdata[NUM_THREAD];
#define CONFIG_TICKLESS_IDLE_THRESH 20 #define CONFIG_TICKLESS_IDLE_THRESH 20
#endif #endif
/*sleep duration tickless*/ /*sleep duration tickless*/
#define SLEEP_TICKLESS __ticks_to_ms(CONFIG_TICKLESS_IDLE_THRESH) #define SLEEP_TICKLESS k_ticks_to_ms_floor64(CONFIG_TICKLESS_IDLE_THRESH)
/*sleep duration with tick*/ /*sleep duration with tick*/
#define SLEEP_TICKFUL __ticks_to_ms(CONFIG_TICKLESS_IDLE_THRESH - 1) #define SLEEP_TICKFUL k_ticks_to_ms_floor64(CONFIG_TICKLESS_IDLE_THRESH - 1)
/*slice size is set as half of the sleep duration*/ /*slice size is set as half of the sleep duration*/
#define SLICE_SIZE __ticks_to_ms(CONFIG_TICKLESS_IDLE_THRESH >> 1) #define SLICE_SIZE k_ticks_to_ms_floor64(CONFIG_TICKLESS_IDLE_THRESH >> 1)
/*maximum slice duration accepted by the test*/ /*maximum slice duration accepted by the test*/
#define SLICE_SIZE_LIMIT __ticks_to_ms((CONFIG_TICKLESS_IDLE_THRESH >> 1) + 1) #define SLICE_SIZE_LIMIT k_ticks_to_ms_floor64((CONFIG_TICKLESS_IDLE_THRESH >> 1) + 1)
/*align to millisecond boundary*/ /*align to millisecond boundary*/
#if defined(CONFIG_ARCH_POSIX) #if defined(CONFIG_ARCH_POSIX)

View file

@ -269,10 +269,10 @@ void test_timer_periodicity(void)
* Please note, that expected firing time is not the * Please note, that expected firing time is not the
* one requested, as the kernel uses the ticks to manage * one requested, as the kernel uses the ticks to manage
* time. The actual perioid will be equal to [tick time] * time. The actual perioid will be equal to [tick time]
* multiplied by z_ms_to_ticks(PERIOD). * multiplied by k_ms_to_ticks_ceil32(PERIOD).
*/ */
TIMER_ASSERT(WITHIN_ERROR(delta, TIMER_ASSERT(WITHIN_ERROR(delta,
__ticks_to_ms(z_ms_to_ticks(PERIOD)), 1), k_ticks_to_ms_floor64(k_ms_to_ticks_ceil32(PERIOD)), 1),
&periodicity_timer); &periodicity_timer);
} }
@ -532,7 +532,7 @@ void test_timer_remaining_get(void)
* the value obtained through k_timer_remaining_get() could be larger * the value obtained through k_timer_remaining_get() could be larger
* than actual remaining time with maximum error equal to one tick. * than actual remaining time with maximum error equal to one tick.
*/ */
zassert_true(remaining <= (DURATION / 2) + __ticks_to_ms(1), NULL); zassert_true(remaining <= (DURATION / 2) + k_ticks_to_ms_floor64(1), NULL);
} }
static void timer_init(struct k_timer *timer, k_timer_expiry_t expiry_fn, static void timer_init(struct k_timer *timer, k_timer_expiry_t expiry_fn,

View file

@ -54,8 +54,8 @@ void test_timer(void)
errors = 0U; errors = 0U;
TC_PRINT("sys_clock_hw_cycles_per_tick() = %d\n", TC_PRINT("k_ticks_to_cyc_floor32(1) = %d\n",
sys_clock_hw_cycles_per_tick()); k_ticks_to_cyc_floor32(1));
TC_PRINT("sys_clock_hw_cycles_per_sec() = %d\n", TC_PRINT("sys_clock_hw_cycles_per_sec() = %d\n",
sys_clock_hw_cycles_per_sec()); sys_clock_hw_cycles_per_sec());

View file

@ -17,7 +17,7 @@
/* In fact, each work item could take up to this value */ /* In fact, each work item could take up to this value */
#define WORK_ITEM_WAIT_ALIGNED \ #define WORK_ITEM_WAIT_ALIGNED \
__ticks_to_ms(z_ms_to_ticks(WORK_ITEM_WAIT) + _TICK_ALIGN) k_ticks_to_ms_floor64(k_ms_to_ticks_ceil32(WORK_ITEM_WAIT) + _TICK_ALIGN)
/* /*
* Wait 50ms between work submissions, to ensure co-op and prempt * Wait 50ms between work submissions, to ensure co-op and prempt

View file

@ -141,10 +141,10 @@ static void tdelayed_work_submit(void *data)
/**TESTPOINT: check remaining timeout after submit */ /**TESTPOINT: check remaining timeout after submit */
zassert_true( zassert_true(
time_remaining <= __ticks_to_ms(z_ms_to_ticks(TIMEOUT) time_remaining <= k_ticks_to_ms_floor64(k_ms_to_ticks_ceil32(TIMEOUT)
+ _TICK_ALIGN) && + _TICK_ALIGN) &&
time_remaining >= __ticks_to_ms(z_ms_to_ticks(TIMEOUT) - time_remaining >= k_ticks_to_ms_floor64(k_ms_to_ticks_ceil32(TIMEOUT) -
z_ms_to_ticks(15)), NULL); k_ms_to_ticks_ceil32(15)), NULL);
/**TESTPOINT: check pending after delayed work submit*/ /**TESTPOINT: check pending after delayed work submit*/
zassert_true(k_work_pending((struct k_work *)&delayed_work[i]) zassert_true(k_work_pending((struct k_work *)&delayed_work[i])
== 0, NULL); == 0, NULL);

View file

@ -45,7 +45,7 @@ void test_kernel_systick(void)
k_busy_wait(WAIT_TIME_US); k_busy_wait(WAIT_TIME_US);
stop_time = osKernelSysTick(); stop_time = osKernelSysTick();
diff = SYS_CLOCK_HW_CYCLES_TO_NS(stop_time - diff = (u32_t)k_cyc_to_ns_floor64(stop_time -
start_time) / NSEC_PER_USEC; start_time) / NSEC_PER_USEC;
/* Check that it's within 1%. On some Zephyr platforms /* Check that it's within 1%. On some Zephyr platforms

View file

@ -252,7 +252,7 @@ void test_thread_prio(void)
static void thread5(void *argument) static void thread5(void *argument)
{ {
printk(" * Thread B started.\n"); printk(" * Thread B started.\n");
osDelay(z_ms_to_ticks(DELAY_MS)); osDelay(k_ms_to_ticks_ceil32(DELAY_MS));
printk(" * Thread B joining...\n"); printk(" * Thread B joining...\n");
} }
@ -317,13 +317,13 @@ void test_thread_detached(void)
thread = osThreadNew(thread5, NULL, NULL); /* osThreadDetached */ thread = osThreadNew(thread5, NULL, NULL); /* osThreadDetached */
zassert_not_null(thread, "Failed to create thread with osThreadNew!"); zassert_not_null(thread, "Failed to create thread with osThreadNew!");
osDelay(z_ms_to_ticks(DELAY_MS - DELTA_MS)); osDelay(k_ms_to_ticks_ceil32(DELAY_MS - DELTA_MS));
status = osThreadJoin(thread); status = osThreadJoin(thread);
zassert_equal(status, osErrorResource, zassert_equal(status, osErrorResource,
"Incorrect status returned from osThreadJoin!"); "Incorrect status returned from osThreadJoin!");
osDelay(z_ms_to_ticks(DELTA_MS)); osDelay(k_ms_to_ticks_ceil32(DELTA_MS));
} }
void thread6(void *argument) void thread6(void *argument)
@ -350,12 +350,12 @@ void test_thread_joinable_detach(void)
tB = osThreadNew(thread6, tA, &attr); tB = osThreadNew(thread6, tA, &attr);
zassert_not_null(tB, "Failed to create thread with osThreadNew!"); zassert_not_null(tB, "Failed to create thread with osThreadNew!");
osDelay(z_ms_to_ticks(DELAY_MS - DELTA_MS)); osDelay(k_ms_to_ticks_ceil32(DELAY_MS - DELTA_MS));
status = osThreadDetach(tA); status = osThreadDetach(tA);
zassert_equal(status, osOK, "osThreadDetach failed."); zassert_equal(status, osOK, "osThreadDetach failed.");
osDelay(z_ms_to_ticks(DELTA_MS)); osDelay(k_ms_to_ticks_ceil32(DELTA_MS));
} }
void test_thread_joinable_terminate(void) void test_thread_joinable_terminate(void)
@ -372,10 +372,10 @@ void test_thread_joinable_terminate(void)
tB = osThreadNew(thread6, tA, &attr); tB = osThreadNew(thread6, tA, &attr);
zassert_not_null(tB, "Failed to create thread with osThreadNew!"); zassert_not_null(tB, "Failed to create thread with osThreadNew!");
osDelay(z_ms_to_ticks(DELAY_MS - DELTA_MS)); osDelay(k_ms_to_ticks_ceil32(DELAY_MS - DELTA_MS));
status = osThreadTerminate(tA); status = osThreadTerminate(tA);
zassert_equal(status, osOK, "osThreadTerminate failed."); zassert_equal(status, osOK, "osThreadTerminate failed.");
osDelay(z_ms_to_ticks(DELTA_MS)); osDelay(k_ms_to_ticks_ceil32(DELTA_MS));
} }