kernel: Move k_busy_wait from thread to timeout
k_busy_wait() is the only function from thread.c that is used when CONFIG_MULTITHREADING=n. Move it to timeout.c, where it fits better, since it requires the system clock to be present. Signed-off-by: Krzysztof Chruscinski <krzysztof.chruscinski@nordicsemi.no>
This commit is contained in:
parent
09ba258b05
commit
b8fb353cd4
|
@ -120,44 +120,6 @@ bool z_is_thread_essential(void)
|
|||
return (_current->base.user_options & K_ESSENTIAL) == K_ESSENTIAL;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SYS_CLOCK_EXISTS
|
||||
void z_impl_k_busy_wait(uint32_t usec_to_wait)
|
||||
{
|
||||
if (usec_to_wait == 0U) {
|
||||
return;
|
||||
}
|
||||
|
||||
#if !defined(CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT)
|
||||
uint32_t start_cycles = k_cycle_get_32();
|
||||
|
||||
/* use 64-bit math to prevent overflow when multiplying */
|
||||
uint32_t cycles_to_wait = (uint32_t)(
|
||||
(uint64_t)usec_to_wait *
|
||||
(uint64_t)sys_clock_hw_cycles_per_sec() /
|
||||
(uint64_t)USEC_PER_SEC
|
||||
);
|
||||
|
||||
for (;;) {
|
||||
uint32_t current_cycles = k_cycle_get_32();
|
||||
|
||||
/* this handles the rollover on an unsigned 32-bit value */
|
||||
if ((current_cycles - start_cycles) >= cycles_to_wait) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
#else
|
||||
arch_busy_wait(usec_to_wait);
|
||||
#endif /* CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT */
|
||||
}
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
/* Syscall verification handler for k_busy_wait().
 *
 * The argument is a plain scalar (no user-space pointers to validate),
 * so the call is forwarded directly to the kernel implementation.
 */
static inline void z_vrfy_k_busy_wait(uint32_t usec_to_wait)
{
	z_impl_k_busy_wait(usec_to_wait);
}
|
||||
#include <syscalls/k_busy_wait_mrsh.c>
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
#endif /* CONFIG_SYS_CLOCK_EXISTS */
|
||||
|
||||
#ifdef CONFIG_THREAD_CUSTOM_DATA
|
||||
void z_impl_k_thread_custom_data_set(void *value)
|
||||
|
|
|
@ -297,6 +297,43 @@ static inline int64_t z_vrfy_k_uptime_ticks(void)
|
|||
#include <syscalls/k_uptime_ticks_mrsh.c>
|
||||
#endif
|
||||
|
||||
void z_impl_k_busy_wait(uint32_t usec_to_wait)
|
||||
{
|
||||
if (usec_to_wait == 0U) {
|
||||
return;
|
||||
}
|
||||
|
||||
#if !defined(CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT)
|
||||
uint32_t start_cycles = k_cycle_get_32();
|
||||
|
||||
/* use 64-bit math to prevent overflow when multiplying */
|
||||
uint32_t cycles_to_wait = (uint32_t)(
|
||||
(uint64_t)usec_to_wait *
|
||||
(uint64_t)sys_clock_hw_cycles_per_sec() /
|
||||
(uint64_t)USEC_PER_SEC
|
||||
);
|
||||
|
||||
for (;;) {
|
||||
uint32_t current_cycles = k_cycle_get_32();
|
||||
|
||||
/* this handles the rollover on an unsigned 32-bit value */
|
||||
if ((current_cycles - start_cycles) >= cycles_to_wait) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
#else
|
||||
arch_busy_wait(usec_to_wait);
|
||||
#endif /* CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT */
|
||||
}
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
/* Syscall verification handler for k_busy_wait().
 *
 * The argument is a plain scalar (no user-space pointers to validate),
 * so the call is forwarded directly to the kernel implementation.
 */
static inline void z_vrfy_k_busy_wait(uint32_t usec_to_wait)
{
	z_impl_k_busy_wait(usec_to_wait);
}
|
||||
#include <syscalls/k_busy_wait_mrsh.c>
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
|
||||
/* Returns the uptime expiration (relative to an unlocked "now"!) of a
|
||||
* timeout object. When used correctly, this should be called once,
|
||||
* synchronously with the user passing a new timeout value. It should
|
||||
|
|
Loading…
Reference in a new issue