kernel/timeout: Enable 64 bit timeout precision
Add a CONFIG_TIMEOUT_64BIT kconfig that, when selected, makes the k_ticks_t
used in timeout computations pervasively 64 bit. This will allow much longer
timeouts and much faster (i.e. more precise) tick rates. It also enables the
use of absolute (not delta) timeouts in an upcoming commit.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
commit cfeb07eded (parent 7832738ae9)
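For context, a standalone sketch (not part of this commit) of the arithmetic behind "much longer timeouts" and "much faster (i.e. more precise) tick rates": a 32 bit tick count exhausts its range within days at a fast tick rate, while a signed 64 bit count effectively never wraps. The 10 kHz rate below is an assumed example value, not something this patch configures.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical tick rate, chosen only to make the arithmetic concrete. */
    #define TICKS_PER_SEC 10000LL

    int main(void)
    {
            /* Longest delta expressible in an unsigned 32 bit tick count. */
            int64_t max32_s = (int64_t)UINT32_MAX / TICKS_PER_SEC;

            /* Longest delta expressible in a signed 64 bit tick count. */
            int64_t max64_s = INT64_MAX / TICKS_PER_SEC;

            printf("32 bit ticks at 10 kHz cover ~%lld s (~%lld days)\n",
                   (long long)max32_s, (long long)(max32_s / 86400));
            printf("64 bit ticks at 10 kHz cover ~%lld years\n",
                   (long long)(max64_s / 86400 / 365));
            return 0;
    }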
@@ -604,6 +604,10 @@ if(CONFIG_64BIT)
   set(SYSCALL_LONG_REGISTERS_ARG --long-registers)
 endif()
 
+if(CONFIG_TIMEOUT_64BIT)
+  set(SYSCALL_SPLIT_TIMEOUT_ARG --split-type k_timeout_t)
+endif()
+
 add_custom_command(OUTPUT include/generated/syscall_dispatch.c ${syscall_list_h}
   # Also, some files are written to include/generated/syscalls/
   COMMAND
@@ -614,6 +618,7 @@ add_custom_command(OUTPUT include/generated/syscall_dispatch.c ${syscall_list_h}
   --syscall-dispatch include/generated/syscall_dispatch.c # Write this file
   --syscall-list ${syscall_list_h}
   ${SYSCALL_LONG_REGISTERS_ARG}
+  ${SYSCALL_SPLIT_TIMEOUT_ARG}
   WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
   DEPENDS ${PARSE_SYSCALLS_TARGET}
     ${syscalls_json}
@@ -33,7 +33,20 @@ extern "C" {
  * @{
  */
 
+/**
+ * @brief Tick precision used in timeout APIs
+ *
+ * This type defines the word size of the timeout values used in
+ * k_timeout_t objects, and thus defines an upper bound on maximum
+ * timeout length (or equivalently minimum tick duration). Note that
+ * this does not affect the size of the system uptime counter, which
+ * is always a 64 bit count of ticks.
+ */
+#ifdef CONFIG_TIMEOUT_64BIT
+typedef s64_t k_ticks_t;
+#else
 typedef u32_t k_ticks_t;
+#endif
 
 #define K_TICKS_FOREVER ((k_ticks_t) -1)
 
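An aside on the hunk above (illustration only, not part of the patch): K_TICKS_FOREVER is ((k_ticks_t) -1), so its value and width depend on whether k_ticks_t is the unsigned 32 bit or signed 64 bit type. That is one plausible reading of why the call sites in the hunks below add explicit (s32_t) casts when handing the sentinel back through the legacy 32 bit return paths. A standalone demonstration, with invented stand-in typedefs in place of the real configuration switch:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for k_ticks_t in the two configurations; these names are
     * invented for this sketch only.
     */
    typedef uint32_t ticks32_t;  /* CONFIG_TIMEOUT_64BIT=n */
    typedef int64_t  ticks64_t;  /* CONFIG_TIMEOUT_64BIT=y */

    int main(void)
    {
            ticks32_t forever32 = (ticks32_t)-1;  /* 0xFFFFFFFF */
            ticks64_t forever64 = (ticks64_t)-1;  /* -1 */

            /* Narrowing back to a signed 32 bit type yields -1 in both cases
             * (on the usual two's-complement targets), preserving the
             * traditional "forever" value seen by callers of the old s32_t
             * APIs.
             */
            printf("32 bit sentinel: %u  -> as s32: %d\n",
                   (unsigned)forever32, (int32_t)forever32);
            printf("64 bit sentinel: %lld -> as s32: %d\n",
                   (long long)forever64, (int32_t)forever64);
            return 0;
    }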
@@ -66,7 +66,7 @@ s32_t z_timeout_remaining(struct _timeout *timeout);
 #define z_init_thread_timeout(t) do {} while (false)
 #define z_abort_thread_timeout(t) (0)
 #define z_is_inactive_timeout(t) 0
-#define z_get_next_timeout_expiry() (K_TICKS_FOREVER)
+#define z_get_next_timeout_expiry() ((s32_t) K_TICKS_FOREVER)
 #define z_set_timeout_expiry(t, i) do {} while (false)
 
 static inline void z_add_thread_timeout(struct k_thread *th, k_timeout_t ticks)
@@ -579,6 +579,16 @@ config LEGACY_TIMEOUT_API
 	  (which were s32_t counts of milliseconds), at the cost of
 	  not being able to use new features.
 
+config TIMEOUT_64BIT
+	bool
+	depends on !LEGACY_TIMEOUT_API
+	default y
+	help
+	  When this option is true, the k_ticks_t values passed to
+	  kernel APIs will be a 64 bit quantity, allowing the use of
+	  larger values (and higher precision tick rates) without fear
+	  of overflowing the 32 bit word.
+
 config XIP
 	bool "Execute in place"
 	help
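As a usage note (again an illustration, not from the commit): since TIMEOUT_64BIT is a plain Kconfig bool, C code can key off it either with the preprocessor, as the header hunk above does, or with Zephyr's IS_ENABLED() helper. A minimal sketch, assuming a Zephyr application context of that era where printk() and k_ticks_t are available; the function name is hypothetical.

    #include <zephyr.h>
    #include <sys/printk.h>
    #include <sys/util.h>

    /* Report which tick width the image was built with. Only
     * CONFIG_TIMEOUT_64BIT, IS_ENABLED() and k_ticks_t come from the tree;
     * everything else here is illustrative.
     */
    static void report_tick_width(void)
    {
            if (IS_ENABLED(CONFIG_TIMEOUT_64BIT)) {
                    printk("k_ticks_t is 64 bit (%u bytes)\n",
                           (unsigned int)sizeof(k_ticks_t));
            } else {
                    printk("k_ticks_t is 32 bit (%u bytes)\n",
                           (unsigned int)sizeof(k_ticks_t));
            }
    }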
@@ -1202,7 +1202,7 @@ s32_t z_impl_k_sleep(k_timeout_t timeout)
 
 	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
 		k_thread_suspend(_current);
-		return K_TICKS_FOREVER;
+		return (s32_t) K_TICKS_FOREVER;
 	}
 
 #ifdef CONFIG_LEGACY_TIMEOUT_API
@@ -157,7 +157,7 @@ s32_t z_timeout_remaining(struct _timeout *timeout)
 
 s32_t z_get_next_timeout_expiry(void)
 {
-	s32_t ret = K_TICKS_FOREVER;
+	s32_t ret = (s32_t) K_TICKS_FOREVER;
 
 	LOCKED(&timeout_lock) {
 		ret = next_timeout();
@@ -134,7 +134,8 @@ static int test_multiple_threads_pending(struct timeout_order_data *test_data,
 
 	if (data->timeout_order == ii) {
 		TC_PRINT(" thread (q order: %d, t/o: %d, lifo %p)\n",
-			 data->q_order, data->timeout, data->klifo);
+			 data->q_order, (int) data->timeout,
+			 data->klifo);
 	} else {
 		zassert_equal(data->timeout_order, ii, " *** thread %d "
 			      "woke up, expected %d\n",