kernel: Remove CONFIG_LEGACY_TIMEOUT_API
This was a fallback for an API change several versions ago. It's time
for it to go.

Fixes: #30893

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
This commit is contained in:
parent 384ad9c3d4
commit e956639dd6
@@ -27,4 +27,3 @@ CONFIG_LINKER_ORPHAN_SECTION_PLACE=y
 CONFIG_COMPILER_OPT="-mcmodel=medlow"
 CONFIG_GPIO=y
 CONFIG_GPIO_ITE_IT8XXX2=y
-CONFIG_LEGACY_TIMEOUT_API=y
@@ -248,38 +248,6 @@ value passed to :c:func:`z_clock_set_timeout` may be clamped to a
 smaller value than the current next timeout when a time sliced thread
 is currently scheduled.
 
-Legacy Usage and Porting Guide
-==============================
-
-In earlier versions of Zephyr, the :c:type:`k_timeout_t` abstraction
-did not exist and timeouts were presented to the kernel as signed
-integer values specified in milliseconds. The :c:macro:`K_FOREVER`
-value was defined with a value of -1.
-
-In general, application code that uses the pre-existing constructor
-macros (:c:macro:`K_MSEC()` et. al.) will continue to work without
-change. Code that presents raw milliseconds to the calls can simply
-wrap the argument in :c:macro:`K_MSEC()`.
-
-Some Zephyr subsystem code, however, was written originally to present
-their own APIs to the user which accept millisecond values (including
-:c:macro:`K_FOREVER`) and take actions like storing the value for
-later, or performing arithmetic on the value. This will no longer
-work unmodified in the new scheme.
-
-One option in the immediate term is to use the
-:c:option:`CONFIG_LEGACY_TIMEOUT_API` kconfig. This redefines the
-:c:type:`k_timeout_t` type to be a 32 bit integer and preserves source
-code compatibility with the older APIs. This comes at the cost of
-disabling newer features like absolute timeouts and 64 bit precision.
-This kconfig exists for application code, however, and will be going
-away in a forthcoming release.
-
-A better scheme is to port the subsystem to the new timeout scheme
-directly. There are two broad architectures for doing this: using
-:cpp:type:`k_timeout_t` naturally as an application API, or preserving the
-millisecond subsystem API and converting internally.
-
 Subsystems that keep millisecond APIs
 -------------------------------------
 
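Note for readers of the guide removed above: a minimal sketch of the application-side porting it described, assuming a Zephyr tree of this era; the function name is illustrative and not part of this commit.

#include <zephyr.h>

/* Code that passed raw millisecond counts now wraps them in the
 * k_timeout_t constructor macros; K_FOREVER remains valid.
 */
void blink_once(void)
{
	/* old style (needed CONFIG_LEGACY_TIMEOUT_API): k_sleep(500); */
	k_sleep(K_MSEC(500));
}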
@@ -97,7 +97,7 @@ Deprecated in this release
 * All timeout values are now encapsulated k_timeout_t opaque structure when
   passing them to the kernel. If you want to revert to the previous s32_t
   type for the timeout parameter, please enable
-  :option:`CONFIG_LEGACY_TIMEOUT_API`
+  CONFIG_LEGACY_TIMEOUT_API
 
 * Bluetooth
 
@@ -54,6 +54,10 @@ API Changes
 * The :c:func:`mqtt_keepalive_time_left` function now returns -1 if keep alive
   messages are disabled by setting ``CONFIG_MQTT_KEEPALIVE`` to 0.
 
+* The ``CONFIG_LEGACY_TIMEOUT_API`` mode has been removed. All kernel
+  timeout usage must use the new-style k_timeout_t type and not the
+  legacy/deprecated millisecond counts.
+
 Deprecated in this release
 ==========================
 
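For subsystems that keep a millisecond-based API, the conversion now has to happen internally rather than via the removed Kconfig. A sketch under that assumption; my_subsys_wait() and its semaphore are hypothetical names, not part of this commit.

#include <zephyr.h>

static struct k_sem my_sem;	/* assume it is initialised elsewhere */

/* Hypothetical subsystem call that still accepts milliseconds (negative
 * meaning "forever") and converts to k_timeout_t at the boundary.
 */
int my_subsys_wait(int32_t timeout_ms)
{
	k_timeout_t t = (timeout_ms < 0) ? K_FOREVER : K_MSEC(timeout_ms);

	return k_sem_take(&my_sem, t);
}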
@@ -29,8 +29,6 @@ extern "C" {
 
 #if defined(CONFIG_NET_OFFLOAD)
 
-#ifndef CONFIG_LEGACY_TIMEOUT_API
-
 /** @cond INTERNAL_HIDDEN */
 
 static inline int32_t timeout_to_int32(k_timeout_t timeout)
@@ -46,8 +44,6 @@ static inline int32_t timeout_to_int32(k_timeout_t timeout)
 
 /** @endcond */
 
-#endif /* CONFIG_LEGACY_TIMEOUT_API */
-
 /** For return parameters and return values of the elements in this
  * struct, see similarly named functions in net_context.h
  */
@@ -247,11 +243,7 @@ static inline int net_offload_connect(struct net_if *iface,
 
 	return net_if_offload(iface)->connect(
 		context, addr, addrlen, cb,
-#ifdef CONFIG_LEGACY_TIMEOUT_API
-		Z_TIMEOUT_MS(timeout),
-#else
 		timeout_to_int32(timeout),
-#endif
 		user_data);
 }
 
@@ -294,11 +286,7 @@ static inline int net_offload_accept(struct net_if *iface,
 
 	return net_if_offload(iface)->accept(
 		context, cb,
-#ifdef CONFIG_LEGACY_TIMEOUT_API
-		Z_TIMEOUT_MS(timeout),
-#else
 		timeout_to_int32(timeout),
-#endif
 		user_data);
 }
 
@@ -340,11 +328,7 @@ static inline int net_offload_send(struct net_if *iface,
 
 	return net_if_offload(iface)->send(
 		pkt, cb,
-#ifdef CONFIG_LEGACY_TIMEOUT_API
-		Z_TIMEOUT_MS(timeout),
-#else
 		timeout_to_int32(timeout),
-#endif
 		user_data);
 }
 
@@ -390,11 +374,7 @@ static inline int net_offload_sendto(struct net_if *iface,
 
 	return net_if_offload(iface)->sendto(
 		pkt, dst_addr, addrlen, cb,
-#ifdef CONFIG_LEGACY_TIMEOUT_API
-		Z_TIMEOUT_MS(timeout),
-#else
 		timeout_to_int32(timeout),
-#endif
 		user_data);
 }
 
@@ -443,11 +423,7 @@ static inline int net_offload_recv(struct net_if *iface,
 
 	return net_if_offload(iface)->recv(
 		context, cb,
-#ifdef CONFIG_LEGACY_TIMEOUT_API
-		Z_TIMEOUT_MS(timeout),
-#else
 		timeout_to_int32(timeout),
-#endif
 		user_data);
 }
 
@@ -50,8 +50,6 @@ typedef uint32_t k_ticks_t;
 
 #define K_TICKS_FOREVER ((k_ticks_t) -1)
 
-#ifndef CONFIG_LEGACY_TIMEOUT_API
-
 /**
  * @brief Kernel timeout type
  *
@@ -107,21 +105,6 @@ typedef struct {
  */
 #define Z_TICK_ABS(t) (K_TICKS_FOREVER - 1 - (t))
 
-#else
-
-/* Legacy timeout API */
-typedef int32_t k_timeout_t;
-#define K_TIMEOUT_EQ(a, b) ((a) == (b))
-#define Z_TIMEOUT_NO_WAIT 0
-#define Z_TIMEOUT_TICKS(t) k_ticks_to_ms_ceil32(t)
-#define Z_FOREVER K_TICKS_FOREVER
-#define Z_TIMEOUT_MS(t) (t)
-#define Z_TIMEOUT_US(t) ((999 + (t)) / 1000)
-#define Z_TIMEOUT_NS(t) ((999999 + (t)) / 1000000)
-#define Z_TIMEOUT_CYC(t) k_cyc_to_ms_ceil32(MAX((t), 0))
-
-#endif
-
 /** @} */
 
 #ifdef CONFIG_TICKLESS_KERNEL
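For context on the branch deleted above: the legacy macros reduced every unit to a millisecond count with ceiling division. A small illustration, with the deleted Z_TIMEOUT_US() body re-declared under a different name purely for the example:

/* Copy of the deleted Z_TIMEOUT_US() body, renamed for illustration. */
#define LEGACY_TIMEOUT_US(t) ((999 + (t)) / 1000)

_Static_assert(LEGACY_TIMEOUT_US(1500) == 2, "1500 us rounded up to 2 ms");
_Static_assert(LEGACY_TIMEOUT_US(1000) == 1, "exact multiples unchanged");
_Static_assert(LEGACY_TIMEOUT_US(1) == 1, "1 us still cost a full ms");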
@@ -595,18 +595,8 @@ config SYS_CLOCK_EXISTS
 	  this is disabled. Obviously timeout-related APIs will not
 	  work.
 
-config LEGACY_TIMEOUT_API
-	bool "Support legacy k_timeout_t API"
-	help
-	  The k_timeout_t API has changed to become an opaque type
-	  that must be initialized with macros. Older applications
-	  can choose this to continue using the old style of timeouts
-	  (which were int32_t counts of milliseconds), at the cost of
-	  not being able to use new features.
-
 config TIMEOUT_64BIT
 	bool "Store kernel timeouts in 64 bit precision"
-	depends on !LEGACY_TIMEOUT_API
 	default y
 	help
 	  When this option is true, the k_ticks_t values passed to
@@ -641,10 +641,6 @@ int k_work_poll_submit_to_queue(struct k_work_q *work_q,
 	__ASSERT(num_events == events_registered,
 		 "Some events were not registered!\n");
 
-#ifdef CONFIG_LEGACY_TIMEOUT_API
-	timeout = k_ms_to_ticks_ceil32(timeout);
-#endif
-
 	/* Setup timeout if such action is requested */
 	if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
 		z_add_timeout(&work->timeout,
@@ -666,9 +666,6 @@ static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q)
 static void add_thread_timeout(struct k_thread *thread, k_timeout_t timeout)
 {
 	if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
-#ifdef CONFIG_LEGACY_TIMEOUT_API
-		timeout = _TICK_ALIGN + k_ms_to_ticks_ceil32(timeout);
-#endif
 		z_add_thread_timeout(thread, timeout);
 	}
 }
@@ -1276,14 +1273,7 @@ static int32_t z_tick_sleep(k_ticks_t ticks)
 		return 0;
 	}
 
-	k_timeout_t timeout;
-
-#ifndef CONFIG_LEGACY_TIMEOUT_API
-	timeout = Z_TIMEOUT_TICKS(ticks);
-#else
-	ticks += _TICK_ALIGN;
-	timeout = ticks;
-#endif
+	k_timeout_t timeout = Z_TIMEOUT_TICKS(ticks);
 
 	expected_wakeup_ticks = ticks + z_tick_get_32();
 
@@ -1322,11 +1312,7 @@ int32_t z_impl_k_sleep(k_timeout_t timeout)
 		return (int32_t) K_TICKS_FOREVER;
 	}
 
-#ifdef CONFIG_LEGACY_TIMEOUT_API
-	ticks = k_ms_to_ticks_ceil32(timeout);
-#else
 	ticks = timeout.ticks;
-#endif
 
 	ticks = z_tick_sleep(ticks);
 	sys_trace_end_call(SYS_TRACE_ID_SLEEP);
@@ -407,10 +407,6 @@ static void schedule_new_thread(struct k_thread *thread, k_timeout_t delay)
 	if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
 		k_thread_start(thread);
 	} else {
-#ifdef CONFIG_LEGACY_TIMEOUT_API
-		delay = _TICK_ALIGN + k_ms_to_ticks_ceil32(delay);
-#endif
-
 		z_add_thread_timeout(thread, delay);
 	}
 #else
@@ -95,15 +95,11 @@ void z_add_timeout(struct _timeout *to, _timeout_func_t fn,
 	__ASSERT_NO_MSG(arch_mem_coherent(to));
 #endif
 
-#ifdef CONFIG_LEGACY_TIMEOUT_API
-	k_ticks_t ticks = timeout;
-#else
 	k_ticks_t ticks = timeout.ticks + 1;
 
 	if (IS_ENABLED(CONFIG_TIMEOUT_64BIT) && Z_TICK_ABS(ticks) >= 0) {
 		ticks = Z_TICK_ABS(ticks) - (curr_tick + elapsed());
 	}
-#endif
 
 	__ASSERT(!sys_dnode_is_linked(&to->node), "");
 	to->fn = fn;
@@ -304,14 +300,10 @@ uint64_t z_timeout_end_calc(k_timeout_t timeout)
 		return z_tick_get();
 	}
 
-#ifdef CONFIG_LEGACY_TIMEOUT_API
-	dt = k_ms_to_ticks_ceil32(timeout);
-#else
 	dt = timeout.ticks;
 
 	if (IS_ENABLED(CONFIG_TIMEOUT_64BIT) && Z_TICK_ABS(dt) >= 0) {
 		return Z_TICK_ABS(dt);
 	}
-#endif
 	return z_tick_get() + MAX(1, dt);
 }
 
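The Z_TICK_ABS() handling kept above is what backs absolute timeouts, one of the features the removed legacy mode disabled. A sketch of the user-facing side, assuming CONFIG_TIMEOUT_64BIT=y; the semaphore and function name are illustrative, not part of this commit.

#include <zephyr.h>

/* Wait on a semaphore until absolute system tick 1000 rather than for a
 * relative delta; z_add_timeout()/z_timeout_end_calc() above decode the
 * Z_TICK_ABS() encoding back into a tick delta.
 */
int wait_until_tick_1000(struct k_sem *sem)
{
	return k_sem_take(sem, K_TIMEOUT_ABS_TICKS(1000));
}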
@@ -113,10 +113,6 @@ void z_impl_k_timer_start(struct k_timer *timer, k_timeout_t duration,
 		return;
 	}
 
-#ifdef CONFIG_LEGACY_TIMEOUT_API
-	duration = k_ms_to_ticks_ceil32(duration);
-	period = k_ms_to_ticks_ceil32(period);
-#else
 	/* z_add_timeout() always adds one to the incoming tick count
 	 * to round up to the next tick (by convention it waits for
 	 * "at least as long as the specified timeout"), but the
@@ -136,7 +132,6 @@ void z_impl_k_timer_start(struct k_timer *timer, k_timeout_t duration,
 	if (Z_TICK_ABS(duration.ticks) < 0) {
 		duration.ticks = MAX(duration.ticks - 1, 0);
 	}
-#endif
 
 	(void)z_abort_timeout(&timer->timeout);
 	timer->period = period;
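The comment retained in the previous hunk explains the one-tick adjustment seen here: z_add_timeout() (see the earlier timeout.c hunk) adds one tick to a relative timeout when queueing it, so k_timer_start() pre-decrements relative durations to keep the first expiry at the requested count. A small illustration with made-up values; the timer is hypothetical.

#include <zephyr.h>

static struct k_timer my_timer;	/* illustrative timer, initialised elsewhere */

void start_ten_ticks(void)
{
	/* duration.ticks = 10 -> timer.c lowers it to 9, timeout.c adds one
	 * back when queueing, so the first expiry lands ~10 ticks out, not 11.
	 */
	k_timer_start(&my_timer, K_TICKS(10), K_NO_WAIT);
}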
@@ -109,10 +109,6 @@ int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
 		return 0;
 	}
 
-#ifdef CONFIG_LEGACY_TIMEOUT_API
-	delay = _TICK_ALIGN + k_ms_to_ticks_ceil32(delay);
-#endif
-
 	/* Add timeout */
 	z_add_timeout(&work->timeout, work_timeout, delay);
 
@@ -77,11 +77,7 @@ static void timeout_reset(void)
 {
 	uint32_t ticks = rand32() % MAX_EVENT_DELAY_TICKS;
 
-#ifdef CONFIG_LEGACY_TIMEOUT_API
-	z_add_timeout(&timeout, dev_timer_expired, ticks);
-#else
 	z_add_timeout(&timeout, dev_timer_expired, Z_TIMEOUT_TICKS(ticks));
-#endif
 }
 
 void message_dev_init(void)