net: buf: Convert net_buf to use k_timeout_t where appropriate

This patch updates the net_buf API to use k_timeout_t in essentially
all places where "s32_t timeout" was previously used. For the most
part the conversion is trivial, except for the places where
intermediate decrements of the remaining timeout are needed. For this the
z_timeout_end_calc() API is used.

Signed-off-by: Johan Hedberg <johan.hedberg@intel.com>
This commit is contained in:
Johan Hedberg 2020-04-03 12:31:26 +03:00 committed by Johan Hedberg
parent 4547bfb452
commit 54ca1c118b
2 changed files with 73 additions and 60 deletions

View file

@@ -786,7 +786,7 @@ struct net_buf {
};
struct net_buf_data_cb {
u8_t * (*alloc)(struct net_buf *buf, size_t *size, s32_t timeout);
u8_t * (*alloc)(struct net_buf *buf, size_t *size, k_timeout_t timeout);
u8_t * (*ref)(struct net_buf *buf, u8_t *data);
void (*unref)(struct net_buf *buf, u8_t *data);
};
@@ -1038,22 +1038,23 @@ int net_buf_id(struct net_buf *buf);
* @param pool Which pool to allocate the buffer from.
* @param timeout Affects the action taken should the pool be empty.
* If K_NO_WAIT, then return immediately. If K_FOREVER, then
* wait as long as necessary. Otherwise, wait up to the specified
* number of milliseconds before timing out. Note that some types
* of data allocators do not support blocking (such as the HEAP
* type). In this case it's still possible for net_buf_alloc() to
* fail (return NULL) even if it was given K_FOREVER.
* wait as long as necessary. Otherwise, wait until the specified
* timeout. Note that some types of data allocators do not support
* blocking (such as the HEAP type). In this case it's still possible
* for net_buf_alloc() to fail (return NULL) even if it was given
* K_FOREVER.
*
* @return New buffer or NULL if out of buffers.
*/
#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_fixed_debug(struct net_buf_pool *pool,
s32_t timeout, const char *func,
k_timeout_t timeout, const char *func,
int line);
#define net_buf_alloc_fixed(_pool, _timeout) \
net_buf_alloc_fixed_debug(_pool, _timeout, __func__, __LINE__)
#else
struct net_buf *net_buf_alloc_fixed(struct net_buf_pool *pool, s32_t timeout);
struct net_buf *net_buf_alloc_fixed(struct net_buf_pool *pool,
k_timeout_t timeout);
#endif
/**
@@ -1070,23 +1071,23 @@ struct net_buf *net_buf_alloc_fixed(struct net_buf_pool *pool, s32_t timeout);
* @param size Amount of data the buffer must be able to fit.
* @param timeout Affects the action taken should the pool be empty.
* If K_NO_WAIT, then return immediately. If K_FOREVER, then
* wait as long as necessary. Otherwise, wait up to the specified
* number of milliseconds before timing out. Note that some types
* of data allocators do not support blocking (such as the HEAP
* type). In this case it's still possible for net_buf_alloc() to
* fail (return NULL) even if it was given K_FOREVER.
* wait as long as necessary. Otherwise, wait until the specified
* timeout. Note that some types of data allocators do not support
* blocking (such as the HEAP type). In this case it's still possible
* for net_buf_alloc() to fail (return NULL) even if it was given
* K_FOREVER.
*
* @return New buffer or NULL if out of buffers.
*/
#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_len_debug(struct net_buf_pool *pool, size_t size,
s32_t timeout, const char *func,
k_timeout_t timeout, const char *func,
int line);
#define net_buf_alloc_len(_pool, _size, _timeout) \
net_buf_alloc_len_debug(_pool, _size, _timeout, __func__, __LINE__)
#else
struct net_buf *net_buf_alloc_len(struct net_buf_pool *pool, size_t size,
s32_t timeout);
k_timeout_t timeout);
#endif
/**
@@ -1100,26 +1101,26 @@ struct net_buf *net_buf_alloc_len(struct net_buf_pool *pool, size_t size,
* @param size Amount of data the pointed data buffer is able to fit.
* @param timeout Affects the action taken should the pool be empty.
* If K_NO_WAIT, then return immediately. If K_FOREVER, then
* wait as long as necessary. Otherwise, wait up to the specified
* number of milliseconds before timing out. Note that some types
* of data allocators do not support blocking (such as the HEAP
* type). In this case it's still possible for net_buf_alloc() to
* fail (return NULL) even if it was given K_FOREVER.
* wait as long as necessary. Otherwise, wait until the specified
* timeout. Note that some types of data allocators do not support
* blocking (such as the HEAP type). In this case it's still possible
* for net_buf_alloc() to fail (return NULL) even if it was given
* K_FOREVER.
*
* @return New buffer or NULL if out of buffers.
*/
#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_with_data_debug(struct net_buf_pool *pool,
void *data, size_t size,
s32_t timeout, const char *func,
int line);
k_timeout_t timeout,
const char *func, int line);
#define net_buf_alloc_with_data(_pool, _data_, _size, _timeout) \
net_buf_alloc_with_data_debug(_pool, _data_, _size, _timeout, \
__func__, __LINE__)
#else
struct net_buf *net_buf_alloc_with_data(struct net_buf_pool *pool,
void *data, size_t size,
s32_t timeout);
k_timeout_t timeout);
#endif
/**
@@ -1128,18 +1129,17 @@ struct net_buf *net_buf_alloc_with_data(struct net_buf_pool *pool,
* @param fifo Which FIFO to take the buffer from.
* @param timeout Affects the action taken should the FIFO be empty.
* If K_NO_WAIT, then return immediately. If K_FOREVER, then wait as
* long as necessary. Otherwise, wait up to the specified number of
* milliseconds before timing out.
* long as necessary. Otherwise, wait until the specified timeout.
*
* @return New buffer or NULL if the FIFO is empty.
*/
#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_get_debug(struct k_fifo *fifo, s32_t timeout,
struct net_buf *net_buf_get_debug(struct k_fifo *fifo, k_timeout_t timeout,
const char *func, int line);
#define net_buf_get(_fifo, _timeout) \
net_buf_get_debug(_fifo, _timeout, __func__, __LINE__)
#else
struct net_buf *net_buf_get(struct k_fifo *fifo, s32_t timeout);
struct net_buf *net_buf_get(struct k_fifo *fifo, k_timeout_t timeout);
#endif
/**
@@ -1243,12 +1243,12 @@ struct net_buf *net_buf_ref(struct net_buf *buf);
* @param buf A valid pointer on a buffer
* @param timeout Affects the action taken should the pool be empty.
* If K_NO_WAIT, then return immediately. If K_FOREVER, then
* wait as long as necessary. Otherwise, wait up to the specified
* number of milliseconds before timing out.
* wait as long as necessary. Otherwise, wait until the specified
* timeout.
*
* @return Cloned buffer or NULL if out of buffers.
*/
struct net_buf *net_buf_clone(struct net_buf *buf, s32_t timeout);
struct net_buf *net_buf_clone(struct net_buf *buf, k_timeout_t timeout);
/**
* @brief Get a pointer to the user data of a buffer.
@@ -1882,12 +1882,13 @@ size_t net_buf_linearize(void *dst, size_t dst_len,
*
* @param timeout Affects the action taken should the net buf pool be empty.
* If K_NO_WAIT, then return immediately. If K_FOREVER, then
* wait as long as necessary. Otherwise, wait up to the specified
* number of milliseconds before timing out.
* wait as long as necessary. Otherwise, wait until the specified
* timeout.
* @param user_data The user data given in net_buf_append_bytes call.
* @return pointer to allocated net_buf or NULL on error.
*/
typedef struct net_buf *(*net_buf_allocator_cb)(s32_t timeout, void *user_data);
typedef struct net_buf *(*net_buf_allocator_cb)(k_timeout_t timeout,
void *user_data);
/**
* @brief Append data to a list of net_buf
@@ -1910,7 +1911,7 @@ typedef struct net_buf *(*net_buf_allocator_cb)(s32_t timeout, void *user_data);
* were no free fragments in a pool to accommodate all data.
*/
size_t net_buf_append_bytes(struct net_buf *buf, size_t len,
const void *value, s32_t timeout,
const void *value, k_timeout_t timeout,
net_buf_allocator_cb allocate_cb, void *user_data);
/**

View file

@@ -93,7 +93,7 @@ static u8_t *generic_data_ref(struct net_buf *buf, u8_t *data)
}
static u8_t *mem_pool_data_alloc(struct net_buf *buf, size_t *size,
s32_t timeout)
k_timeout_t timeout)
{
struct net_buf_pool *buf_pool = net_buf_pool_get(buf->pool_id);
struct k_mem_pool *pool = buf_pool->alloc->alloc_data;
@@ -138,7 +138,8 @@ const struct net_buf_data_cb net_buf_var_cb = {
.unref = mem_pool_data_unref,
};
static u8_t *fixed_data_alloc(struct net_buf *buf, size_t *size, s32_t timeout)
static u8_t *fixed_data_alloc(struct net_buf *buf, size_t *size,
k_timeout_t timeout)
{
struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data;
@@ -160,7 +161,8 @@ const struct net_buf_data_cb net_buf_fixed_cb = {
#if (CONFIG_HEAP_MEM_POOL_SIZE > 0)
static u8_t *heap_data_alloc(struct net_buf *buf, size_t *size, s32_t timeout)
static u8_t *heap_data_alloc(struct net_buf *buf, size_t *size,
k_timeout_t timeout)
{
u8_t *ref_count;
@@ -198,7 +200,7 @@ const struct net_buf_data_alloc net_buf_heap_alloc = {
#endif /* CONFIG_HEAP_MEM_POOL_SIZE > 0 */
static u8_t *data_alloc(struct net_buf *buf, size_t *size, s32_t timeout)
static u8_t *data_alloc(struct net_buf *buf, size_t *size, k_timeout_t timeout)
{
struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
@@ -225,21 +227,20 @@ static void data_unref(struct net_buf *buf, u8_t *data)
#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_len_debug(struct net_buf_pool *pool, size_t size,
s32_t timeout, const char *func,
k_timeout_t timeout, const char *func,
int line)
#else
struct net_buf *net_buf_alloc_len(struct net_buf_pool *pool, size_t size,
s32_t timeout)
k_timeout_t timeout)
#endif
{
u32_t alloc_start = k_uptime_get_32();
u64_t end = z_timeout_end_calc(timeout);
struct net_buf *buf;
unsigned int key;
__ASSERT_NO_MSG(pool);
NET_BUF_DBG("%s():%d: pool %p size %zu timeout %d", func, line, pool,
size, timeout);
NET_BUF_DBG("%s():%d: pool %p size %zu", func, line, pool, size);
/* We need to lock interrupts temporarily to prevent race conditions
* when accessing pool->uninit_count.
@@ -274,7 +275,7 @@ struct net_buf *net_buf_alloc_len(struct net_buf_pool *pool, size_t size,
irq_unlock(key);
#if defined(CONFIG_NET_BUF_LOG) && (CONFIG_NET_BUF_LOG_LEVEL >= LOG_LEVEL_WRN)
if (timeout == K_FOREVER) {
if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
u32_t ref = k_uptime_get_32();
buf = k_lifo_get(&pool->free, K_NO_WAIT);
while (!buf) {
@@ -314,10 +315,15 @@ success:
#if __ASSERT_ON
size_t req_size = size;
#endif
if (timeout != K_NO_WAIT && timeout != K_FOREVER) {
u32_t diff = k_uptime_get_32() - alloc_start;
if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT) &&
!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
s64_t remaining = end - z_tick_get();
timeout -= MIN(timeout, diff);
if (remaining <= 0) {
timeout = K_NO_WAIT;
} else {
timeout = Z_TIMEOUT_TICKS(remaining);
}
}
buf->__buf = data_alloc(buf, &size, timeout);
@@ -348,7 +354,7 @@ success:
#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_fixed_debug(struct net_buf_pool *pool,
s32_t timeout, const char *func,
k_timeout_t timeout, const char *func,
int line)
{
const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data;
@@ -357,7 +363,8 @@ struct net_buf *net_buf_alloc_fixed_debug(struct net_buf_pool *pool,
line);
}
#else
struct net_buf *net_buf_alloc_fixed(struct net_buf_pool *pool, s32_t timeout)
struct net_buf *net_buf_alloc_fixed(struct net_buf_pool *pool,
k_timeout_t timeout)
{
const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data;
@@ -368,12 +375,12 @@ struct net_buf *net_buf_alloc_fixed(struct net_buf_pool *pool, s32_t timeout)
#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_with_data_debug(struct net_buf_pool *pool,
void *data, size_t size,
s32_t timeout, const char *func,
int line)
k_timeout_t timeout,
const char *func, int line)
#else
struct net_buf *net_buf_alloc_with_data(struct net_buf_pool *pool,
void *data, size_t size,
s32_t timeout)
k_timeout_t timeout)
#endif
{
struct net_buf *buf;
@@ -394,10 +401,10 @@ struct net_buf *net_buf_alloc_with_data(struct net_buf_pool *pool,
}
#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_get_debug(struct k_fifo *fifo, s32_t timeout,
struct net_buf *net_buf_get_debug(struct k_fifo *fifo, k_timeout_t timeout,
const char *func, int line)
#else
struct net_buf *net_buf_get(struct k_fifo *fifo, s32_t timeout)
struct net_buf *net_buf_get(struct k_fifo *fifo, k_timeout_t timeout)
#endif
{
struct net_buf *buf, *frag;
@@ -569,9 +576,9 @@ struct net_buf *net_buf_ref(struct net_buf *buf)
return buf;
}
struct net_buf *net_buf_clone(struct net_buf *buf, s32_t timeout)
struct net_buf *net_buf_clone(struct net_buf *buf, k_timeout_t timeout)
{
u32_t alloc_start = k_uptime_get_32();
s64_t end = z_timeout_end_calc(timeout);
struct net_buf_pool *pool;
struct net_buf *clone;
@@ -595,10 +602,15 @@ struct net_buf *net_buf_clone(struct net_buf *buf, s32_t timeout)
} else {
size_t size = buf->size;
if (timeout != K_NO_WAIT && timeout != K_FOREVER) {
u32_t diff = k_uptime_get_32() - alloc_start;
if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT) &&
!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
s64_t remaining = end - z_tick_get();
timeout -= MIN(timeout, diff);
if (remaining <= 0) {
timeout = K_NO_WAIT;
} else {
timeout = Z_TIMEOUT_TICKS(remaining);
}
}
clone->__buf = data_alloc(clone, &size, timeout);
@@ -723,7 +735,7 @@ size_t net_buf_linearize(void *dst, size_t dst_len, struct net_buf *src,
* the buffer. It assumes that the buffer has at least one fragment.
*/
size_t net_buf_append_bytes(struct net_buf *buf, size_t len,
const void *value, s32_t timeout,
const void *value, k_timeout_t timeout,
net_buf_allocator_cb allocate_cb, void *user_data)
{
struct net_buf *frag = net_buf_frag_last(buf);