/* buf.c - Buffer management */

/*
 * Copyright (c) 2015 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#define LOG_MODULE_NAME net_buf
#define LOG_LEVEL CONFIG_NET_BUF_LOG_LEVEL

#include <logging/log.h>
LOG_MODULE_REGISTER(LOG_MODULE_NAME);

#include <stdio.h>
#include <errno.h>
#include <stddef.h>
#include <string.h>
#include <sys/byteorder.h>

#include <net/buf.h>

#if defined(CONFIG_NET_BUF_LOG)
#define NET_BUF_DBG(fmt, ...) LOG_DBG("(%p) " fmt, k_current_get(), \
				      ##__VA_ARGS__)
#define NET_BUF_ERR(fmt, ...) LOG_ERR(fmt, ##__VA_ARGS__)
#define NET_BUF_WARN(fmt, ...) LOG_WRN(fmt, ##__VA_ARGS__)
#define NET_BUF_INFO(fmt, ...) LOG_INF(fmt, ##__VA_ARGS__)
#define NET_BUF_ASSERT(cond) do { if (!(cond)) {			  \
			NET_BUF_ERR("assert: '" #cond "' failed");	  \
		} } while (0)
#else
#define NET_BUF_DBG(fmt, ...)
#define NET_BUF_ERR(fmt, ...)
#define NET_BUF_WARN(fmt, ...)
#define NET_BUF_INFO(fmt, ...)
#define NET_BUF_ASSERT(cond)
#endif /* CONFIG_NET_BUF_LOG */

#if CONFIG_NET_BUF_WARN_ALLOC_INTERVAL > 0
#define WARN_ALLOC_INTERVAL K_SECONDS(CONFIG_NET_BUF_WARN_ALLOC_INTERVAL)
#else
#define WARN_ALLOC_INTERVAL K_FOREVER
#endif

/* Linker-defined symbol bound to the static pool structs */
extern struct net_buf_pool _net_buf_pool_list[];

struct net_buf_pool *net_buf_pool_get(int id)
{
	return &_net_buf_pool_list[id];
}

static int pool_id(struct net_buf_pool *pool)
{
	return pool - _net_buf_pool_list;
}

int net_buf_id(struct net_buf *buf)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);

	return buf - pool->__bufs;
}

static inline struct net_buf *pool_get_uninit(struct net_buf_pool *pool,
					      u16_t uninit_count)
{
	struct net_buf *buf;

	buf = &pool->__bufs[pool->buf_count - uninit_count];

	buf->pool_id = pool_id(pool);

	return buf;
}

void net_buf_reset(struct net_buf *buf)
{
	NET_BUF_ASSERT(buf->flags == 0U);
	NET_BUF_ASSERT(buf->frags == NULL);

	net_buf_simple_reset(&buf->b);
}

static u8_t *generic_data_ref(struct net_buf *buf, u8_t *data)
{
	u8_t *ref_count;

	ref_count = data - 1;
	(*ref_count)++;

	return data;
}

static u8_t *mem_pool_data_alloc(struct net_buf *buf, size_t *size,
				 s32_t timeout)
{
	struct net_buf_pool *buf_pool = net_buf_pool_get(buf->pool_id);
	struct k_mem_pool *pool = buf_pool->alloc->alloc_data;
	struct k_mem_block block;
	u8_t *ref_count;

	/* Reserve extra space for k_mem_block_id and ref-count (u8_t) */
	if (k_mem_pool_alloc(pool, &block,
			     sizeof(struct k_mem_block_id) + 1 + *size,
			     timeout)) {
		return NULL;
	}

	/* save the block descriptor info at the start of the actual block */
	memcpy(block.data, &block.id, sizeof(block.id));

	ref_count = (u8_t *)block.data + sizeof(block.id);
	*ref_count = 1U;

	/* Return pointer to the byte following the ref count */
	return ref_count + 1;
}
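
/* Resulting layout of a variable-size data block (a sketch for
 * illustration; the offsets follow directly from the allocation code
 * above):
 *
 *   +------------------+-----------+---------------------+
 *   | k_mem_block_id   | ref count | payload (*size)     |
 *   +------------------+-----------+---------------------+
 *                                  ^
 *                                  returned data pointer
 */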

static void mem_pool_data_unref(struct net_buf *buf, u8_t *data)
{
	struct k_mem_block_id id;
	u8_t *ref_count;

	ref_count = data - 1;
	if (--(*ref_count)) {
		return;
	}

	/* Need to copy to local variable due to alignment */
	memcpy(&id, ref_count - sizeof(id), sizeof(id));
	k_mem_pool_free_id(&id);
}

const struct net_buf_data_cb net_buf_var_cb = {
	.alloc = mem_pool_data_alloc,
	.ref = generic_data_ref,
	.unref = mem_pool_data_unref,
};

static u8_t *fixed_data_alloc(struct net_buf *buf, size_t *size, s32_t timeout)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
	const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data;

	*size = MIN(fixed->data_size, *size);

	return fixed->data_pool + fixed->data_size * net_buf_id(buf);
}

static void fixed_data_unref(struct net_buf *buf, u8_t *data)
{
	/* Nothing needed for fixed-size data pools */
}

const struct net_buf_data_cb net_buf_fixed_cb = {
	.alloc = fixed_data_alloc,
	.unref = fixed_data_unref,
};

#if (CONFIG_HEAP_MEM_POOL_SIZE > 0)

static u8_t *heap_data_alloc(struct net_buf *buf, size_t *size, s32_t timeout)
{
	u8_t *ref_count;

	ref_count = k_malloc(1 + *size);
	if (!ref_count) {
		return NULL;
	}

	*ref_count = 1U;

	return ref_count + 1;
}

static void heap_data_unref(struct net_buf *buf, u8_t *data)
{
	u8_t *ref_count;

	ref_count = data - 1;
	if (--(*ref_count)) {
		return;
	}

	k_free(ref_count);
}

static const struct net_buf_data_cb net_buf_heap_cb = {
	.alloc = heap_data_alloc,
	.ref = generic_data_ref,
	.unref = heap_data_unref,
};

const struct net_buf_data_alloc net_buf_heap_alloc = {
	.cb = &net_buf_heap_cb,
};

#endif /* CONFIG_HEAP_MEM_POOL_SIZE > 0 */

static u8_t *data_alloc(struct net_buf *buf, size_t *size, s32_t timeout)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);

	return pool->alloc->cb->alloc(buf, size, timeout);
}

static u8_t *data_ref(struct net_buf *buf, u8_t *data)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);

	return pool->alloc->cb->ref(buf, data);
}

static void data_unref(struct net_buf *buf, u8_t *data)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);

	if (buf->flags & NET_BUF_EXTERNAL_DATA) {
		return;
	}

	pool->alloc->cb->unref(buf, data);
}

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_len_debug(struct net_buf_pool *pool, size_t size,
					s32_t timeout, const char *func,
					int line)
#else
struct net_buf *net_buf_alloc_len(struct net_buf_pool *pool, size_t size,
				  s32_t timeout)
#endif
{
	u32_t alloc_start = k_uptime_get_32();
	struct net_buf *buf;
	unsigned int key;

	NET_BUF_ASSERT(pool);

	NET_BUF_DBG("%s():%d: pool %p size %zu timeout %d", func, line, pool,
		    size, timeout);

	/* We need to lock interrupts temporarily to prevent race conditions
	 * when accessing pool->uninit_count.
	 */
	key = irq_lock();

	/* If there are uninitialized buffers we're guaranteed to succeed
	 * with the allocation one way or another.
	 */
	if (pool->uninit_count) {
		u16_t uninit_count;

		/* If this is not the first access to the pool, we can
		 * be opportunistic and try to fetch a previously used
		 * buffer from the LIFO with K_NO_WAIT.
		 */
		if (pool->uninit_count < pool->buf_count) {
			buf = k_lifo_get(&pool->free, K_NO_WAIT);
			if (buf) {
				irq_unlock(key);
				goto success;
			}
		}

		uninit_count = pool->uninit_count--;
		irq_unlock(key);

		buf = pool_get_uninit(pool, uninit_count);
		goto success;
	}

	irq_unlock(key);

#if defined(CONFIG_NET_BUF_LOG) && (CONFIG_NET_BUF_LOG_LEVEL >= LOG_LEVEL_WRN)
	if (timeout == K_FOREVER) {
		u32_t ref = k_uptime_get_32();

		buf = k_lifo_get(&pool->free, K_NO_WAIT);
		while (!buf) {
#if defined(CONFIG_NET_BUF_POOL_USAGE)
			NET_BUF_WARN("%s():%d: Pool %s low on buffers.",
				     func, line, pool->name);
#else
			NET_BUF_WARN("%s():%d: Pool %p low on buffers.",
				     func, line, pool);
#endif
			buf = k_lifo_get(&pool->free, WARN_ALLOC_INTERVAL);
#if defined(CONFIG_NET_BUF_POOL_USAGE)
			NET_BUF_WARN("%s():%d: Pool %s blocked for %u secs",
				     func, line, pool->name,
				     (k_uptime_get_32() - ref) / MSEC_PER_SEC);
#else
			NET_BUF_WARN("%s():%d: Pool %p blocked for %u secs",
				     func, line, pool,
				     (k_uptime_get_32() - ref) / MSEC_PER_SEC);
#endif
		}
	} else {
		buf = k_lifo_get(&pool->free, timeout);
	}
#else
	buf = k_lifo_get(&pool->free, timeout);
#endif
	if (!buf) {
		NET_BUF_ERR("%s():%d: Failed to get free buffer", func, line);
		return NULL;
	}

success:
	NET_BUF_DBG("allocated buf %p", buf);

	if (size) {
		if (timeout != K_NO_WAIT && timeout != K_FOREVER) {
			u32_t diff = k_uptime_get_32() - alloc_start;

			timeout -= MIN(timeout, diff);
		}

		buf->__buf = data_alloc(buf, &size, timeout);
		if (!buf->__buf) {
			NET_BUF_ERR("%s():%d: Failed to allocate data",
				    func, line);
			net_buf_destroy(buf);
			return NULL;
		}
	} else {
		buf->__buf = NULL;
	}

	buf->ref = 1U;
	buf->flags = 0U;
	buf->frags = NULL;
	buf->size = size;
	net_buf_reset(buf);

#if defined(CONFIG_NET_BUF_POOL_USAGE)
	pool->avail_count--;
	NET_BUF_ASSERT(pool->avail_count >= 0);
#endif

	return buf;
}
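
/* Example usage (a minimal sketch; "my_pool" and the sizes are
 * hypothetical, but NET_BUF_POOL_DEFINE() and net_buf_alloc() are the
 * standard entry points that end up in the allocator above):
 *
 *   NET_BUF_POOL_DEFINE(my_pool, 10, 128, 0, NULL);
 *
 *   struct net_buf *buf = net_buf_alloc(&my_pool, K_MSEC(100));
 *   if (buf) {
 *           net_buf_add_mem(buf, data, data_len);
 *           ...
 *           net_buf_unref(buf);
 *   }
 */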

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_fixed_debug(struct net_buf_pool *pool,
					  s32_t timeout, const char *func,
					  int line)
{
	const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data;

	return net_buf_alloc_len_debug(pool, fixed->data_size, timeout, func,
				       line);
}
#else
struct net_buf *net_buf_alloc_fixed(struct net_buf_pool *pool, s32_t timeout)
{
	const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data;

	return net_buf_alloc_len(pool, fixed->data_size, timeout);
}
#endif

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_with_data_debug(struct net_buf_pool *pool,
					      void *data, size_t size,
					      s32_t timeout, const char *func,
					      int line)
#else
struct net_buf *net_buf_alloc_with_data(struct net_buf_pool *pool,
					void *data, size_t size,
					s32_t timeout)
#endif
{
	struct net_buf *buf;

#if defined(CONFIG_NET_BUF_LOG)
	buf = net_buf_alloc_len_debug(pool, 0, timeout, func, line);
#else
	buf = net_buf_alloc_len(pool, 0, timeout);
#endif
	if (!buf) {
		return NULL;
	}

	net_buf_simple_init_with_data(&buf->b, data, size);
	buf->flags = NET_BUF_EXTERNAL_DATA;

	return buf;
}
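
/* Example usage (a sketch; the pool and the externally owned "frame"
 * array are hypothetical). The buffer wraps caller-owned memory, which
 * is why data_unref() above deliberately skips NET_BUF_EXTERNAL_DATA
 * buffers:
 *
 *   static u8_t frame[64];
 *
 *   struct net_buf *buf = net_buf_alloc_with_data(&my_pool, frame,
 *                                                 sizeof(frame),
 *                                                 K_NO_WAIT);
 */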

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_get_debug(struct k_fifo *fifo, s32_t timeout,
				  const char *func, int line)
#else
struct net_buf *net_buf_get(struct k_fifo *fifo, s32_t timeout)
#endif
{
	struct net_buf *buf, *frag;

	NET_BUF_DBG("%s():%d: fifo %p timeout %d", func, line, fifo, timeout);

	buf = k_fifo_get(fifo, timeout);
	if (!buf) {
		return NULL;
	}

	NET_BUF_DBG("%s():%d: buf %p fifo %p", func, line, buf, fifo);

	/* Get any fragments belonging to this buffer */
	for (frag = buf; (frag->flags & NET_BUF_FRAGS); frag = frag->frags) {
		frag->frags = k_fifo_get(fifo, K_NO_WAIT);
		NET_BUF_ASSERT(frag->frags);

		/* The fragments flag is only for FIFO-internal usage */
		frag->flags &= ~NET_BUF_FRAGS;
	}

	/* Mark the end of the fragment list */
	frag->frags = NULL;

	return buf;
}

void net_buf_simple_init_with_data(struct net_buf_simple *buf,
				   void *data, size_t size)
{
	buf->__buf = data;
	buf->data = data;
	buf->size = size;
	buf->len = size;
}

void net_buf_simple_reserve(struct net_buf_simple *buf, size_t reserve)
{
	NET_BUF_ASSERT(buf);
	NET_BUF_ASSERT(buf->len == 0U);
	NET_BUF_DBG("buf %p reserve %zu", buf, reserve);

	buf->data = buf->__buf + reserve;
}

void net_buf_slist_put(sys_slist_t *list, struct net_buf *buf)
{
	struct net_buf *tail;
	unsigned int key;

	NET_BUF_ASSERT(list);
	NET_BUF_ASSERT(buf);

	for (tail = buf; tail->frags; tail = tail->frags) {
		tail->flags |= NET_BUF_FRAGS;
	}

	key = irq_lock();
	sys_slist_append_list(list, &buf->node, &tail->node);
	irq_unlock(key);
}

struct net_buf *net_buf_slist_get(sys_slist_t *list)
{
	struct net_buf *buf, *frag;
	unsigned int key;

	NET_BUF_ASSERT(list);

	key = irq_lock();
	buf = (void *)sys_slist_get(list);
	irq_unlock(key);

	if (!buf) {
		return NULL;
	}

	/* Get any fragments belonging to this buffer */
	for (frag = buf; (frag->flags & NET_BUF_FRAGS); frag = frag->frags) {
		key = irq_lock();
		frag->frags = (void *)sys_slist_get(list);
		irq_unlock(key);

		NET_BUF_ASSERT(frag->frags);

		/* The fragments flag is only for list-internal usage */
		frag->flags &= ~NET_BUF_FRAGS;
	}

	/* Mark the end of the fragment list */
	frag->frags = NULL;

	return buf;
}

void net_buf_put(struct k_fifo *fifo, struct net_buf *buf)
{
	struct net_buf *tail;

	NET_BUF_ASSERT(fifo);
	NET_BUF_ASSERT(buf);

	for (tail = buf; tail->frags; tail = tail->frags) {
		tail->flags |= NET_BUF_FRAGS;
	}

	k_fifo_put_list(fifo, buf, tail);
}
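
/* Example usage (a sketch; "tx_fifo" is hypothetical). net_buf_put()
 * flattens a fragment chain into consecutive FIFO entries, and
 * net_buf_get() stitches the chain back together on the other side:
 *
 *   static K_FIFO_DEFINE(tx_fifo);
 *
 *   net_buf_put(&tx_fifo, buf);              // producer thread
 *   buf = net_buf_get(&tx_fifo, K_FOREVER);  // consumer thread
 */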

#if defined(CONFIG_NET_BUF_LOG)
void net_buf_unref_debug(struct net_buf *buf, const char *func, int line)
#else
void net_buf_unref(struct net_buf *buf)
#endif
{
	NET_BUF_ASSERT(buf);

	while (buf) {
		struct net_buf *frags = buf->frags;
		struct net_buf_pool *pool;

#if defined(CONFIG_NET_BUF_LOG)
		if (!buf->ref) {
			NET_BUF_ERR("%s():%d: buf %p double free", func, line,
				    buf);
			return;
		}
#endif
		NET_BUF_DBG("buf %p ref %u pool_id %u frags %p", buf, buf->ref,
			    buf->pool_id, buf->frags);

		if (--buf->ref > 0) {
			return;
		}

		if (buf->__buf) {
			data_unref(buf, buf->__buf);
			buf->__buf = NULL;
		}

		buf->data = NULL;
		buf->frags = NULL;

		pool = net_buf_pool_get(buf->pool_id);

#if defined(CONFIG_NET_BUF_POOL_USAGE)
		pool->avail_count++;
		NET_BUF_ASSERT(pool->avail_count <= pool->buf_count);
#endif

		if (pool->destroy) {
			pool->destroy(buf);
		} else {
			net_buf_destroy(buf);
		}

		buf = frags;
	}
}

struct net_buf *net_buf_ref(struct net_buf *buf)
{
	NET_BUF_ASSERT(buf);

	NET_BUF_DBG("buf %p (old) ref %u pool_id %u",
		    buf, buf->ref, buf->pool_id);
	buf->ref++;
	return buf;
}

struct net_buf *net_buf_clone(struct net_buf *buf, s32_t timeout)
{
	u32_t alloc_start = k_uptime_get_32();
	struct net_buf_pool *pool;
	struct net_buf *clone;

	NET_BUF_ASSERT(buf);

	pool = net_buf_pool_get(buf->pool_id);

	clone = net_buf_alloc_len(pool, 0, timeout);
	if (!clone) {
		return NULL;
	}

	/* If the pool supports data referencing use that. Otherwise
	 * we need to allocate new data and make a copy.
	 */
	if (pool->alloc->cb->ref && !(buf->flags & NET_BUF_EXTERNAL_DATA)) {
		clone->__buf = data_ref(buf, buf->__buf);
		clone->data = buf->data;
		clone->len = buf->len;
		clone->size = buf->size;
	} else {
		size_t size = buf->size;

		if (timeout != K_NO_WAIT && timeout != K_FOREVER) {
			u32_t diff = k_uptime_get_32() - alloc_start;

			timeout -= MIN(timeout, diff);
		}

		clone->__buf = data_alloc(clone, &size, timeout);
		if (!clone->__buf || size < buf->size) {
			net_buf_destroy(clone);
			return NULL;
		}

		clone->size = size;
		clone->data = clone->__buf + net_buf_headroom(buf);
		net_buf_add_mem(clone, buf->data, buf->len);
	}

	return clone;
}
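
/* Example usage (a sketch): cloning is cheap when the pool's data
 * callbacks support reference counting, since both buffers then share
 * the same data block until the last reference is dropped:
 *
 *   struct net_buf *copy = net_buf_clone(buf, K_NO_WAIT);
 *   ...
 *   net_buf_unref(copy);
 */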

struct net_buf *net_buf_frag_last(struct net_buf *buf)
{
	NET_BUF_ASSERT(buf);

	while (buf->frags) {
		buf = buf->frags;
	}

	return buf;
}

void net_buf_frag_insert(struct net_buf *parent, struct net_buf *frag)
{
	NET_BUF_ASSERT(parent);
	NET_BUF_ASSERT(frag);

	if (parent->frags) {
		net_buf_frag_last(frag)->frags = parent->frags;
	}
	/* Take ownership of the fragment reference */
	parent->frags = frag;
}

struct net_buf *net_buf_frag_add(struct net_buf *head, struct net_buf *frag)
{
	NET_BUF_ASSERT(frag);

	if (!head) {
		return net_buf_ref(frag);
	}

	net_buf_frag_insert(net_buf_frag_last(head), frag);

	return head;
}

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_frag_del_debug(struct net_buf *parent,
				       struct net_buf *frag,
				       const char *func, int line)
#else
struct net_buf *net_buf_frag_del(struct net_buf *parent, struct net_buf *frag)
#endif
{
	struct net_buf *next_frag;

	NET_BUF_ASSERT(frag);

	if (parent) {
		NET_BUF_ASSERT(parent->frags);
		NET_BUF_ASSERT(parent->frags == frag);
		parent->frags = frag->frags;
	}

	next_frag = frag->frags;

	frag->frags = NULL;

#if defined(CONFIG_NET_BUF_LOG)
	net_buf_unref_debug(frag, func, line);
#else
	net_buf_unref(frag);
#endif

	return next_frag;
}

/* Copy data from a (possibly fragmented) src chain into dst. The
 * dst_len and len parameters are taken at face value rather than
 * treated as potential errors: never read more than dst_len bytes
 * (that would overflow dst), and reading fewer than len bytes is
 * acceptable, which is why the number of bytes actually copied is
 * returned in the first place.
 */
size_t net_buf_linearize(void *dst, size_t dst_len, struct net_buf *src,
			 size_t offset, size_t len)
{
	struct net_buf *frag;
	size_t to_copy;
	size_t copied;

	len = MIN(len, dst_len);

	frag = src;

	/* find the right fragment to start copying from */
	while (frag && offset >= frag->len) {
		offset -= frag->len;
		frag = frag->frags;
	}

	/* traverse the fragment chain until len bytes are copied */
	copied = 0;
	while (frag && len > 0) {
		to_copy = MIN(len, frag->len - offset);
		memcpy((u8_t *)dst + copied, frag->data + offset, to_copy);

		copied += to_copy;

		/* to_copy is always <= len */
		len -= to_copy;
		frag = frag->frags;

		/* after the first iteration, this value will be 0 */
		offset = 0;
	}

	return copied;
}
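
/* Example usage, the natural case described in the comment above: an
 * Ethernet driver flattening a packet's fragment chain into a
 * contiguous send buffer ("pkt" is hypothetical):
 *
 *   u8_t send_buf[1500];
 *
 *   size_t n = net_buf_linearize(send_buf, sizeof(send_buf),
 *                                pkt->frags, 0, sizeof(send_buf));
 */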

/* This helper routine appends multiple bytes to the buffer; if there is
 * no room for the data in the current fragment, it creates a new
 * fragment via allocate_cb and adds it to the buffer. It assumes that
 * the buffer has at least one fragment.
 */
size_t net_buf_append_bytes(struct net_buf *buf, size_t len,
			    const void *value, s32_t timeout,
			    net_buf_allocator_cb allocate_cb, void *user_data)
{
	struct net_buf *frag = net_buf_frag_last(buf);
	size_t added_len = 0;
	const u8_t *value8 = value;

	do {
		u16_t count = MIN(len, net_buf_tailroom(frag));

		net_buf_add_mem(frag, value8, count);
		len -= count;
		added_len += count;
		value8 += count;

		if (len == 0) {
			return added_len;
		}

		frag = allocate_cb(timeout, user_data);
		if (!frag) {
			return added_len;
		}

		net_buf_frag_add(buf, frag);
	} while (1);

	/* Unreachable */
	return 0;
}
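
/* Example usage (a sketch; the allocator callback and pool are
 * hypothetical). The callback lets the caller decide where overflow
 * fragments come from:
 *
 *   static struct net_buf *alloc_frag(s32_t timeout, void *user_data)
 *   {
 *           return net_buf_alloc(&my_pool, timeout);
 *   }
 *
 *   net_buf_append_bytes(buf, data_len, data, K_NO_WAIT,
 *                        alloc_frag, NULL);
 */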

#if defined(CONFIG_NET_BUF_SIMPLE_LOG)
#define NET_BUF_SIMPLE_DBG(fmt, ...) NET_BUF_DBG(fmt, ##__VA_ARGS__)
#define NET_BUF_SIMPLE_ERR(fmt, ...) NET_BUF_ERR(fmt, ##__VA_ARGS__)
#define NET_BUF_SIMPLE_WARN(fmt, ...) NET_BUF_WARN(fmt, ##__VA_ARGS__)
#define NET_BUF_SIMPLE_INFO(fmt, ...) NET_BUF_INFO(fmt, ##__VA_ARGS__)
#define NET_BUF_SIMPLE_ASSERT(cond) NET_BUF_ASSERT(cond)
#else
#define NET_BUF_SIMPLE_DBG(fmt, ...)
#define NET_BUF_SIMPLE_ERR(fmt, ...)
#define NET_BUF_SIMPLE_WARN(fmt, ...)
#define NET_BUF_SIMPLE_INFO(fmt, ...)
#define NET_BUF_SIMPLE_ASSERT(cond)
#endif /* CONFIG_NET_BUF_SIMPLE_LOG */

void net_buf_simple_clone(const struct net_buf_simple *original,
			  struct net_buf_simple *clone)
{
	memcpy(clone, original, sizeof(struct net_buf_simple));
}

void *net_buf_simple_add(struct net_buf_simple *buf, size_t len)
{
	u8_t *tail = net_buf_simple_tail(buf);

	NET_BUF_SIMPLE_DBG("buf %p len %zu", buf, len);

	NET_BUF_SIMPLE_ASSERT(net_buf_simple_tailroom(buf) >= len);

	buf->len += len;
	return tail;
}

void *net_buf_simple_add_mem(struct net_buf_simple *buf, const void *mem,
			     size_t len)
{
	NET_BUF_SIMPLE_DBG("buf %p len %zu", buf, len);

	return memcpy(net_buf_simple_add(buf, len), mem, len);
}

u8_t *net_buf_simple_add_u8(struct net_buf_simple *buf, u8_t val)
{
	u8_t *u8;

	NET_BUF_SIMPLE_DBG("buf %p val 0x%02x", buf, val);

	u8 = net_buf_simple_add(buf, 1);
	*u8 = val;

	return u8;
}

void net_buf_simple_add_le16(struct net_buf_simple *buf, u16_t val)
{
	NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

	sys_put_le16(val, net_buf_simple_add(buf, sizeof(val)));
}

void net_buf_simple_add_be16(struct net_buf_simple *buf, u16_t val)
{
	NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

	sys_put_be16(val, net_buf_simple_add(buf, sizeof(val)));
}

void net_buf_simple_add_le24(struct net_buf_simple *buf, u32_t val)
{
	NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

	sys_put_le24(val, net_buf_simple_add(buf, 3));
}

void net_buf_simple_add_be24(struct net_buf_simple *buf, u32_t val)
{
	NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

	sys_put_be24(val, net_buf_simple_add(buf, 3));
}

void net_buf_simple_add_le32(struct net_buf_simple *buf, u32_t val)
{
	NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

	sys_put_le32(val, net_buf_simple_add(buf, sizeof(val)));
}

void net_buf_simple_add_be32(struct net_buf_simple *buf, u32_t val)
{
	NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

	sys_put_be32(val, net_buf_simple_add(buf, sizeof(val)));
}
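
/* Example usage (a sketch building a little-endian TLV record; the
 * field values are hypothetical):
 *
 *   NET_BUF_SIMPLE_DEFINE(msg, 16);
 *
 *   net_buf_simple_add_u8(&msg, 0x01);       // type
 *   net_buf_simple_add_u8(&msg, 2);          // length
 *   net_buf_simple_add_le16(&msg, 0x1234);   // value
 */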

void *net_buf_simple_push(struct net_buf_simple *buf, size_t len)
{
	NET_BUF_SIMPLE_DBG("buf %p len %zu", buf, len);

	NET_BUF_SIMPLE_ASSERT(net_buf_simple_headroom(buf) >= len);

	buf->data -= len;
	buf->len += len;
	return buf->data;
}

void net_buf_simple_push_le16(struct net_buf_simple *buf, u16_t val)
{
	NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

	sys_put_le16(val, net_buf_simple_push(buf, sizeof(val)));
}

void net_buf_simple_push_be16(struct net_buf_simple *buf, u16_t val)
{
	NET_BUF_SIMPLE_DBG("buf %p val %u", buf, val);

	sys_put_be16(val, net_buf_simple_push(buf, sizeof(val)));
}

void net_buf_simple_push_u8(struct net_buf_simple *buf, u8_t val)
{
	u8_t *data = net_buf_simple_push(buf, 1);

	*data = val;
}
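
/* Example usage (a sketch): reserve headroom up front, fill in the
 * payload, then push the protocol header in front of it:
 *
 *   NET_BUF_SIMPLE_DEFINE(pdu, 32);
 *
 *   net_buf_simple_reserve(&pdu, 2);          // room for the header
 *   net_buf_simple_add_mem(&pdu, payload, payload_len);
 *   net_buf_simple_push_be16(&pdu, opcode);   // prepend header
 */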

void *net_buf_simple_pull(struct net_buf_simple *buf, size_t len)
{
	NET_BUF_SIMPLE_DBG("buf %p len %zu", buf, len);

	NET_BUF_SIMPLE_ASSERT(buf->len >= len);

	buf->len -= len;
	return buf->data += len;
}

void *net_buf_simple_pull_mem(struct net_buf_simple *buf, size_t len)
{
	void *data = buf->data;

	NET_BUF_SIMPLE_DBG("buf %p len %zu", buf, len);

	NET_BUF_SIMPLE_ASSERT(buf->len >= len);

	buf->len -= len;
	buf->data += len;

	return data;
}

u8_t net_buf_simple_pull_u8(struct net_buf_simple *buf)
{
	u8_t val;

	val = buf->data[0];
	net_buf_simple_pull(buf, 1);

	return val;
}

u16_t net_buf_simple_pull_le16(struct net_buf_simple *buf)
{
	u16_t val;

	val = UNALIGNED_GET((u16_t *)buf->data);
	net_buf_simple_pull(buf, sizeof(val));

	return sys_le16_to_cpu(val);
}

u16_t net_buf_simple_pull_be16(struct net_buf_simple *buf)
{
	u16_t val;

	val = UNALIGNED_GET((u16_t *)buf->data);
	net_buf_simple_pull(buf, sizeof(val));

	return sys_be16_to_cpu(val);
}

u32_t net_buf_simple_pull_le24(struct net_buf_simple *buf)
{
	struct uint24 {
		u32_t u24:24;
	} __packed val;

	val = UNALIGNED_GET((struct uint24 *)buf->data);
	net_buf_simple_pull(buf, sizeof(val));

	return sys_le24_to_cpu(val.u24);
}

u32_t net_buf_simple_pull_be24(struct net_buf_simple *buf)
{
	struct uint24 {
		u32_t u24:24;
	} __packed val;

	val = UNALIGNED_GET((struct uint24 *)buf->data);
	net_buf_simple_pull(buf, sizeof(val));

	return sys_be24_to_cpu(val.u24);
}

u32_t net_buf_simple_pull_le32(struct net_buf_simple *buf)
{
	u32_t val;

	val = UNALIGNED_GET((u32_t *)buf->data);
	net_buf_simple_pull(buf, sizeof(val));

	return sys_le32_to_cpu(val);
}

u32_t net_buf_simple_pull_be32(struct net_buf_simple *buf)
{
	u32_t val;

	val = UNALIGNED_GET((u32_t *)buf->data);
	net_buf_simple_pull(buf, sizeof(val));

	return sys_be32_to_cpu(val);
}

size_t net_buf_simple_headroom(struct net_buf_simple *buf)
{
	return buf->data - buf->__buf;
}

size_t net_buf_simple_tailroom(struct net_buf_simple *buf)
{
	return buf->size - net_buf_simple_headroom(buf) - buf->len;
}