/* buf.c - Buffer management */

/*
 * Copyright (c) 2015-2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define LOG_MODULE_NAME net_buf
#define LOG_LEVEL CONFIG_NET_BUF_LOG_LEVEL

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(LOG_MODULE_NAME);

#include <stdio.h>
#include <errno.h>
#include <stddef.h>
#include <string.h>
#include <zephyr/sys/byteorder.h>

#include <zephyr/net/buf.h>

#if defined(CONFIG_NET_BUF_LOG)
#define NET_BUF_DBG(fmt, ...) LOG_DBG("(%p) " fmt, k_current_get(), \
				      ##__VA_ARGS__)
#define NET_BUF_ERR(fmt, ...) LOG_ERR(fmt, ##__VA_ARGS__)
#define NET_BUF_WARN(fmt, ...) LOG_WRN(fmt, ##__VA_ARGS__)
#define NET_BUF_INFO(fmt, ...) LOG_INF(fmt, ##__VA_ARGS__)
#else
#define NET_BUF_DBG(fmt, ...)
#define NET_BUF_ERR(fmt, ...)
#define NET_BUF_WARN(fmt, ...)
#define NET_BUF_INFO(fmt, ...)
#endif /* CONFIG_NET_BUF_LOG */

#define NET_BUF_ASSERT(cond, ...) __ASSERT(cond, "" __VA_ARGS__)

#if CONFIG_NET_BUF_WARN_ALLOC_INTERVAL > 0
#define WARN_ALLOC_INTERVAL K_SECONDS(CONFIG_NET_BUF_WARN_ALLOC_INTERVAL)
#else
#define WARN_ALLOC_INTERVAL K_FOREVER
#endif

/* Linker-defined symbol bound to the static pool structs */
STRUCT_SECTION_START_EXTERN(net_buf_pool);

struct net_buf_pool *net_buf_pool_get(int id)
{
	struct net_buf_pool *pool;

	STRUCT_SECTION_GET(net_buf_pool, id, &pool);

	return pool;
}

static int pool_id(struct net_buf_pool *pool)
{
	return pool - TYPE_SECTION_START(net_buf_pool);
}

int net_buf_id(struct net_buf *buf)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
	size_t struct_size = ROUND_UP(sizeof(struct net_buf) + pool->user_data_size,
				      __alignof__(struct net_buf));
	ptrdiff_t offset = (uint8_t *)buf - (uint8_t *)pool->__bufs;

	return offset / struct_size;
}
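
/* Illustrative sketch (not part of the original sources): pools created with
 * the NET_BUF_POOL_*_DEFINE() macros from <zephyr/net/buf.h> end up in the
 * iterable section that net_buf_pool_get()/pool_id() walk above, and
 * net_buf_id() maps a buffer back to its index inside its own pool. Names
 * and sizes below are invented for the example.
 *
 *   NET_BUF_POOL_DEFINE(my_pool, 4, 64, 0, NULL);
 *
 *   struct net_buf *buf = net_buf_alloc(&my_pool, K_NO_WAIT);
 *   int idx = net_buf_id(buf);   // 0 .. buf_count - 1
 */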

static inline struct net_buf *pool_get_uninit(struct net_buf_pool *pool,
					      uint16_t uninit_count)
{
	size_t struct_size = ROUND_UP(sizeof(struct net_buf) + pool->user_data_size,
				      __alignof__(struct net_buf));
	size_t byte_offset = (pool->buf_count - uninit_count) * struct_size;
	struct net_buf *buf;

	buf = (struct net_buf *)(((uint8_t *)pool->__bufs) + byte_offset);

	buf->pool_id = pool_id(pool);
	buf->user_data_size = pool->user_data_size;

	return buf;
}

void net_buf_reset(struct net_buf *buf)
{
	__ASSERT_NO_MSG(buf->flags == 0U);
	__ASSERT_NO_MSG(buf->frags == NULL);

	net_buf_simple_reset(&buf->b);
}

static uint8_t *generic_data_ref(struct net_buf *buf, uint8_t *data)
{
	uint8_t *ref_count;

	ref_count = data - sizeof(void *);
	(*ref_count)++;

	return data;
}

static uint8_t *mem_pool_data_alloc(struct net_buf *buf, size_t *size,
				    k_timeout_t timeout)
{
	struct net_buf_pool *buf_pool = net_buf_pool_get(buf->pool_id);
	struct k_heap *pool = buf_pool->alloc->alloc_data;
	uint8_t *ref_count;

	/* Reserve extra space for a ref-count (uint8_t), padded to pointer
	 * size so the returned data pointer stays pointer-aligned.
	 */
	void *b = k_heap_alloc(pool, sizeof(void *) + *size, timeout);

	if (b == NULL) {
		return NULL;
	}

	ref_count = (uint8_t *)b;
	*ref_count = 1U;

	/* Return a pointer to the data area, just past the ref-count slot */
	return ref_count + sizeof(void *);
}

static void mem_pool_data_unref(struct net_buf *buf, uint8_t *data)
{
	struct net_buf_pool *buf_pool = net_buf_pool_get(buf->pool_id);
	struct k_heap *pool = buf_pool->alloc->alloc_data;
	uint8_t *ref_count;

	ref_count = data - sizeof(void *);
	if (--(*ref_count)) {
		return;
	}

	/* ref_count points at the start of the heap allocation, so freeing
	 * it releases both the ref count and the data area.
	 */
	k_heap_free(pool, ref_count);
}

const struct net_buf_data_cb net_buf_var_cb = {
	.alloc = mem_pool_data_alloc,
	.ref = generic_data_ref,
	.unref = mem_pool_data_unref,
};

static uint8_t *fixed_data_alloc(struct net_buf *buf, size_t *size,
				 k_timeout_t timeout)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);
	const struct net_buf_pool_fixed *fixed = pool->alloc->alloc_data;

	*size = pool->alloc->max_alloc_size;

	return fixed->data_pool + *size * net_buf_id(buf);
}

static void fixed_data_unref(struct net_buf *buf, uint8_t *data)
{
	/* Nothing needed for fixed-size data pools */
}

const struct net_buf_data_cb net_buf_fixed_cb = {
	.alloc = fixed_data_alloc,
	.unref = fixed_data_unref,
};
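
/* Illustrative sketch (not part of the original sources): a fixed-size pool
 * defined with NET_BUF_POOL_FIXED_DEFINE() routes data allocation through
 * net_buf_fixed_cb above, while NET_BUF_POOL_VAR_DEFINE() uses net_buf_var_cb
 * backed by a k_heap. The argument order shown (name, count, data size,
 * user-data size, destroy callback) is the commonly used one; check
 * <zephyr/net/buf.h> for the exact signatures.
 *
 *   NET_BUF_POOL_FIXED_DEFINE(rx_pool, 8, 128, 0, NULL);
 *   NET_BUF_POOL_VAR_DEFINE(tx_pool, 8, 1024, 0, NULL);
 */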

#if (K_HEAP_MEM_POOL_SIZE > 0)

static uint8_t *heap_data_alloc(struct net_buf *buf, size_t *size,
				k_timeout_t timeout)
{
	uint8_t *ref_count;

	ref_count = k_malloc(sizeof(void *) + *size);
	if (!ref_count) {
		return NULL;
	}

	*ref_count = 1U;

	return ref_count + sizeof(void *);
}

static void heap_data_unref(struct net_buf *buf, uint8_t *data)
{
	uint8_t *ref_count;

	ref_count = data - sizeof(void *);
	if (--(*ref_count)) {
		return;
	}

	k_free(ref_count);
}

static const struct net_buf_data_cb net_buf_heap_cb = {
	.alloc = heap_data_alloc,
	.ref = generic_data_ref,
	.unref = heap_data_unref,
};

const struct net_buf_data_alloc net_buf_heap_alloc = {
	.cb = &net_buf_heap_cb,
	.max_alloc_size = 0,
};

#endif /* K_HEAP_MEM_POOL_SIZE > 0 */
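
/* Layout of a ref-counted data block as produced by heap_data_alloc() and
 * mem_pool_data_alloc() above (illustrative sketch):
 *
 *   +---------------------------+------------------------+
 *   | ref count (uint8_t),      | data (*size bytes)     |
 *   | padded to sizeof(void *)  | <- pointer returned    |
 *   +---------------------------+------------------------+
 *
 * generic_data_ref() and the unref handlers step back by sizeof(void *)
 * from the data pointer to reach the counter.
 */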

static uint8_t *data_alloc(struct net_buf *buf, size_t *size, k_timeout_t timeout)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);

	return pool->alloc->cb->alloc(buf, size, timeout);
}

static uint8_t *data_ref(struct net_buf *buf, uint8_t *data)
{
	struct net_buf_pool *pool = net_buf_pool_get(buf->pool_id);

	return pool->alloc->cb->ref(buf, data);
}

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_len_debug(struct net_buf_pool *pool, size_t size,
					k_timeout_t timeout, const char *func,
					int line)
#else
struct net_buf *net_buf_alloc_len(struct net_buf_pool *pool, size_t size,
				  k_timeout_t timeout)
#endif
{
	k_timepoint_t end = sys_timepoint_calc(timeout);
	struct net_buf *buf;
	k_spinlock_key_t key;

	__ASSERT_NO_MSG(pool);

	NET_BUF_DBG("%s():%d: pool %p size %zu", func, line, pool, size);

	/* We need to prevent race conditions
	 * when accessing pool->uninit_count.
	 */
	key = k_spin_lock(&pool->lock);

	/* If there are uninitialized buffers we're guaranteed to succeed
	 * with the allocation one way or another.
	 */
	if (pool->uninit_count) {
		uint16_t uninit_count;

		/* If this is not the first access to the pool, we can
		 * be opportunistic and try to fetch a previously used
		 * buffer from the LIFO with K_NO_WAIT.
		 */
		if (pool->uninit_count < pool->buf_count) {
			buf = k_lifo_get(&pool->free, K_NO_WAIT);
			if (buf) {
				k_spin_unlock(&pool->lock, key);
				goto success;
			}
		}

		uninit_count = pool->uninit_count--;
		k_spin_unlock(&pool->lock, key);

		buf = pool_get_uninit(pool, uninit_count);
		goto success;
	}

	k_spin_unlock(&pool->lock, key);

#if defined(CONFIG_NET_BUF_LOG) && (CONFIG_NET_BUF_LOG_LEVEL >= LOG_LEVEL_WRN)
	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		uint32_t ref = k_uptime_get_32();

		buf = k_lifo_get(&pool->free, K_NO_WAIT);
		while (!buf) {
#if defined(CONFIG_NET_BUF_POOL_USAGE)
			NET_BUF_WARN("%s():%d: Pool %s low on buffers.",
				     func, line, pool->name);
#else
			NET_BUF_WARN("%s():%d: Pool %p low on buffers.",
				     func, line, pool);
#endif
			buf = k_lifo_get(&pool->free, WARN_ALLOC_INTERVAL);
#if defined(CONFIG_NET_BUF_POOL_USAGE)
			NET_BUF_WARN("%s():%d: Pool %s blocked for %u secs",
				     func, line, pool->name,
				     (k_uptime_get_32() - ref) / MSEC_PER_SEC);
#else
			NET_BUF_WARN("%s():%d: Pool %p blocked for %u secs",
				     func, line, pool,
				     (k_uptime_get_32() - ref) / MSEC_PER_SEC);
#endif
		}
	} else {
		buf = k_lifo_get(&pool->free, timeout);
	}
#else
	buf = k_lifo_get(&pool->free, timeout);
#endif
	if (!buf) {
		NET_BUF_ERR("%s():%d: Failed to get free buffer", func, line);
		return NULL;
	}

success:
	NET_BUF_DBG("allocated buf %p", buf);

	if (size) {
#if __ASSERT_ON
		size_t req_size = size;
#endif
		timeout = sys_timepoint_timeout(end);
		buf->__buf = data_alloc(buf, &size, timeout);
		if (!buf->__buf) {
			NET_BUF_ERR("%s():%d: Failed to allocate data",
				    func, line);
			net_buf_destroy(buf);
			return NULL;
		}

#if __ASSERT_ON
		NET_BUF_ASSERT(req_size <= size);
#endif
	} else {
		buf->__buf = NULL;
	}

	buf->ref = 1U;
	buf->flags = 0U;
	buf->frags = NULL;
	buf->size = size;
	net_buf_reset(buf);

#if defined(CONFIG_NET_BUF_POOL_USAGE)
	atomic_dec(&pool->avail_count);
	__ASSERT_NO_MSG(atomic_get(&pool->avail_count) >= 0);
#endif
	return buf;
}
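
/* Illustrative usage sketch (not part of the original file): allocate a
 * buffer from a pool, append some payload and release it. The pool name and
 * sizes are invented for the example.
 *
 *   NET_BUF_POOL_DEFINE(my_pool, 4, 64, 0, NULL);
 *
 *   void example(void)
 *   {
 *           struct net_buf *buf = net_buf_alloc(&my_pool, K_NO_WAIT);
 *
 *           if (!buf) {
 *                   return;
 *           }
 *           net_buf_add_mem(buf, "hello", 5);
 *           net_buf_unref(buf);
 *   }
 */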

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_fixed_debug(struct net_buf_pool *pool,
					  k_timeout_t timeout, const char *func,
					  int line)
{
	return net_buf_alloc_len_debug(pool, pool->alloc->max_alloc_size, timeout, func,
				       line);
}
#else
struct net_buf *net_buf_alloc_fixed(struct net_buf_pool *pool,
				    k_timeout_t timeout)
{
	return net_buf_alloc_len(pool, pool->alloc->max_alloc_size, timeout);
}
#endif

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_alloc_with_data_debug(struct net_buf_pool *pool,
					      void *data, size_t size,
					      k_timeout_t timeout,
					      const char *func, int line)
#else
struct net_buf *net_buf_alloc_with_data(struct net_buf_pool *pool,
					void *data, size_t size,
					k_timeout_t timeout)
#endif
{
	struct net_buf *buf;

#if defined(CONFIG_NET_BUF_LOG)
	buf = net_buf_alloc_len_debug(pool, 0, timeout, func, line);
#else
	buf = net_buf_alloc_len(pool, 0, timeout);
#endif
	if (!buf) {
		return NULL;
	}

	net_buf_simple_init_with_data(&buf->b, data, size);
	buf->flags = NET_BUF_EXTERNAL_DATA;

	return buf;
}
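
/* Illustrative sketch (not part of the original file): wrap an existing,
 * externally owned byte array without copying it. The array name is invented
 * for the example; the caller must keep ext_data valid for the buffer's
 * lifetime, since the data is flagged as NET_BUF_EXTERNAL_DATA and not freed.
 *
 *   static uint8_t ext_data[64];
 *
 *   struct net_buf *wrap_external(struct net_buf_pool *pool)
 *   {
 *           return net_buf_alloc_with_data(pool, ext_data, sizeof(ext_data),
 *                                          K_NO_WAIT);
 *   }
 */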

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_get_debug(struct k_fifo *fifo, k_timeout_t timeout,
				  const char *func, int line)
#else
struct net_buf *net_buf_get(struct k_fifo *fifo, k_timeout_t timeout)
#endif
{
	struct net_buf *buf;

	NET_BUF_DBG("%s():%d: fifo %p", func, line, fifo);

	buf = k_fifo_get(fifo, timeout);
	if (!buf) {
		return NULL;
	}

	NET_BUF_DBG("%s():%d: buf %p fifo %p", func, line, buf, fifo);

	return buf;
}

static struct k_spinlock net_buf_slist_lock;

void net_buf_slist_put(sys_slist_t *list, struct net_buf *buf)
{
	k_spinlock_key_t key;

	__ASSERT_NO_MSG(list);
	__ASSERT_NO_MSG(buf);

	key = k_spin_lock(&net_buf_slist_lock);
	sys_slist_append(list, &buf->node);
	k_spin_unlock(&net_buf_slist_lock, key);
}

struct net_buf *net_buf_slist_get(sys_slist_t *list)
{
	struct net_buf *buf;
	k_spinlock_key_t key;

	__ASSERT_NO_MSG(list);

	key = k_spin_lock(&net_buf_slist_lock);

	buf = (void *)sys_slist_get(list);

	k_spin_unlock(&net_buf_slist_lock, key);

	return buf;
}
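
/* Illustrative sketch (not part of the original file): pass buffers between
 * contexts through a sys_slist_t protected by the helpers above. Names are
 * invented for the example.
 *
 *   static sys_slist_t pending;
 *
 *   void producer(struct net_buf *buf)
 *   {
 *           net_buf_slist_put(&pending, buf);
 *   }
 *
 *   void consumer(void)
 *   {
 *           struct net_buf *buf = net_buf_slist_get(&pending);
 *
 *           if (buf) {
 *                   net_buf_unref(buf);
 *           }
 *   }
 */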

void net_buf_put(struct k_fifo *fifo, struct net_buf *buf)
{
	__ASSERT_NO_MSG(fifo);
	__ASSERT_NO_MSG(buf);

	k_fifo_put(fifo, buf);
}

#if defined(CONFIG_NET_BUF_LOG)
void net_buf_unref_debug(struct net_buf *buf, const char *func, int line)
#else
void net_buf_unref(struct net_buf *buf)
#endif
{
	__ASSERT_NO_MSG(buf);

	while (buf) {
		struct net_buf *frags = buf->frags;
		struct net_buf_pool *pool;

#if defined(CONFIG_NET_BUF_LOG)
		if (!buf->ref) {
			NET_BUF_ERR("%s():%d: buf %p double free", func, line,
				    buf);
			return;
		}
#endif
		NET_BUF_DBG("buf %p ref %u pool_id %u frags %p", buf, buf->ref,
			    buf->pool_id, buf->frags);

		if (--buf->ref > 0) {
			return;
		}

		buf->data = NULL;
		buf->frags = NULL;

		pool = net_buf_pool_get(buf->pool_id);

#if defined(CONFIG_NET_BUF_POOL_USAGE)
		atomic_inc(&pool->avail_count);
		__ASSERT_NO_MSG(atomic_get(&pool->avail_count) <= pool->buf_count);
#endif

		if (pool->destroy) {
			pool->destroy(buf);
		} else {
			net_buf_destroy(buf);
		}

		buf = frags;
	}
}

struct net_buf *net_buf_ref(struct net_buf *buf)
{
	__ASSERT_NO_MSG(buf);

	NET_BUF_DBG("buf %p (old) ref %u pool_id %u",
		    buf, buf->ref, buf->pool_id);
	buf->ref++;
	return buf;
}

struct net_buf *net_buf_clone(struct net_buf *buf, k_timeout_t timeout)
{
	k_timepoint_t end = sys_timepoint_calc(timeout);
	struct net_buf_pool *pool;
	struct net_buf *clone;

	__ASSERT_NO_MSG(buf);

	pool = net_buf_pool_get(buf->pool_id);

	clone = net_buf_alloc_len(pool, 0, timeout);
	if (!clone) {
		return NULL;
	}

	/* If the pool supports data referencing use that. Otherwise
	 * we need to allocate new data and make a copy.
	 */
	if (pool->alloc->cb->ref && !(buf->flags & NET_BUF_EXTERNAL_DATA)) {
		clone->__buf = data_ref(buf, buf->__buf);
		clone->data = buf->data;
		clone->len = buf->len;
		clone->size = buf->size;
	} else {
		size_t size = buf->size;

		timeout = sys_timepoint_timeout(end);

		clone->__buf = data_alloc(clone, &size, timeout);
		if (!clone->__buf || size < buf->size) {
			net_buf_destroy(clone);
			return NULL;
		}

		clone->size = size;
		clone->data = clone->__buf + net_buf_headroom(buf);
		net_buf_add_mem(clone, buf->data, buf->len);
	}

	return clone;
}
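
/* Illustrative sketch (not part of the original file): duplicate a buffer
 * before handing the original to another consumer. For pools that provide a
 * ref callback the clone shares the original data block; otherwise the
 * payload is copied into newly allocated data.
 *
 *   struct net_buf *duplicate(struct net_buf *buf)
 *   {
 *           return net_buf_clone(buf, K_NO_WAIT);
 *   }
 */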

struct net_buf *net_buf_frag_last(struct net_buf *buf)
{
	__ASSERT_NO_MSG(buf);

	while (buf->frags) {
		buf = buf->frags;
	}

	return buf;
}

void net_buf_frag_insert(struct net_buf *parent, struct net_buf *frag)
{
	__ASSERT_NO_MSG(parent);
	__ASSERT_NO_MSG(frag);

	if (parent->frags) {
		net_buf_frag_last(frag)->frags = parent->frags;
	}
	/* Take ownership of the fragment reference */
	parent->frags = frag;
}

struct net_buf *net_buf_frag_add(struct net_buf *head, struct net_buf *frag)
{
	__ASSERT_NO_MSG(frag);

	if (!head) {
		return net_buf_ref(frag);
	}

	net_buf_frag_insert(net_buf_frag_last(head), frag);

	return head;
}

#if defined(CONFIG_NET_BUF_LOG)
struct net_buf *net_buf_frag_del_debug(struct net_buf *parent,
				       struct net_buf *frag,
				       const char *func, int line)
#else
struct net_buf *net_buf_frag_del(struct net_buf *parent, struct net_buf *frag)
#endif
{
	struct net_buf *next_frag;

	__ASSERT_NO_MSG(frag);

	if (parent) {
		__ASSERT_NO_MSG(parent->frags);
		__ASSERT_NO_MSG(parent->frags == frag);
		parent->frags = frag->frags;
	}

	next_frag = frag->frags;

	frag->frags = NULL;

#if defined(CONFIG_NET_BUF_LOG)
	net_buf_unref_debug(frag, func, line);
#else
	net_buf_unref(frag);
#endif

	return next_frag;
}
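
/* Illustrative sketch (not part of the original file): build a two-fragment
 * chain and release it in one go; net_buf_unref() walks the frags list. Pool
 * name is invented and error handling is omitted for brevity. Note the chain
 * takes over tail's reference, so only head needs an unref here.
 *
 *   struct net_buf *head = net_buf_alloc(&my_pool, K_NO_WAIT);
 *   struct net_buf *tail = net_buf_alloc(&my_pool, K_NO_WAIT);
 *
 *   net_buf_frag_add(head, tail);
 *   net_buf_unref(head);
 */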

size_t net_buf_linearize(void *dst, size_t dst_len, struct net_buf *src,
			 size_t offset, size_t len)
{
	struct net_buf *frag;
	size_t to_copy;
	size_t copied;

	len = MIN(len, dst_len);

	frag = src;

	/* find the right fragment to start copying from */
	while (frag && offset >= frag->len) {
		offset -= frag->len;
		frag = frag->frags;
	}

	/* traverse the fragment chain until len bytes are copied */
	copied = 0;
	while (frag && len > 0) {
		to_copy = MIN(len, frag->len - offset);
		memcpy((uint8_t *)dst + copied, frag->data + offset, to_copy);

		copied += to_copy;

		/* to_copy is always <= len */
		len -= to_copy;
		frag = frag->frags;

		/* after the first iteration, this value will be 0 */
		offset = 0;
	}

	return copied;
}
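
/* Illustrative sketch (not part of the original file): flatten a fragment
 * chain into a contiguous stack buffer, starting at offset 0.
 *
 *   uint8_t flat[256];
 *   size_t n = net_buf_linearize(flat, sizeof(flat), buf, 0, sizeof(flat));
 *
 * n is the number of bytes actually copied, which may be less than requested
 * if the chain or the destination is shorter.
 */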

/* This helper routine appends multiple bytes. If there is no room for the
 * data in the current fragment, a new fragment is created and added to the
 * buffer. It assumes that the buffer has at least one fragment.
 */
size_t net_buf_append_bytes(struct net_buf *buf, size_t len,
			    const void *value, k_timeout_t timeout,
			    net_buf_allocator_cb allocate_cb, void *user_data)
{
	struct net_buf *frag = net_buf_frag_last(buf);
	size_t added_len = 0;
	const uint8_t *value8 = value;
	size_t max_size;

	do {
		uint16_t count = MIN(len, net_buf_tailroom(frag));

		net_buf_add_mem(frag, value8, count);
		len -= count;
		added_len += count;
		value8 += count;

		if (len == 0) {
			return added_len;
		}

		if (allocate_cb) {
			frag = allocate_cb(timeout, user_data);
		} else {
			struct net_buf_pool *pool;

			/* Allocate from the original pool if no callback has
			 * been provided.
			 */
			pool = net_buf_pool_get(buf->pool_id);
			max_size = pool->alloc->max_alloc_size;
			frag = net_buf_alloc_len(pool,
						 max_size ? MIN(len, max_size) : len,
						 timeout);
		}

		if (!frag) {
			return added_len;
		}

		net_buf_frag_add(buf, frag);
	} while (1);

	/* Unreachable */
	return 0;
}
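
/* Illustrative sketch (not part of the original file): append a payload that
 * may span several fragments, letting the function allocate follow-up
 * fragments from the buffer's own pool (no allocator callback).
 *
 *   static const uint8_t payload[300];
 *   size_t written = net_buf_append_bytes(buf, sizeof(payload), payload,
 *                                         K_NO_WAIT, NULL, NULL);
 *
 * written may be smaller than sizeof(payload) if fragment allocation fails.
 */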

size_t net_buf_data_match(const struct net_buf *buf, size_t offset, const void *data, size_t len)
{
	const uint8_t *dptr = data;
	const uint8_t *bptr;
	size_t compared = 0;
	size_t to_compare;

	if (!buf || !data) {
		return compared;
	}

	/* find the right fragment to start comparison */
	while (buf && offset >= buf->len) {
		offset -= buf->len;
		buf = buf->frags;
	}

	while (buf && len > 0) {
		bptr = buf->data + offset;
		to_compare = MIN(len, buf->len - offset);

		for (size_t i = 0; i < to_compare; ++i) {
			if (dptr[compared] != bptr[i]) {
				return compared;
			}
			compared++;
		}

		len -= to_compare;
		buf = buf->frags;
		offset = 0;
	}

	return compared;
}
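
/* Illustrative sketch (not part of the original file): check whether a
 * buffer starts with a fixed header pattern without linearizing it first.
 *
 *   static const uint8_t magic[] = { 0xde, 0xad, 0xbe, 0xef };
 *   bool matches = net_buf_data_match(buf, 0, magic, sizeof(magic)) ==
 *                  sizeof(magic);
 */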