net: pkt: Add function for allocating buffers w/o preconditions

Add a new function to allocate additional buffers for a net_pkt, without
any extra preconditions/checks. Just allocate what was requested.

Signed-off-by: Robert Lubos <robert.lubos@nordicsemi.no>
commit 16fd744c13 (parent e6c7a4c968)
Author:    Robert Lubos <robert.lubos@nordicsemi.no>
Date:      2023-11-14 14:31:37 +01:00
Committed: Fabio Baltieri

2 files changed, 86 insertions(+), 0 deletions(-)


@@ -1707,6 +1707,13 @@ int net_pkt_alloc_buffer_debug(struct net_pkt *pkt,
	net_pkt_alloc_buffer_debug(_pkt, _size, _proto, _timeout, \
				   __func__, __LINE__)

int net_pkt_alloc_buffer_raw_debug(struct net_pkt *pkt, size_t size,
				   k_timeout_t timeout,
				   const char *caller, int line);
#define net_pkt_alloc_buffer_raw(_pkt, _size, _timeout) \
	net_pkt_alloc_buffer_raw_debug(_pkt, _size, _timeout, \
				       __func__, __LINE__)

struct net_pkt *net_pkt_alloc_with_buffer_debug(struct net_if *iface,
						size_t size,
						sa_family_t family,
@@ -1821,6 +1828,24 @@ int net_pkt_alloc_buffer(struct net_pkt *pkt,
			 k_timeout_t timeout);
#endif

/**
 * @brief Allocate buffer for a net_pkt, of specified size, w/o any
 *        additional preconditions
 *
 * @details The actual buffer size may be larger than the requested one if
 *          fixed-size buffers are in use.
 *
 * @param pkt The network packet requiring buffer to be allocated.
 * @param size The size of buffer being requested.
 * @param timeout Maximum time to wait for an allocation.
 *
 * @return 0 on success, negative errno code otherwise.
 */
#if !defined(NET_PKT_DEBUG_ENABLED)
int net_pkt_alloc_buffer_raw(struct net_pkt *pkt, size_t size,
			     k_timeout_t timeout);
#endif
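
For context, a minimal usage sketch of the new call (illustrative only, not part of this change; it assumes the existing net_pkt_alloc() and net_pkt_unref() helpers): allocate a bare packet, then attach exactly the requested amount of buffer, w/o any additional preconditions/checks.

	struct net_pkt *pkt = net_pkt_alloc(K_MSEC(100));

	if (pkt != NULL) {
		/* Attach 128 bytes of data buffer to the packet; unlike
		 * net_pkt_alloc_buffer(), no extra checks are applied.
		 */
		if (net_pkt_alloc_buffer_raw(pkt, 128, K_MSEC(100)) < 0) {
			net_pkt_unref(pkt);
			pkt = NULL;
		}
	}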
/**
 * @brief Allocate a network packet and buffer at once
 *


@@ -1197,6 +1197,67 @@ int net_pkt_alloc_buffer(struct net_pkt *pkt,
	return 0;
}

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
int net_pkt_alloc_buffer_raw_debug(struct net_pkt *pkt, size_t size,
				   k_timeout_t timeout, const char *caller,
				   int line)
#else
int net_pkt_alloc_buffer_raw(struct net_pkt *pkt, size_t size,
			     k_timeout_t timeout)
#endif
{
	struct net_buf_pool *pool = NULL;
	struct net_buf *buf;

	if (size == 0) {
		return 0;
	}
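
	/* Never block on the buffer allocation when running in ISR context. */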
	if (k_is_in_isr()) {
		timeout = K_NO_WAIT;
	}

	NET_DBG("Data allocation size %zu", size);
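
	/* Prefer a context-specific data pool when the packet is tied to a
	 * net_context; otherwise fall back to the global TX/RX data pools.
	 */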
	if (pkt->context) {
		pool = get_data_pool(pkt->context);
	}

	if (!pool) {
		pool = pkt->slab == &tx_pkts ? &tx_bufs : &rx_bufs;
	}

#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
	buf = pkt_alloc_buffer(pool, size, timeout, caller, line);
#else
	buf = pkt_alloc_buffer(pool, size, timeout);
#endif

	if (!buf) {
#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
		NET_ERR("Data buffer (%zd) allocation failed (%s:%d)",
			size, caller, line);
#else
		NET_ERR("Data buffer (%zd) allocation failed.", size);
#endif
		return -ENOMEM;
	}

	net_pkt_append_buffer(pkt, buf);

#if IS_ENABLED(CONFIG_NET_BUF_FIXED_DATA_SIZE)
	/* net_buf allocators shrink the buffer size to the requested size.
	 * We don't want this behavior here, so restore the real size of the
	 * last fragment.
	 */
	buf = net_buf_frag_last(buf);
	buf->size = CONFIG_NET_BUF_DATA_SIZE;
#endif

	return 0;
}
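
To illustrate what the size restoration above buys a caller when CONFIG_NET_BUF_FIXED_DATA_SIZE is enabled, a rough sketch (illustrative only; the 10-byte request and the direct pkt->buffer access are assumptions made for the example): because the last fragment keeps its full fixed size, its tailroom is not limited to the amount that was requested.

	if (net_pkt_alloc_buffer_raw(pkt, 10, K_NO_WAIT) == 0) {
		struct net_buf *last = net_buf_frag_last(pkt->buffer);

		/* With fixed-size data buffers, tailroom reflects the full
		 * CONFIG_NET_BUF_DATA_SIZE of the fragment rather than just
		 * the 10 bytes that were requested.
		 */
		size_t tailroom = net_buf_tailroom(last);

		ARG_UNUSED(tailroom);
	}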
#if NET_LOG_LEVEL >= LOG_LEVEL_DBG
static struct net_pkt *pkt_alloc(struct k_mem_slab *slab, k_timeout_t timeout,
				 const char *caller, int line)