net: Add initial TX and RX traffic class support

With this commit it is possible to assign a priority to sent or received
network packets, so the user is able to send or receive higher priority
packets faster than lower priority ones.
The traffic class support is activated by CONFIG_NET_TC_COUNT option.
The TC support uses work queues to separate the traffic. The
priority of the work queue thread specifies the ordering of the
network traffic. Each work queue thread handles traffic for one specific
traffic class. Note that you should not enable traffic classes unless
your application really needs them: each TC thread needs its own
stack, so this feature requires more memory.

It is possible to disable transmit traffic class support and keep the
receive traffic class support, or vice versa. If both RX and TX traffic
classes are enabled, then both will use the same number of queues
defined by CONFIG_NET_TC_COUNT option.

Fixes #6588

Signed-off-by: Jukka Rissanen <jukka.rissanen@linux.intel.com>
This commit is contained in:
Jukka Rissanen 2018-02-07 15:00:08 +02:00 committed by Anas Nashif
parent 159aaf1740
commit 6049207a29
16 changed files with 600 additions and 211 deletions

View file

@ -170,14 +170,6 @@
__net_if_dev_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
SECTION_DATA_PROLOGUE(net_if_event, (OPTIONAL), SUBALIGN(4))
{
__net_if_event_start = .;
*(".net_if_event.*")
KEEP(*(SORT_BY_NAME(".net_if_event.*")))
__net_if_event_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#if defined(CONFIG_NET_SHELL)
SECTION_DATA_PROLOGUE(net_stack, (OPTIONAL), SUBALIGN(4))
{

View file

@ -169,6 +169,8 @@ struct net_stack_info {
#else /* CONFIG_NET_SHELL */
#define NET_STACK_GET_NAME(pretty, name, sfx) (name)
#define NET_STACK_INFO(...)
#define NET_STACK_INFO_ADDR(...)
@ -216,6 +218,22 @@ static inline void net_analyze_stack(const char *name,
#endif
/* @endcond */
/* Some helper defines for traffic class support */
#if defined(CONFIG_NET_TC_TX_COUNT) && defined(CONFIG_NET_TC_RX_COUNT)
#define NET_TC_TX_COUNT CONFIG_NET_TC_TX_COUNT
#define NET_TC_RX_COUNT CONFIG_NET_TC_RX_COUNT
#if NET_TC_TX_COUNT > NET_TC_RX_COUNT
#define NET_TC_COUNT NET_TC_TX_COUNT
#else
#define NET_TC_COUNT NET_TC_RX_COUNT
#endif
#else /* CONFIG_NET_TC_TX_COUNT && CONFIG_NET_TC_RX_COUNT */
#define NET_TC_TX_COUNT 1
#define NET_TC_RX_COUNT 1
#define NET_TC_COUNT 1
#endif /* CONFIG_NET_TC_TX_COUNT && CONFIG_NET_TC_RX_COUNT */
/**
* @}
*/

View file

@ -298,6 +298,26 @@ struct net_if_config {
#endif /* CONFIG_NET_DHCPV4 */
};
/**
* @brief Network traffic class.
*
* Traffic classes are used when sending or receiving data that is classified
* with different priorities. So some traffic can be marked as high priority
* and it will be sent or received first. There is always at least one work
* queue in the system for Rx and Tx. Each network packet that is transmitted
* or received goes through a work queue thread that will transmit it.
*/
/* One traffic class: a dedicated work queue (with its own thread and
 * stack) that serves packets of a given priority band, for either the
 * Tx or the Rx direction.
 */
struct net_traffic_class {
	/** Work queue that processes Tx or Rx packets for this class */
	struct k_work_q work_q;

	/** Stack used by the work queue thread of this class */
	k_thread_stack_t *stack;

	/** Traffic class value.
	 * NOTE(review): net_tc_tx_init()/net_tc_rx_init() assign the
	 * thread priority here, not the class index — confirm intent.
	 */
	int tc;
};
/**
* @brief Network Interface Device structure
*
@ -327,9 +347,6 @@ struct net_if_dev {
/** The hardware link address */
struct net_linkaddr link_addr;
/** Queue for outgoing packets from apps */
struct k_fifo tx_queue;
/** The hardware MTU */
u16_t mtu;
@ -440,20 +457,7 @@ static inline struct device *net_if_get_device(struct net_if *iface)
* @param iface Pointer to a network interface structure
* @param pkt Pointer to a net packet to queue
*/
static inline void net_if_queue_tx(struct net_if *iface, struct net_pkt *pkt)
{
k_fifo_put(&iface->if_dev->tx_queue, pkt);
}
/**
* @brief Get TX queue of an network interface
*
* @param iface Pointer to a network interface structure
*/
static inline struct k_fifo *net_if_get_queue_tx(struct net_if *iface)
{
return &iface->if_dev->tx_queue;
}
void net_if_queue_tx(struct net_if *iface, struct net_pkt *pkt);
#if defined(CONFIG_NET_OFFLOAD)
/**
@ -1568,8 +1572,6 @@ struct net_if_api {
#define NET_IF_GET_NAME(dev_name, sfx) (__net_if_##dev_name##_##sfx)
#define NET_IF_DEV_GET_NAME(dev_name, sfx) (__net_if_dev_##dev_name##_##sfx)
#define NET_IF_EVENT_GET_NAME(dev_name, sfx) \
(__net_if_event_##dev_name##_##sfx)
#define NET_IF_GET(dev_name, sfx) \
((struct net_if *)&NET_IF_GET_NAME(dev_name, sfx))
@ -1589,11 +1591,7 @@ struct net_if_api {
.if_dev = &(NET_IF_DEV_GET_NAME(dev_name, sfx)), \
NET_IF_CONFIG_INIT \
} \
}; \
static struct k_poll_event \
(NET_IF_EVENT_GET_NAME(dev_name, sfx)) __used \
__attribute__((__section__(".net_if_event.data"))) = {}
}
/* Network device initialization macros */

View file

@ -188,6 +188,18 @@ extern const struct in6_addr in6addr_loopback;
#define NET_IPV6_NEXTHDR_FRAG 44
#define NET_IPV6_NEXTHDR_NONE 59
/** Network packet priority settings described in IEEE 802.1Q Annex I.1.
 * Used as the index into the priority -> traffic class mapping tables;
 * larger traffic class values map to higher priority queues.
 */
enum net_priority {
	NET_PRIORITY_BK = 0, /* Background (lowest) */
	NET_PRIORITY_BE = 1, /* Best effort (default) */
	NET_PRIORITY_EE = 2, /* Excellent effort */
	NET_PRIORITY_CA = 3, /* Critical applications
			      * NOTE(review): source table marks 3 as
			      * "highest", yet 4-7 follow — confirm against
			      * 802.1Q Annex I.1
			      */
	NET_PRIORITY_VI = 4, /* Video, < 100 ms latency and jitter */
	NET_PRIORITY_VO = 5, /* Voice, < 10 ms latency and jitter */
	NET_PRIORITY_IC = 6, /* Internetwork control */
	NET_PRIORITY_NC = 7  /* Network control */
};
/** IPv6/IPv4 network connection tuple */
struct net_tuple {
/** IPv6/IPv4 remote address */
@ -946,6 +958,26 @@ static inline bool net_tcp_seq_greater(u32_t seq1, u32_t seq2)
*/
int net_bytes_from_str(u8_t *buf, int buf_len, const char *src);
/**
* @brief Convert Tx network packet priority to traffic class so we can place
* the packet into correct Tx queue.
*
* @param prio Network priority
*
* @return Tx traffic class that handles that priority network traffic.
*/
int net_tx_priority2tc(enum net_priority prio);
/**
* @brief Convert Rx network packet priority to traffic class so we can place
* the packet into correct Rx queue.
*
* @param prio Network priority
*
* @return Rx traffic class that handles that priority network traffic.
*/
int net_rx_priority2tc(enum net_priority prio);
#ifdef __cplusplus
}
#endif

View file

@ -47,6 +47,9 @@ struct net_pkt {
/** FIFO uses first 4 bytes itself, reserve space */
int _reserved;
/** Internal variable that is used when packet is sent */
struct k_work work;
/** Slab pointer from where it belongs to */
struct k_mem_slab *slab;
@ -132,6 +135,13 @@ struct net_pkt {
u8_t ieee802154_rssi; /* Received Signal Strength Indication */
u8_t ieee802154_lqi; /* Link Quality Indicator */
#endif
#if NET_TC_COUNT > 1
/** Network packet priority, can be left out in which case packet
* is not prioritised.
*/
u8_t priority;
#endif
/* @endcond */
/** Reference counter */
@ -140,6 +150,10 @@ struct net_pkt {
/** @cond ignore */
static inline struct k_work *net_pkt_work(struct net_pkt *pkt)
{
return &pkt->work;
}
/* The interface real ll address */
static inline struct net_linkaddr *net_pkt_ll_if(struct net_pkt *pkt)
@ -377,6 +391,31 @@ static inline void net_pkt_set_ipv6_fragment_id(struct net_pkt *pkt,
#define net_pkt_set_ipv6_ext_len(...)
#endif /* CONFIG_NET_IPV6 */
#if NET_TC_COUNT > 1
/* Get the packet priority (set at allocation time or by the context). */
static inline u8_t net_pkt_priority(struct net_pkt *pkt)
{
	return pkt->priority;
}

/* Set the packet priority; used to pick the Tx/Rx traffic class queue. */
static inline void net_pkt_set_priority(struct net_pkt *pkt,
					u8_t priority)
{
	pkt->priority = priority;
}
#else
/* Single traffic class: every packet reports priority 0. */
static inline u8_t net_pkt_priority(struct net_pkt *pkt)
{
	return 0;
}

/* Single traffic class: priority is not stored, so this is a no-op. */
static inline void net_pkt_set_priority(struct net_pkt *pkt,
					u8_t priority)
{
	ARG_UNUSED(pkt);
	ARG_UNUSED(priority);
}
#endif
static inline size_t net_pkt_get_len(struct net_pkt *pkt)
{
return net_buf_frags_len(pkt->frags);

View file

@ -580,8 +580,7 @@ class SizeCalculator:
"_k_sem_area", "_k_mutex_area", "_k_alert_area",
"_k_fifo_area", "_k_lifo_area", "_k_stack_area",
"_k_msgq_area", "_k_mbox_area", "_k_pipe_area",
"net_if", "net_if_dev", "net_if_event", "net_stack",
"net_l2_data",
"net_if", "net_if_dev", "net_stack", "net_l2_data",
"_k_queue_area", "_net_buf_pool_area", "app_datas",
"kobject_data", "mmu_tables", "app_pad", "priv_stacks",
"ccm_data"]

View file

@ -9,6 +9,7 @@ zephyr_library_sources(
net_core.c
net_if.c
net_pkt.c
net_tc.c
utils.c
)

View file

@ -43,6 +43,44 @@ config NET_SHELL
Activate shell module that provides network commands like
ping to the console.
config NET_TC_TX_COUNT
int "How many Tx traffic classes to have for each network device"
default 1
range 1 8
help
Define how many Tx traffic classes (queues) the system should have
when sending a network packet. The network packet priority can then
be mapped to this traffic class so that higher prioritised packets
can be processed before lower prioritised ones. Each queue is handled
by a separate thread which will need RAM for stack space.
Only increase the value from 1 if you really need this feature.
The default value is 1 which means that all the network traffic is
handled equally. In this implementation, the higher traffic class
value corresponds to lower thread priority.
config NET_TC_RX_COUNT
int "How many Rx traffic classes to have for each network device"
default 1
range 1 8
help
Define how many Rx traffic classes (queues) the system should have
when receiving a network packet. The network packet priority can then
be mapped to this traffic class so that higher prioritised packets
can be processed before lower prioritised ones. Each queue is handled
by a separate thread which will need RAM for stack space.
Only increase the value from 1 if you really need this feature.
The default value is 1 which means that all the network traffic is
handled equally. In this implementation, the higher traffic class
value corresponds to lower thread priority.
config NET_TX_DEFAULT_PRIORITY
int "Default network packet priority if none have been set"
default 1
range 0 7
help
What is the default network packet priority if user has not specified
one. The value 0 means lowest priority and 7 is the highest.
config NET_IP_ADDR_CHECK
bool "Check IP address validity before sending IP packet"
default y

View file

@ -74,6 +74,12 @@ config NET_DEBUG_IF
help
Enables network interface code part to output debug messages
config NET_DEBUG_TC
bool "Debug network traffic class code"
default n
help
Enables network traffic class code part to output debug messages
config NET_DEBUG_UTILS
bool "Debug utility functions in IP stack"
default n

View file

@ -24,7 +24,6 @@ config NET_INITIAL_TTL
config NET_IF_MAX_IPV4_COUNT
int "Max number of IPv4 network interfaces in the system"
default 1
default NET_VLAN_COUNT if NET_VLAN
help
This tells how many network interfaces there will be in the system
that will have IPv4 enabled.

View file

@ -18,7 +18,6 @@ if NET_IPV6
config NET_IF_MAX_IPV6_COUNT
int "Max number of IPv6 network interfaces in the system"
default 1
default NET_VLAN_COUNT if NET_VLAN
help
This tells how many network interfaces there will be in the system
that will have IPv6 enabled.

View file

@ -51,19 +51,6 @@
#include "net_stats.h"
/* Stack for the rx thread.
*/
#if !defined(CONFIG_NET_RX_STACK_SIZE)
#define CONFIG_NET_RX_STACK_SIZE 1024
#endif
NET_STACK_DEFINE(RX, rx_stack, CONFIG_NET_RX_STACK_SIZE,
CONFIG_NET_RX_STACK_SIZE + CONFIG_NET_RX_STACK_RPL);
static struct k_thread rx_thread_data;
static struct k_fifo rx_queue;
static k_tid_t rx_tid;
static K_SEM_DEFINE(startup_sync, 0, UINT_MAX);
static inline enum net_verdict process_data(struct net_pkt *pkt,
bool is_loopback)
{
@ -138,61 +125,25 @@ static void processing_data(struct net_pkt *pkt, bool is_loopback)
}
}
static void net_rx_thread(void)
/* Things to setup after we are able to RX and TX */
static void net_post_init(void)
{
struct net_pkt *pkt;
NET_DBG("Starting RX thread (stack %zu bytes)",
K_THREAD_STACK_SIZEOF(rx_stack));
}
static void init_rx_queues(void)
{
/* Starting TX side. The ordering is important here and the TX
* can only be started when RX side is ready to receive packets.
* We synchronize the startup of the device so that both RX and TX
* are only started fully when both are ready to receive or send
* data.
*/
net_if_init(&startup_sync);
net_if_init();
k_sem_take(&startup_sync, K_FOREVER);
net_tc_rx_init();
/* This will take the interface up and start everything. */
net_if_post_init();
while (1) {
#if defined(CONFIG_NET_STATISTICS) || defined(CONFIG_NET_DEBUG_CORE)
size_t pkt_len;
#endif
pkt = k_fifo_get(&rx_queue, K_FOREVER);
net_analyze_stack("RX thread", K_THREAD_STACK_BUFFER(rx_stack),
K_THREAD_STACK_SIZEOF(rx_stack));
#if defined(CONFIG_NET_STATISTICS) || defined(CONFIG_NET_DEBUG_CORE)
pkt_len = net_pkt_get_len(pkt);
#endif
NET_DBG("Received pkt %p len %zu", pkt, pkt_len);
net_stats_update_bytes_recv(pkt_len);
processing_data(pkt, false);
net_print_statistics();
net_pkt_print();
k_yield();
}
}
static void init_rx_queue(void)
{
k_fifo_init(&rx_queue);
rx_tid = k_thread_create(&rx_thread_data, rx_stack,
K_THREAD_STACK_SIZEOF(rx_stack),
(k_thread_entry_t)net_rx_thread,
NULL, NULL, NULL, K_PRIO_COOP(8),
K_ESSENTIAL, K_NO_WAIT);
/* Things to init after network interface is working */
net_post_init();
}
/* If loopback driver is enabled, then direct packets to it so the address
@ -330,6 +281,45 @@ int net_send_data(struct net_pkt *pkt)
return 0;
}
/* Process one received packet: account statistics and run it through
 * the L2/L3 input path. Runs in the context of an Rx traffic class
 * work queue thread.
 * NOTE(review): iface is currently unused here — the packet's own
 * iface pointer is used downstream; confirm this is intentional.
 */
static void net_rx(struct net_if *iface, struct net_pkt *pkt)
{
	/* pkt_len only exists when statistics or core debugging need it;
	 * NET_DBG()/net_stats_update_bytes_recv() compile out otherwise.
	 */
#if defined(CONFIG_NET_STATISTICS) || defined(CONFIG_NET_DEBUG_CORE)
	size_t pkt_len = net_pkt_get_len(pkt);
#endif

	NET_DBG("Received pkt %p len %zu", pkt, pkt_len);

	net_stats_update_bytes_recv(pkt_len);

	processing_data(pkt, false);

	net_print_statistics();
	net_pkt_print();
}
/* Work queue handler for received packets. The k_work item is embedded
 * inside the net_pkt, so the packet is recovered from the work pointer
 * and handed to the Rx processing path.
 */
static void process_rx_packet(struct k_work *work)
{
	struct net_pkt *pkt = CONTAINER_OF(work, struct net_pkt, work);

	net_rx(net_pkt_iface(pkt), pkt);
}
/* Queue a received packet to the Rx work queue that serves its
 * traffic class. The packet's embedded work item is (re)initialized
 * here before submission.
 * NOTE(review): iface is unused in this function — confirm whether it
 * should be recorded or the parameter dropped.
 */
static void net_queue_rx(struct net_if *iface, struct net_pkt *pkt)
{
	u8_t prio = net_pkt_priority(pkt);
	u8_t tc = net_rx_priority2tc(prio);

	k_work_init(net_pkt_work(pkt), process_rx_packet);

#if NET_TC_RX_COUNT > 1
	NET_DBG("TC %d with prio %d pkt %p", tc, prio, pkt);
#endif

	net_tc_submit_to_rx_queue(tc, pkt);
}
/* Called by driver when an IP packet has been received */
int net_recv_data(struct net_if *iface, struct net_pkt *pkt)
{
@ -344,8 +334,8 @@ int net_recv_data(struct net_if *iface, struct net_pkt *pkt)
return -ENETDOWN;
}
NET_DBG("fifo %p iface %p pkt %p len %zu", &rx_queue, iface, pkt,
net_pkt_get_len(pkt));
NET_DBG("prio %d iface %p pkt %p len %zu", net_pkt_priority(pkt),
iface, pkt, net_pkt_get_len(pkt));
if (IS_ENABLED(CONFIG_NET_ROUTING)) {
net_pkt_set_orig_iface(pkt, iface);
@ -353,7 +343,7 @@ int net_recv_data(struct net_if *iface, struct net_pkt *pkt)
net_pkt_set_iface(pkt, iface);
k_fifo_put(&rx_queue, pkt);
net_queue_rx(iface, pkt);
return 0;
}
@ -401,7 +391,7 @@ static int net_init(struct device *unused)
net_mgmt_event_init();
init_rx_queue();
init_rx_queues();
#if CONFIG_NET_DHCPV4
status = dhcpv4_init();

View file

@ -40,8 +40,8 @@
extern struct net_if __net_if_start[];
extern struct net_if __net_if_end[];
extern struct k_poll_event __net_if_event_start[];
extern struct k_poll_event __net_if_event_stop[];
extern struct net_if_dev __net_if_dev_start[];
extern struct net_if_dev __net_if_dev_end[];
static struct net_if_router routers[CONFIG_NET_MAX_ROUTERS];
@ -69,19 +69,17 @@ static sys_slist_t link_callbacks;
static sys_slist_t mcast_monitor_callbacks;
#endif
NET_STACK_DEFINE(TX, tx_stack, CONFIG_NET_TX_STACK_SIZE,
CONFIG_NET_TX_STACK_SIZE);
static struct k_thread tx_thread_data;
#if defined(CONFIG_NET_DEBUG_IF)
#define debug_check_packet(pkt) \
{ \
size_t len = net_pkt_get_len(pkt); \
\
NET_DBG("Processing (pkt %p, data len %zu) network packet", \
pkt, len); \
\
NET_ASSERT(pkt->frags && len); \
#define debug_check_packet(pkt) \
{ \
size_t len = net_pkt_get_len(pkt); \
\
NET_DBG("Processing (pkt %p, data len %zu, " \
"prio %d) network packet", \
pkt, len, \
net_pkt_priority(pkt)); \
\
NET_ASSERT(pkt->frags && len); \
} while (0)
#else
#define debug_check_packet(...)
@ -112,19 +110,17 @@ static inline void net_context_send_cb(struct net_context *context,
}
}
static bool net_if_tx(struct net_if *iface)
static bool net_if_tx(struct net_if *iface, struct net_pkt *pkt)
{
const struct net_if_api *api = net_if_get_device(iface)->driver_api;
struct net_linkaddr *dst;
struct net_context *context;
struct net_pkt *pkt;
void *context_token;
int status;
#if defined(CONFIG_NET_STATISTICS)
size_t pkt_len;
#endif
pkt = k_fifo_get(net_if_get_queue_tx(iface), K_NO_WAIT);
if (!pkt) {
return false;
}
@ -176,97 +172,27 @@ static bool net_if_tx(struct net_if *iface)
return true;
}
static void net_if_flush_tx(struct net_if *iface)
static void process_tx_packet(struct k_work *work)
{
if (k_fifo_is_empty(net_if_get_queue_tx(iface))) {
return;
}
struct net_pkt *pkt;
/* Without this, the k_fifo_get() can return a pkt which
* has pkt->frags set to NULL. This is not allowed as we
* cannot send a packet that has no data in it.
* The k_yield() fixes the issue and packets are flushed
* correctly.
*/
k_yield();
pkt = CONTAINER_OF(work, struct net_pkt, work);
while (1) {
if (!net_if_tx(iface)) {
break;
}
}
net_if_tx(net_pkt_iface(pkt), pkt);
}
static void iface_tx_cb(struct net_if *iface, void *user_data)
void net_if_queue_tx(struct net_if *iface, struct net_pkt *pkt)
{
struct net_if_dev *if_dev = user_data;
u8_t prio = net_pkt_priority(pkt);
u8_t tc = net_tx_priority2tc(prio);
if (iface->if_dev == if_dev) {
net_if_tx(iface);
}
}
k_work_init(net_pkt_work(pkt), process_tx_packet);
static void net_if_process_events(struct k_poll_event *event, int ev_count)
{
for (; ev_count; event++, ev_count--) {
switch (event->state) {
case K_POLL_STATE_SIGNALED:
break;
case K_POLL_STATE_FIFO_DATA_AVAILABLE:
{
struct net_if_dev *if_dev;
#if NET_TC_TX_COUNT > 1
NET_DBG("TC %d with prio %d pkt %p", tc, prio, pkt);
#endif
if_dev = CONTAINER_OF(event->fifo, struct net_if_dev,
tx_queue);
net_if_foreach(iface_tx_cb, if_dev);
break;
}
case K_POLL_STATE_NOT_READY:
break;
default:
break;
}
}
}
static int net_if_prepare_events(void)
{
struct net_if *iface;
int ev_count = 0;
for (iface = __net_if_start; iface != __net_if_end; iface++) {
k_poll_event_init(&__net_if_event_start[ev_count],
K_POLL_TYPE_FIFO_DATA_AVAILABLE,
K_POLL_MODE_NOTIFY_ONLY,
net_if_get_queue_tx(iface));
ev_count++;
}
return ev_count;
}
static void net_if_tx_thread(struct k_sem *startup_sync)
{
NET_DBG("Starting TX thread (stack %d bytes)",
CONFIG_NET_TX_STACK_SIZE);
/* This will allow RX thread to start to receive data. */
k_sem_give(startup_sync);
while (1) {
int ev_count, ret;
ev_count = net_if_prepare_events();
ret = k_poll(__net_if_event_start, ev_count, K_FOREVER);
NET_ASSERT(ret == 0);
net_if_process_events(__net_if_event_start, ev_count);
k_yield();
}
net_tc_submit_to_tx_queue(tc, pkt);
}
static inline void init_iface(struct net_if *iface)
@ -277,8 +203,6 @@ static inline void init_iface(struct net_if *iface)
NET_DBG("On iface %p", iface);
k_fifo_init(net_if_get_queue_tx(iface));
api->init(iface);
}
@ -2175,8 +2099,6 @@ void net_if_carrier_down(struct net_if *iface)
atomic_clear_bit(iface->if_dev->flags, NET_IF_UP);
net_if_flush_tx(iface);
net_mgmt_event_notify(NET_EVENT_IF_DOWN, iface);
}
@ -2188,8 +2110,6 @@ int net_if_down(struct net_if *iface)
leave_mcast_all(iface);
net_if_flush_tx(iface);
/* If the L2 does not support enable just clear the flag */
if (!net_if_l2(iface)->enable) {
goto done;
@ -2209,13 +2129,15 @@ done:
return 0;
}
void net_if_init(struct k_sem *startup_sync)
void net_if_init(void)
{
struct net_if *iface;
int i, if_count;
NET_DBG("");
net_tc_tx_init();
for (iface = __net_if_start, if_count = 0; iface != __net_if_end;
iface++, if_count++) {
init_iface(iface);
@ -2261,12 +2183,6 @@ void net_if_init(struct k_sem *startup_sync)
#endif
}
#endif /* CONFIG_NET_IPV6 */
k_thread_create(&tx_thread_data, tx_stack,
K_THREAD_STACK_SIZEOF(tx_stack),
(k_thread_entry_t)net_if_tx_thread,
startup_sync, NULL, NULL, K_PRIO_COOP(7),
K_ESSENTIAL, K_NO_WAIT);
}
void net_if_post_init(void)

View file

@ -318,6 +318,10 @@ struct net_pkt *net_pkt_get_reserve(struct k_mem_slab *slab,
pkt->ref = 1;
pkt->slab = slab;
#if defined(CONFIG_NET_TX_DEFAULT_PRIORITY) && (NET_TC_COUNT > 1)
net_pkt_set_priority(pkt, CONFIG_NET_TX_DEFAULT_PRIORITY);
#endif
#if defined(CONFIG_NET_DEBUG_NET_PKT)
net_pkt_alloc_add(pkt, true, caller, line);
@ -529,6 +533,17 @@ static struct net_pkt *net_pkt_get(struct k_mem_slab *slab,
family = net_context_get_family(context);
net_pkt_set_family(pkt, family);
#if defined(CONFIG_NET_CONTEXT_PRIORITY) && (NET_TC_COUNT > 1)
{
u8_t prio;
if (net_context_get_option(context, NET_OPT_PRIORITY, &prio,
NULL) == 0) {
net_pkt_set_priority(pkt, prio);
}
}
#endif /* CONFIG_NET_CONTEXT_PRIORITY */
if (slab != &rx_pkts) {
uint16_t iface_len, data_len = 0;
enum net_ip_protocol proto;

View file

@ -16,13 +16,17 @@
#include <net/net_pkt.h>
extern void net_pkt_init(void);
extern void net_if_init(struct k_sem *startup_sync);
extern void net_if_init(void);
extern void net_if_post_init(void);
extern void net_if_carrier_down(struct net_if *iface);
extern void net_context_init(void);
enum net_verdict net_ipv4_process_pkt(struct net_pkt *pkt);
enum net_verdict net_ipv6_process_pkt(struct net_pkt *pkt);
extern void net_ipv6_init(void);
extern void net_tc_tx_init(void);
extern void net_tc_rx_init(void);
extern void net_tc_submit_to_tx_queue(u8_t tc, struct net_pkt *pkt);
extern void net_tc_submit_to_rx_queue(u8_t tc, struct net_pkt *pkt);
#if defined(CONFIG_NET_IPV6_FRAGMENT)
int net_ipv6_send_fragmented_pkt(struct net_if *iface, struct net_pkt *pkt,

343
subsys/net/ip/net_tc.c Normal file
View file

@ -0,0 +1,343 @@
/*
* Copyright (c) 2018 Intel Corporation.
*
* SPDX-License-Identifier: Apache-2.0
*/
#if defined(CONFIG_NET_DEBUG_TC)
#define SYS_LOG_DOMAIN "net/tc"
#define NET_LOG_ENABLED 1
#endif
#include <zephyr.h>
#include <string.h>
#include <net/net_core.h>
#include <net/net_pkt.h>
#include <net/net_stats.h>
#include "net_private.h"
#include "net_stats.h"
/* Stacks for TX work queue */
NET_STACK_ARRAY_DEFINE(TX, tx_stack,
CONFIG_NET_TX_STACK_SIZE,
CONFIG_NET_TX_STACK_SIZE,
NET_TC_TX_COUNT);
/* Stacks for RX work queue */
NET_STACK_ARRAY_DEFINE(RX, rx_stack,
CONFIG_NET_RX_STACK_SIZE,
CONFIG_NET_RX_STACK_SIZE + CONFIG_NET_RX_STACK_RPL,
NET_TC_RX_COUNT);
static struct net_traffic_class tx_classes[NET_TC_TX_COUNT];
static struct net_traffic_class rx_classes[NET_TC_RX_COUNT];
/* Hand the packet's embedded work item to the work queue serving the
 * given Tx traffic class. The caller must have initialized the work
 * item (see net_if_queue_tx()).
 */
void net_tc_submit_to_tx_queue(u8_t tc, struct net_pkt *pkt)
{
	struct k_work_q *work_q = &tx_classes[tc].work_q;

	k_work_submit_to_queue(work_q, net_pkt_work(pkt));
}
/* Hand the packet's embedded work item to the work queue serving the
 * given Rx traffic class. The caller must have initialized the work
 * item (see net_queue_rx()).
 */
void net_tc_submit_to_rx_queue(u8_t tc, struct net_pkt *pkt)
{
	struct k_work_q *work_q = &rx_classes[tc].work_q;

	k_work_submit_to_queue(work_q, net_pkt_work(pkt));
}
int net_tx_priority2tc(enum net_priority prio)
{
/* FIXME: Initial implementation just maps the priority to certain
* traffic class to certain queue. This needs to be made more generic.
*
* Use the example priority -> traffic class mapper found in
* IEEE 802.1Q chapter 8.6.6 table 8.4 and chapter 34.5 table 34-1
*
* Priority Acronym Traffic types
* 0 (lowest) BK Background
* 1 (default) BE Best effort
* 2 EE Excellent effort
* 3 (highest) CA Critical applications
* 4 VI Video, < 100 ms latency and jitter
* 5 VO Voice, < 10 ms latency and jitter
* 6 IC Internetwork control
* 7 NC Network control
*/
/* Priority is the index to this array */
static const int tc[] = {
#if NET_TC_TX_COUNT == 1
0, 0, 0, 0, 0, 0, 0, 0
#endif
#if NET_TC_TX_COUNT == 2
0, 0, 1, 1, 0, 0, 0, 0
#endif
#if NET_TC_TX_COUNT == 3
0, 0, 1, 2, 0, 0, 0, 0
#endif
#if NET_TC_TX_COUNT == 4
0, 0, 2, 3, 1, 1, 1, 1
#endif
#if NET_TC_TX_COUNT == 5
0, 0, 3, 4, 1, 1, 2, 2
#endif
#if NET_TC_TX_COUNT == 6
0, 0, 4, 5, 1, 1, 2, 3
#endif
#if NET_TC_TX_COUNT == 7
0, 0, 5, 6, 1, 2, 3, 4
#endif
#if NET_TC_TX_COUNT == 8
0, 1, 6, 7, 2, 3, 4, 5
#endif
};
if (prio >= sizeof(tc)) {
/* Use default value suggested in 802.1Q */
prio = NET_PRIORITY_BE;
}
return tc[prio];
}
int net_rx_priority2tc(enum net_priority prio)
{
/* FIXME: Initial implementation just maps the priority to certain
* traffic class to certain queue. This needs to be made more generic.
*
* Use the example priority -> traffic class mapper found in
* IEEE 802.1Q chapter 8.6.6 table 8.4 and chapter 34.5 table 34-1
*
* Priority Acronym Traffic types
* 0 (lowest) BK Background
* 1 (default) BE Best effort
* 2 EE Excellent effort
* 3 (highest) CA Critical applications
* 4 VI Video, < 100 ms latency and jitter
* 5 VO Voice, < 10 ms latency and jitter
* 6 IC Internetwork control
* 7 NC Network control
*/
/* Priority is the index to this array */
static const int tc[] = {
#if NET_TC_RX_COUNT == 1
0, 0, 0, 0, 0, 0, 0, 0
#endif
#if NET_TC_RX_COUNT == 2
0, 0, 1, 1, 0, 0, 0, 0
#endif
#if NET_TC_RX_COUNT == 3
0, 0, 1, 2, 0, 0, 0, 0
#endif
#if NET_TC_RX_COUNT == 4
0, 0, 2, 3, 1, 1, 1, 1
#endif
#if NET_TC_RX_COUNT == 5
0, 0, 3, 4, 1, 1, 2, 2
#endif
#if NET_TC_RX_COUNT == 6
0, 0, 4, 5, 1, 1, 2, 3
#endif
#if NET_TC_RX_COUNT == 7
0, 0, 5, 6, 1, 2, 3, 4
#endif
#if NET_TC_RX_COUNT == 8
0, 1, 6, 7, 2, 3, 4, 5
#endif
};
if (prio >= sizeof(tc)) {
/* Use default value suggested in 802.1Q */
prio = NET_PRIORITY_BE;
}
return tc[prio];
}
/* Convert traffic class to thread priority */
/* Convert a Tx traffic class index to the cooperative thread priority
 * value passed to K_PRIO_COOP() for that class's work queue thread.
 *
 * @param tc Tx traffic class index in [0, NET_TC_TX_COUNT - 1]
 *
 * @return Value for K_PRIO_COOP(); lower value means higher priority.
 */
static int tx_tc2thread(int tc)
{
	/* Initial implementation just maps the traffic class to certain queue.
	 * If there are less queues than classes, then map them into
	 * some specific queue. In order to make this work same way as before,
	 * the thread priority 7 is used to map the default traffic class so
	 * this system works same way as before when TX thread default priority
	 * was 7.
	 *
	 * Lower value in this table means higher thread priority. The
	 * value is used as a parameter to K_PRIO_COOP() which converts it
	 * to actual thread priority.
	 *
	 * Higher traffic class value means higher priority queue. This means
	 * that thread_priorities[7] value should contain the highest priority
	 * for the TX queue handling thread.
	 */
	static const int thread_priorities[] = {
#if NET_TC_TX_COUNT == 1
		7
#endif
#if NET_TC_TX_COUNT == 2
		8, 7
#endif
#if NET_TC_TX_COUNT == 3
		8, 7, 6
#endif
#if NET_TC_TX_COUNT == 4
		8, 7, 6, 5
#endif
#if NET_TC_TX_COUNT == 5
		8, 7, 6, 5, 4
#endif
#if NET_TC_TX_COUNT == 6
		8, 7, 6, 5, 4, 3
#endif
#if NET_TC_TX_COUNT == 7
		8, 7, 6, 5, 4, 3, 2
#endif
#if NET_TC_TX_COUNT == 8
		8, 7, 6, 5, 4, 3, 2, 1
#endif
	};

	BUILD_ASSERT_MSG(NET_TC_TX_COUNT <= CONFIG_NUM_COOP_PRIORITIES,
			 "Too many traffic classes");

	/* Compare against the element count, not sizeof() which is the
	 * size in bytes and made the old assert pass for out-of-range tc.
	 */
	NET_ASSERT(tc < (int)(sizeof(thread_priorities) /
			      sizeof(thread_priorities[0])));

	return thread_priorities[tc];
}
/* Convert traffic class to thread priority */
/* Convert an Rx traffic class index to the cooperative thread priority
 * value passed to K_PRIO_COOP() for that class's work queue thread.
 *
 * @param tc Rx traffic class index in [0, NET_TC_RX_COUNT - 1]
 *
 * @return Value for K_PRIO_COOP(); lower value means higher priority.
 */
static int rx_tc2thread(int tc)
{
	/* Initial implementation just maps the traffic class to certain queue.
	 * If there are less queues than classes, then map them into
	 * some specific queue. In order to make this work same way as before,
	 * the thread priority 7 is used to map the default traffic class so
	 * this system works same way as before when RX thread default priority
	 * was 7.
	 *
	 * Lower value in this table means higher thread priority. The
	 * value is used as a parameter to K_PRIO_COOP() which converts it
	 * to actual thread priority.
	 *
	 * Higher traffic class value means higher priority queue. This means
	 * that thread_priorities[7] value should contain the highest priority
	 * for the RX queue handling thread.
	 */
	static const int thread_priorities[] = {
#if NET_TC_RX_COUNT == 1
		7
#endif
#if NET_TC_RX_COUNT == 2
		8, 7
#endif
#if NET_TC_RX_COUNT == 3
		8, 7, 6
#endif
#if NET_TC_RX_COUNT == 4
		8, 7, 6, 5
#endif
#if NET_TC_RX_COUNT == 5
		8, 7, 6, 5, 4
#endif
#if NET_TC_RX_COUNT == 6
		8, 7, 6, 5, 4, 3
#endif
#if NET_TC_RX_COUNT == 7
		8, 7, 6, 5, 4, 3, 2
#endif
#if NET_TC_RX_COUNT == 8
		8, 7, 6, 5, 4, 3, 2, 1
#endif
	};

	BUILD_ASSERT_MSG(NET_TC_RX_COUNT <= CONFIG_NUM_COOP_PRIORITIES,
			 "Too many traffic classes");

	/* Compare against the element count, not sizeof() which is the
	 * size in bytes and made the old assert pass for out-of-range tc.
	 */
	NET_ASSERT(tc < (int)(sizeof(thread_priorities) /
			      sizeof(thread_priorities[0])));

	return thread_priorities[tc];
}
#if defined(CONFIG_NET_SHELL)
#define TX_STACK(idx) NET_STACK_GET_NAME(TX, tx_stack, 0)[idx].stack
#define RX_STACK(idx) NET_STACK_GET_NAME(RX, rx_stack, 0)[idx].stack
#else
#define TX_STACK(idx) NET_STACK_GET_NAME(TX, tx_stack, 0)[idx]
#define RX_STACK(idx) NET_STACK_GET_NAME(RX, rx_stack, 0)[idx]
#endif
/* Create workqueue for each traffic class we are using. All the network
* traffic goes through these classes. There needs to be at least one traffic
* class in the system.
*/
/* Start one work queue (thread + stack) per Tx traffic class. All
 * transmitted packets are funneled through these queues. Called from
 * net_if_init() before any interface is brought up.
 */
void net_tc_tx_init(void)
{
	int i;

	BUILD_ASSERT(NET_TC_TX_COUNT > 0);

	for (i = 0; i < NET_TC_TX_COUNT; i++) {
		u8_t thread_priority;

		thread_priority = tx_tc2thread(i);

		/* NOTE(review): this stores the thread priority in the
		 * field documented as "traffic class value" — confirm
		 * whether the class index i was intended instead.
		 */
		tx_classes[i].tc = thread_priority;

#if defined(CONFIG_NET_SHELL)
		/* Fix the thread start address so that "net stacks"
		 * command will print correct stack information.
		 */
		NET_STACK_GET_NAME(TX, tx_stack, 0)[i].stack = tx_stack[i];
		NET_STACK_GET_NAME(TX, tx_stack, 0)[i].prio = thread_priority;
		NET_STACK_GET_NAME(TX, tx_stack, 0)[i].idx = i;
#endif

		NET_DBG("[%d] Starting TX queue %p stack %p size %zd "
			"prio %d (%d)", i,
			&tx_classes[i].work_q.queue, TX_STACK(i),
			K_THREAD_STACK_SIZEOF(tx_stack[i]),
			thread_priority, K_PRIO_COOP(thread_priority));

		k_work_q_start(&tx_classes[i].work_q,
			       tx_stack[i],
			       K_THREAD_STACK_SIZEOF(tx_stack[i]),
			       K_PRIO_COOP(thread_priority));
	}

	/* Let the newly started cooperative work queue threads run once
	 * so they are ready before any packet is submitted.
	 */
	k_yield();
}
/* Start one work queue (thread + stack) per Rx traffic class. All
 * received packets are funneled through these queues. Called from
 * init_rx_queues() during network stack startup.
 */
void net_tc_rx_init(void)
{
	int i;

	BUILD_ASSERT(NET_TC_RX_COUNT > 0);

	for (i = 0; i < NET_TC_RX_COUNT; i++) {
		u8_t thread_priority;

		thread_priority = rx_tc2thread(i);

		/* NOTE(review): this stores the thread priority in the
		 * field documented as "traffic class value" — confirm
		 * whether the class index i was intended instead.
		 */
		rx_classes[i].tc = thread_priority;

#if defined(CONFIG_NET_SHELL)
		/* Fix the thread start address so that "net stacks"
		 * command will print correct stack information.
		 */
		NET_STACK_GET_NAME(RX, rx_stack, 0)[i].stack = rx_stack[i];
		NET_STACK_GET_NAME(RX, rx_stack, 0)[i].prio = thread_priority;
		NET_STACK_GET_NAME(RX, rx_stack, 0)[i].idx = i;
#endif

		NET_DBG("[%d] Starting RX queue %p stack %p size %zd "
			"prio %d (%d)", i,
			&rx_classes[i].work_q.queue, RX_STACK(i),
			K_THREAD_STACK_SIZEOF(rx_stack[i]),
			thread_priority, K_PRIO_COOP(thread_priority));

		k_work_q_start(&rx_classes[i].work_q,
			       rx_stack[i],
			       K_THREAD_STACK_SIZEOF(rx_stack[i]),
			       K_PRIO_COOP(thread_priority));
	}

	/* Let the newly started cooperative work queue threads run once
	 * so they are ready before any packet is submitted.
	 */
	k_yield();
}