net: ip: net_mgmt: Support system work queue and synchronous callbacks

Add a Kconfig choice for the user to select how network events are
handled: in a dedicated thread, on the system work queue, or
synchronously when the events are emitted.

By default a separate thread is created, which keeps the behavior
backwards compatible.

Signed-off-by: Pieter De Gendt <pieter.degendt@basalte.be>
Pieter De Gendt 2023-12-01 13:21:39 +01:00, committed by Fabio Baltieri
parent 3fec3964e0
commit c1204affab
2 changed files with 113 additions and 36 deletions
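
For orientation, this is how applications consume net_mgmt events through the dynamic callback API that all three scheduling modes feed into (a minimal sketch; the handler name and body are illustrative):

#include <zephyr/net/net_mgmt.h>
#include <zephyr/net/net_event.h>

static struct net_mgmt_event_callback ipv4_cb;

/* Runs in whichever context the new Kconfig choice selects: the dedicated
 * net_mgmt thread, the system work queue, or the emitter itself. */
static void ipv4_event_handler(struct net_mgmt_event_callback *cb,
			       uint32_t mgmt_event, struct net_if *iface)
{
	if (mgmt_event == NET_EVENT_IPV4_ADDR_ADD) {
		/* With NET_MGMT_EVENT_DIRECT this executes synchronously in
		 * the emitter's context, so keep it short and non-blocking. */
	}
}

void register_ipv4_events(void)
{
	net_mgmt_init_event_callback(&ipv4_cb, ipv4_event_handler,
				     NET_EVENT_IPV4_ADDR_ADD);
	net_mgmt_add_event_callback(&ipv4_cb);
}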


@@ -19,6 +19,42 @@ config NET_MGMT_EVENT
 if NET_MGMT_EVENT

+choice NET_MGMT_EVENT_WORKER
+	prompt "Network event scheduling"
+	default NET_MGMT_EVENT_THREAD
+
+config NET_MGMT_EVENT_THREAD
+	bool "Separate network events thread"
+	help
+	  Create a dedicated thread for network event callback handlers.
+	  If NET_MGMT_EVENT_INFO is enabled the data will be copied to
+	  a message queue.
+
+config NET_MGMT_EVENT_SYSTEM_WORKQUEUE
+	bool "System work queue"
+	help
+	  Submit work to the system work queue to schedule calling network
+	  event callback handlers.
+	  If NET_MGMT_EVENT_INFO is enabled the data will be copied to
+	  a message queue.
+
+config NET_MGMT_EVENT_DIRECT
+	bool "Trigger callback on event emit"
+	help
+	  Call network event handlers when the event is emitted.
+	  If NET_MGMT_EVENT_INFO is enabled a data pointer is passed to
+	  callback handlers, no info data is copied.
+
+endchoice
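
The main behavioral difference between the modes is what a handler's info pointer refers to. A sketch, assuming CONFIG_NET_MGMT_EVENT_INFO=y and an event whose emitter attaches a struct in_addr (as the IPv4 address events do); the handler name is illustrative:

#include <zephyr/net/net_mgmt.h>
#include <zephyr/net/net_event.h>
#include <zephyr/net/net_ip.h>

static void addr_handler(struct net_mgmt_event_callback *cb,
			 uint32_t mgmt_event, struct net_if *iface)
{
	if (mgmt_event == NET_EVENT_IPV4_ADDR_ADD && cb->info != NULL) {
		/* In the two queued modes cb->info points into a copy made
		 * when the event was pushed; with NET_MGMT_EVENT_DIRECT it
		 * points at the emitter's own data and is only valid for
		 * the duration of this call. */
		const struct in_addr *addr = cb->info;

		ARG_UNUSED(addr);
	}
}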
+
+config NET_MGMT_EVENT_QUEUE
+	bool
+	default y
+	depends on NET_MGMT_EVENT_THREAD || NET_MGMT_EVENT_SYSTEM_WORKQUEUE
+	help
+	  Hidden option to enable the network event queue when callbacks
+	  are run asynchronously.
+
 config NET_MGMT_EVENT_STACK_SIZE
 	int "Stack size for the inner thread handling event callbacks"
 	default 4096 if WIFI_NM_WPA_SUPPLICANT
@@ -26,6 +62,7 @@ config NET_MGMT_EVENT_STACK_SIZE
 	default 840 if X86
 	default 800 if THREAD_LOCAL_STORAGE
 	default 768
+	depends on NET_MGMT_EVENT_THREAD
 	help
 	  Set the internal stack size for NM to run registered callbacks
 	  on events.
@@ -35,6 +72,7 @@ config NET_MGMT_EVENT_QUEUE_SIZE
 	default 16 if NET_MGMT_EVENT_MONITOR
 	default 5
 	range 1 1024
+	depends on NET_MGMT_EVENT_QUEUE
 	help
 	  Number of events which can be queued at the same time. Note that
 	  if a 3rd event comes in, the first will be removed without generating any
@@ -45,6 +83,7 @@ config NET_MGMT_EVENT_QUEUE_TIMEOUT
 	int "Timeout for event queue"
 	default 10
 	range 1 10000
+	depends on NET_MGMT_EVENT_QUEUE
 	help
 	  Timeout in milliseconds for the event queue. This timeout is used to
 	  wait for the queue to be available.
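
Queue size and timeout matter on the emitting side: a push blocks for at most the configured timeout when the queue is full. A sketch of an emitter using the existing net_mgmt_event_notify_with_info() API (the wrapper function here is illustrative):

#include <zephyr/net/net_mgmt.h>
#include <zephyr/net/net_event.h>
#include <zephyr/net/net_ip.h>

static void notify_ipv4_added(struct net_if *iface, struct in_addr *addr)
{
	/* In the queued modes up to NET_EVENT_INFO_MAX_SIZE bytes of info
	 * are copied into the message queue; if the queue stays full longer
	 * than NET_MGMT_EVENT_QUEUE_TIMEOUT milliseconds, the event is
	 * dropped. */
	net_mgmt_event_notify_with_info(NET_EVENT_IPV4_ADDR_ADD, iface,
					addr, sizeof(struct in_addr));
}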
@@ -83,6 +122,7 @@ source "subsys/net/Kconfig.template.log_config.net"
 config NET_DEBUG_MGMT_EVENT_STACK
 	bool "Stack analysis output on Net MGMT event core"
 	select INIT_STACKS
+	depends on NET_MGMT_EVENT_THREAD
 	help
 	  Add debug messages output on how much Net MGMT event stack is used.


@@ -19,8 +19,12 @@ LOG_MODULE_REGISTER(net_mgmt, CONFIG_NET_MGMT_EVENT_LOG_LEVEL);
 #include "net_private.h"

 struct mgmt_event_entry {
-#ifdef CONFIG_NET_MGMT_EVENT_INFO
+#if defined(CONFIG_NET_MGMT_EVENT_INFO)
+#if defined(CONFIG_NET_MGMT_EVENT_QUEUE)
 	uint8_t info[NET_EVENT_INFO_MAX_SIZE];
+#else
+	const void *info;
+#endif /* CONFIG_NET_MGMT_EVENT_QUEUE */
 	size_t info_length;
 #endif /* CONFIG_NET_MGMT_EVENT_INFO */
 	uint32_t event;
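
Spelled out, the two effective layouts of the entry after preprocessing are as follows (struct names are hypothetical, for illustration only):

/* Queued modes (THREAD / SYSTEM_WORKQUEUE): the payload is copied into
 * the entry so it survives until the worker dequeues it. */
struct mgmt_event_entry_queued {
	uint8_t info[NET_EVENT_INFO_MAX_SIZE];
	size_t info_length;
	uint32_t event;
	struct net_if *iface;
};

/* DIRECT mode: callbacks run on the emitter's stack before the emitter
 * returns, so a pointer to the emitter's data suffices and nothing is
 * copied. */
struct mgmt_event_entry_direct {
	const void *info;
	size_t info_length;
	uint32_t event;
	struct net_if *iface;
};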
@@ -36,18 +40,32 @@ struct mgmt_event_wait {
 };

 static K_MUTEX_DEFINE(net_mgmt_callback_lock);
-static K_MUTEX_DEFINE(net_mgmt_event_lock);

+#if defined(CONFIG_NET_MGMT_EVENT_THREAD)
 K_KERNEL_STACK_DEFINE(mgmt_stack, CONFIG_NET_MGMT_EVENT_STACK_SIZE);
-static struct k_thread mgmt_thread_data;
+static struct k_work_q mgmt_work_q_obj;
+#endif

 static uint32_t global_event_mask;
 static sys_slist_t event_callbacks = SYS_SLIST_STATIC_INIT(&event_callbacks);

+/* Forward declaration for the actual caller */
+static void mgmt_run_callbacks(const struct mgmt_event_entry * const mgmt_event);
+
+#if defined(CONFIG_NET_MGMT_EVENT_QUEUE)
+static K_MUTEX_DEFINE(net_mgmt_event_lock);
+
 /* event structure used to prevent increasing the stack usage on the caller thread */
 static struct mgmt_event_entry new_event;
+
 K_MSGQ_DEFINE(event_msgq, sizeof(struct mgmt_event_entry),
 	      CONFIG_NET_MGMT_EVENT_QUEUE_SIZE, sizeof(uint32_t));
+
+static struct k_work_q *mgmt_work_q = COND_CODE_1(CONFIG_NET_MGMT_EVENT_SYSTEM_WORKQUEUE,
+						  (&k_sys_work_q), (&mgmt_work_q_obj));
+
+static void mgmt_event_work_handler(struct k_work *work);
+static K_WORK_DEFINE(mgmt_work, mgmt_event_work_handler);
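
The COND_CODE_1() selects the target work queue at build time; it expands roughly as follows:

/* With CONFIG_NET_MGMT_EVENT_SYSTEM_WORKQUEUE=y:
 *
 *	static struct k_work_q *mgmt_work_q = &k_sys_work_q;
 *
 * otherwise (NET_MGMT_EVENT_THREAD), the dedicated queue object defined
 * above is used:
 *
 *	static struct k_work_q *mgmt_work_q = &mgmt_work_q_obj;
 */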

 static inline void mgmt_push_event(uint32_t mgmt_event, struct net_if *iface,
 				   const void *info, size_t length)
@@ -88,13 +106,42 @@ static inline void mgmt_push_event(uint32_t mgmt_event, struct net_if *iface,
 	}

 	(void)k_mutex_unlock(&net_mgmt_event_lock);
+
+	k_work_submit_to_queue(mgmt_work_q, &mgmt_work);
 }
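
The hunk elides most of mgmt_push_event(); in outline it serializes emitters on net_mgmt_event_lock, fills the shared new_event entry, queues it, and kicks the work item. A hypothetical outline only, not the verbatim body:

/* Hypothetical outline of the elided queued-mode push path. */
static inline void mgmt_push_event_outline(uint32_t mgmt_event, struct net_if *iface,
					   const void *info, size_t length)
{
	/* new_event is shared between emitters, hence the lock */
	(void)k_mutex_lock(&net_mgmt_event_lock, K_FOREVER);

	/* ...bounded copy of info/length plus event/iface into new_event... */

	if (k_msgq_put(&event_msgq, &new_event,
		       K_MSEC(CONFIG_NET_MGMT_EVENT_QUEUE_TIMEOUT)) != 0) {
		/* queue stayed full past the timeout: the event is dropped */
	}

	(void)k_mutex_unlock(&net_mgmt_event_lock);

	k_work_submit_to_queue(mgmt_work_q, &mgmt_work);
}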

-static inline void mgmt_pop_event(struct mgmt_event_entry *dst)
+static void mgmt_event_work_handler(struct k_work *work)
 {
-	do {
-	} while (k_msgq_get(&event_msgq, dst, K_FOREVER) != 0);
+	struct mgmt_event_entry mgmt_event;
+
+	ARG_UNUSED(work);
+
+	while (k_msgq_get(&event_msgq, &mgmt_event, K_FOREVER) == 0) {
+		NET_DBG("Handling events, forwarding it relevantly");
+
+		mgmt_run_callbacks(&mgmt_event);
+
+		/* forcefully give up our timeslot, to give time to the callback */
+		k_yield();
+	}
 }
+
+#else
+
+static inline void mgmt_push_event(uint32_t event, struct net_if *iface,
+				   const void *info, size_t length)
+{
+	const struct mgmt_event_entry mgmt_event = {
+		.info = info,
+		.info_length = length,
+		.event = event,
+		.iface = iface,
+	};
+
+	mgmt_run_callbacks(&mgmt_event);
+}
+
+#endif /* CONFIG_NET_MGMT_EVENT_QUEUE */
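
In direct mode the entry above lives on the emitter's stack, so delivery completes before the notify call returns:

#include <zephyr/net/net_mgmt.h>
#include <zephyr/net/net_event.h>

static void link_up(struct net_if *iface)
{
	/* Direct mode: every matching handler has already run, on this
	 * thread's stack, by the time this call returns. */
	net_mgmt_event_notify(NET_EVENT_IF_UP, iface);
}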

 static inline void mgmt_add_event_mask(uint32_t event_mask)
 {
@@ -129,7 +176,7 @@ static inline bool mgmt_is_event_handled(uint32_t mgmt_event)
 					      NET_MGMT_GET_COMMAND(mgmt_event)));
 }

-static inline void mgmt_run_callbacks(const struct mgmt_event_entry * const mgmt_event)
+static inline void mgmt_run_slist_callbacks(const struct mgmt_event_entry * const mgmt_event)
 {
 	sys_snode_t *prev = NULL;
 	struct net_mgmt_event_callback *cb, *tmp;
@@ -189,7 +236,7 @@ static inline void mgmt_run_callbacks(const struct mgmt_event_entry * const mgmt
 	}

 #ifdef CONFIG_NET_DEBUG_MGMT_EVENT_STACK
-	log_stack_usage(&mgmt_thread_data);
+	log_stack_usage(&mgmt_work_q->thread);
 #endif
 }
@@ -217,32 +264,15 @@ static inline void mgmt_run_static_callbacks(const struct mgmt_event_entry * con
 	}
 }

-static void mgmt_thread(void *p1, void *p2, void *p3)
+static void mgmt_run_callbacks(const struct mgmt_event_entry * const mgmt_event)
 {
-	ARG_UNUSED(p1);
-	ARG_UNUSED(p2);
-	ARG_UNUSED(p3);
-
-	struct mgmt_event_entry mgmt_event;
-
-	mgmt_rebuild_global_event_mask();
-
-	while (1) {
-		mgmt_pop_event(&mgmt_event);
-
-		NET_DBG("Handling events, forwarding it relevantly");
-
-		/* take the lock to prevent changes to the callback structure during use */
-		(void)k_mutex_lock(&net_mgmt_callback_lock, K_FOREVER);
-
-		mgmt_run_static_callbacks(&mgmt_event);
-		mgmt_run_callbacks(&mgmt_event);
-
-		(void)k_mutex_unlock(&net_mgmt_callback_lock);
-
-		/* forcefully give up our timeslot, to give time to the callback */
-		k_yield();
-	}
+	/* take the lock to prevent changes to the callback structure during use */
+	(void)k_mutex_lock(&net_mgmt_callback_lock, K_FOREVER);
+
+	mgmt_run_static_callbacks(mgmt_event);
+	mgmt_run_slist_callbacks(mgmt_event);
+
+	(void)k_mutex_unlock(&net_mgmt_callback_lock);
 }
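
mgmt_run_callbacks() walks the build-time handlers before the runtime list. Registering a build-time handler looks roughly like this, assuming the NET_MGMT_REGISTER_EVENT_HANDLER macro and static-handler signature declared in net_mgmt.h around this release:

#include <zephyr/net/net_mgmt.h>
#include <zephyr/net/net_event.h>

/* Invoked by mgmt_run_static_callbacks() ahead of the dynamic callbacks. */
static void iface_up_handler(uint32_t mgmt_event, struct net_if *iface,
			     void *info, size_t info_length, void *user_data)
{
	ARG_UNUSED(info);
	ARG_UNUSED(info_length);
	ARG_UNUSED(user_data);
	/* react to the interface coming up */
}

NET_MGMT_REGISTER_EVENT_HANDLER(iface_up_cb, NET_EVENT_IF_UP,
				iface_up_handler, NULL);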

 static int mgmt_event_wait_call(struct net_if *iface,
@@ -369,20 +399,27 @@ int net_mgmt_event_wait_on_iface(struct net_if *iface,

 void net_mgmt_event_init(void)
 {
+	mgmt_rebuild_global_event_mask();
+
+#if defined(CONFIG_NET_MGMT_EVENT_THREAD)
 #if defined(CONFIG_NET_TC_THREAD_COOPERATIVE)
 	/* Lowest priority cooperative thread */
 #define THREAD_PRIORITY K_PRIO_COOP(CONFIG_NUM_COOP_PRIORITIES - 1)
 #else
 #define THREAD_PRIORITY K_PRIO_PREEMPT(CONFIG_NUM_PREEMPT_PRIORITIES - 1)
 #endif

+	struct k_work_queue_config q_cfg = {
+		.name = "net_mgmt",
+		.no_yield = false,
+	};
+
-	k_thread_create(&mgmt_thread_data, mgmt_stack,
-			K_KERNEL_STACK_SIZEOF(mgmt_stack),
-			mgmt_thread, NULL, NULL, NULL,
-			THREAD_PRIORITY, 0, K_NO_WAIT);
-	k_thread_name_set(&mgmt_thread_data, "net_mgmt");
+	k_work_queue_init(&mgmt_work_q_obj);
+	k_work_queue_start(&mgmt_work_q_obj, mgmt_stack,
+			   K_KERNEL_STACK_SIZEOF(mgmt_stack),
+			   THREAD_PRIORITY, &q_cfg);

 	NET_DBG("Net MGMT initialized: queue of %u entries, stack size of %u",
 		CONFIG_NET_MGMT_EVENT_QUEUE_SIZE,
 		CONFIG_NET_MGMT_EVENT_STACK_SIZE);
+#endif /* CONFIG_NET_MGMT_EVENT_THREAD */
 }
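
A worked example of the THREAD_PRIORITY math, assuming the kernel defaults CONFIG_NUM_COOP_PRIORITIES=16 and CONFIG_NUM_PREEMPT_PRIORITIES=15:

/* K_PRIO_COOP(n) == -(CONFIG_NUM_COOP_PRIORITIES - n), so:
 *
 *	K_PRIO_COOP(16 - 1)    == -1	(least urgent cooperative priority)
 *	K_PRIO_PREEMPT(15 - 1) == 14	(least urgent preemptible priority)
 *
 * Either way the net_mgmt work queue runs at the bottom of its priority
 * class, so event callbacks do not starve time-critical networking work.
 */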