net: iface: Introduce TX mutex locking
A recent iface lock removal in ed17320c3d
exposed concurrent-access issues on the TX path for drivers that are
not re-entrant.
Reverting that commit does not really solve the problem, as it would
still exist if multiple Traffic Class queues are in use.
Therefore, introduce a separate mutex for TX data path, protecting the
L2/driver from concurrent transfers from several threads.
Signed-off-by: Robert Lubos <robert.lubos@nordicsemi.no>
This commit is contained in:
parent
250ff71c64
commit
61c392c5b1
|
@ -212,6 +212,9 @@ enum net_if_flag {
|
|||
/** IPv6 Multicast Listener Discovery disabled. */
|
||||
NET_IF_IPV6_NO_MLD,
|
||||
|
||||
/** Mutex locking on TX data path disabled on the interface. */
|
||||
NET_IF_NO_TX_LOCK,
|
||||
|
||||
/** @cond INTERNAL_HIDDEN */
|
||||
/* Total number of flags - must be at the end of the enum */
|
||||
NET_IF_NUM_FLAGS
|
||||
|
@ -613,6 +616,7 @@ struct net_if {
|
|||
#endif
|
||||
|
||||
struct k_mutex lock;
|
||||
struct k_mutex tx_lock;
|
||||
};
|
||||
|
||||
static inline void net_if_lock(struct net_if *iface)
|
||||
|
@ -629,6 +633,31 @@ static inline void net_if_unlock(struct net_if *iface)
|
|||
k_mutex_unlock(&iface->lock);
|
||||
}
|
||||
|
||||
static inline bool net_if_flag_is_set(struct net_if *iface,
|
||||
enum net_if_flag value);
|
||||
|
||||
static inline void net_if_tx_lock(struct net_if *iface)
|
||||
{
|
||||
NET_ASSERT(iface);
|
||||
|
||||
if (net_if_flag_is_set(iface, NET_IF_NO_TX_LOCK)) {
|
||||
return;
|
||||
}
|
||||
|
||||
(void)k_mutex_lock(&iface->tx_lock, K_FOREVER);
|
||||
}
|
||||
|
||||
static inline void net_if_tx_unlock(struct net_if *iface)
|
||||
{
|
||||
NET_ASSERT(iface);
|
||||
|
||||
if (net_if_flag_is_set(iface, NET_IF_NO_TX_LOCK)) {
|
||||
return;
|
||||
}
|
||||
|
||||
k_mutex_unlock(&iface->tx_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Set a value in network interface flags
|
||||
*
|
||||
|
|
|
@ -254,7 +254,9 @@ static bool net_if_tx(struct net_if *iface, struct net_pkt *pkt)
|
|||
}
|
||||
}
|
||||
|
||||
net_if_tx_lock(iface);
|
||||
status = net_if_l2(iface)->send(iface, pkt);
|
||||
net_if_tx_unlock(iface);
|
||||
|
||||
if (IS_ENABLED(CONFIG_NET_PKT_TXTIME_STATS)) {
|
||||
uint32_t end_tick = k_cycle_get_32();
|
||||
|
@ -426,6 +428,7 @@ static inline void init_iface(struct net_if *iface)
|
|||
#endif
|
||||
|
||||
k_mutex_init(&iface->lock);
|
||||
k_mutex_init(&iface->tx_lock);
|
||||
|
||||
api->init(iface);
|
||||
}
|
||||
|
|
|
@ -534,7 +534,9 @@ static void arp_update(struct net_if *iface,
|
|||
* the pkt are not counted twice and the packet filter
|
||||
* callbacks are only called once.
|
||||
*/
|
||||
net_if_tx_lock(iface);
|
||||
ret = net_if_l2(iface)->send(iface, pkt);
|
||||
net_if_tx_unlock(iface);
|
||||
if (ret < 0) {
|
||||
net_pkt_unref(pkt);
|
||||
}
|
||||
|
|
Loading…
Reference in a new issue