drivers: wifi: esp: schedule each RX packet on separate work
So far a dedicated FIFO was used for all RX packets, which was consumed in single submitted work. This work was also responsible for closing socket and notifying upper network layers if some errors occurred previously or socket was simply closed by peer. There is however a potential race condition in scenario described below: esp_rx thread | esp_workq thread --------------------------|----------------------------- | ---- esp_recv_work ---- | handle RX packets from FIFO | ---- on_cmd_ipd ---- | put new RX packet to FIFO | ---- on_cmd_ipd ---- | | ---- on_cmd_closed ---- | mark socket as closed | ---- on_cmd_closed ---- | | | handle close | ---- esp_recv_work ---- In this case we assume that esp_workq was preempted just after processing all RX packets from FIFO and before checking if socket was closed. In such scenario RX packet put to FIFO just before doing close is going to be unhandled, so application layer will miss part of the data. Change the way RX packets are scheduled to workqueue, by using the already available net_pkt->work objects (used for example in native TCP stack). Create a separate work for closing connection. As a result all RX packets and close handlers are on the same queue and there is no risk of handling close events before handling all previously received data. Signed-off-by: Marcin Niestroj <m.niestroj@grinn-global.com>
This commit is contained in:
parent
dca2fd8042
commit
956923f8e5
|
@ -386,7 +386,7 @@ MODEM_CMD_DEFINE(on_cmd_closed)
|
|||
}
|
||||
|
||||
sock->flags &= ~(ESP_SOCK_CONNECTED);
|
||||
k_work_submit_to_queue(&dev->workq, &sock->recv_work);
|
||||
k_work_submit_to_queue(&dev->workq, &sock->close_work);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -144,16 +144,13 @@ struct esp_socket {
|
|||
enum net_ip_protocol ip_proto;
|
||||
struct sockaddr dst;
|
||||
|
||||
/* packets */
|
||||
struct k_fifo fifo_rx_pkt;
|
||||
|
||||
/* sem */
|
||||
struct k_sem sem_data_ready;
|
||||
|
||||
/* work */
|
||||
struct k_work connect_work;
|
||||
struct k_work recv_work;
|
||||
struct k_work recvdata_work;
|
||||
struct k_work close_work;
|
||||
|
||||
/* net context */
|
||||
struct net_context *context;
|
||||
|
@ -280,6 +277,7 @@ static inline int esp_cmd_send(struct esp_data *data,
|
|||
void esp_connect_work(struct k_work *work);
|
||||
void esp_recv_work(struct k_work *work);
|
||||
void esp_recvdata_work(struct k_work *work);
|
||||
void esp_close_work(struct k_work *work);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -478,31 +478,29 @@ void esp_recvdata_work(struct k_work *work)
|
|||
|
||||
void esp_recv_work(struct k_work *work)
|
||||
{
|
||||
struct esp_socket *sock;
|
||||
struct esp_data *dev;
|
||||
struct net_pkt *pkt;
|
||||
|
||||
sock = CONTAINER_OF(work, struct esp_socket, recv_work);
|
||||
dev = esp_socket_to_dev(sock);
|
||||
struct net_pkt *pkt = CONTAINER_OF(work, struct net_pkt, work);
|
||||
struct net_context *context = pkt->context;
|
||||
struct esp_socket *sock = context->offload_context;
|
||||
|
||||
if (!esp_socket_in_use(sock)) {
|
||||
LOG_DBG("Socket %d not in use", sock->idx);
|
||||
return;
|
||||
}
|
||||
|
||||
pkt = k_fifo_get(&sock->fifo_rx_pkt, K_NO_WAIT);
|
||||
while (pkt) {
|
||||
if (sock->recv_cb) {
|
||||
sock->recv_cb(sock->context, pkt, NULL, NULL,
|
||||
0, sock->recv_user_data);
|
||||
k_sem_give(&sock->sem_data_ready);
|
||||
} else {
|
||||
/* Discard */
|
||||
net_pkt_unref(pkt);
|
||||
}
|
||||
|
||||
pkt = k_fifo_get(&sock->fifo_rx_pkt, K_NO_WAIT);
|
||||
if (sock->recv_cb) {
|
||||
sock->recv_cb(context, pkt, NULL, NULL,
|
||||
0, sock->recv_user_data);
|
||||
k_sem_give(&sock->sem_data_ready);
|
||||
} else {
|
||||
/* Discard */
|
||||
net_pkt_unref(pkt);
|
||||
}
|
||||
}
|
||||
|
||||
void esp_close_work(struct k_work *work)
|
||||
{
|
||||
struct esp_socket *sock = CONTAINER_OF(work, struct esp_socket,
|
||||
close_work);
|
||||
|
||||
if (esp_socket_close_pending(sock)) {
|
||||
if (esp_socket_connected(sock)) {
|
||||
|
@ -553,7 +551,6 @@ static int esp_recv(struct net_context *context,
|
|||
static int esp_put(struct net_context *context)
|
||||
{
|
||||
struct esp_socket *sock = context->offload_context;
|
||||
struct net_pkt *pkt;
|
||||
|
||||
esp_socket_workq_flush(sock);
|
||||
|
||||
|
@ -564,13 +561,6 @@ static int esp_put(struct net_context *context)
|
|||
sock->connect_cb = NULL;
|
||||
sock->recv_cb = NULL;
|
||||
|
||||
/* Drain rxfifo */
|
||||
for (pkt = k_fifo_get(&sock->fifo_rx_pkt, K_NO_WAIT);
|
||||
pkt != NULL;
|
||||
pkt = k_fifo_get(&sock->fifo_rx_pkt, K_NO_WAIT)) {
|
||||
net_pkt_unref(pkt);
|
||||
}
|
||||
|
||||
esp_socket_put(sock);
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -76,22 +76,22 @@ void esp_socket_init(struct esp_data *data)
|
|||
sock->link_id = INVALID_LINK_ID;
|
||||
sock->flags = 0;
|
||||
k_sem_init(&sock->sem_data_ready, 0, 1);
|
||||
k_fifo_init(&sock->fifo_rx_pkt);
|
||||
k_work_init(&sock->connect_work, esp_connect_work);
|
||||
k_work_init(&sock->recv_work, esp_recv_work);
|
||||
k_work_init(&sock->recvdata_work, esp_recvdata_work);
|
||||
k_work_init(&sock->close_work, esp_close_work);
|
||||
}
|
||||
}
|
||||
|
||||
static struct net_pkt *esp_prepare_pkt(struct esp_data *dev,
|
||||
struct net_buf *src,
|
||||
size_t offset, size_t len)
|
||||
static struct net_pkt *esp_socket_prepare_pkt(struct esp_socket *sock,
|
||||
struct net_buf *src,
|
||||
size_t offset, size_t len)
|
||||
{
|
||||
struct esp_data *data = esp_socket_to_dev(sock);
|
||||
struct net_buf *frag;
|
||||
struct net_pkt *pkt;
|
||||
size_t to_copy;
|
||||
|
||||
pkt = net_pkt_rx_alloc_with_buffer(dev->net_iface, len, AF_UNSPEC,
|
||||
pkt = net_pkt_rx_alloc_with_buffer(data->net_iface, len, AF_UNSPEC,
|
||||
0, RX_NET_PKT_ALLOC_TIMEOUT);
|
||||
if (!pkt) {
|
||||
return NULL;
|
||||
|
@ -121,6 +121,7 @@ static struct net_pkt *esp_prepare_pkt(struct esp_data *dev,
|
|||
offset = 0;
|
||||
}
|
||||
|
||||
net_pkt_set_context(pkt, sock->context);
|
||||
net_pkt_cursor_init(pkt);
|
||||
|
||||
return pkt;
|
||||
|
@ -138,19 +139,18 @@ void esp_socket_rx(struct esp_socket *sock, struct net_buf *buf,
|
|||
return;
|
||||
}
|
||||
|
||||
pkt = esp_prepare_pkt(data, buf, offset, len);
|
||||
pkt = esp_socket_prepare_pkt(sock, buf, offset, len);
|
||||
if (!pkt) {
|
||||
LOG_ERR("Failed to get net_pkt: len %zu", len);
|
||||
if (sock->type == SOCK_STREAM) {
|
||||
sock->flags |= ESP_SOCK_CLOSE_PENDING;
|
||||
k_work_submit_to_queue(&data->workq, &sock->close_work);
|
||||
}
|
||||
goto submit_work;
|
||||
return;
|
||||
}
|
||||
|
||||
k_fifo_put(&sock->fifo_rx_pkt, pkt);
|
||||
|
||||
submit_work:
|
||||
k_work_submit_to_queue(&data->workq, &sock->recv_work);
|
||||
k_work_init(&pkt->work, esp_recv_work);
|
||||
k_work_submit_to_queue(&data->workq, &pkt->work);
|
||||
}
|
||||
|
||||
void esp_socket_close(struct esp_socket *sock)
|
||||
|
|
Loading…
Reference in a new issue