net: sockets: sockets_can: Allow parallel receive/send
Implements a mechanism similar to the one available in net/lib/sockets.c (since the merge of #27054) in sockets_can, to enable parallel rx/tx. Fixes #38698. Signed-off-by: Mateusz Karlic <mkarlic@internships.antmicro.com>
This commit is contained in:
parent
a2f3ea5d19
commit
3844b79e96
|
@ -960,7 +960,7 @@ void net_socket_update_tc_rx_time(struct net_pkt *pkt, uint32_t end_tick)
|
|||
}
|
||||
}
|
||||
|
||||
static int wait_data(struct net_context *ctx, k_timeout_t *timeout)
|
||||
int zsock_wait_data(struct net_context *ctx, k_timeout_t *timeout)
|
||||
{
|
||||
if (ctx->cond.lock == NULL) {
|
||||
/* For some reason the lock pointer is not set properly
|
||||
|
@ -1001,7 +1001,7 @@ static inline ssize_t zsock_recv_dgram(struct net_context *ctx,
|
|||
|
||||
net_context_get_option(ctx, NET_OPT_RCVTIMEO, &timeout, NULL);
|
||||
|
||||
ret = wait_data(ctx, &timeout);
|
||||
ret = zsock_wait_data(ctx, &timeout);
|
||||
if (ret < 0) {
|
||||
errno = -ret;
|
||||
return -1;
|
||||
|
@ -1140,7 +1140,7 @@ static inline ssize_t zsock_recv_stream(struct net_context *ctx,
|
|||
}
|
||||
|
||||
if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
|
||||
res = wait_data(ctx, &timeout);
|
||||
res = zsock_wait_data(ctx, &timeout);
|
||||
if (res < 0) {
|
||||
errno = -res;
|
||||
return -1;
|
||||
|
|
|
@ -72,6 +72,11 @@ int zcan_socket(int family, int type, int proto)
|
|||
|
||||
k_fifo_init(&ctx->recv_q);
|
||||
|
||||
/* Condition variable is used to avoid keeping lock for a long time
|
||||
* when waiting data to be received
|
||||
*/
|
||||
k_condvar_init(&ctx->cond.recv);
|
||||
|
||||
z_finalize_fd(fd, ctx,
|
||||
(const struct fd_op_vtable *)&can_sock_fd_op_vtable);
|
||||
|
||||
|
@ -131,6 +136,13 @@ static void zcan_received_cb(struct net_context *ctx, struct net_pkt *pkt,
|
|||
|
||||
ctx = receivers[i].ctx;
|
||||
|
||||
/* To prevent the reader from missing the wake-up signal
|
||||
* as described in commit 1184089 and implemented in sockets.c
|
||||
*/
|
||||
if (ctx->cond.lock) {
|
||||
(void)k_mutex_lock(ctx->cond.lock, K_FOREVER);
|
||||
}
|
||||
|
||||
NET_DBG("[%d] ctx %p pkt %p st %d", i, ctx, clone, status);
|
||||
|
||||
/* if pkt is NULL, EOF */
|
||||
|
@ -152,14 +164,18 @@ static void zcan_received_cb(struct net_context *ctx, struct net_pkt *pkt,
|
|||
|
||||
NET_DBG("Set EOF flag on pkt %p", ctx);
|
||||
}
|
||||
|
||||
return;
|
||||
} else {
|
||||
/* Normal packet */
|
||||
net_pkt_set_eof(clone, false);
|
||||
|
||||
k_fifo_put(&ctx->recv_q, clone);
|
||||
}
|
||||
|
||||
if (ctx->cond.lock) {
|
||||
k_mutex_unlock(ctx->cond.lock);
|
||||
}
|
||||
|
||||
k_condvar_signal(&ctx->cond.recv);
|
||||
}
|
||||
|
||||
if (clone && clone != pkt) {
|
||||
|
@ -281,6 +297,18 @@ static ssize_t zcan_recvfrom_ctx(struct net_context *ctx, void *buf,
|
|||
|
||||
pkt = k_fifo_peek_head(&ctx->recv_q);
|
||||
} else {
|
||||
/* Mechanism as in sockets.c to allow parallel rx/tx
|
||||
*/
|
||||
if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
|
||||
int res;
|
||||
|
||||
res = zsock_wait_data(ctx, &timeout);
|
||||
if (res < 0) {
|
||||
errno = -res;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
pkt = k_fifo_get(&ctx->recv_q, timeout);
|
||||
}
|
||||
|
||||
|
|
|
@ -15,6 +15,8 @@
|
|||
int zsock_close_ctx(struct net_context *ctx);
|
||||
int zsock_poll_internal(struct zsock_pollfd *fds, int nfds, k_timeout_t timeout);
|
||||
|
||||
int zsock_wait_data(struct net_context *ctx, k_timeout_t *timeout);
|
||||
|
||||
static inline void sock_set_flag(struct net_context *ctx, uintptr_t mask,
|
||||
uintptr_t flag)
|
||||
{
|
||||
|
|
Loading…
Reference in a new issue