/*
 * Copyright (c) 2018-2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "tp.h"

#define is(_a, _b) (strcmp((_a), (_b)) == 0)
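
/* Illustrative use: is("MSS", opt_name) is true when the two NUL-terminated
 * strings are equal; a thin strcmp() == 0 wrapper, e.g. for test-protocol
 * string parsing.
 */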

#ifndef MIN3
#define MIN3(_a, _b, _c) MIN((_a), MIN((_b), (_c)))
#endif
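
/* The TCP header may sit at an unaligned offset inside a net_pkt buffer,
 * so every multi-byte field below is read with UNALIGNED_GET(); th_seq and
 * th_ack are additionally converted from network byte order with ntohl().
 */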
#define th_sport(_x) UNALIGNED_GET(&(_x)->th_sport)
#define th_dport(_x) UNALIGNED_GET(&(_x)->th_dport)
#define th_seq(_x) ntohl(UNALIGNED_GET(&(_x)->th_seq))
#define th_ack(_x) ntohl(UNALIGNED_GET(&(_x)->th_ack))
#define th_off(_x) ((_x)->th_off)
#define th_flags(_x) UNALIGNED_GET(&(_x)->th_flags)
#define th_win(_x) UNALIGNED_GET(&(_x)->th_win)

#define tcp_slist(_conn, _slist, _op, _type, _link)			\
({									\
	k_mutex_lock(&_conn->lock, K_FOREVER);				\
									\
	sys_snode_t *_node = sys_slist_##_op(_slist);			\
									\
	_type *_x = _node ? CONTAINER_OF(_node, _type, _link) : NULL;	\
									\
	k_mutex_unlock(&_conn->lock);					\
									\
	_x;								\
})
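
/* tcp_slist() performs one slist operation under the connection lock and
 * maps the resulting node back to its container. The _op argument is pasted
 * onto sys_slist_, so "get" expands to sys_slist_get(), "peek_head" to
 * sys_slist_peek_head(), and so on. A hypothetical sketch, assuming the
 * queued items are net_pkts linked through a "next" node:
 *
 *	struct net_pkt *pkt = tcp_slist(conn, &conn->send_queue, get,
 *					struct net_pkt, next);
 */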

#if IS_ENABLED(CONFIG_NET_TEST_PROTOCOL)
#define tcp_malloc(_size) \
	tp_malloc(_size, tp_basename(__FILE__), __LINE__, __func__)
#define tcp_calloc(_nmemb, _size) \
	tp_calloc(_nmemb, _size, tp_basename(__FILE__), __LINE__, __func__)
#define tcp_free(_ptr) tp_free(_ptr, tp_basename(__FILE__), __LINE__, __func__)
#else
#define tcp_malloc(_size) k_malloc(_size)
#define tcp_calloc(_nmemb, _size) k_calloc(_nmemb, _size)
#define tcp_free(_ptr) k_free(_ptr)
#endif
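
/* With CONFIG_NET_TEST_PROTOCOL enabled, the wrappers above route every
 * allocation through the tp_*() test-protocol functions, which record the
 * allocating file, line and function; otherwise they map 1:1 onto the
 * kernel heap allocator.
 */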

#define TCP_PKT_ALLOC_TIMEOUT K_MSEC(100)

#if defined(CONFIG_NET_TEST_PROTOCOL)
#define tcp_pkt_clone(_pkt) tp_pkt_clone(_pkt, tp_basename(__FILE__), __LINE__)
#define tcp_pkt_unref(_pkt) tp_pkt_unref(_pkt, tp_basename(__FILE__), __LINE__)
#else
#define tcp_pkt_clone(_pkt) net_pkt_clone(_pkt, TCP_PKT_ALLOC_TIMEOUT)
#define tcp_pkt_unref(_pkt) net_pkt_unref(_pkt)
#define tp_pkt_alloc(args...)
#endif

#define tcp_pkt_ref(_pkt) net_pkt_ref(_pkt)

#define tcp_pkt_alloc(_conn, _len)					\
({									\
	struct net_pkt *_pkt;						\
									\
	if ((_len) > 0) {						\
		_pkt = net_pkt_alloc_with_buffer(			\
				(_conn)->iface,				\
				(_len),					\
				net_context_get_family((_conn)->context), \
				IPPROTO_TCP,				\
				TCP_PKT_ALLOC_TIMEOUT);			\
	} else {							\
		_pkt = net_pkt_alloc(TCP_PKT_ALLOC_TIMEOUT);		\
	}								\
									\
	tp_pkt_alloc(_pkt, tp_basename(__FILE__), __LINE__);		\
									\
	_pkt;								\
})
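
/* A zero _len allocates a bufferless net_pkt; a positive _len also reserves
 * _len bytes of TCP payload space. The expansion yields NULL if no buffer
 * becomes available within TCP_PKT_ALLOC_TIMEOUT, so callers must check the
 * result, e.g. (illustrative):
 *
 *	struct net_pkt *pkt = tcp_pkt_alloc(conn, len);
 *
 *	if (!pkt) {
 *		return -ENOBUFS;
 *	}
 */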

#define tcp_rx_pkt_alloc(_conn, _len)					\
({									\
	struct net_pkt *_pkt;						\
									\
	if ((_len) > 0) {						\
		_pkt = net_pkt_rx_alloc_with_buffer(			\
				(_conn)->iface,				\
				(_len),					\
				net_context_get_family((_conn)->context), \
				IPPROTO_TCP,				\
				TCP_PKT_ALLOC_TIMEOUT);			\
	} else {							\
		_pkt = net_pkt_rx_alloc(TCP_PKT_ALLOC_TIMEOUT);		\
	}								\
									\
	tp_pkt_alloc(_pkt, tp_basename(__FILE__), __LINE__);		\
									\
	_pkt;								\
})
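
/* Same as tcp_pkt_alloc() but drawing from the RX packet pool; used for
 * packets the stack creates on the receive path, such as the segments kept
 * in queue_recv_data while waiting for an earlier out-of-order segment.
 */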

#if IS_ENABLED(CONFIG_NET_TEST_PROTOCOL)
#define conn_seq(_conn, _req)						\
	tp_seq_track(TP_SEQ, &(_conn)->seq, (_req), tp_basename(__FILE__), \
			__LINE__, __func__)
#define conn_ack(_conn, _req)						\
	tp_seq_track(TP_ACK, &(_conn)->ack, (_req), tp_basename(__FILE__), \
			__LINE__, __func__)
#else
#define conn_seq(_conn, _req) (_conn)->seq += (_req)
#define conn_ack(_conn, _req) (_conn)->ack += (_req)
#endif
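
/* conn_seq()/conn_ack() advance the connection's send sequence number and
 * acknowledgment number by _req, e.g. conn_seq(conn, +1) after sending a
 * SYN or FIN, each of which consumes one unit of sequence space. The
 * test-protocol build routes the update through tp_seq_track() so every
 * change is logged with its call site.
 */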

#define conn_mss(_conn)							\
	((_conn)->recv_options.mss_found ?				\
	 MIN((_conn)->recv_options.mss,					\
	     net_tcp_get_supported_mss(_conn)) :			\
	 net_tcp_get_supported_mss(_conn))
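
/* Effective MSS: if the peer advertised an MSS option during the handshake,
 * use the smaller of that value and what this stack supports; otherwise
 * fall back to the locally supported MSS alone.
 */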

#define conn_state(_conn, _s)						\
({									\
	NET_DBG("%s->%s",						\
		tcp_state_to_str((_conn)->state, false),		\
		tcp_state_to_str((_s), false));				\
	(_conn)->state = _s;						\
})

#define conn_send_data_dump(_conn)					\
({									\
	NET_DBG("conn: %p total=%zd, unacked_len=%d, "			\
		"send_win=%hu, mss=%hu",				\
		(_conn), net_pkt_get_len((_conn)->send_data),		\
		(_conn)->unacked_len, (_conn)->send_win,		\
		(uint16_t)conn_mss((_conn)));				\
	NET_DBG("conn: %p send_data_timer=%hu, send_data_retries=%hu",	\
		(_conn),						\
		(uint16_t)k_ticks_to_ms_ceil32(				\
			k_work_delayable_remaining_get(			\
				&(_conn)->send_data_timer)),		\
		(_conn)->send_data_retries);				\
})

enum pkt_addr {
	TCP_EP_SRC = 1,
	TCP_EP_DST = 0
};

struct tcphdr {
	uint16_t th_sport;
	uint16_t th_dport;
	uint32_t th_seq;
	uint32_t th_ack;
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	uint8_t th_x2:4;	/* unused */
	uint8_t th_off:4;	/* data offset, in units of 32-bit words */
#endif
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	uint8_t th_off:4;
	uint8_t th_x2:4;
#endif
	uint8_t th_flags;
	uint16_t th_win;
	uint16_t th_sum;
	uint16_t th_urp;
} __packed;
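
/* th_off counts 32-bit words, so the header length in bytes (options
 * included) is th_off(th) * 4; the minimum legal value is 5 (20 bytes).
 */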

enum th_flags {
	FIN = BIT(0),
	SYN = BIT(1),
	RST = BIT(2),
	PSH = BIT(3),
	ACK = BIT(4),
	URG = BIT(5),
	ECN = BIT(6),
	CWR = BIT(7),
};

struct tcp_mss_option {
	uint32_t option;
};
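
/* On the wire the MSS option is 4 bytes: kind (NET_TCP_MSS_OPT), length
 * (NET_TCP_MSS_SIZE) and a 16-bit MSS value, which is why it can be modeled
 * as a single packed uint32_t.
 */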

enum tcp_state {
	TCP_LISTEN = 1,
	TCP_SYN_SENT,
	TCP_SYN_RECEIVED,
	TCP_ESTABLISHED,
	TCP_FIN_WAIT_1,
	TCP_FIN_WAIT_2,
	TCP_CLOSE_WAIT,
	TCP_CLOSING,
	TCP_LAST_ACK,
	TCP_TIME_WAIT,
	TCP_CLOSED
};

enum tcp_data_mode {
	TCP_DATA_MODE_SEND = 0,
	TCP_DATA_MODE_RESEND = 1
};

union tcp_endpoint {
	struct sockaddr sa;
	struct sockaddr_in sin;
	struct sockaddr_in6 sin6;
};

/* TCP Option codes */
#define NET_TCP_END_OPT          0
#define NET_TCP_NOP_OPT          1
#define NET_TCP_MSS_OPT          2
#define NET_TCP_WINDOW_SCALE_OPT 3

/* TCP Option sizes */
#define NET_TCP_END_SIZE          1
#define NET_TCP_NOP_SIZE          1
#define NET_TCP_MSS_SIZE          4
#define NET_TCP_WINDOW_SCALE_SIZE 3

struct tcp_options {
	uint16_t mss;
	uint16_t window;
	bool mss_found : 1;
	bool wnd_found : 1;
};

struct tcp { /* TCP connection */
	sys_snode_t next;
	struct net_context *context;
	struct net_pkt *send_data;
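	/* Received out-of-order segments are parked here, in sequence, until
	 * the missing earlier segment arrives or recv_queue_timer expires.
	 * Only contiguous data is queued: after receiving SEQs 5,4,3,6 while
	 * waiting for SEQ 2, the queue holds 3..6 and is handed to the
	 * application once SEQ 2 arrives, whereas a non-contiguous SEQ 7
	 * (with 6 missing) would be discarded.
	 */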
	struct net_pkt *queue_recv_data;
	struct net_if *iface;
	void *recv_user_data;
	sys_slist_t send_queue;
	union {
		net_tcp_accept_cb_t accept_cb;
		struct tcp *accepted_conn;
	};
	struct k_mutex lock;
	struct k_sem connect_sem; /* semaphore for blocking connect */
	struct k_sem tx_sem; /* semaphore indicating if transfers are blocked */
	struct k_fifo recv_data; /* temp queue before passing data to app */
	struct tcp_options recv_options;
	struct tcp_options send_options;
	struct k_work_delayable send_timer;
	struct k_work_delayable recv_queue_timer;
	struct k_work_delayable send_data_timer;
	struct k_work_delayable timewait_timer;
	struct k_work_delayable persist_timer;
	struct k_work_delayable ack_timer;

	union {
		/* Because the FIN and establish timers can never run at
		 * the same time, share one timer between them to save
		 * memory.
		 */
		struct k_work_delayable fin_timer;
		struct k_work_delayable establish_timer;
	};
	union tcp_endpoint src;
	union tcp_endpoint dst;
	size_t send_data_total;
	size_t send_retries;
	int unacked_len;
	atomic_t ref_count;
	enum tcp_state state;
	enum tcp_data_mode data_mode;
	uint32_t seq;
	uint32_t ack;
	uint16_t recv_win_max;
	uint16_t recv_win;
	uint16_t send_win;
#ifdef CONFIG_NET_TCP_RANDOMIZED_RTO
	uint16_t rto;
#endif
	uint8_t send_data_retries;
	bool in_retransmission : 1;
	bool in_connect : 1;
	bool in_close : 1;
	bool tcp_nodelay : 1;
};

#define _flags(_fl, _op, _mask, _cond)					\
({									\
	bool result = false;						\
									\
	if (UNALIGNED_GET(_fl) && (_cond) &&				\
	    (UNALIGNED_GET(_fl) _op (_mask))) {				\
		UNALIGNED_PUT(UNALIGNED_GET(_fl) & ~(_mask), _fl);	\
		result = true;						\
	}								\
									\
	result;								\
})

#define FL(_fl, _op, _mask, _args...)					\
	_flags(_fl, _op, _mask, strlen("" #_args) ? _args : true)
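
/* FL() tests the flags that remain in a working copy of th_flags and clears
 * the bits it matched, so later checks see only unconsumed flags. The
 * optional trailing argument adds an extra condition; illustrative uses:
 *
 *	if (FL(&fl, ==, SYN)) { ... }	// exactly SYN set
 *	if (FL(&fl, &, ACK, th_seq(th) == conn->ack)) { ... }
 *
 * When no condition is given, strlen("" #_args) is 0 and the condition
 * defaults to true.
 */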

typedef void (*net_tcp_cb_t)(struct tcp *conn, void *user_data);