path: root/include/net/tcp.h
Diffstat (limited to 'include/net/tcp.h')
-rw-r--r--	include/net/tcp.h	124
1 file changed, 37 insertions(+), 87 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 8d6b983d5099..051dc5c2802d 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -65,7 +65,13 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
 #define TCP_MIN_MSS		88U
 
 /* The least MTU to use for probing */
-#define TCP_BASE_MSS		512
+#define TCP_BASE_MSS		1024
+
+/* probing interval, default to 10 minutes as per RFC4821 */
+#define TCP_PROBE_INTERVAL	600
+
+/* Specify interval when tcp mtu probing will stop */
+#define TCP_PROBE_THRESHOLD	8
 
 /* After receiving this amount of duplicate ACKs fast retransmit starts. */
 #define TCP_FASTRETRANS_THRESH 3
@@ -173,6 +179,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
 #define TCPOPT_SACK		5	/* SACK Block */
 #define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
 #define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
+#define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
 #define TCPOPT_EXP		254	/* Experimental */
 /* Magic number to be after the option value for sharing TCP
  * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
@@ -188,6 +195,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
 #define TCPOLEN_SACK_PERM	2
 #define TCPOLEN_TIMESTAMP	10
 #define TCPOLEN_MD5SIG		18
+#define TCPOLEN_FASTOPEN_BASE	2
 #define TCPOLEN_EXP_FASTOPEN_BASE	4
 
 /* But this is what stacks really send out. */
@@ -349,8 +357,7 @@ void tcp_v4_early_demux(struct sk_buff *skb);
 int tcp_v4_rcv(struct sk_buff *skb);
 
 int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
-int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-		size_t size);
+int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
 		 int flags);
 void tcp_release_cb(struct sock *sk);
@@ -401,8 +408,7 @@ enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
 					      struct sk_buff *skb,
 					      const struct tcphdr *th);
 struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
-			   struct request_sock *req, struct request_sock **prev,
-			   bool fastopen);
+			   struct request_sock *req, bool fastopen);
 int tcp_child_process(struct sock *parent, struct sock *child,
 		      struct sk_buff *skb);
 void tcp_enter_loss(struct sock *sk);
@@ -429,9 +435,9 @@ int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
 int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
 			  char __user *optval, unsigned int optlen);
 void tcp_set_keepalive(struct sock *sk, int val);
-void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
-int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-		size_t len, int nonblock, int flags, int *addr_len);
+void tcp_syn_ack_timeout(const struct request_sock *req);
+int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
+		int flags, int *addr_len);
 void tcp_parse_options(const struct sk_buff *skb,
 		       struct tcp_options_received *opt_rx,
 		       int estab, struct tcp_fastopen_cookie *foc);
@@ -443,6 +449,7 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
 
 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
 void tcp_v4_mtu_reduced(struct sock *sk);
+void tcp_req_err(struct sock *sk, u32 seq);
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 struct sock *tcp_create_openreq_child(struct sock *sk,
 				      struct request_sock *req,
@@ -524,8 +531,6 @@ int tcp_write_wakeup(struct sock *);
 void tcp_send_fin(struct sock *sk);
 void tcp_send_active_reset(struct sock *sk, gfp_t priority);
 int tcp_send_synack(struct sock *);
-bool tcp_syn_flood_action(struct sock *sk, const struct sk_buff *skb,
-			  const char *proto);
 void tcp_push_one(struct sock *, unsigned int mss_now);
 void tcp_send_ack(struct sock *sk);
 void tcp_send_delayed_ack(struct sock *sk);
@@ -824,7 +829,7 @@ struct tcp_congestion_ops {
 	/* hook for packet ack accounting (optional) */
 	void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
 	/* get info for inet_diag (optional) */
-	void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);
+	int (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);
 
 	char name[TCP_CA_NAME_MAX];
 	struct module *owner;
@@ -1132,31 +1137,6 @@ static inline int tcp_full_space(const struct sock *sk)
 	return tcp_win_from_space(sk->sk_rcvbuf);
 }
 
-static inline void tcp_openreq_init(struct request_sock *req,
-				    struct tcp_options_received *rx_opt,
-				    struct sk_buff *skb, struct sock *sk)
-{
-	struct inet_request_sock *ireq = inet_rsk(req);
-
-	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
-	req->cookie_ts = 0;
-	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
-	tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
-	tcp_rsk(req)->snt_synack = tcp_time_stamp;
-	tcp_rsk(req)->last_oow_ack_time = 0;
-	req->mss = rx_opt->mss_clamp;
-	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
-	ireq->tstamp_ok = rx_opt->tstamp_ok;
-	ireq->sack_ok = rx_opt->sack_ok;
-	ireq->snd_wscale = rx_opt->snd_wscale;
-	ireq->wscale_ok = rx_opt->wscale_ok;
-	ireq->acked = 0;
-	ireq->ecn_ok = 0;
-	ireq->ir_rmt_port = tcp_hdr(skb)->source;
-	ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
-	ireq->ir_mark = inet_request_mark(sk, skb);
-}
-
 extern void tcp_openreq_init_rwin(struct request_sock *req,
 				  struct sock *sk, struct dst_entry *dst);
 
@@ -1236,36 +1216,8 @@ static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
 	return true;
 }
 
-/* Return true if we're currently rate-limiting out-of-window ACKs and
- * thus shouldn't send a dupack right now. We rate-limit dupacks in
- * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
- * attacks that send repeated SYNs or ACKs for the same connection. To
- * do this, we do not send a duplicate SYNACK or ACK if the remote
- * endpoint is sending out-of-window SYNs or pure ACKs at a high rate.
- */
-static inline bool tcp_oow_rate_limited(struct net *net,
-					const struct sk_buff *skb,
-					int mib_idx, u32 *last_oow_ack_time)
-{
-	/* Data packets without SYNs are not likely part of an ACK loop. */
-	if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
-	    !tcp_hdr(skb)->syn)
-		goto not_rate_limited;
-
-	if (*last_oow_ack_time) {
-		s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
-
-		if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
-			NET_INC_STATS_BH(net, mib_idx);
-			return true;	/* rate-limited: don't send yet! */
-		}
-	}
-
-	*last_oow_ack_time = tcp_time_stamp;
-
-not_rate_limited:
-	return false;	/* not rate-limited: go ahead, send dupack now! */
-}
+bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
+			  int mib_idx, u32 *last_oow_ack_time);
 
 static inline void tcp_mib_init(struct net *net)
 {
@@ -1344,15 +1296,14 @@ struct tcp_md5sig_pool {
 };
 
 /* - functions */
-int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
-			const struct sock *sk, const struct request_sock *req,
-			const struct sk_buff *skb);
+int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
+			const struct sock *sk, const struct sk_buff *skb);
 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
 		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp);
 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
 		   int family);
 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
-					 struct sock *addr_sk);
+					 const struct sock *addr_sk);
 
 #ifdef CONFIG_TCP_MD5SIG
 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
@@ -1388,7 +1339,8 @@ void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
 			    struct tcp_fastopen_cookie *cookie, int *syn_loss,
 			    unsigned long *last_syn_loss);
 void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
-			    struct tcp_fastopen_cookie *cookie, bool syn_lost);
+			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
+			    u16 try_exp);
 struct tcp_fastopen_request {
 	/* Fast Open cookie. Size 0 means a cookie request */
 	struct tcp_fastopen_cookie	cookie;
@@ -1663,28 +1615,26 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 struct tcp_sock_af_ops {
 #ifdef CONFIG_TCP_MD5SIG
 	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
-						struct sock *addr_sk);
-	int		(*calc_md5_hash) (char *location,
-					  struct tcp_md5sig_key *md5,
-					  const struct sock *sk,
-					  const struct request_sock *req,
-					  const struct sk_buff *skb);
-	int		(*md5_parse) (struct sock *sk,
-				      char __user *optval,
-				      int optlen);
+						const struct sock *addr_sk);
+	int		(*calc_md5_hash)(char *location,
+					 const struct tcp_md5sig_key *md5,
+					 const struct sock *sk,
+					 const struct sk_buff *skb);
+	int		(*md5_parse)(struct sock *sk,
+				     char __user *optval,
+				     int optlen);
 #endif
 };
 
 struct tcp_request_sock_ops {
 	u16 mss_clamp;
 #ifdef CONFIG_TCP_MD5SIG
-	struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk,
-					      struct request_sock *req);
+	struct tcp_md5sig_key *(*req_md5_lookup)(struct sock *sk,
+						 const struct sock *addr_sk);
 	int		(*calc_md5_hash) (char *location,
-					  struct tcp_md5sig_key *md5,
+					  const struct tcp_md5sig_key *md5,
 					  const struct sock *sk,
-					  const struct request_sock *req,
-					  const struct sk_buff *skb);
+					  const struct sk_buff *skb);
 #endif
 	void (*init_req)(struct request_sock *req, struct sock *sk,
 			 struct sk_buff *skb);