aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/networking/ip-sysctl.txt22
-rw-r--r--include/linux/tcp.h6
-rw-r--r--include/net/tcp.h33
-rw-r--r--include/uapi/linux/snmp.h6
-rw-r--r--net/ipv4/proc.c6
-rw-r--r--net/ipv4/sysctl_net_ipv4.c7
-rw-r--r--net/ipv4/tcp_input.c30
-rw-r--r--net/ipv4/tcp_minisocks.c36
8 files changed, 133 insertions, 13 deletions
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index a5e4c813f17f..1b8c964b0d17 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -290,6 +290,28 @@ tcp_frto - INTEGER
290 290
291 By default it's enabled with a non-zero value. 0 disables F-RTO. 291 By default it's enabled with a non-zero value. 0 disables F-RTO.
292 292
293tcp_invalid_ratelimit - INTEGER
294 Limit the maximal rate for sending duplicate acknowledgments
295 in response to incoming TCP packets that are for an existing
296 connection but that are invalid due to any of these reasons:
297
298 (a) out-of-window sequence number,
299 (b) out-of-window acknowledgment number, or
300 (c) PAWS (Protection Against Wrapped Sequence numbers) check failure
301
302 This can help mitigate simple "ack loop" DoS attacks, wherein
303 a buggy or malicious middlebox or man-in-the-middle can
 304 rewrite TCP header fields in a manner that causes each endpoint
305 to think that the other is sending invalid TCP segments, thus
306 causing each side to send an unterminating stream of duplicate
307 acknowledgments for invalid segments.
308
309 Using 0 disables rate-limiting of dupacks in response to
310 invalid segments; otherwise this value specifies the minimal
311 space between sending such dupacks, in milliseconds.
312
313 Default: 500 (milliseconds).
314
293tcp_keepalive_time - INTEGER 315tcp_keepalive_time - INTEGER
294 How often TCP sends out keepalive messages when keepalive is enabled. 316 How often TCP sends out keepalive messages when keepalive is enabled.
 295 Default: 2 hours. 317 Default: 2 hours.
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 67309ece0772..1a7adb411647 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -115,6 +115,7 @@ struct tcp_request_sock {
115 u32 rcv_isn; 115 u32 rcv_isn;
116 u32 snt_isn; 116 u32 snt_isn;
117 u32 snt_synack; /* synack sent time */ 117 u32 snt_synack; /* synack sent time */
118 u32 last_oow_ack_time; /* last SYNACK */
118 u32 rcv_nxt; /* the ack # by SYNACK. For 119 u32 rcv_nxt; /* the ack # by SYNACK. For
119 * FastOpen it's the seq# 120 * FastOpen it's the seq#
120 * after data-in-SYN. 121 * after data-in-SYN.
@@ -152,6 +153,7 @@ struct tcp_sock {
152 u32 snd_sml; /* Last byte of the most recently transmitted small packet */ 153 u32 snd_sml; /* Last byte of the most recently transmitted small packet */
153 u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */ 154 u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
154 u32 lsndtime; /* timestamp of last sent data packet (for restart window) */ 155 u32 lsndtime; /* timestamp of last sent data packet (for restart window) */
156 u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */
155 157
156 u32 tsoffset; /* timestamp offset */ 158 u32 tsoffset; /* timestamp offset */
157 159
@@ -340,6 +342,10 @@ struct tcp_timewait_sock {
340 u32 tw_rcv_wnd; 342 u32 tw_rcv_wnd;
341 u32 tw_ts_offset; 343 u32 tw_ts_offset;
342 u32 tw_ts_recent; 344 u32 tw_ts_recent;
345
346 /* The time we sent the last out-of-window ACK: */
347 u32 tw_last_oow_ack_time;
348
343 long tw_ts_recent_stamp; 349 long tw_ts_recent_stamp;
344#ifdef CONFIG_TCP_MD5SIG 350#ifdef CONFIG_TCP_MD5SIG
345 struct tcp_md5sig_key *tw_md5_key; 351 struct tcp_md5sig_key *tw_md5_key;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 28e9bd3abceb..da4196fb78db 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -274,6 +274,7 @@ extern int sysctl_tcp_challenge_ack_limit;
274extern unsigned int sysctl_tcp_notsent_lowat; 274extern unsigned int sysctl_tcp_notsent_lowat;
275extern int sysctl_tcp_min_tso_segs; 275extern int sysctl_tcp_min_tso_segs;
276extern int sysctl_tcp_autocorking; 276extern int sysctl_tcp_autocorking;
277extern int sysctl_tcp_invalid_ratelimit;
277 278
278extern atomic_long_t tcp_memory_allocated; 279extern atomic_long_t tcp_memory_allocated;
279extern struct percpu_counter tcp_sockets_allocated; 280extern struct percpu_counter tcp_sockets_allocated;
@@ -1144,6 +1145,7 @@ static inline void tcp_openreq_init(struct request_sock *req,
1144 tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq; 1145 tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
1145 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; 1146 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
1146 tcp_rsk(req)->snt_synack = tcp_time_stamp; 1147 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1148 tcp_rsk(req)->last_oow_ack_time = 0;
1147 req->mss = rx_opt->mss_clamp; 1149 req->mss = rx_opt->mss_clamp;
1148 req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0; 1150 req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
1149 ireq->tstamp_ok = rx_opt->tstamp_ok; 1151 ireq->tstamp_ok = rx_opt->tstamp_ok;
@@ -1236,6 +1238,37 @@ static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1236 return true; 1238 return true;
1237} 1239}
1238 1240
1241/* Return true if we're currently rate-limiting out-of-window ACKs and
1242 * thus shouldn't send a dupack right now. We rate-limit dupacks in
1243 * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
1244 * attacks that send repeated SYNs or ACKs for the same connection. To
1245 * do this, we do not send a duplicate SYNACK or ACK if the remote
1246 * endpoint is sending out-of-window SYNs or pure ACKs at a high rate.
1247 */
1248static inline bool tcp_oow_rate_limited(struct net *net,
1249 const struct sk_buff *skb,
1250 int mib_idx, u32 *last_oow_ack_time)
1251{
1252 /* Data packets without SYNs are not likely part of an ACK loop. */
1253 if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
1254 !tcp_hdr(skb)->syn)
1255 goto not_rate_limited;
1256
1257 if (*last_oow_ack_time) {
1258 s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
1259
1260 if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
1261 NET_INC_STATS_BH(net, mib_idx);
1262 return true; /* rate-limited: don't send yet! */
1263 }
1264 }
1265
1266 *last_oow_ack_time = tcp_time_stamp;
1267
1268not_rate_limited:
1269 return false; /* not rate-limited: go ahead, send dupack now! */
1270}
1271
1239static inline void tcp_mib_init(struct net *net) 1272static inline void tcp_mib_init(struct net *net)
1240{ 1273{
1241 /* See RFC 2012 */ 1274 /* See RFC 2012 */
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index b22224100011..6a6fb747c78d 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -270,6 +270,12 @@ enum
270 LINUX_MIB_TCPHYSTARTTRAINCWND, /* TCPHystartTrainCwnd */ 270 LINUX_MIB_TCPHYSTARTTRAINCWND, /* TCPHystartTrainCwnd */
271 LINUX_MIB_TCPHYSTARTDELAYDETECT, /* TCPHystartDelayDetect */ 271 LINUX_MIB_TCPHYSTARTDELAYDETECT, /* TCPHystartDelayDetect */
272 LINUX_MIB_TCPHYSTARTDELAYCWND, /* TCPHystartDelayCwnd */ 272 LINUX_MIB_TCPHYSTARTDELAYCWND, /* TCPHystartDelayCwnd */
273 LINUX_MIB_TCPACKSKIPPEDSYNRECV, /* TCPACKSkippedSynRecv */
274 LINUX_MIB_TCPACKSKIPPEDPAWS, /* TCPACKSkippedPAWS */
275 LINUX_MIB_TCPACKSKIPPEDSEQ, /* TCPACKSkippedSeq */
276 LINUX_MIB_TCPACKSKIPPEDFINWAIT2, /* TCPACKSkippedFinWait2 */
277 LINUX_MIB_TCPACKSKIPPEDTIMEWAIT, /* TCPACKSkippedTimeWait */
278 LINUX_MIB_TCPACKSKIPPEDCHALLENGE, /* TCPACKSkippedChallenge */
273 __LINUX_MIB_MAX 279 __LINUX_MIB_MAX
274}; 280};
275 281
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 8f9cd200ce20..d8953ef0770c 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -292,6 +292,12 @@ static const struct snmp_mib snmp4_net_list[] = {
292 SNMP_MIB_ITEM("TCPHystartTrainCwnd", LINUX_MIB_TCPHYSTARTTRAINCWND), 292 SNMP_MIB_ITEM("TCPHystartTrainCwnd", LINUX_MIB_TCPHYSTARTTRAINCWND),
293 SNMP_MIB_ITEM("TCPHystartDelayDetect", LINUX_MIB_TCPHYSTARTDELAYDETECT), 293 SNMP_MIB_ITEM("TCPHystartDelayDetect", LINUX_MIB_TCPHYSTARTDELAYDETECT),
294 SNMP_MIB_ITEM("TCPHystartDelayCwnd", LINUX_MIB_TCPHYSTARTDELAYCWND), 294 SNMP_MIB_ITEM("TCPHystartDelayCwnd", LINUX_MIB_TCPHYSTARTDELAYCWND),
295 SNMP_MIB_ITEM("TCPACKSkippedSynRecv", LINUX_MIB_TCPACKSKIPPEDSYNRECV),
296 SNMP_MIB_ITEM("TCPACKSkippedPAWS", LINUX_MIB_TCPACKSKIPPEDPAWS),
297 SNMP_MIB_ITEM("TCPACKSkippedSeq", LINUX_MIB_TCPACKSKIPPEDSEQ),
298 SNMP_MIB_ITEM("TCPACKSkippedFinWait2", LINUX_MIB_TCPACKSKIPPEDFINWAIT2),
299 SNMP_MIB_ITEM("TCPACKSkippedTimeWait", LINUX_MIB_TCPACKSKIPPEDTIMEWAIT),
300 SNMP_MIB_ITEM("TCPACKSkippedChallenge", LINUX_MIB_TCPACKSKIPPEDCHALLENGE),
295 SNMP_MIB_SENTINEL 301 SNMP_MIB_SENTINEL
296}; 302};
297 303
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index e0ee384a448f..82601a68cf90 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -729,6 +729,13 @@ static struct ctl_table ipv4_table[] = {
729 .extra2 = &one, 729 .extra2 = &one,
730 }, 730 },
731 { 731 {
732 .procname = "tcp_invalid_ratelimit",
733 .data = &sysctl_tcp_invalid_ratelimit,
734 .maxlen = sizeof(int),
735 .mode = 0644,
736 .proc_handler = proc_dointvec_ms_jiffies,
737 },
738 {
732 .procname = "icmp_msgs_per_sec", 739 .procname = "icmp_msgs_per_sec",
733 .data = &sysctl_icmp_msgs_per_sec, 740 .data = &sysctl_icmp_msgs_per_sec,
734 .maxlen = sizeof(int), 741 .maxlen = sizeof(int),
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d3dfff78fa19..8fdd27b17306 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -100,6 +100,7 @@ int sysctl_tcp_thin_dupack __read_mostly;
100 100
101int sysctl_tcp_moderate_rcvbuf __read_mostly = 1; 101int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
102int sysctl_tcp_early_retrans __read_mostly = 3; 102int sysctl_tcp_early_retrans __read_mostly = 3;
103int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
103 104
104#define FLAG_DATA 0x01 /* Incoming frame contained data. */ 105#define FLAG_DATA 0x01 /* Incoming frame contained data. */
105#define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */ 106#define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */
@@ -3321,13 +3322,22 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32
3321} 3322}
3322 3323
3323/* RFC 5961 7 [ACK Throttling] */ 3324/* RFC 5961 7 [ACK Throttling] */
3324static void tcp_send_challenge_ack(struct sock *sk) 3325static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
3325{ 3326{
3326 /* unprotected vars, we dont care of overwrites */ 3327 /* unprotected vars, we dont care of overwrites */
3327 static u32 challenge_timestamp; 3328 static u32 challenge_timestamp;
3328 static unsigned int challenge_count; 3329 static unsigned int challenge_count;
3329 u32 now = jiffies / HZ; 3330 struct tcp_sock *tp = tcp_sk(sk);
3331 u32 now;
3332
3333 /* First check our per-socket dupack rate limit. */
3334 if (tcp_oow_rate_limited(sock_net(sk), skb,
3335 LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
3336 &tp->last_oow_ack_time))
3337 return;
3330 3338
 3339 /* Then check the host-wide RFC 5961 rate limit. */
3340 now = jiffies / HZ;
3331 if (now != challenge_timestamp) { 3341 if (now != challenge_timestamp) {
3332 challenge_timestamp = now; 3342 challenge_timestamp = now;
3333 challenge_count = 0; 3343 challenge_count = 0;
@@ -3423,7 +3433,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3423 if (before(ack, prior_snd_una)) { 3433 if (before(ack, prior_snd_una)) {
3424 /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */ 3434 /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
3425 if (before(ack, prior_snd_una - tp->max_window)) { 3435 if (before(ack, prior_snd_una - tp->max_window)) {
3426 tcp_send_challenge_ack(sk); 3436 tcp_send_challenge_ack(sk, skb);
3427 return -1; 3437 return -1;
3428 } 3438 }
3429 goto old_ack; 3439 goto old_ack;
@@ -4992,7 +5002,10 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
4992 tcp_paws_discard(sk, skb)) { 5002 tcp_paws_discard(sk, skb)) {
4993 if (!th->rst) { 5003 if (!th->rst) {
4994 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); 5004 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
4995 tcp_send_dupack(sk, skb); 5005 if (!tcp_oow_rate_limited(sock_net(sk), skb,
5006 LINUX_MIB_TCPACKSKIPPEDPAWS,
5007 &tp->last_oow_ack_time))
5008 tcp_send_dupack(sk, skb);
4996 goto discard; 5009 goto discard;
4997 } 5010 }
4998 /* Reset is accepted even if it did not pass PAWS. */ 5011 /* Reset is accepted even if it did not pass PAWS. */
@@ -5009,7 +5022,10 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
5009 if (!th->rst) { 5022 if (!th->rst) {
5010 if (th->syn) 5023 if (th->syn)
5011 goto syn_challenge; 5024 goto syn_challenge;
5012 tcp_send_dupack(sk, skb); 5025 if (!tcp_oow_rate_limited(sock_net(sk), skb,
5026 LINUX_MIB_TCPACKSKIPPEDSEQ,
5027 &tp->last_oow_ack_time))
5028 tcp_send_dupack(sk, skb);
5013 } 5029 }
5014 goto discard; 5030 goto discard;
5015 } 5031 }
@@ -5025,7 +5041,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
5025 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) 5041 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt)
5026 tcp_reset(sk); 5042 tcp_reset(sk);
5027 else 5043 else
5028 tcp_send_challenge_ack(sk); 5044 tcp_send_challenge_ack(sk, skb);
5029 goto discard; 5045 goto discard;
5030 } 5046 }
5031 5047
@@ -5039,7 +5055,7 @@ syn_challenge:
5039 if (syn_inerr) 5055 if (syn_inerr)
5040 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); 5056 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
5041 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE); 5057 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
5042 tcp_send_challenge_ack(sk); 5058 tcp_send_challenge_ack(sk, skb);
5043 goto discard; 5059 goto discard;
5044 } 5060 }
5045 5061
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index bc9216dc9de1..dd11ac7798c6 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -58,6 +58,25 @@ static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
58 return seq == e_win && seq == end_seq; 58 return seq == e_win && seq == end_seq;
59} 59}
60 60
61static enum tcp_tw_status
62tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
63 const struct sk_buff *skb, int mib_idx)
64{
65 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
66
67 if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
68 &tcptw->tw_last_oow_ack_time)) {
69 /* Send ACK. Note, we do not put the bucket,
70 * it will be released by caller.
71 */
72 return TCP_TW_ACK;
73 }
74
75 /* We are rate-limiting, so just release the tw sock and drop skb. */
76 inet_twsk_put(tw);
77 return TCP_TW_SUCCESS;
78}
79
61/* 80/*
62 * * Main purpose of TIME-WAIT state is to close connection gracefully, 81 * * Main purpose of TIME-WAIT state is to close connection gracefully,
63 * when one of ends sits in LAST-ACK or CLOSING retransmitting FIN 82 * when one of ends sits in LAST-ACK or CLOSING retransmitting FIN
@@ -116,7 +135,8 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
116 !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq, 135 !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
117 tcptw->tw_rcv_nxt, 136 tcptw->tw_rcv_nxt,
118 tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd)) 137 tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
119 return TCP_TW_ACK; 138 return tcp_timewait_check_oow_rate_limit(
139 tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);
120 140
121 if (th->rst) 141 if (th->rst)
122 goto kill; 142 goto kill;
@@ -250,10 +270,8 @@ kill:
250 inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN, 270 inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
251 TCP_TIMEWAIT_LEN); 271 TCP_TIMEWAIT_LEN);
252 272
253 /* Send ACK. Note, we do not put the bucket, 273 return tcp_timewait_check_oow_rate_limit(
254 * it will be released by caller. 274 tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
255 */
256 return TCP_TW_ACK;
257 } 275 }
258 inet_twsk_put(tw); 276 inet_twsk_put(tw);
259 return TCP_TW_SUCCESS; 277 return TCP_TW_SUCCESS;
@@ -289,6 +307,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
289 tcptw->tw_ts_recent = tp->rx_opt.ts_recent; 307 tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
290 tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp; 308 tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
291 tcptw->tw_ts_offset = tp->tsoffset; 309 tcptw->tw_ts_offset = tp->tsoffset;
310 tcptw->tw_last_oow_ack_time = 0;
292 311
293#if IS_ENABLED(CONFIG_IPV6) 312#if IS_ENABLED(CONFIG_IPV6)
294 if (tw->tw_family == PF_INET6) { 313 if (tw->tw_family == PF_INET6) {
@@ -467,6 +486,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
467 tcp_enable_early_retrans(newtp); 486 tcp_enable_early_retrans(newtp);
468 newtp->tlp_high_seq = 0; 487 newtp->tlp_high_seq = 0;
469 newtp->lsndtime = treq->snt_synack; 488 newtp->lsndtime = treq->snt_synack;
489 newtp->last_oow_ack_time = 0;
470 newtp->total_retrans = req->num_retrans; 490 newtp->total_retrans = req->num_retrans;
471 491
472 /* So many TCP implementations out there (incorrectly) count the 492 /* So many TCP implementations out there (incorrectly) count the
@@ -605,7 +625,11 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
605 * Reset timer after retransmitting SYNACK, similar to 625 * Reset timer after retransmitting SYNACK, similar to
606 * the idea of fast retransmit in recovery. 626 * the idea of fast retransmit in recovery.
607 */ 627 */
608 if (!inet_rtx_syn_ack(sk, req)) 628 if (!tcp_oow_rate_limited(sock_net(sk), skb,
629 LINUX_MIB_TCPACKSKIPPEDSYNRECV,
630 &tcp_rsk(req)->last_oow_ack_time) &&
631
632 !inet_rtx_syn_ack(sk, req))
609 req->expires = min(TCP_TIMEOUT_INIT << req->num_timeout, 633 req->expires = min(TCP_TIMEOUT_INIT << req->num_timeout,
610 TCP_RTO_MAX) + jiffies; 634 TCP_RTO_MAX) + jiffies;
611 return NULL; 635 return NULL;