author     Yuchung Cheng <ycheng@google.com>  2013-10-29 13:09:05 -0400
committer  David S. Miller <davem@davemloft.net>  2013-10-29 22:50:41 -0400
commit     c968601d174739cb1e7100c95e0eb3d2f7e91bc9 (patch)
tree       ba4605fa37ed4d0a8f47bd49c3cb53929285ebf3 /net/ipv4
parent     aa58d9813d9d236ca12f921d90634ee1dc2bcc24 (diff)
tcp: temporarily disable Fast Open on SYN timeout
Fast Open currently has a fallback feature to address SYN-data being dropped, but it requires the middle-box to pass on the regular SYN retry sent after the SYN-data. This is implemented in commit aab487435 ("net-tcp: Fast Open client - detecting SYN-data drops").

However, some NAT boxes will drop all subsequent packets after the first SYN-data and blackhole the entire connection. An example is in commit 356d7d8 ("netfilter: nf_conntrack: fix tcp_in_window for Fast Open").

The sender should note such incidents and temporarily fall back to the regular TCP handshake on subsequent attempts as well: after the second SYN timeout, the original Fast Open SYN is most likely lost. When such an event recurs, Fast Open is disabled for a period that grows exponentially with the number of recurrences.

Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
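For illustration, a minimal sketch of the kind of exponential back-off check the sender could apply before attempting Fast Open again. The helper name, the syn_loss/last_syn_loss inputs (assumed to come from the per-destination metrics updated by tcp_fastopen_cache_set() in this patch), and the 60-second base period are assumptions for this sketch, not code from this commit:

#include <linux/jiffies.h>

/* Sketch only: skip Fast Open while inside an exponentially growing
 * back-off window after repeated SYN-data losses.  syn_loss is the
 * number of recorded recurrences and last_syn_loss the jiffies stamp
 * of the most recent one; the 60 s base period is an assumption.
 */
static bool tcp_fastopen_backed_off(int syn_loss, unsigned long last_syn_loss)
{
	return syn_loss > 1 &&
	       time_before(jiffies, last_syn_loss + (60 * HZ << syn_loss));
}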
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/tcp_metrics.c  5
-rw-r--r--  net/ipv4/tcp_timer.c    6
2 files changed, 8 insertions, 3 deletions
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 4a2a84110dfb..2ab09cbae74d 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -671,8 +671,9 @@ void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
 		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
 
 		write_seqlock_bh(&fastopen_seqlock);
-		tfom->mss = mss;
-		if (cookie->len > 0)
+		if (mss)
+			tfom->mss = mss;
+		if (cookie && cookie->len > 0)
 			tfom->cookie = *cookie;
 		if (syn_lost) {
 			++tfom->syn_loss;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index af07b5b23ebf..64f0354c84c7 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -156,12 +156,16 @@ static bool retransmits_timed_out(struct sock *sk,
 static int tcp_write_timeout(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 	int retry_until;
 	bool do_reset, syn_set = false;
 
 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
-		if (icsk->icsk_retransmits)
+		if (icsk->icsk_retransmits) {
 			dst_negative_advice(sk);
+			if (tp->syn_fastopen || tp->syn_data)
+				tcp_fastopen_cache_set(sk, 0, NULL, true);
+		}
 		retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
 		syn_set = true;
 	} else {
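Note on how the two hunks fit together: when the SYN retransmit timer fires for a connection that attempted Fast Open (tp->syn_fastopen or tp->syn_data), tcp_write_timeout() now records the loss via tcp_fastopen_cache_set(sk, 0, NULL, true). The mss and cookie guards added in tcp_metrics.c above let this call bump only the SYN-loss counter without clobbering the MSS and cookie already cached for the destination.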