about summary refs log tree commit diff stats
path: root/net
diff options
context:
space:
mode:
authorYuchung Cheng <ycheng@google.com>2012-07-19 02:43:10 -0400
committerDavid S. Miller <davem@davemloft.net>2012-07-19 14:02:03 -0400
commitaab4874355679c70f93993cf3b3fd74643b9ac33 (patch)
tree677d3faf161e39f9de18b5956e24cd746e73d996 /net
parentcf60af03ca4e71134206809ea892e49b92a88896 (diff)
net-tcp: Fast Open client - detecting SYN-data drops
On paths with firewalls dropping SYN with data or experimental TCP options, Fast Open connections will experience SYN timeouts and bad performance. The solution is to track such incidents in the cookie cache and disable Fast Open temporarily. Since only the original SYN includes data and/or the Fast Open option, the SYN-ACK has some tell-tale signs (tcp_rcv_fastopen_synack()) that allow such drops to be detected. If a path has recurring Fast Open SYN drops, Fast Open is disabled for 2^(recurring_losses) minutes, starting from four minutes up to roughly one and a half days. sendmsg with the MSG_FASTOPEN flag will still succeed, but it behaves as connect() followed by write(). Signed-off-by: Yuchung Cheng <ycheng@google.com> Acked-by: Eric Dumazet <edumazet@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/ipv4/tcp_input.c10
-rw-r--r--net/ipv4/tcp_metrics.c16
-rw-r--r--net/ipv4/tcp_output.c13
3 files changed, 33 insertions, 6 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 38b6a811edf..c49a4fc175b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5652,6 +5652,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
5652 struct tcp_sock *tp = tcp_sk(sk); 5652 struct tcp_sock *tp = tcp_sk(sk);
5653 struct sk_buff *data = tcp_write_queue_head(sk); 5653 struct sk_buff *data = tcp_write_queue_head(sk);
5654 u16 mss = tp->rx_opt.mss_clamp; 5654 u16 mss = tp->rx_opt.mss_clamp;
5655 bool syn_drop;
5655 5656
5656 if (mss == tp->rx_opt.user_mss) { 5657 if (mss == tp->rx_opt.user_mss) {
5657 struct tcp_options_received opt; 5658 struct tcp_options_received opt;
@@ -5664,7 +5665,14 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
5664 mss = opt.mss_clamp; 5665 mss = opt.mss_clamp;
5665 } 5666 }
5666 5667
5667 tcp_fastopen_cache_set(sk, mss, cookie); 5668 /* The SYN-ACK neither has cookie nor acknowledges the data. Presumably
5669 * the remote receives only the retransmitted (regular) SYNs: either
5670 * the original SYN-data or the corresponding SYN-ACK is lost.
5671 */
5672 syn_drop = (cookie->len <= 0 && data &&
5673 inet_csk(sk)->icsk_retransmits);
5674
5675 tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
5668 5676
5669 if (data) { /* Retransmit unacked data in SYN */ 5677 if (data) { /* Retransmit unacked data in SYN */
5670 tcp_retransmit_skb(sk, data); 5678 tcp_retransmit_skb(sk, data);
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index d02ff377778..99779ae44f6 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -32,6 +32,8 @@ enum tcp_metric_index {
32 32
33struct tcp_fastopen_metrics { 33struct tcp_fastopen_metrics {
34 u16 mss; 34 u16 mss;
35 u16 syn_loss:10; /* Recurring Fast Open SYN losses */
36 unsigned long last_syn_loss; /* Last Fast Open SYN loss */
35 struct tcp_fastopen_cookie cookie; 37 struct tcp_fastopen_cookie cookie;
36}; 38};
37 39
@@ -125,6 +127,7 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst)
125 tm->tcpm_ts = 0; 127 tm->tcpm_ts = 0;
126 tm->tcpm_ts_stamp = 0; 128 tm->tcpm_ts_stamp = 0;
127 tm->tcpm_fastopen.mss = 0; 129 tm->tcpm_fastopen.mss = 0;
130 tm->tcpm_fastopen.syn_loss = 0;
128 tm->tcpm_fastopen.cookie.len = 0; 131 tm->tcpm_fastopen.cookie.len = 0;
129} 132}
130 133
@@ -644,7 +647,8 @@ bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
644static DEFINE_SEQLOCK(fastopen_seqlock); 647static DEFINE_SEQLOCK(fastopen_seqlock);
645 648
646void tcp_fastopen_cache_get(struct sock *sk, u16 *mss, 649void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
647 struct tcp_fastopen_cookie *cookie) 650 struct tcp_fastopen_cookie *cookie,
651 int *syn_loss, unsigned long *last_syn_loss)
648{ 652{
649 struct tcp_metrics_block *tm; 653 struct tcp_metrics_block *tm;
650 654
@@ -659,14 +663,15 @@ void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
659 if (tfom->mss) 663 if (tfom->mss)
660 *mss = tfom->mss; 664 *mss = tfom->mss;
661 *cookie = tfom->cookie; 665 *cookie = tfom->cookie;
666 *syn_loss = tfom->syn_loss;
667 *last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
662 } while (read_seqretry(&fastopen_seqlock, seq)); 668 } while (read_seqretry(&fastopen_seqlock, seq));
663 } 669 }
664 rcu_read_unlock(); 670 rcu_read_unlock();
665} 671}
666 672
667
668void tcp_fastopen_cache_set(struct sock *sk, u16 mss, 673void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
669 struct tcp_fastopen_cookie *cookie) 674 struct tcp_fastopen_cookie *cookie, bool syn_lost)
670{ 675{
671 struct tcp_metrics_block *tm; 676 struct tcp_metrics_block *tm;
672 677
@@ -679,6 +684,11 @@ void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
679 tfom->mss = mss; 684 tfom->mss = mss;
680 if (cookie->len > 0) 685 if (cookie->len > 0)
681 tfom->cookie = *cookie; 686 tfom->cookie = *cookie;
687 if (syn_lost) {
688 ++tfom->syn_loss;
689 tfom->last_syn_loss = jiffies;
690 } else
691 tfom->syn_loss = 0;
682 write_sequnlock_bh(&fastopen_seqlock); 692 write_sequnlock_bh(&fastopen_seqlock);
683 } 693 }
684 rcu_read_unlock(); 694 rcu_read_unlock();
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 88693281da4..c5cfd5ec318 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2860,10 +2860,19 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
2860{ 2860{
2861 struct tcp_sock *tp = tcp_sk(sk); 2861 struct tcp_sock *tp = tcp_sk(sk);
2862 struct tcp_fastopen_request *fo = tp->fastopen_req; 2862 struct tcp_fastopen_request *fo = tp->fastopen_req;
2863 int space, i, err = 0, iovlen = fo->data->msg_iovlen; 2863 int syn_loss = 0, space, i, err = 0, iovlen = fo->data->msg_iovlen;
2864 struct sk_buff *syn_data = NULL, *data; 2864 struct sk_buff *syn_data = NULL, *data;
2865 unsigned long last_syn_loss = 0;
2866
2867 tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie,
2868 &syn_loss, &last_syn_loss);
2869 /* Recurring FO SYN losses: revert to regular handshake temporarily */
2870 if (syn_loss > 1 &&
2871 time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
2872 fo->cookie.len = -1;
2873 goto fallback;
2874 }
2865 2875
2866 tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie);
2867 if (fo->cookie.len <= 0) 2876 if (fo->cookie.len <= 0)
2868 goto fallback; 2877 goto fallback;
2869 2878