author      Neal Cardwell <ncardwell@google.com>   2011-11-16 03:58:04 -0500
committer   David S. Miller <davem@davemloft.net>  2011-11-27 18:54:09 -0500
commit      f698204bd0bfdc645642e271da117b56b795aee0 (patch)
tree        a08c3714c3f1e0e523db799d0455b9e9b17fa8ab /net/ipv4/tcp_input.c
parent      e95ae2f2cf10f7bf27b492aa6188f3cd745de162 (diff)
tcp: allow undo from reordered DSACKs
Previously, SACK-enabled connections hung around in TCP_CA_Disorder
state while snd_una==high_seq, just waiting to accumulate DSACKs and
hopefully undo a cwnd reduction. This could and did lead to the
following unfortunate scenario: if some incoming ACKs advance snd_una
beyond high_seq then we were setting undo_marker to 0 and moving to
TCP_CA_Open, so if (due to reordering in the ACK return path) we
shortly thereafter received a DSACK then we were no longer able to
undo the cwnd reduction.
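To make the race concrete, here is a minimal C sketch of the old behavior; note that the struct fields and functions below are simplified toy stand-ins modeled on struct tcp_sock, not the kernel code itself:

#include <stdio.h>

/* Simplified stand-ins for the relevant tcp_sock fields (toy names,
 * not kernel code). */
struct toy_tcp_sock {
	unsigned int snd_una;     /* oldest unacknowledged sequence number */
	unsigned int high_seq;    /* snd_nxt when the cwnd reduction began */
	unsigned int undo_marker; /* nonzero while a cwnd undo is still possible */
};

/* Old behavior (sketch): advancing snd_una beyond high_seq cleared
 * undo_marker, so a DSACK arriving just afterward (reordered on the
 * ACK return path) could no longer trigger an undo. */
static void toy_old_ack(struct toy_tcp_sock *tp, unsigned int ack_seq)
{
	tp->snd_una = ack_seq;
	if (tp->snd_una > tp->high_seq)
		tp->undo_marker = 0; /* undo state discarded too early */
}

int main(void)
{
	struct toy_tcp_sock tp = { .snd_una = 90, .high_seq = 100, .undo_marker = 1 };

	toy_old_ack(&tp, 101); /* an ACK advances snd_una past high_seq */
	printf("undo still possible? %u\n", tp.undo_marker); /* 0: a DSACK arriving now is wasted */
	return 0;
}

In this sketch, once the ACK passes high_seq the undo state is gone, so the reordered DSACK that arrives a moment later can no longer prove the cwnd reduction was spurious.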
The change: Simplify the congestion avoidance state machine by
removing the behavior where SACK-enabled connections hung around in
the TCP_CA_Disorder state just waiting for DSACKs. Instead, when
snd_una advances to high_seq or beyond we typically move to
TCP_CA_Open immediately and allow an undo in either TCP_CA_Open or
TCP_CA_Disorder if we later receive enough DSACKs.
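The single-state test this replaces becomes a range test in the diff below, which works because of how the congestion-avoidance states are ordered. A self-contained sketch; the enum values match the kernel's enum tcp_ca_state, while the checker and main() are purely illustrative:

#include <stdio.h>

/* Congestion-avoidance state values, ordered as in the kernel's
 * enum tcp_ca_state; the ordering is what makes a "<=" range test work. */
enum tcp_ca_state {
	TCP_CA_Open = 0,
	TCP_CA_Disorder = 1,
	TCP_CA_CWR = 2,
	TCP_CA_Recovery = 3,
	TCP_CA_Loss = 4,
};

/* With the patch, a DSACK-based undo is attempted whenever the
 * connection is in Open or Disorder, i.e. not in CWR/Recovery/Loss. */
static int may_try_undo_dsack(enum tcp_ca_state state)
{
	return state <= TCP_CA_Disorder;
}

int main(void)
{
	printf("Open: %d  Disorder: %d  Recovery: %d\n",
	       may_try_undo_dsack(TCP_CA_Open),
	       may_try_undo_dsack(TCP_CA_Disorder),
	       may_try_undo_dsack(TCP_CA_Recovery));
	return 0; /* prints: Open: 1  Disorder: 1  Recovery: 0 */
}

Because CWR, Recovery and Loss all compare greater than TCP_CA_Disorder, the "<=" form is a compact way of saying "in Open or Disorder".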
Other patches in this series will provide the remaining changes that are
necessary to fully fix this problem.
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--  net/ipv4/tcp_input.c  15
1 files changed, 2 insertions, 13 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 751d39060fb8..a4efdd7cf5a1 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2858,7 +2858,7 @@ static void tcp_try_keep_open(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 	int state = TCP_CA_Open;
 
-	if (tcp_left_out(tp) || tcp_any_retrans_done(sk) || tp->undo_marker)
+	if (tcp_left_out(tp) || tcp_any_retrans_done(sk))
 		state = TCP_CA_Disorder;
 
 	if (inet_csk(sk)->icsk_ca_state != state) {
@@ -3066,17 +3066,6 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
 		}
 		break;
 
-	case TCP_CA_Disorder:
-		tcp_try_undo_dsack(sk);
-		if (!tp->undo_marker ||
-		    /* For SACK case do not Open to allow to undo
-		     * catching for all duplicate ACKs. */
-		    tcp_is_reno(tp) || tp->snd_una != tp->high_seq) {
-			tp->undo_marker = 0;
-			tcp_set_ca_state(sk, TCP_CA_Open);
-		}
-		break;
-
 	case TCP_CA_Recovery:
 		if (tcp_is_reno(tp))
 			tcp_reset_reno_sack(tp);
@@ -3117,7 +3106,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
 			tcp_add_reno_sack(sk);
 	}
 
-	if (icsk->icsk_ca_state == TCP_CA_Disorder)
+	if (icsk->icsk_ca_state <= TCP_CA_Disorder)
 		tcp_try_undo_dsack(sk);
 
 	if (!tcp_time_to_recover(sk)) {