author	Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>	2007-08-02 22:46:58 -0400
committer	David S. Miller <davem@davemloft.net>	2007-08-02 22:46:58 -0400
commit	2e6052941ae1f2f875d7d9092acb8836af1e0193 (patch)
tree	ade0bd7235403397b6165a5a2e788dd739912d94 /net/ipv4/tcp_input.c
parent	3a97aeb5c199070f136c085f8b0f9338d19c4148 (diff)
[TCP]: Also handle snd_una changes in tcp_cwnd_down
tcp_cwnd_down must check for it too, as it should be conservative in
case of collapse stuff and also when the receiver is trying to lie
(though that wouldn't be very successful/useful anyway).

Note:
- Separated is_dupack and do_lost in tcp_fastretrans_alert
  * Much cleaner look-and-feel now
  * This time it really fixes recovery entry on a cumulative ACK that
    carries many new SACK blocks (I claimed the last patch fixed this,
    but it didn't). TCP will now call tcp_update_scoreboard regardless
    of is_dupack when in recovery, as long as there are enough
    fackets_out.
- Introduce FLAG_SND_UNA_ADVANCED
  * Some prior_snd_una arguments are unnecessary after it
- Added helper FLAG_ANY_PROGRESS to avoid long FLAG...|FLAG... constructs

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
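As a quick illustration of the new classification, here is a standalone
userspace sketch (not kernel code: struct fake_tp and classify() are
made-up stand-ins; only the flag defines mirror the patch). It shows how
is_dupack and do_lost now fall out of pure bit tests on the flag word,
without passing prior_snd_una around:

/* Standalone sketch: how FLAG_SND_UNA_ADVANCED lets is_dupack/do_lost
 * be derived from the flag word alone. Compile with any C compiler. */
#include <stdio.h>

#define FLAG_DATA		0x01  /* Incoming frame contained data.	*/
#define FLAG_WIN_UPDATE		0x02  /* Incoming ACK was a window update.	*/
#define FLAG_DATA_ACKED		0x04  /* This ACK acknowledged new data.	*/
#define FLAG_SYN_ACKED		0x10  /* This ACK acknowledged SYN.		*/
#define FLAG_DATA_SACKED	0x20  /* New SACK.				*/
#define FLAG_SND_UNA_ADVANCED	0x400 /* Snd_una was changed.			*/

#define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)
#define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)

struct fake_tp { unsigned int fackets_out, reordering; };

static void classify(const struct fake_tp *tp, int flag)
{
	/* Same shape as the patched tcp_fastretrans_alert(): */
	int is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
	int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
				    (tp->fackets_out > tp->reordering));

	printf("flag=0x%03x is_dupack=%d do_lost=%d\n", flag, is_dupack, do_lost);
}

int main(void)
{
	struct fake_tp tp = { .fackets_out = 10, .reordering = 3 };

	/* Pure duplicate ACK: no data, no window update, nothing acked. */
	classify(&tp, 0);

	/* Cumulative ACK carrying many new SACK blocks: not a dupack,
	 * but do_lost still asks for tcp_update_scoreboard(). */
	classify(&tp, FLAG_DATA_ACKED | FLAG_SND_UNA_ADVANCED | FLAG_DATA_SACKED);

	return 0;
}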
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--	net/ipv4/tcp_input.c	34
1 file changed, 18 insertions(+), 16 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 378ca8a086a3..c3124e6de1d3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -102,11 +102,13 @@ int sysctl_tcp_abc __read_mostly;
 #define FLAG_DATA_LOST		0x80 /* SACK detected data lossage.		*/
 #define FLAG_SLOWPATH		0x100 /* Do not skip RFC checks for window update.*/
 #define FLAG_ONLY_ORIG_SACKED	0x200 /* SACKs only non-rexmit sent before RTO */
+#define FLAG_SND_UNA_ADVANCED	0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
 
 #define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)
 #define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
 #define FLAG_CA_ALERT		(FLAG_DATA_SACKED|FLAG_ECE)
 #define FLAG_FORWARD_PROGRESS	(FLAG_ACKED|FLAG_DATA_SACKED)
+#define FLAG_ANY_PROGRESS	(FLAG_FORWARD_PROGRESS|FLAG_SND_UNA_ADVANCED)
 
 #define IsReno(tp) ((tp)->rx_opt.sack_ok == 0)
 #define IsFack(tp) ((tp)->rx_opt.sack_ok & 2)
@@ -1856,7 +1858,7 @@ static void tcp_cwnd_down(struct sock *sk, int flag)
 	struct tcp_sock *tp = tcp_sk(sk);
 	int decr = tp->snd_cwnd_cnt + 1;
 
-	if ((flag&FLAG_FORWARD_PROGRESS) ||
+	if ((flag&FLAG_ANY_PROGRESS) ||
 	    (IsReno(tp) && !(flag&FLAG_NOT_DUP))) {
 		tp->snd_cwnd_cnt = decr&1;
 		decr >>= 1;
@@ -2107,15 +2109,13 @@ static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb)
  * tcp_xmit_retransmit_queue().
  */
 static void
-tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
-		      int prior_packets, int flag)
+tcp_fastretrans_alert(struct sock *sk, int prior_packets, int flag)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
-	int is_dupack = (tp->snd_una == prior_snd_una &&
-			 (!(flag&FLAG_NOT_DUP) ||
-			  ((flag&FLAG_DATA_SACKED) &&
-			   (tp->fackets_out > tp->reordering))));
+	int is_dupack = !(flag&(FLAG_SND_UNA_ADVANCED|FLAG_NOT_DUP));
+	int do_lost = is_dupack || ((flag&FLAG_DATA_SACKED) &&
+				    (tp->fackets_out > tp->reordering));
 
 	/* Some technical things:
 	 * 1. Reno does not count dupacks (sacked_out) automatically. */
@@ -2192,14 +2192,14 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 	/* F. Process state. */
 	switch (icsk->icsk_ca_state) {
 	case TCP_CA_Recovery:
-		if (prior_snd_una == tp->snd_una) {
+		if (!(flag & FLAG_SND_UNA_ADVANCED)) {
 			if (IsReno(tp) && is_dupack)
 				tcp_add_reno_sack(sk);
 		} else {
 			int acked = prior_packets - tp->packets_out;
 			if (IsReno(tp))
 				tcp_remove_reno_sacks(sk, acked);
-			is_dupack = tcp_try_undo_partial(sk, acked);
+			do_lost = tcp_try_undo_partial(sk, acked);
 		}
 		break;
 	case TCP_CA_Loss:
@@ -2215,7 +2215,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 	/* Loss is undone; fall through to processing in Open state. */
 	default:
 		if (IsReno(tp)) {
-			if (tp->snd_una != prior_snd_una)
+			if (flag & FLAG_SND_UNA_ADVANCED)
 				tcp_reset_reno_sack(tp);
 			if (is_dupack)
 				tcp_add_reno_sack(sk);
@@ -2264,7 +2264,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 		tcp_set_ca_state(sk, TCP_CA_Recovery);
 	}
 
-	if (is_dupack || tcp_head_timedout(sk))
+	if (do_lost || tcp_head_timedout(sk))
 		tcp_update_scoreboard(sk);
 	tcp_cwnd_down(sk, flag);
 	tcp_xmit_retransmit_queue(sk);
@@ -2684,7 +2684,7 @@ static void tcp_undo_spur_to_response(struct sock *sk, int flag)
  * to prove that the RTO is indeed spurious. It transfers the control
  * from F-RTO to the conventional RTO recovery
  */
-static int tcp_process_frto(struct sock *sk, u32 prior_snd_una, int flag)
+static int tcp_process_frto(struct sock *sk, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2704,8 +2704,7 @@ static int tcp_process_frto(struct sock *sk, u32 prior_snd_una, int flag)
 	 * ACK isn't duplicate nor advances window, e.g., opposite dir
 	 * data, winupdate
 	 */
-	if ((tp->snd_una == prior_snd_una) && (flag&FLAG_NOT_DUP) &&
-	    !(flag&FLAG_FORWARD_PROGRESS))
+	if (!(flag&FLAG_ANY_PROGRESS) && (flag&FLAG_NOT_DUP))
 		return 1;
 
 	if (!(flag&FLAG_DATA_ACKED)) {
@@ -2785,6 +2784,9 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	if (before(ack, prior_snd_una))
 		goto old_ack;
 
+	if (after(ack, prior_snd_una))
+		flag |= FLAG_SND_UNA_ADVANCED;
+
 	if (sysctl_tcp_abc) {
 		if (icsk->icsk_ca_state < TCP_CA_CWR)
 			tp->bytes_acked += ack - prior_snd_una;
@@ -2837,14 +2839,14 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	flag |= tcp_clean_rtx_queue(sk, &seq_rtt);
 
 	if (tp->frto_counter)
-		frto_cwnd = tcp_process_frto(sk, prior_snd_una, flag);
+		frto_cwnd = tcp_process_frto(sk, flag);
 
 	if (tcp_ack_is_dubious(sk, flag)) {
 		/* Advance CWND, if state allows this. */
 		if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
 		    tcp_may_raise_cwnd(sk, flag))
 			tcp_cong_avoid(sk, ack, prior_in_flight, 0);
-		tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag);
+		tcp_fastretrans_alert(sk, prior_packets, flag);
 	} else {
 		if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
 			tcp_cong_avoid(sk, ack, prior_in_flight, 1);