author		Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>	2007-08-09 07:53:36 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-10-10 19:47:58 -0400
commit		1b6d427bb7eb69e6dc4f194a5b0f4a382a16ff82 (patch)
tree		d67f6ea9a5f581f83b4d8228fc2964c70f940d5a
parent		d02596e32925edaeccee0af8eb6c229b5615de42 (diff)
[TCP]: Reduce sacked_out with reno when purging write_queue
Previously TCP had a transitional state during which reno counted
segments that were already below the current window into sacked_out;
that is now prevented. In addition, the unconditional S+L skb
catching is now re-tried.

This approach conservatively calls just remove_sack and leaves the
reset_sack() calls alone. The best solution to the whole problem
would be to first calculate the new sacked_out fully (this patch
does not move the reno_sack_reset calls from their original sites
and thus does not implement this). However, that would require a
very invasive change to fastretrans_alert (perhaps even slicing it
into two halves). Alternatively, all callers of tcp_packets_in_flight
(i.e., users that depend on sacked_out) could be postponed until the
new sacked_out has been calculated, but that would be no simpler.
Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
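
[Editor's note] For readers who want the bookkeeping spelled out: under plain
reno there is no SACK information, so each duplicate ACK is counted into
sacked_out as one virtually SACKed segment, and a cumulative ACK that purges
segments from the write queue must shrink sacked_out again. The stand-alone C
sketch below models that accounting; it is illustrative only, not kernel code.
The names merely echo their kernel counterparts, and the acked - 1 adjustment
copies the kernel's convention that the ACK advancing snd_una stands in for
one counted dupack.

#include <assert.h>
#include <stdio.h>

struct tcp_model {
	unsigned int packets_out;	/* segments currently in the write queue */
	unsigned int sacked_out;	/* dupacks counted as virtual SACKs (reno) */
	unsigned int lost_out;		/* segments presumed lost */
	unsigned int retrans_out;	/* retransmitted segments in flight */
};

/* Mirrors tcp_left_out(): segments accounted out of the window. */
static unsigned int left_out(const struct tcp_model *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* Mirrors tcp_packets_in_flight(): a stale sacked_out makes this too small. */
static unsigned int packets_in_flight(const struct tcp_model *tp)
{
	return tp->packets_out - left_out(tp) + tp->retrans_out;
}

/* Under plain reno each dupack stands in for one SACKed segment. */
static void add_reno_sack(struct tcp_model *tp)
{
	tp->sacked_out++;
}

/* A cumulative ACK purged 'acked' segments from the write queue; the
 * virtually SACKed segments now below the window must leave sacked_out
 * too, or left_out() can transiently exceed packets_out. */
static void remove_reno_sacks(struct tcp_model *tp, unsigned int acked)
{
	if (acked == 0)
		return;
	if (acked - 1 >= tp->sacked_out)
		tp->sacked_out = 0;
	else
		tp->sacked_out -= acked - 1;
}

int main(void)
{
	struct tcp_model tp = { .packets_out = 10 };

	add_reno_sack(&tp);	/* three dupacks arrive */
	add_reno_sack(&tp);
	add_reno_sack(&tp);

	tp.packets_out -= 3;	/* a partial ACK covers three segments */
	remove_reno_sacks(&tp, 3);

	/* The invariant tcp_verify_left_out() now enforces for reno too. */
	assert(left_out(&tp) <= tp.packets_out);
	printf("in flight: %u\n", packets_in_flight(&tp));
	return 0;
}

The assert() plays the role of tcp_verify_left_out(); before this patch the
check was skipped for reno, so the transitional overcount went unnoticed.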
 include/net/tcp.h    |  3 +--
 net/ipv4/tcp_input.c | 15 +++++++--------
 2 files changed, 8 insertions(+), 10 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 7042c32085f5..064c92fe00d2 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -759,8 +759,7 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
 }
 
 /* Use define here intentionally to get BUG_ON location shown at the caller */
-#define tcp_verify_left_out(tp) \
-	BUG_ON(tp->rx_opt.sack_ok && (tcp_left_out(tp) > tp->packets_out))
+#define tcp_verify_left_out(tp)	BUG_ON(tcp_left_out(tp) > tp->packets_out)
 
 extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
 extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
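
[Editor's note] For context, the tcp_left_out() helper used by the macro is a
plain sum defined in the same header (reproduced from the tree around this
commit; verify against your exact revision):

/* Virtually SACKed plus presumed-lost segments. */
static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

Dropping the tp->rx_opt.sack_ok guard means the BUG_ON now fires for reno
connections too, which the tcp_input.c changes below make safe.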
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index bf4fc3516fb9..f8af018dd224 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2187,7 +2187,7 @@ static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb)
  * tcp_xmit_retransmit_queue().
  */
 static void
-tcp_fastretrans_alert(struct sock *sk, int prior_packets, int flag)
+tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -2273,12 +2273,8 @@ tcp_fastretrans_alert(struct sock *sk, int prior_packets, int flag)
 		if (!(flag & FLAG_SND_UNA_ADVANCED)) {
 			if (IsReno(tp) && is_dupack)
 				tcp_add_reno_sack(sk);
-		} else {
-			int acked = prior_packets - tp->packets_out;
-			if (IsReno(tp))
-				tcp_remove_reno_sacks(sk, acked);
-			do_lost = tcp_try_undo_partial(sk, acked);
-		}
+		} else
+			do_lost = tcp_try_undo_partial(sk, pkts_acked);
 		break;
 	case TCP_CA_Loss:
 		if (flag&FLAG_DATA_ACKED)
@@ -2577,6 +2573,9 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
 		tcp_ack_update_rtt(sk, acked, seq_rtt);
 		tcp_ack_packets_out(sk);
 
+		if (IsReno(tp))
+			tcp_remove_reno_sacks(sk, pkts_acked);
+
 		if (ca_ops->pkts_acked) {
 			s32 rtt_us = -1;
 
@@ -2927,7 +2926,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 		if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
 		    tcp_may_raise_cwnd(sk, flag))
 			tcp_cong_avoid(sk, ack, prior_in_flight, 0);
-		tcp_fastretrans_alert(sk, prior_packets, flag);
+		tcp_fastretrans_alert(sk, prior_packets - tp->packets_out, flag);
 	} else {
 		if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
 			tcp_cong_avoid(sk, ack, prior_in_flight, 1);
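
[Editor's note] The resulting ordering in tcp_ack() is the crux of the last
two hunks: tcp_clean_rtx_queue() runs before tcp_fastretrans_alert(), so
sacked_out is already consistent when congestion-state processing looks at
it, and the alert function receives the purged-segment count directly. Below
is a hedged, compilable approximation using stub state; the real functions
take a struct sock and far more parameters.

#include <stdio.h>

/* Stub standing in for the relevant struct tcp_sock fields. */
static struct {
	unsigned int packets_out;
	unsigned int sacked_out;
} tp = { .packets_out = 10, .sacked_out = 3 };

/* Stand-in for tcp_clean_rtx_queue(): purges cumulatively acked skbs and,
 * after this patch, fixes up reno's sacked_out right here instead of
 * leaving it to tcp_fastretrans_alert(). */
static void clean_rtx_queue(unsigned int acked)
{
	tp.packets_out -= acked;
	if (acked)
		tp.sacked_out -= (acked - 1 >= tp.sacked_out)
					? tp.sacked_out : acked - 1;
}

/* Stand-in for tcp_fastretrans_alert(): it now receives pkts_acked from
 * the caller and observes an already-consistent sacked_out. */
static void fastretrans_alert(unsigned int pkts_acked)
{
	printf("pkts_acked=%u sacked_out=%u packets_out=%u\n",
	       pkts_acked, tp.sacked_out, tp.packets_out);
}

int main(void)
{
	unsigned int prior_packets = tp.packets_out;

	clean_rtx_queue(3);	/* runs first within tcp_ack() */
	fastretrans_alert(prior_packets - tp.packets_out);
	return 0;
}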