author    | Ilpo Järvinen <ilpo.jarvinen@helsinki.fi> | 2007-11-11 00:22:18 -0500
committer | David S. Miller <davem@davemloft.net> | 2007-11-11 00:22:18 -0500
commit    | c7caf8d3ed7a6617aa0a3083815c439bd952c45c (patch)
tree      | 54651b17cea03bbae8f7ea89f0e884ad369d9003 /net/ipv4
parent    | 8dd71c5d28cd88d4400e7f474986e799e39aff37 (diff)
[TCP]: Fix reord detection due to snd_una covered holes
Fixes a subtle bug, like the one with fastpath_cnt_hint, that happens
due to the way GSO and the hints interact. Because hints are not
reset when just a GSOed skb is partially ACKed, there is no guarantee
that the relevant part of the write queue (the skbs below snd_una)
will be processed in sacktag at all, because the fastpath hint can
fast-forward the entry point past them.
This also clears the way for future reductions in sacktag's skb
processing. Further cleanups in sacktag can be made after this
(in 2.6.25).
This may make the reordering update in tcp_try_undo_partial
redundant, but I'm not too sure, so I left it there.
Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
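For reference, the check this patch introduces can be illustrated outside the kernel. The sketch below is a minimal, self-contained user-space model (the names fake_skb, detect_reordering, and the FAKE_* flags are illustrative assumptions, not kernel code): it walks the skbs that a cumulative ACK removes from the retransmit queue, records the lowest packet position that was neither retransmitted nor SACKed, and flags reordering when that position lies below the fackets_out value sampled before the ACK, mirroring the new "non-retransmitted hole got filled" test in tcp_clean_rtx_queue().

```c
/*
 * Minimal user-space sketch (not kernel code) of the reordering check
 * added to tcp_clean_rtx_queue(): remember the lowest packet index
 * ("reord") among cumulatively ACKed skbs that were never retransmitted
 * nor SACKed; if it lies below the pre-ACK fackets_out, a hole was
 * filled without a retransmission, i.e. the network reordered packets.
 */
#include <stdio.h>

#define FAKE_SACKED_ACKED   0x01	/* stands in for TCPCB_SACKED_ACKED   */
#define FAKE_SACKED_RETRANS 0x02	/* stands in for TCPCB_SACKED_RETRANS */

struct fake_skb {
	unsigned int pcount;	/* packets carried by this (possibly GSOed) skb */
	unsigned int sacked;	/* SACK/retransmission state flags */
};

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Returns nonzero if the cumulative ACK revealed reordering. */
static int detect_reordering(const struct fake_skb *acked, int n,
			     unsigned int prior_fackets,
			     unsigned int packets_out)
{
	unsigned int cnt = 0;			/* packets walked so far */
	unsigned int reord = packets_out;	/* lowest "plain" position */
	int i;

	for (i = 0; i < n; i++) {
		/* Only never-retransmitted, never-SACKed skbs count: their
		 * arrival is confirmed purely by this cumulative ACK. */
		if (!(acked[i].sacked & (FAKE_SACKED_RETRANS | FAKE_SACKED_ACKED)))
			reord = min_u32(cnt, reord);
		cnt += acked[i].pcount;
	}

	/* Non-retransmitted hole got filled? That's reordering. */
	return reord < prior_fackets;
}

int main(void)
{
	/* skb 0 sat in a hole (never SACKed) while skb 1 was SACKed earlier;
	 * a cumulative ACK now covers both without any retransmission. */
	struct fake_skb acked[] = {
		{ .pcount = 1, .sacked = 0 },
		{ .pcount = 1, .sacked = FAKE_SACKED_ACKED },
	};

	printf("reordering detected: %d\n",
	       detect_reordering(acked, 2, 2, 2));
	return 0;
}
```

In the actual patch the distance tp->fackets_out - reord is fed to tcp_update_reordering(); the sketch only reports whether the condition fires.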
Diffstat (limited to 'net/ipv4')
-rw-r--r-- | net/ipv4/tcp_input.c | 50
1 file changed, 32 insertions(+), 18 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 0f757578f3bd..9fc9096ada8a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1417,11 +1417,6 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 					if ((dup_sack && in_sack) &&
 					    (sacked&TCPCB_SACKED_ACKED))
 						reord = min(fack_count, reord);
-				} else {
-					/* If it was in a hole, we detected reordering. */
-					if (fack_count < prior_fackets &&
-					    !(sacked&TCPCB_SACKED_ACKED))
-						reord = min(fack_count, reord);
 				}
 
 				/* Nothing to do; acked frame is about to be dropped. */
@@ -2634,7 +2629,8 @@ static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
  * is before the ack sequence we can discard it as it's confirmed to have
  * arrived at the other end.
  */
-static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
+static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p,
+			       int prior_fackets)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2643,6 +2639,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
 	int fully_acked = 1;
 	int flag = 0;
 	int prior_packets = tp->packets_out;
+	u32 cnt = 0;
+	u32 reord = tp->packets_out;
 	s32 seq_rtt = -1;
 	ktime_t last_ackt = net_invalid_timestamp();
 
@@ -2683,10 +2681,14 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
 				if ((flag & FLAG_DATA_ACKED) ||
 				    (packets_acked > 1))
 					flag |= FLAG_NONHEAD_RETRANS_ACKED;
-			} else if (seq_rtt < 0) {
-				seq_rtt = now - scb->when;
-				if (fully_acked)
-					last_ackt = skb->tstamp;
+			} else {
+				if (seq_rtt < 0) {
+					seq_rtt = now - scb->when;
+					if (fully_acked)
+						last_ackt = skb->tstamp;
+				}
+				if (!(sacked & TCPCB_SACKED_ACKED))
+					reord = min(cnt, reord);
 			}
 
 			if (sacked & TCPCB_SACKED_ACKED)
@@ -2697,12 +2699,16 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
 			if ((sacked & TCPCB_URG) && tp->urg_mode &&
 			    !before(end_seq, tp->snd_up))
 				tp->urg_mode = 0;
-		} else if (seq_rtt < 0) {
-			seq_rtt = now - scb->when;
-			if (fully_acked)
-				last_ackt = skb->tstamp;
+		} else {
+			if (seq_rtt < 0) {
+				seq_rtt = now - scb->when;
+				if (fully_acked)
+					last_ackt = skb->tstamp;
+			}
+			reord = min(cnt, reord);
 		}
 		tp->packets_out -= packets_acked;
+		cnt += packets_acked;
 
 		/* Initial outgoing SYN's get put onto the write_queue
 		 * just like anything else we transmit. It is not
@@ -2734,13 +2740,18 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
 		tcp_ack_update_rtt(sk, flag, seq_rtt);
 		tcp_rearm_rto(sk);
 
+		if (tcp_is_reno(tp)) {
+			tcp_remove_reno_sacks(sk, pkts_acked);
+		} else {
+			/* Non-retransmitted hole got filled? That's reordering */
+			if (reord < prior_fackets)
+				tcp_update_reordering(sk, tp->fackets_out - reord, 0);
+		}
+
 		tp->fackets_out -= min(pkts_acked, tp->fackets_out);
 		/* hint's skb might be NULL but we don't need to care */
 		tp->fastpath_cnt_hint -= min_t(u32, pkts_acked,
 					       tp->fastpath_cnt_hint);
-		if (tcp_is_reno(tp))
-			tcp_remove_reno_sacks(sk, pkts_acked);
-
 		if (ca_ops->pkts_acked) {
 			s32 rtt_us = -1;
 
@@ -3023,6 +3034,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	u32 ack_seq = TCP_SKB_CB(skb)->seq;
 	u32 ack = TCP_SKB_CB(skb)->ack_seq;
 	u32 prior_in_flight;
+	u32 prior_fackets;
 	s32 seq_rtt;
 	int prior_packets;
 	int frto_cwnd = 0;
@@ -3047,6 +3059,8 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 			tp->bytes_acked += min(ack - prior_snd_una, tp->mss_cache);
 	}
 
+	prior_fackets = tp->fackets_out;
+
 	if (!(flag&FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
 		/* Window is constant, pure forward advance.
 		 * No more checks are required.
@@ -3088,7 +3102,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	prior_in_flight = tcp_packets_in_flight(tp);
 
 	/* See if we can take anything off of the retransmit queue. */
-	flag |= tcp_clean_rtx_queue(sk, &seq_rtt);
+	flag |= tcp_clean_rtx_queue(sk, &seq_rtt, prior_fackets);
 
 	/* Guarantee sacktag reordering detection against wrap-arounds */
 	if (before(tp->frto_highmark, tp->snd_una))