Diffstat (limited to 'net/ipv4/tcp_input.c')
 net/ipv4/tcp_input.c | 35
 1 file changed, 32 insertions(+), 3 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 57ae96a04220..12cab7d74dba 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2717,6 +2717,35 @@ static void tcp_try_undo_dsack(struct sock *sk)
 	}
 }
 
+/* We can clear retrans_stamp when there are no retransmissions in the
+ * window. It would seem that it is trivially available for us in
+ * tp->retrans_out, however, that kind of assumption doesn't consider
+ * what will happen if errors occur when sending a retransmission for the
+ * second time. ...It could be that such a segment has only
+ * TCPCB_EVER_RETRANS set at the present time. It seems that checking
+ * the head skb is enough except for some reneging corner cases that
+ * are not worth the effort.
+ *
+ * The main reason for all this complexity is that the connection dying
+ * time now depends on the validity of the retrans_stamp, in particular,
+ * that successive retransmissions of a segment must not advance
+ * retrans_stamp under any conditions.
+ */
+static int tcp_any_retrans_done(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct sk_buff *skb;
+
+	if (tp->retrans_out)
+		return 1;
+
+	skb = tcp_write_queue_head(sk);
+	if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
+		return 1;
+
+	return 0;
+}
+
 /* Undo during fast recovery after partial ACK. */
 
 static int tcp_try_undo_partial(struct sock *sk, int acked)
@@ -2729,7 +2758,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked)
 		/* Plain luck! Hole if filled with delayed
 		 * packet, rather than with a retransmit.
 		 */
-		if (tp->retrans_out == 0)
+		if (!tcp_any_retrans_done(sk))
 			tp->retrans_stamp = 0;
 
 		tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
@@ -2788,7 +2817,7 @@ static void tcp_try_keep_open(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 	int state = TCP_CA_Open;
 
-	if (tcp_left_out(tp) || tp->retrans_out || tp->undo_marker)
+	if (tcp_left_out(tp) || tcp_any_retrans_done(sk) || tp->undo_marker)
 		state = TCP_CA_Disorder;
 
 	if (inet_csk(sk)->icsk_ca_state != state) {
@@ -2803,7 +2832,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
 
 	tcp_verify_left_out(tp);
 
-	if (!tp->frto_counter && tp->retrans_out == 0)
+	if (!tp->frto_counter && !tcp_any_retrans_done(sk))
 		tp->retrans_stamp = 0;
 
 	if (flag & FLAG_ECE)
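
The comment above tcp_any_retrans_done() notes that the connection dying time depends on retrans_stamp keeping its value across successive retransmissions of the same segment. The sketch below is not kernel code; it is a minimal userspace illustration with made-up names (struct conn, record_retransmission, connection_timed_out, retrans_limit_secs) of why the abort decision only works if the stamp marks the first retransmission: re-stamping on every retry would keep the measured age near zero and the connection alive indefinitely.

#include <stdbool.h>
#include <time.h>

/* Hypothetical illustration only: a simplified stand-in for
 * retransmission-timeout bookkeeping, not the kernel's data structures.
 */
struct conn {
	time_t retrans_stamp;     /* time of the FIRST retransmission, 0 if none */
	unsigned int retransmits; /* number of retransmissions sent so far */
};

static void record_retransmission(struct conn *c, time_t now)
{
	c->retransmits++;
	/* Stamp only the first retransmission.  If this were written as an
	 * unconditional "c->retrans_stamp = now;", the elapsed time computed
	 * in connection_timed_out() would reset on every retry and the
	 * connection would never be declared dead.
	 */
	if (!c->retrans_stamp)
		c->retrans_stamp = now;
}

static bool connection_timed_out(const struct conn *c, time_t now,
				 time_t retrans_limit_secs)
{
	if (!c->retransmits)
		return false;
	/* Age is measured from the first retransmission's stamp. */
	return (now - c->retrans_stamp) >= retrans_limit_secs;
}

Once every retransmission in the window has been accounted for, the stamp can safely be cleared again; that is the decision the new tcp_any_retrans_done() helper gates in tcp_try_undo_partial(), tcp_try_keep_open() and tcp_try_to_open() above, instead of relying on tp->retrans_out alone.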