aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4/tcp_output.c
diff options
context:
space:
mode:
authorYuchung Cheng <ycheng@google.com>2017-01-13 01:11:38 -0500
committerDavid S. Miller <davem@davemloft.net>2017-01-13 22:37:16 -0500
commit840a3cbe89694fad75578856976f180e852e69aa (patch)
treef5b99c39985394312b5662604e2476eb795dd27b /net/ipv4/tcp_output.c
parent89fe18e44f7ee5ab1c90d0dff5835acee7751427 (diff)
tcp: remove forward retransmit feature
Forward retransmit is an esoteric feature in RFC3517 (condition (3) in the NextSeg()). Basically, if a packet is not considered lost by the current criteria (# of dupacks etc.), but the congestion window has room for more packets, then retransmit this packet. However, it actually conflicts with the rest of the recovery design. For example, when reordering is detected we want to be conservative in retransmitting packets, but the forward-retransmit feature would break that and force more retransmissions. Also, the implementation is fairly complicated inside the retransmission logic, inducing extra iterations in the write queue. With RACK, losses are detected in a timely manner and this heuristic is no longer necessary. Therefore this patch removes the feature. Signed-off-by: Yuchung Cheng <ycheng@google.com> Signed-off-by: Neal Cardwell <ncardwell@google.com> Acked-by: Eric Dumazet <edumazet@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--net/ipv4/tcp_output.c61
1 files changed, 3 insertions, 58 deletions
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 0ba9026cb70d..6327e4d368a4 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2831,36 +2831,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
2831 return err; 2831 return err;
2832} 2832}
2833 2833
2834/* Check if we forward retransmits are possible in the current
2835 * window/congestion state.
2836 */
2837static bool tcp_can_forward_retransmit(struct sock *sk)
2838{
2839 const struct inet_connection_sock *icsk = inet_csk(sk);
2840 const struct tcp_sock *tp = tcp_sk(sk);
2841
2842 /* Forward retransmissions are possible only during Recovery. */
2843 if (icsk->icsk_ca_state != TCP_CA_Recovery)
2844 return false;
2845
2846 /* No forward retransmissions in Reno are possible. */
2847 if (tcp_is_reno(tp))
2848 return false;
2849
2850 /* Yeah, we have to make difficult choice between forward transmission
2851 * and retransmission... Both ways have their merits...
2852 *
2853 * For now we do not retransmit anything, while we have some new
2854 * segments to send. In the other cases, follow rule 3 for
2855 * NextSeg() specified in RFC3517.
2856 */
2857
2858 if (tcp_may_send_now(sk))
2859 return false;
2860
2861 return true;
2862}
2863
2864/* This gets called after a retransmit timeout, and the initially 2834/* This gets called after a retransmit timeout, and the initially
2865 * retransmitted data is acknowledged. It tries to continue 2835 * retransmitted data is acknowledged. It tries to continue
2866 * resending the rest of the retransmit queue, until either 2836 * resending the rest of the retransmit queue, until either
@@ -2875,24 +2845,16 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
2875 struct tcp_sock *tp = tcp_sk(sk); 2845 struct tcp_sock *tp = tcp_sk(sk);
2876 struct sk_buff *skb; 2846 struct sk_buff *skb;
2877 struct sk_buff *hole = NULL; 2847 struct sk_buff *hole = NULL;
2878 u32 max_segs, last_lost; 2848 u32 max_segs;
2879 int mib_idx; 2849 int mib_idx;
2880 int fwd_rexmitting = 0;
2881 2850
2882 if (!tp->packets_out) 2851 if (!tp->packets_out)
2883 return; 2852 return;
2884 2853
2885 if (!tp->lost_out)
2886 tp->retransmit_high = tp->snd_una;
2887
2888 if (tp->retransmit_skb_hint) { 2854 if (tp->retransmit_skb_hint) {
2889 skb = tp->retransmit_skb_hint; 2855 skb = tp->retransmit_skb_hint;
2890 last_lost = TCP_SKB_CB(skb)->end_seq;
2891 if (after(last_lost, tp->retransmit_high))
2892 last_lost = tp->retransmit_high;
2893 } else { 2856 } else {
2894 skb = tcp_write_queue_head(sk); 2857 skb = tcp_write_queue_head(sk);
2895 last_lost = tp->snd_una;
2896 } 2858 }
2897 2859
2898 max_segs = tcp_tso_segs(sk, tcp_current_mss(sk)); 2860 max_segs = tcp_tso_segs(sk, tcp_current_mss(sk));
@@ -2915,31 +2877,14 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
2915 */ 2877 */
2916 segs = min_t(int, segs, max_segs); 2878 segs = min_t(int, segs, max_segs);
2917 2879
2918 if (fwd_rexmitting) { 2880 if (tp->retrans_out >= tp->lost_out) {
2919begin_fwd: 2881 break;
2920 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
2921 break;
2922 mib_idx = LINUX_MIB_TCPFORWARDRETRANS;
2923
2924 } else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) {
2925 tp->retransmit_high = last_lost;
2926 if (!tcp_can_forward_retransmit(sk))
2927 break;
2928 /* Backtrack if necessary to non-L'ed skb */
2929 if (hole) {
2930 skb = hole;
2931 hole = NULL;
2932 }
2933 fwd_rexmitting = 1;
2934 goto begin_fwd;
2935
2936 } else if (!(sacked & TCPCB_LOST)) { 2882 } else if (!(sacked & TCPCB_LOST)) {
2937 if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED))) 2883 if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
2938 hole = skb; 2884 hole = skb;
2939 continue; 2885 continue;
2940 2886
2941 } else { 2887 } else {
2942 last_lost = TCP_SKB_CB(skb)->end_seq;
2943 if (icsk->icsk_ca_state != TCP_CA_Loss) 2888 if (icsk->icsk_ca_state != TCP_CA_Loss)
2944 mib_idx = LINUX_MIB_TCPFASTRETRANS; 2889 mib_idx = LINUX_MIB_TCPFASTRETRANS;
2945 else 2890 else