aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4/tcp_input.c
diff options
context:
space:
mode:
authorYuchung Cheng <ycheng@google.com>2013-07-22 19:20:48 -0400
committerDavid S. Miller <davem@davemloft.net>2013-07-22 20:53:42 -0400
commited08495c31bb991de636d2488abaa50b39f2ff4a (patch)
treeecbd6be76046918abb5a61941d0f845ea2c97b27 /net/ipv4/tcp_input.c
parent59c9af4234b0c21a1ed05cf65bf014d0c1a67bfd (diff)
tcp: use RTT from SACK for RTO
If RTT is not available because Karn's check has failed or no new packet is acked, use the RTT measured from SACK to estimate the RTO. The sender can continue to estimate the RTO during loss recovery or reordering event upon receiving non-partial ACKs. This also changes when the RTO is re-armed. Previously it was only re-armed when some data is cumulatively acknowledged (i.e., SND.UNA advances), but now it is re-armed whenever the RTT estimator is updated. This feature is particularly useful to reduce spurious timeout for buffer bloat including cellular carriers [1], and RTT estimation on reordering events. [1] "An In-depth Study of LTE: Effect of Network Protocol and Application Behavior on Performance", In Proc. of SIGCOMM 2013 Signed-off-by: Yuchung Cheng <ycheng@google.com> Acked-by: Neal Cardwell <ncardwell@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--net/ipv4/tcp_input.c23
1 files changed, 14 insertions, 9 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b85bc7c3736a..b61274b666f6 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2800,8 +2800,8 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
2800 tcp_xmit_retransmit_queue(sk); 2800 tcp_xmit_retransmit_queue(sk);
2801} 2801}
2802 2802
2803static inline void tcp_ack_update_rtt(struct sock *sk, const int flag, 2803static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
2804 s32 seq_rtt) 2804 s32 seq_rtt, s32 sack_rtt)
2805{ 2805{
2806 const struct tcp_sock *tp = tcp_sk(sk); 2806 const struct tcp_sock *tp = tcp_sk(sk);
2807 2807
@@ -2813,6 +2813,9 @@ static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
2813 if (flag & FLAG_RETRANS_DATA_ACKED) 2813 if (flag & FLAG_RETRANS_DATA_ACKED)
2814 seq_rtt = -1; 2814 seq_rtt = -1;
2815 2815
2816 if (seq_rtt < 0)
2817 seq_rtt = sack_rtt;
2818
2816 /* RTTM Rule: A TSecr value received in a segment is used to 2819 /* RTTM Rule: A TSecr value received in a segment is used to
2817 * update the averaged RTT measurement only if the segment 2820 * update the averaged RTT measurement only if the segment
2818 * acknowledges some new data, i.e., only if it advances the 2821 * acknowledges some new data, i.e., only if it advances the
@@ -2823,13 +2826,14 @@ static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
2823 seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr; 2826 seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
2824 2827
2825 if (seq_rtt < 0) 2828 if (seq_rtt < 0)
2826 return; 2829 return false;
2827 2830
2828 tcp_rtt_estimator(sk, seq_rtt); 2831 tcp_rtt_estimator(sk, seq_rtt);
2829 tcp_set_rto(sk); 2832 tcp_set_rto(sk);
2830 2833
2831 /* RFC6298: only reset backoff on valid RTT measurement. */ 2834 /* RFC6298: only reset backoff on valid RTT measurement. */
2832 inet_csk(sk)->icsk_backoff = 0; 2835 inet_csk(sk)->icsk_backoff = 0;
2836 return true;
2833} 2837}
2834 2838
2835/* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */ 2839/* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */
@@ -2840,7 +2844,7 @@ static void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req)
2840 2844
2841 if (tp->lsndtime && !tp->total_retrans) 2845 if (tp->lsndtime && !tp->total_retrans)
2842 seq_rtt = tcp_time_stamp - tp->lsndtime; 2846 seq_rtt = tcp_time_stamp - tp->lsndtime;
2843 tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt); 2847 tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt, -1);
2844} 2848}
2845 2849
2846static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) 2850static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
@@ -2929,7 +2933,7 @@ static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
2929 * arrived at the other end. 2933 * arrived at the other end.
2930 */ 2934 */
2931static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, 2935static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
2932 u32 prior_snd_una) 2936 u32 prior_snd_una, s32 sack_rtt)
2933{ 2937{
2934 struct tcp_sock *tp = tcp_sk(sk); 2938 struct tcp_sock *tp = tcp_sk(sk);
2935 const struct inet_connection_sock *icsk = inet_csk(sk); 2939 const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -3019,6 +3023,10 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
3019 if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) 3023 if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
3020 flag |= FLAG_SACK_RENEGING; 3024 flag |= FLAG_SACK_RENEGING;
3021 3025
3026 if (tcp_ack_update_rtt(sk, flag, seq_rtt, sack_rtt) ||
3027 (flag & FLAG_ACKED))
3028 tcp_rearm_rto(sk);
3029
3022 if (flag & FLAG_ACKED) { 3030 if (flag & FLAG_ACKED) {
3023 const struct tcp_congestion_ops *ca_ops 3031 const struct tcp_congestion_ops *ca_ops
3024 = inet_csk(sk)->icsk_ca_ops; 3032 = inet_csk(sk)->icsk_ca_ops;
@@ -3028,9 +3036,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
3028 tcp_mtup_probe_success(sk); 3036 tcp_mtup_probe_success(sk);
3029 } 3037 }
3030 3038
3031 tcp_ack_update_rtt(sk, flag, seq_rtt);
3032 tcp_rearm_rto(sk);
3033
3034 if (tcp_is_reno(tp)) { 3039 if (tcp_is_reno(tp)) {
3035 tcp_remove_reno_sacks(sk, pkts_acked); 3040 tcp_remove_reno_sacks(sk, pkts_acked);
3036 } else { 3041 } else {
@@ -3339,7 +3344,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3339 3344
3340 /* See if we can take anything off of the retransmit queue. */ 3345 /* See if we can take anything off of the retransmit queue. */
3341 acked = tp->packets_out; 3346 acked = tp->packets_out;
3342 flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una); 3347 flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, sack_rtt);
3343 acked -= tp->packets_out; 3348 acked -= tp->packets_out;
3344 3349
3345 if (tcp_ack_is_dubious(sk, flag)) { 3350 if (tcp_ack_is_dubious(sk, flag)) {