path: root/net/ipv4/tcp_input.c
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--    net/ipv4/tcp_input.c    183
1 file changed, 88 insertions(+), 95 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 227cba79fa6b..23a41d978fad 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -667,11 +667,11 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
  * To save cycles in the RFC 1323 implementation it was better to break
  * it up into three procedures. -- erics
  */
-static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
+static void tcp_rtt_estimator(struct sock *sk, long mrtt_us)
 {
         struct tcp_sock *tp = tcp_sk(sk);
-        long m = mrtt; /* RTT */
-        u32 srtt = tp->srtt;
+        long m = mrtt_us; /* RTT */
+        u32 srtt = tp->srtt_us;
 
         /* The following amusing code comes from Jacobson's
          * article in SIGCOMM '88. Note that rtt and mdev
@@ -694,7 +694,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
                 srtt += m;              /* rtt = 7/8 rtt + 1/8 new */
                 if (m < 0) {
                         m = -m;         /* m is now abs(error) */
-                        m -= (tp->mdev >> 2);   /* similar update on mdev */
+                        m -= (tp->mdev_us >> 2);   /* similar update on mdev */
                         /* This is similar to one of Eifel findings.
                          * Eifel blocks mdev updates when rtt decreases.
                          * This solution is a bit different: we use finer gain
@@ -706,28 +706,29 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
                         if (m > 0)
                                 m >>= 3;
                 } else {
-                        m -= (tp->mdev >> 2);   /* similar update on mdev */
+                        m -= (tp->mdev_us >> 2);   /* similar update on mdev */
                 }
-                tp->mdev += m;          /* mdev = 3/4 mdev + 1/4 new */
-                if (tp->mdev > tp->mdev_max) {
-                        tp->mdev_max = tp->mdev;
-                        if (tp->mdev_max > tp->rttvar)
-                                tp->rttvar = tp->mdev_max;
+                tp->mdev_us += m;       /* mdev = 3/4 mdev + 1/4 new */
+                if (tp->mdev_us > tp->mdev_max_us) {
+                        tp->mdev_max_us = tp->mdev_us;
+                        if (tp->mdev_max_us > tp->rttvar_us)
+                                tp->rttvar_us = tp->mdev_max_us;
                 }
                 if (after(tp->snd_una, tp->rtt_seq)) {
-                        if (tp->mdev_max < tp->rttvar)
-                                tp->rttvar -= (tp->rttvar - tp->mdev_max) >> 2;
+                        if (tp->mdev_max_us < tp->rttvar_us)
+                                tp->rttvar_us -= (tp->rttvar_us - tp->mdev_max_us) >> 2;
                         tp->rtt_seq = tp->snd_nxt;
-                        tp->mdev_max = tcp_rto_min(sk);
+                        tp->mdev_max_us = tcp_rto_min_us(sk);
                 }
         } else {
                 /* no previous measure. */
                 srtt = m << 3;          /* take the measured time to be rtt */
-                tp->mdev = m << 1;      /* make sure rto = 3*rtt */
-                tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
+                tp->mdev_us = m << 1;   /* make sure rto = 3*rtt */
+                tp->rttvar_us = max(tp->mdev_us, tcp_rto_min_us(sk));
+                tp->mdev_max_us = tp->rttvar_us;
                 tp->rtt_seq = tp->snd_nxt;
         }
-        tp->srtt = max(1U, srtt);
+        tp->srtt_us = max(1U, srtt);
 }
 
 /* Set the sk_pacing_rate to allow proper sizing of TSO packets.
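
The hunks above keep Jacobson's fixed-point estimator but feed it microsecond samples: srtt_us holds 8 times the smoothed RTT and mdev_us a scaled mean deviation, so the shifts implement the 1/8 and 1/4 gains without division. Below is a minimal userspace sketch of that update with illustrative names, omitting the kernel's mdev_max/rttvar windowing and the finer-gain handling of negative errors.

#include <stdio.h>

struct rtt_est {
	long srtt_us;	/* smoothed RTT, stored left-shifted by 3 (8 * srtt) */
	long mdev_us;	/* mean deviation, kept in scaled (shifted) units */
};

static void rtt_sample(struct rtt_est *e, long mrtt_us)
{
	long m = mrtt_us;

	if (e->srtt_us) {
		m -= (e->srtt_us >> 3);	/* error against current estimate */
		e->srtt_us += m;	/* srtt = 7/8 srtt + 1/8 new */
		if (m < 0)
			m = -m;
		m -= (e->mdev_us >> 2);
		e->mdev_us += m;	/* mdev = 3/4 mdev + 1/4 |error| */
	} else {
		e->srtt_us = m << 3;	/* first sample seeds the estimate */
		e->mdev_us = m << 1;
	}
}

int main(void)
{
	struct rtt_est e = { 0, 0 };
	long samples[] = { 40000, 42000, 39000, 80000 };	/* usec */

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		rtt_sample(&e, samples[i]);
		printf("sample=%ldus srtt=%ldus mdev(scaled)=%ld\n",
		       samples[i], e.srtt_us >> 3, e.mdev_us);
	}
	return 0;
}
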
@@ -742,20 +743,12 @@ static void tcp_update_pacing_rate(struct sock *sk)
         u64 rate;
 
         /* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */
-        rate = (u64)tp->mss_cache * 2 * (HZ << 3);
+        rate = (u64)tp->mss_cache * 2 * (USEC_PER_SEC << 3);
 
         rate *= max(tp->snd_cwnd, tp->packets_out);
 
-        /* Correction for small srtt and scheduling constraints.
-         * For small rtt, consider noise is too high, and use
-         * the minimal value (srtt = 1 -> 125 us for HZ=1000)
-         *
-         * We probably need usec resolution in the future.
-         * Note: This also takes care of possible srtt=0 case,
-         * when tcp_rtt_estimator() was not yet called.
-         */
-        if (tp->srtt > 8 + 2)
-                do_div(rate, tp->srtt);
+        if (likely(tp->srtt_us))
+                do_div(rate, tp->srtt_us);
 
         /* ACCESS_ONCE() is needed because sch_fq fetches sk_pacing_rate
          * without any lock. We want to make sure compiler wont store
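
Because srtt_us is stored left-shifted by 3, the (USEC_PER_SEC << 3) factor above cancels the shift, so the pacing rate works out to 2 * mss * cwnd / srtt in bytes per second, and a zero srtt_us (no sample yet) simply skips the division. A stand-alone sketch of that arithmetic, with illustrative names standing in for the kernel's max()/do_div() helpers:

#include <stdint.h>
#include <stdio.h>

#define USEC_PER_SEC 1000000UL

static uint64_t pacing_rate(uint32_t mss, uint32_t cwnd, uint32_t packets_out,
			    uint64_t srtt_us_shifted)
{
	/* 200 % of mss * cwnd / srtt; the << 3 cancels srtt's stored shift */
	uint64_t rate = (uint64_t)mss * 2 * (USEC_PER_SEC << 3);

	rate *= (cwnd > packets_out) ? cwnd : packets_out;

	if (srtt_us_shifted)		/* no RTT sample yet: leave unscaled */
		rate /= srtt_us_shifted;

	return rate;			/* bytes per second */
}

int main(void)
{
	/* 1448-byte MSS, cwnd of 10, srtt of 50 ms (stored as 8 * 50000 usec) */
	uint64_t r = pacing_rate(1448, 10, 0, 50000UL << 3);

	printf("pacing rate: %llu bytes/sec (~%.1f Mbit/s)\n",
	       (unsigned long long)r, r * 8.0 / 1e6);
	return 0;
}
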
@@ -1122,10 +1115,10 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
 }
 
 struct tcp_sacktag_state {
         int reord;
         int fack_count;
-        int flag;
-        s32 rtt; /* RTT measured by SACKing never-retransmitted data */
+        long rtt_us; /* RTT measured by SACKing never-retransmitted data */
+        int flag;
 };
 
 /* Check if skb is fully within the SACK block. In presence of GSO skbs,
@@ -1186,7 +1179,8 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
 static u8 tcp_sacktag_one(struct sock *sk,
                           struct tcp_sacktag_state *state, u8 sacked,
                           u32 start_seq, u32 end_seq,
-                          int dup_sack, int pcount, u32 xmit_time)
+                          int dup_sack, int pcount,
+                          const struct skb_mstamp *xmit_time)
 {
         struct tcp_sock *tp = tcp_sk(sk);
         int fack_count = state->fack_count;
@@ -1227,8 +1221,13 @@ static u8 tcp_sacktag_one(struct sock *sk,
                         if (!after(end_seq, tp->high_seq))
                                 state->flag |= FLAG_ORIG_SACK_ACKED;
                         /* Pick the earliest sequence sacked for RTT */
-                        if (state->rtt < 0)
-                                state->rtt = tcp_time_stamp - xmit_time;
+                        if (state->rtt_us < 0) {
+                                struct skb_mstamp now;
+
+                                skb_mstamp_get(&now);
+                                state->rtt_us = skb_mstamp_us_delta(&now,
+                                                                    xmit_time);
+                        }
                 }
 
                 if (sacked & TCPCB_LOST) {
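
The new block above takes an RTT sample the first time a never-retransmitted segment is SACKed, measuring from the skb's transmit mstamp instead of the jiffies clock. A rough userspace analogue of that "earliest SACKed segment" pick, using CLOCK_MONOTONIC in place of the skb_mstamp helpers and illustrative names throughout:

#include <stdio.h>
#include <time.h>

struct sacktag_state {
	long rtt_us;		/* -1 until the first usable sample */
};

static long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000L + ts.tv_nsec / 1000L;
}

/* Called once per newly SACKed segment, lowest sequence first. */
static void sack_rtt_sample(struct sacktag_state *state, long xmit_time_us,
			    int retransmitted)
{
	if (retransmitted)
		return;			/* ambiguous: which copy was SACKed? */
	if (state->rtt_us < 0)		/* keep only the earliest segment's RTT */
		state->rtt_us = now_us() - xmit_time_us;
}

int main(void)
{
	struct sacktag_state st = { .rtt_us = -1 };
	long sent = now_us() - 30000;	/* pretend the segment left 30 ms ago */

	sack_rtt_sample(&st, sent, 0);
	printf("sack rtt sample: %ld us\n", st.rtt_us);
	return 0;
}
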
@@ -1287,7 +1286,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
          */
         tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
                         start_seq, end_seq, dup_sack, pcount,
-                        TCP_SKB_CB(skb)->when);
+                        &skb->skb_mstamp);
 
         if (skb == tp->lost_skb_hint)
                 tp->lost_cnt_hint += pcount;
@@ -1565,7 +1564,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
                                                 TCP_SKB_CB(skb)->end_seq,
                                                 dup_sack,
                                                 tcp_skb_pcount(skb),
-                                                TCP_SKB_CB(skb)->when);
+                                                &skb->skb_mstamp);
 
                         if (!before(TCP_SKB_CB(skb)->seq,
                                     tcp_highest_sack_seq(tp)))
@@ -1622,7 +1621,7 @@ static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_bl
 
 static int
 tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
-                        u32 prior_snd_una, s32 *sack_rtt)
+                        u32 prior_snd_una, long *sack_rtt_us)
 {
         struct tcp_sock *tp = tcp_sk(sk);
         const unsigned char *ptr = (skb_transport_header(ack_skb) +
@@ -1640,7 +1639,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 
         state.flag = 0;
         state.reord = tp->packets_out;
-        state.rtt = -1;
+        state.rtt_us = -1L;
 
         if (!tp->sacked_out) {
                 if (WARN_ON(tp->fackets_out))
@@ -1824,7 +1823,7 @@ out:
         WARN_ON((int)tp->retrans_out < 0);
         WARN_ON((int)tcp_packets_in_flight(tp) < 0);
 #endif
-        *sack_rtt = state.rtt;
+        *sack_rtt_us = state.rtt_us;
         return state.flag;
 }
 
@@ -2034,10 +2033,12 @@ static bool tcp_pause_early_retransmit(struct sock *sk, int flag)
          * available, or RTO is scheduled to fire first.
          */
         if (sysctl_tcp_early_retrans < 2 || sysctl_tcp_early_retrans > 3 ||
-            (flag & FLAG_ECE) || !tp->srtt)
+            (flag & FLAG_ECE) || !tp->srtt_us)
                 return false;
 
-        delay = max_t(unsigned long, (tp->srtt >> 5), msecs_to_jiffies(2));
+        delay = max(usecs_to_jiffies(tp->srtt_us >> 5),
+                    msecs_to_jiffies(2));
+
         if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay)))
                 return false;
 
@@ -2884,7 +2885,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
 }
 
 static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
-                                      s32 seq_rtt, s32 sack_rtt)
+                                      long seq_rtt_us, long sack_rtt_us)
 {
         const struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2894,10 +2895,10 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
          * is acked (RFC6298).
          */
         if (flag & FLAG_RETRANS_DATA_ACKED)
-                seq_rtt = -1;
+                seq_rtt_us = -1L;
 
-        if (seq_rtt < 0)
-                seq_rtt = sack_rtt;
+        if (seq_rtt_us < 0)
+                seq_rtt_us = sack_rtt_us;
 
         /* RTTM Rule: A TSecr value received in a segment is used to
          * update the averaged RTT measurement only if the segment
@@ -2905,14 +2906,14 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
          * left edge of the send window.
          * See draft-ietf-tcplw-high-performance-00, section 3.3.
          */
-        if (seq_rtt < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
+        if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
             flag & FLAG_ACKED)
-                seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
+                seq_rtt_us = jiffies_to_usecs(tcp_time_stamp - tp->rx_opt.rcv_tsecr);
 
-        if (seq_rtt < 0)
+        if (seq_rtt_us < 0)
                 return false;
 
-        tcp_rtt_estimator(sk, seq_rtt);
+        tcp_rtt_estimator(sk, seq_rtt_us);
         tcp_set_rto(sk);
 
         /* RFC6298: only reset backoff on valid RTT measurement. */
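
tcp_ack_update_rtt() now carries all candidate measurements as signed microsecond values: the ACK-derived RTT is dropped when retransmitted data was ACKed (Karn's rule), the SACK-derived RTT fills in next, and the echoed timestamp, converted from jiffies, is the last resort. A compact sketch of that selection order; the flag values and the HZ=1000 conversion are illustrative assumptions:

#include <stdio.h>

#define FLAG_RETRANS_DATA_ACKED	0x1
#define FLAG_ACKED		0x2
#define USEC_PER_JIFFY		1000L	/* assumes HZ=1000 */

static long pick_rtt_us(int flag, long seq_rtt_us, long sack_rtt_us,
			long tsecr_delta_jiffies, int saw_tstamp)
{
	if (flag & FLAG_RETRANS_DATA_ACKED)
		seq_rtt_us = -1L;		/* Karn: ambiguous measurement */

	if (seq_rtt_us < 0)
		seq_rtt_us = sack_rtt_us;	/* RTT measured from SACKed data */

	if (seq_rtt_us < 0 && saw_tstamp && (flag & FLAG_ACKED))
		seq_rtt_us = tsecr_delta_jiffies * USEC_PER_JIFFY;

	return seq_rtt_us;			/* < 0 means "no update" */
}

int main(void)
{
	/* retransmitted data acked: falls through to the timestamp echo */
	printf("%ld\n", pick_rtt_us(FLAG_ACKED | FLAG_RETRANS_DATA_ACKED,
				    25000L, -1L, 40L, 1));
	/* no ACK sample, but a SACK sample is available */
	printf("%ld\n", pick_rtt_us(FLAG_ACKED, -1L, 31000L, 40L, 1));
	return 0;
}
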
@@ -2924,16 +2925,16 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
 static void tcp_synack_rtt_meas(struct sock *sk, const u32 synack_stamp)
 {
         struct tcp_sock *tp = tcp_sk(sk);
-        s32 seq_rtt = -1;
+        long seq_rtt_us = -1L;
 
         if (synack_stamp && !tp->total_retrans)
-                seq_rtt = tcp_time_stamp - synack_stamp;
+                seq_rtt_us = jiffies_to_usecs(tcp_time_stamp - synack_stamp);
 
         /* If the ACK acks both the SYNACK and the (Fast Open'd) data packets
          * sent in SYN_RECV, SYNACK RTT is the smooth RTT computed in tcp_ack()
          */
-        if (!tp->srtt)
-                tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt, -1);
+        if (!tp->srtt_us)
+                tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt_us, -1L);
 }
 
 static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
@@ -3022,26 +3023,27 @@ static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
  * arrived at the other end.
  */
 static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
-                               u32 prior_snd_una, s32 sack_rtt)
+                               u32 prior_snd_una, long sack_rtt_us)
 {
-        struct tcp_sock *tp = tcp_sk(sk);
         const struct inet_connection_sock *icsk = inet_csk(sk);
-        struct sk_buff *skb;
-        u32 now = tcp_time_stamp;
+        struct skb_mstamp first_ackt, last_ackt, now;
+        struct tcp_sock *tp = tcp_sk(sk);
+        u32 prior_sacked = tp->sacked_out;
+        u32 reord = tp->packets_out;
         bool fully_acked = true;
-        int flag = 0;
+        long ca_seq_rtt_us = -1L;
+        long seq_rtt_us = -1L;
+        struct sk_buff *skb;
         u32 pkts_acked = 0;
-        u32 reord = tp->packets_out;
-        u32 prior_sacked = tp->sacked_out;
-        s32 seq_rtt = -1;
-        s32 ca_seq_rtt = -1;
-        ktime_t last_ackt = net_invalid_timestamp();
         bool rtt_update;
+        int flag = 0;
+
+        first_ackt.v64 = 0;
 
         while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) {
                 struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
-                u32 acked_pcount;
                 u8 sacked = scb->sacked;
+                u32 acked_pcount;
 
                 /* Determine how many packets and what bytes were acked, tso and else */
                 if (after(scb->end_seq, tp->snd_una)) {
@@ -3063,11 +3065,10 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                         tp->retrans_out -= acked_pcount;
                         flag |= FLAG_RETRANS_DATA_ACKED;
                 } else {
-                        ca_seq_rtt = now - scb->when;
-                        last_ackt = skb->tstamp;
-                        if (seq_rtt < 0) {
-                                seq_rtt = ca_seq_rtt;
-                        }
+                        last_ackt = skb->skb_mstamp;
+                        if (!first_ackt.v64)
+                                first_ackt = last_ackt;
+
                         if (!(sacked & TCPCB_SACKED_ACKED))
                                 reord = min(pkts_acked, reord);
                         if (!after(scb->end_seq, tp->high_seq))
@@ -3113,7 +3114,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
         if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
                 flag |= FLAG_SACK_RENEGING;
 
-        rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt, sack_rtt);
+        skb_mstamp_get(&now);
+        if (first_ackt.v64) {
+                seq_rtt_us = skb_mstamp_us_delta(&now, &first_ackt);
+                ca_seq_rtt_us = skb_mstamp_us_delta(&now, &last_ackt);
+        }
+
+        rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us);
 
         if (flag & FLAG_ACKED) {
                 const struct tcp_congestion_ops *ca_ops
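
tcp_clean_rtx_queue() now records two send timestamps while walking the newly acked segments oldest-first: the first non-retransmitted one feeds tcp_ack_update_rtt() (the conservative, largest sample) and the last one is handed to ca_ops->pkts_acked(). A small sketch of that bookkeeping, with plain microsecond counts standing in for skb_mstamp:

#include <stdio.h>

int main(void)
{
	/* send times (us) of segments fully acked by one ACK, oldest first;
	 * a negative entry marks a retransmitted segment we must skip.
	 */
	long xmit_us[] = { 1000, 1200, -1, 1900 };
	long now_us = 51000;
	long first_ackt = 0, last_ackt = 0;
	long seq_rtt_us = -1, ca_seq_rtt_us = -1;

	for (unsigned int i = 0; i < sizeof(xmit_us) / sizeof(xmit_us[0]); i++) {
		if (xmit_us[i] < 0)		/* retransmitted: RTT ambiguous */
			continue;
		last_ackt = xmit_us[i];
		if (!first_ackt)
			first_ackt = last_ackt;
	}

	if (first_ackt) {
		seq_rtt_us = now_us - first_ackt;	/* feeds the RTO estimator */
		ca_seq_rtt_us = now_us - last_ackt;	/* feeds congestion control */
	}
	printf("seq_rtt=%ldus ca_seq_rtt=%ldus\n", seq_rtt_us, ca_seq_rtt_us);
	return 0;
}
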
@@ -3141,25 +3148,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 
                 tp->fackets_out -= min(pkts_acked, tp->fackets_out);
 
-                if (ca_ops->pkts_acked) {
-                        s32 rtt_us = -1;
-
-                        /* Is the ACK triggering packet unambiguous? */
-                        if (!(flag & FLAG_RETRANS_DATA_ACKED)) {
-                                /* High resolution needed and available? */
-                                if (ca_ops->flags & TCP_CONG_RTT_STAMP &&
-                                    !ktime_equal(last_ackt,
-                                                 net_invalid_timestamp()))
-                                        rtt_us = ktime_us_delta(ktime_get_real(),
-                                                                last_ackt);
-                                else if (ca_seq_rtt >= 0)
-                                        rtt_us = jiffies_to_usecs(ca_seq_rtt);
-                        }
+                if (ca_ops->pkts_acked)
+                        ca_ops->pkts_acked(sk, pkts_acked, ca_seq_rtt_us);
 
-                        ca_ops->pkts_acked(sk, pkts_acked, rtt_us);
-                }
-        } else if (skb && rtt_update && sack_rtt >= 0 &&
-                   sack_rtt > (s32)(now - TCP_SKB_CB(skb)->when)) {
+        } else if (skb && rtt_update && sack_rtt_us >= 0 &&
+                   sack_rtt_us > skb_mstamp_us_delta(&now, &skb->skb_mstamp)) {
                 /* Do not re-arm RTO if the sack RTT is measured from data sent
                  * after when the head was last (re)transmitted. Otherwise the
                  * timeout may continue to extend in loss recovery.
@@ -3369,12 +3362,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
         u32 ack_seq = TCP_SKB_CB(skb)->seq;
         u32 ack = TCP_SKB_CB(skb)->ack_seq;
         bool is_dupack = false;
-        u32 prior_in_flight, prior_cwnd = tp->snd_cwnd, prior_rtt = tp->srtt;
+        u32 prior_in_flight;
         u32 prior_fackets;
         int prior_packets = tp->packets_out;
         const int prior_unsacked = tp->packets_out - tp->sacked_out;
         int acked = 0; /* Number of packets newly acked */
-        s32 sack_rtt = -1;
+        long sack_rtt_us = -1L;
 
         /* If the ack is older than previous acks
          * then we can probably ignore it.
@@ -3432,7 +3425,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
                 if (TCP_SKB_CB(skb)->sacked)
                         flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
-                                                        &sack_rtt);
+                                                        &sack_rtt_us);
 
                 if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb)))
                         flag |= FLAG_ECE;
@@ -3451,7 +3444,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
         /* See if we can take anything off of the retransmit queue. */
         acked = tp->packets_out;
-        flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, sack_rtt);
+        flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una,
+                                    sack_rtt_us);
         acked -= tp->packets_out;
 
         /* Advance cwnd if state allows */
@@ -3474,8 +3468,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
         if (icsk->icsk_pending == ICSK_TIME_RETRANS)
                 tcp_schedule_loss_probe(sk);
-        if (tp->srtt != prior_rtt || tp->snd_cwnd != prior_cwnd)
-                tcp_update_pacing_rate(sk);
+        tcp_update_pacing_rate(sk);
         return 1;
 
 no_queue:
@@ -3504,7 +3497,7 @@ old_ack:
          */
         if (TCP_SKB_CB(skb)->sacked) {
                 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
-                                                &sack_rtt);
+                                                &sack_rtt_us);
                 tcp_fastretrans_alert(sk, acked, prior_unsacked,
                                       is_dupack, flag);
         }