author    Eric Dumazet <edumazet@google.com>  2018-09-21 11:51:47 -0400
committer David S. Miller <davem@davemloft.net>  2018-09-21 22:37:59 -0400
commit    2fd66ffba50716fc5ab481c48db643af3bda2276 (patch)
tree      0429f0bb89fa036d79e9b9d2105f89c0a3e38ca0 /net/ipv4/tcp_input.c
parent    72b0094f918294e6cb8cf5c3b4520d928fbb1a57 (diff)
tcp: introduce tcp_skb_timestamp_us() helper
There are a few places where TCP reads skb->skb_mstamp expecting a value in usec units. skb->tstamp (aka skb->skb_mstamp) will soon store a CLOCK_TAI nsec value. Add tcp_skb_timestamp_us() to provide the proper conversion where needed.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
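The helper itself lives outside this file (in include/net/tcp.h), so its definition does not appear in the diff below. A minimal sketch of what it plausibly looks like at this point in the series, assuming skb->skb_mstamp still holds a usec value until the switch to CLOCK_TAI nsec lands later; the body shown here is an assumption based on the commit message, not part of this patch:

	/* Sketch only -- not part of this diff.  While skb->skb_mstamp is
	 * still expressed in usec the helper can simply return it; once
	 * skb->tstamp carries CLOCK_TAI nanoseconds, this becomes the one
	 * place that has to convert nsec to usec for TCP's RTT sampling.
	 */
	static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
	{
		return skb->skb_mstamp;
	}

With every former skb->skb_mstamp reader below funneled through this single accessor, the later change of time unit only has to touch the helper.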
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--  net/ipv4/tcp_input.c | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d9034073138c..d703a0b3b6a2 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1305,7 +1305,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
 	 */
 	tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
 			start_seq, end_seq, dup_sack, pcount,
-			skb->skb_mstamp);
+			tcp_skb_timestamp_us(skb));
 	tcp_rate_skb_delivered(sk, skb, state->rate);
 
 	if (skb == tp->lost_skb_hint)
@@ -1580,7 +1580,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
 						TCP_SKB_CB(skb)->end_seq,
 						dup_sack,
 						tcp_skb_pcount(skb),
-						skb->skb_mstamp);
+						tcp_skb_timestamp_us(skb));
 			tcp_rate_skb_delivered(sk, skb, state->rate);
 			if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
 				list_del_init(&skb->tcp_tsorted_anchor);
@@ -3103,7 +3103,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 				tp->retrans_out -= acked_pcount;
 			flag |= FLAG_RETRANS_DATA_ACKED;
 		} else if (!(sacked & TCPCB_SACKED_ACKED)) {
-			last_ackt = skb->skb_mstamp;
+			last_ackt = tcp_skb_timestamp_us(skb);
 			WARN_ON_ONCE(last_ackt == 0);
 			if (!first_ackt)
 				first_ackt = last_ackt;
@@ -3121,7 +3121,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 			tp->delivered += acked_pcount;
 			if (!tcp_skb_spurious_retrans(tp, skb))
 				tcp_rack_advance(tp, sacked, scb->end_seq,
-						 skb->skb_mstamp);
+						 tcp_skb_timestamp_us(skb));
 		}
 		if (sacked & TCPCB_LOST)
 			tp->lost_out -= acked_pcount;
@@ -3215,7 +3215,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 			tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta);
 		}
 	} else if (skb && rtt_update && sack_rtt_us >= 0 &&
-		   sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp)) {
+		   sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp,
+						    tcp_skb_timestamp_us(skb))) {
 		/* Do not re-arm RTO if the sack RTT is measured from data sent
 		 * after when the head was last (re)transmitted. Otherwise the
 		 * timeout may continue to extend in loss recovery.