about summary refs log tree commit diff stats
path: root/net
diff options
context:
space:
mode:
author	Eric Dumazet <edumazet@google.com>	2018-09-21 11:51:47 -0400
committer	David S. Miller <davem@davemloft.net>	2018-09-21 22:37:59 -0400
commit2fd66ffba50716fc5ab481c48db643af3bda2276 (patch)
tree0429f0bb89fa036d79e9b9d2105f89c0a3e38ca0 /net
parent72b0094f918294e6cb8cf5c3b4520d928fbb1a57 (diff)
tcp: introduce tcp_skb_timestamp_us() helper
There are a few places where TCP reads skb->skb_mstamp expecting a value in usec units.

skb->tstamp (aka skb->skb_mstamp) will soon store CLOCK_TAI nsec values.

Add tcp_skb_timestamp_us() to provide the proper conversion when needed.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--	net/ipv4/tcp_input.c	| 11
-rw-r--r--	net/ipv4/tcp_ipv4.c	| 2
-rw-r--r--	net/ipv4/tcp_output.c	| 2
-rw-r--r--	net/ipv4/tcp_rate.c	| 15
-rw-r--r--	net/ipv4/tcp_recovery.c	| 5
5 files changed, 19 insertions(+), 16 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d9034073138c..d703a0b3b6a2 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1305,7 +1305,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
1305 */ 1305 */
1306 tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked, 1306 tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
1307 start_seq, end_seq, dup_sack, pcount, 1307 start_seq, end_seq, dup_sack, pcount,
1308 skb->skb_mstamp); 1308 tcp_skb_timestamp_us(skb));
1309 tcp_rate_skb_delivered(sk, skb, state->rate); 1309 tcp_rate_skb_delivered(sk, skb, state->rate);
1310 1310
1311 if (skb == tp->lost_skb_hint) 1311 if (skb == tp->lost_skb_hint)
@@ -1580,7 +1580,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
1580 TCP_SKB_CB(skb)->end_seq, 1580 TCP_SKB_CB(skb)->end_seq,
1581 dup_sack, 1581 dup_sack,
1582 tcp_skb_pcount(skb), 1582 tcp_skb_pcount(skb),
1583 skb->skb_mstamp); 1583 tcp_skb_timestamp_us(skb));
1584 tcp_rate_skb_delivered(sk, skb, state->rate); 1584 tcp_rate_skb_delivered(sk, skb, state->rate);
1585 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) 1585 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
1586 list_del_init(&skb->tcp_tsorted_anchor); 1586 list_del_init(&skb->tcp_tsorted_anchor);
@@ -3103,7 +3103,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
3103 tp->retrans_out -= acked_pcount; 3103 tp->retrans_out -= acked_pcount;
3104 flag |= FLAG_RETRANS_DATA_ACKED; 3104 flag |= FLAG_RETRANS_DATA_ACKED;
3105 } else if (!(sacked & TCPCB_SACKED_ACKED)) { 3105 } else if (!(sacked & TCPCB_SACKED_ACKED)) {
3106 last_ackt = skb->skb_mstamp; 3106 last_ackt = tcp_skb_timestamp_us(skb);
3107 WARN_ON_ONCE(last_ackt == 0); 3107 WARN_ON_ONCE(last_ackt == 0);
3108 if (!first_ackt) 3108 if (!first_ackt)
3109 first_ackt = last_ackt; 3109 first_ackt = last_ackt;
@@ -3121,7 +3121,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
3121 tp->delivered += acked_pcount; 3121 tp->delivered += acked_pcount;
3122 if (!tcp_skb_spurious_retrans(tp, skb)) 3122 if (!tcp_skb_spurious_retrans(tp, skb))
3123 tcp_rack_advance(tp, sacked, scb->end_seq, 3123 tcp_rack_advance(tp, sacked, scb->end_seq,
3124 skb->skb_mstamp); 3124 tcp_skb_timestamp_us(skb));
3125 } 3125 }
3126 if (sacked & TCPCB_LOST) 3126 if (sacked & TCPCB_LOST)
3127 tp->lost_out -= acked_pcount; 3127 tp->lost_out -= acked_pcount;
@@ -3215,7 +3215,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
3215 tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta); 3215 tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta);
3216 } 3216 }
3217 } else if (skb && rtt_update && sack_rtt_us >= 0 && 3217 } else if (skb && rtt_update && sack_rtt_us >= 0 &&
3218 sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp)) { 3218 sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp,
3219 tcp_skb_timestamp_us(skb))) {
3219 /* Do not re-arm RTO if the sack RTT is measured from data sent 3220 /* Do not re-arm RTO if the sack RTT is measured from data sent
3220 * after when the head was last (re)transmitted. Otherwise the 3221 * after when the head was last (re)transmitted. Otherwise the
3221 * timeout may continue to extend in loss recovery. 3222 * timeout may continue to extend in loss recovery.
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 09547ef9c4c6..1f2496e8620d 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -544,7 +544,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
544 BUG_ON(!skb); 544 BUG_ON(!skb);
545 545
546 tcp_mstamp_refresh(tp); 546 tcp_mstamp_refresh(tp);
547 delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp); 547 delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
548 remaining = icsk->icsk_rto - 548 remaining = icsk->icsk_rto -
549 usecs_to_jiffies(delta_us); 549 usecs_to_jiffies(delta_us);
550 550
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 597dbd749f05..b95aa72d8823 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1966,7 +1966,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
1966 head = tcp_rtx_queue_head(sk); 1966 head = tcp_rtx_queue_head(sk);
1967 if (!head) 1967 if (!head)
1968 goto send_now; 1968 goto send_now;
1969 age = tcp_stamp_us_delta(tp->tcp_mstamp, head->skb_mstamp); 1969 age = tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(head));
1970 /* If next ACK is likely to come too late (half srtt), do not defer */ 1970 /* If next ACK is likely to come too late (half srtt), do not defer */
1971 if (age < (tp->srtt_us >> 4)) 1971 if (age < (tp->srtt_us >> 4))
1972 goto send_now; 1972 goto send_now;
diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c
index 4dff40dad4dc..baed2186c7c6 100644
--- a/net/ipv4/tcp_rate.c
+++ b/net/ipv4/tcp_rate.c
@@ -55,8 +55,10 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
55 * bandwidth estimate. 55 * bandwidth estimate.
56 */ 56 */
57 if (!tp->packets_out) { 57 if (!tp->packets_out) {
58 tp->first_tx_mstamp = skb->skb_mstamp; 58 u64 tstamp_us = tcp_skb_timestamp_us(skb);
59 tp->delivered_mstamp = skb->skb_mstamp; 59
60 tp->first_tx_mstamp = tstamp_us;
61 tp->delivered_mstamp = tstamp_us;
60 } 62 }
61 63
62 TCP_SKB_CB(skb)->tx.first_tx_mstamp = tp->first_tx_mstamp; 64 TCP_SKB_CB(skb)->tx.first_tx_mstamp = tp->first_tx_mstamp;
@@ -88,13 +90,12 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
88 rs->is_app_limited = scb->tx.is_app_limited; 90 rs->is_app_limited = scb->tx.is_app_limited;
89 rs->is_retrans = scb->sacked & TCPCB_RETRANS; 91 rs->is_retrans = scb->sacked & TCPCB_RETRANS;
90 92
93 /* Record send time of most recently ACKed packet: */
94 tp->first_tx_mstamp = tcp_skb_timestamp_us(skb);
91 /* Find the duration of the "send phase" of this window: */ 95 /* Find the duration of the "send phase" of this window: */
92 rs->interval_us = tcp_stamp_us_delta( 96 rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
93 skb->skb_mstamp, 97 scb->tx.first_tx_mstamp);
94 scb->tx.first_tx_mstamp);
95 98
96 /* Record send time of most recently ACKed packet: */
97 tp->first_tx_mstamp = skb->skb_mstamp;
98 } 99 }
99 /* Mark off the skb delivered once it's sacked to avoid being 100 /* Mark off the skb delivered once it's sacked to avoid being
100 * used again when it's cumulatively acked. For acked packets 101 * used again when it's cumulatively acked. For acked packets
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index c81aadff769b..fdb715bdd2d1 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -50,7 +50,7 @@ static u32 tcp_rack_reo_wnd(const struct sock *sk)
50s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd) 50s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd)
51{ 51{
52 return tp->rack.rtt_us + reo_wnd - 52 return tp->rack.rtt_us + reo_wnd -
53 tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp); 53 tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb));
54} 54}
55 55
56/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01): 56/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
@@ -91,7 +91,8 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
91 !(scb->sacked & TCPCB_SACKED_RETRANS)) 91 !(scb->sacked & TCPCB_SACKED_RETRANS))
92 continue; 92 continue;
93 93
94 if (!tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp, 94 if (!tcp_rack_sent_after(tp->rack.mstamp,
95 tcp_skb_timestamp_us(skb),
95 tp->rack.end_seq, scb->end_seq)) 96 tp->rack.end_seq, scb->end_seq))
96 break; 97 break;
97 98