author	Eric Dumazet <edumazet@google.com>	2017-05-16 17:00:14 -0400
committer	David S. Miller <davem@davemloft.net>	2017-05-17 16:06:01 -0400
commit	9a568de4818dea9a05af141046bd3e589245ab83 (patch)
tree	6f1502edf55ecb7205660d62bd683ebcf912cfea
parent	ac9517fcf310327fa3e3b0d8366e4b11236b1b4b (diff)
tcp: switch TCP TS option (RFC 7323) to 1ms clock
TCP Timestamps option is defined in RFC 7323.

Traditionally on linux, it has been tied to the internal 'jiffies' variable, because it had been a cheap and good enough generator.

For TCP flows on the Internet, 1 ms resolution would be much better than 4 ms or 10 ms (HZ=250 or HZ=100 respectively).

For TCP flows in the DC, Google has used usec resolution for more than two years with great success [1]. Receive size autotuning (DRS) is indeed more precise and converges faster to optimal window size.

This patch converts tp->tcp_mstamp to a plain u64 value storing a 1 usec TCP clock.

This choice will allow us to upstream the 1 usec TS option as discussed in IETF 97.

[1] https://www.ietf.org/proceedings/97/slides/slides-97-tcpm-tcp-options-for-low-latency-00.pdf

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
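For reference, everything in the new scheme derives from one nanosecond clock: tcp_clock_us() gives a 1 usec clock for RTT sampling, and tcp_time_stamp_raw() gets the 1 ms TS option value with a single divide from nanoseconds. A minimal userspace sketch of the same arithmetic (clock_gettime() stands in for the kernel's local_clock(); clock_ns() and the variable names are illustrative, not kernel API):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define TCP_TS_HZ	1000			/* TS option resolution: 1 ms */
#define NSEC_PER_SEC	1000000000ULL
#define NSEC_PER_USEC	1000ULL
#define USEC_PER_SEC	1000000ULL

/* Stand-in for the kernel's local_clock(): monotonic nanoseconds. */
static uint64_t clock_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * NSEC_PER_SEC + (uint64_t)ts.tv_nsec;
}

int main(void)
{
	uint64_t ns = clock_ns();
	uint64_t us = ns / NSEC_PER_USEC;		  /* like tcp_clock_us() */
	uint32_t tsval = ns / (NSEC_PER_SEC / TCP_TS_HZ); /* like tcp_time_stamp_raw() */

	/* Same value either way, since ns/1000/1000 == ns/1000000 for
	 * integer division; the raw helper just saves one divide.
	 */
	printf("usec clock: %llu\n", (unsigned long long)us);
	printf("TS value  : %u (us / %llu = %llu)\n", tsval,
	       (unsigned long long)(USEC_PER_SEC / TCP_TS_HZ),
	       (unsigned long long)(us / (USEC_PER_SEC / TCP_TS_HZ)));
	return 0;
}

This is also why tcp_time_stamp(tp), which divides the cached tp->tcp_mstamp (a usec value) by USEC_PER_SEC / TCP_TS_HZ, agrees with tcp_time_stamp_raw() whenever the cached mstamp is up to date: the TS option and the usec RTT samples share a single underlying clock.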
-rw-r--r--	include/linux/skbuff.h	62
-rw-r--r--	include/linux/tcp.h	22
-rw-r--r--	include/net/tcp.h	59
-rw-r--r--	net/ipv4/syncookies.c	8
-rw-r--r--	net/ipv4/tcp.c	4
-rw-r--r--	net/ipv4/tcp_bbr.c	22
-rw-r--r--	net/ipv4/tcp_input.c	96
-rw-r--r--	net/ipv4/tcp_ipv4.c	17
-rw-r--r--	net/ipv4/tcp_lp.c	12
-rw-r--r--	net/ipv4/tcp_minisocks.c	4
-rw-r--r--	net/ipv4/tcp_output.c	16
-rw-r--r--	net/ipv4/tcp_rate.c	16
-rw-r--r--	net/ipv4/tcp_recovery.c	23
-rw-r--r--	net/ipv4/tcp_timer.c	8
-rw-r--r--	net/ipv6/syncookies.c	2
-rw-r--r--	net/ipv6/tcp_ipv6.c	4
-rw-r--r--	net/netfilter/nf_synproxy_core.c	2
17 files changed, 178 insertions, 199 deletions
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index bfc7892f6c33..7c0cb2ce8b01 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -506,66 +506,6 @@ typedef unsigned int sk_buff_data_t;
 typedef unsigned char *sk_buff_data_t;
 #endif
 
-/**
- * struct skb_mstamp - multi resolution time stamps
- * @stamp_us: timestamp in us resolution
- * @stamp_jiffies: timestamp in jiffies
- */
-struct skb_mstamp {
-	union {
-		u64		v64;
-		struct {
-			u32	stamp_us;
-			u32	stamp_jiffies;
-		};
-	};
-};
-
-/**
- * skb_mstamp_get - get current timestamp
- * @cl: place to store timestamps
- */
-static inline void skb_mstamp_get(struct skb_mstamp *cl)
-{
-	u64 val = local_clock();
-
-	do_div(val, NSEC_PER_USEC);
-	cl->stamp_us = (u32)val;
-	cl->stamp_jiffies = (u32)jiffies;
-}
-
-/**
- * skb_mstamp_delta - compute the difference in usec between two skb_mstamp
- * @t1: pointer to newest sample
- * @t0: pointer to oldest sample
- */
-static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
-				      const struct skb_mstamp *t0)
-{
-	s32 delta_us = t1->stamp_us - t0->stamp_us;
-	u32 delta_jiffies = t1->stamp_jiffies - t0->stamp_jiffies;
-
-	/* If delta_us is negative, this might be because interval is too big,
-	 * or local_clock() drift is too big : fallback using jiffies.
-	 */
-	if (delta_us <= 0 ||
-	    delta_jiffies >= (INT_MAX / (USEC_PER_SEC / HZ)))
-
-		delta_us = jiffies_to_usecs(delta_jiffies);
-
-	return delta_us;
-}
-
-static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
-				    const struct skb_mstamp *t0)
-{
-	s32 diff = t1->stamp_jiffies - t0->stamp_jiffies;
-
-	if (!diff)
-		diff = t1->stamp_us - t0->stamp_us;
-	return diff > 0;
-}
-
 /**
  * struct sk_buff - socket buffer
  * @next:	Next buffer in list
@@ -646,7 +586,7 @@ struct sk_buff {
 
 			union {
 				ktime_t		tstamp;
-				struct skb_mstamp skb_mstamp;
+				u64		skb_mstamp;
 			};
 		};
 		struct rb_node	rbnode; /* used in netem & tcp stack */
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 22854f028434..542ca1ae02c4 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -123,7 +123,7 @@ struct tcp_request_sock_ops;
 struct tcp_request_sock {
 	struct inet_request_sock 	req;
 	const struct tcp_request_sock_ops *af_specific;
-	struct skb_mstamp		snt_synack; /* first SYNACK sent time */
+	u64				snt_synack; /* first SYNACK sent time */
 	bool				tfo_listener;
 	u32				txhash;
 	u32				rcv_isn;
@@ -211,7 +211,7 @@ struct tcp_sock {
 
 	/* Information of the most recently (s)acked skb */
 	struct tcp_rack {
-		struct skb_mstamp mstamp; /* (Re)sent time of the skb */
+		u64 mstamp; /* (Re)sent time of the skb */
 		u32 rtt_us;  /* Associated RTT */
 		u32 end_seq; /* Ending TCP sequence of the skb */
 		u8 advanced; /* mstamp advanced since last lost marking */
@@ -240,7 +240,7 @@ struct tcp_sock {
 	u32	tlp_high_seq;	/* snd_nxt at the time of TLP retransmit. */
 
 /* RTT measurement */
-	struct skb_mstamp tcp_mstamp; /* most recent packet received/sent */
+	u64	tcp_mstamp;	/* most recent packet received/sent */
 	u32	srtt_us;	/* smoothed round trip time << 3 in usecs */
 	u32	mdev_us;	/* medium deviation */
 	u32	mdev_max_us;	/* maximal mdev for the last rtt period */
@@ -280,8 +280,8 @@ struct tcp_sock {
 	u32	delivered;	/* Total data packets delivered incl. rexmits */
 	u32	lost;		/* Total data packets lost incl. rexmits */
 	u32	app_limited;	/* limited until "delivered" reaches this val */
-	struct skb_mstamp first_tx_mstamp;  /* start of window send phase */
-	struct skb_mstamp delivered_mstamp; /* time we reached "delivered" */
+	u64	first_tx_mstamp;  /* start of window send phase */
+	u64	delivered_mstamp; /* time we reached "delivered" */
 	u32	rate_delivered;    /* saved rate sample: packets delivered */
 	u32	rate_interval_us;  /* saved rate sample: time elapsed */
 
@@ -335,16 +335,16 @@ struct tcp_sock {
 
 /* Receiver side RTT estimation */
 	struct {
-		u32	rtt_us;
-		u32	seq;
-		struct skb_mstamp time;
+		u32		rtt_us;
+		u32		seq;
+		u64		time;
 	} rcv_rtt_est;
 
 /* Receiver queue space */
 	struct {
-		int	space;
-		u32	seq;
-		struct skb_mstamp time;
+		int		space;
+		u32		seq;
+		u64		time;
 	} rcvq_space;
 
 /* TCP-specific MTU probe information. */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 5b2932b8363f..82462db97183 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -519,7 +519,7 @@ static inline u32 tcp_cookie_time(void)
 u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
 			      u16 *mssp);
 __u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
-__u32 cookie_init_timestamp(struct request_sock *req);
+u64 cookie_init_timestamp(struct request_sock *req);
 bool cookie_timestamp_decode(struct tcp_options_received *opt);
 bool cookie_ecn_ok(const struct tcp_options_received *opt,
 		   const struct net *net, const struct dst_entry *dst);
@@ -706,14 +706,55 @@ void tcp_send_window_probe(struct sock *sk);
  */
 #define tcp_jiffies32 ((u32)jiffies)
 
-/* Generator for TCP TS option (RFC 7323)
- * Currently tied to 'jiffies' but will soon be driven by 1 ms clock.
+/*
+ * Deliver a 32bit value for TCP timestamp option (RFC 7323)
+ * It is no longer tied to jiffies, but to 1 ms clock.
+ * Note: double check if you want to use tcp_jiffies32 instead of this.
  */
-#define tcp_time_stamp		((__u32)(jiffies))
+#define TCP_TS_HZ	1000
+
+static inline u64 tcp_clock_ns(void)
+{
+	return local_clock();
+}
+
+static inline u64 tcp_clock_us(void)
+{
+	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
+}
+
+/* This should only be used in contexts where tp->tcp_mstamp is up to date */
+static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
+{
+	return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
+}
+
+/* Could use tcp_clock_us() / 1000, but this version uses a single divide */
+static inline u32 tcp_time_stamp_raw(void)
+{
+	return div_u64(tcp_clock_ns(), NSEC_PER_SEC / TCP_TS_HZ);
+}
+
+
+/* Refresh 1us clock of a TCP socket,
+ * ensuring monotically increasing values.
+ */
+static inline void tcp_mstamp_refresh(struct tcp_sock *tp)
+{
+	u64 val = tcp_clock_us();
+
+	if (val > tp->tcp_mstamp)
+		tp->tcp_mstamp = val;
+}
+
+static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
+{
+	return max_t(s64, t1 - t0, 0);
+}
 
 static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
 {
-	return skb->skb_mstamp.stamp_jiffies;
+	return div_u64(skb->skb_mstamp, USEC_PER_SEC / TCP_TS_HZ);
 }
 
 
@@ -778,9 +819,9 @@ struct tcp_skb_cb {
 			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
 			__u32 delivered;
 			/* start of send pipeline phase */
-			struct skb_mstamp first_tx_mstamp;
+			u64 first_tx_mstamp;
 			/* when we reached the "delivered" count */
-			struct skb_mstamp delivered_mstamp;
+			u64 delivered_mstamp;
 		} tx;   /* only used for outgoing skbs */
 		union {
 			struct inet_skb_parm	h4;
@@ -896,7 +937,7 @@ struct ack_sample {
  * A sample is invalid if "delivered" or "interval_us" is negative.
  */
 struct rate_sample {
-	struct skb_mstamp prior_mstamp; /* starting timestamp for interval */
+	u64  prior_mstamp; /* starting timestamp for interval */
 	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
 	s32  delivered;		/* number of packets delivered over interval */
 	long interval_us;	/* time for tp->delivered to incr "delivered" */
@@ -1862,7 +1903,7 @@ void tcp_init(void);
 /* tcp_recovery.c */
 extern void tcp_rack_mark_lost(struct sock *sk);
 extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
-			     const struct skb_mstamp *xmit_time);
+			     u64 xmit_time);
 extern void tcp_rack_reo_timeout(struct sock *sk);
 
 /*
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 0257d965f111..6426250a58ea 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -66,10 +66,10 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
  * Since subsequent timestamps use the normal tcp_time_stamp value, we
  * must make sure that the resulting initial timestamp is <= tcp_time_stamp.
  */
-__u32 cookie_init_timestamp(struct request_sock *req)
+u64 cookie_init_timestamp(struct request_sock *req)
 {
 	struct inet_request_sock *ireq;
-	u32 ts, ts_now = tcp_time_stamp;
+	u32 ts, ts_now = tcp_time_stamp_raw();
 	u32 options = 0;
 
 	ireq = inet_rsk(req);
@@ -88,7 +88,7 @@ __u32 cookie_init_timestamp(struct request_sock *req)
 		ts <<= TSBITS;
 		ts |= options;
 	}
-	return ts;
+	return (u64)ts * (USEC_PER_SEC / TCP_TS_HZ);
 }
 
 
@@ -343,7 +343,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
 	ireq->wscale_ok		= tcp_opt.wscale_ok;
 	ireq->tstamp_ok		= tcp_opt.saw_tstamp;
 	req->ts_recent		= tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
-	treq->snt_synack.v64	= 0;
+	treq->snt_synack	= 0;
 	treq->tfo_listener	= false;
 
 	ireq->ir_iif = inet_request_bound_dev_if(sk, skb);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 850054800526..b5d18484746d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2706,7 +2706,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 		if (!tp->repair)
 			err = -EPERM;
 		else
-			tp->tsoffset = val - tcp_time_stamp;
+			tp->tsoffset = val - tcp_time_stamp_raw();
 		break;
 	case TCP_REPAIR_WINDOW:
 		err = tcp_repair_set_window(tp, optval, optlen);
@@ -3072,7 +3072,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
 		break;
 
 	case TCP_TIMESTAMP:
-		val = tcp_time_stamp + tp->tsoffset;
+		val = tcp_time_stamp_raw() + tp->tsoffset;
 		break;
 	case TCP_NOTSENT_LOWAT:
 		val = tp->notsent_lowat;
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 40dc4fc5f6ac..dbcc9352a48f 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -91,7 +91,7 @@ struct bbr {
 	struct minmax bw;	/* Max recent delivery rate in pkts/uS << 24 */
 	u32	rtt_cnt;	    /* count of packet-timed rounds elapsed */
 	u32     next_rtt_delivered; /* scb->tx.delivered at end of round */
-	struct skb_mstamp cycle_mstamp;  /* time of this cycle phase start */
+	u64	cycle_mstamp;	     /* time of this cycle phase start */
 	u32     mode:3,		     /* current bbr_mode in state machine */
 		prev_ca_state:3,     /* CA state on previous ACK */
 		packet_conservation:1,  /* use packet conservation? */
@@ -411,7 +411,7 @@ static bool bbr_is_next_cycle_phase(struct sock *sk,
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bbr *bbr = inet_csk_ca(sk);
 	bool is_full_length =
-		skb_mstamp_us_delta(&tp->delivered_mstamp, &bbr->cycle_mstamp) >
+		tcp_stamp_us_delta(tp->delivered_mstamp, bbr->cycle_mstamp) >
 		bbr->min_rtt_us;
 	u32 inflight, bw;
 
@@ -497,7 +497,7 @@ static void bbr_reset_lt_bw_sampling_interval(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bbr *bbr = inet_csk_ca(sk);
 
-	bbr->lt_last_stamp = tp->delivered_mstamp.stamp_jiffies;
+	bbr->lt_last_stamp = div_u64(tp->delivered_mstamp, USEC_PER_MSEC);
 	bbr->lt_last_delivered = tp->delivered;
 	bbr->lt_last_lost = tp->lost;
 	bbr->lt_rtt_cnt = 0;
@@ -551,7 +551,7 @@ static void bbr_lt_bw_sampling(struct sock *sk, const struct rate_sample *rs)
 	struct bbr *bbr = inet_csk_ca(sk);
 	u32 lost, delivered;
 	u64 bw;
-	s32 t;
+	u32 t;
 
 	if (bbr->lt_use_bw) {	/* already using long-term rate, lt_bw? */
 		if (bbr->mode == BBR_PROBE_BW && bbr->round_start &&
@@ -603,15 +603,15 @@ static void bbr_lt_bw_sampling(struct sock *sk, const struct rate_sample *rs)
 		return;
 
 	/* Find average delivery rate in this sampling interval. */
-	t = (s32)(tp->delivered_mstamp.stamp_jiffies - bbr->lt_last_stamp);
-	if (t < 1)
-		return;		/* interval is less than one jiffy, so wait */
-	t = jiffies_to_usecs(t);
-	/* Interval long enough for jiffies_to_usecs() to return a bogus 0? */
-	if (t < 1) {
+	t = div_u64(tp->delivered_mstamp, USEC_PER_MSEC) - bbr->lt_last_stamp;
+	if ((s32)t < 1)
+		return;		/* interval is less than one ms, so wait */
+	/* Check if can multiply without overflow */
+	if (t >= ~0U / USEC_PER_MSEC) {
 		bbr_reset_lt_bw_sampling(sk);  /* interval too long; reset */
 		return;
 	}
+	t *= USEC_PER_MSEC;
 	bw = (u64)delivered * BW_UNIT;
 	do_div(bw, t);
 	bbr_lt_bw_interval_done(sk, bw);
@@ -825,7 +825,7 @@ static void bbr_init(struct sock *sk)
 	bbr->idle_restart = 0;
 	bbr->full_bw = 0;
 	bbr->full_bw_cnt = 0;
-	bbr->cycle_mstamp.v64 = 0;
+	bbr->cycle_mstamp = 0;
 	bbr->cycle_idx = 0;
 	bbr_reset_lt_bw_sampling(sk);
 	bbr_reset_startup_mode(sk);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 10e6775464f6..9a5a9e8eda89 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -441,7 +441,7 @@ void tcp_init_buffer_space(struct sock *sk)
 	tcp_sndbuf_expand(sk);
 
 	tp->rcvq_space.space = tp->rcv_wnd;
-	skb_mstamp_get(&tp->tcp_mstamp);
+	tcp_mstamp_refresh(tp);
 	tp->rcvq_space.time = tp->tcp_mstamp;
 	tp->rcvq_space.seq = tp->copied_seq;
 
@@ -555,11 +555,11 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
 {
 	u32 delta_us;
 
-	if (tp->rcv_rtt_est.time.v64 == 0)
+	if (tp->rcv_rtt_est.time == 0)
 		goto new_measure;
 	if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
 		return;
-	delta_us = skb_mstamp_us_delta(&tp->tcp_mstamp, &tp->rcv_rtt_est.time);
+	delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time);
 	tcp_rcv_rtt_update(tp, delta_us, 1);
 
 new_measure:
@@ -571,13 +571,15 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
 					  const struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (tp->rx_opt.rcv_tsecr &&
 	    (TCP_SKB_CB(skb)->end_seq -
-	     TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss))
-		tcp_rcv_rtt_update(tp,
-				   jiffies_to_usecs(tcp_time_stamp -
-						    tp->rx_opt.rcv_tsecr),
-				   0);
+	     TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) {
+		u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
+		u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
+
+		tcp_rcv_rtt_update(tp, delta_us, 0);
+	}
 }
 
 /*
@@ -590,7 +592,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
 	int time;
 	int copied;
 
-	time = skb_mstamp_us_delta(&tp->tcp_mstamp, &tp->rcvq_space.time);
+	time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
 	if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0)
 		return;
 
@@ -1134,8 +1136,8 @@ struct tcp_sacktag_state {
 	 * that was SACKed. RTO needs the earliest RTT to stay conservative,
 	 * but congestion control should still get an accurate delay signal.
 	 */
-	struct skb_mstamp first_sackt;
-	struct skb_mstamp last_sackt;
+	u64	first_sackt;
+	u64	last_sackt;
 	struct rate_sample *rate;
 	int	flag;
 };
@@ -1200,7 +1202,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
 			  struct tcp_sacktag_state *state, u8 sacked,
 			  u32 start_seq, u32 end_seq,
 			  int dup_sack, int pcount,
-			  const struct skb_mstamp *xmit_time)
+			  u64 xmit_time)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	int fack_count = state->fack_count;
@@ -1242,9 +1244,9 @@ static u8 tcp_sacktag_one(struct sock *sk,
 					state->reord);
 			if (!after(end_seq, tp->high_seq))
 				state->flag |= FLAG_ORIG_SACK_ACKED;
-			if (state->first_sackt.v64 == 0)
-				state->first_sackt = *xmit_time;
-			state->last_sackt = *xmit_time;
+			if (state->first_sackt == 0)
+				state->first_sackt = xmit_time;
+			state->last_sackt = xmit_time;
 		}
 
 		if (sacked & TCPCB_LOST) {
@@ -1304,7 +1306,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 	 */
 	tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
 			start_seq, end_seq, dup_sack, pcount,
-			&skb->skb_mstamp);
+			skb->skb_mstamp);
 	tcp_rate_skb_delivered(sk, skb, state->rate);
 
 	if (skb == tp->lost_skb_hint)
@@ -1356,8 +1358,8 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 		tcp_advance_highest_sack(sk, skb);
 
 	tcp_skb_collapse_tstamp(prev, skb);
-	if (unlikely(TCP_SKB_CB(prev)->tx.delivered_mstamp.v64))
-		TCP_SKB_CB(prev)->tx.delivered_mstamp.v64 = 0;
+	if (unlikely(TCP_SKB_CB(prev)->tx.delivered_mstamp))
+		TCP_SKB_CB(prev)->tx.delivered_mstamp = 0;
 
 	tcp_unlink_write_queue(skb, sk);
 	sk_wmem_free_skb(sk, skb);
@@ -1587,7 +1589,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
 						TCP_SKB_CB(skb)->end_seq,
 						dup_sack,
 						tcp_skb_pcount(skb),
-						&skb->skb_mstamp);
+						skb->skb_mstamp);
 			tcp_rate_skb_delivered(sk, skb, state->rate);
 
 			if (!before(TCP_SKB_CB(skb)->seq,
@@ -2936,9 +2938,12 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
 	 * See draft-ietf-tcplw-high-performance-00, section 3.3.
 	 */
 	if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
-	    flag & FLAG_ACKED)
-		seq_rtt_us = ca_rtt_us = jiffies_to_usecs(tcp_time_stamp -
-							  tp->rx_opt.rcv_tsecr);
+	    flag & FLAG_ACKED) {
+		u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
+		u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
+
+		seq_rtt_us = ca_rtt_us = delta_us;
+	}
 	if (seq_rtt_us < 0)
 		return false;
 
@@ -2960,12 +2965,8 @@ void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req)
 {
 	long rtt_us = -1L;
 
-	if (req && !req->num_retrans && tcp_rsk(req)->snt_synack.v64) {
-		struct skb_mstamp now;
-
-		skb_mstamp_get(&now);
-		rtt_us = skb_mstamp_us_delta(&now, &tcp_rsk(req)->snt_synack);
-	}
+	if (req && !req->num_retrans && tcp_rsk(req)->snt_synack)
+		rtt_us = tcp_stamp_us_delta(tcp_clock_us(), tcp_rsk(req)->snt_synack);
 
 	tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L, rtt_us);
 }
@@ -3003,7 +3004,7 @@ void tcp_rearm_rto(struct sock *sk)
 		struct sk_buff *skb = tcp_write_queue_head(sk);
 		const u32 rto_time_stamp =
 			tcp_skb_timestamp(skb) + rto;
-		s32 delta = (s32)(rto_time_stamp - tcp_time_stamp);
+		s32 delta = (s32)(rto_time_stamp - tcp_jiffies32);
 		/* delta may not be positive if the socket is locked
 		 * when the retrans timer fires and is rescheduled.
 		 */
@@ -3060,9 +3061,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 			       struct tcp_sacktag_state *sack)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
-	struct skb_mstamp first_ackt, last_ackt;
+	u64 first_ackt, last_ackt;
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct skb_mstamp *now = &tp->tcp_mstamp;
 	u32 prior_sacked = tp->sacked_out;
 	u32 reord = tp->packets_out;
 	bool fully_acked = true;
@@ -3075,7 +3075,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 	bool rtt_update;
 	int flag = 0;
 
-	first_ackt.v64 = 0;
+	first_ackt = 0;
 
 	while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) {
 		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
@@ -3106,8 +3106,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 				flag |= FLAG_RETRANS_DATA_ACKED;
 		} else if (!(sacked & TCPCB_SACKED_ACKED)) {
 			last_ackt = skb->skb_mstamp;
-			WARN_ON_ONCE(last_ackt.v64 == 0);
-			if (!first_ackt.v64)
+			WARN_ON_ONCE(last_ackt == 0);
+			if (!first_ackt)
 				first_ackt = last_ackt;
 
 			last_in_flight = TCP_SKB_CB(skb)->tx.in_flight;
@@ -3122,7 +3122,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 			tp->delivered += acked_pcount;
 			if (!tcp_skb_spurious_retrans(tp, skb))
 				tcp_rack_advance(tp, sacked, scb->end_seq,
-						 &skb->skb_mstamp);
+						 skb->skb_mstamp);
 		}
 		if (sacked & TCPCB_LOST)
 			tp->lost_out -= acked_pcount;
@@ -3165,13 +3165,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 	if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
 		flag |= FLAG_SACK_RENEGING;
 
-	if (likely(first_ackt.v64) && !(flag & FLAG_RETRANS_DATA_ACKED)) {
-		seq_rtt_us = skb_mstamp_us_delta(now, &first_ackt);
-		ca_rtt_us = skb_mstamp_us_delta(now, &last_ackt);
+	if (likely(first_ackt) && !(flag & FLAG_RETRANS_DATA_ACKED)) {
+		seq_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, first_ackt);
+		ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, last_ackt);
 	}
-	if (sack->first_sackt.v64) {
-		sack_rtt_us = skb_mstamp_us_delta(now, &sack->first_sackt);
-		ca_rtt_us = skb_mstamp_us_delta(now, &sack->last_sackt);
+	if (sack->first_sackt) {
+		sack_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->first_sackt);
+		ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->last_sackt);
 	}
 	sack->rate->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet, or -1 */
 	rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us,
@@ -3201,7 +3201,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 		tp->fackets_out -= min(pkts_acked, tp->fackets_out);
 
 	} else if (skb && rtt_update && sack_rtt_us >= 0 &&
-		   sack_rtt_us > skb_mstamp_us_delta(now, &skb->skb_mstamp)) {
+		   sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp)) {
 		/* Do not re-arm RTO if the sack RTT is measured from data sent
 		 * after when the head was last (re)transmitted. Otherwise the
 		 * timeout may continue to extend in loss recovery.
@@ -3553,7 +3553,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	int acked = 0; /* Number of packets newly acked */
 	int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */
 
-	sack_state.first_sackt.v64 = 0;
+	sack_state.first_sackt = 0;
 	sack_state.rate = &rs;
 
 	/* We very likely will need to access write queue head. */
@@ -5356,7 +5356,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	skb_mstamp_get(&tp->tcp_mstamp);
+	tcp_mstamp_refresh(tp);
 	if (unlikely(!sk->sk_rx_dst))
 		inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
 	/*
@@ -5672,7 +5672,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 
 		if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
 		    !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
-			     tcp_time_stamp)) {
+			     tcp_time_stamp(tp))) {
 			NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_PAWSACTIVEREJECTED);
 			goto reset_and_undo;
@@ -5917,7 +5917,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 
 	case TCP_SYN_SENT:
 		tp->rx_opt.saw_tstamp = 0;
-		skb_mstamp_get(&tp->tcp_mstamp);
+		tcp_mstamp_refresh(tp);
 		queued = tcp_rcv_synsent_state_process(sk, skb, th);
 		if (queued >= 0)
 			return queued;
@@ -5929,7 +5929,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 		return 0;
 	}
 
-	skb_mstamp_get(&tp->tcp_mstamp);
+	tcp_mstamp_refresh(tp);
 	tp->rx_opt.saw_tstamp = 0;
 	req = tp->fastopen_rsk;
 	if (req) {
@@ -6202,7 +6202,7 @@ static void tcp_openreq_init(struct request_sock *req,
 	req->cookie_ts = 0;
 	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
 	tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
-	skb_mstamp_get(&tcp_rsk(req)->snt_synack);
+	tcp_rsk(req)->snt_synack = tcp_clock_us();
 	tcp_rsk(req)->last_oow_ack_time = 0;
 	req->mss = rx_opt->mss_clamp;
 	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index d8fe25db79f2..191b2f78b19d 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -376,8 +376,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 	struct sock *sk;
 	struct sk_buff *skb;
 	struct request_sock *fastopen;
-	__u32 seq, snd_una;
-	__u32 remaining;
+	u32 seq, snd_una;
+	s32 remaining;
+	u32 delta_us;
 	int err;
 	struct net *net = dev_net(icmp_skb->dev);
 
@@ -483,12 +484,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 		skb = tcp_write_queue_head(sk);
 		BUG_ON(!skb);
 
-		skb_mstamp_get(&tp->tcp_mstamp);
+		tcp_mstamp_refresh(tp);
+		delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
 		remaining = icsk->icsk_rto -
-			    min(icsk->icsk_rto,
-				tcp_time_stamp - tcp_skb_timestamp(skb));
+			    usecs_to_jiffies(delta_us);
 
-		if (remaining) {
+		if (remaining > 0) {
 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 						  remaining, TCP_RTO_MAX);
 		} else {
@@ -812,7 +813,7 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 	tcp_v4_send_ack(sk, skb,
 			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
-			tcp_time_stamp + tcptw->tw_ts_offset,
+			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
 			tcptw->tw_ts_recent,
 			tw->tw_bound_dev_if,
 			tcp_twsk_md5_key(tcptw),
@@ -840,7 +841,7 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 	tcp_v4_send_ack(sk, skb, seq,
 			tcp_rsk(req)->rcv_nxt,
 			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
-			tcp_time_stamp + tcp_rsk(req)->ts_off,
+			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
 			req->ts_recent,
 			0,
 			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index ef3122abb373..ae10ed64fe13 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -37,7 +37,7 @@
 #include <net/tcp.h>
 
 /* resolution of owd */
-#define LP_RESOL       1000
+#define LP_RESOL       TCP_TS_HZ
 
 /**
  * enum tcp_lp_state
@@ -147,9 +147,9 @@ static u32 tcp_lp_remote_hz_estimator(struct sock *sk)
 	    tp->rx_opt.rcv_tsecr == lp->local_ref_time)
 		goto out;
 
-	m = HZ * (tp->rx_opt.rcv_tsval -
-		  lp->remote_ref_time) / (tp->rx_opt.rcv_tsecr -
-					  lp->local_ref_time);
+	m = TCP_TS_HZ *
+	    (tp->rx_opt.rcv_tsval - lp->remote_ref_time) /
+	    (tp->rx_opt.rcv_tsecr - lp->local_ref_time);
 	if (m < 0)
 		m = -m;
 
@@ -194,7 +194,7 @@ static u32 tcp_lp_owd_calculator(struct sock *sk)
 	if (lp->flag & LP_VALID_RHZ) {
 		owd =
 		    tp->rx_opt.rcv_tsval * (LP_RESOL / lp->remote_hz) -
-		    tp->rx_opt.rcv_tsecr * (LP_RESOL / HZ);
+		    tp->rx_opt.rcv_tsecr * (LP_RESOL / TCP_TS_HZ);
 		if (owd < 0)
 			owd = -owd;
 	}
@@ -264,7 +264,7 @@ static void tcp_lp_pkts_acked(struct sock *sk, const struct ack_sample *sample)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct lp *lp = inet_csk_ca(sk);
-	u32 now = tcp_time_stamp;
+	u32 now = tcp_time_stamp(tp);
 	u32 delta;
 
 	if (sample->rtt_us > 0)
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 6504f1082bdf..d0642df73044 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -455,7 +455,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
 		newtp->fackets_out = 0;
 		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 		newtp->tlp_high_seq = 0;
-		newtp->lsndtime = treq->snt_synack.stamp_jiffies;
+		newtp->lsndtime = tcp_jiffies32;
 		newsk->sk_txhash = treq->txhash;
 		newtp->last_oow_ack_time = 0;
 		newtp->total_retrans = req->num_retrans;
@@ -526,7 +526,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
 		newtp->fastopen_req = NULL;
 		newtp->fastopen_rsk = NULL;
 		newtp->syn_data_acked = 0;
-		newtp->rack.mstamp.v64 = 0;
+		newtp->rack.mstamp = 0;
 		newtp->rack.advanced = 0;
 
 		__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 65472e931a0b..478f75baee31 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1962,7 +1962,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 
 	head = tcp_write_queue_head(sk);
 
-	age = skb_mstamp_us_delta(&tp->tcp_mstamp, &head->skb_mstamp);
+	age = tcp_stamp_us_delta(tp->tcp_mstamp, head->skb_mstamp);
 	/* If next ACK is likely to come too late (half srtt), do not defer */
 	if (age < (tp->srtt_us >> 4))
 		goto send_now;
@@ -2279,7 +2279,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 	}
 
 	max_segs = tcp_tso_segs(sk, mss_now);
-	skb_mstamp_get(&tp->tcp_mstamp);
+	tcp_mstamp_refresh(tp);
 	while ((skb = tcp_send_head(sk))) {
 		unsigned int limit;
 
@@ -3095,7 +3095,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 	skb_reserve(skb, MAX_TCP_HEADER);
 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
 			     TCPHDR_ACK | TCPHDR_RST);
-	skb_mstamp_get(&tcp_sk(sk)->tcp_mstamp);
+	tcp_mstamp_refresh(tcp_sk(sk));
 	/* Send it off. */
 	if (tcp_transmit_skb(sk, skb, 0, priority))
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
@@ -3191,10 +3191,10 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 	memset(&opts, 0, sizeof(opts));
 #ifdef CONFIG_SYN_COOKIES
 	if (unlikely(req->cookie_ts))
-		skb->skb_mstamp.stamp_jiffies = cookie_init_timestamp(req);
+		skb->skb_mstamp = cookie_init_timestamp(req);
 	else
 #endif
-	skb_mstamp_get(&skb->skb_mstamp);
+		skb->skb_mstamp = tcp_clock_us();
 
 #ifdef CONFIG_TCP_MD5SIG
 	rcu_read_lock();
@@ -3453,8 +3453,8 @@ int tcp_connect(struct sock *sk)
 		return -ENOBUFS;
 
 	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
-	skb_mstamp_get(&tp->tcp_mstamp);
-	tp->retrans_stamp = tp->tcp_mstamp.stamp_jiffies;
+	tcp_mstamp_refresh(tp);
+	tp->retrans_stamp = tcp_time_stamp(tp);
 	tcp_connect_queue_skb(sk, buff);
 	tcp_ecn_send_syn(sk, buff);
 
@@ -3615,7 +3615,7 @@ void tcp_send_window_probe(struct sock *sk)
 {
 	if (sk->sk_state == TCP_ESTABLISHED) {
 		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
-		skb_mstamp_get(&tcp_sk(sk)->tcp_mstamp);
+		tcp_mstamp_refresh(tcp_sk(sk));
 		tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
 	}
 }
diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c
index c6a9fa894646..ad99569d4c1e 100644
--- a/net/ipv4/tcp_rate.c
+++ b/net/ipv4/tcp_rate.c
@@ -78,7 +78,7 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
 
-	if (!scb->tx.delivered_mstamp.v64)
+	if (!scb->tx.delivered_mstamp)
 		return;
 
 	if (!rs->prior_delivered ||
@@ -89,9 +89,9 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
 		rs->is_retrans	     = scb->sacked & TCPCB_RETRANS;
 
 		/* Find the duration of the "send phase" of this window: */
-		rs->interval_us      = skb_mstamp_us_delta(
-						&skb->skb_mstamp,
-						&scb->tx.first_tx_mstamp);
+		rs->interval_us      = tcp_stamp_us_delta(
+						skb->skb_mstamp,
+						scb->tx.first_tx_mstamp);
 
 		/* Record send time of most recently ACKed packet: */
 		tp->first_tx_mstamp  = skb->skb_mstamp;
@@ -101,7 +101,7 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
 	 * we don't need to reset since it'll be freed soon.
 	 */
 	if (scb->sacked & TCPCB_SACKED_ACKED)
-		scb->tx.delivered_mstamp.v64 = 0;
+		scb->tx.delivered_mstamp = 0;
 }
 
 /* Update the connection delivery information and generate a rate sample. */
@@ -125,7 +125,7 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
 	rs->acked_sacked = delivered;	/* freshly ACKed or SACKed */
 	rs->losses = lost;		/* freshly marked lost */
 	/* Return an invalid sample if no timing information is available. */
-	if (!rs->prior_mstamp.v64) {
+	if (!rs->prior_mstamp) {
 		rs->delivered = -1;
 		rs->interval_us = -1;
 		return;
@@ -138,8 +138,8 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
 	 * longer phase.
 	 */
 	snd_us = rs->interval_us;				/* send phase */
-	ack_us = skb_mstamp_us_delta(&tp->tcp_mstamp,
-				     &rs->prior_mstamp); /* ack phase */
+	ack_us = tcp_stamp_us_delta(tp->tcp_mstamp,
+				    rs->prior_mstamp); /* ack phase */
 	rs->interval_us = max(snd_us, ack_us);
 
 	/* Normally we expect interval_us >= min-rtt.
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index cd72b3d3879e..fe9a493d0208 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -17,12 +17,9 @@ static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
 	}
 }
 
-static bool tcp_rack_sent_after(const struct skb_mstamp *t1,
-				const struct skb_mstamp *t2,
-				u32 seq1, u32 seq2)
+static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
 {
-	return skb_mstamp_after(t1, t2) ||
-	       (t1->v64 == t2->v64 && after(seq1, seq2));
+	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
 }
 
 /* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
@@ -72,14 +69,14 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
 		    scb->sacked & TCPCB_SACKED_ACKED)
 			continue;
 
-		if (tcp_rack_sent_after(&tp->rack.mstamp, &skb->skb_mstamp,
+		if (tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp,
 					tp->rack.end_seq, scb->end_seq)) {
 			/* Step 3 in draft-cheng-tcpm-rack-00.txt:
 			 * A packet is lost if its elapsed time is beyond
 			 * the recent RTT plus the reordering window.
 			 */
-			u32 elapsed = skb_mstamp_us_delta(&tp->tcp_mstamp,
-							  &skb->skb_mstamp);
+			u32 elapsed = tcp_stamp_us_delta(tp->tcp_mstamp,
+							 skb->skb_mstamp);
 			s32 remaining = tp->rack.rtt_us + reo_wnd - elapsed;
 
 			if (remaining < 0) {
@@ -127,16 +124,16 @@ void tcp_rack_mark_lost(struct sock *sk)
  * draft-cheng-tcpm-rack-00.txt
  */
 void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
-		      const struct skb_mstamp *xmit_time)
+		      u64 xmit_time)
 {
 	u32 rtt_us;
 
-	if (tp->rack.mstamp.v64 &&
-	    !tcp_rack_sent_after(xmit_time, &tp->rack.mstamp,
+	if (tp->rack.mstamp &&
+	    !tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
 				 end_seq, tp->rack.end_seq))
 		return;
 
-	rtt_us = skb_mstamp_us_delta(&tp->tcp_mstamp, xmit_time);
+	rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
 	if (sacked & TCPCB_RETRANS) {
 		/* If the sacked packet was retransmitted, it's ambiguous
 		 * whether the retransmission or the original (or the prior
@@ -152,7 +149,7 @@ void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
 		return;
 	}
 	tp->rack.rtt_us = rtt_us;
-	tp->rack.mstamp = *xmit_time;
+	tp->rack.mstamp = xmit_time;
 	tp->rack.end_seq = end_seq;
 	tp->rack.advanced = 1;
 }
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 6629f47aa7f0..27a667bce806 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -153,8 +153,8 @@ static bool retransmits_timed_out(struct sock *sk,
 				  unsigned int timeout,
 				  bool syn_set)
 {
-	unsigned int linear_backoff_thresh, start_ts;
 	unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
+	unsigned int linear_backoff_thresh, start_ts;
 
 	if (!inet_csk(sk)->icsk_retransmits)
 		return false;
@@ -172,7 +172,7 @@ static bool retransmits_timed_out(struct sock *sk,
 		timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
 			(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
 	}
-	return (tcp_time_stamp - start_ts) >= timeout;
+	return (tcp_time_stamp(tcp_sk(sk)) - start_ts) >= jiffies_to_msecs(timeout);
 }
 
 /* A write timeout has occurred. Process the after effects. */
@@ -341,7 +341,7 @@ static void tcp_probe_timer(struct sock *sk)
 	if (!start_ts)
 		tcp_send_head(sk)->skb_mstamp = tp->tcp_mstamp;
 	else if (icsk->icsk_user_timeout &&
-		 (s32)(tcp_time_stamp - start_ts) > icsk->icsk_user_timeout)
+		 (s32)(tcp_time_stamp(tp) - start_ts) > icsk->icsk_user_timeout)
 		goto abort;
 
 	max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
@@ -561,7 +561,7 @@ void tcp_write_timer_handler(struct sock *sk)
 		goto out;
 	}
 
-	skb_mstamp_get(&tcp_sk(sk)->tcp_mstamp);
+	tcp_mstamp_refresh(tcp_sk(sk));
 	event = icsk->icsk_pending;
 
 	switch (event) {
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 5abc3692b901..971823359f5b 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -211,7 +211,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 	ireq->wscale_ok = tcp_opt.wscale_ok;
 	ireq->tstamp_ok	= tcp_opt.saw_tstamp;
 	req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
-	treq->snt_synack.v64 = 0;
+	treq->snt_synack = 0;
 	treq->rcv_isn = ntohl(th->seq) - 1;
 	treq->snt_isn = cookie;
 	treq->ts_off = 0;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 4f4310a36a04..233edfabe1db 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -949,7 +949,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 
 	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
-			tcp_time_stamp + tcptw->tw_ts_offset,
+			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
 			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
 			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
 
@@ -971,7 +971,7 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
 			tcp_rsk(req)->rcv_nxt,
 			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
-			tcp_time_stamp + tcp_rsk(req)->ts_off,
+			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
 			req->ts_recent, sk->sk_bound_dev_if,
 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
 			0, 0);
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index a504e87c6ddf..49bd8bb16b18 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -152,7 +152,7 @@ void synproxy_init_timestamp_cookie(const struct xt_synproxy_info *info,
 			      struct synproxy_options *opts)
 {
 	opts->tsecr = opts->tsval;
-	opts->tsval = tcp_time_stamp & ~0x3f;
+	opts->tsval = tcp_time_stamp_raw() & ~0x3f;
 
 	if (opts->options & XT_SYNPROXY_OPT_WSCALE) {
 		opts->tsval |= opts->wscale;