author		Eric Dumazet <edumazet@google.com>	2014-09-05 18:33:33 -0400
committer	David S. Miller <davem@davemloft.net>	2014-09-05 20:49:33 -0400
commit		7faee5c0d514162853a343d93e4a0b6bb8bfec21 (patch)
tree		ebaba03f755dfbe9d942fbe610ce3aecd1b0fc33 /net/ipv4
parent		04317dafd11dd7b0ec19b85f098414abae6ed5f7 (diff)
tcp: remove TCP_SKB_CB(skb)->when
After commit 740b0f1841f6 ("tcp: switch rtt estimations to usec resolution"),
we no longer need to maintain timestamps in two different fields.

TCP_SKB_CB(skb)->when can be removed, as the same information sits in
skb_mstamp.stamp_jiffies.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
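For context: tcp_skb_timestamp() and skb_mstamp_get(), used throughout the diff below, live outside the net/ipv4 subtree and therefore do not appear in this diffstat. A rough sketch of the relevant definitions, per commit 740b0f1841f6 and the include/net/tcp.h half of this patch (consult the full tree for the authoritative versions):

struct skb_mstamp {
	union {
		u64	v64;
		struct {
			u32	stamp_us;	/* usec-resolution clock snapshot */
			u32	stamp_jiffies;	/* jiffies at the same instant */
		};
	};
};

/* Fill both the usec and jiffies halves from the current clock. */
static inline void skb_mstamp_get(struct skb_mstamp *cl)
{
	u64 val = local_clock();

	do_div(val, NSEC_PER_USEC);
	cl->stamp_us = (u32)val;
	cl->stamp_jiffies = (u32)jiffies;
}

/* The jiffies view of skb->skb_mstamp; this is what replaces
 * TCP_SKB_CB(skb)->when at every call site below.
 */
static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
{
	return skb->skb_mstamp.stamp_jiffies;
}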
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/tcp_input.c	3
-rw-r--r--	net/ipv4/tcp_ipv4.c	5
-rw-r--r--	net/ipv4/tcp_output.c	39
-rw-r--r--	net/ipv4/tcp_timer.c	7
4 files changed, 24 insertions, 30 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 9c8b9f1dcf69..f97003ad0af5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2967,7 +2967,8 @@ void tcp_rearm_rto(struct sock *sk)
 	if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
 	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
 		struct sk_buff *skb = tcp_write_queue_head(sk);
-		const u32 rto_time_stamp = TCP_SKB_CB(skb)->when + rto;
+		const u32 rto_time_stamp =
+			tcp_skb_timestamp(skb) + rto;
 		s32 delta = (s32)(rto_time_stamp - tcp_time_stamp);
 		/* delta may not be positive if the socket is locked
 		 * when the retrans timer fires and is rescheduled.
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 02e6cd29ebf1..3f9bc3f0bba0 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -437,8 +437,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 		skb = tcp_write_queue_head(sk);
 		BUG_ON(!skb);
 
-		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
-				tcp_time_stamp - TCP_SKB_CB(skb)->when);
+		remaining = icsk->icsk_rto -
+			    min(icsk->icsk_rto,
+				tcp_time_stamp - tcp_skb_timestamp(skb));
 
 		if (remaining) {
 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 5a7c41fbc6d3..3b22dcb7bb5c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -550,7 +550,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 
 	if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
 		opts->options |= OPTION_TS;
-		opts->tsval = TCP_SKB_CB(skb)->when + tp->tsoffset;
+		opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
 		opts->tsecr = tp->rx_opt.ts_recent;
 		remaining -= TCPOLEN_TSTAMP_ALIGNED;
 	}
@@ -618,7 +618,7 @@ static unsigned int tcp_synack_options(struct sock *sk,
 	}
 	if (likely(ireq->tstamp_ok)) {
 		opts->options |= OPTION_TS;
-		opts->tsval = TCP_SKB_CB(skb)->when;
+		opts->tsval = tcp_skb_timestamp(skb);
 		opts->tsecr = req->ts_recent;
 		remaining -= TCPOLEN_TSTAMP_ALIGNED;
 	}
@@ -647,7 +647,6 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
 					   struct tcp_out_options *opts,
 					   struct tcp_md5sig_key **md5)
 {
-	struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
 	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned int size = 0;
 	unsigned int eff_sacks;
@@ -666,7 +665,7 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
 
 	if (likely(tp->rx_opt.tstamp_ok)) {
 		opts->options |= OPTION_TS;
-		opts->tsval = tcb ? tcb->when + tp->tsoffset : 0;
+		opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0;
 		opts->tsecr = tp->rx_opt.ts_recent;
 		size += TCPOLEN_TSTAMP_ALIGNED;
 	}
@@ -886,8 +885,6 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 		skb = skb_clone(skb, gfp_mask);
 		if (unlikely(!skb))
 			return -ENOBUFS;
-		/* Our usage of tstamp should remain private */
-		skb->tstamp.tv64 = 0;
 	}
 
 	inet = inet_sk(sk);
@@ -975,7 +972,10 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
 			      tcp_skb_pcount(skb));
 
+	/* Our usage of tstamp should remain private */
+	skb->tstamp.tv64 = 0;
 	err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
+
 	if (likely(err <= 0))
 		return err;
 
@@ -1149,7 +1149,6 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
 	/* Looks stupid, but our code really uses when of
 	 * skbs, which it never sent before. --ANK
 	 */
-	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
 	buff->tstamp = skb->tstamp;
 	tcp_fragment_tstamp(skb, buff);
 
@@ -1874,8 +1873,8 @@ static int tcp_mtu_probe(struct sock *sk)
 	tcp_init_tso_segs(sk, nskb, nskb->len);
 
 	/* We're ready to send.  If this fails, the probe will
-	 * be resegmented into mss-sized pieces by tcp_write_xmit(). */
-	TCP_SKB_CB(nskb)->when = tcp_time_stamp;
+	 * be resegmented into mss-sized pieces by tcp_write_xmit().
+	 */
 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
 		/* Decrement cwnd here because we are sending
 		 * effectively two packets. */
@@ -1935,8 +1934,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		BUG_ON(!tso_segs);
 
 		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
-			/* "when" is used as a start point for the retransmit timer */
-			TCP_SKB_CB(skb)->when = tcp_time_stamp;
+			/* "skb_mstamp" is used as a start point for the retransmit timer */
+			skb_mstamp_get(&skb->skb_mstamp);
 			goto repair; /* Skip network transmission */
 		}
 
@@ -2000,8 +1999,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
 			break;
 
-		TCP_SKB_CB(skb)->when = tcp_time_stamp;
-
 		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
 			break;
 
@@ -2499,7 +2496,6 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	/* Make a copy, if the first transmission SKB clone we made
 	 * is still in somebody's hands, else make a clone.
 	 */
-	TCP_SKB_CB(skb)->when = tcp_time_stamp;
 
 	/* make sure skb->data is aligned on arches that require it
 	 * and check if ack-trimming & collapsing extended the headroom
@@ -2544,7 +2540,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 
 	/* Save stamp of the first retransmit. */
 	if (!tp->retrans_stamp)
-		tp->retrans_stamp = TCP_SKB_CB(skb)->when;
+		tp->retrans_stamp = tcp_skb_timestamp(skb);
 
 	/* snd_nxt is stored to detect loss of retransmitted segment,
 	 * see tcp_input.c tcp_sacktag_write_queue().
@@ -2752,7 +2748,6 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
 			     TCPHDR_ACK | TCPHDR_RST);
 	/* Send it off. */
-	TCP_SKB_CB(skb)->when = tcp_time_stamp;
 	if (tcp_transmit_skb(sk, skb, 0, priority))
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
 
@@ -2791,7 +2786,6 @@ int tcp_send_synack(struct sock *sk)
 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
 		TCP_ECN_send_synack(tcp_sk(sk), skb);
 	}
-	TCP_SKB_CB(skb)->when = tcp_time_stamp;
 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
 }
 
@@ -2835,10 +2829,10 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 	memset(&opts, 0, sizeof(opts));
 #ifdef CONFIG_SYN_COOKIES
 	if (unlikely(req->cookie_ts))
-		TCP_SKB_CB(skb)->when = cookie_init_timestamp(req);
+		skb->skb_mstamp.stamp_jiffies = cookie_init_timestamp(req);
 	else
 #endif
-		TCP_SKB_CB(skb)->when = tcp_time_stamp;
+		skb_mstamp_get(&skb->skb_mstamp);
 	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5,
 					     foc) + sizeof(*th);
 
@@ -3086,7 +3080,7 @@ int tcp_connect(struct sock *sk)
 	skb_reserve(buff, MAX_TCP_HEADER);
 
 	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
-	tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp;
+	tp->retrans_stamp = tcp_time_stamp;
 	tcp_connect_queue_skb(sk, buff);
 	TCP_ECN_send_syn(sk, buff);
 
@@ -3194,7 +3188,7 @@ void tcp_send_ack(struct sock *sk)
 	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
 
 	/* Send it off, this clears delayed acks for us. */
-	TCP_SKB_CB(buff)->when = tcp_time_stamp;
+	skb_mstamp_get(&buff->skb_mstamp);
 	tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
 }
 
@@ -3226,7 +3220,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
 	 * send it.
 	 */
 	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
-	TCP_SKB_CB(skb)->when = tcp_time_stamp;
+	skb_mstamp_get(&skb->skb_mstamp);
 	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
 }
 
@@ -3270,7 +3264,6 @@ int tcp_write_wakeup(struct sock *sk)
 			tcp_set_skb_tso_segs(sk, skb, mss);
 
 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
-		TCP_SKB_CB(skb)->when = tcp_time_stamp;
 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
 		if (!err)
 			tcp_event_new_data_sent(sk, skb);
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index df90cd1ce37f..a339e7ba05a4 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -135,10 +135,9 @@ static bool retransmits_timed_out(struct sock *sk,
 	if (!inet_csk(sk)->icsk_retransmits)
 		return false;
 
-	if (unlikely(!tcp_sk(sk)->retrans_stamp))
-		start_ts = TCP_SKB_CB(tcp_write_queue_head(sk))->when;
-	else
-		start_ts = tcp_sk(sk)->retrans_stamp;
+	start_ts = tcp_sk(sk)->retrans_stamp;
+	if (unlikely(!start_ts))
+		start_ts = tcp_skb_timestamp(tcp_write_queue_head(sk));
 
 	if (likely(timeout == 0)) {
 		linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);
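
One property the conversion preserves: tcp_skb_timestamp() still returns a 32-bit jiffies value, so the wraparound-safe delta arithmetic in the tcp_rearm_rto() hunk above works unchanged. A minimal standalone illustration of the idiom, with hypothetical values (not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Timestamps taken just before and just after a u32 jiffies
	 * wraparound; "now" is numerically smaller than "stamp + rto".
	 */
	uint32_t stamp = 0xfffffff0u;	/* tcp_skb_timestamp(skb) */
	uint32_t rto   = 0x20;		/* pending timeout, in jiffies */
	uint32_t now   = 0x00000008u;	/* tcp_time_stamp, post-wrap */

	/* Unsigned subtraction wraps modulo 2^32, and the cast to a
	 * signed 32-bit value recovers the true distance as long as
	 * the two stamps are less than 2^31 jiffies apart.
	 */
	int32_t delta = (int32_t)((stamp + rto) - now);

	printf("delta = %d jiffies\n", delta);	/* prints 8, not ~4 billion */
	return 0;
}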