author     Eric Dumazet <edumazet@google.com>     2017-05-16 17:00:00 -0400
committer  David S. Miller <davem@davemloft.net>  2017-05-17 16:06:01 -0400
commit     385e20706facd376f27863bd55b7cc7720d3f27b
tree       b3e6e53158d3348cd6d6b7b473adfe2f1c5a8d6c  /net/ipv4/tcp_output.c
parent     9d4f97f97bb8adc47f569d995402c33de9a4fa19
tcp: use tp->tcp_mstamp in output path
The idea is to later convert tp->tcp_mstamp to a full u64 counter
with usec resolution, so that we can have a fine-grained TCP TS
clock (RFC 7323), regardless of the HZ value.

We try to refresh tp->tcp_mstamp only when necessary.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
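
In practice the change boils down to one pattern: sample the clock once per
transmit burst into the per-socket cache tp->tcp_mstamp, then stamp every skb
sent in that burst by copying the cached value instead of calling
skb_mstamp_get() for each packet. The fragment below is a minimal user-space
sketch of that pattern only; the struct and function names are illustrative
stand-ins, not the kernel's (which uses struct skb_mstamp, skb_mstamp_get()
and tp->tcp_mstamp).

/* Sketch of "refresh once, copy many times" timestamping.
 * All names here are illustrative, not taken from the kernel.
 */
#include <stdint.h>
#include <time.h>

struct mstamp   { uint64_t usec; };           /* cached clock sample  */
struct fake_sock { struct mstamp mstamp; };   /* per-"socket" cache   */
struct fake_skb  { struct mstamp mstamp; };   /* per-packet timestamp */

static void mstamp_get(struct mstamp *m)      /* the expensive clock read */
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	m->usec = (uint64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

static void write_xmit(struct fake_sock *sk, struct fake_skb *skbs, int n)
{
	int i;

	mstamp_get(&sk->mstamp);              /* refresh once per burst  */
	for (i = 0; i < n; i++)
		skbs[i].mstamp = sk->mstamp;  /* cheap copy per packet   */
}

int main(void)
{
	struct fake_sock sk;
	struct fake_skb skbs[4];

	write_xmit(&sk, skbs, 4);
	return 0;
}
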
Diffstat (limited to 'net/ipv4/tcp_output.c')
 net/ipv4/tcp_output.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index a32172d69a03..4c8a6eaba6b3 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -997,8 +997,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	BUG_ON(!skb || !tcp_skb_pcount(skb));
 	tp = tcp_sk(sk);
 
+	skb->skb_mstamp = tp->tcp_mstamp;
 	if (clone_it) {
-		skb_mstamp_get(&skb->skb_mstamp);
 		TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
 			- tp->snd_una;
 		tcp_rate_skb_sent(sk, skb);
@@ -1906,7 +1906,6 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	u32 age, send_win, cong_win, limit, in_flight;
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct skb_mstamp now;
 	struct sk_buff *head;
 	int win_divisor;
 
@@ -1962,8 +1961,8 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 	}
 
 	head = tcp_write_queue_head(sk);
-	skb_mstamp_get(&now);
-	age = skb_mstamp_us_delta(&now, &head->skb_mstamp);
+
+	age = skb_mstamp_us_delta(&tp->tcp_mstamp, &head->skb_mstamp);
 	/* If next ACK is likely to come too late (half srtt), do not defer */
 	if (age < (tp->srtt_us >> 4))
 		goto send_now;
@@ -2280,6 +2279,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 	}
 
 	max_segs = tcp_tso_segs(sk, mss_now);
+	skb_mstamp_get(&tp->tcp_mstamp);
 	while ((skb = tcp_send_head(sk))) {
 		unsigned int limit;
 
@@ -2291,7 +2291,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 
 		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
 			/* "skb_mstamp" is used as a start point for the retransmit timer */
-			skb_mstamp_get(&skb->skb_mstamp);
+			skb->skb_mstamp = tp->tcp_mstamp;
 			goto repair; /* Skip network transmission */
 		}
 
@@ -2879,7 +2879,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
 		     skb_headroom(skb) >= 0xFFFF)) {
 		struct sk_buff *nskb;
 
-		skb_mstamp_get(&skb->skb_mstamp);
+		skb->skb_mstamp = tp->tcp_mstamp;
 		nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
 		err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
 			     -ENOBUFS;
@@ -3095,7 +3095,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 	skb_reserve(skb, MAX_TCP_HEADER);
 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
 			     TCPHDR_ACK | TCPHDR_RST);
-	skb_mstamp_get(&skb->skb_mstamp);
+	skb_mstamp_get(&tcp_sk(sk)->tcp_mstamp);
 	/* Send it off. */
 	if (tcp_transmit_skb(sk, skb, 0, priority))
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
@@ -3453,7 +3453,8 @@ int tcp_connect(struct sock *sk)
 		return -ENOBUFS;
 
 	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
-	tp->retrans_stamp = tcp_time_stamp;
+	skb_mstamp_get(&tp->tcp_mstamp);
+	tp->retrans_stamp = tp->tcp_mstamp.stamp_jiffies;
 	tcp_connect_queue_skb(sk, buff);
 	tcp_ecn_send_syn(sk, buff);
 
@@ -3572,7 +3573,6 @@ void tcp_send_ack(struct sock *sk)
 	skb_set_tcp_pure_ack(buff);
 
 	/* Send it off, this clears delayed acks for us. */
-	skb_mstamp_get(&buff->skb_mstamp);
 	tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0);
 }
 EXPORT_SYMBOL_GPL(tcp_send_ack);
@@ -3606,15 +3606,16 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
 	 * send it.
 	 */
 	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
-	skb_mstamp_get(&skb->skb_mstamp);
 	NET_INC_STATS(sock_net(sk), mib);
 	return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0);
 }
 
+/* Called from setsockopt( ... TCP_REPAIR ) */
 void tcp_send_window_probe(struct sock *sk)
 {
 	if (sk->sk_state == TCP_ESTABLISHED) {
 		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
+		skb_mstamp_get(&tcp_sk(sk)->tcp_mstamp);
 		tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
 	}
 }
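
For reference, tp->tcp_mstamp has the same struct skb_mstamp type as
skb->skb_mstamp, which is why it can be copied straight into outgoing skbs,
passed directly to skb_mstamp_us_delta(), and why its stamp_jiffies field can
stand in for tcp_time_stamp when setting tp->retrans_stamp in tcp_connect().
At the time of this patch the helper looked roughly like the sketch below
(simplified from include/linux/skbuff.h of that era; treat it as illustrative,
not a verbatim copy):

/* Roughly what struct skb_mstamp provided around v4.11 (simplified). */
struct skb_mstamp {
	union {
		u64 v64;
		struct {
			u32 stamp_us;      /* usec-resolution clock sample */
			u32 stamp_jiffies; /* jiffies at sample time       */
		};
	};
};

static inline void skb_mstamp_get(struct skb_mstamp *cl)
{
	u64 val = local_clock();           /* nanoseconds since boot */

	do_div(val, NSEC_PER_USEC);
	cl->stamp_us = (u32)val;
	cl->stamp_jiffies = (u32)jiffies;
}

/* Elapsed microseconds between two samples, falling back to the
 * jiffies delta if the per-cpu clock appears to have gone backwards.
 */
static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
				      const struct skb_mstamp *t0)
{
	s32 delta_us = t1->stamp_us - t0->stamp_us;
	u32 delta_jiffies = t1->stamp_jiffies - t0->stamp_jiffies;

	if (delta_us <= 0 ||
	    delta_jiffies >= (INT_MAX / (USEC_PER_SEC / HZ)))
		delta_us = jiffies_to_usecs(delta_jiffies);

	return delta_us;
}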