diff options
author | Eric Dumazet <edumazet@google.com> | 2018-09-21 11:51:53 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2018-09-21 22:38:00 -0400 |
commit | c092dd5f4a7f4e4dbbcc8cf2e50b516bf07e432f (patch) | |
tree | 57974bfa7d3c72315774558f421ef9a4dec14e45 /net/ipv4/tcp_output.c | |
parent | ab408b6dc7449c0f791e9e5f8de72fa7428584f2 (diff) |
tcp: switch tcp_internal_pacing() to tcp_wstamp_ns
Now that TCP keeps track of tcp_wstamp_ns, recording the earliest
departure time of the next packet, we can remove duplicate code
from tcp_internal_pacing()
This removes one ktime_get_tai_ns() call, and a divide.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r-- | net/ipv4/tcp_output.c | 17 |
1 file changed, 4 insertions, 13 deletions
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 2adb719e97b8..fe7855b090e4 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -993,21 +993,12 @@ enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer) | |||
993 | return HRTIMER_NORESTART; | 993 | return HRTIMER_NORESTART; |
994 | } | 994 | } |
995 | 995 | ||
996 | static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb) | 996 | static void tcp_internal_pacing(struct sock *sk) |
997 | { | 997 | { |
998 | u64 len_ns; | ||
999 | u32 rate; | ||
1000 | |||
1001 | if (!tcp_needs_internal_pacing(sk)) | 998 | if (!tcp_needs_internal_pacing(sk)) |
1002 | return; | 999 | return; |
1003 | rate = sk->sk_pacing_rate; | ||
1004 | if (!rate || rate == ~0U) | ||
1005 | return; | ||
1006 | |||
1007 | len_ns = (u64)skb->len * NSEC_PER_SEC; | ||
1008 | do_div(len_ns, rate); | ||
1009 | hrtimer_start(&tcp_sk(sk)->pacing_timer, | 1000 | hrtimer_start(&tcp_sk(sk)->pacing_timer, |
1010 | ktime_add_ns(ktime_get_tai_ns(), len_ns), | 1001 | ns_to_ktime(tcp_sk(sk)->tcp_wstamp_ns), |
1011 | HRTIMER_MODE_ABS_PINNED_SOFT); | 1002 | HRTIMER_MODE_ABS_PINNED_SOFT); |
1012 | sock_hold(sk); | 1003 | sock_hold(sk); |
1013 | } | 1004 | } |
@@ -1026,7 +1017,8 @@ static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb) | |||
1026 | */ | 1017 | */ |
1027 | if (rate != ~0U && rate && tp->data_segs_out >= 10) { | 1018 | if (rate != ~0U && rate && tp->data_segs_out >= 10) { |
1028 | tp->tcp_wstamp_ns += div_u64((u64)skb->len * NSEC_PER_SEC, rate); | 1019 | tp->tcp_wstamp_ns += div_u64((u64)skb->len * NSEC_PER_SEC, rate); |
1029 | /* TODO: update internal pacing here */ | 1020 | |
1021 | tcp_internal_pacing(sk); | ||
1030 | } | 1022 | } |
1031 | } | 1023 | } |
1032 | list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); | 1024 | list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); |
@@ -1167,7 +1159,6 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, | |||
1167 | tcp_event_data_sent(tp, sk); | 1159 | tcp_event_data_sent(tp, sk); |
1168 | tp->data_segs_out += tcp_skb_pcount(skb); | 1160 | tp->data_segs_out += tcp_skb_pcount(skb); |
1169 | tp->bytes_sent += skb->len - tcp_header_size; | 1161 | tp->bytes_sent += skb->len - tcp_header_size; |
1170 | tcp_internal_pacing(sk, skb); | ||
1171 | } | 1162 | } |
1172 | 1163 | ||
1173 | if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) | 1164 | if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) |