diff options
author | Eric Dumazet <edumazet@google.com> | 2016-12-03 14:14:52 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2016-12-05 13:32:22 -0500 |
commit | b223feb9de2a65c533ff95c08e834fa732906ea5 (patch) | |
tree | 0327e31d3fe77301791b56fd3a4082cb3229737e /net/ipv4/tcp_output.c | |
parent | 408f0a6c21e124cc4f6c7aa370b38aa47e55428d (diff) |
tcp: tsq: add shortcut in tcp_tasklet_func()
Under high stress, I've seen tcp_tasklet_func() consuming
~700 usec, handling ~150 tcp sockets.
By setting TCP_TSQ_DEFERRED in tcp_wfree(), we give a chance
for other cpus/threads entering tcp_write_xmit() to grab it,
allowing tcp_tasklet_func() to skip sockets that already did
an xmit cycle.
In the future, we might give ACK processing an increased
budget to further reduce the amount of work done in tcp_tasklet_func().
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r-- | net/ipv4/tcp_output.c | 22 |
1 file changed, 12 insertions, 10 deletions
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 4adaf8e1bb63..fa23b688a6f3 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -767,19 +767,19 @@ static void tcp_tasklet_func(unsigned long data) | |||
767 | list_for_each_safe(q, n, &list) { | 767 | list_for_each_safe(q, n, &list) { |
768 | tp = list_entry(q, struct tcp_sock, tsq_node); | 768 | tp = list_entry(q, struct tcp_sock, tsq_node); |
769 | list_del(&tp->tsq_node); | 769 | list_del(&tp->tsq_node); |
770 | clear_bit(TSQ_QUEUED, &tp->tsq_flags); | ||
770 | 771 | ||
771 | sk = (struct sock *)tp; | 772 | sk = (struct sock *)tp; |
772 | bh_lock_sock(sk); | 773 | if (!sk->sk_lock.owned && |
773 | 774 | test_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags)) { | |
774 | if (!sock_owned_by_user(sk)) { | 775 | bh_lock_sock(sk); |
775 | tcp_tsq_handler(sk); | 776 | if (!sock_owned_by_user(sk)) { |
776 | } else { | 777 | clear_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags); |
777 | /* defer the work to tcp_release_cb() */ | 778 | tcp_tsq_handler(sk); |
778 | set_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags); | 779 | } |
780 | bh_unlock_sock(sk); | ||
779 | } | 781 | } |
780 | bh_unlock_sock(sk); | ||
781 | 782 | ||
782 | clear_bit(TSQ_QUEUED, &tp->tsq_flags); | ||
783 | sk_free(sk); | 783 | sk_free(sk); |
784 | } | 784 | } |
785 | } | 785 | } |
@@ -884,7 +884,7 @@ void tcp_wfree(struct sk_buff *skb) | |||
884 | if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED)) | 884 | if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED)) |
885 | goto out; | 885 | goto out; |
886 | 886 | ||
887 | nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED; | 887 | nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED | TCPF_TSQ_DEFERRED; |
888 | nval = cmpxchg(&tp->tsq_flags, oval, nval); | 888 | nval = cmpxchg(&tp->tsq_flags, oval, nval); |
889 | if (nval != oval) | 889 | if (nval != oval) |
890 | continue; | 890 | continue; |
@@ -2229,6 +2229,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, | |||
2229 | unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) | 2229 | unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) |
2230 | break; | 2230 | break; |
2231 | 2231 | ||
2232 | if (test_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags)) | ||
2233 | clear_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags); | ||
2232 | if (tcp_small_queue_check(sk, skb, 0)) | 2234 | if (tcp_small_queue_check(sk, skb, 0)) |
2233 | break; | 2235 | break; |
2234 | 2236 | ||