author		Eric Dumazet <edumazet@google.com>	2016-12-03 14:14:57 -0500
committer	David S. Miller <davem@davemloft.net>	2016-12-05 13:32:24 -0500
commit		7aa5470c2c09265902b5e4289afa82e4e7c2987e
tree		acd4d2976939d6de5f4495bbebc4a9420e40d38e /net/ipv4/tcp_output.c
parent		9115e8cd2a0c6eaaa900c462721f12e1d45f326c
tcp: tsq: move tsq_flags close to sk_wmem_alloc
tsq_flags being in the same cache line as sk_wmem_alloc
makes a lot of sense: both fields are changed from tcp_wfree()
and, more generally, by various TSQ-related functions.

A prior patch made room in struct sock and added sk_tsq_flags;
this patch deletes tsq_flags from struct tcp_sock.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
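Why adjacency matters: tcp_wfree() updates sk_wmem_alloc and may set a TSQ flag bit in the same call, so placing both fields on one cache line lets the transmit-completion path dirty a single line instead of two. A minimal userspace sketch of that grouping (simplified field types and filler, not the real struct sock layout):

/* Sketch only: NOT the real struct sock. The point is that the two
 * fields written together by tcp_wfree() are laid out side by side.
 */
#include <stddef.h>
#include <stdio.h>

struct sock_sketch {
	char rx_fields[128];         /* stand-in for receive-side fields */
	/* transmit-completion hot fields, kept adjacent: */
	int sk_wmem_alloc;           /* an atomic_t in the kernel */
	unsigned long sk_tsq_flags;  /* TSQ deferred-work flag bits */
};

int main(void)
{
	/* Adjacent offsets => same 64-byte cache line, given alignment. */
	printf("sk_wmem_alloc @ %zu, sk_tsq_flags @ %zu\n",
	       offsetof(struct sock_sketch, sk_wmem_alloc),
	       offsetof(struct sock_sketch, sk_tsq_flags));
	return 0;
}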
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--  net/ipv4/tcp_output.c | 24 +++++++++++-------------
1 file changed, 11 insertions(+), 13 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 5f04bee4c86a..b45101f3d2bd 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -767,14 +767,15 @@ static void tcp_tasklet_func(unsigned long data)
 	list_for_each_safe(q, n, &list) {
 		tp = list_entry(q, struct tcp_sock, tsq_node);
 		list_del(&tp->tsq_node);
-		clear_bit(TSQ_QUEUED, &tp->tsq_flags);
 
 		sk = (struct sock *)tp;
+		clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);
+
 		if (!sk->sk_lock.owned &&
-		    test_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags)) {
+		    test_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags)) {
 			bh_lock_sock(sk);
 			if (!sock_owned_by_user(sk)) {
-				clear_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
+				clear_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags);
 				tcp_tsq_handler(sk);
 			}
 			bh_unlock_sock(sk);
@@ -797,16 +798,15 @@ static void tcp_tasklet_func(unsigned long data)
  */
 void tcp_release_cb(struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned long flags, nflags;
 
 	/* perform an atomic operation only if at least one flag is set */
 	do {
-		flags = tp->tsq_flags;
+		flags = sk->sk_tsq_flags;
 		if (!(flags & TCP_DEFERRED_ALL))
 			return;
 		nflags = flags & ~TCP_DEFERRED_ALL;
-	} while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags);
+	} while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);
 
 	if (flags & TCPF_TSQ_DEFERRED)
 		tcp_tsq_handler(sk);
@@ -878,7 +878,7 @@ void tcp_wfree(struct sk_buff *skb)
 	if (wmem >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
 		goto out;
 
-	for (oval = READ_ONCE(tp->tsq_flags);; oval = nval) {
+	for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
 		struct tsq_tasklet *tsq;
 		bool empty;
 
@@ -886,7 +886,7 @@ void tcp_wfree(struct sk_buff *skb)
 			goto out;
 
 		nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED | TCPF_TSQ_DEFERRED;
-		nval = cmpxchg(&tp->tsq_flags, oval, nval);
+		nval = cmpxchg(&sk->sk_tsq_flags, oval, nval);
 		if (nval != oval)
 			continue;
 
@@ -2100,7 +2100,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
 		    skb->prev == sk->sk_write_queue.next)
 			return false;
 
-		set_bit(TSQ_THROTTLED, &tcp_sk(sk)->tsq_flags);
+		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
 		/* It is possible TX completion already happened
 		 * before we set TSQ_THROTTLED, so we must
 		 * test again the condition.
@@ -2241,8 +2241,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
 			break;
 
-		if (test_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags))
-			clear_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
+		if (test_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags))
+			clear_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags);
 		if (tcp_small_queue_check(sk, skb, 0))
 			break;
 
@@ -3545,8 +3545,6 @@ void tcp_send_ack(struct sock *sk)
 	/* We do not want pure acks influencing TCP Small Queues or fq/pacing
 	 * too much.
 	 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
-	 * We also avoid tcp_wfree() overhead (cache line miss accessing
-	 * tp->tsq_flags) by using regular sock_wfree()
 	 */
 	skb_set_tcp_pure_ack(buff);
 
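The tcp_release_cb() hunk above keeps its lockless claim-and-clear pattern intact, merely retargeting it at sk->sk_tsq_flags. A userspace sketch of that pattern, with C11 atomics standing in for the kernel's cmpxchg() and illustrative flag bit values (only two of the deferred flags shown):

/* Sketch of the tcp_release_cb() flag-claiming loop. Flag names match
 * the kernel's, but their bit values here are illustrative.
 */
#include <stdatomic.h>
#include <stdio.h>

#define TCPF_TSQ_DEFERRED		(1UL << 0)
#define TCPF_WRITE_TIMER_DEFERRED	(1UL << 1)
#define TCP_DEFERRED_ALL	(TCPF_TSQ_DEFERRED | TCPF_WRITE_TIMER_DEFERRED)

static _Atomic unsigned long sk_tsq_flags;

static void release_cb(void)
{
	unsigned long flags, nflags;

	/* Perform an atomic RMW only if at least one flag is set. */
	do {
		flags = atomic_load(&sk_tsq_flags);
		if (!(flags & TCP_DEFERRED_ALL))
			return;
		nflags = flags & ~TCP_DEFERRED_ALL;
	} while (!atomic_compare_exchange_weak(&sk_tsq_flags, &flags, nflags));

	if (flags & TCPF_TSQ_DEFERRED)
		printf("running deferred TSQ transmit work\n");
}

int main(void)
{
	atomic_fetch_or(&sk_tsq_flags, TCPF_TSQ_DEFERRED);
	release_cb();	/* claims and clears the flag, then acts on it */
	return 0;
}

The retry loop only spins if another CPU modified the flags between the load and the compare-and-swap; the common case of no pending flags costs a single read, which is exactly why keeping these flags on the already-hot sk_wmem_alloc cache line pays off.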