aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4
diff options
context:
space:
mode:
author	Eric Dumazet <edumazet@google.com>	2014-09-08 11:06:07 -0400
committer	David S. Miller <davem@davemloft.net>	2014-09-09 19:54:41 -0400
commitca777eff51f7fbaebd954e645d8ecb781a906b4a (patch)
treeab24468c8118a7ecbef152d7c0c993023f425c1f /net/ipv4
parent32bc6d1a35f8897fbcdc260addc1b1ad63b8db15 (diff)
tcp: remove dst refcount false sharing for prequeue mode
Alexander Duyck reported high false sharing on dst refcount in tcp stack when prequeue is used. prequeue is the mechanism used when a thread is blocked in recvmsg()/read() on a TCP socket, using a blocking model rather than select()/poll()/epoll() non blocking one. We already try to use RCU in input path as much as possible, but we were forced to take a refcount on the dst when skb escaped RCU protected region. When/if the user thread runs on different cpu, dst_release() will then touch dst refcount again. Commit 093162553c33 (tcp: force a dst refcount when prequeue packet) was an example of a race fix. It turns out the only remaining usage of skb->dst for a packet stored in a TCP socket prequeue is IP early demux. We can add a logic to detect when IP early demux is probably going to use skb->dst. Because we do an optimistic check rather than duplicate existing logic, we need to guard inet_sk_rx_dst_set() and inet6_sk_rx_dst_set() from using a NULL dst. Many thanks to Alexander for providing a nice bug report, git bisection, and reproducer. Tested using Alexander script on a 40Gb NIC, 8 RX queues. Hosts have 24 cores, 48 hyper threads. echo 0 >/proc/sys/net/ipv4/tcp_autocorking for i in `seq 0 47` do for j in `seq 0 2` do netperf -H $DEST -t TCP_STREAM -l 1000 \ -c -C -T $i,$i -P 0 -- \ -m 64 -s 64K -D & done done Before patch : ~6Mpps and ~95% cpu usage on receiver After patch : ~9Mpps and ~35% cpu usage on receiver. Signed-off-by: Eric Dumazet <edumazet@google.com> Reported-by: Alexander Duyck <alexander.h.duyck@intel.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/tcp_ipv4.c	20
1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3f9bc3f0bba0..7881b96d2b72 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1559,7 +1559,17 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 	    skb_queue_len(&tp->ucopy.prequeue) == 0)
 		return false;
 
-	skb_dst_force(skb);
+	/* Before escaping RCU protected region, we need to take care of skb
+	 * dst. Prequeue is only enabled for established sockets.
+	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst
+	 * Instead of doing full sk_rx_dst validity here, let's perform
+	 * an optimistic check.
+	 */
+	if (likely(sk->sk_rx_dst))
+		skb_dst_drop(skb);
+	else
+		skb_dst_force(skb);
+
 	__skb_queue_tail(&tp->ucopy.prequeue, skb);
 	tp->ucopy.memory += skb->truesize;
 	if (tp->ucopy.memory > sk->sk_rcvbuf) {
@@ -1765,9 +1775,11 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
 
-	dst_hold(dst);
-	sk->sk_rx_dst = dst;
-	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
+	if (dst) {
+		dst_hold(dst);
+		sk->sk_rx_dst = dst;
+		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
+	}
 }
 EXPORT_SYMBOL(inet_sk_rx_dst_set);
 