path: root/net/ipv4/tcp_ipv4.c
author		Eric Dumazet <edumazet@google.com>	2012-07-29 19:20:37 -0400
committer	David S. Miller <davem@davemloft.net>	2012-07-30 17:53:22 -0400
commit		404e0a8b6a55d5e1cd138c6deb1bca9abdf75d8c (patch)
tree		38e9748d38c415cc97b973fecb9279cd43f76393 /net/ipv4/tcp_ipv4.c
parent		cca32e4bf999a34ac08d959f351f2b30bcd02460 (diff)
net: ipv4: fix RCU races on dst refcounts
commit c6cffba4ffa2 ("ipv4: Fix input route performance regression.") added various fatal races with dst refcounts. Crashes happen on TCP workloads if routes are added/deleted at the same time.

The dst_free() calls from free_fib_info_rcu() are clearly racy. We need regular dst refcounting (dst_release()) instead, and we must make sure dst_release() is aware of RCU grace periods:

- Add a DST_RCU_FREE flag so that dst_release() respects an RCU grace period before dst destruction for a cached dst.

- Introduce a new inet_sk_rx_dst_set() helper, using atomic_inc_not_zero() to make sure we don't increase a zero refcount (on a dst currently waiting out an RCU grace period before destruction).

- rt_cache_route() must take a reference on the new cached route, and release it if it was not able to install it.

With this patch, my machines survive various benchmarks.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
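For context: two of the mechanisms named above live outside this file (in net/core/dst.c and the inet helpers), so they do not appear in the hunk below. The sketch that follows is an illustrative reconstruction using the commit message's own names (DST_RCU_FREE, inet_sk_rx_dst_set()); the function bodies and the dst_rcu_destroy callback name are assumptions, not the committed code.

/*
 * Sketch 1: an RCU-aware dst_release(). When a cached dst's refcount
 * drops to zero, defer the actual destruction by one RCU grace period,
 * so a reader that just found the dst under rcu_read_lock() can still
 * safely attempt atomic_inc_not_zero() on it.
 * (dst_rcu_destroy is a hypothetical call_rcu() callback.)
 */
void dst_release(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt = atomic_dec_return(&dst->__refcnt);

		WARN_ON(newrefcnt < 0);
		if (!newrefcnt && unlikely(dst->flags & DST_RCU_FREE))
			call_rcu(&dst->rcu_head, dst_rcu_destroy);
	}
}

/*
 * Sketch 2: the helper this file's hunk switches to. Only cache the
 * dst on the socket if its refcount is still non-zero; a zero refcount
 * means the dst is already waiting out an RCU grace period before
 * destruction and must not be resurrected.
 */
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && atomic_inc_not_zero(&dst->__refcnt)) {
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}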
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r--	net/ipv4/tcp_ipv4.c	12
1 file changed, 6 insertions, 6 deletions
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 2fbd9921253f..7f91e5ac8277 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1617,19 +1617,19 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 #endif
 
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
+		struct dst_entry *dst = sk->sk_rx_dst;
+
 		sock_rps_save_rxhash(sk, skb);
-		if (sk->sk_rx_dst) {
-			struct dst_entry *dst = sk->sk_rx_dst;
+		if (dst) {
 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
 			    dst->ops->check(dst, 0) == NULL) {
 				dst_release(dst);
 				sk->sk_rx_dst = NULL;
 			}
 		}
-		if (unlikely(sk->sk_rx_dst == NULL)) {
-			sk->sk_rx_dst = dst_clone(skb_dst(skb));
-			inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
-		}
+		if (unlikely(sk->sk_rx_dst == NULL))
+			inet_sk_rx_dst_set(sk, skb);
+
 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
 			rsk = sk;
 			goto reset;