author     Eric Dumazet <edumazet@google.com>      2013-12-11 17:46:51 -0500
committer  David S. Miller <davem@davemloft.net>   2013-12-11 20:21:10 -0500
commit     975022310233fb0f0193873d79a7b8438070fa82 (patch)
tree       2658e9259ca6a53e07a25fe399a4f61d15093220 /net/ipv4/udp.c
parent     a1bf1750871a6f242b0fdb174cc55d2c57e7ed66 (diff)
udp: ipv4: must add synchronization in udp_sk_rx_dst_set()
Unlike TCP, the UDP input path does not hold the socket lock. Before messing with sk->sk_rx_dst, we must use a spinlock, otherwise multiple cpus could leak a refcount.

This patch also takes care of renewing a stale dst entry (when the cached sk->sk_rx_dst would otherwise not be used by IP early demux).

Fixes: 421b3885bf6d ("udp: ipv4: Add udp early demux")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Shawn Bohrer <sbohrer@rgmadvisors.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
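To make the race concrete, here is a minimal userspace sketch of the problem described above. It is an illustration only, not kernel code: the hold()/release() helpers and the tiny struct dst/struct sock types are hypothetical stand-ins for dst_hold()/dst_release() and the real kernel structures. Two CPUs both observe an empty sk_rx_dst and run the old, unlocked body of udp_sk_rx_dst_set() for the same route cache entry; each takes a reference, but the single slot can only remember, and later release, one of them.

/* Hypothetical userspace sketch of the refcount leak; not kernel code. */
#include <stdio.h>

struct dst  { int refcnt; };          /* stand-in for struct dst_entry   */
struct sock { struct dst *rx_dst; };  /* stand-in for sk->sk_rx_dst      */

static void hold(struct dst *d)    { d->refcnt++; }   /* ~ dst_hold()    */
static void release(struct dst *d) { d->refcnt--; }   /* ~ dst_release() */

int main(void)
{
    struct dst d = { .refcnt = 1 };        /* route cache keeps one ref  */
    struct sock sk = { .rx_dst = NULL };

    /* CPU A and CPU B both see sk.rx_dst == NULL and proceed with the
     * old, unsynchronized sequence "dst_hold(dst); sk->sk_rx_dst = dst;".
     */
    hold(&d);        /* CPU A takes a reference                          */
    hold(&d);        /* CPU B takes a reference                          */
    sk.rx_dst = &d;  /* CPU A stores the pointer                         */
    sk.rx_dst = &d;  /* CPU B stores into the same slot                  */

    /* On socket destruction only the one tracked reference is dropped.  */
    release(sk.rx_dst);

    /* Expected 1 (the route cache's own ref); we get 2: one ref leaked. */
    printf("refcnt = %d\n", d.refcnt);
    return 0;
}

With the spinlock added by this patch, the second CPU observes the already-installed entry under the lock and skips the extra dst_hold(), and a stale entry is released before being replaced, so no reference is leaked.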
Diffstat (limited to 'net/ipv4/udp.c')
-rw-r--r--  net/ipv4/udp.c  22
1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 16d246a51a02..62c19fdd102d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1599,12 +1599,21 @@ static void flush_stack(struct sock **stack, unsigned int count,
 	kfree_skb(skb1);
 }

-static void udp_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+/* For TCP sockets, sk_rx_dst is protected by socket lock
+ * For UDP, we use sk_dst_lock to guard against concurrent changes.
+ */
+static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
 {
-	struct dst_entry *dst = skb_dst(skb);
+	struct dst_entry *old;

-	dst_hold(dst);
-	sk->sk_rx_dst = dst;
+	spin_lock(&sk->sk_dst_lock);
+	old = sk->sk_rx_dst;
+	if (likely(old != dst)) {
+		dst_hold(dst);
+		sk->sk_rx_dst = dst;
+		dst_release(old);
+	}
+	spin_unlock(&sk->sk_dst_lock);
 }

 /*
@@ -1737,10 +1746,11 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,

 	sk = skb_steal_sock(skb);
 	if (sk) {
+		struct dst_entry *dst = skb_dst(skb);
 		int ret;

-		if (unlikely(sk->sk_rx_dst == NULL))
-			udp_sk_rx_dst_set(sk, skb);
+		if (unlikely(sk->sk_rx_dst != dst))
+			udp_sk_rx_dst_set(sk, dst);

 		ret = udp_queue_rcv_skb(sk, skb);
 		sock_put(sk);
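For contrast, here is the same userspace model with the locking scheme this patch introduces, using a pthread mutex as a stand-in for sk->sk_dst_lock; again a hypothetical sketch rather than kernel code. The comparison done under the lock guarantees that at most one reference is taken per pointer installed in the slot, and that a replaced (stale) entry is released.

/* Hypothetical sketch of the patched update path; not kernel code.     */
#include <pthread.h>
#include <stdio.h>

struct dst  { int refcnt; };
struct sock { struct dst *rx_dst; pthread_mutex_t dst_lock; };

static void hold(struct dst *d)    { d->refcnt++; }        /* ~ dst_hold()    */
static void release(struct dst *d) { if (d) d->refcnt--; } /* ~ dst_release() */

/* Mirrors the new udp_sk_rx_dst_set(): take the lock, and only swap in
 * the dst (taking one reference, dropping the old one) if it changed.
 */
static void rx_dst_set(struct sock *sk, struct dst *dst)
{
    pthread_mutex_lock(&sk->dst_lock);
    struct dst *old = sk->rx_dst;
    if (old != dst) {
        hold(dst);
        sk->rx_dst = dst;
        release(old);
    }
    pthread_mutex_unlock(&sk->dst_lock);
}

int main(void)
{
    struct dst d = { .refcnt = 1 };
    struct sock sk = { .rx_dst = NULL,
                       .dst_lock = PTHREAD_MUTEX_INITIALIZER };

    rx_dst_set(&sk, &d);  /* CPU A installs the dst: refcnt 1 -> 2       */
    rx_dst_set(&sk, &d);  /* CPU B sees the same dst and does nothing    */

    release(sk.rx_dst);   /* socket teardown drops its single reference  */
    printf("refcnt = %d\n", d.refcnt);  /* back to 1, nothing leaked     */
    return 0;
}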