author    | Marcelo Leitner <mleitner@redhat.com>    | 2014-12-11 07:02:22 -0500
committer | David S. Miller <davem@davemloft.net>    | 2014-12-11 14:57:08 -0500
commit    | 00c83b01d58068dfeb2e1351cca6fccf2a83fa8f (patch)
tree      | 2b00c1d5a6ba84dc4fb9f8c6f9aee3de7aaa6799 /drivers/net
parent    | 51f8301485d701caab10e7e9b7f9d7866f2fe3cf (diff)
vxlan: Fix race condition between vxlan_sock_add and vxlan_sock_release
Currently, when trying to reuse a socket, vxlan_sock_add() will grab
vn->sock_lock, locate a reusable socket, increment its refcount and
release vn->sock_lock.
But vxlan_sock_release() first decrements the refcount and only then
grabs that lock. The refcnt operations themselves are atomic, but since
deferred work items each hold a reference on vs->refcnt, the following
interleaving is possible, leading to a use-after-free (especially after
vxlan_igmp_leave):
         CPU 1                              CPU 2

   deferred work                    vxlan_sock_add
     ...                              ...
                                      spin_lock(&vn->sock_lock)
                                      vs = vxlan_find_sock();
     vxlan_sock_release
       dec vs->refcnt, reaches 0
       spin_lock(&vn->sock_lock)
                                      vxlan_sock_hold(vs), refcnt=1
                                      spin_unlock(&vn->sock_lock)
       hlist_del_rcu(&vs->hlist);
       vxlan_notify_del_rx_port(vs)
       spin_unlock(&vn->sock_lock)
So when we look for a reusable socket, we now check that its refcount
has not already dropped to zero before reusing it.
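[Editor's note] The change replaces the unconditional atomic_inc() with
atomic_add_unless(&vs->refcnt, 1, 0), i.e. "take a reference only if the
count is still non-zero". Below is a minimal userspace sketch of that idea
using C11 atomics; it is not the kernel code, and the names (toy_sock,
toy_get_unless_zero) are made up for illustration.

/* Userspace sketch of the "take a reference only if refcnt != 0" pattern;
 * the kernel side uses atomic_add_unless(&vs->refcnt, 1, 0). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_sock {
	atomic_int refcnt;
};

/* Increment refcnt unless it is already 0; return true if a reference was
 * taken.  This is what closes the race: once the releaser has dropped the
 * count to 0, lookups refuse to resurrect the object. */
static bool toy_get_unless_zero(struct toy_sock *vs)
{
	int old = atomic_load(&vs->refcnt);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&vs->refcnt, &old, old + 1))
			return true;
	}
	return false;
}

int main(void)
{
	struct toy_sock vs = { .refcnt = 1 };

	/* While someone still holds a reference, reuse succeeds. */
	printf("reuse while live: %s\n",
	       toy_get_unless_zero(&vs) ? "ok" : "rejected");

	/* Releaser drops the remaining references; refcnt reaches 0. */
	atomic_fetch_sub(&vs.refcnt, 2);

	/* The old unconditional atomic_inc() would bump 0 back to 1 and hand
	 * out a socket that is about to be freed; the check rejects it. */
	printf("reuse after release: %s\n",
	       toy_get_unless_zero(&vs) ? "ok" : "rejected");
	return 0;
}

Conceptually, the kernel's atomic_add_unless() is the same compare-and-swap
loop, which is why it can safely replace atomic_inc() at the two lookup
sites changed in the diff below.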
Signed-off-by: Marcelo Ricardo Leitner <mleitner@redhat.com>
Fixes: 7c47cedf43a8b3 ("vxlan: move IGMP join/leave to work queue")
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net')
-rw-r--r-- | drivers/net/vxlan.c | 10
1 file changed, 3 insertions(+), 7 deletions(-)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 31ecb03368c6..49d9f2291998 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1985,9 +1985,8 @@ static int vxlan_init(struct net_device *dev)
 	spin_lock(&vn->sock_lock);
 	vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
 			     vxlan->dst_port);
-	if (vs) {
+	if (vs && atomic_add_unless(&vs->refcnt, 1, 0)) {
 		/* If we have a socket with same port already, reuse it */
-		atomic_inc(&vs->refcnt);
 		vxlan_vs_add_dev(vs, vxlan);
 	} else {
 		/* otherwise make new socket outside of RTNL */
@@ -2389,12 +2388,9 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
 
 	spin_lock(&vn->sock_lock);
 	vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port);
-	if (vs) {
-		if (vs->rcv == rcv)
-			atomic_inc(&vs->refcnt);
-		else
-			vs = ERR_PTR(-EBUSY);
-	}
+	if (vs && ((vs->rcv != rcv) ||
+		   !atomic_add_unless(&vs->refcnt, 1, 0)))
+		vs = ERR_PTR(-EBUSY);
 	spin_unlock(&vn->sock_lock);
 
 	if (!vs)