diff options
author | Kirill Tkhai <ktkhai@virtuozzo.com> | 2018-01-16 04:31:41 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2018-01-17 15:42:35 -0500 |
commit | 0c06bea919f3289368a023d1a62a1bc319617fa3 (patch) | |
tree | d15b03fefc6edd450cb18c995d4948d8e6675e07 | |
parent | a29ae44c7aa3efa224ecc7325e4b6da2363c6b5f (diff) |
net: Fix possible race in peernet2id_alloc()
peernet2id_alloc() is racy without rtnl_lock(), as refcount_read(&peer->count)
under net->nsid_lock does not guarantee that peer is alive:
rcu_read_lock()
peernet2id_alloc() ..
spin_lock_bh(&net->nsid_lock) ..
refcount_read(&peer->count) (!= 0) ..
.. put_net()
.. cleanup_net()
.. for_each_net(tmp)
.. spin_lock_bh(&tmp->nsid_lock)
.. __peernet2id(tmp, net) == -1
.. ..
.. ..
__peernet2id_alloc(alloc == true) ..
.. ..
rcu_read_unlock() ..
.. synchronize_rcu()
.. kmem_cache_free(net)
After the above situation, net::netns_ids contains an id pointing to freed memory,
and any later lookup by that id will operate on this freed memory.
Currently, peernet2id_alloc() is used under rtnl_lock() everywhere except
ovs_vport_cmd_fill_info(), so this race can't occur. But peernet2id_alloc()
is a generic interface, and it is better to fix it before someone really
starts to use it in the wrong context.
v2: Don't place refcount_read(&net->count) under net->nsid_lock
as suggested by Eric W. Biederman <ebiederm@xmission.com>
v3: Rebase on top of net-next
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | net/core/net_namespace.c | 13 |
1 files changed, 11 insertions, 2 deletions
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 2213d45fcafd..3c77d84ad60d 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c | |||
@@ -221,17 +221,26 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id); | |||
221 | */ | 221 | */ |
222 | int peernet2id_alloc(struct net *net, struct net *peer) | 222 | int peernet2id_alloc(struct net *net, struct net *peer) |
223 | { | 223 | { |
224 | bool alloc; | 224 | bool alloc = false, alive = false; |
225 | int id; | 225 | int id; |
226 | 226 | ||
227 | if (refcount_read(&net->count) == 0) | 227 | if (refcount_read(&net->count) == 0) |
228 | return NETNSA_NSID_NOT_ASSIGNED; | 228 | return NETNSA_NSID_NOT_ASSIGNED; |
229 | spin_lock_bh(&net->nsid_lock); | 229 | spin_lock_bh(&net->nsid_lock); |
230 | alloc = refcount_read(&peer->count) == 0 ? false : true; | 230 | /* |
231 | * When peer is obtained from RCU lists, we may race with | ||
232 | * its cleanup. Check whether it's alive, and this guarantees | ||
233 | * we never hash a peer back to net->netns_ids, after it has | ||
234 | * just been idr_remove()'d from there in cleanup_net(). | ||
235 | */ | ||
236 | if (maybe_get_net(peer)) | ||
237 | alive = alloc = true; | ||
231 | id = __peernet2id_alloc(net, peer, &alloc); | 238 | id = __peernet2id_alloc(net, peer, &alloc); |
232 | spin_unlock_bh(&net->nsid_lock); | 239 | spin_unlock_bh(&net->nsid_lock); |
233 | if (alloc && id >= 0) | 240 | if (alloc && id >= 0) |
234 | rtnl_net_notifyid(net, RTM_NEWNSID, id); | 241 | rtnl_net_notifyid(net, RTM_NEWNSID, id); |
242 | if (alive) | ||
243 | put_net(peer); | ||
235 | return id; | 244 | return id; |
236 | } | 245 | } |
237 | EXPORT_SYMBOL_GPL(peernet2id_alloc); | 246 | EXPORT_SYMBOL_GPL(peernet2id_alloc); |