author		Eric W. Biederman <ebiederm@xmission.com>	2007-09-27 01:40:08 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-10-10 19:52:55 -0400
commit		f4618d39a34dab316090263b42cd8799f31ce277 (patch)
tree		75db202b492da1797145fde455b0c4c89cf41767 /net/core
parent		32db927686f6d475fc05b6229f82ed576c0c8096 (diff)
[NETNS]: Simplify the network namespace list locking rules.
Denis V. Lunev <den@sw.ru> noticed that the locking rules for the network
namespace list are over complicated and broken.

In particular the current register_netdev_notifier does not take any lock,
making the for_each_net iteration racy with network namespace creation and
destruction.  Oops.

The fact that we need to use for_each_net in rtnl_unlock() when the rtnetlink
support becomes per network namespace makes designing the proper locking
tricky.  In addition we need to be able to call rtnl_lock() and rtnl_unlock()
when we have the net_mutex held.

After thinking about it and looking at the alternatives carefully it looks
like the simplest and most maintainable solution is to remove net_list_mutex
altogether, and to use the rtnl_mutex instead.

Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
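The simplified rule, then, is that net_namespace_list is updated under the
RTNL and may be walked by any code that holds the RTNL.  A minimal sketch of
the reader side the message alludes to is below; the helper name
replay_register_events() is hypothetical and the notifier replay is only
illustrative of what a netdevice notifier registration has to do for devices
that already exist, with for_each_net() being the existing iterator over
net_namespace_list.

/* Sketch only, not part of this patch: with net_list_mutex gone, any
 * walk of the namespace list is expected to happen under the RTNL.
 */
static void replay_register_events(struct notifier_block *nb)
{
	struct net *net;
	struct net_device *dev;

	rtnl_lock();
	for_each_net(net) {			/* walk net_namespace_list */
		for_each_netdev(net, dev)	/* walk devices in this namespace */
			nb->notifier_call(nb, NETDEV_REGISTER, dev);
	}
	rtnl_unlock();
}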
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/net_namespace.c	23
1 files changed, 6 insertions, 17 deletions
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index e478e353ea6b..0e0ca6d06fa5 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -15,7 +15,6 @@ static LIST_HEAD(pernet_list);
 static struct list_head *first_device = &pernet_list;
 static DEFINE_MUTEX(net_mutex);
 
-static DEFINE_MUTEX(net_list_mutex);
 LIST_HEAD(net_namespace_list);
 
 static struct kmem_cache *net_cachep;
@@ -23,16 +22,6 @@ static struct kmem_cache *net_cachep;
 struct net init_net;
 EXPORT_SYMBOL_GPL(init_net);
 
-void net_lock(void)
-{
-	mutex_lock(&net_list_mutex);
-}
-
-void net_unlock(void)
-{
-	mutex_unlock(&net_list_mutex);
-}
-
 static struct net *net_alloc(void)
 {
 	return kmem_cache_alloc(net_cachep, GFP_KERNEL);
@@ -62,9 +51,9 @@ static void cleanup_net(struct work_struct *work)
 	mutex_lock(&net_mutex);
 
 	/* Don't let anyone else find us. */
-	net_lock();
+	rtnl_lock();
 	list_del(&net->list);
-	net_unlock();
+	rtnl_unlock();
 
 	/* Run all of the network namespace exit methods */
 	list_for_each_entry_reverse(ops, &pernet_list, list) {
@@ -151,9 +140,9 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
 	if (err)
 		goto out_unlock;
 
-	net_lock();
+	rtnl_lock();
 	list_add_tail(&new_net->list, &net_namespace_list);
-	net_unlock();
+	rtnl_unlock();
 
 
 out_unlock:
@@ -178,9 +167,9 @@ static int __init net_ns_init(void)
 	mutex_lock(&net_mutex);
 	err = setup_net(&init_net);
 
-	net_lock();
+	rtnl_lock();
 	list_add_tail(&init_net.list, &net_namespace_list);
-	net_unlock();
+	rtnl_unlock();
 
 	mutex_unlock(&net_mutex);
 	if (err)
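Note the lock ordering the hunks establish: in cleanup_net(), copy_net_ns()
and net_ns_init() the RTNL is taken strictly inside net_mutex, and only
around the list manipulation itself, which is what allows rtnl_lock() and
rtnl_unlock() to be called while net_mutex is held.  A condensed sketch of
that writer-side pattern (the helper name is illustrative, not from the
patch):

/* Sketch of the update pattern above: net_mutex stays the outer lock,
 * the RTNL guards only the list insertion or removal.
 */
static void hang_net_on_list(struct net *net)	/* hypothetical helper */
{
	mutex_lock(&net_mutex);
	/* ... run the pernet init methods ... */
	rtnl_lock();
	list_add_tail(&net->list, &net_namespace_list);
	rtnl_unlock();
	mutex_unlock(&net_mutex);
}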