author	Eric W. Biederman <ebiederm@xmission.com>	2009-12-02 21:29:03 -0500
committer	David S. Miller <davem@davemloft.net>	2009-12-03 15:22:01 -0500
commit	72ad937abd0a43b7cf2c557ba1f2ec75e608c516 (patch)
tree	ac81af750df170a46ed001fb5647b83dc20f8422 /net/core
parent	8153a10c08f1312af563bb92532002e46d3f504a (diff)
net: Add support for batching network namespace cleanups
- Add exit_list to struct net to support building lists of network
  namespaces to clean up.
- Add exit_batch to pernet_operations to allow running operations only
  once during a network namespace exit, instead of once per network
  namespace (see the usage sketch below).
- Factor out ops_exit_list and ops_free_list so the logic for cleaning
  up a network namespace does not need to be duplicated.
Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
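
For illustration only (not part of this patch): a minimal sketch of how a pernet subsystem might adopt the new exit_batch hook. The foo_* names are hypothetical; only the exit_batch signature and the exit_list linkage are taken from the patch below.

#include <net/net_namespace.h>

static int __net_init foo_net_init(struct net *net)
{
	/* per-namespace setup would go here */
	return 0;
}

static void __net_exit foo_net_exit_batch(struct list_head *net_exit_list)
{
	struct net *net;

	/*
	 * Called once for the whole batch of dying namespaces, so
	 * expensive work (for example a single synchronize_rcu())
	 * can be shared by every namespace linked on net_exit_list
	 * via net->exit_list.
	 */
	list_for_each_entry(net, net_exit_list, exit_list) {
		/* per-namespace teardown for each namespace in the batch */
	}
}

static struct pernet_operations foo_net_ops = {
	.init		= foo_net_init,
	.exit_batch	= foo_net_exit_batch,
};

Such an ops structure would be registered as usual (for example with register_pernet_subsys()); existing users that only provide .exit keep working unchanged, since exit_batch is purely additive.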
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/net_namespace.c	122
1 file changed, 61 insertions(+), 61 deletions(-)
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 9679ad292da9..6c7f6e04dbbf 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -70,6 +70,36 @@ static void ops_free(const struct pernet_operations *ops, struct net *net)
 	}
 }
 
+static void ops_exit_list(const struct pernet_operations *ops,
+			  struct list_head *net_exit_list)
+{
+	struct net *net;
+	if (ops->exit) {
+		list_for_each_entry(net, net_exit_list, exit_list)
+			ops->exit(net);
+	}
+	if (&ops->list == first_device) {
+		LIST_HEAD(dev_kill_list);
+		rtnl_lock();
+		list_for_each_entry(net, net_exit_list, exit_list)
+			unregister_netdevices(net, &dev_kill_list);
+		unregister_netdevice_many(&dev_kill_list);
+		rtnl_unlock();
+	}
+	if (ops->exit_batch)
+		ops->exit_batch(net_exit_list);
+}
+
+static void ops_free_list(const struct pernet_operations *ops,
+			  struct list_head *net_exit_list)
+{
+	struct net *net;
+	if (ops->size && ops->id) {
+		list_for_each_entry(net, net_exit_list, exit_list)
+			ops_free(ops, net);
+	}
+}
+
 /*
  * setup_net runs the initializers for the network namespace object.
  */
@@ -78,6 +108,7 @@ static __net_init int setup_net(struct net *net)
 	/* Must be called with net_mutex held */
 	const struct pernet_operations *ops, *saved_ops;
 	int error = 0;
+	LIST_HEAD(net_exit_list);
 
 	atomic_set(&net->count, 1);
 
@@ -97,21 +128,14 @@ out_undo:
 	/* Walk through the list backwards calling the exit functions
 	 * for the pernet modules whose init functions did not fail.
 	 */
+	list_add(&net->exit_list, &net_exit_list);
 	saved_ops = ops;
-	list_for_each_entry_continue_reverse(ops, &pernet_list, list) {
-		if (ops->exit)
-			ops->exit(net);
-		if (&ops->list == first_device) {
-			LIST_HEAD(dev_kill_list);
-			rtnl_lock();
-			unregister_netdevices(net, &dev_kill_list);
-			unregister_netdevice_many(&dev_kill_list);
-			rtnl_unlock();
-		}
-	}
+	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
+		ops_exit_list(ops, &net_exit_list);
+
 	ops = saved_ops;
 	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
-		ops_free(ops, net);
+		ops_free_list(ops, &net_exit_list);
 
 	rcu_barrier();
 	goto out;
@@ -207,6 +231,7 @@ static void cleanup_net(struct work_struct *work)
 	const struct pernet_operations *ops;
 	struct net *net, *tmp;
 	LIST_HEAD(net_kill_list);
+	LIST_HEAD(net_exit_list);
 
 	/* Atomically snapshot the list of namespaces to cleanup */
 	spin_lock_irq(&cleanup_list_lock);
@@ -217,8 +242,10 @@ static void cleanup_net(struct work_struct *work)
 
 	/* Don't let anyone else find us. */
 	rtnl_lock();
-	list_for_each_entry(net, &net_kill_list, cleanup_list)
+	list_for_each_entry(net, &net_kill_list, cleanup_list) {
 		list_del_rcu(&net->list);
+		list_add_tail(&net->exit_list, &net_exit_list);
+	}
 	rtnl_unlock();
 
 	/*
@@ -229,27 +256,12 @@
 	synchronize_rcu();
 
 	/* Run all of the network namespace exit methods */
-	list_for_each_entry_reverse(ops, &pernet_list, list) {
-		if (ops->exit) {
-			list_for_each_entry(net, &net_kill_list, cleanup_list)
-				ops->exit(net);
-		}
-		if (&ops->list == first_device) {
-			LIST_HEAD(dev_kill_list);
-			rtnl_lock();
-			list_for_each_entry(net, &net_kill_list, cleanup_list)
-				unregister_netdevices(net, &dev_kill_list);
-			unregister_netdevice_many(&dev_kill_list);
-			rtnl_unlock();
-		}
-	}
+	list_for_each_entry_reverse(ops, &pernet_list, list)
+		ops_exit_list(ops, &net_exit_list);
+
 	/* Free the net generic variables */
-	list_for_each_entry_reverse(ops, &pernet_list, list) {
-		if (ops->size && ops->id) {
-			list_for_each_entry(net, &net_kill_list, cleanup_list)
-				ops_free(ops, net);
-		}
-	}
+	list_for_each_entry_reverse(ops, &pernet_list, list)
+		ops_free_list(ops, &net_exit_list);
 
 	mutex_unlock(&net_mutex);
 
@@ -259,8 +271,8 @@
 	rcu_barrier();
 
 	/* Finally it is safe to free my network namespace structure */
-	list_for_each_entry_safe(net, tmp, &net_kill_list, cleanup_list) {
-		list_del_init(&net->cleanup_list);
+	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
+		list_del_init(&net->exit_list);
 		net_free(net);
 	}
 }
@@ -348,8 +360,9 @@ pure_initcall(net_ns_init);
 static int __register_pernet_operations(struct list_head *list,
 					struct pernet_operations *ops)
 {
-	struct net *net, *undo_net;
+	struct net *net;
 	int error;
+	LIST_HEAD(net_exit_list);
 
 	list_add_tail(&ops->list, list);
 	if (ops->init || (ops->id && ops->size)) {
@@ -357,6 +370,7 @@ static int __register_pernet_operations(struct list_head *list,
 			error = ops_init(ops, net);
 			if (error)
 				goto out_undo;
+			list_add_tail(&net->exit_list, &net_exit_list);
 		}
 	}
 	return 0;
@@ -364,36 +378,21 @@ static int __register_pernet_operations(struct list_head *list,
 out_undo:
 	/* If I have an error cleanup all namespaces I initialized */
 	list_del(&ops->list);
-	if (ops->exit) {
-		for_each_net(undo_net) {
-			if (net_eq(undo_net, net))
-				goto undone;
-			ops->exit(undo_net);
-		}
-	}
-undone:
-	if (ops->size && ops->id) {
-		for_each_net(undo_net) {
-			if (net_eq(undo_net, net))
-				goto freed;
-			ops_free(ops, undo_net);
-		}
-	}
-freed:
+	ops_exit_list(ops, &net_exit_list);
+	ops_free_list(ops, &net_exit_list);
 	return error;
 }
 
 static void __unregister_pernet_operations(struct pernet_operations *ops)
 {
 	struct net *net;
+	LIST_HEAD(net_exit_list);
 
 	list_del(&ops->list);
-	if (ops->exit)
-		for_each_net(net)
-			ops->exit(net);
-	if (ops->id && ops->size)
-		for_each_net(net)
-			ops_free(ops, net);
+	for_each_net(net)
+		list_add_tail(&net->exit_list, &net_exit_list);
+	ops_exit_list(ops, &net_exit_list);
+	ops_free_list(ops, &net_exit_list);
 }
 
 #else
@@ -411,9 +410,10 @@ static int __register_pernet_operations(struct list_head *list,
 
 static void __unregister_pernet_operations(struct pernet_operations *ops)
 {
-	if (ops->exit)
-		ops->exit(&init_net);
-	ops_free(ops, &init_net);
+	LIST_HEAD(net_exit_list);
+	list_add(&init_net.exit_list, &net_exit_list);
+	ops_exit_list(ops, &net_exit_list);
+	ops_free_list(ops, &net_exit_list);
 }
 
 #endif /* CONFIG_NET_NS */