diff options
author | Kirill Tkhai <ktkhai@virtuozzo.com> | 2018-03-29 12:20:32 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2018-03-29 13:47:53 -0400 |
commit | f0b07bb151b098d291fd1fd71ef7a2df56fb124a (patch) | |
tree | 24f28ec5ec61e4b0950fef35da79853357a34afb /net/core/rtnetlink.c | |
parent | 906edee91e79af5a348f1ad1b3f9b4b948db3db7 (diff) |
net: Introduce net_rwsem to protect net_namespace_list
rtnl_lock() is used everywhere, and contention is very high.
When someone wants to iterate over alive net namespaces,
he/she has no possibility to do that without an exclusive lock.
But the exclusive rtnl_lock() in such places is overkill,
and it just increases the contention. Yes, there is already
for_each_net_rcu() in kernel, but it requires rcu_read_lock(),
and thus can't sleep. Also, sometimes it may be necessary
to really prevent net_namespace_list growth, so for_each_net_rcu()
does not fit there.
This patch introduces new rw_semaphore, which will be used
instead of rtnl_mutex to protect net_namespace_list. It is
sleepable and allows not-exclusive iterations over net
namespaces list. It allows us to stop using rtnl_lock()
in several places (which is done in the next patches) and reduces
the time we hold rtnl_mutex. Here we just add the new lock,
while the explanation of why we can remove rtnl_lock() there
is given in the next patches.
Fine-grained locks are generally better than one big lock,
so let's do that with net_namespace_list, while the situation
allows that.
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/rtnetlink.c')
-rw-r--r-- | net/core/rtnetlink.c | 5 |
1 files changed, 5 insertions, 0 deletions
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 2d3949789cef..e86b28482ca7 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -418,9 +418,11 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops) | |||
418 | { | 418 | { |
419 | struct net *net; | 419 | struct net *net; |
420 | 420 | ||
421 | down_read(&net_rwsem); | ||
421 | for_each_net(net) { | 422 | for_each_net(net) { |
422 | __rtnl_kill_links(net, ops); | 423 | __rtnl_kill_links(net, ops); |
423 | } | 424 | } |
425 | up_read(&net_rwsem); | ||
424 | list_del(&ops->list); | 426 | list_del(&ops->list); |
425 | } | 427 | } |
426 | EXPORT_SYMBOL_GPL(__rtnl_link_unregister); | 428 | EXPORT_SYMBOL_GPL(__rtnl_link_unregister); |
@@ -438,6 +440,9 @@ static void rtnl_lock_unregistering_all(void) | |||
438 | for (;;) { | 440 | for (;;) { |
439 | unregistering = false; | 441 | unregistering = false; |
440 | rtnl_lock(); | 442 | rtnl_lock(); |
443 | /* We held write locked pernet_ops_rwsem, and parallel | ||
444 | * setup_net() and cleanup_net() are not possible. | ||
445 | */ | ||
441 | for_each_net(net) { | 446 | for_each_net(net) { |
442 | if (net->dev_unreg_count > 0) { | 447 | if (net->dev_unreg_count > 0) { |
443 | unregistering = true; | 448 | unregistering = true; |