aboutsummaryrefslogtreecommitdiffstats
path: root/net/core/dev.c
diff options
context:
space:
mode:
authorKirill Tkhai <ktkhai@virtuozzo.com>2018-03-29 12:20:32 -0400
committerDavid S. Miller <davem@davemloft.net>2018-03-29 13:47:53 -0400
commitf0b07bb151b098d291fd1fd71ef7a2df56fb124a (patch)
tree24f28ec5ec61e4b0950fef35da79853357a34afb /net/core/dev.c
parent906edee91e79af5a348f1ad1b3f9b4b948db3db7 (diff)
net: Introduce net_rwsem to protect net_namespace_list
rtnl_lock() is used everywhere, and contention is very high. When someone wants to iterate over alive net namespaces, he/she has no possibility to do that without an exclusive lock. But the exclusive rtnl_lock() in such places is overkill, and it just increases the contention. Yes, for_each_net_rcu() already exists in the kernel, but it requires rcu_read_lock(), so the iteration body can't sleep. Also, sometimes it may really be necessary to prevent net_namespace_list growth, so for_each_net_rcu() does not fit there. This patch introduces a new rw_semaphore, which will be used instead of rtnl_mutex to protect net_namespace_list. It is sleepable and allows non-exclusive iterations over the net namespaces list. It allows us to stop using rtnl_lock() in several places (which is done in the next patches) and reduces the time we hold rtnl_mutex. Here we just add the new lock, while the explanation of why we can remove rtnl_lock() there is in the next patches. Fine-grained locks are generally better than one big lock, so let's do that with net_namespace_list, while the situation allows it. Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--net/core/dev.c5
1 file changed, 5 insertions(+), 0 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index e13807b5c84d..eca5458b2753 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1629,6 +1629,7 @@ int register_netdevice_notifier(struct notifier_block *nb)
1629 goto unlock; 1629 goto unlock;
1630 if (dev_boot_phase) 1630 if (dev_boot_phase)
1631 goto unlock; 1631 goto unlock;
1632 down_read(&net_rwsem);
1632 for_each_net(net) { 1633 for_each_net(net) {
1633 for_each_netdev(net, dev) { 1634 for_each_netdev(net, dev) {
1634 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev); 1635 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
@@ -1642,6 +1643,7 @@ int register_netdevice_notifier(struct notifier_block *nb)
1642 call_netdevice_notifier(nb, NETDEV_UP, dev); 1643 call_netdevice_notifier(nb, NETDEV_UP, dev);
1643 } 1644 }
1644 } 1645 }
1646 up_read(&net_rwsem);
1645 1647
1646unlock: 1648unlock:
1647 rtnl_unlock(); 1649 rtnl_unlock();
@@ -1664,6 +1666,7 @@ rollback:
1664 } 1666 }
1665 1667
1666outroll: 1668outroll:
1669 up_read(&net_rwsem);
1667 raw_notifier_chain_unregister(&netdev_chain, nb); 1670 raw_notifier_chain_unregister(&netdev_chain, nb);
1668 goto unlock; 1671 goto unlock;
1669} 1672}
@@ -1694,6 +1697,7 @@ int unregister_netdevice_notifier(struct notifier_block *nb)
1694 if (err) 1697 if (err)
1695 goto unlock; 1698 goto unlock;
1696 1699
1700 down_read(&net_rwsem);
1697 for_each_net(net) { 1701 for_each_net(net) {
1698 for_each_netdev(net, dev) { 1702 for_each_netdev(net, dev) {
1699 if (dev->flags & IFF_UP) { 1703 if (dev->flags & IFF_UP) {
@@ -1704,6 +1708,7 @@ int unregister_netdevice_notifier(struct notifier_block *nb)
1704 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev); 1708 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1705 } 1709 }
1706 } 1710 }
1711 up_read(&net_rwsem);
1707unlock: 1712unlock:
1708 rtnl_unlock(); 1713 rtnl_unlock();
1709 return err; 1714 return err;