diff options
author | David S. Miller <davem@davemloft.net> | 2015-03-18 22:52:33 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2015-03-18 22:52:56 -0400 |
commit | 99c4a26a159b28fa46a3e746a9b41b297e73d261 (patch) | |
tree | 38f5c5d424b7596b77e1f40d7c5f1ce33cfa115e /net/core | |
parent | 738e6d30d392fb75933a5eb4b481811598038786 (diff) |
net: Fix high overhead of vlan sub-device teardown.
When a networking device is taken down that has a non-trivial number
of VLAN devices configured under it, we eat a full synchronize_net()
for every such VLAN device.
This is because of the call chain:
NETDEV_DOWN notifier
--> vlan_device_event()
--> dev_change_flags()
--> __dev_change_flags()
--> __dev_close()
--> __dev_close_many()
--> dev_deactivate_many()
--> synchronize_net()
This is kind of ridiculous because we already have infrastructure for
batching the application of operation X to a list of net devices so that
we only incur one sync.
So make use of that by exporting dev_close_many() and adjusting its
interface so that the caller can fully manage the batch list. Use
this in vlan_device_event() and all the overhead goes away.
Reported-by: Salam Noureddine <noureddine@arista.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r-- | net/core/dev.c | 10 |
1 files changed, 6 insertions, 4 deletions
diff --git a/net/core/dev.c b/net/core/dev.c index a1f24151db5b..5d43e010ef87 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1385,7 +1385,7 @@ static int __dev_close(struct net_device *dev) | |||
1385 | return retval; | 1385 | return retval; |
1386 | } | 1386 | } |
1387 | 1387 | ||
1388 | static int dev_close_many(struct list_head *head) | 1388 | int dev_close_many(struct list_head *head, bool unlink) |
1389 | { | 1389 | { |
1390 | struct net_device *dev, *tmp; | 1390 | struct net_device *dev, *tmp; |
1391 | 1391 | ||
@@ -1399,11 +1399,13 @@ static int dev_close_many(struct list_head *head) | |||
1399 | list_for_each_entry_safe(dev, tmp, head, close_list) { | 1399 | list_for_each_entry_safe(dev, tmp, head, close_list) { |
1400 | rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL); | 1400 | rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL); |
1401 | call_netdevice_notifiers(NETDEV_DOWN, dev); | 1401 | call_netdevice_notifiers(NETDEV_DOWN, dev); |
1402 | list_del_init(&dev->close_list); | 1402 | if (unlink) |
1403 | list_del_init(&dev->close_list); | ||
1403 | } | 1404 | } |
1404 | 1405 | ||
1405 | return 0; | 1406 | return 0; |
1406 | } | 1407 | } |
1408 | EXPORT_SYMBOL(dev_close_many); | ||
1407 | 1409 | ||
1408 | /** | 1410 | /** |
1409 | * dev_close - shutdown an interface. | 1411 | * dev_close - shutdown an interface. |
@@ -1420,7 +1422,7 @@ int dev_close(struct net_device *dev) | |||
1420 | LIST_HEAD(single); | 1422 | LIST_HEAD(single); |
1421 | 1423 | ||
1422 | list_add(&dev->close_list, &single); | 1424 | list_add(&dev->close_list, &single); |
1423 | dev_close_many(&single); | 1425 | dev_close_many(&single, true); |
1424 | list_del(&single); | 1426 | list_del(&single); |
1425 | } | 1427 | } |
1426 | return 0; | 1428 | return 0; |
@@ -5986,7 +5988,7 @@ static void rollback_registered_many(struct list_head *head) | |||
5986 | /* If device is running, close it first. */ | 5988 | /* If device is running, close it first. */ |
5987 | list_for_each_entry(dev, head, unreg_list) | 5989 | list_for_each_entry(dev, head, unreg_list) |
5988 | list_add_tail(&dev->close_list, &close_head); | 5990 | list_add_tail(&dev->close_list, &close_head); |
5989 | dev_close_many(&close_head); | 5991 | dev_close_many(&close_head, true); |
5990 | 5992 | ||
5991 | list_for_each_entry(dev, head, unreg_list) { | 5993 | list_for_each_entry(dev, head, unreg_list) { |
5992 | /* And unlink it from device chain. */ | 5994 | /* And unlink it from device chain. */ |