diff options
| author | David S. Miller <davem@davemloft.net> | 2015-03-18 22:52:33 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2015-03-18 22:52:56 -0400 |
| commit | 99c4a26a159b28fa46a3e746a9b41b297e73d261 (patch) | |
| tree | 38f5c5d424b7596b77e1f40d7c5f1ce33cfa115e | |
| parent | 738e6d30d392fb75933a5eb4b481811598038786 (diff) | |
net: Fix high overhead of vlan sub-device teardown.
When a networking device is taken down that has a non-trivial number
of VLAN devices configured under it, we eat a full synchronize_net()
for every such VLAN device.
This is because of the call chain:
NETDEV_DOWN notifier
--> vlan_device_event()
--> dev_change_flags()
--> __dev_change_flags()
--> __dev_close()
--> __dev_close_many()
--> dev_deactivate_many()
--> synchronize_net()
This is kind of ridiculous because we already have infrastructure for
batching an operation across a list of net devices so that we only
incur one sync.
So make use of that by exporting dev_close_many() and adjusting its
interface so that the caller can fully manage the batch list. Use
this in vlan_device_event() and all the overhead goes away.
Reported-by: Salam Noureddine <noureddine@arista.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| -rw-r--r-- | include/linux/netdevice.h | 1 | ||||
| -rw-r--r-- | net/8021q/vlan.c | 16 | ||||
| -rw-r--r-- | net/core/dev.c | 10 |
3 files changed, 20 insertions, 7 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ec8f9b5f6500..76951c5fbedf 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
| @@ -2156,6 +2156,7 @@ struct net_device *__dev_get_by_name(struct net *net, const char *name); | |||
| 2156 | int dev_alloc_name(struct net_device *dev, const char *name); | 2156 | int dev_alloc_name(struct net_device *dev, const char *name); |
| 2157 | int dev_open(struct net_device *dev); | 2157 | int dev_open(struct net_device *dev); |
| 2158 | int dev_close(struct net_device *dev); | 2158 | int dev_close(struct net_device *dev); |
| 2159 | int dev_close_many(struct list_head *head, bool unlink); | ||
| 2159 | void dev_disable_lro(struct net_device *dev); | 2160 | void dev_disable_lro(struct net_device *dev); |
| 2160 | int dev_loopback_xmit(struct sk_buff *newskb); | 2161 | int dev_loopback_xmit(struct sk_buff *newskb); |
| 2161 | int dev_queue_xmit(struct sk_buff *skb); | 2162 | int dev_queue_xmit(struct sk_buff *skb); |
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 64c6bed4a3d3..98a30a5b8664 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c | |||
| @@ -413,7 +413,10 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, | |||
| 413 | vlan_transfer_features(dev, vlandev); | 413 | vlan_transfer_features(dev, vlandev); |
| 414 | break; | 414 | break; |
| 415 | 415 | ||
| 416 | case NETDEV_DOWN: | 416 | case NETDEV_DOWN: { |
| 417 | struct net_device *tmp; | ||
| 418 | LIST_HEAD(close_list); | ||
| 419 | |||
| 417 | if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) | 420 | if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) |
| 418 | vlan_vid_del(dev, htons(ETH_P_8021Q), 0); | 421 | vlan_vid_del(dev, htons(ETH_P_8021Q), 0); |
| 419 | 422 | ||
| @@ -425,11 +428,18 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, | |||
| 425 | 428 | ||
| 426 | vlan = vlan_dev_priv(vlandev); | 429 | vlan = vlan_dev_priv(vlandev); |
| 427 | if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING)) | 430 | if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING)) |
| 428 | dev_change_flags(vlandev, flgs & ~IFF_UP); | 431 | list_add(&vlandev->close_list, &close_list); |
| 432 | } | ||
| 433 | |||
| 434 | dev_close_many(&close_list, false); | ||
| 435 | |||
| 436 | list_for_each_entry_safe(vlandev, tmp, &close_list, close_list) { | ||
| 429 | netif_stacked_transfer_operstate(dev, vlandev); | 437 | netif_stacked_transfer_operstate(dev, vlandev); |
| 438 | list_del_init(&vlandev->close_list); | ||
| 430 | } | 439 | } |
| 440 | list_del(&close_list); | ||
| 431 | break; | 441 | break; |
| 432 | 442 | } | |
| 433 | case NETDEV_UP: | 443 | case NETDEV_UP: |
| 434 | /* Put all VLANs for this dev in the up state too. */ | 444 | /* Put all VLANs for this dev in the up state too. */ |
| 435 | vlan_group_for_each_dev(grp, i, vlandev) { | 445 | vlan_group_for_each_dev(grp, i, vlandev) { |
diff --git a/net/core/dev.c b/net/core/dev.c index a1f24151db5b..5d43e010ef87 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -1385,7 +1385,7 @@ static int __dev_close(struct net_device *dev) | |||
| 1385 | return retval; | 1385 | return retval; |
| 1386 | } | 1386 | } |
| 1387 | 1387 | ||
| 1388 | static int dev_close_many(struct list_head *head) | 1388 | int dev_close_many(struct list_head *head, bool unlink) |
| 1389 | { | 1389 | { |
| 1390 | struct net_device *dev, *tmp; | 1390 | struct net_device *dev, *tmp; |
| 1391 | 1391 | ||
| @@ -1399,11 +1399,13 @@ static int dev_close_many(struct list_head *head) | |||
| 1399 | list_for_each_entry_safe(dev, tmp, head, close_list) { | 1399 | list_for_each_entry_safe(dev, tmp, head, close_list) { |
| 1400 | rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL); | 1400 | rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL); |
| 1401 | call_netdevice_notifiers(NETDEV_DOWN, dev); | 1401 | call_netdevice_notifiers(NETDEV_DOWN, dev); |
| 1402 | list_del_init(&dev->close_list); | 1402 | if (unlink) |
| 1403 | list_del_init(&dev->close_list); | ||
| 1403 | } | 1404 | } |
| 1404 | 1405 | ||
| 1405 | return 0; | 1406 | return 0; |
| 1406 | } | 1407 | } |
| 1408 | EXPORT_SYMBOL(dev_close_many); | ||
| 1407 | 1409 | ||
| 1408 | /** | 1410 | /** |
| 1409 | * dev_close - shutdown an interface. | 1411 | * dev_close - shutdown an interface. |
| @@ -1420,7 +1422,7 @@ int dev_close(struct net_device *dev) | |||
| 1420 | LIST_HEAD(single); | 1422 | LIST_HEAD(single); |
| 1421 | 1423 | ||
| 1422 | list_add(&dev->close_list, &single); | 1424 | list_add(&dev->close_list, &single); |
| 1423 | dev_close_many(&single); | 1425 | dev_close_many(&single, true); |
| 1424 | list_del(&single); | 1426 | list_del(&single); |
| 1425 | } | 1427 | } |
| 1426 | return 0; | 1428 | return 0; |
| @@ -5986,7 +5988,7 @@ static void rollback_registered_many(struct list_head *head) | |||
| 5986 | /* If device is running, close it first. */ | 5988 | /* If device is running, close it first. */ |
| 5987 | list_for_each_entry(dev, head, unreg_list) | 5989 | list_for_each_entry(dev, head, unreg_list) |
| 5988 | list_add_tail(&dev->close_list, &close_head); | 5990 | list_add_tail(&dev->close_list, &close_head); |
| 5989 | dev_close_many(&close_head); | 5991 | dev_close_many(&close_head, true); |
| 5990 | 5992 | ||
| 5991 | list_for_each_entry(dev, head, unreg_list) { | 5993 | list_for_each_entry(dev, head, unreg_list) { |
| 5992 | /* And unlink it from device chain. */ | 5994 | /* And unlink it from device chain. */ |
