author    Eric W. Biederman <ebiederm@xmission.com>  2014-03-27 18:38:17 -0400
committer David S. Miller <davem@davemloft.net>      2014-03-29 17:58:37 -0400
commit    3f4df2066b4e02cb609fa33b2eae8403b5821f4f (patch)
tree      e960a9af040e91c50c9bb56c48a76a4c873006e6 /net/core
parent    944e294857033dbe519a136cad05dc4e2570874e (diff)
netpoll: Move rx enable/disable into __dev_close_many

Today netpoll_rx_enable and netpoll_rx_disable are called from dev_close
and __dev_close, but not from dev_close_many. Move the calls into
__dev_close_many so that there is a single call site to maintain, and so
that dev_close_many gains this protection as well, which importantly
makes batched network device deletes safe.

Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
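For readability, here is a condensed view of __dev_close_many() as it looks
with this patch applied. The "..." comments stand in for the unchanged
teardown steps (queue flush, driver stop path, etc.) that the diff below does
not touch, so treat this as an illustrative sketch rather than the verbatim
function body:

static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* quiesce netpoll rx before this device starts going down */
		netpoll_rx_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
		clear_bit(__LINK_STATE_START, &dev->state);

		/* ... flush queues and call the driver's stop path ... */

		dev->flags &= ~IFF_UP;
		net_dmaengine_put();

		/* the device is fully down; netpoll rx is safe again */
		netpoll_rx_enable(dev);
	}

	return 0;
}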
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c | 13 ++++---------
1 file changed, 4 insertions(+), 9 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 98ba581b89f0..8d55fe780e3f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1313,6 +1313,9 @@ static int __dev_close_many(struct list_head *head)
 	might_sleep();
 
 	list_for_each_entry(dev, head, close_list) {
+		/* Temporarily disable netpoll until the interface is down */
+		netpoll_rx_disable(dev);
+
 		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
 
 		clear_bit(__LINK_STATE_START, &dev->state);
@@ -1343,6 +1346,7 @@ static int __dev_close_many(struct list_head *head)
 
 		dev->flags &= ~IFF_UP;
 		net_dmaengine_put();
+		netpoll_rx_enable(dev);
 	}
 
 	return 0;
@@ -1353,14 +1357,10 @@ static int __dev_close(struct net_device *dev)
 	int retval;
 	LIST_HEAD(single);
 
-	/* Temporarily disable netpoll until the interface is down */
-	netpoll_rx_disable(dev);
-
 	list_add(&dev->close_list, &single);
 	retval = __dev_close_many(&single);
 	list_del(&single);
 
-	netpoll_rx_enable(dev);
 	return retval;
 }
 
@@ -1398,14 +1398,9 @@ int dev_close(struct net_device *dev)
 	if (dev->flags & IFF_UP) {
 		LIST_HEAD(single);
 
-		/* Block netpoll rx while the interface is going down */
-		netpoll_rx_disable(dev);
-
 		list_add(&dev->close_list, &single);
 		dev_close_many(&single);
 		list_del(&single);
-
-		netpoll_rx_enable(dev);
 	}
 	return 0;
 }
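The "batched network device deletes" the changelog refers to reach
dev_close_many() without passing through dev_close(), e.g. from
rollback_registered_many() during batched unregistration. A heavily
simplified sketch of that caller, from memory of kernels in this era and
shown only to illustrate why the netpoll bracket must live inside
__dev_close_many() to cover the batched path:

static void rollback_registered_many(struct list_head *head)
{
	struct net_device *dev;
	LIST_HEAD(close_head);

	/* If device is running, close it first. */
	list_for_each_entry(dev, head, unreg_list)
		list_add_tail(&dev->close_list, &close_head);
	dev_close_many(&close_head);	/* each dev now gets the netpoll bracket */

	/* ... notifiers, rx handler teardown, sysfs removal, etc. elided ... */
}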