author    Neil Horman <nhorman@tuxdriver.com>    2013-02-05 03:05:43 -0500
committer David S. Miller <davem@davemloft.net>  2013-02-06 15:45:03 -0500
commit    ca99ca14c95ae49fb4c9cd3abf5f84d11a7e8a61 (patch)
tree      889b44125a7d9471424fc58537ba7e19aec20a2c /net/core/dev.c
parent    f458c647ea5cdd956f8055a4178072be460559f1 (diff)
netpoll: protect napi_poll and poll_controller during dev_[open|close]
Ivan Vecera was recently backporting commit 9c13cb8bb477a83b9a3c9e5a5478a4e21294a760 to a RHEL kernel, and I noticed that, while that patch protects the tg3 driver from having its ndo_poll_controller routine called during device initialization, it does nothing for the driver during shutdown. I.e. it would be entirely possible to have the ndo_poll_controller method (or subsequently the ndo_poll routine) called for a driver in the netpoll path on CPU A while, in parallel on CPU B, the ndo_close or ndo_open routine is called. Given that the two latter routines tend to initialize and free many data structures that the former two rely on, the result can easily be data corruption or various other crashes. Furthermore, this is potentially a problem with all net drivers that support netpoll, so it should ideally be fixed in a common path.

As Ben H pointed out to me, we can't perform dev_open/dev_close in atomic context, so I've come up with this solution: use a mutex to sleep in the open/close paths and just do a mutex_trylock in the napi poll path, abandoning the poll attempt if we're locked, as we'll simply retry the poll on the next send anyway.

I've tested this here by flooding netconsole with messages on a system whose NIC driver I modified to periodically return NETDEV_TX_BUSY, so that the netpoll tx workqueue would be forced to send frames and poll the device. While this was going on I rapidly ifdown/up'ed the interface and watched for any problems. I've not found any.

Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: Ivan Vecera <ivecera@redhat.com>
CC: "David S. Miller" <davem@davemloft.net>
CC: Ben Hutchings <bhutchings@solarflare.com>
CC: Francois Romieu <romieu@fr.zoreil.com>
CC: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
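The helpers this patch calls from __dev_open()/__dev_close()/dev_close(), netpoll_rx_disable() and netpoll_rx_enable(), are added in the companion net/core/netpoll.c change, which is outside the diff shown below. A minimal sketch of the mutex scheme described above follows; the dev_lock field, the netpoll_poll_dev() shape, and the simplified locking are assumptions for illustration, not the exact code of that change.

/*
 * Sketch only: simplified from the commit description above.  The dev_lock
 * field and netpoll_poll_dev() shape are assumed; the real change also
 * protects the dev->npinfo lookup, which is omitted here.
 */
int netpoll_rx_disable(struct net_device *dev)
{
	struct netpoll_info *ni = dev->npinfo;	/* lookup protection omitted */

	might_sleep();				/* open/close may block here */
	if (ni)
		mutex_lock(&ni->dev_lock);
	return 0;
}

void netpoll_rx_enable(struct net_device *dev)
{
	struct netpoll_info *ni = dev->npinfo;

	if (ni)
		mutex_unlock(&ni->dev_lock);
}

/*
 * The netpoll poll path runs in atomic context, so it never sleeps: it
 * try-locks and, if dev_open()/dev_close() currently holds dev_lock,
 * abandons this poll attempt.  The next netpoll transmit retries the poll.
 */
static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct netpoll_info *ni = dev->npinfo;

	if (!ni || !mutex_trylock(&ni->dev_lock))
		return;

	if (netif_running(dev) && ops->ndo_poll_controller)
		ops->ndo_poll_controller(dev);

	mutex_unlock(&ni->dev_lock);
}

The asymmetry is the point of the design: open/close run in process context and may sleep until the poll path is idle, while the poll path must stay atomic and therefore only try-locks and bails out.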
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--	net/core/dev.c	27
1 file changed, 26 insertions(+), 1 deletion(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index e04bfdc9e3e4..2b275a7b8677 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1266,6 +1266,14 @@ static int __dev_open(struct net_device *dev)
 	if (!netif_device_present(dev))
 		return -ENODEV;
 
+	/* Block netpoll from trying to do any rx path servicing.
+	 * If we don't do this there is a chance ndo_poll_controller
+	 * or ndo_poll may be running while we open the device
+	 */
+	ret = netpoll_rx_disable(dev);
+	if (ret)
+		return ret;
+
 	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
 	ret = notifier_to_errno(ret);
 	if (ret)
@@ -1279,6 +1287,8 @@ static int __dev_open(struct net_device *dev)
 	if (!ret && ops->ndo_open)
 		ret = ops->ndo_open(dev);
 
+	netpoll_rx_enable(dev);
+
 	if (ret)
 		clear_bit(__LINK_STATE_START, &dev->state);
 	else {
@@ -1370,9 +1380,16 @@ static int __dev_close(struct net_device *dev)
 	int retval;
 	LIST_HEAD(single);
 
+	/* Temporarily disable netpoll until the interface is down */
+	retval = netpoll_rx_disable(dev);
+	if (retval)
+		return retval;
+
 	list_add(&dev->unreg_list, &single);
 	retval = __dev_close_many(&single);
 	list_del(&single);
+
+	netpoll_rx_enable(dev);
 	return retval;
 }
 
@@ -1408,14 +1425,22 @@ static int dev_close_many(struct list_head *head)
  */
 int dev_close(struct net_device *dev)
 {
+	int ret = 0;
 	if (dev->flags & IFF_UP) {
 		LIST_HEAD(single);
 
+		/* Block netpoll rx while the interface is going down */
+		ret = netpoll_rx_disable(dev);
+		if (ret)
+			return ret;
+
 		list_add(&dev->unreg_list, &single);
 		dev_close_many(&single);
 		list_del(&single);
+
+		netpoll_rx_enable(dev);
 	}
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(dev_close);
 