author		Eric W. Biederman <ebiederm@xmission.com>	2013-10-05 22:26:05 -0400
committer	David S. Miller <davem@davemloft.net>	2013-10-07 15:23:14 -0400
commit		5cde282938915f36a2e6769b51c24c4159654859 (patch)
tree		9e364e2988fb3556313eda68eea6fb5655b6df1e
parent		d639feaaf3f40cd90b75a2fec5b7d5c3f96c2c88 (diff)
net: Separate the close_list and the unreg_list v2
Separate the unreg_list and the close_list in dev_close_many, preventing
dev_close_many from permuting the unreg_list. The permutations of the
unreg_list have resulted in cases where the loopback device is accessed
after it has been freed, in code such as dst_ifdown, resulting in subtle
memory corruption.

This is the second bug from sharing the storage between the close_list
and the unreg_list. The issues that crop up with sharing are apparently
too subtle to show up in normal testing or usage, so let's forget about
being clever and use two separate lists.

v2: Make all callers pass in a close_list to dev_close_many

Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	include/linux/netdevice.h	1
-rw-r--r--	net/core/dev.c	25
-rw-r--r--	net/sched/sch_generic.c	6
3 files changed, 18 insertions, 14 deletions
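The core idea behind the fix deserves spelling out: an object can safely sit on several lists at once only if it embeds one list_head per list, because each embedded node carries its own prev/next pointers. What follows is a minimal userspace sketch of that pattern, not kernel code: struct fake_dev and the list helpers are simplified, hypothetical stand-ins for struct net_device and <linux/list.h>.

#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h->prev = h;
}

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

/* Unlink an entry and leave it in a safely re-usable (empty) state. */
static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical stand-in for struct net_device: one embedded
 * list_head per list the object may join. */
struct fake_dev {
	const char *name;
	struct list_head unreg_list;	/* seat on the unregistration list */
	struct list_head close_list;	/* seat on the close list */
};

int main(void)
{
	struct list_head unreg_head = LIST_HEAD_INIT(unreg_head);
	struct list_head close_head = LIST_HEAD_INIT(close_head);
	struct fake_dev dev = { .name = "dummy0" };
	struct list_head *p;

	INIT_LIST_HEAD(&dev.unreg_list);
	INIT_LIST_HEAD(&dev.close_list);

	/* The same device sits on both lists at the same time... */
	list_add_tail(&dev.unreg_list, &unreg_head);
	list_add_tail(&dev.close_list, &close_head);

	/* ...and unlinking it from close_list cannot perturb unreg_list. */
	list_del_init(&dev.close_list);

	for (p = unreg_head.next; p != &unreg_head; p = p->next)
		printf("still on unreg_list: %s\n",
		       container_of(p, struct fake_dev, unreg_list)->name);
	return 0;
}

With separate nodes, list_del_init() on close_list rewires only that node's neighbours and leaves the unreg_list node untouched, which is exactly the property the old shared-storage scheme violated.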
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f5cd464271bf..6d77e0f3cc10 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1143,6 +1143,7 @@ struct net_device {
 	struct list_head	dev_list;
 	struct list_head	napi_list;
 	struct list_head	unreg_list;
+	struct list_head	close_list;
 
 	/* directly linked devices, like slaves for bonding */
 	struct {
diff --git a/net/core/dev.c b/net/core/dev.c
index c25db20a4246..fa0b2b06c1a6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1307,7 +1307,7 @@ static int __dev_close_many(struct list_head *head)
 	ASSERT_RTNL();
 	might_sleep();
 
-	list_for_each_entry(dev, head, unreg_list) {
+	list_for_each_entry(dev, head, close_list) {
 		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
 
 		clear_bit(__LINK_STATE_START, &dev->state);
@@ -1323,7 +1323,7 @@ static int __dev_close_many(struct list_head *head)
 
 	dev_deactivate_many(head);
 
-	list_for_each_entry(dev, head, unreg_list) {
+	list_for_each_entry(dev, head, close_list) {
 		const struct net_device_ops *ops = dev->netdev_ops;
 
 		/*
@@ -1351,7 +1351,7 @@ static int __dev_close(struct net_device *dev)
 	/* Temporarily disable netpoll until the interface is down */
 	netpoll_rx_disable(dev);
 
-	list_add(&dev->unreg_list, &single);
+	list_add(&dev->close_list, &single);
 	retval = __dev_close_many(&single);
 	list_del(&single);
 
@@ -1362,21 +1362,20 @@ static int __dev_close(struct net_device *dev)
 static int dev_close_many(struct list_head *head)
 {
 	struct net_device *dev, *tmp;
-	LIST_HEAD(tmp_list);
 
-	list_for_each_entry_safe(dev, tmp, head, unreg_list)
+	/* Remove the devices that don't need to be closed */
+	list_for_each_entry_safe(dev, tmp, head, close_list)
 		if (!(dev->flags & IFF_UP))
-			list_move(&dev->unreg_list, &tmp_list);
+			list_del_init(&dev->close_list);
 
 	__dev_close_many(head);
 
-	list_for_each_entry(dev, head, unreg_list) {
+	list_for_each_entry_safe(dev, tmp, head, close_list) {
 		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
 		call_netdevice_notifiers(NETDEV_DOWN, dev);
+		list_del_init(&dev->close_list);
 	}
 
-	/* rollback_registered_many needs the complete original list */
-	list_splice(&tmp_list, head);
 	return 0;
 }
 
@@ -1397,7 +1396,7 @@ int dev_close(struct net_device *dev)
 		/* Block netpoll rx while the interface is going down */
 		netpoll_rx_disable(dev);
 
-		list_add(&dev->unreg_list, &single);
+		list_add(&dev->close_list, &single);
 		dev_close_many(&single);
 		list_del(&single);
 
@@ -5439,6 +5438,7 @@ static void net_set_todo(struct net_device *dev)
 static void rollback_registered_many(struct list_head *head)
 {
 	struct net_device *dev, *tmp;
+	LIST_HEAD(close_head);
 
 	BUG_ON(dev_boot_phase);
 	ASSERT_RTNL();
@@ -5461,7 +5461,9 @@ static void rollback_registered_many(struct list_head *head)
 	}
 
 	/* If device is running, close it first. */
-	dev_close_many(head);
+	list_for_each_entry(dev, head, unreg_list)
+		list_add_tail(&dev->close_list, &close_head);
+	dev_close_many(&close_head);
 
 	list_for_each_entry(dev, head, unreg_list) {
 		/* And unlink it from device chain. */
@@ -6257,6 +6259,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 
 	INIT_LIST_HEAD(&dev->napi_list);
 	INIT_LIST_HEAD(&dev->unreg_list);
+	INIT_LIST_HEAD(&dev->close_list);
 	INIT_LIST_HEAD(&dev->link_watch_list);
 	INIT_LIST_HEAD(&dev->adj_list.upper);
 	INIT_LIST_HEAD(&dev->adj_list.lower);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index e7121d29c4bd..7fc899a943a8 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -829,7 +829,7 @@ void dev_deactivate_many(struct list_head *head)
 	struct net_device *dev;
 	bool sync_needed = false;
 
-	list_for_each_entry(dev, head, unreg_list) {
+	list_for_each_entry(dev, head, close_list) {
 		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
 					 &noop_qdisc);
 		if (dev_ingress_queue(dev))
@@ -848,7 +848,7 @@ void dev_deactivate_many(struct list_head *head)
 	synchronize_net();
 
 	/* Wait for outstanding qdisc_run calls. */
-	list_for_each_entry(dev, head, unreg_list)
+	list_for_each_entry(dev, head, close_list)
 		while (some_qdisc_is_busy(dev))
 			yield();
 }
@@ -857,7 +857,7 @@ void dev_deactivate(struct net_device *dev)
 {
 	LIST_HEAD(single);
 
-	list_add(&dev->unreg_list, &single);
+	list_add(&dev->close_list, &single);
 	dev_deactivate_many(&single);
 	list_del(&single);
 }