aboutsummaryrefslogtreecommitdiffstats
path: root/net/sched
diff options
context:
space:
mode:
authorEric W. Biederman <ebiederm@xmission.com>2013-10-05 22:26:05 -0400
committerDavid S. Miller <davem@davemloft.net>2013-10-07 15:23:14 -0400
commit5cde282938915f36a2e6769b51c24c4159654859 (patch)
tree9e364e2988fb3556313eda68eea6fb5655b6df1e /net/sched
parentd639feaaf3f40cd90b75a2fec5b7d5c3f96c2c88 (diff)
net: Separate the close_list and the unreg_list v2
Separate the unreg_list and the close_list in dev_close_many, preventing dev_close_many from permuting the unreg_list. The permutations of the unreg_list have resulted in cases where the loopback device is accessed after it has been freed in code such as dst_ifdown, resulting in subtle memory corruption. This is the second bug from sharing the storage between the close_list and the unreg_list. The issues that crop up with sharing are apparently too subtle to show up in normal testing or usage, so let's forget about being clever and use two separate lists. v2: Make all callers pass in a close_list to dev_close_many Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched')
-rw-r--r--net/sched/sch_generic.c6
1 file changed, 3 insertions, 3 deletions
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index e7121d29c4bd..7fc899a943a8 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -829,7 +829,7 @@ void dev_deactivate_many(struct list_head *head)
 	struct net_device *dev;
 	bool sync_needed = false;
 
-	list_for_each_entry(dev, head, unreg_list) {
+	list_for_each_entry(dev, head, close_list) {
 		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
 					 &noop_qdisc);
 		if (dev_ingress_queue(dev))
@@ -848,7 +848,7 @@ void dev_deactivate_many(struct list_head *head)
 	synchronize_net();
 
 	/* Wait for outstanding qdisc_run calls. */
-	list_for_each_entry(dev, head, unreg_list)
+	list_for_each_entry(dev, head, close_list)
 		while (some_qdisc_is_busy(dev))
 			yield();
 }
@@ -857,7 +857,7 @@ void dev_deactivate(struct net_device *dev)
 {
 	LIST_HEAD(single);
 
-	list_add(&dev->unreg_list, &single);
+	list_add(&dev->close_list, &single);
 	dev_deactivate_many(&single);
 	list_del(&single);
 }