author	Herbert Xu <herbert@gondor.apana.org.au>	2007-05-10 17:12:47 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-05-11 02:47:42 -0400
commit	41a23b0788610b27ecb4c4ee857f3fe7168f1070 (patch)
tree	8f239d889253a7d7d9dd9f12bf4c76f7ff184274 /net/sched/sch_generic.c
parent	cce1fa36a8ed36e8a3f64455e2a830f48e904c64 (diff)
[NET_SCHED]: Avoid requeue warning on dev_deactivate
When we relinquish queue_lock in qdisc_restart and then retake it for
requeueing, we might race against dev_deactivate and end up requeueing
onto noop_qdisc.  This causes a warning to be printed.

This patch fixes it by checking for noop_qdisc before we requeue.  As an
added bonus, we can remove the same check in __qdisc_run, which was added
to prevent dev->gso_skb from being requeued when we're shutting down.
Even though we've had to add a new conditional in its place, it's better
because it only happens on requeues rather than every single time that
qdisc_run is called.

For this to work we also need to move the clearing of gso_skb up in
dev_deactivate, as qdisc_restart can now occur even after we wait for
__LINK_STATE_QDISC_RUNNING to clear (but it won't do anything as long as
the queue and gso_skb are already clear).

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
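For context, a minimal sketch of how the requeue path in qdisc_restart reads
once this change is applied.  The relocking of queue_lock, the re-read of
dev->qdisc, and the trailing netif_schedule()/return are reconstructed from
the surrounding code of that era and are assumptions, not part of this diff:

	/* The driver rejected the packet (or was busy).  queue_lock was
	 * dropped around hard_start_xmit(), so dev_deactivate() may have
	 * run meanwhile and replaced dev->qdisc with &noop_qdisc.
	 */
	spin_lock(&dev->queue_lock);
	q = dev->qdisc;			/* re-read: may now be &noop_qdisc */

requeue:
	if (unlikely(q == &noop_qdisc))
		kfree_skb(skb);		/* device going down: just drop it */
	else if (skb->next)
		dev->gso_skb = skb;	/* partially sent GSO segment list */
	else
		q->ops->requeue(skb, q);

	netif_schedule(dev);
	return 1;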
Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--	net/sched/sch_generic.c	18
1 file changed, 8 insertions, 10 deletions
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 816d311db40f..f28bb2dc58d0 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -162,7 +162,9 @@ static inline int qdisc_restart(struct net_device *dev)
 	 */
 
 requeue:
-	if (skb->next)
+	if (unlikely(q == &noop_qdisc))
+		kfree_skb(skb);
+	else if (skb->next)
 		dev->gso_skb = skb;
 	else
 		q->ops->requeue(skb, q);
@@ -177,15 +179,11 @@ out:
 
 void __qdisc_run(struct net_device *dev)
 {
-	if (unlikely(dev->qdisc == &noop_qdisc))
-		goto out;
-
 	do {
 		if (!qdisc_restart(dev))
 			break;
 	} while (!netif_queue_stopped(dev));
 
-out:
 	clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
 }
 
@@ -547,6 +545,7 @@ void dev_activate(struct net_device *dev)
 void dev_deactivate(struct net_device *dev)
 {
 	struct Qdisc *qdisc;
+	struct sk_buff *skb;
 
 	spin_lock_bh(&dev->queue_lock);
 	qdisc = dev->qdisc;
@@ -554,8 +553,12 @@ void dev_deactivate(struct net_device *dev)
 
 	qdisc_reset(qdisc);
 
+	skb = dev->gso_skb;
+	dev->gso_skb = NULL;
 	spin_unlock_bh(&dev->queue_lock);
 
+	kfree_skb(skb);
+
 	dev_watchdog_down(dev);
 
 	/* Wait for outstanding dev_queue_xmit calls. */
@@ -564,11 +567,6 @@ void dev_deactivate(struct net_device *dev)
 	/* Wait for outstanding qdisc_run calls. */
 	while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
 		yield();
-
-	if (dev->gso_skb) {
-		kfree_skb(dev->gso_skb);
-		dev->gso_skb = NULL;
-	}
 }
 
 void dev_init_scheduler(struct net_device *dev)