author     David S. Miller <davem@davemloft.net>  2008-07-09 02:14:46 -0400
committer  David S. Miller <davem@davemloft.net>  2008-07-09 02:14:46 -0400
commit     79d16385c7f287a33ea771c4dbe60ae43f791b49 (patch)
tree       858bfe84e52d88356d5d0b49efc5148a0870ccf9 /net/sched
parent     b19fa1fa91845234961c64dbd564671aa7c0fd27 (diff)
netdev: Move atomic queue state bits into netdev_queue.
Signed-off-by: David S. Miller <davem@davemloft.net>
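
A minimal sketch, assuming the parts of the commit not shown here, of the data-structure shape this change implies. The diff below references __QUEUE_STATE_QDISC_RUNNING and a per-queue state word (txq->state), so struct netdev_queue is assumed to look roughly like this; the member layout is illustrative, not the verbatim kernel header.

/* Illustrative sketch only -- not verbatim include/linux/netdevice.h.
 * The queue-running bit is assumed to live in a per-queue state word
 * instead of the device-wide dev->state (__LINK_STATE_*) word.
 */
enum netdev_queue_state_t {
        __QUEUE_STATE_QDISC_RUNNING,    /* one CPU may run this queue's qdisc */
};

struct netdev_queue {
        spinlock_t      lock;   /* serializes queue accesses and the qdisc pointer */
        struct Qdisc    *qdisc; /* attached queueing discipline */
        unsigned long   state;  /* atomic __QUEUE_STATE_* bits */
        /* ... remaining members elided ... */
};
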
Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/sch_generic.c  20
1 file changed, 11 insertions, 9 deletions
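
All four hunks below follow one serialization rule: the CPU that owns the __QUEUE_STATE_QDISC_RUNNING bit is the only one allowed to run this queue's qdisc, and dev_deactivate() must not return until that bit is observed clear under queue->lock. A hedged sketch of the pattern follows; the helper names are assumptions for illustration, since only the clear_bit() call and the dev_deactivate() wait loop appear in the diff itself.

/* Sketch of the single-runner protocol; helper names are assumptions. */
static inline int txq_qdisc_run_begin(struct netdev_queue *txq)
{
        /* Atomically claim the queue; a second CPU sees the bit already
         * set and backs off instead of running the qdisc concurrently.
         */
        return !test_and_set_bit(__QUEUE_STATE_QDISC_RUNNING, &txq->state);
}

static inline void txq_qdisc_run_end(struct netdev_queue *txq)
{
        /* Release the queue; dev_deactivate() polls this bit and then
         * re-reads it under queue->lock so every effect of the final
         * run is visible before teardown proceeds.
         */
        clear_bit(__QUEUE_STATE_QDISC_RUNNING, &txq->state);
}

The double-check under queue->lock in the last hunk matters because clear_bit() by itself is not guaranteed to order the queue run's prior stores against the deactivating CPU's reads; taking and releasing queue->lock provides that visibility, as the in-code comment notes.
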
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index b6a36d394663..243de935b182 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -121,9 +121,9 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 /*
  * NOTE: Called under queue->lock with locally disabled BH.
  *
- * __LINK_STATE_QDISC_RUNNING guarantees only one CPU can process this
- * device at a time. queue->lock serializes queue accesses for
- * this device AND txq->qdisc pointer itself.
+ * __QUEUE_STATE_QDISC_RUNNING guarantees only one CPU can process
+ * this queue at a time. queue->lock serializes queue accesses for
+ * this queue AND txq->qdisc pointer itself.
  *
  * netif_tx_lock serializes accesses to device driver.
  *
@@ -206,7 +206,7 @@ void __qdisc_run(struct netdev_queue *txq)
                 }
         }
 
-        clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
+        clear_bit(__QUEUE_STATE_QDISC_RUNNING, &txq->state);
 }
 
 static void dev_watchdog(unsigned long arg)
@@ -605,9 +605,10 @@ static void dev_deactivate_queue(struct netdev_queue *dev_queue,
 
 void dev_deactivate(struct net_device *dev)
 {
+        struct netdev_queue *dev_queue = &dev->tx_queue;
         int running;
 
-        dev_deactivate_queue(&dev->tx_queue, &noop_qdisc);
+        dev_deactivate_queue(dev_queue, &noop_qdisc);
 
         dev_watchdog_down(dev);
 
@@ -616,16 +617,17 @@ void dev_deactivate(struct net_device *dev)
 
         /* Wait for outstanding qdisc_run calls. */
         do {
-                while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
+                while (test_bit(__QUEUE_STATE_QDISC_RUNNING, &dev_queue->state))
                         yield();
 
                 /*
                  * Double-check inside queue lock to ensure that all effects
                  * of the queue run are visible when we return.
                  */
-                spin_lock_bh(&dev->tx_queue.lock);
-                running = test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
-                spin_unlock_bh(&dev->tx_queue.lock);
+                spin_lock_bh(&dev_queue->lock);
+                running = test_bit(__QUEUE_STATE_QDISC_RUNNING,
+                                   &dev_queue->state);
+                spin_unlock_bh(&dev_queue->lock);
 
                 /*
                  * The running flag should never be set at this point because