about summary refs log tree commit diff stats
path: root/net/sched/sch_generic.c
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2008-07-16 04:42:40 -0400
committerDavid S. Miller <davem@davemloft.net>2008-07-17 22:21:19 -0400
commit7698b4fcabcd790efc4f226bada1e7b5870653af (patch)
tree031ce7a911fc5bff995421a5615d9ab25416a479 /net/sched/sch_generic.c
parente2627c8c2241bce45e368e150654d076b58a4595 (diff)
pkt_sched: Add and use qdisc_root() and qdisc_root_lock().
When code wants to lock the qdisc tree state, the logic operation it's doing is locking the top-level qdisc that sits of the root of the netdev_queue. Add qdisc_root_lock() to represent this and convert the easiest cases. In order for this to work out in all cases, we have to hook up the noop_qdisc to a dummy netdev_queue. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r-- net/sched/sch_generic.c | 21
1 file changed, 16 insertions(+), 5 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index ac208c2b2d10..739a8711ab30 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -151,14 +151,17 @@ static inline int qdisc_restart(struct netdev_queue *txq,
151{ 151{
152 int ret = NETDEV_TX_BUSY; 152 int ret = NETDEV_TX_BUSY;
153 struct net_device *dev; 153 struct net_device *dev;
154 spinlock_t *root_lock;
154 struct sk_buff *skb; 155 struct sk_buff *skb;
155 156
156 /* Dequeue packet */ 157 /* Dequeue packet */
157 if (unlikely((skb = dequeue_skb(q)) == NULL)) 158 if (unlikely((skb = dequeue_skb(q)) == NULL))
158 return 0; 159 return 0;
159 160
160 /* And release queue */ 161 root_lock = qdisc_root_lock(q);
161 spin_unlock(&txq->lock); 162
163 /* And release qdisc */
164 spin_unlock(root_lock);
162 165
163 dev = txq->dev; 166 dev = txq->dev;
164 167
@@ -167,7 +170,7 @@ static inline int qdisc_restart(struct netdev_queue *txq,
167 ret = dev_hard_start_xmit(skb, dev, txq); 170 ret = dev_hard_start_xmit(skb, dev, txq);
168 HARD_TX_UNLOCK(dev, txq); 171 HARD_TX_UNLOCK(dev, txq);
169 172
170 spin_lock(&txq->lock); 173 spin_lock(root_lock);
171 174
172 switch (ret) { 175 switch (ret) {
173 case NETDEV_TX_OK: 176 case NETDEV_TX_OK:
@@ -345,12 +348,18 @@ struct Qdisc_ops noop_qdisc_ops __read_mostly = {
345 .owner = THIS_MODULE, 348 .owner = THIS_MODULE,
346}; 349};
347 350
351static struct netdev_queue noop_netdev_queue = {
352 .lock = __SPIN_LOCK_UNLOCKED(noop_netdev_queue.lock),
353 .qdisc = &noop_qdisc,
354};
355
348struct Qdisc noop_qdisc = { 356struct Qdisc noop_qdisc = {
349 .enqueue = noop_enqueue, 357 .enqueue = noop_enqueue,
350 .dequeue = noop_dequeue, 358 .dequeue = noop_dequeue,
351 .flags = TCQ_F_BUILTIN, 359 .flags = TCQ_F_BUILTIN,
352 .ops = &noop_qdisc_ops, 360 .ops = &noop_qdisc_ops,
353 .list = LIST_HEAD_INIT(noop_qdisc.list), 361 .list = LIST_HEAD_INIT(noop_qdisc.list),
362 .dev_queue = &noop_netdev_queue,
354}; 363};
355EXPORT_SYMBOL(noop_qdisc); 364EXPORT_SYMBOL(noop_qdisc);
356 365
@@ -666,19 +675,21 @@ static bool some_qdisc_is_running(struct net_device *dev, int lock)
666 675
667 for (i = 0; i < dev->num_tx_queues; i++) { 676 for (i = 0; i < dev->num_tx_queues; i++) {
668 struct netdev_queue *dev_queue; 677 struct netdev_queue *dev_queue;
678 spinlock_t *root_lock;
669 struct Qdisc *q; 679 struct Qdisc *q;
670 int val; 680 int val;
671 681
672 dev_queue = netdev_get_tx_queue(dev, i); 682 dev_queue = netdev_get_tx_queue(dev, i);
673 q = dev_queue->qdisc; 683 q = dev_queue->qdisc;
684 root_lock = qdisc_root_lock(q);
674 685
675 if (lock) 686 if (lock)
676 spin_lock_bh(&dev_queue->lock); 687 spin_lock_bh(root_lock);
677 688
678 val = test_bit(__QDISC_STATE_RUNNING, &q->state); 689 val = test_bit(__QDISC_STATE_RUNNING, &q->state);
679 690
680 if (lock) 691 if (lock)
681 spin_unlock_bh(&dev_queue->lock); 692 spin_unlock_bh(root_lock);
682 693
683 if (val) 694 if (val)
684 return true; 695 return true;