author	David S. Miller <davem@davemloft.net>	2008-07-08 20:18:23 -0400
committer	David S. Miller <davem@davemloft.net>	2008-07-08 20:18:23 -0400
commit	dc2b48475a0a36f8b3bbb2da60d3a006dc5c2c84 (patch)
tree	b2421a338840bd1c675f4f91de7c7cf03863fb78 /net/sched/sch_generic.c
parent	5ce2d488fe039ddd86a638496cf704df86c74eeb (diff)
netdev: Move queue_lock into struct netdev_queue.
The lock is now an attribute of the device queue. One thing to notice is that "suspicious" places emerge which will need specific training about multiple queue handling. They are so marked with explicit "netdev->rx_queue" and "netdev->tx_queue" references.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--	net/sched/sch_generic.c	40
1 file changed, 20 insertions(+), 20 deletions(-)
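To make the structural change concrete before reading the diff, here is a minimal user-space sketch of the before/after layout. The struct names and the pthread spinlock are stand-ins for illustration only, not the real kernel definitions: the point is that the lock moves from the device itself into a per-queue structure, so the TX path now reaches for dev->tx_queue.lock instead of dev->queue_lock.

/* Simplified user-space model of the layout change; the real
 * struct net_device and struct netdev_queue have many more fields. */
#include <pthread.h>
#include <stdio.h>

/* Before: one lock hung directly off the device. */
struct old_net_device_model {
	pthread_spinlock_t queue_lock;		/* models dev->queue_lock */
};

/* After: the lock is an attribute of the queue itself. */
struct netdev_queue_model {
	pthread_spinlock_t lock;		/* models queue->lock */
};

struct new_net_device_model {
	struct netdev_queue_model rx_queue;
	struct netdev_queue_model tx_queue;	/* dev->tx_queue.lock replaces dev->queue_lock */
};

int main(void)
{
	struct new_net_device_model dev;

	pthread_spin_init(&dev.tx_queue.lock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_lock(&dev.tx_queue.lock);	/* was: lock the device-wide queue_lock */
	printf("tx queue locked via its own embedded lock\n");
	pthread_spin_unlock(&dev.tx_queue.lock);
	pthread_spin_destroy(&dev.tx_queue.lock);
	return 0;
}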
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index b626a4f32b6b..ee8f9f78a095 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -29,31 +29,31 @@
 /* Main transmission queue. */
 
 /* Modifications to data participating in scheduling must be protected with
- * dev->queue_lock spinlock.
+ * queue->lock spinlock.
  *
  * The idea is the following:
  * - enqueue, dequeue are serialized via top level device
- *   spinlock dev->queue_lock.
+ *   spinlock queue->lock.
  * - ingress filtering is serialized via top level device
  *   spinlock dev->ingress_lock.
  * - updates to tree and tree walking are only done under the rtnl mutex.
  */
 
 void qdisc_lock_tree(struct net_device *dev)
-	__acquires(dev->queue_lock)
+	__acquires(dev->tx_queue.lock)
 	__acquires(dev->ingress_lock)
 {
-	spin_lock_bh(&dev->queue_lock);
+	spin_lock_bh(&dev->tx_queue.lock);
 	spin_lock(&dev->ingress_lock);
 }
 EXPORT_SYMBOL(qdisc_lock_tree);
 
 void qdisc_unlock_tree(struct net_device *dev)
 	__releases(dev->ingress_lock)
-	__releases(dev->queue_lock)
+	__releases(dev->tx_queue.lock)
 {
 	spin_unlock(&dev->ingress_lock);
-	spin_unlock_bh(&dev->queue_lock);
+	spin_unlock_bh(&dev->tx_queue.lock);
 }
 EXPORT_SYMBOL(qdisc_unlock_tree);
 
@@ -118,15 +118,15 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 }
 
 /*
- * NOTE: Called under dev->queue_lock with locally disabled BH.
+ * NOTE: Called under queue->lock with locally disabled BH.
  *
  * __LINK_STATE_QDISC_RUNNING guarantees only one CPU can process this
- * device at a time. dev->queue_lock serializes queue accesses for
+ * device at a time. queue->lock serializes queue accesses for
  * this device AND dev->qdisc pointer itself.
  *
  * netif_tx_lock serializes accesses to device driver.
  *
- * dev->queue_lock and netif_tx_lock are mutually exclusive,
+ * queue->lock and netif_tx_lock are mutually exclusive,
  * if one is grabbed, another must be free.
  *
  * Note, that this procedure can be called by a watchdog timer
@@ -148,14 +148,14 @@ static inline int qdisc_restart(struct net_device *dev)
 
 
 	/* And release queue */
-	spin_unlock(&dev->queue_lock);
+	spin_unlock(&q->dev_queue->lock);
 
 	HARD_TX_LOCK(dev, smp_processor_id());
 	if (!netif_subqueue_stopped(dev, skb))
 		ret = dev_hard_start_xmit(skb, dev);
 	HARD_TX_UNLOCK(dev);
 
-	spin_lock(&dev->queue_lock);
+	spin_lock(&q->dev_queue->lock);
 	q = dev->qdisc;
 
 	switch (ret) {
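The hunk above is the code that the comment block two hunks up describes: the per-queue lock is dropped before the driver is entered under netif_tx_lock, so the two locks are never held together. A minimal user-space model of that hand-off pattern, with pthread mutexes standing in for the kernel spinlocks and all names hypothetical:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;	/* models queue->lock */
static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;	/* models netif_tx_lock */

/* Models qdisc_restart(): dequeue under the queue lock, drop it,
 * drive the hardware under the TX lock, then reacquire the queue lock. */
static void transmit_one(void)
{
	pthread_mutex_lock(&queue_lock);
	/* ... dequeue a packet from the qdisc ... */
	pthread_mutex_unlock(&queue_lock);	/* never hold both locks at once */

	pthread_mutex_lock(&tx_lock);
	printf("hand packet to the driver\n");
	pthread_mutex_unlock(&tx_lock);

	pthread_mutex_lock(&queue_lock);
	/* ... examine the return code, possibly requeue ... */
	pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
	transmit_one();
	return 0;
}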
@@ -482,7 +482,7 @@ struct Qdisc * qdisc_create_dflt(struct net_device *dev,
 	sch = qdisc_alloc(dev_queue, ops);
 	if (IS_ERR(sch))
 		goto errout;
-	sch->stats_lock = &dev->queue_lock;
+	sch->stats_lock = &dev_queue->lock;
 	sch->parent = parentid;
 
 	if (!ops->init || ops->init(sch, NULL) == 0)
@@ -494,7 +494,7 @@ errout:
 }
 EXPORT_SYMBOL(qdisc_create_dflt);
 
-/* Under dev->queue_lock and BH! */
+/* Under queue->lock and BH! */
 
 void qdisc_reset(struct Qdisc *qdisc)
 {
@@ -514,7 +514,7 @@ static void __qdisc_destroy(struct rcu_head *head)
 	kfree((char *) qdisc - qdisc->padded);
 }
 
-/* Under dev->queue_lock and BH! */
+/* Under queue->lock and BH! */
 
 void qdisc_destroy(struct Qdisc *qdisc)
 {
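The two "Under queue->lock and BH!" comments above are a contract for callers rather than something qdisc_reset() or qdisc_destroy() enforce themselves. A hedged kernel-style sketch of what a compliant caller would look like after this patch (the wrapper function is hypothetical; the lock and the callee come from the hunks above):

/* Hypothetical caller honoring the "Under queue->lock and BH!" contract:
 * only call qdisc_reset()/qdisc_destroy() with the TX queue's lock held
 * and bottom halves disabled. */
static void example_flush_qdisc(struct net_device *dev)
{
	spin_lock_bh(&dev->tx_queue.lock);
	qdisc_reset(dev->qdisc);
	spin_unlock_bh(&dev->tx_queue.lock);
}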
@@ -566,13 +566,13 @@ void dev_activate(struct net_device *dev)
 		/* Delay activation until next carrier-on event */
 		return;
 
-	spin_lock_bh(&dev->queue_lock);
+	spin_lock_bh(&dev->tx_queue.lock);
 	rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
 	if (dev->qdisc != &noqueue_qdisc) {
 		dev->trans_start = jiffies;
 		dev_watchdog_up(dev);
 	}
-	spin_unlock_bh(&dev->queue_lock);
+	spin_unlock_bh(&dev->tx_queue.lock);
 }
 
 void dev_deactivate(struct net_device *dev)
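dev_activate() publishes the sleeping qdisc under the queue's own lock with BH disabled, since the transmit path that reads dev->qdisc can run in softirq context and takes the same lock. A condensed restatement of that publish step as it reads after this patch (identifiers taken from the hunk above, trimmed to the locking skeleton):

	spin_lock_bh(&dev->tx_queue.lock);		/* keep the softirq TX path out */
	rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
	if (dev->qdisc != &noqueue_qdisc)
		dev_watchdog_up(dev);			/* arm the TX watchdog */
	spin_unlock_bh(&dev->tx_queue.lock);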
@@ -581,7 +581,7 @@ void dev_deactivate(struct net_device *dev)
 	struct sk_buff *skb;
 	int running;
 
-	spin_lock_bh(&dev->queue_lock);
+	spin_lock_bh(&dev->tx_queue.lock);
 	qdisc = dev->qdisc;
 	dev->qdisc = &noop_qdisc;
 
@@ -589,7 +589,7 @@ void dev_deactivate(struct net_device *dev)
 
 	skb = dev->gso_skb;
 	dev->gso_skb = NULL;
-	spin_unlock_bh(&dev->queue_lock);
+	spin_unlock_bh(&dev->tx_queue.lock);
 
 	kfree_skb(skb);
 
@@ -607,9 +607,9 @@ void dev_deactivate(struct net_device *dev)
 		 * Double-check inside queue lock to ensure that all effects
 		 * of the queue run are visible when we return.
 		 */
-		spin_lock_bh(&dev->queue_lock);
+		spin_lock_bh(&dev->tx_queue.lock);
 		running = test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
-		spin_unlock_bh(&dev->queue_lock);
+		spin_unlock_bh(&dev->tx_queue.lock);
 
 		/*
 		 * The running flag should never be set at this point because
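The double-check above is the usual "re-test the flag under the same lock" pattern: only after taking the queue lock can dev_deactivate() be sure it observes every effect of a concurrent qdisc run before returning. A minimal user-space model of the pattern, with a pthread mutex and a plain flag standing in for the spinlock and the __LINK_STATE_QDISC_RUNNING bit (names hypothetical):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;	/* models dev->tx_queue.lock */
static bool qdisc_running;					/* models __LINK_STATE_QDISC_RUNNING */

/* Models the tail of dev_deactivate(): re-read the running flag under the
 * lock so everything a concurrent queue run did is visible before returning. */
static bool queue_still_running(void)
{
	bool running;

	pthread_mutex_lock(&queue_lock);
	running = qdisc_running;
	pthread_mutex_unlock(&queue_lock);
	return running;
}

int main(void)
{
	printf("running=%d\n", queue_still_running());
	return 0;
}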