about · summary · refs · log · tree · commit · diff · stats
path: root/net/sched/sch_generic.c
diff options
context:
space:
mode:
author    David S. Miller <davem@davemloft.net>  2008-07-17 03:53:03 -0400
committer David S. Miller <davem@davemloft.net>  2008-07-17 22:21:30 -0400
commit    83874000929ed63aef30b44083a9f713135ff040 (patch)
tree      7646fd185751cad8665eca19aa3f87d13c37eade /net/sched/sch_generic.c
parent    c7e4f3bbb4ba4e48ab3b529d5016e454cee1ccd6 (diff)
pkt_sched: Kill netdev_queue lock.
We can simply use the qdisc->q.lock for all of the qdisc tree synchronization.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--  net/sched/sch_generic.c | 32 +++++++++++++++---------------
1 file changed, 15 insertions(+), 17 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 3d53e92ad9c8..8fc580b3e173 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -96,15 +96,15 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 }
 
 /*
- * NOTE: Called under queue->lock with locally disabled BH.
+ * NOTE: Called under qdisc_lock(q) with locally disabled BH.
  *
  * __QDISC_STATE_RUNNING guarantees only one CPU can process
- * this qdisc at a time. queue->lock serializes queue accesses for
- * this queue AND txq->qdisc pointer itself.
+ * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
+ * this queue.
  *
  * netif_tx_lock serializes accesses to device driver.
  *
- * queue->lock and netif_tx_lock are mutually exclusive,
+ * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
  * if one is grabbed, another must be free.
  *
  * Note, that this procedure can be called by a watchdog timer
@@ -317,7 +317,6 @@ struct Qdisc_ops noop_qdisc_ops __read_mostly = {
 };
 
 static struct netdev_queue noop_netdev_queue = {
-	.lock		= __SPIN_LOCK_UNLOCKED(noop_netdev_queue.lock),
 	.qdisc		= &noop_qdisc,
 };
 
@@ -327,6 +326,7 @@ struct Qdisc noop_qdisc = {
 	.flags		= TCQ_F_BUILTIN,
 	.ops		= &noop_qdisc_ops,
 	.list		= LIST_HEAD_INIT(noop_qdisc.list),
+	.q.lock		= __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
 	.dev_queue	= &noop_netdev_queue,
 };
 EXPORT_SYMBOL(noop_qdisc);
@@ -498,7 +498,7 @@ errout:
 }
 EXPORT_SYMBOL(qdisc_create_dflt);
 
-/* Under queue->lock and BH! */
+/* Under qdisc_root_lock(qdisc) and BH! */
 
 void qdisc_reset(struct Qdisc *qdisc)
 {
@@ -526,10 +526,12 @@ static void __qdisc_destroy(struct rcu_head *head)
 	module_put(ops->owner);
 	dev_put(qdisc_dev(qdisc));
 
+	kfree_skb(qdisc->gso_skb);
+
 	kfree((char *) qdisc - qdisc->padded);
 }
 
-/* Under queue->lock and BH! */
+/* Under qdisc_root_lock(qdisc) and BH! */
 
 void qdisc_destroy(struct Qdisc *qdisc)
 {
@@ -586,13 +588,12 @@ static void transition_one_qdisc(struct net_device *dev,
 				 struct netdev_queue *dev_queue,
 				 void *_need_watchdog)
 {
+	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
 	int *need_watchdog_p = _need_watchdog;
 
-	spin_lock_bh(&dev_queue->lock);
-	rcu_assign_pointer(dev_queue->qdisc, dev_queue->qdisc_sleeping);
-	if (dev_queue->qdisc != &noqueue_qdisc)
+	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
+	if (new_qdisc != &noqueue_qdisc)
 		*need_watchdog_p = 1;
-	spin_unlock_bh(&dev_queue->lock);
 }
 
 void dev_activate(struct net_device *dev)
@@ -629,19 +630,16 @@ static void dev_deactivate_queue(struct net_device *dev,
 	struct sk_buff *skb = NULL;
 	struct Qdisc *qdisc;
 
-	spin_lock_bh(&dev_queue->lock);
-
 	qdisc = dev_queue->qdisc;
 	if (qdisc) {
+		spin_lock_bh(qdisc_lock(qdisc));
+
 		dev_queue->qdisc = qdisc_default;
 		qdisc_reset(qdisc);
 
-		skb = qdisc->gso_skb;
-		qdisc->gso_skb = NULL;
+		spin_unlock_bh(qdisc_lock(qdisc));
 	}
 
-	spin_unlock_bh(&dev_queue->lock);
-
 	kfree_skb(skb);
 }
 