 drivers/net/ifb.c          |  8
 include/linux/netdevice.h  |  4
 net/core/dev.c             | 33
 net/mac80211/main.c        | 10
 net/mac80211/wme.c         |  2
 net/sched/sch_api.c        |  2
 net/sched/sch_cbq.c        |  8
 net/sched/sch_generic.c    | 40
 net/sched/sch_hfsc.c       |  4
 net/sched/sch_htb.c        | 16
 net/sched/sch_netem.c      |  4
 net/sched/sch_teql.c       |  4
 12 files changed, 73 insertions(+), 62 deletions(-)
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index af233b59153..bc3de272a82 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -229,12 +229,12 @@ module_param(numifbs, int, 0);
 MODULE_PARM_DESC(numifbs, "Number of ifb devices");
 
 /*
- * dev_ifb->queue_lock is usually taken after dev->ingress_lock,
+ * dev_ifb->tx_queue.lock is usually taken after dev->ingress_lock,
  * reversely to e.g. qdisc_lock_tree(). It should be safe until
- * ifb doesn't take dev->queue_lock with dev_ifb->ingress_lock.
+ * ifb doesn't take dev->tx_queue.lock with dev_ifb->ingress_lock.
  * But lockdep should know that ifb has different locks from dev.
  */
-static struct lock_class_key ifb_queue_lock_key;
+static struct lock_class_key ifb_tx_queue_lock_key;
 static struct lock_class_key ifb_ingress_lock_key;
 
 
@@ -258,7 +258,7 @@ static int __init ifb_init_one(int index)
 	if (err < 0)
 		goto err;
 
-	lockdep_set_class(&dev_ifb->queue_lock, &ifb_queue_lock_key);
+	lockdep_set_class(&dev_ifb->tx_queue.lock, &ifb_tx_queue_lock_key);
 	lockdep_set_class(&dev_ifb->ingress_lock, &ifb_ingress_lock_key);
 
 	return 0;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 515fd25bf0f..e835acacb47 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -449,6 +449,7 @@ static inline void napi_synchronize(const struct napi_struct *n)
 #endif
 
 struct netdev_queue {
+	spinlock_t		lock;
 	struct net_device	*dev;
 };
 
@@ -629,7 +630,7 @@ struct net_device
 	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add */
 
 	struct netdev_queue	rx_queue;
-	struct netdev_queue	tx_queue;
+	struct netdev_queue	tx_queue ____cacheline_aligned_in_smp;
 
 	/* ingress path synchronizer */
 	spinlock_t		ingress_lock;
@@ -639,7 +640,6 @@ struct net_device
 	 * Cache line mostly used on queue transmit path (qdisc)
 	 */
 	/* device queue lock */
-	spinlock_t		queue_lock ____cacheline_aligned_in_smp;
 	struct Qdisc		*qdisc;
 	struct Qdisc		*qdisc_sleeping;
 	struct list_head	qdisc_list;
diff --git a/net/core/dev.c b/net/core/dev.c
index 9b281c906eb..05011048b86 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1667,6 +1667,7 @@ out_kfree_skb:
 int dev_queue_xmit(struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
+	struct netdev_queue *txq;
 	struct Qdisc *q;
 	int rc = -ENOMEM;
 
@@ -1699,14 +1700,15 @@ int dev_queue_xmit(struct sk_buff *skb)
 	}
 
 gso:
-	spin_lock_prefetch(&dev->queue_lock);
+	txq = &dev->tx_queue;
+	spin_lock_prefetch(&txq->lock);
 
 	/* Disable soft irqs for various locks below. Also
 	 * stops preemption for RCU.
 	 */
 	rcu_read_lock_bh();
 
-	/* Updates of qdisc are serialized by queue_lock.
+	/* Updates of qdisc are serialized by queue->lock.
 	 * The struct Qdisc which is pointed to by qdisc is now a
 	 * rcu structure - it may be accessed without acquiring
 	 * a lock (but the structure may be stale.) The freeing of the
@@ -1714,7 +1716,7 @@ gso:
 	 * more references to it.
 	 *
 	 * If the qdisc has an enqueue function, we still need to
-	 * hold the queue_lock before calling it, since queue_lock
+	 * hold the queue->lock before calling it, since queue->lock
 	 * also serializes access to the device queue.
 	 */
 
@@ -1724,19 +1726,19 @@ gso:
 #endif
 	if (q->enqueue) {
 		/* Grab device queue */
-		spin_lock(&dev->queue_lock);
+		spin_lock(&txq->lock);
 		q = dev->qdisc;
 		if (q->enqueue) {
 			/* reset queue_mapping to zero */
 			skb_set_queue_mapping(skb, 0);
 			rc = q->enqueue(skb, q);
 			qdisc_run(dev);
-			spin_unlock(&dev->queue_lock);
+			spin_unlock(&txq->lock);
 
 			rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
 			goto out;
 		}
-		spin_unlock(&dev->queue_lock);
+		spin_unlock(&txq->lock);
 	}
 
 	/* The device has no queue. Common case for software devices:
@@ -1919,14 +1921,17 @@ static void net_tx_action(struct softirq_action *h)
 
 	while (head) {
 		struct net_device *dev = head;
+		struct netdev_queue *txq;
 		head = head->next_sched;
 
+		txq = &dev->tx_queue;
+
 		smp_mb__before_clear_bit();
 		clear_bit(__LINK_STATE_SCHED, &dev->state);
 
-		if (spin_trylock(&dev->queue_lock)) {
+		if (spin_trylock(&txq->lock)) {
 			qdisc_run(dev);
-			spin_unlock(&dev->queue_lock);
+			spin_unlock(&txq->lock);
 		} else {
 			netif_schedule(dev);
 		}
@@ -3787,7 +3792,6 @@ int register_netdevice(struct net_device *dev)
 	BUG_ON(!dev_net(dev));
 	net = dev_net(dev);
 
-	spin_lock_init(&dev->queue_lock);
 	spin_lock_init(&dev->_xmit_lock);
 	netdev_set_lockdep_class(&dev->_xmit_lock, dev->type);
 	dev->xmit_lock_owner = -1;
@@ -4072,10 +4076,17 @@ static struct net_device_stats *internal_stats(struct net_device *dev)
 	return &dev->stats;
 }
 
+static void netdev_init_one_queue(struct net_device *dev,
+				  struct netdev_queue *queue)
+{
+	spin_lock_init(&queue->lock);
+	queue->dev = dev;
+}
+
 static void netdev_init_queues(struct net_device *dev)
 {
-	dev->rx_queue.dev = dev;
-	dev->tx_queue.dev = dev;
+	netdev_init_one_queue(dev, &dev->rx_queue);
+	netdev_init_one_queue(dev, &dev->tx_queue);
 }
 
 /**
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index cf477ad39da..12aeaf78ae7 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -636,7 +636,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 
 	/* ensure that TX flow won't interrupt us
 	 * until the end of the call to requeue function */
-	spin_lock_bh(&local->mdev->queue_lock);
+	spin_lock_bh(&local->mdev->tx_queue.lock);
 
 	/* create a new queue for this aggregation */
 	ret = ieee80211_ht_agg_queue_add(local, sta, tid);
@@ -675,7 +675,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 
 	/* Will put all the packets in the new SW queue */
 	ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
-	spin_unlock_bh(&local->mdev->queue_lock);
+	spin_unlock_bh(&local->mdev->tx_queue.lock);
 	spin_unlock_bh(&sta->lock);
 
 	/* send an addBA request */
@@ -701,7 +701,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 err_unlock_queue:
 	kfree(sta->ampdu_mlme.tid_tx[tid]);
 	sta->ampdu_mlme.tid_tx[tid] = NULL;
-	spin_unlock_bh(&local->mdev->queue_lock);
+	spin_unlock_bh(&local->mdev->tx_queue.lock);
 	ret = -EBUSY;
 err_unlock_sta:
 	spin_unlock_bh(&sta->lock);
@@ -875,10 +875,10 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
 
 	/* avoid ordering issues: we are the only one that can modify
 	 * the content of the qdiscs */
-	spin_lock_bh(&local->mdev->queue_lock);
+	spin_lock_bh(&local->mdev->tx_queue.lock);
 	/* remove the queue for this aggregation */
 	ieee80211_ht_agg_queue_remove(local, sta, tid, 1);
-	spin_unlock_bh(&local->mdev->queue_lock);
+	spin_unlock_bh(&local->mdev->tx_queue.lock);
 
 	/* we just requeued the all the frames that were in the removed
 	 * queue, and since we might miss a softirq we do netif_schedule.
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 2fbc171130b..59ed9cae66b 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -648,7 +648,7 @@ int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
 }
 
 /**
- * the caller needs to hold local->mdev->queue_lock
+ * the caller needs to hold local->mdev->tx_queue.lock
  */
 void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
 				   struct sta_info *sta, u16 tid,
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 1f893082a4f..2a1834f8c7d 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -606,7 +606,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
 		sch->stats_lock = &dev->ingress_lock;
 		handle = TC_H_MAKE(TC_H_INGRESS, 0);
 	} else {
-		sch->stats_lock = &dev->queue_lock;
+		sch->stats_lock = &dev_queue->lock;
 		if (handle == 0) {
 			handle = qdisc_alloc_handle(dev);
 			err = -ENOMEM;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 9f2ace585fd..99ce3da2b0a 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1746,10 +1746,10 @@ static void cbq_put(struct Qdisc *sch, unsigned long arg)
 #ifdef CONFIG_NET_CLS_ACT
 		struct cbq_sched_data *q = qdisc_priv(sch);
 
-		spin_lock_bh(&qdisc_dev(sch)->queue_lock);
+		spin_lock_bh(&sch->dev_queue->lock);
 		if (q->rx_class == cl)
 			q->rx_class = NULL;
-		spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
+		spin_unlock_bh(&sch->dev_queue->lock);
 #endif
 
 		cbq_destroy_class(sch, cl);
@@ -1828,7 +1828,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 
 		if (tca[TCA_RATE])
 			gen_replace_estimator(&cl->bstats, &cl->rate_est,
-					      &qdisc_dev(sch)->queue_lock,
+					      &sch->dev_queue->lock,
 					      tca[TCA_RATE]);
 		return 0;
 	}
@@ -1919,7 +1919,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 
 	if (tca[TCA_RATE])
 		gen_new_estimator(&cl->bstats, &cl->rate_est,
-				  &qdisc_dev(sch)->queue_lock, tca[TCA_RATE]);
+				  &sch->dev_queue->lock, tca[TCA_RATE]);
 
 	*arg = (unsigned long)cl;
 	return 0;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index b626a4f32b6..ee8f9f78a09 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -29,31 +29,31 @@
 /* Main transmission queue. */
 
 /* Modifications to data participating in scheduling must be protected with
- * dev->queue_lock spinlock.
+ * queue->lock spinlock.
  *
  * The idea is the following:
  * - enqueue, dequeue are serialized via top level device
- *   spinlock dev->queue_lock.
+ *   spinlock queue->lock.
  * - ingress filtering is serialized via top level device
  *   spinlock dev->ingress_lock.
  * - updates to tree and tree walking are only done under the rtnl mutex.
  */
 
 void qdisc_lock_tree(struct net_device *dev)
-	__acquires(dev->queue_lock)
+	__acquires(dev->tx_queue.lock)
 	__acquires(dev->ingress_lock)
 {
-	spin_lock_bh(&dev->queue_lock);
+	spin_lock_bh(&dev->tx_queue.lock);
 	spin_lock(&dev->ingress_lock);
 }
 EXPORT_SYMBOL(qdisc_lock_tree);
 
 void qdisc_unlock_tree(struct net_device *dev)
 	__releases(dev->ingress_lock)
-	__releases(dev->queue_lock)
+	__releases(dev->tx_queue.lock)
 {
 	spin_unlock(&dev->ingress_lock);
-	spin_unlock_bh(&dev->queue_lock);
+	spin_unlock_bh(&dev->tx_queue.lock);
 }
 EXPORT_SYMBOL(qdisc_unlock_tree);
 
@@ -118,15 +118,15 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 }
 
 /*
- * NOTE: Called under dev->queue_lock with locally disabled BH.
+ * NOTE: Called under queue->lock with locally disabled BH.
  *
  * __LINK_STATE_QDISC_RUNNING guarantees only one CPU can process this
- * device at a time. dev->queue_lock serializes queue accesses for
+ * device at a time. queue->lock serializes queue accesses for
  * this device AND dev->qdisc pointer itself.
  *
  * netif_tx_lock serializes accesses to device driver.
  *
- * dev->queue_lock and netif_tx_lock are mutually exclusive,
+ * queue->lock and netif_tx_lock are mutually exclusive,
  * if one is grabbed, another must be free.
  *
  * Note, that this procedure can be called by a watchdog timer
@@ -148,14 +148,14 @@ static inline int qdisc_restart(struct net_device *dev)
 
 
 	/* And release queue */
-	spin_unlock(&dev->queue_lock);
+	spin_unlock(&q->dev_queue->lock);
 
 	HARD_TX_LOCK(dev, smp_processor_id());
 	if (!netif_subqueue_stopped(dev, skb))
 		ret = dev_hard_start_xmit(skb, dev);
 	HARD_TX_UNLOCK(dev);
 
-	spin_lock(&dev->queue_lock);
+	spin_lock(&q->dev_queue->lock);
 	q = dev->qdisc;
 
 	switch (ret) {
@@ -482,7 +482,7 @@ struct Qdisc * qdisc_create_dflt(struct net_device *dev,
 	sch = qdisc_alloc(dev_queue, ops);
 	if (IS_ERR(sch))
 		goto errout;
-	sch->stats_lock = &dev->queue_lock;
+	sch->stats_lock = &dev_queue->lock;
 	sch->parent = parentid;
 
 	if (!ops->init || ops->init(sch, NULL) == 0)
@@ -494,7 +494,7 @@ errout:
 }
 EXPORT_SYMBOL(qdisc_create_dflt);
 
-/* Under dev->queue_lock and BH! */
+/* Under queue->lock and BH! */
 
 void qdisc_reset(struct Qdisc *qdisc)
 {
@@ -514,7 +514,7 @@ static void __qdisc_destroy(struct rcu_head *head)
 	kfree((char *) qdisc - qdisc->padded);
 }
 
-/* Under dev->queue_lock and BH! */
+/* Under queue->lock and BH! */
 
 void qdisc_destroy(struct Qdisc *qdisc)
 {
@@ -566,13 +566,13 @@ void dev_activate(struct net_device *dev)
 		/* Delay activation until next carrier-on event */
 		return;
 
-	spin_lock_bh(&dev->queue_lock);
+	spin_lock_bh(&dev->tx_queue.lock);
 	rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
 	if (dev->qdisc != &noqueue_qdisc) {
 		dev->trans_start = jiffies;
 		dev_watchdog_up(dev);
 	}
-	spin_unlock_bh(&dev->queue_lock);
+	spin_unlock_bh(&dev->tx_queue.lock);
 }
 
 void dev_deactivate(struct net_device *dev)
@@ -581,7 +581,7 @@ void dev_deactivate(struct net_device *dev)
 	struct sk_buff *skb;
 	int running;
 
-	spin_lock_bh(&dev->queue_lock);
+	spin_lock_bh(&dev->tx_queue.lock);
 	qdisc = dev->qdisc;
 	dev->qdisc = &noop_qdisc;
 
@@ -589,7 +589,7 @@ void dev_deactivate(struct net_device *dev)
 
 	skb = dev->gso_skb;
 	dev->gso_skb = NULL;
-	spin_unlock_bh(&dev->queue_lock);
+	spin_unlock_bh(&dev->tx_queue.lock);
 
 	kfree_skb(skb);
 
@@ -607,9 +607,9 @@ void dev_deactivate(struct net_device *dev)
 	 * Double-check inside queue lock to ensure that all effects
 	 * of the queue run are visible when we return.
 	 */
-	spin_lock_bh(&dev->queue_lock);
+	spin_lock_bh(&dev->tx_queue.lock);
 	running = test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
-	spin_unlock_bh(&dev->queue_lock);
+	spin_unlock_bh(&dev->tx_queue.lock);
 
 	/*
 	 * The running flag should never be set at this point because
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 333525422f4..997d520ca58 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1045,7 +1045,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 
 		if (tca[TCA_RATE])
 			gen_replace_estimator(&cl->bstats, &cl->rate_est,
-					      &qdisc_dev(sch)->queue_lock,
+					      &sch->dev_queue->lock,
 					      tca[TCA_RATE]);
 		return 0;
 	}
@@ -1104,7 +1104,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 
 	if (tca[TCA_RATE])
 		gen_new_estimator(&cl->bstats, &cl->rate_est,
-				  &qdisc_dev(sch)->queue_lock, tca[TCA_RATE]);
+				  &sch->dev_queue->lock, tca[TCA_RATE]);
 	*arg = (unsigned long)cl;
 	return 0;
 }
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 31f7d1536e6..c8ca54cc26b 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1043,7 +1043,7 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
 	struct nlattr *nest;
 	struct tc_htb_glob gopt;
 
-	spin_lock_bh(&qdisc_dev(sch)->queue_lock);
+	spin_lock_bh(&sch->dev_queue->lock);
 
 	gopt.direct_pkts = q->direct_pkts;
 	gopt.version = HTB_VER;
@@ -1057,11 +1057,11 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
 	NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
 	nla_nest_end(skb, nest);
 
-	spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
+	spin_unlock_bh(&sch->dev_queue->lock);
 	return skb->len;
 
 nla_put_failure:
-	spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
+	spin_unlock_bh(&sch->dev_queue->lock);
 	nla_nest_cancel(skb, nest);
 	return -1;
 }
@@ -1073,7 +1073,7 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
 	struct nlattr *nest;
 	struct tc_htb_opt opt;
 
-	spin_lock_bh(&qdisc_dev(sch)->queue_lock);
+	spin_lock_bh(&sch->dev_queue->lock);
 	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
 	tcm->tcm_handle = cl->common.classid;
 	if (!cl->level && cl->un.leaf.q)
@@ -1095,11 +1095,11 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
 	NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
 
 	nla_nest_end(skb, nest);
-	spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
+	spin_unlock_bh(&sch->dev_queue->lock);
 	return skb->len;
 
 nla_put_failure:
-	spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
+	spin_unlock_bh(&sch->dev_queue->lock);
 	nla_nest_cancel(skb, nest);
 	return -1;
 }
@@ -1365,7 +1365,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 			goto failure;
 
 		gen_new_estimator(&cl->bstats, &cl->rate_est,
-				  &qdisc_dev(sch)->queue_lock,
+				  &sch->dev_queue->lock,
 				  tca[TCA_RATE] ? : &est.nla);
 		cl->refcnt = 1;
 		cl->children = 0;
@@ -1420,7 +1420,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 	} else {
 		if (tca[TCA_RATE])
 			gen_replace_estimator(&cl->bstats, &cl->rate_est,
-					      &qdisc_dev(sch)->queue_lock,
+					      &sch->dev_queue->lock,
 					      tca[TCA_RATE]);
 		sch_tree_lock(sch);
 	}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 79058296044..71b73c528f9 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -333,9 +333,9 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
 	for (i = 0; i < n; i++)
 		d->table[i] = data[i];
 
-	spin_lock_bh(&qdisc_dev(sch)->queue_lock);
+	spin_lock_bh(&sch->dev_queue->lock);
 	d = xchg(&q->delay_dist, d);
-	spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
+	spin_unlock_bh(&sch->dev_queue->lock);
 
 	kfree(d);
 	return 0;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index b3fc82623fc..4f3054e8e1a 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -154,9 +154,9 @@ teql_destroy(struct Qdisc* sch)
 				master->slaves = NEXT_SLAVE(q);
 				if (q == master->slaves) {
 					master->slaves = NULL;
-					spin_lock_bh(&master->dev->queue_lock);
+					spin_lock_bh(&master->dev->tx_queue.lock);
 					qdisc_reset(master->dev->qdisc);
-					spin_unlock_bh(&master->dev->queue_lock);
+					spin_unlock_bh(&master->dev->tx_queue.lock);
 				}
 			}
 			skb_queue_purge(&dat->q);