author    David S. Miller <davem@davemloft.net>  2008-07-17 03:34:19 -0400
committer David S. Miller <davem@davemloft.net>  2008-07-17 22:21:00 -0400
commit    e8a0464cc950972824e2e128028ae3db666ec1ed
tree      5022b95396c0f3b313531bc39b19543c03551b9a
parent    070825b3840a743e21ebcc44f8279708a4fed977
netdev: Allocate multiple queues for TX.
alloc_netdev_mq() now allocates an array of netdev_queue structures for
TX, based upon the queue_count argument.

Furthermore, all accesses to the TX queues are now vectored through the
netdev_get_tx_queue() and netdev_for_each_tx_queue() interfaces.  This
makes it easy to grep the tree for all things that want to get to a TX
queue of a net device.

Problem spots which are not really multiqueue aware yet, and only work
with one queue, can easily be spotted by grepping for all
netdev_get_tx_queue() calls that pass in a zero index.

Signed-off-by: David S. Miller <davem@davemloft.net>
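A minimal sketch of the two access patterns, as hypothetical driver
code (not part of this patch; the example_* names are invented for
illustration):

#include <linux/netdevice.h>

/* Callback shape expected by netdev_for_each_tx_queue(). */
static void example_stop_one_txq(struct net_device *dev,
				 struct netdev_queue *txq,
				 void *_unused)
{
	netif_tx_stop_queue(txq);
}

/* Multiqueue-aware: visits every TX queue the device allocated. */
static void example_stop_all_tx(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, example_stop_one_txq, NULL);
}

/* Not multiqueue-aware: the hard-coded zero index is the grep-able
 * "problem spot" described above. */
static struct netdev_queue *example_default_txq(struct net_device *dev)
{
	return netdev_get_tx_queue(dev, 0);
}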
-rw-r--r--  drivers/net/bonding/bond_main.c            6
-rw-r--r--  drivers/net/hamradio/bpqether.c            6
-rw-r--r--  drivers/net/ifb.c                         12
-rw-r--r--  drivers/net/macvlan.c                      6
-rw-r--r--  drivers/net/wireless/hostap/hostap_hw.c    6
-rw-r--r--  include/linux/netdevice.h                 69
-rw-r--r--  include/net/sch_generic.h                 37
-rw-r--r--  net/8021q/vlan_dev.c                      10
-rw-r--r--  net/core/dev.c                            40
-rw-r--r--  net/core/rtnetlink.c                       2
-rw-r--r--  net/mac80211/main.c                        4
-rw-r--r--  net/mac80211/wme.c                        12
-rw-r--r--  net/netrom/af_netrom.c                     6
-rw-r--r--  net/rose/af_rose.c                         6
-rw-r--r--  net/sched/cls_api.c                        4
-rw-r--r--  net/sched/sch_api.c                       32
-rw-r--r--  net/sched/sch_generic.c                  178
-rw-r--r--  net/sched/sch_teql.c                      21
18 files changed, 320 insertions(+), 137 deletions(-)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index fd87dbe7999a..9737c06045d6 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -5042,7 +5042,9 @@ static int bond_check_params(struct bond_params *params)
 
 static struct lock_class_key bonding_netdev_xmit_lock_key;
 
-static void bond_set_lockdep_class_one(struct netdev_queue *txq)
+static void bond_set_lockdep_class_one(struct net_device *dev,
+				       struct netdev_queue *txq,
+				       void *_unused)
 {
 	lockdep_set_class(&txq->_xmit_lock,
 			  &bonding_netdev_xmit_lock_key);
@@ -5050,7 +5052,7 @@ static void bond_set_lockdep_class_one(struct netdev_queue *txq)
 
 static void bond_set_lockdep_class(struct net_device *dev)
 {
-	bond_set_lockdep_class_one(&dev->tx_queue);
+	netdev_for_each_tx_queue(dev, bond_set_lockdep_class_one, NULL);
 }
 
 /* Create a new bond based on the specified name and bonding parameters.
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index fb186b8c3d4d..b6500b2aacf2 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -124,14 +124,16 @@ static LIST_HEAD(bpq_devices);
  */
 static struct lock_class_key bpq_netdev_xmit_lock_key;
 
-static void bpq_set_lockdep_class_one(struct netdev_queue *txq)
+static void bpq_set_lockdep_class_one(struct net_device *dev,
+				      struct netdev_queue *txq,
+				      void *_unused)
 {
 	lockdep_set_class(&txq->_xmit_lock, &bpq_netdev_xmit_lock_key);
 }
 
 static void bpq_set_lockdep_class(struct net_device *dev)
 {
-	bpq_set_lockdep_class_one(&dev->tx_queue);
+	netdev_for_each_tx_queue(dev, bpq_set_lockdep_class_one, NULL);
 }
 
 /* ------------------------------------------------------------------------ */
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index ccbd6554f6eb..897b05e79ed0 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -229,14 +229,20 @@ module_param(numifbs, int, 0);
 MODULE_PARM_DESC(numifbs, "Number of ifb devices");
 
 /*
- * dev_ifb->tx_queue.lock is usually taken after dev->rx_queue.lock,
+ * dev_ifb's TX queue lock is usually taken after dev->rx_queue.lock,
  * reversely to e.g. qdisc_lock_tree(). It should be safe until
- * ifb doesn't take dev->tx_queue.lock with dev_ifb->rx_queue.lock.
+ * ifb doesn't take dev's TX queue lock with dev_ifb->rx_queue.lock.
  * But lockdep should know that ifb has different locks from dev.
  */
 static struct lock_class_key ifb_tx_queue_lock_key;
 static struct lock_class_key ifb_rx_queue_lock_key;
 
+static void set_tx_lockdep_key(struct net_device *dev,
+			       struct netdev_queue *txq,
+			       void *_unused)
+{
+	lockdep_set_class(&txq->lock, &ifb_tx_queue_lock_key);
+}
 
 static int __init ifb_init_one(int index)
 {
@@ -258,7 +264,7 @@ static int __init ifb_init_one(int index)
 	if (err < 0)
 		goto err;
 
-	lockdep_set_class(&dev_ifb->tx_queue.lock, &ifb_tx_queue_lock_key);
+	netdev_for_each_tx_queue(dev_ifb, set_tx_lockdep_key, NULL);
 	lockdep_set_class(&dev_ifb->rx_queue.lock, &ifb_rx_queue_lock_key);
 
 	return 0;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 980001c2cf96..72745ce588c6 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -285,7 +285,9 @@ static struct lock_class_key macvlan_netdev_xmit_lock_key;
 #define MACVLAN_STATE_MASK \
 	((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
 
-static void macvlan_set_lockdep_class_one(struct netdev_queue *txq)
+static void macvlan_set_lockdep_class_one(struct net_device *dev,
+					  struct netdev_queue *txq,
+					  void *_unused)
 {
 	lockdep_set_class(&txq->_xmit_lock,
 			  &macvlan_netdev_xmit_lock_key);
@@ -293,7 +295,7 @@ static void macvlan_set_lockdep_class_one(struct netdev_queue *txq)
 
 static void macvlan_set_lockdep_class(struct net_device *dev)
 {
-	macvlan_set_lockdep_class_one(&dev->tx_queue);
+	netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
 }
 
 static int macvlan_init(struct net_device *dev)
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index c1f4bb005d92..13d5882f1f21 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -3102,7 +3102,9 @@ static void prism2_clear_set_tim_queue(local_info_t *local)
  */
 static struct lock_class_key hostap_netdev_xmit_lock_key;
 
-static void prism2_set_lockdep_class_one(struct netdev_queue *txq)
+static void prism2_set_lockdep_class_one(struct net_device *dev,
+					 struct netdev_queue *txq,
+					 void *_unused)
 {
 	lockdep_set_class(&txq->_xmit_lock,
 			  &hostap_netdev_xmit_lock_key);
@@ -3110,7 +3112,7 @@ static void prism2_set_lockdep_class_one(struct netdev_queue *txq)
 
 static void prism2_set_lockdep_class(struct net_device *dev)
 {
-	prism2_set_lockdep_class_one(&dev->tx_queue);
+	netdev_for_each_tx_queue(dev, prism2_set_lockdep_class_one, NULL);
 }
 
 static struct net_device *
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 570cf7affa72..f25d4f5a31b0 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -463,7 +463,7 @@ struct netdev_queue {
 	struct Qdisc		*qdisc_sleeping;
 	struct list_head	qdisc_list;
 	struct netdev_queue	*next_sched;
-};
+} ____cacheline_aligned_in_smp;
 
 /*
  * The DEVICE structure.
@@ -641,7 +641,9 @@ struct net_device
 	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/
 
 	struct netdev_queue	rx_queue;
-	struct netdev_queue	tx_queue ____cacheline_aligned_in_smp;
+
+	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;
+	unsigned int		num_tx_queues;
 	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
 
 /*
@@ -764,6 +766,25 @@ struct net_device
 #define NETDEV_ALIGN		32
 #define NETDEV_ALIGN_CONST	(NETDEV_ALIGN - 1)
 
+static inline
+struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
+					 unsigned int index)
+{
+	return &dev->_tx[index];
+}
+
+static inline void netdev_for_each_tx_queue(struct net_device *dev,
+					    void (*f)(struct net_device *,
+						      struct netdev_queue *,
+						      void *),
+					    void *arg)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++)
+		f(dev, &dev->_tx[i], arg);
+}
+
 /*
  * Net namespace inlines
  */
@@ -977,7 +998,7 @@ static inline void netif_schedule_queue(struct netdev_queue *txq)
 
 static inline void netif_schedule(struct net_device *dev)
 {
-	netif_schedule_queue(&dev->tx_queue);
+	netif_schedule_queue(netdev_get_tx_queue(dev, 0));
 }
 
 /**
@@ -993,7 +1014,7 @@ static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
 
 static inline void netif_start_queue(struct net_device *dev)
 {
-	netif_tx_start_queue(&dev->tx_queue);
+	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
 }
 
 /**
@@ -1017,7 +1038,7 @@ static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
 
 static inline void netif_wake_queue(struct net_device *dev)
 {
-	netif_tx_wake_queue(&dev->tx_queue);
+	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
 }
 
 /**
@@ -1034,7 +1055,7 @@ static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
 
 static inline void netif_stop_queue(struct net_device *dev)
 {
-	netif_tx_stop_queue(&dev->tx_queue);
+	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
 }
 
 /**
@@ -1050,7 +1071,7 @@ static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
 
 static inline int netif_queue_stopped(const struct net_device *dev)
 {
-	return netif_tx_queue_stopped(&dev->tx_queue);
+	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
 }
 
 /**
@@ -1134,7 +1155,7 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 #endif
 	if (test_and_clear_bit(__QUEUE_STATE_XOFF,
 			       &dev->egress_subqueue[queue_index].state))
-		__netif_schedule(&dev->tx_queue);
+		__netif_schedule(netdev_get_tx_queue(dev, 0));
 }
 
 /**
@@ -1430,18 +1451,19 @@ static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 
 static inline void netif_tx_lock(struct net_device *dev)
 {
-	__netif_tx_lock(&dev->tx_queue, smp_processor_id());
-}
-
-static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
-{
-	spin_lock_bh(&txq->_xmit_lock);
-	txq->xmit_lock_owner = smp_processor_id();
-}
+	int cpu = smp_processor_id();
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		__netif_tx_lock(txq, cpu);
+	}
+}
 
 static inline void netif_tx_lock_bh(struct net_device *dev)
 {
-	__netif_tx_lock_bh(&dev->tx_queue);
+	local_bh_disable();
+	netif_tx_lock(dev);
 }
 
 static inline int __netif_tx_trylock(struct netdev_queue *txq)
@@ -1454,7 +1476,7 @@ static inline int __netif_tx_trylock(struct netdev_queue *txq)
 
 static inline int netif_tx_trylock(struct net_device *dev)
 {
-	return __netif_tx_trylock(&dev->tx_queue);
+	return __netif_tx_trylock(netdev_get_tx_queue(dev, 0));
 }
 
 static inline void __netif_tx_unlock(struct netdev_queue *txq)
@@ -1465,18 +1487,19 @@ static inline void __netif_tx_unlock(struct netdev_queue *txq)
 
 static inline void netif_tx_unlock(struct net_device *dev)
 {
-	__netif_tx_unlock(&dev->tx_queue);
-}
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		__netif_tx_unlock(txq);
+	}
 
-static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
-{
-	txq->xmit_lock_owner = -1;
-	spin_unlock_bh(&txq->_xmit_lock);
 }
 
 static inline void netif_tx_unlock_bh(struct net_device *dev)
 {
-	__netif_tx_unlock_bh(&dev->tx_queue);
+	netif_tx_unlock(dev);
+	local_bh_enable();
 }
 
 #define HARD_TX_LOCK(dev, txq, cpu) {			\
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 5ba66b555578..b47f556c66f8 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -230,32 +230,47 @@ extern void tcf_destroy_chain(struct tcf_proto **fl);
 /* Reset all TX qdiscs of a device.  */
 static inline void qdisc_reset_all_tx(struct net_device *dev)
 {
-	qdisc_reset(dev->tx_queue.qdisc);
+	unsigned int i;
+	for (i = 0; i < dev->num_tx_queues; i++)
+		qdisc_reset(netdev_get_tx_queue(dev, i)->qdisc);
 }
 
 /* Are all TX queues of the device empty?  */
 static inline bool qdisc_all_tx_empty(const struct net_device *dev)
 {
-	const struct netdev_queue *txq = &dev->tx_queue;
-	const struct Qdisc *q = txq->qdisc;
+	unsigned int i;
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		const struct Qdisc *q = txq->qdisc;
 
-	return (q->q.qlen == 0);
+		if (q->q.qlen)
+			return false;
+	}
+	return true;
 }
 
 /* Are any of the TX qdiscs changing?  */
 static inline bool qdisc_tx_changing(struct net_device *dev)
 {
-	struct netdev_queue *txq = &dev->tx_queue;
-
-	return (txq->qdisc != txq->qdisc_sleeping);
+	unsigned int i;
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		if (txq->qdisc != txq->qdisc_sleeping)
+			return true;
+	}
+	return false;
 }
 
-/* Is the device using the noop qdisc?  */
+/* Is the device using the noop qdisc on all queues?  */
 static inline bool qdisc_tx_is_noop(const struct net_device *dev)
 {
-	const struct netdev_queue *txq = &dev->tx_queue;
-
-	return (txq->qdisc == &noop_qdisc);
+	unsigned int i;
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		if (txq->qdisc != &noop_qdisc)
+			return false;
+	}
+	return true;
 }
 
 static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 6b985f23fd9f..f42bc2b26b85 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -570,16 +570,18 @@ static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
  */
 static struct lock_class_key vlan_netdev_xmit_lock_key;
 
-static void vlan_dev_set_lockdep_one(struct netdev_queue *txq,
-				     int subclass)
+static void vlan_dev_set_lockdep_one(struct net_device *dev,
+				     struct netdev_queue *txq,
+				     void *_subclass)
 {
 	lockdep_set_class_and_subclass(&txq->_xmit_lock,
-				       &vlan_netdev_xmit_lock_key, subclass);
+				       &vlan_netdev_xmit_lock_key,
+				       *(int *)_subclass);
 }
 
 static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
 {
-	vlan_dev_set_lockdep_one(&dev->tx_queue, subclass);
+	netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
 }
 
 static const struct header_ops vlan_header_ops = {
diff --git a/net/core/dev.c b/net/core/dev.c
index 9b49f74a9820..69378f250695 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1666,6 +1666,12 @@ out_kfree_skb:
  *      --BLG
  */
 
+static struct netdev_queue *dev_pick_tx(struct net_device *dev,
+					struct sk_buff *skb)
+{
+	return netdev_get_tx_queue(dev, 0);
+}
+
 int dev_queue_xmit(struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
@@ -1702,7 +1708,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 	}
 
 gso:
-	txq = &dev->tx_queue;
+	txq = dev_pick_tx(dev, skb);
 	spin_lock_prefetch(&txq->lock);
 
 	/* Disable soft irqs for various locks below. Also
@@ -3788,8 +3794,9 @@ static void rollback_registered(struct net_device *dev)
 	dev_put(dev);
 }
 
-static void __netdev_init_queue_locks_one(struct netdev_queue *dev_queue,
-					  struct net_device *dev)
+static void __netdev_init_queue_locks_one(struct net_device *dev,
+					  struct netdev_queue *dev_queue,
+					  void *_unused)
 {
 	spin_lock_init(&dev_queue->_xmit_lock);
 	netdev_set_lockdep_class(&dev_queue->_xmit_lock, dev->type);
@@ -3798,8 +3805,8 @@ static void __netdev_init_queue_locks_one(struct netdev_queue *dev_queue,
 
 static void netdev_init_queue_locks(struct net_device *dev)
 {
-	__netdev_init_queue_locks_one(&dev->tx_queue, dev);
-	__netdev_init_queue_locks_one(&dev->rx_queue, dev);
+	netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
+	__netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
 }
 
 /**
@@ -4119,7 +4126,8 @@ static struct net_device_stats *internal_stats(struct net_device *dev)
 }
 
 static void netdev_init_one_queue(struct net_device *dev,
-				  struct netdev_queue *queue)
+				  struct netdev_queue *queue,
+				  void *_unused)
 {
 	spin_lock_init(&queue->lock);
 	queue->dev = dev;
@@ -4127,8 +4135,8 @@ static void netdev_init_one_queue(struct net_device *dev,
 
 static void netdev_init_queues(struct net_device *dev)
 {
-	netdev_init_one_queue(dev, &dev->rx_queue);
-	netdev_init_one_queue(dev, &dev->tx_queue);
+	netdev_init_one_queue(dev, &dev->rx_queue, NULL);
+	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
 }
 
 /**
@@ -4145,9 +4153,10 @@ static void netdev_init_queues(struct net_device *dev)
 struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 		void (*setup)(struct net_device *), unsigned int queue_count)
 {
-	void *p;
+	struct netdev_queue *tx;
 	struct net_device *dev;
 	int alloc_size;
+	void *p;
 
 	BUG_ON(strlen(name) >= sizeof(dev->name));
 
@@ -4167,11 +4176,22 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 		return NULL;
 	}
 
+	tx = kzalloc(sizeof(struct netdev_queue) * queue_count, GFP_KERNEL);
+	if (!tx) {
+		printk(KERN_ERR "alloc_netdev: Unable to allocate "
+		       "tx qdiscs.\n");
+		kfree(p);
+		return NULL;
+	}
+
 	dev = (struct net_device *)
 		(((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
 	dev->padded = (char *)dev - (char *)p;
 	dev_net_set(dev, &init_net);
 
+	dev->_tx = tx;
+	dev->num_tx_queues = queue_count;
+
 	if (sizeof_priv) {
 		dev->priv = ((char *)dev +
 			     ((sizeof(struct net_device) +
@@ -4205,6 +4225,8 @@ void free_netdev(struct net_device *dev)
 {
 	release_net(dev_net(dev));
 
+	kfree(dev->_tx);
+
 	/* Compatibility with error handling in drivers */
 	if (dev->reg_state == NETREG_UNINITIALIZED) {
 		kfree((char *)dev - dev->padded);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 8ef9f1db610e..71edb8b36341 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -636,7 +636,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
 	if (dev->master)
 		NLA_PUT_U32(skb, IFLA_MASTER, dev->master->ifindex);
 
-	txq = &dev->tx_queue;
+	txq = netdev_get_tx_queue(dev, 0);
 	if (txq->qdisc_sleeping)
 		NLA_PUT_STRING(skb, IFLA_QDISC, txq->qdisc_sleeping->ops->id);
 
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index af0056e7e5b3..b486e634f4fe 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -621,7 +621,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 
 	/* ensure that TX flow won't interrupt us
 	 * until the end of the call to requeue function */
-	txq = &local->mdev->tx_queue;
+	txq = netdev_get_tx_queue(local->mdev, 0);
 	spin_lock_bh(&txq->lock);
 
 	/* create a new queue for this aggregation */
@@ -862,7 +862,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
 
 	/* avoid ordering issues: we are the only one that can modify
 	 * the content of the qdiscs */
-	txq = &local->mdev->tx_queue;
+	txq = netdev_get_tx_queue(local->mdev, 0);
 	spin_lock_bh(&txq->lock);
 	/* remove the queue for this aggregation */
 	ieee80211_ht_agg_queue_remove(local, sta, tid, 1);
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 6ae43a3c7726..f014cd38c2d0 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -574,7 +574,7 @@ static struct Qdisc_ops wme_qdisc_ops __read_mostly =
 
 void ieee80211_install_qdisc(struct net_device *dev)
 {
-	struct netdev_queue *txq = &dev->tx_queue;
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
 	struct Qdisc *qdisc;
 
 	qdisc = qdisc_create_dflt(dev, txq,
@@ -596,7 +596,7 @@ void ieee80211_install_qdisc(struct net_device *dev)
 
 int ieee80211_qdisc_installed(struct net_device *dev)
 {
-	struct netdev_queue *txq = &dev->tx_queue;
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
 
 	return txq->qdisc_sleeping->ops == &wme_qdisc_ops;
 }
@@ -617,7 +617,7 @@ int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
 			      struct sta_info *sta, u16 tid)
 {
 	int i;
-	struct netdev_queue *txq = &local->mdev->tx_queue;
+	struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, 0);
 	struct ieee80211_sched_data *q =
 			qdisc_priv(txq->qdisc_sleeping);
 	DECLARE_MAC_BUF(mac);
@@ -652,14 +652,14 @@ int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
 }
 
 /**
- * the caller needs to hold local->mdev->tx_queue.lock
+ * the caller needs to hold netdev_get_tx_queue(local->mdev, X)->lock
  */
 void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
 				   struct sta_info *sta, u16 tid,
 				   u8 requeue)
 {
 	struct ieee80211_hw *hw = &local->hw;
-	struct netdev_queue *txq = &local->mdev->tx_queue;
+	struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, 0);
 	struct ieee80211_sched_data *q =
 			qdisc_priv(txq->qdisc_sleeping);
 	int agg_queue = sta->tid_to_tx_q[tid];
@@ -676,7 +676,7 @@ void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
 
 void ieee80211_requeue(struct ieee80211_local *local, int queue)
 {
-	struct netdev_queue *txq = &local->mdev->tx_queue;
+	struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, 0);
 	struct Qdisc *root_qd = txq->qdisc_sleeping;
 	struct ieee80211_sched_data *q = qdisc_priv(root_qd);
 	struct Qdisc *qdisc = q->queues[queue];
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 819afc449e1e..d41be0d66eb0 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -74,14 +74,16 @@ static const struct proto_ops nr_proto_ops;
  */
 static struct lock_class_key nr_netdev_xmit_lock_key;
 
-static void nr_set_lockdep_one(struct netdev_queue *txq)
+static void nr_set_lockdep_one(struct net_device *dev,
+			       struct netdev_queue *txq,
+			       void *_unused)
 {
 	lockdep_set_class(&txq->_xmit_lock, &nr_netdev_xmit_lock_key);
 }
 
 static void nr_set_lockdep_key(struct net_device *dev)
 {
-	nr_set_lockdep_one(&dev->tx_queue);
+	netdev_for_each_tx_queue(dev, nr_set_lockdep_one, NULL);
 }
 
 /*
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 7dbbc0891623..f3a691f34909 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -75,14 +75,16 @@ ax25_address rose_callsign;
  */
 static struct lock_class_key rose_netdev_xmit_lock_key;
 
-static void rose_set_lockdep_one(struct netdev_queue *txq)
+static void rose_set_lockdep_one(struct net_device *dev,
+				 struct netdev_queue *txq,
+				 void *_unused)
 {
 	lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
 }
 
 static void rose_set_lockdep_key(struct net_device *dev)
 {
-	rose_set_lockdep_one(&dev->tx_queue);
+	netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
 }
 
 /*
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index b483bbea6118..d0b0a9b14394 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -166,7 +166,7 @@ replay:
 
 	/* Find qdisc */
 	if (!parent) {
-		struct netdev_queue *dev_queue = &dev->tx_queue;
+		struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0);
 		q = dev_queue->qdisc_sleeping;
 		parent = q->handle;
 	} else {
@@ -410,7 +410,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
 	if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
 		return skb->len;
 
-	dev_queue = &dev->tx_queue;
+	dev_queue = netdev_get_tx_queue(dev, 0);
 	if (!tcm->tcm_parent)
 		q = dev_queue->qdisc_sleeping;
 	else
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 95873f8dd37c..830ccc544a15 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -183,9 +183,8 @@ EXPORT_SYMBOL(unregister_qdisc);
    (root qdisc, all its children, children of children etc.)
  */
 
-struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
+static struct Qdisc *__qdisc_lookup(struct netdev_queue *dev_queue, u32 handle)
 {
-	struct netdev_queue *dev_queue = &dev->tx_queue;
 	struct Qdisc *q;
 
 	list_for_each_entry(q, &dev_queue->qdisc_list, list) {
@@ -195,6 +194,19 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 	return NULL;
 }
 
+struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		struct Qdisc *q = __qdisc_lookup(txq, handle);
+		if (q)
+			return q;
+	}
+	return NULL;
+}
+
 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
 {
 	unsigned long cl;
@@ -462,7 +474,7 @@ dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
 		}
 
 	} else {
-		dev_queue = &dev->tx_queue;
+		dev_queue = netdev_get_tx_queue(dev, 0);
 		oqdisc = dev_queue->qdisc_sleeping;
 
 		/* Prune old scheduler */
@@ -742,7 +754,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 			q = dev->rx_queue.qdisc;
 		}
 	} else {
-		struct netdev_queue *dev_queue = &dev->tx_queue;
+		struct netdev_queue *dev_queue;
+		dev_queue = netdev_get_tx_queue(dev, 0);
 		q = dev_queue->qdisc_sleeping;
 	}
 	if (!q)
@@ -817,7 +830,8 @@ replay:
 			q = dev->rx_queue.qdisc;
 		}
 	} else {
-		struct netdev_queue *dev_queue = &dev->tx_queue;
+		struct netdev_queue *dev_queue;
+		dev_queue = netdev_get_tx_queue(dev, 0);
 		q = dev_queue->qdisc_sleeping;
 	}
 
@@ -899,7 +913,7 @@ create_n_graft:
 				 tcm->tcm_parent, tcm->tcm_parent,
 				 tca, &err);
 	else
-		q = qdisc_create(dev, &dev->tx_queue,
+		q = qdisc_create(dev, netdev_get_tx_queue(dev, 0),
 				 tcm->tcm_parent, tcm->tcm_handle,
 				 tca, &err);
 	if (q == NULL) {
@@ -1025,7 +1039,7 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
 		if (idx > s_idx)
 			s_q_idx = 0;
 		q_idx = 0;
-		dev_queue = &dev->tx_queue;
+		dev_queue = netdev_get_tx_queue(dev, 0);
 		list_for_each_entry(q, &dev_queue->qdisc_list, list) {
 			if (q_idx < s_q_idx) {
 				q_idx++;
@@ -1098,7 +1112,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 
 	/* Step 1. Determine qdisc handle X:0 */
 
-	dev_queue = &dev->tx_queue;
+	dev_queue = netdev_get_tx_queue(dev, 0);
 	if (pid != TC_H_ROOT) {
 		u32 qid1 = TC_H_MAJ(pid);
 
@@ -1275,7 +1289,7 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
 	s_t = cb->args[0];
 	t = 0;
 
-	dev_queue = &dev->tx_queue;
+	dev_queue = netdev_get_tx_queue(dev, 0);
 	list_for_each_entry(q, &dev_queue->qdisc_list, list) {
 		if (t < s_t || !q->ops->cl_ops ||
 		    (tcm->tcm_parent &&
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 243de935b182..4e2b865cbba0 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -40,20 +40,30 @@
  */
 
 void qdisc_lock_tree(struct net_device *dev)
-	__acquires(dev->tx_queue.lock)
 	__acquires(dev->rx_queue.lock)
 {
-	spin_lock_bh(&dev->tx_queue.lock);
+	unsigned int i;
+
+	local_bh_disable();
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		spin_lock(&txq->lock);
+	}
 	spin_lock(&dev->rx_queue.lock);
 }
 EXPORT_SYMBOL(qdisc_lock_tree);
 
 void qdisc_unlock_tree(struct net_device *dev)
 	__releases(dev->rx_queue.lock)
-	__releases(dev->tx_queue.lock)
 {
+	unsigned int i;
+
 	spin_unlock(&dev->rx_queue.lock);
-	spin_unlock_bh(&dev->tx_queue.lock);
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		spin_unlock(&txq->lock);
+	}
+	local_bh_enable();
 }
 EXPORT_SYMBOL(qdisc_unlock_tree);
 
@@ -212,22 +222,37 @@ void __qdisc_run(struct netdev_queue *txq)
 static void dev_watchdog(unsigned long arg)
 {
 	struct net_device *dev = (struct net_device *)arg;
-	struct netdev_queue *txq = &dev->tx_queue;
 
 	netif_tx_lock(dev);
-	if (txq->qdisc != &noop_qdisc) {
+	if (!qdisc_tx_is_noop(dev)) {
 		if (netif_device_present(dev) &&
 		    netif_running(dev) &&
 		    netif_carrier_ok(dev)) {
-			if (netif_queue_stopped(dev) &&
-			    time_after(jiffies, dev->trans_start + dev->watchdog_timeo)) {
+			int some_queue_stopped = 0;
+			unsigned int i;
+
+			for (i = 0; i < dev->num_tx_queues; i++) {
+				struct netdev_queue *txq;
+
+				txq = netdev_get_tx_queue(dev, i);
+				if (netif_tx_queue_stopped(txq)) {
+					some_queue_stopped = 1;
+					break;
+				}
+			}
 
-				printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n",
+			if (some_queue_stopped &&
+			    time_after(jiffies, (dev->trans_start +
+						 dev->watchdog_timeo))) {
+				printk(KERN_INFO "NETDEV WATCHDOG: %s: "
+				       "transmit timed out\n",
 				       dev->name);
 				dev->tx_timeout(dev);
 				WARN_ON_ONCE(1);
 			}
-			if (!mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + dev->watchdog_timeo)))
+			if (!mod_timer(&dev->watchdog_timer,
+				       round_jiffies(jiffies +
+						     dev->watchdog_timeo)))
 				dev_hold(dev);
 		}
 	}
@@ -542,9 +567,55 @@ void qdisc_destroy(struct Qdisc *qdisc)
 }
 EXPORT_SYMBOL(qdisc_destroy);
 
+static bool dev_all_qdisc_sleeping_noop(struct net_device *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+		if (txq->qdisc_sleeping != &noop_qdisc)
+			return false;
+	}
+	return true;
+}
+
+static void attach_one_default_qdisc(struct net_device *dev,
+				     struct netdev_queue *dev_queue,
+				     void *_unused)
+{
+	struct Qdisc *qdisc;
+
+	if (dev->tx_queue_len) {
+		qdisc = qdisc_create_dflt(dev, dev_queue,
+					  &pfifo_fast_ops, TC_H_ROOT);
+		if (!qdisc) {
+			printk(KERN_INFO "%s: activation failed\n", dev->name);
+			return;
+		}
+		list_add_tail(&qdisc->list, &dev_queue->qdisc_list);
+	} else {
+		qdisc = &noqueue_qdisc;
+	}
+	dev_queue->qdisc_sleeping = qdisc;
+}
+
+static void transition_one_qdisc(struct net_device *dev,
+				 struct netdev_queue *dev_queue,
+				 void *_need_watchdog)
+{
+	int *need_watchdog_p = _need_watchdog;
+
+	spin_lock_bh(&dev_queue->lock);
+	rcu_assign_pointer(dev_queue->qdisc, dev_queue->qdisc_sleeping);
+	if (dev_queue->qdisc != &noqueue_qdisc)
+		*need_watchdog_p = 1;
+	spin_unlock_bh(&dev_queue->lock);
+}
+
 void dev_activate(struct net_device *dev)
 {
-	struct netdev_queue *txq = &dev->tx_queue;
+	int need_watchdog;
 
 	/* No queueing discipline is attached to device;
 	   create default one i.e. pfifo_fast for devices,
@@ -552,39 +623,27 @@ void dev_activate(struct net_device *dev)
 	   virtual interfaces
 	 */
 
-	if (txq->qdisc_sleeping == &noop_qdisc) {
-		struct Qdisc *qdisc;
-		if (dev->tx_queue_len) {
-			qdisc = qdisc_create_dflt(dev, txq,
-						  &pfifo_fast_ops,
-						  TC_H_ROOT);
-			if (qdisc == NULL) {
-				printk(KERN_INFO "%s: activation failed\n", dev->name);
-				return;
-			}
-			list_add_tail(&qdisc->list, &txq->qdisc_list);
-		} else {
-			qdisc = &noqueue_qdisc;
-		}
-		txq->qdisc_sleeping = qdisc;
-	}
+	if (dev_all_qdisc_sleeping_noop(dev))
+		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
 
 	if (!netif_carrier_ok(dev))
 		/* Delay activation until next carrier-on event */
 		return;
 
-	spin_lock_bh(&txq->lock);
-	rcu_assign_pointer(txq->qdisc, txq->qdisc_sleeping);
-	if (txq->qdisc != &noqueue_qdisc) {
+	need_watchdog = 0;
+	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
+
+	if (need_watchdog) {
 		dev->trans_start = jiffies;
 		dev_watchdog_up(dev);
 	}
-	spin_unlock_bh(&txq->lock);
 }
 
-static void dev_deactivate_queue(struct netdev_queue *dev_queue,
-				 struct Qdisc *qdisc_default)
+static void dev_deactivate_queue(struct net_device *dev,
+				 struct netdev_queue *dev_queue,
+				 void *_qdisc_default)
 {
+	struct Qdisc *qdisc_default = _qdisc_default;
 	struct Qdisc *qdisc;
 	struct sk_buff *skb;
 
@@ -603,12 +662,35 @@ static void dev_deactivate_queue(struct netdev_queue *dev_queue,
 	kfree_skb(skb);
 }
 
+static bool some_qdisc_is_running(struct net_device *dev, int lock)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *dev_queue;
+		int val;
+
+		dev_queue = netdev_get_tx_queue(dev, i);
+
+		if (lock)
+			spin_lock_bh(&dev_queue->lock);
+
+		val = test_bit(__QUEUE_STATE_QDISC_RUNNING, &dev_queue->state);
+
+		if (lock)
+			spin_unlock_bh(&dev_queue->lock);
+
+		if (val)
+			return true;
+	}
+	return false;
+}
+
 void dev_deactivate(struct net_device *dev)
 {
-	struct netdev_queue *dev_queue = &dev->tx_queue;
-	int running;
+	bool running;
 
-	dev_deactivate_queue(dev_queue, &noop_qdisc);
+	netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
 
 	dev_watchdog_down(dev);
 
@@ -617,17 +699,14 @@ void dev_deactivate(struct net_device *dev)
 
 	/* Wait for outstanding qdisc_run calls. */
 	do {
-		while (test_bit(__QUEUE_STATE_QDISC_RUNNING, &dev_queue->state))
+		while (some_qdisc_is_running(dev, 0))
 			yield();
 
 		/*
 		 * Double-check inside queue lock to ensure that all effects
 		 * of the queue run are visible when we return.
 		 */
-		spin_lock_bh(&dev_queue->lock);
-		running = test_bit(__QUEUE_STATE_QDISC_RUNNING,
-				   &dev_queue->state);
-		spin_unlock_bh(&dev_queue->lock);
+		running = some_qdisc_is_running(dev, 1);
 
 		/*
 		 * The running flag should never be set at this point because
@@ -642,8 +721,10 @@ void dev_deactivate(struct net_device *dev)
 
 static void dev_init_scheduler_queue(struct net_device *dev,
 				     struct netdev_queue *dev_queue,
-				     struct Qdisc *qdisc)
+				     void *_qdisc)
 {
+	struct Qdisc *qdisc = _qdisc;
+
 	dev_queue->qdisc = qdisc;
 	dev_queue->qdisc_sleeping = qdisc;
 	INIT_LIST_HEAD(&dev_queue->qdisc_list);
@@ -652,18 +733,19 @@ static void dev_init_scheduler_queue(struct net_device *dev,
 void dev_init_scheduler(struct net_device *dev)
 {
 	qdisc_lock_tree(dev);
-	dev_init_scheduler_queue(dev, &dev->tx_queue, &noop_qdisc);
+	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
 	dev_init_scheduler_queue(dev, &dev->rx_queue, NULL);
 	qdisc_unlock_tree(dev);
 
 	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
 }
 
-static void dev_shutdown_scheduler_queue(struct net_device *dev,
-					 struct netdev_queue *dev_queue,
-					 struct Qdisc *qdisc_default)
+static void shutdown_scheduler_queue(struct net_device *dev,
+				     struct netdev_queue *dev_queue,
+				     void *_qdisc_default)
 {
 	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+	struct Qdisc *qdisc_default = _qdisc_default;
 
 	if (qdisc) {
 		dev_queue->qdisc = qdisc_default;
@@ -676,8 +758,8 @@ static void dev_shutdown_scheduler_queue(struct net_device *dev,
 void dev_shutdown(struct net_device *dev)
 {
 	qdisc_lock_tree(dev);
-	dev_shutdown_scheduler_queue(dev, &dev->tx_queue, &noop_qdisc);
-	dev_shutdown_scheduler_queue(dev, &dev->rx_queue, NULL);
+	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
+	shutdown_scheduler_queue(dev, &dev->rx_queue, NULL);
 	BUG_TRAP(!timer_pending(&dev->watchdog_timer));
 	qdisc_unlock_tree(dev);
 }
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 8ac05981be20..44a2c3451f4d 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -111,7 +111,7 @@ teql_dequeue(struct Qdisc* sch)
 	struct sk_buff *skb;
 
 	skb = __skb_dequeue(&dat->q);
-	dat_queue = &dat->m->dev->tx_queue;
+	dat_queue = netdev_get_tx_queue(dat->m->dev, 0);
 	if (skb == NULL) {
 		struct net_device *m = qdisc_dev(dat_queue->qdisc);
 		if (m) {
@@ -155,10 +155,13 @@ teql_destroy(struct Qdisc* sch)
 	if (q == master->slaves) {
 		master->slaves = NEXT_SLAVE(q);
 		if (q == master->slaves) {
+			struct netdev_queue *txq;
+
+			txq = netdev_get_tx_queue(master->dev, 0);
 			master->slaves = NULL;
-			spin_lock_bh(&master->dev->tx_queue.lock);
-			qdisc_reset(master->dev->tx_queue.qdisc);
-			spin_unlock_bh(&master->dev->tx_queue.lock);
+			spin_lock_bh(&txq->lock);
+			qdisc_reset(txq->qdisc);
+			spin_unlock_bh(&txq->lock);
 		}
 	}
 	skb_queue_purge(&dat->q);
@@ -218,7 +221,8 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
 static int
 __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
 {
-	struct teql_sched_data *q = qdisc_priv(dev->tx_queue.qdisc);
+	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0);
+	struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc);
 	struct neighbour *mn = skb->dst->neighbour;
 	struct neighbour *n = q->ncache;
 
@@ -254,7 +258,8 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
 static inline int teql_resolve(struct sk_buff *skb,
 			       struct sk_buff *skb_res, struct net_device *dev)
 {
-	if (dev->tx_queue.qdisc == &noop_qdisc)
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+	if (txq->qdisc == &noop_qdisc)
 		return -ENODEV;
 
 	if (dev->header_ops == NULL ||
@@ -285,8 +290,10 @@ restart:
 
 	do {
 		struct net_device *slave = qdisc_dev(q);
+		struct netdev_queue *slave_txq;
 
-		if (slave->tx_queue.qdisc_sleeping != q)
+		slave_txq = netdev_get_tx_queue(slave, 0);
+		if (slave_txq->qdisc_sleeping != q)
 			continue;
 		if (netif_queue_stopped(slave) ||
 		    __netif_subqueue_stopped(slave, subq) ||