-rw-r--r--  drivers/isdn/i4l/isdn_net.c     |   2
-rw-r--r--  include/linux/netdevice.h       |  10
-rw-r--r--  include/net/irda/irda_device.h  |   2
-rw-r--r--  net/core/dev.c                  |   4
-rw-r--r--  net/core/link_watch.c           |   8
-rw-r--r--  net/core/rtnetlink.c            |   6
-rw-r--r--  net/ipv6/addrconf.c             |   3
-rw-r--r--  net/mac80211/wme.c              |  20
-rw-r--r--  net/sched/cls_api.c             |   7
-rw-r--r--  net/sched/sch_api.c             |  34
-rw-r--r--  net/sched/sch_generic.c         |  90
-rw-r--r--  net/sched/sch_netem.c           |   2
-rw-r--r--  net/sched/sch_teql.c            |  14
13 files changed, 125 insertions, 77 deletions
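
Every hunk below is the same mechanical conversion: the root qdisc pointers and the qdisc list move out of struct net_device into the embedded struct netdev_queue, so callers now reach them through dev->tx_queue. A minimal sketch of the access-pattern change, assuming only the single embedded tx_queue present in this tree (the helper names are illustrative, not from the patch):

/* Before: root qdisc state lived directly in struct net_device. */
static struct Qdisc *root_qdisc_old(struct net_device *dev)
{
        return dev->qdisc_sleeping;     /* field removed by this patch */
}

/* After: the same state hangs off the embedded TX queue. */
static struct Qdisc *root_qdisc_new(struct net_device *dev)
{
        struct netdev_queue *txq = &dev->tx_queue;

        return txq->qdisc_sleeping;     /* field added by this patch */
}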
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index ef1a300068dc..457bbd119f9b 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -287,7 +287,7 @@ isdn_net_unbind_channel(isdn_net_local * lp)
                   BEWARE! This chunk of code cannot be called from hardware
                   interrupt handler. I hope it is true. --ANK
                 */
-               qdisc_reset(lp->netdev->dev->qdisc);
+               qdisc_reset(lp->netdev->dev->tx_queue.qdisc);
        }
        lp->dialstate = 0;
        dev->rx_netdev[isdn_dc2minor(lp->isdn_device, lp->isdn_channel)] = NULL;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 633a44c6fa5e..df702a7b3db5 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -451,6 +451,9 @@ static inline void napi_synchronize(const struct napi_struct *n)
 struct netdev_queue {
        spinlock_t              lock;
        struct net_device       *dev;
+       struct Qdisc            *qdisc;
+       struct Qdisc            *qdisc_sleeping;
+       struct list_head        qdisc_list;
 };
 
 /*
@@ -634,13 +637,6 @@ struct net_device
 
        struct Qdisc            *qdisc_ingress;
 
-/*
- * Cache line mostly used on queue transmit path (qdisc)
- */
-       /* device queue lock */
-       struct Qdisc            *qdisc;
-       struct Qdisc            *qdisc_sleeping;
-       struct list_head        qdisc_list;
        unsigned long           tx_queue_len;   /* Max frames per queue allowed */
 
        /* Partially transmitted GSO packet. */
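
For reference, struct netdev_queue as it stands after the two hunks above; only the three qdisc members are new, the lock and device back-pointer already existed:

struct netdev_queue {
        spinlock_t              lock;
        struct net_device       *dev;
        struct Qdisc            *qdisc;
        struct Qdisc            *qdisc_sleeping;
        struct list_head        qdisc_list;
};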
diff --git a/include/net/irda/irda_device.h b/include/net/irda/irda_device.h
index f70e9b39ebaf..16fbf672e0b2 100644
--- a/include/net/irda/irda_device.h
+++ b/include/net/irda/irda_device.h
@@ -223,7 +223,7 @@ int irda_device_is_receiving(struct net_device *dev);
 /* Interface for internal use */
 static inline int irda_device_txqueue_empty(const struct net_device *dev)
 {
-       return skb_queue_empty(&dev->qdisc->q);
+       return skb_queue_empty(&dev->tx_queue.qdisc->q);
 }
 int irda_device_set_raw_mode(struct net_device* self, int status);
 struct net_device *alloc_irdadev(int sizeof_priv);
diff --git a/net/core/dev.c b/net/core/dev.c
index 2322fb69fd53..ce79c28d739d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1720,14 +1720,14 @@ gso:
         * also serializes access to the device queue.
         */
 
-       q = rcu_dereference(dev->qdisc);
+       q = rcu_dereference(txq->qdisc);
 #ifdef CONFIG_NET_CLS_ACT
        skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
 #endif
        if (q->enqueue) {
                /* Grab device queue */
                spin_lock(&txq->lock);
-               q = dev->qdisc;
+               q = txq->qdisc;
                if (q->enqueue) {
                        /* reset queue_mapping to zero */
                        skb_set_queue_mapping(skb, 0);
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index a5e372b9ec4d..50218218445b 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -79,8 +79,10 @@ static void rfc2863_policy(struct net_device *dev)
 
 static int linkwatch_urgent_event(struct net_device *dev)
 {
+       struct netdev_queue *txq = &dev->tx_queue;
+
        return netif_running(dev) && netif_carrier_ok(dev) &&
-              dev->qdisc != dev->qdisc_sleeping;
+              txq->qdisc != txq->qdisc_sleeping;
 }
 
 
@@ -181,7 +183,9 @@ static void __linkwatch_run_queue(int urgent_only)
                rfc2863_policy(dev);
                if (dev->flags & IFF_UP) {
                        if (netif_carrier_ok(dev)) {
-                               WARN_ON(dev->qdisc_sleeping == &noop_qdisc);
+                               struct netdev_queue *txq = &dev->tx_queue;
+
+                               WARN_ON(txq->qdisc_sleeping == &noop_qdisc);
                                dev_activate(dev);
                        } else
                                dev_deactivate(dev);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 6c8d7f0ea01a..8ef9f1db610e 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -605,6 +605,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                          int type, u32 pid, u32 seq, u32 change,
                          unsigned int flags)
 {
+       struct netdev_queue *txq;
        struct ifinfomsg *ifm;
        struct nlmsghdr *nlh;
        struct net_device_stats *stats;
@@ -635,8 +636,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
        if (dev->master)
                NLA_PUT_U32(skb, IFLA_MASTER, dev->master->ifindex);
 
-       if (dev->qdisc_sleeping)
-               NLA_PUT_STRING(skb, IFLA_QDISC, dev->qdisc_sleeping->ops->id);
+       txq = &dev->tx_queue;
+       if (txq->qdisc_sleeping)
+               NLA_PUT_STRING(skb, IFLA_QDISC, txq->qdisc_sleeping->ops->id);
 
        if (1) {
                struct rtnl_link_ifmap map = {
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 8572cb05fc21..5c84c798331d 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -231,7 +231,8 @@ const struct in6_addr in6addr_linklocal_allrouters = IN6ADDR_LINKLOCAL_ALLROUTER
 /* Check if a valid qdisc is available */
 static inline int addrconf_qdisc_ok(struct net_device *dev)
 {
-       return (dev->qdisc != &noop_qdisc);
+       struct netdev_queue *txq = &dev->tx_queue;
+       return (txq->qdisc != &noop_qdisc);
 }
 
 /* Check if a route is valid prefix route */
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 59ed9cae66b9..6ae43a3c7726 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -574,9 +574,10 @@ static struct Qdisc_ops wme_qdisc_ops __read_mostly =
 
 void ieee80211_install_qdisc(struct net_device *dev)
 {
+       struct netdev_queue *txq = &dev->tx_queue;
        struct Qdisc *qdisc;
 
-       qdisc = qdisc_create_dflt(dev, &dev->tx_queue,
+       qdisc = qdisc_create_dflt(dev, txq,
                                  &wme_qdisc_ops, TC_H_ROOT);
        if (!qdisc) {
                printk(KERN_ERR "%s: qdisc installation failed\n", dev->name);
@@ -587,15 +588,17 @@ void ieee80211_install_qdisc(struct net_device *dev)
        qdisc->handle = 0x80010000;
 
        qdisc_lock_tree(dev);
-       list_add_tail(&qdisc->list, &dev->qdisc_list);
-       dev->qdisc_sleeping = qdisc;
+       list_add_tail(&qdisc->list, &txq->qdisc_list);
+       txq->qdisc_sleeping = qdisc;
        qdisc_unlock_tree(dev);
 }
 
 
 int ieee80211_qdisc_installed(struct net_device *dev)
 {
-       return dev->qdisc_sleeping->ops == &wme_qdisc_ops;
+       struct netdev_queue *txq = &dev->tx_queue;
+
+       return txq->qdisc_sleeping->ops == &wme_qdisc_ops;
 }
 
 
@@ -614,8 +617,9 @@ int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
                              struct sta_info *sta, u16 tid)
 {
        int i;
+       struct netdev_queue *txq = &local->mdev->tx_queue;
        struct ieee80211_sched_data *q =
-                       qdisc_priv(local->mdev->qdisc_sleeping);
+                       qdisc_priv(txq->qdisc_sleeping);
        DECLARE_MAC_BUF(mac);
 
        /* prepare the filter and save it for the SW queue
@@ -655,8 +659,9 @@ void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
                               u8 requeue)
 {
        struct ieee80211_hw *hw = &local->hw;
+       struct netdev_queue *txq = &local->mdev->tx_queue;
        struct ieee80211_sched_data *q =
-                       qdisc_priv(local->mdev->qdisc_sleeping);
+                       qdisc_priv(txq->qdisc_sleeping);
        int agg_queue = sta->tid_to_tx_q[tid];
 
        /* return the qdisc to the pool */
@@ -671,7 +676,8 @@ void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
 
 void ieee80211_requeue(struct ieee80211_local *local, int queue)
 {
-       struct Qdisc *root_qd = local->mdev->qdisc_sleeping;
+       struct netdev_queue *txq = &local->mdev->tx_queue;
+       struct Qdisc *root_qd = txq->qdisc_sleeping;
        struct ieee80211_sched_data *q = qdisc_priv(root_qd);
        struct Qdisc *qdisc = q->queues[queue];
        struct sk_buff *skb = NULL;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index e2389f161e46..b483bbea6118 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -166,7 +166,8 @@ replay:
 
        /* Find qdisc */
        if (!parent) {
-               q = dev->qdisc_sleeping;
+               struct netdev_queue *dev_queue = &dev->tx_queue;
+               q = dev_queue->qdisc_sleeping;
                parent = q->handle;
        } else {
                q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
@@ -390,6 +391,7 @@ static int tcf_node_dump(struct tcf_proto *tp, unsigned long n,
 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct net *net = sock_net(skb->sk);
+       struct netdev_queue *dev_queue;
        int t;
        int s_t;
        struct net_device *dev;
@@ -408,8 +410,9 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
        if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
                return skb->len;
 
+       dev_queue = &dev->tx_queue;
        if (!tcm->tcm_parent)
-               q = dev->qdisc_sleeping;
+               q = dev_queue->qdisc_sleeping;
        else
                q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
        if (!q)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 570cef2a9c5f..2313fa7c97be 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -185,9 +185,10 @@ EXPORT_SYMBOL(unregister_qdisc);
 
 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 {
+       struct netdev_queue *dev_queue = &dev->tx_queue;
        struct Qdisc *q;
 
-       list_for_each_entry(q, &dev->qdisc_list, list) {
+       list_for_each_entry(q, &dev_queue->qdisc_list, list) {
                if (q->handle == handle)
                        return q;
        }
@@ -441,6 +442,7 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
 static struct Qdisc *
 dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
 {
+       struct netdev_queue *dev_queue;
        struct Qdisc *oqdisc;
 
        if (dev->flags & IFF_UP)
@@ -459,8 +461,8 @@ dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
                }
 
        } else {
-
-               oqdisc = dev->qdisc_sleeping;
+               dev_queue = &dev->tx_queue;
+               oqdisc = dev_queue->qdisc_sleeping;
 
                /* Prune old scheduler */
                if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
@@ -469,8 +471,8 @@ dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
                /* ... and graft new one */
                if (qdisc == NULL)
                        qdisc = &noop_qdisc;
-               dev->qdisc_sleeping = qdisc;
-               dev->qdisc = &noop_qdisc;
+               dev_queue->qdisc_sleeping = qdisc;
+               dev_queue->qdisc = &noop_qdisc;
        }
 
        qdisc_unlock_tree(dev);
@@ -633,7 +635,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
                }
        }
        qdisc_lock_tree(dev);
-       list_add_tail(&sch->list, &dev->qdisc_list);
+       list_add_tail(&sch->list, &dev_queue->qdisc_list);
        qdisc_unlock_tree(dev);
 
        return sch;
@@ -740,7 +742,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                        q = dev->qdisc_ingress;
                }
        } else {
-               q = dev->qdisc_sleeping;
+               struct netdev_queue *dev_queue = &dev->tx_queue;
+               q = dev_queue->qdisc_sleeping;
        }
        if (!q)
                return -ENOENT;
@@ -814,7 +817,8 @@ replay:
                        q = dev->qdisc_ingress;
                }
        } else {
-               q = dev->qdisc_sleeping;
+               struct netdev_queue *dev_queue = &dev->tx_queue;
+               q = dev_queue->qdisc_sleeping;
        }
 
        /* It may be default qdisc, ignore it */
@@ -1015,12 +1019,14 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
        read_lock(&dev_base_lock);
        idx = 0;
        for_each_netdev(&init_net, dev) {
+               struct netdev_queue *dev_queue;
                if (idx < s_idx)
                        goto cont;
                if (idx > s_idx)
                        s_q_idx = 0;
                q_idx = 0;
-               list_for_each_entry(q, &dev->qdisc_list, list) {
+               dev_queue = &dev->tx_queue;
+               list_for_each_entry(q, &dev_queue->qdisc_list, list) {
                        if (q_idx < s_q_idx) {
                                q_idx++;
                                continue;
@@ -1054,6 +1060,7 @@ done:
 static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 {
        struct net *net = sock_net(skb->sk);
+       struct netdev_queue *dev_queue;
        struct tcmsg *tcm = NLMSG_DATA(n);
        struct nlattr *tca[TCA_MAX + 1];
        struct net_device *dev;
@@ -1091,6 +1098,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 
        /* Step 1. Determine qdisc handle X:0 */
 
+       dev_queue = &dev->tx_queue;
        if (pid != TC_H_ROOT) {
                u32 qid1 = TC_H_MAJ(pid);
 
@@ -1101,7 +1109,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                } else if (qid1) {
                        qid = qid1;
                } else if (qid == 0)
-                       qid = dev->qdisc_sleeping->handle;
+                       qid = dev_queue->qdisc_sleeping->handle;
 
                /* Now qid is genuine qdisc handle consistent
                   both with parent and child.
@@ -1112,7 +1120,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                        pid = TC_H_MAKE(qid, pid);
        } else {
                if (qid == 0)
-                       qid = dev->qdisc_sleeping->handle;
+                       qid = dev_queue->qdisc_sleeping->handle;
        }
 
        /* OK. Locate qdisc */
@@ -1248,6 +1256,7 @@ static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walk
 static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct net *net = sock_net(skb->sk);
+       struct netdev_queue *dev_queue;
        int t;
        int s_t;
        struct net_device *dev;
@@ -1266,7 +1275,8 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
        s_t = cb->args[0];
        t = 0;
 
-       list_for_each_entry(q, &dev->qdisc_list, list) {
+       dev_queue = &dev->tx_queue;
+       list_for_each_entry(q, &dev_queue->qdisc_list, list) {
                if (t < s_t || !q->ops->cl_ops ||
                    (tcm->tcm_parent &&
                     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 804d44b00348..3223e5ba76aa 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -122,7 +122,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
  *
  * __LINK_STATE_QDISC_RUNNING guarantees only one CPU can process this
  * device at a time. queue->lock serializes queue accesses for
- * this device AND dev->qdisc pointer itself.
+ * this device AND txq->qdisc pointer itself.
  *
  * netif_tx_lock serializes accesses to device driver.
  *
@@ -138,7 +138,8 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
  */
 static inline int qdisc_restart(struct net_device *dev)
 {
-       struct Qdisc *q = dev->qdisc;
+       struct netdev_queue *txq = &dev->tx_queue;
+       struct Qdisc *q = txq->qdisc;
        struct sk_buff *skb;
        int ret = NETDEV_TX_BUSY;
 
@@ -148,15 +149,15 @@ static inline int qdisc_restart(struct net_device *dev)
 
 
        /* And release queue */
-       spin_unlock(&q->dev_queue->lock);
+       spin_unlock(&txq->lock);
 
        HARD_TX_LOCK(dev, smp_processor_id());
        if (!netif_subqueue_stopped(dev, skb))
                ret = dev_hard_start_xmit(skb, dev);
        HARD_TX_UNLOCK(dev);
 
-       spin_lock(&q->dev_queue->lock);
-       q = dev->qdisc;
+       spin_lock(&txq->lock);
+       q = txq->qdisc;
 
        switch (ret) {
        case NETDEV_TX_OK:
@@ -207,9 +208,10 @@ void __qdisc_run(struct net_device *dev)
 static void dev_watchdog(unsigned long arg)
 {
        struct net_device *dev = (struct net_device *)arg;
+       struct netdev_queue *txq = &dev->tx_queue;
 
        netif_tx_lock(dev);
-       if (dev->qdisc != &noop_qdisc) {
+       if (txq->qdisc != &noop_qdisc) {
                if (netif_device_present(dev) &&
                    netif_running(dev) &&
                    netif_carrier_ok(dev)) {
@@ -539,53 +541,63 @@ EXPORT_SYMBOL(qdisc_destroy);
 
 void dev_activate(struct net_device *dev)
 {
+       struct netdev_queue *txq = &dev->tx_queue;
+
        /* No queueing discipline is attached to device;
           create default one i.e. pfifo_fast for devices,
           which need queueing and noqueue_qdisc for
           virtual interfaces
         */
 
-       if (dev->qdisc_sleeping == &noop_qdisc) {
+       if (txq->qdisc_sleeping == &noop_qdisc) {
                struct Qdisc *qdisc;
                if (dev->tx_queue_len) {
-                       qdisc = qdisc_create_dflt(dev, &dev->tx_queue,
+                       qdisc = qdisc_create_dflt(dev, txq,
                                                  &pfifo_fast_ops,
                                                  TC_H_ROOT);
                        if (qdisc == NULL) {
                                printk(KERN_INFO "%s: activation failed\n", dev->name);
                                return;
                        }
-                       list_add_tail(&qdisc->list, &dev->qdisc_list);
+                       list_add_tail(&qdisc->list, &txq->qdisc_list);
                } else {
                        qdisc = &noqueue_qdisc;
                }
-               dev->qdisc_sleeping = qdisc;
+               txq->qdisc_sleeping = qdisc;
        }
 
        if (!netif_carrier_ok(dev))
                /* Delay activation until next carrier-on event */
                return;
 
-       spin_lock_bh(&dev->tx_queue.lock);
-       rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
-       if (dev->qdisc != &noqueue_qdisc) {
+       spin_lock_bh(&txq->lock);
+       rcu_assign_pointer(txq->qdisc, txq->qdisc_sleeping);
+       if (txq->qdisc != &noqueue_qdisc) {
                dev->trans_start = jiffies;
                dev_watchdog_up(dev);
        }
-       spin_unlock_bh(&dev->tx_queue.lock);
+       spin_unlock_bh(&txq->lock);
+}
+
+static void dev_deactivate_queue(struct net_device *dev,
+                                struct netdev_queue *dev_queue,
+                                struct Qdisc *qdisc_default)
+{
+       struct Qdisc *qdisc = dev_queue->qdisc;
+
+       if (qdisc) {
+               dev_queue->qdisc = qdisc_default;
+               qdisc_reset(qdisc);
+       }
 }
 
 void dev_deactivate(struct net_device *dev)
 {
-       struct Qdisc *qdisc;
        struct sk_buff *skb;
        int running;
 
        spin_lock_bh(&dev->tx_queue.lock);
-       qdisc = dev->qdisc;
-       dev->qdisc = &noop_qdisc;
-
-       qdisc_reset(qdisc);
+       dev_deactivate_queue(dev, &dev->tx_queue, &noop_qdisc);
 
        skb = dev->gso_skb;
        dev->gso_skb = NULL;
@@ -622,32 +634,44 @@ void dev_deactivate(struct net_device *dev)
        } while (WARN_ON_ONCE(running));
 }
 
+static void dev_init_scheduler_queue(struct net_device *dev,
+                                    struct netdev_queue *dev_queue,
+                                    struct Qdisc *qdisc)
+{
+       dev_queue->qdisc = qdisc;
+       dev_queue->qdisc_sleeping = qdisc;
+       INIT_LIST_HEAD(&dev_queue->qdisc_list);
+}
+
 void dev_init_scheduler(struct net_device *dev)
 {
        qdisc_lock_tree(dev);
-       dev->qdisc = &noop_qdisc;
-       dev->qdisc_sleeping = &noop_qdisc;
-       INIT_LIST_HEAD(&dev->qdisc_list);
+       dev_init_scheduler_queue(dev, &dev->tx_queue, &noop_qdisc);
+       dev_init_scheduler_queue(dev, &dev->rx_queue, NULL);
        qdisc_unlock_tree(dev);
 
        setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
 }
 
-void dev_shutdown(struct net_device *dev)
+static void dev_shutdown_scheduler_queue(struct net_device *dev,
+                                        struct netdev_queue *dev_queue,
+                                        struct Qdisc *qdisc_default)
 {
-       struct Qdisc *qdisc;
+       struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+
+       if (qdisc) {
+               dev_queue->qdisc = qdisc_default;
+               dev_queue->qdisc_sleeping = qdisc_default;
 
-       qdisc_lock_tree(dev);
-       qdisc = dev->qdisc_sleeping;
-       dev->qdisc = &noop_qdisc;
-       dev->qdisc_sleeping = &noop_qdisc;
-       qdisc_destroy(qdisc);
-#if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE)
-       if ((qdisc = dev->qdisc_ingress) != NULL) {
-               dev->qdisc_ingress = NULL;
                qdisc_destroy(qdisc);
        }
-#endif
+}
+
+void dev_shutdown(struct net_device *dev)
+{
+       qdisc_lock_tree(dev);
+       dev_shutdown_scheduler_queue(dev, &dev->tx_queue, &noop_qdisc);
+       dev_shutdown_scheduler_queue(dev, &dev->rx_queue, NULL);
        BUG_TRAP(!timer_pending(&dev->watchdog_timer));
        qdisc_unlock_tree(dev);
 }
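
Note that the sch_generic.c changes go beyond renaming: dev_deactivate(), dev_init_scheduler() and dev_shutdown() are each split into a per-queue helper that is applied to tx_queue (and, for init/shutdown, to rx_queue with a NULL default qdisc). That is the shape later multiqueue work needs. A hypothetical sketch of the direction, using an assumed queue array that does not exist in this tree:

/*
 * Hypothetical follow-on, NOT part of this patch: once net_device
 * carries an array of TX queues, the per-queue helpers introduced
 * here apply unchanged to each element.  num_tx_queues and _tx[]
 * are assumed fields, shown for illustration only.
 */
static void dev_init_all_scheduler_queues(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++)
                dev_init_scheduler_queue(dev, &dev->_tx[i], &noop_qdisc);
}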
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 71b73c528f9b..4093f1eaaf60 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -180,7 +180,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         * skb will be queued.
         */
        if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
-               struct Qdisc *rootq = qdisc_dev(sch)->qdisc;
+               struct Qdisc *rootq = qdisc_dev(sch)->tx_queue.qdisc;
                u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
                q->duplicate = 0;
 
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 4f3054e8e1ab..8ac05981be20 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -107,17 +107,19 @@ static struct sk_buff *
 teql_dequeue(struct Qdisc* sch)
 {
        struct teql_sched_data *dat = qdisc_priv(sch);
+       struct netdev_queue *dat_queue;
        struct sk_buff *skb;
 
        skb = __skb_dequeue(&dat->q);
+       dat_queue = &dat->m->dev->tx_queue;
        if (skb == NULL) {
-               struct net_device *m = qdisc_dev(dat->m->dev->qdisc);
+               struct net_device *m = qdisc_dev(dat_queue->qdisc);
                if (m) {
                        dat->m->slaves = sch;
                        netif_wake_queue(m);
                }
        }
-       sch->q.qlen = dat->q.qlen + dat->m->dev->qdisc->q.qlen;
+       sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen;
        return skb;
 }
 
123 125
@@ -155,7 +157,7 @@ teql_destroy(struct Qdisc* sch)
                                if (q == master->slaves) {
                                        master->slaves = NULL;
                                        spin_lock_bh(&master->dev->tx_queue.lock);
-                                       qdisc_reset(master->dev->qdisc);
+                                       qdisc_reset(master->dev->tx_queue.qdisc);
                                        spin_unlock_bh(&master->dev->tx_queue.lock);
                                }
                        }
@@ -216,7 +218,7 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
 static int
 __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
 {
-       struct teql_sched_data *q = qdisc_priv(dev->qdisc);
+       struct teql_sched_data *q = qdisc_priv(dev->tx_queue.qdisc);
        struct neighbour *mn = skb->dst->neighbour;
        struct neighbour *n = q->ncache;
 
@@ -252,7 +254,7 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
 static inline int teql_resolve(struct sk_buff *skb,
                               struct sk_buff *skb_res, struct net_device *dev)
 {
-       if (dev->qdisc == &noop_qdisc)
+       if (dev->tx_queue.qdisc == &noop_qdisc)
                return -ENODEV;
 
        if (dev->header_ops == NULL ||
@@ -284,7 +286,7 @@ restart:
        do {
                struct net_device *slave = qdisc_dev(q);
 
-               if (slave->qdisc_sleeping != q)
+               if (slave->tx_queue.qdisc_sleeping != q)
                        continue;
                if (netif_queue_stopped(slave) ||
                    __netif_subqueue_stopped(slave, subq) ||