author    David S. Miller <davem@davemloft.net>  2008-07-08 20:42:10 -0400
committer David S. Miller <davem@davemloft.net>  2008-07-08 20:42:10 -0400
commit    b0e1e6462df3c5944010b3328a546d8fe5d932cd (patch)
tree      37e3f86d09d8b37deb06cf1c142baeb8246bbf97 /net/sched
parent    555353cfa1aee293de445bfa6de43276138ddd82 (diff)
netdev: Move rest of qdisc state into struct netdev_queue

Now qdisc, qdisc_sleeping, and qdisc_list also live there.

Signed-off-by: David S. Miller <davem@davemloft.net>
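For orientation, the state being consolidated looks roughly like the sketch below. This is an editorial approximation of the relevant include/linux/netdevice.h declarations at this point in the series, not the verbatim header; unrelated members are omitted and ordering is guessed.

/* Approximate sketch -- not the verbatim header.  Before this patch,
 * qdisc, qdisc_sleeping, and qdisc_list were members of struct
 * net_device itself; after it they live in the per-queue struct.
 */
struct netdev_queue {
	spinlock_t		lock;		/* serializes queue access AND the qdisc pointer */
	struct net_device	*dev;
	struct Qdisc		*qdisc;		/* active qdisc */
	struct Qdisc		*qdisc_sleeping; /* reinstalled by dev_activate() */
	struct list_head	qdisc_list;
};

struct net_device {
	/* ... */
	struct netdev_queue	rx_queue;
	struct netdev_queue	tx_queue;	/* still a single TX queue per device here */
	/* ... */
};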
Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/cls_api.c      7
-rw-r--r--  net/sched/sch_api.c     34
-rw-r--r--  net/sched/sch_generic.c 90
-rw-r--r--  net/sched/sch_netem.c    2
-rw-r--r--  net/sched/sch_teql.c    14
5 files changed, 93 insertions(+), 54 deletions(-)
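Every hunk below performs the same mechanical substitution, so it helps to see the shape once up front. The helper below is illustrative only (it appears nowhere in the patch); it contrasts the old and new access paths:

/* Illustrative only -- not part of the patch. */
static struct Qdisc *root_qdisc_sketch(struct net_device *dev)
{
#if 0	/* before: qdisc state hangs directly off the net_device */
	return dev->qdisc_sleeping;
#else	/* after: reach it through the device's (single) TX queue */
	struct netdev_queue *dev_queue = &dev->tx_queue;

	return dev_queue->qdisc_sleeping;
#endif
}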
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index e2389f161e46..b483bbea6118 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -166,7 +166,8 @@ replay:
 
 	/* Find qdisc */
 	if (!parent) {
-		q = dev->qdisc_sleeping;
+		struct netdev_queue *dev_queue = &dev->tx_queue;
+		q = dev_queue->qdisc_sleeping;
 		parent = q->handle;
 	} else {
 		q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
@@ -390,6 +391,7 @@ static int tcf_node_dump(struct tcf_proto *tp, unsigned long n,
 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct net *net = sock_net(skb->sk);
+	struct netdev_queue *dev_queue;
 	int t;
 	int s_t;
 	struct net_device *dev;
@@ -408,8 +410,9 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
 	if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
 		return skb->len;
 
+	dev_queue = &dev->tx_queue;
 	if (!tcm->tcm_parent)
-		q = dev->qdisc_sleeping;
+		q = dev_queue->qdisc_sleeping;
 	else
 		q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
 	if (!q)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 570cef2a9c5f..2313fa7c97be 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -185,9 +185,10 @@ EXPORT_SYMBOL(unregister_qdisc);
 
 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 {
+	struct netdev_queue *dev_queue = &dev->tx_queue;
 	struct Qdisc *q;
 
-	list_for_each_entry(q, &dev->qdisc_list, list) {
+	list_for_each_entry(q, &dev_queue->qdisc_list, list) {
 		if (q->handle == handle)
 			return q;
 	}
@@ -441,6 +442,7 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
 static struct Qdisc *
 dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
 {
+	struct netdev_queue *dev_queue;
 	struct Qdisc *oqdisc;
 
 	if (dev->flags & IFF_UP)
@@ -459,8 +461,8 @@ dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
 		}
 
 	} else {
-
-		oqdisc = dev->qdisc_sleeping;
+		dev_queue = &dev->tx_queue;
+		oqdisc = dev_queue->qdisc_sleeping;
 
 		/* Prune old scheduler */
 		if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
@@ -469,8 +471,8 @@ dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
 		/* ... and graft new one */
 		if (qdisc == NULL)
 			qdisc = &noop_qdisc;
-		dev->qdisc_sleeping = qdisc;
-		dev->qdisc = &noop_qdisc;
+		dev_queue->qdisc_sleeping = qdisc;
+		dev_queue->qdisc = &noop_qdisc;
 	}
 
 	qdisc_unlock_tree(dev);
@@ -633,7 +635,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
 		}
 	}
 	qdisc_lock_tree(dev);
-	list_add_tail(&sch->list, &dev->qdisc_list);
+	list_add_tail(&sch->list, &dev_queue->qdisc_list);
 	qdisc_unlock_tree(dev);
 
 	return sch;
@@ -740,7 +742,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 				q = dev->qdisc_ingress;
 			}
 		} else {
-			q = dev->qdisc_sleeping;
+			struct netdev_queue *dev_queue = &dev->tx_queue;
+			q = dev_queue->qdisc_sleeping;
 		}
 		if (!q)
 			return -ENOENT;
@@ -814,7 +817,8 @@ replay:
 				q = dev->qdisc_ingress;
 			}
 		} else {
-			q = dev->qdisc_sleeping;
+			struct netdev_queue *dev_queue = &dev->tx_queue;
+			q = dev_queue->qdisc_sleeping;
 		}
 
 		/* It may be default qdisc, ignore it */
@@ -1015,12 +1019,14 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
 	read_lock(&dev_base_lock);
 	idx = 0;
 	for_each_netdev(&init_net, dev) {
+		struct netdev_queue *dev_queue;
 		if (idx < s_idx)
 			goto cont;
 		if (idx > s_idx)
 			s_q_idx = 0;
 		q_idx = 0;
-		list_for_each_entry(q, &dev->qdisc_list, list) {
+		dev_queue = &dev->tx_queue;
+		list_for_each_entry(q, &dev_queue->qdisc_list, list) {
 			if (q_idx < s_q_idx) {
 				q_idx++;
 				continue;
@@ -1054,6 +1060,7 @@ done:
 static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 {
 	struct net *net = sock_net(skb->sk);
+	struct netdev_queue *dev_queue;
 	struct tcmsg *tcm = NLMSG_DATA(n);
 	struct nlattr *tca[TCA_MAX + 1];
 	struct net_device *dev;
@@ -1091,6 +1098,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 
 	/* Step 1. Determine qdisc handle X:0 */
 
+	dev_queue = &dev->tx_queue;
 	if (pid != TC_H_ROOT) {
 		u32 qid1 = TC_H_MAJ(pid);
 
@@ -1101,7 +1109,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 		} else if (qid1) {
 			qid = qid1;
 		} else if (qid == 0)
-			qid = dev->qdisc_sleeping->handle;
+			qid = dev_queue->qdisc_sleeping->handle;
 
 		/* Now qid is genuine qdisc handle consistent
 		   both with parent and child.
@@ -1112,7 +1120,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 			pid = TC_H_MAKE(qid, pid);
 	} else {
 		if (qid == 0)
-			qid = dev->qdisc_sleeping->handle;
+			qid = dev_queue->qdisc_sleeping->handle;
 	}
 
 	/* OK. Locate qdisc */
@@ -1248,6 +1256,7 @@ static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walk
 static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct net *net = sock_net(skb->sk);
+	struct netdev_queue *dev_queue;
 	int t;
 	int s_t;
 	struct net_device *dev;
@@ -1266,7 +1275,8 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
 	s_t = cb->args[0];
 	t = 0;
 
-	list_for_each_entry(q, &dev->qdisc_list, list) {
+	dev_queue = &dev->tx_queue;
+	list_for_each_entry(q, &dev_queue->qdisc_list, list) {
 		if (t < s_t || !q->ops->cl_ops ||
 		    (tcm->tcm_parent &&
 		     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 804d44b00348..3223e5ba76aa 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -122,7 +122,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
  *
  * __LINK_STATE_QDISC_RUNNING guarantees only one CPU can process this
  * device at a time. queue->lock serializes queue accesses for
- * this device AND dev->qdisc pointer itself.
+ * this device AND txq->qdisc pointer itself.
  *
  * netif_tx_lock serializes accesses to device driver.
  *
@@ -138,7 +138,8 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
  */
 static inline int qdisc_restart(struct net_device *dev)
 {
-	struct Qdisc *q = dev->qdisc;
+	struct netdev_queue *txq = &dev->tx_queue;
+	struct Qdisc *q = txq->qdisc;
 	struct sk_buff *skb;
 	int ret = NETDEV_TX_BUSY;
 
@@ -148,15 +149,15 @@ static inline int qdisc_restart(struct net_device *dev)
 
 
 	/* And release queue */
-	spin_unlock(&q->dev_queue->lock);
+	spin_unlock(&txq->lock);
 
 	HARD_TX_LOCK(dev, smp_processor_id());
 	if (!netif_subqueue_stopped(dev, skb))
 		ret = dev_hard_start_xmit(skb, dev);
 	HARD_TX_UNLOCK(dev);
 
-	spin_lock(&q->dev_queue->lock);
-	q = dev->qdisc;
+	spin_lock(&txq->lock);
+	q = txq->qdisc;
 
 	switch (ret) {
 	case NETDEV_TX_OK:
@@ -207,9 +208,10 @@ void __qdisc_run(struct net_device *dev)
 static void dev_watchdog(unsigned long arg)
 {
 	struct net_device *dev = (struct net_device *)arg;
+	struct netdev_queue *txq = &dev->tx_queue;
 
 	netif_tx_lock(dev);
-	if (dev->qdisc != &noop_qdisc) {
+	if (txq->qdisc != &noop_qdisc) {
 		if (netif_device_present(dev) &&
 		    netif_running(dev) &&
 		    netif_carrier_ok(dev)) {
@@ -539,53 +541,63 @@ EXPORT_SYMBOL(qdisc_destroy);
 
 void dev_activate(struct net_device *dev)
 {
+	struct netdev_queue *txq = &dev->tx_queue;
+
 	/* No queueing discipline is attached to device;
 	   create default one i.e. pfifo_fast for devices,
 	   which need queueing and noqueue_qdisc for
 	   virtual interfaces
 	 */
 
-	if (dev->qdisc_sleeping == &noop_qdisc) {
+	if (txq->qdisc_sleeping == &noop_qdisc) {
 		struct Qdisc *qdisc;
 		if (dev->tx_queue_len) {
-			qdisc = qdisc_create_dflt(dev, &dev->tx_queue,
+			qdisc = qdisc_create_dflt(dev, txq,
 						  &pfifo_fast_ops,
 						  TC_H_ROOT);
 			if (qdisc == NULL) {
 				printk(KERN_INFO "%s: activation failed\n", dev->name);
 				return;
 			}
-			list_add_tail(&qdisc->list, &dev->qdisc_list);
+			list_add_tail(&qdisc->list, &txq->qdisc_list);
 		} else {
 			qdisc = &noqueue_qdisc;
 		}
-		dev->qdisc_sleeping = qdisc;
+		txq->qdisc_sleeping = qdisc;
 	}
 
 	if (!netif_carrier_ok(dev))
 		/* Delay activation until next carrier-on event */
 		return;
 
-	spin_lock_bh(&dev->tx_queue.lock);
-	rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
-	if (dev->qdisc != &noqueue_qdisc) {
+	spin_lock_bh(&txq->lock);
+	rcu_assign_pointer(txq->qdisc, txq->qdisc_sleeping);
+	if (txq->qdisc != &noqueue_qdisc) {
 		dev->trans_start = jiffies;
 		dev_watchdog_up(dev);
 	}
-	spin_unlock_bh(&dev->tx_queue.lock);
+	spin_unlock_bh(&txq->lock);
+}
+
+static void dev_deactivate_queue(struct net_device *dev,
+				 struct netdev_queue *dev_queue,
+				 struct Qdisc *qdisc_default)
+{
+	struct Qdisc *qdisc = dev_queue->qdisc;
+
+	if (qdisc) {
+		dev_queue->qdisc = qdisc_default;
+		qdisc_reset(qdisc);
+	}
 }
 
 void dev_deactivate(struct net_device *dev)
 {
-	struct Qdisc *qdisc;
 	struct sk_buff *skb;
 	int running;
 
 	spin_lock_bh(&dev->tx_queue.lock);
-	qdisc = dev->qdisc;
-	dev->qdisc = &noop_qdisc;
-
-	qdisc_reset(qdisc);
+	dev_deactivate_queue(dev, &dev->tx_queue, &noop_qdisc);
 
 	skb = dev->gso_skb;
 	dev->gso_skb = NULL;
@@ -622,32 +634,44 @@ void dev_deactivate(struct net_device *dev)
 	} while (WARN_ON_ONCE(running));
 }
 
+static void dev_init_scheduler_queue(struct net_device *dev,
+				     struct netdev_queue *dev_queue,
+				     struct Qdisc *qdisc)
+{
+	dev_queue->qdisc = qdisc;
+	dev_queue->qdisc_sleeping = qdisc;
+	INIT_LIST_HEAD(&dev_queue->qdisc_list);
+}
+
 void dev_init_scheduler(struct net_device *dev)
 {
 	qdisc_lock_tree(dev);
-	dev->qdisc = &noop_qdisc;
-	dev->qdisc_sleeping = &noop_qdisc;
-	INIT_LIST_HEAD(&dev->qdisc_list);
+	dev_init_scheduler_queue(dev, &dev->tx_queue, &noop_qdisc);
+	dev_init_scheduler_queue(dev, &dev->rx_queue, NULL);
 	qdisc_unlock_tree(dev);
 
 	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
 }
 
-void dev_shutdown(struct net_device *dev)
+static void dev_shutdown_scheduler_queue(struct net_device *dev,
+					 struct netdev_queue *dev_queue,
+					 struct Qdisc *qdisc_default)
 {
-	struct Qdisc *qdisc;
+	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+
+	if (qdisc) {
+		dev_queue->qdisc = qdisc_default;
+		dev_queue->qdisc_sleeping = qdisc_default;
 
-	qdisc_lock_tree(dev);
-	qdisc = dev->qdisc_sleeping;
-	dev->qdisc = &noop_qdisc;
-	dev->qdisc_sleeping = &noop_qdisc;
-	qdisc_destroy(qdisc);
-#if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE)
-	if ((qdisc = dev->qdisc_ingress) != NULL) {
-		dev->qdisc_ingress = NULL;
 		qdisc_destroy(qdisc);
 	}
-#endif
+}
+
+void dev_shutdown(struct net_device *dev)
+{
+	qdisc_lock_tree(dev);
+	dev_shutdown_scheduler_queue(dev, &dev->tx_queue, &noop_qdisc);
+	dev_shutdown_scheduler_queue(dev, &dev->rx_queue, NULL);
 	BUG_TRAP(!timer_pending(&dev->watchdog_timer));
 	qdisc_unlock_tree(dev);
 }
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 71b73c528f9b..4093f1eaaf60 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -180,7 +180,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	 * skb will be queued.
 	 */
 	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
-		struct Qdisc *rootq = qdisc_dev(sch)->qdisc;
+		struct Qdisc *rootq = qdisc_dev(sch)->tx_queue.qdisc;
 		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
 		q->duplicate = 0;
 
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 4f3054e8e1ab..8ac05981be20 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -107,17 +107,19 @@ static struct sk_buff *
 teql_dequeue(struct Qdisc* sch)
 {
 	struct teql_sched_data *dat = qdisc_priv(sch);
+	struct netdev_queue *dat_queue;
 	struct sk_buff *skb;
 
 	skb = __skb_dequeue(&dat->q);
+	dat_queue = &dat->m->dev->tx_queue;
 	if (skb == NULL) {
-		struct net_device *m = qdisc_dev(dat->m->dev->qdisc);
+		struct net_device *m = qdisc_dev(dat_queue->qdisc);
 		if (m) {
 			dat->m->slaves = sch;
 			netif_wake_queue(m);
 		}
 	}
-	sch->q.qlen = dat->q.qlen + dat->m->dev->qdisc->q.qlen;
+	sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen;
 	return skb;
 }
 
@@ -155,7 +157,7 @@ teql_destroy(struct Qdisc* sch)
 			if (q == master->slaves) {
 				master->slaves = NULL;
 				spin_lock_bh(&master->dev->tx_queue.lock);
-				qdisc_reset(master->dev->qdisc);
+				qdisc_reset(master->dev->tx_queue.qdisc);
 				spin_unlock_bh(&master->dev->tx_queue.lock);
 			}
 		}
@@ -216,7 +218,7 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
 static int
 __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
 {
-	struct teql_sched_data *q = qdisc_priv(dev->qdisc);
+	struct teql_sched_data *q = qdisc_priv(dev->tx_queue.qdisc);
 	struct neighbour *mn = skb->dst->neighbour;
 	struct neighbour *n = q->ncache;
 
@@ -252,7 +254,7 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
 static inline int teql_resolve(struct sk_buff *skb,
 			       struct sk_buff *skb_res, struct net_device *dev)
 {
-	if (dev->qdisc == &noop_qdisc)
+	if (dev->tx_queue.qdisc == &noop_qdisc)
 		return -ENODEV;
 
 	if (dev->header_ops == NULL ||
@@ -284,7 +286,7 @@ restart:
 	do {
 		struct net_device *slave = qdisc_dev(q);
 
-		if (slave->qdisc_sleeping != q)
+		if (slave->tx_queue.qdisc_sleeping != q)
 			continue;
 		if (netif_queue_stopped(slave) ||
 		    __netif_subqueue_stopped(slave, subq) ||