diff options
author     David S. Miller <davem@davemloft.net>   2008-07-17 03:34:19 -0400
committer  David S. Miller <davem@davemloft.net>   2008-07-17 22:21:00 -0400
commit     e8a0464cc950972824e2e128028ae3db666ec1ed (patch)
tree       5022b95396c0f3b313531bc39b19543c03551b9a /net/sched
parent     070825b3840a743e21ebcc44f8279708a4fed977 (diff)
netdev: Allocate multiple queues for TX.
alloc_netdev_mq() now allocates an array of netdev_queue
structures for TX, based upon the queue_count argument.
Furthermore, all accesses to the TX queues are now vectored
through the netdev_get_tx_queue() and netdev_for_each_tx_queue()
interfaces. This makes it easy to grep the tree for all
things that want to get to a TX queue of a net device.
Problem spots which are not really multiqueue aware yet, and
only work with one queue, can easily be spotted by grepping
for all netdev_get_tx_queue() calls that pass in a zero index.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched')
-rw-r--r--   net/sched/cls_api.c     |   4
-rw-r--r--   net/sched/sch_api.c     |  32
-rw-r--r--   net/sched/sch_generic.c | 178
-rw-r--r--   net/sched/sch_teql.c    |  21
4 files changed, 169 insertions(+), 66 deletions(-)
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index b483bbea6118..d0b0a9b14394 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
@@ -166,7 +166,7 @@ replay: | |||
166 | 166 | ||
167 | /* Find qdisc */ | 167 | /* Find qdisc */ |
168 | if (!parent) { | 168 | if (!parent) { |
169 | struct netdev_queue *dev_queue = &dev->tx_queue; | 169 | struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0); |
170 | q = dev_queue->qdisc_sleeping; | 170 | q = dev_queue->qdisc_sleeping; |
171 | parent = q->handle; | 171 | parent = q->handle; |
172 | } else { | 172 | } else { |
@@ -410,7 +410,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) | |||
410 | if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) | 410 | if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) |
411 | return skb->len; | 411 | return skb->len; |
412 | 412 | ||
413 | dev_queue = &dev->tx_queue; | 413 | dev_queue = netdev_get_tx_queue(dev, 0); |
414 | if (!tcm->tcm_parent) | 414 | if (!tcm->tcm_parent) |
415 | q = dev_queue->qdisc_sleeping; | 415 | q = dev_queue->qdisc_sleeping; |
416 | else | 416 | else |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 95873f8dd37c..830ccc544a15 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -183,9 +183,8 @@ EXPORT_SYMBOL(unregister_qdisc); | |||
183 | (root qdisc, all its children, children of children etc.) | 183 | (root qdisc, all its children, children of children etc.) |
184 | */ | 184 | */ |
185 | 185 | ||
186 | struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) | 186 | static struct Qdisc *__qdisc_lookup(struct netdev_queue *dev_queue, u32 handle) |
187 | { | 187 | { |
188 | struct netdev_queue *dev_queue = &dev->tx_queue; | ||
189 | struct Qdisc *q; | 188 | struct Qdisc *q; |
190 | 189 | ||
191 | list_for_each_entry(q, &dev_queue->qdisc_list, list) { | 190 | list_for_each_entry(q, &dev_queue->qdisc_list, list) { |
@@ -195,6 +194,19 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) | |||
195 | return NULL; | 194 | return NULL; |
196 | } | 195 | } |
197 | 196 | ||
197 | struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) | ||
198 | { | ||
199 | unsigned int i; | ||
200 | |||
201 | for (i = 0; i < dev->num_tx_queues; i++) { | ||
202 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | ||
203 | struct Qdisc *q = __qdisc_lookup(txq, handle); | ||
204 | if (q) | ||
205 | return q; | ||
206 | } | ||
207 | return NULL; | ||
208 | } | ||
209 | |||
198 | static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid) | 210 | static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid) |
199 | { | 211 | { |
200 | unsigned long cl; | 212 | unsigned long cl; |
@@ -462,7 +474,7 @@ dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc) | |||
462 | } | 474 | } |
463 | 475 | ||
464 | } else { | 476 | } else { |
465 | dev_queue = &dev->tx_queue; | 477 | dev_queue = netdev_get_tx_queue(dev, 0); |
466 | oqdisc = dev_queue->qdisc_sleeping; | 478 | oqdisc = dev_queue->qdisc_sleeping; |
467 | 479 | ||
468 | /* Prune old scheduler */ | 480 | /* Prune old scheduler */ |
@@ -742,7 +754,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
742 | q = dev->rx_queue.qdisc; | 754 | q = dev->rx_queue.qdisc; |
743 | } | 755 | } |
744 | } else { | 756 | } else { |
745 | struct netdev_queue *dev_queue = &dev->tx_queue; | 757 | struct netdev_queue *dev_queue; |
758 | dev_queue = netdev_get_tx_queue(dev, 0); | ||
746 | q = dev_queue->qdisc_sleeping; | 759 | q = dev_queue->qdisc_sleeping; |
747 | } | 760 | } |
748 | if (!q) | 761 | if (!q) |
@@ -817,7 +830,8 @@ replay: | |||
817 | q = dev->rx_queue.qdisc; | 830 | q = dev->rx_queue.qdisc; |
818 | } | 831 | } |
819 | } else { | 832 | } else { |
820 | struct netdev_queue *dev_queue = &dev->tx_queue; | 833 | struct netdev_queue *dev_queue; |
834 | dev_queue = netdev_get_tx_queue(dev, 0); | ||
821 | q = dev_queue->qdisc_sleeping; | 835 | q = dev_queue->qdisc_sleeping; |
822 | } | 836 | } |
823 | 837 | ||
@@ -899,7 +913,7 @@ create_n_graft: | |||
899 | tcm->tcm_parent, tcm->tcm_parent, | 913 | tcm->tcm_parent, tcm->tcm_parent, |
900 | tca, &err); | 914 | tca, &err); |
901 | else | 915 | else |
902 | q = qdisc_create(dev, &dev->tx_queue, | 916 | q = qdisc_create(dev, netdev_get_tx_queue(dev, 0), |
903 | tcm->tcm_parent, tcm->tcm_handle, | 917 | tcm->tcm_parent, tcm->tcm_handle, |
904 | tca, &err); | 918 | tca, &err); |
905 | if (q == NULL) { | 919 | if (q == NULL) { |
@@ -1025,7 +1039,7 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) | |||
1025 | if (idx > s_idx) | 1039 | if (idx > s_idx) |
1026 | s_q_idx = 0; | 1040 | s_q_idx = 0; |
1027 | q_idx = 0; | 1041 | q_idx = 0; |
1028 | dev_queue = &dev->tx_queue; | 1042 | dev_queue = netdev_get_tx_queue(dev, 0); |
1029 | list_for_each_entry(q, &dev_queue->qdisc_list, list) { | 1043 | list_for_each_entry(q, &dev_queue->qdisc_list, list) { |
1030 | if (q_idx < s_q_idx) { | 1044 | if (q_idx < s_q_idx) { |
1031 | q_idx++; | 1045 | q_idx++; |
@@ -1098,7 +1112,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
1098 | 1112 | ||
1099 | /* Step 1. Determine qdisc handle X:0 */ | 1113 | /* Step 1. Determine qdisc handle X:0 */ |
1100 | 1114 | ||
1101 | dev_queue = &dev->tx_queue; | 1115 | dev_queue = netdev_get_tx_queue(dev, 0); |
1102 | if (pid != TC_H_ROOT) { | 1116 | if (pid != TC_H_ROOT) { |
1103 | u32 qid1 = TC_H_MAJ(pid); | 1117 | u32 qid1 = TC_H_MAJ(pid); |
1104 | 1118 | ||
@@ -1275,7 +1289,7 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) | |||
1275 | s_t = cb->args[0]; | 1289 | s_t = cb->args[0]; |
1276 | t = 0; | 1290 | t = 0; |
1277 | 1291 | ||
1278 | dev_queue = &dev->tx_queue; | 1292 | dev_queue = netdev_get_tx_queue(dev, 0); |
1279 | list_for_each_entry(q, &dev_queue->qdisc_list, list) { | 1293 | list_for_each_entry(q, &dev_queue->qdisc_list, list) { |
1280 | if (t < s_t || !q->ops->cl_ops || | 1294 | if (t < s_t || !q->ops->cl_ops || |
1281 | (tcm->tcm_parent && | 1295 | (tcm->tcm_parent && |
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 243de935b182..4e2b865cbba0 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -40,20 +40,30 @@ | |||
40 | */ | 40 | */ |
41 | 41 | ||
42 | void qdisc_lock_tree(struct net_device *dev) | 42 | void qdisc_lock_tree(struct net_device *dev) |
43 | __acquires(dev->tx_queue.lock) | ||
44 | __acquires(dev->rx_queue.lock) | 43 | __acquires(dev->rx_queue.lock) |
45 | { | 44 | { |
46 | spin_lock_bh(&dev->tx_queue.lock); | 45 | unsigned int i; |
46 | |||
47 | local_bh_disable(); | ||
48 | for (i = 0; i < dev->num_tx_queues; i++) { | ||
49 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | ||
50 | spin_lock(&txq->lock); | ||
51 | } | ||
47 | spin_lock(&dev->rx_queue.lock); | 52 | spin_lock(&dev->rx_queue.lock); |
48 | } | 53 | } |
49 | EXPORT_SYMBOL(qdisc_lock_tree); | 54 | EXPORT_SYMBOL(qdisc_lock_tree); |
50 | 55 | ||
51 | void qdisc_unlock_tree(struct net_device *dev) | 56 | void qdisc_unlock_tree(struct net_device *dev) |
52 | __releases(dev->rx_queue.lock) | 57 | __releases(dev->rx_queue.lock) |
53 | __releases(dev->tx_queue.lock) | ||
54 | { | 58 | { |
59 | unsigned int i; | ||
60 | |||
55 | spin_unlock(&dev->rx_queue.lock); | 61 | spin_unlock(&dev->rx_queue.lock); |
56 | spin_unlock_bh(&dev->tx_queue.lock); | 62 | for (i = 0; i < dev->num_tx_queues; i++) { |
63 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | ||
64 | spin_unlock(&txq->lock); | ||
65 | } | ||
66 | local_bh_enable(); | ||
57 | } | 67 | } |
58 | EXPORT_SYMBOL(qdisc_unlock_tree); | 68 | EXPORT_SYMBOL(qdisc_unlock_tree); |
59 | 69 | ||
@@ -212,22 +222,37 @@ void __qdisc_run(struct netdev_queue *txq) | |||
212 | static void dev_watchdog(unsigned long arg) | 222 | static void dev_watchdog(unsigned long arg) |
213 | { | 223 | { |
214 | struct net_device *dev = (struct net_device *)arg; | 224 | struct net_device *dev = (struct net_device *)arg; |
215 | struct netdev_queue *txq = &dev->tx_queue; | ||
216 | 225 | ||
217 | netif_tx_lock(dev); | 226 | netif_tx_lock(dev); |
218 | if (txq->qdisc != &noop_qdisc) { | 227 | if (!qdisc_tx_is_noop(dev)) { |
219 | if (netif_device_present(dev) && | 228 | if (netif_device_present(dev) && |
220 | netif_running(dev) && | 229 | netif_running(dev) && |
221 | netif_carrier_ok(dev)) { | 230 | netif_carrier_ok(dev)) { |
222 | if (netif_queue_stopped(dev) && | 231 | int some_queue_stopped = 0; |
223 | time_after(jiffies, dev->trans_start + dev->watchdog_timeo)) { | 232 | unsigned int i; |
233 | |||
234 | for (i = 0; i < dev->num_tx_queues; i++) { | ||
235 | struct netdev_queue *txq; | ||
236 | |||
237 | txq = netdev_get_tx_queue(dev, i); | ||
238 | if (netif_tx_queue_stopped(txq)) { | ||
239 | some_queue_stopped = 1; | ||
240 | break; | ||
241 | } | ||
242 | } | ||
224 | 243 | ||
225 | printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n", | 244 | if (some_queue_stopped && |
245 | time_after(jiffies, (dev->trans_start + | ||
246 | dev->watchdog_timeo))) { | ||
247 | printk(KERN_INFO "NETDEV WATCHDOG: %s: " | ||
248 | "transmit timed out\n", | ||
226 | dev->name); | 249 | dev->name); |
227 | dev->tx_timeout(dev); | 250 | dev->tx_timeout(dev); |
228 | WARN_ON_ONCE(1); | 251 | WARN_ON_ONCE(1); |
229 | } | 252 | } |
230 | if (!mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + dev->watchdog_timeo))) | 253 | if (!mod_timer(&dev->watchdog_timer, |
254 | round_jiffies(jiffies + | ||
255 | dev->watchdog_timeo))) | ||
231 | dev_hold(dev); | 256 | dev_hold(dev); |
232 | } | 257 | } |
233 | } | 258 | } |
@@ -542,9 +567,55 @@ void qdisc_destroy(struct Qdisc *qdisc) | |||
542 | } | 567 | } |
543 | EXPORT_SYMBOL(qdisc_destroy); | 568 | EXPORT_SYMBOL(qdisc_destroy); |
544 | 569 | ||
570 | static bool dev_all_qdisc_sleeping_noop(struct net_device *dev) | ||
571 | { | ||
572 | unsigned int i; | ||
573 | |||
574 | for (i = 0; i < dev->num_tx_queues; i++) { | ||
575 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | ||
576 | |||
577 | if (txq->qdisc_sleeping != &noop_qdisc) | ||
578 | return false; | ||
579 | } | ||
580 | return true; | ||
581 | } | ||
582 | |||
583 | static void attach_one_default_qdisc(struct net_device *dev, | ||
584 | struct netdev_queue *dev_queue, | ||
585 | void *_unused) | ||
586 | { | ||
587 | struct Qdisc *qdisc; | ||
588 | |||
589 | if (dev->tx_queue_len) { | ||
590 | qdisc = qdisc_create_dflt(dev, dev_queue, | ||
591 | &pfifo_fast_ops, TC_H_ROOT); | ||
592 | if (!qdisc) { | ||
593 | printk(KERN_INFO "%s: activation failed\n", dev->name); | ||
594 | return; | ||
595 | } | ||
596 | list_add_tail(&qdisc->list, &dev_queue->qdisc_list); | ||
597 | } else { | ||
598 | qdisc = &noqueue_qdisc; | ||
599 | } | ||
600 | dev_queue->qdisc_sleeping = qdisc; | ||
601 | } | ||
602 | |||
603 | static void transition_one_qdisc(struct net_device *dev, | ||
604 | struct netdev_queue *dev_queue, | ||
605 | void *_need_watchdog) | ||
606 | { | ||
607 | int *need_watchdog_p = _need_watchdog; | ||
608 | |||
609 | spin_lock_bh(&dev_queue->lock); | ||
610 | rcu_assign_pointer(dev_queue->qdisc, dev_queue->qdisc_sleeping); | ||
611 | if (dev_queue->qdisc != &noqueue_qdisc) | ||
612 | *need_watchdog_p = 1; | ||
613 | spin_unlock_bh(&dev_queue->lock); | ||
614 | } | ||
615 | |||
545 | void dev_activate(struct net_device *dev) | 616 | void dev_activate(struct net_device *dev) |
546 | { | 617 | { |
547 | struct netdev_queue *txq = &dev->tx_queue; | 618 | int need_watchdog; |
548 | 619 | ||
549 | /* No queueing discipline is attached to device; | 620 | /* No queueing discipline is attached to device; |
550 | create default one i.e. pfifo_fast for devices, | 621 | create default one i.e. pfifo_fast for devices, |
@@ -552,39 +623,27 @@ void dev_activate(struct net_device *dev) | |||
552 | virtual interfaces | 623 | virtual interfaces |
553 | */ | 624 | */ |
554 | 625 | ||
555 | if (txq->qdisc_sleeping == &noop_qdisc) { | 626 | if (dev_all_qdisc_sleeping_noop(dev)) |
556 | struct Qdisc *qdisc; | 627 | netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); |
557 | if (dev->tx_queue_len) { | ||
558 | qdisc = qdisc_create_dflt(dev, txq, | ||
559 | &pfifo_fast_ops, | ||
560 | TC_H_ROOT); | ||
561 | if (qdisc == NULL) { | ||
562 | printk(KERN_INFO "%s: activation failed\n", dev->name); | ||
563 | return; | ||
564 | } | ||
565 | list_add_tail(&qdisc->list, &txq->qdisc_list); | ||
566 | } else { | ||
567 | qdisc = &noqueue_qdisc; | ||
568 | } | ||
569 | txq->qdisc_sleeping = qdisc; | ||
570 | } | ||
571 | 628 | ||
572 | if (!netif_carrier_ok(dev)) | 629 | if (!netif_carrier_ok(dev)) |
573 | /* Delay activation until next carrier-on event */ | 630 | /* Delay activation until next carrier-on event */ |
574 | return; | 631 | return; |
575 | 632 | ||
576 | spin_lock_bh(&txq->lock); | 633 | need_watchdog = 0; |
577 | rcu_assign_pointer(txq->qdisc, txq->qdisc_sleeping); | 634 | netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog); |
578 | if (txq->qdisc != &noqueue_qdisc) { | 635 | |
636 | if (need_watchdog) { | ||
579 | dev->trans_start = jiffies; | 637 | dev->trans_start = jiffies; |
580 | dev_watchdog_up(dev); | 638 | dev_watchdog_up(dev); |
581 | } | 639 | } |
582 | spin_unlock_bh(&txq->lock); | ||
583 | } | 640 | } |
584 | 641 | ||
585 | static void dev_deactivate_queue(struct netdev_queue *dev_queue, | 642 | static void dev_deactivate_queue(struct net_device *dev, |
586 | struct Qdisc *qdisc_default) | 643 | struct netdev_queue *dev_queue, |
644 | void *_qdisc_default) | ||
587 | { | 645 | { |
646 | struct Qdisc *qdisc_default = _qdisc_default; | ||
588 | struct Qdisc *qdisc; | 647 | struct Qdisc *qdisc; |
589 | struct sk_buff *skb; | 648 | struct sk_buff *skb; |
590 | 649 | ||
@@ -603,12 +662,35 @@ static void dev_deactivate_queue(struct netdev_queue *dev_queue, | |||
603 | kfree_skb(skb); | 662 | kfree_skb(skb); |
604 | } | 663 | } |
605 | 664 | ||
665 | static bool some_qdisc_is_running(struct net_device *dev, int lock) | ||
666 | { | ||
667 | unsigned int i; | ||
668 | |||
669 | for (i = 0; i < dev->num_tx_queues; i++) { | ||
670 | struct netdev_queue *dev_queue; | ||
671 | int val; | ||
672 | |||
673 | dev_queue = netdev_get_tx_queue(dev, i); | ||
674 | |||
675 | if (lock) | ||
676 | spin_lock_bh(&dev_queue->lock); | ||
677 | |||
678 | val = test_bit(__QUEUE_STATE_QDISC_RUNNING, &dev_queue->state); | ||
679 | |||
680 | if (lock) | ||
681 | spin_unlock_bh(&dev_queue->lock); | ||
682 | |||
683 | if (val) | ||
684 | return true; | ||
685 | } | ||
686 | return false; | ||
687 | } | ||
688 | |||
606 | void dev_deactivate(struct net_device *dev) | 689 | void dev_deactivate(struct net_device *dev) |
607 | { | 690 | { |
608 | struct netdev_queue *dev_queue = &dev->tx_queue; | 691 | bool running; |
609 | int running; | ||
610 | 692 | ||
611 | dev_deactivate_queue(dev_queue, &noop_qdisc); | 693 | netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc); |
612 | 694 | ||
613 | dev_watchdog_down(dev); | 695 | dev_watchdog_down(dev); |
614 | 696 | ||
@@ -617,17 +699,14 @@ void dev_deactivate(struct net_device *dev) | |||
617 | 699 | ||
618 | /* Wait for outstanding qdisc_run calls. */ | 700 | /* Wait for outstanding qdisc_run calls. */ |
619 | do { | 701 | do { |
620 | while (test_bit(__QUEUE_STATE_QDISC_RUNNING, &dev_queue->state)) | 702 | while (some_qdisc_is_running(dev, 0)) |
621 | yield(); | 703 | yield(); |
622 | 704 | ||
623 | /* | 705 | /* |
624 | * Double-check inside queue lock to ensure that all effects | 706 | * Double-check inside queue lock to ensure that all effects |
625 | * of the queue run are visible when we return. | 707 | * of the queue run are visible when we return. |
626 | */ | 708 | */ |
627 | spin_lock_bh(&dev_queue->lock); | 709 | running = some_qdisc_is_running(dev, 1); |
628 | running = test_bit(__QUEUE_STATE_QDISC_RUNNING, | ||
629 | &dev_queue->state); | ||
630 | spin_unlock_bh(&dev_queue->lock); | ||
631 | 710 | ||
632 | /* | 711 | /* |
633 | * The running flag should never be set at this point because | 712 | * The running flag should never be set at this point because |
@@ -642,8 +721,10 @@ void dev_deactivate(struct net_device *dev) | |||
642 | 721 | ||
643 | static void dev_init_scheduler_queue(struct net_device *dev, | 722 | static void dev_init_scheduler_queue(struct net_device *dev, |
644 | struct netdev_queue *dev_queue, | 723 | struct netdev_queue *dev_queue, |
645 | struct Qdisc *qdisc) | 724 | void *_qdisc) |
646 | { | 725 | { |
726 | struct Qdisc *qdisc = _qdisc; | ||
727 | |||
647 | dev_queue->qdisc = qdisc; | 728 | dev_queue->qdisc = qdisc; |
648 | dev_queue->qdisc_sleeping = qdisc; | 729 | dev_queue->qdisc_sleeping = qdisc; |
649 | INIT_LIST_HEAD(&dev_queue->qdisc_list); | 730 | INIT_LIST_HEAD(&dev_queue->qdisc_list); |
@@ -652,18 +733,19 @@ static void dev_init_scheduler_queue(struct net_device *dev, | |||
652 | void dev_init_scheduler(struct net_device *dev) | 733 | void dev_init_scheduler(struct net_device *dev) |
653 | { | 734 | { |
654 | qdisc_lock_tree(dev); | 735 | qdisc_lock_tree(dev); |
655 | dev_init_scheduler_queue(dev, &dev->tx_queue, &noop_qdisc); | 736 | netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc); |
656 | dev_init_scheduler_queue(dev, &dev->rx_queue, NULL); | 737 | dev_init_scheduler_queue(dev, &dev->rx_queue, NULL); |
657 | qdisc_unlock_tree(dev); | 738 | qdisc_unlock_tree(dev); |
658 | 739 | ||
659 | setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev); | 740 | setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev); |
660 | } | 741 | } |
661 | 742 | ||
662 | static void dev_shutdown_scheduler_queue(struct net_device *dev, | 743 | static void shutdown_scheduler_queue(struct net_device *dev, |
663 | struct netdev_queue *dev_queue, | 744 | struct netdev_queue *dev_queue, |
664 | struct Qdisc *qdisc_default) | 745 | void *_qdisc_default) |
665 | { | 746 | { |
666 | struct Qdisc *qdisc = dev_queue->qdisc_sleeping; | 747 | struct Qdisc *qdisc = dev_queue->qdisc_sleeping; |
748 | struct Qdisc *qdisc_default = _qdisc_default; | ||
667 | 749 | ||
668 | if (qdisc) { | 750 | if (qdisc) { |
669 | dev_queue->qdisc = qdisc_default; | 751 | dev_queue->qdisc = qdisc_default; |
@@ -676,8 +758,8 @@ static void dev_shutdown_scheduler_queue(struct net_device *dev, | |||
676 | void dev_shutdown(struct net_device *dev) | 758 | void dev_shutdown(struct net_device *dev) |
677 | { | 759 | { |
678 | qdisc_lock_tree(dev); | 760 | qdisc_lock_tree(dev); |
679 | dev_shutdown_scheduler_queue(dev, &dev->tx_queue, &noop_qdisc); | 761 | netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc); |
680 | dev_shutdown_scheduler_queue(dev, &dev->rx_queue, NULL); | 762 | shutdown_scheduler_queue(dev, &dev->rx_queue, NULL); |
681 | BUG_TRAP(!timer_pending(&dev->watchdog_timer)); | 763 | BUG_TRAP(!timer_pending(&dev->watchdog_timer)); |
682 | qdisc_unlock_tree(dev); | 764 | qdisc_unlock_tree(dev); |
683 | } | 765 | } |
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index 8ac05981be20..44a2c3451f4d 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c | |||
@@ -111,7 +111,7 @@ teql_dequeue(struct Qdisc* sch) | |||
111 | struct sk_buff *skb; | 111 | struct sk_buff *skb; |
112 | 112 | ||
113 | skb = __skb_dequeue(&dat->q); | 113 | skb = __skb_dequeue(&dat->q); |
114 | dat_queue = &dat->m->dev->tx_queue; | 114 | dat_queue = netdev_get_tx_queue(dat->m->dev, 0); |
115 | if (skb == NULL) { | 115 | if (skb == NULL) { |
116 | struct net_device *m = qdisc_dev(dat_queue->qdisc); | 116 | struct net_device *m = qdisc_dev(dat_queue->qdisc); |
117 | if (m) { | 117 | if (m) { |
@@ -155,10 +155,13 @@ teql_destroy(struct Qdisc* sch) | |||
155 | if (q == master->slaves) { | 155 | if (q == master->slaves) { |
156 | master->slaves = NEXT_SLAVE(q); | 156 | master->slaves = NEXT_SLAVE(q); |
157 | if (q == master->slaves) { | 157 | if (q == master->slaves) { |
158 | struct netdev_queue *txq; | ||
159 | |||
160 | txq = netdev_get_tx_queue(master->dev, 0); | ||
158 | master->slaves = NULL; | 161 | master->slaves = NULL; |
159 | spin_lock_bh(&master->dev->tx_queue.lock); | 162 | spin_lock_bh(&txq->lock); |
160 | qdisc_reset(master->dev->tx_queue.qdisc); | 163 | qdisc_reset(txq->qdisc); |
161 | spin_unlock_bh(&master->dev->tx_queue.lock); | 164 | spin_unlock_bh(&txq->lock); |
162 | } | 165 | } |
163 | } | 166 | } |
164 | skb_queue_purge(&dat->q); | 167 | skb_queue_purge(&dat->q); |
@@ -218,7 +221,8 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt) | |||
218 | static int | 221 | static int |
219 | __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev) | 222 | __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev) |
220 | { | 223 | { |
221 | struct teql_sched_data *q = qdisc_priv(dev->tx_queue.qdisc); | 224 | struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0); |
225 | struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc); | ||
222 | struct neighbour *mn = skb->dst->neighbour; | 226 | struct neighbour *mn = skb->dst->neighbour; |
223 | struct neighbour *n = q->ncache; | 227 | struct neighbour *n = q->ncache; |
224 | 228 | ||
@@ -254,7 +258,8 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device * | |||
254 | static inline int teql_resolve(struct sk_buff *skb, | 258 | static inline int teql_resolve(struct sk_buff *skb, |
255 | struct sk_buff *skb_res, struct net_device *dev) | 259 | struct sk_buff *skb_res, struct net_device *dev) |
256 | { | 260 | { |
257 | if (dev->tx_queue.qdisc == &noop_qdisc) | 261 | struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); |
262 | if (txq->qdisc == &noop_qdisc) | ||
258 | return -ENODEV; | 263 | return -ENODEV; |
259 | 264 | ||
260 | if (dev->header_ops == NULL || | 265 | if (dev->header_ops == NULL || |
@@ -285,8 +290,10 @@ restart: | |||
285 | 290 | ||
286 | do { | 291 | do { |
287 | struct net_device *slave = qdisc_dev(q); | 292 | struct net_device *slave = qdisc_dev(q); |
293 | struct netdev_queue *slave_txq; | ||
288 | 294 | ||
289 | if (slave->tx_queue.qdisc_sleeping != q) | 295 | slave_txq = netdev_get_tx_queue(slave, 0); |
296 | if (slave_txq->qdisc_sleeping != q) | ||
290 | continue; | 297 | continue; |
291 | if (netif_queue_stopped(slave) || | 298 | if (netif_queue_stopped(slave) || |
292 | __netif_subqueue_stopped(slave, subq) || | 299 | __netif_subqueue_stopped(slave, subq) || |