path: root/net/sched/sch_generic.c
author     David S. Miller <davem@davemloft.net>  2008-07-17 03:34:19 -0400
committer  David S. Miller <davem@davemloft.net>  2008-07-17 22:21:00 -0400
commit     e8a0464cc950972824e2e128028ae3db666ec1ed (patch)
tree       5022b95396c0f3b313531bc39b19543c03551b9a /net/sched/sch_generic.c
parent     070825b3840a743e21ebcc44f8279708a4fed977 (diff)
netdev: Allocate multiple queues for TX.
alloc_netdev_mq() now allocates an array of netdev_queue structures for TX,
based upon the queue_count argument.

Furthermore, all accesses to the TX queues are now vectored through the
netdev_get_tx_queue() and netdev_for_each_tx_queue() interfaces.  This makes
it easy to grep the tree for all things that want to get to a TX queue of a
net device.

Problem spots which are not really multiqueue aware yet, and only work with
one queue, can easily be spotted by grepping for all netdev_get_tx_queue()
calls that pass in a zero index.

Signed-off-by: David S. Miller <davem@davemloft.net>
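Reader's note: the netdev_get_tx_queue() and netdev_for_each_tx_queue() accessors
used throughout the changes below live in include/linux/netdevice.h and are not
part of this file's diff. A minimal sketch of how they are presumed to work,
assuming a per-device queue array field named _tx (only num_tx_queues is visible
in the hunks below):

/* Return the index-th TX queue of a device (sketch; _tx is an assumed field name). */
static inline struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
							unsigned int index)
{
	return &dev->_tx[index];
}

/* Apply f() to every TX queue, threading an opaque argument through;
 * this is the iterator the qdisc setup/teardown paths below rely on. */
static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}

With all queue access funneled through these two helpers, code that still assumes
a single queue shows up as netdev_get_tx_queue(dev, 0) call sites, which is what
the grep hint in the changelog refers to.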
Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--  net/sched/sch_generic.c  178
1 file changed, 130 insertions(+), 48 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 243de935b182..4e2b865cbba0 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -40,20 +40,30 @@
  */
 
 void qdisc_lock_tree(struct net_device *dev)
-	__acquires(dev->tx_queue.lock)
 	__acquires(dev->rx_queue.lock)
 {
-	spin_lock_bh(&dev->tx_queue.lock);
+	unsigned int i;
+
+	local_bh_disable();
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		spin_lock(&txq->lock);
+	}
 	spin_lock(&dev->rx_queue.lock);
 }
 EXPORT_SYMBOL(qdisc_lock_tree);
 
 void qdisc_unlock_tree(struct net_device *dev)
 	__releases(dev->rx_queue.lock)
-	__releases(dev->tx_queue.lock)
 {
+	unsigned int i;
+
 	spin_unlock(&dev->rx_queue.lock);
-	spin_unlock_bh(&dev->tx_queue.lock);
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		spin_unlock(&txq->lock);
+	}
+	local_bh_enable();
 }
 EXPORT_SYMBOL(qdisc_unlock_tree);
 
@@ -212,22 +222,37 @@ void __qdisc_run(struct netdev_queue *txq)
 static void dev_watchdog(unsigned long arg)
 {
 	struct net_device *dev = (struct net_device *)arg;
-	struct netdev_queue *txq = &dev->tx_queue;
 
 	netif_tx_lock(dev);
-	if (txq->qdisc != &noop_qdisc) {
+	if (!qdisc_tx_is_noop(dev)) {
 		if (netif_device_present(dev) &&
 		    netif_running(dev) &&
 		    netif_carrier_ok(dev)) {
-			if (netif_queue_stopped(dev) &&
-			    time_after(jiffies, dev->trans_start + dev->watchdog_timeo)) {
+			int some_queue_stopped = 0;
+			unsigned int i;
+
+			for (i = 0; i < dev->num_tx_queues; i++) {
+				struct netdev_queue *txq;
+
+				txq = netdev_get_tx_queue(dev, i);
+				if (netif_tx_queue_stopped(txq)) {
+					some_queue_stopped = 1;
+					break;
+				}
+			}
 
-				printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n",
+			if (some_queue_stopped &&
+			    time_after(jiffies, (dev->trans_start +
+						 dev->watchdog_timeo))) {
+				printk(KERN_INFO "NETDEV WATCHDOG: %s: "
+				       "transmit timed out\n",
 				       dev->name);
 				dev->tx_timeout(dev);
 				WARN_ON_ONCE(1);
 			}
-			if (!mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + dev->watchdog_timeo)))
+			if (!mod_timer(&dev->watchdog_timer,
+				       round_jiffies(jiffies +
+						     dev->watchdog_timeo)))
 				dev_hold(dev);
 		}
 	}
@@ -542,9 +567,55 @@ void qdisc_destroy(struct Qdisc *qdisc)
 }
 EXPORT_SYMBOL(qdisc_destroy);
 
+static bool dev_all_qdisc_sleeping_noop(struct net_device *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+		if (txq->qdisc_sleeping != &noop_qdisc)
+			return false;
+	}
+	return true;
+}
+
+static void attach_one_default_qdisc(struct net_device *dev,
+				     struct netdev_queue *dev_queue,
+				     void *_unused)
+{
+	struct Qdisc *qdisc;
+
+	if (dev->tx_queue_len) {
+		qdisc = qdisc_create_dflt(dev, dev_queue,
+					  &pfifo_fast_ops, TC_H_ROOT);
+		if (!qdisc) {
+			printk(KERN_INFO "%s: activation failed\n", dev->name);
+			return;
+		}
+		list_add_tail(&qdisc->list, &dev_queue->qdisc_list);
+	} else {
+		qdisc = &noqueue_qdisc;
+	}
+	dev_queue->qdisc_sleeping = qdisc;
+}
+
+static void transition_one_qdisc(struct net_device *dev,
+				 struct netdev_queue *dev_queue,
+				 void *_need_watchdog)
+{
+	int *need_watchdog_p = _need_watchdog;
+
+	spin_lock_bh(&dev_queue->lock);
+	rcu_assign_pointer(dev_queue->qdisc, dev_queue->qdisc_sleeping);
+	if (dev_queue->qdisc != &noqueue_qdisc)
+		*need_watchdog_p = 1;
+	spin_unlock_bh(&dev_queue->lock);
+}
+
 void dev_activate(struct net_device *dev)
 {
-	struct netdev_queue *txq = &dev->tx_queue;
+	int need_watchdog;
 
 	/* No queueing discipline is attached to device;
 	   create default one i.e. pfifo_fast for devices,
@@ -552,39 +623,27 @@ void dev_activate(struct net_device *dev)
 	   virtual interfaces
 	 */
 
-	if (txq->qdisc_sleeping == &noop_qdisc) {
-		struct Qdisc *qdisc;
-		if (dev->tx_queue_len) {
-			qdisc = qdisc_create_dflt(dev, txq,
-						  &pfifo_fast_ops,
-						  TC_H_ROOT);
-			if (qdisc == NULL) {
-				printk(KERN_INFO "%s: activation failed\n", dev->name);
-				return;
-			}
-			list_add_tail(&qdisc->list, &txq->qdisc_list);
-		} else {
-			qdisc = &noqueue_qdisc;
-		}
-		txq->qdisc_sleeping = qdisc;
-	}
+	if (dev_all_qdisc_sleeping_noop(dev))
+		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
 
 	if (!netif_carrier_ok(dev))
 		/* Delay activation until next carrier-on event */
 		return;
 
-	spin_lock_bh(&txq->lock);
-	rcu_assign_pointer(txq->qdisc, txq->qdisc_sleeping);
-	if (txq->qdisc != &noqueue_qdisc) {
+	need_watchdog = 0;
+	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
+
+	if (need_watchdog) {
 		dev->trans_start = jiffies;
 		dev_watchdog_up(dev);
 	}
-	spin_unlock_bh(&txq->lock);
 }
 
-static void dev_deactivate_queue(struct netdev_queue *dev_queue,
-				 struct Qdisc *qdisc_default)
+static void dev_deactivate_queue(struct net_device *dev,
+				 struct netdev_queue *dev_queue,
+				 void *_qdisc_default)
 {
+	struct Qdisc *qdisc_default = _qdisc_default;
 	struct Qdisc *qdisc;
 	struct sk_buff *skb;
 
@@ -603,12 +662,35 @@ static void dev_deactivate_queue(struct netdev_queue *dev_queue,
 	kfree_skb(skb);
 }
 
+static bool some_qdisc_is_running(struct net_device *dev, int lock)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *dev_queue;
+		int val;
+
+		dev_queue = netdev_get_tx_queue(dev, i);
+
+		if (lock)
+			spin_lock_bh(&dev_queue->lock);
+
+		val = test_bit(__QUEUE_STATE_QDISC_RUNNING, &dev_queue->state);
+
+		if (lock)
+			spin_unlock_bh(&dev_queue->lock);
+
+		if (val)
+			return true;
+	}
+	return false;
+}
+
 void dev_deactivate(struct net_device *dev)
 {
-	struct netdev_queue *dev_queue = &dev->tx_queue;
-	int running;
+	bool running;
 
-	dev_deactivate_queue(dev_queue, &noop_qdisc);
+	netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
 
 	dev_watchdog_down(dev);
 
@@ -617,17 +699,14 @@ void dev_deactivate(struct net_device *dev)
 
 	/* Wait for outstanding qdisc_run calls. */
 	do {
-		while (test_bit(__QUEUE_STATE_QDISC_RUNNING, &dev_queue->state))
+		while (some_qdisc_is_running(dev, 0))
 			yield();
 
 		/*
 		 * Double-check inside queue lock to ensure that all effects
 		 * of the queue run are visible when we return.
 		 */
-		spin_lock_bh(&dev_queue->lock);
-		running = test_bit(__QUEUE_STATE_QDISC_RUNNING,
-				   &dev_queue->state);
-		spin_unlock_bh(&dev_queue->lock);
+		running = some_qdisc_is_running(dev, 1);
 
 		/*
 		 * The running flag should never be set at this point because
@@ -642,8 +721,10 @@ void dev_deactivate(struct net_device *dev)
 
 static void dev_init_scheduler_queue(struct net_device *dev,
 				     struct netdev_queue *dev_queue,
-				     struct Qdisc *qdisc)
+				     void *_qdisc)
 {
+	struct Qdisc *qdisc = _qdisc;
+
 	dev_queue->qdisc = qdisc;
 	dev_queue->qdisc_sleeping = qdisc;
 	INIT_LIST_HEAD(&dev_queue->qdisc_list);
@@ -652,18 +733,19 @@ static void dev_init_scheduler_queue(struct net_device *dev,
 void dev_init_scheduler(struct net_device *dev)
 {
 	qdisc_lock_tree(dev);
-	dev_init_scheduler_queue(dev, &dev->tx_queue, &noop_qdisc);
+	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
 	dev_init_scheduler_queue(dev, &dev->rx_queue, NULL);
 	qdisc_unlock_tree(dev);
 
 	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
 }
 
-static void dev_shutdown_scheduler_queue(struct net_device *dev,
-					 struct netdev_queue *dev_queue,
-					 struct Qdisc *qdisc_default)
+static void shutdown_scheduler_queue(struct net_device *dev,
+				     struct netdev_queue *dev_queue,
+				     void *_qdisc_default)
 {
 	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+	struct Qdisc *qdisc_default = _qdisc_default;
 
 	if (qdisc) {
 		dev_queue->qdisc = qdisc_default;
@@ -676,8 +758,8 @@ static void dev_shutdown_scheduler_queue(struct net_device *dev,
 void dev_shutdown(struct net_device *dev)
 {
 	qdisc_lock_tree(dev);
-	dev_shutdown_scheduler_queue(dev, &dev->tx_queue, &noop_qdisc);
-	dev_shutdown_scheduler_queue(dev, &dev->rx_queue, NULL);
+	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
+	shutdown_scheduler_queue(dev, &dev->rx_queue, NULL);
 	BUG_TRAP(!timer_pending(&dev->watchdog_timer));
 	qdisc_unlock_tree(dev);
 }