Diffstat (limited to 'net/sched/sch_generic.c'):
 net/sched/sch_generic.c | 24 ++++++++++++++----------
 1 file changed, 14 insertions(+), 10 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 2aeb3a4386a1..5dbb3cd96e59 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -383,6 +383,7 @@ struct Qdisc noop_qdisc = {
 	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
 	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
 	.dev_queue	=	&noop_netdev_queue,
+	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
 };
 EXPORT_SYMBOL(noop_qdisc);
 
@@ -409,6 +410,7 @@ static struct Qdisc noqueue_qdisc = {
 	.list		=	LIST_HEAD_INIT(noqueue_qdisc.list),
 	.q.lock		=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
 	.dev_queue	=	&noqueue_netdev_queue,
+	.busylock	=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.busylock),
 };
 
 
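The two hunks above initialize busylock in the statically defined noop_qdisc and noqueue_qdisc, mirroring the spin_lock_init() that qdisc_alloc() performs for dynamically allocated qdiscs. A rough, illustrative sketch of why the lock must be valid even for these built-in qdiscs, assuming a transmit path that serializes contending senders on qdisc->busylock (sketch_enqueue is a made-up name and this is not the exact kernel code):

#include <linux/skbuff.h>
#include <net/sch_generic.h>

/* Illustrative only: contending senders briefly serialize on
 * qdisc->busylock before taking the qdisc lock.  A device whose
 * queue still points at the static noop/noqueue qdisc reaches a
 * path like this too, so those qdiscs need busylock initialized. */
static int sketch_enqueue(struct sk_buff *skb, struct Qdisc *q)
{
	int rc;

	spin_lock(&q->busylock);	/* uninitialized here before this patch */
	spin_lock(qdisc_lock(q));
	rc = q->enqueue(skb, q);
	spin_unlock(qdisc_lock(q));
	spin_unlock(&q->busylock);

	return rc;
}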
@@ -574,10 +576,8 @@ errout:
 	return ERR_PTR(err);
 }
 
-struct Qdisc * qdisc_create_dflt(struct net_device *dev,
-				 struct netdev_queue *dev_queue,
-				 struct Qdisc_ops *ops,
-				 unsigned int parentid)
+struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
+				struct Qdisc_ops *ops, unsigned int parentid)
 {
 	struct Qdisc *sch;
 
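The hunk above drops the struct net_device argument from qdisc_create_dflt(); the device is reachable from the queue itself (dev_queue->dev), so callers now pass only the netdev_queue, the ops and the parent id. A hedged sketch of how a call site converts, using a made-up helper name (setup_default_child_qdisc); the real in-tree conversions are the two hunks that follow:

#include <net/pkt_sched.h>
#include <net/sch_generic.h>

/* Hypothetical caller, for illustration only. */
static struct Qdisc *setup_default_child_qdisc(struct netdev_queue *dev_queue)
{
	/* Old form:
	 *	qdisc_create_dflt(dev_queue->dev, dev_queue,
	 *			  &pfifo_fast_ops, TC_H_ROOT);
	 * New form: the net_device argument is gone.
	 */
	return qdisc_create_dflt(dev_queue, &pfifo_fast_ops, TC_H_ROOT);
}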
@@ -682,7 +682,7 @@ static void attach_one_default_qdisc(struct net_device *dev,
 	struct Qdisc *qdisc;
 
 	if (dev->tx_queue_len) {
-		qdisc = qdisc_create_dflt(dev, dev_queue,
+		qdisc = qdisc_create_dflt(dev_queue,
 					  &pfifo_fast_ops, TC_H_ROOT);
 		if (!qdisc) {
 			printk(KERN_INFO "%s: activation failed\n", dev->name);
@@ -709,7 +709,7 @@ static void attach_default_qdiscs(struct net_device *dev)
 		dev->qdisc = txq->qdisc_sleeping;
 		atomic_inc(&dev->qdisc->refcnt);
 	} else {
-		qdisc = qdisc_create_dflt(dev, txq, &mq_qdisc_ops, TC_H_ROOT);
+		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
 		if (qdisc) {
 			qdisc->ops->attach(qdisc);
 			dev->qdisc = qdisc;
@@ -753,7 +753,8 @@ void dev_activate(struct net_device *dev)
 
 	need_watchdog = 0;
 	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
-	transition_one_qdisc(dev, &dev->rx_queue, NULL);
+	if (dev_ingress_queue(dev))
+		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);
 
 	if (need_watchdog) {
 		dev->trans_start = jiffies;
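From here on the diff replaces direct use of the embedded dev->rx_queue with dev_ingress_queue(dev), which may return NULL when no ingress queue has been allocated, so each per-queue operation gains a NULL check; dev_deactivate(), dev_init_scheduler() and dev_shutdown() below follow the same pattern. A minimal sketch of the resulting idiom, with for_each_qdisc_queue() as a made-up wrapper name:

#include <linux/netdevice.h>

/* Illustrative wrapper: apply @op to every TX queue and, only when
 * one is present, to the ingress queue.  dev_ingress_queue() can
 * return NULL, unlike the old always-embedded dev->rx_queue. */
static void for_each_qdisc_queue(struct net_device *dev,
				 void (*op)(struct net_device *dev,
					    struct netdev_queue *dev_queue,
					    void *arg),
				 void *arg)
{
	netdev_for_each_tx_queue(dev, op, arg);
	if (dev_ingress_queue(dev))
		op(dev, dev_ingress_queue(dev), arg);
}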
@@ -812,7 +813,8 @@ static bool some_qdisc_is_busy(struct net_device *dev)
 void dev_deactivate(struct net_device *dev)
 {
 	netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
-	dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc);
+	if (dev_ingress_queue(dev))
+		dev_deactivate_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
 
 	dev_watchdog_down(dev);
 
@@ -838,7 +840,8 @@ void dev_init_scheduler(struct net_device *dev)
 {
 	dev->qdisc = &noop_qdisc;
 	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
-	dev_init_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
+	if (dev_ingress_queue(dev))
+		dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
 
 	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
 }
@@ -861,7 +864,8 @@ static void shutdown_scheduler_queue(struct net_device *dev,
 void dev_shutdown(struct net_device *dev)
 {
 	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
-	shutdown_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
+	if (dev_ingress_queue(dev))
+		shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
 	qdisc_destroy(dev->qdisc);
 	dev->qdisc = &noop_qdisc;
 