path: root/net/sched/sch_generic.c
author	Eric Dumazet <eric.dumazet@gmail.com>	2010-10-02 02:11:55 -0400
committer	David S. Miller <davem@davemloft.net>	2010-10-05 03:23:44 -0400
commit	24824a09e35402b8d58dcc5be803a5ad3937bdba (patch)
tree	65c5fa4046646623b130702c9abc92c485ec575b /net/sched/sch_generic.c
parent	0bd9e6a964d86a19f54a9ba31168a37d64e451d1 (diff)
net: dynamic ingress_queue allocation
Ingress is not used very much, and net_device->ingress_queue is quite a big object (128 or 256 bytes), so allocate it dynamically only when it is actually needed (tc qdisc add dev eth0 ingress ...).

The dev_ingress_queue(dev) helper should be used only with RTNL taken.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
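As a rough illustration of the calling convention this patch introduces (not part of the patch itself), users now treat the ingress queue as optional: they hold RTNL, fetch the queue via dev_ingress_queue(), and skip the work when no ingress qdisc has ever been configured. The helper name example_has_ingress below is hypothetical.

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Hypothetical sketch: dev_ingress_queue() may return NULL after this
 * patch, so every caller checks it first. ASSERT_RTNL() documents the
 * changelog's rule that the helper is only valid with RTNL held.
 */
static bool example_has_ingress(struct net_device *dev)
{
	ASSERT_RTNL();
	return dev_ingress_queue(dev) != NULL;
}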
Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--	net/sched/sch_generic.c	| 12
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 545278a1c478..3d57681bdb76 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -753,7 +753,8 @@ void dev_activate(struct net_device *dev)
 
 	need_watchdog = 0;
 	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
-	transition_one_qdisc(dev, &dev->ingress_queue, NULL);
+	if (dev_ingress_queue(dev))
+		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);
 
 	if (need_watchdog) {
 		dev->trans_start = jiffies;
@@ -812,7 +813,8 @@ static bool some_qdisc_is_busy(struct net_device *dev)
 void dev_deactivate(struct net_device *dev)
 {
 	netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
-	dev_deactivate_queue(dev, &dev->ingress_queue, &noop_qdisc);
+	if (dev_ingress_queue(dev))
+		dev_deactivate_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
 
 	dev_watchdog_down(dev);
 
@@ -838,7 +840,8 @@ void dev_init_scheduler(struct net_device *dev)
 {
 	dev->qdisc = &noop_qdisc;
 	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
-	dev_init_scheduler_queue(dev, &dev->ingress_queue, &noop_qdisc);
+	if (dev_ingress_queue(dev))
+		dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
 
 	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
 }
@@ -861,7 +864,8 @@ static void shutdown_scheduler_queue(struct net_device *dev,
 void dev_shutdown(struct net_device *dev)
 {
 	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
-	shutdown_scheduler_queue(dev, &dev->ingress_queue, &noop_qdisc);
+	if (dev_ingress_queue(dev))
+		shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
 	qdisc_destroy(dev->qdisc);
 	dev->qdisc = &noop_qdisc;
 