aboutsummaryrefslogtreecommitdiffstats
path: root/net/sched/sch_generic.c
diff options
context:
space:
mode:
authorEric Dumazet <eric.dumazet@gmail.com>2011-05-19 19:42:09 -0400
committerDavid S. Miller <davem@davemloft.net>2011-05-22 21:01:20 -0400
commit3137663dfb43bb3e3174e9da81db0c05f395fc1b (patch)
treed3a09e8eb8655215c9a730da6798e9623f7f0d03 /net/sched/sch_generic.c
parent6df427fe8c481d3be437cbe8bd366bdac82b73c4 (diff)
net: avoid synchronize_rcu() in dev_deactivate_many
dev_deactivate_many() issues one synchronize_rcu() call after qdiscs set to noop_qdisc. This call is here to make sure there are no outstanding qdisc-less dev_queue_xmit calls before returning to caller. But in dismantle phase, we don't have to wait, because we won't activate again the device, and we are going to wait one rcu grace period later in rollback_registered_many(). After this patch, device dismantle uses one synchronize_net() and one rcu_barrier() call only, so we have a ~30% speedup and a smaller RTNL latency. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> CC: Patrick McHardy <kaber@trash.net>, CC: Ben Greear <greearb@candelatech.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--net/sched/sch_generic.c17
1 files changed, 15 insertions, 2 deletions
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index c84b65920d1b..b1721d71c27c 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -815,9 +815,17 @@ static bool some_qdisc_is_busy(struct net_device *dev)
815 return false; 815 return false;
816} 816}
817 817
818/**
819 * dev_deactivate_many - deactivate transmissions on several devices
820 * @head: list of devices to deactivate
821 *
822 * This function returns only when all outstanding transmissions
823 * have completed, unless all devices are in dismantle phase.
824 */
818void dev_deactivate_many(struct list_head *head) 825void dev_deactivate_many(struct list_head *head)
819{ 826{
820 struct net_device *dev; 827 struct net_device *dev;
828 bool sync_needed = false;
821 829
822 list_for_each_entry(dev, head, unreg_list) { 830 list_for_each_entry(dev, head, unreg_list) {
823 netdev_for_each_tx_queue(dev, dev_deactivate_queue, 831 netdev_for_each_tx_queue(dev, dev_deactivate_queue,
@@ -827,10 +835,15 @@ void dev_deactivate_many(struct list_head *head)
827 &noop_qdisc); 835 &noop_qdisc);
828 836
829 dev_watchdog_down(dev); 837 dev_watchdog_down(dev);
838 sync_needed |= !dev->dismantle;
830 } 839 }
831 840
832 /* Wait for outstanding qdisc-less dev_queue_xmit calls. */ 841 /* Wait for outstanding qdisc-less dev_queue_xmit calls.
833 synchronize_rcu(); 842 * This is avoided if all devices are in dismantle phase :
843 * Caller will call synchronize_net() for us
844 */
845 if (sync_needed)
846 synchronize_net();
834 847
835 /* Wait for outstanding qdisc_run calls. */ 848 /* Wait for outstanding qdisc_run calls. */
836 list_for_each_entry(dev, head, unreg_list) 849 list_for_each_entry(dev, head, unreg_list)