diff options
| author | Eric Dumazet <edumazet@google.com> | 2014-03-10 20:11:42 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2014-03-11 23:54:23 -0400 |
| commit | 15dc36ebbbea7da35fff2c51b620c8333fc87528 (patch) | |
| tree | 8da1869274a4800b33ec0666668b9a474f34f3c6 | |
| parent | a19a7ec8fc8eb32113efeaff2a1ceca273726e9b (diff) | |
pkt_sched: do not use rcu in tc_dump_qdisc()
Like all rtnetlink dump operations, we hold RTNL in tc_dump_qdisc(),
so we do not need to use rcu protection to protect list of netdevices.
This will allow preemption to occur, thus reducing latencies.
Following patch adds explicit cond_resched() calls.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
| -rw-r--r-- | net/sched/sch_api.c | 6 |
1 file changed, 2 insertions, 4 deletions
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 1313145e3b86..272292efa7f0 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
| @@ -1434,9 +1434,9 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 1434 | s_idx = cb->args[0]; | 1434 | s_idx = cb->args[0]; |
| 1435 | s_q_idx = q_idx = cb->args[1]; | 1435 | s_q_idx = q_idx = cb->args[1]; |
| 1436 | 1436 | ||
| 1437 | rcu_read_lock(); | ||
| 1438 | idx = 0; | 1437 | idx = 0; |
| 1439 | for_each_netdev_rcu(net, dev) { | 1438 | ASSERT_RTNL(); |
| 1439 | for_each_netdev(net, dev) { | ||
| 1440 | struct netdev_queue *dev_queue; | 1440 | struct netdev_queue *dev_queue; |
| 1441 | 1441 | ||
| 1442 | if (idx < s_idx) | 1442 | if (idx < s_idx) |
| @@ -1459,8 +1459,6 @@ cont: | |||
| 1459 | } | 1459 | } |
| 1460 | 1460 | ||
| 1461 | done: | 1461 | done: |
| 1462 | rcu_read_unlock(); | ||
| 1463 | |||
| 1464 | cb->args[0] = idx; | 1462 | cb->args[0] = idx; |
| 1465 | cb->args[1] = q_idx; | 1463 | cb->args[1] = q_idx; |
| 1466 | 1464 | ||
