author    David S. Miller <davem@davemloft.net>  2014-03-11 23:54:56 -0400
committer David S. Miller <davem@davemloft.net>  2014-03-11 23:54:56 -0400
commit    62cf4be9899dafe0fd84cbdf5352a01d8f1ea326
tree      64becbfd442aed1f89f4058609eaa042c8e6f417
parent    a19a7ec8fc8eb32113efeaff2a1ceca273726e9b
parent    fba373d2bb267eaeba85579dd04b91435df8c83b
Merge branch 'pkt_sched_cond_resched'
Eric Dumazet says:

====================
pkt_sched: allow scheduling points

We have seen delays of more than 50ms in class or qdisc dumps when the
device is under high TX stress, even with the prior 4KB-per-skb limit.
With the new 16KB limit, this could translate to 200ms delays.

Add cond_resched() to give higher-priority tasks a chance to get the CPU.
But before doing so, we need to remove the RCU locking from
tc_dump_qdisc(), as David spotted.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
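The ordering matters here: cond_resched() may sleep, and sleeping is not
allowed inside an rcu_read_lock() section, so the RCU read lock has to be
dropped before the scheduling points can be added. The dump already runs
with RTNL held (rtnetlink takes it before invoking the dump callback),
which is enough to keep the netdev list stable. A simplified sketch of the
resulting loop in tc_dump_qdisc() (error handling and the per-queue walk
elided; the hunks below show the exact change):

	/* Walk the netdev list under RTNL instead of RCU, so the fill
	 * functions reached from this loop are free to cond_resched(). */
	ASSERT_RTNL();			/* rtnetlink holds RTNL for us */
	idx = 0;
	for_each_netdev(net, dev) {	/* list is stable under RTNL */
		if (idx < s_idx)
			goto cont;
		/* ... dump this device's qdiscs; every object emitted
		 * goes through tc_fill_qdisc(), which now starts with
		 * cond_resched() ... */
cont:
		idx++;
	}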
 net/sched/sch_api.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 1313145e3b86..0a99d7ced71e 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1303,6 +1303,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
 	struct gnet_dump d;
 	struct qdisc_size_table *stab;
 
+	cond_resched();
 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
 	if (!nlh)
 		goto out_nlmsg_trim;
@@ -1434,9 +1435,9 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
 	s_idx = cb->args[0];
 	s_q_idx = q_idx = cb->args[1];
 
-	rcu_read_lock();
 	idx = 0;
-	for_each_netdev_rcu(net, dev) {
+	ASSERT_RTNL();
+	for_each_netdev(net, dev) {
 		struct netdev_queue *dev_queue;
 
 		if (idx < s_idx)
@@ -1459,8 +1460,6 @@ cont:
 	}
 
 done:
-	rcu_read_unlock();
-
 	cb->args[0] = idx;
 	cb->args[1] = q_idx;
 
@@ -1617,6 +1616,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
 	struct gnet_dump d;
 	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
 
+	cond_resched();
 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
 	if (!nlh)
 		goto out_nlmsg_trim;
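A note on cost: cond_resched() is essentially a test of the need-resched
flag that only enters the scheduler when a reschedule is actually pending,
so calling it once per dumped qdisc or class is nearly free in the common
case. A rough sketch of the idea (illustrative only; the real
implementation lives in kernel/sched/core.c and also performs a
might_sleep() debug check):

	/* Approximate shape of cond_resched(), for illustration: */
	static inline int cond_resched_sketch(void)
	{
		if (should_resched()) {		/* TIF_NEED_RESCHED set? */
			__cond_resched();	/* voluntarily schedule() */
			return 1;
		}
		return 0;			/* common case: a flag test */
	}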