author	John Fastabend <john.fastabend@gmail.com>	2014-09-28 14:52:56 -0400
committer	David S. Miller <davem@davemloft.net>	2014-09-30 01:02:26 -0400
commit	22e0f8b9322cb1a48b1357e8f4ae6f5a9eca8cfa (patch)
tree	2c9ef18dca9d9a441d92ea57cf7f7a292f4ceb3f /net/sched/sch_htb.c
parent	79cf79abce71eb7dbc40e2f3121048ca5405cb47 (diff)
net: sched: make bstats per cpu and estimator RCU safe
In order to run qdiscs without locking, statistics and estimators need to be handled correctly.

To resolve bstats, make the statistics per-cpu. Because this is only needed for qdiscs that run without locks, which for the near future is not the case for most qdiscs, only create the percpu stats when a qdisc sets the TCQ_F_CPUSTATS flag.

Next, because estimators use the bstats to calculate packets per second and bytes per second, the estimator code paths are updated to use the per-cpu statistics.

Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
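The estimator can only keep reporting bytes/packets per second if the per-cpu counters are folded into one total before they are consumed. The sketch below illustrates that aggregation step; the names sketch_bstats_cpu and sketch_sum_bstats are illustrative stand-ins for the helpers this series adds in net/core/gen_stats.c, not the exact upstream code.

	/* Illustrative sketch only: sum per-cpu basic stats into a single
	 * gnet_stats_basic_packed so the rate estimator can keep computing
	 * bytes/packets per second.  Not the verbatim upstream helper.
	 */
	#include <linux/percpu.h>
	#include <linux/u64_stats_sync.h>
	#include <net/gen_stats.h>

	struct sketch_bstats_cpu {
		struct gnet_stats_basic_packed bstats;	/* counters for one CPU */
		struct u64_stats_sync syncp;		/* guards 64-bit reads on 32-bit */
	};

	static void sketch_sum_bstats(struct gnet_stats_basic_packed *total,
				      struct sketch_bstats_cpu __percpu *cpu_stats)
	{
		int i;

		for_each_possible_cpu(i) {
			struct sketch_bstats_cpu *bcpu = per_cpu_ptr(cpu_stats, i);
			unsigned int start;
			u64 bytes;
			u32 packets;

			/* retry if a writer updated the counters mid-read */
			do {
				start = u64_stats_fetch_begin(&bcpu->syncp);
				bytes = bcpu->bstats.bytes;
				packets = bcpu->bstats.packets;
			} while (u64_stats_fetch_retry(&bcpu->syncp, start));

			total->bytes += bytes;
			total->packets += packets;
		}
	}

HTB itself does not set TCQ_F_CPUSTATS, so in the hunks below the new cpu_bstats argument to gnet_stats_copy_basic(), gen_new_estimator() and gen_replace_estimator() is simply NULL and the existing non-percpu path is used unchanged.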
Diffstat (limited to 'net/sched/sch_htb.c')
-rw-r--r--	net/sched/sch_htb.c	12
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 063e953d9848..0256dee69bd6 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1144,7 +1144,7 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
 	cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens);
 	cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);
 
-	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
+	if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
 	    gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
 	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
 		return -1;
@@ -1402,7 +1402,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 			goto failure;
 
 		if (htb_rate_est || tca[TCA_RATE]) {
-			err = gen_new_estimator(&cl->bstats, &cl->rate_est,
+			err = gen_new_estimator(&cl->bstats, NULL,
+						&cl->rate_est,
 						qdisc_root_sleeping_lock(sch),
 						tca[TCA_RATE] ? : &est.nla);
 			if (err) {
@@ -1464,8 +1465,11 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 			parent->children++;
 	} else {
 		if (tca[TCA_RATE]) {
-			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
-						    qdisc_root_sleeping_lock(sch),
+			spinlock_t *lock = qdisc_root_sleeping_lock(sch);
+
+			err = gen_replace_estimator(&cl->bstats, NULL,
+						    &cl->rate_est,
+						    lock,
 						    tca[TCA_RATE]);
 			if (err)
 				return err;