author		Tejun Heo <tj@kernel.org>	2015-08-18 17:55:22 -0400
committer	Jens Axboe <axboe@fb.com>	2015-08-18 18:49:17 -0400
commit		24bdb8ef068ebdc2a57ce715f0ab22d5da32832a (patch)
tree		466def9b935d1a40ce63cd07607e4a7be551f6bd /block/blk-throttle.c
parent		e6269c44546755094979ab53609e6e203a68c8ff (diff)
blkcg: make blkcg_[rw]stat per-cpu
blkcg_[rw]stat are used as stat counters for blkcg policies. They aren't
per-cpu by themselves, and blk-throttle makes them per-cpu by wrapping
around them. This patch makes blkcg_[rw]stat per-cpu and drops the ad-hoc
per-cpu wrapping in blk-throttle.

* blkg_[rw]stat->cnt is replaced with cpu_cnt, which is a struct
  percpu_counter. This makes syncp unnecessary, as remote accesses are
  handled by percpu_counter itself.

* blkg_[rw]stat_init() can now fail due to percpu allocation failure and
  is thus updated to return int.

* percpu_counters need explicit freeing. blkg_[rw]stat_exit() is added.

* As blkg_rwstat->cpu_cnt[] can't be read directly anymore, reading and
  summing results are stored in ->aux_cnt[] instead.

* The custom per-cpu stat implementation in blk-throttle is removed. This
  makes all blkcg stat counters per-cpu without complicating policy
  implementations.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
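For orientation, the sketch below illustrates the blkg_rwstat layout and lifecycle the message describes: an array of percpu_counters (cpu_cnt[]) plus an aux_cnt[] array that holds read/summed results, with an init that can fail and an explicit exit. This is only a hedged approximation, not the blk-cgroup header change from this series; the helper bodies and include list are assumptions, while the names cpu_cnt, aux_cnt, BLKG_RWSTAT_NR, blkg_rwstat_init(), and blkg_rwstat_exit() come from the message and the diff below.

#include <linux/percpu_counter.h>
#include <linux/atomic.h>
#include <linux/gfp.h>

/* Sketch only: per-cpu blkg_rwstat as described in the commit message. */
struct blkg_rwstat {
	struct percpu_counter	cpu_cnt[BLKG_RWSTAT_NR];	/* hot counters, per-cpu */
	atomic64_t		aux_cnt[BLKG_RWSTAT_NR];	/* holds summed/read results */
};

/* Per-cpu allocation can fail, hence the int return noted above. */
static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
	int i, ret;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
		if (ret) {
			/* unwind the counters already allocated */
			while (--i >= 0)
				percpu_counter_destroy(&rwstat->cpu_cnt[i]);
			return ret;
		}
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
	return 0;
}

/* percpu_counters need explicit freeing, hence the new exit helper. */
static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		percpu_counter_destroy(&rwstat->cpu_cnt[i]);
}

With counters embedded this way, a policy such as blk-throttle can drop its own per-cpu wrapper and rely on percpu_counter to handle remote-CPU accesses, which is why the syncp sequence counter becomes unnecessary.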
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--	block/blk-throttle.c	89
1 file changed, 28 insertions(+), 61 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 29c22ed4b073..c0b2263a222a 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -83,14 +83,6 @@ enum tg_state_flags {
 
 #define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
 
-/* Per-cpu group stats */
-struct tg_stats_cpu {
-	/* total bytes transferred */
-	struct blkg_rwstat		service_bytes;
-	/* total IOs serviced, post merge */
-	struct blkg_rwstat		serviced;
-};
-
 struct throtl_grp {
 	/* must be the first member */
 	struct blkg_policy_data pd;
@@ -142,8 +134,10 @@ struct throtl_grp {
 	unsigned long slice_start[2];
 	unsigned long slice_end[2];
 
-	/* Per cpu stats pointer */
-	struct tg_stats_cpu __percpu *stats_cpu;
+	/* total bytes transferred */
+	struct blkg_rwstat		service_bytes;
+	/* total IOs serviced, post merge */
+	struct blkg_rwstat		serviced;
 };
 
 struct throtl_data
@@ -337,17 +331,15 @@ static void throtl_service_queue_init(struct throtl_service_queue *sq)
 static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
 {
 	struct throtl_grp *tg;
-	int rw, cpu;
+	int rw;
 
 	tg = kzalloc_node(sizeof(*tg), gfp, node);
 	if (!tg)
-		return NULL;
+		goto err;
 
-	tg->stats_cpu = alloc_percpu_gfp(struct tg_stats_cpu, gfp);
-	if (!tg->stats_cpu) {
-		kfree(tg);
-		return NULL;
-	}
+	if (blkg_rwstat_init(&tg->service_bytes, gfp) ||
+	    blkg_rwstat_init(&tg->serviced, gfp))
+		goto err_free_tg;
 
 	throtl_service_queue_init(&tg->service_queue);
 
@@ -362,14 +354,14 @@ static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
 	tg->iops[READ] = -1;
 	tg->iops[WRITE] = -1;
 
-	for_each_possible_cpu(cpu) {
-		struct tg_stats_cpu *stats_cpu = per_cpu_ptr(tg->stats_cpu, cpu);
-
-		blkg_rwstat_init(&stats_cpu->service_bytes);
-		blkg_rwstat_init(&stats_cpu->serviced);
-	}
-
 	return &tg->pd;
+
+err_free_tg:
+	blkg_rwstat_exit(&tg->serviced);
+	blkg_rwstat_exit(&tg->service_bytes);
+	kfree(tg);
+err:
+	return NULL;
 }
 
 static void throtl_pd_init(struct blkg_policy_data *pd)
@@ -427,21 +419,17 @@ static void throtl_pd_free(struct blkg_policy_data *pd)
 	struct throtl_grp *tg = pd_to_tg(pd);
 
 	del_timer_sync(&tg->service_queue.pending_timer);
-	free_percpu(tg->stats_cpu);
+	blkg_rwstat_exit(&tg->serviced);
+	blkg_rwstat_exit(&tg->service_bytes);
 	kfree(tg);
 }
 
 static void throtl_pd_reset_stats(struct blkg_policy_data *pd)
 {
 	struct throtl_grp *tg = pd_to_tg(pd);
-	int cpu;
 
-	for_each_possible_cpu(cpu) {
-		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
-
-		blkg_rwstat_reset(&sc->service_bytes);
-		blkg_rwstat_reset(&sc->serviced);
-	}
+	blkg_rwstat_reset(&tg->service_bytes);
+	blkg_rwstat_reset(&tg->serviced);
 }
 
 static struct throtl_grp *
@@ -855,7 +843,6 @@ static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
 					 int rw)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
-	struct tg_stats_cpu *stats_cpu;
 	unsigned long flags;
 
 	/*
@@ -865,10 +852,8 @@ static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
 	 */
 	local_irq_save(flags);
 
-	stats_cpu = this_cpu_ptr(tg->stats_cpu);
-
-	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
-	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);
+	blkg_rwstat_add(&tg->serviced, rw, 1);
+	blkg_rwstat_add(&tg->service_bytes, rw, bytes);
 
 	local_irq_restore(flags);
 }
@@ -1176,27 +1161,9 @@ static void blk_throtl_dispatch_work_fn(struct work_struct *work)
 	}
 }
 
-static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
-				struct blkg_policy_data *pd, int off)
-{
-	struct throtl_grp *tg = pd_to_tg(pd);
-	struct blkg_rwstat rwstat = { }, tmp;
-	int i, cpu;
-
-	for_each_possible_cpu(cpu) {
-		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
-
-		tmp = blkg_rwstat_read((void *)sc + off);
-		for (i = 0; i < BLKG_RWSTAT_NR; i++)
-			rwstat.cnt[i] += tmp.cnt[i];
-	}
-
-	return __blkg_prfill_rwstat(sf, pd, &rwstat);
-}
-
-static int tg_print_cpu_rwstat(struct seq_file *sf, void *v)
+static int tg_print_rwstat(struct seq_file *sf, void *v)
 {
-	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_cpu_rwstat,
+	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
 			  &blkcg_policy_throtl, seq_cft(sf)->private, true);
 	return 0;
 }
@@ -1337,13 +1304,13 @@ static struct cftype throtl_files[] = {
 	},
 	{
 		.name = "throttle.io_service_bytes",
-		.private = offsetof(struct tg_stats_cpu, service_bytes),
-		.seq_show = tg_print_cpu_rwstat,
+		.private = offsetof(struct throtl_grp, service_bytes),
+		.seq_show = tg_print_rwstat,
 	},
 	{
 		.name = "throttle.io_serviced",
-		.private = offsetof(struct tg_stats_cpu, serviced),
-		.seq_show = tg_print_cpu_rwstat,
+		.private = offsetof(struct throtl_grp, serviced),
+		.seq_show = tg_print_rwstat,
 	},
 	{ }	/* terminate */
 };