aboutsummaryrefslogtreecommitdiffstats
path: root/block/blk-cgroup.c
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2015-08-18 17:55:24 -0400
committerJens Axboe <axboe@fb.com>2015-08-18 18:49:17 -0400
commit77ea733884eb5520f22c36def1309fe2ab61633e (patch)
tree125f3d034cd5c7e57425789f8c23a66368643841 /block/blk-cgroup.c
parentf12c74cab1635d67077ce8cc40da88b57980f637 (diff)
blkcg: move io_service_bytes and io_serviced stats into blkcg_gq
Currently, both cfq-iosched and blk-throttle keep track of io_service_bytes and io_serviced stats. While keeping track of them separately may be useful during development, it doesn't make much sense otherwise. Also, blk-throttle was counting bio's as IOs while cfq-iosched request's, which is more confusing than informative. This patch adds ->stat_bytes and ->stat_ios to blkg (blkcg_gq), removes the counterparts from cfq-iosched and blk-throttle and let them print from the common blkg counters. The common counters are incremented during bio issue in blkcg_bio_issue_check(). The outputs are still filtered by whether the policy has blkg_policy_data on a given blkg, so cfq's output won't show up if it has never been used for a given blkg. The only times when the outputs would differ significantly are when policies are attached on the fly or elevators are switched back and forth. Those are quite exceptional operations and I don't think they warrant keeping separate counters. v3: Update blkio-controller.txt accordingly. v2: Account IOs during bio issues instead of request completions so that bio-based drivers can be handled the same way. Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Vivek Goyal <vgoyal@redhat.com> Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--block/blk-cgroup.c98
1 file changed, 98 insertions, 0 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b26320720a3c..a25263ca39ca 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -73,6 +73,9 @@ static void blkg_free(struct blkcg_gq *blkg)
73 73
74 if (blkg->blkcg != &blkcg_root) 74 if (blkg->blkcg != &blkcg_root)
75 blk_exit_rl(&blkg->rl); 75 blk_exit_rl(&blkg->rl);
76
77 blkg_rwstat_exit(&blkg->stat_ios);
78 blkg_rwstat_exit(&blkg->stat_bytes);
76 kfree(blkg); 79 kfree(blkg);
77} 80}
78 81
@@ -95,6 +98,10 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
95 if (!blkg) 98 if (!blkg)
96 return NULL; 99 return NULL;
97 100
101 if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
102 blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
103 goto err_free;
104
98 blkg->q = q; 105 blkg->q = q;
99 INIT_LIST_HEAD(&blkg->q_node); 106 INIT_LIST_HEAD(&blkg->q_node);
100 blkg->blkcg = blkcg; 107 blkg->blkcg = blkcg;
@@ -300,6 +307,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
300static void blkg_destroy(struct blkcg_gq *blkg) 307static void blkg_destroy(struct blkcg_gq *blkg)
301{ 308{
302 struct blkcg *blkcg = blkg->blkcg; 309 struct blkcg *blkcg = blkg->blkcg;
310 struct blkcg_gq *parent = blkg->parent;
303 int i; 311 int i;
304 312
305 lockdep_assert_held(blkg->q->queue_lock); 313 lockdep_assert_held(blkg->q->queue_lock);
@@ -315,6 +323,12 @@ static void blkg_destroy(struct blkcg_gq *blkg)
315 if (blkg->pd[i] && pol->pd_offline_fn) 323 if (blkg->pd[i] && pol->pd_offline_fn)
316 pol->pd_offline_fn(blkg->pd[i]); 324 pol->pd_offline_fn(blkg->pd[i]);
317 } 325 }
326
327 if (parent) {
328 blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
329 blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
330 }
331
318 blkg->online = false; 332 blkg->online = false;
319 333
320 radix_tree_delete(&blkcg->blkg_tree, blkg->q->id); 334 radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
@@ -431,6 +445,9 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
431 * anyway. If you get hit by a race, retry. 445 * anyway. If you get hit by a race, retry.
432 */ 446 */
433 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { 447 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
448 blkg_rwstat_reset(&blkg->stat_bytes);
449 blkg_rwstat_reset(&blkg->stat_ios);
450
434 for (i = 0; i < BLKCG_MAX_POLS; i++) { 451 for (i = 0; i < BLKCG_MAX_POLS; i++) {
435 struct blkcg_policy *pol = blkcg_policy[i]; 452 struct blkcg_policy *pol = blkcg_policy[i];
436 453
@@ -579,6 +596,87 @@ u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
579} 596}
580EXPORT_SYMBOL_GPL(blkg_prfill_rwstat); 597EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
581 598
599static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
600 struct blkg_policy_data *pd, int off)
601{
602 struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd->blkg + off);
603
604 return __blkg_prfill_rwstat(sf, pd, &rwstat);
605}
606
607/**
608 * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
609 * @sf: seq_file to print to
610 * @v: unused
611 *
612 * To be used as cftype->seq_show to print blkg->stat_bytes.
613 * cftype->private must be set to the blkcg_policy.
614 */
615int blkg_print_stat_bytes(struct seq_file *sf, void *v)
616{
617 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
618 blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
619 offsetof(struct blkcg_gq, stat_bytes), true);
620 return 0;
621}
622EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);
623
624/**
625 * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
626 * @sf: seq_file to print to
627 * @v: unused
628 *
629 * To be used as cftype->seq_show to print blkg->stat_ios. cftype->private
630 * must be set to the blkcg_policy.
631 */
632int blkg_print_stat_ios(struct seq_file *sf, void *v)
633{
634 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
635 blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
636 offsetof(struct blkcg_gq, stat_ios), true);
637 return 0;
638}
639EXPORT_SYMBOL_GPL(blkg_print_stat_ios);
640
641static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
642 struct blkg_policy_data *pd,
643 int off)
644{
645 struct blkg_rwstat rwstat = blkg_rwstat_recursive_sum(pd->blkg,
646 NULL, off);
647 return __blkg_prfill_rwstat(sf, pd, &rwstat);
648}
649
650/**
651 * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
652 * @sf: seq_file to print to
653 * @v: unused
654 */
655int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
656{
657 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
658 blkg_prfill_rwstat_field_recursive,
659 (void *)seq_cft(sf)->private,
660 offsetof(struct blkcg_gq, stat_bytes), true);
661 return 0;
662}
663EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);
664
665/**
666 * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
667 * @sf: seq_file to print to
668 * @v: unused
669 */
670int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
671{
672 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
673 blkg_prfill_rwstat_field_recursive,
674 (void *)seq_cft(sf)->private,
675 offsetof(struct blkcg_gq, stat_ios), true);
676 return 0;
677}
678EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);
679
582/** 680/**
583 * blkg_stat_recursive_sum - collect hierarchical blkg_stat 681 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
584 * @blkg: blkg of interest 682 * @blkg: blkg of interest