author     Tejun Heo <tj@kernel.org>  2013-01-09 11:05:12 -0500
committer  Tejun Heo <tj@kernel.org>  2013-01-09 11:05:12 -0500
commit     16b3de6652c7aef151f38726faf90f0dbc9e9c71
tree       1b8e615c5261b677ff6a6a3b25fad46b34d1b64b  /block/blk-cgroup.c
parent     b50da39f51139f81b3115d0f9d8632507f802755
blkcg: implement blkg_[rw]stat_recursive_sum() and blkg_[rw]stat_merge()
Implement blkg_[rw]stat_recursive_sum() and blkg_[rw]stat_merge().

The former two collect the [rw]stats designated by the target policy
data and offset from the pd's subtree.  The latter two add one [rw]stat
to another.

Note that the recursive sum functions require the queue lock to be held
on entry to make blkg online test reliable.  This is necessary to
properly handle stats of a dying blkg.

These will be used to implement hierarchical stats.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
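The blkg_[rw]stat_merge() helpers mentioned above live in block/blk-cgroup.h and therefore do not appear in this file's diff. A minimal sketch of what they might look like, assuming the blkg_stat_add(), blkg_stat_read() and blkg_rwstat_read() accessors and BLKG_RWSTAT_NR from blk-cgroup.h at this point in the tree (not the verbatim upstream code):

/*
 * Sketch only: the real helpers are added to block/blk-cgroup.h by this
 * commit and are not shown in the diffstat below.
 */
static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
{
	/* fold @from's count into @to */
	blkg_stat_add(to, blkg_stat_read(from));
}

static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
				     struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	/* add each read/write/sync/async counter of @from to @to */
	u64_stats_update_begin(&to->syncp);
	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		to->cnt[i] += v.cnt[i];
	u64_stats_update_end(&to->syncp);
}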
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--  block/blk-cgroup.c  | 107
1 file changed, 107 insertions(+), 0 deletions(-)
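A policy would typically expose these sums through its cgroup stat files via a prfill callback. An illustrative sketch, assuming the existing __blkg_prfill_u64() printing helper; the callback name is hypothetical:

/*
 * Illustrative only: print the hierarchical total of a blkg_stat
 * embedded at @off in a policy's per-blkg data.
 */
static u64 my_prfill_stat_recursive(struct seq_file *sf,
				    struct blkg_policy_data *pd, int off)
{
	u64 sum = blkg_stat_recursive_sum(pd, off);

	return __blkg_prfill_u64(sf, pd, sum);
}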
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 3aec4cdc8968..f9797b244eb3 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -32,6 +32,26 @@ EXPORT_SYMBOL_GPL(blkcg_root);
 
 static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
 
+static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
+				      struct request_queue *q, bool update_hint);
+
+/**
+ * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
+ * @d_blkg: loop cursor pointing to the current descendant
+ * @pos_cgrp: used for iteration
+ * @p_blkg: target blkg to walk descendants of
+ *
+ * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
+ * read locked.  If called under either blkcg or queue lock, the iteration
+ * is guaranteed to include all and only online blkgs.  The caller may
+ * update @pos_cgrp by calling cgroup_rightmost_descendant() to skip a
+ * subtree.
+ */
+#define blkg_for_each_descendant_pre(d_blkg, pos_cgrp, p_blkg)		\
+	cgroup_for_each_descendant_pre((pos_cgrp), (p_blkg)->blkcg->css.cgroup) \
+		if (((d_blkg) = __blkg_lookup(cgroup_to_blkcg(pos_cgrp),	\
+					      (p_blkg)->q, false)))
+
 static bool blkcg_policy_enabled(struct request_queue *q,
 				 const struct blkcg_policy *pol)
 {
@@ -127,6 +147,17 @@ err_free:
 	return NULL;
 }
 
+/**
+ * __blkg_lookup - internal version of blkg_lookup()
+ * @blkcg: blkcg of interest
+ * @q: request_queue of interest
+ * @update_hint: whether to update the lookup hint with the result or not
+ *
+ * This is the internal version and shouldn't be used by policy
+ * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
+ * @q's bypass state.  If @update_hint is %true, the caller should be
+ * holding @q->queue_lock and the lookup hint is updated on success.
+ */
 static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
 				      struct request_queue *q, bool update_hint)
 {
@@ -586,6 +617,82 @@ u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
 EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
 
 /**
+ * blkg_stat_recursive_sum - collect hierarchical blkg_stat
+ * @pd: policy private data of interest
+ * @off: offset to the blkg_stat in @pd
+ *
+ * Collect the blkg_stat specified by @off from @pd and all its online
+ * descendants and return the sum.  The caller must be holding the queue
+ * lock for online tests.
+ */
+u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
+{
+	struct blkcg_policy *pol = blkcg_policy[pd->plid];
+	struct blkcg_gq *pos_blkg;
+	struct cgroup *pos_cgrp;
+	u64 sum;
+
+	lockdep_assert_held(pd->blkg->q->queue_lock);
+
+	sum = blkg_stat_read((void *)pd + off);
+
+	rcu_read_lock();
+	blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) {
+		struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
+		struct blkg_stat *stat = (void *)pos_pd + off;
+
+		if (pos_blkg->online)
+			sum += blkg_stat_read(stat);
+	}
+	rcu_read_unlock();
+
+	return sum;
+}
+EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);
+
+/**
+ * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
+ * @pd: policy private data of interest
+ * @off: offset to the blkg_rwstat in @pd
+ *
+ * Collect the blkg_rwstat specified by @off from @pd and all its online
+ * descendants and return the sum.  The caller must be holding the queue
+ * lock for online tests.
+ */
+struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
+					     int off)
+{
+	struct blkcg_policy *pol = blkcg_policy[pd->plid];
+	struct blkcg_gq *pos_blkg;
+	struct cgroup *pos_cgrp;
+	struct blkg_rwstat sum;
+	int i;
+
+	lockdep_assert_held(pd->blkg->q->queue_lock);
+
+	sum = blkg_rwstat_read((void *)pd + off);
+
+	rcu_read_lock();
+	blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) {
+		struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
+		struct blkg_rwstat *rwstat = (void *)pos_pd + off;
+		struct blkg_rwstat tmp;
+
+		if (!pos_blkg->online)
+			continue;
+
+		tmp = blkg_rwstat_read(rwstat);
+
+		for (i = 0; i < BLKG_RWSTAT_NR; i++)
+			sum.cnt[i] += tmp.cnt[i];
+	}
+	rcu_read_unlock();
+
+	return sum;
+}
+EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
+
+/**
  * blkg_conf_prep - parse and prepare for per-blkg config update
  * @blkcg: target block cgroup
  * @pol: target policy