summaryrefslogtreecommitdiffstats
path: root/block
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2013-08-08 20:11:25 -0400
committerTejun Heo <tj@kernel.org>2013-08-08 20:11:25 -0400
commit492eb21b98f88e411a8bb43d6edcd7d7022add10 (patch)
treeda06df9485fd607762fdec06169f7d9f601e3cf6 /block
parentf48e3924dca268c677c4e338e5d91ad9e6fe6b9e (diff)
cgroup: make hierarchy iterators deal with cgroup_subsys_state instead of cgroup
cgroup is currently in the process of transitioning to using css (cgroup_subsys_state) as the primary handle instead of cgroup in subsystem API. For hierarchy iterators, this is beneficial because:

* In most cases, css is the only thing subsystems care about anyway.

* On the planned unified hierarchy, iterations for different subsystems will need to skip over different subtrees of the hierarchy depending on which subsystems are enabled on each cgroup. Passing around css makes it unnecessary to explicitly specify the subsystem in question, as css is the intersection between cgroup and subsystem.

* For the planned unified hierarchy, css's would need to be created and destroyed dynamically independent from the cgroup hierarchy. Having cgroup core manage css iteration makes enforcing deref rules a lot easier.

Most subsystem conversions are straight-forward. Noteworthy changes are:

* blkio: cgroup_to_blkcg() is no longer used. Removed.

* freezer: cgroup_freezer() is no longer used. Removed.

* devices: cgroup_to_devcgroup() is no longer used. Removed.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Acked-by: Aristeu Rozanski <aris@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Matt Helsley <matthltc@us.ibm.com>
Cc: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r--   block/blk-cgroup.c   |  8
-rw-r--r--   block/blk-cgroup.h   | 25
-rw-r--r--   block/blk-throttle.c |  8
3 files changed, 17 insertions(+), 24 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index f46f3c69179c..4b40640240a4 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -614,7 +614,7 @@ u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
614{ 614{
615 struct blkcg_policy *pol = blkcg_policy[pd->plid]; 615 struct blkcg_policy *pol = blkcg_policy[pd->plid];
616 struct blkcg_gq *pos_blkg; 616 struct blkcg_gq *pos_blkg;
617 struct cgroup *pos_cgrp; 617 struct cgroup_subsys_state *pos_css;
618 u64 sum; 618 u64 sum;
619 619
620 lockdep_assert_held(pd->blkg->q->queue_lock); 620 lockdep_assert_held(pd->blkg->q->queue_lock);
@@ -622,7 +622,7 @@ u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
622 sum = blkg_stat_read((void *)pd + off); 622 sum = blkg_stat_read((void *)pd + off);
623 623
624 rcu_read_lock(); 624 rcu_read_lock();
625 blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) { 625 blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
626 struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol); 626 struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
627 struct blkg_stat *stat = (void *)pos_pd + off; 627 struct blkg_stat *stat = (void *)pos_pd + off;
628 628
@@ -649,7 +649,7 @@ struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
649{ 649{
650 struct blkcg_policy *pol = blkcg_policy[pd->plid]; 650 struct blkcg_policy *pol = blkcg_policy[pd->plid];
651 struct blkcg_gq *pos_blkg; 651 struct blkcg_gq *pos_blkg;
652 struct cgroup *pos_cgrp; 652 struct cgroup_subsys_state *pos_css;
653 struct blkg_rwstat sum; 653 struct blkg_rwstat sum;
654 int i; 654 int i;
655 655
@@ -658,7 +658,7 @@ struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
658 sum = blkg_rwstat_read((void *)pd + off); 658 sum = blkg_rwstat_read((void *)pd + off);
659 659
660 rcu_read_lock(); 660 rcu_read_lock();
661 blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) { 661 blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
662 struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol); 662 struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
663 struct blkg_rwstat *rwstat = (void *)pos_pd + off; 663 struct blkg_rwstat *rwstat = (void *)pos_pd + off;
664 struct blkg_rwstat tmp; 664 struct blkg_rwstat tmp;
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index b6802c46d68f..855538630300 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -184,11 +184,6 @@ static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
184 return css ? container_of(css, struct blkcg, css) : NULL; 184 return css ? container_of(css, struct blkcg, css) : NULL;
185} 185}
186 186
187static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
188{
189 return css_to_blkcg(cgroup_css(cgroup, blkio_subsys_id));
190}
191
192static inline struct blkcg *task_blkcg(struct task_struct *tsk) 187static inline struct blkcg *task_blkcg(struct task_struct *tsk)
193{ 188{
194 return css_to_blkcg(task_css(tsk, blkio_subsys_id)); 189 return css_to_blkcg(task_css(tsk, blkio_subsys_id));
@@ -289,32 +284,31 @@ struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
289/** 284/**
290 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants 285 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
291 * @d_blkg: loop cursor pointing to the current descendant 286 * @d_blkg: loop cursor pointing to the current descendant
292 * @pos_cgrp: used for iteration 287 * @pos_css: used for iteration
293 * @p_blkg: target blkg to walk descendants of 288 * @p_blkg: target blkg to walk descendants of
294 * 289 *
295 * Walk @c_blkg through the descendants of @p_blkg. Must be used with RCU 290 * Walk @c_blkg through the descendants of @p_blkg. Must be used with RCU
296 * read locked. If called under either blkcg or queue lock, the iteration 291 * read locked. If called under either blkcg or queue lock, the iteration
297 * is guaranteed to include all and only online blkgs. The caller may 292 * is guaranteed to include all and only online blkgs. The caller may
298 * update @pos_cgrp by calling cgroup_rightmost_descendant() to skip 293 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
299 * subtree.
300 */ 294 */
301#define blkg_for_each_descendant_pre(d_blkg, pos_cgrp, p_blkg) \ 295#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) \
302 cgroup_for_each_descendant_pre((pos_cgrp), (p_blkg)->blkcg->css.cgroup) \ 296 css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css) \
303 if (((d_blkg) = __blkg_lookup(cgroup_to_blkcg(pos_cgrp), \ 297 if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
304 (p_blkg)->q, false))) 298 (p_blkg)->q, false)))
305 299
306/** 300/**
307 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants 301 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
308 * @d_blkg: loop cursor pointing to the current descendant 302 * @d_blkg: loop cursor pointing to the current descendant
309 * @pos_cgrp: used for iteration 303 * @pos_css: used for iteration
310 * @p_blkg: target blkg to walk descendants of 304 * @p_blkg: target blkg to walk descendants of
311 * 305 *
312 * Similar to blkg_for_each_descendant_pre() but performs post-order 306 * Similar to blkg_for_each_descendant_pre() but performs post-order
313 * traversal instead. Synchronization rules are the same. 307 * traversal instead. Synchronization rules are the same.
314 */ 308 */
315#define blkg_for_each_descendant_post(d_blkg, pos_cgrp, p_blkg) \ 309#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg) \
316 cgroup_for_each_descendant_post((pos_cgrp), (p_blkg)->blkcg->css.cgroup) \ 310 css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \
317 if (((d_blkg) = __blkg_lookup(cgroup_to_blkcg(pos_cgrp), \ 311 if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
318 (p_blkg)->q, false))) 312 (p_blkg)->q, false)))
319 313
320/** 314/**
@@ -577,7 +571,6 @@ static inline int blkcg_activate_policy(struct request_queue *q,
577static inline void blkcg_deactivate_policy(struct request_queue *q, 571static inline void blkcg_deactivate_policy(struct request_queue *q,
578 const struct blkcg_policy *pol) { } 572 const struct blkcg_policy *pol) { }
579 573
580static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
581static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; } 574static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
582 575
583static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, 576static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 88bcfb651b0b..8cefa7f8590e 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1349,7 +1349,7 @@ static int tg_set_conf(struct cgroup_subsys_state *css, struct cftype *cft,
1349 struct throtl_grp *tg; 1349 struct throtl_grp *tg;
1350 struct throtl_service_queue *sq; 1350 struct throtl_service_queue *sq;
1351 struct blkcg_gq *blkg; 1351 struct blkcg_gq *blkg;
1352 struct cgroup *pos_cgrp; 1352 struct cgroup_subsys_state *pos_css;
1353 int ret; 1353 int ret;
1354 1354
1355 ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx); 1355 ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
@@ -1380,7 +1380,7 @@ static int tg_set_conf(struct cgroup_subsys_state *css, struct cftype *cft,
1380 * blk-throttle. 1380 * blk-throttle.
1381 */ 1381 */
1382 tg_update_has_rules(tg); 1382 tg_update_has_rules(tg);
1383 blkg_for_each_descendant_pre(blkg, pos_cgrp, ctx.blkg) 1383 blkg_for_each_descendant_pre(blkg, pos_css, ctx.blkg)
1384 tg_update_has_rules(blkg_to_tg(blkg)); 1384 tg_update_has_rules(blkg_to_tg(blkg));
1385 1385
1386 /* 1386 /*
@@ -1623,7 +1623,7 @@ void blk_throtl_drain(struct request_queue *q)
1623{ 1623{
1624 struct throtl_data *td = q->td; 1624 struct throtl_data *td = q->td;
1625 struct blkcg_gq *blkg; 1625 struct blkcg_gq *blkg;
1626 struct cgroup *pos_cgrp; 1626 struct cgroup_subsys_state *pos_css;
1627 struct bio *bio; 1627 struct bio *bio;
1628 int rw; 1628 int rw;
1629 1629
@@ -1636,7 +1636,7 @@ void blk_throtl_drain(struct request_queue *q)
1636 * better to walk service_queue tree directly but blkg walk is 1636 * better to walk service_queue tree directly but blkg walk is
1637 * easier. 1637 * easier.
1638 */ 1638 */
1639 blkg_for_each_descendant_post(blkg, pos_cgrp, td->queue->root_blkg) 1639 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
1640 tg_drain_bios(&blkg_to_tg(blkg)->service_queue); 1640 tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
1641 1641
1642 tg_drain_bios(&td_root_tg(td)->service_queue); 1642 tg_drain_bios(&td_root_tg(td)->service_queue);