author     Tejun Heo <tj@kernel.org>   2013-08-08 20:11:25 -0400
committer  Tejun Heo <tj@kernel.org>   2013-08-08 20:11:25 -0400
commit     492eb21b98f88e411a8bb43d6edcd7d7022add10 (patch)
tree       da06df9485fd607762fdec06169f7d9f601e3cf6
parent     f48e3924dca268c677c4e338e5d91ad9e6fe6b9e (diff)
cgroup: make hierarchy iterators deal with cgroup_subsys_state instead of cgroup
cgroup is currently in the process of transitioning to using css
(cgroup_subsys_state) as the primary handle instead of cgroup in the
subsystem API.  For hierarchy iterators, this is beneficial because:

* In most cases, css is the only thing subsystems care about anyway.

* On the planned unified hierarchy, iterations for different subsystems
will need to skip over different subtrees of the hierarchy depending on
which subsystems are enabled on each cgroup.  Passing around css makes
it unnecessary to explicitly specify the subsystem in question, as a
css is the intersection of a cgroup and a subsystem.

* For the planned unified hierarchy, csses will need to be created and
destroyed dynamically, independently of the cgroup hierarchy.  Having
cgroup core manage css iteration makes enforcing deref rules a lot
easier.

Most subsystem conversions are straightforward.  Noteworthy changes are:
* blkio: cgroup_to_blkcg() is no longer used. Removed.
* freezer: cgroup_freezer() is no longer used. Removed.
* devices: cgroup_to_devcgroup() is no longer used. Removed.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Acked-by: Aristeu Rozanski <aris@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Matt Helsley <matthltc@us.ibm.com>
Cc: Jens Axboe <axboe@kernel.dk>
-rw-r--r--  block/blk-cgroup.c        |   8
-rw-r--r--  block/blk-cgroup.h        |  25
-rw-r--r--  block/blk-throttle.c      |   8
-rw-r--r--  include/linux/cgroup.h    |  88
-rw-r--r--  kernel/cgroup.c           | 131
-rw-r--r--  kernel/cgroup_freezer.c   |  25
-rw-r--r--  kernel/cpuset.c           |  58
-rw-r--r--  mm/memcontrol.c           |  20
-rw-r--r--  security/device_cgroup.c  |  11

9 files changed, 187 insertions(+), 187 deletions(-)
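
For reference, the shape of a typical per-controller conversion in this patch can be sketched as below.  This is an illustrative sketch only: struct my_state, css_my_state(), my_subsys_id and my_update_tree() are hypothetical stand-ins for a controller's own types and helpers; the iterator calls are the actual before/after APIs touched by this patch, and both walks must run under rcu_read_lock().

/* Before: the loop cursor is a cgroup which must be mapped back to the
 * subsystem state on each iteration ("my_*" names are hypothetical). */
static void my_update_tree(struct my_state *root)
{
        struct cgroup *pos;

        rcu_read_lock();
        cgroup_for_each_descendant_pre(pos, root->css.cgroup) {
                struct my_state *s =
                        css_my_state(cgroup_css(pos, my_subsys_id));

                /* verify @s is alive and inherit state from its parent */
        }
        rcu_read_unlock();
}

/* After: the cursor is the css itself, so no per-iteration subsystem
 * lookup is needed and the subsystem never has to be named explicitly. */
static void my_update_tree(struct my_state *root)
{
        struct cgroup_subsys_state *pos;

        rcu_read_lock();
        css_for_each_descendant_pre(pos, &root->css) {
                struct my_state *s = css_my_state(pos);

                /* verify @s is alive and inherit state from its parent;
                 * a whole subtree can still be skipped with
                 * pos = css_rightmost_descendant(pos);
                 */
        }
        rcu_read_unlock();
}

Because the css cursor already pairs a cgroup with a subsystem, the planned unified hierarchy can make the same walk skip subtrees where a given subsystem is disabled without any extra arguments.
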
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index f46f3c69179c..4b40640240a4 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -614,7 +614,7 @@ u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
 {
        struct blkcg_policy *pol = blkcg_policy[pd->plid];
        struct blkcg_gq *pos_blkg;
-       struct cgroup *pos_cgrp;
+       struct cgroup_subsys_state *pos_css;
        u64 sum;
 
        lockdep_assert_held(pd->blkg->q->queue_lock);
@@ -622,7 +622,7 @@ u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
        sum = blkg_stat_read((void *)pd + off);
 
        rcu_read_lock();
-       blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) {
+       blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
                struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
                struct blkg_stat *stat = (void *)pos_pd + off;
 
@@ -649,7 +649,7 @@ struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
 {
        struct blkcg_policy *pol = blkcg_policy[pd->plid];
        struct blkcg_gq *pos_blkg;
-       struct cgroup *pos_cgrp;
+       struct cgroup_subsys_state *pos_css;
        struct blkg_rwstat sum;
        int i;
 
@@ -658,7 +658,7 @@ struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
        sum = blkg_rwstat_read((void *)pd + off);
 
        rcu_read_lock();
-       blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) {
+       blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
                struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
                struct blkg_rwstat *rwstat = (void *)pos_pd + off;
                struct blkg_rwstat tmp;
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index b6802c46d68f..855538630300 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -184,11 +184,6 @@ static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
        return css ? container_of(css, struct blkcg, css) : NULL;
 }
 
-static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
-{
-       return css_to_blkcg(cgroup_css(cgroup, blkio_subsys_id));
-}
-
 static inline struct blkcg *task_blkcg(struct task_struct *tsk)
 {
        return css_to_blkcg(task_css(tsk, blkio_subsys_id));
@@ -289,32 +284,31 @@ struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
 /**
  * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
  * @d_blkg: loop cursor pointing to the current descendant
- * @pos_cgrp: used for iteration
+ * @pos_css: used for iteration
  * @p_blkg: target blkg to walk descendants of
  *
  * Walk @c_blkg through the descendants of @p_blkg.  Must be used with RCU
  * read locked.  If called under either blkcg or queue lock, the iteration
  * is guaranteed to include all and only online blkgs.  The caller may
- * update @pos_cgrp by calling cgroup_rightmost_descendant() to skip
- * subtree.
+ * update @pos_css by calling css_rightmost_descendant() to skip subtree.
  */
-#define blkg_for_each_descendant_pre(d_blkg, pos_cgrp, p_blkg)         \
-       cgroup_for_each_descendant_pre((pos_cgrp), (p_blkg)->blkcg->css.cgroup) \
-               if (((d_blkg) = __blkg_lookup(cgroup_to_blkcg(pos_cgrp), \
+#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)          \
+       css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)   \
+               if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),    \
                                              (p_blkg)->q, false)))
 
 /**
  * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
  * @d_blkg: loop cursor pointing to the current descendant
- * @pos_cgrp: used for iteration
+ * @pos_css: used for iteration
  * @p_blkg: target blkg to walk descendants of
  *
  * Similar to blkg_for_each_descendant_pre() but performs post-order
  * traversal instead.  Synchronization rules are the same.
  */
-#define blkg_for_each_descendant_post(d_blkg, pos_cgrp, p_blkg)        \
-       cgroup_for_each_descendant_post((pos_cgrp), (p_blkg)->blkcg->css.cgroup) \
-               if (((d_blkg) = __blkg_lookup(cgroup_to_blkcg(pos_cgrp), \
+#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)         \
+       css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)  \
+               if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),    \
                                              (p_blkg)->q, false)))
 
 /**
@@ -577,7 +571,6 @@ static inline int blkcg_activate_policy(struct request_queue *q,
 static inline void blkcg_deactivate_policy(struct request_queue *q,
                                           const struct blkcg_policy *pol) { }
 
-static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
 static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
 
 static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 88bcfb651b0b..8cefa7f8590e 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1349,7 +1349,7 @@ static int tg_set_conf(struct cgroup_subsys_state *css, struct cftype *cft,
        struct throtl_grp *tg;
        struct throtl_service_queue *sq;
        struct blkcg_gq *blkg;
-       struct cgroup *pos_cgrp;
+       struct cgroup_subsys_state *pos_css;
        int ret;
 
        ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
@@ -1380,7 +1380,7 @@ static int tg_set_conf(struct cgroup_subsys_state *css, struct cftype *cft,
         * blk-throttle.
         */
        tg_update_has_rules(tg);
-       blkg_for_each_descendant_pre(blkg, pos_cgrp, ctx.blkg)
+       blkg_for_each_descendant_pre(blkg, pos_css, ctx.blkg)
                tg_update_has_rules(blkg_to_tg(blkg));
 
        /*
@@ -1623,7 +1623,7 @@ void blk_throtl_drain(struct request_queue *q)
 {
        struct throtl_data *td = q->td;
        struct blkcg_gq *blkg;
-       struct cgroup *pos_cgrp;
+       struct cgroup_subsys_state *pos_css;
        struct bio *bio;
        int rw;
 
@@ -1636,7 +1636,7 @@ void blk_throtl_drain(struct request_queue *q)
         * better to walk service_queue tree directly but blkg walk is
         * easier.
         */
-       blkg_for_each_descendant_post(blkg, pos_cgrp, td->queue->root_blkg)
+       blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
                tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
 
        tg_drain_bios(&td_root_tg(td)->service_queue);
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index c288bce428f8..4bc22f4a1abb 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -779,68 +779,72 @@ static inline struct cgroup *cgroup_from_id(struct cgroup_subsys *ss, int id)
        return idr_find(&ss->root->cgroup_idr, id);
 }
 
-struct cgroup *cgroup_next_child(struct cgroup *pos, struct cgroup *cgrp);
+struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
+                                          struct cgroup_subsys_state *parent);
 
 /**
- * cgroup_for_each_child - iterate through children of a cgroup
- * @pos: the cgroup * to use as the loop cursor
- * @cgrp: cgroup whose children to walk
+ * css_for_each_child - iterate through children of a css
+ * @pos: the css * to use as the loop cursor
+ * @parent: css whose children to walk
  *
- * Walk @cgrp's children.  Must be called under rcu_read_lock().  A child
- * cgroup which hasn't finished ->css_online() or already has finished
+ * Walk @parent's children.  Must be called under rcu_read_lock().  A child
+ * css which hasn't finished ->css_online() or already has finished
  * ->css_offline() may show up during traversal and it's each subsystem's
  * responsibility to verify that each @pos is alive.
  *
  * If a subsystem synchronizes against the parent in its ->css_online() and
- * before starting iterating, a cgroup which finished ->css_online() is
+ * before starting iterating, a css which finished ->css_online() is
  * guaranteed to be visible in the future iterations.
  *
  * It is allowed to temporarily drop RCU read lock during iteration.  The
  * caller is responsible for ensuring that @pos remains accessible until
  * the start of the next iteration by, for example, bumping the css refcnt.
  */
-#define cgroup_for_each_child(pos, cgrp)                               \
-       for ((pos) = cgroup_next_child(NULL, (cgrp)); (pos);            \
-            (pos) = cgroup_next_child((pos), (cgrp)))
+#define css_for_each_child(pos, parent)                                        \
+       for ((pos) = css_next_child(NULL, (parent)); (pos);             \
+            (pos) = css_next_child((pos), (parent)))
 
-struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
-                                         struct cgroup *cgroup);
-struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos);
+struct cgroup_subsys_state *
+css_next_descendant_pre(struct cgroup_subsys_state *pos,
+                       struct cgroup_subsys_state *css);
+
+struct cgroup_subsys_state *
+css_rightmost_descendant(struct cgroup_subsys_state *pos);
 
 /**
- * cgroup_for_each_descendant_pre - pre-order walk of a cgroup's descendants
- * @pos: the cgroup * to use as the loop cursor
- * @cgroup: cgroup whose descendants to walk
+ * css_for_each_descendant_pre - pre-order walk of a css's descendants
+ * @pos: the css * to use as the loop cursor
+ * @root: css whose descendants to walk
  *
- * Walk @cgroup's descendants.  Must be called under rcu_read_lock().  A
- * descendant cgroup which hasn't finished ->css_online() or already has
+ * Walk @root's descendants.  Must be called under rcu_read_lock().  A
+ * descendant css which hasn't finished ->css_online() or already has
  * finished ->css_offline() may show up during traversal and it's each
  * subsystem's responsibility to verify that each @pos is alive.
  *
  * If a subsystem synchronizes against the parent in its ->css_online() and
  * before starting iterating, and synchronizes against @pos on each
- * iteration, any descendant cgroup which finished ->css_online() is
+ * iteration, any descendant css which finished ->css_online() is
  * guaranteed to be visible in the future iterations.
  *
  * In other words, the following guarantees that a descendant can't escape
  * state updates of its ancestors.
  *
- * my_online(@cgrp)
+ * my_online(@css)
  * {
- *     Lock @cgrp->parent and @cgrp;
- *     Inherit state from @cgrp->parent;
+ *     Lock @css's parent and @css;
+ *     Inherit state from the parent;
  *     Unlock both.
  * }
  *
- * my_update_state(@cgrp)
+ * my_update_state(@css)
  * {
- *     Lock @cgrp;
- *     Update @cgrp's state;
- *     Unlock @cgrp;
+ *     Lock @css;
+ *     Update @css's state;
+ *     Unlock @css;
  *
- *     cgroup_for_each_descendant_pre(@pos, @cgrp) {
+ *     css_for_each_descendant_pre(@pos, @css) {
  *             Lock @pos;
- *             Verify @pos is alive and inherit state from @pos->parent;
+ *             Verify @pos is alive and inherit state from @pos's parent;
  *             Unlock @pos;
  *     }
  * }
@@ -851,8 +855,7 @@ struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos);
  * visible by walking order and, as long as inheriting operations to the
  * same @pos are atomic to each other, multiple updates racing each other
  * still result in the correct state.  It's guaranateed that at least one
- * inheritance happens for any cgroup after the latest update to its
- * parent.
+ * inheritance happens for any css after the latest update to its parent.
  *
  * If checking parent's state requires locking the parent, each inheriting
  * iteration should lock and unlock both @pos->parent and @pos.
@@ -865,25 +868,26 @@ struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos);
  * caller is responsible for ensuring that @pos remains accessible until
  * the start of the next iteration by, for example, bumping the css refcnt.
  */
-#define cgroup_for_each_descendant_pre(pos, cgroup)                    \
-       for (pos = cgroup_next_descendant_pre(NULL, (cgroup)); (pos);   \
-            pos = cgroup_next_descendant_pre((pos), (cgroup)))
+#define css_for_each_descendant_pre(pos, css)                          \
+       for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);       \
+            (pos) = css_next_descendant_pre((pos), (css)))
 
-struct cgroup *cgroup_next_descendant_post(struct cgroup *pos,
-                                          struct cgroup *cgroup);
+struct cgroup_subsys_state *
+css_next_descendant_post(struct cgroup_subsys_state *pos,
+                        struct cgroup_subsys_state *css);
 
 /**
- * cgroup_for_each_descendant_post - post-order walk of a cgroup's descendants
- * @pos: the cgroup * to use as the loop cursor
- * @cgroup: cgroup whose descendants to walk
+ * css_for_each_descendant_post - post-order walk of a css's descendants
+ * @pos: the css * to use as the loop cursor
+ * @css: css whose descendants to walk
  *
- * Similar to cgroup_for_each_descendant_pre() but performs post-order
+ * Similar to css_for_each_descendant_pre() but performs post-order
  * traversal instead.  Note that the walk visibility guarantee described in
  * pre-order walk doesn't apply the same to post-order walks.
  */
-#define cgroup_for_each_descendant_post(pos, cgroup)                   \
-       for (pos = cgroup_next_descendant_post(NULL, (cgroup)); (pos);  \
-            pos = cgroup_next_descendant_post((pos), (cgroup)))
+#define css_for_each_descendant_post(pos, css)                         \
+       for ((pos) = css_next_descendant_post(NULL, (css)); (pos);      \
+            (pos) = css_next_descendant_post((pos), (css)))
 
 /* A cgroup_iter should be treated as an opaque object */
 struct cgroup_iter {
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 2b7354faaca7..91eac33fac86 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2814,8 +2814,8 @@ static void cgroup_cfts_prepare(void)
        /*
         * Thanks to the entanglement with vfs inode locking, we can't walk
         * the existing cgroups under cgroup_mutex and create files.
-        * Instead, we use cgroup_for_each_descendant_pre() and drop RCU
-        * read lock before calling cgroup_addrm_files().
+        * Instead, we use css_for_each_descendant_pre() and drop RCU read
+        * lock before calling cgroup_addrm_files().
         */
        mutex_lock(&cgroup_mutex);
 }
@@ -2825,10 +2825,11 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)
 {
        LIST_HEAD(pending);
        struct cgroup_subsys *ss = cfts[0].ss;
-       struct cgroup *cgrp, *root = &ss->root->top_cgroup;
+       struct cgroup *root = &ss->root->top_cgroup;
        struct super_block *sb = ss->root->sb;
        struct dentry *prev = NULL;
        struct inode *inode;
+       struct cgroup_subsys_state *css;
        u64 update_before;
        int ret = 0;
 
@@ -2861,7 +2862,9 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)
 
        /* add/rm files for all cgroups created before */
        rcu_read_lock();
-       cgroup_for_each_descendant_pre(cgrp, root) {
+       css_for_each_descendant_pre(css, cgroup_css(root, ss->subsys_id)) {
+               struct cgroup *cgrp = css->cgroup;
+
                if (cgroup_is_dead(cgrp))
                        continue;
 
@@ -3037,17 +3040,21 @@ static void cgroup_enable_task_cg_lists(void)
 }
 
 /**
- * cgroup_next_child - find the next child of a given cgroup
- * @pos: the current position (%NULL to initiate traversal)
- * @cgrp: cgroup whose descendants to walk
+ * css_next_child - find the next child of a given css
+ * @pos_css: the current position (%NULL to initiate traversal)
+ * @parent_css: css whose children to walk
  *
- * This function returns the next child of @cgrp and should be called under
- * RCU read lock.  The only requirement is that @cgrp and @pos are
- * accessible.  The next sibling is guaranteed to be returned regardless of
- * their states.
+ * This function returns the next child of @parent_css and should be called
+ * under RCU read lock.  The only requirement is that @parent_css and
+ * @pos_css are accessible.  The next sibling is guaranteed to be returned
+ * regardless of their states.
  */
-struct cgroup *cgroup_next_child(struct cgroup *pos, struct cgroup *cgrp)
+struct cgroup_subsys_state *
+css_next_child(struct cgroup_subsys_state *pos_css,
+              struct cgroup_subsys_state *parent_css)
 {
+       struct cgroup *pos = pos_css ? pos_css->cgroup : NULL;
+       struct cgroup *cgrp = parent_css->cgroup;
        struct cgroup *next;
 
        WARN_ON_ONCE(!rcu_read_lock_held());
@@ -3081,59 +3088,64 @@ struct cgroup *cgroup_next_child(struct cgroup *pos, struct cgroup *cgrp)
                        break;
        }
 
-       if (&next->sibling != &cgrp->children)
-               return next;
-       return NULL;
+       if (&next->sibling == &cgrp->children)
+               return NULL;
+
+       if (parent_css->ss)
+               return cgroup_css(next, parent_css->ss->subsys_id);
+       else
+               return &next->dummy_css;
 }
-EXPORT_SYMBOL_GPL(cgroup_next_child);
+EXPORT_SYMBOL_GPL(css_next_child);
 
 /**
- * cgroup_next_descendant_pre - find the next descendant for pre-order walk
+ * css_next_descendant_pre - find the next descendant for pre-order walk
  * @pos: the current position (%NULL to initiate traversal)
- * @cgroup: cgroup whose descendants to walk
+ * @root: css whose descendants to walk
  *
- * To be used by cgroup_for_each_descendant_pre().  Find the next
- * descendant to visit for pre-order traversal of @cgroup's descendants.
+ * To be used by css_for_each_descendant_pre().  Find the next descendant
+ * to visit for pre-order traversal of @root's descendants.
  *
  * While this function requires RCU read locking, it doesn't require the
  * whole traversal to be contained in a single RCU critical section.  This
  * function will return the correct next descendant as long as both @pos
- * and @cgroup are accessible and @pos is a descendant of @cgroup.
+ * and @root are accessible and @pos is a descendant of @root.
  */
-struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
-                                         struct cgroup *cgroup)
+struct cgroup_subsys_state *
+css_next_descendant_pre(struct cgroup_subsys_state *pos,
+                       struct cgroup_subsys_state *root)
 {
-       struct cgroup *next;
+       struct cgroup_subsys_state *next;
 
        WARN_ON_ONCE(!rcu_read_lock_held());
 
-       /* if first iteration, pretend we just visited @cgroup */
+       /* if first iteration, pretend we just visited @root */
        if (!pos)
-               pos = cgroup;
+               pos = root;
 
        /* visit the first child if exists */
-       next = cgroup_next_child(NULL, pos);
+       next = css_next_child(NULL, pos);
        if (next)
                return next;
 
        /* no child, visit my or the closest ancestor's next sibling */
-       while (pos != cgroup) {
-               next = cgroup_next_child(pos, pos->parent);
+       while (pos != root) {
+               next = css_next_child(pos, css_parent(pos));
                if (next)
                        return next;
-               pos = pos->parent;
+               pos = css_parent(pos);
        }
 
        return NULL;
 }
-EXPORT_SYMBOL_GPL(cgroup_next_descendant_pre);
+EXPORT_SYMBOL_GPL(css_next_descendant_pre);
 
 /**
- * cgroup_rightmost_descendant - return the rightmost descendant of a cgroup
- * @pos: cgroup of interest
+ * css_rightmost_descendant - return the rightmost descendant of a css
+ * @pos: css of interest
  *
- * Return the rightmost descendant of @pos.  If there's no descendant,
- * @pos is returned.  This can be used during pre-order traversal to skip
+ * Return the rightmost descendant of @pos.  If there's no descendant, @pos
+ * is returned.  This can be used during pre-order traversal to skip
  * subtree of @pos.
  *
  * While this function requires RCU read locking, it doesn't require the
@@ -3141,9 +3153,10 @@ EXPORT_SYMBOL_GPL(cgroup_next_descendant_pre);
  * function will return the correct rightmost descendant as long as @pos is
  * accessible.
  */
-struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos)
+struct cgroup_subsys_state *
+css_rightmost_descendant(struct cgroup_subsys_state *pos)
 {
-       struct cgroup *last, *tmp;
+       struct cgroup_subsys_state *last, *tmp;
 
        WARN_ON_ONCE(!rcu_read_lock_held());
 
@@ -3151,62 +3164,64 @@ struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos)
                last = pos;
                /* ->prev isn't RCU safe, walk ->next till the end */
                pos = NULL;
-               cgroup_for_each_child(tmp, last)
+               css_for_each_child(tmp, last)
                        pos = tmp;
        } while (pos);
 
        return last;
 }
-EXPORT_SYMBOL_GPL(cgroup_rightmost_descendant);
+EXPORT_SYMBOL_GPL(css_rightmost_descendant);
 
-static struct cgroup *cgroup_leftmost_descendant(struct cgroup *pos)
+static struct cgroup_subsys_state *
+css_leftmost_descendant(struct cgroup_subsys_state *pos)
 {
-       struct cgroup *last;
+       struct cgroup_subsys_state *last;
 
        do {
                last = pos;
-               pos = cgroup_next_child(NULL, pos);
+               pos = css_next_child(NULL, pos);
        } while (pos);
 
        return last;
 }
 
 /**
- * cgroup_next_descendant_post - find the next descendant for post-order walk
+ * css_next_descendant_post - find the next descendant for post-order walk
  * @pos: the current position (%NULL to initiate traversal)
- * @cgroup: cgroup whose descendants to walk
+ * @root: css whose descendants to walk
  *
- * To be used by cgroup_for_each_descendant_post().  Find the next
- * descendant to visit for post-order traversal of @cgroup's descendants.
+ * To be used by css_for_each_descendant_post().  Find the next descendant
+ * to visit for post-order traversal of @root's descendants.
  *
  * While this function requires RCU read locking, it doesn't require the
  * whole traversal to be contained in a single RCU critical section.  This
  * function will return the correct next descendant as long as both @pos
  * and @cgroup are accessible and @pos is a descendant of @cgroup.
  */
-struct cgroup *cgroup_next_descendant_post(struct cgroup *pos,
-                                          struct cgroup *cgroup)
+struct cgroup_subsys_state *
+css_next_descendant_post(struct cgroup_subsys_state *pos,
+                        struct cgroup_subsys_state *root)
 {
-       struct cgroup *next;
+       struct cgroup_subsys_state *next;
 
        WARN_ON_ONCE(!rcu_read_lock_held());
 
        /* if first iteration, visit the leftmost descendant */
        if (!pos) {
-               next = cgroup_leftmost_descendant(cgroup);
-               return next != cgroup ? next : NULL;
+               next = css_leftmost_descendant(root);
+               return next != root ? next : NULL;
        }
 
        /* if there's an unvisited sibling, visit its leftmost descendant */
-       next = cgroup_next_child(pos, pos->parent);
+       next = css_next_child(pos, css_parent(pos));
        if (next)
-               return cgroup_leftmost_descendant(next);
+               return css_leftmost_descendant(next);
 
        /* no sibling left, visit parent */
-       next = pos->parent;
-       return next != cgroup ? next : NULL;
+       next = css_parent(pos);
+       return next != root ? next : NULL;
 }
-EXPORT_SYMBOL_GPL(cgroup_next_descendant_post);
+EXPORT_SYMBOL_GPL(css_next_descendant_post);
 
 void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it)
        __acquires(css_set_lock)
@@ -4549,9 +4564,9 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
        /*
         * Mark @cgrp dead.  This prevents further task migration and child
         * creation by disabling cgroup_lock_live_group().  Note that
-        * CGRP_DEAD assertion is depended upon by cgroup_next_child() to
+        * CGRP_DEAD assertion is depended upon by css_next_child() to
         * resume iteration after dropping RCU read lock.  See
-        * cgroup_next_child() for details.
+        * css_next_child() for details.
         */
        set_bit(CGRP_DEAD, &cgrp->flags);
 
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 19613ba51444..98ca48d9ceb4 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -50,11 +50,6 @@ static inline struct freezer *css_freezer(struct cgroup_subsys_state *css)
        return css ? container_of(css, struct freezer, css) : NULL;
 }
 
-static inline struct freezer *cgroup_freezer(struct cgroup *cgroup)
-{
-       return css_freezer(cgroup_css(cgroup, freezer_subsys_id));
-}
-
 static inline struct freezer *task_freezer(struct task_struct *task)
 {
        return css_freezer(task_css(task, freezer_subsys_id));
@@ -120,7 +115,7 @@ static int freezer_css_online(struct cgroup_subsys_state *css)
        /*
         * The following double locking and freezing state inheritance
         * guarantee that @cgroup can never escape ancestors' freezing
-        * states.  See cgroup_for_each_descendant_pre() for details.
+        * states.  See css_for_each_descendant_pre() for details.
         */
        if (parent)
                spin_lock_irq(&parent->lock);
@@ -262,7 +257,7 @@ out:
 static void update_if_frozen(struct cgroup_subsys_state *css)
 {
        struct freezer *freezer = css_freezer(css);
-       struct cgroup *pos;
+       struct cgroup_subsys_state *pos;
        struct cgroup_iter it;
        struct task_struct *task;
 
@@ -275,8 +270,8 @@ static void update_if_frozen(struct cgroup_subsys_state *css)
                goto out_unlock;
 
        /* are all (live) children frozen? */
-       cgroup_for_each_child(pos, css->cgroup) {
-               struct freezer *child = cgroup_freezer(pos);
+       css_for_each_child(pos, css) {
+               struct freezer *child = css_freezer(pos);
 
                if ((child->state & CGROUP_FREEZER_ONLINE) &&
                    !(child->state & CGROUP_FROZEN))
@@ -309,13 +304,13 @@ out_unlock:
 static int freezer_read(struct cgroup_subsys_state *css, struct cftype *cft,
                        struct seq_file *m)
 {
-       struct cgroup *pos;
+       struct cgroup_subsys_state *pos;
 
        rcu_read_lock();
 
        /* update states bottom-up */
-       cgroup_for_each_descendant_post(pos, css->cgroup)
-               update_if_frozen(cgroup_css(pos, freezer_subsys_id));
+       css_for_each_descendant_post(pos, css)
+               update_if_frozen(pos);
        update_if_frozen(css);
 
        rcu_read_unlock();
@@ -396,7 +391,7 @@ static void freezer_apply_state(struct freezer *freezer, bool freeze,
  */
 static void freezer_change_state(struct freezer *freezer, bool freeze)
 {
-       struct cgroup *pos;
+       struct cgroup_subsys_state *pos;
 
        /* update @freezer */
        spin_lock_irq(&freezer->lock);
@@ -409,8 +404,8 @@ static void freezer_change_state(struct freezer *freezer, bool freeze)
         * CGROUP_FREEZING_PARENT.
         */
        rcu_read_lock();
-       cgroup_for_each_descendant_pre(pos, freezer->css.cgroup) {
-               struct freezer *pos_f = cgroup_freezer(pos);
+       css_for_each_descendant_pre(pos, &freezer->css) {
+               struct freezer *pos_f = css_freezer(pos);
                struct freezer *parent = parent_freezer(pos_f);
 
                /*
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 89b76e1d3aa1..be4f5036ea5e 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -210,29 +210,29 @@ static struct cpuset top_cpuset = {
 /**
  * cpuset_for_each_child - traverse online children of a cpuset
  * @child_cs: loop cursor pointing to the current child
- * @pos_cgrp: used for iteration
+ * @pos_css: used for iteration
  * @parent_cs: target cpuset to walk children of
  *
  * Walk @child_cs through the online children of @parent_cs.  Must be used
  * with RCU read locked.
  */
-#define cpuset_for_each_child(child_cs, pos_cgrp, parent_cs)           \
-       cgroup_for_each_child((pos_cgrp), (parent_cs)->css.cgroup)      \
-               if (is_cpuset_online(((child_cs) = cgroup_cs((pos_cgrp)))))
+#define cpuset_for_each_child(child_cs, pos_css, parent_cs)            \
+       css_for_each_child((pos_css), &(parent_cs)->css)                \
+               if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
 
 /**
  * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
  * @des_cs: loop cursor pointing to the current descendant
- * @pos_cgrp: used for iteration
+ * @pos_css: used for iteration
  * @root_cs: target cpuset to walk ancestor of
  *
  * Walk @des_cs through the online descendants of @root_cs.  Must be used
- * with RCU read locked.  The caller may modify @pos_cgrp by calling
- * cgroup_rightmost_descendant() to skip subtree.
+ * with RCU read locked.  The caller may modify @pos_css by calling
+ * css_rightmost_descendant() to skip subtree.
  */
-#define cpuset_for_each_descendant_pre(des_cs, pos_cgrp, root_cs)      \
-       cgroup_for_each_descendant_pre((pos_cgrp), (root_cs)->css.cgroup) \
-               if (is_cpuset_online(((des_cs) = cgroup_cs((pos_cgrp)))))
+#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs)       \
+       css_for_each_descendant_pre((pos_css), &(root_cs)->css)         \
+               if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
 
 /*
  * There are two global mutexes guarding cpuset structures - cpuset_mutex
@@ -430,7 +430,7 @@ static void free_trial_cpuset(struct cpuset *trial)
 
 static int validate_change(struct cpuset *cur, struct cpuset *trial)
 {
-       struct cgroup *cgrp;
+       struct cgroup_subsys_state *css;
        struct cpuset *c, *par;
        int ret;
 
@@ -438,7 +438,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
 
        /* Each of our child cpusets must be a subset of us */
        ret = -EBUSY;
-       cpuset_for_each_child(c, cgrp, cur)
+       cpuset_for_each_child(c, css, cur)
                if (!is_cpuset_subset(c, trial))
                        goto out;
 
@@ -459,7 +459,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
         * overlap
         */
        ret = -EINVAL;
-       cpuset_for_each_child(c, cgrp, par) {
+       cpuset_for_each_child(c, css, par) {
                if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
                    c != cur &&
                    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
@@ -508,13 +508,13 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr,
                                    struct cpuset *root_cs)
 {
        struct cpuset *cp;
-       struct cgroup *pos_cgrp;
+       struct cgroup_subsys_state *pos_css;
 
        rcu_read_lock();
-       cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
+       cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
                /* skip the whole subtree if @cp doesn't have any CPU */
                if (cpumask_empty(cp->cpus_allowed)) {
-                       pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
+                       pos_css = css_rightmost_descendant(pos_css);
                        continue;
                }
 
@@ -589,7 +589,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
        struct sched_domain_attr *dattr;  /* attributes for custom domains */
        int ndoms = 0;          /* number of sched domains in result */
        int nslot;              /* next empty doms[] struct cpumask slot */
-       struct cgroup *pos_cgrp;
+       struct cgroup_subsys_state *pos_css;
 
        doms = NULL;
        dattr = NULL;
@@ -618,7 +618,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
        csn = 0;
 
        rcu_read_lock();
-       cpuset_for_each_descendant_pre(cp, pos_cgrp, &top_cpuset) {
+       cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
                /*
                 * Continue traversing beyond @cp iff @cp has some CPUs and
                 * isn't load balancing.  The former is obvious.  The
@@ -635,7 +635,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
                csa[csn++] = cp;
 
                /* skip @cp's subtree */
-               pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
+               pos_css = css_rightmost_descendant(pos_css);
        }
        rcu_read_unlock();
 
@@ -886,16 +886,16 @@ static void update_tasks_cpumask_hier(struct cpuset *root_cs,
                                      bool update_root, struct ptr_heap *heap)
 {
        struct cpuset *cp;
-       struct cgroup *pos_cgrp;
+       struct cgroup_subsys_state *pos_css;
 
        if (update_root)
                update_tasks_cpumask(root_cs, heap);
 
        rcu_read_lock();
-       cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
+       cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
                /* skip the whole subtree if @cp have some CPU */
                if (!cpumask_empty(cp->cpus_allowed)) {
-                       pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
+                       pos_css = css_rightmost_descendant(pos_css);
                        continue;
                }
                if (!css_tryget(&cp->css))
@@ -1143,16 +1143,16 @@ static void update_tasks_nodemask_hier(struct cpuset *root_cs,
                                       bool update_root, struct ptr_heap *heap)
 {
        struct cpuset *cp;
-       struct cgroup *pos_cgrp;
+       struct cgroup_subsys_state *pos_css;
 
        if (update_root)
                update_tasks_nodemask(root_cs, heap);
 
        rcu_read_lock();
-       cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
+       cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
                /* skip the whole subtree if @cp have some CPU */
                if (!nodes_empty(cp->mems_allowed)) {
-                       pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
+                       pos_css = css_rightmost_descendant(pos_css);
                        continue;
                }
                if (!css_tryget(&cp->css))
@@ -1973,7 +1973,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
        struct cpuset *cs = css_cs(css);
        struct cpuset *parent = parent_cs(cs);
        struct cpuset *tmp_cs;
-       struct cgroup *pos_cgrp;
+       struct cgroup_subsys_state *pos_css;
 
        if (!parent)
                return 0;
@@ -2005,7 +2005,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
         * (and likewise for mems) to the new cgroup.
         */
        rcu_read_lock();
-       cpuset_for_each_child(tmp_cs, pos_cgrp, parent) {
+       cpuset_for_each_child(tmp_cs, pos_css, parent) {
                if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
                        rcu_read_unlock();
                        goto out_unlock;
@@ -2252,10 +2252,10 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
        /* if cpus or mems changed, we need to propagate to descendants */
        if (cpus_updated || mems_updated) {
                struct cpuset *cs;
-               struct cgroup *pos_cgrp;
+               struct cgroup_subsys_state *pos_css;
 
                rcu_read_lock();
-               cpuset_for_each_descendant_pre(cs, pos_cgrp, &top_cpuset) {
+               cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
                        if (!css_tryget(&cs->css))
                                continue;
                        rcu_read_unlock();
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ab64dfc84f8c..2285319e23a9 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1082,7 +1082,7 @@ struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
 static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
                struct mem_cgroup *last_visited)
 {
-       struct cgroup *prev_cgroup, *next_cgroup;
+       struct cgroup_subsys_state *prev_css, *next_css;
 
        /*
         * Root is not visited by cgroup iterators so it needs an
@@ -1091,11 +1091,9 @@ static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
        if (!last_visited)
                return root;
 
-       prev_cgroup = (last_visited == root) ? NULL
-               : last_visited->css.cgroup;
+       prev_css = (last_visited == root) ? NULL : &last_visited->css;
 skip_node:
-       next_cgroup = cgroup_next_descendant_pre(
-                       prev_cgroup, root->css.cgroup);
+       next_css = css_next_descendant_pre(prev_css, &root->css);
 
        /*
         * Even if we found a group we have to make sure it is
@@ -1104,13 +1102,13 @@ skip_node:
         * last_visited css is safe to use because it is
         * protected by css_get and the tree walk is rcu safe.
         */
-       if (next_cgroup) {
-               struct mem_cgroup *mem = mem_cgroup_from_cont(
-                               next_cgroup);
+       if (next_css) {
+               struct mem_cgroup *mem = mem_cgroup_from_css(next_css);
+
                if (css_tryget(&mem->css))
                        return mem;
                else {
-                       prev_cgroup = next_cgroup;
+                       prev_css = next_css;
                        goto skip_node;
                }
        }
@@ -4939,10 +4937,10 @@ static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg)
  */
 static inline bool __memcg_has_children(struct mem_cgroup *memcg)
 {
-       struct cgroup *pos;
+       struct cgroup_subsys_state *pos;
 
        /* bounce at first found */
-       cgroup_for_each_child(pos, memcg->css.cgroup)
+       css_for_each_child(pos, &memcg->css)
                return true;
        return false;
 }
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index e0ca464fa854..9bf230aa28b0 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -56,11 +56,6 @@ static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
        return s ? container_of(s, struct dev_cgroup, css) : NULL;
 }
 
-static inline struct dev_cgroup *cgroup_to_devcgroup(struct cgroup *cgroup)
-{
-       return css_to_devcgroup(cgroup_css(cgroup, devices_subsys_id));
-}
-
 static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
 {
        return css_to_devcgroup(task_css(task, devices_subsys_id));
@@ -447,13 +442,13 @@ static void revalidate_active_exceptions(struct dev_cgroup *devcg)
 static int propagate_exception(struct dev_cgroup *devcg_root,
                               struct dev_exception_item *ex)
 {
-       struct cgroup *root = devcg_root->css.cgroup, *pos;
+       struct cgroup_subsys_state *pos;
        int rc = 0;
 
        rcu_read_lock();
 
-       cgroup_for_each_descendant_pre(pos, root) {
-               struct dev_cgroup *devcg = cgroup_to_devcgroup(pos);
+       css_for_each_descendant_pre(pos, &devcg_root->css) {
+               struct dev_cgroup *devcg = css_to_devcgroup(pos);
 
                /*
                 * Because devcgroup_mutex is held, no devcg will become