author     Tejun Heo <tj@kernel.org>  2013-08-08 20:11:24 -0400
committer  Tejun Heo <tj@kernel.org>  2013-08-08 20:11:24 -0400
commit     3b287a505ef4024634beb12a93773254909d5dae (patch)
tree       8cd2892ea52ec1f17a75c333daebd8d967d1f7c4
parent     182446d087906de40e514573a92a97b203695f71 (diff)
cgroup: convert cgroup_next_sibling() to cgroup_next_child()
cgroup is transitioning to using css (cgroup_subsys_state) as the main
subsys interface handle instead of cgroup, and the iterators will be
updated to use css too.  The iterators need to walk the cgroup
hierarchy and return the css's matching the origin css, which is a bit
cumbersome to open code.

This patch converts cgroup_next_sibling() to cgroup_next_child() so
that it can handle all steps of direct child iteration.  This will be
used to update iterators to take @css instead of @cgrp.  In addition
to the new iteration init handling, cgroup_next_child() is
restructured so that the different branches share the end-of-iteration
condition check.

This patch doesn't change any behavior.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
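For illustration only, a minimal sketch of what the %NULL-init convention
enables: a direct-child iterator can be phrased entirely in terms of
cgroup_next_child(), since passing %NULL as @pos starts the walk at
@cgrp's first child and the shared end-of-list check terminates it.  The
macro name below is hypothetical and is not part of this patch:

	/* hypothetical macro, shown only to illustrate the %NULL-init step */
	#define cgroup_for_each_child_sketch(pos, cgrp)			\
		for ((pos) = cgroup_next_child(NULL, (cgrp)); (pos);	\
		     (pos) = cgroup_next_child((pos), (cgrp)))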
 include/linux/cgroup.h |  4
 kernel/cgroup.c        | 59
 2 files changed, 32 insertions(+), 31 deletions(-)
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 0b91436c68ef..5f9ba5881717 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -779,7 +779,7 @@ static inline struct cgroup *cgroup_from_id(struct cgroup_subsys *ss, int id)
 	return idr_find(&ss->root->cgroup_idr, id);
 }
 
-struct cgroup *cgroup_next_sibling(struct cgroup *pos);
+struct cgroup *cgroup_next_child(struct cgroup *pos, struct cgroup *cgrp);
 
 /**
  * cgroup_for_each_child - iterate through children of a cgroup
@@ -802,7 +802,7 @@ struct cgroup *cgroup_next_sibling(struct cgroup *pos);
 #define cgroup_for_each_child(pos, cgrp)				\
 	for ((pos) = list_first_or_null_rcu(&(cgrp)->children,		\
 					    struct cgroup, sibling);	\
-	     (pos); (pos) = cgroup_next_sibling((pos)))
+	     (pos); (pos) = cgroup_next_child((pos), (cgrp)))
 
 struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
 					  struct cgroup *cgroup);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 6ee469837fda..dd55244952bd 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -3037,15 +3037,16 @@ static void cgroup_enable_task_cg_lists(void)
 }
 
 /**
- * cgroup_next_sibling - find the next sibling of a given cgroup
- * @pos: the current cgroup
+ * cgroup_next_child - find the next child of a given cgroup
+ * @pos: the current position (%NULL to initiate traversal)
+ * @cgrp: cgroup whose descendants to walk
  *
- * This function returns the next sibling of @pos and should be called
- * under RCU read lock.  The only requirement is that @pos is accessible.
- * The next sibling is guaranteed to be returned regardless of @pos's
- * state.
+ * This function returns the next child of @cgrp and should be called under
+ * RCU read lock.  The only requirement is that @cgrp and @pos are
+ * accessible.  The next sibling is guaranteed to be returned regardless of
+ * their states.
  */
-struct cgroup *cgroup_next_sibling(struct cgroup *pos)
+struct cgroup *cgroup_next_child(struct cgroup *pos, struct cgroup *cgrp)
 {
 	struct cgroup *next;
 
@@ -3061,30 +3062,30 @@ struct cgroup *cgroup_next_sibling(struct cgroup *pos)
 	 * safe to dereference from this RCU critical section.  If
 	 * ->sibling.next is inaccessible, cgroup_is_dead() is guaranteed
 	 * to be visible as %true here.
+	 *
+	 * If @pos is dead, its next pointer can't be dereferenced;
+	 * however, as each cgroup is given a monotonically increasing
+	 * unique serial number and always appended to the sibling list,
+	 * the next one can be found by walking the parent's children until
+	 * we see a cgroup with higher serial number than @pos's.  While
+	 * this path can be slower, it's taken only when either the current
+	 * cgroup is removed or iteration and removal race.
 	 */
-	if (likely(!cgroup_is_dead(pos))) {
+	if (!pos) {
+		next = list_entry_rcu(cgrp->children.next, struct cgroup, sibling);
+	} else if (likely(!cgroup_is_dead(pos))) {
 		next = list_entry_rcu(pos->sibling.next, struct cgroup, sibling);
-		if (&next->sibling != &pos->parent->children)
-			return next;
-		return NULL;
+	} else {
+		list_for_each_entry_rcu(next, &cgrp->children, sibling)
+			if (next->serial_nr > pos->serial_nr)
+				break;
 	}
 
-	/*
-	 * Can't dereference the next pointer.  Each cgroup is given a
-	 * monotonically increasing unique serial number and always
-	 * appended to the sibling list, so the next one can be found by
-	 * walking the parent's children until we see a cgroup with higher
-	 * serial number than @pos's.
-	 *
-	 * While this path can be slow, it's taken only when either the
-	 * current cgroup is removed or iteration and removal race.
-	 */
-	list_for_each_entry_rcu(next, &pos->parent->children, sibling)
-		if (next->serial_nr > pos->serial_nr)
-			return next;
+	if (&next->sibling != &cgrp->children)
+		return next;
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(cgroup_next_sibling);
+EXPORT_SYMBOL_GPL(cgroup_next_child);
 
 /**
  * cgroup_next_descendant_pre - find the next descendant for pre-order walk
@@ -3117,7 +3118,7 @@ struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
 
 	/* no child, visit my or the closest ancestor's next sibling */
 	while (pos != cgroup) {
-		next = cgroup_next_sibling(pos);
+		next = cgroup_next_child(pos, pos->parent);
 		if (next)
 			return next;
 		pos = pos->parent;
@@ -3198,7 +3199,7 @@ struct cgroup *cgroup_next_descendant_post(struct cgroup *pos,
 	}
 
 	/* if there's an unvisited sibling, visit its leftmost descendant */
-	next = cgroup_next_sibling(pos);
+	next = cgroup_next_child(pos, pos->parent);
 	if (next)
 		return cgroup_leftmost_descendant(next);
 
@@ -4549,9 +4550,9 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
 	/*
 	 * Mark @cgrp dead.  This prevents further task migration and child
 	 * creation by disabling cgroup_lock_live_group().  Note that
-	 * CGRP_DEAD assertion is depended upon by cgroup_next_sibling() to
+	 * CGRP_DEAD assertion is depended upon by cgroup_next_child() to
 	 * resume iteration after dropping RCU read lock.  See
-	 * cgroup_next_sibling() for details.
+	 * cgroup_next_child() for details.
 	 */
 	set_bit(CGRP_DEAD, &cgrp->flags);
 
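As context for the CGRP_DEAD note above, a rough caller-side sketch (not
part of this patch) of the resume-after-dropping-RCU pattern that the
serial-number fallback makes possible.  do_blocking_work() is a
hypothetical stand-in for work that may sleep, and the caller is assumed
to keep @pos itself accessible (e.g. by holding a reference) while the
lock is dropped:

	static void walk_children_may_sleep(struct cgroup *cgrp)
	{
		struct cgroup *pos = NULL;

		rcu_read_lock();
		while ((pos = cgroup_next_child(pos, cgrp))) {
			rcu_read_unlock();

			do_blocking_work(pos);	/* hypothetical helper; may sleep */

			rcu_read_lock();	/* iteration resumes even if @pos was removed */
		}
		rcu_read_unlock();
	}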