author		Tejun Heo <tj@kernel.org>	2013-05-23 21:55:38 -0400
committer	Tejun Heo <tj@kernel.org>	2013-05-23 21:55:38 -0400
commit		75501a6d59e989e5c286716e5b3b66ace4660e83 (patch)
tree		087af4f93bba2257ae139c5e4a0f3b850954ed81 /include/linux/cgroup.h
parent		53fa5261747a90746531e8a1c81eeb78fedc2f71 (diff)
cgroup: update iterators to use cgroup_next_sibling()
This patch converts cgroup_for_each_child(), cgroup_next_descendant_pre/post() and thus cgroup_for_each_descendant_pre/post() to use cgroup_next_sibling() instead of manually dereferencing ->sibling.next.

The only reason the iterators couldn't allow dropping RCU read lock while iteration is in progress was because they couldn't determine the next sibling safely once RCU read lock is dropped. Using cgroup_next_sibling() removes that problem and enables all iterators to allow dropping RCU read lock in the middle. Comments are updated accordingly.

This makes the iterators easier to use and will simplify controllers.

Note that @cgroup argument is renamed to @cgrp in cgroup_for_each_child() because it conflicts with "struct cgroup" used in the new macro body.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Serge E. Hallyn <serge.hallyn@ubuntu.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
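[Illustrative sketch, not part of the patch] With this change a controller can sleep in the middle of a child walk. A minimal example of the pattern, where parent, my_subsys_id and do_blocking_work() are hypothetical stand-ins; css_get()/css_put() and cgroup_subsys_state() are the existing cgroup APIs used to pin @pos across the unlocked section:

	struct cgroup *pos;

	rcu_read_lock();
	cgroup_for_each_child(pos, parent) {
		/* my_subsys_id: hypothetical controller's subsys id */
		struct cgroup_subsys_state *css =
			cgroup_subsys_state(pos, my_subsys_id);

		css_get(css);		/* pin @pos before unlocking */
		rcu_read_unlock();

		do_blocking_work(pos);	/* hypothetical, may sleep */

		rcu_read_lock();
		/*
		 * @pos stays accessible for the rest of this RCU read
		 * section, and cgroup_next_sibling() can advance the
		 * walk even if @pos was removed while unlocked.
		 */
		css_put(css);
	}
	rcu_read_unlock();

Holding the ref until the RCU read lock is re-acquired satisfies the new contract: @pos remains accessible up to the start of the next iteration, which is exactly when cgroup_next_sibling() runs.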
Diffstat (limited to 'include/linux/cgroup.h')
-rw-r--r--	include/linux/cgroup.h	18
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index ee041a01a67e..d0ad3794b947 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -688,9 +688,9 @@ struct cgroup *cgroup_next_sibling(struct cgroup *pos);
 /**
  * cgroup_for_each_child - iterate through children of a cgroup
  * @pos: the cgroup * to use as the loop cursor
- * @cgroup: cgroup whose children to walk
+ * @cgrp: cgroup whose children to walk
  *
- * Walk @cgroup's children. Must be called under rcu_read_lock(). A child
+ * Walk @cgrp's children. Must be called under rcu_read_lock(). A child
  * cgroup which hasn't finished ->css_online() or already has finished
  * ->css_offline() may show up during traversal and it's each subsystem's
  * responsibility to verify that each @pos is alive.
@@ -698,9 +698,15 @@ struct cgroup *cgroup_next_sibling(struct cgroup *pos);
  * If a subsystem synchronizes against the parent in its ->css_online() and
  * before starting iterating, a cgroup which finished ->css_online() is
  * guaranteed to be visible in the future iterations.
+ *
+ * It is allowed to temporarily drop RCU read lock during iteration. The
+ * caller is responsible for ensuring that @pos remains accessible until
+ * the start of the next iteration by, for example, bumping the css refcnt.
  */
-#define cgroup_for_each_child(pos, cgroup)				\
-	list_for_each_entry_rcu(pos, &(cgroup)->children, sibling)
+#define cgroup_for_each_child(pos, cgrp)				\
+	for ((pos) = list_first_or_null_rcu(&(cgrp)->children,		\
+					    struct cgroup, sibling);	\
+	     (pos); (pos) = cgroup_next_sibling((pos)))
 
 struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
 					  struct cgroup *cgroup);
@@ -759,6 +765,10 @@ struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos);
  * Alternatively, a subsystem may choose to use a single global lock to
  * synchronize ->css_online() and ->css_offline() against tree-walking
  * operations.
+ *
+ * It is allowed to temporarily drop RCU read lock during iteration. The
+ * caller is responsible for ensuring that @pos remains accessible until
+ * the start of the next iteration by, for example, bumping the css refcnt.
  */
 #define cgroup_for_each_descendant_pre(pos, cgroup)			\
 	for (pos = cgroup_next_descendant_pre(NULL, (cgroup)); (pos);	\
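[Illustrative sketch, not part of the patch] The same lock-dropping rule now applies to the descendant walks. A pre-order walk over a hypothetical @root, with my_pos_is_online() standing in for the per-controller liveness check that the comment above makes each subsystem responsible for:

	struct cgroup *pos;

	rcu_read_lock();
	cgroup_for_each_descendant_pre(pos, root) {
		if (!my_pos_is_online(pos))	/* hypothetical liveness check */
			continue;
		/*
		 * Non-blocking work can run directly; to sleep here,
		 * pin @pos with a css ref and drop/re-acquire the RCU
		 * read lock as in the child-walk sketch above.
		 */
		account_usage(pos);		/* hypothetical */
	}
	rcu_read_unlock();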