author     Tejun Heo <tj@kernel.org>   2013-08-08 20:11:27 -0400
committer  Tejun Heo <tj@kernel.org>   2013-08-08 20:11:27 -0400
commit     bd8815a6d802fc16a7a106e170593aa05dc17e72
tree       6be9383cac4c468fe77b3a598cdd1664dba4afb4  /kernel/cpuset.c
parent     95109b627ba6a043c181fa5fa45d1c754dd44fbc
cgroup: make css_for_each_descendant() and friends include the origin css in the iteration
Previously, none of the css descendant iterators included the origin
(root of subtree) css in the iteration. The reasons were to stay
consistent with css_for_each_child() and that, at the time of
introduction, more use cases needed to skip the origin anyway.
However, given that css_is_descendant() considers self to be a
descendant, omitting the origin css has become confusing, and the
accumulated use cases rather clearly indicate that including the
origin results in simpler code overall.
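For illustration only (not part of the original commit message), a minimal sketch of the new contract; my_ss_propagate() and my_ss_update() are hypothetical names for a subsystem's walk and per-css update helpers:

#include <linux/cgroup.h>

static void my_ss_update(struct cgroup_subsys_state *css);   /* hypothetical */

/*
 * Hypothetical walker: with this change, @root_css itself is the first
 * css visited by css_for_each_descendant_pre(), so the root and its
 * descendants can share one loop body instead of special-casing the
 * root before the loop.
 */
static void my_ss_propagate(struct cgroup_subsys_state *root_css)
{
        struct cgroup_subsys_state *pos;

        rcu_read_lock();
        css_for_each_descendant_pre(pos, root_css)
                my_ss_update(pos);      /* runs for @root_css too */
        rcu_read_unlock();
}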
While this is the kind of change that can easily lead to subtle bugs,
the cgroup API, including the iterators, has recently gone through
major restructuring, and no out-of-tree changes will apply without
adjustments, making this a relatively acceptable opportunity for a
change of this kind.
The conversions are mostly straightforward. If the iteration block
had explicit origin handling before or after the loop, it is moved
inside the iteration; if not, an "if (pos == origin) continue;" check
is added (see the sketch below). Some conversions end up with extra
reference get/put operations around origin handling as a result of
consolidating the origin handling with the rest of the iteration. The
extra ref operations aren't strictly necessary and shouldn't cause any
noticeable difference.
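As an editorial sketch of the conversion patterns described above (walk_and_update(), do_update() and the update_root flag are hypothetical, not identifiers from this patch): a caller that wants descendants only adds the origin check, and a caller that used to handle the origin before the loop folds that handling into the same check.

#include <linux/cgroup.h>

static void do_update(struct cgroup_subsys_state *css);       /* hypothetical */

/*
 * Hypothetical conversion: the pre-loop "if (update_root) do_update(root)"
 * pattern and the descendants-only pattern both collapse into a single
 * origin check inside the iteration.
 */
static void walk_and_update(struct cgroup_subsys_state *root_css,
                            bool update_root)
{
        struct cgroup_subsys_state *pos;

        rcu_read_lock();
        css_for_each_descendant_pre(pos, root_css) {
                if (pos == root_css && !update_root)
                        continue;       /* skip the origin when not wanted */
                do_update(pos);
        }
        rcu_read_unlock();
}

This mirrors the update_tasks_*_hier() conversions in the diff below.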
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Acked-by: Aristeu Rozanski <aris@redhat.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Matt Helsley <matthltc@us.ibm.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Balbir Singh <bsingharora@gmail.com>
Diffstat (limited to 'kernel/cpuset.c')
-rw-r--r--   kernel/cpuset.c   42
1 file changed, 26 insertions, 16 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index bf69717325b4..72a0383f382f 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -222,7 +222,8 @@ static struct cpuset top_cpuset = {
  *
  * Walk @des_cs through the online descendants of @root_cs. Must be used
  * with RCU read locked. The caller may modify @pos_css by calling
- * css_rightmost_descendant() to skip subtree.
+ * css_rightmost_descendant() to skip subtree. @root_cs is included in the
+ * iteration and the first node to be visited.
  */
 #define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs)        \
         css_for_each_descendant_pre((pos_css), &(root_cs)->css)         \
@@ -506,6 +507,9 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr,
 
         rcu_read_lock();
         cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
+                if (cp == root_cs)
+                        continue;
+
                 /* skip the whole subtree if @cp doesn't have any CPU */
                 if (cpumask_empty(cp->cpus_allowed)) {
                         pos_css = css_rightmost_descendant(pos_css);
@@ -613,6 +617,8 @@ static int generate_sched_domains(cpumask_var_t **domains,
 
         rcu_read_lock();
         cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
+                if (cp == &top_cpuset)
+                        continue;
                 /*
                  * Continue traversing beyond @cp iff @cp has some CPUs and
                  * isn't load balancing. The former is obvious. The
@@ -875,15 +881,17 @@ static void update_tasks_cpumask_hier(struct cpuset *root_cs,
         struct cpuset *cp;
         struct cgroup_subsys_state *pos_css;
 
-        if (update_root)
-                update_tasks_cpumask(root_cs, heap);
-
         rcu_read_lock();
         cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
-                /* skip the whole subtree if @cp have some CPU */
-                if (!cpumask_empty(cp->cpus_allowed)) {
-                        pos_css = css_rightmost_descendant(pos_css);
-                        continue;
+                if (cp == root_cs) {
+                        if (!update_root)
+                                continue;
+                } else {
+                        /* skip the whole subtree if @cp have some CPU */
+                        if (!cpumask_empty(cp->cpus_allowed)) {
+                                pos_css = css_rightmost_descendant(pos_css);
+                                continue;
+                        }
                 }
                 if (!css_tryget(&cp->css))
                         continue;
@@ -1130,15 +1138,17 @@ static void update_tasks_nodemask_hier(struct cpuset *root_cs,
         struct cpuset *cp;
         struct cgroup_subsys_state *pos_css;
 
-        if (update_root)
-                update_tasks_nodemask(root_cs, heap);
-
         rcu_read_lock();
         cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
-                /* skip the whole subtree if @cp have some CPU */
-                if (!nodes_empty(cp->mems_allowed)) {
-                        pos_css = css_rightmost_descendant(pos_css);
-                        continue;
+                if (cp == root_cs) {
+                        if (!update_root)
+                                continue;
+                } else {
+                        /* skip the whole subtree if @cp have some CPU */
+                        if (!nodes_empty(cp->mems_allowed)) {
+                                pos_css = css_rightmost_descendant(pos_css);
+                                continue;
+                        }
                 }
                 if (!css_tryget(&cp->css))
                         continue;
@@ -2237,7 +2247,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
 
         rcu_read_lock();
         cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
-                if (!css_tryget(&cs->css))
+                if (cs == &top_cpuset || !css_tryget(&cs->css))
                         continue;
                 rcu_read_unlock();
 