diff options
author | Tejun Heo <tj@kernel.org> | 2013-08-08 20:11:25 -0400 |
---|---|---|
committer | Tejun Heo <tj@kernel.org> | 2013-08-08 20:11:25 -0400 |
commit | 492eb21b98f88e411a8bb43d6edcd7d7022add10 (patch) | |
tree | da06df9485fd607762fdec06169f7d9f601e3cf6 /kernel/cpuset.c | |
parent | f48e3924dca268c677c4e338e5d91ad9e6fe6b9e (diff) |
cgroup: make hierarchy iterators deal with cgroup_subsys_state instead of cgroup
cgroup is currently in the process of transitioning to using css
(cgroup_subsys_state) as the primary handle instead of cgroup in
subsystem API. For hierarchy iterators, this is beneficial because
* In most cases, css is the only thing subsystems care about anyway.
* On the planned unified hierarchy, iterations for different
subsystems will need to skip over different subtrees of the
hierarchy depending on which subsystems are enabled on each cgroup.
Passing around css makes it unnecessary to explicitly specify the
subsystem in question, as css is the intersection between a cgroup and
a subsystem.
* For the planned unified hierarchy, css's would need to be created
and destroyed dynamically, independently of the cgroup hierarchy.
Having cgroup core manage css iteration makes enforcing deref rules a
lot easier.
Most subsystem conversions are straightforward. Noteworthy changes
are:
* blkio: cgroup_to_blkcg() is no longer used. Removed.
* freezer: cgroup_freezer() is no longer used. Removed.
* devices: cgroup_to_devcgroup() is no longer used. Removed.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Acked-by: Aristeu Rozanski <aris@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Matt Helsley <matthltc@us.ibm.com>
Cc: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'kernel/cpuset.c')
-rw-r--r-- | kernel/cpuset.c | 58 |
1 files changed, 29 insertions, 29 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 89b76e1d3aa1..be4f5036ea5e 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
@@ -210,29 +210,29 @@ static struct cpuset top_cpuset = { | |||
210 | /** | 210 | /** |
211 | * cpuset_for_each_child - traverse online children of a cpuset | 211 | * cpuset_for_each_child - traverse online children of a cpuset |
212 | * @child_cs: loop cursor pointing to the current child | 212 | * @child_cs: loop cursor pointing to the current child |
213 | * @pos_cgrp: used for iteration | 213 | * @pos_css: used for iteration |
214 | * @parent_cs: target cpuset to walk children of | 214 | * @parent_cs: target cpuset to walk children of |
215 | * | 215 | * |
216 | * Walk @child_cs through the online children of @parent_cs. Must be used | 216 | * Walk @child_cs through the online children of @parent_cs. Must be used |
217 | * with RCU read locked. | 217 | * with RCU read locked. |
218 | */ | 218 | */ |
219 | #define cpuset_for_each_child(child_cs, pos_cgrp, parent_cs) \ | 219 | #define cpuset_for_each_child(child_cs, pos_css, parent_cs) \ |
220 | cgroup_for_each_child((pos_cgrp), (parent_cs)->css.cgroup) \ | 220 | css_for_each_child((pos_css), &(parent_cs)->css) \ |
221 | if (is_cpuset_online(((child_cs) = cgroup_cs((pos_cgrp))))) | 221 | if (is_cpuset_online(((child_cs) = css_cs((pos_css))))) |
222 | 222 | ||
223 | /** | 223 | /** |
224 | * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants | 224 | * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants |
225 | * @des_cs: loop cursor pointing to the current descendant | 225 | * @des_cs: loop cursor pointing to the current descendant |
226 | * @pos_cgrp: used for iteration | 226 | * @pos_css: used for iteration |
227 | * @root_cs: target cpuset to walk ancestor of | 227 | * @root_cs: target cpuset to walk ancestor of |
228 | * | 228 | * |
229 | * Walk @des_cs through the online descendants of @root_cs. Must be used | 229 | * Walk @des_cs through the online descendants of @root_cs. Must be used |
230 | * with RCU read locked. The caller may modify @pos_cgrp by calling | 230 | * with RCU read locked. The caller may modify @pos_css by calling |
231 | * cgroup_rightmost_descendant() to skip subtree. | 231 | * css_rightmost_descendant() to skip subtree. |
232 | */ | 232 | */ |
233 | #define cpuset_for_each_descendant_pre(des_cs, pos_cgrp, root_cs) \ | 233 | #define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs) \ |
234 | cgroup_for_each_descendant_pre((pos_cgrp), (root_cs)->css.cgroup) \ | 234 | css_for_each_descendant_pre((pos_css), &(root_cs)->css) \ |
235 | if (is_cpuset_online(((des_cs) = cgroup_cs((pos_cgrp))))) | 235 | if (is_cpuset_online(((des_cs) = css_cs((pos_css))))) |
236 | 236 | ||
237 | /* | 237 | /* |
238 | * There are two global mutexes guarding cpuset structures - cpuset_mutex | 238 | * There are two global mutexes guarding cpuset structures - cpuset_mutex |
@@ -430,7 +430,7 @@ static void free_trial_cpuset(struct cpuset *trial) | |||
430 | 430 | ||
431 | static int validate_change(struct cpuset *cur, struct cpuset *trial) | 431 | static int validate_change(struct cpuset *cur, struct cpuset *trial) |
432 | { | 432 | { |
433 | struct cgroup *cgrp; | 433 | struct cgroup_subsys_state *css; |
434 | struct cpuset *c, *par; | 434 | struct cpuset *c, *par; |
435 | int ret; | 435 | int ret; |
436 | 436 | ||
@@ -438,7 +438,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial) | |||
438 | 438 | ||
439 | /* Each of our child cpusets must be a subset of us */ | 439 | /* Each of our child cpusets must be a subset of us */ |
440 | ret = -EBUSY; | 440 | ret = -EBUSY; |
441 | cpuset_for_each_child(c, cgrp, cur) | 441 | cpuset_for_each_child(c, css, cur) |
442 | if (!is_cpuset_subset(c, trial)) | 442 | if (!is_cpuset_subset(c, trial)) |
443 | goto out; | 443 | goto out; |
444 | 444 | ||
@@ -459,7 +459,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial) | |||
459 | * overlap | 459 | * overlap |
460 | */ | 460 | */ |
461 | ret = -EINVAL; | 461 | ret = -EINVAL; |
462 | cpuset_for_each_child(c, cgrp, par) { | 462 | cpuset_for_each_child(c, css, par) { |
463 | if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) && | 463 | if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) && |
464 | c != cur && | 464 | c != cur && |
465 | cpumask_intersects(trial->cpus_allowed, c->cpus_allowed)) | 465 | cpumask_intersects(trial->cpus_allowed, c->cpus_allowed)) |
@@ -508,13 +508,13 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr, | |||
508 | struct cpuset *root_cs) | 508 | struct cpuset *root_cs) |
509 | { | 509 | { |
510 | struct cpuset *cp; | 510 | struct cpuset *cp; |
511 | struct cgroup *pos_cgrp; | 511 | struct cgroup_subsys_state *pos_css; |
512 | 512 | ||
513 | rcu_read_lock(); | 513 | rcu_read_lock(); |
514 | cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) { | 514 | cpuset_for_each_descendant_pre(cp, pos_css, root_cs) { |
515 | /* skip the whole subtree if @cp doesn't have any CPU */ | 515 | /* skip the whole subtree if @cp doesn't have any CPU */ |
516 | if (cpumask_empty(cp->cpus_allowed)) { | 516 | if (cpumask_empty(cp->cpus_allowed)) { |
517 | pos_cgrp = cgroup_rightmost_descendant(pos_cgrp); | 517 | pos_css = css_rightmost_descendant(pos_css); |
518 | continue; | 518 | continue; |
519 | } | 519 | } |
520 | 520 | ||
@@ -589,7 +589,7 @@ static int generate_sched_domains(cpumask_var_t **domains, | |||
589 | struct sched_domain_attr *dattr; /* attributes for custom domains */ | 589 | struct sched_domain_attr *dattr; /* attributes for custom domains */ |
590 | int ndoms = 0; /* number of sched domains in result */ | 590 | int ndoms = 0; /* number of sched domains in result */ |
591 | int nslot; /* next empty doms[] struct cpumask slot */ | 591 | int nslot; /* next empty doms[] struct cpumask slot */ |
592 | struct cgroup *pos_cgrp; | 592 | struct cgroup_subsys_state *pos_css; |
593 | 593 | ||
594 | doms = NULL; | 594 | doms = NULL; |
595 | dattr = NULL; | 595 | dattr = NULL; |
@@ -618,7 +618,7 @@ static int generate_sched_domains(cpumask_var_t **domains, | |||
618 | csn = 0; | 618 | csn = 0; |
619 | 619 | ||
620 | rcu_read_lock(); | 620 | rcu_read_lock(); |
621 | cpuset_for_each_descendant_pre(cp, pos_cgrp, &top_cpuset) { | 621 | cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) { |
622 | /* | 622 | /* |
623 | * Continue traversing beyond @cp iff @cp has some CPUs and | 623 | * Continue traversing beyond @cp iff @cp has some CPUs and |
624 | * isn't load balancing. The former is obvious. The | 624 | * isn't load balancing. The former is obvious. The |
@@ -635,7 +635,7 @@ static int generate_sched_domains(cpumask_var_t **domains, | |||
635 | csa[csn++] = cp; | 635 | csa[csn++] = cp; |
636 | 636 | ||
637 | /* skip @cp's subtree */ | 637 | /* skip @cp's subtree */ |
638 | pos_cgrp = cgroup_rightmost_descendant(pos_cgrp); | 638 | pos_css = css_rightmost_descendant(pos_css); |
639 | } | 639 | } |
640 | rcu_read_unlock(); | 640 | rcu_read_unlock(); |
641 | 641 | ||
@@ -886,16 +886,16 @@ static void update_tasks_cpumask_hier(struct cpuset *root_cs, | |||
886 | bool update_root, struct ptr_heap *heap) | 886 | bool update_root, struct ptr_heap *heap) |
887 | { | 887 | { |
888 | struct cpuset *cp; | 888 | struct cpuset *cp; |
889 | struct cgroup *pos_cgrp; | 889 | struct cgroup_subsys_state *pos_css; |
890 | 890 | ||
891 | if (update_root) | 891 | if (update_root) |
892 | update_tasks_cpumask(root_cs, heap); | 892 | update_tasks_cpumask(root_cs, heap); |
893 | 893 | ||
894 | rcu_read_lock(); | 894 | rcu_read_lock(); |
895 | cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) { | 895 | cpuset_for_each_descendant_pre(cp, pos_css, root_cs) { |
896 | /* skip the whole subtree if @cp have some CPU */ | 896 | /* skip the whole subtree if @cp have some CPU */ |
897 | if (!cpumask_empty(cp->cpus_allowed)) { | 897 | if (!cpumask_empty(cp->cpus_allowed)) { |
898 | pos_cgrp = cgroup_rightmost_descendant(pos_cgrp); | 898 | pos_css = css_rightmost_descendant(pos_css); |
899 | continue; | 899 | continue; |
900 | } | 900 | } |
901 | if (!css_tryget(&cp->css)) | 901 | if (!css_tryget(&cp->css)) |
@@ -1143,16 +1143,16 @@ static void update_tasks_nodemask_hier(struct cpuset *root_cs, | |||
1143 | bool update_root, struct ptr_heap *heap) | 1143 | bool update_root, struct ptr_heap *heap) |
1144 | { | 1144 | { |
1145 | struct cpuset *cp; | 1145 | struct cpuset *cp; |
1146 | struct cgroup *pos_cgrp; | 1146 | struct cgroup_subsys_state *pos_css; |
1147 | 1147 | ||
1148 | if (update_root) | 1148 | if (update_root) |
1149 | update_tasks_nodemask(root_cs, heap); | 1149 | update_tasks_nodemask(root_cs, heap); |
1150 | 1150 | ||
1151 | rcu_read_lock(); | 1151 | rcu_read_lock(); |
1152 | cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) { | 1152 | cpuset_for_each_descendant_pre(cp, pos_css, root_cs) { |
1153 | /* skip the whole subtree if @cp have some CPU */ | 1153 | /* skip the whole subtree if @cp have some CPU */ |
1154 | if (!nodes_empty(cp->mems_allowed)) { | 1154 | if (!nodes_empty(cp->mems_allowed)) { |
1155 | pos_cgrp = cgroup_rightmost_descendant(pos_cgrp); | 1155 | pos_css = css_rightmost_descendant(pos_css); |
1156 | continue; | 1156 | continue; |
1157 | } | 1157 | } |
1158 | if (!css_tryget(&cp->css)) | 1158 | if (!css_tryget(&cp->css)) |
@@ -1973,7 +1973,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) | |||
1973 | struct cpuset *cs = css_cs(css); | 1973 | struct cpuset *cs = css_cs(css); |
1974 | struct cpuset *parent = parent_cs(cs); | 1974 | struct cpuset *parent = parent_cs(cs); |
1975 | struct cpuset *tmp_cs; | 1975 | struct cpuset *tmp_cs; |
1976 | struct cgroup *pos_cgrp; | 1976 | struct cgroup_subsys_state *pos_css; |
1977 | 1977 | ||
1978 | if (!parent) | 1978 | if (!parent) |
1979 | return 0; | 1979 | return 0; |
@@ -2005,7 +2005,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) | |||
2005 | * (and likewise for mems) to the new cgroup. | 2005 | * (and likewise for mems) to the new cgroup. |
2006 | */ | 2006 | */ |
2007 | rcu_read_lock(); | 2007 | rcu_read_lock(); |
2008 | cpuset_for_each_child(tmp_cs, pos_cgrp, parent) { | 2008 | cpuset_for_each_child(tmp_cs, pos_css, parent) { |
2009 | if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) { | 2009 | if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) { |
2010 | rcu_read_unlock(); | 2010 | rcu_read_unlock(); |
2011 | goto out_unlock; | 2011 | goto out_unlock; |
@@ -2252,10 +2252,10 @@ static void cpuset_hotplug_workfn(struct work_struct *work) | |||
2252 | /* if cpus or mems changed, we need to propagate to descendants */ | 2252 | /* if cpus or mems changed, we need to propagate to descendants */ |
2253 | if (cpus_updated || mems_updated) { | 2253 | if (cpus_updated || mems_updated) { |
2254 | struct cpuset *cs; | 2254 | struct cpuset *cs; |
2255 | struct cgroup *pos_cgrp; | 2255 | struct cgroup_subsys_state *pos_css; |
2256 | 2256 | ||
2257 | rcu_read_lock(); | 2257 | rcu_read_lock(); |
2258 | cpuset_for_each_descendant_pre(cs, pos_cgrp, &top_cpuset) { | 2258 | cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { |
2259 | if (!css_tryget(&cs->css)) | 2259 | if (!css_tryget(&cs->css)) |
2260 | continue; | 2260 | continue; |
2261 | rcu_read_unlock(); | 2261 | rcu_read_unlock(); |