summary | refs | log | tree | commit | diff | stats
path: root/kernel/cpuset.c
diff options
context:
space:
mode:
author    Tejun Heo <tj@kernel.org>  2013-08-08 20:11:23 -0400
committer Tejun Heo <tj@kernel.org>  2013-08-08 20:11:23 -0400
commit    eb95419b023abacb415e2a18fea899023ce7624d (patch)
tree      705284469b67cbe440b86c6cb81e1cf27648eba9 /kernel/cpuset.c
parent    6387698699afd72d6304566fb6ccf84bffe07c56 (diff)
cgroup: pass around cgroup_subsys_state instead of cgroup in subsystem methods
cgroup is currently in the process of transitioning to using struct cgroup_subsys_state * as the primary handle instead of struct cgroup * in subsystem implementations for the following reasons. * With unified hierarchy, subsystems will be dynamically bound and unbound from cgroups and thus css's (cgroup_subsys_state) may be created and destroyed dynamically over the lifetime of a cgroup, which is different from the current state where all css's are allocated and destroyed together with the associated cgroup. This in turn means that cgroup_css() should be synchronized and may return NULL, making it more cumbersome to use. * Differing levels of per-subsystem granularity in the unified hierarchy means that the task and descendant iterators should behave differently depending on the specific subsystem the iteration is being performed for. * In the majority of cases, subsystems only care about their part in the cgroup hierarchy - i.e. the hierarchy of css's. Subsystem methods often obtain the matching css pointer from the cgroup and don't bother with the cgroup pointer itself. Passing around css fits much better. This patch converts all cgroup_subsys methods to take @css instead of @cgroup. The conversions are mostly straightforward. A few noteworthy changes are: * ->css_alloc() now takes css of the parent cgroup rather than the pointer to the new cgroup as the css for the new cgroup doesn't exist yet. Knowing the parent css is enough for all the existing subsystems. * In kernel/cgroup.c::offline_css(), unnecessary open coded css dereference is replaced with local variable access. This patch shouldn't cause any behavior differences. v2: Unnecessary explicit cgrp->subsys[] deref in css_online() replaced with local variable @css as suggested by Li Zefan. Rebased on top of new for-3.12 which includes for-3.11-fixes so that ->css_free() invocation added by da0a12caff ("cgroup: fix a leak when percpu_ref_init() fails") is converted too. Suggested by Li Zefan.
Signed-off-by: Tejun Heo <tj@kernel.org> Acked-by: Li Zefan <lizefan@huawei.com> Acked-by: Michal Hocko <mhocko@suse.cz> Acked-by: Vivek Goyal <vgoyal@redhat.com> Acked-by: Aristeu Rozanski <aris@redhat.com> Acked-by: Daniel Wagner <daniel.wagner@bmw-carit.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Ingo Molnar <mingo@redhat.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Balbir Singh <bsingharora@gmail.com> Cc: Matt Helsley <matthltc@us.ibm.com> Cc: Jens Axboe <axboe@kernel.dk> Cc: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/cpuset.c')
-rw-r--r--  kernel/cpuset.c  39
1 file changed, 21 insertions(+), 18 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 259a4af37e69..8ce3fdc3dfcc 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1455,9 +1455,10 @@ static int fmeter_getrate(struct fmeter *fmp)
1455} 1455}
1456 1456
1457/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */ 1457/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
1458static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) 1458static int cpuset_can_attach(struct cgroup_subsys_state *css,
1459 struct cgroup_taskset *tset)
1459{ 1460{
1460 struct cpuset *cs = cgroup_cs(cgrp); 1461 struct cpuset *cs = css_cs(css);
1461 struct task_struct *task; 1462 struct task_struct *task;
1462 int ret; 1463 int ret;
1463 1464
@@ -1468,11 +1469,11 @@ static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
1468 * flag is set. 1469 * flag is set.
1469 */ 1470 */
1470 ret = -ENOSPC; 1471 ret = -ENOSPC;
1471 if (!cgroup_sane_behavior(cgrp) && 1472 if (!cgroup_sane_behavior(css->cgroup) &&
1472 (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) 1473 (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
1473 goto out_unlock; 1474 goto out_unlock;
1474 1475
1475 cgroup_taskset_for_each(task, cgrp, tset) { 1476 cgroup_taskset_for_each(task, css->cgroup, tset) {
1476 /* 1477 /*
1477 * Kthreads which disallow setaffinity shouldn't be moved 1478 * Kthreads which disallow setaffinity shouldn't be moved
1478 * to a new cpuset; we don't want to change their cpu 1479 * to a new cpuset; we don't want to change their cpu
@@ -1501,11 +1502,11 @@ out_unlock:
1501 return ret; 1502 return ret;
1502} 1503}
1503 1504
1504static void cpuset_cancel_attach(struct cgroup *cgrp, 1505static void cpuset_cancel_attach(struct cgroup_subsys_state *css,
1505 struct cgroup_taskset *tset) 1506 struct cgroup_taskset *tset)
1506{ 1507{
1507 mutex_lock(&cpuset_mutex); 1508 mutex_lock(&cpuset_mutex);
1508 cgroup_cs(cgrp)->attach_in_progress--; 1509 css_cs(css)->attach_in_progress--;
1509 mutex_unlock(&cpuset_mutex); 1510 mutex_unlock(&cpuset_mutex);
1510} 1511}
1511 1512
@@ -1516,7 +1517,8 @@ static void cpuset_cancel_attach(struct cgroup *cgrp,
1516 */ 1517 */
1517static cpumask_var_t cpus_attach; 1518static cpumask_var_t cpus_attach;
1518 1519
1519static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) 1520static void cpuset_attach(struct cgroup_subsys_state *css,
1521 struct cgroup_taskset *tset)
1520{ 1522{
1521 /* static buf protected by cpuset_mutex */ 1523 /* static buf protected by cpuset_mutex */
1522 static nodemask_t cpuset_attach_nodemask_to; 1524 static nodemask_t cpuset_attach_nodemask_to;
@@ -1524,7 +1526,7 @@ static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
1524 struct task_struct *task; 1526 struct task_struct *task;
1525 struct task_struct *leader = cgroup_taskset_first(tset); 1527 struct task_struct *leader = cgroup_taskset_first(tset);
1526 struct cgroup *oldcgrp = cgroup_taskset_cur_cgroup(tset); 1528 struct cgroup *oldcgrp = cgroup_taskset_cur_cgroup(tset);
1527 struct cpuset *cs = cgroup_cs(cgrp); 1529 struct cpuset *cs = css_cs(css);
1528 struct cpuset *oldcs = cgroup_cs(oldcgrp); 1530 struct cpuset *oldcs = cgroup_cs(oldcgrp);
1529 struct cpuset *cpus_cs = effective_cpumask_cpuset(cs); 1531 struct cpuset *cpus_cs = effective_cpumask_cpuset(cs);
1530 struct cpuset *mems_cs = effective_nodemask_cpuset(cs); 1532 struct cpuset *mems_cs = effective_nodemask_cpuset(cs);
@@ -1539,7 +1541,7 @@ static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
1539 1541
1540 guarantee_online_mems(mems_cs, &cpuset_attach_nodemask_to); 1542 guarantee_online_mems(mems_cs, &cpuset_attach_nodemask_to);
1541 1543
1542 cgroup_taskset_for_each(task, cgrp, tset) { 1544 cgroup_taskset_for_each(task, css->cgroup, tset) {
1543 /* 1545 /*
1544 * can_attach beforehand should guarantee that this doesn't 1546 * can_attach beforehand should guarantee that this doesn't
1545 * fail. TODO: have a better way to handle failure here 1547 * fail. TODO: have a better way to handle failure here
@@ -1940,11 +1942,12 @@ static struct cftype files[] = {
1940 * cgrp: control group that the new cpuset will be part of 1942 * cgrp: control group that the new cpuset will be part of
1941 */ 1943 */
1942 1944
1943static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cgrp) 1945static struct cgroup_subsys_state *
1946cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
1944{ 1947{
1945 struct cpuset *cs; 1948 struct cpuset *cs;
1946 1949
1947 if (!cgrp->parent) 1950 if (!parent_css)
1948 return &top_cpuset.css; 1951 return &top_cpuset.css;
1949 1952
1950 cs = kzalloc(sizeof(*cs), GFP_KERNEL); 1953 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
@@ -1964,9 +1967,9 @@ static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cgrp)
1964 return &cs->css; 1967 return &cs->css;
1965} 1968}
1966 1969
1967static int cpuset_css_online(struct cgroup *cgrp) 1970static int cpuset_css_online(struct cgroup_subsys_state *css)
1968{ 1971{
1969 struct cpuset *cs = cgroup_cs(cgrp); 1972 struct cpuset *cs = css_cs(css);
1970 struct cpuset *parent = parent_cs(cs); 1973 struct cpuset *parent = parent_cs(cs);
1971 struct cpuset *tmp_cs; 1974 struct cpuset *tmp_cs;
1972 struct cgroup *pos_cgrp; 1975 struct cgroup *pos_cgrp;
@@ -1984,7 +1987,7 @@ static int cpuset_css_online(struct cgroup *cgrp)
1984 1987
1985 number_of_cpusets++; 1988 number_of_cpusets++;
1986 1989
1987 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags)) 1990 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
1988 goto out_unlock; 1991 goto out_unlock;
1989 1992
1990 /* 1993 /*
@@ -2024,9 +2027,9 @@ out_unlock:
2024 * will call rebuild_sched_domains_locked(). 2027 * will call rebuild_sched_domains_locked().
2025 */ 2028 */
2026 2029
2027static void cpuset_css_offline(struct cgroup *cgrp) 2030static void cpuset_css_offline(struct cgroup_subsys_state *css)
2028{ 2031{
2029 struct cpuset *cs = cgroup_cs(cgrp); 2032 struct cpuset *cs = css_cs(css);
2030 2033
2031 mutex_lock(&cpuset_mutex); 2034 mutex_lock(&cpuset_mutex);
2032 2035
@@ -2039,9 +2042,9 @@ static void cpuset_css_offline(struct cgroup *cgrp)
2039 mutex_unlock(&cpuset_mutex); 2042 mutex_unlock(&cpuset_mutex);
2040} 2043}
2041 2044
2042static void cpuset_css_free(struct cgroup *cgrp) 2045static void cpuset_css_free(struct cgroup_subsys_state *css)
2043{ 2046{
2044 struct cpuset *cs = cgroup_cs(cgrp); 2047 struct cpuset *cs = css_cs(css);
2045 2048
2046 free_cpumask_var(cs->cpus_allowed); 2049 free_cpumask_var(cs->cpus_allowed);
2047 kfree(cs); 2050 kfree(cs);