about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
author	Tejun Heo <tj@kernel.org>	2013-08-08 20:11:23 -0400
committer	Tejun Heo <tj@kernel.org>	2013-08-08 20:11:23 -0400
commit	eb95419b023abacb415e2a18fea899023ce7624d (patch)
tree	705284469b67cbe440b86c6cb81e1cf27648eba9 /kernel
parent	6387698699afd72d6304566fb6ccf84bffe07c56 (diff)
cgroup: pass around cgroup_subsys_state instead of cgroup in subsystem methods
cgroup is currently in the process of transitioning to using struct
cgroup_subsys_state * as the primary handle instead of struct cgroup *
in subsystem implementations for the following reasons.

* With unified hierarchy, subsystems will be dynamically bound and
  unbound from cgroups and thus css's (cgroup_subsys_state) may be
  created and destroyed dynamically over the lifetime of a cgroup,
  which is different from the current state where all css's are
  allocated and destroyed together with the associated cgroup.  This
  in turn means that cgroup_css() should be synchronized and may
  return NULL, making it more cumbersome to use.

* Differing levels of per-subsystem granularity in the unified
  hierarchy means that the task and descendant iterators should behave
  differently depending on the specific subsystem the iteration is
  being performed for.

* In majority of the cases, subsystems only care about its part in the
  cgroup hierarchy - ie. the hierarchy of css's.  Subsystem methods
  often obtain the matching css pointer from the cgroup and don't
  bother with the cgroup pointer itself.  Passing around css fits
  much better.

This patch converts all cgroup_subsys methods to take @css instead of
@cgroup.  The conversions are mostly straight-forward.  A few
noteworthy changes are

* ->css_alloc() now takes css of the parent cgroup rather than the
  pointer to the new cgroup as the css for the new cgroup doesn't
  exist yet.  Knowing the parent css is enough for all the existing
  subsystems.

* In kernel/cgroup.c::offline_css(), unnecessary open coded css
  dereference is replaced with local variable access.

This patch shouldn't cause any behavior differences.

v2: Unnecessary explicit cgrp->subsys[] deref in css_online() replaced
    with local variable @css as suggested by Li Zefan.

    Rebased on top of new for-3.12 which includes for-3.11-fixes so
    that ->css_free() invocation added by da0a12caff ("cgroup: fix a
    leak when percpu_ref_init() fails") is converted too.  Suggested
    by Li Zefan.
Signed-off-by: Tejun Heo <tj@kernel.org> Acked-by: Li Zefan <lizefan@huawei.com> Acked-by: Michal Hocko <mhocko@suse.cz> Acked-by: Vivek Goyal <vgoyal@redhat.com> Acked-by: Aristeu Rozanski <aris@redhat.com> Acked-by: Daniel Wagner <daniel.wagner@bmw-carit.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Ingo Molnar <mingo@redhat.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Balbir Singh <bsingharora@gmail.com> Cc: Matt Helsley <matthltc@us.ibm.com> Cc: Jens Axboe <axboe@kernel.dk> Cc: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cgroup.c	57
-rw-r--r--	kernel/cgroup_freezer.c	40
-rw-r--r--	kernel/cpuset.c	39
-rw-r--r--	kernel/events/core.c	18
-rw-r--r--	kernel/sched/core.c	39
-rw-r--r--	kernel/sched/cpuacct.c	9
6 files changed, 111 insertions(+), 91 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 4234428f1014..271d9a5cde5f 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -853,8 +853,11 @@ static void cgroup_free_fn(struct work_struct *work)
853 /* 853 /*
854 * Release the subsystem state objects. 854 * Release the subsystem state objects.
855 */ 855 */
856 for_each_root_subsys(cgrp->root, ss) 856 for_each_root_subsys(cgrp->root, ss) {
857 ss->css_free(cgrp); 857 struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
858
859 ss->css_free(css);
860 }
858 861
859 cgrp->root->number_of_cgroups--; 862 cgrp->root->number_of_cgroups--;
860 mutex_unlock(&cgroup_mutex); 863 mutex_unlock(&cgroup_mutex);
@@ -1056,7 +1059,7 @@ static int rebind_subsystems(struct cgroupfs_root *root,
1056 list_move(&ss->sibling, &root->subsys_list); 1059 list_move(&ss->sibling, &root->subsys_list);
1057 ss->root = root; 1060 ss->root = root;
1058 if (ss->bind) 1061 if (ss->bind)
1059 ss->bind(cgrp); 1062 ss->bind(cgrp->subsys[i]);
1060 1063
1061 /* refcount was already taken, and we're keeping it */ 1064 /* refcount was already taken, and we're keeping it */
1062 root->subsys_mask |= bit; 1065 root->subsys_mask |= bit;
@@ -1066,7 +1069,7 @@ static int rebind_subsystems(struct cgroupfs_root *root,
1066 BUG_ON(cgrp->subsys[i]->cgroup != cgrp); 1069 BUG_ON(cgrp->subsys[i]->cgroup != cgrp);
1067 1070
1068 if (ss->bind) 1071 if (ss->bind)
1069 ss->bind(cgroup_dummy_top); 1072 ss->bind(cgroup_dummy_top->subsys[i]);
1070 cgroup_dummy_top->subsys[i]->cgroup = cgroup_dummy_top; 1073 cgroup_dummy_top->subsys[i]->cgroup = cgroup_dummy_top;
1071 cgrp->subsys[i] = NULL; 1074 cgrp->subsys[i] = NULL;
1072 cgroup_subsys[i]->root = &cgroup_dummy_root; 1075 cgroup_subsys[i]->root = &cgroup_dummy_root;
@@ -2049,8 +2052,10 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
2049 * step 1: check that we can legitimately attach to the cgroup. 2052 * step 1: check that we can legitimately attach to the cgroup.
2050 */ 2053 */
2051 for_each_root_subsys(root, ss) { 2054 for_each_root_subsys(root, ss) {
2055 struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
2056
2052 if (ss->can_attach) { 2057 if (ss->can_attach) {
2053 retval = ss->can_attach(cgrp, &tset); 2058 retval = ss->can_attach(css, &tset);
2054 if (retval) { 2059 if (retval) {
2055 failed_ss = ss; 2060 failed_ss = ss;
2056 goto out_cancel_attach; 2061 goto out_cancel_attach;
@@ -2089,8 +2094,10 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
2089 * step 4: do subsystem attach callbacks. 2094 * step 4: do subsystem attach callbacks.
2090 */ 2095 */
2091 for_each_root_subsys(root, ss) { 2096 for_each_root_subsys(root, ss) {
2097 struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
2098
2092 if (ss->attach) 2099 if (ss->attach)
2093 ss->attach(cgrp, &tset); 2100 ss->attach(css, &tset);
2094 } 2101 }
2095 2102
2096 /* 2103 /*
@@ -2109,10 +2116,12 @@ out_put_css_set_refs:
2109out_cancel_attach: 2116out_cancel_attach:
2110 if (retval) { 2117 if (retval) {
2111 for_each_root_subsys(root, ss) { 2118 for_each_root_subsys(root, ss) {
2119 struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
2120
2112 if (ss == failed_ss) 2121 if (ss == failed_ss)
2113 break; 2122 break;
2114 if (ss->cancel_attach) 2123 if (ss->cancel_attach)
2115 ss->cancel_attach(cgrp, &tset); 2124 ss->cancel_attach(css, &tset);
2116 } 2125 }
2117 } 2126 }
2118out_free_group_list: 2127out_free_group_list:
@@ -4206,14 +4215,15 @@ static void init_cgroup_css(struct cgroup_subsys_state *css,
4206/* invoke ->css_online() on a new CSS and mark it online if successful */ 4215/* invoke ->css_online() on a new CSS and mark it online if successful */
4207static int online_css(struct cgroup_subsys *ss, struct cgroup *cgrp) 4216static int online_css(struct cgroup_subsys *ss, struct cgroup *cgrp)
4208{ 4217{
4218 struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
4209 int ret = 0; 4219 int ret = 0;
4210 4220
4211 lockdep_assert_held(&cgroup_mutex); 4221 lockdep_assert_held(&cgroup_mutex);
4212 4222
4213 if (ss->css_online) 4223 if (ss->css_online)
4214 ret = ss->css_online(cgrp); 4224 ret = ss->css_online(css);
4215 if (!ret) 4225 if (!ret)
4216 cgrp->subsys[ss->subsys_id]->flags |= CSS_ONLINE; 4226 css->flags |= CSS_ONLINE;
4217 return ret; 4227 return ret;
4218} 4228}
4219 4229
@@ -4228,9 +4238,9 @@ static void offline_css(struct cgroup_subsys *ss, struct cgroup *cgrp)
4228 return; 4238 return;
4229 4239
4230 if (ss->css_offline) 4240 if (ss->css_offline)
4231 ss->css_offline(cgrp); 4241 ss->css_offline(css);
4232 4242
4233 cgrp->subsys[ss->subsys_id]->flags &= ~CSS_ONLINE; 4243 css->flags &= ~CSS_ONLINE;
4234} 4244}
4235 4245
4236/* 4246/*
@@ -4305,7 +4315,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
4305 for_each_root_subsys(root, ss) { 4315 for_each_root_subsys(root, ss) {
4306 struct cgroup_subsys_state *css; 4316 struct cgroup_subsys_state *css;
4307 4317
4308 css = ss->css_alloc(cgrp); 4318 css = ss->css_alloc(parent->subsys[ss->subsys_id]);
4309 if (IS_ERR(css)) { 4319 if (IS_ERR(css)) {
4310 err = PTR_ERR(css); 4320 err = PTR_ERR(css);
4311 goto err_free_all; 4321 goto err_free_all;
@@ -4313,7 +4323,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
4313 4323
4314 err = percpu_ref_init(&css->refcnt, css_release); 4324 err = percpu_ref_init(&css->refcnt, css_release);
4315 if (err) { 4325 if (err) {
4316 ss->css_free(cgrp); 4326 ss->css_free(css);
4317 goto err_free_all; 4327 goto err_free_all;
4318 } 4328 }
4319 4329
@@ -4386,7 +4396,7 @@ err_free_all:
4386 4396
4387 if (css) { 4397 if (css) {
4388 percpu_ref_cancel_init(&css->refcnt); 4398 percpu_ref_cancel_init(&css->refcnt);
4389 ss->css_free(cgrp); 4399 ss->css_free(css);
4390 } 4400 }
4391 } 4401 }
4392 mutex_unlock(&cgroup_mutex); 4402 mutex_unlock(&cgroup_mutex);
@@ -4641,7 +4651,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
4641 /* Create the top cgroup state for this subsystem */ 4651 /* Create the top cgroup state for this subsystem */
4642 list_add(&ss->sibling, &cgroup_dummy_root.subsys_list); 4652 list_add(&ss->sibling, &cgroup_dummy_root.subsys_list);
4643 ss->root = &cgroup_dummy_root; 4653 ss->root = &cgroup_dummy_root;
4644 css = ss->css_alloc(cgroup_dummy_top); 4654 css = ss->css_alloc(cgroup_dummy_top->subsys[ss->subsys_id]);
4645 /* We don't handle early failures gracefully */ 4655 /* We don't handle early failures gracefully */
4646 BUG_ON(IS_ERR(css)); 4656 BUG_ON(IS_ERR(css));
4647 init_cgroup_css(css, ss, cgroup_dummy_top); 4657 init_cgroup_css(css, ss, cgroup_dummy_top);
@@ -4720,7 +4730,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
4720 * struct, so this can happen first (i.e. before the dummy root 4730 * struct, so this can happen first (i.e. before the dummy root
4721 * attachment). 4731 * attachment).
4722 */ 4732 */
4723 css = ss->css_alloc(cgroup_dummy_top); 4733 css = ss->css_alloc(cgroup_dummy_top->subsys[ss->subsys_id]);
4724 if (IS_ERR(css)) { 4734 if (IS_ERR(css)) {
4725 /* failure case - need to deassign the cgroup_subsys[] slot. */ 4735 /* failure case - need to deassign the cgroup_subsys[] slot. */
4726 cgroup_subsys[ss->subsys_id] = NULL; 4736 cgroup_subsys[ss->subsys_id] = NULL;
@@ -4836,7 +4846,7 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
4836 * the cgrp->subsys pointer to find their state. note that this 4846 * the cgrp->subsys pointer to find their state. note that this
4837 * also takes care of freeing the css_id. 4847 * also takes care of freeing the css_id.
4838 */ 4848 */
4839 ss->css_free(cgroup_dummy_top); 4849 ss->css_free(cgroup_dummy_top->subsys[ss->subsys_id]);
4840 cgroup_dummy_top->subsys[ss->subsys_id] = NULL; 4850 cgroup_dummy_top->subsys[ss->subsys_id] = NULL;
4841 4851
4842 mutex_unlock(&cgroup_mutex); 4852 mutex_unlock(&cgroup_mutex);
@@ -5192,10 +5202,10 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
5192 */ 5202 */
5193 for_each_builtin_subsys(ss, i) { 5203 for_each_builtin_subsys(ss, i) {
5194 if (ss->exit) { 5204 if (ss->exit) {
5195 struct cgroup *old_cgrp = cset->subsys[i]->cgroup; 5205 struct cgroup_subsys_state *old_css = cset->subsys[i];
5196 struct cgroup *cgrp = task_cgroup(tsk, i); 5206 struct cgroup_subsys_state *css = task_css(tsk, i);
5197 5207
5198 ss->exit(cgrp, old_cgrp, tsk); 5208 ss->exit(css, old_css, tsk);
5199 } 5209 }
5200 } 5210 }
5201 } 5211 }
@@ -5529,7 +5539,8 @@ struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id)
5529} 5539}
5530 5540
5531#ifdef CONFIG_CGROUP_DEBUG 5541#ifdef CONFIG_CGROUP_DEBUG
5532static struct cgroup_subsys_state *debug_css_alloc(struct cgroup *cgrp) 5542static struct cgroup_subsys_state *
5543debug_css_alloc(struct cgroup_subsys_state *parent_css)
5533{ 5544{
5534 struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL); 5545 struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);
5535 5546
@@ -5539,9 +5550,9 @@ static struct cgroup_subsys_state *debug_css_alloc(struct cgroup *cgrp)
5539 return css; 5550 return css;
5540} 5551}
5541 5552
5542static void debug_css_free(struct cgroup *cgrp) 5553static void debug_css_free(struct cgroup_subsys_state *css)
5543{ 5554{
5544 kfree(cgrp->subsys[debug_subsys_id]); 5555 kfree(css);
5545} 5556}
5546 5557
5547static u64 debug_taskcount_read(struct cgroup *cgrp, struct cftype *cft) 5558static u64 debug_taskcount_read(struct cgroup *cgrp, struct cftype *cft)
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 657a73cd44c4..f03a85719c3c 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -91,7 +91,8 @@ static const char *freezer_state_strs(unsigned int state)
91 91
92struct cgroup_subsys freezer_subsys; 92struct cgroup_subsys freezer_subsys;
93 93
94static struct cgroup_subsys_state *freezer_css_alloc(struct cgroup *cgroup) 94static struct cgroup_subsys_state *
95freezer_css_alloc(struct cgroup_subsys_state *parent_css)
95{ 96{
96 struct freezer *freezer; 97 struct freezer *freezer;
97 98
@@ -104,16 +105,16 @@ static struct cgroup_subsys_state *freezer_css_alloc(struct cgroup *cgroup)
104} 105}
105 106
106/** 107/**
107 * freezer_css_online - commit creation of a freezer cgroup 108 * freezer_css_online - commit creation of a freezer css
108 * @cgroup: cgroup being created 109 * @css: css being created
109 * 110 *
110 * We're committing to creation of @cgroup. Mark it online and inherit 111 * We're committing to creation of @css. Mark it online and inherit
111 * parent's freezing state while holding both parent's and our 112 * parent's freezing state while holding both parent's and our
112 * freezer->lock. 113 * freezer->lock.
113 */ 114 */
114static int freezer_css_online(struct cgroup *cgroup) 115static int freezer_css_online(struct cgroup_subsys_state *css)
115{ 116{
116 struct freezer *freezer = cgroup_freezer(cgroup); 117 struct freezer *freezer = css_freezer(css);
117 struct freezer *parent = parent_freezer(freezer); 118 struct freezer *parent = parent_freezer(freezer);
118 119
119 /* 120 /*
@@ -140,15 +141,15 @@ static int freezer_css_online(struct cgroup *cgroup)
140} 141}
141 142
142/** 143/**
143 * freezer_css_offline - initiate destruction of @cgroup 144 * freezer_css_offline - initiate destruction of a freezer css
144 * @cgroup: cgroup being destroyed 145 * @css: css being destroyed
145 * 146 *
146 * @cgroup is going away. Mark it dead and decrement system_freezing_count 147 * @css is going away. Mark it dead and decrement system_freezing_count if
147 * if it was holding one. 148 * it was holding one.
148 */ 149 */
149static void freezer_css_offline(struct cgroup *cgroup) 150static void freezer_css_offline(struct cgroup_subsys_state *css)
150{ 151{
151 struct freezer *freezer = cgroup_freezer(cgroup); 152 struct freezer *freezer = css_freezer(css);
152 153
153 spin_lock_irq(&freezer->lock); 154 spin_lock_irq(&freezer->lock);
154 155
@@ -160,9 +161,9 @@ static void freezer_css_offline(struct cgroup *cgroup)
160 spin_unlock_irq(&freezer->lock); 161 spin_unlock_irq(&freezer->lock);
161} 162}
162 163
163static void freezer_css_free(struct cgroup *cgroup) 164static void freezer_css_free(struct cgroup_subsys_state *css)
164{ 165{
165 kfree(cgroup_freezer(cgroup)); 166 kfree(css_freezer(css));
166} 167}
167 168
168/* 169/*
@@ -174,25 +175,26 @@ static void freezer_css_free(struct cgroup *cgroup)
174 * @freezer->lock. freezer_attach() makes the new tasks conform to the 175 * @freezer->lock. freezer_attach() makes the new tasks conform to the
175 * current state and all following state changes can see the new tasks. 176 * current state and all following state changes can see the new tasks.
176 */ 177 */
177static void freezer_attach(struct cgroup *new_cgrp, struct cgroup_taskset *tset) 178static void freezer_attach(struct cgroup_subsys_state *new_css,
179 struct cgroup_taskset *tset)
178{ 180{
179 struct freezer *freezer = cgroup_freezer(new_cgrp); 181 struct freezer *freezer = css_freezer(new_css);
180 struct task_struct *task; 182 struct task_struct *task;
181 bool clear_frozen = false; 183 bool clear_frozen = false;
182 184
183 spin_lock_irq(&freezer->lock); 185 spin_lock_irq(&freezer->lock);
184 186
185 /* 187 /*
186 * Make the new tasks conform to the current state of @new_cgrp. 188 * Make the new tasks conform to the current state of @new_css.
187 * For simplicity, when migrating any task to a FROZEN cgroup, we 189 * For simplicity, when migrating any task to a FROZEN cgroup, we
188 * revert it to FREEZING and let update_if_frozen() determine the 190 * revert it to FREEZING and let update_if_frozen() determine the
189 * correct state later. 191 * correct state later.
190 * 192 *
191 * Tasks in @tset are on @new_cgrp but may not conform to its 193 * Tasks in @tset are on @new_css but may not conform to its
192 * current state before executing the following - !frozen tasks may 194 * current state before executing the following - !frozen tasks may
193 * be visible in a FROZEN cgroup and frozen tasks in a THAWED one. 195 * be visible in a FROZEN cgroup and frozen tasks in a THAWED one.
194 */ 196 */
195 cgroup_taskset_for_each(task, new_cgrp, tset) { 197 cgroup_taskset_for_each(task, new_css->cgroup, tset) {
196 if (!(freezer->state & CGROUP_FREEZING)) { 198 if (!(freezer->state & CGROUP_FREEZING)) {
197 __thaw_task(task); 199 __thaw_task(task);
198 } else { 200 } else {
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 259a4af37e69..8ce3fdc3dfcc 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1455,9 +1455,10 @@ static int fmeter_getrate(struct fmeter *fmp)
1455} 1455}
1456 1456
1457/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */ 1457/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
1458static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) 1458static int cpuset_can_attach(struct cgroup_subsys_state *css,
1459 struct cgroup_taskset *tset)
1459{ 1460{
1460 struct cpuset *cs = cgroup_cs(cgrp); 1461 struct cpuset *cs = css_cs(css);
1461 struct task_struct *task; 1462 struct task_struct *task;
1462 int ret; 1463 int ret;
1463 1464
@@ -1468,11 +1469,11 @@ static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
1468 * flag is set. 1469 * flag is set.
1469 */ 1470 */
1470 ret = -ENOSPC; 1471 ret = -ENOSPC;
1471 if (!cgroup_sane_behavior(cgrp) && 1472 if (!cgroup_sane_behavior(css->cgroup) &&
1472 (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) 1473 (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
1473 goto out_unlock; 1474 goto out_unlock;
1474 1475
1475 cgroup_taskset_for_each(task, cgrp, tset) { 1476 cgroup_taskset_for_each(task, css->cgroup, tset) {
1476 /* 1477 /*
1477 * Kthreads which disallow setaffinity shouldn't be moved 1478 * Kthreads which disallow setaffinity shouldn't be moved
1478 * to a new cpuset; we don't want to change their cpu 1479 * to a new cpuset; we don't want to change their cpu
@@ -1501,11 +1502,11 @@ out_unlock:
1501 return ret; 1502 return ret;
1502} 1503}
1503 1504
1504static void cpuset_cancel_attach(struct cgroup *cgrp, 1505static void cpuset_cancel_attach(struct cgroup_subsys_state *css,
1505 struct cgroup_taskset *tset) 1506 struct cgroup_taskset *tset)
1506{ 1507{
1507 mutex_lock(&cpuset_mutex); 1508 mutex_lock(&cpuset_mutex);
1508 cgroup_cs(cgrp)->attach_in_progress--; 1509 css_cs(css)->attach_in_progress--;
1509 mutex_unlock(&cpuset_mutex); 1510 mutex_unlock(&cpuset_mutex);
1510} 1511}
1511 1512
@@ -1516,7 +1517,8 @@ static void cpuset_cancel_attach(struct cgroup *cgrp,
1516 */ 1517 */
1517static cpumask_var_t cpus_attach; 1518static cpumask_var_t cpus_attach;
1518 1519
1519static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) 1520static void cpuset_attach(struct cgroup_subsys_state *css,
1521 struct cgroup_taskset *tset)
1520{ 1522{
1521 /* static buf protected by cpuset_mutex */ 1523 /* static buf protected by cpuset_mutex */
1522 static nodemask_t cpuset_attach_nodemask_to; 1524 static nodemask_t cpuset_attach_nodemask_to;
@@ -1524,7 +1526,7 @@ static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
1524 struct task_struct *task; 1526 struct task_struct *task;
1525 struct task_struct *leader = cgroup_taskset_first(tset); 1527 struct task_struct *leader = cgroup_taskset_first(tset);
1526 struct cgroup *oldcgrp = cgroup_taskset_cur_cgroup(tset); 1528 struct cgroup *oldcgrp = cgroup_taskset_cur_cgroup(tset);
1527 struct cpuset *cs = cgroup_cs(cgrp); 1529 struct cpuset *cs = css_cs(css);
1528 struct cpuset *oldcs = cgroup_cs(oldcgrp); 1530 struct cpuset *oldcs = cgroup_cs(oldcgrp);
1529 struct cpuset *cpus_cs = effective_cpumask_cpuset(cs); 1531 struct cpuset *cpus_cs = effective_cpumask_cpuset(cs);
1530 struct cpuset *mems_cs = effective_nodemask_cpuset(cs); 1532 struct cpuset *mems_cs = effective_nodemask_cpuset(cs);
@@ -1539,7 +1541,7 @@ static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
1539 1541
1540 guarantee_online_mems(mems_cs, &cpuset_attach_nodemask_to); 1542 guarantee_online_mems(mems_cs, &cpuset_attach_nodemask_to);
1541 1543
1542 cgroup_taskset_for_each(task, cgrp, tset) { 1544 cgroup_taskset_for_each(task, css->cgroup, tset) {
1543 /* 1545 /*
1544 * can_attach beforehand should guarantee that this doesn't 1546 * can_attach beforehand should guarantee that this doesn't
1545 * fail. TODO: have a better way to handle failure here 1547 * fail. TODO: have a better way to handle failure here
@@ -1940,11 +1942,12 @@ static struct cftype files[] = {
1940 * cgrp: control group that the new cpuset will be part of 1942 * cgrp: control group that the new cpuset will be part of
1941 */ 1943 */
1942 1944
1943static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cgrp) 1945static struct cgroup_subsys_state *
1946cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
1944{ 1947{
1945 struct cpuset *cs; 1948 struct cpuset *cs;
1946 1949
1947 if (!cgrp->parent) 1950 if (!parent_css)
1948 return &top_cpuset.css; 1951 return &top_cpuset.css;
1949 1952
1950 cs = kzalloc(sizeof(*cs), GFP_KERNEL); 1953 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
@@ -1964,9 +1967,9 @@ static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cgrp)
1964 return &cs->css; 1967 return &cs->css;
1965} 1968}
1966 1969
1967static int cpuset_css_online(struct cgroup *cgrp) 1970static int cpuset_css_online(struct cgroup_subsys_state *css)
1968{ 1971{
1969 struct cpuset *cs = cgroup_cs(cgrp); 1972 struct cpuset *cs = css_cs(css);
1970 struct cpuset *parent = parent_cs(cs); 1973 struct cpuset *parent = parent_cs(cs);
1971 struct cpuset *tmp_cs; 1974 struct cpuset *tmp_cs;
1972 struct cgroup *pos_cgrp; 1975 struct cgroup *pos_cgrp;
@@ -1984,7 +1987,7 @@ static int cpuset_css_online(struct cgroup *cgrp)
1984 1987
1985 number_of_cpusets++; 1988 number_of_cpusets++;
1986 1989
1987 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags)) 1990 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
1988 goto out_unlock; 1991 goto out_unlock;
1989 1992
1990 /* 1993 /*
@@ -2024,9 +2027,9 @@ out_unlock:
2024 * will call rebuild_sched_domains_locked(). 2027 * will call rebuild_sched_domains_locked().
2025 */ 2028 */
2026 2029
2027static void cpuset_css_offline(struct cgroup *cgrp) 2030static void cpuset_css_offline(struct cgroup_subsys_state *css)
2028{ 2031{
2029 struct cpuset *cs = cgroup_cs(cgrp); 2032 struct cpuset *cs = css_cs(css);
2030 2033
2031 mutex_lock(&cpuset_mutex); 2034 mutex_lock(&cpuset_mutex);
2032 2035
@@ -2039,9 +2042,9 @@ static void cpuset_css_offline(struct cgroup *cgrp)
2039 mutex_unlock(&cpuset_mutex); 2042 mutex_unlock(&cpuset_mutex);
2040} 2043}
2041 2044
2042static void cpuset_css_free(struct cgroup *cgrp) 2045static void cpuset_css_free(struct cgroup_subsys_state *css)
2043{ 2046{
2044 struct cpuset *cs = cgroup_cs(cgrp); 2047 struct cpuset *cs = css_cs(css);
2045 2048
2046 free_cpumask_var(cs->cpus_allowed); 2049 free_cpumask_var(cs->cpus_allowed);
2047 kfree(cs); 2050 kfree(cs);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 414c61f4d776..9705a0ed1dce 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7778,7 +7778,8 @@ unlock:
7778device_initcall(perf_event_sysfs_init); 7778device_initcall(perf_event_sysfs_init);
7779 7779
7780#ifdef CONFIG_CGROUP_PERF 7780#ifdef CONFIG_CGROUP_PERF
7781static struct cgroup_subsys_state *perf_cgroup_css_alloc(struct cgroup *cont) 7781static struct cgroup_subsys_state *
7782perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
7782{ 7783{
7783 struct perf_cgroup *jc; 7784 struct perf_cgroup *jc;
7784 7785
@@ -7795,11 +7796,10 @@ static struct cgroup_subsys_state *perf_cgroup_css_alloc(struct cgroup *cont)
7795 return &jc->css; 7796 return &jc->css;
7796} 7797}
7797 7798
7798static void perf_cgroup_css_free(struct cgroup *cont) 7799static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
7799{ 7800{
7800 struct perf_cgroup *jc; 7801 struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);
7801 jc = container_of(cgroup_css(cont, perf_subsys_id), 7802
7802 struct perf_cgroup, css);
7803 free_percpu(jc->info); 7803 free_percpu(jc->info);
7804 kfree(jc); 7804 kfree(jc);
7805} 7805}
@@ -7811,15 +7811,17 @@ static int __perf_cgroup_move(void *info)
7811 return 0; 7811 return 0;
7812} 7812}
7813 7813
7814static void perf_cgroup_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) 7814static void perf_cgroup_attach(struct cgroup_subsys_state *css,
7815 struct cgroup_taskset *tset)
7815{ 7816{
7816 struct task_struct *task; 7817 struct task_struct *task;
7817 7818
7818 cgroup_taskset_for_each(task, cgrp, tset) 7819 cgroup_taskset_for_each(task, css->cgroup, tset)
7819 task_function_call(task, __perf_cgroup_move, task); 7820 task_function_call(task, __perf_cgroup_move, task);
7820} 7821}
7821 7822
7822static void perf_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp, 7823static void perf_cgroup_exit(struct cgroup_subsys_state *css,
7824 struct cgroup_subsys_state *old_css,
7823 struct task_struct *task) 7825 struct task_struct *task)
7824{ 7826{
7825 /* 7827 /*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7a10742b389a..622b7efc5ade 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7094,16 +7094,17 @@ static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
7094 return css_tg(cgroup_css(cgrp, cpu_cgroup_subsys_id)); 7094 return css_tg(cgroup_css(cgrp, cpu_cgroup_subsys_id));
7095} 7095}
7096 7096
7097static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp) 7097static struct cgroup_subsys_state *
7098cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
7098{ 7099{
7099 struct task_group *tg, *parent; 7100 struct task_group *parent = css_tg(parent_css);
7101 struct task_group *tg;
7100 7102
7101 if (!cgrp->parent) { 7103 if (!parent) {
7102 /* This is early initialization for the top cgroup */ 7104 /* This is early initialization for the top cgroup */
7103 return &root_task_group.css; 7105 return &root_task_group.css;
7104 } 7106 }
7105 7107
7106 parent = cgroup_tg(cgrp->parent);
7107 tg = sched_create_group(parent); 7108 tg = sched_create_group(parent);
7108 if (IS_ERR(tg)) 7109 if (IS_ERR(tg))
7109 return ERR_PTR(-ENOMEM); 7110 return ERR_PTR(-ENOMEM);
@@ -7111,38 +7112,38 @@ static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp)
7111 return &tg->css; 7112 return &tg->css;
7112} 7113}
7113 7114
7114static int cpu_cgroup_css_online(struct cgroup *cgrp) 7115static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
7115{ 7116{
7116 struct task_group *tg = cgroup_tg(cgrp); 7117 struct task_group *tg = css_tg(css);
7117 struct task_group *parent = css_tg(css_parent(&tg->css)); 7118 struct task_group *parent = css_tg(css_parent(css));
7118 7119
7119 if (parent) 7120 if (parent)
7120 sched_online_group(tg, parent); 7121 sched_online_group(tg, parent);
7121 return 0; 7122 return 0;
7122} 7123}
7123 7124
7124static void cpu_cgroup_css_free(struct cgroup *cgrp) 7125static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
7125{ 7126{
7126 struct task_group *tg = cgroup_tg(cgrp); 7127 struct task_group *tg = css_tg(css);
7127 7128
7128 sched_destroy_group(tg); 7129 sched_destroy_group(tg);
7129} 7130}
7130 7131
7131static void cpu_cgroup_css_offline(struct cgroup *cgrp) 7132static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
7132{ 7133{
7133 struct task_group *tg = cgroup_tg(cgrp); 7134 struct task_group *tg = css_tg(css);
7134 7135
7135 sched_offline_group(tg); 7136 sched_offline_group(tg);
7136} 7137}
7137 7138
7138static int cpu_cgroup_can_attach(struct cgroup *cgrp, 7139static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
7139 struct cgroup_taskset *tset) 7140 struct cgroup_taskset *tset)
7140{ 7141{
7141 struct task_struct *task; 7142 struct task_struct *task;
7142 7143
7143 cgroup_taskset_for_each(task, cgrp, tset) { 7144 cgroup_taskset_for_each(task, css->cgroup, tset) {
7144#ifdef CONFIG_RT_GROUP_SCHED 7145#ifdef CONFIG_RT_GROUP_SCHED
7145 if (!sched_rt_can_attach(cgroup_tg(cgrp), task)) 7146 if (!sched_rt_can_attach(css_tg(css), task))
7146 return -EINVAL; 7147 return -EINVAL;
7147#else 7148#else
7148 /* We don't support RT-tasks being in separate groups */ 7149 /* We don't support RT-tasks being in separate groups */
@@ -7153,18 +7154,18 @@ static int cpu_cgroup_can_attach(struct cgroup *cgrp,
7153 return 0; 7154 return 0;
7154} 7155}
7155 7156
7156static void cpu_cgroup_attach(struct cgroup *cgrp, 7157static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
7157 struct cgroup_taskset *tset) 7158 struct cgroup_taskset *tset)
7158{ 7159{
7159 struct task_struct *task; 7160 struct task_struct *task;
7160 7161
7161 cgroup_taskset_for_each(task, cgrp, tset) 7162 cgroup_taskset_for_each(task, css->cgroup, tset)
7162 sched_move_task(task); 7163 sched_move_task(task);
7163} 7164}
7164 7165
7165static void 7166static void cpu_cgroup_exit(struct cgroup_subsys_state *css,
7166cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp, 7167 struct cgroup_subsys_state *old_css,
7167 struct task_struct *task) 7168 struct task_struct *task)
7168{ 7169{
7169 /* 7170 /*
7170 * cgroup_exit() is called in the copy_process() failure path. 7171 * cgroup_exit() is called in the copy_process() failure path.
diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c
index f6926a149a71..1b784d9b3630 100644
--- a/kernel/sched/cpuacct.c
+++ b/kernel/sched/cpuacct.c
@@ -62,11 +62,12 @@ static struct cpuacct root_cpuacct = {
62}; 62};
63 63
64/* create a new cpu accounting group */ 64/* create a new cpu accounting group */
65static struct cgroup_subsys_state *cpuacct_css_alloc(struct cgroup *cgrp) 65static struct cgroup_subsys_state *
66cpuacct_css_alloc(struct cgroup_subsys_state *parent_css)
66{ 67{
67 struct cpuacct *ca; 68 struct cpuacct *ca;
68 69
69 if (!cgrp->parent) 70 if (!parent_css)
70 return &root_cpuacct.css; 71 return &root_cpuacct.css;
71 72
72 ca = kzalloc(sizeof(*ca), GFP_KERNEL); 73 ca = kzalloc(sizeof(*ca), GFP_KERNEL);
@@ -92,9 +93,9 @@ out:
92} 93}
93 94
94/* destroy an existing cpu accounting group */ 95/* destroy an existing cpu accounting group */
95static void cpuacct_css_free(struct cgroup *cgrp) 96static void cpuacct_css_free(struct cgroup_subsys_state *css)
96{ 97{
97 struct cpuacct *ca = cgroup_ca(cgrp); 98 struct cpuacct *ca = css_ca(css);
98 99
99 free_percpu(ca->cpustat); 100 free_percpu(ca->cpustat);
100 free_percpu(ca->cpuusage); 101 free_percpu(ca->cpuusage);