author    Paul Menage <menage@google.com>    2007-10-19 02:39:36 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-10-19 14:53:36 -0400
commit    817929ec274bcfe771586d338bb31d1659615686 (patch)
tree      5a96ed1afd308016e8720437a00bf2f114e907cb
parent    a424316ca154317367c7ddf89997d1c80e4a8051 (diff)
Task Control Groups: shared cgroup subsystem group arrays
Replace the struct css_set embedded in task_struct with a pointer; all
tasks that have the same set of memberships across all hierarchies will
share a css_set object, and will be linked via their css_sets field to
the "tasks" list_head in the css_set. Assuming that many tasks share the
same cgroup assignments, this reduces overall space usage and keeps the
size of the task_struct down (three pointers added to task_struct
compared to a non-cgroups kernel, no matter how many subsystems are
registered).

[akpm@linux-foundation.org: fix a printk]
[akpm@linux-foundation.org: build fix]
Signed-off-by: Paul Menage <menage@google.com>
Cc: Serge E. Hallyn <serue@us.ibm.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Dave Hansen <haveblue@us.ibm.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Paul Jackson <pj@sgi.com>
Cc: Kirill Korotaev <dev@openvz.org>
Cc: Herbert Poetzl <herbert@13thfloor.at>
Cc: Srivatsa Vaddagiri <vatsa@in.ibm.com>
Cc: Cedric Le Goater <clg@fr.ibm.com>
Cc: Serge E. Hallyn <serue@us.ibm.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Dave Hansen <haveblue@us.ibm.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Paul Jackson <pj@sgi.com>
Cc: Kirill Korotaev <dev@openvz.org>
Cc: Herbert Poetzl <herbert@13thfloor.at>
Cc: Srivatsa Vaddagiri <vatsa@in.ibm.com>
Cc: Cedric Le Goater <clg@fr.ibm.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 Documentation/cgroups.txt |  14
 include/linux/cgroup.h    |  89
 include/linux/sched.h     |  33
 kernel/cgroup.c           | 649
 kernel/fork.c             |   1
 5 files changed, 632 insertions(+), 154 deletions(-)
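The core idea of the patch, as a minimal standalone C sketch (not the
patch's code: the types, names and refcounting here are simplified
stand-ins, with no locking and no kref machinery): each task points at a
shared, reference-counted css_set, so fork() and exit() cost a single
refcount operation no matter how many subsystems are registered.

#include <stdlib.h>

#define CGROUP_SUBSYS_COUNT 2          /* stand-in; really generated */

struct cgroup_subsys_state;            /* opaque per-subsystem state */

struct css_set {
        int refcount;                  /* one reference per task */
        struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
};

struct task {
        struct css_set *cgroups;       /* shared pointer, not embedded */
};

/* fork-time cost: a single increment, however many subsystems exist */
static void sketch_fork(struct task *child, const struct task *parent)
{
        child->cgroups = parent->cgroups;
        child->cgroups->refcount++;
}

/* exit-time cost: a single decrement; the last task out frees the set */
static void sketch_exit(struct task *tsk)
{
        if (--tsk->cgroups->refcount == 0)
                free(tsk->cgroups);
        tsk->cgroups = NULL;
}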
diff --git a/Documentation/cgroups.txt b/Documentation/cgroups.txt
index 553727cc9944..98a26f81fa75 100644
--- a/Documentation/cgroups.txt
+++ b/Documentation/cgroups.txt
@@ -176,7 +176,9 @@ Control Groups extends the kernel as follows:
 subsystem state is something that's expected to happen frequently
 and in performance-critical code, whereas operations that require a
 task's actual cgroup assignments (in particular, moving between
-cgroups) are less common.
+cgroups) are less common. A linked list runs through the cg_list
+field of each task_struct using the css_set, anchored at
+css_set->tasks.
 
 - A cgroup hierarchy filesystem can be mounted for browsing and
   manipulation from user space.
@@ -252,6 +254,16 @@ linear search to locate an appropriate existing css_set, so isn't
 very efficient. A future version will use a hash table for better
 performance.
 
+To allow access from a cgroup to the css_sets (and hence tasks)
+that comprise it, a set of cg_cgroup_link objects form a lattice;
+each cg_cgroup_link is linked into a list of cg_cgroup_links for
+a single cgroup on its cont_link_list field, and a list of
+cg_cgroup_links for a single css_set on its cg_link_list.
+
+Thus the set of tasks in a cgroup can be listed by iterating over
+each css_set that references the cgroup, and sub-iterating over
+each css_set's task set.
+
 The use of a Linux virtual file system (vfs) to represent the
 cgroup hierarchy provides for a familiar permission and name space
 for cgroups, with a minimum of additional kernel code.
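The lattice described above reduces to a two-level walk. A standalone
sketch (simplified singly-linked chains instead of the kernel's
list_head; the type and field names mirror the documentation, not the
exact kernel declarations): listing a cgroup's tasks iterates over the
cgroup's cg_cgroup_links and, for each linked css_set, over its tasks.

#include <stdio.h>

struct task {
        struct task *next;             /* chained off css_set->tasks */
        int pid;
};

struct css_set {
        struct task *tasks;            /* tasks sharing this css_set */
};

struct cg_cgroup_link {
        struct cg_cgroup_link *next;   /* chained off cgroup->css_sets */
        struct css_set *cg;            /* css_set this link points at */
};

struct cgroup {
        struct cg_cgroup_link *css_sets;
};

/* Outer loop over link objects, inner loop over each css_set's tasks */
static void print_cgroup_tasks(const struct cgroup *cgrp)
{
        const struct cg_cgroup_link *link;
        const struct task *t;

        for (link = cgrp->css_sets; link; link = link->next)
                for (t = link->cg->tasks; t; t = t->next)
                        printf("%d\n", t->pid);
}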
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index a9553568118f..836b3557bb76 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -27,10 +27,19 @@ extern void cgroup_lock(void);
 extern void cgroup_unlock(void);
 extern void cgroup_fork(struct task_struct *p);
 extern void cgroup_fork_callbacks(struct task_struct *p);
+extern void cgroup_post_fork(struct task_struct *p);
 extern void cgroup_exit(struct task_struct *p, int run_callbacks);
 
 extern struct file_operations proc_cgroup_operations;
 
+/* Define the enumeration of all cgroup subsystems */
+#define SUBSYS(_x) _x ## _subsys_id,
+enum cgroup_subsys_id {
+#include <linux/cgroup_subsys.h>
+        CGROUP_SUBSYS_COUNT
+};
+#undef SUBSYS
+
 /* Per-subsystem/per-cgroup state maintained by the system. */
 struct cgroup_subsys_state {
         /* The cgroup that this subsystem is attached to. Useful
@@ -97,6 +106,52 @@ struct cgroup {
 
         struct cgroupfs_root *root;
         struct cgroup *top_cgroup;
+
+        /*
+         * List of cg_cgroup_links pointing at css_sets with
+         * tasks in this cgroup. Protected by css_set_lock
+         */
+        struct list_head css_sets;
+};
+
+/* A css_set is a structure holding pointers to a set of
+ * cgroup_subsys_state objects. This saves space in the task struct
+ * object and speeds up fork()/exit(), since a single inc/dec and a
+ * list_add()/del() can bump the reference count on the entire
+ * cgroup set for a task.
+ */
+
+struct css_set {
+
+        /* Reference count */
+        struct kref ref;
+
+        /*
+         * List running through all cgroup groups. Protected by
+         * css_set_lock
+         */
+        struct list_head list;
+
+        /*
+         * List running through all tasks using this cgroup
+         * group. Protected by css_set_lock
+         */
+        struct list_head tasks;
+
+        /*
+         * List of cg_cgroup_link objects on link chains from
+         * cgroups referenced from this css_set. Protected by
+         * css_set_lock
+         */
+        struct list_head cg_links;
+
+        /*
+         * Set of subsystem states, one for each subsystem. This array
+         * is immutable after creation apart from the init_css_set
+         * during subsystem registration (at boot time).
+         */
+        struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
+
 };
 
 /* struct cftype:
@@ -157,15 +212,7 @@ int cgroup_is_removed(const struct cgroup *cont);
 
 int cgroup_path(const struct cgroup *cont, char *buf, int buflen);
 
-int __cgroup_task_count(const struct cgroup *cont);
-static inline int cgroup_task_count(const struct cgroup *cont)
-{
-        int task_count;
-        rcu_read_lock();
-        task_count = __cgroup_task_count(cont);
-        rcu_read_unlock();
-        return task_count;
-}
+int cgroup_task_count(const struct cgroup *cont);
 
 /* Return true if the cgroup is a descendant of the current cgroup */
 int cgroup_is_descendant(const struct cgroup *cont);
@@ -213,7 +260,7 @@ static inline struct cgroup_subsys_state *cgroup_subsys_state(
 static inline struct cgroup_subsys_state *task_subsys_state(
         struct task_struct *task, int subsys_id)
 {
-        return rcu_dereference(task->cgroups.subsys[subsys_id]);
+        return rcu_dereference(task->cgroups->subsys[subsys_id]);
 }
 
 static inline struct cgroup* task_cgroup(struct task_struct *task,
@@ -226,6 +273,27 @@ int cgroup_path(const struct cgroup *cont, char *buf, int buflen);
 
 int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *ss);
 
+/* A cgroup_iter should be treated as an opaque object */
+struct cgroup_iter {
+        struct list_head *cg_link;
+        struct list_head *task;
+};
+
+/* To iterate across the tasks in a cgroup:
+ *
+ * 1) call cgroup_iter_start to initialize an iterator
+ *
+ * 2) call cgroup_iter_next() to retrieve member tasks until it
+ *    returns NULL or until you want to end the iteration
+ *
+ * 3) call cgroup_iter_end() to destroy the iterator.
+ */
+void cgroup_iter_start(struct cgroup *cont, struct cgroup_iter *it);
+struct task_struct *cgroup_iter_next(struct cgroup *cont,
+                                     struct cgroup_iter *it);
+void cgroup_iter_end(struct cgroup *cont, struct cgroup_iter *it);
+
+
 #else /* !CONFIG_CGROUPS */
 
 static inline int cgroup_init_early(void) { return 0; }
@@ -233,6 +301,7 @@ static inline int cgroup_init(void) { return 0; }
 static inline void cgroup_init_smp(void) {}
 static inline void cgroup_fork(struct task_struct *p) {}
 static inline void cgroup_fork_callbacks(struct task_struct *p) {}
+static inline void cgroup_post_fork(struct task_struct *p) {}
 static inline void cgroup_exit(struct task_struct *p, int callbacks) {}
 
 static inline void cgroup_lock(void) {}
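For illustration, a hypothetical in-kernel caller of the iterator API
declared above (cgroup_count_running() is an invented example, not part
of this patch; note that css_set_lock is read-held between
cgroup_iter_start() and cgroup_iter_end(), so the loop body must not
sleep):

static int cgroup_count_running(struct cgroup *cont)
{
        struct cgroup_iter it;
        struct task_struct *tsk;
        int n = 0;

        cgroup_iter_start(cont, &it);
        while ((tsk = cgroup_iter_next(cont, &it))) {
                if (tsk->state == 0)    /* TASK_RUNNING */
                        n++;
        }
        cgroup_iter_end(cont, &it);
        return n;
}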
diff --git a/include/linux/sched.h b/include/linux/sched.h
index af2ed4bae678..1aa1cfa63b37 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -894,34 +894,6 @@ struct sched_entity {
 #endif
 };
 
-#ifdef CONFIG_CGROUPS
-
-#define SUBSYS(_x) _x ## _subsys_id,
-enum cgroup_subsys_id {
-#include <linux/cgroup_subsys.h>
-        CGROUP_SUBSYS_COUNT
-};
-#undef SUBSYS
-
-/* A css_set is a structure holding pointers to a set of
- * cgroup_subsys_state objects.
- */
-
-struct css_set {
-
-        /* Set of subsystem states, one for each subsystem. NULL for
-         * subsystems that aren't part of this hierarchy. These
-         * pointers reduce the number of dereferences required to get
-         * from a task to its state for a given cgroup, but result
-         * in increased space usage if tasks are in wildly different
-         * groupings across different hierarchies. This array is
-         * immutable after creation */
-        struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
-
-};
-
-#endif /* CONFIG_CGROUPS */
-
 struct task_struct {
         volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
         void *stack;
@@ -1159,7 +1131,10 @@ struct task_struct {
         int cpuset_mem_spread_rotor;
 #endif
 #ifdef CONFIG_CGROUPS
-        struct css_set cgroups;
+        /* Control Group info protected by css_set_lock */
+        struct css_set *cgroups;
+        /* cg_list protected by css_set_lock and tsk->alloc_lock */
+        struct list_head cg_list;
 #endif
 #ifdef CONFIG_FUTEX
         struct robust_list_head __user *robust_list;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index db245f19eb8a..883928c0e147 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -36,6 +36,7 @@
 #include <linux/proc_fs.h>
 #include <linux/rcupdate.h>
 #include <linux/sched.h>
+#include <linux/backing-dev.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/magic.h>
@@ -95,6 +96,7 @@ static struct cgroupfs_root rootnode;
 /* The list of hierarchy roots */
 
 static LIST_HEAD(roots);
+static int root_count;
 
 /* dummytop is a shorthand for the dummy hierarchy's top cgroup */
 #define dummytop (&rootnode.top_cgroup)
@@ -133,12 +135,49 @@ list_for_each_entry(_ss, &_root->subsys_list, sibling)
 #define for_each_root(_root) \
 list_for_each_entry(_root, &roots, root_list)
 
-/* Each task_struct has an embedded css_set, so the get/put
- * operation simply takes a reference count on all the cgroups
- * referenced by subsystems in this css_set. This can end up
- * multiple-counting some cgroups, but that's OK - the ref-count is
- * just a busy/not-busy indicator; ensuring that we only count each
- * cgroup once would require taking a global lock to ensure that no
+/* Link structure for associating css_set objects with cgroups */
+struct cg_cgroup_link {
+        /*
+         * List running through cg_cgroup_links associated with a
+         * cgroup, anchored on cgroup->css_sets
+         */
+        struct list_head cont_link_list;
+        /*
+         * List running through cg_cgroup_links pointing at a
+         * single css_set object, anchored on css_set->cg_links
+         */
+        struct list_head cg_link_list;
+        struct css_set *cg;
+};
+
+/* The default css_set - used by init and its children prior to any
+ * hierarchies being mounted. It contains a pointer to the root state
+ * for each subsystem. Also used to anchor the list of css_sets. Not
+ * reference-counted, to improve performance when child cgroups
+ * haven't been created.
+ */
+
+static struct css_set init_css_set;
+static struct cg_cgroup_link init_css_set_link;
+
+/* css_set_lock protects the list of css_set objects, and the
+ * chain of tasks off each css_set. Nests outside task->alloc_lock
+ * due to cgroup_iter_start() */
+static DEFINE_RWLOCK(css_set_lock);
+static int css_set_count;
+
+/* We don't maintain the lists running through each css_set to its
+ * task until after the first call to cgroup_iter_start(). This
+ * reduces the fork()/exit() overhead for people who have cgroups
+ * compiled into their kernel but not actually in use */
+static int use_task_css_set_links;
+
+/* When we create or destroy a css_set, the operation simply
+ * takes/releases a reference count on all the cgroups referenced
+ * by subsystems in this css_set. This can end up multiple-counting
+ * some cgroups, but that's OK - the ref-count is just a
+ * busy/not-busy indicator; ensuring that we only count each cgroup
+ * once would require taking a global lock to ensure that no
  * subsystems moved between hierarchies while we were doing so.
  *
  * Possible TODO: decide at boot time based on the number of
@@ -146,18 +185,230 @@ list_for_each_entry(_root, &roots, root_list)
  * it's better for performance to ref-count every subsystem, or to
  * take a global lock and only add one ref count to each hierarchy.
  */
-static void get_css_set(struct css_set *cg)
+
+/*
+ * unlink a css_set from the list and free it
+ */
+static void release_css_set(struct kref *k)
 {
+        struct css_set *cg = container_of(k, struct css_set, ref);
         int i;
+
+        write_lock(&css_set_lock);
+        list_del(&cg->list);
+        css_set_count--;
+        while (!list_empty(&cg->cg_links)) {
+                struct cg_cgroup_link *link;
+                link = list_entry(cg->cg_links.next,
+                                  struct cg_cgroup_link, cg_link_list);
+                list_del(&link->cg_link_list);
+                list_del(&link->cont_link_list);
+                kfree(link);
+        }
+        write_unlock(&css_set_lock);
         for (i = 0; i < CGROUP_SUBSYS_COUNT; i++)
-                atomic_inc(&cg->subsys[i]->cgroup->count);
+                atomic_dec(&cg->subsys[i]->cgroup->count);
+        kfree(cg);
 }
 
-static void put_css_set(struct css_set *cg)
+/*
+ * refcounted get/put for css_set objects
+ */
+static inline void get_css_set(struct css_set *cg)
+{
+        kref_get(&cg->ref);
+}
+
+static inline void put_css_set(struct css_set *cg)
+{
+        kref_put(&cg->ref, release_css_set);
+}
+
+/*
+ * find_existing_css_set() is a helper for
+ * find_css_set(), and checks to see whether an existing
+ * css_set is suitable. This currently walks a linked-list for
+ * simplicity; a later patch will use a hash table for better
+ * performance
+ *
+ * oldcg: the cgroup group that we're using before the cgroup
+ * transition
+ *
+ * cont: the cgroup that we're moving into
+ *
+ * template: location in which to build the desired set of subsystem
+ * state objects for the new cgroup group
+ */
+
+static struct css_set *find_existing_css_set(
+        struct css_set *oldcg,
+        struct cgroup *cont,
+        struct cgroup_subsys_state *template[])
 {
         int i;
-        for (i = 0; i < CGROUP_SUBSYS_COUNT; i++)
-                atomic_dec(&cg->subsys[i]->cgroup->count);
+        struct cgroupfs_root *root = cont->root;
+        struct list_head *l = &init_css_set.list;
+
+        /* Build the set of subsystem state objects that we want to
+         * see in the new css_set */
+        for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+                if (root->subsys_bits & (1ull << i)) {
+                        /* Subsystem is in this hierarchy. So we want
+                         * the subsystem state from the new
+                         * cgroup */
+                        template[i] = cont->subsys[i];
+                } else {
+                        /* Subsystem is not in this hierarchy, so we
+                         * don't want to change the subsystem state */
+                        template[i] = oldcg->subsys[i];
+                }
+        }
+
+        /* Look through existing cgroup groups to find one to reuse */
+        do {
+                struct css_set *cg =
+                        list_entry(l, struct css_set, list);
+
+                if (!memcmp(template, cg->subsys, sizeof(cg->subsys))) {
+                        /* All subsystems matched */
+                        return cg;
+                }
+                /* Try the next cgroup group */
+                l = l->next;
+        } while (l != &init_css_set.list);
+
+        /* No existing cgroup group matched */
+        return NULL;
+}
+
+/*
+ * allocate_cg_links() allocates "count" cg_cgroup_link structures
+ * and chains them on tmp through their cont_link_list fields. Returns 0 on
+ * success or a negative error
+ */
+
+static int allocate_cg_links(int count, struct list_head *tmp)
+{
+        struct cg_cgroup_link *link;
+        int i;
+        INIT_LIST_HEAD(tmp);
+        for (i = 0; i < count; i++) {
+                link = kmalloc(sizeof(*link), GFP_KERNEL);
+                if (!link) {
+                        while (!list_empty(tmp)) {
+                                link = list_entry(tmp->next,
+                                                  struct cg_cgroup_link,
+                                                  cont_link_list);
+                                list_del(&link->cont_link_list);
+                                kfree(link);
+                        }
+                        return -ENOMEM;
+                }
+                list_add(&link->cont_link_list, tmp);
+        }
+        return 0;
+}
+
+static void free_cg_links(struct list_head *tmp)
+{
+        while (!list_empty(tmp)) {
+                struct cg_cgroup_link *link;
+                link = list_entry(tmp->next,
+                                  struct cg_cgroup_link,
+                                  cont_link_list);
+                list_del(&link->cont_link_list);
+                kfree(link);
+        }
+}
+
+/*
+ * find_css_set() takes an existing cgroup group and a
+ * cgroup object, and returns a css_set object that's
+ * equivalent to the old group, but with the given cgroup
+ * substituted into the appropriate hierarchy. Must be called with
+ * cgroup_mutex held
+ */
+
+static struct css_set *find_css_set(
+        struct css_set *oldcg, struct cgroup *cont)
+{
+        struct css_set *res;
+        struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];
+        int i;
+
+        struct list_head tmp_cg_links;
+        struct cg_cgroup_link *link;
+
+        /* First see if we already have a cgroup group that matches
+         * the desired set */
+        write_lock(&css_set_lock);
+        res = find_existing_css_set(oldcg, cont, template);
+        if (res)
+                get_css_set(res);
+        write_unlock(&css_set_lock);
+
+        if (res)
+                return res;
+
+        res = kmalloc(sizeof(*res), GFP_KERNEL);
+        if (!res)
+                return NULL;
+
+        /* Allocate all the cg_cgroup_link objects that we'll need */
+        if (allocate_cg_links(root_count, &tmp_cg_links) < 0) {
+                kfree(res);
+                return NULL;
+        }
+
+        kref_init(&res->ref);
+        INIT_LIST_HEAD(&res->cg_links);
+        INIT_LIST_HEAD(&res->tasks);
+
+        /* Copy the set of subsystem state objects generated in
+         * find_existing_css_set() */
+        memcpy(res->subsys, template, sizeof(res->subsys));
+
+        write_lock(&css_set_lock);
+        /* Add reference counts and links from the new css_set. */
+        for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+                struct cgroup *cont = res->subsys[i]->cgroup;
+                struct cgroup_subsys *ss = subsys[i];
+                atomic_inc(&cont->count);
+                /*
+                 * We want to add a link once per cgroup, so we
+                 * only do it for the first subsystem in each
+                 * hierarchy
+                 */
+                if (ss->root->subsys_list.next == &ss->sibling) {
+                        BUG_ON(list_empty(&tmp_cg_links));
+                        link = list_entry(tmp_cg_links.next,
+                                          struct cg_cgroup_link,
+                                          cont_link_list);
+                        list_del(&link->cont_link_list);
+                        list_add(&link->cont_link_list, &cont->css_sets);
+                        link->cg = res;
+                        list_add(&link->cg_link_list, &res->cg_links);
+                }
+        }
+        if (list_empty(&rootnode.subsys_list)) {
+                link = list_entry(tmp_cg_links.next,
+                                  struct cg_cgroup_link,
+                                  cont_link_list);
+                list_del(&link->cont_link_list);
+                list_add(&link->cont_link_list, &dummytop->css_sets);
+                link->cg = res;
+                list_add(&link->cg_link_list, &res->cg_links);
+        }
+
+        BUG_ON(!list_empty(&tmp_cg_links));
+
+        /* Link this cgroup group into the list */
+        list_add(&res->list, &init_css_set.list);
+        css_set_count++;
+        INIT_LIST_HEAD(&res->tasks);
+        write_unlock(&css_set_lock);
+
+        return res;
 }
 
 /*
@@ -504,6 +755,7 @@ static void init_cgroup_root(struct cgroupfs_root *root)
         cont->top_cgroup = cont;
         INIT_LIST_HEAD(&cont->sibling);
         INIT_LIST_HEAD(&cont->children);
+        INIT_LIST_HEAD(&cont->css_sets);
 }
 
 static int cgroup_test_super(struct super_block *sb, void *data)
@@ -573,6 +825,8 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
         int ret = 0;
         struct super_block *sb;
         struct cgroupfs_root *root;
+        struct list_head tmp_cg_links, *l;
+        INIT_LIST_HEAD(&tmp_cg_links);
 
         /* First find the desired set of subsystems */
         ret = parse_cgroupfs_options(data, &opts);
@@ -602,18 +856,36 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
         } else {
                 /* New superblock */
                 struct cgroup *cont = &root->top_cgroup;
+                struct inode *inode;
 
                 BUG_ON(sb->s_root != NULL);
 
                 ret = cgroup_get_rootdir(sb);
                 if (ret)
                         goto drop_new_super;
+                inode = sb->s_root->d_inode;
 
+                mutex_lock(&inode->i_mutex);
                 mutex_lock(&cgroup_mutex);
 
+                /*
+                 * We're accessing css_set_count without locking
+                 * css_set_lock here, but that's OK - it can only be
+                 * increased by someone holding cgroup_lock, and
+                 * that's us. The worst that can happen is that we
+                 * have some link structures left over
+                 */
+                ret = allocate_cg_links(css_set_count, &tmp_cg_links);
+                if (ret) {
+                        mutex_unlock(&cgroup_mutex);
+                        mutex_unlock(&inode->i_mutex);
+                        goto drop_new_super;
+                }
+
                 ret = rebind_subsystems(root, root->subsys_bits);
                 if (ret == -EBUSY) {
                         mutex_unlock(&cgroup_mutex);
+                        mutex_unlock(&inode->i_mutex);
                         goto drop_new_super;
                 }
 
@@ -621,24 +893,40 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
                 BUG_ON(ret);
 
                 list_add(&root->root_list, &roots);
+                root_count++;
 
                 sb->s_root->d_fsdata = &root->top_cgroup;
                 root->top_cgroup.dentry = sb->s_root;
 
+                /* Link the top cgroup in this hierarchy into all
+                 * the css_set objects */
+                write_lock(&css_set_lock);
+                l = &init_css_set.list;
+                do {
+                        struct css_set *cg;
+                        struct cg_cgroup_link *link;
+                        cg = list_entry(l, struct css_set, list);
+                        BUG_ON(list_empty(&tmp_cg_links));
+                        link = list_entry(tmp_cg_links.next,
+                                          struct cg_cgroup_link,
+                                          cont_link_list);
+                        list_del(&link->cont_link_list);
+                        link->cg = cg;
+                        list_add(&link->cont_link_list,
+                                 &root->top_cgroup.css_sets);
+                        list_add(&link->cg_link_list, &cg->cg_links);
+                        l = l->next;
+                } while (l != &init_css_set.list);
+                write_unlock(&css_set_lock);
+
+                free_cg_links(&tmp_cg_links);
+
                 BUG_ON(!list_empty(&cont->sibling));
                 BUG_ON(!list_empty(&cont->children));
                 BUG_ON(root->number_of_cgroups != 1);
 
-                /*
-                 * I believe that it's safe to nest i_mutex inside
-                 * cgroup_mutex in this case, since no-one else can
-                 * be accessing this directory yet. But we still need
-                 * to teach lockdep that this is the case - currently
-                 * a cgroupfs remount triggers a lockdep warning
-                 */
-                mutex_lock(&cont->dentry->d_inode->i_mutex);
                 cgroup_populate_dir(cont);
-                mutex_unlock(&cont->dentry->d_inode->i_mutex);
+                mutex_unlock(&inode->i_mutex);
                 mutex_unlock(&cgroup_mutex);
         }
 
@@ -647,6 +935,7 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
  drop_new_super:
         up_write(&sb->s_umount);
         deactivate_super(sb);
+        free_cg_links(&tmp_cg_links);
         return ret;
 }
 
@@ -668,8 +957,25 @@ static void cgroup_kill_sb(struct super_block *sb) {
         /* Shouldn't be able to fail ... */
         BUG_ON(ret);
 
-        if (!list_empty(&root->root_list))
+        /*
+         * Release all the links from css_sets to this hierarchy's
+         * root cgroup
+         */
+        write_lock(&css_set_lock);
+        while (!list_empty(&cont->css_sets)) {
+                struct cg_cgroup_link *link;
+                link = list_entry(cont->css_sets.next,
+                                  struct cg_cgroup_link, cont_link_list);
+                list_del(&link->cg_link_list);
+                list_del(&link->cont_link_list);
+                kfree(link);
+        }
+        write_unlock(&css_set_lock);
+
+        if (!list_empty(&root->root_list)) {
                 list_del(&root->root_list);
+                root_count--;
+        }
         mutex_unlock(&cgroup_mutex);
 
         kfree(root);
@@ -762,9 +1068,9 @@ static int attach_task(struct cgroup *cont, struct task_struct *tsk)
         int retval = 0;
         struct cgroup_subsys *ss;
         struct cgroup *oldcont;
-        struct css_set *cg = &tsk->cgroups;
+        struct css_set *cg = tsk->cgroups;
+        struct css_set *newcg;
         struct cgroupfs_root *root = cont->root;
-        int i;
         int subsys_id;
 
         get_first_subsys(cont, NULL, &subsys_id);
@@ -783,26 +1089,32 @@ static int attach_task(struct cgroup *cont, struct task_struct *tsk)
                 }
         }
 
+        /*
+         * Locate or allocate a new css_set for this task,
+         * based on its final set of cgroups
+         */
+        newcg = find_css_set(cg, cont);
+        if (!newcg) {
+                return -ENOMEM;
+        }
+
         task_lock(tsk);
         if (tsk->flags & PF_EXITING) {
                 task_unlock(tsk);
+                put_css_set(newcg);
                 return -ESRCH;
         }
-        /* Update the css_set pointers for the subsystems in this
-         * hierarchy */
-        for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
-                if (root->subsys_bits & (1ull << i)) {
-                        /* Subsystem is in this hierarchy. So we want
-                         * the subsystem state from the new
-                         * cgroup. Transfer the refcount from the
-                         * old to the new */
-                        atomic_inc(&cont->count);
-                        atomic_dec(&cg->subsys[i]->cgroup->count);
-                        rcu_assign_pointer(cg->subsys[i], cont->subsys[i]);
-                }
-        }
+        rcu_assign_pointer(tsk->cgroups, newcg);
         task_unlock(tsk);
 
+        /* Update the css_set linked lists if we're using them */
+        write_lock(&css_set_lock);
+        if (!list_empty(&tsk->cg_list)) {
+                list_del(&tsk->cg_list);
+                list_add(&tsk->cg_list, &newcg->tasks);
+        }
+        write_unlock(&css_set_lock);
+
         for_each_subsys(root, ss) {
                 if (ss->attach) {
                         ss->attach(ss, cont, oldcont, tsk);
@@ -810,6 +1122,7 @@ static int attach_task(struct cgroup *cont, struct task_struct *tsk)
         }
 
         synchronize_rcu();
+        put_css_set(cg);
         return 0;
 }
 
@@ -1069,7 +1382,7 @@ static int cgroup_create_file(struct dentry *dentry, int mode,
 
         /* start with the directory inode held, so that we can
          * populate it without racing with another mkdir */
-                mutex_lock(&inode->i_mutex);
+                mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
         } else if (S_ISREG(mode)) {
                 inode->i_size = 0;
                 inode->i_fop = &cgroup_file_operations;
@@ -1148,28 +1461,102 @@ int cgroup_add_files(struct cgroup *cont,
         return 0;
 }
 
-/* Count the number of tasks in a cgroup. Could be made more
- * time-efficient but less space-efficient with more linked lists
- * running through each cgroup and the css_set structures that
- * referenced it. Must be called with tasklist_lock held for read or
- * write or in an rcu critical section.
- */
-int __cgroup_task_count(const struct cgroup *cont)
+/* Count the number of tasks in a cgroup. */
+
+int cgroup_task_count(const struct cgroup *cont)
 {
         int count = 0;
-        struct task_struct *g, *p;
-        struct cgroup_subsys_state *css;
-        int subsys_id;
-
-        get_first_subsys(cont, &css, &subsys_id);
-        do_each_thread(g, p) {
-                if (task_subsys_state(p, subsys_id) == css)
-                        count ++;
-        } while_each_thread(g, p);
+        struct list_head *l;
+
+        read_lock(&css_set_lock);
+        l = cont->css_sets.next;
+        while (l != &cont->css_sets) {
+                struct cg_cgroup_link *link =
+                        list_entry(l, struct cg_cgroup_link, cont_link_list);
+                count += atomic_read(&link->cg->ref.refcount);
+                l = l->next;
+        }
+        read_unlock(&css_set_lock);
         return count;
 }
 
 /*
+ * Advance a list_head iterator. The iterator should be positioned at
+ * the start of a css_set
+ */
+static void cgroup_advance_iter(struct cgroup *cont,
+                                struct cgroup_iter *it)
+{
+        struct list_head *l = it->cg_link;
+        struct cg_cgroup_link *link;
+        struct css_set *cg;
+
+        /* Advance to the next non-empty css_set */
+        do {
+                l = l->next;
+                if (l == &cont->css_sets) {
+                        it->cg_link = NULL;
+                        return;
+                }
+                link = list_entry(l, struct cg_cgroup_link, cont_link_list);
+                cg = link->cg;
+        } while (list_empty(&cg->tasks));
+        it->cg_link = l;
+        it->task = cg->tasks.next;
+}
+
+void cgroup_iter_start(struct cgroup *cont, struct cgroup_iter *it)
+{
+        /*
+         * The first time anyone tries to iterate across a cgroup,
+         * we need to enable the list linking each css_set to its
+         * tasks, and fix up all existing tasks.
+         */
+        if (!use_task_css_set_links) {
+                struct task_struct *p, *g;
+                write_lock(&css_set_lock);
+                use_task_css_set_links = 1;
+                do_each_thread(g, p) {
+                        task_lock(p);
+                        if (list_empty(&p->cg_list))
+                                list_add(&p->cg_list, &p->cgroups->tasks);
+                        task_unlock(p);
+                } while_each_thread(g, p);
+                write_unlock(&css_set_lock);
+        }
+        read_lock(&css_set_lock);
+        it->cg_link = &cont->css_sets;
+        cgroup_advance_iter(cont, it);
+}
+
+struct task_struct *cgroup_iter_next(struct cgroup *cont,
+                                     struct cgroup_iter *it)
+{
+        struct task_struct *res;
+        struct list_head *l = it->task;
+
+        /* If the iterator cg is NULL, we have no tasks */
+        if (!it->cg_link)
+                return NULL;
+        res = list_entry(l, struct task_struct, cg_list);
+        /* Advance iterator to find next entry */
+        l = l->next;
+        if (l == &res->cgroups->tasks) {
+                /* We reached the end of this task list - move on to
+                 * the next cg_cgroup_link */
+                cgroup_advance_iter(cont, it);
+        } else {
+                it->task = l;
+        }
+        return res;
+}
+
+void cgroup_iter_end(struct cgroup *cont, struct cgroup_iter *it)
+{
+        read_unlock(&css_set_lock);
+}
+
+/*
  * Stuff for reading the 'tasks' file.
  *
  * Reading this file can return large amounts of data if a cgroup has
@@ -1198,22 +1585,15 @@ struct ctr_struct {
 static int pid_array_load(pid_t *pidarray, int npids, struct cgroup *cont)
 {
         int n = 0;
-        struct task_struct *g, *p;
-        struct cgroup_subsys_state *css;
-        int subsys_id;
-
-        get_first_subsys(cont, &css, &subsys_id);
-        rcu_read_lock();
-        do_each_thread(g, p) {
-                if (task_subsys_state(p, subsys_id) == css) {
-                        pidarray[n++] = pid_nr(task_pid(p));
-                        if (unlikely(n == npids))
-                                goto array_full;
-                }
-        } while_each_thread(g, p);
-
-array_full:
-        rcu_read_unlock();
+        struct cgroup_iter it;
+        struct task_struct *tsk;
+        cgroup_iter_start(cont, &it);
+        while ((tsk = cgroup_iter_next(cont, &it))) {
+                if (unlikely(n == npids))
+                        break;
+                pidarray[n++] = pid_nr(task_pid(tsk));
+        }
+        cgroup_iter_end(cont, &it);
         return n;
 }
 
@@ -1398,6 +1778,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
         cont->flags = 0;
         INIT_LIST_HEAD(&cont->sibling);
         INIT_LIST_HEAD(&cont->children);
+        INIT_LIST_HEAD(&cont->css_sets);
 
         cont->parent = parent;
         cont->root = parent->root;
@@ -1529,8 +1910,8 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
 
 static void cgroup_init_subsys(struct cgroup_subsys *ss)
 {
-        struct task_struct *g, *p;
         struct cgroup_subsys_state *css;
+        struct list_head *l;
         printk(KERN_ERR "Initializing cgroup subsys %s\n", ss->name);
 
         /* Create the top cgroup state for this subsystem */
@@ -1540,26 +1921,32 @@ static void cgroup_init_subsys(struct cgroup_subsys *ss)
         BUG_ON(IS_ERR(css));
         init_cgroup_css(css, ss, dummytop);
 
-        /* Update all tasks to contain a subsys pointer to this state
-         * - since the subsystem is newly registered, all tasks are in
-         * the subsystem's top cgroup. */
+        /* Update all cgroup groups to contain a subsys
+         * pointer to this state - since the subsystem is
+         * newly registered, all tasks and hence all cgroup
+         * groups are in the subsystem's top cgroup. */
+        write_lock(&css_set_lock);
+        l = &init_css_set.list;
+        do {
+                struct css_set *cg =
+                        list_entry(l, struct css_set, list);
+                cg->subsys[ss->subsys_id] = dummytop->subsys[ss->subsys_id];
+                l = l->next;
+        } while (l != &init_css_set.list);
+        write_unlock(&css_set_lock);
 
         /* If this subsystem requested that it be notified with fork
          * events, we should send it one now for every process in the
          * system */
+        if (ss->fork) {
+                struct task_struct *g, *p;
 
-        read_lock(&tasklist_lock);
-        init_task.cgroups.subsys[ss->subsys_id] = css;
-        if (ss->fork)
-                ss->fork(ss, &init_task);
-
-        do_each_thread(g, p) {
-                printk(KERN_INFO "Setting task %p css to %p (%d)\n", css, p, p->pid);
-                p->cgroups.subsys[ss->subsys_id] = css;
-                if (ss->fork)
-                        ss->fork(ss, p);
-        } while_each_thread(g, p);
-        read_unlock(&tasklist_lock);
+                read_lock(&tasklist_lock);
+                do_each_thread(g, p) {
+                        ss->fork(ss, p);
+                } while_each_thread(g, p);
+                read_unlock(&tasklist_lock);
+        }
 
         need_forkexit_callback |= ss->fork || ss->exit;
 
@@ -1573,8 +1960,22 @@ static void cgroup_init_subsys(struct cgroup_subsys *ss)
 int __init cgroup_init_early(void)
 {
         int i;
+        kref_init(&init_css_set.ref);
+        kref_get(&init_css_set.ref);
+        INIT_LIST_HEAD(&init_css_set.list);
+        INIT_LIST_HEAD(&init_css_set.cg_links);
+        INIT_LIST_HEAD(&init_css_set.tasks);
+        css_set_count = 1;
         init_cgroup_root(&rootnode);
         list_add(&rootnode.root_list, &roots);
+        root_count = 1;
+        init_task.cgroups = &init_css_set;
+
+        init_css_set_link.cg = &init_css_set;
+        list_add(&init_css_set_link.cont_link_list,
+                 &rootnode.top_cgroup.css_sets);
+        list_add(&init_css_set_link.cg_link_list,
+                 &init_css_set.cg_links);
 
         for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                 struct cgroup_subsys *ss = subsys[i];
@@ -1715,29 +2116,13 @@ static int proc_cgroupstats_show(struct seq_file *m, void *v)
         int i;
         struct cgroupfs_root *root;
 
+        seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\n");
         mutex_lock(&cgroup_mutex);
-        seq_puts(m, "Hierarchies:\n");
-        for_each_root(root) {
-                struct cgroup_subsys *ss;
-                int first = 1;
-                seq_printf(m, "%p: bits=%lx cgroups=%d (", root,
-                           root->subsys_bits, root->number_of_cgroups);
-                for_each_subsys(root, ss) {
-                        seq_printf(m, "%s%s", first ? "" : ", ", ss->name);
-                        first = false;
-                }
-                seq_putc(m, ')');
-                if (root->sb) {
-                        seq_printf(m, " s_active=%d",
-                                   atomic_read(&root->sb->s_active));
-                }
-                seq_putc(m, '\n');
-        }
-        seq_puts(m, "Subsystems:\n");
         for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                 struct cgroup_subsys *ss = subsys[i];
-                seq_printf(m, "%d: name=%s hierarchy=%p\n",
-                           i, ss->name, ss->root);
+                seq_printf(m, "%s\t%lu\t%d\n",
+                           ss->name, ss->root->subsys_bits,
+                           ss->root->number_of_cgroups);
         }
         mutex_unlock(&cgroup_mutex);
         return 0;
@@ -1765,18 +2150,19 @@ static struct file_operations proc_cgroupstats_operations = {
  * fork.c by dup_task_struct(). However, we ignore that copy, since
  * it was not made under the protection of RCU or cgroup_mutex, so
  * might no longer be a valid cgroup pointer. attach_task() might
- * have already changed current->cgroup, allowing the previously
- * referenced cgroup to be removed and freed.
+ * have already changed current->cgroups, allowing the previously
+ * referenced cgroup group to be removed and freed.
  *
  * At the point that cgroup_fork() is called, 'current' is the parent
  * task, and the passed argument 'child' points to the child task.
  */
 void cgroup_fork(struct task_struct *child)
 {
-        rcu_read_lock();
-        child->cgroups = rcu_dereference(current->cgroups);
-        get_css_set(&child->cgroups);
-        rcu_read_unlock();
+        task_lock(current);
+        child->cgroups = current->cgroups;
+        get_css_set(child->cgroups);
+        task_unlock(current);
+        INIT_LIST_HEAD(&child->cg_list);
 }
 
 /**
@@ -1797,6 +2183,21 @@ void cgroup_fork_callbacks(struct task_struct *child)
 }
 
 /**
+ * cgroup_post_fork - called on a new task after adding it to the
+ * task list. Adds the task to the list running through its css_set
+ * if necessary. Has to be after the task is visible on the task list
+ * in case we race with the first call to cgroup_iter_start() - to
+ * guarantee that the new task ends up on its list. */
+void cgroup_post_fork(struct task_struct *child)
+{
+        if (use_task_css_set_links) {
+                write_lock(&css_set_lock);
+                if (list_empty(&child->cg_list))
+                        list_add(&child->cg_list, &child->cgroups->tasks);
+                write_unlock(&css_set_lock);
+        }
+}
+/**
  * cgroup_exit - detach cgroup from exiting task
  * @tsk: pointer to task_struct of exiting process
  *
@@ -1834,6 +2235,7 @@ void cgroup_fork_callbacks(struct task_struct *child)
 void cgroup_exit(struct task_struct *tsk, int run_callbacks)
 {
         int i;
+        struct css_set *cg;
 
         if (run_callbacks && need_forkexit_callback) {
                 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
@@ -1842,11 +2244,26 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
                         ss->exit(ss, tsk);
                 }
         }
+
+        /*
+         * Unlink from the css_set task list if necessary.
+         * Optimistically check cg_list before taking
+         * css_set_lock
+         */
+        if (!list_empty(&tsk->cg_list)) {
+                write_lock(&css_set_lock);
+                if (!list_empty(&tsk->cg_list))
+                        list_del(&tsk->cg_list);
+                write_unlock(&css_set_lock);
+        }
+
         /* Reassign the task to the init_css_set. */
         task_lock(tsk);
-        put_css_set(&tsk->cgroups);
-        tsk->cgroups = init_task.cgroups;
+        cg = tsk->cgroups;
+        tsk->cgroups = &init_css_set;
         task_unlock(tsk);
+        if (cg)
+                put_css_set(cg);
 }
 
 /**
@@ -1880,7 +2297,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys)
                 mutex_unlock(&cgroup_mutex);
                 return 0;
         }
-        cg = &tsk->cgroups;
+        cg = tsk->cgroups;
         parent = task_cgroup(tsk, subsys->subsys_id);
 
         snprintf(nodename, MAX_CGROUP_TYPE_NAMELEN, "node_%d", tsk->pid);
@@ -1888,6 +2305,8 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys)
         /* Pin the hierarchy */
         atomic_inc(&parent->root->sb->s_active);
 
+        /* Keep the cgroup alive */
+        get_css_set(cg);
         mutex_unlock(&cgroup_mutex);
 
         /* Now do the VFS work to create a cgroup */
@@ -1931,6 +2350,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys)
             (parent != task_cgroup(tsk, subsys->subsys_id))) {
                 /* Aargh, we raced ... */
                 mutex_unlock(&inode->i_mutex);
+                put_css_set(cg);
 
                 deactivate_super(parent->root->sb);
                 /* The cgroup is still accessible in the VFS, but
@@ -1954,6 +2374,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys)
 
  out_release:
         mutex_unlock(&inode->i_mutex);
+        put_css_set(cg);
         deactivate_super(parent->root->sb);
         return ret;
 }
diff --git a/kernel/fork.c b/kernel/fork.c
index e7c181454dca..fcac38929245 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1301,6 +1301,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
         spin_unlock(&current->sighand->siglock);
         write_unlock_irq(&tasklist_lock);
         proc_fork_connector(p);
+        cgroup_post_fork(p);
         return p;
 
 bad_fork_cleanup_namespaces: