Diffstat (limited to 'kernel/cgroup.c')
-rw-r--r-- | kernel/cgroup.c | 2805
1 file changed, 2805 insertions, 0 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
new file mode 100644
index 000000000000..5987dccdb2a0
--- /dev/null
+++ b/kernel/cgroup.c
@@ -0,0 +1,2805 @@
1 | /* | ||
2 | * kernel/cgroup.c | ||
3 | * | ||
4 | * Generic process-grouping system. | ||
5 | * | ||
6 | * Based originally on the cpuset system, extracted by Paul Menage | ||
7 | * Copyright (C) 2006 Google, Inc | ||
8 | * | ||
9 | * Copyright notices from the original cpuset code: | ||
10 | * -------------------------------------------------- | ||
11 | * Copyright (C) 2003 BULL SA. | ||
12 | * Copyright (C) 2004-2006 Silicon Graphics, Inc. | ||
13 | * | ||
14 | * Portions derived from Patrick Mochel's sysfs code. | ||
15 | * sysfs is Copyright (c) 2001-3 Patrick Mochel | ||
16 | * | ||
17 | * 2003-10-10 Written by Simon Derr. | ||
18 | * 2003-10-22 Updates by Stephen Hemminger. | ||
19 | * 2004 May-July Rework by Paul Jackson. | ||
20 | * --------------------------------------------------- | ||
21 | * | ||
22 | * This file is subject to the terms and conditions of the GNU General Public | ||
23 | * License. See the file COPYING in the main directory of the Linux | ||
24 | * distribution for more details. | ||
25 | */ | ||
26 | |||
27 | #include <linux/cgroup.h> | ||
28 | #include <linux/errno.h> | ||
29 | #include <linux/fs.h> | ||
30 | #include <linux/kernel.h> | ||
31 | #include <linux/list.h> | ||
32 | #include <linux/mm.h> | ||
33 | #include <linux/mutex.h> | ||
34 | #include <linux/mount.h> | ||
35 | #include <linux/pagemap.h> | ||
36 | #include <linux/proc_fs.h> | ||
37 | #include <linux/rcupdate.h> | ||
38 | #include <linux/sched.h> | ||
39 | #include <linux/backing-dev.h> | ||
40 | #include <linux/seq_file.h> | ||
41 | #include <linux/slab.h> | ||
42 | #include <linux/magic.h> | ||
43 | #include <linux/spinlock.h> | ||
44 | #include <linux/string.h> | ||
45 | #include <linux/sort.h> | ||
46 | #include <linux/kmod.h> | ||
47 | #include <linux/delayacct.h> | ||
48 | #include <linux/cgroupstats.h> | ||
49 | |||
50 | #include <asm/atomic.h> | ||
51 | |||
52 | static DEFINE_MUTEX(cgroup_mutex); | ||
53 | |||
54 | /* Generate an array of cgroup subsystem pointers */ | ||
55 | #define SUBSYS(_x) &_x ## _subsys, | ||
56 | |||
57 | static struct cgroup_subsys *subsys[] = { | ||
58 | #include <linux/cgroup_subsys.h> | ||
59 | }; | ||
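/*
 * A minimal, self-contained userspace sketch of the X-macro technique
 * used above: the same list is expanded twice under two different
 * definitions of SUBSYS(). The cpuset/debug names are placeholders for
 * whatever <linux/cgroup_subsys.h> actually declares.
 */
#include <stdio.h>

struct cgroup_subsys { const char *name; };

#define SUBSYS_LIST SUBSYS(cpuset) SUBSYS(debug)

/* first expansion: define one object per subsystem */
#define SUBSYS(_x) static struct cgroup_subsys _x##_subsys = { #_x };
SUBSYS_LIST
#undef SUBSYS

/* second expansion: collect pointers into an array, as kernel/cgroup.c does */
#define SUBSYS(_x) &_x##_subsys,
static struct cgroup_subsys *subsys[] = { SUBSYS_LIST };
#undef SUBSYS

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(subsys) / sizeof(subsys[0]); i++)
		printf("%s\n", subsys[i]->name);
	return 0;
}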
60 | |||
61 | /* | ||
62 | * A cgroupfs_root represents the root of a cgroup hierarchy, | ||
63 | * and may be associated with a superblock to form an active | ||
64 | * hierarchy | ||
65 | */ | ||
66 | struct cgroupfs_root { | ||
67 | struct super_block *sb; | ||
68 | |||
69 | /* | ||
70 | * The bitmask of subsystems intended to be attached to this | ||
71 | * hierarchy | ||
72 | */ | ||
73 | unsigned long subsys_bits; | ||
74 | |||
75 | /* The bitmask of subsystems currently attached to this hierarchy */ | ||
76 | unsigned long actual_subsys_bits; | ||
77 | |||
78 | /* A list running through the attached subsystems */ | ||
79 | struct list_head subsys_list; | ||
80 | |||
81 | /* The root cgroup for this hierarchy */ | ||
82 | struct cgroup top_cgroup; | ||
83 | |||
84 | /* Tracks how many cgroups are currently defined in this hierarchy. */ | ||
85 | int number_of_cgroups; | ||
86 | |||
87 | /* A list running through the mounted hierarchies */ | ||
88 | struct list_head root_list; | ||
89 | |||
90 | /* Hierarchy-specific flags */ | ||
91 | unsigned long flags; | ||
92 | |||
93 | /* The path to use for release notifications. No locking | ||
94 | * between setting and use - so if userspace updates this | ||
95 | * while child cgroups exist, you could miss a | ||
96 | * notification. We ensure that it's always a valid | ||
97 | * NUL-terminated string */ | ||
98 | char release_agent_path[PATH_MAX]; | ||
99 | }; | ||
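/*
 * A cgroupfs_root comes into existence when userspace mounts the
 * cgroup filesystem. A minimal sketch using mount(2); the mount point
 * and the option string are assumptions (any list accepted by
 * parse_cgroupfs_options() below would do). Requires root.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("cgroup", "/dev/cgroup", "cgroup", 0,
		  "cpuset,noprefix") < 0) {
		perror("mount cgroup");
		return 1;
	}
	return 0;
}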
100 | |||
101 | |||
102 | /* | ||
103 | * The "rootnode" hierarchy is the "dummy hierarchy", reserved for the | ||
104 | * subsystems that are otherwise unattached - it never has more than a | ||
105 | * single cgroup, and all tasks are part of that cgroup. | ||
106 | */ | ||
107 | static struct cgroupfs_root rootnode; | ||
108 | |||
109 | /* The list of hierarchy roots */ | ||
110 | |||
111 | static LIST_HEAD(roots); | ||
112 | static int root_count; | ||
113 | |||
114 | /* dummytop is a shorthand for the dummy hierarchy's top cgroup */ | ||
115 | #define dummytop (&rootnode.top_cgroup) | ||
116 | |||
117 | /* This flag indicates whether tasks in the fork and exit paths should | ||
118 | * check for fork/exit handlers to call. This avoids us having to | ||
119 | * do extra work in the fork/exit path if none of the subsystems | ||
120 | * need to be called. | ||
121 | */ | ||
122 | static int need_forkexit_callback; | ||
123 | |||
124 | /* bits in struct cgroup flags field */ | ||
125 | enum { | ||
126 | /* Control Group is dead */ | ||
127 | CGRP_REMOVED, | ||
128 | /* Control Group has previously had a child cgroup or a task, | ||
129 | * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) */ | ||
130 | CGRP_RELEASABLE, | ||
131 | /* Control Group requires release notifications to userspace */ | ||
132 | CGRP_NOTIFY_ON_RELEASE, | ||
133 | }; | ||
134 | |||
135 | /* convenient tests for these bits */ | ||
136 | inline int cgroup_is_removed(const struct cgroup *cgrp) | ||
137 | { | ||
138 | return test_bit(CGRP_REMOVED, &cgrp->flags); | ||
139 | } | ||
140 | |||
141 | /* bits in struct cgroupfs_root flags field */ | ||
142 | enum { | ||
143 | ROOT_NOPREFIX, /* mounted subsystems have no named prefix */ | ||
144 | }; | ||
145 | |||
146 | inline int cgroup_is_releasable(const struct cgroup *cgrp) | ||
147 | { | ||
148 | const int bits = | ||
149 | (1 << CGRP_RELEASABLE) | | ||
150 | (1 << CGRP_NOTIFY_ON_RELEASE); | ||
151 | return (cgrp->flags & bits) == bits; | ||
152 | } | ||
153 | |||
154 | inline int notify_on_release(const struct cgroup *cgrp) | ||
155 | { | ||
156 | return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); | ||
157 | } | ||
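/*
 * Userspace view of the flags above, as a hedged sketch: writing the
 * hierarchy's release_agent and a group's notify_on_release file. The
 * mount point, group name, and agent path are all assumptions.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

static int write_str(const char *path, const char *s)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, s, strlen(s));
	close(fd);
	return n == (ssize_t)strlen(s) ? 0 : -1;
}

int main(void)
{
	/* the agent is run when "mygroup" becomes releasable (empty and flagged) */
	if (write_str("/dev/cgroup/release_agent", "/sbin/my_agent\n") ||
	    write_str("/dev/cgroup/mygroup/notify_on_release", "1\n")) {
		perror("arming release notification");
		return 1;
	}
	return 0;
}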
158 | |||
159 | /* | ||
160 | * for_each_subsys() allows you to iterate on each subsystem attached to | ||
161 | * an active hierarchy | ||
162 | */ | ||
163 | #define for_each_subsys(_root, _ss) \ | ||
164 | list_for_each_entry(_ss, &_root->subsys_list, sibling) | ||
165 | |||
166 | /* for_each_root() allows you to iterate across the active hierarchies */ | ||
167 | #define for_each_root(_root) \ | ||
168 | list_for_each_entry(_root, &roots, root_list) | ||
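/*
 * Both macros above are thin wrappers around the kernel's intrusive
 * lists: the node is embedded in the entry, and container_of()
 * recovers the entry from the node. A self-contained userspace sketch
 * of the same pattern (names illustrative):
 */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct subsys { const char *name; struct list_head sibling; };

static void list_add_tail(struct list_head *n, struct list_head *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

int main(void)
{
	struct list_head subsys_list = { &subsys_list, &subsys_list };
	struct subsys a = { "cpu", { 0, 0 } }, b = { "memory", { 0, 0 } };
	struct list_head *pos;

	list_add_tail(&a.sibling, &subsys_list);
	list_add_tail(&b.sibling, &subsys_list);

	/* equivalent of for_each_subsys(root, ss) */
	for (pos = subsys_list.next; pos != &subsys_list; pos = pos->next)
		printf("%s\n", container_of(pos, struct subsys, sibling)->name);
	return 0;
}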
169 | |||
170 | /* the list of cgroups eligible for automatic release. Protected by | ||
171 | * release_list_lock */ | ||
172 | static LIST_HEAD(release_list); | ||
173 | static DEFINE_SPINLOCK(release_list_lock); | ||
174 | static void cgroup_release_agent(struct work_struct *work); | ||
175 | static DECLARE_WORK(release_agent_work, cgroup_release_agent); | ||
176 | static void check_for_release(struct cgroup *cgrp); | ||
177 | |||
178 | /* Link structure for associating css_set objects with cgroups */ | ||
179 | struct cg_cgroup_link { | ||
180 | /* | ||
181 | * List running through cg_cgroup_links associated with a | ||
182 | * cgroup, anchored on cgroup->css_sets | ||
183 | */ | ||
184 | struct list_head cgrp_link_list; | ||
185 | /* | ||
186 | * List running through cg_cgroup_links pointing at a | ||
187 | * single css_set object, anchored on css_set->cg_links | ||
188 | */ | ||
189 | struct list_head cg_link_list; | ||
190 | struct css_set *cg; | ||
191 | }; | ||
192 | |||
193 | /* The default css_set - used by init and its children prior to any | ||
194 | * hierarchies being mounted. It contains a pointer to the root state | ||
195 | * for each subsystem. Also used to anchor the list of css_sets. Not | ||
196 | * reference-counted, to improve performance when child cgroups | ||
197 | * haven't been created. | ||
198 | */ | ||
199 | |||
200 | static struct css_set init_css_set; | ||
201 | static struct cg_cgroup_link init_css_set_link; | ||
202 | |||
203 | /* css_set_lock protects the list of css_set objects, and the | ||
204 | * chain of tasks off each css_set. Nests outside task->alloc_lock | ||
205 | * due to cgroup_iter_start() */ | ||
206 | static DEFINE_RWLOCK(css_set_lock); | ||
207 | static int css_set_count; | ||
208 | |||
209 | /* We don't maintain the lists running through each css_set to its | ||
210 | * task until after the first call to cgroup_iter_start(). This | ||
211 | * reduces the fork()/exit() overhead for people who have cgroups | ||
212 | * compiled into their kernel but not actually in use */ | ||
213 | static int use_task_css_set_links; | ||
214 | |||
215 | /* When we create or destroy a css_set, the operation simply | ||
216 | * takes/releases a reference count on all the cgroups referenced | ||
217 | * by subsystems in this css_set. This can end up multiple-counting | ||
218 | * some cgroups, but that's OK - the ref-count is just a | ||
219 | * busy/not-busy indicator; ensuring that we only count each cgroup | ||
220 | * once would require taking a global lock to ensure that no | ||
221 | * subsystems moved between hierarchies while we were doing so. | ||
222 | * | ||
223 | * Possible TODO: decide at boot time based on the number of | ||
224 | * registered subsystems and the number of CPUs or NUMA nodes whether | ||
225 | * it's better for performance to ref-count every subsystem, or to | ||
226 | * take a global lock and only add one ref count to each hierarchy. | ||
227 | */ | ||
228 | |||
229 | /* | ||
230 | * unlink a css_set from the list and free it | ||
231 | */ | ||
232 | static void unlink_css_set(struct css_set *cg) | ||
233 | { | ||
234 | write_lock(&css_set_lock); | ||
235 | list_del(&cg->list); | ||
236 | css_set_count--; | ||
237 | while (!list_empty(&cg->cg_links)) { | ||
238 | struct cg_cgroup_link *link; | ||
239 | link = list_entry(cg->cg_links.next, | ||
240 | struct cg_cgroup_link, cg_link_list); | ||
241 | list_del(&link->cg_link_list); | ||
242 | list_del(&link->cgrp_link_list); | ||
243 | kfree(link); | ||
244 | } | ||
245 | write_unlock(&css_set_lock); | ||
246 | } | ||
247 | |||
248 | static void __release_css_set(struct kref *k, int taskexit) | ||
249 | { | ||
250 | int i; | ||
251 | struct css_set *cg = container_of(k, struct css_set, ref); | ||
252 | |||
253 | unlink_css_set(cg); | ||
254 | |||
255 | rcu_read_lock(); | ||
256 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { | ||
257 | struct cgroup *cgrp = cg->subsys[i]->cgroup; | ||
258 | if (atomic_dec_and_test(&cgrp->count) && | ||
259 | notify_on_release(cgrp)) { | ||
260 | if (taskexit) | ||
261 | set_bit(CGRP_RELEASABLE, &cgrp->flags); | ||
262 | check_for_release(cgrp); | ||
263 | } | ||
264 | } | ||
265 | rcu_read_unlock(); | ||
266 | kfree(cg); | ||
267 | } | ||
268 | |||
269 | static void release_css_set(struct kref *k) | ||
270 | { | ||
271 | __release_css_set(k, 0); | ||
272 | } | ||
273 | |||
274 | static void release_css_set_taskexit(struct kref *k) | ||
275 | { | ||
276 | __release_css_set(k, 1); | ||
277 | } | ||
278 | |||
279 | /* | ||
280 | * refcounted get/put for css_set objects | ||
281 | */ | ||
282 | static inline void get_css_set(struct css_set *cg) | ||
283 | { | ||
284 | kref_get(&cg->ref); | ||
285 | } | ||
286 | |||
287 | static inline void put_css_set(struct css_set *cg) | ||
288 | { | ||
289 | kref_put(&cg->ref, release_css_set); | ||
290 | } | ||
291 | |||
292 | static inline void put_css_set_taskexit(struct css_set *cg) | ||
293 | { | ||
294 | kref_put(&cg->ref, release_css_set_taskexit); | ||
295 | } | ||
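/*
 * The get/put helpers above follow the kernel's kref pattern: the
 * final put runs a caller-supplied release function. A runnable
 * userspace sketch with C11 atomics (structure names illustrative;
 * the real struct kref lives in <linux/kref.h>):
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdatomic.h>

struct kref { atomic_int refcount; };

static void kref_init(struct kref *k) { atomic_init(&k->refcount, 1); }
static void kref_get(struct kref *k) { atomic_fetch_add(&k->refcount, 1); }

static void kref_put(struct kref *k, void (*release)(struct kref *))
{
	if (atomic_fetch_sub(&k->refcount, 1) == 1)
		release(k);	/* we just dropped the last reference */
}

struct css_set_demo { struct kref ref; /* first member, so the cast below works */ };

static void release_demo(struct kref *k)
{
	struct css_set_demo *cg = (struct css_set_demo *)k;

	/* a real release would unlink the set first, as above */
	printf("released\n");
	free(cg);
}

int main(void)
{
	struct css_set_demo *cg = malloc(sizeof(*cg));

	if (!cg)
		return 1;
	kref_init(&cg->ref);			/* count = 1 */
	kref_get(&cg->ref);			/* count = 2 */
	kref_put(&cg->ref, release_demo);	/* count = 1 */
	kref_put(&cg->ref, release_demo);	/* count = 0 -> release */
	return 0;
}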
296 | |||
297 | /* | ||
298 | * find_existing_css_set() is a helper for | ||
299 | * find_css_set(), and checks to see whether an existing | ||
300 | * css_set is suitable. This currently walks a linked-list for | ||
301 | * simplicity; a later patch will use a hash table for better | ||
302 | * performance | ||
303 | * | ||
304 | * oldcg: the cgroup group that we're using before the cgroup | ||
305 | * transition | ||
306 | * | ||
307 | * cgrp: the cgroup that we're moving into | ||
308 | * | ||
309 | * template: location in which to build the desired set of subsystem | ||
310 | * state objects for the new cgroup group | ||
311 | */ | ||
312 | |||
313 | static struct css_set *find_existing_css_set( | ||
314 | struct css_set *oldcg, | ||
315 | struct cgroup *cgrp, | ||
316 | struct cgroup_subsys_state *template[]) | ||
317 | { | ||
318 | int i; | ||
319 | struct cgroupfs_root *root = cgrp->root; | ||
320 | struct list_head *l = &init_css_set.list; | ||
321 | |||
322 | /* Built the set of subsystem state objects that we want to | ||
323 | * see in the new css_set */ | ||
324 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { | ||
325 | if (root->subsys_bits & (1ull << i)) { | ||
326 | /* Subsystem is in this hierarchy. So we want | ||
327 | * the subsystem state from the new | ||
328 | * cgroup */ | ||
329 | template[i] = cgrp->subsys[i]; | ||
330 | } else { | ||
331 | /* Subsystem is not in this hierarchy, so we | ||
332 | * don't want to change the subsystem state */ | ||
333 | template[i] = oldcg->subsys[i]; | ||
334 | } | ||
335 | } | ||
336 | |||
337 | /* Look through existing cgroup groups to find one to reuse */ | ||
338 | do { | ||
339 | struct css_set *cg = | ||
340 | list_entry(l, struct css_set, list); | ||
341 | |||
342 | if (!memcmp(template, cg->subsys, sizeof(cg->subsys))) { | ||
343 | /* All subsystems matched */ | ||
344 | return cg; | ||
345 | } | ||
346 | /* Try the next cgroup group */ | ||
347 | l = l->next; | ||
348 | } while (l != &init_css_set.list); | ||
349 | |||
350 | /* No existing cgroup group matched */ | ||
351 | return NULL; | ||
352 | } | ||
353 | |||
354 | /* | ||
355 | * allocate_cg_links() allocates "count" cg_cgroup_link structures | ||
356 | * and chains them on tmp through their cgrp_link_list fields. Returns 0 on | ||
357 | * success or a negative error | ||
358 | */ | ||
359 | |||
360 | static int allocate_cg_links(int count, struct list_head *tmp) | ||
361 | { | ||
362 | struct cg_cgroup_link *link; | ||
363 | int i; | ||
364 | INIT_LIST_HEAD(tmp); | ||
365 | for (i = 0; i < count; i++) { | ||
366 | link = kmalloc(sizeof(*link), GFP_KERNEL); | ||
367 | if (!link) { | ||
368 | while (!list_empty(tmp)) { | ||
369 | link = list_entry(tmp->next, | ||
370 | struct cg_cgroup_link, | ||
371 | cgrp_link_list); | ||
372 | list_del(&link->cgrp_link_list); | ||
373 | kfree(link); | ||
374 | } | ||
375 | return -ENOMEM; | ||
376 | } | ||
377 | list_add(&link->cgrp_link_list, tmp); | ||
378 | } | ||
379 | return 0; | ||
380 | } | ||
381 | |||
382 | static void free_cg_links(struct list_head *tmp) | ||
383 | { | ||
384 | while (!list_empty(tmp)) { | ||
385 | struct cg_cgroup_link *link; | ||
386 | link = list_entry(tmp->next, | ||
387 | struct cg_cgroup_link, | ||
388 | cgrp_link_list); | ||
389 | list_del(&link->cgrp_link_list); | ||
390 | kfree(link); | ||
391 | } | ||
392 | } | ||
393 | |||
394 | /* | ||
395 | * find_css_set() takes an existing cgroup group and a | ||
396 | * cgroup object, and returns a css_set object that's | ||
397 | * equivalent to the old group, but with the given cgroup | ||
398 | * substituted into the appropriate hierarchy. Must be called with | ||
399 | * cgroup_mutex held | ||
400 | */ | ||
401 | |||
402 | static struct css_set *find_css_set( | ||
403 | struct css_set *oldcg, struct cgroup *cgrp) | ||
404 | { | ||
405 | struct css_set *res; | ||
406 | struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT]; | ||
407 | int i; | ||
408 | |||
409 | struct list_head tmp_cg_links; | ||
410 | struct cg_cgroup_link *link; | ||
411 | |||
412 | /* First see if we already have a cgroup group that matches | ||
413 | * the desired set */ | ||
414 | write_lock(&css_set_lock); | ||
415 | res = find_existing_css_set(oldcg, cgrp, template); | ||
416 | if (res) | ||
417 | get_css_set(res); | ||
418 | write_unlock(&css_set_lock); | ||
419 | |||
420 | if (res) | ||
421 | return res; | ||
422 | |||
423 | res = kmalloc(sizeof(*res), GFP_KERNEL); | ||
424 | if (!res) | ||
425 | return NULL; | ||
426 | |||
427 | /* Allocate all the cg_cgroup_link objects that we'll need */ | ||
428 | if (allocate_cg_links(root_count, &tmp_cg_links) < 0) { | ||
429 | kfree(res); | ||
430 | return NULL; | ||
431 | } | ||
432 | |||
433 | kref_init(&res->ref); | ||
434 | INIT_LIST_HEAD(&res->cg_links); | ||
435 | INIT_LIST_HEAD(&res->tasks); | ||
436 | |||
437 | /* Copy the set of subsystem state objects generated in | ||
438 | * find_existing_css_set() */ | ||
439 | memcpy(res->subsys, template, sizeof(res->subsys)); | ||
440 | |||
441 | write_lock(&css_set_lock); | ||
442 | /* Add reference counts and links from the new css_set. */ | ||
443 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { | ||
444 | struct cgroup *cgrp = res->subsys[i]->cgroup; | ||
445 | struct cgroup_subsys *ss = subsys[i]; | ||
446 | atomic_inc(&cgrp->count); | ||
447 | /* | ||
448 | * We want to add a link once per cgroup, so we | ||
449 | * only do it for the first subsystem in each | ||
450 | * hierarchy | ||
451 | */ | ||
452 | if (ss->root->subsys_list.next == &ss->sibling) { | ||
453 | BUG_ON(list_empty(&tmp_cg_links)); | ||
454 | link = list_entry(tmp_cg_links.next, | ||
455 | struct cg_cgroup_link, | ||
456 | cgrp_link_list); | ||
457 | list_del(&link->cgrp_link_list); | ||
458 | list_add(&link->cgrp_link_list, &cgrp->css_sets); | ||
459 | link->cg = res; | ||
460 | list_add(&link->cg_link_list, &res->cg_links); | ||
461 | } | ||
462 | } | ||
463 | if (list_empty(&rootnode.subsys_list)) { | ||
464 | link = list_entry(tmp_cg_links.next, | ||
465 | struct cg_cgroup_link, | ||
466 | cgrp_link_list); | ||
467 | list_del(&link->cgrp_link_list); | ||
468 | list_add(&link->cgrp_link_list, &dummytop->css_sets); | ||
469 | link->cg = res; | ||
470 | list_add(&link->cg_link_list, &res->cg_links); | ||
471 | } | ||
472 | |||
473 | BUG_ON(!list_empty(&tmp_cg_links)); | ||
474 | |||
475 | /* Link this cgroup group into the list */ | ||
476 | list_add(&res->list, &init_css_set.list); | ||
477 | css_set_count++; | ||
479 | write_unlock(&css_set_lock); | ||
480 | |||
481 | return res; | ||
482 | } | ||
483 | |||
484 | /* | ||
485 | * There is one global cgroup mutex. We also require taking | ||
486 | * task_lock() when dereferencing a task's cgroup subsys pointers. | ||
487 | * See "The task_lock() exception", at the end of this comment. | ||
488 | * | ||
489 | * A task must hold cgroup_mutex to modify cgroups. | ||
490 | * | ||
491 | * Any task can increment and decrement the count field without lock. | ||
492 | * So in general, code holding cgroup_mutex can't rely on the count | ||
493 | * field not changing. However, if the count goes to zero, then only | ||
494 | * attach_task() can increment it again. Because a count of zero | ||
495 | * means that no tasks are currently attached, therefore there is no | ||
496 | * way a task attached to that cgroup can fork (the other way to | ||
497 | * increment the count). So code holding cgroup_mutex can safely | ||
498 | * assume that if the count is zero, it will stay zero. Similarly, if | ||
499 | * a task holds cgroup_mutex on a cgroup with zero count, it | ||
500 | * knows that the cgroup won't be removed, as cgroup_rmdir() | ||
501 | * needs that mutex. | ||
502 | * | ||
503 | * The cgroup_common_file_write handler for operations that modify | ||
504 | * the cgroup hierarchy holds cgroup_mutex across the entire operation, | ||
505 | * single threading all such cgroup modifications across the system. | ||
506 | * | ||
507 | * The fork and exit callbacks cgroup_fork() and cgroup_exit(), don't | ||
508 | * (usually) take cgroup_mutex. These are the two most performance | ||
509 | * critical pieces of code here. The exception occurs on cgroup_exit(), | ||
510 | * when a task in a notify_on_release cgroup exits. Then cgroup_mutex | ||
511 | * is taken, and if the cgroup count is zero, a usermode call made | ||
512 | * to /sbin/cgroup_release_agent with the name of the cgroup (path | ||
513 | * relative to the root of cgroup file system) as the argument. | ||
514 | * | ||
515 | * A cgroup can only be deleted if both its 'count' of using tasks | ||
516 | * is zero, and its list of 'children' cgroups is empty. Since all | ||
517 | * tasks in the system use _some_ cgroup, and since there is always at | ||
518 | * least one task in the system (init, pid == 1), therefore, top_cgroup | ||
519 | * always has either children cgroups and/or using tasks. So we don't | ||
520 | * need a special hack to ensure that top_cgroup cannot be deleted. | ||
521 | * | ||
522 | * The task_lock() exception | ||
523 | * | ||
524 | * The need for this exception arises from the action of | ||
525 | * attach_task(), which overwrites one task's cgroup pointer with | ||
526 | * another. It does so using cgroup_mutex; however, there are | ||
527 | * several performance-critical places that need to reference | ||
528 | * task->cgroup without the expense of grabbing a system-global | ||
529 | * mutex. Therefore, except as noted below, when dereferencing or, as | ||
530 | * in attach_task(), modifying a task's cgroup pointer we use | ||
531 | * task_lock(), which acts on a spinlock (task->alloc_lock) already in | ||
532 | * the task_struct routinely used for such matters. | ||
533 | * | ||
534 | * P.S. One more locking exception: RCU is used to guard the | ||
535 | * update of a task's cgroup pointer by attach_task(). | ||
536 | */ | ||
537 | |||
538 | /** | ||
539 | * cgroup_lock - lock out any changes to cgroup structures | ||
540 | * | ||
541 | */ | ||
542 | |||
543 | void cgroup_lock(void) | ||
544 | { | ||
545 | mutex_lock(&cgroup_mutex); | ||
546 | } | ||
547 | |||
548 | /** | ||
549 | * cgroup_unlock - release lock on cgroup changes | ||
550 | * | ||
551 | * Undo the lock taken in a previous cgroup_lock() call. | ||
552 | */ | ||
553 | |||
554 | void cgroup_unlock(void) | ||
555 | { | ||
556 | mutex_unlock(&cgroup_mutex); | ||
557 | } | ||
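/*
 * Kernel-context sketch, not compilable on its own: the intended
 * caller pattern for the two helpers above. The function name is
 * hypothetical.
 */
void example_hierarchy_update(void)
{
	cgroup_lock();
	/* ... inspect or modify cgroup structures safely ... */
	cgroup_unlock();
}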
558 | |||
559 | /* | ||
560 | * A couple of forward declarations are required, due to the cyclic reference loop: | ||
561 | * cgroup_mkdir -> cgroup_create -> cgroup_populate_dir -> | ||
562 | * cgroup_add_file -> cgroup_create_file -> cgroup_dir_inode_operations | ||
563 | * -> cgroup_mkdir. | ||
564 | */ | ||
565 | |||
566 | static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode); | ||
567 | static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry); | ||
568 | static int cgroup_populate_dir(struct cgroup *cgrp); | ||
569 | static struct inode_operations cgroup_dir_inode_operations; | ||
570 | static struct file_operations proc_cgroupstats_operations; | ||
571 | |||
572 | static struct backing_dev_info cgroup_backing_dev_info = { | ||
573 | .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK, | ||
574 | }; | ||
575 | |||
576 | static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb) | ||
577 | { | ||
578 | struct inode *inode = new_inode(sb); | ||
579 | |||
580 | if (inode) { | ||
581 | inode->i_mode = mode; | ||
582 | inode->i_uid = current->fsuid; | ||
583 | inode->i_gid = current->fsgid; | ||
584 | inode->i_blocks = 0; | ||
585 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; | ||
586 | inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info; | ||
587 | } | ||
588 | return inode; | ||
589 | } | ||
590 | |||
591 | static void cgroup_diput(struct dentry *dentry, struct inode *inode) | ||
592 | { | ||
593 | /* is dentry a directory? if so, kfree() the associated cgroup */ | ||
594 | if (S_ISDIR(inode->i_mode)) { | ||
595 | struct cgroup *cgrp = dentry->d_fsdata; | ||
596 | BUG_ON(!(cgroup_is_removed(cgrp))); | ||
597 | /* It's possible for external users to be holding css | ||
598 | * reference counts on a cgroup; css_put() needs to | ||
599 | * be able to access the cgroup after decrementing | ||
600 | * the reference count in order to know if it needs to | ||
601 | * queue the cgroup to be handled by the release | ||
602 | * agent */ | ||
603 | synchronize_rcu(); | ||
604 | kfree(cgrp); | ||
605 | } | ||
606 | iput(inode); | ||
607 | } | ||
608 | |||
609 | static void remove_dir(struct dentry *d) | ||
610 | { | ||
611 | struct dentry *parent = dget(d->d_parent); | ||
612 | |||
613 | d_delete(d); | ||
614 | simple_rmdir(parent->d_inode, d); | ||
615 | dput(parent); | ||
616 | } | ||
617 | |||
618 | static void cgroup_clear_directory(struct dentry *dentry) | ||
619 | { | ||
620 | struct list_head *node; | ||
621 | |||
622 | BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex)); | ||
623 | spin_lock(&dcache_lock); | ||
624 | node = dentry->d_subdirs.next; | ||
625 | while (node != &dentry->d_subdirs) { | ||
626 | struct dentry *d = list_entry(node, struct dentry, d_u.d_child); | ||
627 | list_del_init(node); | ||
628 | if (d->d_inode) { | ||
629 | /* This should never be called on a cgroup | ||
630 | * directory with child cgroups */ | ||
631 | BUG_ON(d->d_inode->i_mode & S_IFDIR); | ||
632 | d = dget_locked(d); | ||
633 | spin_unlock(&dcache_lock); | ||
634 | d_delete(d); | ||
635 | simple_unlink(dentry->d_inode, d); | ||
636 | dput(d); | ||
637 | spin_lock(&dcache_lock); | ||
638 | } | ||
639 | node = dentry->d_subdirs.next; | ||
640 | } | ||
641 | spin_unlock(&dcache_lock); | ||
642 | } | ||
643 | |||
644 | /* | ||
645 | * NOTE: the dentry must have been dget()'ed | ||
646 | */ | ||
647 | static void cgroup_d_remove_dir(struct dentry *dentry) | ||
648 | { | ||
649 | cgroup_clear_directory(dentry); | ||
650 | |||
651 | spin_lock(&dcache_lock); | ||
652 | list_del_init(&dentry->d_u.d_child); | ||
653 | spin_unlock(&dcache_lock); | ||
654 | remove_dir(dentry); | ||
655 | } | ||
656 | |||
657 | static int rebind_subsystems(struct cgroupfs_root *root, | ||
658 | unsigned long final_bits) | ||
659 | { | ||
660 | unsigned long added_bits, removed_bits; | ||
661 | struct cgroup *cgrp = &root->top_cgroup; | ||
662 | int i; | ||
663 | |||
664 | removed_bits = root->actual_subsys_bits & ~final_bits; | ||
665 | added_bits = final_bits & ~root->actual_subsys_bits; | ||
666 | /* Check that any added subsystems are currently free */ | ||
667 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { | ||
668 | unsigned long long bit = 1ull << i; | ||
669 | struct cgroup_subsys *ss = subsys[i]; | ||
670 | if (!(bit & added_bits)) | ||
671 | continue; | ||
672 | if (ss->root != &rootnode) { | ||
673 | /* Subsystem isn't free */ | ||
674 | return -EBUSY; | ||
675 | } | ||
676 | } | ||
677 | |||
678 | /* Currently we don't handle adding/removing subsystems when | ||
679 | * any child cgroups exist. This is theoretically supportable | ||
680 | * but involves complex error handling, so it's being left until | ||
681 | * later */ | ||
682 | if (!list_empty(&cgrp->children)) | ||
683 | return -EBUSY; | ||
684 | |||
685 | /* Process each subsystem */ | ||
686 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { | ||
687 | struct cgroup_subsys *ss = subsys[i]; | ||
688 | unsigned long bit = 1UL << i; | ||
689 | if (bit & added_bits) { | ||
690 | /* We're binding this subsystem to this hierarchy */ | ||
691 | BUG_ON(cgrp->subsys[i]); | ||
692 | BUG_ON(!dummytop->subsys[i]); | ||
693 | BUG_ON(dummytop->subsys[i]->cgroup != dummytop); | ||
694 | cgrp->subsys[i] = dummytop->subsys[i]; | ||
695 | cgrp->subsys[i]->cgroup = cgrp; | ||
696 | list_add(&ss->sibling, &root->subsys_list); | ||
697 | rcu_assign_pointer(ss->root, root); | ||
698 | if (ss->bind) | ||
699 | ss->bind(ss, cgrp); | ||
700 | |||
701 | } else if (bit & removed_bits) { | ||
702 | /* We're removing this subsystem */ | ||
703 | BUG_ON(cgrp->subsys[i] != dummytop->subsys[i]); | ||
704 | BUG_ON(cgrp->subsys[i]->cgroup != cgrp); | ||
705 | if (ss->bind) | ||
706 | ss->bind(ss, dummytop); | ||
707 | dummytop->subsys[i]->cgroup = dummytop; | ||
708 | cgrp->subsys[i] = NULL; | ||
709 | rcu_assign_pointer(subsys[i]->root, &rootnode); | ||
710 | list_del(&ss->sibling); | ||
711 | } else if (bit & final_bits) { | ||
712 | /* Subsystem state should already exist */ | ||
713 | BUG_ON(!cgrp->subsys[i]); | ||
714 | } else { | ||
715 | /* Subsystem state shouldn't exist */ | ||
716 | BUG_ON(cgrp->subsys[i]); | ||
717 | } | ||
718 | } | ||
719 | root->subsys_bits = root->actual_subsys_bits = final_bits; | ||
720 | synchronize_rcu(); | ||
721 | |||
722 | return 0; | ||
723 | } | ||
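/*
 * A tiny runnable illustration of the bitmask bookkeeping at the top
 * of rebind_subsystems(): which subsystems a remount adds and which it
 * removes. The values are arbitrary examples.
 */
#include <stdio.h>

int main(void)
{
	unsigned long actual = 0x5;	/* bound now: subsystems 0 and 2 */
	unsigned long final  = 0x6;	/* wanted:    subsystems 1 and 2 */
	unsigned long removed_bits = actual & ~final;
	unsigned long added_bits = final & ~actual;

	/* prints "added 0x2, removed 0x1" */
	printf("added 0x%lx, removed 0x%lx\n", added_bits, removed_bits);
	return 0;
}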
724 | |||
725 | static int cgroup_show_options(struct seq_file *seq, struct vfsmount *vfs) | ||
726 | { | ||
727 | struct cgroupfs_root *root = vfs->mnt_sb->s_fs_info; | ||
728 | struct cgroup_subsys *ss; | ||
729 | |||
730 | mutex_lock(&cgroup_mutex); | ||
731 | for_each_subsys(root, ss) | ||
732 | seq_printf(seq, ",%s", ss->name); | ||
733 | if (test_bit(ROOT_NOPREFIX, &root->flags)) | ||
734 | seq_puts(seq, ",noprefix"); | ||
735 | if (strlen(root->release_agent_path)) | ||
736 | seq_printf(seq, ",release_agent=%s", root->release_agent_path); | ||
737 | mutex_unlock(&cgroup_mutex); | ||
738 | return 0; | ||
739 | } | ||
740 | |||
741 | struct cgroup_sb_opts { | ||
742 | unsigned long subsys_bits; | ||
743 | unsigned long flags; | ||
744 | char *release_agent; | ||
745 | }; | ||
746 | |||
747 | /* Convert a hierarchy specifier into a bitmask of subsystems and | ||
748 | * flags. */ | ||
749 | static int parse_cgroupfs_options(char *data, | ||
750 | struct cgroup_sb_opts *opts) | ||
751 | { | ||
752 | char *token, *o = data ?: "all"; | ||
753 | |||
754 | opts->subsys_bits = 0; | ||
755 | opts->flags = 0; | ||
756 | opts->release_agent = NULL; | ||
757 | |||
758 | while ((token = strsep(&o, ",")) != NULL) { | ||
759 | if (!*token) | ||
760 | return -EINVAL; | ||
761 | if (!strcmp(token, "all")) { | ||
762 | opts->subsys_bits = (1 << CGROUP_SUBSYS_COUNT) - 1; | ||
763 | } else if (!strcmp(token, "noprefix")) { | ||
764 | set_bit(ROOT_NOPREFIX, &opts->flags); | ||
765 | } else if (!strncmp(token, "release_agent=", 14)) { | ||
766 | /* Specifying two release agents is forbidden */ | ||
767 | if (opts->release_agent) | ||
768 | return -EINVAL; | ||
769 | opts->release_agent = kzalloc(PATH_MAX, GFP_KERNEL); | ||
770 | if (!opts->release_agent) | ||
771 | return -ENOMEM; | ||
772 | strncpy(opts->release_agent, token + 14, PATH_MAX - 1); | ||
773 | opts->release_agent[PATH_MAX - 1] = 0; | ||
774 | } else { | ||
775 | struct cgroup_subsys *ss; | ||
776 | int i; | ||
777 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { | ||
778 | ss = subsys[i]; | ||
779 | if (!strcmp(token, ss->name)) { | ||
780 | set_bit(i, &opts->subsys_bits); | ||
781 | break; | ||
782 | } | ||
783 | } | ||
784 | if (i == CGROUP_SUBSYS_COUNT) | ||
785 | return -ENOENT; | ||
786 | } | ||
787 | } | ||
788 | |||
789 | /* We can't have an empty hierarchy */ | ||
790 | if (!opts->subsys_bits) | ||
791 | return -EINVAL; | ||
792 | |||
793 | return 0; | ||
794 | } | ||
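/*
 * Self-contained sketch of the strsep() walk used by
 * parse_cgroupfs_options(), reduced to printing what each token would
 * select. The sample option string is an assumption.
 */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>

int main(void)
{
	char data[] = "cpuset,noprefix,release_agent=/sbin/agent";
	char *o = data, *token;

	while ((token = strsep(&o, ",")) != NULL) {
		if (!*token)
			return 1;		/* empty token: -EINVAL above */
		if (!strcmp(token, "noprefix"))
			printf("flag: noprefix\n");
		else if (!strncmp(token, "release_agent=", 14))
			printf("release agent: %s\n", token + 14);
		else
			printf("subsystem: %s\n", token);
	}
	return 0;
}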
795 | |||
796 | static int cgroup_remount(struct super_block *sb, int *flags, char *data) | ||
797 | { | ||
798 | int ret = 0; | ||
799 | struct cgroupfs_root *root = sb->s_fs_info; | ||
800 | struct cgroup *cgrp = &root->top_cgroup; | ||
801 | struct cgroup_sb_opts opts; | ||
802 | |||
803 | mutex_lock(&cgrp->dentry->d_inode->i_mutex); | ||
804 | mutex_lock(&cgroup_mutex); | ||
805 | |||
806 | /* See what subsystems are wanted */ | ||
807 | ret = parse_cgroupfs_options(data, &opts); | ||
808 | if (ret) | ||
809 | goto out_unlock; | ||
810 | |||
811 | /* Don't allow flags to change at remount */ | ||
812 | if (opts.flags != root->flags) { | ||
813 | ret = -EINVAL; | ||
814 | goto out_unlock; | ||
815 | } | ||
816 | |||
817 | ret = rebind_subsystems(root, opts.subsys_bits); | ||
818 | |||
819 | /* (re)populate subsystem files */ | ||
820 | if (!ret) | ||
821 | cgroup_populate_dir(cgrp); | ||
822 | |||
823 | if (opts.release_agent) | ||
824 | strcpy(root->release_agent_path, opts.release_agent); | ||
825 | out_unlock: | ||
826 | if (opts.release_agent) | ||
827 | kfree(opts.release_agent); | ||
828 | mutex_unlock(&cgroup_mutex); | ||
829 | mutex_unlock(&cgrp->dentry->d_inode->i_mutex); | ||
830 | return ret; | ||
831 | } | ||
832 | |||
833 | static struct super_operations cgroup_ops = { | ||
834 | .statfs = simple_statfs, | ||
835 | .drop_inode = generic_delete_inode, | ||
836 | .show_options = cgroup_show_options, | ||
837 | .remount_fs = cgroup_remount, | ||
838 | }; | ||
839 | |||
840 | static void init_cgroup_root(struct cgroupfs_root *root) | ||
841 | { | ||
842 | struct cgroup *cgrp = &root->top_cgroup; | ||
843 | INIT_LIST_HEAD(&root->subsys_list); | ||
844 | INIT_LIST_HEAD(&root->root_list); | ||
845 | root->number_of_cgroups = 1; | ||
846 | cgrp->root = root; | ||
847 | cgrp->top_cgroup = cgrp; | ||
848 | INIT_LIST_HEAD(&cgrp->sibling); | ||
849 | INIT_LIST_HEAD(&cgrp->children); | ||
850 | INIT_LIST_HEAD(&cgrp->css_sets); | ||
851 | INIT_LIST_HEAD(&cgrp->release_list); | ||
852 | } | ||
853 | |||
854 | static int cgroup_test_super(struct super_block *sb, void *data) | ||
855 | { | ||
856 | struct cgroupfs_root *new = data; | ||
857 | struct cgroupfs_root *root = sb->s_fs_info; | ||
858 | |||
859 | /* First check subsystems */ | ||
860 | if (new->subsys_bits != root->subsys_bits) | ||
861 | return 0; | ||
862 | |||
863 | /* Next check flags */ | ||
864 | if (new->flags != root->flags) | ||
865 | return 0; | ||
866 | |||
867 | return 1; | ||
868 | } | ||
869 | |||
870 | static int cgroup_set_super(struct super_block *sb, void *data) | ||
871 | { | ||
872 | int ret; | ||
873 | struct cgroupfs_root *root = data; | ||
874 | |||
875 | ret = set_anon_super(sb, NULL); | ||
876 | if (ret) | ||
877 | return ret; | ||
878 | |||
879 | sb->s_fs_info = root; | ||
880 | root->sb = sb; | ||
881 | |||
882 | sb->s_blocksize = PAGE_CACHE_SIZE; | ||
883 | sb->s_blocksize_bits = PAGE_CACHE_SHIFT; | ||
884 | sb->s_magic = CGROUP_SUPER_MAGIC; | ||
885 | sb->s_op = &cgroup_ops; | ||
886 | |||
887 | return 0; | ||
888 | } | ||
889 | |||
890 | static int cgroup_get_rootdir(struct super_block *sb) | ||
891 | { | ||
892 | struct inode *inode = | ||
893 | cgroup_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR, sb); | ||
894 | struct dentry *dentry; | ||
895 | |||
896 | if (!inode) | ||
897 | return -ENOMEM; | ||
898 | |||
899 | inode->i_op = &cgroup_dir_inode_operations; | ||
900 | inode->i_fop = &simple_dir_operations; | ||
902 | /* directories start off with i_nlink == 2 (for "." entry) */ | ||
903 | inc_nlink(inode); | ||
904 | dentry = d_alloc_root(inode); | ||
905 | if (!dentry) { | ||
906 | iput(inode); | ||
907 | return -ENOMEM; | ||
908 | } | ||
909 | sb->s_root = dentry; | ||
910 | return 0; | ||
911 | } | ||
912 | |||
913 | static int cgroup_get_sb(struct file_system_type *fs_type, | ||
914 | int flags, const char *unused_dev_name, | ||
915 | void *data, struct vfsmount *mnt) | ||
916 | { | ||
917 | struct cgroup_sb_opts opts; | ||
918 | int ret = 0; | ||
919 | struct super_block *sb; | ||
920 | struct cgroupfs_root *root; | ||
921 | struct list_head tmp_cg_links, *l; | ||
922 | INIT_LIST_HEAD(&tmp_cg_links); | ||
923 | |||
924 | /* First find the desired set of subsystems */ | ||
925 | ret = parse_cgroupfs_options(data, &opts); | ||
926 | if (ret) { | ||
927 | if (opts.release_agent) | ||
928 | kfree(opts.release_agent); | ||
929 | return ret; | ||
930 | } | ||
931 | |||
932 | root = kzalloc(sizeof(*root), GFP_KERNEL); | ||
933 | if (!root) | ||
934 | return -ENOMEM; | ||
935 | |||
936 | init_cgroup_root(root); | ||
937 | root->subsys_bits = opts.subsys_bits; | ||
938 | root->flags = opts.flags; | ||
939 | if (opts.release_agent) { | ||
940 | strcpy(root->release_agent_path, opts.release_agent); | ||
941 | kfree(opts.release_agent); | ||
942 | } | ||
943 | |||
944 | sb = sget(fs_type, cgroup_test_super, cgroup_set_super, root); | ||
945 | |||
946 | if (IS_ERR(sb)) { | ||
947 | kfree(root); | ||
948 | return PTR_ERR(sb); | ||
949 | } | ||
950 | |||
951 | if (sb->s_fs_info != root) { | ||
952 | /* Reusing an existing superblock */ | ||
953 | BUG_ON(sb->s_root == NULL); | ||
954 | kfree(root); | ||
955 | root = NULL; | ||
956 | } else { | ||
957 | /* New superblock */ | ||
958 | struct cgroup *cgrp = &root->top_cgroup; | ||
959 | struct inode *inode; | ||
960 | |||
961 | BUG_ON(sb->s_root != NULL); | ||
962 | |||
963 | ret = cgroup_get_rootdir(sb); | ||
964 | if (ret) | ||
965 | goto drop_new_super; | ||
966 | inode = sb->s_root->d_inode; | ||
967 | |||
968 | mutex_lock(&inode->i_mutex); | ||
969 | mutex_lock(&cgroup_mutex); | ||
970 | |||
971 | /* | ||
972 | * We're accessing css_set_count without locking | ||
973 | * css_set_lock here, but that's OK - it can only be | ||
974 | * increased by someone holding cgroup_lock, and | ||
975 | * that's us. The worst that can happen is that we | ||
976 | * have some link structures left over | ||
977 | */ | ||
978 | ret = allocate_cg_links(css_set_count, &tmp_cg_links); | ||
979 | if (ret) { | ||
980 | mutex_unlock(&cgroup_mutex); | ||
981 | mutex_unlock(&inode->i_mutex); | ||
982 | goto drop_new_super; | ||
983 | } | ||
984 | |||
985 | ret = rebind_subsystems(root, root->subsys_bits); | ||
986 | if (ret == -EBUSY) { | ||
987 | mutex_unlock(&cgroup_mutex); | ||
988 | mutex_unlock(&inode->i_mutex); | ||
989 | goto drop_new_super; | ||
990 | } | ||
991 | |||
992 | /* EBUSY should be the only error here */ | ||
993 | BUG_ON(ret); | ||
994 | |||
995 | list_add(&root->root_list, &roots); | ||
996 | root_count++; | ||
997 | |||
998 | sb->s_root->d_fsdata = &root->top_cgroup; | ||
999 | root->top_cgroup.dentry = sb->s_root; | ||
1000 | |||
1001 | /* Link the top cgroup in this hierarchy into all | ||
1002 | * the css_set objects */ | ||
1003 | write_lock(&css_set_lock); | ||
1004 | l = &init_css_set.list; | ||
1005 | do { | ||
1006 | struct css_set *cg; | ||
1007 | struct cg_cgroup_link *link; | ||
1008 | cg = list_entry(l, struct css_set, list); | ||
1009 | BUG_ON(list_empty(&tmp_cg_links)); | ||
1010 | link = list_entry(tmp_cg_links.next, | ||
1011 | struct cg_cgroup_link, | ||
1012 | cgrp_link_list); | ||
1013 | list_del(&link->cgrp_link_list); | ||
1014 | link->cg = cg; | ||
1015 | list_add(&link->cgrp_link_list, | ||
1016 | &root->top_cgroup.css_sets); | ||
1017 | list_add(&link->cg_link_list, &cg->cg_links); | ||
1018 | l = l->next; | ||
1019 | } while (l != &init_css_set.list); | ||
1020 | write_unlock(&css_set_lock); | ||
1021 | |||
1022 | free_cg_links(&tmp_cg_links); | ||
1023 | |||
1024 | BUG_ON(!list_empty(&cgrp->sibling)); | ||
1025 | BUG_ON(!list_empty(&cgrp->children)); | ||
1026 | BUG_ON(root->number_of_cgroups != 1); | ||
1027 | |||
1028 | cgroup_populate_dir(cgrp); | ||
1029 | mutex_unlock(&inode->i_mutex); | ||
1030 | mutex_unlock(&cgroup_mutex); | ||
1031 | } | ||
1032 | |||
1033 | return simple_set_mnt(mnt, sb); | ||
1034 | |||
1035 | drop_new_super: | ||
1036 | up_write(&sb->s_umount); | ||
1037 | deactivate_super(sb); | ||
1038 | free_cg_links(&tmp_cg_links); | ||
1039 | return ret; | ||
1040 | } | ||
1041 | |||
1042 | static void cgroup_kill_sb(struct super_block *sb) | ||
     | { | ||
1043 | struct cgroupfs_root *root = sb->s_fs_info; | ||
1044 | struct cgroup *cgrp = &root->top_cgroup; | ||
1045 | int ret; | ||
1046 | |||
1047 | BUG_ON(!root); | ||
1048 | |||
1049 | BUG_ON(root->number_of_cgroups != 1); | ||
1050 | BUG_ON(!list_empty(&cgrp->children)); | ||
1051 | BUG_ON(!list_empty(&cgrp->sibling)); | ||
1052 | |||
1053 | mutex_lock(&cgroup_mutex); | ||
1054 | |||
1055 | /* Rebind all subsystems back to the default hierarchy */ | ||
1056 | ret = rebind_subsystems(root, 0); | ||
1057 | /* Shouldn't be able to fail ... */ | ||
1058 | BUG_ON(ret); | ||
1059 | |||
1060 | /* | ||
1061 | * Release all the links from css_sets to this hierarchy's | ||
1062 | * root cgroup | ||
1063 | */ | ||
1064 | write_lock(&css_set_lock); | ||
1065 | while (!list_empty(&cgrp->css_sets)) { | ||
1066 | struct cg_cgroup_link *link; | ||
1067 | link = list_entry(cgrp->css_sets.next, | ||
1068 | struct cg_cgroup_link, cgrp_link_list); | ||
1069 | list_del(&link->cg_link_list); | ||
1070 | list_del(&link->cgrp_link_list); | ||
1071 | kfree(link); | ||
1072 | } | ||
1073 | write_unlock(&css_set_lock); | ||
1074 | |||
1075 | if (!list_empty(&root->root_list)) { | ||
1076 | list_del(&root->root_list); | ||
1077 | root_count--; | ||
1078 | } | ||
1079 | mutex_unlock(&cgroup_mutex); | ||
1080 | |||
1081 | kfree(root); | ||
1082 | kill_litter_super(sb); | ||
1083 | } | ||
1084 | |||
1085 | static struct file_system_type cgroup_fs_type = { | ||
1086 | .name = "cgroup", | ||
1087 | .get_sb = cgroup_get_sb, | ||
1088 | .kill_sb = cgroup_kill_sb, | ||
1089 | }; | ||
1090 | |||
1091 | static inline struct cgroup *__d_cgrp(struct dentry *dentry) | ||
1092 | { | ||
1093 | return dentry->d_fsdata; | ||
1094 | } | ||
1095 | |||
1096 | static inline struct cftype *__d_cft(struct dentry *dentry) | ||
1097 | { | ||
1098 | return dentry->d_fsdata; | ||
1099 | } | ||
1100 | |||
1101 | /* | ||
1102 | * Called with cgroup_mutex held. Writes path of cgroup into buf. | ||
1103 | * Returns 0 on success, -errno on error. | ||
1104 | */ | ||
1105 | int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen) | ||
1106 | { | ||
1107 | char *start; | ||
1108 | |||
1109 | if (cgrp == dummytop) { | ||
1110 | /* | ||
1111 | * Inactive subsystems have no dentry for their root | ||
1112 | * cgroup | ||
1113 | */ | ||
1114 | strcpy(buf, "/"); | ||
1115 | return 0; | ||
1116 | } | ||
1117 | |||
1118 | start = buf + buflen; | ||
1119 | |||
1120 | *--start = '\0'; | ||
1121 | for (;;) { | ||
1122 | int len = cgrp->dentry->d_name.len; | ||
1123 | if ((start -= len) < buf) | ||
1124 | return -ENAMETOOLONG; | ||
1125 | memcpy(start, cgrp->dentry->d_name.name, len); | ||
1126 | cgrp = cgrp->parent; | ||
1127 | if (!cgrp) | ||
1128 | break; | ||
1129 | if (!cgrp->parent) | ||
1130 | continue; | ||
1131 | if (--start < buf) | ||
1132 | return -ENAMETOOLONG; | ||
1133 | *start = '/'; | ||
1134 | } | ||
1135 | memmove(buf, start, buf + buflen - start); | ||
1136 | return 0; | ||
1137 | } | ||
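/*
 * A runnable userspace sketch of cgroup_path()'s back-to-front buffer
 * fill, using a toy parent chain in place of dentries. As in the
 * kernel code, the root node contributes only its "/" name.
 */
#include <stdio.h>
#include <string.h>

struct node { const char *name; struct node *parent; };

static int node_path(const struct node *n, char *buf, int buflen)
{
	char *start = buf + buflen;

	*--start = '\0';
	for (;;) {
		int len = strlen(n->name);

		if ((start -= len) < buf)
			return -1;	/* -ENAMETOOLONG above */
		memcpy(start, n->name, len);
		n = n->parent;
		if (!n)
			break;
		if (!n->parent)		/* next is the root: no extra '/' */
			continue;
		if (--start < buf)
			return -1;
		*start = '/';
	}
	memmove(buf, start, buf + buflen - start);
	return 0;
}

int main(void)
{
	struct node root = { "/", NULL };
	struct node grp = { "grp", &root }, child = { "child", &grp };
	char buf[64];

	if (!node_path(&child, buf, sizeof(buf)))
		printf("%s\n", buf);	/* prints "/grp/child" */
	return 0;
}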
1138 | |||
1139 | /* | ||
1140 | * Return the first subsystem attached to a cgroup's hierarchy, and | ||
1141 | * its subsystem id. | ||
1142 | */ | ||
1143 | |||
1144 | static void get_first_subsys(const struct cgroup *cgrp, | ||
1145 | struct cgroup_subsys_state **css, int *subsys_id) | ||
1146 | { | ||
1147 | const struct cgroupfs_root *root = cgrp->root; | ||
1148 | const struct cgroup_subsys *test_ss; | ||
1149 | BUG_ON(list_empty(&root->subsys_list)); | ||
1150 | test_ss = list_entry(root->subsys_list.next, | ||
1151 | struct cgroup_subsys, sibling); | ||
1152 | if (css) { | ||
1153 | *css = cgrp->subsys[test_ss->subsys_id]; | ||
1154 | BUG_ON(!*css); | ||
1155 | } | ||
1156 | if (subsys_id) | ||
1157 | *subsys_id = test_ss->subsys_id; | ||
1158 | } | ||
1159 | |||
1160 | /* | ||
1161 | * Attach task 'tsk' to cgroup 'cgrp' | ||
1162 | * | ||
1163 | * Call holding cgroup_mutex. May take task_lock of | ||
1164 | * the task 'tsk' during call. | ||
1165 | */ | ||
1166 | static int attach_task(struct cgroup *cgrp, struct task_struct *tsk) | ||
1167 | { | ||
1168 | int retval = 0; | ||
1169 | struct cgroup_subsys *ss; | ||
1170 | struct cgroup *oldcgrp; | ||
1171 | struct css_set *cg = tsk->cgroups; | ||
1172 | struct css_set *newcg; | ||
1173 | struct cgroupfs_root *root = cgrp->root; | ||
1174 | int subsys_id; | ||
1175 | |||
1176 | get_first_subsys(cgrp, NULL, &subsys_id); | ||
1177 | |||
1178 | /* Nothing to do if the task is already in that cgroup */ | ||
1179 | oldcgrp = task_cgroup(tsk, subsys_id); | ||
1180 | if (cgrp == oldcgrp) | ||
1181 | return 0; | ||
1182 | |||
1183 | for_each_subsys(root, ss) { | ||
1184 | if (ss->can_attach) { | ||
1185 | retval = ss->can_attach(ss, cgrp, tsk); | ||
1186 | if (retval) { | ||
1187 | return retval; | ||
1188 | } | ||
1189 | } | ||
1190 | } | ||
1191 | |||
1192 | /* | ||
1193 | * Locate or allocate a new css_set for this task, | ||
1194 | * based on its final set of cgroups | ||
1195 | */ | ||
1196 | newcg = find_css_set(cg, cgrp); | ||
1197 | if (!newcg) { | ||
1198 | return -ENOMEM; | ||
1199 | } | ||
1200 | |||
1201 | task_lock(tsk); | ||
1202 | if (tsk->flags & PF_EXITING) { | ||
1203 | task_unlock(tsk); | ||
1204 | put_css_set(newcg); | ||
1205 | return -ESRCH; | ||
1206 | } | ||
1207 | rcu_assign_pointer(tsk->cgroups, newcg); | ||
1208 | task_unlock(tsk); | ||
1209 | |||
1210 | /* Update the css_set linked lists if we're using them */ | ||
1211 | write_lock(&css_set_lock); | ||
1212 | if (!list_empty(&tsk->cg_list)) { | ||
1213 | list_del(&tsk->cg_list); | ||
1214 | list_add(&tsk->cg_list, &newcg->tasks); | ||
1215 | } | ||
1216 | write_unlock(&css_set_lock); | ||
1217 | |||
1218 | for_each_subsys(root, ss) { | ||
1219 | if (ss->attach) { | ||
1220 | ss->attach(ss, cgrp, oldcgrp, tsk); | ||
1221 | } | ||
1222 | } | ||
1223 | set_bit(CGRP_RELEASABLE, &oldcgrp->flags); | ||
1224 | synchronize_rcu(); | ||
1225 | put_css_set(cg); | ||
1226 | return 0; | ||
1227 | } | ||
1228 | |||
1229 | /* | ||
1230 | * Attach task with pid 'pid' to cgroup 'cgrp'. Call with | ||
1231 | * cgroup_mutex held. May take task_lock of the task. | ||
1232 | */ | ||
1233 | static int attach_task_by_pid(struct cgroup *cgrp, char *pidbuf) | ||
1234 | { | ||
1235 | pid_t pid; | ||
1236 | struct task_struct *tsk; | ||
1237 | int ret; | ||
1238 | |||
1239 | if (sscanf(pidbuf, "%d", &pid) != 1) | ||
1240 | return -EIO; | ||
1241 | |||
1242 | if (pid) { | ||
1243 | rcu_read_lock(); | ||
1244 | tsk = find_task_by_pid(pid); | ||
1245 | if (!tsk || tsk->flags & PF_EXITING) { | ||
1246 | rcu_read_unlock(); | ||
1247 | return -ESRCH; | ||
1248 | } | ||
1249 | get_task_struct(tsk); | ||
1250 | rcu_read_unlock(); | ||
1251 | |||
1252 | if ((current->euid) && (current->euid != tsk->uid) | ||
1253 | && (current->euid != tsk->suid)) { | ||
1254 | put_task_struct(tsk); | ||
1255 | return -EACCES; | ||
1256 | } | ||
1257 | } else { | ||
1258 | tsk = current; | ||
1259 | get_task_struct(tsk); | ||
1260 | } | ||
1261 | |||
1262 | ret = attach_task(cgrp, tsk); | ||
1263 | put_task_struct(tsk); | ||
1264 | return ret; | ||
1265 | } | ||
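/*
 * How userspace reaches attach_task_by_pid(): writing a pid (or "0"
 * for the caller itself) into a cgroup's "tasks" file. The mount point
 * and group name are assumptions.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	int n, fd = open("/dev/cgroup/mygroup/tasks", O_WRONLY);

	if (fd < 0) {
		perror("open tasks");
		return 1;
	}
	n = snprintf(buf, sizeof(buf), "%d\n", (int)getpid());
	if (write(fd, buf, n) != n)
		perror("write tasks");
	close(fd);
	return 0;
}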
1266 | |||
1267 | /* The various types of files and directories in a cgroup file system */ | ||
1268 | |||
1269 | enum cgroup_filetype { | ||
1270 | FILE_ROOT, | ||
1271 | FILE_DIR, | ||
1272 | FILE_TASKLIST, | ||
1273 | FILE_NOTIFY_ON_RELEASE, | ||
1274 | FILE_RELEASABLE, | ||
1275 | FILE_RELEASE_AGENT, | ||
1276 | }; | ||
1277 | |||
1278 | static ssize_t cgroup_write_uint(struct cgroup *cgrp, struct cftype *cft, | ||
1279 | struct file *file, | ||
1280 | const char __user *userbuf, | ||
1281 | size_t nbytes, loff_t *unused_ppos) | ||
1282 | { | ||
1283 | char buffer[64]; | ||
1284 | int retval = 0; | ||
1285 | u64 val; | ||
1286 | char *end; | ||
1287 | |||
1288 | if (!nbytes) | ||
1289 | return -EINVAL; | ||
1290 | if (nbytes >= sizeof(buffer)) | ||
1291 | return -E2BIG; | ||
1292 | if (copy_from_user(buffer, userbuf, nbytes)) | ||
1293 | return -EFAULT; | ||
1294 | |||
1295 | buffer[nbytes] = 0; /* nul-terminate */ | ||
1296 | |||
1297 | /* strip newline if necessary */ | ||
1298 | if (nbytes && (buffer[nbytes-1] == '\n')) | ||
1299 | buffer[nbytes-1] = 0; | ||
1300 | val = simple_strtoull(buffer, &end, 0); | ||
1301 | if (*end) | ||
1302 | return -EINVAL; | ||
1303 | |||
1304 | /* Pass to subsystem */ | ||
1305 | retval = cft->write_uint(cgrp, cft, val); | ||
1306 | if (!retval) | ||
1307 | retval = nbytes; | ||
1308 | return retval; | ||
1309 | } | ||
1310 | |||
1311 | static ssize_t cgroup_common_file_write(struct cgroup *cgrp, | ||
1312 | struct cftype *cft, | ||
1313 | struct file *file, | ||
1314 | const char __user *userbuf, | ||
1315 | size_t nbytes, loff_t *unused_ppos) | ||
1316 | { | ||
1317 | enum cgroup_filetype type = cft->private; | ||
1318 | char *buffer; | ||
1319 | int retval = 0; | ||
1320 | |||
1321 | if (nbytes >= PATH_MAX) | ||
1322 | return -E2BIG; | ||
1323 | |||
1324 | /* +1 for nul-terminator */ | ||
1325 | buffer = kmalloc(nbytes + 1, GFP_KERNEL); | ||
1326 | if (buffer == NULL) | ||
1327 | return -ENOMEM; | ||
1328 | |||
1329 | if (copy_from_user(buffer, userbuf, nbytes)) { | ||
1330 | retval = -EFAULT; | ||
1331 | goto out1; | ||
1332 | } | ||
1333 | buffer[nbytes] = 0; /* nul-terminate */ | ||
1334 | |||
1335 | mutex_lock(&cgroup_mutex); | ||
1336 | |||
1337 | if (cgroup_is_removed(cgrp)) { | ||
1338 | retval = -ENODEV; | ||
1339 | goto out2; | ||
1340 | } | ||
1341 | |||
1342 | switch (type) { | ||
1343 | case FILE_TASKLIST: | ||
1344 | retval = attach_task_by_pid(cgrp, buffer); | ||
1345 | break; | ||
1346 | case FILE_NOTIFY_ON_RELEASE: | ||
1347 | clear_bit(CGRP_RELEASABLE, &cgrp->flags); | ||
1348 | if (simple_strtoul(buffer, NULL, 10) != 0) | ||
1349 | set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); | ||
1350 | else | ||
1351 | clear_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); | ||
1352 | break; | ||
1353 | case FILE_RELEASE_AGENT: | ||
1354 | { | ||
1355 | struct cgroupfs_root *root = cgrp->root; | ||
1356 | /* Strip trailing newline */ | ||
1357 | if (nbytes && (buffer[nbytes-1] == '\n')) { | ||
1358 | buffer[nbytes-1] = 0; | ||
1359 | } | ||
1360 | if (nbytes < sizeof(root->release_agent_path)) { | ||
1361 | /* We never write anything other than '\0' | ||
1362 | * into the last char of release_agent_path, | ||
1363 | * so it always remains a NUL-terminated | ||
1364 | * string */ | ||
1365 | strncpy(root->release_agent_path, buffer, nbytes); | ||
1366 | root->release_agent_path[nbytes] = 0; | ||
1367 | } else { | ||
1368 | retval = -ENOSPC; | ||
1369 | } | ||
1370 | break; | ||
1371 | } | ||
1372 | default: | ||
1373 | retval = -EINVAL; | ||
1374 | goto out2; | ||
1375 | } | ||
1376 | |||
1377 | if (retval == 0) | ||
1378 | retval = nbytes; | ||
1379 | out2: | ||
1380 | mutex_unlock(&cgroup_mutex); | ||
1381 | out1: | ||
1382 | kfree(buffer); | ||
1383 | return retval; | ||
1384 | } | ||
1385 | |||
1386 | static ssize_t cgroup_file_write(struct file *file, const char __user *buf, | ||
1387 | size_t nbytes, loff_t *ppos) | ||
1388 | { | ||
1389 | struct cftype *cft = __d_cft(file->f_dentry); | ||
1390 | struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); | ||
1391 | |||
1392 | if (!cft) | ||
1393 | return -ENODEV; | ||
1394 | if (cft->write) | ||
1395 | return cft->write(cgrp, cft, file, buf, nbytes, ppos); | ||
1396 | if (cft->write_uint) | ||
1397 | return cgroup_write_uint(cgrp, cft, file, buf, nbytes, ppos); | ||
1398 | return -EINVAL; | ||
1399 | } | ||
1400 | |||
1401 | static ssize_t cgroup_read_uint(struct cgroup *cgrp, struct cftype *cft, | ||
1402 | struct file *file, | ||
1403 | char __user *buf, size_t nbytes, | ||
1404 | loff_t *ppos) | ||
1405 | { | ||
1406 | char tmp[64]; | ||
1407 | u64 val = cft->read_uint(cgrp, cft); | ||
1408 | int len = sprintf(tmp, "%llu\n", (unsigned long long) val); | ||
1409 | |||
1410 | return simple_read_from_buffer(buf, nbytes, ppos, tmp, len); | ||
1411 | } | ||
1412 | |||
1413 | static ssize_t cgroup_common_file_read(struct cgroup *cgrp, | ||
1414 | struct cftype *cft, | ||
1415 | struct file *file, | ||
1416 | char __user *buf, | ||
1417 | size_t nbytes, loff_t *ppos) | ||
1418 | { | ||
1419 | enum cgroup_filetype type = cft->private; | ||
1420 | char *page; | ||
1421 | ssize_t retval = 0; | ||
1422 | char *s; | ||
1423 | |||
1424 | if (!(page = (char *)__get_free_page(GFP_KERNEL))) | ||
1425 | return -ENOMEM; | ||
1426 | |||
1427 | s = page; | ||
1428 | |||
1429 | switch (type) { | ||
1430 | case FILE_RELEASE_AGENT: | ||
1431 | { | ||
1432 | struct cgroupfs_root *root; | ||
1433 | size_t n; | ||
1434 | mutex_lock(&cgroup_mutex); | ||
1435 | root = cgrp->root; | ||
1436 | n = strnlen(root->release_agent_path, | ||
1437 | sizeof(root->release_agent_path)); | ||
1438 | n = min(n, (size_t) PAGE_SIZE); | ||
1439 | strncpy(s, root->release_agent_path, n); | ||
1440 | mutex_unlock(&cgroup_mutex); | ||
1441 | s += n; | ||
1442 | break; | ||
1443 | } | ||
1444 | default: | ||
1445 | retval = -EINVAL; | ||
1446 | goto out; | ||
1447 | } | ||
1448 | *s++ = '\n'; | ||
1449 | |||
1450 | retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page); | ||
1451 | out: | ||
1452 | free_page((unsigned long)page); | ||
1453 | return retval; | ||
1454 | } | ||
1455 | |||
1456 | static ssize_t cgroup_file_read(struct file *file, char __user *buf, | ||
1457 | size_t nbytes, loff_t *ppos) | ||
1458 | { | ||
1459 | struct cftype *cft = __d_cft(file->f_dentry); | ||
1460 | struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); | ||
1461 | |||
1462 | if (!cft) | ||
1463 | return -ENODEV; | ||
1464 | |||
1465 | if (cft->read) | ||
1466 | return cft->read(cgrp, cft, file, buf, nbytes, ppos); | ||
1467 | if (cft->read_uint) | ||
1468 | return cgroup_read_uint(cgrp, cft, file, buf, nbytes, ppos); | ||
1469 | return -EINVAL; | ||
1470 | } | ||
1471 | |||
1472 | static int cgroup_file_open(struct inode *inode, struct file *file) | ||
1473 | { | ||
1474 | int err; | ||
1475 | struct cftype *cft; | ||
1476 | |||
1477 | err = generic_file_open(inode, file); | ||
1478 | if (err) | ||
1479 | return err; | ||
1480 | |||
1481 | cft = __d_cft(file->f_dentry); | ||
1482 | if (!cft) | ||
1483 | return -ENODEV; | ||
1484 | if (cft->open) | ||
1485 | err = cft->open(inode, file); | ||
1486 | else | ||
1487 | err = 0; | ||
1488 | |||
1489 | return err; | ||
1490 | } | ||
1491 | |||
1492 | static int cgroup_file_release(struct inode *inode, struct file *file) | ||
1493 | { | ||
1494 | struct cftype *cft = __d_cft(file->f_dentry); | ||
1495 | if (cft->release) | ||
1496 | return cft->release(inode, file); | ||
1497 | return 0; | ||
1498 | } | ||
1499 | |||
1500 | /* | ||
1501 | * cgroup_rename - Only allow simple rename of directories in place. | ||
1502 | */ | ||
1503 | static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry, | ||
1504 | struct inode *new_dir, struct dentry *new_dentry) | ||
1505 | { | ||
1506 | if (!S_ISDIR(old_dentry->d_inode->i_mode)) | ||
1507 | return -ENOTDIR; | ||
1508 | if (new_dentry->d_inode) | ||
1509 | return -EEXIST; | ||
1510 | if (old_dir != new_dir) | ||
1511 | return -EIO; | ||
1512 | return simple_rename(old_dir, old_dentry, new_dir, new_dentry); | ||
1513 | } | ||
1514 | |||
1515 | static struct file_operations cgroup_file_operations = { | ||
1516 | .read = cgroup_file_read, | ||
1517 | .write = cgroup_file_write, | ||
1518 | .llseek = generic_file_llseek, | ||
1519 | .open = cgroup_file_open, | ||
1520 | .release = cgroup_file_release, | ||
1521 | }; | ||
1522 | |||
1523 | static struct inode_operations cgroup_dir_inode_operations = { | ||
1524 | .lookup = simple_lookup, | ||
1525 | .mkdir = cgroup_mkdir, | ||
1526 | .rmdir = cgroup_rmdir, | ||
1527 | .rename = cgroup_rename, | ||
1528 | }; | ||
1529 | |||
1530 | static int cgroup_create_file(struct dentry *dentry, int mode, | ||
1531 | struct super_block *sb) | ||
1532 | { | ||
1533 | static struct dentry_operations cgroup_dops = { | ||
1534 | .d_iput = cgroup_diput, | ||
1535 | }; | ||
1536 | |||
1537 | struct inode *inode; | ||
1538 | |||
1539 | if (!dentry) | ||
1540 | return -ENOENT; | ||
1541 | if (dentry->d_inode) | ||
1542 | return -EEXIST; | ||
1543 | |||
1544 | inode = cgroup_new_inode(mode, sb); | ||
1545 | if (!inode) | ||
1546 | return -ENOMEM; | ||
1547 | |||
1548 | if (S_ISDIR(mode)) { | ||
1549 | inode->i_op = &cgroup_dir_inode_operations; | ||
1550 | inode->i_fop = &simple_dir_operations; | ||
1551 | |||
1552 | /* start off with i_nlink == 2 (for "." entry) */ | ||
1553 | inc_nlink(inode); | ||
1554 | |||
1555 | /* start with the directory inode held, so that we can | ||
1556 | * populate it without racing with another mkdir */ | ||
1557 | mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD); | ||
1558 | } else if (S_ISREG(mode)) { | ||
1559 | inode->i_size = 0; | ||
1560 | inode->i_fop = &cgroup_file_operations; | ||
1561 | } | ||
1562 | dentry->d_op = &cgroup_dops; | ||
1563 | d_instantiate(dentry, inode); | ||
1564 | dget(dentry); /* Extra count - pin the dentry in core */ | ||
1565 | return 0; | ||
1566 | } | ||
1567 | |||
1568 | /* | ||
1569 | * cgroup_create_dir - create a directory for an object. | ||
1570 | * cgrp: the cgroup we create the directory for. | ||
1571 | * It must have a valid ->parent field | ||
1572 | * And we are going to fill its ->dentry field. | ||
1573 | * dentry: dentry of the new cgroup | ||
1574 | * mode: mode to set on new directory. | ||
1575 | */ | ||
1576 | static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry, | ||
1577 | int mode) | ||
1578 | { | ||
1579 | struct dentry *parent; | ||
1580 | int error = 0; | ||
1581 | |||
1582 | parent = cgrp->parent->dentry; | ||
1583 | error = cgroup_create_file(dentry, S_IFDIR | mode, cgrp->root->sb); | ||
1584 | if (!error) { | ||
1585 | dentry->d_fsdata = cgrp; | ||
1586 | inc_nlink(parent->d_inode); | ||
1587 | cgrp->dentry = dentry; | ||
1588 | dget(dentry); | ||
1589 | } | ||
1590 | dput(dentry); | ||
1591 | |||
1592 | return error; | ||
1593 | } | ||
1594 | |||
1595 | int cgroup_add_file(struct cgroup *cgrp, | ||
1596 | struct cgroup_subsys *subsys, | ||
1597 | const struct cftype *cft) | ||
1598 | { | ||
1599 | struct dentry *dir = cgrp->dentry; | ||
1600 | struct dentry *dentry; | ||
1601 | int error; | ||
1602 | |||
1603 | char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 }; | ||
1604 | if (subsys && !test_bit(ROOT_NOPREFIX, &cgrp->root->flags)) { | ||
1605 | strcpy(name, subsys->name); | ||
1606 | strcat(name, "."); | ||
1607 | } | ||
1608 | strcat(name, cft->name); | ||
1609 | BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex)); | ||
1610 | dentry = lookup_one_len(name, dir, strlen(name)); | ||
1611 | if (!IS_ERR(dentry)) { | ||
1612 | error = cgroup_create_file(dentry, 0644 | S_IFREG, | ||
1613 | cgrp->root->sb); | ||
1614 | if (!error) | ||
1615 | dentry->d_fsdata = (void *)cft; | ||
1616 | dput(dentry); | ||
1617 | } else | ||
1618 | error = PTR_ERR(dentry); | ||
1619 | return error; | ||
1620 | } | ||
1621 | |||
1622 | int cgroup_add_files(struct cgroup *cgrp, | ||
1623 | struct cgroup_subsys *subsys, | ||
1624 | const struct cftype cft[], | ||
1625 | int count) | ||
1626 | { | ||
1627 | int i, err; | ||
1628 | for (i = 0; i < count; i++) { | ||
1629 | err = cgroup_add_file(cgrp, subsys, &cft[i]); | ||
1630 | if (err) | ||
1631 | return err; | ||
1632 | } | ||
1633 | return 0; | ||
1634 | } | ||
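/*
 * A subsystem normally calls cgroup_add_files() from its ->populate()
 * callback. A minimal sketch for a hypothetical subsystem "foo" (the
 * foo_* names are illustrative and not defined anywhere in the tree):
 *
 *	static u64 foo_read_weight(struct cgroup *cgrp, struct cftype *cft)
 *	{
 *		return 42;
 *	}
 *
 *	static const struct cftype foo_files[] = {
 *		{
 *			.name = "weight",
 *			.read_uint = foo_read_weight,
 *		},
 *	};
 *
 *	static int foo_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
 *	{
 *		return cgroup_add_files(cgrp, ss, foo_files,
 *					ARRAY_SIZE(foo_files));
 *	}
 *
 * Unless the hierarchy was mounted with ROOT_NOPREFIX, the file then
 * appears as "foo.weight" in every cgroup directory.
 */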
1635 | |||
1636 | /* Count the number of tasks in a cgroup. */ | ||
1637 | |||
1638 | int cgroup_task_count(const struct cgroup *cgrp) | ||
1639 | { | ||
1640 | int count = 0; | ||
1641 | struct list_head *l; | ||
1642 | |||
1643 | read_lock(&css_set_lock); | ||
1644 | l = cgrp->css_sets.next; | ||
1645 | while (l != &cgrp->css_sets) { | ||
1646 | struct cg_cgroup_link *link = | ||
1647 | list_entry(l, struct cg_cgroup_link, cgrp_link_list); | ||
1648 | count += atomic_read(&link->cg->ref.refcount); | ||
1649 | l = l->next; | ||
1650 | } | ||
1651 | read_unlock(&css_set_lock); | ||
1652 | return count; | ||
1653 | } | ||
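/*
 * Note on the counting trick above: each attached task holds one
 * reference on its css_set, so summing the reference counts of every
 * css_set linked to this cgroup approximates the number of attached
 * tasks without walking any task lists. The assumption (stated here,
 * not guaranteed by the API) is that tasks hold essentially all
 * css_set references while the sum is taken.
 */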
1654 | |||
1655 | /* | ||
1656 | * Advance a list_head iterator. The iterator should be positioned at | ||
1657 | * the start of a css_set | ||
1658 | */ | ||
1659 | static void cgroup_advance_iter(struct cgroup *cgrp, | ||
1660 | struct cgroup_iter *it) | ||
1661 | { | ||
1662 | struct list_head *l = it->cg_link; | ||
1663 | struct cg_cgroup_link *link; | ||
1664 | struct css_set *cg; | ||
1665 | |||
1666 | /* Advance to the next non-empty css_set */ | ||
1667 | do { | ||
1668 | l = l->next; | ||
1669 | if (l == &cgrp->css_sets) { | ||
1670 | it->cg_link = NULL; | ||
1671 | return; | ||
1672 | } | ||
1673 | link = list_entry(l, struct cg_cgroup_link, cgrp_link_list); | ||
1674 | cg = link->cg; | ||
1675 | } while (list_empty(&cg->tasks)); | ||
1676 | it->cg_link = l; | ||
1677 | it->task = cg->tasks.next; | ||
1678 | } | ||
1679 | |||
1680 | void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it) | ||
1681 | { | ||
1682 | /* | ||
1683 | * The first time anyone tries to iterate across a cgroup, | ||
1684 | * we need to enable the list linking each css_set to its | ||
1685 | * tasks, and fix up all existing tasks. | ||
1686 | */ | ||
1687 | if (!use_task_css_set_links) { | ||
1688 | struct task_struct *p, *g; | ||
1689 | write_lock(&css_set_lock); | ||
1690 | use_task_css_set_links = 1; | ||
1691 | do_each_thread(g, p) { | ||
1692 | task_lock(p); | ||
1693 | if (list_empty(&p->cg_list)) | ||
1694 | list_add(&p->cg_list, &p->cgroups->tasks); | ||
1695 | task_unlock(p); | ||
1696 | } while_each_thread(g, p); | ||
1697 | write_unlock(&css_set_lock); | ||
1698 | } | ||
1699 | read_lock(&css_set_lock); | ||
1700 | it->cg_link = &cgrp->css_sets; | ||
1701 | cgroup_advance_iter(cgrp, it); | ||
1702 | } | ||
1703 | |||
1704 | struct task_struct *cgroup_iter_next(struct cgroup *cgrp, | ||
1705 | struct cgroup_iter *it) | ||
1706 | { | ||
1707 | struct task_struct *res; | ||
1708 | struct list_head *l = it->task; | ||
1709 | |||
1710 | /* If the iterator's cg_link is NULL, we have no tasks */ | ||
1711 | if (!it->cg_link) | ||
1712 | return NULL; | ||
1713 | res = list_entry(l, struct task_struct, cg_list); | ||
1714 | /* Advance iterator to find next entry */ | ||
1715 | l = l->next; | ||
1716 | if (l == &res->cgroups->tasks) { | ||
1717 | /* We reached the end of this task list - move on to | ||
1718 | * the next cg_cgroup_link */ | ||
1719 | cgroup_advance_iter(cgrp, it); | ||
1720 | } else { | ||
1721 | it->task = l; | ||
1722 | } | ||
1723 | return res; | ||
1724 | } | ||
1725 | |||
1726 | void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it) | ||
1727 | { | ||
1728 | read_unlock(&css_set_lock); | ||
1729 | } | ||
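/*
 * Typical use of the iterator API above; see pid_array_load() below
 * for an in-file user. do_something_with() is a placeholder:
 *
 *	struct cgroup_iter it;
 *	struct task_struct *tsk;
 *
 *	cgroup_iter_start(cgrp, &it);
 *	while ((tsk = cgroup_iter_next(cgrp, &it)))
 *		do_something_with(tsk);
 *	cgroup_iter_end(cgrp, &it);
 *
 * The loop body must not sleep, since css_set_lock is held for
 * reading between _start and _end.
 */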
1730 | |||
1731 | /* | ||
1732 | * Stuff for reading the 'tasks' file. | ||
1733 | * | ||
1734 | * Reading this file can return large amounts of data if a cgroup has | ||
1735 | * *lots* of attached tasks. So it may need several calls to read(), | ||
1736 | * but we cannot guarantee that the information we produce is correct | ||
1737 | * unless we produce it entirely atomically. | ||
1738 | * | ||
1739 | * Upon tasks file open(), a struct ctr_struct is allocated that | ||
1740 | * holds a pointer to a buffer (also allocated here). The struct | ||
1741 | * ctr_struct * is stored in file->private_data, and its resources | ||
1742 | * are freed by release() when the file is closed. The buffer is | ||
1743 | * filled with the formatted PIDs and then consumed by read(). | ||
1744 | */ | ||
1745 | struct ctr_struct { | ||
1746 | char *buf; | ||
1747 | int bufsz; | ||
1748 | }; | ||
1749 | |||
1750 | /* | ||
1751 | * Load into 'pidarray' up to 'npids' of the tasks using cgroup | ||
1752 | * 'cgrp'. Return actual number of pids loaded. No need to | ||
1753 | * task_lock(p) when reading out p->cgroup, since we're in an RCU | ||
1754 | * read section, so the css_set can't go away, and is | ||
1755 | * immutable after creation. | ||
1756 | */ | ||
1757 | static int pid_array_load(pid_t *pidarray, int npids, struct cgroup *cgrp) | ||
1758 | { | ||
1759 | int n = 0; | ||
1760 | struct cgroup_iter it; | ||
1761 | struct task_struct *tsk; | ||
1762 | cgroup_iter_start(cgrp, &it); | ||
1763 | while ((tsk = cgroup_iter_next(cgrp, &it))) { | ||
1764 | if (unlikely(n == npids)) | ||
1765 | break; | ||
1766 | pidarray[n++] = task_pid_nr(tsk); | ||
1767 | } | ||
1768 | cgroup_iter_end(cgrp, &it); | ||
1769 | return n; | ||
1770 | } | ||
1771 | |||
1772 | /** | ||
1773 | * cgroupstats_build - build and fill cgroupstats so that taskstats can | ||
1774 | * export it to userspace. | ||
1775 | * | ||
1776 | * @stats: cgroupstats to fill information into | ||
1777 | * @dentry: A dentry entry belonging to the cgroup for which stats have | ||
1778 | * been requested. | ||
1779 | */ | ||
1780 | int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry) | ||
1781 | { | ||
1782 | int ret = -EINVAL; | ||
1783 | struct cgroup *cgrp; | ||
1784 | struct cgroup_iter it; | ||
1785 | struct task_struct *tsk; | ||
1786 | /* | ||
1787 | * Validate dentry by checking the superblock operations | ||
1788 | */ | ||
1789 | if (dentry->d_sb->s_op != &cgroup_ops) | ||
1790 | goto err; | ||
1791 | |||
1792 | ret = 0; | ||
1793 | cgrp = dentry->d_fsdata; | ||
1794 | rcu_read_lock(); | ||
1795 | |||
1796 | cgroup_iter_start(cgrp, &it); | ||
1797 | while ((tsk = cgroup_iter_next(cgrp, &it))) { | ||
1798 | switch (tsk->state) { | ||
1799 | case TASK_RUNNING: | ||
1800 | stats->nr_running++; | ||
1801 | break; | ||
1802 | case TASK_INTERRUPTIBLE: | ||
1803 | stats->nr_sleeping++; | ||
1804 | break; | ||
1805 | case TASK_UNINTERRUPTIBLE: | ||
1806 | stats->nr_uninterruptible++; | ||
1807 | break; | ||
1808 | case TASK_STOPPED: | ||
1809 | stats->nr_stopped++; | ||
1810 | break; | ||
1811 | default: | ||
1812 | if (delayacct_is_task_waiting_on_io(tsk)) | ||
1813 | stats->nr_io_wait++; | ||
1814 | break; | ||
1815 | } | ||
1816 | } | ||
1817 | cgroup_iter_end(cgrp, &it); | ||
1818 | |||
1819 | rcu_read_unlock(); | ||
1820 | err: | ||
1821 | return ret; | ||
1822 | } | ||
1823 | |||
1824 | static int cmppid(const void *a, const void *b) | ||
1825 | { | ||
1826 | return *(pid_t *)a - *(pid_t *)b; | ||
1827 | } | ||
1828 | |||
1829 | /* | ||
1830 | * Convert array 'a' of 'npids' pid_t's to a string of newline separated | ||
1831 | * decimal pids in 'buf'. Don't write more than 'sz' chars, but return | ||
1832 | * count 'cnt' of how many chars would be written if buf were large enough. | ||
1833 | */ | ||
1834 | static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids) | ||
1835 | { | ||
1836 | int cnt = 0; | ||
1837 | int i; | ||
1838 | |||
1839 | for (i = 0; i < npids; i++) | ||
1840 | cnt += snprintf(buf + cnt, max(sz - cnt, 0), "%d\n", a[i]); | ||
1841 | return cnt; | ||
1842 | } | ||
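/*
 * Because snprintf() reports the length that would have been written
 * given unlimited space, pid_array_to_buf() supports a two-pass
 * pattern: call it once with a one-byte scratch buffer purely to
 * learn the required size, allocate, then call it again for real:
 *
 *	char c;
 *	int bufsz = pid_array_to_buf(&c, sizeof(c), pidarray, npids) + 1;
 *	char *buf = kmalloc(bufsz, GFP_KERNEL);
 *	if (buf)
 *		pid_array_to_buf(buf, bufsz, pidarray, npids);
 *
 * cgroup_tasks_open() below uses exactly this pattern.
 */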
1843 | |||
1844 | /* | ||
1845 | * Handle an open on 'tasks' file. Prepare a buffer listing the | ||
1846 | * process id's of tasks currently attached to the cgroup being opened. | ||
1847 | * | ||
1848 | * Does not require any specific cgroup mutexes, and does not take any. | ||
1849 | */ | ||
1850 | static int cgroup_tasks_open(struct inode *unused, struct file *file) | ||
1851 | { | ||
1852 | struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); | ||
1853 | struct ctr_struct *ctr; | ||
1854 | pid_t *pidarray; | ||
1855 | int npids; | ||
1856 | char c; | ||
1857 | |||
1858 | if (!(file->f_mode & FMODE_READ)) | ||
1859 | return 0; | ||
1860 | |||
1861 | ctr = kmalloc(sizeof(*ctr), GFP_KERNEL); | ||
1862 | if (!ctr) | ||
1863 | goto err0; | ||
1864 | |||
1865 | /* | ||
1866 | * If cgroup gets more users after we read count, we won't have | ||
1867 | * enough space - tough. This race is indistinguishable to the | ||
1868 | * caller from the case that the additional cgroup users didn't | ||
1869 | * show up until sometime later on. | ||
1870 | */ | ||
1871 | npids = cgroup_task_count(cgrp); | ||
1872 | if (npids) { | ||
1873 | pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL); | ||
1874 | if (!pidarray) | ||
1875 | goto err1; | ||
1876 | |||
1877 | npids = pid_array_load(pidarray, npids, cgrp); | ||
1878 | sort(pidarray, npids, sizeof(pid_t), cmppid, NULL); | ||
1879 | |||
1880 | /* Call pid_array_to_buf() twice, first just to get bufsz */ | ||
1881 | ctr->bufsz = pid_array_to_buf(&c, sizeof(c), pidarray, npids) + 1; | ||
1882 | ctr->buf = kmalloc(ctr->bufsz, GFP_KERNEL); | ||
1883 | if (!ctr->buf) | ||
1884 | goto err2; | ||
1885 | ctr->bufsz = pid_array_to_buf(ctr->buf, ctr->bufsz, pidarray, npids); | ||
1886 | |||
1887 | kfree(pidarray); | ||
1888 | } else { | ||
1889 | ctr->buf = NULL; | ||
1890 | ctr->bufsz = 0; | ||
1891 | } | ||
1892 | file->private_data = ctr; | ||
1893 | return 0; | ||
1894 | |||
1895 | err2: | ||
1896 | kfree(pidarray); | ||
1897 | err1: | ||
1898 | kfree(ctr); | ||
1899 | err0: | ||
1900 | return -ENOMEM; | ||
1901 | } | ||
1902 | |||
1903 | static ssize_t cgroup_tasks_read(struct cgroup *cgrp, | ||
1904 | struct cftype *cft, | ||
1905 | struct file *file, char __user *buf, | ||
1906 | size_t nbytes, loff_t *ppos) | ||
1907 | { | ||
1908 | struct ctr_struct *ctr = file->private_data; | ||
1909 | |||
1910 | return simple_read_from_buffer(buf, nbytes, ppos, ctr->buf, ctr->bufsz); | ||
1911 | } | ||
1912 | |||
1913 | static int cgroup_tasks_release(struct inode *unused_inode, | ||
1914 | struct file *file) | ||
1915 | { | ||
1916 | struct ctr_struct *ctr; | ||
1917 | |||
1918 | if (file->f_mode & FMODE_READ) { | ||
1919 | ctr = file->private_data; | ||
1920 | kfree(ctr->buf); | ||
1921 | kfree(ctr); | ||
1922 | } | ||
1923 | return 0; | ||
1924 | } | ||
1925 | |||
1926 | static u64 cgroup_read_notify_on_release(struct cgroup *cgrp, | ||
1927 | struct cftype *cft) | ||
1928 | { | ||
1929 | return notify_on_release(cgrp); | ||
1930 | } | ||
1931 | |||
1932 | static u64 cgroup_read_releasable(struct cgroup *cgrp, struct cftype *cft) | ||
1933 | { | ||
1934 | return test_bit(CGRP_RELEASABLE, &cgrp->flags); | ||
1935 | } | ||
1936 | |||
1937 | /* | ||
1938 | * for the common functions, 'private' gives the type of file | ||
1939 | */ | ||
1940 | static struct cftype files[] = { | ||
1941 | { | ||
1942 | .name = "tasks", | ||
1943 | .open = cgroup_tasks_open, | ||
1944 | .read = cgroup_tasks_read, | ||
1945 | .write = cgroup_common_file_write, | ||
1946 | .release = cgroup_tasks_release, | ||
1947 | .private = FILE_TASKLIST, | ||
1948 | }, | ||
1949 | |||
1950 | { | ||
1951 | .name = "notify_on_release", | ||
1952 | .read_uint = cgroup_read_notify_on_release, | ||
1953 | .write = cgroup_common_file_write, | ||
1954 | .private = FILE_NOTIFY_ON_RELEASE, | ||
1955 | }, | ||
1956 | |||
1957 | { | ||
1958 | .name = "releasable", | ||
1959 | .read_uint = cgroup_read_releasable, | ||
1960 | .private = FILE_RELEASABLE, | ||
1961 | } | ||
1962 | }; | ||
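/*
 * The shared handlers named above dispatch on cft->private. A rough
 * pseudocode sketch of the write side (the real
 * cgroup_common_file_write() appears earlier in this file):
 *
 *	switch (cft->private) {
 *	case FILE_TASKLIST:
 *		attach the pid written by userspace to this cgroup;
 *		break;
 *	case FILE_NOTIFY_ON_RELEASE:
 *		set or clear CGRP_NOTIFY_ON_RELEASE in cgrp->flags;
 *		break;
 *	case FILE_RELEASE_AGENT:
 *		copy the written path into root->release_agent_path;
 *		break;
 *	}
 */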
1963 | |||
1964 | static struct cftype cft_release_agent = { | ||
1965 | .name = "release_agent", | ||
1966 | .read = cgroup_common_file_read, | ||
1967 | .write = cgroup_common_file_write, | ||
1968 | .private = FILE_RELEASE_AGENT, | ||
1969 | }; | ||
1970 | |||
1971 | static int cgroup_populate_dir(struct cgroup *cgrp) | ||
1972 | { | ||
1973 | int err; | ||
1974 | struct cgroup_subsys *ss; | ||
1975 | |||
1976 | /* First clear out any existing files */ | ||
1977 | cgroup_clear_directory(cgrp->dentry); | ||
1978 | |||
1979 | err = cgroup_add_files(cgrp, NULL, files, ARRAY_SIZE(files)); | ||
1980 | if (err < 0) | ||
1981 | return err; | ||
1982 | |||
1983 | if (cgrp == cgrp->top_cgroup) { | ||
1984 | if ((err = cgroup_add_file(cgrp, NULL, &cft_release_agent)) < 0) | ||
1985 | return err; | ||
1986 | } | ||
1987 | |||
1988 | for_each_subsys(cgrp->root, ss) { | ||
1989 | if (ss->populate && (err = ss->populate(ss, cgrp)) < 0) | ||
1990 | return err; | ||
1991 | } | ||
1992 | |||
1993 | return 0; | ||
1994 | } | ||
1995 | |||
1996 | static void init_cgroup_css(struct cgroup_subsys_state *css, | ||
1997 | struct cgroup_subsys *ss, | ||
1998 | struct cgroup *cgrp) | ||
1999 | { | ||
2000 | css->cgroup = cgrp; | ||
2001 | atomic_set(&css->refcnt, 0); | ||
2002 | css->flags = 0; | ||
2003 | if (cgrp == dummytop) | ||
2004 | set_bit(CSS_ROOT, &css->flags); | ||
2005 | BUG_ON(cgrp->subsys[ss->subsys_id]); | ||
2006 | cgrp->subsys[ss->subsys_id] = css; | ||
2007 | } | ||
2008 | |||
2009 | /* | ||
2010 | * cgroup_create - create a cgroup | ||
2011 | * parent: cgroup that will be parent of the new cgroup. | ||
2012 | * dentry: dentry of the new cgroup | ||
2013 | * mode: mode to set on new inode | ||
2014 | * | ||
2015 | * Must be called with the mutex on the parent inode held | ||
2016 | */ | ||
2017 | |||
2018 | static long cgroup_create(struct cgroup *parent, struct dentry *dentry, | ||
2019 | int mode) | ||
2020 | { | ||
2021 | struct cgroup *cgrp; | ||
2022 | struct cgroupfs_root *root = parent->root; | ||
2023 | int err = 0; | ||
2024 | struct cgroup_subsys *ss; | ||
2025 | struct super_block *sb = root->sb; | ||
2026 | |||
2027 | cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL); | ||
2028 | if (!cgrp) | ||
2029 | return -ENOMEM; | ||
2030 | |||
2031 | /* Grab a reference on the superblock so the hierarchy doesn't | ||
2032 | * get deleted on unmount if there are child cgroups. This | ||
2033 | * can be done outside cgroup_mutex, since the sb can't | ||
2034 | * disappear while someone has an open control file on the | ||
2035 | * fs */ | ||
2036 | atomic_inc(&sb->s_active); | ||
2037 | |||
2038 | mutex_lock(&cgroup_mutex); | ||
2039 | |||
2040 | cgrp->flags = 0; | ||
2041 | INIT_LIST_HEAD(&cgrp->sibling); | ||
2042 | INIT_LIST_HEAD(&cgrp->children); | ||
2043 | INIT_LIST_HEAD(&cgrp->css_sets); | ||
2044 | INIT_LIST_HEAD(&cgrp->release_list); | ||
2045 | |||
2046 | cgrp->parent = parent; | ||
2047 | cgrp->root = parent->root; | ||
2048 | cgrp->top_cgroup = parent->top_cgroup; | ||
2049 | |||
2050 | for_each_subsys(root, ss) { | ||
2051 | struct cgroup_subsys_state *css = ss->create(ss, cgrp); | ||
2052 | if (IS_ERR(css)) { | ||
2053 | err = PTR_ERR(css); | ||
2054 | goto err_destroy; | ||
2055 | } | ||
2056 | init_cgroup_css(css, ss, cgrp); | ||
2057 | } | ||
2058 | |||
2059 | list_add(&cgrp->sibling, &cgrp->parent->children); | ||
2060 | root->number_of_cgroups++; | ||
2061 | |||
2062 | err = cgroup_create_dir(cgrp, dentry, mode); | ||
2063 | if (err < 0) | ||
2064 | goto err_remove; | ||
2065 | |||
2066 | /* The cgroup directory was pre-locked for us */ | ||
2067 | BUG_ON(!mutex_is_locked(&cgrp->dentry->d_inode->i_mutex)); | ||
2068 | |||
2069 | err = cgroup_populate_dir(cgrp); | ||
2070 | /* If err < 0, we have a half-filled directory - oh well ;) */ | ||
2071 | |||
2072 | mutex_unlock(&cgroup_mutex); | ||
2073 | mutex_unlock(&cgrp->dentry->d_inode->i_mutex); | ||
2074 | |||
2075 | return 0; | ||
2076 | |||
2077 | err_remove: | ||
2078 | |||
2079 | list_del(&cgrp->sibling); | ||
2080 | root->number_of_cgroups--; | ||
2081 | |||
2082 | err_destroy: | ||
2083 | |||
2084 | for_each_subsys(root, ss) { | ||
2085 | if (cgrp->subsys[ss->subsys_id]) | ||
2086 | ss->destroy(ss, cgrp); | ||
2087 | } | ||
2088 | |||
2089 | mutex_unlock(&cgroup_mutex); | ||
2090 | |||
2091 | /* Release the reference count that we took on the superblock */ | ||
2092 | deactivate_super(sb); | ||
2093 | |||
2094 | kfree(cgrp); | ||
2095 | return err; | ||
2096 | } | ||
2097 | |||
2098 | static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode) | ||
2099 | { | ||
2100 | struct cgroup *c_parent = dentry->d_parent->d_fsdata; | ||
2101 | |||
2102 | /* the vfs holds inode->i_mutex already */ | ||
2103 | return cgroup_create(c_parent, dentry, mode | S_IFDIR); | ||
2104 | } | ||
2105 | |||
2106 | static inline int cgroup_has_css_refs(struct cgroup *cgrp) | ||
2107 | { | ||
2108 | /* Check the reference count on each subsystem. Since we | ||
2109 | * already established that there are no tasks in the | ||
2110 | * cgroup, if the css refcount is also 0, then there should | ||
2111 | * be no outstanding references, so the subsystem is safe to | ||
2112 | * destroy. We scan across all subsystems rather than using | ||
2113 | * the per-hierarchy linked list of mounted subsystems since | ||
2114 | * we can be called via check_for_release() with no | ||
2115 | * synchronization other than RCU, and the subsystem linked | ||
2116 | * list isn't RCU-safe */ | ||
2117 | int i; | ||
2118 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { | ||
2119 | struct cgroup_subsys *ss = subsys[i]; | ||
2120 | struct cgroup_subsys_state *css; | ||
2121 | /* Skip subsystems not in this hierarchy */ | ||
2122 | if (ss->root != cgrp->root) | ||
2123 | continue; | ||
2124 | css = cgrp->subsys[ss->subsys_id]; | ||
2125 | /* When called from check_for_release() it's possible | ||
2126 | * that by this point the cgroup has been removed | ||
2127 | * and the css deleted. But a false-positive doesn't | ||
2128 | * matter, since it can only happen if the cgroup | ||
2129 | * has been deleted and hence no longer needs the | ||
2130 | * release agent to be called anyway. */ | ||
2131 | if (css && atomic_read(&css->refcnt)) { | ||
2132 | return 1; | ||
2133 | } | ||
2134 | } | ||
2135 | return 0; | ||
2136 | } | ||
2137 | |||
2138 | static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry) | ||
2139 | { | ||
2140 | struct cgroup *cgrp = dentry->d_fsdata; | ||
2141 | struct dentry *d; | ||
2142 | struct cgroup *parent; | ||
2143 | struct cgroup_subsys *ss; | ||
2144 | struct super_block *sb; | ||
2145 | struct cgroupfs_root *root; | ||
2146 | |||
2147 | /* the vfs already holds i_mutex on both the parent dir and the victim */ | ||
2148 | |||
2149 | mutex_lock(&cgroup_mutex); | ||
2150 | if (atomic_read(&cgrp->count) != 0) { | ||
2151 | mutex_unlock(&cgroup_mutex); | ||
2152 | return -EBUSY; | ||
2153 | } | ||
2154 | if (!list_empty(&cgrp->children)) { | ||
2155 | mutex_unlock(&cgroup_mutex); | ||
2156 | return -EBUSY; | ||
2157 | } | ||
2158 | |||
2159 | parent = cgrp->parent; | ||
2160 | root = cgrp->root; | ||
2161 | sb = root->sb; | ||
2162 | |||
2163 | if (cgroup_has_css_refs(cgrp)) { | ||
2164 | mutex_unlock(&cgroup_mutex); | ||
2165 | return -EBUSY; | ||
2166 | } | ||
2167 | |||
2168 | for_each_subsys(root, ss) { | ||
2169 | if (cgrp->subsys[ss->subsys_id]) | ||
2170 | ss->destroy(ss, cgrp); | ||
2171 | } | ||
2172 | |||
2173 | spin_lock(&release_list_lock); | ||
2174 | set_bit(CGRP_REMOVED, &cgrp->flags); | ||
2175 | if (!list_empty(&cgrp->release_list)) | ||
2176 | list_del(&cgrp->release_list); | ||
2177 | spin_unlock(&release_list_lock); | ||
2178 | /* delete my sibling from parent->children */ | ||
2179 | list_del(&cgrp->sibling); | ||
2180 | spin_lock(&cgrp->dentry->d_lock); | ||
2181 | d = dget(cgrp->dentry); | ||
2182 | cgrp->dentry = NULL; | ||
2183 | spin_unlock(&d->d_lock); | ||
2184 | |||
2185 | cgroup_d_remove_dir(d); | ||
2186 | dput(d); | ||
2187 | root->number_of_cgroups--; | ||
2188 | |||
2189 | set_bit(CGRP_RELEASABLE, &parent->flags); | ||
2190 | check_for_release(parent); | ||
2191 | |||
2192 | mutex_unlock(&cgroup_mutex); | ||
2193 | /* Drop the active superblock reference that we took when we | ||
2194 | * created the cgroup */ | ||
2195 | deactivate_super(sb); | ||
2196 | return 0; | ||
2197 | } | ||
2198 | |||
2199 | static void cgroup_init_subsys(struct cgroup_subsys *ss) | ||
2200 | { | ||
2201 | struct cgroup_subsys_state *css; | ||
2202 | struct list_head *l; | ||
2203 | printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name); | ||
2204 | |||
2205 | /* Create the top cgroup state for this subsystem */ | ||
2206 | ss->root = &rootnode; | ||
2207 | css = ss->create(ss, dummytop); | ||
2208 | /* We don't handle early failures gracefully */ | ||
2209 | BUG_ON(IS_ERR(css)); | ||
2210 | init_cgroup_css(css, ss, dummytop); | ||
2211 | |||
2212 | /* Update all css_sets to contain a subsys | ||
2213 | * pointer to this state - since the subsystem is | ||
2214 | * newly registered, all tasks and hence all | ||
2215 | * css_sets are in the subsystem's top cgroup. */ | ||
2216 | write_lock(&css_set_lock); | ||
2217 | l = &init_css_set.list; | ||
2218 | do { | ||
2219 | struct css_set *cg = | ||
2220 | list_entry(l, struct css_set, list); | ||
2221 | cg->subsys[ss->subsys_id] = dummytop->subsys[ss->subsys_id]; | ||
2222 | l = l->next; | ||
2223 | } while (l != &init_css_set.list); | ||
2224 | write_unlock(&css_set_lock); | ||
2225 | |||
2226 | /* If this subsystem requested that it be notified with fork | ||
2227 | * events, we should send it one now for every process in the | ||
2228 | * system */ | ||
2229 | if (ss->fork) { | ||
2230 | struct task_struct *g, *p; | ||
2231 | |||
2232 | read_lock(&tasklist_lock); | ||
2233 | do_each_thread(g, p) { | ||
2234 | ss->fork(ss, p); | ||
2235 | } while_each_thread(g, p); | ||
2236 | read_unlock(&tasklist_lock); | ||
2237 | } | ||
2238 | |||
2239 | need_forkexit_callback |= ss->fork || ss->exit; | ||
2240 | |||
2241 | ss->active = 1; | ||
2242 | } | ||
2243 | |||
2244 | /** | ||
2245 | * cgroup_init_early - initialize cgroups at system boot, and | ||
2246 | * initialize any subsystems that request early init. | ||
2247 | */ | ||
2248 | int __init cgroup_init_early(void) | ||
2249 | { | ||
2250 | int i; | ||
2251 | kref_init(&init_css_set.ref); | ||
2252 | kref_get(&init_css_set.ref); | ||
2253 | INIT_LIST_HEAD(&init_css_set.list); | ||
2254 | INIT_LIST_HEAD(&init_css_set.cg_links); | ||
2255 | INIT_LIST_HEAD(&init_css_set.tasks); | ||
2256 | css_set_count = 1; | ||
2257 | init_cgroup_root(&rootnode); | ||
2258 | list_add(&rootnode.root_list, &roots); | ||
2259 | root_count = 1; | ||
2260 | init_task.cgroups = &init_css_set; | ||
2261 | |||
2262 | init_css_set_link.cg = &init_css_set; | ||
2263 | list_add(&init_css_set_link.cgrp_link_list, | ||
2264 | &rootnode.top_cgroup.css_sets); | ||
2265 | list_add(&init_css_set_link.cg_link_list, | ||
2266 | &init_css_set.cg_links); | ||
2267 | |||
2268 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { | ||
2269 | struct cgroup_subsys *ss = subsys[i]; | ||
2270 | |||
2271 | BUG_ON(!ss->name); | ||
2272 | BUG_ON(strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN); | ||
2273 | BUG_ON(!ss->create); | ||
2274 | BUG_ON(!ss->destroy); | ||
2275 | if (ss->subsys_id != i) { | ||
2276 | printk(KERN_ERR "Subsys %s id == %d\n", | ||
2277 | ss->name, ss->subsys_id); | ||
2278 | BUG(); | ||
2279 | } | ||
2280 | |||
2281 | if (ss->early_init) | ||
2282 | cgroup_init_subsys(ss); | ||
2283 | } | ||
2284 | return 0; | ||
2285 | } | ||
2286 | |||
2287 | /** | ||
2288 | * cgroup_init - register cgroup filesystem and /proc file, and | ||
2289 | * initialize any subsystems that didn't request early init. | ||
2290 | */ | ||
2291 | int __init cgroup_init(void) | ||
2292 | { | ||
2293 | int err; | ||
2294 | int i; | ||
2295 | struct proc_dir_entry *entry; | ||
2296 | |||
2297 | err = bdi_init(&cgroup_backing_dev_info); | ||
2298 | if (err) | ||
2299 | return err; | ||
2300 | |||
2301 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { | ||
2302 | struct cgroup_subsys *ss = subsys[i]; | ||
2303 | if (!ss->early_init) | ||
2304 | cgroup_init_subsys(ss); | ||
2305 | } | ||
2306 | |||
2307 | err = register_filesystem(&cgroup_fs_type); | ||
2308 | if (err < 0) | ||
2309 | goto out; | ||
2310 | |||
2311 | entry = create_proc_entry("cgroups", 0, NULL); | ||
2312 | if (entry) | ||
2313 | entry->proc_fops = &proc_cgroupstats_operations; | ||
2314 | |||
2315 | out: | ||
2316 | if (err) | ||
2317 | bdi_destroy(&cgroup_backing_dev_info); | ||
2318 | |||
2319 | return err; | ||
2320 | } | ||
2321 | |||
2322 | /* | ||
2323 | * proc_cgroup_show() | ||
2324 | * - Print task's cgroup paths into seq_file, one line for each hierarchy | ||
2325 | * - Used for /proc/<pid>/cgroup. | ||
2326 | * - No need to task_lock(tsk) on this tsk->cgroups reference, as it | ||
2327 | * doesn't really matter if tsk->cgroups changes after we read it, | ||
2328 | * and we take cgroup_mutex, keeping attach_task() from changing it | ||
2329 | * anyway. No need to check that tsk->cgroups != NULL, thanks to | ||
2330 | * the_top_cgroup_hack in cgroup_exit(), which sets an exiting task's | ||
2331 | * cgroup to top_cgroup. | ||
2332 | */ | ||
2333 | |||
2334 | /* TODO: Use a proper seq_file iterator */ | ||
2335 | static int proc_cgroup_show(struct seq_file *m, void *v) | ||
2336 | { | ||
2337 | struct pid *pid; | ||
2338 | struct task_struct *tsk; | ||
2339 | char *buf; | ||
2340 | int retval; | ||
2341 | struct cgroupfs_root *root; | ||
2342 | |||
2343 | retval = -ENOMEM; | ||
2344 | buf = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
2345 | if (!buf) | ||
2346 | goto out; | ||
2347 | |||
2348 | retval = -ESRCH; | ||
2349 | pid = m->private; | ||
2350 | tsk = get_pid_task(pid, PIDTYPE_PID); | ||
2351 | if (!tsk) | ||
2352 | goto out_free; | ||
2353 | |||
2354 | retval = 0; | ||
2355 | |||
2356 | mutex_lock(&cgroup_mutex); | ||
2357 | |||
2358 | for_each_root(root) { | ||
2359 | struct cgroup_subsys *ss; | ||
2360 | struct cgroup *cgrp; | ||
2361 | int subsys_id; | ||
2362 | int count = 0; | ||
2363 | |||
2364 | /* Skip this hierarchy if it has no active subsystems */ | ||
2365 | if (!root->actual_subsys_bits) | ||
2366 | continue; | ||
2367 | for_each_subsys(root, ss) | ||
2368 | seq_printf(m, "%s%s", count++ ? "," : "", ss->name); | ||
2369 | seq_putc(m, ':'); | ||
2370 | get_first_subsys(&root->top_cgroup, NULL, &subsys_id); | ||
2371 | cgrp = task_cgroup(tsk, subsys_id); | ||
2372 | retval = cgroup_path(cgrp, buf, PAGE_SIZE); | ||
2373 | if (retval < 0) | ||
2374 | goto out_unlock; | ||
2375 | seq_puts(m, buf); | ||
2376 | seq_putc(m, '\n'); | ||
2377 | } | ||
2378 | |||
2379 | out_unlock: | ||
2380 | mutex_unlock(&cgroup_mutex); | ||
2381 | put_task_struct(tsk); | ||
2382 | out_free: | ||
2383 | kfree(buf); | ||
2384 | out: | ||
2385 | return retval; | ||
2386 | } | ||
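/*
 * Sample /proc/<pid>/cgroup output produced above, one line per
 * active hierarchy (contents illustrative):
 *
 *	cpuset:/
 *	cpu:/batch/jobs
 *
 * i.e. the comma-separated names of the subsystems bound to the
 * hierarchy, a ':', then the task's cgroup path relative to the
 * root of that hierarchy.
 */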
2387 | |||
2388 | static int cgroup_open(struct inode *inode, struct file *file) | ||
2389 | { | ||
2390 | struct pid *pid = PROC_I(inode)->pid; | ||
2391 | return single_open(file, proc_cgroup_show, pid); | ||
2392 | } | ||
2393 | |||
2394 | struct file_operations proc_cgroup_operations = { | ||
2395 | .open = cgroup_open, | ||
2396 | .read = seq_read, | ||
2397 | .llseek = seq_lseek, | ||
2398 | .release = single_release, | ||
2399 | }; | ||
2400 | |||
2401 | /* Display information about each subsystem and each hierarchy */ | ||
2402 | static int proc_cgroupstats_show(struct seq_file *m, void *v) | ||
2403 | { | ||
2404 | int i; | ||
2405 | struct cgroupfs_root *root; | ||
2406 | |||
2407 | seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\n"); | ||
2408 | mutex_lock(&cgroup_mutex); | ||
2409 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { | ||
2410 | struct cgroup_subsys *ss = subsys[i]; | ||
2411 | seq_printf(m, "%s\t%lu\t%d\n", | ||
2412 | ss->name, ss->root->subsys_bits, | ||
2413 | ss->root->number_of_cgroups); | ||
2414 | } | ||
2415 | mutex_unlock(&cgroup_mutex); | ||
2416 | return 0; | ||
2417 | } | ||
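/*
 * Sample /proc/cgroups output (values illustrative):
 *
 *	#subsys_name	hierarchy	num_cgroups
 *	cpuset	1	4
 *	debug	0	1
 *
 * Note that the "hierarchy" column is the root's subsys_bits mask,
 * not a hierarchy index: 0 means the subsystem is still on the
 * unattached rootnode.
 */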
2418 | |||
2419 | static int cgroupstats_open(struct inode *inode, struct file *file) | ||
2420 | { | ||
2421 | return single_open(file, proc_cgroupstats_show, NULL); | ||
2422 | } | ||
2423 | |||
2424 | static struct file_operations proc_cgroupstats_operations = { | ||
2425 | .open = cgroupstats_open, | ||
2426 | .read = seq_read, | ||
2427 | .llseek = seq_lseek, | ||
2428 | .release = single_release, | ||
2429 | }; | ||
2430 | |||
2431 | /** | ||
2432 | * cgroup_fork - attach a newly forked task to its parent's cgroup. | ||
2433 | * @child: pointer to task_struct of the newly forked child process. | ||
2434 | * | ||
2435 | * Description: A task inherits its parent's cgroup at fork(). | ||
2436 | * | ||
2437 | * A pointer to the shared css_set was automatically copied in | ||
2438 | * fork.c by dup_task_struct(). However, we ignore that copy, since | ||
2439 | * it was not made under the protection of RCU or cgroup_mutex, so | ||
2440 | * might no longer be a valid cgroup pointer. attach_task() might | ||
2441 | * have already changed current->cgroups, allowing the previously | ||
2442 | * referenced css_set to be removed and freed. | ||
2443 | * | ||
2444 | * At the point that cgroup_fork() is called, 'current' is the parent | ||
2445 | * task, and the passed argument 'child' points to the child task. | ||
2446 | */ | ||
2447 | void cgroup_fork(struct task_struct *child) | ||
2448 | { | ||
2449 | task_lock(current); | ||
2450 | child->cgroups = current->cgroups; | ||
2451 | get_css_set(child->cgroups); | ||
2452 | task_unlock(current); | ||
2453 | INIT_LIST_HEAD(&child->cg_list); | ||
2454 | } | ||
2455 | |||
2456 | /** | ||
2457 | * cgroup_fork_callbacks - called on a new task very soon before | ||
2458 | * adding it to the tasklist. No need to take any locks since no one | ||
2459 | * can yet be operating on this task. | ||
2460 | */ | ||
2461 | void cgroup_fork_callbacks(struct task_struct *child) | ||
2462 | { | ||
2463 | if (need_forkexit_callback) { | ||
2464 | int i; | ||
2465 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { | ||
2466 | struct cgroup_subsys *ss = subsys[i]; | ||
2467 | if (ss->fork) | ||
2468 | ss->fork(ss, child); | ||
2469 | } | ||
2470 | } | ||
2471 | } | ||
2472 | |||
2473 | /** | ||
2474 | * cgroup_post_fork - called on a new task after adding it to the | ||
2475 | * task list. Adds the task to the list running through its css_set | ||
2476 | * if necessary. Has to be after the task is visible on the task list | ||
2477 | * in case we race with the first call to cgroup_iter_start() - to | ||
2478 | * guarantee that the new task ends up on its list. */ | ||
2479 | void cgroup_post_fork(struct task_struct *child) | ||
2480 | { | ||
2481 | if (use_task_css_set_links) { | ||
2482 | write_lock(&css_set_lock); | ||
2483 | if (list_empty(&child->cg_list)) | ||
2484 | list_add(&child->cg_list, &child->cgroups->tasks); | ||
2485 | write_unlock(&css_set_lock); | ||
2486 | } | ||
2487 | } | ||
2488 | /** | ||
2489 | * cgroup_exit - detach cgroup from exiting task | ||
2490 | * @tsk: pointer to task_struct of exiting process | ||
2491 | * | ||
2492 | * Description: Detach cgroup from @tsk and release it. | ||
2493 | * | ||
2494 | * Note that cgroups marked notify_on_release force every task in | ||
2495 | * them to take the global cgroup_mutex mutex when exiting. | ||
2496 | * This could impact scaling on very large systems. Be reluctant to | ||
2497 | * use notify_on_release cgroups where very high task exit scaling | ||
2498 | * is required on large systems. | ||
2499 | * | ||
2500 | * the_top_cgroup_hack: | ||
2501 | * | ||
2502 | * Set the exiting task's cgroup to the root cgroup (top_cgroup). | ||
2503 | * | ||
2504 | * We call cgroup_exit() while the task is still competent to | ||
2505 | * handle notify_on_release(), then leave the task attached to the | ||
2506 | * root cgroup in each hierarchy for the remainder of its exit. | ||
2507 | * | ||
2508 | * To do this properly, we would increment the reference count on | ||
2509 | * top_cgroup, and near the very end of the kernel/exit.c do_exit() | ||
2510 | * code we would add a second cgroup function call, to drop that | ||
2511 | * reference. This would just create an unnecessary hot spot on | ||
2512 | * the top_cgroup reference count, to no avail. | ||
2513 | * | ||
2514 | * Normally, holding a reference to a cgroup without bumping its | ||
2515 | * count is unsafe. The cgroup could go away, or someone could | ||
2516 | * attach us to a different cgroup, decrementing the count on | ||
2517 | * the first cgroup that we never incremented. But in this case, | ||
2518 | * top_cgroup isn't going away, and either task has PF_EXITING set, | ||
2519 | * which wards off any attach_task() attempts, or the task is a | ||
2520 | * failed fork, never visible to attach_task(). | ||
2521 | * | ||
2522 | */ | ||
2523 | void cgroup_exit(struct task_struct *tsk, int run_callbacks) | ||
2524 | { | ||
2525 | int i; | ||
2526 | struct css_set *cg; | ||
2527 | |||
2528 | if (run_callbacks && need_forkexit_callback) { | ||
2529 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { | ||
2530 | struct cgroup_subsys *ss = subsys[i]; | ||
2531 | if (ss->exit) | ||
2532 | ss->exit(ss, tsk); | ||
2533 | } | ||
2534 | } | ||
2535 | |||
2536 | /* | ||
2537 | * Unlink from the css_set task list if necessary. | ||
2538 | * Optimistically check cg_list before taking | ||
2539 | * css_set_lock | ||
2540 | */ | ||
2541 | if (!list_empty(&tsk->cg_list)) { | ||
2542 | write_lock(&css_set_lock); | ||
2543 | if (!list_empty(&tsk->cg_list)) | ||
2544 | list_del(&tsk->cg_list); | ||
2545 | write_unlock(&css_set_lock); | ||
2546 | } | ||
2547 | |||
2548 | /* Reassign the task to the init_css_set. */ | ||
2549 | task_lock(tsk); | ||
2550 | cg = tsk->cgroups; | ||
2551 | tsk->cgroups = &init_css_set; | ||
2552 | task_unlock(tsk); | ||
2553 | if (cg) | ||
2554 | put_css_set_taskexit(cg); | ||
2555 | } | ||
2556 | |||
2557 | /** | ||
2558 | * cgroup_clone - duplicate the current cgroup in the hierarchy | ||
2559 | * that the given subsystem is attached to, and move this task into | ||
2560 | * the new child | ||
2561 | */ | ||
2562 | int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys) | ||
2563 | { | ||
2564 | struct dentry *dentry; | ||
2565 | int ret = 0; | ||
2566 | char nodename[MAX_CGROUP_TYPE_NAMELEN]; | ||
2567 | struct cgroup *parent, *child; | ||
2568 | struct inode *inode; | ||
2569 | struct css_set *cg; | ||
2570 | struct cgroupfs_root *root; | ||
2571 | struct cgroup_subsys *ss; | ||
2572 | |||
2573 | /* We shouldn't be called by an unregistered subsystem */ | ||
2574 | BUG_ON(!subsys->active); | ||
2575 | |||
2576 | /* First figure out what hierarchy and cgroup we're dealing | ||
2577 | * with, and pin them so we can drop cgroup_mutex */ | ||
2578 | mutex_lock(&cgroup_mutex); | ||
2579 | again: | ||
2580 | root = subsys->root; | ||
2581 | if (root == &rootnode) { | ||
2582 | printk(KERN_INFO | ||
2583 | "Not cloning cgroup for unused subsystem %s\n", | ||
2584 | subsys->name); | ||
2585 | mutex_unlock(&cgroup_mutex); | ||
2586 | return 0; | ||
2587 | } | ||
2588 | cg = tsk->cgroups; | ||
2589 | parent = task_cgroup(tsk, subsys->subsys_id); | ||
2590 | |||
2591 | snprintf(nodename, MAX_CGROUP_TYPE_NAMELEN, "node_%d", tsk->pid); | ||
2592 | |||
2593 | /* Pin the hierarchy */ | ||
2594 | atomic_inc(&parent->root->sb->s_active); | ||
2595 | |||
2596 | /* Keep the cgroup alive */ | ||
2597 | get_css_set(cg); | ||
2598 | mutex_unlock(&cgroup_mutex); | ||
2599 | |||
2600 | /* Now do the VFS work to create a cgroup */ | ||
2601 | inode = parent->dentry->d_inode; | ||
2602 | |||
2603 | /* Hold the parent directory mutex across this operation to | ||
2604 | * stop anyone else deleting the new cgroup */ | ||
2605 | mutex_lock(&inode->i_mutex); | ||
2606 | dentry = lookup_one_len(nodename, parent->dentry, strlen(nodename)); | ||
2607 | if (IS_ERR(dentry)) { | ||
2608 | printk(KERN_INFO | ||
2609 | "Couldn't allocate dentry for %s: %ld\n", nodename, | ||
2610 | PTR_ERR(dentry)); | ||
2611 | ret = PTR_ERR(dentry); | ||
2612 | goto out_release; | ||
2613 | } | ||
2614 | |||
2615 | /* Create the cgroup directory, which also creates the cgroup */ | ||
2616 | ret = vfs_mkdir(inode, dentry, S_IFDIR | 0755); | ||
2617 | child = __d_cgrp(dentry); | ||
2618 | dput(dentry); | ||
2619 | if (ret) { | ||
2620 | printk(KERN_INFO | ||
2621 | "Failed to create cgroup %s: %d\n", nodename, | ||
2622 | ret); | ||
2623 | goto out_release; | ||
2624 | } | ||
2625 | |||
2626 | if (!child) { | ||
2627 | printk(KERN_INFO | ||
2628 | "Couldn't find new cgroup %s\n", nodename); | ||
2629 | ret = -ENOMEM; | ||
2630 | goto out_release; | ||
2631 | } | ||
2632 | |||
2633 | /* The cgroup now exists. Retake cgroup_mutex and check | ||
2634 | * that we're still in the same state that we thought we | ||
2635 | * were. */ | ||
2636 | mutex_lock(&cgroup_mutex); | ||
2637 | if ((root != subsys->root) || | ||
2638 | (parent != task_cgroup(tsk, subsys->subsys_id))) { | ||
2639 | /* Aargh, we raced ... */ | ||
2640 | mutex_unlock(&inode->i_mutex); | ||
2641 | put_css_set(cg); | ||
2642 | |||
2643 | deactivate_super(parent->root->sb); | ||
2644 | /* The cgroup is still accessible in the VFS, but | ||
2645 | * we're not going to try to rmdir() it at this | ||
2646 | * point. */ | ||
2647 | printk(KERN_INFO | ||
2648 | "Race in cgroup_clone() - leaking cgroup %s\n", | ||
2649 | nodename); | ||
2650 | goto again; | ||
2651 | } | ||
2652 | |||
2653 | /* do any required auto-setup */ | ||
2654 | for_each_subsys(root, ss) { | ||
2655 | if (ss->post_clone) | ||
2656 | ss->post_clone(ss, child); | ||
2657 | } | ||
2658 | |||
2659 | /* All seems fine. Finish by moving the task into the new cgroup */ | ||
2660 | ret = attach_task(child, tsk); | ||
2661 | mutex_unlock(&cgroup_mutex); | ||
2662 | |||
2663 | out_release: | ||
2664 | mutex_unlock(&inode->i_mutex); | ||
2665 | |||
2666 | mutex_lock(&cgroup_mutex); | ||
2667 | put_css_set(cg); | ||
2668 | mutex_unlock(&cgroup_mutex); | ||
2669 | deactivate_super(parent->root->sb); | ||
2670 | return ret; | ||
2671 | } | ||
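/*
 * The intended caller is the ns (nsproxy) subsystem: when a task
 * unshares a namespace it is moved into a freshly cloned child
 * cgroup named "node_<pid>", roughly (assuming the ns subsystem is
 * built in as ns_subsys):
 *
 *	err = cgroup_clone(tsk, &ns_subsys);
 */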
2672 | |||
2673 | /* | ||
2674 | * See if "cgrp" is a descendant of the current task's cgroup in | ||
2675 | * the appropriate hierarchy | ||
2676 | * | ||
2677 | * If we are sending in dummytop, then presumably we are creating | ||
2678 | * the top cgroup in the subsystem. | ||
2679 | * | ||
2680 | * Called only by the ns (nsproxy) cgroup. | ||
2681 | */ | ||
2682 | int cgroup_is_descendant(const struct cgroup *cgrp) | ||
2683 | { | ||
2684 | int ret; | ||
2685 | struct cgroup *target; | ||
2686 | int subsys_id; | ||
2687 | |||
2688 | if (cgrp == dummytop) | ||
2689 | return 1; | ||
2690 | |||
2691 | get_first_subsys(cgrp, NULL, &subsys_id); | ||
2692 | target = task_cgroup(current, subsys_id); | ||
2693 | while (cgrp != target && cgrp != cgrp->top_cgroup) | ||
2694 | cgrp = cgrp->parent; | ||
2695 | ret = (cgrp == target); | ||
2696 | return ret; | ||
2697 | } | ||
2698 | |||
2699 | static void check_for_release(struct cgroup *cgrp) | ||
2700 | { | ||
2701 | /* All of these checks rely on RCU to keep the cgroup | ||
2702 | * structure alive */ | ||
2703 | if (cgroup_is_releasable(cgrp) && !atomic_read(&cgrp->count) | ||
2704 | && list_empty(&cgrp->children) && !cgroup_has_css_refs(cgrp)) { | ||
2705 | /* Control Group is currently removable. If it's not | ||
2706 | * already queued for a userspace notification, queue | ||
2707 | * it now */ | ||
2708 | int need_schedule_work = 0; | ||
2709 | spin_lock(&release_list_lock); | ||
2710 | if (!cgroup_is_removed(cgrp) && | ||
2711 | list_empty(&cgrp->release_list)) { | ||
2712 | list_add(&cgrp->release_list, &release_list); | ||
2713 | need_schedule_work = 1; | ||
2714 | } | ||
2715 | spin_unlock(&release_list_lock); | ||
2716 | if (need_schedule_work) | ||
2717 | schedule_work(&release_agent_work); | ||
2718 | } | ||
2719 | } | ||
2720 | |||
2721 | void __css_put(struct cgroup_subsys_state *css) | ||
2722 | { | ||
2723 | struct cgroup *cgrp = css->cgroup; | ||
2724 | rcu_read_lock(); | ||
2725 | if (atomic_dec_and_test(&css->refcnt) && notify_on_release(cgrp)) { | ||
2726 | set_bit(CGRP_RELEASABLE, &cgrp->flags); | ||
2727 | check_for_release(cgrp); | ||
2728 | } | ||
2729 | rcu_read_unlock(); | ||
2730 | } | ||
2731 | |||
2732 | /* | ||
2733 | * Notify userspace when a cgroup is released, by running the | ||
2734 | * configured release agent with the name of the cgroup (path | ||
2735 | * relative to the root of cgroup file system) as the argument. | ||
2736 | * | ||
2737 | * Most likely, this user command will try to rmdir this cgroup. | ||
2738 | * | ||
2739 | * This races with the possibility that some other task will be | ||
2740 | * attached to this cgroup before it is removed, or that some other | ||
2741 | * user task will 'mkdir' a child cgroup of this cgroup. That's ok. | ||
2742 | * The presumed 'rmdir' will fail quietly if this cgroup is no longer | ||
2743 | * unused, and this cgroup will be reprieved from its death sentence, | ||
2744 | * to continue to serve a useful existence. Next time it's released, | ||
2745 | * we will get notified again, if it still has 'notify_on_release' set. | ||
2746 | * | ||
2747 | * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which | ||
2748 | * means only wait until the task is successfully execve()'d. The | ||
2749 | * separate release agent task is forked by call_usermodehelper(), | ||
2750 | * then control in this thread returns here, without waiting for the | ||
2751 | * release agent task. We don't bother to wait because the caller of | ||
2752 | * this routine has no use for the exit status of the release agent | ||
2753 | * task, so no sense holding our caller up for that. | ||
2754 | * | ||
2755 | */ | ||
2756 | |||
2757 | static void cgroup_release_agent(struct work_struct *work) | ||
2758 | { | ||
2759 | BUG_ON(work != &release_agent_work); | ||
2760 | mutex_lock(&cgroup_mutex); | ||
2761 | spin_lock(&release_list_lock); | ||
2762 | while (!list_empty(&release_list)) { | ||
2763 | char *argv[3], *envp[3]; | ||
2764 | int i; | ||
2765 | char *pathbuf; | ||
2766 | struct cgroup *cgrp = list_entry(release_list.next, | ||
2767 | struct cgroup, | ||
2768 | release_list); | ||
2769 | list_del_init(&cgrp->release_list); | ||
2770 | spin_unlock(&release_list_lock); | ||
2771 | pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
2772 | if (!pathbuf) { | ||
2773 | spin_lock(&release_list_lock); | ||
2774 | continue; | ||
2775 | } | ||
2776 | |||
2777 | if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0) { | ||
2778 | kfree(pathbuf); | ||
2779 | spin_lock(&release_list_lock); | ||
2780 | continue; | ||
2781 | } | ||
2782 | |||
2783 | i = 0; | ||
2784 | argv[i++] = cgrp->root->release_agent_path; | ||
2785 | argv[i++] = (char *)pathbuf; | ||
2786 | argv[i] = NULL; | ||
2787 | |||
2788 | i = 0; | ||
2789 | /* minimal command environment */ | ||
2790 | envp[i++] = "HOME=/"; | ||
2791 | envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; | ||
2792 | envp[i] = NULL; | ||
2793 | |||
2794 | /* Drop the lock while we invoke the usermode helper, | ||
2795 | * since the exec could involve hitting disk and hence | ||
2796 | * be a slow process */ | ||
2797 | mutex_unlock(&cgroup_mutex); | ||
2798 | call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC); | ||
2799 | kfree(pathbuf); | ||
2800 | mutex_lock(&cgroup_mutex); | ||
2801 | spin_lock(&release_list_lock); | ||
2802 | } | ||
2803 | spin_unlock(&release_list_lock); | ||
2804 | mutex_unlock(&cgroup_mutex); | ||
2805 | } | ||
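/*
 * From the agent's perspective, each invocation above looks like the
 * following ("/sbin/cgroup_release" stands for whatever path the
 * administrator wrote into the hierarchy's release_agent file):
 *
 *	argv[0]: "/sbin/cgroup_release"
 *	argv[1]: "/mygroup/child"   (cgroup path relative to the mount)
 *	envp:    HOME=/  PATH=/sbin:/bin:/usr/sbin:/usr/bin
 *
 * A typical agent would then do the equivalent of
 * rmdir("<mountpoint>/mygroup/child") and exit.
 */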