Diffstat (limited to 'kernel/cgroup.c')

-rw-r--r--  kernel/cgroup.c | 1113
1 file changed, 807 insertions(+), 306 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index c7ece8f027f..7ccba4bc5e3 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -23,6 +23,7 @@
  */
 
 #include <linux/cgroup.h>
+#include <linux/ctype.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
 #include <linux/kernel.h>
@@ -48,6 +49,8 @@
 #include <linux/namei.h>
 #include <linux/smp_lock.h>
 #include <linux/pid_namespace.h>
+#include <linux/idr.h>
+#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
 
 #include <asm/atomic.h>
 
@@ -60,6 +63,8 @@ static struct cgroup_subsys *subsys[] = {
 #include <linux/cgroup_subsys.h>
 };
 
+#define MAX_CGROUP_ROOT_NAMELEN 64
+
 /*
  * A cgroupfs_root represents the root of a cgroup hierarchy,
  * and may be associated with a superblock to form an active
@@ -74,6 +79,9 @@ struct cgroupfs_root {
	 */
	unsigned long subsys_bits;
 
+	/* Unique id for this hierarchy. */
+	int hierarchy_id;
+
	/* The bitmask of subsystems currently attached to this hierarchy */
	unsigned long actual_subsys_bits;
 
@@ -94,6 +102,9 @@ struct cgroupfs_root {
 
	/* The path to use for release notifications. */
	char release_agent_path[PATH_MAX];
+
+	/* The name for this hierarchy - may be empty */
+	char name[MAX_CGROUP_ROOT_NAMELEN];
 };
 
 /*
@@ -141,6 +152,10 @@ struct css_id {
 static LIST_HEAD(roots);
 static int root_count;
 
+static DEFINE_IDA(hierarchy_ida);
+static int next_hierarchy_id;
+static DEFINE_SPINLOCK(hierarchy_id_lock);
+
 /* dummytop is a shorthand for the dummy hierarchy's top cgroup */
 #define dummytop (&rootnode.top_cgroup)
 
@@ -201,6 +216,7 @@ struct cg_cgroup_link {
	 * cgroup, anchored on cgroup->css_sets
	 */
	struct list_head cgrp_link_list;
+	struct cgroup *cgrp;
	/*
	 * List running through cg_cgroup_links pointing at a
	 * single css_set object, anchored on css_set->cg_links
@@ -227,8 +243,11 @@ static int cgroup_subsys_init_idr(struct cgroup_subsys *ss);
 static DEFINE_RWLOCK(css_set_lock);
 static int css_set_count;
 
-/* hash table for cgroup groups. This improves the performance to
- * find an existing css_set */
+/*
+ * hash table for cgroup groups. This improves the performance to find
+ * an existing css_set. This hash doesn't (currently) take into
+ * account cgroups in empty hierarchies.
+ */
 #define CSS_SET_HASH_BITS	7
 #define CSS_SET_TABLE_SIZE	(1 << CSS_SET_HASH_BITS)
 static struct hlist_head css_set_table[CSS_SET_TABLE_SIZE];
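An aside on the lookup this table serves: css_set_hash() (its tail is visible at the top of the next hunk) folds the array of subsystem-state pointers into an index into the 128-bucket css_set_table, and find_existing_css_set() then confirms candidates bucket by bucket. As the revised comment notes, cgroups in subsystem-less hierarchies do not influence the hash, which is exactly why the compare_css_sets() helper added further down must disambiguate them. A minimal user-space sketch of the same fixed-table pattern, with illustrative names and a stand-in hash mix (the kernel's real mixing differs):

    #include <stddef.h>
    #include <stdint.h>

    #define HASH_BITS 7
    #define TABLE_SIZE (1 << HASH_BITS)
    #define NSUBSYS 4

    struct entry {
            const void *subsys[NSUBSYS];    /* like css_set->subsys[] */
            struct entry *next;             /* singly linked bucket chain */
    };

    static struct entry *table[TABLE_SIZE];

    static size_t hash_subsys(const void *const css[])
    {
            uintptr_t h = 0;
            for (int i = 0; i < NSUBSYS; i++)
                    h = h * 33 + (uintptr_t)css[i]; /* mix each pointer */
            return h & (TABLE_SIZE - 1);            /* fold into a bucket */
    }

    static struct entry *find(const void *const css[NSUBSYS])
    {
            for (struct entry *e = table[hash_subsys(css)]; e; e = e->next) {
                    int match = 1;
                    for (int i = 0; i < NSUBSYS; i++)
                            if (e->subsys[i] != css[i])
                                    match = 0;
                    if (match)
                            return e;       /* same pointers, same set */
            }
            return NULL;
    }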
@@ -248,48 +267,22 @@ static struct hlist_head *css_set_hash(struct cgroup_subsys_state *css[])
	return &css_set_table[index];
 }
 
+static void free_css_set_rcu(struct rcu_head *obj)
+{
+	struct css_set *cg = container_of(obj, struct css_set, rcu_head);
+	kfree(cg);
+}
+
 /* We don't maintain the lists running through each css_set to its
  * task until after the first call to cgroup_iter_start(). This
  * reduces the fork()/exit() overhead for people who have cgroups
  * compiled into their kernel but not actually in use */
 static int use_task_css_set_links __read_mostly;
 
-/* When we create or destroy a css_set, the operation simply
- * takes/releases a reference count on all the cgroups referenced
- * by subsystems in this css_set. This can end up multiple-counting
- * some cgroups, but that's OK - the ref-count is just a
- * busy/not-busy indicator; ensuring that we only count each cgroup
- * once would require taking a global lock to ensure that no
- * subsystems moved between hierarchies while we were doing so.
- *
- * Possible TODO: decide at boot time based on the number of
- * registered subsystems and the number of CPUs or NUMA nodes whether
- * it's better for performance to ref-count every subsystem, or to
- * take a global lock and only add one ref count to each hierarchy.
- */
-
-/*
- * unlink a css_set from the list and free it
- */
-static void unlink_css_set(struct css_set *cg)
+static void __put_css_set(struct css_set *cg, int taskexit)
 {
	struct cg_cgroup_link *link;
	struct cg_cgroup_link *saved_link;
-
-	hlist_del(&cg->hlist);
-	css_set_count--;
-
-	list_for_each_entry_safe(link, saved_link, &cg->cg_links,
-				 cg_link_list) {
-		list_del(&link->cg_link_list);
-		list_del(&link->cgrp_link_list);
-		kfree(link);
-	}
-}
-
-static void __put_css_set(struct css_set *cg, int taskexit)
-{
-	int i;
	/*
	 * Ensure that the refcount doesn't hit zero while any readers
	 * can see it. Similar to atomic_dec_and_lock(), but for an
@@ -302,21 +295,28 @@ static void __put_css_set(struct css_set *cg, int taskexit)
		write_unlock(&css_set_lock);
		return;
	}
-	unlink_css_set(cg);
-	write_unlock(&css_set_lock);
 
-	rcu_read_lock();
-	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
-		struct cgroup *cgrp = rcu_dereference(cg->subsys[i]->cgroup);
+	/* This css_set is dead. unlink it and release cgroup refcounts */
+	hlist_del(&cg->hlist);
+	css_set_count--;
+
+	list_for_each_entry_safe(link, saved_link, &cg->cg_links,
+				 cg_link_list) {
+		struct cgroup *cgrp = link->cgrp;
+		list_del(&link->cg_link_list);
+		list_del(&link->cgrp_link_list);
		if (atomic_dec_and_test(&cgrp->count) &&
		    notify_on_release(cgrp)) {
			if (taskexit)
				set_bit(CGRP_RELEASABLE, &cgrp->flags);
			check_for_release(cgrp);
		}
+
+		kfree(link);
	}
-	rcu_read_unlock();
-	kfree(cg);
+
+	write_unlock(&css_set_lock);
+	call_rcu(&cg->rcu_head, free_css_set_rcu);
 }
 
 /*
@@ -338,6 +338,78 @@ static inline void put_css_set_taskexit(struct css_set *cg)
 }
 
 /*
+ * compare_css_sets - helper function for find_existing_css_set().
+ * @cg: candidate css_set being tested
+ * @old_cg: existing css_set for a task
+ * @new_cgrp: cgroup that's being entered by the task
+ * @template: desired set of css pointers in css_set (pre-calculated)
+ *
+ * Returns true if "cg" matches "old_cg" except for the hierarchy
+ * which "new_cgrp" belongs to, for which it should match "new_cgrp".
+ */
+static bool compare_css_sets(struct css_set *cg,
+			     struct css_set *old_cg,
+			     struct cgroup *new_cgrp,
+			     struct cgroup_subsys_state *template[])
+{
+	struct list_head *l1, *l2;
+
+	if (memcmp(template, cg->subsys, sizeof(cg->subsys))) {
+		/* Not all subsystems matched */
+		return false;
+	}
+
+	/*
+	 * Compare cgroup pointers in order to distinguish between
+	 * different cgroups in hierarchies with no subsystems. We
+	 * could get by with just this check alone (and skip the
+	 * memcmp above) but on most setups the memcmp check will
+	 * avoid the need for this more expensive check on almost all
+	 * candidates.
+	 */
+
+	l1 = &cg->cg_links;
+	l2 = &old_cg->cg_links;
+	while (1) {
+		struct cg_cgroup_link *cgl1, *cgl2;
+		struct cgroup *cg1, *cg2;
+
+		l1 = l1->next;
+		l2 = l2->next;
+		/* See if we reached the end - both lists are equal length. */
+		if (l1 == &cg->cg_links) {
+			BUG_ON(l2 != &old_cg->cg_links);
+			break;
+		} else {
+			BUG_ON(l2 == &old_cg->cg_links);
+		}
+		/* Locate the cgroups associated with these links. */
+		cgl1 = list_entry(l1, struct cg_cgroup_link, cg_link_list);
+		cgl2 = list_entry(l2, struct cg_cgroup_link, cg_link_list);
+		cg1 = cgl1->cgrp;
+		cg2 = cgl2->cgrp;
+		/* Hierarchies should be linked in the same order. */
+		BUG_ON(cg1->root != cg2->root);
+
+		/*
+		 * If this hierarchy is the hierarchy of the cgroup
+		 * that's changing, then we need to check that this
+		 * css_set points to the new cgroup; if it's any other
+		 * hierarchy, then this css_set should point to the
+		 * same cgroup as the old css_set.
+		 */
+		if (cg1->root == new_cgrp->root) {
+			if (cg1 != new_cgrp)
+				return false;
+		} else {
+			if (cg1 != cg2)
+				return false;
+		}
+	}
+	return true;
+}
+
+/*
  * find_existing_css_set() is a helper for
  * find_css_set(), and checks to see whether an existing
  * css_set is suitable.
@@ -378,10 +450,11 @@ static struct css_set *find_existing_css_set(
 
	hhead = css_set_hash(template);
	hlist_for_each_entry(cg, node, hhead, hlist) {
-		if (!memcmp(template, cg->subsys, sizeof(cg->subsys))) {
-			/* All subsystems matched */
-			return cg;
-		}
+		if (!compare_css_sets(cg, oldcg, cgrp, template))
+			continue;
+
+		/* This css_set matches what we need */
+		return cg;
	}
 
	/* No existing cgroup group matched */
@@ -435,8 +508,14 @@ static void link_css_set(struct list_head *tmp_cg_links,
	link = list_first_entry(tmp_cg_links, struct cg_cgroup_link,
				cgrp_link_list);
	link->cg = cg;
+	link->cgrp = cgrp;
+	atomic_inc(&cgrp->count);
	list_move(&link->cgrp_link_list, &cgrp->css_sets);
-	list_add(&link->cg_link_list, &cg->cg_links);
+	/*
+	 * Always add links to the tail of the list so that the list
+	 * is sorted by order of hierarchy creation
+	 */
+	list_add_tail(&link->cg_link_list, &cg->cg_links);
 }
 
 /*
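The switch from list_add() to list_add_tail() here is what makes the lockstep walk in compare_css_sets() (earlier in this diff) valid: every css_set's cg_links list now enumerates hierarchies in one global order, creation order, so the i-th link on each side refers to the same hierarchy. A simplified user-space analogue of that invariant, with stand-in types:

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    /* Stand-in for a cg_cgroup_link: which hierarchy, which cgroup in it. */
    struct link {
            int hierarchy;          /* like cgrp->root */
            const char *cgroup;     /* like link->cgrp, compared by name here */
    };

    /*
     * Lockstep walk over two link arrays. Valid only because both sides
     * were appended to in the same (creation) order - the invariant that
     * list_add_tail() establishes above.
     */
    static bool links_match(const struct link *a, const struct link *b,
                            size_t n)
    {
            for (size_t i = 0; i < n; i++) {
                    /* position i names the same hierarchy on both sides */
                    if (a[i].hierarchy != b[i].hierarchy)
                            return false;
                    if (strcmp(a[i].cgroup, b[i].cgroup) != 0)
                            return false;
            }
            return true;
    }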
@@ -451,11 +530,11 @@ static struct css_set *find_css_set(
 {
	struct css_set *res;
	struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];
-	int i;
 
	struct list_head tmp_cg_links;
 
	struct hlist_head *hhead;
+	struct cg_cgroup_link *link;
 
	/* First see if we already have a cgroup group that matches
	 * the desired set */
@@ -489,20 +568,12 @@ static struct css_set *find_css_set(
 
	write_lock(&css_set_lock);
	/* Add reference counts and links from the new css_set. */
-	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
-		struct cgroup *cgrp = res->subsys[i]->cgroup;
-		struct cgroup_subsys *ss = subsys[i];
-		atomic_inc(&cgrp->count);
-		/*
-		 * We want to add a link once per cgroup, so we
-		 * only do it for the first subsystem in each
-		 * hierarchy
-		 */
-		if (ss->root->subsys_list.next == &ss->sibling)
-			link_css_set(&tmp_cg_links, res, cgrp);
+	list_for_each_entry(link, &oldcg->cg_links, cg_link_list) {
+		struct cgroup *c = link->cgrp;
+		if (c->root == cgrp->root)
+			c = cgrp;
+		link_css_set(&tmp_cg_links, res, c);
	}
-	if (list_empty(&rootnode.subsys_list))
-		link_css_set(&tmp_cg_links, res, dummytop);
 
	BUG_ON(!list_empty(&tmp_cg_links));
 
@@ -518,6 +589,41 @@ static struct css_set *find_css_set(
 }
 
 /*
+ * Return the cgroup for "task" from the given hierarchy. Must be
+ * called with cgroup_mutex held.
+ */
+static struct cgroup *task_cgroup_from_root(struct task_struct *task,
+					    struct cgroupfs_root *root)
+{
+	struct css_set *css;
+	struct cgroup *res = NULL;
+
+	BUG_ON(!mutex_is_locked(&cgroup_mutex));
+	read_lock(&css_set_lock);
+	/*
+	 * No need to lock the task - since we hold cgroup_mutex the
+	 * task can't change groups, so the only thing that can happen
+	 * is that it exits and its css is set back to init_css_set.
+	 */
+	css = task->cgroups;
+	if (css == &init_css_set) {
+		res = &root->top_cgroup;
+	} else {
+		struct cg_cgroup_link *link;
+		list_for_each_entry(link, &css->cg_links, cg_link_list) {
+			struct cgroup *c = link->cgrp;
+			if (c->root == root) {
+				res = c;
+				break;
+			}
+		}
+	}
+	read_unlock(&css_set_lock);
+	BUG_ON(!res);
+	return res;
+}
+
+/*
  * There is one global cgroup mutex. We also require taking
  * task_lock() when dereferencing a task's cgroup subsys pointers.
  * See "The task_lock() exception", at the end of this comment.
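This helper replaces the get_first_subsys() dance removed later in this diff: callers no longer need any attached subsystem to resolve a task's position in a hierarchy. Its first user is cgroup_attach_task() below; this fragment (not standalone code) shows the intended call pattern:

    /* Fragment: resolve where 'tsk' currently sits in cgrp's hierarchy.
     * cgroup_mutex must be held, as the BUG_ON above enforces. */
    struct cgroup *oldcgrp;

    mutex_lock(&cgroup_mutex);
    oldcgrp = task_cgroup_from_root(tsk, cgrp->root);
    mutex_unlock(&cgroup_mutex);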
@@ -596,7 +702,7 @@ void cgroup_unlock(void)
 static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode);
 static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
 static int cgroup_populate_dir(struct cgroup *cgrp);
-static struct inode_operations cgroup_dir_inode_operations;
+static const struct inode_operations cgroup_dir_inode_operations;
 static struct file_operations proc_cgroupstats_operations;
 
 static struct backing_dev_info cgroup_backing_dev_info = {
@@ -677,6 +783,12 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
		 */
		deactivate_super(cgrp->root->sb);
 
+		/*
+		 * if we're getting rid of the cgroup, refcount should ensure
+		 * that there are no pidlists left.
+		 */
+		BUG_ON(!list_empty(&cgrp->pidlists));
+
		call_rcu(&cgrp->rcu_head, free_cgroup_rcu);
	}
	iput(inode);
@@ -841,6 +953,8 @@ static int cgroup_show_options(struct seq_file *seq, struct vfsmount *vfs)
		seq_puts(seq, ",noprefix");
	if (strlen(root->release_agent_path))
		seq_printf(seq, ",release_agent=%s", root->release_agent_path);
+	if (strlen(root->name))
+		seq_printf(seq, ",name=%s", root->name);
	mutex_unlock(&cgroup_mutex);
	return 0;
 }
@@ -849,6 +963,12 @@ struct cgroup_sb_opts {
	unsigned long subsys_bits;
	unsigned long flags;
	char *release_agent;
+	char *name;
+	/* User explicitly requested empty subsystem */
+	bool none;
+
+	struct cgroupfs_root *new_root;
+
 };
 
 /* Convert a hierarchy specifier into a bitmask of subsystems and
@@ -863,9 +983,7 @@ static int parse_cgroupfs_options(char *data,
	mask = ~(1UL << cpuset_subsys_id);
 #endif
 
-	opts->subsys_bits = 0;
-	opts->flags = 0;
-	opts->release_agent = NULL;
+	memset(opts, 0, sizeof(*opts));
 
	while ((token = strsep(&o, ",")) != NULL) {
		if (!*token)
@@ -879,17 +997,42 @@ static int parse_cgroupfs_options(char *data,
				if (!ss->disabled)
					opts->subsys_bits |= 1ul << i;
			}
+		} else if (!strcmp(token, "none")) {
+			/* Explicitly have no subsystems */
+			opts->none = true;
		} else if (!strcmp(token, "noprefix")) {
			set_bit(ROOT_NOPREFIX, &opts->flags);
		} else if (!strncmp(token, "release_agent=", 14)) {
			/* Specifying two release agents is forbidden */
			if (opts->release_agent)
				return -EINVAL;
-			opts->release_agent = kzalloc(PATH_MAX, GFP_KERNEL);
+			opts->release_agent =
+				kstrndup(token + 14, PATH_MAX, GFP_KERNEL);
			if (!opts->release_agent)
				return -ENOMEM;
-			strncpy(opts->release_agent, token + 14, PATH_MAX - 1);
-			opts->release_agent[PATH_MAX - 1] = 0;
+		} else if (!strncmp(token, "name=", 5)) {
+			int i;
+			const char *name = token + 5;
+			/* Can't specify an empty name */
+			if (!strlen(name))
+				return -EINVAL;
+			/* Must match [\w.-]+ */
+			for (i = 0; i < strlen(name); i++) {
+				char c = name[i];
+				if (isalnum(c))
+					continue;
+				if ((c == '.') || (c == '-') || (c == '_'))
+					continue;
+				return -EINVAL;
+			}
+			/* Specifying two names is forbidden */
+			if (opts->name)
+				return -EINVAL;
+			opts->name = kstrndup(name,
+					      MAX_CGROUP_ROOT_NAMELEN,
+					      GFP_KERNEL);
+			if (!opts->name)
+				return -ENOMEM;
		} else {
			struct cgroup_subsys *ss;
			int i;
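From user space the two new tokens combine as ordinary mount options; an empty hierarchy is only accepted when it carries a name, which the consistency checks in the next hunks enforce. A hedged sketch using mount(2) - the mount point and hierarchy name are made up:

    #include <sys/mount.h>

    /* Mount a subsystem-free cgroup hierarchy named "mygrp".
     * "/mnt/cgroup" is an illustrative, pre-existing mount point. */
    int mount_named_empty_hierarchy(void)
    {
            return mount("none", "/mnt/cgroup", "cgroup", 0,
                         "none,name=mygrp");
    }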
@@ -906,6 +1049,8 @@ static int parse_cgroupfs_options(char *data,
		}
	}
 
+	/* Consistency checks */
+
	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
@@ -915,8 +1060,16 @@ static int parse_cgroupfs_options(char *data,
	    (opts->subsys_bits & mask))
		return -EINVAL;
 
-	/* We can't have an empty hierarchy */
-	if (!opts->subsys_bits)
+
+	/* Can't specify "none" and some subsystems */
+	if (opts->subsys_bits && opts->none)
+		return -EINVAL;
+
+	/*
+	 * We either have to specify by name or by subsystems. (So all
+	 * empty hierarchies must have a name).
+	 */
+	if (!opts->subsys_bits && !opts->name)
		return -EINVAL;
 
	return 0;
@@ -944,6 +1097,12 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
		goto out_unlock;
	}
 
+	/* Don't allow name to change at remount */
+	if (opts.name && strcmp(opts.name, root->name)) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
	ret = rebind_subsystems(root, opts.subsys_bits);
	if (ret)
		goto out_unlock;
@@ -955,13 +1114,14 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
		strcpy(root->release_agent_path, opts.release_agent);
 out_unlock:
	kfree(opts.release_agent);
+	kfree(opts.name);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
	unlock_kernel();
	return ret;
 }
 
-static struct super_operations cgroup_ops = {
+static const struct super_operations cgroup_ops = {
	.statfs = simple_statfs,
	.drop_inode = generic_delete_inode,
	.show_options = cgroup_show_options,
@@ -974,9 +1134,10 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp)
	INIT_LIST_HEAD(&cgrp->children);
	INIT_LIST_HEAD(&cgrp->css_sets);
	INIT_LIST_HEAD(&cgrp->release_list);
-	INIT_LIST_HEAD(&cgrp->pids_list);
-	init_rwsem(&cgrp->pids_mutex);
+	INIT_LIST_HEAD(&cgrp->pidlists);
+	mutex_init(&cgrp->pidlist_mutex);
 }
+
 static void init_cgroup_root(struct cgroupfs_root *root)
 {
	struct cgroup *cgrp = &root->top_cgroup;
@@ -988,33 +1149,106 @@ static void init_cgroup_root(struct cgroupfs_root *root)
	init_cgroup_housekeeping(cgrp);
 }
 
+static bool init_root_id(struct cgroupfs_root *root)
+{
+	int ret = 0;
+
+	do {
+		if (!ida_pre_get(&hierarchy_ida, GFP_KERNEL))
+			return false;
+		spin_lock(&hierarchy_id_lock);
+		/* Try to allocate the next unused ID */
+		ret = ida_get_new_above(&hierarchy_ida, next_hierarchy_id,
+					&root->hierarchy_id);
+		if (ret == -ENOSPC)
+			/* Try again starting from 0 */
+			ret = ida_get_new(&hierarchy_ida, &root->hierarchy_id);
+		if (!ret) {
+			next_hierarchy_id = root->hierarchy_id + 1;
+		} else if (ret != -EAGAIN) {
+			/* Can only get here if the 31-bit IDR is full ... */
+			BUG_ON(ret);
+		}
+		spin_unlock(&hierarchy_id_lock);
+	} while (ret);
+	return true;
+}
+
 static int cgroup_test_super(struct super_block *sb, void *data)
 {
-	struct cgroupfs_root *new = data;
+	struct cgroup_sb_opts *opts = data;
	struct cgroupfs_root *root = sb->s_fs_info;
 
-	/* First check subsystems */
-	if (new->subsys_bits != root->subsys_bits)
+	/* If we asked for a name then it must match */
+	if (opts->name && strcmp(opts->name, root->name))
		return 0;
 
-	/* Next check flags */
-	if (new->flags != root->flags)
+	/*
+	 * If we asked for subsystems (or explicitly for no
+	 * subsystems) then they must match
+	 */
+	if ((opts->subsys_bits || opts->none)
+	    && (opts->subsys_bits != root->subsys_bits))
		return 0;
 
	return 1;
 }
 
+static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts)
+{
+	struct cgroupfs_root *root;
+
+	if (!opts->subsys_bits && !opts->none)
+		return NULL;
+
+	root = kzalloc(sizeof(*root), GFP_KERNEL);
+	if (!root)
+		return ERR_PTR(-ENOMEM);
+
+	if (!init_root_id(root)) {
+		kfree(root);
+		return ERR_PTR(-ENOMEM);
+	}
+	init_cgroup_root(root);
+
+	root->subsys_bits = opts->subsys_bits;
+	root->flags = opts->flags;
+	if (opts->release_agent)
+		strcpy(root->release_agent_path, opts->release_agent);
+	if (opts->name)
+		strcpy(root->name, opts->name);
+	return root;
+}
+
+static void cgroup_drop_root(struct cgroupfs_root *root)
+{
+	if (!root)
+		return;
+
+	BUG_ON(!root->hierarchy_id);
+	spin_lock(&hierarchy_id_lock);
+	ida_remove(&hierarchy_ida, root->hierarchy_id);
+	spin_unlock(&hierarchy_id_lock);
+	kfree(root);
+}
+
 static int cgroup_set_super(struct super_block *sb, void *data)
 {
	int ret;
-	struct cgroupfs_root *root = data;
+	struct cgroup_sb_opts *opts = data;
+
+	/* If we don't have a new root, we can't set up a new sb */
+	if (!opts->new_root)
+		return -EINVAL;
+
+	BUG_ON(!opts->subsys_bits && !opts->none);
 
	ret = set_anon_super(sb, NULL);
	if (ret)
		return ret;
 
-	sb->s_fs_info = root;
-	root->sb = sb;
+	sb->s_fs_info = opts->new_root;
+	opts->new_root->sb = sb;
 
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
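For orientation: sget() (fs/super.c) walks the file system type's existing superblocks and offers each to cgroup_test_super(); the first one for which it returns 1 is reused, and only if none matches does sget() allocate a fresh superblock and hand it to cgroup_set_super(). That contract explains the shape of both callbacks above - test now matches on name and subsystem bits rather than comparing against a pre-built root, and set can insist on opts->new_root being present, since it only runs for genuinely new hierarchies.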
@@ -1051,48 +1285,43 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
			 void *data, struct vfsmount *mnt)
 {
	struct cgroup_sb_opts opts;
+	struct cgroupfs_root *root;
	int ret = 0;
	struct super_block *sb;
-	struct cgroupfs_root *root;
-	struct list_head tmp_cg_links;
+	struct cgroupfs_root *new_root;
 
	/* First find the desired set of subsystems */
	ret = parse_cgroupfs_options(data, &opts);
-	if (ret) {
-		kfree(opts.release_agent);
-		return ret;
-	}
-
-	root = kzalloc(sizeof(*root), GFP_KERNEL);
-	if (!root) {
-		kfree(opts.release_agent);
-		return -ENOMEM;
-	}
+	if (ret)
+		goto out_err;
 
-	init_cgroup_root(root);
-	root->subsys_bits = opts.subsys_bits;
-	root->flags = opts.flags;
-	if (opts.release_agent) {
-		strcpy(root->release_agent_path, opts.release_agent);
-		kfree(opts.release_agent);
+	/*
+	 * Allocate a new cgroup root. We may not need it if we're
+	 * reusing an existing hierarchy.
+	 */
+	new_root = cgroup_root_from_opts(&opts);
+	if (IS_ERR(new_root)) {
+		ret = PTR_ERR(new_root);
+		goto out_err;
	}
+	opts.new_root = new_root;
 
-	sb = sget(fs_type, cgroup_test_super, cgroup_set_super, root);
-
+	/* Locate an existing or new sb for this hierarchy */
+	sb = sget(fs_type, cgroup_test_super, cgroup_set_super, &opts);
	if (IS_ERR(sb)) {
-		kfree(root);
-		return PTR_ERR(sb);
+		ret = PTR_ERR(sb);
+		cgroup_drop_root(opts.new_root);
+		goto out_err;
	}
 
-	if (sb->s_fs_info != root) {
-		/* Reusing an existing superblock */
-		BUG_ON(sb->s_root == NULL);
-		kfree(root);
-		root = NULL;
-	} else {
-		/* New superblock */
+	root = sb->s_fs_info;
+	BUG_ON(!root);
+	if (root == opts.new_root) {
+		/* We used the new root structure, so this is a new hierarchy */
+		struct list_head tmp_cg_links;
		struct cgroup *root_cgrp = &root->top_cgroup;
		struct inode *inode;
+		struct cgroupfs_root *existing_root;
		int i;
 
		BUG_ON(sb->s_root != NULL);
@@ -1105,6 +1334,18 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
		mutex_lock(&inode->i_mutex);
		mutex_lock(&cgroup_mutex);
 
+		if (strlen(root->name)) {
+			/* Check for name clashes with existing mounts */
+			for_each_active_root(existing_root) {
+				if (!strcmp(existing_root->name, root->name)) {
+					ret = -EBUSY;
+					mutex_unlock(&cgroup_mutex);
+					mutex_unlock(&inode->i_mutex);
+					goto drop_new_super;
+				}
+			}
+		}
+
		/*
		 * We're accessing css_set_count without locking
		 * css_set_lock here, but that's OK - it can only be
@@ -1123,7 +1364,8 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
		if (ret == -EBUSY) {
			mutex_unlock(&cgroup_mutex);
			mutex_unlock(&inode->i_mutex);
-			goto free_cg_links;
+			free_cg_links(&tmp_cg_links);
+			goto drop_new_super;
		}
 
		/* EBUSY should be the only error here */
@@ -1155,17 +1397,27 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
		BUG_ON(root->number_of_cgroups != 1);
 
		cgroup_populate_dir(root_cgrp);
-		mutex_unlock(&inode->i_mutex);
		mutex_unlock(&cgroup_mutex);
+		mutex_unlock(&inode->i_mutex);
+	} else {
+		/*
+		 * We re-used an existing hierarchy - the new root (if
+		 * any) is not needed
+		 */
+		cgroup_drop_root(opts.new_root);
	}
 
	simple_set_mnt(mnt, sb);
+	kfree(opts.release_agent);
+	kfree(opts.name);
	return 0;
 
-free_cg_links:
-	free_cg_links(&tmp_cg_links);
 drop_new_super:
	deactivate_locked_super(sb);
+out_err:
+	kfree(opts.release_agent);
+	kfree(opts.name);
+
	return ret;
 }
 
@@ -1211,7 +1463,7 @@ static void cgroup_kill_sb(struct super_block *sb) {
	mutex_unlock(&cgroup_mutex);
 
	kill_litter_super(sb);
-	kfree(root);
+	cgroup_drop_root(root);
 }
 
 static struct file_system_type cgroup_fs_type = {
@@ -1276,27 +1528,6 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
	return 0;
 }
 
-/*
- * Return the first subsystem attached to a cgroup's hierarchy, and
- * its subsystem id.
- */
-
-static void get_first_subsys(const struct cgroup *cgrp,
-			struct cgroup_subsys_state **css, int *subsys_id)
-{
-	const struct cgroupfs_root *root = cgrp->root;
-	const struct cgroup_subsys *test_ss;
-	BUG_ON(list_empty(&root->subsys_list));
-	test_ss = list_entry(root->subsys_list.next,
-			     struct cgroup_subsys, sibling);
-	if (css) {
-		*css = cgrp->subsys[test_ss->subsys_id];
-		BUG_ON(!*css);
-	}
-	if (subsys_id)
-		*subsys_id = test_ss->subsys_id;
-}
-
 /**
  * cgroup_attach_task - attach task 'tsk' to cgroup 'cgrp'
  * @cgrp: the cgroup the task is attaching to
@@ -1313,18 +1544,15 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
	struct css_set *cg;
	struct css_set *newcg;
	struct cgroupfs_root *root = cgrp->root;
-	int subsys_id;
-
-	get_first_subsys(cgrp, NULL, &subsys_id);
 
	/* Nothing to do if the task is already in that cgroup */
-	oldcgrp = task_cgroup(tsk, subsys_id);
+	oldcgrp = task_cgroup_from_root(tsk, root);
	if (cgrp == oldcgrp)
		return 0;
 
	for_each_subsys(root, ss) {
		if (ss->can_attach) {
-			retval = ss->can_attach(ss, cgrp, tsk);
+			retval = ss->can_attach(ss, cgrp, tsk, false);
			if (retval)
				return retval;
		}
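The new boolean threaded through can_attach() and attach() is never defined in this excerpt; given that this single-task path passes false, it plausibly tells the subsystem whether an entire thread group is being moved at once. A hypothetical subsystem callback matching the widened signature (the parameter name and its meaning are assumptions):

    /* Hypothetical subsystem callback; 'threadgroup' meaning is assumed
     * from the call sites in this diff, which pass false for one task. */
    static int example_can_attach(struct cgroup_subsys *ss,
                                  struct cgroup *cgrp,
                                  struct task_struct *tsk, bool threadgroup)
    {
            if (threadgroup) {
                    /* every thread in tsk's group is about to move */
            }
            return 0;       /* zero allows the attach to proceed */
    }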
@@ -1362,7 +1590,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 
	for_each_subsys(root, ss) {
		if (ss->attach)
-			ss->attach(ss, cgrp, oldcgrp, tsk);
+			ss->attach(ss, cgrp, oldcgrp, tsk, false);
	}
	set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
	synchronize_rcu();
@@ -1423,15 +1651,6 @@ static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
	return ret;
 }
 
-/* The various types of files and directories in a cgroup file system */
-enum cgroup_filetype {
-	FILE_ROOT,
-	FILE_DIR,
-	FILE_TASKLIST,
-	FILE_NOTIFY_ON_RELEASE,
-	FILE_RELEASE_AGENT,
-};
-
 /**
  * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive.
  * @cgrp: the cgroup to be checked for liveness
@@ -1711,7 +1930,7 @@ static struct file_operations cgroup_file_operations = {
	.release = cgroup_file_release,
 };
 
-static struct inode_operations cgroup_dir_inode_operations = {
+static const struct inode_operations cgroup_dir_inode_operations = {
	.lookup = simple_lookup,
	.mkdir = cgroup_mkdir,
	.rmdir = cgroup_rmdir,
@@ -1876,7 +2095,7 @@ int cgroup_task_count(const struct cgroup *cgrp)
  * the start of a css_set
  */
 static void cgroup_advance_iter(struct cgroup *cgrp,
-				struct cgroup_iter *it)
+					  struct cgroup_iter *it)
 {
	struct list_head *l = it->cg_link;
	struct cg_cgroup_link *link;
@@ -2129,7 +2348,7 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
 }
 
 /*
- * Stuff for reading the 'tasks' file.
+ * Stuff for reading the 'tasks'/'procs' files.
  *
  * Reading this file can return large amounts of data if a cgroup has
  * *lots* of attached tasks. So it may need several calls to read(),
@@ -2139,27 +2358,196 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
  */
 
 /*
- * Load into 'pidarray' up to 'npids' of the tasks using cgroup
- * 'cgrp'. Return actual number of pids loaded. No need to
- * task_lock(p) when reading out p->cgroup, since we're in an RCU
- * read section, so the css_set can't go away, and is
- * immutable after creation.
+ * The following two functions "fix" the issue where there are more pids
+ * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
+ * TODO: replace with a kernel-wide solution to this problem
+ */
+#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
+static void *pidlist_allocate(int count)
+{
+	if (PIDLIST_TOO_LARGE(count))
+		return vmalloc(count * sizeof(pid_t));
+	else
+		return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
+}
+static void pidlist_free(void *p)
+{
+	if (is_vmalloc_addr(p))
+		vfree(p);
+	else
+		kfree(p);
+}
+static void *pidlist_resize(void *p, int newcount)
+{
+	void *newlist;
+	/* note: if new alloc fails, old p will still be valid either way */
+	if (is_vmalloc_addr(p)) {
+		newlist = vmalloc(newcount * sizeof(pid_t));
+		if (!newlist)
+			return NULL;
+		memcpy(newlist, p, newcount * sizeof(pid_t));
+		vfree(p);
+	} else {
+		newlist = krealloc(p, newcount * sizeof(pid_t), GFP_KERNEL);
+	}
+	return newlist;
+}
+
+/*
+ * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
+ * If the new stripped list is sufficiently smaller and there's enough memory
+ * to allocate a new buffer, will let go of the unneeded memory. Returns the
+ * number of unique elements.
+ */
+/* is the size difference enough that we should re-allocate the array? */
+#define PIDLIST_REALLOC_DIFFERENCE(old, new) ((old) - PAGE_SIZE >= (new))
+static int pidlist_uniq(pid_t **p, int length)
+{
+	int src, dest = 1;
+	pid_t *list = *p;
+	pid_t *newlist;
+
+	/*
+	 * we presume the 0th element is unique, so i starts at 1. trivial
+	 * edge cases first; no work needs to be done for either
+	 */
+	if (length == 0 || length == 1)
+		return length;
+	/* src and dest walk down the list; dest counts unique elements */
+	for (src = 1; src < length; src++) {
+		/* find next unique element */
+		while (list[src] == list[src-1]) {
+			src++;
+			if (src == length)
+				goto after;
+		}
+		/* dest always points to where the next unique element goes */
+		list[dest] = list[src];
+		dest++;
+	}
+after:
+	/*
+	 * if the length difference is large enough, we want to allocate a
+	 * smaller buffer to save memory. if this fails due to out of memory,
+	 * we'll just stay with what we've got.
+	 */
+	if (PIDLIST_REALLOC_DIFFERENCE(length, dest)) {
+		newlist = pidlist_resize(list, dest);
+		if (newlist)
+			*p = newlist;
+	}
+	return dest;
+}
+
+static int cmppid(const void *a, const void *b)
+{
+	return *(pid_t *)a - *(pid_t *)b;
+}
+
+/*
+ * find the appropriate pidlist for our purpose (given procs vs tasks)
+ * returns with the lock on that pidlist already held, and takes care
+ * of the use count, or returns NULL with no locks held if we're out of
+ * memory.
  */
-static int pid_array_load(pid_t *pidarray, int npids, struct cgroup *cgrp)
+static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
+						  enum cgroup_filetype type)
 {
-	int n = 0, pid;
+	struct cgroup_pidlist *l;
+	/* don't need task_nsproxy() if we're looking at ourself */
+	struct pid_namespace *ns = get_pid_ns(current->nsproxy->pid_ns);
+	/*
+	 * We can't drop the pidlist_mutex before taking the l->mutex in case
+	 * the last ref-holder is trying to remove l from the list at the same
+	 * time. Holding the pidlist_mutex precludes somebody taking whichever
+	 * list we find out from under us - compare release_pid_array().
+	 */
+	mutex_lock(&cgrp->pidlist_mutex);
+	list_for_each_entry(l, &cgrp->pidlists, links) {
+		if (l->key.type == type && l->key.ns == ns) {
+			/* found a matching list - drop the extra refcount */
+			put_pid_ns(ns);
+			/* make sure l doesn't vanish out from under us */
+			down_write(&l->mutex);
+			mutex_unlock(&cgrp->pidlist_mutex);
+			l->use_count++;
+			return l;
+		}
+	}
+	/* entry not found; create a new one */
+	l = kmalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
+	if (!l) {
+		mutex_unlock(&cgrp->pidlist_mutex);
+		put_pid_ns(ns);
+		return l;
+	}
+	init_rwsem(&l->mutex);
+	down_write(&l->mutex);
+	l->key.type = type;
+	l->key.ns = ns;
+	l->use_count = 0; /* don't increment here */
+	l->list = NULL;
+	l->owner = cgrp;
+	list_add(&l->links, &cgrp->pidlists);
+	mutex_unlock(&cgrp->pidlist_mutex);
+	return l;
+}
+
+/*
+ * Load a cgroup's pidarray with either procs' tgids or tasks' pids
+ */
+static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
+			      struct cgroup_pidlist **lp)
+{
+	pid_t *array;
+	int length;
+	int pid, n = 0; /* used for populating the array */
	struct cgroup_iter it;
	struct task_struct *tsk;
+	struct cgroup_pidlist *l;
+
+	/*
+	 * If cgroup gets more users after we read count, we won't have
+	 * enough space - tough. This race is indistinguishable to the
+	 * caller from the case that the additional cgroup users didn't
+	 * show up until sometime later on.
+	 */
+	length = cgroup_task_count(cgrp);
+	array = pidlist_allocate(length);
+	if (!array)
+		return -ENOMEM;
+	/* now, populate the array */
	cgroup_iter_start(cgrp, &it);
	while ((tsk = cgroup_iter_next(cgrp, &it))) {
-		if (unlikely(n == npids))
+		if (unlikely(n == length))
			break;
-		pid = task_pid_vnr(tsk);
-		if (pid > 0)
-			pidarray[n++] = pid;
+		/* get tgid or pid for procs or tasks file respectively */
+		if (type == CGROUP_FILE_PROCS)
+			pid = task_tgid_vnr(tsk);
+		else
+			pid = task_pid_vnr(tsk);
+		if (pid > 0) /* make sure to only use valid results */
+			array[n++] = pid;
	}
	cgroup_iter_end(cgrp, &it);
-	return n;
+	length = n;
+	/* now sort & (if procs) strip out duplicates */
+	sort(array, length, sizeof(pid_t), cmppid, NULL);
+	if (type == CGROUP_FILE_PROCS)
+		length = pidlist_uniq(&array, length);
+	l = cgroup_pidlist_find(cgrp, type);
+	if (!l) {
+		pidlist_free(array);
+		return -ENOMEM;
+	}
+	/* store array, freeing old if necessary - lock already held */
+	pidlist_free(l->list);
+	l->list = array;
+	l->length = length;
+	l->use_count++;
+	up_write(&l->mutex);
+	*lp = l;
+	return 0;
 }
 
 /**
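struct cgroup_pidlist itself is declared outside this file and never appears in the hunks; the sketch below is reconstructed purely from the fields the new code touches (key.type, key.ns, list, length, use_count, links, owner, and the rw_semaphore taken via down_write()), so the field order and any omitted members are guesses, not the actual declaration. As a concrete trace of the pipeline above: for a 'procs' read, pidlist_array_load() sorts the gathered tgids and pidlist_uniq() collapses, say, {5, 5, 7, 9, 9} to {5, 7, 9}, returning a length of 3.

    /* Reconstructed from usage only - not the actual declaration. */
    struct cgroup_pidlist {
            struct {
                    enum cgroup_filetype type;  /* CGROUP_FILE_PROCS or tasks */
                    struct pid_namespace *ns;   /* namespace the pids map into */
            } key;                              /* lookup key in cgrp->pidlists */
            pid_t *list;                        /* sorted pid/tgid array */
            int length;                         /* entries in list */
            int use_count;                      /* open files sharing the array */
            struct list_head links;             /* node in cgrp->pidlists */
            struct cgroup *owner;               /* for owner->pidlist_mutex */
            struct rw_semaphore mutex;          /* guards all of the above */
    };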
@@ -2216,37 +2604,14 @@ err:
	return ret;
 }
 
-/*
- * Cache pids for all threads in the same pid namespace that are
- * opening the same "tasks" file.
- */
-struct cgroup_pids {
-	/* The node in cgrp->pids_list */
-	struct list_head list;
-	/* The cgroup those pids belong to */
-	struct cgroup *cgrp;
-	/* The namepsace those pids belong to */
-	struct pid_namespace *ns;
-	/* Array of process ids in the cgroup */
-	pid_t *tasks_pids;
-	/* How many files are using the this tasks_pids array */
-	int use_count;
-	/* Length of the current tasks_pids array */
-	int length;
-};
-
-static int cmppid(const void *a, const void *b)
-{
-	return *(pid_t *)a - *(pid_t *)b;
-}
 
 /*
- * seq_file methods for the "tasks" file. The seq_file position is the
+ * seq_file methods for the tasks/procs files. The seq_file position is the
  * next pid to display; the seq_file iterator is a pointer to the pid
- * in the cgroup->tasks_pids array.
+ * in the cgroup->l->list array.
  */
 
-static void *cgroup_tasks_start(struct seq_file *s, loff_t *pos)
+static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
 {
	/*
	 * Initially we receive a position value that corresponds to
@@ -2254,48 +2619,45 @@ static void *cgroup_tasks_start(struct seq_file *s, loff_t *pos)
	 * after a seek to the start). Use a binary-search to find the
	 * next pid to display, if any
	 */
-	struct cgroup_pids *cp = s->private;
-	struct cgroup *cgrp = cp->cgrp;
+	struct cgroup_pidlist *l = s->private;
	int index = 0, pid = *pos;
	int *iter;
 
-	down_read(&cgrp->pids_mutex);
+	down_read(&l->mutex);
	if (pid) {
-		int end = cp->length;
+		int end = l->length;
 
		while (index < end) {
			int mid = (index + end) / 2;
-			if (cp->tasks_pids[mid] == pid) {
+			if (l->list[mid] == pid) {
				index = mid;
				break;
-			} else if (cp->tasks_pids[mid] <= pid)
+			} else if (l->list[mid] <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
-	if (index >= cp->length)
+	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
-	iter = cp->tasks_pids + index;
+	iter = l->list + index;
	*pos = *iter;
	return iter;
 }
 
-static void cgroup_tasks_stop(struct seq_file *s, void *v)
+static void cgroup_pidlist_stop(struct seq_file *s, void *v)
 {
-	struct cgroup_pids *cp = s->private;
-	struct cgroup *cgrp = cp->cgrp;
-	up_read(&cgrp->pids_mutex);
+	struct cgroup_pidlist *l = s->private;
+	up_read(&l->mutex);
 }
 
-static void *cgroup_tasks_next(struct seq_file *s, void *v, loff_t *pos)
+static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
 {
-	struct cgroup_pids *cp = s->private;
-	int *p = v;
-	int *end = cp->tasks_pids + cp->length;
-
+	struct cgroup_pidlist *l = s->private;
+	pid_t *p = v;
+	pid_t *end = l->list + l->length;
	/*
	 * Advance to the next pid in the array. If this goes off the
	 * end, we're done
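Because the seq_file position stores a pid value rather than an array index, cgroup_pidlist_start() must re-locate the resume point by binary search, since the array may have been rebuilt between read() calls. With l->list = {3, 5, 9}: a saved position of 5 lands on index 1; if that pid has since vanished and the saved position is, say, 6, the search converges past it and iteration resumes at pid 9. A small standalone user-space check of that resume logic:

    #include <assert.h>

    /* Mirrors the binary search in cgroup_pidlist_start() above. */
    static int resume_index(const int *list, int len, int pos)
    {
            int index = 0, end = len;
            if (pos) {
                    while (index < end) {
                            int mid = (index + end) / 2;
                            if (list[mid] == pos) { index = mid; break; }
                            else if (list[mid] <= pos) index = mid + 1;
                            else end = mid;
                    }
            }
            return index;   /* may equal len: off the end, iteration done */
    }

    int main(void)
    {
            const int pids[] = {3, 5, 9};
            assert(resume_index(pids, 3, 5) == 1);  /* pid still present */
            assert(resume_index(pids, 3, 6) == 2);  /* pid gone: next larger */
            assert(resume_index(pids, 3, 10) == 3); /* past the end */
            return 0;
    }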
@@ -2309,124 +2671,107 @@ static void *cgroup_tasks_next(struct seq_file *s, void *v, loff_t *pos) | |||
2309 | } | 2671 | } |
2310 | } | 2672 | } |
2311 | 2673 | ||
2312 | static int cgroup_tasks_show(struct seq_file *s, void *v) | 2674 | static int cgroup_pidlist_show(struct seq_file *s, void *v) |
2313 | { | 2675 | { |
2314 | return seq_printf(s, "%d\n", *(int *)v); | 2676 | return seq_printf(s, "%d\n", *(int *)v); |
2315 | } | 2677 | } |
2316 | 2678 | ||
2317 | static struct seq_operations cgroup_tasks_seq_operations = { | 2679 | /* |
2318 | .start = cgroup_tasks_start, | 2680 | * seq_operations functions for iterating on pidlists through seq_file - |
2319 | .stop = cgroup_tasks_stop, | 2681 | * independent of whether it's tasks or procs |
2320 | .next = cgroup_tasks_next, | 2682 | */ |
2321 | .show = cgroup_tasks_show, | 2683 | static const struct seq_operations cgroup_pidlist_seq_operations = { |
2684 | .start = cgroup_pidlist_start, | ||
2685 | .stop = cgroup_pidlist_stop, | ||
2686 | .next = cgroup_pidlist_next, | ||
2687 | .show = cgroup_pidlist_show, | ||
2322 | }; | 2688 | }; |
2323 | 2689 | ||
2324 | static void release_cgroup_pid_array(struct cgroup_pids *cp) | 2690 | static void cgroup_release_pid_array(struct cgroup_pidlist *l) |
2325 | { | 2691 | { |
2326 | struct cgroup *cgrp = cp->cgrp; | 2692 | /* |
2327 | 2693 | * the case where we're the last user of this particular pidlist will | |
2328 | down_write(&cgrp->pids_mutex); | 2694 | * have us remove it from the cgroup's list, which entails taking the |
2329 | BUG_ON(!cp->use_count); | 2695 | * mutex. since cgroup_pidlist_find takes a pidlist's own mutex while |
2330 | if (!--cp->use_count) { | 2696 | * holding cgroup->pidlist_mutex, we have to take pidlist_mutex first. |
2331 | list_del(&cp->list); | 2697 | */ |
2332 | put_pid_ns(cp->ns); | 2698 | mutex_lock(&l->owner->pidlist_mutex); |
2333 | kfree(cp->tasks_pids); | 2699 | down_write(&l->mutex); |
2334 | kfree(cp); | 2700 | BUG_ON(!l->use_count); |
2701 | if (!--l->use_count) { | ||
2702 | /* we're the last user if refcount is 0; remove and free */ | ||
2703 | list_del(&l->links); | ||
2704 | mutex_unlock(&l->owner->pidlist_mutex); | ||
2705 | pidlist_free(l->list); | ||
2706 | put_pid_ns(l->key.ns); | ||
2707 | up_write(&l->mutex); | ||
2708 | kfree(l); | ||
2709 | return; | ||
2335 | } | 2710 | } |
2336 | up_write(&cgrp->pids_mutex); | 2711 | mutex_unlock(&l->owner->pidlist_mutex); |
2712 | up_write(&l->mutex); | ||
2337 | } | 2713 | } |
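The lock ordering here is the subtle part: cgroup_pidlist_find() takes a pidlist's own mutex while already holding the owning cgroup's pidlist_mutex, so the release path must also take pidlist_mutex first; acquiring in the opposite order could deadlock two tasks against each other. A pthreads sketch of the same nesting discipline (illustrative only; note the kernel code additionally drops pidlist_mutex before freeing):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pidlist_mutex = PTHREAD_MUTEX_INITIALIZER; /* outer */
static pthread_mutex_t l_mutex = PTHREAD_MUTEX_INITIALIZER;       /* inner */
static int use_count = 1;

static void release_pid_array(void)
{
	/* outer lock first, matching the nesting order of the find path */
	pthread_mutex_lock(&pidlist_mutex);
	pthread_mutex_lock(&l_mutex);
	if (!--use_count)
		printf("last user: unlink from the cgroup and free\n");
	pthread_mutex_unlock(&l_mutex);
	pthread_mutex_unlock(&pidlist_mutex);
}

int main(void)
{
	release_pid_array();
	return 0;
}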
2338 | 2714 | ||
2339 | static int cgroup_tasks_release(struct inode *inode, struct file *file) | 2715 | static int cgroup_pidlist_release(struct inode *inode, struct file *file) |
2340 | { | 2716 | { |
2341 | struct seq_file *seq; | 2717 | struct cgroup_pidlist *l; |
2342 | struct cgroup_pids *cp; | ||
2343 | |||
2344 | if (!(file->f_mode & FMODE_READ)) | 2718 | if (!(file->f_mode & FMODE_READ)) |
2345 | return 0; | 2719 | return 0; |
2346 | 2720 | /* | |
2347 | seq = file->private_data; | 2721 | * the seq_file will only be initialized if the file was opened for |
2348 | cp = seq->private; | 2722 | * reading; hence the private data is only dereferenced in that case. |
2349 | 2723 | */ | |
2350 | release_cgroup_pid_array(cp); | 2724 | l = ((struct seq_file *)file->private_data)->private; |
2725 | cgroup_release_pid_array(l); | ||
2351 | return seq_release(inode, file); | 2726 | return seq_release(inode, file); |
2352 | } | 2727 | } |
2353 | 2728 | ||
2354 | static struct file_operations cgroup_tasks_operations = { | 2729 | static const struct file_operations cgroup_pidlist_operations = { |
2355 | .read = seq_read, | 2730 | .read = seq_read, |
2356 | .llseek = seq_lseek, | 2731 | .llseek = seq_lseek, |
2357 | .write = cgroup_file_write, | 2732 | .write = cgroup_file_write, |
2358 | .release = cgroup_tasks_release, | 2733 | .release = cgroup_pidlist_release, |
2359 | }; | 2734 | }; |
2360 | 2735 | ||
2361 | /* | 2736 | /* |
2362 | * Handle an open on 'tasks' file. Prepare an array containing the | 2737 | * The following functions handle opens on a file that displays a pidlist |
2363 | * process id's of tasks currently attached to the cgroup being opened. | 2738 | * (tasks or procs). Prepare an array of the process/thread IDs of whoever's |
2739 | * in the cgroup. | ||
2364 | */ | 2740 | */ |
2365 | 2741 | /* helper function for the two below it */ | |
2366 | static int cgroup_tasks_open(struct inode *unused, struct file *file) | 2742 | static int cgroup_pidlist_open(struct file *file, enum cgroup_filetype type) |
2367 | { | 2743 | { |
2368 | struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); | 2744 | struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); |
2369 | struct pid_namespace *ns = current->nsproxy->pid_ns; | 2745 | struct cgroup_pidlist *l; |
2370 | struct cgroup_pids *cp; | ||
2371 | pid_t *pidarray; | ||
2372 | int npids; | ||
2373 | int retval; | 2746 | int retval; |
2374 | 2747 | ||
2375 | /* Nothing to do for write-only files */ | 2748 | /* Nothing to do for write-only files */ |
2376 | if (!(file->f_mode & FMODE_READ)) | 2749 | if (!(file->f_mode & FMODE_READ)) |
2377 | return 0; | 2750 | return 0; |
2378 | 2751 | ||
2379 | /* | 2752 | /* have the array populated */ |
2380 | * If cgroup gets more users after we read count, we won't have | 2753 | retval = pidlist_array_load(cgrp, type, &l); |
2381 | * enough space - tough. This race is indistinguishable to the | 2754 | if (retval) |
2382 | * caller from the case that the additional cgroup users didn't | 2755 | return retval; |
2383 | * show up until sometime later on. | 2756 | /* configure file information */ |
2384 | */ | 2757 | file->f_op = &cgroup_pidlist_operations; |
2385 | npids = cgroup_task_count(cgrp); | ||
2386 | pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL); | ||
2387 | if (!pidarray) | ||
2388 | return -ENOMEM; | ||
2389 | npids = pid_array_load(pidarray, npids, cgrp); | ||
2390 | sort(pidarray, npids, sizeof(pid_t), cmppid, NULL); | ||
2391 | |||
2392 | /* | ||
2393 | * Store the array in the cgroup, freeing the old | ||
2394 | * array if necessary | ||
2395 | */ | ||
2396 | down_write(&cgrp->pids_mutex); | ||
2397 | |||
2398 | list_for_each_entry(cp, &cgrp->pids_list, list) { | ||
2399 | if (ns == cp->ns) | ||
2400 | goto found; | ||
2401 | } | ||
2402 | |||
2403 | cp = kzalloc(sizeof(*cp), GFP_KERNEL); | ||
2404 | if (!cp) { | ||
2405 | up_write(&cgrp->pids_mutex); | ||
2406 | kfree(pidarray); | ||
2407 | return -ENOMEM; | ||
2408 | } | ||
2409 | cp->cgrp = cgrp; | ||
2410 | cp->ns = ns; | ||
2411 | get_pid_ns(ns); | ||
2412 | list_add(&cp->list, &cgrp->pids_list); | ||
2413 | found: | ||
2414 | kfree(cp->tasks_pids); | ||
2415 | cp->tasks_pids = pidarray; | ||
2416 | cp->length = npids; | ||
2417 | cp->use_count++; | ||
2418 | up_write(&cgrp->pids_mutex); | ||
2419 | |||
2420 | file->f_op = &cgroup_tasks_operations; | ||
2421 | 2758 | ||
2422 | retval = seq_open(file, &cgroup_tasks_seq_operations); | 2759 | retval = seq_open(file, &cgroup_pidlist_seq_operations); |
2423 | if (retval) { | 2760 | if (retval) { |
2424 | release_cgroup_pid_array(cp); | 2761 | cgroup_release_pid_array(l); |
2425 | return retval; | 2762 | return retval; |
2426 | } | 2763 | } |
2427 | ((struct seq_file *)file->private_data)->private = cp; | 2764 | ((struct seq_file *)file->private_data)->private = l; |
2428 | return 0; | 2765 | return 0; |
2429 | } | 2766 | } |
2767 | static int cgroup_tasks_open(struct inode *unused, struct file *file) | ||
2768 | { | ||
2769 | return cgroup_pidlist_open(file, CGROUP_FILE_TASKS); | ||
2770 | } | ||
2771 | static int cgroup_procs_open(struct inode *unused, struct file *file) | ||
2772 | { | ||
2773 | return cgroup_pidlist_open(file, CGROUP_FILE_PROCS); | ||
2774 | } | ||
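Both per-cgroup files now funnel through one helper, with the enum choosing which kind of id array gets loaded; note that if seq_open() fails, the helper drops the reference pidlist_array_load() just took so the pidlist does not leak. A condensed sketch of the dispatch shape (userspace stand-ins, hypothetical names):

#include <stdio.h>

enum cgroup_filetype { CGROUP_FILE_PROCS, CGROUP_FILE_TASKS };

/* shared open helper: load the matching array, then hook up the seq ops */
static int pidlist_open(enum cgroup_filetype type)
{
	printf("loading %s pidlist\n",
	       type == CGROUP_FILE_TASKS ? "tasks" : "procs");
	/* in the kernel code, a seq_open() failure here is followed by
	 * cgroup_release_pid_array() so the freshly taken ref is dropped */
	return 0;
}

static int tasks_open(void) { return pidlist_open(CGROUP_FILE_TASKS); }
static int procs_open(void) { return pidlist_open(CGROUP_FILE_PROCS); }

int main(void)
{
	return tasks_open() | procs_open();
}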
2430 | 2775 | ||
2431 | static u64 cgroup_read_notify_on_release(struct cgroup *cgrp, | 2776 | static u64 cgroup_read_notify_on_release(struct cgroup *cgrp, |
2432 | struct cftype *cft) | 2777 | struct cftype *cft) |
@@ -2449,21 +2794,27 @@ static int cgroup_write_notify_on_release(struct cgroup *cgrp, | |||
2449 | /* | 2794 | /* |
2450 | * for the common functions, 'private' gives the type of file | 2795 | * for the common functions, 'private' gives the type of file |
2451 | */ | 2796 | */ |
2797 | /* for hysterical raisins, we can't put this on the older files */ | ||
2798 | #define CGROUP_FILE_GENERIC_PREFIX "cgroup." | ||
2452 | static struct cftype files[] = { | 2799 | static struct cftype files[] = { |
2453 | { | 2800 | { |
2454 | .name = "tasks", | 2801 | .name = "tasks", |
2455 | .open = cgroup_tasks_open, | 2802 | .open = cgroup_tasks_open, |
2456 | .write_u64 = cgroup_tasks_write, | 2803 | .write_u64 = cgroup_tasks_write, |
2457 | .release = cgroup_tasks_release, | 2804 | .release = cgroup_pidlist_release, |
2458 | .private = FILE_TASKLIST, | ||
2459 | .mode = S_IRUGO | S_IWUSR, | 2805 | .mode = S_IRUGO | S_IWUSR, |
2460 | }, | 2806 | }, |
2461 | 2807 | { | |
2808 | .name = CGROUP_FILE_GENERIC_PREFIX "procs", | ||
2809 | .open = cgroup_procs_open, | ||
2810 | /* .write_u64 = cgroup_procs_write, TODO */ | ||
2811 | .release = cgroup_pidlist_release, | ||
2812 | .mode = S_IRUGO, | ||
2813 | }, | ||
2462 | { | 2814 | { |
2463 | .name = "notify_on_release", | 2815 | .name = "notify_on_release", |
2464 | .read_u64 = cgroup_read_notify_on_release, | 2816 | .read_u64 = cgroup_read_notify_on_release, |
2465 | .write_u64 = cgroup_write_notify_on_release, | 2817 | .write_u64 = cgroup_write_notify_on_release, |
2466 | .private = FILE_NOTIFY_ON_RELEASE, | ||
2467 | }, | 2818 | }, |
2468 | }; | 2819 | }; |
2469 | 2820 | ||
@@ -2472,7 +2823,6 @@ static struct cftype cft_release_agent = { | |||
2472 | .read_seq_string = cgroup_release_agent_show, | 2823 | .read_seq_string = cgroup_release_agent_show, |
2473 | .write_string = cgroup_release_agent_write, | 2824 | .write_string = cgroup_release_agent_write, |
2474 | .max_write_len = PATH_MAX, | 2825 | .max_write_len = PATH_MAX, |
2475 | .private = FILE_RELEASE_AGENT, | ||
2476 | }; | 2826 | }; |
2477 | 2827 | ||
2478 | static int cgroup_populate_dir(struct cgroup *cgrp) | 2828 | static int cgroup_populate_dir(struct cgroup *cgrp) |
@@ -2879,6 +3229,7 @@ int __init cgroup_init_early(void) | |||
2879 | init_task.cgroups = &init_css_set; | 3229 | init_task.cgroups = &init_css_set; |
2880 | 3230 | ||
2881 | init_css_set_link.cg = &init_css_set; | 3231 | init_css_set_link.cg = &init_css_set; |
3232 | init_css_set_link.cgrp = dummytop; | ||
2882 | list_add(&init_css_set_link.cgrp_link_list, | 3233 | list_add(&init_css_set_link.cgrp_link_list, |
2883 | &rootnode.top_cgroup.css_sets); | 3234 | &rootnode.top_cgroup.css_sets); |
2884 | list_add(&init_css_set_link.cg_link_list, | 3235 | list_add(&init_css_set_link.cg_link_list, |
@@ -2933,7 +3284,7 @@ int __init cgroup_init(void) | |||
2933 | /* Add init_css_set to the hash table */ | 3284 | /* Add init_css_set to the hash table */ |
2934 | hhead = css_set_hash(init_css_set.subsys); | 3285 | hhead = css_set_hash(init_css_set.subsys); |
2935 | hlist_add_head(&init_css_set.hlist, hhead); | 3286 | hlist_add_head(&init_css_set.hlist, hhead); |
2936 | 3287 | BUG_ON(!init_root_id(&rootnode)); | |
2937 | err = register_filesystem(&cgroup_fs_type); | 3288 | err = register_filesystem(&cgroup_fs_type); |
2938 | if (err < 0) | 3289 | if (err < 0) |
2939 | goto out; | 3290 | goto out; |
@@ -2986,15 +3337,16 @@ static int proc_cgroup_show(struct seq_file *m, void *v) | |||
2986 | for_each_active_root(root) { | 3337 | for_each_active_root(root) { |
2987 | struct cgroup_subsys *ss; | 3338 | struct cgroup_subsys *ss; |
2988 | struct cgroup *cgrp; | 3339 | struct cgroup *cgrp; |
2989 | int subsys_id; | ||
2990 | int count = 0; | 3340 | int count = 0; |
2991 | 3341 | ||
2992 | seq_printf(m, "%lu:", root->subsys_bits); | 3342 | seq_printf(m, "%d:", root->hierarchy_id); |
2993 | for_each_subsys(root, ss) | 3343 | for_each_subsys(root, ss) |
2994 | seq_printf(m, "%s%s", count++ ? "," : "", ss->name); | 3344 | seq_printf(m, "%s%s", count++ ? "," : "", ss->name); |
3345 | if (strlen(root->name)) | ||
3346 | seq_printf(m, "%sname=%s", count ? "," : "", | ||
3347 | root->name); | ||
2995 | seq_putc(m, ':'); | 3348 | seq_putc(m, ':'); |
2996 | get_first_subsys(&root->top_cgroup, NULL, &subsys_id); | 3349 | cgrp = task_cgroup_from_root(tsk, root); |
2997 | cgrp = task_cgroup(tsk, subsys_id); | ||
2998 | retval = cgroup_path(cgrp, buf, PAGE_SIZE); | 3350 | retval = cgroup_path(cgrp, buf, PAGE_SIZE); |
2999 | if (retval < 0) | 3351 | if (retval < 0) |
3000 | goto out_unlock; | 3352 | goto out_unlock; |
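With this change a /proc/<pid>/cgroup line starts with the small hierarchy id instead of the raw subsystem bitmask, optionally followed by a name= tag for named hierarchies. A sketch of the resulting line format (sample values, assuming at least one bound subsystem):

#include <stdio.h>

int main(void)
{
	int hierarchy_id = 2;              /* root->hierarchy_id */
	const char *subsys = "cpuset,cpu"; /* comma-joined subsystem names */
	const char *name = "";             /* root->name, often empty */
	const char *path = "/mygroup";     /* cgroup_path() result */

	printf("%d:%s%s%s:%s\n", hierarchy_id, subsys,
	       *name ? ",name=" : "", name, path);
	/* prints "2:cpuset,cpu:/mygroup" */
	return 0;
}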
@@ -3033,8 +3385,8 @@ static int proc_cgroupstats_show(struct seq_file *m, void *v) | |||
3033 | mutex_lock(&cgroup_mutex); | 3385 | mutex_lock(&cgroup_mutex); |
3034 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { | 3386 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { |
3035 | struct cgroup_subsys *ss = subsys[i]; | 3387 | struct cgroup_subsys *ss = subsys[i]; |
3036 | seq_printf(m, "%s\t%lu\t%d\t%d\n", | 3388 | seq_printf(m, "%s\t%d\t%d\t%d\n", |
3037 | ss->name, ss->root->subsys_bits, | 3389 | ss->name, ss->root->hierarchy_id, |
3038 | ss->root->number_of_cgroups, !ss->disabled); | 3390 | ss->root->number_of_cgroups, !ss->disabled); |
3039 | } | 3391 | } |
3040 | mutex_unlock(&cgroup_mutex); | 3392 | mutex_unlock(&cgroup_mutex); |
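The /proc/cgroups table changes the same way: the second column becomes the hierarchy's small unique id rather than the %lu subsystem bitmask, while the row layout stays name, hierarchy, cgroup count, enabled flag. One row reproduced with made-up values:

#include <stdio.h>

int main(void)
{
	/* subsys name, hierarchy_id, number_of_cgroups, !disabled */
	printf("%s\t%d\t%d\t%d\n", "cpuset", 2, 5, 1);
	return 0;
}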
@@ -3320,13 +3672,11 @@ int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task) | |||
3320 | { | 3672 | { |
3321 | int ret; | 3673 | int ret; |
3322 | struct cgroup *target; | 3674 | struct cgroup *target; |
3323 | int subsys_id; | ||
3324 | 3675 | ||
3325 | if (cgrp == dummytop) | 3676 | if (cgrp == dummytop) |
3326 | return 1; | 3677 | return 1; |
3327 | 3678 | ||
3328 | get_first_subsys(cgrp, NULL, &subsys_id); | 3679 | target = task_cgroup_from_root(task, cgrp->root); |
3329 | target = task_cgroup(task, subsys_id); | ||
3330 | while (cgrp != target && cgrp != cgrp->top_cgroup) | 3680 | while (cgrp != target && cgrp != cgrp->top_cgroup) |
3331 | cgrp = cgrp->parent; | 3681 | cgrp = cgrp->parent; |
3332 | ret = (cgrp == target); | 3682 | ret = (cgrp == target); |
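With task_cgroup_from_root() there is no need to pick an arbitrary first subsystem just to find the task's cgroup: the helper resolves it directly in cgrp's own hierarchy, and the loop then walks cgrp upward until it either meets that cgroup (descendant) or hits the top (not). A standalone sketch of the walk (hypothetical node type; the kernel walks struct cgroup parent pointers):

#include <stdio.h>

struct node { struct node *parent; };

/* walk cgrp toward the top; descendant iff the walk passes through target */
static int is_descendant(struct node *cgrp, struct node *target,
			 struct node *top)
{
	while (cgrp != target && cgrp != top)
		cgrp = cgrp->parent;
	return cgrp == target;
}

int main(void)
{
	struct node top = { 0 }, mid = { &top }, leaf = { &mid };

	printf("%d\n", is_descendant(&leaf, &mid, &top)); /* 1 */
	printf("%d\n", is_descendant(&top, &leaf, &top)); /* 0 */
	return 0;
}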
@@ -3693,3 +4043,154 @@ css_get_next(struct cgroup_subsys *ss, int id, | |||
3693 | return ret; | 4043 | return ret; |
3694 | } | 4044 | } |
3695 | 4045 | ||
4046 | #ifdef CONFIG_CGROUP_DEBUG | ||
4047 | static struct cgroup_subsys_state *debug_create(struct cgroup_subsys *ss, | ||
4048 | struct cgroup *cont) | ||
4049 | { | ||
4050 | struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL); | ||
4051 | |||
4052 | if (!css) | ||
4053 | return ERR_PTR(-ENOMEM); | ||
4054 | |||
4055 | return css; | ||
4056 | } | ||
4057 | |||
4058 | static void debug_destroy(struct cgroup_subsys *ss, struct cgroup *cont) | ||
4059 | { | ||
4060 | kfree(cont->subsys[debug_subsys_id]); | ||
4061 | } | ||
4062 | |||
4063 | static u64 cgroup_refcount_read(struct cgroup *cont, struct cftype *cft) | ||
4064 | { | ||
4065 | return atomic_read(&cont->count); | ||
4066 | } | ||
4067 | |||
4068 | static u64 debug_taskcount_read(struct cgroup *cont, struct cftype *cft) | ||
4069 | { | ||
4070 | return cgroup_task_count(cont); | ||
4071 | } | ||
4072 | |||
4073 | static u64 current_css_set_read(struct cgroup *cont, struct cftype *cft) | ||
4074 | { | ||
4075 | return (u64)(unsigned long)current->cgroups; | ||
4076 | } | ||
4077 | |||
4078 | static u64 current_css_set_refcount_read(struct cgroup *cont, | ||
4079 | struct cftype *cft) | ||
4080 | { | ||
4081 | u64 count; | ||
4082 | |||
4083 | rcu_read_lock(); | ||
4084 | count = atomic_read(&current->cgroups->refcount); | ||
4085 | rcu_read_unlock(); | ||
4086 | return count; | ||
4087 | } | ||
4088 | |||
4089 | static int current_css_set_cg_links_read(struct cgroup *cont, | ||
4090 | struct cftype *cft, | ||
4091 | struct seq_file *seq) | ||
4092 | { | ||
4093 | struct cg_cgroup_link *link; | ||
4094 | struct css_set *cg; | ||
4095 | |||
4096 | read_lock(&css_set_lock); | ||
4097 | rcu_read_lock(); | ||
4098 | cg = rcu_dereference(current->cgroups); | ||
4099 | list_for_each_entry(link, &cg->cg_links, cg_link_list) { | ||
4100 | struct cgroup *c = link->cgrp; | ||
4101 | const char *name; | ||
4102 | |||
4103 | if (c->dentry) | ||
4104 | name = c->dentry->d_name.name; | ||
4105 | else | ||
4106 | name = "?"; | ||
4107 | seq_printf(seq, "Root %d group %s\n", | ||
4108 | c->root->hierarchy_id, name); | ||
4109 | } | ||
4110 | rcu_read_unlock(); | ||
4111 | read_unlock(&css_set_lock); | ||
4112 | return 0; | ||
4113 | } | ||
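Reading this debug file for a task attached to two hierarchies would yield one line per cg_cgroup_link, with "?" standing in for a cgroup whose dentry has not been instantiated. Output shape, reproduced with made-up values:

#include <stdio.h>

int main(void)
{
	/* one line per cg_cgroup_link on the current css_set */
	printf("Root %d group %s\n", 1, "mygroup");
	printf("Root %d group %s\n", 2, "?"); /* "?" when no dentry exists */
	return 0;
}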
4114 | |||
4115 | #define MAX_TASKS_SHOWN_PER_CSS 25 | ||
4116 | static int cgroup_css_links_read(struct cgroup *cont, | ||
4117 | struct cftype *cft, | ||
4118 | struct seq_file *seq) | ||
4119 | { | ||
4120 | struct cg_cgroup_link *link; | ||
4121 | |||
4122 | read_lock(&css_set_lock); | ||
4123 | list_for_each_entry(link, &cont->css_sets, cgrp_link_list) { | ||
4124 | struct css_set *cg = link->cg; | ||
4125 | struct task_struct *task; | ||
4126 | int count = 0; | ||
4127 | seq_printf(seq, "css_set %p\n", cg); | ||
4128 | list_for_each_entry(task, &cg->tasks, cg_list) { | ||
4129 | if (count++ > MAX_TASKS_SHOWN_PER_CSS) { | ||
4130 | seq_puts(seq, " ...\n"); | ||
4131 | break; | ||
4132 | } else { | ||
4133 | seq_printf(seq, " task %d\n", | ||
4134 | task_pid_vnr(task)); | ||
4135 | } | ||
4136 | } | ||
4137 | } | ||
4138 | read_unlock(&css_set_lock); | ||
4139 | return 0; | ||
4140 | } | ||
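This file walks the opposite direction of current_css_set_cg_links: every css_set attached to the cgroup is dumped along with up to MAX_TASKS_SHOWN_PER_CSS of its member tasks before the listing truncates. Output shape with made-up values:

#include <stdio.h>

int main(void)
{
	int dummy;

	/* the kernel prints the css_set's address; any pointer works here */
	printf("css_set %p\n", (void *)&dummy);
	printf(" task %d\n", 1042);
	printf(" task %d\n", 1043);
	printf(" ...\n"); /* printed once the per-set cap is exceeded */
	return 0;
}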
4141 | |||
4142 | static u64 releasable_read(struct cgroup *cgrp, struct cftype *cft) | ||
4143 | { | ||
4144 | return test_bit(CGRP_RELEASABLE, &cgrp->flags); | ||
4145 | } | ||
4146 | |||
4147 | static struct cftype debug_files[] = { | ||
4148 | { | ||
4149 | .name = "cgroup_refcount", | ||
4150 | .read_u64 = cgroup_refcount_read, | ||
4151 | }, | ||
4152 | { | ||
4153 | .name = "taskcount", | ||
4154 | .read_u64 = debug_taskcount_read, | ||
4155 | }, | ||
4156 | |||
4157 | { | ||
4158 | .name = "current_css_set", | ||
4159 | .read_u64 = current_css_set_read, | ||
4160 | }, | ||
4161 | |||
4162 | { | ||
4163 | .name = "current_css_set_refcount", | ||
4164 | .read_u64 = current_css_set_refcount_read, | ||
4165 | }, | ||
4166 | |||
4167 | { | ||
4168 | .name = "current_css_set_cg_links", | ||
4169 | .read_seq_string = current_css_set_cg_links_read, | ||
4170 | }, | ||
4171 | |||
4172 | { | ||
4173 | .name = "cgroup_css_links", | ||
4174 | .read_seq_string = cgroup_css_links_read, | ||
4175 | }, | ||
4176 | |||
4177 | { | ||
4178 | .name = "releasable", | ||
4179 | .read_u64 = releasable_read, | ||
4180 | }, | ||
4181 | }; | ||
4182 | |||
4183 | static int debug_populate(struct cgroup_subsys *ss, struct cgroup *cont) | ||
4184 | { | ||
4185 | return cgroup_add_files(cont, ss, debug_files, | ||
4186 | ARRAY_SIZE(debug_files)); | ||
4187 | } | ||
4188 | |||
4189 | struct cgroup_subsys debug_subsys = { | ||
4190 | .name = "debug", | ||
4191 | .create = debug_create, | ||
4192 | .destroy = debug_destroy, | ||
4193 | .populate = debug_populate, | ||
4194 | .subsys_id = debug_subsys_id, | ||
4195 | }; | ||
4196 | #endif /* CONFIG_CGROUP_DEBUG */ | ||