author		Linus Torvalds <torvalds@linux-foundation.org>	2012-03-20 21:11:21 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-20 21:11:21 -0400
commit		0d9cabdccedb79ee5f27b77ff51f29a9e7d23275
tree		8bfb64c3672d058eb90aec3c3a9c4f61cef9097c
parent		701085b219016d38f105b031381b9cee6200253a
parent		3ce3230a0cff484e5130153f244d4fb8a56b3a8b
Merge branch 'for-3.4' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup
Pull cgroup changes from Tejun Heo:
"Out of the 8 commits, one fixes a long-standing locking issue around
tasklist walking and others are cleanups."
* 'for-3.4' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup:
cgroup: Walk task list under tasklist_lock in cgroup_enable_task_cg_list
cgroup: Remove wrong comment on cgroup_enable_task_cg_list()
cgroup: remove cgroup_subsys argument from callbacks
cgroup: remove extra calls to find_existing_css_set
cgroup: replace tasklist_lock with rcu_read_lock
cgroup: simplify double-check locking in cgroup_attach_proc
cgroup: move struct cgroup_pidlist out from the header file
cgroup: remove cgroup_attach_task_current_cg()
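
The change common to most of these commits is mechanical: every cgroup_subsys
callback loses its leading struct cgroup_subsys * argument, since a callback
already belongs to exactly one subsystem and the pointer carried no extra
information. A hedged sketch of the pattern (the subsystem "foo" here is
hypothetical; the real conversions are in the diff below):

	/* Before: each callback redundantly received its own subsystem. */
	static struct cgroup_subsys_state *foo_create(struct cgroup_subsys *ss,
						      struct cgroup *cgrp);
	static int foo_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
				  struct cgroup_taskset *tset);

	/* After: only the cgroup (and taskset, where relevant) is passed. */
	static struct cgroup_subsys_state *foo_create(struct cgroup *cgrp);
	static int foo_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset);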
 Documentation/cgroups/cgroups.txt |  26
 block/blk-cgroup.c                |  22
 include/linux/cgroup.h            |  70
 include/net/sock.h                |   7
 include/net/tcp_memcontrol.h      |   2
 kernel/cgroup.c                   | 327
 kernel/cgroup_freezer.c           |  11
 kernel/cpuset.c                   |  16
 kernel/events/core.c              |  13
 kernel/sched/core.c               |  20
 mm/memcontrol.c                   |  48
 net/core/netprio_cgroup.c         |  10
 net/core/sock.c                   |   6
 net/ipv4/tcp_memcontrol.c         |   2
 net/sched/cls_cgroup.c            |  10
 security/device_cgroup.c          |  10
 16 files changed, 228 insertions(+), 372 deletions(-)
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index a7c96ae5557c..8e74980ab385 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -558,8 +558,7 @@ Each subsystem may export the following methods. The only mandatory
 methods are create/destroy. Any others that are null are presumed to
 be successful no-ops.
 
-struct cgroup_subsys_state *create(struct cgroup_subsys *ss,
-				   struct cgroup *cgrp)
+struct cgroup_subsys_state *create(struct cgroup *cgrp)
 (cgroup_mutex held by caller)
 
 Called to create a subsystem state object for a cgroup. The
@@ -574,7 +573,7 @@ identified by the passed cgroup object having a NULL parent (since
 it's the root of the hierarchy) and may be an appropriate place for
 initialization code.
 
-void destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
+void destroy(struct cgroup *cgrp)
 (cgroup_mutex held by caller)
 
 The cgroup system is about to destroy the passed cgroup; the subsystem
@@ -585,7 +584,7 @@ cgroup->parent is still valid. (Note - can also be called for a
 newly-created cgroup if an error occurs after this subsystem's
 create() method has been called for the new cgroup).
 
-int pre_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp);
+int pre_destroy(struct cgroup *cgrp);
 
 Called before checking the reference count on each subsystem. This may
 be useful for subsystems which have some extra references even if
@@ -593,8 +592,7 @@ there are not tasks in the cgroup. If pre_destroy() returns error code,
 rmdir() will fail with it. From this behavior, pre_destroy() can be
 called multiple times against a cgroup.
 
-int can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-	       struct cgroup_taskset *tset)
+int can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 (cgroup_mutex held by caller)
 
 Called prior to moving one or more tasks into a cgroup; if the
@@ -615,8 +613,7 @@ fork. If this method returns 0 (success) then this should remain valid
 while the caller holds cgroup_mutex and it is ensured that either
 attach() or cancel_attach() will be called in future.
 
-void cancel_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-		   struct cgroup_taskset *tset)
+void cancel_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 (cgroup_mutex held by caller)
 
 Called when a task attach operation has failed after can_attach() has succeeded.
@@ -625,23 +622,22 @@ function, so that the subsystem can implement a rollback. If not, not necessary.
 This will be called only about subsystems whose can_attach() operation have
 succeeded. The parameters are identical to can_attach().
 
-void attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-	    struct cgroup_taskset *tset)
+void attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 (cgroup_mutex held by caller)
 
 Called after the task has been attached to the cgroup, to allow any
 post-attachment activity that requires memory allocations or blocking.
 The parameters are identical to can_attach().
 
-void fork(struct cgroup_subsy *ss, struct task_struct *task)
+void fork(struct task_struct *task)
 
 Called when a task is forked into a cgroup.
 
-void exit(struct cgroup_subsys *ss, struct task_struct *task)
+void exit(struct task_struct *task)
 
 Called during task exit.
 
-int populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
+int populate(struct cgroup *cgrp)
 (cgroup_mutex held by caller)
 
 Called after creation of a cgroup to allow a subsystem to populate
@@ -651,7 +647,7 @@ include/linux/cgroup.h for details). Note that although this
 method can return an error code, the error code is currently not
 always handled well.
 
-void post_clone(struct cgroup_subsys *ss, struct cgroup *cgrp)
+void post_clone(struct cgroup *cgrp)
 (cgroup_mutex held by caller)
 
 Called during cgroup_create() to do any parameter
@@ -659,7 +655,7 @@ initialization which might be required before a task could attach. For
 example in cpusets, no task may attach before 'cpus' and 'mems' are set
 up.
 
-void bind(struct cgroup_subsys *ss, struct cgroup *root)
+void bind(struct cgroup *root)
 (cgroup_mutex and ss->hierarchy_mutex held by caller)
 
 Called when a cgroup subsystem is rebound to a different hierarchy
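
With these signature changes, every documented callback operates on the
cgroup (plus a taskset for the attach family) alone. A minimal subsystem
skeleton under the new API would look roughly like the sketch below; it
mirrors the debug subsystem converted later in this patch, but the names
(example_create, example_destroy, example_subsys_id) are illustrative
assumptions, not part of the patch:

	static struct cgroup_subsys_state *example_create(struct cgroup *cgrp)
	{
		/* allocate this subsystem's per-cgroup state object */
		struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);

		if (!css)
			return ERR_PTR(-ENOMEM);
		return css;
	}

	static void example_destroy(struct cgroup *cgrp)
	{
		/* free the state object allocated in example_create() */
		kfree(cgrp->subsys[example_subsys_id]);
	}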
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 75642a352a8f..ea84a23d5e68 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -28,13 +28,10 @@ static LIST_HEAD(blkio_list);
 struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
 EXPORT_SYMBOL_GPL(blkio_root_cgroup);
 
-static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
-						  struct cgroup *);
-static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
-			      struct cgroup_taskset *);
-static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
-			   struct cgroup_taskset *);
-static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
+static struct cgroup_subsys_state *blkiocg_create(struct cgroup *);
+static int blkiocg_can_attach(struct cgroup *, struct cgroup_taskset *);
+static void blkiocg_attach(struct cgroup *, struct cgroup_taskset *);
+static void blkiocg_destroy(struct cgroup *);
 static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
 
 /* for encoding cft->private value on file */
@@ -1548,7 +1545,7 @@ static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
 				ARRAY_SIZE(blkio_files));
 }
 
-static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
+static void blkiocg_destroy(struct cgroup *cgroup)
 {
 	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
 	unsigned long flags;
@@ -1598,8 +1595,7 @@ static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
 	kfree(blkcg);
 }
 
-static struct cgroup_subsys_state *
-blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
+static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
 {
 	struct blkio_cgroup *blkcg;
 	struct cgroup *parent = cgroup->parent;
@@ -1628,8 +1624,7 @@ done:
  * of the main cic data structures. For now we allow a task to change
  * its cgroup only if it's the only owner of its ioc.
  */
-static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-			      struct cgroup_taskset *tset)
+static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
 	struct io_context *ioc;
@@ -1648,8 +1643,7 @@ static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 	return ret;
 }
 
-static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-			   struct cgroup_taskset *tset)
+static void blkiocg_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
 	struct io_context *ioc;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index e9b602151caf..501adb1b2f43 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -160,38 +160,6 @@ enum {
 	CGRP_CLONE_CHILDREN,
 };
 
-/* which pidlist file are we talking about? */
-enum cgroup_filetype {
-	CGROUP_FILE_PROCS,
-	CGROUP_FILE_TASKS,
-};
-
-/*
- * A pidlist is a list of pids that virtually represents the contents of one
- * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
- * a pair (one each for procs, tasks) for each pid namespace that's relevant
- * to the cgroup.
- */
-struct cgroup_pidlist {
-	/*
-	 * used to find which pidlist is wanted. doesn't change as long as
-	 * this particular list stays in the list.
-	 */
-	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
-	/* array of xids */
-	pid_t *list;
-	/* how many elements the above list has */
-	int length;
-	/* how many files are using the current array */
-	int use_count;
-	/* each of these stored in a list by its cgroup */
-	struct list_head links;
-	/* pointer to the cgroup we belong to, for list removal purposes */
-	struct cgroup *owner;
-	/* protects the other fields */
-	struct rw_semaphore mutex;
-};
-
 struct cgroup {
 	unsigned long flags;		/* "unsigned long" so bitops work */
 
@@ -484,23 +452,18 @@ int cgroup_taskset_size(struct cgroup_taskset *tset);
  */
 
 struct cgroup_subsys {
-	struct cgroup_subsys_state *(*create)(struct cgroup_subsys *ss,
-					      struct cgroup *cgrp);
-	int (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
-	void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
-	int (*can_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
-			  struct cgroup_taskset *tset);
-	void (*cancel_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
-			      struct cgroup_taskset *tset);
-	void (*attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
-		       struct cgroup_taskset *tset);
-	void (*fork)(struct cgroup_subsys *ss, struct task_struct *task);
-	void (*exit)(struct cgroup_subsys *ss, struct cgroup *cgrp,
-		     struct cgroup *old_cgrp, struct task_struct *task);
-	int (*populate)(struct cgroup_subsys *ss,
-			struct cgroup *cgrp);
-	void (*post_clone)(struct cgroup_subsys *ss, struct cgroup *cgrp);
-	void (*bind)(struct cgroup_subsys *ss, struct cgroup *root);
+	struct cgroup_subsys_state *(*create)(struct cgroup *cgrp);
+	int (*pre_destroy)(struct cgroup *cgrp);
+	void (*destroy)(struct cgroup *cgrp);
+	int (*can_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
+	void (*cancel_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
+	void (*attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
+	void (*fork)(struct task_struct *task);
+	void (*exit)(struct cgroup *cgrp, struct cgroup *old_cgrp,
+		     struct task_struct *task);
+	int (*populate)(struct cgroup_subsys *ss, struct cgroup *cgrp);
+	void (*post_clone)(struct cgroup *cgrp);
+	void (*bind)(struct cgroup *root);
 
 	int subsys_id;
 	int active;
@@ -602,11 +565,6 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan);
 int cgroup_attach_task(struct cgroup *, struct task_struct *);
 int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
 
-static inline int cgroup_attach_task_current_cg(struct task_struct *tsk)
-{
-	return cgroup_attach_task_all(current, tsk);
-}
-
 /*
  * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
  * if cgroup_subsys.use_id == true. It can be used for looking up and scanning.
@@ -669,10 +627,6 @@ static inline int cgroup_attach_task_all(struct task_struct *from,
 {
 	return 0;
 }
-static inline int cgroup_attach_task_current_cg(struct task_struct *t)
-{
-	return 0;
-}
 
 #endif /* !CONFIG_CGROUPS */
 
diff --git a/include/net/sock.h b/include/net/sock.h
index dcde2d9268cd..7ef5c58f3f49 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -69,7 +69,7 @@ struct cgroup;
 struct cgroup_subsys;
 #ifdef CONFIG_NET
 int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss);
-void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss);
+void mem_cgroup_sockets_destroy(struct cgroup *cgrp);
 #else
 static inline
 int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
@@ -77,7 +77,7 @@ int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
 	return 0;
 }
 static inline
-void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
+void mem_cgroup_sockets_destroy(struct cgroup *cgrp)
 {
 }
 #endif
@@ -871,8 +871,7 @@ struct proto {
 	 */
 	int			(*init_cgroup)(struct cgroup *cgrp,
 					       struct cgroup_subsys *ss);
-	void			(*destroy_cgroup)(struct cgroup *cgrp,
-						  struct cgroup_subsys *ss);
+	void			(*destroy_cgroup)(struct cgroup *cgrp);
 	struct cg_proto		*(*proto_cgroup)(struct mem_cgroup *memcg);
 #endif
 };
diff --git a/include/net/tcp_memcontrol.h b/include/net/tcp_memcontrol.h
index 3512082fa909..48410ff25c9e 100644
--- a/include/net/tcp_memcontrol.h
+++ b/include/net/tcp_memcontrol.h
@@ -13,7 +13,7 @@ struct tcp_memcontrol {
 
 struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg);
 int tcp_init_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss);
-void tcp_destroy_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss);
+void tcp_destroy_cgroup(struct cgroup *cgrp);
 unsigned long long tcp_max_memory(const struct mem_cgroup *memcg);
 void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx);
 #endif /* _TCP_MEMCG_H */
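
The kernel/cgroup.c changes that follow share one structural idea: css_set
allocation is hoisted out of cgroup_task_migrate(), so the commit step of a
migration can no longer fail and the old "guarantee"/prefetch machinery can
be dropped. A condensed, illustrative sketch of the resulting flow for a
single task (not the verbatim kernel code; error unwinding is elided):

	/* find_css_set() may sleep and may fail, so it runs first ... */
	newcg = find_css_set(tsk->cgroups, cgrp);
	if (!newcg)
		return -ENOMEM;
	/*
	 * ... and the migrate step merely commits the already-allocated
	 * css_set, so it returns void and cannot fail partway through
	 * a threadgroup.
	 */
	cgroup_task_migrate(cgrp, oldcgrp, tsk, newcg);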
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index a5d3b5325f77..c6877fe9a831 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -818,7 +818,7 @@ static int cgroup_call_pre_destroy(struct cgroup *cgrp)
 
 	for_each_subsys(cgrp->root, ss)
 		if (ss->pre_destroy) {
-			ret = ss->pre_destroy(ss, cgrp);
+			ret = ss->pre_destroy(cgrp);
 			if (ret)
 				break;
 		}
@@ -846,7 +846,7 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
 		 * Release the subsystem state objects.
 		 */
 		for_each_subsys(cgrp->root, ss)
-			ss->destroy(ss, cgrp);
+			ss->destroy(cgrp);
 
 		cgrp->root->number_of_cgroups--;
 		mutex_unlock(&cgroup_mutex);
@@ -1015,7 +1015,7 @@ static int rebind_subsystems(struct cgroupfs_root *root,
 			list_move(&ss->sibling, &root->subsys_list);
 			ss->root = root;
 			if (ss->bind)
-				ss->bind(ss, cgrp);
+				ss->bind(cgrp);
 			mutex_unlock(&ss->hierarchy_mutex);
 			/* refcount was already taken, and we're keeping it */
 		} else if (bit & removed_bits) {
@@ -1025,7 +1025,7 @@ static int rebind_subsystems(struct cgroupfs_root *root,
 			BUG_ON(cgrp->subsys[i]->cgroup != cgrp);
 			mutex_lock(&ss->hierarchy_mutex);
 			if (ss->bind)
-				ss->bind(ss, dummytop);
+				ss->bind(dummytop);
 			dummytop->subsys[i]->cgroup = dummytop;
 			cgrp->subsys[i] = NULL;
 			subsys[i]->root = &rootnode;
@@ -1763,6 +1763,7 @@ EXPORT_SYMBOL_GPL(cgroup_path);
 struct task_and_cgroup {
 	struct task_struct	*task;
 	struct cgroup		*cgrp;
+	struct css_set		*cg;
 };
 
 struct cgroup_taskset {
@@ -1843,11 +1844,10 @@ EXPORT_SYMBOL_GPL(cgroup_taskset_size);
  * will already exist. If not set, this function might sleep, and can fail with
  * -ENOMEM. Must be called with cgroup_mutex and threadgroup locked.
  */
-static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
-			       struct task_struct *tsk, bool guarantee)
+static void cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
+				struct task_struct *tsk, struct css_set *newcg)
 {
 	struct css_set *oldcg;
-	struct css_set *newcg;
 
 	/*
 	 * We are synchronized through threadgroup_lock() against PF_EXITING
@@ -1857,23 +1857,6 @@ static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
 	WARN_ON_ONCE(tsk->flags & PF_EXITING);
 	oldcg = tsk->cgroups;
 
-	/* locate or allocate a new css_set for this task. */
-	if (guarantee) {
-		/* we know the css_set we want already exists. */
-		struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];
-		read_lock(&css_set_lock);
-		newcg = find_existing_css_set(oldcg, cgrp, template);
-		BUG_ON(!newcg);
-		get_css_set(newcg);
-		read_unlock(&css_set_lock);
-	} else {
-		might_sleep();
-		/* find_css_set will give us newcg already referenced. */
-		newcg = find_css_set(oldcg, cgrp);
-		if (!newcg)
-			return -ENOMEM;
-	}
-
 	task_lock(tsk);
 	rcu_assign_pointer(tsk->cgroups, newcg);
 	task_unlock(tsk);
@@ -1892,7 +1875,6 @@ static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
 	put_css_set(oldcg);
 
 	set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
-	return 0;
 }
 
 /**
@@ -1910,6 +1892,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 	struct cgroup *oldcgrp;
 	struct cgroupfs_root *root = cgrp->root;
 	struct cgroup_taskset tset = { };
+	struct css_set *newcg;
 
 	/* @tsk either already exited or can't exit until the end */
 	if (tsk->flags & PF_EXITING)
@@ -1925,7 +1908,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 
 	for_each_subsys(root, ss) {
 		if (ss->can_attach) {
-			retval = ss->can_attach(ss, cgrp, &tset);
+			retval = ss->can_attach(cgrp, &tset);
 			if (retval) {
 				/*
 				 * Remember on which subsystem the can_attach()
@@ -1939,13 +1922,17 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 		}
 	}
 
-	retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, false);
-	if (retval)
+	newcg = find_css_set(tsk->cgroups, cgrp);
+	if (!newcg) {
+		retval = -ENOMEM;
 		goto out;
+	}
+
+	cgroup_task_migrate(cgrp, oldcgrp, tsk, newcg);
 
 	for_each_subsys(root, ss) {
 		if (ss->attach)
-			ss->attach(ss, cgrp, &tset);
+			ss->attach(cgrp, &tset);
 	}
 
 	synchronize_rcu();
@@ -1967,7 +1954,7 @@ out:
 				 */
 				break;
 			if (ss->cancel_attach)
-				ss->cancel_attach(ss, cgrp, &tset);
+				ss->cancel_attach(cgrp, &tset);
 		}
 	}
 	return retval;
@@ -1997,66 +1984,6 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 }
 EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
 
-/*
- * cgroup_attach_proc works in two stages, the first of which prefetches all
- * new css_sets needed (to make sure we have enough memory before committing
- * to the move) and stores them in a list of entries of the following type.
- * TODO: possible optimization: use css_set->rcu_head for chaining instead
- */
-struct cg_list_entry {
-	struct css_set *cg;
-	struct list_head links;
-};
-
-static bool css_set_check_fetched(struct cgroup *cgrp,
-				  struct task_struct *tsk, struct css_set *cg,
-				  struct list_head *newcg_list)
-{
-	struct css_set *newcg;
-	struct cg_list_entry *cg_entry;
-	struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];
-
-	read_lock(&css_set_lock);
-	newcg = find_existing_css_set(cg, cgrp, template);
-	read_unlock(&css_set_lock);
-
-	/* doesn't exist at all? */
-	if (!newcg)
-		return false;
-	/* see if it's already in the list */
-	list_for_each_entry(cg_entry, newcg_list, links)
-		if (cg_entry->cg == newcg)
-			return true;
-
-	/* not found */
-	return false;
-}
-
-/*
- * Find the new css_set and store it in the list in preparation for moving the
- * given task to the given cgroup. Returns 0 or -ENOMEM.
- */
-static int css_set_prefetch(struct cgroup *cgrp, struct css_set *cg,
-			    struct list_head *newcg_list)
-{
-	struct css_set *newcg;
-	struct cg_list_entry *cg_entry;
-
-	/* ensure a new css_set will exist for this thread */
-	newcg = find_css_set(cg, cgrp);
-	if (!newcg)
-		return -ENOMEM;
-	/* add it to the list */
-	cg_entry = kmalloc(sizeof(struct cg_list_entry), GFP_KERNEL);
-	if (!cg_entry) {
-		put_css_set(newcg);
-		return -ENOMEM;
-	}
-	cg_entry->cg = newcg;
-	list_add(&cg_entry->links, newcg_list);
-	return 0;
-}
-
 /**
  * cgroup_attach_proc - attach all threads in a threadgroup to a cgroup
  * @cgrp: the cgroup to attach to
@@ -2070,20 +1997,12 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	int retval, i, group_size;
 	struct cgroup_subsys *ss, *failed_ss = NULL;
 	/* guaranteed to be initialized later, but the compiler needs this */
-	struct css_set *oldcg;
 	struct cgroupfs_root *root = cgrp->root;
 	/* threadgroup list cursor and array */
 	struct task_struct *tsk;
 	struct task_and_cgroup *tc;
 	struct flex_array *group;
 	struct cgroup_taskset tset = { };
-	/*
-	 * we need to make sure we have css_sets for all the tasks we're
-	 * going to move -before- we actually start moving them, so that in
-	 * case we get an ENOMEM we can bail out before making any changes.
-	 */
-	struct list_head newcg_list;
-	struct cg_list_entry *cg_entry, *temp_nobe;
 
 	/*
 	 * step 0: in order to do expensive, possibly blocking operations for
@@ -2102,23 +2021,14 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	if (retval)
 		goto out_free_group_list;
 
-	/* prevent changes to the threadgroup list while we take a snapshot. */
-	read_lock(&tasklist_lock);
-	if (!thread_group_leader(leader)) {
-		/*
-		 * a race with de_thread from another thread's exec() may strip
-		 * us of our leadership, making while_each_thread unsafe to use
-		 * on this task. if this happens, there is no choice but to
-		 * throw this task away and try again (from cgroup_procs_write);
-		 * this is "double-double-toil-and-trouble-check locking".
-		 */
-		read_unlock(&tasklist_lock);
-		retval = -EAGAIN;
-		goto out_free_group_list;
-	}
-
 	tsk = leader;
 	i = 0;
+	/*
+	 * Prevent freeing of tasks while we take a snapshot. Tasks that are
+	 * already PF_EXITING could be freed from underneath us unless we
+	 * take an rcu_read_lock.
+	 */
+	rcu_read_lock();
 	do {
 		struct task_and_cgroup ent;
 
@@ -2128,24 +2038,24 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 
 		/* as per above, nr_threads may decrease, but not increase. */
 		BUG_ON(i >= group_size);
-		/*
-		 * saying GFP_ATOMIC has no effect here because we did prealloc
-		 * earlier, but it's good form to communicate our expectations.
-		 */
 		ent.task = tsk;
 		ent.cgrp = task_cgroup_from_root(tsk, root);
 		/* nothing to do if this task is already in the cgroup */
 		if (ent.cgrp == cgrp)
 			continue;
+		/*
+		 * saying GFP_ATOMIC has no effect here because we did prealloc
+		 * earlier, but it's good form to communicate our expectations.
+		 */
 		retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
 		BUG_ON(retval != 0);
 		i++;
 	} while_each_thread(leader, tsk);
+	rcu_read_unlock();
 	/* remember the number of threads in the array for later. */
 	group_size = i;
 	tset.tc_array = group;
 	tset.tc_array_len = group_size;
-	read_unlock(&tasklist_lock);
 
 	/* methods shouldn't be called if no task is actually migrating */
 	retval = 0;
@@ -2157,7 +2067,7 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	 */
 	for_each_subsys(root, ss) {
 		if (ss->can_attach) {
-			retval = ss->can_attach(ss, cgrp, &tset);
+			retval = ss->can_attach(cgrp, &tset);
 			if (retval) {
 				failed_ss = ss;
 				goto out_cancel_attach;
@@ -2169,17 +2079,12 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	 * step 2: make sure css_sets exist for all threads to be migrated.
 	 * we use find_css_set, which allocates a new one if necessary.
 	 */
-	INIT_LIST_HEAD(&newcg_list);
 	for (i = 0; i < group_size; i++) {
 		tc = flex_array_get(group, i);
-		oldcg = tc->task->cgroups;
-
-		/* if we don't already have it in the list get a new one */
-		if (!css_set_check_fetched(cgrp, tc->task, oldcg,
-					   &newcg_list)) {
-			retval = css_set_prefetch(cgrp, oldcg, &newcg_list);
-			if (retval)
-				goto out_list_teardown;
+		tc->cg = find_css_set(tc->task->cgroups, cgrp);
+		if (!tc->cg) {
+			retval = -ENOMEM;
+			goto out_put_css_set_refs;
 		}
 	}
 
@@ -2190,8 +2095,7 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	 */
 	for (i = 0; i < group_size; i++) {
 		tc = flex_array_get(group, i);
-		retval = cgroup_task_migrate(cgrp, tc->cgrp, tc->task, true);
-		BUG_ON(retval);
+		cgroup_task_migrate(cgrp, tc->cgrp, tc->task, tc->cg);
 	}
 	/* nothing is sensitive to fork() after this point. */
 
@@ -2200,7 +2104,7 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	 */
 	for_each_subsys(root, ss) {
 		if (ss->attach)
-			ss->attach(ss, cgrp, &tset);
+			ss->attach(cgrp, &tset);
 	}
 
 	/*
@@ -2209,21 +2113,22 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	synchronize_rcu();
 	cgroup_wakeup_rmdir_waiter(cgrp);
 	retval = 0;
-out_list_teardown:
-	/* clean up the list of prefetched css_sets. */
-	list_for_each_entry_safe(cg_entry, temp_nobe, &newcg_list, links) {
-		list_del(&cg_entry->links);
-		put_css_set(cg_entry->cg);
-		kfree(cg_entry);
+out_put_css_set_refs:
+	if (retval) {
+		for (i = 0; i < group_size; i++) {
+			tc = flex_array_get(group, i);
+			if (!tc->cg)
+				break;
+			put_css_set(tc->cg);
+		}
 	}
 out_cancel_attach:
-	/* same deal as in cgroup_attach_task */
 	if (retval) {
 		for_each_subsys(root, ss) {
 			if (ss == failed_ss)
 				break;
 			if (ss->cancel_attach)
-				ss->cancel_attach(ss, cgrp, &tset);
+				ss->cancel_attach(cgrp, &tset);
 		}
 	}
 out_free_group_list:
@@ -2245,22 +2150,14 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
 	if (!cgroup_lock_live_group(cgrp))
 		return -ENODEV;
 
+retry_find_task:
+	rcu_read_lock();
 	if (pid) {
-		rcu_read_lock();
 		tsk = find_task_by_vpid(pid);
 		if (!tsk) {
 			rcu_read_unlock();
-			cgroup_unlock();
-			return -ESRCH;
-		}
-		if (threadgroup) {
-			/*
-			 * RCU protects this access, since tsk was found in the
-			 * tid map. a race with de_thread may cause group_leader
-			 * to stop being the leader, but cgroup_attach_proc will
-			 * detect it later.
-			 */
-			tsk = tsk->group_leader;
+			ret= -ESRCH;
+			goto out_unlock_cgroup;
 		}
 		/*
 		 * even if we're attaching all tasks in the thread group, we
@@ -2271,29 +2168,38 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
 		    cred->euid != tcred->uid &&
 		    cred->euid != tcred->suid) {
 			rcu_read_unlock();
-			cgroup_unlock();
-			return -EACCES;
+			ret = -EACCES;
+			goto out_unlock_cgroup;
 		}
-		get_task_struct(tsk);
-		rcu_read_unlock();
-	} else {
-		if (threadgroup)
-			tsk = current->group_leader;
-		else
-			tsk = current;
-		get_task_struct(tsk);
-	}
-
-	threadgroup_lock(tsk);
+	} else
+		tsk = current;
 
 	if (threadgroup)
+		tsk = tsk->group_leader;
+	get_task_struct(tsk);
+	rcu_read_unlock();
+
+	threadgroup_lock(tsk);
+	if (threadgroup) {
+		if (!thread_group_leader(tsk)) {
+			/*
+			 * a race with de_thread from another thread's exec()
+			 * may strip us of our leadership, if this happens,
+			 * there is no choice but to throw this task away and
+			 * try again; this is
+			 * "double-double-toil-and-trouble-check locking".
+			 */
+			threadgroup_unlock(tsk);
+			put_task_struct(tsk);
+			goto retry_find_task;
+		}
 		ret = cgroup_attach_proc(cgrp, tsk);
-	else
+	} else
 		ret = cgroup_attach_task(cgrp, tsk);
-
 	threadgroup_unlock(tsk);
 
 	put_task_struct(tsk);
+out_unlock_cgroup:
 	cgroup_unlock();
 	return ret;
 }
@@ -2305,16 +2211,7 @@ static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
 
 static int cgroup_procs_write(struct cgroup *cgrp, struct cftype *cft, u64 tgid)
 {
-	int ret;
-	do {
-		/*
-		 * attach_proc fails with -EAGAIN if threadgroup leadership
-		 * changes in the middle of the operation, in which case we need
-		 * to find the task_struct for the new leader and start over.
-		 */
-		ret = attach_task_by_pid(cgrp, tgid, true);
-	} while (ret == -EAGAIN);
-	return ret;
+	return attach_task_by_pid(cgrp, tgid, true);
 }
 
 /**
@@ -2804,15 +2701,20 @@ static void cgroup_advance_iter(struct cgroup *cgrp,
  * using their cgroups capability, we don't maintain the lists running
  * through each css_set to its tasks until we see the list actually
  * used - in other words after the first call to cgroup_iter_start().
- *
- * The tasklist_lock is not held here, as do_each_thread() and
- * while_each_thread() are protected by RCU.
  */
 static void cgroup_enable_task_cg_lists(void)
 {
 	struct task_struct *p, *g;
 	write_lock(&css_set_lock);
 	use_task_css_set_links = 1;
+	/*
+	 * We need tasklist_lock because RCU is not safe against
+	 * while_each_thread(). Besides, a forking task that has passed
+	 * cgroup_post_fork() without seeing use_task_css_set_links = 1
+	 * is not guaranteed to have its child immediately visible in the
+	 * tasklist if we walk through it with RCU.
+	 */
+	read_lock(&tasklist_lock);
 	do_each_thread(g, p) {
 		task_lock(p);
 		/*
@@ -2824,6 +2726,7 @@ static void cgroup_enable_task_cg_lists(void)
 			list_add(&p->cg_list, &p->cgroups->tasks);
 		task_unlock(p);
 	} while_each_thread(g, p);
+	read_unlock(&tasklist_lock);
 	write_unlock(&css_set_lock);
 }
 
@@ -3043,6 +2946,38 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
  *
  */
 
+/* which pidlist file are we talking about? */
+enum cgroup_filetype {
+	CGROUP_FILE_PROCS,
+	CGROUP_FILE_TASKS,
+};
+
+/*
+ * A pidlist is a list of pids that virtually represents the contents of one
+ * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
+ * a pair (one each for procs, tasks) for each pid namespace that's relevant
+ * to the cgroup.
+ */
+struct cgroup_pidlist {
+	/*
+	 * used to find which pidlist is wanted. doesn't change as long as
+	 * this particular list stays in the list.
+	 */
+	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
+	/* array of xids */
+	pid_t *list;
+	/* how many elements the above list has */
+	int length;
+	/* how many files are using the current array */
+	int use_count;
+	/* each of these stored in a list by its cgroup */
+	struct list_head links;
+	/* pointer to the cgroup we belong to, for list removal purposes */
+	struct cgroup *owner;
+	/* protects the other fields */
+	struct rw_semaphore mutex;
+};
+
 /*
  * The following two functions "fix" the issue where there are more pids
  * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
@@ -3827,7 +3762,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
 		set_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
 
 	for_each_subsys(root, ss) {
-		struct cgroup_subsys_state *css = ss->create(ss, cgrp);
+		struct cgroup_subsys_state *css = ss->create(cgrp);
 
 		if (IS_ERR(css)) {
 			err = PTR_ERR(css);
@@ -3841,7 +3776,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
 		}
 		/* At error, ->destroy() callback has to free assigned ID. */
 		if (clone_children(parent) && ss->post_clone)
-			ss->post_clone(ss, cgrp);
+			ss->post_clone(cgrp);
 	}
 
 	cgroup_lock_hierarchy(root);
@@ -3875,7 +3810,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
 
 	for_each_subsys(root, ss) {
 		if (cgrp->subsys[ss->subsys_id])
-			ss->destroy(ss, cgrp);
+			ss->destroy(cgrp);
 	}
 
 	mutex_unlock(&cgroup_mutex);
@@ -4099,7 +4034,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
 	/* Create the top cgroup state for this subsystem */
 	list_add(&ss->sibling, &rootnode.subsys_list);
 	ss->root = &rootnode;
-	css = ss->create(ss, dummytop);
+	css = ss->create(dummytop);
 	/* We don't handle early failures gracefully */
 	BUG_ON(IS_ERR(css));
 	init_cgroup_css(css, ss, dummytop);
@@ -4188,7 +4123,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
 	 * no ss->create seems to need anything important in the ss struct, so
 	 * this can happen first (i.e. before the rootnode attachment).
 	 */
-	css = ss->create(ss, dummytop);
+	css = ss->create(dummytop);
 	if (IS_ERR(css)) {
 		/* failure case - need to deassign the subsys[] slot. */
 		subsys[i] = NULL;
@@ -4206,7 +4141,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
 		int ret = cgroup_init_idr(ss, css);
 		if (ret) {
 			dummytop->subsys[ss->subsys_id] = NULL;
-			ss->destroy(ss, dummytop);
+			ss->destroy(dummytop);
 			subsys[i] = NULL;
 			mutex_unlock(&cgroup_mutex);
 			return ret;
@@ -4304,7 +4239,7 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
 	 * pointer to find their state. note that this also takes care of
 	 * freeing the css_id.
 	 */
-	ss->destroy(ss, dummytop);
+	ss->destroy(dummytop);
 	dummytop->subsys[ss->subsys_id] = NULL;
 
 	mutex_unlock(&cgroup_mutex);
@@ -4580,7 +4515,7 @@ void cgroup_fork_callbacks(struct task_struct *child)
 		for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
 			struct cgroup_subsys *ss = subsys[i];
 			if (ss->fork)
-				ss->fork(ss, child);
+				ss->fork(child);
 		}
 	}
 }
@@ -4596,6 +4531,17 @@ void cgroup_fork_callbacks(struct task_struct *child)
  */
 void cgroup_post_fork(struct task_struct *child)
 {
+	/*
+	 * use_task_css_set_links is set to 1 before we walk the tasklist
+	 * under the tasklist_lock and we read it here after we added the child
+	 * to the tasklist under the tasklist_lock as well. If the child wasn't
+	 * yet in the tasklist when we walked through it from
+	 * cgroup_enable_task_cg_lists(), then use_task_css_set_links value
+	 * should be visible now due to the paired locking and barriers implied
+	 * by LOCK/UNLOCK: it is written before the tasklist_lock unlock
+	 * in cgroup_enable_task_cg_lists() and read here after the tasklist_lock
+	 * lock on fork.
+	 */
 	if (use_task_css_set_links) {
 		write_lock(&css_set_lock);
 		if (list_empty(&child->cg_list)) {
@@ -4682,7 +4628,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
 				struct cgroup *old_cgrp =
 					rcu_dereference_raw(cg->subsys[i])->cgroup;
 				struct cgroup *cgrp = task_cgroup(tsk, i);
-				ss->exit(ss, cgrp, old_cgrp, tsk);
+				ss->exit(cgrp, old_cgrp, tsk);
 			}
 		}
 	}
@@ -5137,8 +5083,7 @@ struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id)
 }
 
 #ifdef CONFIG_CGROUP_DEBUG
-static struct cgroup_subsys_state *debug_create(struct cgroup_subsys *ss,
-						struct cgroup *cont)
+static struct cgroup_subsys_state *debug_create(struct cgroup *cont)
 {
 	struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);
 
@@ -5148,7 +5093,7 @@ static struct cgroup_subsys_state *debug_create(struct cgroup_subsys *ss,
 	return css;
 }
 
-static void debug_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
+static void debug_destroy(struct cgroup *cont)
 {
 	kfree(cont->subsys[debug_subsys_id]);
 }
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index fc0646b78a64..f86e93920b62 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -128,8 +128,7 @@ struct cgroup_subsys freezer_subsys;
  * task->alloc_lock (inside __thaw_task(), prevents race with refrigerator())
  * sighand->siglock
  */
-static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
-						  struct cgroup *cgroup)
+static struct cgroup_subsys_state *freezer_create(struct cgroup *cgroup)
 {
 	struct freezer *freezer;

@@ -142,8 +141,7 @@ static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
 	return &freezer->css;
 }

-static void freezer_destroy(struct cgroup_subsys *ss,
-			    struct cgroup *cgroup)
+static void freezer_destroy(struct cgroup *cgroup)
 {
 	struct freezer *freezer = cgroup_freezer(cgroup);

@@ -164,8 +162,7 @@ static bool is_task_frozen_enough(struct task_struct *task)
  * a write to that file racing against an attach, and hence the
  * can_attach() result will remain valid until the attach completes.
  */
-static int freezer_can_attach(struct cgroup_subsys *ss,
-			      struct cgroup *new_cgroup,
+static int freezer_can_attach(struct cgroup *new_cgroup,
 			      struct cgroup_taskset *tset)
 {
 	struct freezer *freezer;
@@ -185,7 +182,7 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
 	return 0;
 }

-static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
+static void freezer_fork(struct task_struct *task)
 {
 	struct freezer *freezer;

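freezer_create() hands back &freezer->css, the css embedded in its own state object, and cgroup_freezer() later recovers the enclosing freezer from that pointer with container_of(). A standalone round trip of the same idiom (mock types; container_of() here is a simplified rendering of the kernel macro):

#include <stddef.h>
#include <stdio.h>

/* simplified rendering of the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct css_mock { int refcnt; };

struct freezer_mock {
	struct css_mock css;	/* embedded, as css is in struct freezer */
	int state;
};

int main(void)
{
	struct freezer_mock f = { { 1 }, 3 };

	/* the subsystem hands out only the embedded css ... */
	struct css_mock *css = &f.css;

	/* ... and cgroup_freezer() gets the freezer back like this */
	struct freezer_mock *back = container_of(css, struct freezer_mock, css);

	printf("state = %d\n", back->state);	/* prints 3 */
	return 0;
}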
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index a09ac2b9a661..5d575836dba6 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1399,8 +1399,7 @@ static nodemask_t cpuset_attach_nodemask_from;
 static nodemask_t cpuset_attach_nodemask_to;

 /* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
-static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-			     struct cgroup_taskset *tset)
+static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 {
 	struct cpuset *cs = cgroup_cs(cgrp);
 	struct task_struct *task;
@@ -1436,8 +1435,7 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 	return 0;
 }

-static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-			  struct cgroup_taskset *tset)
+static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 {
 	struct mm_struct *mm;
 	struct task_struct *task;
@@ -1833,8 +1831,7 @@ static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
  * (and likewise for mems) to the new cgroup. Called with cgroup_mutex
  * held.
  */
-static void cpuset_post_clone(struct cgroup_subsys *ss,
-			      struct cgroup *cgroup)
+static void cpuset_post_clone(struct cgroup *cgroup)
 {
 	struct cgroup *parent, *child;
 	struct cpuset *cs, *parent_cs;
@@ -1857,13 +1854,10 @@ static void cpuset_post_clone(struct cgroup_subsys *ss,

 /*
  * cpuset_create - create a cpuset
- * ss: cpuset cgroup subsystem
  * cont: control group that the new cpuset will be part of
  */

-static struct cgroup_subsys_state *cpuset_create(
-	struct cgroup_subsys *ss,
-	struct cgroup *cont)
+static struct cgroup_subsys_state *cpuset_create(struct cgroup *cont)
 {
 	struct cpuset *cs;
 	struct cpuset *parent;
@@ -1902,7 +1896,7 @@ static struct cgroup_subsys_state *cpuset_create(
  * will call async_rebuild_sched_domains().
  */

-static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
+static void cpuset_destroy(struct cgroup *cont)
 {
 	struct cpuset *cs = cgroup_cs(cont);

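The can_attach() methods above receive a whole cgroup_taskset and must validate every task before anything moves, so an attach succeeds or fails for the set as a unit. A toy model of that all-or-nothing check, with an array standing in for the kernel's taskset iterator and -1 standing in for an errno:

#include <stdio.h>

struct task_mock { int pid; int unmovable; };

/* stand-in for struct cgroup_taskset: just an array plus a count */
struct taskset_mock { struct task_mock *tasks; int nr; };

/* one unsuitable task vetoes the whole attach, as in cpuset_can_attach() */
static int can_attach_mock(struct taskset_mock *tset)
{
	int i;

	for (i = 0; i < tset->nr; i++)
		if (tset->tasks[i].unmovable)
			return -1;
	return 0;
}

int main(void)
{
	struct task_mock tasks[] = { { 100, 0 }, { 101, 1 } };
	struct taskset_mock tset = { tasks, 2 };

	printf("attach allowed: %s\n", can_attach_mock(&tset) ? "no" : "yes");
	return 0;
}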
diff --git a/kernel/events/core.c b/kernel/events/core.c
index c61234b1a988..4b50357914fb 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7147,8 +7147,7 @@ unlock:
 device_initcall(perf_event_sysfs_init);

 #ifdef CONFIG_CGROUP_PERF
-static struct cgroup_subsys_state *perf_cgroup_create(
-	struct cgroup_subsys *ss, struct cgroup *cont)
+static struct cgroup_subsys_state *perf_cgroup_create(struct cgroup *cont)
 {
 	struct perf_cgroup *jc;

@@ -7165,8 +7164,7 @@ static struct cgroup_subsys_state *perf_cgroup_create(
 	return &jc->css;
 }

-static void perf_cgroup_destroy(struct cgroup_subsys *ss,
-				struct cgroup *cont)
+static void perf_cgroup_destroy(struct cgroup *cont)
 {
 	struct perf_cgroup *jc;
 	jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
@@ -7182,8 +7180,7 @@ static int __perf_cgroup_move(void *info)
 	return 0;
 }

-static void perf_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-			       struct cgroup_taskset *tset)
+static void perf_cgroup_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 {
 	struct task_struct *task;

@@ -7191,8 +7188,8 @@ static void perf_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 		task_function_call(task, __perf_cgroup_move, task);
 }

-static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
-			     struct cgroup *old_cgrp, struct task_struct *task)
+static void perf_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
+			     struct task_struct *task)
 {
 	/*
 	 * cgroup_exit() is called in the copy_process() failure path.
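The comment opening perf_cgroup_exit() (truncated here) records a real constraint: cgroup_exit() also runs in the copy_process() failure path, so an exit callback may see a task that never finished attaching. A toy sketch of the defensive shape that implies; the cg_attached flag is invented purely for the demo:

#include <stdbool.h>
#include <stdio.h>

struct task_mock { bool cg_attached; };	/* invented flag for the demo */

/* an exit hook that tolerates tasks killed before attach completed */
static void exit_hook_mock(struct task_mock *t)
{
	if (!t->cg_attached) {
		printf("failed fork: nothing to undo\n");
		return;
	}
	t->cg_attached = false;
	printf("normal exit: state torn down\n");
}

int main(void)
{
	struct task_mock normal = { true };
	struct task_mock failed_fork = { false };	/* copy_process() bailed */

	exit_hook_mock(&normal);
	exit_hook_mock(&failed_fork);
	return 0;
}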
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d2bd4647586c..a35cb8dbd8c4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7571,8 +7571,7 @@ static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
 			    struct task_group, css);
 }

-static struct cgroup_subsys_state *
-cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
+static struct cgroup_subsys_state *cpu_cgroup_create(struct cgroup *cgrp)
 {
 	struct task_group *tg, *parent;

@@ -7589,15 +7588,14 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
 	return &tg->css;
 }

-static void
-cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
+static void cpu_cgroup_destroy(struct cgroup *cgrp)
 {
 	struct task_group *tg = cgroup_tg(cgrp);

 	sched_destroy_group(tg);
 }

-static int cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+static int cpu_cgroup_can_attach(struct cgroup *cgrp,
 				 struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
@@ -7615,7 +7613,7 @@ static int cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 	return 0;
 }

-static void cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+static void cpu_cgroup_attach(struct cgroup *cgrp,
 			      struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
@@ -7625,8 +7623,8 @@ static void cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 }

 static void
-cpu_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
-		struct cgroup *old_cgrp, struct task_struct *task)
+cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
+		struct task_struct *task)
 {
 	/*
 	 * cgroup_exit() is called in the copy_process() failure path.
@@ -7976,8 +7974,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
  */

 /* create a new cpu accounting group */
-static struct cgroup_subsys_state *cpuacct_create(
-	struct cgroup_subsys *ss, struct cgroup *cgrp)
+static struct cgroup_subsys_state *cpuacct_create(struct cgroup *cgrp)
 {
 	struct cpuacct *ca;

@@ -8007,8 +8004,7 @@ out:
 }

 /* destroy an existing cpu accounting group */
-static void
-cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
+static void cpuacct_destroy(struct cgroup *cgrp)
 {
 	struct cpuacct *ca = cgroup_ca(cgrp);

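Create callbacks such as cpu_cgroup_create() and cpuacct_create() report failure through the returned pointer itself, using the kernel's ERR_PTR()/IS_ERR() encoding (visible as the return ERR_PTR(error) line in the mm/memcontrol.c hunk below). A userspace rendering of that convention, simplified from the definitions in include/linux/err.h:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095	/* as in include/linux/err.h */

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* the top page of the address space is reserved for errno values */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *create_mock(int oom)
{
	static int css;	/* stands in for the real state object */

	if (oom)
		return ERR_PTR(-ENOMEM);	/* error encoded in the pointer */
	return &css;
}

int main(void)
{
	void *p = create_mock(1);

	if (IS_ERR(p))
		printf("create failed: %ld\n", PTR_ERR(p));	/* -12 (ENOMEM) */
	return 0;
}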
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 58a08fc7414a..26c6f4ec20f4 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4602,10 +4602,9 @@ static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
 	return mem_cgroup_sockets_init(cont, ss);
 };

-static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
-				struct cgroup *cont)
+static void kmem_cgroup_destroy(struct cgroup *cont)
 {
-	mem_cgroup_sockets_destroy(cont, ss);
+	mem_cgroup_sockets_destroy(cont);
 }
 #else
 static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
@@ -4613,8 +4612,7 @@ static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
 	return 0;
 }

-static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
-				struct cgroup *cont)
+static void kmem_cgroup_destroy(struct cgroup *cont)
 {
 }
 #endif
@@ -4927,7 +4925,7 @@ err_cleanup:
 }

 static struct cgroup_subsys_state * __ref
-mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
+mem_cgroup_create(struct cgroup *cont)
 {
 	struct mem_cgroup *memcg, *parent;
 	long error = -ENOMEM;
@@ -4989,20 +4987,18 @@ free_out:
 	return ERR_PTR(error);
 }

-static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
-				  struct cgroup *cont)
+static int mem_cgroup_pre_destroy(struct cgroup *cont)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);

 	return mem_cgroup_force_empty(memcg, false);
 }

-static void mem_cgroup_destroy(struct cgroup_subsys *ss,
-			       struct cgroup *cont)
+static void mem_cgroup_destroy(struct cgroup *cont)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);

-	kmem_cgroup_destroy(ss, cont);
+	kmem_cgroup_destroy(cont);

 	mem_cgroup_put(memcg);
 }
@@ -5339,9 +5335,8 @@ static void mem_cgroup_clear_mc(void)
 	mem_cgroup_end_move(from);
 }

-static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
-				 struct cgroup *cgroup,
-				 struct cgroup_taskset *tset)
+static int mem_cgroup_can_attach(struct cgroup *cgroup,
+				 struct cgroup_taskset *tset)
 {
 	struct task_struct *p = cgroup_taskset_first(tset);
 	int ret = 0;
@@ -5379,9 +5374,8 @@ static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
 	return ret;
 }

-static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
-				     struct cgroup *cgroup,
-				     struct cgroup_taskset *tset)
+static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
+				     struct cgroup_taskset *tset)
 {
 	mem_cgroup_clear_mc();
 }
@@ -5496,9 +5490,8 @@ retry:
 	up_read(&mm->mmap_sem);
 }

-static void mem_cgroup_move_task(struct cgroup_subsys *ss,
-				 struct cgroup *cont,
-				 struct cgroup_taskset *tset)
+static void mem_cgroup_move_task(struct cgroup *cont,
+				 struct cgroup_taskset *tset)
 {
 	struct task_struct *p = cgroup_taskset_first(tset);
 	struct mm_struct *mm = get_task_mm(p);
@@ -5513,20 +5506,17 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
 	mem_cgroup_clear_mc();
 }
 #else	/* !CONFIG_MMU */
-static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
-				 struct cgroup *cgroup,
-				 struct cgroup_taskset *tset)
+static int mem_cgroup_can_attach(struct cgroup *cgroup,
+				 struct cgroup_taskset *tset)
 {
 	return 0;
 }
-static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
-				     struct cgroup *cgroup,
-				     struct cgroup_taskset *tset)
+static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
+				     struct cgroup_taskset *tset)
 {
 }
-static void mem_cgroup_move_task(struct cgroup_subsys *ss,
-				 struct cgroup *cont,
-				 struct cgroup_taskset *tset)
+static void mem_cgroup_move_task(struct cgroup *cont,
+				 struct cgroup_taskset *tset)
 {
 }
 #endif
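The #else /* !CONFIG_MMU */ block above replaces the real attach hooks with empty stubs that keep the identical, now ss-less, prototypes, so callers compile unchanged whichever way the kernel is configured. A minimal standalone version of the pattern (CONFIG_MMU_MOCK is an invented stand-in for the real Kconfig symbol):

#include <stdio.h>

#define CONFIG_MMU_MOCK 0	/* flip to 1 for the "real" implementation */

struct cgroup_mock;
struct taskset_mock;

#if CONFIG_MMU_MOCK
static int can_attach_mock(struct cgroup_mock *cg, struct taskset_mock *ts)
{
	(void)cg; (void)ts;
	/* the real charge-migration setup would live here */
	return 0;
}
#else /* !CONFIG_MMU_MOCK */
static int can_attach_mock(struct cgroup_mock *cg, struct taskset_mock *ts)
{
	(void)cg; (void)ts;
	return 0;	/* nothing to migrate without an MMU */
}
#endif

int main(void)
{
	/* the call site is identical under either configuration */
	printf("can_attach: %d\n", can_attach_mock(NULL, NULL));
	return 0;
}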
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 4dacc44637ef..ba6900f73900 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -23,9 +23,8 @@
 #include <net/sock.h>
 #include <net/netprio_cgroup.h>

-static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
-					       struct cgroup *cgrp);
-static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp);
+static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp);
+static void cgrp_destroy(struct cgroup *cgrp);
 static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp);

 struct cgroup_subsys net_prio_subsys = {
@@ -121,8 +120,7 @@ static void update_netdev_tables(void)
 	rtnl_unlock();
 }

-static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
-					       struct cgroup *cgrp)
+static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
 {
 	struct cgroup_netprio_state *cs;
 	int ret;
@@ -146,7 +144,7 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
 	return &cs->css;
 }

-static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
+static void cgrp_destroy(struct cgroup *cgrp)
 {
 	struct cgroup_netprio_state *cs;
 	struct net_device *dev;
diff --git a/net/core/sock.c b/net/core/sock.c
index 95aff9c7419b..1fb21b51593b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -160,19 +160,19 @@ int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
 out:
 	list_for_each_entry_continue_reverse(proto, &proto_list, node)
 		if (proto->destroy_cgroup)
-			proto->destroy_cgroup(cgrp, ss);
+			proto->destroy_cgroup(cgrp);
 	mutex_unlock(&proto_list_mutex);
 	return ret;
 }

-void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
+void mem_cgroup_sockets_destroy(struct cgroup *cgrp)
 {
 	struct proto *proto;

 	mutex_lock(&proto_list_mutex);
 	list_for_each_entry_reverse(proto, &proto_list, node)
 		if (proto->destroy_cgroup)
-			proto->destroy_cgroup(cgrp, ss);
+			proto->destroy_cgroup(cgrp);
 	mutex_unlock(&proto_list_mutex);
 }
 #endif
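The error path at the top of this hunk (the out: label plus list_for_each_entry_continue_reverse()) shows the init/unwind discipline: initialize protocols in list order, and on failure tear down only the ones already initialized, newest first, skipping any without a destroy_cgroup hook. A self-contained model of that discipline, with arrays in place of the kernel list macros:

#include <stdio.h>

struct proto_mock {
	const char *name;
	int (*init)(void);
	void (*destroy)(void);	/* optional hook, may be NULL */
};

static int ok_init(void) { return 0; }
static int bad_init(void) { return -1; }
static void destroy_hook(void) { printf("destroyed one proto\n"); }

int main(void)
{
	struct proto_mock protos[] = {
		{ "a", ok_init,  destroy_hook },
		{ "b", ok_init,  NULL },	/* no destroy hook: skipped on unwind */
		{ "c", bad_init, destroy_hook },	/* fails: never unwound itself */
	};
	int n = sizeof(protos) / sizeof(protos[0]);
	int i;

	for (i = 0; i < n; i++)
		if (protos[i].init())
			break;
	if (i < n)	/* unwind only what succeeded, newest first */
		while (i-- > 0)
			if (protos[i].destroy)
				protos[i].destroy();
	return 0;
}

Running it destroys only "a": "b" has no hook and "c" never initialized, which is exactly what continuing in reverse from the failed entry buys the kernel code.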
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
index 602fb305365f..e795272fbe9e 100644
--- a/net/ipv4/tcp_memcontrol.c
+++ b/net/ipv4/tcp_memcontrol.c
@@ -94,7 +94,7 @@ create_files:
 }
 EXPORT_SYMBOL(tcp_init_cgroup);

-void tcp_destroy_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss)
+void tcp_destroy_cgroup(struct cgroup *cgrp)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
 	struct cg_proto *cg_proto;
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index f84fdc3a7f27..1afaa284fcd7 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -22,9 +22,8 @@
 #include <net/sock.h>
 #include <net/cls_cgroup.h>

-static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
-					       struct cgroup *cgrp);
-static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp);
+static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp);
+static void cgrp_destroy(struct cgroup *cgrp);
 static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp);

 struct cgroup_subsys net_cls_subsys = {
@@ -51,8 +50,7 @@ static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
 			    struct cgroup_cls_state, css);
 }

-static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
-					       struct cgroup *cgrp)
+static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
 {
 	struct cgroup_cls_state *cs;

@@ -66,7 +64,7 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
 	return &cs->css;
 }

-static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
+static void cgrp_destroy(struct cgroup *cgrp)
 {
 	kfree(cgrp_cls_state(cgrp));
 }
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 8b5b5d8612c6..c43a3323feea 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -61,8 +61,8 @@ static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)

 struct cgroup_subsys devices_subsys;

-static int devcgroup_can_attach(struct cgroup_subsys *ss,
-		struct cgroup *new_cgrp, struct cgroup_taskset *set)
+static int devcgroup_can_attach(struct cgroup *new_cgrp,
+				struct cgroup_taskset *set)
 {
 	struct task_struct *task = cgroup_taskset_first(set);

@@ -156,8 +156,7 @@ remove:
 /*
  * called from kernel/cgroup.c with cgroup_lock() held.
  */
-static struct cgroup_subsys_state *devcgroup_create(struct cgroup_subsys *ss,
-						    struct cgroup *cgroup)
+static struct cgroup_subsys_state *devcgroup_create(struct cgroup *cgroup)
 {
 	struct dev_cgroup *dev_cgroup, *parent_dev_cgroup;
 	struct cgroup *parent_cgroup;
@@ -195,8 +194,7 @@ static struct cgroup_subsys_state *devcgroup_create(struct cgroup_subsys *ss,
 	return &dev_cgroup->css;
 }

-static void devcgroup_destroy(struct cgroup_subsys *ss,
-			      struct cgroup *cgroup)
+static void devcgroup_destroy(struct cgroup *cgroup)
 {
 	struct dev_cgroup *dev_cgroup;
 	struct dev_whitelist_item *wh, *tmp;
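devcgroup_can_attach() keeps its old policy under the new signature: a task may always move itself, while moving another task requires CAP_SYS_ADMIN (the capability check sits in the part of the function body this hunk does not show). A toy version of that gate; capable_mock() and the task layout are invented for the demo:

#include <errno.h>
#include <stdio.h>
#include <stdbool.h>

struct task_mock { int pid; };

static struct task_mock current_mock = { 100 };	/* stands in for current */
static bool capable_mock(void) { return false; }	/* pretend: no CAP_SYS_ADMIN */

static int can_attach_mock(struct task_mock *first)
{
	/* moving someone else without privilege is refused */
	if (first != &current_mock && !capable_mock())
		return -EPERM;
	return 0;
}

int main(void)
{
	struct task_mock other = { 200 };

	printf("move self:  %d\n", can_attach_mock(&current_mock));	/* 0 */
	printf("move other: %d\n", can_attach_mock(&other));	/* -EPERM */
	return 0;
}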