author     Tejun Heo <tj@kernel.org>	2011-12-12 21:12:21 -0500
committer  Tejun Heo <tj@kernel.org>	2011-12-12 21:12:21 -0500
commit     2f7ee5691eecb67c8108b92001a85563ea336ac5 (patch)
tree       18cf60ea8a463f4a6cd59c68926ba4357ae8ff4c
parent     134d33737f9015761c3832f6b268fae6274aac7f (diff)
cgroup: introduce cgroup_taskset and use it in subsys->can_attach(), cancel_attach() and attach()
Currently, there's no way to pass multiple tasks to cgroup_subsys
methods, necessitating separate per-process and per-task methods.
This patch introduces cgroup_taskset, which can be used to pass
multiple tasks and their associated cgroups to cgroup_subsys
methods.
Three methods - can_attach(), cancel_attach() and attach() - are
converted to use cgroup_taskset. This unifies passed parameters so
that all methods have access to all information. Conversions in this
patchset are identical and don't introduce any behavior change.
-v2: documentation updated as per Paul Menage's suggestion.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Paul Menage <paul@paulmenage.org>
Acked-by: Li Zefan <lizf@cn.fujitsu.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: James Morris <jmorris@namei.org>
-rw-r--r--  Documentation/cgroups/cgroups.txt  | 31
-rw-r--r--  include/linux/cgroup.h             | 28
-rw-r--r--  kernel/cgroup.c                    | 99
-rw-r--r--  kernel/cgroup_freezer.c            |  2
-rw-r--r--  kernel/cpuset.c                    | 18
-rw-r--r--  mm/memcontrol.c                    | 16
-rw-r--r--  security/device_cgroup.c           |  7
7 files changed, 158 insertions(+), 43 deletions(-)
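For orientation, here is a minimal, hypothetical sketch of how a controller's can_attach() could consume the new taskset. Only the callback signature and the cgroup_taskset accessors come from this patch; the example_ subsystem name is made up and the PF_THREAD_BOUND check merely mirrors what cpuset does further down.

/* Assumes <linux/cgroup.h> and <linux/sched.h>; "example" is not a real controller. */
static int example_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			      struct cgroup_taskset *tset)
{
	struct task_struct *task;

	/* Visit every task in the set, skipping those already in @cgrp. */
	cgroup_taskset_for_each(task, cgrp, tset) {
		/* Reject kernel threads bound to a specific CPU, as cpuset does. */
		if (task->flags & PF_THREAD_BOUND)
			return -EINVAL;
	}
	return 0;
}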
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index 9c452ef2328c..8a2f302327fa 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -594,15 +594,25 @@ rmdir() will fail with it. From this behavior, pre_destroy() can be
 called multiple times against a cgroup.
 
 int can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-	       struct task_struct *task)
+	       struct cgroup_taskset *tset)
 (cgroup_mutex held by caller)
 
-Called prior to moving a task into a cgroup; if the subsystem
-returns an error, this will abort the attach operation. If a NULL
-task is passed, then a successful result indicates that *any*
-unspecified task can be moved into the cgroup. Note that this isn't
-called on a fork. If this method returns 0 (success) then this should
-remain valid while the caller holds cgroup_mutex and it is ensured that either
+Called prior to moving one or more tasks into a cgroup; if the
+subsystem returns an error, this will abort the attach operation.
+@tset contains the tasks to be attached and is guaranteed to have at
+least one task in it.
+
+If there are multiple tasks in the taskset, then:
+- it's guaranteed that all are from the same thread group
+- @tset contains all tasks from the thread group whether or not
+  they're switching cgroups
+- the first task is the leader
+
+Each @tset entry also contains the task's old cgroup and tasks which
+aren't switching cgroup can be skipped easily using the
+cgroup_taskset_for_each() iterator. Note that this isn't called on a
+fork. If this method returns 0 (success) then this should remain valid
+while the caller holds cgroup_mutex and it is ensured that either
 attach() or cancel_attach() will be called in future.
 
 int can_attach_task(struct cgroup *cgrp, struct task_struct *tsk);
@@ -613,14 +623,14 @@ attached (possibly many when using cgroup_attach_proc). Called after
 can_attach.
 
 void cancel_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-		   struct task_struct *task, bool threadgroup)
+		   struct cgroup_taskset *tset)
 (cgroup_mutex held by caller)
 
 Called when a task attach operation has failed after can_attach() has succeeded.
 A subsystem whose can_attach() has some side-effects should provide this
 function, so that the subsystem can implement a rollback. If not, not necessary.
 This will be called only about subsystems whose can_attach() operation have
-succeeded.
+succeeded. The parameters are identical to can_attach().
 
 void pre_attach(struct cgroup *cgrp);
 (cgroup_mutex held by caller)
@@ -629,11 +639,12 @@ For any non-per-thread attachment work that needs to happen before
 attach_task. Needed by cpuset.
 
 void attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-	    struct cgroup *old_cgrp, struct task_struct *task)
+	    struct cgroup_taskset *tset)
 (cgroup_mutex held by caller)
 
 Called after the task has been attached to the cgroup, to allow any
 post-attachment activity that requires memory allocations or blocking.
+The parameters are identical to can_attach().
 
 void attach_task(struct cgroup *cgrp, struct task_struct *tsk);
 (cgroup_mutex held by caller)
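As the updated text notes, each taskset entry carries the task's old cgroup; inside the iterator that old cgroup can be read with cgroup_taskset_cur_cgroup(). A hedged sketch of an attach() written that way (the example_ name and the pr_debug are illustrative, not part of this patch):

static void example_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			   struct cgroup_taskset *tset)
{
	struct task_struct *task;

	/* Pass NULL as skip_cgrp so even tasks already in @cgrp are visited. */
	cgroup_taskset_for_each(task, NULL, tset) {
		/* Old cgroup of the task returned by this iteration step. */
		struct cgroup *old_cgrp = cgroup_taskset_cur_cgroup(tset);

		pr_debug("moving pid %d from %p to %p\n",
			 task_pid_nr(task), old_cgrp, cgrp);
	}
}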
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 1b7f9d525013..34256ad9e553 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -457,6 +457,28 @@ void cgroup_exclude_rmdir(struct cgroup_subsys_state *css);
 void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css);
 
 /*
+ * Control Group taskset, used to pass around set of tasks to cgroup_subsys
+ * methods.
+ */
+struct cgroup_taskset;
+struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
+struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
+struct cgroup *cgroup_taskset_cur_cgroup(struct cgroup_taskset *tset);
+int cgroup_taskset_size(struct cgroup_taskset *tset);
+
+/**
+ * cgroup_taskset_for_each - iterate cgroup_taskset
+ * @task: the loop cursor
+ * @skip_cgrp: skip if task's cgroup matches this, %NULL to iterate through all
+ * @tset: taskset to iterate
+ */
+#define cgroup_taskset_for_each(task, skip_cgrp, tset)			\
+	for ((task) = cgroup_taskset_first((tset)); (task);		\
+	     (task) = cgroup_taskset_next((tset)))			\
+		if (!(skip_cgrp) ||					\
+		    cgroup_taskset_cur_cgroup((tset)) != (skip_cgrp))
+
+/*
  * Control Group subsystem type.
  * See Documentation/cgroups/cgroups.txt for details
  */
@@ -467,14 +489,14 @@ struct cgroup_subsys {
 	int (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
 	void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
 	int (*can_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
-			  struct task_struct *tsk);
+			  struct cgroup_taskset *tset);
 	int (*can_attach_task)(struct cgroup *cgrp, struct task_struct *tsk);
 	void (*cancel_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
-			      struct task_struct *tsk);
+			      struct cgroup_taskset *tset);
 	void (*pre_attach)(struct cgroup *cgrp);
 	void (*attach_task)(struct cgroup *cgrp, struct task_struct *tsk);
 	void (*attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
-		       struct cgroup *old_cgrp, struct task_struct *tsk);
+		       struct cgroup_taskset *tset);
 	void (*fork)(struct cgroup_subsys *ss, struct task_struct *task);
 	void (*exit)(struct cgroup_subsys *ss, struct cgroup *cgrp,
 		     struct cgroup *old_cgrp, struct task_struct *task);
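The controllers converted later in this patch (cpuset, memcg, devices) still only care about a single task, which under the new interface means taking the first entry of the taskset. A hedged sketch of that pattern (the example_ name is hypothetical; cgroup_taskset_size() is shown only to illustrate the accessor, and the permission check mirrors devcgroup below):

static int example_leader_can_attach(struct cgroup_subsys *ss,
				     struct cgroup *cgrp,
				     struct cgroup_taskset *tset)
{
	/* For a whole-threadgroup move the first entry is the group leader. */
	struct task_struct *leader = cgroup_taskset_first(tset);

	if (current != leader && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	pr_debug("attach request for %d task(s)\n", cgroup_taskset_size(tset));
	return 0;
}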
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 0f2d00519d37..41ee01e392e6 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1757,11 +1757,85 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
 }
 EXPORT_SYMBOL_GPL(cgroup_path);
 
+/*
+ * Control Group taskset
+ */
 struct task_and_cgroup {
 	struct task_struct	*task;
 	struct cgroup		*cgrp;
 };
 
+struct cgroup_taskset {
+	struct task_and_cgroup	single;
+	struct flex_array	*tc_array;
+	int			tc_array_len;
+	int			idx;
+	struct cgroup		*cur_cgrp;
+};
+
+/**
+ * cgroup_taskset_first - reset taskset and return the first task
+ * @tset: taskset of interest
+ *
+ * @tset iteration is initialized and the first task is returned.
+ */
+struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset)
+{
+	if (tset->tc_array) {
+		tset->idx = 0;
+		return cgroup_taskset_next(tset);
+	} else {
+		tset->cur_cgrp = tset->single.cgrp;
+		return tset->single.task;
+	}
+}
+EXPORT_SYMBOL_GPL(cgroup_taskset_first);
+
+/**
+ * cgroup_taskset_next - iterate to the next task in taskset
+ * @tset: taskset of interest
+ *
+ * Return the next task in @tset.  Iteration must have been initialized
+ * with cgroup_taskset_first().
+ */
+struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
+{
+	struct task_and_cgroup *tc;
+
+	if (!tset->tc_array || tset->idx >= tset->tc_array_len)
+		return NULL;
+
+	tc = flex_array_get(tset->tc_array, tset->idx++);
+	tset->cur_cgrp = tc->cgrp;
+	return tc->task;
+}
+EXPORT_SYMBOL_GPL(cgroup_taskset_next);
+
+/**
+ * cgroup_taskset_cur_cgroup - return the matching cgroup for the current task
+ * @tset: taskset of interest
+ *
+ * Return the cgroup for the current (last returned) task of @tset.  This
+ * function must be preceded by either cgroup_taskset_first() or
+ * cgroup_taskset_next().
+ */
+struct cgroup *cgroup_taskset_cur_cgroup(struct cgroup_taskset *tset)
+{
+	return tset->cur_cgrp;
+}
+EXPORT_SYMBOL_GPL(cgroup_taskset_cur_cgroup);
+
+/**
+ * cgroup_taskset_size - return the number of tasks in taskset
+ * @tset: taskset of interest
+ */
+int cgroup_taskset_size(struct cgroup_taskset *tset)
+{
+	return tset->tc_array ? tset->tc_array_len : 1;
+}
+EXPORT_SYMBOL_GPL(cgroup_taskset_size);
+
+
 /*
  * cgroup_task_migrate - move a task from one cgroup to another.
  *
@@ -1842,6 +1916,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 	struct cgroup_subsys *ss, *failed_ss = NULL;
 	struct cgroup *oldcgrp;
 	struct cgroupfs_root *root = cgrp->root;
+	struct cgroup_taskset tset = { };
 
 	/* @tsk either already exited or can't exit until the end */
 	if (tsk->flags & PF_EXITING)
@@ -1852,9 +1927,12 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 	if (cgrp == oldcgrp)
 		return 0;
 
+	tset.single.task = tsk;
+	tset.single.cgrp = oldcgrp;
+
 	for_each_subsys(root, ss) {
 		if (ss->can_attach) {
-			retval = ss->can_attach(ss, cgrp, tsk);
+			retval = ss->can_attach(ss, cgrp, &tset);
 			if (retval) {
 				/*
 				 * Remember on which subsystem the can_attach()
@@ -1885,7 +1963,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 		if (ss->attach_task)
 			ss->attach_task(cgrp, tsk);
 		if (ss->attach)
-			ss->attach(ss, cgrp, oldcgrp, tsk);
+			ss->attach(ss, cgrp, &tset);
 	}
 
 	synchronize_rcu();
@@ -1907,7 +1985,7 @@ out:
 				 */
 				break;
 			if (ss->cancel_attach)
-				ss->cancel_attach(ss, cgrp, tsk);
+				ss->cancel_attach(ss, cgrp, &tset);
 		}
 	}
 	return retval;
@@ -2023,6 +2101,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	struct task_struct *tsk;
 	struct task_and_cgroup *tc;
 	struct flex_array *group;
+	struct cgroup_taskset tset = { };
 	/*
 	 * we need to make sure we have css_sets for all the tasks we're
 	 * going to move -before- we actually start moving them, so that in
@@ -2089,6 +2168,8 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	} while_each_thread(leader, tsk);
 	/* remember the number of threads in the array for later. */
 	group_size = i;
+	tset.tc_array = group;
+	tset.tc_array_len = group_size;
 	read_unlock(&tasklist_lock);
 
 	/* methods shouldn't be called if no task is actually migrating */
@@ -2101,7 +2182,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	 */
 	for_each_subsys(root, ss) {
 		if (ss->can_attach) {
-			retval = ss->can_attach(ss, cgrp, leader);
+			retval = ss->can_attach(ss, cgrp, &tset);
 			if (retval) {
 				failed_ss = ss;
 				goto out_cancel_attach;
@@ -2183,10 +2264,8 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	 * being moved, this call will need to be reworked to communicate that.
 	 */
 	for_each_subsys(root, ss) {
-		if (ss->attach) {
-			tc = flex_array_get(group, 0);
-			ss->attach(ss, cgrp, tc->cgrp, tc->task);
-		}
+		if (ss->attach)
+			ss->attach(ss, cgrp, &tset);
 	}
 
 	/*
@@ -2208,11 +2287,11 @@ out_cancel_attach:
 		for_each_subsys(root, ss) {
 			if (ss == failed_ss) {
 				if (cancel_failed_ss && ss->cancel_attach)
-					ss->cancel_attach(ss, cgrp, leader);
+					ss->cancel_attach(ss, cgrp, &tset);
 				break;
 			}
 			if (ss->cancel_attach)
-				ss->cancel_attach(ss, cgrp, leader);
+				ss->cancel_attach(ss, cgrp, &tset);
 		}
 	}
 out_put_tasks:
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index e411a60cc2c8..e95c6fb65cc0 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -159,7 +159,7 @@ static void freezer_destroy(struct cgroup_subsys *ss,
  */
 static int freezer_can_attach(struct cgroup_subsys *ss,
 			      struct cgroup *new_cgroup,
-			      struct task_struct *task)
+			      struct cgroup_taskset *tset)
 {
 	struct freezer *freezer;
 
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 9fe58c46a426..512bd59e8627 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1371,10 +1371,10 @@ static int fmeter_getrate(struct fmeter *fmp)
 }
 
 /* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
-static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
-			     struct task_struct *tsk)
+static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+			     struct cgroup_taskset *tset)
 {
-	struct cpuset *cs = cgroup_cs(cont);
+	struct cpuset *cs = cgroup_cs(cgrp);
 
 	if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
 		return -ENOSPC;
@@ -1387,7 +1387,7 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
 	 * set_cpus_allowed_ptr() on all attached tasks before cpus_allowed may
 	 * be changed.
 	 */
-	if (tsk->flags & PF_THREAD_BOUND)
+	if (cgroup_taskset_first(tset)->flags & PF_THREAD_BOUND)
 		return -EINVAL;
 
 	return 0;
@@ -1437,12 +1437,14 @@ static void cpuset_attach_task(struct cgroup *cont, struct task_struct *tsk)
 	cpuset_update_task_spread_flag(cs, tsk);
 }
 
-static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont,
-			  struct cgroup *oldcont, struct task_struct *tsk)
+static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+			  struct cgroup_taskset *tset)
 {
 	struct mm_struct *mm;
-	struct cpuset *cs = cgroup_cs(cont);
-	struct cpuset *oldcs = cgroup_cs(oldcont);
+	struct task_struct *tsk = cgroup_taskset_first(tset);
+	struct cgroup *oldcgrp = cgroup_taskset_cur_cgroup(tset);
+	struct cpuset *cs = cgroup_cs(cgrp);
+	struct cpuset *oldcs = cgroup_cs(oldcgrp);
 
 	/*
 	 * Change mm, possibly for multiple threads in a threadgroup. This is
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6aff93c98aca..81640e74a709 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5298,8 +5298,9 @@ static void mem_cgroup_clear_mc(void)
 
 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
 				 struct cgroup *cgroup,
-				 struct task_struct *p)
+				 struct cgroup_taskset *tset)
 {
+	struct task_struct *p = cgroup_taskset_first(tset);
 	int ret = 0;
 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgroup);
 
@@ -5337,7 +5338,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
 
 static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
 				     struct cgroup *cgroup,
-				     struct task_struct *p)
+				     struct cgroup_taskset *tset)
 {
 	mem_cgroup_clear_mc();
 }
@@ -5454,9 +5455,9 @@ retry:
 
 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
 				 struct cgroup *cont,
-				 struct cgroup *old_cont,
-				 struct task_struct *p)
+				 struct cgroup_taskset *tset)
 {
+	struct task_struct *p = cgroup_taskset_first(tset);
 	struct mm_struct *mm = get_task_mm(p);
 
 	if (mm) {
@@ -5471,19 +5472,18 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
 #else	/* !CONFIG_MMU */
 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
 				 struct cgroup *cgroup,
-				 struct task_struct *p)
+				 struct cgroup_taskset *tset)
 {
 	return 0;
 }
 static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
 				     struct cgroup *cgroup,
-				     struct task_struct *p)
+				     struct cgroup_taskset *tset)
 {
 }
 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
 				 struct cgroup *cont,
-				 struct cgroup *old_cont,
-				 struct task_struct *p)
+				 struct cgroup_taskset *tset)
 {
 }
 #endif
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 4450fbeec411..8b5b5d8612c6 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -62,11 +62,12 @@ static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
 struct cgroup_subsys devices_subsys;
 
 static int devcgroup_can_attach(struct cgroup_subsys *ss,
-		struct cgroup *new_cgroup, struct task_struct *task)
+		struct cgroup *new_cgrp, struct cgroup_taskset *set)
 {
-	if (current != task && !capable(CAP_SYS_ADMIN))
-		return -EPERM;
+	struct task_struct *task = cgroup_taskset_first(set);
 
+	if (current != task && !capable(CAP_SYS_ADMIN))
+		return -EPERM;
 	return 0;
 }
 