author	Tejun Heo <tj@kernel.org>	2011-12-12 21:12:21 -0500
committer	Tejun Heo <tj@kernel.org>	2011-12-12 21:12:21 -0500
commit	2f7ee5691eecb67c8108b92001a85563ea336ac5
tree	18cf60ea8a463f4a6cd59c68926ba4357ae8ff4c /kernel
parent	134d33737f9015761c3832f6b268fae6274aac7f
cgroup: introduce cgroup_taskset and use it in subsys->can_attach(), cancel_attach() and attach()
Currently, there's no way to pass multiple tasks to cgroup_subsys methods, necessitating separate per-process and per-task methods. This patch introduces cgroup_taskset, which can be used to pass multiple tasks and their associated cgroups to cgroup_subsys methods.

Three methods - can_attach(), cancel_attach() and attach() - are converted to use cgroup_taskset. This unifies the passed parameters so that all methods have access to all information. Conversions in this patchset are identical and don't introduce any behavior change.

-v2: documentation updated as per Paul Menage's suggestion.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Paul Menage <paul@paulmenage.org>
Acked-by: Li Zefan <lizf@cn.fujitsu.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: James Morris <jmorris@namei.org>
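For orientation, the sketch below shows how a converted subsystem method might walk a cgroup_taskset using the iterators added by this patch. This is a minimal illustration, not part of the change; example_can_attach() and its per-task check are hypothetical.

/* Hypothetical example (not in this patch): how a subsystem's ->can_attach()
 * can iterate over all tasks being migrated after this change. */
static int example_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			      struct cgroup_taskset *tset)
{
	struct task_struct *task;

	/* cgroup_taskset_size() is 1 for a single-task attach and the
	 * number of threads for a whole-threadgroup attach */
	pr_debug("attaching %d task(s)\n", cgroup_taskset_size(tset));

	/* cgroup_taskset_first() resets the iteration and returns the first
	 * task; cgroup_taskset_next() returns NULL once the set is exhausted */
	for (task = cgroup_taskset_first(tset); task;
	     task = cgroup_taskset_next(tset)) {
		/* cgroup the current task would be migrating from */
		struct cgroup *oldcgrp = cgroup_taskset_cur_cgroup(tset);

		if (oldcgrp == cgrp)
			continue;	/* hypothetical: skip no-op moves */

		/* per-task checks go here; returning an error makes cgroup
		 * core call ->cancel_attach() on already-checked subsystems */
	}

	return 0;
}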
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cgroup.c	99
-rw-r--r--	kernel/cgroup_freezer.c	2
-rw-r--r--	kernel/cpuset.c	18
3 files changed, 100 insertions(+), 19 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 0f2d00519d37..41ee01e392e6 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1757,11 +1757,85 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
 }
 EXPORT_SYMBOL_GPL(cgroup_path);
 
+/*
+ * Control Group taskset
+ */
 struct task_and_cgroup {
 	struct task_struct *task;
 	struct cgroup *cgrp;
 };
 
+struct cgroup_taskset {
+	struct task_and_cgroup single;
+	struct flex_array *tc_array;
+	int tc_array_len;
+	int idx;
+	struct cgroup *cur_cgrp;
+};
+
+/**
+ * cgroup_taskset_first - reset taskset and return the first task
+ * @tset: taskset of interest
+ *
+ * @tset iteration is initialized and the first task is returned.
+ */
+struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset)
+{
+	if (tset->tc_array) {
+		tset->idx = 0;
+		return cgroup_taskset_next(tset);
+	} else {
+		tset->cur_cgrp = tset->single.cgrp;
+		return tset->single.task;
+	}
+}
+EXPORT_SYMBOL_GPL(cgroup_taskset_first);
+
+/**
+ * cgroup_taskset_next - iterate to the next task in taskset
+ * @tset: taskset of interest
+ *
+ * Return the next task in @tset. Iteration must have been initialized
+ * with cgroup_taskset_first().
+ */
+struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
+{
+	struct task_and_cgroup *tc;
+
+	if (!tset->tc_array || tset->idx >= tset->tc_array_len)
+		return NULL;
+
+	tc = flex_array_get(tset->tc_array, tset->idx++);
+	tset->cur_cgrp = tc->cgrp;
+	return tc->task;
+}
+EXPORT_SYMBOL_GPL(cgroup_taskset_next);
+
+/**
+ * cgroup_taskset_cur_cgroup - return the matching cgroup for the current task
+ * @tset: taskset of interest
+ *
+ * Return the cgroup for the current (last returned) task of @tset. This
+ * function must be preceded by either cgroup_taskset_first() or
+ * cgroup_taskset_next().
+ */
+struct cgroup *cgroup_taskset_cur_cgroup(struct cgroup_taskset *tset)
+{
+	return tset->cur_cgrp;
+}
+EXPORT_SYMBOL_GPL(cgroup_taskset_cur_cgroup);
+
+/**
+ * cgroup_taskset_size - return the number of tasks in taskset
+ * @tset: taskset of interest
+ */
+int cgroup_taskset_size(struct cgroup_taskset *tset)
+{
+	return tset->tc_array ? tset->tc_array_len : 1;
+}
+EXPORT_SYMBOL_GPL(cgroup_taskset_size);
+
+
 /*
  * cgroup_task_migrate - move a task from one cgroup to another.
  *
@@ -1842,6 +1916,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 	struct cgroup_subsys *ss, *failed_ss = NULL;
 	struct cgroup *oldcgrp;
 	struct cgroupfs_root *root = cgrp->root;
+	struct cgroup_taskset tset = { };
 
 	/* @tsk either already exited or can't exit until the end */
 	if (tsk->flags & PF_EXITING)
@@ -1852,9 +1927,12 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 	if (cgrp == oldcgrp)
 		return 0;
 
+	tset.single.task = tsk;
+	tset.single.cgrp = oldcgrp;
+
 	for_each_subsys(root, ss) {
 		if (ss->can_attach) {
-			retval = ss->can_attach(ss, cgrp, tsk);
+			retval = ss->can_attach(ss, cgrp, &tset);
 			if (retval) {
 				/*
 				 * Remember on which subsystem the can_attach()
@@ -1885,7 +1963,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 		if (ss->attach_task)
 			ss->attach_task(cgrp, tsk);
 		if (ss->attach)
-			ss->attach(ss, cgrp, oldcgrp, tsk);
+			ss->attach(ss, cgrp, &tset);
 	}
 
 	synchronize_rcu();
@@ -1907,7 +1985,7 @@ out:
 				 */
 				break;
 			if (ss->cancel_attach)
-				ss->cancel_attach(ss, cgrp, tsk);
+				ss->cancel_attach(ss, cgrp, &tset);
 		}
 	}
 	return retval;
@@ -2023,6 +2101,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	struct task_struct *tsk;
 	struct task_and_cgroup *tc;
 	struct flex_array *group;
+	struct cgroup_taskset tset = { };
 	/*
 	 * we need to make sure we have css_sets for all the tasks we're
 	 * going to move -before- we actually start moving them, so that in
@@ -2089,6 +2168,8 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	} while_each_thread(leader, tsk);
 	/* remember the number of threads in the array for later. */
 	group_size = i;
+	tset.tc_array = group;
+	tset.tc_array_len = group_size;
 	read_unlock(&tasklist_lock);
 
 	/* methods shouldn't be called if no task is actually migrating */
@@ -2101,7 +2182,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	 */
 	for_each_subsys(root, ss) {
 		if (ss->can_attach) {
-			retval = ss->can_attach(ss, cgrp, leader);
+			retval = ss->can_attach(ss, cgrp, &tset);
 			if (retval) {
 				failed_ss = ss;
 				goto out_cancel_attach;
@@ -2183,10 +2264,8 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	 * being moved, this call will need to be reworked to communicate that.
 	 */
 	for_each_subsys(root, ss) {
-		if (ss->attach) {
-			tc = flex_array_get(group, 0);
-			ss->attach(ss, cgrp, tc->cgrp, tc->task);
-		}
+		if (ss->attach)
+			ss->attach(ss, cgrp, &tset);
 	}
 
 	/*
@@ -2208,11 +2287,11 @@ out_cancel_attach:
 		for_each_subsys(root, ss) {
 			if (ss == failed_ss) {
 				if (cancel_failed_ss && ss->cancel_attach)
-					ss->cancel_attach(ss, cgrp, leader);
+					ss->cancel_attach(ss, cgrp, &tset);
 				break;
 			}
 			if (ss->cancel_attach)
-				ss->cancel_attach(ss, cgrp, leader);
+				ss->cancel_attach(ss, cgrp, &tset);
 		}
 	}
 out_put_tasks:
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index e411a60cc2c8..e95c6fb65cc0 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -159,7 +159,7 @@ static void freezer_destroy(struct cgroup_subsys *ss,
  */
 static int freezer_can_attach(struct cgroup_subsys *ss,
 			      struct cgroup *new_cgroup,
-			      struct task_struct *task)
+			      struct cgroup_taskset *tset)
 {
 	struct freezer *freezer;
 
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 9fe58c46a426..512bd59e8627 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1371,10 +1371,10 @@ static int fmeter_getrate(struct fmeter *fmp)
 }
 
 /* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
-static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
-			     struct task_struct *tsk)
+static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+			     struct cgroup_taskset *tset)
 {
-	struct cpuset *cs = cgroup_cs(cont);
+	struct cpuset *cs = cgroup_cs(cgrp);
 
 	if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
 		return -ENOSPC;
@@ -1387,7 +1387,7 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
 	 * set_cpus_allowed_ptr() on all attached tasks before cpus_allowed may
 	 * be changed.
 	 */
-	if (tsk->flags & PF_THREAD_BOUND)
+	if (cgroup_taskset_first(tset)->flags & PF_THREAD_BOUND)
 		return -EINVAL;
 
 	return 0;
@@ -1437,12 +1437,14 @@ static void cpuset_attach_task(struct cgroup *cont, struct task_struct *tsk)
 	cpuset_update_task_spread_flag(cs, tsk);
 }
 
-static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont,
-			  struct cgroup *oldcont, struct task_struct *tsk)
+static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+			  struct cgroup_taskset *tset)
 {
 	struct mm_struct *mm;
-	struct cpuset *cs = cgroup_cs(cont);
-	struct cpuset *oldcs = cgroup_cs(oldcont);
+	struct task_struct *tsk = cgroup_taskset_first(tset);
+	struct cgroup *oldcgrp = cgroup_taskset_cur_cgroup(tset);
+	struct cpuset *cs = cgroup_cs(cgrp);
+	struct cpuset *oldcs = cgroup_cs(oldcgrp);
 
 	/*
 	 * Change mm, possibly for multiple threads in a threadgroup. This is