author		Tejun Heo <tj@kernel.org>	2011-12-12 21:12:21 -0500
committer	Tejun Heo <tj@kernel.org>	2011-12-12 21:12:21 -0500
commit		bb9d97b6dffa10cec5e1ce9adbce60f3c2b5eabc (patch)
tree		fb8351518fcfb91927e9e138f48284c44553f011	/kernel/cpuset.c
parent		2f7ee5691eecb67c8108b92001a85563ea336ac5 (diff)
cgroup: don't use subsys->can_attach_task() or ->attach_task()
Now that subsys->can_attach() and attach() take @tset instead of
@task, they can handle per-task operations. Convert
->can_attach_task() and ->attach_task() users to use ->can_attach()
and attach() instead. Most conversions are straightforward.
Noteworthy changes are:

* In cgroup_freezer, remove unnecessary NULL assignments to unused
  methods. They are useless and very prone to getting out of sync,
  which already happened.

* In cpuset, the PF_THREAD_BOUND test is now checked for each task.
  This doesn't make any practical difference but is conceptually
  cleaner.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Li Zefan <lizf@cn.fujitsu.com>
Cc: Paul Menage <paul@paulmenage.org>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: James Morris <jmorris@namei.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <peterz@infradead.org>
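As a rough illustration of the conversion pattern described above, here is a
minimal sketch for a hypothetical controller. "example_subsys",
"example_check()", and the function names are invented for illustration; only
the taskset iteration calls (cgroup_taskset_for_each(), cgroup_taskset_first())
come from this patch series:

	/*
	 * Illustrative sketch only -- not from this patch.
	 * example_check() stands in for whatever per-task validation
	 * a controller performs.
	 */

	/* Before: the framework invoked a separate per-task callback. */
	static int example_can_attach_task(struct cgroup *cgrp,
					   struct task_struct *task)
	{
		return example_check(task);	/* hypothetical check */
	}

	/*
	 * After: ->can_attach() walks the whole taskset itself, so the
	 * per-task callback (and its ops-table registration) can go away.
	 */
	static int example_can_attach(struct cgroup_subsys *ss,
				      struct cgroup *cgrp,
				      struct cgroup_taskset *tset)
	{
		struct task_struct *task;
		int ret;

		cgroup_taskset_for_each(task, cgrp, tset) {
			ret = example_check(task);  /* hypothetical check */
			if (ret)
				return ret;	/* veto the whole attach */
		}
		return 0;
	}

The cpuset hunks below follow exactly this shape.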
Diffstat (limited to 'kernel/cpuset.c')
-rw-r--r--	kernel/cpuset.c	70
1 file changed, 32 insertions(+), 38 deletions(-)
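For context while reading the diff: the taskset API used below was introduced
by the parent commit (2f7ee5691eec). The following is a sketch from memory of
how cgroup_taskset_for_each() relates to the helpers visible in this patch
(cgroup_taskset_first(), cgroup_taskset_next(), cgroup_taskset_cur_cgroup());
the kernel's exact macro body may differ, and the skip-cgroup behavior noted
in the comment is an assumption based on how the cpuset code uses it:

	/*
	 * Sketch (reconstructed, may differ from the kernel's definition):
	 * iterate every task in @tset, skipping tasks whose current cgroup
	 * is already @skip_cgrp -- which would let the cpuset code below
	 * pass the destination cgroup and avoid redundant rebinding work.
	 */
	#define cgroup_taskset_for_each(task, skip_cgrp, tset)		\
		for ((task) = cgroup_taskset_first(tset); (task);	\
		     (task) = cgroup_taskset_next(tset))		\
			if (!(skip_cgrp) ||				\
			    cgroup_taskset_cur_cgroup(tset) != (skip_cgrp))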
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 512bd59e8627..9a8a61301524 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1375,33 +1375,34 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 			       struct cgroup_taskset *tset)
 {
 	struct cpuset *cs = cgroup_cs(cgrp);
+	struct task_struct *task;
+	int ret;
 
 	if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
 		return -ENOSPC;
 
-	/*
-	 * Kthreads bound to specific cpus cannot be moved to a new cpuset; we
-	 * cannot change their cpu affinity and isolating such threads by their
-	 * set of allowed nodes is unnecessary.  Thus, cpusets are not
-	 * applicable for such threads.  This prevents checking for success of
-	 * set_cpus_allowed_ptr() on all attached tasks before cpus_allowed may
-	 * be changed.
-	 */
-	if (cgroup_taskset_first(tset)->flags & PF_THREAD_BOUND)
-		return -EINVAL;
-
+	cgroup_taskset_for_each(task, cgrp, tset) {
+		/*
+		 * Kthreads bound to specific cpus cannot be moved to a new
+		 * cpuset; we cannot change their cpu affinity and
+		 * isolating such threads by their set of allowed nodes is
+		 * unnecessary.  Thus, cpusets are not applicable for such
+		 * threads.  This prevents checking for success of
+		 * set_cpus_allowed_ptr() on all attached tasks before
+		 * cpus_allowed may be changed.
+		 */
+		if (task->flags & PF_THREAD_BOUND)
+			return -EINVAL;
+		if ((ret = security_task_setscheduler(task)))
+			return ret;
+	}
 	return 0;
 }
 
-static int cpuset_can_attach_task(struct cgroup *cgrp, struct task_struct *task)
-{
-	return security_task_setscheduler(task);
-}
-
 /*
  * Protected by cgroup_lock. The nodemasks must be stored globally because
  * dynamically allocating them is not allowed in pre_attach, and they must
- * persist among pre_attach, attach_task, and attach.
+ * persist among pre_attach, and attach.
  */
 static cpumask_var_t cpus_attach;
 static nodemask_t cpuset_attach_nodemask_from;
@@ -1420,39 +1421,34 @@ static void cpuset_pre_attach(struct cgroup *cont)
 	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
 }
 
-/* Per-thread attachment work. */
-static void cpuset_attach_task(struct cgroup *cont, struct task_struct *tsk)
-{
-	int err;
-	struct cpuset *cs = cgroup_cs(cont);
-
-	/*
-	 * can_attach beforehand should guarantee that this doesn't fail.
-	 * TODO: have a better way to handle failure here
-	 */
-	err = set_cpus_allowed_ptr(tsk, cpus_attach);
-	WARN_ON_ONCE(err);
-
-	cpuset_change_task_nodemask(tsk, &cpuset_attach_nodemask_to);
-	cpuset_update_task_spread_flag(cs, tsk);
-}
-
 static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 			  struct cgroup_taskset *tset)
 {
 	struct mm_struct *mm;
-	struct task_struct *tsk = cgroup_taskset_first(tset);
+	struct task_struct *task;
+	struct task_struct *leader = cgroup_taskset_first(tset);
 	struct cgroup *oldcgrp = cgroup_taskset_cur_cgroup(tset);
 	struct cpuset *cs = cgroup_cs(cgrp);
 	struct cpuset *oldcs = cgroup_cs(oldcgrp);
 
+	cgroup_taskset_for_each(task, cgrp, tset) {
+		/*
+		 * can_attach beforehand should guarantee that this doesn't
+		 * fail.  TODO: have a better way to handle failure here
+		 */
+		WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
+
+		cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
+		cpuset_update_task_spread_flag(cs, task);
+	}
+
 	/*
 	 * Change mm, possibly for multiple threads in a threadgroup. This is
 	 * expensive and may sleep.
 	 */
 	cpuset_attach_nodemask_from = oldcs->mems_allowed;
 	cpuset_attach_nodemask_to = cs->mems_allowed;
-	mm = get_task_mm(tsk);
+	mm = get_task_mm(leader);
 	if (mm) {
 		mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
 		if (is_memory_migrate(cs))
@@ -1908,9 +1904,7 @@ struct cgroup_subsys cpuset_subsys = {
 	.create = cpuset_create,
 	.destroy = cpuset_destroy,
 	.can_attach = cpuset_can_attach,
-	.can_attach_task = cpuset_can_attach_task,
 	.pre_attach = cpuset_pre_attach,
-	.attach_task = cpuset_attach_task,
 	.attach = cpuset_attach,
 	.populate = cpuset_populate,
 	.post_clone = cpuset_post_clone,