author		Tejun Heo <tj@kernel.org>	2013-01-07 11:51:07 -0500
committer	Tejun Heo <tj@kernel.org>	2013-01-07 11:51:07 -0500
commit		4e4c9a140fc2ecf5e086922ccd2022bdabe509b6 (patch)
tree		80bc4759956732ae68d1972cc6cd538c64bba1c8 /kernel/cpuset.c
parent		ae8086ce15fdab2b57599d7a3242a114ba4b8597 (diff)
cpuset: cleanup cpuset[_can]_attach()
cpuset_can_attach() prepares the global variables cpus_attach and
cpuset_attach_nodemask_{to|from}, which are used only by cpuset_attach().
There is no reason to do this preparation in cpuset_can_attach(); the
same information can be accessed from cpuset_attach().

Move the preparation logic from cpuset_can_attach() to cpuset_attach()
and turn the global variables into static ones inside cpuset_attach().

With this change, there's no reason to keep
cpuset_attach_nodemask_{from|to} global, so move them inside
cpuset_attach(). Unfortunately, cpus_attach has to stay global, as it
can't be allocated from cpuset_attach().

v2: cpus_attach not converted to cpumask_t, as per Li Zefan and Rusty
    Russell.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
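Because cpuset_attach() returns void, it has no way to report an
allocation failure, which is why cpus_attach has to stay a pre-allocated
global; the new comment in the diff points at cpuset_init() for that
allocation. The allocation itself is outside this patch; a minimal
sketch of the arrangement (the exact placement inside cpuset_init() is
assumed, not shown by the hunks below):

int __init cpuset_init(void)
{
	/* ... other cpuset bring-up elided ... */

	/*
	 * Pre-allocate the scratch cpumask used by cpuset_attach(), so
	 * the attach path never needs to allocate (and possibly fail).
	 */
	if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL))
		BUG();

	return 0;
}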
Diffstat (limited to 'kernel/cpuset.c')
-rw-r--r--	kernel/cpuset.c	35
1 file changed, 18 insertions, 17 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 4b054b9faf3d..c5edc6b3eb28 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1395,15 +1395,6 @@ static int fmeter_getrate(struct fmeter *fmp)
 	return val;
 }
 
-/*
- * Protected by cgroup_lock. The nodemasks must be stored globally because
- * dynamically allocating them is not allowed in can_attach, and they must
- * persist until attach.
- */
-static cpumask_var_t cpus_attach;
-static nodemask_t cpuset_attach_nodemask_from;
-static nodemask_t cpuset_attach_nodemask_to;
-
 /* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
 static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 {
@@ -1430,19 +1421,21 @@ static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 	return ret;
 }
 
-	/* prepare for attach */
-	if (cs == &top_cpuset)
-		cpumask_copy(cpus_attach, cpu_possible_mask);
-	else
-		guarantee_online_cpus(cs, cpus_attach);
-
-	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
-
 	return 0;
 }
 
+/*
+ * Protected by cgroup_mutex. cpus_attach is used only by cpuset_attach()
+ * but we can't allocate it dynamically there. Define it global and
+ * allocate from cpuset_init().
+ */
+static cpumask_var_t cpus_attach;
+
 static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 {
+	/* static bufs protected by cgroup_mutex */
+	static nodemask_t cpuset_attach_nodemask_from;
+	static nodemask_t cpuset_attach_nodemask_to;
 	struct mm_struct *mm;
 	struct task_struct *task;
 	struct task_struct *leader = cgroup_taskset_first(tset);
@@ -1450,6 +1443,14 @@ static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 	struct cpuset *cs = cgroup_cs(cgrp);
 	struct cpuset *oldcs = cgroup_cs(oldcgrp);
 
+	/* prepare for attach */
+	if (cs == &top_cpuset)
+		cpumask_copy(cpus_attach, cpu_possible_mask);
+	else
+		guarantee_online_cpus(cs, cpus_attach);
+
+	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
+
 	cgroup_taskset_for_each(task, cgrp, tset) {
 		/*
 		 * can_attach beforehand should guarantee that this doesn't
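Pieced together from the hunks above, the beginning of cpuset_attach()
after this patch reads roughly as follows; the oldcgrp declaration falls
between the hunks and is reconstructed from context, and the body of the
per-task loop is abridged:

static cpumask_var_t cpus_attach;	/* allocated from cpuset_init() */

static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	/* static bufs protected by cgroup_mutex */
	static nodemask_t cpuset_attach_nodemask_from;
	static nodemask_t cpuset_attach_nodemask_to;
	struct mm_struct *mm;
	struct task_struct *task;
	struct task_struct *leader = cgroup_taskset_first(tset);
	struct cgroup *oldcgrp = cgroup_taskset_cur_cgroup(tset);	/* reconstructed, not in the hunks */
	struct cpuset *cs = cgroup_cs(cgrp);
	struct cpuset *oldcs = cgroup_cs(oldcgrp);

	/* prepare for attach */
	if (cs == &top_cpuset)
		cpumask_copy(cpus_attach, cpu_possible_mask);
	else
		guarantee_online_cpus(cs, cpus_attach);

	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);

	cgroup_taskset_for_each(task, cgrp, tset) {
		/* ... per-task cpu and memory binding, abridged ... */
	}
	/* ... mm migration and remaining attach work follow ... */
}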