author     Ingo Molnar <mingo@elte.hu>  2009-10-25 12:30:53 -0400
committer  Ingo Molnar <mingo@elte.hu>  2009-10-25 12:30:53 -0400
commit     0b9e31e9264f1bad89856afb96da1688292f13b4 (patch)
tree       7a9e9b6456dce993efeed8734de0a15a1f16ae94 /kernel/cpuset.c
parent     cf82ff7ea7695b0e82ba07bc5e9f1bd03a74e1aa (diff)
parent     964fe080d94db82a3268443e9b9ece4c60246414 (diff)
Merge branch 'linus' into sched/core
Conflicts:
fs/proc/array.c
Merge reason: resolve conflict and queue up dependent patch.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/cpuset.c')
-rw-r--r--  kernel/cpuset.c  66
1 file changed, 52 insertions, 14 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index b81f7f096e1c..d247381e7371 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1324,9 +1324,10 @@ static int fmeter_getrate(struct fmeter *fmp)
 static cpumask_var_t cpus_attach;
 
 /* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
-static int cpuset_can_attach(struct cgroup_subsys *ss,
-                             struct cgroup *cont, struct task_struct *tsk)
+static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
+                             struct task_struct *tsk, bool threadgroup)
 {
+        int ret;
         struct cpuset *cs = cgroup_cs(cont);
 
         if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
@@ -1343,18 +1344,51 @@ static int cpuset_can_attach(struct cgroup_subsys *ss,
         if (tsk->flags & PF_THREAD_BOUND)
                 return -EINVAL;
 
-        return security_task_setscheduler(tsk, 0, NULL);
+        ret = security_task_setscheduler(tsk, 0, NULL);
+        if (ret)
+                return ret;
+        if (threadgroup) {
+                struct task_struct *c;
+
+                rcu_read_lock();
+                list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+                        ret = security_task_setscheduler(c, 0, NULL);
+                        if (ret) {
+                                rcu_read_unlock();
+                                return ret;
+                        }
+                }
+                rcu_read_unlock();
+        }
+        return 0;
+}
+
+static void cpuset_attach_task(struct task_struct *tsk, nodemask_t *to,
+                               struct cpuset *cs)
+{
+        int err;
+        /*
+         * can_attach beforehand should guarantee that this doesn't fail.
+         * TODO: have a better way to handle failure here
+         */
+        err = set_cpus_allowed_ptr(tsk, cpus_attach);
+        WARN_ON_ONCE(err);
+
+        task_lock(tsk);
+        cpuset_change_task_nodemask(tsk, to);
+        task_unlock(tsk);
+        cpuset_update_task_spread_flag(cs, tsk);
+
 }
 
-static void cpuset_attach(struct cgroup_subsys *ss,
-                          struct cgroup *cont, struct cgroup *oldcont,
-                          struct task_struct *tsk)
+static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont,
+                          struct cgroup *oldcont, struct task_struct *tsk,
+                          bool threadgroup)
 {
         nodemask_t from, to;
         struct mm_struct *mm;
         struct cpuset *cs = cgroup_cs(cont);
         struct cpuset *oldcs = cgroup_cs(oldcont);
-        int err;
 
         if (cs == &top_cpuset) {
                 cpumask_copy(cpus_attach, cpu_possible_mask);
@@ -1363,15 +1397,19 @@ static void cpuset_attach(struct cgroup_subsys *ss,
                 guarantee_online_cpus(cs, cpus_attach);
                 guarantee_online_mems(cs, &to);
         }
-        err = set_cpus_allowed_ptr(tsk, cpus_attach);
-        if (err)
-                return;
 
-        task_lock(tsk);
-        cpuset_change_task_nodemask(tsk, &to);
-        task_unlock(tsk);
-        cpuset_update_task_spread_flag(cs, tsk);
+        /* do per-task migration stuff possibly for each in the threadgroup */
+        cpuset_attach_task(tsk, &to, cs);
+        if (threadgroup) {
+                struct task_struct *c;
+                rcu_read_lock();
+                list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+                        cpuset_attach_task(c, &to, cs);
+                }
+                rcu_read_unlock();
+        }
 
+        /* change mm; only needs to be done once even if threadgroup */
         from = oldcs->mems_allowed;
         to = cs->mems_allowed;
         mm = get_task_mm(tsk);
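
The shape of the change above is a two-phase walk over a thread group: can_attach() runs the security check for every member under rcu_read_lock() and rejects the whole request before any task state is touched, and only then does attach() commit the per-thread work (cpuset_attach_task() for each member), with the mm-wide migration done once for the group. The following is a minimal userspace sketch of that validate-then-commit pattern, not kernel code: it uses a plain singly linked list in place of tsk->thread_group and RCU, and every name in it (struct thread, group_can_attach, group_attach) is illustrative rather than taken from the kernel source.

#include <stdio.h>

struct thread {
        int tid;
        int allowed;             /* stands in for the security hook result */
        int attached;            /* stands in for per-task attach state */
        struct thread *next;     /* stands in for the thread_group list */
};

/* Phase 1: like cpuset_can_attach() with threadgroup == true; fail the
 * whole request if any member fails, before anything is modified. */
static int group_can_attach(const struct thread *leader)
{
        for (const struct thread *t = leader; t; t = t->next)
                if (!t->allowed)
                        return -1;      /* no task state touched yet */
        return 0;
}

/* Phase 2: like cpuset_attach(); per-thread work for each member,
 * group-wide (mm) work exactly once. */
static void group_attach(struct thread *leader)
{
        for (struct thread *t = leader; t; t = t->next)
                t->attached = 1;        /* like cpuset_attach_task() */
        /* mm/nodemask migration would happen here, once per group */
}

int main(void)
{
        struct thread t2 = { .tid = 2, .allowed = 1 };
        struct thread t1 = { .tid = 1, .allowed = 1, .next = &t2 };

        if (group_can_attach(&t1) == 0)
                group_attach(&t1);

        printf("tid 1 attached=%d, tid 2 attached=%d\n",
               t1.attached, t2.attached);
        return 0;
}

Splitting validation from commit this way is what lets the attach path keep its "can_attach guaranteed this won't fail" assumption (hence the WARN_ON_ONCE rather than an error return in cpuset_attach_task() above).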