Diffstat (limited to 'kernel')
-rw-r--r--   kernel/cgroup.c          |  4
-rw-r--r--   kernel/cgroup_freezer.c  | 15
-rw-r--r--   kernel/cpuset.c          | 66
-rw-r--r--   kernel/ns_cgroup.c       | 16
-rw-r--r--   kernel/sched.c           | 35
5 files changed, 114 insertions, 22 deletions
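The patch threads a new "threadgroup" flag through each controller's can_attach/attach hook so a caller can ask the subsystem to vet and move an entire thread group instead of a single task; every call site touched here passes false, so single-task behaviour is unchanged. The matching prototype change in struct cgroup_subsys lives in include/linux/cgroup.h, which is outside this kernel/-only diffstat; judging from the callers and callees below, the two hooks presumably end up shaped roughly like this (a sketch, not part of this diff):

	/* assumed shape of the updated hooks in struct cgroup_subsys */
	int (*can_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
			  struct task_struct *tsk, bool threadgroup);
	void (*attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
		       struct cgroup *old_cgrp, struct task_struct *tsk,
		       bool threadgroup);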
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index bf8dd1a9f2d1..7ccba4bc5e3b 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1552,7 +1552,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 
 	for_each_subsys(root, ss) {
 		if (ss->can_attach) {
-			retval = ss->can_attach(ss, cgrp, tsk);
+			retval = ss->can_attach(ss, cgrp, tsk, false);
 			if (retval)
 				return retval;
 		}
@@ -1590,7 +1590,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 
 	for_each_subsys(root, ss) {
 		if (ss->attach)
-			ss->attach(ss, cgrp, oldcgrp, tsk);
+			ss->attach(ss, cgrp, oldcgrp, tsk, false);
 	}
 	set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
 	synchronize_rcu();
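cgroup_attach_task() keeps its single-task semantics by passing false for the new argument. A caller that wanted to migrate a whole thread group in one pass would hand the group leader to each subsystem with true instead; a hypothetical sketch of such a caller (no such call site exists in this diff, "leader" is an assumed variable):

	/* hypothetical whole-threadgroup caller, not part of this patch */
	for_each_subsys(root, ss) {
		if (ss->can_attach) {
			retval = ss->can_attach(ss, cgrp, leader, true);
			if (retval)
				return retval;
		}
	}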
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index fb249e2bcada..59e9ef6aab40 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -159,7 +159,7 @@ static bool is_task_frozen_enough(struct task_struct *task)
  */
 static int freezer_can_attach(struct cgroup_subsys *ss,
 			      struct cgroup *new_cgroup,
-			      struct task_struct *task)
+			      struct task_struct *task, bool threadgroup)
 {
 	struct freezer *freezer;
 
@@ -177,6 +177,19 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
 	if (freezer->state == CGROUP_FROZEN)
 		return -EBUSY;
 
+	if (threadgroup) {
+		struct task_struct *c;
+
+		rcu_read_lock();
+		list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
+			if (is_task_frozen_enough(c)) {
+				rcu_read_unlock();
+				return -EBUSY;
+			}
+		}
+		rcu_read_unlock();
+	}
+
 	return 0;
 }
 
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 7e75a41bd508..b5cb469d2545 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1324,9 +1324,10 @@ static int fmeter_getrate(struct fmeter *fmp)
 static cpumask_var_t cpus_attach;
 
 /* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
-static int cpuset_can_attach(struct cgroup_subsys *ss,
-			     struct cgroup *cont, struct task_struct *tsk)
+static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
+			     struct task_struct *tsk, bool threadgroup)
 {
+	int ret;
 	struct cpuset *cs = cgroup_cs(cont);
 
 	if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
@@ -1343,18 +1344,51 @@ static int cpuset_can_attach(struct cgroup_subsys *ss,
 	if (tsk->flags & PF_THREAD_BOUND)
 		return -EINVAL;
 
-	return security_task_setscheduler(tsk, 0, NULL);
+	ret = security_task_setscheduler(tsk, 0, NULL);
+	if (ret)
+		return ret;
+	if (threadgroup) {
+		struct task_struct *c;
+
+		rcu_read_lock();
+		list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+			ret = security_task_setscheduler(c, 0, NULL);
+			if (ret) {
+				rcu_read_unlock();
+				return ret;
+			}
+		}
+		rcu_read_unlock();
+	}
+	return 0;
+}
+
+static void cpuset_attach_task(struct task_struct *tsk, nodemask_t *to,
+			       struct cpuset *cs)
+{
+	int err;
+	/*
+	 * can_attach beforehand should guarantee that this doesn't fail.
+	 * TODO: have a better way to handle failure here
+	 */
+	err = set_cpus_allowed_ptr(tsk, cpus_attach);
+	WARN_ON_ONCE(err);
+
+	task_lock(tsk);
+	cpuset_change_task_nodemask(tsk, to);
+	task_unlock(tsk);
+	cpuset_update_task_spread_flag(cs, tsk);
+
 }
 
-static void cpuset_attach(struct cgroup_subsys *ss,
-			  struct cgroup *cont, struct cgroup *oldcont,
-			  struct task_struct *tsk)
+static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont,
+			  struct cgroup *oldcont, struct task_struct *tsk,
+			  bool threadgroup)
 {
 	nodemask_t from, to;
 	struct mm_struct *mm;
 	struct cpuset *cs = cgroup_cs(cont);
 	struct cpuset *oldcs = cgroup_cs(oldcont);
-	int err;
 
 	if (cs == &top_cpuset) {
 		cpumask_copy(cpus_attach, cpu_possible_mask);
@@ -1363,15 +1397,19 @@ static void cpuset_attach(struct cgroup_subsys *ss,
 		guarantee_online_cpus(cs, cpus_attach);
 		guarantee_online_mems(cs, &to);
 	}
-	err = set_cpus_allowed_ptr(tsk, cpus_attach);
-	if (err)
-		return;
 
-	task_lock(tsk);
-	cpuset_change_task_nodemask(tsk, &to);
-	task_unlock(tsk);
-	cpuset_update_task_spread_flag(cs, tsk);
+	/* do per-task migration stuff possibly for each in the threadgroup */
+	cpuset_attach_task(tsk, &to, cs);
+	if (threadgroup) {
+		struct task_struct *c;
+		rcu_read_lock();
+		list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+			cpuset_attach_task(c, &to, cs);
+		}
+		rcu_read_unlock();
+	}
 
+	/* change mm; only needs to be done once even if threadgroup */
 	from = oldcs->mems_allowed;
 	to = cs->mems_allowed;
 	mm = get_task_mm(tsk);
diff --git a/kernel/ns_cgroup.c b/kernel/ns_cgroup.c
index 5aa854f9e5ae..2a5dfec8efe0 100644
--- a/kernel/ns_cgroup.c
+++ b/kernel/ns_cgroup.c
@@ -42,8 +42,8 @@ int ns_cgroup_clone(struct task_struct *task, struct pid *pid)
  * (hence either you are in the same cgroup as task, or in an
  * ancestor cgroup thereof)
  */
-static int ns_can_attach(struct cgroup_subsys *ss,
-		struct cgroup *new_cgroup, struct task_struct *task)
+static int ns_can_attach(struct cgroup_subsys *ss, struct cgroup *new_cgroup,
+			 struct task_struct *task, bool threadgroup)
 {
 	if (current != task) {
 		if (!capable(CAP_SYS_ADMIN))
@@ -56,6 +56,18 @@ static int ns_can_attach(struct cgroup_subsys *ss,
 	if (!cgroup_is_descendant(new_cgroup, task))
 		return -EPERM;
 
+	if (threadgroup) {
+		struct task_struct *c;
+		rcu_read_lock();
+		list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
+			if (!cgroup_is_descendant(new_cgroup, c)) {
+				rcu_read_unlock();
+				return -EPERM;
+			}
+		}
+		rcu_read_unlock();
+	}
+
 	return 0;
 }
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 2f76e06bea58..0d0361b9dbb3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -10377,8 +10377,7 @@ cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 }
 
 static int
-cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-		      struct task_struct *tsk)
+cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 {
 #ifdef CONFIG_RT_GROUP_SCHED
 	if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
@@ -10388,15 +10387,45 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 	if (tsk->sched_class != &fair_sched_class)
 		return -EINVAL;
 #endif
+	return 0;
+}
 
+static int
+cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+		      struct task_struct *tsk, bool threadgroup)
+{
+	int retval = cpu_cgroup_can_attach_task(cgrp, tsk);
+	if (retval)
+		return retval;
+	if (threadgroup) {
+		struct task_struct *c;
+		rcu_read_lock();
+		list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+			retval = cpu_cgroup_can_attach_task(cgrp, c);
+			if (retval) {
+				rcu_read_unlock();
+				return retval;
+			}
+		}
+		rcu_read_unlock();
+	}
 	return 0;
 }
 
 static void
 cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-		  struct cgroup *old_cont, struct task_struct *tsk)
+		  struct cgroup *old_cont, struct task_struct *tsk,
+		  bool threadgroup)
 {
 	sched_move_task(tsk);
+	if (threadgroup) {
+		struct task_struct *c;
+		rcu_read_lock();
+		list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+			sched_move_task(c);
+		}
+		rcu_read_unlock();
+	}
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
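Every subsystem above implements the new flag the same way: handle the task it was given exactly as before, and only when threadgroup is true walk the task's thread_group list under rcu_read_lock(), applying the same per-task check or action to each thread and bailing out on the first error. A minimal distillation of that recurring shape, with do_one_task() as a stand-in for the subsystem-specific helper (illustrative only, not part of the patch):

	/* illustrative only: the per-thread walk each hook above repeats;
	 * do_one_task() is a hypothetical stand-in for the subsystem-specific
	 * check or action (e.g. security_task_setscheduler, sched_move_task)
	 */
	ret = do_one_task(tsk);
	if (ret)
		return ret;
	if (threadgroup) {
		struct task_struct *c;

		rcu_read_lock();
		list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
			ret = do_one_task(c);
			if (ret) {
				rcu_read_unlock();
				return ret;
			}
		}
		rcu_read_unlock();
	}
	return 0;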