-rw-r--r--  block/blk-cgroup.c       | 45
-rw-r--r--  kernel/cgroup_freezer.c  | 14
-rw-r--r--  kernel/cpuset.c          | 70
-rw-r--r--  kernel/events/core.c     | 13
-rw-r--r--  kernel/sched.c           | 31
5 files changed, 91 insertions(+), 82 deletions(-)
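All five conversions below follow the same shape: each subsystem's per-task ->can_attach_task() and ->attach_task() callbacks are folded into taskset-aware ->can_attach() and ->attach() handlers that walk the migrating group with cgroup_taskset_for_each(). As orientation before the per-file hunks, here is a minimal sketch of that shape; the "example" names and the PF_EXITING check are illustrative only (they are not part of this patch), and the iterator arguments simply mirror how the converted subsystems below use them.

#include <linux/cgroup.h>
#include <linux/errno.h>
#include <linux/sched.h>

/* Veto the whole migration if any task in the set is unsuitable. */
static int example_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			      struct cgroup_taskset *tset)
{
	struct task_struct *task;

	cgroup_taskset_for_each(task, cgrp, tset) {
		if (task->flags & PF_EXITING)	/* illustrative check only */
			return -EBUSY;
	}
	return 0;
}

/* Per-task work that previously lived in ->attach_task(). */
static void example_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			   struct cgroup_taskset *tset)
{
	struct task_struct *task;

	cgroup_taskset_for_each(task, cgrp, tset)
		pr_debug("example: attaching pid %d\n", task_pid_nr(task));
}

/* Wiring mirrors the struct cgroup_subsys updates in the hunks below. */
struct cgroup_subsys example_subsys = {
	.name		= "example",
	.can_attach	= example_can_attach,
	.attach		= example_attach,
};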
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 8f630cec906e..b8c143d68ee0 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -30,8 +30,10 @@ EXPORT_SYMBOL_GPL(blkio_root_cgroup);
 
 static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
 						  struct cgroup *);
-static int blkiocg_can_attach_task(struct cgroup *, struct task_struct *);
-static void blkiocg_attach_task(struct cgroup *, struct task_struct *);
+static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
+			      struct cgroup_taskset *);
+static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
+			   struct cgroup_taskset *);
 static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
 static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
 
@@ -44,8 +46,8 @@ static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
 struct cgroup_subsys blkio_subsys = {
 	.name = "blkio",
 	.create = blkiocg_create,
-	.can_attach_task = blkiocg_can_attach_task,
-	.attach_task = blkiocg_attach_task,
+	.can_attach = blkiocg_can_attach,
+	.attach = blkiocg_attach,
 	.destroy = blkiocg_destroy,
 	.populate = blkiocg_populate,
 #ifdef CONFIG_BLK_CGROUP
@@ -1626,30 +1628,39 @@ done:
  * of the main cic data structures. For now we allow a task to change
  * its cgroup only if it's the only owner of its ioc.
  */
-static int blkiocg_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+			      struct cgroup_taskset *tset)
 {
+	struct task_struct *task;
 	struct io_context *ioc;
 	int ret = 0;
 
 	/* task_lock() is needed to avoid races with exit_io_context() */
-	task_lock(tsk);
-	ioc = tsk->io_context;
-	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
-		ret = -EINVAL;
-	task_unlock(tsk);
-
+	cgroup_taskset_for_each(task, cgrp, tset) {
+		task_lock(task);
+		ioc = task->io_context;
+		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
+			ret = -EINVAL;
+		task_unlock(task);
+		if (ret)
+			break;
+	}
 	return ret;
 }
 
-static void blkiocg_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+			   struct cgroup_taskset *tset)
 {
+	struct task_struct *task;
 	struct io_context *ioc;
 
-	task_lock(tsk);
-	ioc = tsk->io_context;
-	if (ioc)
-		ioc->cgroup_changed = 1;
-	task_unlock(tsk);
+	cgroup_taskset_for_each(task, cgrp, tset) {
+		task_lock(task);
+		ioc = task->io_context;
+		if (ioc)
+			ioc->cgroup_changed = 1;
+		task_unlock(task);
+	}
 }
 
 void blkio_policy_register(struct blkio_policy_type *blkiop)
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index e95c6fb65cc0..0e748059ba87 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -162,10 +162,14 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
 			      struct cgroup_taskset *tset)
 {
 	struct freezer *freezer;
+	struct task_struct *task;
 
 	/*
 	 * Anything frozen can't move or be moved to/from.
 	 */
+	cgroup_taskset_for_each(task, new_cgroup, tset)
+		if (cgroup_freezing(task))
+			return -EBUSY;
 
 	freezer = cgroup_freezer(new_cgroup);
 	if (freezer->state != CGROUP_THAWED)
@@ -174,11 +178,6 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
 	return 0;
 }
 
-static int freezer_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
-{
-	return cgroup_freezing(tsk) ? -EBUSY : 0;
-}
-
 static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
 {
 	struct freezer *freezer;
@@ -374,10 +373,5 @@ struct cgroup_subsys freezer_subsys = {
 	.populate = freezer_populate,
 	.subsys_id = freezer_subsys_id,
 	.can_attach = freezer_can_attach,
-	.can_attach_task = freezer_can_attach_task,
-	.pre_attach = NULL,
-	.attach_task = NULL,
-	.attach = NULL,
 	.fork = freezer_fork,
-	.exit = NULL,
 };
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 512bd59e8627..9a8a61301524 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1375,33 +1375,34 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 			     struct cgroup_taskset *tset)
 {
 	struct cpuset *cs = cgroup_cs(cgrp);
+	struct task_struct *task;
+	int ret;
 
 	if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
 		return -ENOSPC;
 
-	/*
-	 * Kthreads bound to specific cpus cannot be moved to a new cpuset; we
-	 * cannot change their cpu affinity and isolating such threads by their
-	 * set of allowed nodes is unnecessary. Thus, cpusets are not
-	 * applicable for such threads. This prevents checking for success of
-	 * set_cpus_allowed_ptr() on all attached tasks before cpus_allowed may
-	 * be changed.
-	 */
-	if (cgroup_taskset_first(tset)->flags & PF_THREAD_BOUND)
-		return -EINVAL;
-
+	cgroup_taskset_for_each(task, cgrp, tset) {
+		/*
+		 * Kthreads bound to specific cpus cannot be moved to a new
+		 * cpuset; we cannot change their cpu affinity and
+		 * isolating such threads by their set of allowed nodes is
+		 * unnecessary. Thus, cpusets are not applicable for such
+		 * threads. This prevents checking for success of
+		 * set_cpus_allowed_ptr() on all attached tasks before
+		 * cpus_allowed may be changed.
+		 */
+		if (task->flags & PF_THREAD_BOUND)
+			return -EINVAL;
+		if ((ret = security_task_setscheduler(task)))
+			return ret;
+	}
 	return 0;
 }
 
-static int cpuset_can_attach_task(struct cgroup *cgrp, struct task_struct *task)
-{
-	return security_task_setscheduler(task);
-}
-
 /*
  * Protected by cgroup_lock. The nodemasks must be stored globally because
  * dynamically allocating them is not allowed in pre_attach, and they must
- * persist among pre_attach, attach_task, and attach.
+ * persist among pre_attach, and attach.
  */
 static cpumask_var_t cpus_attach;
 static nodemask_t cpuset_attach_nodemask_from;
@@ -1420,39 +1421,34 @@ static void cpuset_pre_attach(struct cgroup *cont)
 	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
 }
 
-/* Per-thread attachment work. */
-static void cpuset_attach_task(struct cgroup *cont, struct task_struct *tsk)
-{
-	int err;
-	struct cpuset *cs = cgroup_cs(cont);
-
-	/*
-	 * can_attach beforehand should guarantee that this doesn't fail.
-	 * TODO: have a better way to handle failure here
-	 */
-	err = set_cpus_allowed_ptr(tsk, cpus_attach);
-	WARN_ON_ONCE(err);
-
-	cpuset_change_task_nodemask(tsk, &cpuset_attach_nodemask_to);
-	cpuset_update_task_spread_flag(cs, tsk);
-}
-
 static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 			  struct cgroup_taskset *tset)
 {
 	struct mm_struct *mm;
-	struct task_struct *tsk = cgroup_taskset_first(tset);
+	struct task_struct *task;
+	struct task_struct *leader = cgroup_taskset_first(tset);
 	struct cgroup *oldcgrp = cgroup_taskset_cur_cgroup(tset);
 	struct cpuset *cs = cgroup_cs(cgrp);
 	struct cpuset *oldcs = cgroup_cs(oldcgrp);
 
+	cgroup_taskset_for_each(task, cgrp, tset) {
+		/*
+		 * can_attach beforehand should guarantee that this doesn't
+		 * fail. TODO: have a better way to handle failure here
+		 */
+		WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
+
+		cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
+		cpuset_update_task_spread_flag(cs, task);
+	}
+
 	/*
 	 * Change mm, possibly for multiple threads in a threadgroup. This is
 	 * expensive and may sleep.
 	 */
 	cpuset_attach_nodemask_from = oldcs->mems_allowed;
 	cpuset_attach_nodemask_to = cs->mems_allowed;
-	mm = get_task_mm(tsk);
+	mm = get_task_mm(leader);
 	if (mm) {
 		mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
 		if (is_memory_migrate(cs))
@@ -1908,9 +1904,7 @@ struct cgroup_subsys cpuset_subsys = {
 	.create = cpuset_create,
 	.destroy = cpuset_destroy,
 	.can_attach = cpuset_can_attach,
-	.can_attach_task = cpuset_can_attach_task,
 	.pre_attach = cpuset_pre_attach,
-	.attach_task = cpuset_attach_task,
 	.attach = cpuset_attach,
 	.populate = cpuset_populate,
 	.post_clone = cpuset_post_clone,
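A detail worth noting in the cpuset hunks above: per-thread state (cpu affinity, nodemask, spread flags) is now updated inside cgroup_taskset_for_each(), while the expensive mm rebinding is still done only once, through the threadgroup leader returned by cgroup_taskset_first(), because every thread in the group shares the same mm_struct. A condensed sketch of that split (hypothetical helper name; the locking, memory migration, and error handling of the real function are omitted):

static void cpuset_attach_sketch(struct cgroup_subsys *ss, struct cgroup *cgrp,
				 struct cgroup_taskset *tset)
{
	struct task_struct *leader = cgroup_taskset_first(tset);
	struct task_struct *task;
	struct mm_struct *mm;

	/* cheap, per-thread updates: run for every task in the set */
	cgroup_taskset_for_each(task, cgrp, tset)
		cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);

	/* expensive, shared update: one mm serves the whole threadgroup */
	mm = get_task_mm(leader);
	if (mm) {
		mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
		mmput(mm);
	}
}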
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0e8457da6f95..3b8e0edbe693 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7044,10 +7044,13 @@ static int __perf_cgroup_move(void *info)
 	return 0;
 }
 
-static void
-perf_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *task)
+static void perf_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+			       struct cgroup_taskset *tset)
 {
-	task_function_call(task, __perf_cgroup_move, task);
+	struct task_struct *task;
+
+	cgroup_taskset_for_each(task, cgrp, tset)
+		task_function_call(task, __perf_cgroup_move, task);
 }
 
 static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
@@ -7061,7 +7064,7 @@ static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
 	if (!(task->flags & PF_EXITING))
 		return;
 
-	perf_cgroup_attach_task(cgrp, task);
+	task_function_call(task, __perf_cgroup_move, task);
 }
 
 struct cgroup_subsys perf_subsys = {
@@ -7070,6 +7073,6 @@ struct cgroup_subsys perf_subsys = {
 	.create = perf_cgroup_create,
 	.destroy = perf_cgroup_destroy,
 	.exit = perf_cgroup_exit,
-	.attach_task = perf_cgroup_attach_task,
+	.attach = perf_cgroup_attach,
 };
 #endif /* CONFIG_CGROUP_PERF */
diff --git a/kernel/sched.c b/kernel/sched.c
index 0e9344a71be3..161184da7b81 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -9127,24 +9127,31 @@ cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 	sched_destroy_group(tg);
 }
 
-static int
-cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+static int cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+				 struct cgroup_taskset *tset)
 {
+	struct task_struct *task;
+
+	cgroup_taskset_for_each(task, cgrp, tset) {
 #ifdef CONFIG_RT_GROUP_SCHED
-	if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
-		return -EINVAL;
+		if (!sched_rt_can_attach(cgroup_tg(cgrp), task))
+			return -EINVAL;
 #else
-	/* We don't support RT-tasks being in separate groups */
-	if (tsk->sched_class != &fair_sched_class)
-		return -EINVAL;
+		/* We don't support RT-tasks being in separate groups */
+		if (task->sched_class != &fair_sched_class)
+			return -EINVAL;
 #endif
+	}
 	return 0;
 }
 
-static void
-cpu_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+static void cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+			      struct cgroup_taskset *tset)
 {
-	sched_move_task(tsk);
+	struct task_struct *task;
+
+	cgroup_taskset_for_each(task, cgrp, tset)
+		sched_move_task(task);
 }
 
 static void
@@ -9480,8 +9487,8 @@ struct cgroup_subsys cpu_cgroup_subsys = {
 	.name = "cpu",
 	.create = cpu_cgroup_create,
 	.destroy = cpu_cgroup_destroy,
-	.can_attach_task = cpu_cgroup_can_attach_task,
-	.attach_task = cpu_cgroup_attach_task,
+	.can_attach = cpu_cgroup_can_attach,
+	.attach = cpu_cgroup_attach,
 	.exit = cpu_cgroup_exit,
 	.populate = cpu_cgroup_populate,
 	.subsys_id = cpu_cgroup_subsys_id,