author    Tejun Heo <tj@kernel.org>	2011-12-12 21:12:21 -0500
committer Tejun Heo <tj@kernel.org>	2011-12-12 21:12:21 -0500
commit    bb9d97b6dffa10cec5e1ce9adbce60f3c2b5eabc (patch)
tree      fb8351518fcfb91927e9e138f48284c44553f011 /block/blk-cgroup.c
parent    2f7ee5691eecb67c8108b92001a85563ea336ac5 (diff)
cgroup: don't use subsys->can_attach_task() or ->attach_task()
Now that subsys->can_attach() and attach() take @tset instead of @task, they
can handle per-task operations.  Convert ->can_attach_task() and
->attach_task() users to use ->can_attach() and attach() instead.  Most
conversions are straightforward.  Noteworthy changes are,

* In cgroup_freezer, remove unnecessary NULL assignments to unused methods.
  It's useless and very prone to get out of sync, which already happened.

* In cpuset, PF_THREAD_BOUND test is checked for each task.  This doesn't
  make any practical difference but is conceptually cleaner.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Li Zefan <lizf@cn.fujitsu.com>
Cc: Paul Menage <paul@paulmenage.org>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: James Morris <jmorris@namei.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <peterz@infradead.org>
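For illustration, a minimal sketch of the conversion pattern this patch applies
across subsystems: the per-task body that previously lived in
->can_attach_task() moves into ->can_attach() and is wrapped in
cgroup_taskset_for_each(), which walks every task in the @tset being attached.
The "foo" subsystem below is hypothetical (not part of this patch); the
per-task PF_THREAD_BOUND check mirrors the cpuset conversion mentioned above,
and a real subsystem declaration carries more fields than shown.

    #include <linux/cgroup.h>
    #include <linux/sched.h>

    /* Hypothetical subsystem: refuse the attach if any task in the set is
     * bound to a single CPU, checked per task as cpuset now does. */
    static int foo_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
                              struct cgroup_taskset *tset)
    {
            struct task_struct *task;

            cgroup_taskset_for_each(task, cgrp, tset) {
                    if (task->flags & PF_THREAD_BOUND)
                            return -EINVAL;
            }
            return 0;
    }

    /* Sketch only: the per-task callback pointers go away entirely. */
    struct cgroup_subsys foo_subsys = {
            .name       = "foo",
            .can_attach = foo_can_attach,   /* was .can_attach_task */
    };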
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--  block/blk-cgroup.c  |  45
1 file changed, 28 insertions, 17 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 8f630cec906e..b8c143d68ee0 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -30,8 +30,10 @@ EXPORT_SYMBOL_GPL(blkio_root_cgroup);
 
 static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
                                                   struct cgroup *);
-static int blkiocg_can_attach_task(struct cgroup *, struct task_struct *);
-static void blkiocg_attach_task(struct cgroup *, struct task_struct *);
+static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
+                              struct cgroup_taskset *);
+static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
+                           struct cgroup_taskset *);
 static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
 static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
 
@@ -44,8 +46,8 @@ static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
 struct cgroup_subsys blkio_subsys = {
 	.name = "blkio",
 	.create = blkiocg_create,
-	.can_attach_task = blkiocg_can_attach_task,
-	.attach_task = blkiocg_attach_task,
+	.can_attach = blkiocg_can_attach,
+	.attach = blkiocg_attach,
 	.destroy = blkiocg_destroy,
 	.populate = blkiocg_populate,
 #ifdef CONFIG_BLK_CGROUP
@@ -1626,30 +1628,39 @@ done:
  * of the main cic data structures. For now we allow a task to change
  * its cgroup only if it's the only owner of its ioc.
  */
-static int blkiocg_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+                              struct cgroup_taskset *tset)
 {
+	struct task_struct *task;
 	struct io_context *ioc;
 	int ret = 0;
 
 	/* task_lock() is needed to avoid races with exit_io_context() */
-	task_lock(tsk);
-	ioc = tsk->io_context;
-	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
-		ret = -EINVAL;
-	task_unlock(tsk);
-
+	cgroup_taskset_for_each(task, cgrp, tset) {
+		task_lock(task);
+		ioc = task->io_context;
+		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
+			ret = -EINVAL;
+		task_unlock(task);
+		if (ret)
+			break;
+	}
 	return ret;
 }
 
-static void blkiocg_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+                           struct cgroup_taskset *tset)
 {
+	struct task_struct *task;
 	struct io_context *ioc;
 
-	task_lock(tsk);
-	ioc = tsk->io_context;
-	if (ioc)
-		ioc->cgroup_changed = 1;
-	task_unlock(tsk);
+	cgroup_taskset_for_each(task, cgrp, tset) {
+		task_lock(task);
+		ioc = task->io_context;
+		if (ioc)
+			ioc->cgroup_changed = 1;
+		task_unlock(task);
+	}
 }
 
 void blkio_policy_register(struct blkio_policy_type *blkiop)