Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.c | 25
1 file changed, 13 insertions(+), 12 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 290792a13e3c..79fd9f4fadb7 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -765,18 +765,18 @@ struct cftype blkcg_files[] = {
 
 /**
  * blkcg_css_offline - cgroup css_offline callback
- * @cgroup: cgroup of interest
+ * @css: css of interest
  *
- * This function is called when @cgroup is about to go away and responsible
- * for shooting down all blkgs associated with @cgroup. blkgs should be
+ * This function is called when @css is about to go away and responsible
+ * for shooting down all blkgs associated with @css. blkgs should be
  * removed while holding both q and blkcg locks. As blkcg lock is nested
  * inside q lock, this function performs reverse double lock dancing.
  *
  * This is the blkcg counterpart of ioc_release_fn().
  */
-static void blkcg_css_offline(struct cgroup *cgroup)
+static void blkcg_css_offline(struct cgroup_subsys_state *css)
 {
-	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
+	struct blkcg *blkcg = css_to_blkcg(css);
 
 	spin_lock_irq(&blkcg->lock);
 
@@ -798,21 +798,21 @@ static void blkcg_css_offline(struct cgroup *cgroup)
 	spin_unlock_irq(&blkcg->lock);
 }
 
-static void blkcg_css_free(struct cgroup *cgroup)
+static void blkcg_css_free(struct cgroup_subsys_state *css)
 {
-	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
+	struct blkcg *blkcg = css_to_blkcg(css);
 
 	if (blkcg != &blkcg_root)
 		kfree(blkcg);
 }
 
-static struct cgroup_subsys_state *blkcg_css_alloc(struct cgroup *cgroup)
+static struct cgroup_subsys_state *
+blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
 {
 	static atomic64_t id_seq = ATOMIC64_INIT(0);
 	struct blkcg *blkcg;
-	struct cgroup *parent = cgroup->parent;
 
-	if (!parent) {
+	if (!parent_css) {
 		blkcg = &blkcg_root;
 		goto done;
 	}
@@ -883,14 +883,15 @@ void blkcg_exit_queue(struct request_queue *q)
  * of the main cic data structures. For now we allow a task to change
  * its cgroup only if it's the only owner of its ioc.
  */
-static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+static int blkcg_can_attach(struct cgroup_subsys_state *css,
+			    struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
 	struct io_context *ioc;
 	int ret = 0;
 
 	/* task_lock() is needed to avoid races with exit_io_context() */
-	cgroup_taskset_for_each(task, cgrp, tset) {
+	cgroup_taskset_for_each(task, css->cgroup, tset) {
 		task_lock(task);
 		ioc = task->io_context;
 		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
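
The whole patch follows one mechanical pattern: cgroup subsystem callbacks now
receive a struct cgroup_subsys_state * directly instead of a struct cgroup *,
so a subsystem recovers its private state with a single container_of() on the
embedded css rather than indirecting through the cgroup. Below is a minimal
standalone sketch of that lookup; the struct layouts and the container_of()
macro are simplified stand-ins for illustration, not the kernel's definitions.

/* sketch.c - the css-to-subsystem-state lookup pattern applied in this diff */
#include <stdio.h>
#include <stddef.h>

/* simplified stand-in for the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* toy stand-ins for the kernel structs */
struct cgroup_subsys_state { int refcnt; };

struct blkcg {
	struct cgroup_subsys_state css;	/* embedded css */
	int id;
};

/* New-style helper: the callback is handed the css directly and one
 * container_of() recovers the enclosing blkcg. */
static struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

int main(void)
{
	struct blkcg bc = { .id = 42 };

	/* a callback such as blkcg_css_offline() would map its css
	 * argument back to the blkcg exactly like this */
	printf("blkcg id = %d\n", css_to_blkcg(&bc.css)->id);
	return 0;
}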