author		Tejun Heo <tj@kernel.org>	2012-04-13 16:11:33 -0400
committer	Jens Axboe <axboe@kernel.dk>	2012-04-20 04:06:06 -0400
commit		a2b1693bac45ea3fe3ba612fd22c45f17449f610
tree		2e05859caab6453efbc85d584dd72dca7ef03cd0
parent		03d8e11142a893ad322285d3c8a08e88b570cda1
blkcg: implement per-queue policy activation
All blkcg policies were assumed to be enabled on all request_queues.  Due
to various implementation obstacles, during the recent blkcg core updates,
this was temporarily implemented as shooting down all !root blkgs on
elevator switch and policy [de]registration combined with half-broken
in-place root blkg updates.  In addition to being buggy and racy, this
meant losing all blkcg configurations across those events.

Now that blkcg is cleaned up enough, this patch replaces the temporary
implementation with proper per-queue policy activation.  Each blkcg policy
should call the new blkcg_[de]activate_policy() to enable and disable the
policy on a specific queue.  blkcg_activate_policy() allocates and installs
policy data for the policy for all existing blkgs.
blkcg_deactivate_policy() does the reverse.  If a policy is not enabled for
a given queue, blkg printing / config functions skip the respective blkg
for the queue.

blkcg_activate_policy() also takes care of root blkg creation, and
cfq_init_queue() and blk_throtl_init() are updated accordingly.

This makes blkcg_bypass_{start|end}() and update_root_blkg_pd()
unnecessary; both are dropped.

v2: cfq_init_queue() was returning uninitialized @ret on root_group alloc
    failure if !CONFIG_CFQ_GROUP_IOSCHED.  Fixed.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
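For orientation, here is a minimal sketch of how a blkcg policy is expected
to drive the new interface from its queue init/exit paths, modeled on the
blk_throtl_init()/blk_throtl_exit() changes below.  The policy name
blkio_policy_foo, struct foo_data, and the q->foo_data pointer are
hypothetical placeholders used only for illustration; only the two
blkcg_[de]activate_policy() calls come from this patch.

	/* sketch only -- "foo" is a made-up policy, not part of this patch */
	int foo_init_queue(struct request_queue *q)
	{
		struct foo_data *fd;
		int ret;

		fd = kzalloc_node(sizeof(*fd), GFP_KERNEL, q->node);
		if (!fd)
			return -ENOMEM;
		q->foo_data = fd;	/* hypothetical per-queue pointer */

		/* creates the root blkg and per-blkg policy data for @q */
		ret = blkcg_activate_policy(q, &blkio_policy_foo);
		if (ret)
			kfree(fd);
		return ret;
	}

	void foo_exit_queue(struct request_queue *q)
	{
		/* frees per-blkg policy data and clears @q's enable bit */
		blkcg_deactivate_policy(q, &blkio_policy_foo);
		kfree(q->foo_data);
	}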
-rw-r--r--	block/blk-cgroup.c	228
-rw-r--r--	block/blk-cgroup.h	15
-rw-r--r--	block/blk-throttle.c	52
-rw-r--r--	block/cfq-iosched.c	37
-rw-r--r--	block/elevator.c	2
-rw-r--r--	include/linux/blkdev.h	1
6 files changed, 201 insertions(+), 134 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index d6e4555c982f..d6d59ad105b4 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -54,6 +54,17 @@ struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
 }
 EXPORT_SYMBOL_GPL(bio_blkio_cgroup);
 
+static bool blkcg_policy_enabled(struct request_queue *q,
+				 const struct blkio_policy_type *pol)
+{
+	return pol && test_bit(pol->plid, q->blkcg_pols);
+}
+
+static size_t blkg_pd_size(const struct blkio_policy_type *pol)
+{
+	return sizeof(struct blkg_policy_data) + pol->pdata_size;
+}
+
 /**
  * blkg_free - free a blkg
  * @blkg: blkg to free
@@ -111,12 +122,11 @@ static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
 		struct blkio_policy_type *pol = blkio_policy[i];
 		struct blkg_policy_data *pd;
 
-		if (!pol)
+		if (!blkcg_policy_enabled(q, pol))
 			continue;
 
 		/* alloc per-policy data and attach it to blkg */
-		pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
-				  q->node);
+		pd = kzalloc_node(blkg_pd_size(pol), GFP_ATOMIC, q->node);
 		if (!pd) {
 			blkg_free(blkg);
 			return NULL;
@@ -130,7 +140,7 @@ static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
 	for (i = 0; i < BLKCG_MAX_POLS; i++) {
 		struct blkio_policy_type *pol = blkio_policy[i];
 
-		if (pol)
+		if (blkcg_policy_enabled(blkg->q, pol))
 			pol->ops.blkio_init_group_fn(blkg);
 	}
 
@@ -236,36 +246,6 @@ static void blkg_destroy(struct blkio_group *blkg)
 	blkg_put(blkg);
 }
 
-/*
- * XXX: This updates blkg policy data in-place for root blkg, which is
- * necessary across elevator switch and policy registration as root blkgs
- * aren't shot down. This broken and racy implementation is temporary.
- * Eventually, blkg shoot down will be replaced by proper in-place update.
- */
-void update_root_blkg_pd(struct request_queue *q,
-			 const struct blkio_policy_type *pol)
-{
-	struct blkio_group *blkg = blkg_lookup(&blkio_root_cgroup, q);
-	struct blkg_policy_data *pd;
-
-	if (!blkg)
-		return;
-
-	kfree(blkg->pd[pol->plid]);
-	blkg->pd[pol->plid] = NULL;
-
-	if (!pol)
-		return;
-
-	pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
-	WARN_ON_ONCE(!pd);
-
-	blkg->pd[pol->plid] = pd;
-	pd->blkg = blkg;
-	pol->ops.blkio_init_group_fn(blkg);
-}
-EXPORT_SYMBOL_GPL(update_root_blkg_pd);
-
 /**
  * blkg_destroy_all - destroy all blkgs associated with a request_queue
  * @q: request_queue of interest
@@ -339,7 +319,8 @@ blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
 		for (i = 0; i < BLKCG_MAX_POLS; i++) {
 			struct blkio_policy_type *pol = blkio_policy[i];
 
-			if (pol && pol->ops.blkio_reset_group_stats_fn)
+			if (blkcg_policy_enabled(blkg->q, pol) &&
+			    pol->ops.blkio_reset_group_stats_fn)
 				pol->ops.blkio_reset_group_stats_fn(blkg);
 		}
 	}
@@ -385,7 +366,7 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg,
 
 	spin_lock_irq(&blkcg->lock);
 	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
-		if (blkg->pd[pol->plid])
+		if (blkcg_policy_enabled(blkg->q, pol))
 			total += prfill(sf, blkg->pd[pol->plid]->pdata, data);
 	spin_unlock_irq(&blkcg->lock);
 
@@ -510,7 +491,10 @@ int blkg_conf_prep(struct blkio_cgroup *blkcg,
 	rcu_read_lock();
 	spin_lock_irq(disk->queue->queue_lock);
 
-	blkg = blkg_lookup_create(blkcg, disk->queue, false);
+	if (blkcg_policy_enabled(disk->queue, pol))
+		blkg = blkg_lookup_create(blkcg, disk->queue, false);
+	else
+		blkg = ERR_PTR(-EINVAL);
 
 	if (IS_ERR(blkg)) {
 		ret = PTR_ERR(blkg);
@@ -712,30 +696,6 @@ static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 	return ret;
 }
 
-static void blkcg_bypass_start(void)
-	__acquires(&all_q_mutex)
-{
-	struct request_queue *q;
-
-	mutex_lock(&all_q_mutex);
-
-	list_for_each_entry(q, &all_q_list, all_q_node) {
-		blk_queue_bypass_start(q);
-		blkg_destroy_all(q, false);
-	}
-}
-
-static void blkcg_bypass_end(void)
-	__releases(&all_q_mutex)
-{
-	struct request_queue *q;
-
-	list_for_each_entry(q, &all_q_list, all_q_node)
-		blk_queue_bypass_end(q);
-
-	mutex_unlock(&all_q_mutex);
-}
-
 struct cgroup_subsys blkio_subsys = {
 	.name = "blkio",
 	.create = blkiocg_create,
@@ -749,6 +709,139 @@ struct cgroup_subsys blkio_subsys = {
 EXPORT_SYMBOL_GPL(blkio_subsys);
 
 /**
+ * blkcg_activate_policy - activate a blkcg policy on a request_queue
+ * @q: request_queue of interest
+ * @pol: blkcg policy to activate
+ *
+ * Activate @pol on @q. Requires %GFP_KERNEL context. @q goes through
+ * bypass mode to populate its blkgs with policy_data for @pol.
+ *
+ * Activation happens with @q bypassed, so nobody would be accessing blkgs
+ * from IO path. Update of each blkg is protected by both queue and blkcg
+ * locks so that holding either lock and testing blkcg_policy_enabled() is
+ * always enough for dereferencing policy data.
+ *
+ * The caller is responsible for synchronizing [de]activations and policy
+ * [un]registerations. Returns 0 on success, -errno on failure.
+ */
+int blkcg_activate_policy(struct request_queue *q,
+			  const struct blkio_policy_type *pol)
+{
+	LIST_HEAD(pds);
+	struct blkio_group *blkg;
+	struct blkg_policy_data *pd, *n;
+	int cnt = 0, ret;
+
+	if (blkcg_policy_enabled(q, pol))
+		return 0;
+
+	blk_queue_bypass_start(q);
+
+	/* make sure the root blkg exists and count the existing blkgs */
+	spin_lock_irq(q->queue_lock);
+
+	rcu_read_lock();
+	blkg = blkg_lookup_create(&blkio_root_cgroup, q, true);
+	rcu_read_unlock();
+
+	if (IS_ERR(blkg)) {
+		ret = PTR_ERR(blkg);
+		goto out_unlock;
+	}
+	q->root_blkg = blkg;
+
+	list_for_each_entry(blkg, &q->blkg_list, q_node)
+		cnt++;
+
+	spin_unlock_irq(q->queue_lock);
+
+	/* allocate policy_data for all existing blkgs */
+	while (cnt--) {
+		pd = kzalloc_node(blkg_pd_size(pol), GFP_KERNEL, q->node);
+		if (!pd) {
+			ret = -ENOMEM;
+			goto out_free;
+		}
+		list_add_tail(&pd->alloc_node, &pds);
+	}
+
+	/*
+	 * Install the allocated pds. With @q bypassing, no new blkg
+	 * should have been created while the queue lock was dropped.
+	 */
+	spin_lock_irq(q->queue_lock);
+
+	list_for_each_entry(blkg, &q->blkg_list, q_node) {
+		if (WARN_ON(list_empty(&pds))) {
+			/* umm... this shouldn't happen, just abort */
+			ret = -ENOMEM;
+			goto out_unlock;
+		}
+		pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
+		list_del_init(&pd->alloc_node);
+
+		/* grab blkcg lock too while installing @pd on @blkg */
+		spin_lock(&blkg->blkcg->lock);
+
+		blkg->pd[pol->plid] = pd;
+		pd->blkg = blkg;
+		pol->ops.blkio_init_group_fn(blkg);
+
+		spin_unlock(&blkg->blkcg->lock);
+	}
+
+	__set_bit(pol->plid, q->blkcg_pols);
+	ret = 0;
+out_unlock:
+	spin_unlock_irq(q->queue_lock);
+out_free:
+	blk_queue_bypass_end(q);
+	list_for_each_entry_safe(pd, n, &pds, alloc_node)
+		kfree(pd);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(blkcg_activate_policy);
+
+/**
+ * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
+ * @q: request_queue of interest
+ * @pol: blkcg policy to deactivate
+ *
+ * Deactivate @pol on @q. Follows the same synchronization rules as
+ * blkcg_activate_policy().
+ */
+void blkcg_deactivate_policy(struct request_queue *q,
+			     const struct blkio_policy_type *pol)
+{
+	struct blkio_group *blkg;
+
+	if (!blkcg_policy_enabled(q, pol))
+		return;
+
+	blk_queue_bypass_start(q);
+	spin_lock_irq(q->queue_lock);
+
+	__clear_bit(pol->plid, q->blkcg_pols);
+
+	list_for_each_entry(blkg, &q->blkg_list, q_node) {
+		/* grab blkcg lock too while removing @pd from @blkg */
+		spin_lock(&blkg->blkcg->lock);
+
+		if (pol->ops.blkio_exit_group_fn)
+			pol->ops.blkio_exit_group_fn(blkg);
+
+		kfree(blkg->pd[pol->plid]);
+		blkg->pd[pol->plid] = NULL;
+
+		spin_unlock(&blkg->blkcg->lock);
+	}
+
+	spin_unlock_irq(q->queue_lock);
+	blk_queue_bypass_end(q);
+}
+EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
+
+/**
  * blkio_policy_register - register a blkcg policy
  * @blkiop: blkcg policy to register
  *
@@ -758,7 +851,6 @@ EXPORT_SYMBOL_GPL(blkio_subsys);
  */
 int blkio_policy_register(struct blkio_policy_type *blkiop)
 {
-	struct request_queue *q;
 	int i, ret;
 
 	mutex_lock(&blkcg_pol_mutex);
@@ -775,11 +867,6 @@ int blkio_policy_register(struct blkio_policy_type *blkiop)
 	blkiop->plid = i;
 	blkio_policy[i] = blkiop;
 
-	blkcg_bypass_start();
-	list_for_each_entry(q, &all_q_list, all_q_node)
-		update_root_blkg_pd(q, blkiop);
-	blkcg_bypass_end();
-
 	/* everything is in place, add intf files for the new policy */
 	if (blkiop->cftypes)
 		WARN_ON(cgroup_add_cftypes(&blkio_subsys, blkiop->cftypes));
@@ -798,8 +885,6 @@ EXPORT_SYMBOL_GPL(blkio_policy_register);
  */
 void blkio_policy_unregister(struct blkio_policy_type *blkiop)
 {
-	struct request_queue *q;
-
 	mutex_lock(&blkcg_pol_mutex);
 
 	if (WARN_ON(blkio_policy[blkiop->plid] != blkiop))
@@ -811,11 +896,6 @@ void blkio_policy_unregister(struct blkio_policy_type *blkiop)
 
 	/* unregister and update blkgs */
 	blkio_policy[blkiop->plid] = NULL;
-
-	blkcg_bypass_start();
-	list_for_each_entry(q, &all_q_list, all_q_node)
-		update_root_blkg_pd(q, blkiop);
-	blkcg_bypass_end();
 out_unlock:
 	mutex_unlock(&blkcg_pol_mutex);
 }
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index df1c7b290c22..66253a7c8ff4 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -64,6 +64,9 @@ struct blkg_policy_data {
 	/* the blkg this per-policy data belongs to */
 	struct blkio_group *blkg;
 
+	/* used during policy activation */
+	struct list_head alloc_node;
+
 	/* pol->pdata_size bytes of private data used by policy impl */
 	char pdata[] __aligned(__alignof__(unsigned long long));
 };
@@ -108,9 +111,11 @@ extern void blkcg_exit_queue(struct request_queue *q);
 /* Blkio controller policy registration */
 extern int blkio_policy_register(struct blkio_policy_type *);
 extern void blkio_policy_unregister(struct blkio_policy_type *);
+extern int blkcg_activate_policy(struct request_queue *q,
+				 const struct blkio_policy_type *pol);
+extern void blkcg_deactivate_policy(struct request_queue *q,
+				    const struct blkio_policy_type *pol);
 extern void blkg_destroy_all(struct request_queue *q, bool destroy_root);
-extern void update_root_blkg_pd(struct request_queue *q,
-				const struct blkio_policy_type *pol);
 
 void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg,
 		       u64 (*prfill)(struct seq_file *, void *, int),
@@ -325,10 +330,12 @@ static inline void blkcg_drain_queue(struct request_queue *q) { }
 static inline void blkcg_exit_queue(struct request_queue *q) { }
 static inline int blkio_policy_register(struct blkio_policy_type *blkiop) { return 0; }
 static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
+static inline int blkcg_activate_policy(struct request_queue *q,
+					const struct blkio_policy_type *pol) { return 0; }
+static inline void blkcg_deactivate_policy(struct request_queue *q,
+					   const struct blkio_policy_type *pol) { }
 static inline void blkg_destroy_all(struct request_queue *q,
 				    bool destory_root) { }
-static inline void update_root_blkg_pd(struct request_queue *q,
-				       const struct blkio_policy_type *pol) { }
 
 static inline void *blkg_to_pdata(struct blkio_group *blkg,
 				  struct blkio_policy_type *pol) { return NULL; }
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 8c520fad6885..2fc964e06ea4 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -995,35 +995,31 @@ static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
 	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
 	struct blkg_conf_ctx ctx;
 	struct throtl_grp *tg;
+	struct throtl_data *td;
 	int ret;
 
 	ret = blkg_conf_prep(blkcg, &blkio_policy_throtl, buf, &ctx);
 	if (ret)
 		return ret;
 
-	ret = -EINVAL;
 	tg = blkg_to_tg(ctx.blkg);
-	if (tg) {
-		struct throtl_data *td = ctx.blkg->q->td;
-
-		if (!ctx.v)
-			ctx.v = -1;
+	td = ctx.blkg->q->td;
 
-		if (is_u64)
-			*(u64 *)((void *)tg + cft->private) = ctx.v;
-		else
-			*(unsigned int *)((void *)tg + cft->private) = ctx.v;
+	if (!ctx.v)
+		ctx.v = -1;
 
-		/* XXX: we don't need the following deferred processing */
-		xchg(&tg->limits_changed, true);
-		xchg(&td->limits_changed, true);
-		throtl_schedule_delayed_work(td, 0);
+	if (is_u64)
+		*(u64 *)((void *)tg + cft->private) = ctx.v;
+	else
+		*(unsigned int *)((void *)tg + cft->private) = ctx.v;
 
-		ret = 0;
-	}
+	/* XXX: we don't need the following deferred processing */
+	xchg(&tg->limits_changed, true);
+	xchg(&td->limits_changed, true);
+	throtl_schedule_delayed_work(td, 0);
 
 	blkg_conf_finish(&ctx);
-	return ret;
+	return 0;
 }
 
 static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
@@ -1230,7 +1226,7 @@ void blk_throtl_drain(struct request_queue *q)
 int blk_throtl_init(struct request_queue *q)
 {
 	struct throtl_data *td;
-	struct blkio_group *blkg;
+	int ret;
 
 	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
 	if (!td)
@@ -1243,28 +1239,18 @@ int blk_throtl_init(struct request_queue *q)
 	q->td = td;
 	td->queue = q;
 
-	/* alloc and init root group. */
-	rcu_read_lock();
-	spin_lock_irq(q->queue_lock);
-
-	blkg = blkg_lookup_create(&blkio_root_cgroup, q, true);
-	if (!IS_ERR(blkg))
-		q->root_blkg = blkg;
-
-	spin_unlock_irq(q->queue_lock);
-	rcu_read_unlock();
-
-	if (!q->root_blkg) {
+	/* activate policy */
+	ret = blkcg_activate_policy(q, &blkio_policy_throtl);
+	if (ret)
 		kfree(td);
-		return -ENOMEM;
-	}
-	return 0;
+	return ret;
 }
 
 void blk_throtl_exit(struct request_queue *q)
 {
 	BUG_ON(!q->td);
 	throtl_shutdown_wq(q);
+	blkcg_deactivate_policy(q, &blkio_policy_throtl);
 	kfree(q->td);
 }
 
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 86440e04f3ee..0203652e1f34 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1406,8 +1406,7 @@ static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
 
 	ret = -EINVAL;
 	cfqg = blkg_to_cfqg(ctx.blkg);
-	if (cfqg && (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN &&
-				ctx.v <= CFQ_WEIGHT_MAX))) {
+	if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
 		cfqg->dev_weight = ctx.v;
 		cfqg->new_weight = cfqg->dev_weight ?: blkcg->cfq_weight;
 		ret = 0;
@@ -3938,7 +3937,7 @@ static void cfq_exit_queue(struct elevator_queue *e)
 #ifndef CONFIG_CFQ_GROUP_IOSCHED
 	kfree(cfqd->root_group);
 #endif
-	update_root_blkg_pd(q, &blkio_policy_cfq);
+	blkcg_deactivate_policy(q, &blkio_policy_cfq);
 	kfree(cfqd);
 }
 
@@ -3946,7 +3945,7 @@ static int cfq_init_queue(struct request_queue *q)
 {
 	struct cfq_data *cfqd;
 	struct blkio_group *blkg __maybe_unused;
-	int i;
+	int i, ret;
 
 	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
 	if (!cfqd)
@@ -3960,28 +3959,20 @@ static int cfq_init_queue(struct request_queue *q)
 
 	/* Init root group and prefer root group over other groups by default */
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
-	rcu_read_lock();
-	spin_lock_irq(q->queue_lock);
-
-	blkg = blkg_lookup_create(&blkio_root_cgroup, q, true);
-	if (!IS_ERR(blkg)) {
-		q->root_blkg = blkg;
-		cfqd->root_group = blkg_to_cfqg(blkg);
-	}
+	ret = blkcg_activate_policy(q, &blkio_policy_cfq);
+	if (ret)
+		goto out_free;
 
-	spin_unlock_irq(q->queue_lock);
-	rcu_read_unlock();
+	cfqd->root_group = blkg_to_cfqg(q->root_blkg);
 #else
+	ret = -ENOMEM;
 	cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
 					GFP_KERNEL, cfqd->queue->node);
-	if (cfqd->root_group)
-		cfq_init_cfqg_base(cfqd->root_group);
-#endif
-	if (!cfqd->root_group) {
-		kfree(cfqd);
-		return -ENOMEM;
-	}
+	if (!cfqd->root_group)
+		goto out_free;
 
+	cfq_init_cfqg_base(cfqd->root_group);
+#endif
 	cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
 
 	/*
@@ -4031,6 +4022,10 @@ static int cfq_init_queue(struct request_queue *q)
 	 */
 	cfqd->last_delayed_sync = jiffies - HZ;
 	return 0;
+
+out_free:
+	kfree(cfqd);
+	return ret;
 }
 
 /*
diff --git a/block/elevator.c b/block/elevator.c
index be3ab6df0fea..6a55d418896f 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -896,8 +896,6 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	ioc_clear_queue(q);
 	spin_unlock_irq(q->queue_lock);
 
-	blkg_destroy_all(q, false);
-
 	/* allocate, init and register new elevator */
 	err = -ENOMEM;
 	q->elevator = elevator_alloc(q, new_e);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b01c377fd739..68720ab275d4 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -370,6 +370,7 @@ struct request_queue {
 
 	struct list_head icq_list;
 #ifdef CONFIG_BLK_CGROUP
+	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
 	struct blkio_group	*root_blkg;
 	struct list_head	blkg_list;
 #endif