author		Tejun Heo <tj@kernel.org>	2012-06-04 23:40:52 -0400
committer	Jens Axboe <axboe@kernel.dk>	2012-06-25 05:53:46 -0400
commit		159749937a3e1605068a454b1607cdc5714f16e6 (patch)
tree		28be38dcf91e5714c9e264da9c9502c5b642aa00 /block/blk-cgroup.c
parent		13589864be74736ca4e6def7376742eb1d2099bf (diff)
blkcg: make root blkcg allocation use %GFP_KERNEL
Currently, blkcg_activate_policy() depends on %GFP_ATOMIC allocation
from __blkg_lookup_create() for root blkcg creation.  This could make
policy fail unnecessarily.

Make blkg_alloc() take @gfp_mask, __blkg_lookup_create() take an
optional @new_blkg for preallocated blkg, and blkcg_activate_policy()
preload radix tree and preallocate blkg with %GFP_KERNEL before trying
to create the root blkg.

v2: __blkg_lookup_create() was returning %NULL on blkg alloc failure
    instead of ERR_PTR() value.  Fixed.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--	block/blk-cgroup.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++----------------
1 file changed, 43 insertions(+), 16 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index c3882bbbf0fc..96248d2578db 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -91,16 +91,18 @@ static void blkg_free(struct blkcg_gq *blkg)
  * blkg_alloc - allocate a blkg
  * @blkcg: block cgroup the new blkg is associated with
  * @q: request_queue the new blkg is associated with
+ * @gfp_mask: allocation mask to use
  *
  * Allocate a new blkg assocating @blkcg and @q.
  */
-static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
+static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
+				   gfp_t gfp_mask)
 {
 	struct blkcg_gq *blkg;
 	int i;
 
 	/* alloc and init base part */
-	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
+	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
 	if (!blkg)
 		return NULL;
 
@@ -117,7 +119,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
 			continue;
 
 		/* alloc per-policy data and attach it to blkg */
-		pd = kzalloc_node(pol->pd_size, GFP_ATOMIC, q->node);
+		pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
 		if (!pd) {
 			blkg_free(blkg);
 			return NULL;
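The two hunks above make blkg_alloc() honor a caller-supplied @gfp_mask instead of hard-coding %GFP_ATOMIC; this is what later lets blkcg_activate_policy() request a sleeping %GFP_KERNEL allocation, which can reclaim memory and therefore fails far less readily than an atomic one. A rough user-space sketch of threading such a flag through a constructor follows; every name in it is hypothetical, and plain malloc() has no atomic/blocking distinction, so the flag only documents caller context:

#include <stdlib.h>

/* hypothetical stand-in for gfp_t */
typedef unsigned int gfp_like_t;
#define MAY_SLEEP	0x1u	/* GFP_KERNEL-like: caller may block */
#define NO_SLEEP	0x0u	/* GFP_ATOMIC-like: caller holds a lock */

struct widget { int stats[16]; };

/* the constructor takes the mask instead of hard-coding NO_SLEEP,
 * so each call site picks the strictness its context requires */
static struct widget *widget_alloc(gfp_like_t gfp_mask)
{
	(void)gfp_mask;		/* illustrative only in user space */
	return calloc(1, sizeof(struct widget));
}

int main(void)
{
	struct widget *w = widget_alloc(MAY_SLEEP);	/* process context */

	free(w);
	return 0;
}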
@@ -175,8 +177,13 @@ struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blkg_lookup);
 
+/*
+ * If @new_blkg is %NULL, this function tries to allocate a new one as
+ * necessary using %GFP_ATOMIC. @new_blkg is always consumed on return.
+ */
 static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
-					     struct request_queue *q)
+					     struct request_queue *q,
+					     struct blkcg_gq *new_blkg)
 {
 	struct blkcg_gq *blkg;
 	int ret;
@@ -188,18 +195,24 @@ static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
 	blkg = __blkg_lookup(blkcg, q);
 	if (blkg) {
 		rcu_assign_pointer(blkcg->blkg_hint, blkg);
-		return blkg;
+		goto out_free;
 	}
 
 	/* blkg holds a reference to blkcg */
-	if (!css_tryget(&blkcg->css))
-		return ERR_PTR(-EINVAL);
+	if (!css_tryget(&blkcg->css)) {
+		blkg = ERR_PTR(-EINVAL);
+		goto out_free;
+	}
 
 	/* allocate */
-	ret = -ENOMEM;
-	blkg = blkg_alloc(blkcg, q);
-	if (unlikely(!blkg))
-		goto err_put;
+	if (!new_blkg) {
+		new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
+		if (unlikely(!new_blkg)) {
+			blkg = ERR_PTR(-ENOMEM);
+			goto out_put;
+		}
+	}
+	blkg = new_blkg;
 
 	/* insert */
 	spin_lock(&blkcg->lock);
@@ -212,10 +225,13 @@ static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
 
 	if (!ret)
 		return blkg;
-err_put:
+
+	blkg = ERR_PTR(ret);
+out_put:
 	css_put(&blkcg->css);
-	blkg_free(blkg);
-	return ERR_PTR(ret);
+out_free:
+	blkg_free(new_blkg);
+	return blkg;
 }
 
 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
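The rework above gives __blkg_lookup_create() the "consumed on return" contract its new comment describes: whether the function finds an existing blkg, installs the preallocation, or fails, the caller never owns @new_blkg afterwards, so a preallocating caller needs no cleanup logic of its own. A minimal user-space sketch of that ownership rule, using hypothetical names and NULL in place of the kernel's ERR_PTR() returns:

#include <stdlib.h>
#include <string.h>

struct entry { char name[32]; };

/*
 * Look up @name in the one-element "table" @slot, creating the entry
 * if it is missing.  @new is always consumed: installed on success,
 * freed on every other path, so callers may preallocate while blocking
 * is still allowed and never track whether the preallocation was used.
 */
static struct entry *lookup_create(struct entry **slot, const char *name,
				   struct entry *new)
{
	struct entry *e = *slot;

	if (e) {
		free(new);			/* consumed: not needed */
		return e;
	}
	if (!new) {
		new = calloc(1, sizeof(*new));	/* atomic-style fallback */
		if (!new)
			return NULL;		/* nothing left to free */
	}
	strncpy(new->name, name, sizeof(new->name) - 1);
	*slot = new;				/* consumed: installed */
	return new;
}

int main(void)
{
	struct entry *slot = NULL;
	struct entry *pre = calloc(1, sizeof(*pre));	/* may block here */

	lookup_create(&slot, "root", pre);	/* pre is consumed either way */
	free(slot);
	return 0;
}

Freeing @new on every path, including the lookup-hit path, is what makes the blind preallocation added to blkcg_activate_policy() below safe.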
@@ -227,7 +243,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 	 */
 	if (unlikely(blk_queue_bypass(q)))
 		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
-	return __blkg_lookup_create(blkcg, q);
+	return __blkg_lookup_create(blkcg, q, NULL);
 }
 EXPORT_SYMBOL_GPL(blkg_lookup_create);
 
@@ -726,19 +742,30 @@ int blkcg_activate_policy(struct request_queue *q,
 	struct blkcg_gq *blkg;
 	struct blkg_policy_data *pd, *n;
 	int cnt = 0, ret;
+	bool preloaded;
 
 	if (blkcg_policy_enabled(q, pol))
 		return 0;
 
+	/* preallocations for root blkg */
+	blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
+	if (!blkg)
+		return -ENOMEM;
+
+	preloaded = !radix_tree_preload(GFP_KERNEL);
+
 	blk_queue_bypass_start(q);
 
 	/* make sure the root blkg exists and count the existing blkgs */
 	spin_lock_irq(q->queue_lock);
 
 	rcu_read_lock();
-	blkg = __blkg_lookup_create(&blkcg_root, q);
+	blkg = __blkg_lookup_create(&blkcg_root, q, blkg);
 	rcu_read_unlock();
 
+	if (preloaded)
+		radix_tree_preload_end();
+
 	if (IS_ERR(blkg)) {
 		ret = PTR_ERR(blkg);
 		goto out_unlock;
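Here blkcg_activate_policy() pairs the %GFP_KERNEL blkg preallocation with radix_tree_preload(), which stocks a reserve of tree nodes up front so the radix_tree_insert() performed later under queue_lock cannot fail for lack of memory; radix_tree_preload_end() is called once the insert no longer needs the reserve. A rough user-space analogue of that preload / insert / preload_end sequence, with hypothetical names (the real API keeps its reserve in a per-CPU pool and disables preemption rather than freeing unused nodes):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct node { int key; struct node *next; };

static struct node *reserve;	/* per-CPU in the kernel, global here */

/* "may sleep" phase: stock the reserve with a blocking allocation */
static int map_preload(void)
{
	if (!reserve)
		reserve = malloc(sizeof(*reserve));
	return reserve ? 0 : -ENOMEM;
}

/* drop whatever the insert did not consume */
static void map_preload_end(void)
{
	free(reserve);
	reserve = NULL;
}

/* "no alloc" phase: insertion draws on the reserve only */
static int map_insert(struct node **head, int key)
{
	struct node *n = reserve;

	if (!n)
		return -ENOMEM;
	reserve = NULL;
	n->key = key;
	n->next = *head;
	*head = n;
	return 0;
}

int main(void)
{
	struct node *head = NULL;
	int preloaded = !map_preload();

	/* imagine queue_lock held here: allocation is not allowed */
	if (!map_insert(&head, 1))
		printf("inserted key %d\n", head->key);

	if (preloaded)
		map_preload_end();
	free(head);
	return 0;
}

As in the patch, a failed preload is tolerated: the insert then simply reports -ENOMEM instead of crashing, which is why the return value of the preload is remembered in a bool rather than treated as fatal.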