aboutsummaryrefslogtreecommitdiffstats
path: root/block
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2019-06-13 18:30:39 -0400
committerJens Axboe <axboe@kernel.dk>2019-06-15 12:39:39 -0400
commitef069b97feec11c2399bbc5f6f347b35482105dc (patch)
tree72014b442cbeacfac5af39651a51f33e28fcb2b9 /block
parentf539da82f2158916e154d206054e0efd5df7ab61 (diff)
blkcg: percpu_ref init/exit should be done from blkg_alloc/free()
blkg alloc is performed as a separate step from the rest of blkg creation so that GFP_KERNEL allocations can be used when creating blkgs from configuration file writes because otherwise user actions may fail due to failures of opportunistic GFP_NOWAIT allocations. While making blkgs use percpu_ref, 7fcf2b033b84 ("blkcg: change blkg reference counting to use percpu_ref") incorrectly added an unconditional opportunistic percpu_ref_init() to blkg_create(), breaking this guarantee. This patch moves percpu_ref_init() to blkg_alloc() so that it uses the @gfp_mask that blkg_alloc() is called with. Also, percpu_ref_exit() is moved to blkg_free() for consistency. Signed-off-by: Tejun Heo <tj@kernel.org> Fixes: 7fcf2b033b84 ("blkcg: change blkg reference counting to use percpu_ref") Cc: Dennis Zhou <dennis@kernel.org> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r--block/blk-cgroup.c13
1 file changed, 4 insertions, 9 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index e4715b35d42c..04d286934c5e 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -79,6 +79,7 @@ static void blkg_free(struct blkcg_gq *blkg)
79 79
80 blkg_rwstat_exit(&blkg->stat_ios); 80 blkg_rwstat_exit(&blkg->stat_ios);
81 blkg_rwstat_exit(&blkg->stat_bytes); 81 blkg_rwstat_exit(&blkg->stat_bytes);
82 percpu_ref_exit(&blkg->refcnt);
82 kfree(blkg); 83 kfree(blkg);
83} 84}
84 85
@@ -86,8 +87,6 @@ static void __blkg_release(struct rcu_head *rcu)
86{ 87{
87 struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head); 88 struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
88 89
89 percpu_ref_exit(&blkg->refcnt);
90
91 /* release the blkcg and parent blkg refs this blkg has been holding */ 90 /* release the blkcg and parent blkg refs this blkg has been holding */
92 css_put(&blkg->blkcg->css); 91 css_put(&blkg->blkcg->css);
93 if (blkg->parent) 92 if (blkg->parent)
@@ -132,6 +131,9 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
132 if (!blkg) 131 if (!blkg)
133 return NULL; 132 return NULL;
134 133
134 if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
135 goto err_free;
136
135 if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) || 137 if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
136 blkg_rwstat_init(&blkg->stat_ios, gfp_mask)) 138 blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
137 goto err_free; 139 goto err_free;
@@ -244,11 +246,6 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
244 blkg_get(blkg->parent); 246 blkg_get(blkg->parent);
245 } 247 }
246 248
247 ret = percpu_ref_init(&blkg->refcnt, blkg_release, 0,
248 GFP_NOWAIT | __GFP_NOWARN);
249 if (ret)
250 goto err_cancel_ref;
251
252 /* invoke per-policy init */ 249 /* invoke per-policy init */
253 for (i = 0; i < BLKCG_MAX_POLS; i++) { 250 for (i = 0; i < BLKCG_MAX_POLS; i++) {
254 struct blkcg_policy *pol = blkcg_policy[i]; 251 struct blkcg_policy *pol = blkcg_policy[i];
@@ -281,8 +278,6 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
281 blkg_put(blkg); 278 blkg_put(blkg);
282 return ERR_PTR(ret); 279 return ERR_PTR(ret);
283 280
284err_cancel_ref:
285 percpu_ref_exit(&blkg->refcnt);
286err_put_congested: 281err_put_congested:
287 wb_congested_put(wb_congested); 282 wb_congested_put(wb_congested);
288err_put_css: 283err_put_css: