author	Jiang Biao <jiang.biao2@zte.com.cn>	2018-04-19 00:06:09 -0400
committer	Jens Axboe <axboe@kernel.dk>	2018-04-19 10:51:59 -0400
commit	901932a3f9b2b80352896be946c6d577c0a9652c (patch)
tree	164d6ced670758d58973b555dc54126a88d17773
parent	bea548831b8cee347181132eacd8b9711dfced92 (diff)
blkcg: init root blkcg_gq under lock
The initialization of q->root_blkg is currently done outside of the queue
lock and RCU, so the blkg may be destroyed before the initialization
completes, which can leave dangling/NULL references. The destruction paths,
on the other hand, are protected by the queue lock or RCU. Move the
initialization inside the queue lock and the RCU read-side critical section
to make it safe.

Signed-off-by: Jiang Biao <jiang.biao2@zte.com.cn>
Signed-off-by: Wen Yang <wen.yang99@zte.com.cn>
CC: Tejun Heo <tj@kernel.org>
CC: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
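Illustrative sketch (not part of the patch): a minimal userspace C analogue of
the pattern the patch applies, using a pthread mutex as a stand-in for the
kernel's queue_lock. The names (queue_lock, root_blkg, init_queue, destroy_all)
mirror the kernel ones but are assumptions for illustration only; the point is
that the new pointer is published while the lock that serializes the destroy
path is still held, instead of after it is dropped.

/*
 * Userspace sketch of "publish under the lock", simplified from the
 * pattern in blkcg_init_queue().  Illustrative only, not kernel code.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct blkg { int dummy; };

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct blkg *root_blkg;		/* analogue of q->root_blkg */

/* destroy path: always runs under queue_lock, like blkg_destroy_all() */
static void destroy_all(void)
{
	pthread_mutex_lock(&queue_lock);
	free(root_blkg);
	root_blkg = NULL;
	pthread_mutex_unlock(&queue_lock);
}

/* init path after the patch: create AND publish under the same lock */
static int init_queue(void)
{
	struct blkg *blkg = calloc(1, sizeof(*blkg));

	if (!blkg)
		return -1;

	pthread_mutex_lock(&queue_lock);
	root_blkg = blkg;		/* published before the lock drops */
	pthread_mutex_unlock(&queue_lock);

	/*
	 * Before the patch the assignment happened here, after the unlock:
	 * a concurrent destroy_all() could free blkg in that window, and
	 * root_blkg would then point at freed memory (dangling reference).
	 */
	return 0;
}

int main(void)
{
	if (init_queue())
		return 1;
	destroy_all();
	printf("root_blkg is %p after destroy\n", (void *)root_blkg);
	return 0;
}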
-rw-r--r--	block/blk-cgroup.c	17
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 79da2a723b68..eb85cb87c40f 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1181,18 +1181,16 @@ int blkcg_init_queue(struct request_queue *q)
 	rcu_read_lock();
 	spin_lock_irq(q->queue_lock);
 	blkg = blkg_create(&blkcg_root, q, new_blkg);
+	if (IS_ERR(blkg))
+		goto err_unlock;
+	q->root_blkg = blkg;
+	q->root_rl.blkg = blkg;
 	spin_unlock_irq(q->queue_lock);
 	rcu_read_unlock();
 
 	if (preloaded)
 		radix_tree_preload_end();
 
-	if (IS_ERR(blkg))
-		return PTR_ERR(blkg);
-
-	q->root_blkg = blkg;
-	q->root_rl.blkg = blkg;
-
 	ret = blk_throtl_init(q);
 	if (ret) {
 		spin_lock_irq(q->queue_lock);
@@ -1200,6 +1198,13 @@ int blkcg_init_queue(struct request_queue *q)
 		spin_unlock_irq(q->queue_lock);
 	}
 	return ret;
+
+err_unlock:
+	spin_unlock_irq(q->queue_lock);
+	rcu_read_unlock();
+	if (preloaded)
+		radix_tree_preload_end();
+	return PTR_ERR(blkg);
 }
 
 /**