author		Tejun Heo <tj@kernel.org>	2011-10-19 08:42:16 -0400
committer	Jens Axboe <axboe@kernel.dk>	2011-10-19 08:42:16 -0400
commit		c9a929dde3913780b5c416f4bb9d9ed804f509ce (patch)
tree		1acadc374d8f1faebdf07f08fae0993a38a8fd0d /block/blk-throttle.c
parent		bd87b5898a72b1aef6acf3705c61c9f6372adf0c (diff)
block: fix request_queue lifetime handling by making blk_cleanup_queue() properly shut down
request_queue is refcounted but actually depends on lifetime management from the queue owner - on blk_cleanup_queue(), the block layer expects that there's no request passing through request_queue and no new one will.

This is fundamentally broken. The queue owner (e.g. the SCSI layer) doesn't have a way to know whether there are other active users before calling blk_cleanup_queue(), and other users (e.g. bsg) don't have any guarantee that the queue is and will stay valid while they're holding a reference.

With a delay added in blk_queue_bio() before queue_lock is grabbed, the following oops can easily be triggered when a device is removed with in-flight IOs.

 sd 0:0:1:0: [sdb] Stopping disk
 ata1.01: disabled
 general protection fault: 0000 [#1] PREEMPT SMP
 CPU 2
 Modules linked in:
 Pid: 648, comm: test_rawio Not tainted 3.1.0-rc3-work+ #56 Bochs Bochs
 RIP: 0010:[<ffffffff8137d651>]  [<ffffffff8137d651>] elv_rqhash_find+0x61/0x100
 ...
 Process test_rawio (pid: 648, threadinfo ffff880019efa000, task ffff880019ef8a80)
 ...
 Call Trace:
  [<ffffffff8137d774>] elv_merge+0x84/0xe0
  [<ffffffff81385b54>] blk_queue_bio+0xf4/0x400
  [<ffffffff813838ea>] generic_make_request+0xca/0x100
  [<ffffffff81383994>] submit_bio+0x74/0x100
  [<ffffffff811c53ec>] dio_bio_submit+0xbc/0xc0
  [<ffffffff811c610e>] __blockdev_direct_IO+0x92e/0xb40
  [<ffffffff811c39f7>] blkdev_direct_IO+0x57/0x60
  [<ffffffff8113b1c5>] generic_file_aio_read+0x6d5/0x760
  [<ffffffff8118c1ca>] do_sync_read+0xda/0x120
  [<ffffffff8118ce55>] vfs_read+0xc5/0x180
  [<ffffffff8118cfaa>] sys_pread64+0x9a/0xb0
  [<ffffffff81afaf6b>] system_call_fastpath+0x16/0x1b

This happens because blk_cleanup_queue() destroys the queue and elevator whether IOs are in progress or not, and DEAD tests are sprinkled in the request processing path without proper synchronization.

A similar problem exists for blk-throtl. On queue cleanup, blk-throtl is shut down whether it has requests in it or not. Depending on timing, it either oopses or throttled bios are lost, putting tasks which are waiting for bio completion into eternal D state.

The way it should work is having the usual clear distinction between shutdown and release. Shutdown drains all currently pending requests, marks the queue dead, and performs partial teardown of the now unnecessary parts of the queue. Even after shutdown is complete, reference holders are still allowed to issue requests to the queue, although they will be immediately failed. The rest of the teardown happens on release.

This patch makes the following changes so that blk_cleanup_queue() behaves as a proper shutdown (a rough sketch of the resulting flow follows the message):

* QUEUE_FLAG_DEAD is now set while holding both q->exit_mutex and queue_lock.

* The unsynchronized DEAD check in generic_make_request_checks() is removed. It couldn't make any meaningful difference, as the queue could die right after the check.

* blk_drain_queue() is updated such that it can drain all requests and is now called during cleanup.

* blk_throtl is updated such that it checks DEAD on grabbing queue_lock, drains all throttled bios during cleanup, and frees td when the queue is released.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
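[Editor's note] To make the shutdown/release split concrete, here is a minimal C sketch of the cleanup flow this patch aims for. It is illustrative rather than the patch verbatim: blk_drain_queue() and blk_throtl_drain() are the helpers this series introduces, q->exit_mutex is the mutex named in the list above, and the locking and teardown details are simplified.

void blk_cleanup_queue(struct request_queue *q)
{
	/*
	 * Shutdown: mark @q DEAD under both locks so that submitters
	 * checking the flag under queue_lock observe it reliably.
	 */
	mutex_lock(&q->exit_mutex);
	spin_lock_irq(q->queue_lock);
	queue_flag_set(QUEUE_FLAG_DEAD, q);
	spin_unlock_irq(q->queue_lock);
	mutex_unlock(&q->exit_mutex);

	/* drain everything in flight, including currently throttled bios */
	blk_drain_queue(q, true);

	/* @q won't process any more requests; flush async actions */
	blk_sync_queue(q);

	/* drop the cleanup reference; the final put runs the release side,
	 * which frees the remaining state (e.g. td via blk_throtl_release()) */
	blk_put_queue(q);
}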
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--	block/blk-throttle.c	50
1 file changed, 44 insertions(+), 6 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 900a0c98745b..8edb9499b509 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -309,6 +309,10 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	struct blkio_cgroup *blkcg;
 	struct request_queue *q = td->queue;
 
+	/* no throttling for dead queue */
+	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+		return NULL;
+
 	rcu_read_lock();
 	blkcg = task_blkio_cgroup(current);
 	tg = throtl_find_tg(td, blkcg);
@@ -1001,11 +1005,6 @@ static void throtl_release_tgs(struct throtl_data *td)
 	}
 }
 
-static void throtl_td_free(struct throtl_data *td)
-{
-	kfree(td);
-}
-
 /*
  * Blk cgroup controller notification saying that blkio_group object is being
  * delinked as associated cgroup object is going away. That also means that
@@ -1204,6 +1203,41 @@ out:
 	return throttled;
 }
 
+/**
+ * blk_throtl_drain - drain throttled bios
+ * @q: request_queue to drain throttled bios for
+ *
+ * Dispatch all currently throttled bios on @q through ->make_request_fn().
+ */
+void blk_throtl_drain(struct request_queue *q)
+	__releases(q->queue_lock) __acquires(q->queue_lock)
+{
+	struct throtl_data *td = q->td;
+	struct throtl_rb_root *st = &td->tg_service_tree;
+	struct throtl_grp *tg;
+	struct bio_list bl;
+	struct bio *bio;
+
+	lockdep_assert_held(q->queue_lock);
+
+	bio_list_init(&bl);
+
+	while ((tg = throtl_rb_first(st))) {
+		throtl_dequeue_tg(td, tg);
+
+		while ((bio = bio_list_peek(&tg->bio_lists[READ])))
+			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
+		while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
+			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
+	}
+	spin_unlock_irq(q->queue_lock);
+
+	while ((bio = bio_list_pop(&bl)))
+		generic_make_request(bio);
+
+	spin_lock_irq(q->queue_lock);
+}
+
 int blk_throtl_init(struct request_queue *q)
 {
 	struct throtl_data *td;
@@ -1276,7 +1310,11 @@ void blk_throtl_exit(struct request_queue *q)
 	 * it.
 	 */
 	throtl_shutdown_wq(q);
-	throtl_td_free(td);
 }
 
+void blk_throtl_release(struct request_queue *q)
+{
+	kfree(q->td);
+}
+
 static int __init throtl_init(void)
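
[Editor's note] For the drain side, a rough sketch of the cleanup-time caller that pairs with blk_throtl_drain() above. The loop shape and the exit test are illustrative assumptions about the blk_drain_queue() helper described in the commit message, not the exact code; note that blk_throtl_drain() is called with queue_lock held and drops/retakes it internally while dispatching.

static void blk_drain_queue(struct request_queue *q, bool drain_all)
{
	while (true) {
		spin_lock_irq(q->queue_lock);

		elv_drain_elevator(q);		/* push out elevator-held requests */
		if (drain_all)
			blk_throtl_drain(q);	/* reissue throttled bios */
		__blk_run_queue(q);		/* let the driver consume them */

		spin_unlock_irq(q->queue_lock);

		/* done once nothing remains allocated against @q */
		if (!q->rq.count[BLK_RW_SYNC] && !q->rq.count[BLK_RW_ASYNC])
			break;
		msleep(10);
	}
}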