author	Bart Van Assche <bvanassche@acm.org>	2012-12-06 08:32:01 -0500
committer	Jens Axboe <axboe@kernel.dk>	2012-12-06 08:32:01 -0500
commit	c246e80d86736312933646896c4157daf511dadc (patch)
tree	183119080e120f5a3e98edf5bb824e940e5b8f18
parent	807592a4fafba1fea6e98b9cf1fb02b7c38fb24c (diff)
block: Avoid that request_fn is invoked on a dead queue
A block driver may start cleaning up resources needed by its request_fn
as soon as blk_cleanup_queue() finished, so request_fn must not be
invoked after draining finished. This is important when blk_run_queue()
is invoked without any requests in progress.

As an example, if blk_drain_queue() and scsi_run_queue() run in
parallel, blk_drain_queue() may have finished all requests after
scsi_run_queue() has taken a SCSI device off the starved list but
before that last function has had a chance to run the queue.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Cc: James Bottomley <JBottomley@Parallels.com>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: Chanho Min <chanho.min@lge.com>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
 block/blk-core.c       | 31 +++++++++++++++++++++++++++++++++----
 block/blk-exec.c       |  2 +-
 block/blk.h            |  2 ++
 include/linux/blkdev.h |  2 ++
 4 files changed, 32 insertions(+), 5 deletions(-)
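To make the race described in the commit message concrete, here is a minimal user-space model of the pattern; all names in it (struct queue, run_queue_uncond, cleanup_queue) are hypothetical stand-ins for their kernel counterparts, not code from this patch:

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical stand-ins for the kernel structures; illustration only. */
struct queue {
	atomic_bool dead;			/* mirrors QUEUE_FLAG_DEAD */
	void (*request_fn)(struct queue *q);
};

/* Mirrors __blk_run_queue_uncond(): never enter request_fn on a dead queue. */
static void run_queue_uncond(struct queue *q)
{
	if (atomic_load(&q->dead))		/* set once draining finished */
		return;
	q->request_fn(q);
}

/* Mirrors the tail of blk_cleanup_queue(): drain, then mark DEAD, so a
 * concurrent run_queue_uncond() can no longer reach request_fn. */
static void cleanup_queue(struct queue *q)
{
	/* ... drain all pending requests ... */
	atomic_store(&q->dead, true);
	/* the driver may now free the resources request_fn depends on */
}

In the patch itself the flag is tested and set under q->queue_lock rather than with atomics; the lock, not the flag alone, is what closes the window between the dead-queue check and the request_fn call.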
diff --git a/block/blk-core.c b/block/blk-core.c
index a182b586b06a..f52d05ff5d24 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -293,6 +293,25 @@ void blk_sync_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_sync_queue);
 
 /**
+ * __blk_run_queue_uncond - run a queue whether or not it has been stopped
+ * @q:	The queue to run
+ *
+ * Description:
+ *    Invoke request handling on a queue if there are any pending requests.
+ *    May be used to restart request handling after a request has completed.
+ *    This variant runs the queue whether or not the queue has been
+ *    stopped. Must be called with the queue lock held and interrupts
+ *    disabled. See also @blk_run_queue.
+ */
+inline void __blk_run_queue_uncond(struct request_queue *q)
+{
+	if (unlikely(blk_queue_dead(q)))
+		return;
+
+	q->request_fn(q);
+}
+
+/**
  * __blk_run_queue - run a single device queue
  * @q:	The queue to run
  *
@@ -305,7 +324,7 @@ void __blk_run_queue(struct request_queue *q)
 	if (unlikely(blk_queue_stopped(q)))
 		return;
 
-	q->request_fn(q);
+	__blk_run_queue_uncond(q);
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
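Per the new kerneldoc above, __blk_run_queue_uncond() must be entered with the queue lock held and interrupts disabled. A block-layer caller would therefore look roughly like this; a sketch only, and mydev_kick_queue is a made-up name:

static void mydev_kick_queue(struct request_queue *q)
{
	unsigned long flags;

	/* The queue lock must be held with interrupts disabled. */
	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue_uncond(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}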
@@ -477,8 +496,8 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
  *
- * Mark @q DYING, drain all pending requests, destroy and put it.  All
- * future requests will be failed immediately with -ENODEV.
+ * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
+ * put it.  All future requests will be failed immediately with -ENODEV.
  */
 void blk_cleanup_queue(struct request_queue *q)
 {
@@ -507,9 +526,13 @@ void blk_cleanup_queue(struct request_queue *q)
 	spin_unlock_irq(lock);
 	mutex_unlock(&q->sysfs_lock);
 
-	/* drain all requests queued before DYING marking */
+	/*
+	 * Drain all requests queued before DYING marking. Set DEAD flag to
+	 * prevent that q->request_fn() gets invoked after draining finished.
+	 */
 	spin_lock_irq(lock);
 	__blk_drain_queue(q, true);
+	queue_flag_set(QUEUE_FLAG_DEAD, q);
 	spin_unlock_irq(lock);
 
 	/* @q won't process any more request, flush async actions */
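With QUEUE_FLAG_DEAD set under the queue lock right after draining, a driver can rely on request_fn never being entered once blk_cleanup_queue() returns, which is exactly the guarantee the commit message asks for. A hypothetical driver teardown that depends on it could look like this; all mydev_* names are illustrative, not from the tree:

#include <linux/blkdev.h>
#include <linux/io.h>
#include <linux/slab.h>

/* Hypothetical driver state; illustration only. */
struct mydev {
	struct request_queue *queue;
	void *cmd_ring;			/* consumed by the driver's request_fn */
	void __iomem *regs;
};

static void mydev_remove(struct mydev *dev)
{
	/*
	 * Marks the queue DYING, drains it, and (after this patch) marks it
	 * DEAD. Once this returns, the block layer no longer calls our
	 * request_fn.
	 */
	blk_cleanup_queue(dev->queue);

	/* Hence it is now safe to free what the request_fn depended on. */
	kfree(dev->cmd_ring);
	iounmap(dev->regs);
}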
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 4aec98df7ba5..1320e74d79b8 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -72,7 +72,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	__blk_run_queue(q);
 	/* the queue is stopped so it won't be run */
 	if (rq->cmd_type == REQ_TYPE_PM_RESUME)
-		q->request_fn(q);
+		__blk_run_queue_uncond(q);
 	spin_unlock_irq(q->queue_lock);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
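The REQ_TYPE_PM_RESUME branch above exists because resume requests are issued while the queue is still stopped. A rough sketch of such a caller follows; issue_pm_resume is a made-up name and this is an assumption about how the path is reached, not code from the tree:

#include <linux/blkdev.h>

static void issue_pm_resume(struct request_queue *q, rq_end_io_fn *done)
{
	/* Error handling omitted for brevity. */
	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);

	rq->cmd_type = REQ_TYPE_PM_RESUME;
	/*
	 * The queue is stopped during suspend, so the normal "stopped" check
	 * must be bypassed; __blk_run_queue_uncond() still refuses to run a
	 * DEAD queue, which is the point of this patch.
	 */
	blk_execute_rq_nowait(q, NULL, rq, 1 /* at_head */, done);
}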
diff --git a/block/blk.h b/block/blk.h
index 2218a8a78292..47fdfdd41520 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -145,6 +145,8 @@ int blk_try_merge(struct request *rq, struct bio *bio);
 
 void blk_queue_congestion_threshold(struct request_queue *q);
 
+void __blk_run_queue_uncond(struct request_queue *q);
+
 int blk_dev_init(void);
 
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index aba8246afe72..8bc46c250ca4 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -452,6 +452,7 @@ struct request_queue {
 #define QUEUE_FLAG_ADD_RANDOM  16	/* Contributes to random pool */
 #define QUEUE_FLAG_SECDISCARD  17	/* supports SECDISCARD */
 #define QUEUE_FLAG_SAME_FORCE  18	/* force complete on same CPU */
+#define QUEUE_FLAG_DEAD        19	/* queue tear-down finished */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -522,6 +523,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
+#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
 #define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q)	\
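The new blk_queue_dead() test mirrors blk_queue_dying() one line above it. A purely defensive, hypothetical use inside a driver's request_fn could look like the sketch below; with this patch applied the block layer already guarantees the check never fires, and mydev_request_fn is a made-up name:

static void mydev_request_fn(struct request_queue *q)
{
	struct request *rq;

	/* Defensive only: __blk_run_queue_uncond() already filters this. */
	if (WARN_ON_ONCE(blk_queue_dead(q)))
		return;

	while ((rq = blk_fetch_request(q)) != NULL) {
		/* ... hand rq to the hardware ... */
		__blk_end_request_all(rq, 0);
	}
}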