author     Bart Van Assche <bvanassche@acm.org>       2012-12-06 08:32:01 -0500
committer  Jens Axboe <axboe@kernel.dk>               2012-12-06 08:32:01 -0500
commit     c246e80d86736312933646896c4157daf511dadc (patch)
tree       183119080e120f5a3e98edf5bb824e940e5b8f18 /include/linux/blkdev.h
parent     807592a4fafba1fea6e98b9cf1fb02b7c38fb24c (diff)
block: Avoid that request_fn is invoked on a dead queue
A block driver may start cleaning up the resources needed by its
request_fn as soon as blk_cleanup_queue() has finished, so request_fn
must not be invoked after draining has finished. This matters in
particular when blk_run_queue() is invoked while no requests are in
progress. As an example, if blk_drain_queue() and scsi_run_queue() run
in parallel, blk_drain_queue() may have finished all requests after
scsi_run_queue() has taken a SCSI device off the starved list but
before the latter function has had a chance to run the queue.
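The guard that this new flag enables is not part of the diffstat-limited view below (it lives in the block core), but the race described above can be modelled in plain user-space C: a run-queue helper tests a "dead" bit first and returns without calling request_fn once tear-down has completed. This is only a sketch; mock_queue, mock_run_queue and MOCK_QUEUE_FLAG_DEAD are made-up stand-ins, not kernel definitions.

#include <stdio.h>

/* Mirrors the idea of QUEUE_FLAG_DEAD: set once queue tear-down has finished. */
#define MOCK_QUEUE_FLAG_DEAD  19

struct mock_queue {
	unsigned long flags;
	void (*request_fn)(struct mock_queue *q);
};

static int mock_queue_dead(const struct mock_queue *q)
{
	return (q->flags >> MOCK_QUEUE_FLAG_DEAD) & 1UL;
}

/* Model of "running the queue": bail out if tear-down already finished,
 * so request_fn never touches resources the driver has released. */
static void mock_run_queue(struct mock_queue *q)
{
	if (mock_queue_dead(q))
		return;
	q->request_fn(q);
}

static void mock_request_fn(struct mock_queue *q)
{
	(void)q;
	printf("request_fn invoked\n");
}

int main(void)
{
	struct mock_queue q = { .flags = 0, .request_fn = mock_request_fn };

	mock_run_queue(&q);                      /* runs: queue still alive */
	q.flags |= 1UL << MOCK_QUEUE_FLAG_DEAD;  /* blk_cleanup_queue() has finished */
	mock_run_queue(&q);                      /* silently skipped: queue is dead */
	return 0;
}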
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Cc: James Bottomley <JBottomley@Parallels.com>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: Chanho Min <chanho.min@lge.com>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--   include/linux/blkdev.h   2
1 file changed, 2 insertions, 0 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index aba8246afe72..8bc46c250ca4 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -452,6 +452,7 @@ struct request_queue {
 #define QUEUE_FLAG_ADD_RANDOM  16   /* Contributes to random pool */
 #define QUEUE_FLAG_SECDISCARD  17   /* supports SECDISCARD */
 #define QUEUE_FLAG_SAME_FORCE  18   /* force complete on same CPU */
+#define QUEUE_FLAG_DEAD        19   /* queue tear-down finished */
 
 #define QUEUE_FLAG_DEFAULT     ((1 << QUEUE_FLAG_IO_STAT) |        \
                                 (1 << QUEUE_FLAG_STACKABLE)    |   \
@@ -522,6 +523,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 #define blk_queue_tagged(q)    test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)   test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_dying(q)     test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
+#define blk_queue_dead(q)      test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
 #define blk_queue_bypass(q)    test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
 #define blk_queue_nomerges(q)  test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q) \
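With the two added lines above, block-layer code can test for a fully torn-down queue with the same test_bit pattern used for the other queue flags. The caller below is purely illustrative (the real call sites are in block/blk-core.c and are not part of this include/linux/blkdev.h view); it assumes, as the block core does, that q->queue_lock is held around the check.

#include <linux/blkdev.h>

/* Illustrative only: skip running a queue whose tear-down has finished.
 * Caller is assumed to hold q->queue_lock. */
static void example_run_queue(struct request_queue *q)
{
	if (unlikely(blk_queue_dead(q)))
		return;           /* QUEUE_FLAG_DEAD set: request_fn must not run */

	q->request_fn(q);         /* queue still alive, safe to dispatch */
}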