author	Tejun Heo <tj@kernel.org>	2011-12-13 18:33:37 -0500
committer	Jens Axboe <axboe@kernel.dk>	2011-12-13 18:33:37 -0500
commit	8ba61435d73f2274e12d4d823fde06735e8f6a54 (patch)
tree	4b63993dc0fdc26918bd990fb47a142b8d24ef80
parent	481a7d64790cd7ca61a8bbcbd9d017ce58e6fe39 (diff)
block: add missing blk_queue_dead() checks
blk_insert_cloned_request(), blk_execute_rq_nowait() and blk_flush_plug_list() either didn't check whether the queue was dead or did it without holding queue_lock. Update them so that dead state is checked while holding queue_lock. AFAICS, this plugs all holes (requeue doesn't matter as the request is transitioning atomically from in_flight to queued).

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
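The pattern every touched path now follows is "lock, then test for death": blk_queue_dead(q) is only trustworthy while q->queue_lock is held, because queue teardown sets the dead flag under that same lock. Below is a minimal user-space sketch of the idea, modeling queue_lock with a pthread mutex; fake_queue, mark_dead() and submit() are hypothetical names for illustration, not kernel API.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct fake_queue {
	pthread_mutex_t lock;	/* stands in for q->queue_lock */
	bool dead;		/* stands in for the queue's dead flag */
};

/* Teardown side: mark the queue dead under the lock. */
static void mark_dead(struct fake_queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->dead = true;
	pthread_mutex_unlock(&q->lock);
}

/*
 * Submission side, as the patch arranges it: acquire the lock first,
 * then test the flag. Testing before locking would let a submitter
 * race with teardown and keep going against a dead queue.
 */
static int submit(struct fake_queue *q)
{
	pthread_mutex_lock(&q->lock);
	if (q->dead) {
		pthread_mutex_unlock(&q->lock);
		return -ENODEV;	/* same error the diff returns */
	}
	/* ... queue the request while still holding the lock ... */
	pthread_mutex_unlock(&q->lock);
	return 0;
}

int main(void)
{
	struct fake_queue q = { PTHREAD_MUTEX_INITIALIZER, false };

	submit(&q);		/* queue alive: succeeds */
	mark_dead(&q);
	return submit(&q) == -ENODEV ? 0 : 1;	/* dead: fails cleanly */
}

The same ordering shows up in the blk-exec.c hunk below: WARN_ON(irqs_disabled()) and spin_lock_irq() move ahead of the blk_queue_dead() test, so the test is made under the lock rather than after it.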
-rw-r--r--	block/blk-core.c	21
-rw-r--r--	block/blk-exec.c	6
2 files changed, 25 insertions(+), 2 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index c37e9e7c9d07..30add45a87ef 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1731,6 +1731,10 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 		return -EIO;
 
 	spin_lock_irqsave(q->queue_lock, flags);
+	if (unlikely(blk_queue_dead(q))) {
+		spin_unlock_irqrestore(q->queue_lock, flags);
+		return -ENODEV;
+	}
 
 	/*
 	 * Submitting request must be dequeued before calling this function
@@ -2705,6 +2709,14 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 	trace_block_unplug(q, depth, !from_schedule);
 
 	/*
+	 * Don't mess with dead queue.
+	 */
+	if (unlikely(blk_queue_dead(q))) {
+		spin_unlock(q->queue_lock);
+		return;
+	}
+
+	/*
 	 * If we are punting this to kblockd, then we can safely drop
 	 * the queue_lock before waking kblockd (which needs to take
 	 * this lock).
@@ -2780,6 +2792,15 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 			depth = 0;
 			spin_lock(q->queue_lock);
 		}
+
+		/*
+		 * Short-circuit if @q is dead
+		 */
+		if (unlikely(blk_queue_dead(q))) {
+			__blk_end_request_all(rq, -ENODEV);
+			continue;
+		}
+
 		/*
 		 * rq is already accounted, so use raw insert
 		 */
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 60532852b3ab..fb2cbd551621 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -50,7 +50,11 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 {
 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
 
+	WARN_ON(irqs_disabled());
+	spin_lock_irq(q->queue_lock);
+
 	if (unlikely(blk_queue_dead(q))) {
+		spin_unlock_irq(q->queue_lock);
 		rq->errors = -ENXIO;
 		if (rq->end_io)
 			rq->end_io(rq, rq->errors);
@@ -59,8 +63,6 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 
 	rq->rq_disk = bd_disk;
 	rq->end_io = done;
-	WARN_ON(irqs_disabled());
-	spin_lock_irq(q->queue_lock);
 	__elv_add_request(q, rq, where);
 	__blk_run_queue(q);
 	/* the queue is stopped so it won't be run */