-rw-r--r--	block/blk-core.c	21
-rw-r--r--	block/blk-exec.c	 6
2 files changed, 25 insertions, 2 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index c37e9e7c9d0..30add45a87e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1731,6 +1731,10 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 		return -EIO;
 
 	spin_lock_irqsave(q->queue_lock, flags);
+	if (unlikely(blk_queue_dead(q))) {
+		spin_unlock_irqrestore(q->queue_lock, flags);
+		return -ENODEV;
+	}
 
 	/*
 	 * Submitting request must be dequeued before calling this function
@@ -2705,6 +2709,14 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 	trace_block_unplug(q, depth, !from_schedule);
 
 	/*
+	 * Don't mess with dead queue.
+	 */
+	if (unlikely(blk_queue_dead(q))) {
+		spin_unlock(q->queue_lock);
+		return;
+	}
+
+	/*
 	 * If we are punting this to kblockd, then we can safely drop
 	 * the queue_lock before waking kblockd (which needs to take
 	 * this lock).
@@ -2780,6 +2792,15 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 			depth = 0;
 			spin_lock(q->queue_lock);
 		}
+
+		/*
+		 * Short-circuit if @q is dead
+		 */
+		if (unlikely(blk_queue_dead(q))) {
+			__blk_end_request_all(rq, -ENODEV);
+			continue;
+		}
+
 		/*
 		 * rq is already accounted, so use raw insert
 		 */
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 60532852b3a..fb2cbd55162 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -50,7 +50,11 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 {
 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
 
+	WARN_ON(irqs_disabled());
+	spin_lock_irq(q->queue_lock);
+
 	if (unlikely(blk_queue_dead(q))) {
+		spin_unlock_irq(q->queue_lock);
 		rq->errors = -ENXIO;
 		if (rq->end_io)
 			rq->end_io(rq, rq->errors);
@@ -59,8 +63,6 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 
 	rq->rq_disk = bd_disk;
 	rq->end_io = done;
-	WARN_ON(irqs_disabled());
-	spin_lock_irq(q->queue_lock);
 	__elv_add_request(q, rq, where);
 	__blk_run_queue(q);
 	/* the queue is stopped so it won't be run */
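
All of the hunks above apply the same guard. The sketch below restates that pattern outside the patch context; the helper name insert_if_alive is hypothetical and is not part of this change, it only illustrates the rule the hunks enforce: take q->queue_lock, re-check blk_queue_dead(), and refuse to insert or run new work once the queue has been marked dead during cleanup.

/* Hypothetical helper, for illustration only (not in this patch). */
static int insert_if_alive(struct request_queue *q, struct request *rq)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (unlikely(blk_queue_dead(q))) {
		/* queue is being torn down; don't queue or run anything */
		spin_unlock_irqrestore(q->queue_lock, flags);
		return -ENODEV;
	}
	__elv_add_request(q, rq, ELEVATOR_INSERT_BACK);
	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
	return 0;
}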