author     Muthukumar Ratty <muthur@gmail.com>          2012-06-29 11:31:49 -0400
committer  James Bottomley <JBottomley@Parallels.com>   2012-07-20 03:58:39 -0400
commit     e81ca6fe85b77109a32489a5db82f575d51dfc98
tree       a881c5b85b07525f28cc546dfde2038cd900cb45
parent     6548b0e5b875a07e32e924b22a7df3669892c75a
[SCSI] block: Fix blk_execute_rq_nowait() dead queue handling
If the queue is dead, blk_execute_rq_nowait() doesn't invoke the done()
callback function. That leaves blk_execute_rq() stuck in
wait_for_completion(). Avoid this by initializing rq->end_io to the
done() callback before we check the queue state. Also, make sure the
queue lock is held around the invocation of the done() callback. This
issue was found through source code review.
Signed-off-by: Muthukumar Ratty <muthur@gmail.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Tejun Heo <tj@kernel.org>
Acked-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
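For context, here is a simplified sketch of the synchronous path that hangs, condensed from the blk-exec.c of this era rather than quoted verbatim: blk_execute_rq() parks on a completion that is only ever signalled by the done() callback, so a dead-queue return that skips done() blocks the caller forever.

#include <linux/blkdev.h>
#include <linux/completion.h>

/* Condensed sketch of blk_end_sync_rq()/blk_execute_rq(); not verbatim source. */
static void blk_end_sync_rq(struct request *rq, int error)
{
	struct completion *waiting = rq->end_io_data;

	rq->end_io_data = NULL;
	complete(waiting);		/* wakes the waiter in blk_execute_rq() */
}

int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
		   struct request *rq, int at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	rq->end_io_data = &wait;
	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);

	/*
	 * If blk_execute_rq_nowait() bails out without ever invoking
	 * blk_end_sync_rq() -- the dead-queue case fixed here -- this
	 * completion is never signalled and the caller hangs.
	 */
	wait_for_completion(&wait);

	return rq->errors ? -EIO : 0;
}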
-rw-r--r--   block/blk-exec.c | 11
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/block/blk-exec.c b/block/blk-exec.c
index fb2cbd551621..8b6dc5bd4dd0 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -43,6 +43,9 @@ static void blk_end_sync_rq(struct request *rq, int error)
  * Description:
  *    Insert a fully prepared request at the back of the I/O scheduler queue
  *    for execution.  Don't wait for completion.
+ *
+ * Note:
+ *    This function will invoke @done directly if the queue is dead.
  */
 void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 			   struct request *rq, int at_head,
@@ -51,18 +54,20 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
 
 	WARN_ON(irqs_disabled());
+
+	rq->rq_disk = bd_disk;
+	rq->end_io = done;
+
 	spin_lock_irq(q->queue_lock);
 
 	if (unlikely(blk_queue_dead(q))) {
-		spin_unlock_irq(q->queue_lock);
 		rq->errors = -ENXIO;
 		if (rq->end_io)
 			rq->end_io(rq, rq->errors);
+		spin_unlock_irq(q->queue_lock);
 		return;
 	}
 
-	rq->rq_disk = bd_disk;
-	rq->end_io = done;
 	__elv_add_request(q, rq, where);
 	__blk_run_queue(q);
 	/* the queue is stopped so it won't be run */
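With this change, a caller of the asynchronous interface can rely on its done() callback being invoked, with the queue lock held, even when the queue is dead. A hypothetical caller might look like the following sketch; my_end_io() and my_submit() are illustrative names, not existing kernel symbols, and the request setup is elided.

#include <linux/blkdev.h>

/* Hypothetical completion callback: frees the request once the block
 * layer is done with it. */
static void my_end_io(struct request *rq, int error)
{
	/*
	 * Assumed to run with the queue lock held (normal completion and,
	 * after this patch, the dead-queue path), so use the unlocked
	 * __blk_put_request() variant.
	 */
	__blk_put_request(rq->q, rq);
}

/* Hypothetical submit helper: allocates a request and hands it to
 * blk_execute_rq_nowait() without waiting for completion. */
static int my_submit(struct request_queue *q)
{
	struct request *rq;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	/* ... fill in rq->cmd_type, command bytes, timeout, etc. ... */

	blk_execute_rq_nowait(q, NULL, rq, 1 /* at_head */, my_end_io);
	return 0;
}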