author		Tejun Heo <tj@kernel.org>	2010-09-03 05:56:17 -0400
committer	Jens Axboe <jaxboe@fusionio.com>	2010-09-10 06:35:37 -0400
commit		47f70d5a6ca78c40a1c799d43506efbfed914f7b (patch)
tree		fff5197359b989197eda76dd019746fbba054e88 /block
parent		337238be1bf52e1242f940fc6fe83fb395e55057 (diff)
block: kick queue after sequencing REQ_FLUSH/FUA
While completing a request from a REQ_FLUSH/FUA sequence, another request can be pushed to the request queue. If a driver tests elv_queue_empty() before completing a request and runs the queue again only if the queue wasn't empty, this may lead to a hang. Please note that most drivers either kick the queue unconditionally or test queue emptiness after completing the current request and don't have this problem.

This patch removes this possibility by making the REQ_FLUSH/FUA sequence code kick the queue if the queue was empty before completing a request from the REQ_FLUSH/FUA sequence.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
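The failure mode described above is easiest to see from the driver side. Below is a minimal sketch of the problematic completion pattern, assuming a hypothetical driver: my_driver_complete_rq() is illustrative, not from the kernel tree, and queue_lock handling is elided for brevity. The emptiness test happens before the completion that advances the flush sequence, so the request the sequence just queued is never run.

#include <linux/blkdev.h>
#include <linux/elevator.h>

/* Hypothetical driver completion path illustrating the hang. */
static void my_driver_complete_rq(struct request *rq, int error)
{
        struct request_queue *q = rq->q;

        /* Emptiness is tested BEFORE the request is completed. */
        bool was_empty = elv_queue_empty(q);

        /*
         * Completing a request from a REQ_FLUSH/FUA sequence may push the
         * next request of the sequence onto the (previously empty) queue.
         */
        blk_end_request_all(rq, error);

        /*
         * The queue looked empty at the check above, so it is never re-run
         * here and the request queued by the flush sequence stalls.
         */
        if (!was_empty)
                blk_run_queue(q);
}

With the patch below, blk_flush_complete_seq_end_io() kicks the queue itself whenever it moves a request onto a previously empty queue, so a driver written like the sketch above no longer hangs.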
Diffstat (limited to 'block')
-rw-r--r--	block/blk-flush.c	22
1 file changed, 19 insertions, 3 deletions
diff --git a/block/blk-flush.c b/block/blk-flush.c
index f357f1fc411c..cb4c8440a1fc 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -56,22 +56,38 @@ static struct request *blk_flush_complete_seq(struct request_queue *q,
         return next_rq;
 }
 
+static void blk_flush_complete_seq_end_io(struct request_queue *q,
+                                          unsigned seq, int error)
+{
+        bool was_empty = elv_queue_empty(q);
+        struct request *next_rq;
+
+        next_rq = blk_flush_complete_seq(q, seq, error);
+
+        /*
+         * Moving a request silently to empty queue_head may stall the
+         * queue.  Kick the queue in those cases.
+         */
+        if (was_empty && next_rq)
+                __blk_run_queue(q);
+}
+
 static void pre_flush_end_io(struct request *rq, int error)
 {
         elv_completed_request(rq->q, rq);
-        blk_flush_complete_seq(rq->q, QUEUE_FSEQ_PREFLUSH, error);
+        blk_flush_complete_seq_end_io(rq->q, QUEUE_FSEQ_PREFLUSH, error);
 }
 
 static void flush_data_end_io(struct request *rq, int error)
 {
         elv_completed_request(rq->q, rq);
-        blk_flush_complete_seq(rq->q, QUEUE_FSEQ_DATA, error);
+        blk_flush_complete_seq_end_io(rq->q, QUEUE_FSEQ_DATA, error);
 }
 
 static void post_flush_end_io(struct request *rq, int error)
 {
         elv_completed_request(rq->q, rq);
-        blk_flush_complete_seq(rq->q, QUEUE_FSEQ_POSTFLUSH, error);
+        blk_flush_complete_seq_end_io(rq->q, QUEUE_FSEQ_POSTFLUSH, error);
 }
 
 static void init_flush_request(struct request *rq, struct gendisk *disk)