author     Christoph Hellwig <hch@lst.de>    2014-09-13 19:40:09 -0400
committer  Jens Axboe <axboe@fb.com>         2014-09-22 14:00:07 -0400
commit     e2490073cd7c3d6f6ef6e029a208edd4d38efac4
tree       48465e67b335f6d696d698641365b4a4d1e89d32
parent     bf57229745f849e500ba69ff91e35bc8160a7373
blk-mq: call blk_mq_start_request from ->queue_rq
When we call blk_mq_start_request from the core blk-mq code before calling into ->queue_rq, there is a racy window where the timeout handler can hit before we've fully set up the driver-specific part of the command. Move the call to blk_mq_start_request into the driver so the driver can start the request only once it is fully set up.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
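For illustration, a minimal sketch of the pattern this change asks of drivers, assuming a hypothetical mydrv driver (the mydrv_* helpers and mydrv_cmd type are invented; the ->queue_rq signature and BLK_MQ_RQ_QUEUE_* return codes match the blk-mq interface of this kernel):

static int mydrv_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq,
			  bool last)
{
	struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);

	/* Finish all driver-specific setup of the command first. */
	if (mydrv_setup_cmd(cmd, rq))
		return BLK_MQ_RQ_QUEUE_ERROR;

	/*
	 * Only now mark the request started; the timeout handler can
	 * no longer fire against a half-constructed command.
	 */
	blk_mq_start_request(rq);

	mydrv_submit_cmd(cmd);
	return BLK_MQ_RQ_QUEUE_OK;
}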
Diffstat (limited to 'block')
-rw-r--r--  block/blk-mq.c  13
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 32b4797f4186..141f2e06803a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -384,7 +384,7 @@ void blk_mq_complete_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
-static void blk_mq_start_request(struct request *rq)
+void blk_mq_start_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 
@@ -422,16 +422,18 @@ static void blk_mq_start_request(struct request *rq)
 		rq->nr_phys_segments++;
 	}
 }
+EXPORT_SYMBOL(blk_mq_start_request);
 
 static void __blk_mq_requeue_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 
 	trace_block_rq_requeue(q, rq);
-	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
 
-	if (q->dma_drain_size && blk_rq_bytes(rq))
-		rq->nr_phys_segments--;
+	if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
+		if (q->dma_drain_size && blk_rq_bytes(rq))
+			rq->nr_phys_segments--;
+	}
 }
 
 void blk_mq_requeue_request(struct request *rq)
@@ -743,8 +745,6 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 		rq = list_first_entry(&rq_list, struct request, queuelist);
 		list_del_init(&rq->queuelist);
 
-		blk_mq_start_request(rq);
-
 		ret = q->mq_ops->queue_rq(hctx, rq, list_empty(&rq_list));
 		switch (ret) {
 		case BLK_MQ_RQ_QUEUE_OK:
@@ -1186,7 +1186,6 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		int ret;
 
 		blk_mq_bio_to_request(rq, bio);
-		blk_mq_start_request(rq);
 
 		/*
 		 * For OK queue, we are done. For error, kill it. Any other
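A side effect visible in the __blk_mq_requeue_request hunk: now that starting a request is the driver's job, a request can reach the requeue path without ever having been started, e.g. when ->queue_rq returns BLK_MQ_RQ_QUEUE_BUSY before it gets to blk_mq_start_request. Switching from clear_bit to test_and_clear_bit keeps the dma_drain_size segment accounting balanced by only undoing it for requests that were actually started. A hedged sketch of that early-out path, again with hypothetical mydrv_* names:

static int mydrv_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq,
			  bool last)
{
	/*
	 * Out of hardware slots: bail out before blk_mq_start_request,
	 * so REQ_ATOM_STARTED is still clear if the core requeues the
	 * request.
	 */
	if (!mydrv_hw_slot_available(hctx))
		return BLK_MQ_RQ_QUEUE_BUSY;

	blk_mq_start_request(rq);
	return mydrv_issue(hctx, rq);
}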