diff options
author | Christoph Hellwig <hch@lst.de> | 2014-09-13 19:40:08 -0400 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2014-09-22 14:00:07 -0400 |
commit | bf57229745f849e500ba69ff91e35bc8160a7373 (patch) | |
tree | 9b90e6c5b32e96b082ad5c135c22de2c9cc82b77 /block | |
parent | 6d11fb454b161a4565c57be6f1c5527235741003 (diff) |
blk-mq: remove REQ_END
Pass an explicit parameter for the last request in a batch to ->queue_rq
instead of using a request flag. Besides being a cleaner and non-stateful
interface this is also required for the next patch, which fixes the blk-mq
I/O submission code to not start a timer too early.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-mq.c | 22 |
1 file changed, 5 insertions, 17 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c index e743d28620b2..32b4797f4186 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -384,7 +384,7 @@ void blk_mq_complete_request(struct request *rq) | |||
384 | } | 384 | } |
385 | EXPORT_SYMBOL(blk_mq_complete_request); | 385 | EXPORT_SYMBOL(blk_mq_complete_request); |
386 | 386 | ||
387 | static void blk_mq_start_request(struct request *rq, bool last) | 387 | static void blk_mq_start_request(struct request *rq) |
388 | { | 388 | { |
389 | struct request_queue *q = rq->q; | 389 | struct request_queue *q = rq->q; |
390 | 390 | ||
@@ -421,16 +421,6 @@ static void blk_mq_start_request(struct request *rq, bool last) | |||
421 | */ | 421 | */ |
422 | rq->nr_phys_segments++; | 422 | rq->nr_phys_segments++; |
423 | } | 423 | } |
424 | |||
425 | /* | ||
426 | * Flag the last request in the series so that drivers know when IO | ||
427 | * should be kicked off, if they don't do it on a per-request basis. | ||
428 | * | ||
429 | * Note: the flag isn't the only condition drivers should do kick off. | ||
430 | * If drive is busy, the last request might not have the bit set. | ||
431 | */ | ||
432 | if (last) | ||
433 | rq->cmd_flags |= REQ_END; | ||
434 | } | 424 | } |
435 | 425 | ||
436 | static void __blk_mq_requeue_request(struct request *rq) | 426 | static void __blk_mq_requeue_request(struct request *rq) |
@@ -440,8 +430,6 @@ static void __blk_mq_requeue_request(struct request *rq) | |||
440 | trace_block_rq_requeue(q, rq); | 430 | trace_block_rq_requeue(q, rq); |
441 | clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags); | 431 | clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags); |
442 | 432 | ||
443 | rq->cmd_flags &= ~REQ_END; | ||
444 | |||
445 | if (q->dma_drain_size && blk_rq_bytes(rq)) | 433 | if (q->dma_drain_size && blk_rq_bytes(rq)) |
446 | rq->nr_phys_segments--; | 434 | rq->nr_phys_segments--; |
447 | } | 435 | } |
@@ -755,9 +743,9 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) | |||
755 | rq = list_first_entry(&rq_list, struct request, queuelist); | 743 | rq = list_first_entry(&rq_list, struct request, queuelist); |
756 | list_del_init(&rq->queuelist); | 744 | list_del_init(&rq->queuelist); |
757 | 745 | ||
758 | blk_mq_start_request(rq, list_empty(&rq_list)); | 746 | blk_mq_start_request(rq); |
759 | 747 | ||
760 | ret = q->mq_ops->queue_rq(hctx, rq); | 748 | ret = q->mq_ops->queue_rq(hctx, rq, list_empty(&rq_list)); |
761 | switch (ret) { | 749 | switch (ret) { |
762 | case BLK_MQ_RQ_QUEUE_OK: | 750 | case BLK_MQ_RQ_QUEUE_OK: |
763 | queued++; | 751 | queued++; |
@@ -1198,14 +1186,14 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) | |||
1198 | int ret; | 1186 | int ret; |
1199 | 1187 | ||
1200 | blk_mq_bio_to_request(rq, bio); | 1188 | blk_mq_bio_to_request(rq, bio); |
1201 | blk_mq_start_request(rq, true); | 1189 | blk_mq_start_request(rq); |
1202 | 1190 | ||
1203 | /* | 1191 | /* |
1204 | * For OK queue, we are done. For error, kill it. Any other | 1192 | * For OK queue, we are done. For error, kill it. Any other |
1205 | * error (busy), just add it to our list as we previously | 1193 | * error (busy), just add it to our list as we previously |
1206 | * would have done | 1194 | * would have done |
1207 | */ | 1195 | */ |
1208 | ret = q->mq_ops->queue_rq(data.hctx, rq); | 1196 | ret = q->mq_ops->queue_rq(data.hctx, rq, true); |
1209 | if (ret == BLK_MQ_RQ_QUEUE_OK) | 1197 | if (ret == BLK_MQ_RQ_QUEUE_OK) |
1210 | goto done; | 1198 | goto done; |
1211 | else { | 1199 | else { |