diff options
| author | Bart Van Assche <bvanassche@acm.org> | 2019-07-01 11:47:30 -0400 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2019-07-02 23:03:38 -0400 |
| commit | 970d168de636ddac8221cbd4a11d7678943e7379 (patch) | |
| tree | 622620d4308df3a0b437f1ae4f84e40d73d68333 /block | |
| parent | c05f42206f4de12b6807270fc669b45472f1bdb7 (diff) | |
blk-mq: simplify blk_mq_make_request()
Move the blk_mq_bio_to_request() call in front of the if-statement. This removes the duplicated blk_mq_bio_to_request() calls from each branch of the if/else chain, since every path performed the same conversion before inserting or issuing the request.
Cc: Hannes Reinecke <hare@suse.com>
Cc: Omar Sandoval <osandov@fb.com>
Reviewed-by: Minwoo Im <minwoo.im.dev@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
| -rw-r--r-- | block/blk-mq.c | 10 |
1 file changed, 2 insertions(+), 8 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c index 0cb1b152f320..e5ef40c603ca 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
| @@ -1971,10 +1971,10 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) | |||
| 1971 | 1971 | ||
| 1972 | cookie = request_to_qc_t(data.hctx, rq); | 1972 | cookie = request_to_qc_t(data.hctx, rq); |
| 1973 | 1973 | ||
| 1974 | blk_mq_bio_to_request(rq, bio, nr_segs); | ||
| 1975 | |||
| 1974 | plug = current->plug; | 1976 | plug = current->plug; |
| 1975 | if (unlikely(is_flush_fua)) { | 1977 | if (unlikely(is_flush_fua)) { |
| 1976 | blk_mq_bio_to_request(rq, bio, nr_segs); | ||
| 1977 | |||
| 1978 | /* bypass scheduler for flush rq */ | 1978 | /* bypass scheduler for flush rq */ |
| 1979 | blk_insert_flush(rq); | 1979 | blk_insert_flush(rq); |
| 1980 | blk_mq_run_hw_queue(data.hctx, true); | 1980 | blk_mq_run_hw_queue(data.hctx, true); |
| @@ -1986,8 +1986,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) | |||
| 1986 | unsigned int request_count = plug->rq_count; | 1986 | unsigned int request_count = plug->rq_count; |
| 1987 | struct request *last = NULL; | 1987 | struct request *last = NULL; |
| 1988 | 1988 | ||
| 1989 | blk_mq_bio_to_request(rq, bio, nr_segs); | ||
| 1990 | |||
| 1991 | if (!request_count) | 1989 | if (!request_count) |
| 1992 | trace_block_plug(q); | 1990 | trace_block_plug(q); |
| 1993 | else | 1991 | else |
| @@ -2001,8 +1999,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) | |||
| 2001 | 1999 | ||
| 2002 | blk_add_rq_to_plug(plug, rq); | 2000 | blk_add_rq_to_plug(plug, rq); |
| 2003 | } else if (plug && !blk_queue_nomerges(q)) { | 2001 | } else if (plug && !blk_queue_nomerges(q)) { |
| 2004 | blk_mq_bio_to_request(rq, bio, nr_segs); | ||
| 2005 | |||
| 2006 | /* | 2002 | /* |
| 2007 | * We do limited plugging. If the bio can be merged, do that. | 2003 | * We do limited plugging. If the bio can be merged, do that. |
| 2008 | * Otherwise the existing request in the plug list will be | 2004 | * Otherwise the existing request in the plug list will be |
| @@ -2027,10 +2023,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) | |||
| 2027 | } | 2023 | } |
| 2028 | } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator && | 2024 | } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator && |
| 2029 | !data.hctx->dispatch_busy)) { | 2025 | !data.hctx->dispatch_busy)) { |
| 2030 | blk_mq_bio_to_request(rq, bio, nr_segs); | ||
| 2031 | blk_mq_try_issue_directly(data.hctx, rq, &cookie); | 2026 | blk_mq_try_issue_directly(data.hctx, rq, &cookie); |
| 2032 | } else { | 2027 | } else { |
| 2033 | blk_mq_bio_to_request(rq, bio, nr_segs); | ||
| 2034 | blk_mq_sched_insert_request(rq, false, true, true); | 2028 | blk_mq_sched_insert_request(rq, false, true, true); |
| 2035 | } | 2029 | } |
| 2036 | 2030 | ||
