author    Christoph Hellwig <hch@lst.de>    2019-05-21 03:01:40 -0400
committer Jens Axboe <axboe@kernel.dk>      2019-05-23 12:25:26 -0400
commit    eded341c085bebdd653f8086c02179098cb81748
tree      a454ef941f258d1d0e7ec4c6bd7d5580d9192e51 /block
parent    a0934fd2b1208458e55fc4b48f55889809fce666
block: don't decrement nr_phys_segments for physically contiguous segments
Currently ll_merge_requests_fn, unlike all other merge functions, reduces nr_phys_segments by one if the last segment of the previous request and the first segment of the next request are physically contiguous. While this seems like a nice way to avoid building smaller requests than necessary, it causes a mismatch between the segments actually present in the request and those iterated over by the bvec iterators, including __rq_for_each_bio. This can, for example, mistrigger the single segment optimization in the nvme-pci driver, and might lead to a mismatched nr_phys_segments count when the segments are recalculated for a cloned request at insertion time.

We could work around this by making the bvec iterators take the front and back segment sizes into account, but that would require moving them from the bio to the bio_iter and spreading this mess over all users of bvecs. Or we could simply remove this optimization under the assumption that most users already build good enough bvecs, and that the bio merging path never cared about this optimization either. The latter is what this patch does.

The single segment optimization mentioned above was introduced in dff824b2aadb ("nvme-pci: optimize mapping of small single segment requests").

Reviewed-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
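For illustration only (not part of the patch or of the nvme driver): the mistrigger described above comes from a driver fast path that trusts nr_phys_segments while the bvec iterators walk the segments that are really there. Below is a minimal sketch of that pattern, loosely modeled on the idea of a single-segment fast path; map_single_segment() and map_multi_segment() are hypothetical stand-ins for driver-specific mapping helpers.

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Hypothetical driver-specific mapping helpers, not real kernel API. */
blk_status_t map_single_segment(struct request *req);
blk_status_t map_multi_segment(struct request *req, unsigned int nr_bvecs);

static blk_status_t example_map_data(struct request *req)
{
        struct bio *bio;
        struct bvec_iter iter;
        struct bio_vec bv;
        unsigned int walked = 0;

        /*
         * Fast path keyed on the accounted segment count.  If a merge
         * decremented nr_phys_segments for a physically contiguous
         * boundary, this claims "one segment" even though the request
         * spans two bios.
         */
        if (blk_rq_nr_phys_segments(req) == 1)
                return map_single_segment(req);

        /* The iterators walk the bvecs that are actually present. */
        __rq_for_each_bio(bio, req)
                bio_for_each_bvec(bv, bio, iter)
                        walked++;

        return map_multi_segment(req, walked);
}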
Diffstat (limited to 'block')
-rw-r--r--  block/blk-merge.c  |  23
1 file changed, 1 insertion(+), 22 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 21e87a714a73..80a5a0facb87 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -358,7 +358,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	unsigned front_seg_size;
 	struct bio *fbio, *bbio;
 	struct bvec_iter iter;
-	bool new_bio = false;
 
 	if (!bio)
 		return 0;
@@ -379,31 +378,12 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	nr_phys_segs = 0;
 	for_each_bio(bio) {
 		bio_for_each_bvec(bv, bio, iter) {
-			if (new_bio) {
-				if (seg_size + bv.bv_len
-				    > queue_max_segment_size(q))
-					goto new_segment;
-				if (!biovec_phys_mergeable(q, &bvprv, &bv))
-					goto new_segment;
-
-				seg_size += bv.bv_len;
-
-				if (nr_phys_segs == 1 && seg_size >
-						front_seg_size)
-					front_seg_size = seg_size;
-
-				continue;
-			}
-new_segment:
 			bvec_split_segs(q, &bv, &nr_phys_segs, &seg_size,
 					&front_seg_size, NULL, UINT_MAX);
-			new_bio = false;
 		}
 		bbio = bio;
-		if (likely(bio->bi_iter.bi_size)) {
+		if (likely(bio->bi_iter.bi_size))
 			bvprv = bv;
-			new_bio = true;
-		}
 	}
 
 	fbio->bi_seg_front_size = front_seg_size;
@@ -725,7 +705,6 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 			req->bio->bi_seg_front_size = seg_size;
 		if (next->nr_phys_segments == 1)
 			next->biotail->bi_seg_back_size = seg_size;
-		total_phys_segments--;
 	}
 
 	if (total_phys_segments > queue_max_segments(q))
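
For illustration only (not part of the patch): with the special casing gone, __blk_recalc_rq_segments simply feeds every bvec to bvec_split_segs, so the accounted segment count matches what the iterators produce. The sketch below shows that accounting in simplified form, assuming only the queue's maximum segment size matters; it ignores the segment boundary mask and the front/back segment bookkeeping that the real helper handles.

#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/kernel.h>

/*
 * Illustrative sketch only: count physical segments by splitting each
 * bvec at the queue's maximum segment size.  The real recalculation
 * uses bvec_split_segs(), which also honours the segment boundary mask
 * and tracks front/back segment sizes.
 */
static unsigned int example_count_segments(struct request_queue *q,
                                           struct bio *bio)
{
        unsigned int max_seg = queue_max_segment_size(q);
        unsigned int nr_segs = 0;
        struct bvec_iter iter;
        struct bio_vec bv;

        for_each_bio(bio) {
                bio_for_each_bvec(bv, bio, iter)
                        /* Every piece of at most max_seg bytes is one segment. */
                        nr_segs += DIV_ROUND_UP(bv.bv_len, max_seg);
        }

        return nr_segs;
}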