author    Ming Lei <ming.lei@redhat.com>    2019-02-15 06:13:23 -0500
committer Jens Axboe <axboe@kernel.dk>      2019-02-15 10:40:12 -0500
commit    2705c93742e91730d335838025d75d8043861174 (patch)
tree      16e379f17a745cbb3a1f8d7ba5ffe7e2b8ddee86 /block/blk-merge.c
parent    ac4fa1d107addb2c6b21067d8945a39316a09fc8 (diff)
block: kill QUEUE_FLAG_NO_SG_MERGE
Since commit bdced438acd83ad83a6c ("block: setup bi_phys_segments after splitting"), the physical segment count is mostly computed in blk_queue_split() on the fast path, and the BIO_SEG_VALID flag is set there as well. Only blk_recount_segments() and blk_recalc_rq_segments() still use this flag.

blk_recount_segments() is bypassed on the fast path, since BIO_SEG_VALID is already set by blk_queue_split(). The other user, blk_recalc_rq_segments(), only runs in:

- the partial-completion branch of blk_update_request(), which is an unusual case;

- blk_cloned_rq_check_limits(), where killing the flag is still not a big problem, since dm-rq is the only user.

With multi-page bvecs now enabled, skipping S/G merging is rather pointless given the current setup of the I/O path: it is not going to save a significant number of cycles.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
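The fast-path bypass described above relies on BIO_SEG_VALID: blk_queue_split() computes bi_phys_segments and sets the flag, so callers only fall back to a recount when the flag is missing. A minimal sketch of that check, assuming block-layer internals of this kernel generation (the helper ensure_seg_count_valid() is illustrative, not a function from the patch):

#include <linux/bio.h>   /* bio_flagged(), BIO_SEG_VALID */
#include "blk.h"         /* blk_recount_segments(), block-layer internal header */

/* Illustrative helper, not part of the patch: make sure bi_phys_segments
 * has been computed before trusting it. */
static void ensure_seg_count_valid(struct request_queue *q, struct bio *bio)
{
        /* blk_queue_split() sets BIO_SEG_VALID on the fast path, so the
         * recount below is normally skipped. */
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);
}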
Diffstat (limited to 'block/blk-merge.c')
-rw-r--r--  block/blk-merge.c  31
1 file changed, 6 insertions(+), 25 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 1912499b08b7..bed065904677 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -358,8 +358,7 @@ void blk_queue_split(struct request_queue *q, struct bio **bio)
 EXPORT_SYMBOL(blk_queue_split);
 
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
-					     struct bio *bio,
-					     bool no_sg_merge)
+					     struct bio *bio)
 {
 	struct bio_vec bv, bvprv = { NULL };
 	int prev = 0;
@@ -385,13 +384,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	nr_phys_segs = 0;
 	for_each_bio(bio) {
 		bio_for_each_bvec(bv, bio, iter) {
-			/*
-			 * If SG merging is disabled, each bio vector is
-			 * a segment
-			 */
-			if (no_sg_merge)
-				goto new_segment;
-
 			if (prev) {
 				if (seg_size + bv.bv_len
 				    > queue_max_segment_size(q))
@@ -421,27 +413,16 @@ new_segment:
 
 void blk_recalc_rq_segments(struct request *rq)
 {
-	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
-			&rq->q->queue_flags);
-
-	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
-			no_sg_merge);
+	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
 }
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
-	unsigned short seg_cnt = bio_segments(bio);
-
-	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
-	    (seg_cnt < queue_max_segments(q)))
-		bio->bi_phys_segments = seg_cnt;
-	else {
-		struct bio *nxt = bio->bi_next;
+	struct bio *nxt = bio->bi_next;
 
-		bio->bi_next = NULL;
-		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
-		bio->bi_next = nxt;
-	}
+	bio->bi_next = NULL;
+	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
+	bio->bi_next = nxt;
 
 	bio_set_flag(bio, BIO_SEG_VALID);
 }
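With the flag gone, __blk_recalc_rq_segments() always performs the merge-based counting shown in the hunks above. A small standalone model of that counting (plain user-space C, not kernel code; physical-contiguity and segment-boundary checks are omitted, and the 64 KiB limit and function names are illustrative stand-ins for queue_max_segment_size() and the real helpers):

/*
 * Standalone model of what __blk_recalc_rq_segments() is left doing after
 * this patch: adjacent bio vectors are merged into one physical segment
 * while the running size stays within the queue's segment size limit.
 */
#include <stdio.h>

#define MAX_SEGMENT_SIZE (64U * 1024U)

static unsigned int count_phys_segments(const unsigned int *vec_len, int nr)
{
        unsigned int nr_segs = 0, seg_size = 0;

        for (int i = 0; i < nr; i++) {
                if (nr_segs && seg_size + vec_len[i] <= MAX_SEGMENT_SIZE) {
                        seg_size += vec_len[i];  /* merge into current segment */
                        continue;
                }
                nr_segs++;                       /* start a new segment */
                seg_size = vec_len[i];
        }
        return nr_segs;
}

int main(void)
{
        unsigned int lens[] = { 4096, 4096, 65536, 4096 };
        int nr = (int)(sizeof(lens) / sizeof(lens[0]));

        /* The two 4K vectors merge into one segment; the 64K vector and the
         * trailing 4K each start a new one, so this prints 3. */
        printf("%u physical segments\n", count_phys_segments(lens, nr));
        return 0;
}

Under QUEUE_FLAG_NO_SG_MERGE, every vector would simply have been counted as its own segment; with multi-page bvecs already coalescing contiguous pages, that shortcut no longer saves a meaningful number of cycles, which is the rationale given in the commit message.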