diff options
author | Jens Axboe <axboe@fb.com> | 2015-09-03 12:28:20 -0400 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2015-09-03 12:33:09 -0400 |
commit | 5e7c4274a70aa2d6f485996d0ca1dad52d0039ca (patch) | |
tree | dce51421028d162af78514b631dc75a6be92cf11 /block | |
parent | 5fdb7e1b976dc9d18aff8c711e51d17c4c324a0e (diff) |
block: Check for gaps on front and back merges
We are checking for gaps to the previous bio_vec, which can
only detect back merge gaps. Moreover, at the point where
we check for a gap, we don't know if we will attempt a back
or a front merge. Thus, check for a gap to prev in a back merge
attempt and check for a gap to next in a front merge attempt.
Signed-off-by: Jens Axboe <axboe@fb.com>
[sagig: Minor rename change]
Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-merge.c | 19 |
1 file changed, 6 insertions, 13 deletions
diff --git a/block/blk-merge.c b/block/blk-merge.c index cce23ba1ae5f..d9eddbc189f5 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c | |||
@@ -438,6 +438,8 @@ no_merge: | |||
438 | int ll_back_merge_fn(struct request_queue *q, struct request *req, | 438 | int ll_back_merge_fn(struct request_queue *q, struct request *req, |
439 | struct bio *bio) | 439 | struct bio *bio) |
440 | { | 440 | { |
441 | if (req_gap_back_merge(req, bio)) | ||
442 | return 0; | ||
441 | if (blk_rq_sectors(req) + bio_sectors(bio) > | 443 | if (blk_rq_sectors(req) + bio_sectors(bio) > |
442 | blk_rq_get_max_sectors(req)) { | 444 | blk_rq_get_max_sectors(req)) { |
443 | req->cmd_flags |= REQ_NOMERGE; | 445 | req->cmd_flags |= REQ_NOMERGE; |
@@ -456,6 +458,9 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req, | |||
456 | int ll_front_merge_fn(struct request_queue *q, struct request *req, | 458 | int ll_front_merge_fn(struct request_queue *q, struct request *req, |
457 | struct bio *bio) | 459 | struct bio *bio) |
458 | { | 460 | { |
461 | |||
462 | if (req_gap_front_merge(req, bio)) | ||
463 | return 0; | ||
459 | if (blk_rq_sectors(req) + bio_sectors(bio) > | 464 | if (blk_rq_sectors(req) + bio_sectors(bio) > |
460 | blk_rq_get_max_sectors(req)) { | 465 | blk_rq_get_max_sectors(req)) { |
461 | req->cmd_flags |= REQ_NOMERGE; | 466 | req->cmd_flags |= REQ_NOMERGE; |
@@ -482,14 +487,6 @@ static bool req_no_special_merge(struct request *req) | |||
482 | return !q->mq_ops && req->special; | 487 | return !q->mq_ops && req->special; |
483 | } | 488 | } |
484 | 489 | ||
485 | static int req_gap_to_prev(struct request *req, struct bio *next) | ||
486 | { | ||
487 | struct bio *prev = req->biotail; | ||
488 | |||
489 | return bvec_gap_to_prev(req->q, &prev->bi_io_vec[prev->bi_vcnt - 1], | ||
490 | next->bi_io_vec[0].bv_offset); | ||
491 | } | ||
492 | |||
493 | static int ll_merge_requests_fn(struct request_queue *q, struct request *req, | 490 | static int ll_merge_requests_fn(struct request_queue *q, struct request *req, |
494 | struct request *next) | 491 | struct request *next) |
495 | { | 492 | { |
@@ -504,7 +501,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req, | |||
504 | if (req_no_special_merge(req) || req_no_special_merge(next)) | 501 | if (req_no_special_merge(req) || req_no_special_merge(next)) |
505 | return 0; | 502 | return 0; |
506 | 503 | ||
507 | if (req_gap_to_prev(req, next->bio)) | 504 | if (req_gap_back_merge(req, next->bio)) |
508 | return 0; | 505 | return 0; |
509 | 506 | ||
510 | /* | 507 | /* |
@@ -712,10 +709,6 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio) | |||
712 | !blk_write_same_mergeable(rq->bio, bio)) | 709 | !blk_write_same_mergeable(rq->bio, bio)) |
713 | return false; | 710 | return false; |
714 | 711 | ||
715 | /* Only check gaps if the bio carries data */ | ||
716 | if (bio_has_data(bio) && req_gap_to_prev(rq, bio)) | ||
717 | return false; | ||
718 | |||
719 | return true; | 712 | return true; |
720 | } | 713 | } |
721 | 714 | ||