author      Jianchao Wang <jianchao.w.wang@oracle.com>   2018-10-27 07:52:14 -0400
committer   Jens Axboe <axboe@kernel.dk>                 2018-10-29 11:32:40 -0400
commit      69840466086d2248898020a08dda52732686c4e6
tree        4fd830c19b4c91854952e9ca56ba29cbdd99e376  /block/blk-merge.c
parent      a435ab4f80f983c53b4ca4f8c12b3ddd3ca17670
block: fix the DISCARD request merge
There are two cases to handle when merging DISCARD requests.
If max_discard_segments == 1, the bios/requests must be contiguous
to merge. If max_discard_segments > 1, every bio is treated as a
separate range, and the ranges need not be contiguous.

attempt_merge currently gets both cases wrong: it still requires
contiguity for DISCARD when max_discard_segments > 1, and it cannot
merge contiguous DISCARDs when max_discard_segments == 1, because
req_attempt_discard_merge always returns false in that case.

This patch fixes both cases.
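For illustration only, below is a small, self-contained user-space model of the rule the patch encodes. It is not kernel code: the struct fields, helper names, and sample sector numbers are made up for the example, and it only mirrors the classification done by the new blk_discard_mergable()/blk_try_req_merge() helpers. It shows that with max_discard_segments == 1 only contiguous DISCARDs back-merge, while with max_discard_segments > 1 non-contiguous DISCARDs can still be discard-merged as separate ranges.

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, user-space stand-ins for the kernel structures. */
enum req_op { OP_READ, OP_WRITE, OP_DISCARD };
enum merge { NO_MERGE, BACK_MERGE, DISCARD_MERGE };

struct req {
	enum req_op op;
	unsigned long long pos;      /* first sector */
	unsigned long long sectors;  /* length in sectors */
};

/* Mirrors blk_discard_mergable(): multi-range DISCARD needs no contiguity. */
static bool discard_mergable(const struct req *rq, unsigned int max_discard_segments)
{
	return rq->op == OP_DISCARD && max_discard_segments > 1;
}

/* Mirrors blk_try_req_merge(): classify how rq and next could merge. */
static enum merge try_req_merge(const struct req *rq, const struct req *next,
				unsigned int max_discard_segments)
{
	if (discard_mergable(rq, max_discard_segments))
		return DISCARD_MERGE;	/* ranges sent together, gaps allowed */
	if (rq->pos + rq->sectors == next->pos)
		return BACK_MERGE;	/* classic contiguity rule */
	return NO_MERGE;
}

int main(void)
{
	struct req a          = { OP_DISCARD,   0, 8 };
	struct req contiguous = { OP_DISCARD,   8, 8 };
	struct req gap        = { OP_DISCARD, 100, 8 };

	/* max_discard_segments == 1: only the contiguous pair merges (back merge). */
	printf("%d %d\n", try_req_merge(&a, &contiguous, 1), try_req_merge(&a, &gap, 1));

	/* max_discard_segments > 1: both pairs are eligible for a DISCARD merge. */
	printf("%d %d\n", try_req_merge(&a, &contiguous, 64), try_req_merge(&a, &gap, 64));
	return 0;
}
```

In the kernel, attempt_merge() then acts on the same classification: ELEVATOR_DISCARD_MERGE goes through req_attempt_discard_merge(), ELEVATOR_BACK_MERGE through ll_merge_requests_fn(), and anything else is rejected, as shown in the diff below.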
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-merge.c')
 block/blk-merge.c | 46
 1 file changed, 36 insertions(+), 10 deletions(-)
```diff
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 42a46744c11b..6b5ad275ed56 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -714,6 +714,31 @@ static void blk_account_io_merge(struct request *req)
 		part_stat_unlock();
 	}
 }
+/*
+ * Two cases of handling DISCARD merge:
+ * If max_discard_segments > 1, the driver takes every bio
+ * as a range and send them to controller together. The ranges
+ * needn't to be contiguous.
+ * Otherwise, the bios/requests will be handled as same as
+ * others which should be contiguous.
+ */
+static inline bool blk_discard_mergable(struct request *req)
+{
+	if (req_op(req) == REQ_OP_DISCARD &&
+	    queue_max_discard_segments(req->q) > 1)
+		return true;
+	return false;
+}
+
+enum elv_merge blk_try_req_merge(struct request *req, struct request *next)
+{
+	if (blk_discard_mergable(req))
+		return ELEVATOR_DISCARD_MERGE;
+	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
+		return ELEVATOR_BACK_MERGE;
+
+	return ELEVATOR_NO_MERGE;
+}
 
 /*
  * For non-mq, this has to be called with the request spinlock acquired.
@@ -731,12 +756,6 @@ static struct request *attempt_merge(struct request_queue *q,
 	if (req_op(req) != req_op(next))
 		return NULL;
 
-	/*
-	 * not contiguous
-	 */
-	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
-		return NULL;
-
 	if (rq_data_dir(req) != rq_data_dir(next)
 	    || req->rq_disk != next->rq_disk
 	    || req_no_special_merge(next))
@@ -760,11 +779,19 @@ static struct request *attempt_merge(struct request_queue *q,
 	 * counts here. Handle DISCARDs separately, as they
 	 * have separate settings.
 	 */
-	if (req_op(req) == REQ_OP_DISCARD) {
+
+	switch (blk_try_req_merge(req, next)) {
+	case ELEVATOR_DISCARD_MERGE:
 		if (!req_attempt_discard_merge(q, req, next))
 			return NULL;
-	} else if (!ll_merge_requests_fn(q, req, next))
+		break;
+	case ELEVATOR_BACK_MERGE:
+		if (!ll_merge_requests_fn(q, req, next))
+			return NULL;
+		break;
+	default:
 		return NULL;
+	}
 
 	/*
 	 * If failfast settings disagree or any of the two is already
@@ -888,8 +915,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 
 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
 {
-	if (req_op(rq) == REQ_OP_DISCARD &&
-	    queue_max_discard_segments(rq->q) > 1)
+	if (blk_discard_mergable(rq))
 		return ELEVATOR_DISCARD_MERGE;
 	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
 		return ELEVATOR_BACK_MERGE;
```