author     Ming Lei <ming.lei@redhat.com>    2019-03-29 03:08:00 -0400
committer  Jens Axboe <axboe@kernel.dk>      2019-04-01 14:11:39 -0400
commit     489fbbcb51d0249569d863f9220de69cb31f1922 (patch)
tree       755f9dc717057372d6b6e6207887c714b2a6c311 /block
parent     190470871ae28da7bdb3909f6124385c8472fc97 (diff)
block: enable multi-page bvec for passthrough IO
Now the block IO stack is basically ready to support multi-page bvecs, but they are not yet enabled for passthrough IO. One reason is that passthrough IO is dispatched directly to the LLD and bio splitting is bypassed, so the bio has to be built correctly for dispatch to the LLD from the beginning.

Implement multi-page bvec support for passthrough IO by limiting each bvec to one block device segment and applying all of the queue limits in blk_add_pc_page(). Segments then no longer have to be counted separately for passthrough IO, which simplifies the code considerably.

Cc: Omar Sandoval <osandov@fb.com>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
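To make the policy above concrete, here is a minimal standalone sketch (hypothetical names throughout, not the kernel code; the authoritative checks are in __bio_add_pc_page() in the diff below). A page may grow the current bvec only while the enlarged bvec still forms one device segment; otherwise it has to start a new bvec, and since each bvec now counts as exactly one segment, the queue's max_segments limit directly caps the bvec count:

    /* Simplified decision sketch, not the kernel implementation. */
    enum add_result { MERGE_INTO_LAST_BVEC, START_NEW_BVEC, REJECT };

    static enum add_result classify_add(bool contiguous_with_last,
                                        bool same_segment_window,
                                        bool fits_segment_size,
                                        unsigned int nr_bvecs,
                                        unsigned int max_segments)
    {
            /* Grow the last bvec while it still forms one device segment. */
            if (contiguous_with_last && same_segment_window && fits_segment_size)
                    return MERGE_INTO_LAST_BVEC;

            /* Each bvec is one segment, so max_segments caps the bvec count. */
            if (nr_bvecs < max_segments)
                    return START_NEW_BVEC;

            return REJECT;
    }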
Diffstat (limited to 'block')
-rw-r--r--    block/bio.c    60
1 file changed, 31 insertions(+), 29 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index 26853e072cd7..8d516d508ae3 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -665,6 +665,27 @@ static inline bool page_is_mergeable(const struct bio_vec *bv,
 	return true;
 }
 
+/*
+ * Check if the @page can be added to the current segment(@bv), and make
+ * sure to call it only if page_is_mergeable(@bv, @page) is true
+ */
+static bool can_add_page_to_seg(struct request_queue *q,
+		struct bio_vec *bv, struct page *page, unsigned len,
+		unsigned offset)
+{
+	unsigned long mask = queue_segment_boundary(q);
+	phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
+	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;
+
+	if ((addr1 | mask) != (addr2 | mask))
+		return false;
+
+	if (bv->bv_len + len > queue_max_segment_size(q))
+		return false;
+
+	return true;
+}
+
 /**
  *	__bio_add_pc_page	- attempt to add page to passthrough bio
  *	@q: the target queue
@@ -685,7 +706,6 @@ int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
 		struct page *page, unsigned int len, unsigned int offset,
 		bool put_same_page)
 {
-	int retried_segments = 0;
 	struct bio_vec *bvec;
 
 	/*
@@ -709,6 +729,7 @@ int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
 		    offset == bvec->bv_offset + bvec->bv_len) {
 			if (put_same_page)
 				put_page(page);
+ bvec_merge:
 			bvec->bv_len += len;
 			bio->bi_iter.bi_size += len;
 			goto done;
@@ -720,11 +741,18 @@ int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
 		 */
 		if (bvec_gap_to_prev(q, bvec, offset))
 			return 0;
+
+		if (page_is_mergeable(bvec, page, len, offset, false) &&
+		    can_add_page_to_seg(q, bvec, page, len, offset))
+			goto bvec_merge;
 	}
 
 	if (bio_full(bio))
 		return 0;
 
+	if (bio->bi_phys_segments >= queue_max_segments(q))
+		return 0;
+
 	/*
 	 * setup the new entry, we might clear it again later if we
 	 * cannot add the page
@@ -734,38 +762,12 @@ int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
 	bvec->bv_len = len;
 	bvec->bv_offset = offset;
 	bio->bi_vcnt++;
-	bio->bi_phys_segments++;
 	bio->bi_iter.bi_size += len;
 
-	/*
-	 * Perform a recount if the number of segments is greater
-	 * than queue_max_segments(q).
-	 */
-
-	while (bio->bi_phys_segments > queue_max_segments(q)) {
-
-		if (retried_segments)
-			goto failed;
-
-		retried_segments = 1;
-		blk_recount_segments(q, bio);
-	}
-
-	/* If we may be able to merge these biovecs, force a recount */
-	if (bio->bi_vcnt > 1 && biovec_phys_mergeable(q, bvec - 1, bvec))
-		bio_clear_flag(bio, BIO_SEG_VALID);
-
  done:
+	bio->bi_phys_segments = bio->bi_vcnt;
+	bio_set_flag(bio, BIO_SEG_VALID);
 	return len;
-
- failed:
-	bvec->bv_page = NULL;
-	bvec->bv_len = 0;
-	bvec->bv_offset = 0;
-	bio->bi_vcnt--;
-	bio->bi_iter.bi_size -= len;
-	blk_recount_segments(q, bio);
-	return 0;
 }
 EXPORT_SYMBOL(__bio_add_pc_page);
 
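The boundary check in can_add_page_to_seg() above exploits the fact that two byte addresses lie in the same boundary-aligned window exactly when OR-ing each with the boundary mask gives the same value. A minimal standalone demonstration, assuming a hypothetical 64 KiB segment boundary (mask 0xffff) and made-up addresses:

    #include <stdio.h>
    #include <stdint.h>

    /* The same comparison can_add_page_to_seg() applies to addr1/addr2. */
    static int same_segment_window(uint64_t addr1, uint64_t addr2, uint64_t mask)
    {
            return (addr1 | mask) == (addr2 | mask);
    }

    int main(void)
    {
            uint64_t mask = 0xffff; /* hypothetical 64 KiB segment boundary */

            /* Both addresses in the same 64 KiB window: prints 1 (mergeable). */
            printf("%d\n", same_segment_window(0x12340200, 0x12348fff, mask));

            /* Second address crosses into the next window: prints 0. */
            printf("%d\n", same_segment_window(0x12340200, 0x12350000, mask));

            return 0;
    }

In the patch, addr1 is the first byte of the existing bvec and addr2 is the last byte of the candidate range, so any merge that would make the bvec straddle a segment boundary is rejected.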