summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMing Lei <ming.lei@redhat.com>2019-07-01 03:14:46 -0400
committerJens Axboe <axboe@kernel.dk>2019-07-01 10:18:54 -0400
commit79d08f89bb1b5c2c1ff90d9bb95497ab9e8aa7e0 (patch)
tree9394c85b2fa7022176c9bcfad49fa809dc6a15aa
parent5be1f9d82fa73c199ebeee2866dbac83e419c897 (diff)
block: fix .bi_size overflow
'bio->bi_iter.bi_size' is 'unsigned int', which can hold at most 4G - 1 bytes. Before 07173c3ec276 ("block: enable multipage bvecs"), one bio could include only a very limited number of pages, usually at most 256, so the fs bio size wouldn't be bigger than 1M bytes most of the time. Since we support multi-page bvecs, in theory more than 1M pages really can be added to one fs bio, especially in case of hugepage, or big writeback with too many dirty pages. Then there is a chance that .bi_size is overflowed. Fix this issue by using bio_full() to check whether the added segment may overflow .bi_size. Cc: Liu Yiding <liuyd.fnst@cn.fujitsu.com> Cc: kernel test robot <rong.a.chen@intel.com> Cc: "Darrick J. Wong" <darrick.wong@oracle.com> Cc: linux-xfs@vger.kernel.org Cc: linux-fsdevel@vger.kernel.org Cc: stable@vger.kernel.org Fixes: 07173c3ec276 ("block: enable multipage bvecs") Reviewed-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Ming Lei <ming.lei@redhat.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--block/bio.c10
-rw-r--r--fs/iomap.c2
-rw-r--r--fs/xfs/xfs_aops.c2
-rw-r--r--include/linux/bio.h18
4 files changed, 23 insertions, 9 deletions
diff --git a/block/bio.c b/block/bio.c
index 933c1e36643b..29cd6cf4da51 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -723,7 +723,7 @@ static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
723 } 723 }
724 } 724 }
725 725
726 if (bio_full(bio)) 726 if (bio_full(bio, len))
727 return 0; 727 return 0;
728 728
729 if (bio->bi_vcnt >= queue_max_segments(q)) 729 if (bio->bi_vcnt >= queue_max_segments(q))
@@ -797,7 +797,7 @@ void __bio_add_page(struct bio *bio, struct page *page,
797 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt]; 797 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
798 798
799 WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); 799 WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
800 WARN_ON_ONCE(bio_full(bio)); 800 WARN_ON_ONCE(bio_full(bio, len));
801 801
802 bv->bv_page = page; 802 bv->bv_page = page;
803 bv->bv_offset = off; 803 bv->bv_offset = off;
@@ -824,7 +824,7 @@ int bio_add_page(struct bio *bio, struct page *page,
824 bool same_page = false; 824 bool same_page = false;
825 825
826 if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) { 826 if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
827 if (bio_full(bio)) 827 if (bio_full(bio, len))
828 return 0; 828 return 0;
829 __bio_add_page(bio, page, len, offset); 829 __bio_add_page(bio, page, len, offset);
830 } 830 }
@@ -909,7 +909,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
909 if (same_page) 909 if (same_page)
910 put_page(page); 910 put_page(page);
911 } else { 911 } else {
912 if (WARN_ON_ONCE(bio_full(bio))) 912 if (WARN_ON_ONCE(bio_full(bio, len)))
913 return -EINVAL; 913 return -EINVAL;
914 __bio_add_page(bio, page, len, offset); 914 __bio_add_page(bio, page, len, offset);
915 } 915 }
@@ -953,7 +953,7 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
953 ret = __bio_iov_bvec_add_pages(bio, iter); 953 ret = __bio_iov_bvec_add_pages(bio, iter);
954 else 954 else
955 ret = __bio_iov_iter_get_pages(bio, iter); 955 ret = __bio_iov_iter_get_pages(bio, iter);
956 } while (!ret && iov_iter_count(iter) && !bio_full(bio)); 956 } while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
957 957
958 if (is_bvec) 958 if (is_bvec)
959 bio_set_flag(bio, BIO_NO_PAGE_REF); 959 bio_set_flag(bio, BIO_NO_PAGE_REF);
diff --git a/fs/iomap.c b/fs/iomap.c
index 4f94788db43b..7a147aa0c4d9 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -333,7 +333,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
333 if (iop) 333 if (iop)
334 atomic_inc(&iop->read_count); 334 atomic_inc(&iop->read_count);
335 335
336 if (!ctx->bio || !is_contig || bio_full(ctx->bio)) { 336 if (!ctx->bio || !is_contig || bio_full(ctx->bio, plen)) {
337 gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL); 337 gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
338 int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT; 338 int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
339 339
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 8da5e6637771..11f703d4a605 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -782,7 +782,7 @@ xfs_add_to_ioend(
782 atomic_inc(&iop->write_count); 782 atomic_inc(&iop->write_count);
783 783
784 if (!merged) { 784 if (!merged) {
785 if (bio_full(wpc->ioend->io_bio)) 785 if (bio_full(wpc->ioend->io_bio, len))
786 xfs_chain_bio(wpc->ioend, wbc, bdev, sector); 786 xfs_chain_bio(wpc->ioend, wbc, bdev, sector);
787 bio_add_page(wpc->ioend->io_bio, page, len, poff); 787 bio_add_page(wpc->ioend->io_bio, page, len, poff);
788 } 788 }
diff --git a/include/linux/bio.h b/include/linux/bio.h
index dc630b05e6e5..3cdb84cdc488 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -102,9 +102,23 @@ static inline void *bio_data(struct bio *bio)
102 return NULL; 102 return NULL;
103} 103}
104 104
105static inline bool bio_full(struct bio *bio) 105/**
106 * bio_full - check if the bio is full
107 * @bio: bio to check
108 * @len: length of one segment to be added
109 *
110 * Return true if @bio is full and one segment with @len bytes can't be
111 * added to the bio, otherwise return false
112 */
113static inline bool bio_full(struct bio *bio, unsigned len)
106{ 114{
107 return bio->bi_vcnt >= bio->bi_max_vecs; 115 if (bio->bi_vcnt >= bio->bi_max_vecs)
116 return true;
117
118 if (bio->bi_iter.bi_size > UINT_MAX - len)
119 return true;
120
121 return false;
108} 122}
109 123
110static inline bool bio_next_segment(const struct bio *bio, 124static inline bool bio_next_segment(const struct bio *bio,