author     Jens Axboe <axboe@fb.com>    2014-06-10 14:53:56 -0400
committer  Jens Axboe <axboe@fb.com>    2014-06-10 14:53:56 -0400
commit     58a4915ad2f8a87f4456aac260396df7e300e6f2
tree       6481a87de33d3caa7b647c3347324eea1c474216
parent     2b8393b43ec672bb263009cd74c056ab01d6ac17
block: ensure that bio_add_page() always accepts a page for an empty bio
Commit 762380ad9322 added support for chunk sizes and disallowed merging
across them, but in doing so it broke the rule that adding a single page
to an empty bio must always succeed. Relax the restriction a bit to allow
for that, as we have always done.

This fixes a crash with mkfs.xfs and 512b sector sizes on NVMe.
Reported-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
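
The relaxed check is easy to demonstrate outside the kernel. Below is a
minimal userspace C sketch of the new logic in bio_add_page();
relaxed_max_sectors and its flattened parameters are hypothetical stand-ins
for the chunk-derived queue limit, bio->bi_iter.bi_size, and the page
length, not kernel API:

#include <stdio.h>

/*
 * Sketch of the relaxed length check: a bio that is still empty
 * (bi_size == 0) may exceed the chunk-derived limit by enough to
 * fit one page. len >> 9 converts a byte count to 512B sectors.
 */
static unsigned int relaxed_max_sectors(unsigned int max_sectors,
					unsigned int bi_size,
					unsigned int len)
{
	if ((max_sectors < (len >> 9)) && bi_size == 0)
		max_sectors = len >> 9;
	return max_sectors;
}

int main(void)
{
	/* 4096 bytes == 8 sectors; only 1 sector left before a chunk edge. */
	printf("empty bio:     %u sectors\n", relaxed_max_sectors(1, 0, 4096));
	printf("non-empty bio: %u sectors\n", relaxed_max_sectors(1, 4096, 4096));
	return 0;
}

The empty bio is granted the full 8 sectors, so the page is accepted; the
non-empty bio keeps the 1-sector limit, preserving the no-merge-across-chunks
behavior.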
 block/bio.c          | 7 ++++++-
 block/blk-settings.c | 5 ++++-
 2 files changed, 10 insertions(+), 2 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index 97e832cc9b9c..2d64488e51c6 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -849,8 +849,13 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 		 unsigned int offset)
 {
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+	unsigned int max_sectors;
 
-	return __bio_add_page(q, bio, page, len, offset, blk_max_size_offset(q, bio->bi_iter.bi_sector));
+	max_sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
+	if ((max_sectors < (len >> 9)) && !bio->bi_iter.bi_size)
+		max_sectors = len >> 9;
+
+	return __bio_add_page(q, bio, page, len, offset, max_sectors);
 }
 EXPORT_SYMBOL(bio_add_page);
 
diff --git a/block/blk-settings.c b/block/blk-settings.c
index a2b9cb195e70..f1a1795a5683 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -285,7 +285,10 @@ EXPORT_SYMBOL(blk_queue_max_hw_sectors);
  * Description:
  *    If a driver doesn't want IOs to cross a given chunk size, it can set
  *    this limit and prevent merging across chunks. Note that the chunk size
- *    must currently be a power-of-2 in sectors.
+ *    must currently be a power-of-2 in sectors. Also note that the block
+ *    layer must accept a page worth of data at any offset. So if the
+ *    crossing of chunks is a hard limitation in the driver, it must still be
+ *    prepared to split single page bios.
  **/
 void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
 {
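
The added doc text means a driver with a hard chunk boundary must still be
prepared to split a single-page bio that straddles it. Here is a hedged
userspace sketch of such a split, reusing the power-of-2 boundary arithmetic
that blk_max_size_offset() is built on; split_at_chunk and the 128-sector
chunk size are illustrative only:

#include <stdio.h>

/*
 * Illustrative splitter: issue an I/O in pieces that never cross a
 * power-of-2 chunk boundary. The room before the next boundary is
 * chunk_sectors - (sector & (chunk_sectors - 1)), the same kind of
 * expression blk_max_size_offset() evaluates per offset.
 */
static void split_at_chunk(unsigned long long sector,
			   unsigned int nr_sectors,
			   unsigned int chunk_sectors)
{
	while (nr_sectors) {
		unsigned int room = chunk_sectors -
			(unsigned int)(sector & (chunk_sectors - 1));
		unsigned int chunk = nr_sectors < room ? nr_sectors : room;

		printf("issue %u sectors at %llu\n", chunk, sector);
		sector += chunk;
		nr_sectors -= chunk;
	}
}

int main(void)
{
	/* A 4K page (8 sectors) starting 2 sectors before a boundary. */
	split_at_chunk(126, 8, 128);	/* issues 2 at 126, then 6 at 128 */
	return 0;
}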