aboutsummaryrefslogtreecommitdiffstats
path: root/block
diff options
context:
space:
mode:
authorMing Lei <tom.leiming@gmail.com>2016-01-22 19:05:33 -0500
committerJens Axboe <axboe@fb.com>2016-01-22 22:42:30 -0500
commitd0e5fbb01a67e400e82fefe4896ea40c6447ab98 (patch)
tree3c8d2fd4fe59ab2c278d26a2bc37209ba4765195 /block
parentb82dde0230439215b55e545880e90337ee16f51a (diff)
block: fix bio splitting on max sectors
After commit e36f62042880 ("block: split bios to max possible length"), a bio can be split in the middle of a vector entry; it is then easy to split out a bio whose size is not aligned with the logical block size, especially when the block size is bigger than 512 bytes. This patch fixes the issue by making the max I/O size aligned to the logical block size. Fixes: e36f62042880 ("block: split bios to max possible length") Reported-by: Stefan Haberland <sth@linux.vnet.ibm.com> Cc: Keith Busch <keith.busch@intel.com> Suggested-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Ming Lei <tom.leiming@gmail.com> Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block')
-rw-r--r--block/blk-merge.c26
1 file changed, 19 insertions, 7 deletions
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 1699df5b0493..888a7fec81f7 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -70,6 +70,18 @@ static struct bio *blk_bio_write_same_split(struct request_queue *q,
 	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
 }
 
+static inline unsigned get_max_io_size(struct request_queue *q,
+				       struct bio *bio)
+{
+	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
+	unsigned mask = queue_logical_block_size(q) - 1;
+
+	/* aligned to logical block size */
+	sectors &= ~(mask >> 9);
+
+	return sectors;
+}
+
 static struct bio *blk_bio_segment_split(struct request_queue *q,
 					 struct bio *bio,
 					 struct bio_set *bs,
@@ -81,6 +93,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 	unsigned front_seg_size = bio->bi_seg_front_size;
 	bool do_split = true;
 	struct bio *new = NULL;
+	const unsigned max_sectors = get_max_io_size(q, bio);
 
 	bio_for_each_segment(bv, bio, iter) {
 		/*
@@ -90,20 +103,19 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
 			goto split;
 
-		if (sectors + (bv.bv_len >> 9) >
-		    blk_max_size_offset(q, bio->bi_iter.bi_sector)) {
+		if (sectors + (bv.bv_len >> 9) > max_sectors) {
 			/*
 			 * Consider this a new segment if we're splitting in
 			 * the middle of this vector.
 			 */
 			if (nsegs < queue_max_segments(q) &&
-			    sectors < blk_max_size_offset(q,
-						bio->bi_iter.bi_sector)) {
+			    sectors < max_sectors) {
 				nsegs++;
-				sectors = blk_max_size_offset(q,
-						bio->bi_iter.bi_sector);
+				sectors = max_sectors;
 			}
-			goto split;
+			if (sectors)
+				goto split;
+			/* Make this single bvec as the 1st segment */
 		}
 
 		if (bvprvp && blk_queue_cluster(q)) {