summary refs log tree commit diff stats
path: root/block
diff options
context:
space:
mode:
authorMing Lei <ming.lei@redhat.com>2018-01-09 21:51:29 -0500
committerJens Axboe <axboe@kernel.dk>2018-01-09 22:23:19 -0500
commitb4b6cb613519b7449da510bccf08986371b328cb (patch)
tree5d20eb6cae58d2e0b0a1dc84935cad88244827b5 /block
parent5448aca41cd58e1a20574b6f29a8478bbb123dc3 (diff)
Revert "block: blk-merge: try to make front segments in full size"
This reverts commit a2d37968d784363842f87820a21e106741d28004. If max segment size isn't 512-aligned, this patch won't work well. Also once multipage bvec is enabled, adjacent bvecs won't be physically contiguous if page is added via bio_add_page(), so we don't need this kind of complicated logic. Reported-by: Dmitry Osipenko <digetx@gmail.com> Signed-off-by: Ming Lei <ming.lei@redhat.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r--block/blk-merge.c54
1 file changed, 5 insertions(+), 49 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 446f63e076aa..8452fc7164cc 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -109,7 +109,6 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
109 bool do_split = true; 109 bool do_split = true;
110 struct bio *new = NULL; 110 struct bio *new = NULL;
111 const unsigned max_sectors = get_max_io_size(q, bio); 111 const unsigned max_sectors = get_max_io_size(q, bio);
112 unsigned advance = 0;
113 112
114 bio_for_each_segment(bv, bio, iter) { 113 bio_for_each_segment(bv, bio, iter) {
115 /* 114 /*
@@ -133,32 +132,12 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
133 } 132 }
134 133
135 if (bvprvp && blk_queue_cluster(q)) { 134 if (bvprvp && blk_queue_cluster(q)) {
135 if (seg_size + bv.bv_len > queue_max_segment_size(q))
136 goto new_segment;
136 if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv)) 137 if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
137 goto new_segment; 138 goto new_segment;
138 if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv)) 139 if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
139 goto new_segment; 140 goto new_segment;
140 if (seg_size + bv.bv_len > queue_max_segment_size(q)) {
141 /*
142 * One assumption is that initial value of
143 * @seg_size(equals to bv.bv_len) won't be
144 * bigger than max segment size, but this
145 * becomes false after multipage bvecs.
146 */
147 advance = queue_max_segment_size(q) - seg_size;
148
149 if (advance > 0) {
150 seg_size += advance;
151 sectors += advance >> 9;
152 bv.bv_len -= advance;
153 bv.bv_offset += advance;
154 }
155
156 /*
157 * Still need to put remainder of current
158 * bvec into a new segment.
159 */
160 goto new_segment;
161 }
162 141
163 seg_size += bv.bv_len; 142 seg_size += bv.bv_len;
164 bvprv = bv; 143 bvprv = bv;
@@ -180,12 +159,6 @@ new_segment:
180 seg_size = bv.bv_len; 159 seg_size = bv.bv_len;
181 sectors += bv.bv_len >> 9; 160 sectors += bv.bv_len >> 9;
182 161
183 /* restore the bvec for iterator */
184 if (advance) {
185 bv.bv_len += advance;
186 bv.bv_offset -= advance;
187 advance = 0;
188 }
189 } 162 }
190 163
191 do_split = false; 164 do_split = false;
@@ -386,29 +359,16 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
386{ 359{
387 360
388 int nbytes = bvec->bv_len; 361 int nbytes = bvec->bv_len;
389 unsigned advance = 0;
390 362
391 if (*sg && *cluster) { 363 if (*sg && *cluster) {
364 if ((*sg)->length + nbytes > queue_max_segment_size(q))
365 goto new_segment;
366
392 if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) 367 if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
393 goto new_segment; 368 goto new_segment;
394 if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec)) 369 if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
395 goto new_segment; 370 goto new_segment;
396 371
397 /*
398 * try best to merge part of the bvec into previous
399 * segment and follow same policy with
400 * blk_bio_segment_split()
401 */
402 if ((*sg)->length + nbytes > queue_max_segment_size(q)) {
403 advance = queue_max_segment_size(q) - (*sg)->length;
404 if (advance) {
405 (*sg)->length += advance;
406 bvec->bv_offset += advance;
407 bvec->bv_len -= advance;
408 }
409 goto new_segment;
410 }
411
412 (*sg)->length += nbytes; 372 (*sg)->length += nbytes;
413 } else { 373 } else {
414new_segment: 374new_segment:
@@ -431,10 +391,6 @@ new_segment:
431 391
432 sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset); 392 sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
433 (*nsegs)++; 393 (*nsegs)++;
434
435 /* for making iterator happy */
436 bvec->bv_offset -= advance;
437 bvec->bv_len += advance;
438 } 394 }
439 *bvprv = *bvec; 395 *bvprv = *bvec;
440} 396}