summaryrefslogtreecommitdiffstats
path: root/block/blk-merge.c
diff options
context:
space:
mode:
authorMing Lei <ming.lei@redhat.com>2019-02-15 06:13:12 -0500
committerJens Axboe <axboe@kernel.dk>2019-02-15 10:40:11 -0500
commitdcebd755926b0f39dd1e3ef75bd3b46943400df0 (patch)
tree922b2370aa43de5b73e1b4f57c4267fd48b84de9 /block/blk-merge.c
parentd18d91740ad22e9d7998884c4d80523d0ba95ddf (diff)
block: use bio_for_each_bvec() to compute multi-page bvec count
First it is more efficient to use bio_for_each_bvec() in both blk_bio_segment_split() and __blk_recalc_rq_segments() to compute how many multi-page bvecs there are in the bio. Secondly once bio_for_each_bvec() is used, the bvec may need to be split because its length can be much longer than the max segment size, so we have to split the big bvec into several segments. Thirdly when splitting a multi-page bvec into segments, the max segment limit may be reached, so the bio split needs to be considered under this situation too. Reviewed-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Omar Sandoval <osandov@fb.com> Signed-off-by: Ming Lei <ming.lei@redhat.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-merge.c')
-rw-r--r--block/blk-merge.c103
1 files changed, 83 insertions, 20 deletions
diff --git a/block/blk-merge.c b/block/blk-merge.c
index f85d878f313d..4ef56b2d2aa5 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -161,6 +161,73 @@ static inline unsigned get_max_io_size(struct request_queue *q,
161 return sectors; 161 return sectors;
162} 162}
163 163
/*
 * Return the maximum segment size allowed for a segment starting at
 * @offset: the smaller of queue_max_segment_size() and the number of
 * bytes remaining before @offset would cross the queue's segment
 * boundary.
 */
164static unsigned get_max_segment_size(struct request_queue *q,
165 unsigned offset)
166{
167 unsigned long mask = queue_segment_boundary(q);
168
169 /* default segment boundary mask means no boundary limit */
170 if (mask == BLK_SEG_BOUNDARY_MASK)
171 return queue_max_segment_size(q);
172
 /* bytes from @offset up to and including the next boundary */
173 return min_t(unsigned long, mask - (mask & offset) + 1,
174 queue_max_segment_size(q));
175}
176
177/*
178 * Split the bvec @bv into segments, and update all kinds of
179 * variables.
 *
 * On return, *nsegs, *last_seg_size, *front_seg_size and (if non-NULL)
 * *sectors have been advanced by the consumed part of @bv.  Returns
 * true when @bv was only partially consumed (queue limits were hit and
 * the caller must split here), false when the whole bvec was consumed.
180 */
181static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv,
182 unsigned *nsegs, unsigned *last_seg_size,
183 unsigned *front_seg_size, unsigned *sectors)
184{
185 unsigned len = bv->bv_len;
186 unsigned total_len = 0;
187 unsigned new_nsegs = 0, seg_size = 0;
188
189 /*
190 * Multi-page bvec may be too big to hold in one segment, so the
191 * current bvec has to be split into multiple segments.
192 */
193 while (len && new_nsegs + *nsegs < queue_max_segments(q)) {
194 seg_size = get_max_segment_size(q, bv->bv_offset + total_len);
195 seg_size = min(seg_size, len);
196
197 new_nsegs++;
198 total_len += seg_size;
199 len -= seg_size;
200
 /* no further segment may follow once a virt boundary is hit */
201 if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
202 break;
203 }
204
 /*
 * Nothing consumed: either @bv was empty (return false) or the
 * segment limit was already reached (return true to force a split).
 */
205 if (!new_nsegs)
206 return !!len;
207
208 /* update front segment size */
209 if (!*nsegs) {
210 unsigned first_seg_size;
211
212 if (new_nsegs == 1)
213 first_seg_size = get_max_segment_size(q, bv->bv_offset);
214 else
215 first_seg_size = queue_max_segment_size(q);
216
217 if (*front_seg_size < first_seg_size)
218 *front_seg_size = first_seg_size;
219 }
220
221 /* update other variables */
222 *last_seg_size = seg_size;
223 *nsegs += new_nsegs;
224 if (sectors)
225 *sectors += total_len >> 9;
226
227 /* split in the middle of the bvec if len != 0 */
228 return !!len;
229}
230
164static struct bio *blk_bio_segment_split(struct request_queue *q, 231static struct bio *blk_bio_segment_split(struct request_queue *q,
165 struct bio *bio, 232 struct bio *bio,
166 struct bio_set *bs, 233 struct bio_set *bs,
@@ -174,7 +241,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
174 struct bio *new = NULL; 241 struct bio *new = NULL;
175 const unsigned max_sectors = get_max_io_size(q, bio); 242 const unsigned max_sectors = get_max_io_size(q, bio);
176 243
177 bio_for_each_segment(bv, bio, iter) { 244 bio_for_each_bvec(bv, bio, iter) {
178 /* 245 /*
179 * If the queue doesn't support SG gaps and adding this 246 * If the queue doesn't support SG gaps and adding this
180 * offset would create a gap, disallow it. 247 * offset would create a gap, disallow it.
@@ -189,8 +256,12 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
189 */ 256 */
190 if (nsegs < queue_max_segments(q) && 257 if (nsegs < queue_max_segments(q) &&
191 sectors < max_sectors) { 258 sectors < max_sectors) {
192 nsegs++; 259 /* split in the middle of bvec */
193 sectors = max_sectors; 260 bv.bv_len = (max_sectors - sectors) << 9;
261 bvec_split_segs(q, &bv, &nsegs,
262 &seg_size,
263 &front_seg_size,
264 &sectors);
194 } 265 }
195 goto split; 266 goto split;
196 } 267 }
@@ -212,14 +283,12 @@ new_segment:
212 if (nsegs == queue_max_segments(q)) 283 if (nsegs == queue_max_segments(q))
213 goto split; 284 goto split;
214 285
215 if (nsegs == 1 && seg_size > front_seg_size)
216 front_seg_size = seg_size;
217
218 nsegs++;
219 bvprv = bv; 286 bvprv = bv;
220 bvprvp = &bvprv; 287 bvprvp = &bvprv;
221 seg_size = bv.bv_len; 288
222 sectors += bv.bv_len >> 9; 289 if (bvec_split_segs(q, &bv, &nsegs, &seg_size,
290 &front_seg_size, &sectors))
291 goto split;
223 292
224 } 293 }
225 294
@@ -233,8 +302,6 @@ split:
233 bio = new; 302 bio = new;
234 } 303 }
235 304
236 if (nsegs == 1 && seg_size > front_seg_size)
237 front_seg_size = seg_size;
238 bio->bi_seg_front_size = front_seg_size; 305 bio->bi_seg_front_size = front_seg_size;
239 if (seg_size > bio->bi_seg_back_size) 306 if (seg_size > bio->bi_seg_back_size)
240 bio->bi_seg_back_size = seg_size; 307 bio->bi_seg_back_size = seg_size;
@@ -297,6 +364,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
297 struct bio_vec bv, bvprv = { NULL }; 364 struct bio_vec bv, bvprv = { NULL };
298 int prev = 0; 365 int prev = 0;
299 unsigned int seg_size, nr_phys_segs; 366 unsigned int seg_size, nr_phys_segs;
367 unsigned front_seg_size = bio->bi_seg_front_size;
300 struct bio *fbio, *bbio; 368 struct bio *fbio, *bbio;
301 struct bvec_iter iter; 369 struct bvec_iter iter;
302 370
@@ -316,7 +384,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
316 seg_size = 0; 384 seg_size = 0;
317 nr_phys_segs = 0; 385 nr_phys_segs = 0;
318 for_each_bio(bio) { 386 for_each_bio(bio) {
319 bio_for_each_segment(bv, bio, iter) { 387 bio_for_each_bvec(bv, bio, iter) {
320 /* 388 /*
321 * If SG merging is disabled, each bio vector is 389 * If SG merging is disabled, each bio vector is
322 * a segment 390 * a segment
@@ -336,20 +404,15 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
336 continue; 404 continue;
337 } 405 }
338new_segment: 406new_segment:
339 if (nr_phys_segs == 1 && seg_size >
340 fbio->bi_seg_front_size)
341 fbio->bi_seg_front_size = seg_size;
342
343 nr_phys_segs++;
344 bvprv = bv; 407 bvprv = bv;
345 prev = 1; 408 prev = 1;
346 seg_size = bv.bv_len; 409 bvec_split_segs(q, &bv, &nr_phys_segs, &seg_size,
410 &front_seg_size, NULL);
347 } 411 }
348 bbio = bio; 412 bbio = bio;
349 } 413 }
350 414
351 if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size) 415 fbio->bi_seg_front_size = front_seg_size;
352 fbio->bi_seg_front_size = seg_size;
353 if (seg_size > bbio->bi_seg_back_size) 416 if (seg_size > bbio->bi_seg_back_size)
354 bbio->bi_seg_back_size = seg_size; 417 bbio->bi_seg_back_size = seg_size;
355 418