diff options
author | Christoph Hellwig <hch@lst.de> | 2019-05-21 03:01:43 -0400 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2019-05-23 12:25:26 -0400 |
commit | 6869875fbc04042ad01654591da60862706e86e3 (patch) | |
tree | d4949f0c0d2a96614302a1873c148f915d5eaa1f /block/blk-merge.c | |
parent | 200a9aff7b02feea30b01141b0df9bc19457a232 (diff) |
block: remove the bi_seg_{front,back}_size fields in struct bio
At this point these fields aren't used for anything, so we can remove
them.
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-merge.c')
-rw-r--r-- | block/blk-merge.c | 94 |
1 files changed, 12 insertions, 82 deletions
diff --git a/block/blk-merge.c b/block/blk-merge.c index eee2c02c50ce..17713d7d98d5 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c | |||
@@ -162,8 +162,7 @@ static unsigned get_max_segment_size(struct request_queue *q, | |||
162 | * variables. | 162 | * variables. |
163 | */ | 163 | */ |
164 | static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv, | 164 | static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv, |
165 | unsigned *nsegs, unsigned *last_seg_size, | 165 | unsigned *nsegs, unsigned *sectors, unsigned max_segs) |
166 | unsigned *front_seg_size, unsigned *sectors, unsigned max_segs) | ||
167 | { | 166 | { |
168 | unsigned len = bv->bv_len; | 167 | unsigned len = bv->bv_len; |
169 | unsigned total_len = 0; | 168 | unsigned total_len = 0; |
@@ -185,28 +184,12 @@ static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv, | |||
185 | break; | 184 | break; |
186 | } | 185 | } |
187 | 186 | ||
188 | if (!new_nsegs) | 187 | if (new_nsegs) { |
189 | return !!len; | 188 | *nsegs += new_nsegs; |
190 | 189 | if (sectors) | |
191 | /* update front segment size */ | 190 | *sectors += total_len >> 9; |
192 | if (!*nsegs) { | ||
193 | unsigned first_seg_size; | ||
194 | |||
195 | if (new_nsegs == 1) | ||
196 | first_seg_size = get_max_segment_size(q, bv->bv_offset); | ||
197 | else | ||
198 | first_seg_size = queue_max_segment_size(q); | ||
199 | |||
200 | if (*front_seg_size < first_seg_size) | ||
201 | *front_seg_size = first_seg_size; | ||
202 | } | 191 | } |
203 | 192 | ||
204 | /* update other varibles */ | ||
205 | *last_seg_size = seg_size; | ||
206 | *nsegs += new_nsegs; | ||
207 | if (sectors) | ||
208 | *sectors += total_len >> 9; | ||
209 | |||
210 | /* split in the middle of the bvec if len != 0 */ | 193 | /* split in the middle of the bvec if len != 0 */ |
211 | return !!len; | 194 | return !!len; |
212 | } | 195 | } |
@@ -218,8 +201,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, | |||
218 | { | 201 | { |
219 | struct bio_vec bv, bvprv, *bvprvp = NULL; | 202 | struct bio_vec bv, bvprv, *bvprvp = NULL; |
220 | struct bvec_iter iter; | 203 | struct bvec_iter iter; |
221 | unsigned seg_size = 0, nsegs = 0, sectors = 0; | 204 | unsigned nsegs = 0, sectors = 0; |
222 | unsigned front_seg_size = bio->bi_seg_front_size; | ||
223 | bool do_split = true; | 205 | bool do_split = true; |
224 | struct bio *new = NULL; | 206 | struct bio *new = NULL; |
225 | const unsigned max_sectors = get_max_io_size(q, bio); | 207 | const unsigned max_sectors = get_max_io_size(q, bio); |
@@ -243,8 +225,6 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, | |||
243 | /* split in the middle of bvec */ | 225 | /* split in the middle of bvec */ |
244 | bv.bv_len = (max_sectors - sectors) << 9; | 226 | bv.bv_len = (max_sectors - sectors) << 9; |
245 | bvec_split_segs(q, &bv, &nsegs, | 227 | bvec_split_segs(q, &bv, &nsegs, |
246 | &seg_size, | ||
247 | &front_seg_size, | ||
248 | &sectors, max_segs); | 228 | &sectors, max_segs); |
249 | } | 229 | } |
250 | goto split; | 230 | goto split; |
@@ -258,12 +238,9 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, | |||
258 | 238 | ||
259 | if (bv.bv_offset + bv.bv_len <= PAGE_SIZE) { | 239 | if (bv.bv_offset + bv.bv_len <= PAGE_SIZE) { |
260 | nsegs++; | 240 | nsegs++; |
261 | seg_size = bv.bv_len; | ||
262 | sectors += bv.bv_len >> 9; | 241 | sectors += bv.bv_len >> 9; |
263 | if (nsegs == 1 && seg_size > front_seg_size) | 242 | } else if (bvec_split_segs(q, &bv, &nsegs, &sectors, |
264 | front_seg_size = seg_size; | 243 | max_segs)) { |
265 | } else if (bvec_split_segs(q, &bv, &nsegs, &seg_size, | ||
266 | &front_seg_size, &sectors, max_segs)) | ||
267 | goto split; | 244 | goto split; |
268 | } | 245 | } |
269 | } | 246 | } |
@@ -278,10 +255,6 @@ split: | |||
278 | bio = new; | 255 | bio = new; |
279 | } | 256 | } |
280 | 257 | ||
281 | bio->bi_seg_front_size = front_seg_size; | ||
282 | if (seg_size > bio->bi_seg_back_size) | ||
283 | bio->bi_seg_back_size = seg_size; | ||
284 | |||
285 | return do_split ? new : NULL; | 258 | return do_split ? new : NULL; |
286 | } | 259 | } |
287 | 260 | ||
@@ -336,17 +309,13 @@ EXPORT_SYMBOL(blk_queue_split); | |||
336 | static unsigned int __blk_recalc_rq_segments(struct request_queue *q, | 309 | static unsigned int __blk_recalc_rq_segments(struct request_queue *q, |
337 | struct bio *bio) | 310 | struct bio *bio) |
338 | { | 311 | { |
339 | struct bio_vec uninitialized_var(bv), bvprv = { NULL }; | 312 | unsigned int nr_phys_segs = 0; |
340 | unsigned int seg_size, nr_phys_segs; | ||
341 | unsigned front_seg_size; | ||
342 | struct bio *fbio, *bbio; | ||
343 | struct bvec_iter iter; | 313 | struct bvec_iter iter; |
314 | struct bio_vec bv; | ||
344 | 315 | ||
345 | if (!bio) | 316 | if (!bio) |
346 | return 0; | 317 | return 0; |
347 | 318 | ||
348 | front_seg_size = bio->bi_seg_front_size; | ||
349 | |||
350 | switch (bio_op(bio)) { | 319 | switch (bio_op(bio)) { |
351 | case REQ_OP_DISCARD: | 320 | case REQ_OP_DISCARD: |
352 | case REQ_OP_SECURE_ERASE: | 321 | case REQ_OP_SECURE_ERASE: |
@@ -356,23 +325,11 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, | |||
356 | return 1; | 325 | return 1; |
357 | } | 326 | } |
358 | 327 | ||
359 | fbio = bio; | ||
360 | seg_size = 0; | ||
361 | nr_phys_segs = 0; | ||
362 | for_each_bio(bio) { | 328 | for_each_bio(bio) { |
363 | bio_for_each_bvec(bv, bio, iter) { | 329 | bio_for_each_bvec(bv, bio, iter) |
364 | bvec_split_segs(q, &bv, &nr_phys_segs, &seg_size, | 330 | bvec_split_segs(q, &bv, &nr_phys_segs, NULL, UINT_MAX); |
365 | &front_seg_size, NULL, UINT_MAX); | ||
366 | } | ||
367 | bbio = bio; | ||
368 | if (likely(bio->bi_iter.bi_size)) | ||
369 | bvprv = bv; | ||
370 | } | 331 | } |
371 | 332 | ||
372 | fbio->bi_seg_front_size = front_seg_size; | ||
373 | if (seg_size > bbio->bi_seg_back_size) | ||
374 | bbio->bi_seg_back_size = seg_size; | ||
375 | |||
376 | return nr_phys_segs; | 333 | return nr_phys_segs; |
377 | } | 334 | } |
378 | 335 | ||
@@ -392,24 +349,6 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio) | |||
392 | bio_set_flag(bio, BIO_SEG_VALID); | 349 | bio_set_flag(bio, BIO_SEG_VALID); |
393 | } | 350 | } |
394 | 351 | ||
395 | static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, | ||
396 | struct bio *nxt) | ||
397 | { | ||
398 | struct bio_vec end_bv = { NULL }, nxt_bv; | ||
399 | |||
400 | if (bio->bi_seg_back_size + nxt->bi_seg_front_size > | ||
401 | queue_max_segment_size(q)) | ||
402 | return 0; | ||
403 | |||
404 | if (!bio_has_data(bio)) | ||
405 | return 1; | ||
406 | |||
407 | bio_get_last_bvec(bio, &end_bv); | ||
408 | bio_get_first_bvec(nxt, &nxt_bv); | ||
409 | |||
410 | return biovec_phys_mergeable(q, &end_bv, &nxt_bv); | ||
411 | } | ||
412 | |||
413 | static inline struct scatterlist *blk_next_sg(struct scatterlist **sg, | 352 | static inline struct scatterlist *blk_next_sg(struct scatterlist **sg, |
414 | struct scatterlist *sglist) | 353 | struct scatterlist *sglist) |
415 | { | 354 | { |
@@ -669,8 +608,6 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req, | |||
669 | struct request *next) | 608 | struct request *next) |
670 | { | 609 | { |
671 | int total_phys_segments; | 610 | int total_phys_segments; |
672 | unsigned int seg_size = | ||
673 | req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size; | ||
674 | 611 | ||
675 | if (req_gap_back_merge(req, next->bio)) | 612 | if (req_gap_back_merge(req, next->bio)) |
676 | return 0; | 613 | return 0; |
@@ -683,13 +620,6 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req, | |||
683 | return 0; | 620 | return 0; |
684 | 621 | ||
685 | total_phys_segments = req->nr_phys_segments + next->nr_phys_segments; | 622 | total_phys_segments = req->nr_phys_segments + next->nr_phys_segments; |
686 | if (blk_phys_contig_segment(q, req->biotail, next->bio)) { | ||
687 | if (req->nr_phys_segments == 1) | ||
688 | req->bio->bi_seg_front_size = seg_size; | ||
689 | if (next->nr_phys_segments == 1) | ||
690 | next->biotail->bi_seg_back_size = seg_size; | ||
691 | } | ||
692 | |||
693 | if (total_phys_segments > queue_max_segments(q)) | 623 | if (total_phys_segments > queue_max_segments(q)) |
694 | return 0; | 624 | return 0; |
695 | 625 | ||