author	Christoph Hellwig <hch@lst.de>	2019-06-06 06:29:01 -0400
committer	Jens Axboe <axboe@kernel.dk>	2019-06-20 12:29:22 -0400
commit	14ccb66b3f585b2bc21e7256c96090abed5a512c (patch)
tree	7b0d48d59ee474ac1a590352507c7890c16f1e8d
parent	f924cddebc900f7cb10d5538d69523e558fa681c (diff)
block: remove the bi_phys_segments field in struct bio
We only need the number of segments in the blk-mq submission path. Remove the field from struct bio, and return it from a variant of blk_queue_split instead so that it can be passed as an argument to the functions that need the value.

This also means we stop recounting segments except for cloning and partial segments.

To keep the number of arguments in this hot path down, drop the pointless struct request_queue argument from any function that grew a nr_segs argument.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
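In rough outline, the submission path now computes the segment count once at split time and threads it through the merge and request-setup helpers. A condensed sketch of the resulting flow in blk_mq_make_request(), assembled from the hunks below (not a literal excerpt; declarations of rq and same_queue_rq are elided):

	static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
	{
		unsigned int nr_segs;

		blk_queue_bounce(q, &bio);
		/* split the bio and compute its segment count in one pass */
		__blk_queue_split(q, &bio, &nr_segs);

		/* every merge attempt reuses the precomputed count ... */
		if (blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
			return BLK_QC_T_NONE;
		if (blk_mq_sched_bio_merge(q, bio, nr_segs))
			return BLK_QC_T_NONE;

		/* ... as does request setup, via blk_rq_bio_prep() */
		blk_mq_bio_to_request(rq, bio, nr_segs);
		...
	}

Callers outside this hot path keep the exported blk_queue_split() signature, which now simply discards the count.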
-rw-r--r--	Documentation/block/biodoc.txt	1
-rw-r--r--	block/bfq-iosched.c	5
-rw-r--r--	block/bio.c	15
-rw-r--r--	block/blk-core.c	32
-rw-r--r--	block/blk-map.c	10
-rw-r--r--	block/blk-merge.c	75
-rw-r--r--	block/blk-mq-sched.c	26
-rw-r--r--	block/blk-mq-sched.h	10
-rw-r--r--	block/blk-mq.c	23
-rw-r--r--	block/blk.h	23
-rw-r--r--	block/kyber-iosched.c	5
-rw-r--r--	block/mq-deadline.c	5
-rw-r--r--	drivers/md/raid5.c	1
-rw-r--r--	include/linux/bio.h	1
-rw-r--r--	include/linux/blk-mq.h	2
-rw-r--r--	include/linux/blk_types.h	6
-rw-r--r--	include/linux/blkdev.h	1
-rw-r--r--	include/linux/elevator.h	2
18 files changed, 106 insertions, 137 deletions
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index ac18b488cb5e..31c177663ed5 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -436,7 +436,6 @@ struct bio {
 	struct bvec_iter	bi_iter;	/* current index into bio_vec array */
 
 	unsigned int	bi_size;	/* total size in bytes */
-	unsigned short	bi_phys_segments; /* segments after physaddr coalesce*/
 	unsigned short	bi_hw_segments;	/* segments after DMA remapping */
 	unsigned int	bi_max;		/* max bio_vecs we can hold
 					   used as index into pool */
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index f8d430f88d25..a6bf842cbe16 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -2027,7 +2027,8 @@ static void bfq_remove_request(struct request_queue *q,
 
 }
 
-static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
+static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
+		unsigned int nr_segs)
 {
 	struct request_queue *q = hctx->queue;
 	struct bfq_data *bfqd = q->elevator->elevator_data;
@@ -2050,7 +2051,7 @@ static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
 	bfqd->bio_bfqq = NULL;
 	bfqd->bio_bic = bic;
 
-	ret = blk_mq_sched_try_merge(q, bio, &free);
+	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
 
 	if (free)
 		blk_mq_free_request(free);
diff --git a/block/bio.c b/block/bio.c
index 4bcdcd3f63f4..ad9c3aa9bf7d 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -558,14 +558,6 @@ void bio_put(struct bio *bio)
 }
 EXPORT_SYMBOL(bio_put);
 
-int bio_phys_segments(struct request_queue *q, struct bio *bio)
-{
-	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
-		blk_recount_segments(q, bio);
-
-	return bio->bi_phys_segments;
-}
-
 /**
  * __bio_clone_fast - clone a bio that shares the original bio's biovec
  * @bio: destination bio
@@ -739,7 +731,7 @@ static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
 	if (bio_full(bio))
 		return 0;
 
-	if (bio->bi_phys_segments >= queue_max_segments(q))
+	if (bio->bi_vcnt >= queue_max_segments(q))
 		return 0;
 
 	bvec = &bio->bi_io_vec[bio->bi_vcnt];
@@ -749,8 +741,6 @@ static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
 	bio->bi_vcnt++;
  done:
 	bio->bi_iter.bi_size += len;
-	bio->bi_phys_segments = bio->bi_vcnt;
-	bio_set_flag(bio, BIO_SEG_VALID);
 	return len;
 }
756 746
@@ -1909,10 +1899,7 @@ void bio_trim(struct bio *bio, int offset, int size)
 	if (offset == 0 && size == bio->bi_iter.bi_size)
 		return;
 
-	bio_clear_flag(bio, BIO_SEG_VALID);
-
 	bio_advance(bio, offset << 9);
-
 	bio->bi_iter.bi_size = size;
 
 	if (bio_integrity(bio))
diff --git a/block/blk-core.c b/block/blk-core.c
index d1c7c69a20dd..ef998a724b27 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -550,15 +550,15 @@ void blk_put_request(struct request *req)
 }
 EXPORT_SYMBOL(blk_put_request);
 
-bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
-		struct bio *bio)
+bool bio_attempt_back_merge(struct request *req, struct bio *bio,
+		unsigned int nr_segs)
 {
 	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
 
-	if (!ll_back_merge_fn(q, req, bio))
+	if (!ll_back_merge_fn(req, bio, nr_segs))
 		return false;
 
-	trace_block_bio_backmerge(q, req, bio);
+	trace_block_bio_backmerge(req->q, req, bio);
 
 	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
 		blk_rq_set_mixed_merge(req);
@@ -571,15 +571,15 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 	return true;
 }
 
-bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
-		struct bio *bio)
+bool bio_attempt_front_merge(struct request *req, struct bio *bio,
+		unsigned int nr_segs)
 {
 	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
 
-	if (!ll_front_merge_fn(q, req, bio))
+	if (!ll_front_merge_fn(req, bio, nr_segs))
 		return false;
 
-	trace_block_bio_frontmerge(q, req, bio);
+	trace_block_bio_frontmerge(req->q, req, bio);
 
 	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
 		blk_rq_set_mixed_merge(req);
@@ -621,6 +621,7 @@ no_merge:
  * blk_attempt_plug_merge - try to merge with %current's plugged list
  * @q: request_queue new bio is being queued at
  * @bio: new bio being queued
+ * @nr_segs: number of segments in @bio
  * @same_queue_rq: pointer to &struct request that gets filled in when
  * another request associated with @q is found on the plug list
  * (optional, may be %NULL)
@@ -639,7 +640,7 @@ no_merge:
  * Caller must ensure !blk_queue_nomerges(q) beforehand.
  */
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-		struct request **same_queue_rq)
+		unsigned int nr_segs, struct request **same_queue_rq)
 {
 	struct blk_plug *plug;
 	struct request *rq;
@@ -668,10 +669,10 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 
 		switch (blk_try_merge(rq, bio)) {
 		case ELEVATOR_BACK_MERGE:
-			merged = bio_attempt_back_merge(q, rq, bio);
+			merged = bio_attempt_back_merge(rq, bio, nr_segs);
 			break;
 		case ELEVATOR_FRONT_MERGE:
-			merged = bio_attempt_front_merge(q, rq, bio);
+			merged = bio_attempt_front_merge(rq, bio, nr_segs);
 			break;
 		case ELEVATOR_DISCARD_MERGE:
 			merged = bio_attempt_discard_merge(q, rq, bio);
@@ -1427,14 +1428,9 @@ bool blk_update_request(struct request *req, blk_status_t error,
 }
 EXPORT_SYMBOL_GPL(blk_update_request);
 
-void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
-		struct bio *bio)
+void blk_rq_bio_prep(struct request *rq, struct bio *bio, unsigned int nr_segs)
 {
-	if (bio_has_data(bio))
-		rq->nr_phys_segments = bio_phys_segments(q, bio);
-	else if (bio_op(bio) == REQ_OP_DISCARD)
-		rq->nr_phys_segments = 1;
-
+	rq->nr_phys_segments = nr_segs;
 	rq->__data_len = bio->bi_iter.bi_size;
 	rq->bio = rq->biotail = bio;
 	rq->ioprio = bio_prio(bio);
diff --git a/block/blk-map.c b/block/blk-map.c
index db9373bd31ac..3a62e471d81b 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -18,13 +18,19 @@
 int blk_rq_append_bio(struct request *rq, struct bio **bio)
 {
 	struct bio *orig_bio = *bio;
+	struct bvec_iter iter;
+	struct bio_vec bv;
+	unsigned int nr_segs = 0;
 
 	blk_queue_bounce(rq->q, bio);
 
+	bio_for_each_bvec(bv, *bio, iter)
+		nr_segs++;
+
 	if (!rq->bio) {
-		blk_rq_bio_prep(rq->q, rq, *bio);
+		blk_rq_bio_prep(rq, *bio, nr_segs);
 	} else {
-		if (!ll_back_merge_fn(rq->q, rq, *bio)) {
+		if (!ll_back_merge_fn(rq, *bio, nr_segs)) {
 			if (orig_bio != *bio) {
 				bio_put(*bio);
 				*bio = orig_bio;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 17713d7d98d5..72b4fd89a22d 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -258,32 +258,29 @@ split:
 	return do_split ? new : NULL;
 }
 
-void blk_queue_split(struct request_queue *q, struct bio **bio)
+void __blk_queue_split(struct request_queue *q, struct bio **bio,
+		unsigned int *nr_segs)
 {
-	struct bio *split, *res;
-	unsigned nsegs;
+	struct bio *split;
 
 	switch (bio_op(*bio)) {
 	case REQ_OP_DISCARD:
 	case REQ_OP_SECURE_ERASE:
-		split = blk_bio_discard_split(q, *bio, &q->bio_split, &nsegs);
+		split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
 		break;
 	case REQ_OP_WRITE_ZEROES:
-		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split, &nsegs);
+		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
+				nr_segs);
 		break;
 	case REQ_OP_WRITE_SAME:
-		split = blk_bio_write_same_split(q, *bio, &q->bio_split, &nsegs);
+		split = blk_bio_write_same_split(q, *bio, &q->bio_split,
+				nr_segs);
 		break;
 	default:
-		split = blk_bio_segment_split(q, *bio, &q->bio_split, &nsegs);
+		split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
 		break;
 	}
 
-	/* physical segments can be figured out during splitting */
-	res = split ? split : *bio;
-	res->bi_phys_segments = nsegs;
-	bio_set_flag(res, BIO_SEG_VALID);
-
 	if (split) {
 		/* there isn't chance to merge the splitted bio */
 		split->bi_opf |= REQ_NOMERGE;
@@ -304,6 +301,13 @@ void blk_queue_split(struct request_queue *q, struct bio **bio)
 		*bio = split;
 	}
 }
+
+void blk_queue_split(struct request_queue *q, struct bio **bio)
+{
+	unsigned int nr_segs;
+
+	__blk_queue_split(q, bio, &nr_segs);
+}
 EXPORT_SYMBOL(blk_queue_split);
 
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
@@ -338,17 +342,6 @@ void blk_recalc_rq_segments(struct request *rq)
 	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
 }
 
-void blk_recount_segments(struct request_queue *q, struct bio *bio)
-{
-	struct bio *nxt = bio->bi_next;
-
-	bio->bi_next = NULL;
-	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
-	bio->bi_next = nxt;
-
-	bio_set_flag(bio, BIO_SEG_VALID);
-}
-
 static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
 		struct scatterlist *sglist)
 {
@@ -519,16 +512,13 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 }
 EXPORT_SYMBOL(blk_rq_map_sg);
 
-static inline int ll_new_hw_segment(struct request_queue *q,
-				    struct request *req,
-				    struct bio *bio)
+static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
+		unsigned int nr_phys_segs)
 {
-	int nr_phys_segs = bio_phys_segments(q, bio);
-
-	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
+	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(req->q))
 		goto no_merge;
 
-	if (blk_integrity_merge_bio(q, req, bio) == false)
+	if (blk_integrity_merge_bio(req->q, req, bio) == false)
 		goto no_merge;
 
 	/*
@@ -539,12 +529,11 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 	return 1;
 
 no_merge:
-	req_set_nomerge(q, req);
+	req_set_nomerge(req->q, req);
 	return 0;
 }
 
-int ll_back_merge_fn(struct request_queue *q, struct request *req,
-		     struct bio *bio)
+int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
 {
 	if (req_gap_back_merge(req, bio))
 		return 0;
@@ -553,21 +542,15 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 		return 0;
 	if (blk_rq_sectors(req) + bio_sectors(bio) >
 	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
-		req_set_nomerge(q, req);
+		req_set_nomerge(req->q, req);
 		return 0;
 	}
-	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
-		blk_recount_segments(q, req->biotail);
-	if (!bio_flagged(bio, BIO_SEG_VALID))
-		blk_recount_segments(q, bio);
 
-	return ll_new_hw_segment(q, req, bio);
+	return ll_new_hw_segment(req, bio, nr_segs);
 }
 
-int ll_front_merge_fn(struct request_queue *q, struct request *req,
-		      struct bio *bio)
+int ll_front_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
 {
-
 	if (req_gap_front_merge(req, bio))
 		return 0;
 	if (blk_integrity_rq(req) &&
@@ -575,15 +558,11 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		return 0;
 	if (blk_rq_sectors(req) + bio_sectors(bio) >
 	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
-		req_set_nomerge(q, req);
+		req_set_nomerge(req->q, req);
 		return 0;
 	}
-	if (!bio_flagged(bio, BIO_SEG_VALID))
-		blk_recount_segments(q, bio);
-	if (!bio_flagged(req->bio, BIO_SEG_VALID))
-		blk_recount_segments(q, req->bio);
 
-	return ll_new_hw_segment(q, req, bio);
+	return ll_new_hw_segment(req, bio, nr_segs);
 }
 
 static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 2766066a15db..956a7aa9a637 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -224,7 +224,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 }
 
 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
-		struct request **merged_request)
+		unsigned int nr_segs, struct request **merged_request)
 {
 	struct request *rq;
 
@@ -232,7 +232,7 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
 	case ELEVATOR_BACK_MERGE:
 		if (!blk_mq_sched_allow_merge(q, rq, bio))
 			return false;
-		if (!bio_attempt_back_merge(q, rq, bio))
+		if (!bio_attempt_back_merge(rq, bio, nr_segs))
 			return false;
 		*merged_request = attempt_back_merge(q, rq);
 		if (!*merged_request)
@@ -241,7 +241,7 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
 	case ELEVATOR_FRONT_MERGE:
 		if (!blk_mq_sched_allow_merge(q, rq, bio))
 			return false;
-		if (!bio_attempt_front_merge(q, rq, bio))
+		if (!bio_attempt_front_merge(rq, bio, nr_segs))
 			return false;
 		*merged_request = attempt_front_merge(q, rq);
 		if (!*merged_request)
@@ -260,7 +260,7 @@ EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
  * of them.
  */
 bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
-		struct bio *bio)
+		struct bio *bio, unsigned int nr_segs)
 {
 	struct request *rq;
 	int checked = 8;
@@ -277,11 +277,13 @@ bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
 		switch (blk_try_merge(rq, bio)) {
 		case ELEVATOR_BACK_MERGE:
 			if (blk_mq_sched_allow_merge(q, rq, bio))
-				merged = bio_attempt_back_merge(q, rq, bio);
+				merged = bio_attempt_back_merge(rq, bio,
+						nr_segs);
 			break;
 		case ELEVATOR_FRONT_MERGE:
 			if (blk_mq_sched_allow_merge(q, rq, bio))
-				merged = bio_attempt_front_merge(q, rq, bio);
+				merged = bio_attempt_front_merge(rq, bio,
+						nr_segs);
 			break;
 		case ELEVATOR_DISCARD_MERGE:
 			merged = bio_attempt_discard_merge(q, rq, bio);
@@ -304,13 +306,14 @@ EXPORT_SYMBOL_GPL(blk_mq_bio_list_merge);
  */
 static bool blk_mq_attempt_merge(struct request_queue *q,
 				 struct blk_mq_hw_ctx *hctx,
-				 struct blk_mq_ctx *ctx, struct bio *bio)
+				 struct blk_mq_ctx *ctx, struct bio *bio,
+				 unsigned int nr_segs)
 {
 	enum hctx_type type = hctx->type;
 
 	lockdep_assert_held(&ctx->lock);
 
-	if (blk_mq_bio_list_merge(q, &ctx->rq_lists[type], bio)) {
+	if (blk_mq_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) {
 		ctx->rq_merged++;
 		return true;
 	}
@@ -318,7 +321,8 @@ static bool blk_mq_attempt_merge(struct request_queue *q,
 	return false;
 }
 
-bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
+bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
+		unsigned int nr_segs)
 {
 	struct elevator_queue *e = q->elevator;
 	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
@@ -328,7 +332,7 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
 
 	if (e && e->type->ops.bio_merge) {
 		blk_mq_put_ctx(ctx);
-		return e->type->ops.bio_merge(hctx, bio);
+		return e->type->ops.bio_merge(hctx, bio, nr_segs);
 	}
 
 	type = hctx->type;
@@ -336,7 +340,7 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
 	    !list_empty_careful(&ctx->rq_lists[type])) {
 		/* default per sw-queue merge */
 		spin_lock(&ctx->lock);
-		ret = blk_mq_attempt_merge(q, hctx, ctx, bio);
+		ret = blk_mq_attempt_merge(q, hctx, ctx, bio, nr_segs);
 		spin_unlock(&ctx->lock);
 	}
 
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 3cf92cbbd8ac..cf22ab00fefb 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -12,8 +12,9 @@ void blk_mq_sched_assign_ioc(struct request *rq);
 
 void blk_mq_sched_request_inserted(struct request *rq);
 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
-		struct request **merged_request);
-bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
+		unsigned int nr_segs, struct request **merged_request);
+bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
+		unsigned int nr_segs);
 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
 void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
 void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
@@ -31,12 +32,13 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
 void blk_mq_sched_free_requests(struct request_queue *q);
 
 static inline bool
-blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
+blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
+		unsigned int nr_segs)
 {
 	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
 		return false;
 
-	return __blk_mq_sched_bio_merge(q, bio);
+	return __blk_mq_sched_bio_merge(q, bio, nr_segs);
 }
 
 static inline bool
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 61457bffa55f..d89383847d09 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1764,14 +1764,15 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	}
 }
 
-static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
+static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
+		unsigned int nr_segs)
 {
 	if (bio->bi_opf & REQ_RAHEAD)
 		rq->cmd_flags |= REQ_FAILFAST_MASK;
 
 	rq->__sector = bio->bi_iter.bi_sector;
 	rq->write_hint = bio->bi_write_hint;
-	blk_rq_bio_prep(rq->q, rq, bio);
+	blk_rq_bio_prep(rq, bio, nr_segs);
 
 	blk_account_io_start(rq, true);
 }
@@ -1941,20 +1942,20 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	struct request *rq;
 	struct blk_plug *plug;
 	struct request *same_queue_rq = NULL;
+	unsigned int nr_segs;
 	blk_qc_t cookie;
 
 	blk_queue_bounce(q, &bio);
-
-	blk_queue_split(q, &bio);
+	__blk_queue_split(q, &bio, &nr_segs);
 
 	if (!bio_integrity_prep(bio))
 		return BLK_QC_T_NONE;
 
 	if (!is_flush_fua && !blk_queue_nomerges(q) &&
-	    blk_attempt_plug_merge(q, bio, &same_queue_rq))
+	    blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
 		return BLK_QC_T_NONE;
 
-	if (blk_mq_sched_bio_merge(q, bio))
+	if (blk_mq_sched_bio_merge(q, bio, nr_segs))
 		return BLK_QC_T_NONE;
 
 	rq_qos_throttle(q, bio);
@@ -1977,7 +1978,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	plug = current->plug;
 	if (unlikely(is_flush_fua)) {
 		blk_mq_put_ctx(data.ctx);
-		blk_mq_bio_to_request(rq, bio);
+		blk_mq_bio_to_request(rq, bio, nr_segs);
 
 		/* bypass scheduler for flush rq */
 		blk_insert_flush(rq);
@@ -1991,7 +1992,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		struct request *last = NULL;
 
 		blk_mq_put_ctx(data.ctx);
-		blk_mq_bio_to_request(rq, bio);
+		blk_mq_bio_to_request(rq, bio, nr_segs);
 
 		if (!request_count)
 			trace_block_plug(q);
@@ -2006,7 +2007,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 		blk_add_rq_to_plug(plug, rq);
 	} else if (plug && !blk_queue_nomerges(q)) {
-		blk_mq_bio_to_request(rq, bio);
+		blk_mq_bio_to_request(rq, bio, nr_segs);
 
 		/*
 		 * We do limited plugging. If the bio can be merged, do that.
@@ -2035,11 +2036,11 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	} else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
 			!data.hctx->dispatch_busy)) {
 		blk_mq_put_ctx(data.ctx);
-		blk_mq_bio_to_request(rq, bio);
+		blk_mq_bio_to_request(rq, bio, nr_segs);
 		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
 	} else {
 		blk_mq_put_ctx(data.ctx);
-		blk_mq_bio_to_request(rq, bio);
+		blk_mq_bio_to_request(rq, bio, nr_segs);
 		blk_mq_sched_insert_request(rq, false, true, true);
 	}
 
diff --git a/block/blk.h b/block/blk.h
index 7814aa207153..a1d33cb65842 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -51,8 +51,7 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
 		int node, int cmd_size, gfp_t flags);
 void blk_free_flush_queue(struct blk_flush_queue *q);
 
-void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
-		struct bio *bio);
+void blk_rq_bio_prep(struct request *rq, struct bio *bio, unsigned int nr_segs);
 void blk_freeze_queue(struct request_queue *q);
 
 static inline void blk_queue_enter_live(struct request_queue *q)
@@ -154,14 +153,14 @@ static inline bool bio_integrity_endio(struct bio *bio)
 unsigned long blk_rq_timeout(unsigned long timeout);
 void blk_add_timer(struct request *req);
 
-bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
-		struct bio *bio);
-bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
-		struct bio *bio);
+bool bio_attempt_front_merge(struct request *req, struct bio *bio,
+		unsigned int nr_segs);
+bool bio_attempt_back_merge(struct request *req, struct bio *bio,
+		unsigned int nr_segs);
 bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
 		struct bio *bio);
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-		struct request **same_queue_rq);
+		unsigned int nr_segs, struct request **same_queue_rq);
 
 void blk_account_io_start(struct request *req, bool new_io);
 void blk_account_io_completion(struct request *req, unsigned int bytes);
@@ -202,10 +201,12 @@ static inline int blk_should_fake_timeout(struct request_queue *q)
 }
 #endif
 
-int ll_back_merge_fn(struct request_queue *q, struct request *req,
-		struct bio *bio);
-int ll_front_merge_fn(struct request_queue *q, struct request *req,
-		struct bio *bio);
+void __blk_queue_split(struct request_queue *q, struct bio **bio,
+		unsigned int *nr_segs);
+int ll_back_merge_fn(struct request *req, struct bio *bio,
+		unsigned int nr_segs);
+int ll_front_merge_fn(struct request *req, struct bio *bio,
+		unsigned int nr_segs);
 struct request *attempt_back_merge(struct request_queue *q, struct request *rq);
 struct request *attempt_front_merge(struct request_queue *q, struct request *rq);
 int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index c3b05119cebd..3c2602601741 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -562,7 +562,8 @@ static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
 	}
 }
 
-static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
+static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
+		unsigned int nr_segs)
 {
 	struct kyber_hctx_data *khd = hctx->sched_data;
 	struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue);
@@ -572,7 +573,7 @@ static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
 	bool merged;
 
 	spin_lock(&kcq->lock);
-	merged = blk_mq_bio_list_merge(hctx->queue, rq_list, bio);
+	merged = blk_mq_bio_list_merge(hctx->queue, rq_list, bio, nr_segs);
 	spin_unlock(&kcq->lock);
 	blk_mq_put_ctx(ctx);
 
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 1876f5712bfd..b8a682b5a1bb 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -469,7 +469,8 @@ static int dd_request_merge(struct request_queue *q, struct request **rq,
 		return ELEVATOR_NO_MERGE;
 }
 
-static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
+static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
+		unsigned int nr_segs)
 {
 	struct request_queue *q = hctx->queue;
 	struct deadline_data *dd = q->elevator->elevator_data;
@@ -477,7 +478,7 @@ static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
 	bool ret;
 
 	spin_lock(&dd->lock);
-	ret = blk_mq_sched_try_merge(q, bio, &free);
+	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
 	spin_unlock(&dd->lock);
 
 	if (free)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index da94cbaa1a9e..3de4e13bde98 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5251,7 +5251,6 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
 	rcu_read_unlock();
 	raid_bio->bi_next = (void*)rdev;
 	bio_set_dev(align_bi, rdev->bdev);
-	bio_clear_flag(align_bi, BIO_SEG_VALID);
 
 	if (is_badblock(rdev, align_bi->bi_iter.bi_sector,
 			bio_sectors(align_bi),
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 0f23b5682640..ee11c4324751 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -408,7 +408,6 @@ static inline void bio_wouldblock_error(struct bio *bio)
 }
 
 struct request_queue;
-extern int bio_phys_segments(struct request_queue *, struct bio *);
 
 extern int submit_bio_wait(struct bio *bio);
 extern void bio_advance(struct bio *, unsigned);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 15d1aa53d96c..3fa1fa59f9b2 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -306,7 +306,7 @@ void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs
 bool blk_mq_complete_request(struct request *rq);
 void blk_mq_complete_request_sync(struct request *rq);
 bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
-		struct bio *bio);
+		struct bio *bio, unsigned int nr_segs);
 bool blk_mq_queue_stopped(struct request_queue *q);
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 95202f80676c..6a53799c3fe2 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -154,11 +154,6 @@ struct bio {
 	blk_status_t		bi_status;
 	u8			bi_partno;
 
-	/* Number of segments in this BIO after
-	 * physical address coalescing is performed.
-	 */
-	unsigned int		bi_phys_segments;
-
 	struct bvec_iter	bi_iter;
 
 	atomic_t		__bi_remaining;
@@ -210,7 +205,6 @@ struct bio {
  */
 enum {
 	BIO_NO_PAGE_REF,	/* don't put release vec pages */
-	BIO_SEG_VALID,		/* bi_phys_segments valid */
 	BIO_CLONED,		/* doesn't own data */
 	BIO_BOUNCED,		/* bio is a bounce bio */
 	BIO_USER_MAPPED,	/* contains user pages */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2d4dfe82767a..d5d3bb45dfb6 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -841,7 +841,6 @@ extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
 		struct request *rq);
 extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
 extern void blk_queue_split(struct request_queue *, struct bio **);
-extern void blk_recount_segments(struct request_queue *, struct bio *);
 extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
 extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
 			      unsigned int, void __user *);
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 6e8bc53740f0..169bb2e02516 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -34,7 +34,7 @@ struct elevator_mq_ops {
 	void (*depth_updated)(struct blk_mq_hw_ctx *);
 
 	bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
-	bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *);
+	bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *, unsigned int);
 	int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
 	void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
 	void (*requests_merged)(struct request_queue *, struct request *, struct request *);