author		Linus Torvalds <torvalds@linux-foundation.org>	2015-09-19 21:57:09 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-09-19 21:57:09 -0400
commit		133bb59585140747fd3938002670cb395f40dc76 (patch)
tree		a0fe424f3dd0a1cc91102ef59b7662be4b915609
parent		590dca3a71875461e8fea3013af74386945191b2 (diff)
parent		994518799930fc363d47cb7cf0d1abed1790bf16 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block updates from Jens Axboe:
"This is a bit bigger than it should be, but I could (did) not want to
send it off last week due to both wanting extra testing, and expecting
a fix for the bounce regression as well. In any case, this contains:
- Fix for the blk-merge.c compilation warning on gcc 5.x from me.
- A set of back/front SG gap merge fixes, from me and from Sagi.
This ensures that we honor SG gapping for integrity payloads as
well.
- Two small fixes for null_blk from Matias, fixing a leak and a
capacity propagation issue.
- A blkcg fix from Tejun, fixing a NULL dereference.
- A fast clone optimization from Ming, fixing a performance
regression since the arbitrarily sized bios were introduced.
- Also from Ming, a regression fix for bouncing IOs"
* 'for-linus' of git://git.kernel.dk/linux-block:
block: fix bounce_end_io
block: blk-merge: fast-clone bio when splitting rw bios
block: blkg_destroy_all() should clear q->root_blkg and ->root_rl.blkg
block: Copy a user iovec if it includes gaps
block: Refuse adding appending a gapped integrity page to a bio
block: Refuse request/bio merges with gaps in the integrity payload
block: Check for gaps on front and back merges
null_blk: fix wrong capacity when bs is not 512 bytes
null_blk: fix memory leak on cleanup
block: fix bogus compiler warnings in blk-merge.c
-rw-r--r--	block/bio-integrity.c		 5
-rw-r--r--	block/blk-cgroup.c		 3
-rw-r--r--	block/blk-integrity.c		 3
-rw-r--r--	block/blk-map.c			26
-rw-r--r--	block/blk-merge.c		59
-rw-r--r--	block/bounce.c			 4
-rw-r--r--	drivers/block/null_blk.c	36
-rw-r--r--	include/linux/blkdev.h		50
8 files changed, 129 insertions, 57 deletions
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 4aecca79374a..14b8faf8b09d 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -140,6 +140,11 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
 
 	iv = bip->bip_vec + bip->bip_vcnt;
 
+	if (bip->bip_vcnt &&
+	    bvec_gap_to_prev(bdev_get_queue(bio->bi_bdev),
+			     &bip->bip_vec[bip->bip_vcnt - 1], offset))
+		return 0;
+
 	iv->bv_page = page;
 	iv->bv_len = len;
 	iv->bv_offset = offset;
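The new check refuses to append an integrity page that would leave a virt-boundary gap against the payload's last vector; the patch signals the refusal by returning 0 to the caller. The arithmetic is easy to sanity-check in plain userspace C. A minimal sketch, assuming a 4 KiB boundary mask (the real value comes from queue_virt_boundary()) and illustrative offsets:

#include <stdbool.h>
#include <stdio.h>

#define VIRT_BOUNDARY_MASK 0xfffUL	/* assumption: device cannot cross 4 KiB */

/* stand-in for the two struct bio_vec fields the kernel check reads */
struct vec {
	unsigned int bv_offset;
	unsigned int bv_len;
};

/* same test as bvec_gap_to_prev(): a gap exists if the new page's offset,
 * or the previous vector's end, does not sit on the virt boundary */
static bool gap_to_prev(const struct vec *bprv, unsigned int offset)
{
	return (offset & VIRT_BOUNDARY_MASK) ||
	       ((bprv->bv_offset + bprv->bv_len) & VIRT_BOUNDARY_MASK);
}

int main(void)
{
	struct vec full = { .bv_offset = 0, .bv_len = 4096 };	/* ends on boundary */
	struct vec half = { .bv_offset = 0, .bv_len = 2048 };	/* ends mid-page */

	printf("after full page, offset 0:   gap=%d\n", gap_to_prev(&full, 0));   /* 0 */
	printf("after half page, offset 0:   gap=%d\n", gap_to_prev(&half, 0));   /* 1 */
	printf("after full page, offset 512: gap=%d\n", gap_to_prev(&full, 512)); /* 1 */
	return 0;
}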
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index ac8370cb2515..55512dd62633 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -370,6 +370,9 @@ static void blkg_destroy_all(struct request_queue *q)
 		blkg_destroy(blkg);
 		spin_unlock(&blkcg->lock);
 	}
+
+	q->root_blkg = NULL;
+	q->root_rl.blkg = NULL;
 }
 
 /*
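Tejun's fix is the classic "clear the dangling shortcut" pattern: blkg_destroy_all() frees every group, but q->root_blkg and q->root_rl.blkg are cached pointers into that set and were left pointing at freed memory, so later code that tested them could chase a stale pointer instead of noticing teardown. A generic sketch of the pattern, with hypothetical names:

struct group { int id; };

struct queue {
	struct group *root_group;	/* cached shortcut, like q->root_blkg */
};

static void destroy_all_groups(struct queue *q)
{
	/* ... walk the group list, unlink and free each group ... */

	/* without this, q->root_group dangles after teardown: any later
	 * "if (q->root_group)" test still passes and then dereferences
	 * freed memory */
	q->root_group = NULL;
}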
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index f548b64be092..75f29cf70188 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -204,6 +204,9 @@ bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
 	    q->limits.max_integrity_segments)
 		return false;
 
+	if (integrity_req_gap_back_merge(req, next->bio))
+		return false;
+
 	return true;
 }
 EXPORT_SYMBOL(blk_integrity_merge_rq);
diff --git a/block/blk-map.c b/block/blk-map.c
index 233841644c9d..f565e11f465a 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -9,6 +9,24 @@
 
 #include "blk.h"
 
+static bool iovec_gap_to_prv(struct request_queue *q,
+			     struct iovec *prv, struct iovec *cur)
+{
+	unsigned long prev_end;
+
+	if (!queue_virt_boundary(q))
+		return false;
+
+	if (prv->iov_base == NULL && prv->iov_len == 0)
+		/* prv is not set - don't check */
+		return false;
+
+	prev_end = (unsigned long)(prv->iov_base + prv->iov_len);
+
+	return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
+		prev_end & queue_virt_boundary(q));
+}
+
 int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 		      struct bio *bio)
 {
@@ -67,7 +85,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	struct bio *bio;
 	int unaligned = 0;
 	struct iov_iter i;
-	struct iovec iov;
+	struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};
 
 	if (!iter || !iter->count)
 		return -EINVAL;
@@ -81,8 +99,12 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 		/*
 		 * Keep going so we check length of all segments
 		 */
-		if (uaddr & queue_dma_alignment(q))
+		if ((uaddr & queue_dma_alignment(q)) ||
+		    iovec_gap_to_prv(q, &prv, &iov))
 			unaligned = 1;
+
+		prv.iov_base = iov.iov_base;
+		prv.iov_len = iov.iov_len;
 	}
 
 	if (unaligned || (q->dma_pad_mask & iter->count) || map_data)
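Putting the blk-map.c pieces together: blk_rq_map_user_iov() now walks the iovec with a trailing prv cursor, flags the request as unaligned when any segment is DMA-misaligned or gaps against its predecessor, and the existing fallback then copies the user data into fresh, gap-free pages. A self-contained userspace model of that scan; the two mask values are assumptions standing in for queue_dma_alignment() and queue_virt_boundary():

#include <stdbool.h>
#include <stddef.h>
#include <sys/uio.h>

#define VIRT_BOUNDARY 0xfffUL	/* assumed queue_virt_boundary() mask */
#define DMA_ALIGN     0x1ffUL	/* assumed queue_dma_alignment() mask */

/* mirrors iovec_gap_to_prv(); the zeroed prv exempts the first segment */
static bool iovec_gap(const struct iovec *prv, const struct iovec *cur)
{
	unsigned long prev_end;

	if (prv->iov_base == NULL && prv->iov_len == 0)
		return false;

	prev_end = (unsigned long)prv->iov_base + prv->iov_len;
	return ((unsigned long)cur->iov_base & VIRT_BOUNDARY) ||
	       (prev_end & VIRT_BOUNDARY);
}

/* decide up front whether the mapping must fall back to a copy */
static bool must_copy(const struct iovec *iov, int nr)
{
	struct iovec prv = { .iov_base = NULL, .iov_len = 0 };
	bool unaligned = false;
	int i;

	for (i = 0; i < nr; i++) {
		if (((unsigned long)iov[i].iov_base & DMA_ALIGN) ||
		    iovec_gap(&prv, &iov[i]))
			unaligned = true;	/* keep scanning all segments */
		prv = iov[i];
	}
	return unaligned;
}

Copying is slower than mapping the user pages directly, which is why the gap scan runs first and the copy is only the fallback for buffers the device cannot address as-is.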
diff --git a/block/blk-merge.c b/block/blk-merge.c
index d088cffb8105..c4e9c37f3e38 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -66,36 +66,33 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 					 struct bio *bio,
 					 struct bio_set *bs)
 {
-	struct bio *split;
-	struct bio_vec bv, bvprv;
+	struct bio_vec bv, bvprv, *bvprvp = NULL;
 	struct bvec_iter iter;
 	unsigned seg_size = 0, nsegs = 0, sectors = 0;
-	int prev = 0;
 
 	bio_for_each_segment(bv, bio, iter) {
-		sectors += bv.bv_len >> 9;
-
-		if (sectors > queue_max_sectors(q))
+		if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q))
 			goto split;
 
 		/*
 		 * If the queue doesn't support SG gaps and adding this
 		 * offset would create a gap, disallow it.
 		 */
-		if (prev && bvec_gap_to_prev(q, &bvprv, bv.bv_offset))
+		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
 			goto split;
 
-		if (prev && blk_queue_cluster(q)) {
+		if (bvprvp && blk_queue_cluster(q)) {
 			if (seg_size + bv.bv_len > queue_max_segment_size(q))
 				goto new_segment;
-			if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
+			if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
 				goto new_segment;
-			if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
+			if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
 				goto new_segment;
 
 			seg_size += bv.bv_len;
 			bvprv = bv;
-			prev = 1;
+			bvprvp = &bv;
+			sectors += bv.bv_len >> 9;
 			continue;
 		}
 new_segment:
@@ -104,23 +101,14 @@ new_segment:
 
 		nsegs++;
 		bvprv = bv;
-		prev = 1;
+		bvprvp = &bv;
 		seg_size = bv.bv_len;
+		sectors += bv.bv_len >> 9;
 	}
 
 	return NULL;
 split:
-	split = bio_clone_bioset(bio, GFP_NOIO, bs);
-
-	split->bi_iter.bi_size -= iter.bi_size;
-	bio->bi_iter = iter;
-
-	if (bio_integrity(bio)) {
-		bio_integrity_advance(bio, split->bi_iter.bi_size);
-		bio_integrity_trim(split, 0, bio_sectors(split));
-	}
-
-	return split;
+	return bio_split(bio, sectors, GFP_NOIO, bs);
 }
 
 void blk_queue_split(struct request_queue *q, struct bio **bio,
@@ -439,6 +427,11 @@ no_merge:
 int ll_back_merge_fn(struct request_queue *q, struct request *req,
 		     struct bio *bio)
 {
+	if (req_gap_back_merge(req, bio))
+		return 0;
+	if (blk_integrity_rq(req) &&
+	    integrity_req_gap_back_merge(req, bio))
+		return 0;
 	if (blk_rq_sectors(req) + bio_sectors(bio) >
 	    blk_rq_get_max_sectors(req)) {
 		req->cmd_flags |= REQ_NOMERGE;
@@ -457,6 +450,12 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		      struct bio *bio)
 {
+
+	if (req_gap_front_merge(req, bio))
+		return 0;
+	if (blk_integrity_rq(req) &&
+	    integrity_req_gap_front_merge(req, bio))
+		return 0;
 	if (blk_rq_sectors(req) + bio_sectors(bio) >
 	    blk_rq_get_max_sectors(req)) {
 		req->cmd_flags |= REQ_NOMERGE;
@@ -483,14 +482,6 @@ static bool req_no_special_merge(struct request *req)
 	return !q->mq_ops && req->special;
 }
 
-static int req_gap_to_prev(struct request *req, struct bio *next)
-{
-	struct bio *prev = req->biotail;
-
-	return bvec_gap_to_prev(req->q, &prev->bi_io_vec[prev->bi_vcnt - 1],
-				next->bi_io_vec[0].bv_offset);
-}
-
 static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 				struct request *next)
 {
@@ -505,7 +496,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	if (req_no_special_merge(req) || req_no_special_merge(next))
 		return 0;
 
-	if (req_gap_to_prev(req, next->bio))
+	if (req_gap_back_merge(req, next->bio))
 		return 0;
 
 	/*
@@ -713,10 +704,6 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 	    !blk_write_same_mergeable(rq->bio, bio))
 		return false;
 
-	/* Only check gaps if the bio carries data */
-	if (bio_has_data(bio) && req_gap_to_prev(rq, bio))
-		return false;
-
 	return true;
 }
 
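Two things happen in blk_bio_segment_split(). First, sectors is now only accumulated for bvecs that stay in the front half, so it is exactly the split point to hand to bio_split(). Second, and this is Ming's performance fix, bio_clone_bioset() duplicated the whole bio_vec table on every split, whereas bio_split() produces a clone that shares the parent's table and differs only in its iterator. A userspace analogy of that constant-time "view split" (the idea, not the kernel API):

#include <stddef.h>
#include <stdio.h>

/* a view over a shared, immutable buffer, analogous to a cloned bio
 * sharing its parent's bio_vec table and differing only in bvec_iter */
struct view {
	const char *data;	/* shared, never copied */
	size_t off, len;
};

/* split off the first first_len bytes as a new view; the remainder stays
 * in *v: O(1), versus copying every segment for a full clone */
static struct view view_split(struct view *v, size_t first_len)
{
	struct view front = { v->data, v->off, first_len };

	v->off += first_len;
	v->len -= first_len;
	return front;
}

int main(void)
{
	static const char buf[] = "0123456789abcdef";
	struct view whole = { buf, 0, sizeof(buf) - 1 };
	struct view front = view_split(&whole, 10);

	printf("front: %.*s\n", (int)front.len, front.data + front.off);
	printf("rest : %.*s\n", (int)whole.len, whole.data + whole.off);
	return 0;
}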
diff --git a/block/bounce.c b/block/bounce.c
index 0611aea1cfe9..1cb5dd3a5da1 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -128,12 +128,14 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool)
 	struct bio *bio_orig = bio->bi_private;
 	struct bio_vec *bvec, *org_vec;
 	int i;
+	int start = bio_orig->bi_iter.bi_idx;
 
 	/*
 	 * free up bounce indirect pages used
 	 */
 	bio_for_each_segment_all(bvec, bio, i) {
-		org_vec = bio_orig->bi_io_vec + i;
+		org_vec = bio_orig->bi_io_vec + i + start;
+
 		if (bvec->bv_page == org_vec->bv_page)
 			continue;
 
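The bounce regression was an indexing mismatch. bio_for_each_segment_all() walks the bounce clone's vectors from index 0, but if bio_orig had been advanced or split, its live vectors begin at bi_iter.bi_idx, so clone vector i actually corresponds to original vector i + start; comparing page pointers at the wrong index made the end-io path free or skip the wrong pages. A toy illustration of the pairing:

#include <stdio.h>

int main(void)
{
	/* the original has four segments but was advanced past the first */
	const char *orig[]  = { "A", "B", "C", "D" };
	int start = 1;		/* plays the role of bio_orig->bi_iter.bi_idx */
	/* the bounce clone was built only from the live tail, orig[1..3] */
	const char *clone[] = { "B'", "C'", "D'" };
	int i;

	for (i = 0; i < 3; i++)
		printf("clone %s pairs with orig %s (buggy pairing: %s)\n",
		       clone[i], orig[i + start], orig[i]);
	return 0;
}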
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 17269a3b85f2..a295b98c6bae 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -406,6 +406,22 @@ static struct blk_mq_ops null_mq_ops = {
 	.complete	= null_softirq_done_fn,
 };
 
+static void cleanup_queue(struct nullb_queue *nq)
+{
+	kfree(nq->tag_map);
+	kfree(nq->cmds);
+}
+
+static void cleanup_queues(struct nullb *nullb)
+{
+	int i;
+
+	for (i = 0; i < nullb->nr_queues; i++)
+		cleanup_queue(&nullb->queues[i]);
+
+	kfree(nullb->queues);
+}
+
 static void null_del_dev(struct nullb *nullb)
 {
 	list_del_init(&nullb->list);
@@ -415,6 +431,7 @@ static void null_del_dev(struct nullb *nullb)
 	if (queue_mode == NULL_Q_MQ)
 		blk_mq_free_tag_set(&nullb->tag_set);
 	put_disk(nullb->disk);
+	cleanup_queues(nullb);
 	kfree(nullb);
 }
 
@@ -459,22 +476,6 @@ static int setup_commands(struct nullb_queue *nq)
 	return 0;
 }
 
-static void cleanup_queue(struct nullb_queue *nq)
-{
-	kfree(nq->tag_map);
-	kfree(nq->cmds);
-}
-
-static void cleanup_queues(struct nullb *nullb)
-{
-	int i;
-
-	for (i = 0; i < nullb->nr_queues; i++)
-		cleanup_queue(&nullb->queues[i]);
-
-	kfree(nullb->queues);
-}
-
 static int setup_queues(struct nullb *nullb)
 {
 	nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
@@ -588,8 +589,7 @@ static int null_add_dev(void)
 	blk_queue_physical_block_size(nullb->q, bs);
 
 	size = gb * 1024 * 1024 * 1024ULL;
-	sector_div(size, bs);
-	set_capacity(disk, size);
+	set_capacity(disk, size >> 9);
 
 	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
 	disk->major = null_major;
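Two independent null_blk fixes are folded in here. The cleanup_queue()/cleanup_queues() pair only moved up the file so that null_del_dev() can call it, plugging the leak of the per-queue tag maps and command arrays. The capacity change is pure unit arithmetic: set_capacity() always takes 512-byte sectors, independent of the configured logical block size, so dividing the byte size by bs only happened to be correct when bs was 512. A worked check:

#include <stdio.h>

int main(void)
{
	unsigned long long gb = 1, bs = 4096;
	unsigned long long size = gb * 1024 * 1024 * 1024ULL;	/* bytes */

	/* old code: sector_div(size, bs) shrinks as the block size grows */
	unsigned long long old_sectors = size / bs;	/*  262144 */
	/* new code: set_capacity() wants 512-byte units, hence the >> 9 */
	unsigned long long new_sectors = size >> 9;	/* 2097152 */

	printf("old: %llu sectors -> %llu MiB (wrong for bs=4096)\n",
	       old_sectors, (old_sectors * 512) >> 20);	/*  128 MiB */
	printf("new: %llu sectors -> %llu MiB\n",
	       new_sectors, (new_sectors * 512) >> 20);	/* 1024 MiB */
	return 0;
}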
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 38a5ff772a37..99da9ebc7377 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1368,6 +1368,26 @@ static inline bool bvec_gap_to_prev(struct request_queue *q,
 		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
 }
 
+static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
+				struct bio *next)
+{
+	if (!bio_has_data(prev))
+		return false;
+
+	return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1],
+				next->bi_io_vec[0].bv_offset);
+}
+
+static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
+{
+	return bio_will_gap(req->q, req->biotail, bio);
+}
+
+static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
+{
+	return bio_will_gap(req->q, bio, req->bio);
+}
+
 struct work_struct;
 int kblockd_schedule_work(struct work_struct *work);
 int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
@@ -1494,6 +1514,26 @@ queue_max_integrity_segments(struct request_queue *q)
 	return q->limits.max_integrity_segments;
 }
 
+static inline bool integrity_req_gap_back_merge(struct request *req,
+						struct bio *next)
+{
+	struct bio_integrity_payload *bip = bio_integrity(req->bio);
+	struct bio_integrity_payload *bip_next = bio_integrity(next);
+
+	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+				bip_next->bip_vec[0].bv_offset);
+}
+
+static inline bool integrity_req_gap_front_merge(struct request *req,
+						 struct bio *bio)
+{
+	struct bio_integrity_payload *bip = bio_integrity(bio);
+	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
+
+	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+				bip_next->bip_vec[0].bv_offset);
+}
+
 #else /* CONFIG_BLK_DEV_INTEGRITY */
 
 struct bio;
@@ -1560,6 +1600,16 @@ static inline bool blk_integrity_is_initialized(struct gendisk *g)
 {
 	return 0;
 }
+static inline bool integrity_req_gap_back_merge(struct request *req,
+						struct bio *next)
+{
+	return false;
+}
+static inline bool integrity_req_gap_front_merge(struct request *req,
+						 struct bio *bio)
+{
+	return false;
+}
 
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
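A note on the orientation of the new helpers: in a back merge the bio is appended after the request, so the request's last bio (req->biotail) plays "prev"; in a front merge the bio is prepended, so the bio itself is "prev" and req->bio is "next". The integrity variants apply the same test to the bip_vec tables, and the !CONFIG_BLK_DEV_INTEGRITY stubs return false so callers such as ll_back_merge_fn() need no #ifdefs. Schematically:

/*
 *  back merge:   [ req->bio ... req->biotail ][ bio ]   prev = req->biotail
 *  front merge:  [ bio ][ req->bio ... req->biotail ]   prev = bio
 *
 * bio_will_gap(q, prev, next) then asks bvec_gap_to_prev() whether prev's
 * last bio_vec ends on the virt boundary and next's first starts on it.
 */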