author	Kent Overstreet <kent.overstreet@gmail.com>	2015-04-24 01:37:18 -0400
committer	Jens Axboe <axboe@fb.com>	2015-08-13 14:31:33 -0400
commit	54efd50bfd873e2dbf784e0b21a8027ba4299a3e (patch)
tree	37bcd2d148b99b0f53f5a093eecd5cb3d8c9887c /block/blk-merge.c
parent	41609892701e26724b8617201f43254cadf2e7ae (diff)
block: make generic_make_request handle arbitrarily sized bios
The way the block layer is currently written, it goes to great lengths
to avoid having to split bios; upper layer code (such as bio_add_page())
checks what the underlying device can handle and tries to always create
bios that don't need to be split.

But this approach becomes unwieldy and eventually breaks down with
stacked devices and devices with dynamic limits, and it adds a lot of
complexity. If the block layer could split bios as needed, we could
eliminate a lot of complexity elsewhere - particularly in stacked
drivers. Code that creates bios can then create whatever size bios are
convenient, and more importantly stacked drivers don't have to deal
with both their own bio size limitations and the limitations of the
(potentially multiple) devices underneath them. In the future this will
let us delete merge_bvec_fn and a bunch of other code.

We do this by adding calls to blk_queue_split() to the various
make_request functions that need it - a few can already handle
arbitrary size bios. Note that we add the call _after_ any call to
blk_queue_bounce(); this means that blk_queue_split() and
blk_recalc_rq_segments() don't need to be concerned with bouncing
affecting segment merging.

Some make_request_fn() callbacks were simple enough to audit and verify
they don't need blk_queue_split() calls. The skipped ones are:

 * nfhd_make_request (arch/m68k/emu/nfblock.c)
 * axon_ram_make_request (arch/powerpc/sysdev/axonram.c)
 * simdisk_make_request (arch/xtensa/platforms/iss/simdisk.c)
 * brd_make_request (ramdisk - drivers/block/brd.c)
 * mtip_submit_request (drivers/block/mtip32xx/mtip32xx.c)
 * loop_make_request
 * null_queue_bio
 * bcache's make_request fns

Some others are almost certainly safe to remove now, but will be left
for future patches.

Cc: Jens Axboe <axboe@kernel.dk>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Ming Lei <ming.lei@canonical.com>
Cc: Neil Brown <neilb@suse.de>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: dm-devel@redhat.com
Cc: Lars Ellenberg <drbd-dev@lists.linbit.com>
Cc: drbd-user@lists.linbit.com
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Jim Paris <jim@jtan.com>
Cc: Philip Kelleher <pjk1939@linux.vnet.ibm.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Oleg Drokin <oleg.drokin@intel.com>
Cc: Andreas Dilger <andreas.dilger@intel.com>
Acked-by: NeilBrown <neilb@suse.de> (for the 'md/md.c' bits)
Acked-by: Mike Snitzer <snitzer@redhat.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
[dpark: skip more mq-based drivers, resolve merge conflicts, etc.]
Signed-off-by: Dongsu Park <dpark@posteo.net>
Signed-off-by: Ming Lin <ming.l@ssi.samsung.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
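For illustration, the call pattern this series adds to the affected drivers looks roughly like the sketch below. It is a minimal, hypothetical make_request_fn (the function name and the presence of a bounce step are assumptions); the blk_queue_split() signature and the q->bio_split bio_set match what this patch introduces, and the split call is placed after blk_queue_bounce() as described above.

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Hypothetical driver make_request_fn showing the intended call pattern. */
static void example_make_request(struct request_queue *q, struct bio *bio)
{
	/* Bounce first, so splitting and segment counting see the final pages. */
	blk_queue_bounce(q, &bio);

	/*
	 * Split against q's limits: *bio may be replaced by the front
	 * portion while the remainder is chained and resubmitted via
	 * generic_make_request() inside blk_queue_split().
	 */
	blk_queue_split(q, &bio, q->bio_split);

	/* ... driver-specific handling of the (possibly smaller) bio ... */
}

In the drivers converted by this series the call sits at the top of the make_request function, so neither blk_queue_split() nor blk_recalc_rq_segments() has to account for bounce pages.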
Diffstat (limited to 'block/blk-merge.c')
-rw-r--r--	block/blk-merge.c	159
1 file changed, 149 insertions(+), 10 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index a455b9860143..d9c3a75e4a60 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -9,12 +9,158 @@
 
 #include "blk.h"
 
+static struct bio *blk_bio_discard_split(struct request_queue *q,
+					 struct bio *bio,
+					 struct bio_set *bs)
+{
+	unsigned int max_discard_sectors, granularity;
+	int alignment;
+	sector_t tmp;
+	unsigned split_sectors;
+
+	/* Zero-sector (unknown) and one-sector granularities are the same. */
+	granularity = max(q->limits.discard_granularity >> 9, 1U);
+
+	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+	max_discard_sectors -= max_discard_sectors % granularity;
+
+	if (unlikely(!max_discard_sectors)) {
+		/* XXX: warn */
+		return NULL;
+	}
+
+	if (bio_sectors(bio) <= max_discard_sectors)
+		return NULL;
+
+	split_sectors = max_discard_sectors;
+
+	/*
+	 * If the next starting sector would be misaligned, stop the discard at
+	 * the previous aligned sector.
+	 */
+	alignment = (q->limits.discard_alignment >> 9) % granularity;
+
+	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
+	tmp = sector_div(tmp, granularity);
+
+	if (split_sectors > tmp)
+		split_sectors -= tmp;
+
+	return bio_split(bio, split_sectors, GFP_NOIO, bs);
+}
+
+static struct bio *blk_bio_write_same_split(struct request_queue *q,
+					    struct bio *bio,
+					    struct bio_set *bs)
+{
+	if (!q->limits.max_write_same_sectors)
+		return NULL;
+
+	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
+		return NULL;
+
+	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
+}
+
+static struct bio *blk_bio_segment_split(struct request_queue *q,
+					 struct bio *bio,
+					 struct bio_set *bs)
+{
+	struct bio *split;
+	struct bio_vec bv, bvprv;
+	struct bvec_iter iter;
+	unsigned seg_size = 0, nsegs = 0;
+	int prev = 0;
+
+	struct bvec_merge_data bvm = {
+		.bi_bdev	= bio->bi_bdev,
+		.bi_sector	= bio->bi_iter.bi_sector,
+		.bi_size	= 0,
+		.bi_rw		= bio->bi_rw,
+	};
+
+	bio_for_each_segment(bv, bio, iter) {
+		if (q->merge_bvec_fn &&
+		    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
+			goto split;
+
+		bvm.bi_size += bv.bv_len;
+
+		if (bvm.bi_size >> 9 > queue_max_sectors(q))
+			goto split;
+
+		/*
+		 * If the queue doesn't support SG gaps and adding this
+		 * offset would create a gap, disallow it.
+		 */
+		if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) &&
+		    prev && bvec_gap_to_prev(&bvprv, bv.bv_offset))
+			goto split;
+
+		if (prev && blk_queue_cluster(q)) {
+			if (seg_size + bv.bv_len > queue_max_segment_size(q))
+				goto new_segment;
+			if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
+				goto new_segment;
+			if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
+				goto new_segment;
+
+			seg_size += bv.bv_len;
+			bvprv = bv;
+			prev = 1;
+			continue;
+		}
+new_segment:
+		if (nsegs == queue_max_segments(q))
+			goto split;
+
+		nsegs++;
+		bvprv = bv;
+		prev = 1;
+		seg_size = bv.bv_len;
+	}
+
+	return NULL;
+split:
+	split = bio_clone_bioset(bio, GFP_NOIO, bs);
+
+	split->bi_iter.bi_size -= iter.bi_size;
+	bio->bi_iter = iter;
+
+	if (bio_integrity(bio)) {
+		bio_integrity_advance(bio, split->bi_iter.bi_size);
+		bio_integrity_trim(split, 0, bio_sectors(split));
+	}
+
+	return split;
+}
+
+void blk_queue_split(struct request_queue *q, struct bio **bio,
+		     struct bio_set *bs)
+{
+	struct bio *split;
+
+	if ((*bio)->bi_rw & REQ_DISCARD)
+		split = blk_bio_discard_split(q, *bio, bs);
+	else if ((*bio)->bi_rw & REQ_WRITE_SAME)
+		split = blk_bio_write_same_split(q, *bio, bs);
+	else
+		split = blk_bio_segment_split(q, *bio, q->bio_split);
+
+	if (split) {
+		bio_chain(split, *bio);
+		generic_make_request(*bio);
+		*bio = split;
+	}
+}
+EXPORT_SYMBOL(blk_queue_split);
+
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 					     struct bio *bio,
 					     bool no_sg_merge)
 {
 	struct bio_vec bv, bvprv = { NULL };
-	int cluster, high, highprv = 1;
+	int cluster, prev = 0;
 	unsigned int seg_size, nr_phys_segs;
 	struct bio *fbio, *bbio;
 	struct bvec_iter iter;
@@ -36,7 +182,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	cluster = blk_queue_cluster(q);
 	seg_size = 0;
 	nr_phys_segs = 0;
-	high = 0;
 	for_each_bio(bio) {
 		bio_for_each_segment(bv, bio, iter) {
 			/*
@@ -46,13 +191,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 			if (no_sg_merge)
 				goto new_segment;
 
-			/*
-			 * the trick here is making sure that a high page is
-			 * never considered part of another segment, since
-			 * that might change with the bounce page.
-			 */
-			high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
-			if (!high && !highprv && cluster) {
+			if (prev && cluster) {
 				if (seg_size + bv.bv_len
 				    > queue_max_segment_size(q))
 					goto new_segment;
@@ -72,8 +211,8 @@ new_segment:
 
 			nr_phys_segs++;
 			bvprv = bv;
+			prev = 1;
 			seg_size = bv.bv_len;
-			highprv = high;
 		}
 		bbio = bio;
 	}