Diffstat (limited to 'block/blk-merge.c')
-rw-r--r--   block/blk-merge.c   159
1 file changed, 149 insertions, 10 deletions
diff --git a/block/blk-merge.c b/block/blk-merge.c
index a455b9860143..d9c3a75e4a60 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -9,12 +9,158 @@
 
 #include "blk.h"
 
+static struct bio *blk_bio_discard_split(struct request_queue *q,
+                                         struct bio *bio,
+                                         struct bio_set *bs)
+{
+        unsigned int max_discard_sectors, granularity;
+        int alignment;
+        sector_t tmp;
+        unsigned split_sectors;
+
+        /* Zero-sector (unknown) and one-sector granularities are the same. */
+        granularity = max(q->limits.discard_granularity >> 9, 1U);
+
+        max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+        max_discard_sectors -= max_discard_sectors % granularity;
+
+        if (unlikely(!max_discard_sectors)) {
+                /* XXX: warn */
+                return NULL;
+        }
+
+        if (bio_sectors(bio) <= max_discard_sectors)
+                return NULL;
+
+        split_sectors = max_discard_sectors;
+
+        /*
+         * If the next starting sector would be misaligned, stop the discard at
+         * the previous aligned sector.
+         */
+        alignment = (q->limits.discard_alignment >> 9) % granularity;
+
+        tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
+        tmp = sector_div(tmp, granularity);
+
+        if (split_sectors > tmp)
+                split_sectors -= tmp;
+
+        return bio_split(bio, split_sectors, GFP_NOIO, bs);
+}
+
+static struct bio *blk_bio_write_same_split(struct request_queue *q,
+                                            struct bio *bio,
+                                            struct bio_set *bs)
+{
+        if (!q->limits.max_write_same_sectors)
+                return NULL;
+
+        if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
+                return NULL;
+
+        return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
+}
+
+static struct bio *blk_bio_segment_split(struct request_queue *q,
+                                         struct bio *bio,
+                                         struct bio_set *bs)
+{
+        struct bio *split;
+        struct bio_vec bv, bvprv;
+        struct bvec_iter iter;
+        unsigned seg_size = 0, nsegs = 0;
+        int prev = 0;
+
+        struct bvec_merge_data bvm = {
+                .bi_bdev        = bio->bi_bdev,
+                .bi_sector      = bio->bi_iter.bi_sector,
+                .bi_size        = 0,
+                .bi_rw          = bio->bi_rw,
+        };
+
+        bio_for_each_segment(bv, bio, iter) {
+                if (q->merge_bvec_fn &&
+                    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
+                        goto split;
+
+                bvm.bi_size += bv.bv_len;
+
+                if (bvm.bi_size >> 9 > queue_max_sectors(q))
+                        goto split;
+
+                /*
+                 * If the queue doesn't support SG gaps and adding this
+                 * offset would create a gap, disallow it.
+                 */
+                if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) &&
+                    prev && bvec_gap_to_prev(&bvprv, bv.bv_offset))
+                        goto split;
+
+                if (prev && blk_queue_cluster(q)) {
+                        if (seg_size + bv.bv_len > queue_max_segment_size(q))
+                                goto new_segment;
+                        if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
+                                goto new_segment;
+                        if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
+                                goto new_segment;
+
+                        seg_size += bv.bv_len;
+                        bvprv = bv;
+                        prev = 1;
+                        continue;
+                }
+new_segment:
+                if (nsegs == queue_max_segments(q))
+                        goto split;
+
+                nsegs++;
+                bvprv = bv;
+                prev = 1;
+                seg_size = bv.bv_len;
+        }
+
+        return NULL;
+split:
+        split = bio_clone_bioset(bio, GFP_NOIO, bs);
+
+        split->bi_iter.bi_size -= iter.bi_size;
+        bio->bi_iter = iter;
+
+        if (bio_integrity(bio)) {
+                bio_integrity_advance(bio, split->bi_iter.bi_size);
+                bio_integrity_trim(split, 0, bio_sectors(split));
+        }
+
+        return split;
+}
+
+void blk_queue_split(struct request_queue *q, struct bio **bio,
+                     struct bio_set *bs)
+{
+        struct bio *split;
+
+        if ((*bio)->bi_rw & REQ_DISCARD)
+                split = blk_bio_discard_split(q, *bio, bs);
+        else if ((*bio)->bi_rw & REQ_WRITE_SAME)
+                split = blk_bio_write_same_split(q, *bio, bs);
+        else
+                split = blk_bio_segment_split(q, *bio, q->bio_split);
+
+        if (split) {
+                bio_chain(split, *bio);
+                generic_make_request(*bio);
+                *bio = split;
+        }
+}
+EXPORT_SYMBOL(blk_queue_split);
+
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                              struct bio *bio,
                                              bool no_sg_merge)
 {
         struct bio_vec bv, bvprv = { NULL };
-        int cluster, high, highprv = 1;
+        int cluster, prev = 0;
         unsigned int seg_size, nr_phys_segs;
         struct bio *fbio, *bbio;
         struct bvec_iter iter;
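The split point chosen by blk_bio_discard_split above is plain modular arithmetic on 512-byte sectors: cap the discard at the granularity-rounded max_discard_sectors, then pull the end back so the next bio starts on a discard_alignment boundary. The following standalone sketch reproduces only that calculation with made-up queue limits (1 MiB granularity, 8 MiB max discard, zero alignment offset); it is an illustration of the rounding, not kernel code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical queue limits, expressed in 512-byte sectors. */
        uint32_t granularity = (1024 * 1024) >> 9;              /* 2048 sectors  */
        uint32_t max_discard_sectors = (8 * 1024 * 1024) >> 9;  /* 16384 sectors */
        uint32_t alignment = 0;         /* (discard_alignment >> 9) % granularity */
        uint64_t bi_sector = 1000;      /* bio start, deliberately misaligned     */
        uint32_t split_sectors = max_discard_sectors;

        /* Same rounding as the kernel helper: if the next starting sector
         * would be misaligned, stop at the previous aligned sector. */
        uint64_t rem = (bi_sector + split_sectors - alignment) % granularity;
        if (split_sectors > rem)
                split_sectors -= rem;

        /* Prints: split after 15384 sectors, next discard starts at sector 16384 */
        printf("split after %u sectors, next discard starts at sector %llu\n",
               split_sectors, (unsigned long long)(bi_sector + split_sectors));
        return 0;
}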
@@ -36,7 +182,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
         cluster = blk_queue_cluster(q);
         seg_size = 0;
         nr_phys_segs = 0;
-        high = 0;
         for_each_bio(bio) {
                 bio_for_each_segment(bv, bio, iter) {
                         /*
@@ -46,13 +191,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                         if (no_sg_merge)
                                 goto new_segment;
 
-                        /*
-                         * the trick here is making sure that a high page is
-                         * never considered part of another segment, since
-                         * that might change with the bounce page.
-                         */
-                        high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
-                        if (!high && !highprv && cluster) {
+                        if (prev && cluster) {
                                 if (seg_size + bv.bv_len
                                     > queue_max_segment_size(q))
                                         goto new_segment;
@@ -72,8 +211,8 @@ new_segment:
 
                         nr_phys_segs++;
                         bvprv = bv;
+                        prev = 1;
                         seg_size = bv.bv_len;
-                        highprv = high;
                 }
                 bbio = bio;
         }
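The exported blk_queue_split() is intended to be called at the top of a bio-based driver's make_request function: it may replace *bio with a front piece that fits the queue limits and re-submits the remainder itself via bio_chain() and generic_make_request(), as seen in the first hunk. The sketch below shows that calling pattern only; the driver type, example_make_request() and example_dev_queue_bio() are hypothetical names, and the void-returning make_request signature of this kernel series is assumed.

/* Hypothetical bio-based driver; example_dev and example_dev_queue_bio()
 * are illustrative names, not real kernel symbols. */
static void example_make_request(struct request_queue *q, struct bio *bio)
{
        struct example_dev *edev = q->queuedata;

        /*
         * May replace 'bio' with a split that honours q's limits; the
         * remainder of the original bio has already been re-submitted
         * through generic_make_request() by the time this returns.
         */
        blk_queue_split(q, &bio, q->bio_split);

        example_dev_queue_bio(edev, bio);       /* hypothetical helper */
}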