author     Kent Overstreet <kmo@daterainc.com>    2014-02-07 15:53:46 -0500
committer  Jens Axboe <axboe@fb.com>              2014-02-07 15:54:08 -0500
commit     5cb8850c9c4a7605f74f5c9c7ecadd0b02e87a25
tree       401b42c4f5898a54c11c36da7ac37a14479bfffe /block
parent     087787959ce851d7bbb19f10f6e9241b7f85a3ca
block: Explicitly handle discard/write same segments
Immutable biovecs changed the way biovecs are interpreted: drivers no longer
use bi_vcnt; they have to go by bi_iter.bi_size (to allow using part of an
existing segment without modifying it).

This breaks for discard and write_same bios, since for those bi_size has
nothing to do with the segments in the biovec. So for now we need a fairly
gross hack: we fortunately know that there will never be more than one segment
for the entire request, so we can special-case discard/write_same.
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Tested-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
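
To make the problem concrete, here is a hypothetical sketch (not part of this
commit; the helper name naive_count_segments is made up) of what a segment walk
driven purely by bi_iter.bi_size would do to a discard bio. It assumes the
3.14-era block-layer types (struct bio, struct bvec_iter, REQ_DISCARD) and is
only meant to build inside a kernel tree, not as a standalone program.

#include <linux/bio.h>
#include <linux/blk_types.h>

/* Hypothetical: count segments the way immutable biovecs expect. */
static unsigned int naive_count_segments(struct bio *bio)
{
        struct bio_vec bvec;
        struct bvec_iter iter;
        unsigned int nsegs = 0;

        /*
         * bio_for_each_segment() advances by bi_iter.bi_size.  For a
         * REQ_DISCARD bio, bi_size is the number of bytes to discard on the
         * device, while bi_io_vec holds either nothing (bi_vcnt == 0) or a
         * single payload page added by blk_add_request_payload(), so this
         * loop would walk biovec entries that were never filled in.  That is
         * why the patch bails out for REQ_DISCARD/REQ_WRITE_SAME before any
         * iteration happens.
         */
        bio_for_each_segment(bvec, bio, iter)
                nsegs++;

        return nsegs;
}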
Diffstat (limited to 'block')

 block/blk-merge.c | 91
 1 file changed, 62 insertions(+), 29 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 8f8adaa95466..6c583f9c5b65 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -21,6 +21,16 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
         if (!bio)
                 return 0;
 
+        /*
+         * This should probably be returning 0, but blk_add_request_payload()
+         * (Christoph!!!!)
+         */
+        if (bio->bi_rw & REQ_DISCARD)
+                return 1;
+
+        if (bio->bi_rw & REQ_WRITE_SAME)
+                return 1;
+
         fbio = bio;
         cluster = blk_queue_cluster(q);
         seg_size = 0;
@@ -161,30 +171,60 @@ new_segment:
                 *bvprv = *bvec;
 }
 
-/*
- * map a request to scatterlist, return number of sg entries setup. Caller
- * must make sure sg can hold rq->nr_phys_segments entries
- */
-int blk_rq_map_sg(struct request_queue *q, struct request *rq,
-                  struct scatterlist *sglist)
+static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
+                             struct scatterlist *sglist,
+                             struct scatterlist **sg)
 {
         struct bio_vec bvec, bvprv = { NULL };
-        struct req_iterator iter;
-        struct scatterlist *sg;
+        struct bvec_iter iter;
         int nsegs, cluster;
 
         nsegs = 0;
         cluster = blk_queue_cluster(q);
 
-        /*
-         * for each bio in rq
-         */
-        sg = NULL;
-        rq_for_each_segment(bvec, rq, iter) {
-                __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
-                                     &nsegs, &cluster);
-        } /* segments in rq */
+        if (bio->bi_rw & REQ_DISCARD) {
+                /*
+                 * This is a hack - drivers should be neither modifying the
+                 * biovec, nor relying on bi_vcnt - but because of
+                 * blk_add_request_payload(), a discard bio may or may not have
+                 * a payload we need to set up here (thank you Christoph) and
+                 * bi_vcnt is really the only way of telling if we need to.
+                 */
+
+                if (bio->bi_vcnt)
+                        goto single_segment;
+
+                return 0;
+        }
+
+        if (bio->bi_rw & REQ_WRITE_SAME) {
+single_segment:
+                *sg = sglist;
+                bvec = bio_iovec(bio);
+                sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
+                return 1;
+        }
+
+        for_each_bio(bio)
+                bio_for_each_segment(bvec, bio, iter)
+                        __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
+                                             &nsegs, &cluster);
 
+        return nsegs;
+}
+
+/*
+ * map a request to scatterlist, return number of sg entries setup. Caller
+ * must make sure sg can hold rq->nr_phys_segments entries
+ */
+int blk_rq_map_sg(struct request_queue *q, struct request *rq,
+                  struct scatterlist *sglist)
+{
+        struct scatterlist *sg = NULL;
+        int nsegs = 0;
+
+        if (rq->bio)
+                nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
 
         if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
             (blk_rq_bytes(rq) & q->dma_pad_mask)) {
@@ -230,20 +270,13 @@ EXPORT_SYMBOL(blk_rq_map_sg);
 int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
                    struct scatterlist *sglist)
 {
-        struct bio_vec bvec, bvprv = { NULL };
-        struct scatterlist *sg;
-        int nsegs, cluster;
-        struct bvec_iter iter;
-
-        nsegs = 0;
-        cluster = blk_queue_cluster(q);
-
-        sg = NULL;
-        bio_for_each_segment(bvec, bio, iter) {
-                __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
-                                     &nsegs, &cluster);
-        } /* segments in bio */
+        struct scatterlist *sg = NULL;
+        int nsegs;
+        struct bio *next = bio->bi_next;
+        bio->bi_next = NULL;
 
+        nsegs = __blk_bios_map_sg(q, bio, sglist, &sg);
+        bio->bi_next = next;
         if (sg)
                 sg_mark_end(sg);
 
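
As a usage note, here is a hedged driver-side sketch (not part of the commit;
example_map_request and its surrounding assumptions are made up) of how
blk_rq_map_sg() behaves after this change: regular read/write requests are
walked bio by bio as before, while discard/write-same requests map at most one
scatterlist entry - the single payload page, if blk_add_request_payload()
attached one.

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical caller.  'table' is assumed to have been allocated with at
 * least rq->nr_phys_segments entries, as the comment above blk_rq_map_sg()
 * requires; with this patch nr_phys_segments is reported as 1 for
 * discard/write-same requests, so the table is never undersized even when
 * the final mapping turns out to be empty.
 */
static int example_map_request(struct request_queue *q, struct request *rq,
                               struct scatterlist *table)
{
        int nsegs;

        sg_init_table(table, rq->nr_phys_segments);

        /*
         * Returns 0 for a payload-less discard, 1 for a discard or
         * write-same bio carrying a payload page, and the full physical
         * segment count for regular I/O.
         */
        nsegs = blk_rq_map_sg(q, rq, table);

        return nsegs;
}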