Diffstat (limited to 'block/blk-merge.c')
 -rw-r--r--  block/blk-merge.c | 193
 1 file changed, 63 insertions(+), 130 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 936a110de0b..cfcc37cb222 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -110,49 +110,6 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 	return 0;
 }
 
-static void
-__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
-		     struct scatterlist *sglist, struct bio_vec **bvprv,
-		     struct scatterlist **sg, int *nsegs, int *cluster)
-{
-
-	int nbytes = bvec->bv_len;
-
-	if (*bvprv && *cluster) {
-		if ((*sg)->length + nbytes > queue_max_segment_size(q))
-			goto new_segment;
-
-		if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
-			goto new_segment;
-		if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
-			goto new_segment;
-
-		(*sg)->length += nbytes;
-	} else {
-new_segment:
-		if (!*sg)
-			*sg = sglist;
-		else {
-			/*
-			 * If the driver previously mapped a shorter
-			 * list, we could see a termination bit
-			 * prematurely unless it fully inits the sg
-			 * table on each mapping. We KNOW that there
-			 * must be more entries here or the driver
-			 * would be buggy, so force clear the
-			 * termination bit to avoid doing a full
-			 * sg_init_table() in drivers for each command.
-			 */
-			(*sg)->page_link &= ~0x02;
-			*sg = sg_next(*sg);
-		}
-
-		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
-		(*nsegs)++;
-	}
-	*bvprv = bvec;
-}
-
 /*
  * map a request to scatterlist, return number of sg entries setup. Caller
  * must make sure sg can hold rq->nr_phys_segments entries
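For readers unfamiliar with the termination-bit trick described in the comment above, here is a minimal, illustrative sketch of how the scatterlist end marker behaves when a preallocated table is reused; the table name and size are hypothetical, and later kernels provide sg_unmark_end() for the open-coded bit clear:

#include <linux/scatterlist.h>

#define EXAMPLE_NENTS 8				/* hypothetical table size */
static struct scatterlist example_sgl[EXAMPLE_NENTS];

static void example_end_marker(void)
{
	/* zeroes the table and marks the last entry as the end */
	sg_init_table(example_sgl, EXAMPLE_NENTS);

	/* a previous, shorter mapping left its end marker on entry 2 */
	sg_mark_end(&example_sgl[2]);

	/*
	 * A longer mapping that reuses the table must clear that stale
	 * marker before walking past entry 2 -- the open-coded
	 * "page_link &= ~0x02" above; sg_next() would otherwise return
	 * NULL at this entry.
	 */
	example_sgl[2].page_link &= ~0x02;
}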
@@ -174,8 +131,41 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	bvprv = NULL;
 	sg = NULL;
 	rq_for_each_segment(bvec, rq, iter) {
-		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
-				     &nsegs, &cluster);
+		int nbytes = bvec->bv_len;
+
+		if (bvprv && cluster) {
+			if (sg->length + nbytes > queue_max_segment_size(q))
+				goto new_segment;
+
+			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
+				goto new_segment;
+			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
+				goto new_segment;
+
+			sg->length += nbytes;
+		} else {
+new_segment:
+			if (!sg)
+				sg = sglist;
+			else {
+				/*
+				 * If the driver previously mapped a shorter
+				 * list, we could see a termination bit
+				 * prematurely unless it fully inits the sg
+				 * table on each mapping. We KNOW that there
+				 * must be more entries here or the driver
+				 * would be buggy, so force clear the
+				 * termination bit to avoid doing a full
+				 * sg_init_table() in drivers for each command.
+				 */
+				sg->page_link &= ~0x02;
+				sg = sg_next(sg);
+			}
+
+			sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
+			nsegs++;
+		}
+		bvprv = bvec;
 	} /* segments in rq */
 
 
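For context, here is a minimal sketch (not part of this patch) of how a request-based driver typically consumes blk_rq_map_sg(); the names, segment count and error handling are hypothetical. Note that the table is initialized once at setup time, which is exactly the case the termination-bit handling above is designed for:

#include <linux/blkdev.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#define EXAMPLE_MAX_SEGS 128			/* hypothetical hardware limit */
static struct scatterlist example_sgl[EXAMPLE_MAX_SEGS];

/* called once at device setup; per-request re-termination is handled above */
static void example_setup(void)
{
	sg_init_table(example_sgl, EXAMPLE_MAX_SEGS);
}

static int example_prep_request(struct request_queue *q, struct request *rq,
				struct device *dma_dev)
{
	int nsegs, mapped;

	/* example_sgl must hold at least rq->nr_phys_segments entries */
	nsegs = blk_rq_map_sg(q, rq, example_sgl);

	mapped = dma_map_sg(dma_dev, example_sgl, nsegs,
			    rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!mapped)
		return -EIO;

	/* hand "mapped" entries of example_sgl to the hardware here */
	return mapped;
}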
@@ -209,43 +199,6 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 }
 EXPORT_SYMBOL(blk_rq_map_sg);
 
-/**
- * blk_bio_map_sg - map a bio to a scatterlist
- * @q: request_queue in question
- * @bio: bio being mapped
- * @sglist: scatterlist being mapped
- *
- * Note:
- *    Caller must make sure sg can hold bio->bi_phys_segments entries
- *
- * Will return the number of sg entries setup
- */
-int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
-		   struct scatterlist *sglist)
-{
-	struct bio_vec *bvec, *bvprv;
-	struct scatterlist *sg;
-	int nsegs, cluster;
-	unsigned long i;
-
-	nsegs = 0;
-	cluster = blk_queue_cluster(q);
-
-	bvprv = NULL;
-	sg = NULL;
-	bio_for_each_segment(bvec, bio, i) {
-		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
-				     &nsegs, &cluster);
-	} /* segments in bio */
-
-	if (sg)
-		sg_mark_end(sg);
-
-	BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
-	return nsegs;
-}
-EXPORT_SYMBOL(blk_bio_map_sg);
-
 static inline int ll_new_hw_segment(struct request_queue *q,
 				    struct request *req,
 				    struct bio *bio)
@@ -275,8 +228,14 @@ no_merge:
 int ll_back_merge_fn(struct request_queue *q, struct request *req,
 		     struct bio *bio)
 {
-	if (blk_rq_sectors(req) + bio_sectors(bio) >
-	    blk_rq_get_max_sectors(req)) {
+	unsigned short max_sectors;
+
+	if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
+		max_sectors = queue_max_hw_sectors(q);
+	else
+		max_sectors = queue_max_sectors(q);
+
+	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
 		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
@@ -293,8 +252,15 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		      struct bio *bio)
 {
-	if (blk_rq_sectors(req) + bio_sectors(bio) >
-	    blk_rq_get_max_sectors(req)) {
+	unsigned short max_sectors;
+
+	if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
+		max_sectors = queue_max_hw_sectors(q);
+	else
+		max_sectors = queue_max_sectors(q);
+
+
+	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
 		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
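The limit selection added in the two hunks above is identical in ll_back_merge_fn() and ll_front_merge_fn(); purely as an illustration (this helper does not exist in the patch), it amounts to:

static unsigned int ll_max_sectors_example(struct request_queue *q,
					   struct request *req)
{
	/*
	 * SCSI passthrough (BLOCK_PC) requests are bounded only by the
	 * hardware limit; normal file system requests also honour the
	 * (possibly smaller) soft limit.
	 */
	if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
		return queue_max_hw_sectors(q);
	return queue_max_sectors(q);
}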
@@ -325,8 +291,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	/*
 	 * Will it become too large?
 	 */
-	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
-	    blk_rq_get_max_sectors(req))
+	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
 		return 0;
 
 	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
@@ -405,7 +370,16 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 	if (!rq_mergeable(req) || !rq_mergeable(next))
 		return 0;
 
-	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
+	/*
+	 * Don't merge file system requests and discard requests
+	 */
+	if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD))
+		return 0;
+
+	/*
+	 * Don't merge discard requests and secure discard requests
+	 */
+	if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
 		return 0;
 
 	/*
@@ -419,10 +393,6 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 	    || next->special)
 		return 0;
 
-	if (req->cmd_flags & REQ_WRITE_SAME &&
-	    !blk_write_same_mergeable(req->bio, next->bio))
-		return 0;
-
 	/*
 	 * If we are allowed to merge, then append bio list
 	 * from next to rq and release next. merge_requests_fn
@@ -501,40 +471,3 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 {
 	return attempt_merge(q, rq, next);
 }
-
-bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
-{
-	if (!rq_mergeable(rq) || !bio_mergeable(bio))
-		return false;
-
-	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
-		return false;
-
-	/* different data direction or already started, don't merge */
-	if (bio_data_dir(bio) != rq_data_dir(rq))
-		return false;
-
-	/* must be same device and not a special request */
-	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
-		return false;
-
-	/* only merge integrity protected bio into ditto rq */
-	if (bio_integrity(bio) != blk_integrity_rq(rq))
-		return false;
-
-	/* must be using the same buffer */
-	if (rq->cmd_flags & REQ_WRITE_SAME &&
-	    !blk_write_same_mergeable(rq->bio, bio))
-		return false;
-
-	return true;
-}
-
-int blk_try_merge(struct request *rq, struct bio *bio)
-{
-	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
-		return ELEVATOR_BACK_MERGE;
-	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
-		return ELEVATOR_FRONT_MERGE;
-	return ELEVATOR_NO_MERGE;
-}
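For reference, the merge-direction test in the removed blk_try_merge() is plain sector arithmetic; with illustrative numbers:

/*
 * request rq: blk_rq_pos(rq) = 100, blk_rq_sectors(rq) = 8  -> covers sectors 100..107
 *
 * bio starting at sector 108, any size:  100 + 8 == 108     -> ELEVATOR_BACK_MERGE
 * bio of 8 sectors starting at 92:       100 - 8 == 92      -> ELEVATOR_FRONT_MERGE
 * any other starting sector                                  -> ELEVATOR_NO_MERGE
 */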