Diffstat (limited to 'block/blk-merge.c')
-rw-r--r--	block/blk-merge.c	129
1 file changed, 25 insertions(+), 104 deletions(-)
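Taken together, the hunks below make three changes. First, the hardware-segment (virtual merge) accounting is dropped entirely: nr_hw_segments, hw_seg_size, the bi_hw_front_size/bi_hw_back_size propagation, and the BIOVEC_VIRT_* mergeability tests all go away, leaving physical segments as the only unit the merge paths reason about. Second, data-less discard requests are folded into the normal sector and merge accounting. Third, the partition statistics in attempt_merge() move to the per-cpu part_stat_lock()/part_stat_unlock() scheme, and the request's completion CPU hint is propagated across a merge.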
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 5efc9e7a68b7..908d3e11ac52 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -11,7 +11,7 @@
 
 void blk_recalc_rq_sectors(struct request *rq, int nsect)
 {
-	if (blk_fs_request(rq)) {
+	if (blk_fs_request(rq) || blk_discard_rq(rq)) {
 		rq->hard_sector += nsect;
 		rq->hard_nr_sectors -= nsect;
 
@@ -41,12 +41,9 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
 void blk_recalc_rq_segments(struct request *rq)
 {
 	int nr_phys_segs;
-	int nr_hw_segs;
 	unsigned int phys_size;
-	unsigned int hw_size;
 	struct bio_vec *bv, *bvprv = NULL;
 	int seg_size;
-	int hw_seg_size;
 	int cluster;
 	struct req_iterator iter;
 	int high, highprv = 1;
@@ -56,8 +53,8 @@ void blk_recalc_rq_segments(struct request *rq)
 		return;
 
 	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
-	hw_seg_size = seg_size = 0;
-	phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
+	seg_size = 0;
+	phys_size = nr_phys_segs = 0;
 	rq_for_each_segment(bv, rq, iter) {
 		/*
 		 * the trick here is making sure that a high page is never
@@ -66,7 +63,7 @@ void blk_recalc_rq_segments(struct request *rq)
 		 */
 		high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
 		if (high || highprv)
-			goto new_hw_segment;
+			goto new_segment;
 		if (cluster) {
 			if (seg_size + bv->bv_len > q->max_segment_size)
 				goto new_segment;
@@ -74,40 +71,19 @@ void blk_recalc_rq_segments(struct request *rq)
 				goto new_segment;
 			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
 				goto new_segment;
-			if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
-				goto new_hw_segment;
 
 			seg_size += bv->bv_len;
-			hw_seg_size += bv->bv_len;
 			bvprv = bv;
 			continue;
 		}
 new_segment:
-		if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
-		    !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
-			hw_seg_size += bv->bv_len;
-		else {
-new_hw_segment:
-			if (nr_hw_segs == 1 &&
-			    hw_seg_size > rq->bio->bi_hw_front_size)
-				rq->bio->bi_hw_front_size = hw_seg_size;
-			hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
-			nr_hw_segs++;
-		}
-
 		nr_phys_segs++;
 		bvprv = bv;
 		seg_size = bv->bv_len;
 		highprv = high;
 	}
 
-	if (nr_hw_segs == 1 &&
-	    hw_seg_size > rq->bio->bi_hw_front_size)
-		rq->bio->bi_hw_front_size = hw_seg_size;
-	if (hw_seg_size > rq->biotail->bi_hw_back_size)
-		rq->biotail->bi_hw_back_size = hw_seg_size;
 	rq->nr_phys_segments = nr_phys_segs;
-	rq->nr_hw_segments = nr_hw_segs;
 }
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
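With the virtual-merge bookkeeping gone, blk_recalc_rq_segments() reduces to the physical clustering rule alone: adjacent bio_vecs share a segment only while they remain physically contiguous, fit within max_segment_size, and do not straddle the queue's segment boundary or a highmem bounce page. A minimal userspace sketch of that counting rule (the vec type and limits are hypothetical stand-ins for bio_vec and the request_queue; the boundary and highmem checks are omitted for brevity):

#include <stdbool.h>
#include <stdio.h>

struct vec {				/* stand-in for struct bio_vec */
	unsigned long addr;		/* physical address of the chunk */
	unsigned int len;		/* length in bytes */
};

/* Count physical segments the way the simplified loop above does:
 * merge a vec into the current segment while the queue clusters,
 * the combined size stays under max_segment_size, and the vec is
 * physically contiguous with its predecessor. */
static int count_phys_segments(const struct vec *v, int n,
			       unsigned int max_segment_size, bool cluster)
{
	unsigned int seg_size = 0;
	int nr_phys_segs = 0;

	for (int i = 0; i < n; i++) {
		if (i > 0 && cluster &&
		    seg_size + v[i].len <= max_segment_size &&
		    v[i - 1].addr + v[i - 1].len == v[i].addr) {
			seg_size += v[i].len;	/* same segment */
			continue;
		}
		nr_phys_segs++;			/* new segment starts here */
		seg_size = v[i].len;
	}
	return nr_phys_segs;
}

int main(void)
{
	struct vec v[] = {
		{ 0x1000, 4096 },	/* contiguous with the next vec */
		{ 0x2000, 4096 },
		{ 0x9000, 4096 },	/* gap: forces a new segment */
	};

	printf("%d\n", count_phys_segments(v, 3, 65536, true));	/* 2 */
	return 0;
}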
@@ -120,7 +96,6 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio)
 	blk_recalc_rq_segments(&rq);
 	bio->bi_next = nxt;
 	bio->bi_phys_segments = rq.nr_phys_segments;
-	bio->bi_hw_segments = rq.nr_hw_segments;
 	bio->bi_flags |= (1 << BIO_SEG_VALID);
 }
 EXPORT_SYMBOL(blk_recount_segments);
@@ -131,13 +106,17 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 	if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
 		return 0;
 
-	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
-		return 0;
 	if (bio->bi_size + nxt->bi_size > q->max_segment_size)
 		return 0;
 
+	if (!bio_has_data(bio))
+		return 1;
+
+	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
+		return 0;
+
 	/*
-	 * bio and nxt are contigous in memory, check if the queue allows
+	 * bio and nxt are contiguous in memory; check if the queue allows
 	 * these two to be merged into one
 	 */
 	if (BIO_SEG_BOUNDARY(q, bio, nxt))
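Worth noting in the hunk above: the size cap is now tested before physical mergeability, and a bio that carries no data (a discard) is declared mergeable as soon as the combined size fits, since it has no bio_vecs whose contiguity could be examined.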
@@ -146,22 +125,6 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 	return 0;
 }
 
-static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
-				 struct bio *nxt)
-{
-	if (!bio_flagged(bio, BIO_SEG_VALID))
-		blk_recount_segments(q, bio);
-	if (!bio_flagged(nxt, BIO_SEG_VALID))
-		blk_recount_segments(q, nxt);
-	if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
-	    BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
-		return 0;
-	if (bio->bi_hw_back_size + nxt->bi_hw_front_size > q->max_segment_size)
-		return 0;
-
-	return 1;
-}
-
 /*
  * map a request to scatterlist, return number of sg entries setup. Caller
  * must make sure sg can hold rq->nr_phys_segments entries
@@ -275,10 +238,9 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 				    struct request *req,
 				    struct bio *bio)
 {
-	int nr_hw_segs = bio_hw_segments(q, bio);
 	int nr_phys_segs = bio_phys_segments(q, bio);
 
-	if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
+	if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments
 	    || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
 		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
@@ -290,7 +252,6 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 	 * This will form the start of a new hw segment. Bump both
 	 * counters.
 	 */
-	req->nr_hw_segments += nr_hw_segs;
 	req->nr_phys_segments += nr_phys_segs;
 	return 1;
 }
@@ -299,7 +260,6 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 		     struct bio *bio)
 {
 	unsigned short max_sectors;
-	int len;
 
 	if (unlikely(blk_pc_request(req)))
 		max_sectors = q->max_hw_sectors;
@@ -316,19 +276,6 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 		blk_recount_segments(q, req->biotail);
 	if (!bio_flagged(bio, BIO_SEG_VALID))
 		blk_recount_segments(q, bio);
-	len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
-	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
-	    && !BIOVEC_VIRT_OVERSIZE(len)) {
-		int mergeable = ll_new_mergeable(q, req, bio);
-
-		if (mergeable) {
-			if (req->nr_hw_segments == 1)
-				req->bio->bi_hw_front_size = len;
-			if (bio->bi_hw_segments == 1)
-				bio->bi_hw_back_size = len;
-		}
-		return mergeable;
-	}
 
 	return ll_new_hw_segment(q, req, bio);
 }
@@ -337,7 +284,6 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		      struct bio *bio)
 {
 	unsigned short max_sectors;
-	int len;
 
 	if (unlikely(blk_pc_request(req)))
 		max_sectors = q->max_hw_sectors;
@@ -351,23 +297,10 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		q->last_merge = NULL;
 		return 0;
 	}
-	len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
 	if (!bio_flagged(bio, BIO_SEG_VALID))
 		blk_recount_segments(q, bio);
 	if (!bio_flagged(req->bio, BIO_SEG_VALID))
 		blk_recount_segments(q, req->bio);
-	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
-	    !BIOVEC_VIRT_OVERSIZE(len)) {
-		int mergeable = ll_new_mergeable(q, req, bio);
-
-		if (mergeable) {
-			if (bio->bi_hw_segments == 1)
-				bio->bi_hw_front_size = len;
-			if (req->nr_hw_segments == 1)
-				req->biotail->bi_hw_back_size = len;
-		}
-		return mergeable;
-	}
 
 	return ll_new_hw_segment(q, req, bio);
 }
@@ -376,7 +309,6 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 				struct request *next)
 {
 	int total_phys_segments;
-	int total_hw_segments;
 
 	/*
 	 * First check if the either of the requests are re-queued
@@ -398,26 +330,11 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	if (total_phys_segments > q->max_phys_segments)
 		return 0;
 
-	total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
-	if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
-		int len = req->biotail->bi_hw_back_size +
-				next->bio->bi_hw_front_size;
-		/*
-		 * propagate the combined length to the end of the requests
-		 */
-		if (req->nr_hw_segments == 1)
-			req->bio->bi_hw_front_size = len;
-		if (next->nr_hw_segments == 1)
-			next->biotail->bi_hw_back_size = len;
-		total_hw_segments--;
-	}
-
-	if (total_hw_segments > q->max_hw_segments)
+	if (total_phys_segments > q->max_hw_segments)
 		return 0;
 
 	/* Merge is OK... */
 	req->nr_phys_segments = total_phys_segments;
-	req->nr_hw_segments = total_hw_segments;
 	return 1;
 }
 
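With blk_hw_contig_segment() and the separate hw counter removed, ll_merge_requests_fn() (like ll_new_hw_segment() earlier) simply checks the combined physical segment count against both queue limits, max_phys_segments and max_hw_segments; the old trick of saving one hw segment when the requests happened to be virtually contiguous across the merge point goes away with the vmerge accounting.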
@@ -470,17 +387,21 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 	elv_merge_requests(q, req, next);
 
 	if (req->rq_disk) {
-		struct hd_struct *part
-			= get_part(req->rq_disk, req->sector);
-		disk_round_stats(req->rq_disk);
-		req->rq_disk->in_flight--;
-		if (part) {
-			part_round_stats(part);
-			part->in_flight--;
-		}
+		struct hd_struct *part;
+		int cpu;
+
+		cpu = part_stat_lock();
+		part = disk_map_sector_rcu(req->rq_disk, req->sector);
+
+		part_round_stats(cpu, part);
+		part_dec_in_flight(part);
+
+		part_stat_unlock();
 	}
 
 	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
+	if (blk_rq_cpu_valid(next))
+		req->cpu = next->cpu;
 
 	__blk_put_request(q, next);
 	return 1;
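The statistics block in attempt_merge() is rewritten for the per-cpu accounting model: part_stat_lock() pins the CPU and takes the RCU read lock protecting the partition table, disk_map_sector_rcu() replaces get_part(), and the round-stats and in-flight updates are keyed by the returned CPU. A rough userspace analogue of that pattern, with every name hypothetical (a fixed NR_CPUS array stands in for real per-cpu storage, and the lock stubs only fake CPU pinning):

#include <stdio.h>

#define NR_CPUS 4

struct part_stats {
	unsigned long merges[NR_CPUS];	/* one slot per cpu, no sharing */
};

/* Stubs faking the kernel pattern: the real part_stat_lock() disables
 * preemption and returns the current cpu id under rcu_read_lock(). */
static int part_stat_lock(void) { return 0; /* pretend: cpu 0 */ }
static void part_stat_unlock(void) { }

static void account_merge(struct part_stats *p)
{
	int cpu = part_stat_lock();

	p->merges[cpu]++;		/* uncontended per-cpu update */
	part_stat_unlock();
}

static unsigned long read_merges(const struct part_stats *p)
{
	unsigned long sum = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		sum += p->merges[cpu];	/* reader sums all slots */
	return sum;
}

int main(void)
{
	struct part_stats p = { { 0 } };

	account_merge(&p);
	account_merge(&p);
	printf("%lu\n", read_merges(&p));	/* prints 2 */
	return 0;
}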