path: root/block/blk-merge.c
author		Mikulas Patocka <mpatocka@redhat.com>	2008-08-15 04:15:19 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2008-10-09 02:56:03 -0400
commit		b8b3e16cfe6435d961f6aaebcfd52a1ff2a988c5 (patch)
tree		5832535c112c0504590256cb8a0bcabc6e282be3 /block/blk-merge.c
parent		6a421c1dc94b12923294a359822346f12492de5e (diff)
block: drop virtual merging accounting
Remove virtual merge accounting.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
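For context, the virtual merging dropped here let two adjacent biovecs count as a single hardware segment when an IOMMU could remap them into one contiguous DMA range, with the accounting tracking the merged size against a cap. Below is a minimal standalone sketch of that kind of check, using assumed boundary and size limits and illustrative names; it is not the kernel's BIOVEC_VIRT_MERGEABLE / BIOVEC_VIRT_OVERSIZE definition.

/*
 * Illustrative sketch only -- not the kernel's macros.  Two adjacent
 * chunks were counted as one hardware segment if the first ends and the
 * second starts on the same (assumed) merge boundary, so an IOMMU could
 * remap them into a contiguous DMA range, and the combined size stayed
 * under a cap.
 */
#include <stdbool.h>
#include <stdint.h>

#define VMERGE_BOUNDARY		4096u		/* assumed IOMMU page size   */
#define VMERGE_MAX_SIZE		(64u << 10)	/* assumed per-segment limit */

struct chunk {
	uint64_t phys;	/* physical address of the data */
	uint32_t len;	/* length in bytes              */
};

/* Could an IOMMU virtually merge 'a' followed by 'b' into one segment? */
static bool virt_mergeable(const struct chunk *a, const struct chunk *b)
{
	return (((a->phys + a->len) | b->phys) & (VMERGE_BOUNDARY - 1)) == 0;
}

/* The size bookkeeping that went with it. */
static bool virt_oversize(uint32_t merged_len)
{
	return merged_len > VMERGE_MAX_SIZE;
}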
Diffstat (limited to 'block/blk-merge.c')
-rw-r--r--	block/blk-merge.c	79
1 file changed, 6 insertions(+), 73 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 6cf8f0c70a51..2c2a2ee716ec 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -66,7 +66,7 @@ void blk_recalc_rq_segments(struct request *rq)
 		 */
 		high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
 		if (high || highprv)
-			goto new_hw_segment;
+			goto new_segment;
 		if (cluster) {
 			if (seg_size + bv->bv_len > q->max_segment_size)
 				goto new_segment;
@@ -74,8 +74,6 @@ void blk_recalc_rq_segments(struct request *rq)
 				goto new_segment;
 			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
 				goto new_segment;
-			if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
-				goto new_hw_segment;
 
 			seg_size += bv->bv_len;
 			hw_seg_size += bv->bv_len;
@@ -83,17 +81,11 @@ void blk_recalc_rq_segments(struct request *rq)
 			continue;
 		}
 new_segment:
-		if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
-		    !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
-			hw_seg_size += bv->bv_len;
-		else {
-new_hw_segment:
-			if (nr_hw_segs == 1 &&
-			    hw_seg_size > rq->bio->bi_hw_front_size)
-				rq->bio->bi_hw_front_size = hw_seg_size;
-			hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
-			nr_hw_segs++;
-		}
+		if (nr_hw_segs == 1 &&
+		    hw_seg_size > rq->bio->bi_hw_front_size)
+			rq->bio->bi_hw_front_size = hw_seg_size;
+		hw_seg_size = bv->bv_len;
+		nr_hw_segs++;
 
 		nr_phys_segs++;
 		bvprv = bv;
@@ -150,23 +142,6 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 	return 0;
 }
 
-static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
-				 struct bio *nxt)
-{
-	if (!bio_flagged(bio, BIO_SEG_VALID))
-		blk_recount_segments(q, bio);
-	if (!bio_flagged(nxt, BIO_SEG_VALID))
-		blk_recount_segments(q, nxt);
-	if (bio_has_data(bio) &&
-	    (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
-	     BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size)))
-		return 0;
-	if (bio->bi_hw_back_size + nxt->bi_hw_front_size > q->max_segment_size)
-		return 0;
-
-	return 1;
-}
-
 /*
  * map a request to scatterlist, return number of sg entries setup. Caller
  * must make sure sg can hold rq->nr_phys_segments entries
@@ -304,7 +279,6 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 		     struct bio *bio)
 {
 	unsigned short max_sectors;
-	int len;
 
 	if (unlikely(blk_pc_request(req)))
 		max_sectors = q->max_hw_sectors;
@@ -321,20 +295,6 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 		blk_recount_segments(q, req->biotail);
 	if (!bio_flagged(bio, BIO_SEG_VALID))
 		blk_recount_segments(q, bio);
-	len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
-	if (!bio_has_data(bio) ||
-	    (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
-	     && !BIOVEC_VIRT_OVERSIZE(len))) {
-		int mergeable = ll_new_mergeable(q, req, bio);
-
-		if (mergeable) {
-			if (req->nr_hw_segments == 1)
-				req->bio->bi_hw_front_size = len;
-			if (bio->bi_hw_segments == 1)
-				bio->bi_hw_back_size = len;
-		}
-		return mergeable;
-	}
 
 	return ll_new_hw_segment(q, req, bio);
 }
@@ -343,7 +303,6 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		      struct bio *bio)
 {
 	unsigned short max_sectors;
-	int len;
 
 	if (unlikely(blk_pc_request(req)))
 		max_sectors = q->max_hw_sectors;
@@ -357,24 +316,10 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		q->last_merge = NULL;
 		return 0;
 	}
-	len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
 	if (!bio_flagged(bio, BIO_SEG_VALID))
 		blk_recount_segments(q, bio);
 	if (!bio_flagged(req->bio, BIO_SEG_VALID))
 		blk_recount_segments(q, req->bio);
-	if (!bio_has_data(bio) ||
-	    (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
-	     !BIOVEC_VIRT_OVERSIZE(len))) {
-		int mergeable = ll_new_mergeable(q, req, bio);
-
-		if (mergeable) {
-			if (bio->bi_hw_segments == 1)
-				bio->bi_hw_front_size = len;
-			if (req->nr_hw_segments == 1)
-				req->biotail->bi_hw_back_size = len;
-		}
-		return mergeable;
-	}
 
 	return ll_new_hw_segment(q, req, bio);
 }
@@ -406,18 +351,6 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 		return 0;
 
 	total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
-	if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
-		int len = req->biotail->bi_hw_back_size +
-			  next->bio->bi_hw_front_size;
-		/*
-		 * propagate the combined length to the end of the requests
-		 */
-		if (req->nr_hw_segments == 1)
-			req->bio->bi_hw_front_size = len;
-		if (next->nr_hw_segments == 1)
-			next->biotail->bi_hw_back_size = len;
-		total_hw_segments--;
-	}
 
 	if (total_hw_segments > q->max_hw_segments)
 		return 0;
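With virtual merging gone, the accounting that remains in blk_recalc_rq_segments() and the ll_*_merge_fn() helpers is purely physical: adjacent chunks merge only while they are physically contiguous, fit under the queue's maximum segment size, and do not cross the segment boundary mask. The following is a standalone sketch of that counting rule, with assumed names and limits; it is not the kernel function itself.

#include <stddef.h>
#include <stdint.h>

struct chunk {
	uint64_t phys;	/* physical address */
	uint32_t len;	/* length in bytes  */
};

/*
 * Count how many segments an ordered list of chunks needs under purely
 * physical rules: contiguity, a per-segment size cap, and a boundary
 * mask that no segment may cross (e.g. 0xffffffff for 4G).
 */
static int count_phys_segments(const struct chunk *c, size_t n,
			       uint32_t max_seg_size, uint64_t boundary_mask)
{
	int nr_segs = 0;
	uint32_t seg_size = 0;
	uint64_t seg_start = 0;

	for (size_t i = 0; i < n; i++) {
		int contiguous = i > 0 &&
			c[i - 1].phys + c[i - 1].len == c[i].phys;
		int fits = seg_size + c[i].len <= max_seg_size;
		int same_window = i > 0 &&
			(seg_start | boundary_mask) ==
			((c[i].phys + c[i].len - 1) | boundary_mask);

		if (!contiguous || !fits || !same_window) {
			nr_segs++;		/* start a new segment */
			seg_start = c[i].phys;
			seg_size = c[i].len;
		} else {
			seg_size += c[i].len;	/* extend current one  */
		}
	}
	return nr_segs;
}

For example, two 4 KiB chunks at physical addresses 0x1000 and 0x2000 count as one segment under a 64 KiB cap, while any gap between them forces a second segment.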