about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorMikulas Patocka <mpatocka@redhat.com>2008-08-15 04:15:19 -0400
committerJens Axboe <jens.axboe@oracle.com>2008-10-09 02:56:03 -0400
commitb8b3e16cfe6435d961f6aaebcfd52a1ff2a988c5 (patch)
tree5832535c112c0504590256cb8a0bcabc6e282be3
parent6a421c1dc94b12923294a359822346f12492de5e (diff)
block: drop virtual merging accounting
Remove virtual merge accounting.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
-rw-r--r--block/blk-merge.c79
-rw-r--r--fs/bio.c6
-rw-r--r--include/linux/bio.h15
3 files changed, 8 insertions, 92 deletions
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 6cf8f0c70a51..2c2a2ee716ec 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -66,7 +66,7 @@ void blk_recalc_rq_segments(struct request *rq)
66 */ 66 */
67 high = page_to_pfn(bv->bv_page) > q->bounce_pfn; 67 high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
68 if (high || highprv) 68 if (high || highprv)
69 goto new_hw_segment; 69 goto new_segment;
70 if (cluster) { 70 if (cluster) {
71 if (seg_size + bv->bv_len > q->max_segment_size) 71 if (seg_size + bv->bv_len > q->max_segment_size)
72 goto new_segment; 72 goto new_segment;
@@ -74,8 +74,6 @@ void blk_recalc_rq_segments(struct request *rq)
74 goto new_segment; 74 goto new_segment;
75 if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv)) 75 if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
76 goto new_segment; 76 goto new_segment;
77 if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
78 goto new_hw_segment;
79 77
80 seg_size += bv->bv_len; 78 seg_size += bv->bv_len;
81 hw_seg_size += bv->bv_len; 79 hw_seg_size += bv->bv_len;
@@ -83,17 +81,11 @@ void blk_recalc_rq_segments(struct request *rq)
83 continue; 81 continue;
84 } 82 }
85new_segment: 83new_segment:
86 if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) && 84 if (nr_hw_segs == 1 &&
87 !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len)) 85 hw_seg_size > rq->bio->bi_hw_front_size)
88 hw_seg_size += bv->bv_len; 86 rq->bio->bi_hw_front_size = hw_seg_size;
89 else { 87 hw_seg_size = bv->bv_len;
90new_hw_segment: 88 nr_hw_segs++;
91 if (nr_hw_segs == 1 &&
92 hw_seg_size > rq->bio->bi_hw_front_size)
93 rq->bio->bi_hw_front_size = hw_seg_size;
94 hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
95 nr_hw_segs++;
96 }
97 89
98 nr_phys_segs++; 90 nr_phys_segs++;
99 bvprv = bv; 91 bvprv = bv;
@@ -150,23 +142,6 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
150 return 0; 142 return 0;
151} 143}
152 144
153static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
154 struct bio *nxt)
155{
156 if (!bio_flagged(bio, BIO_SEG_VALID))
157 blk_recount_segments(q, bio);
158 if (!bio_flagged(nxt, BIO_SEG_VALID))
159 blk_recount_segments(q, nxt);
160 if (bio_has_data(bio) &&
161 (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
162 BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size)))
163 return 0;
164 if (bio->bi_hw_back_size + nxt->bi_hw_front_size > q->max_segment_size)
165 return 0;
166
167 return 1;
168}
169
170/* 145/*
171 * map a request to scatterlist, return number of sg entries setup. Caller 146 * map a request to scatterlist, return number of sg entries setup. Caller
172 * must make sure sg can hold rq->nr_phys_segments entries 147 * must make sure sg can hold rq->nr_phys_segments entries
@@ -304,7 +279,6 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
304 struct bio *bio) 279 struct bio *bio)
305{ 280{
306 unsigned short max_sectors; 281 unsigned short max_sectors;
307 int len;
308 282
309 if (unlikely(blk_pc_request(req))) 283 if (unlikely(blk_pc_request(req)))
310 max_sectors = q->max_hw_sectors; 284 max_sectors = q->max_hw_sectors;
@@ -321,20 +295,6 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
321 blk_recount_segments(q, req->biotail); 295 blk_recount_segments(q, req->biotail);
322 if (!bio_flagged(bio, BIO_SEG_VALID)) 296 if (!bio_flagged(bio, BIO_SEG_VALID))
323 blk_recount_segments(q, bio); 297 blk_recount_segments(q, bio);
324 len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
325 if (!bio_has_data(bio) ||
326 (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
327 && !BIOVEC_VIRT_OVERSIZE(len))) {
328 int mergeable = ll_new_mergeable(q, req, bio);
329
330 if (mergeable) {
331 if (req->nr_hw_segments == 1)
332 req->bio->bi_hw_front_size = len;
333 if (bio->bi_hw_segments == 1)
334 bio->bi_hw_back_size = len;
335 }
336 return mergeable;
337 }
338 298
339 return ll_new_hw_segment(q, req, bio); 299 return ll_new_hw_segment(q, req, bio);
340} 300}
@@ -343,7 +303,6 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
343 struct bio *bio) 303 struct bio *bio)
344{ 304{
345 unsigned short max_sectors; 305 unsigned short max_sectors;
346 int len;
347 306
348 if (unlikely(blk_pc_request(req))) 307 if (unlikely(blk_pc_request(req)))
349 max_sectors = q->max_hw_sectors; 308 max_sectors = q->max_hw_sectors;
@@ -357,24 +316,10 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
357 q->last_merge = NULL; 316 q->last_merge = NULL;
358 return 0; 317 return 0;
359 } 318 }
360 len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
361 if (!bio_flagged(bio, BIO_SEG_VALID)) 319 if (!bio_flagged(bio, BIO_SEG_VALID))
362 blk_recount_segments(q, bio); 320 blk_recount_segments(q, bio);
363 if (!bio_flagged(req->bio, BIO_SEG_VALID)) 321 if (!bio_flagged(req->bio, BIO_SEG_VALID))
364 blk_recount_segments(q, req->bio); 322 blk_recount_segments(q, req->bio);
365 if (!bio_has_data(bio) ||
366 (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
367 !BIOVEC_VIRT_OVERSIZE(len))) {
368 int mergeable = ll_new_mergeable(q, req, bio);
369
370 if (mergeable) {
371 if (bio->bi_hw_segments == 1)
372 bio->bi_hw_front_size = len;
373 if (req->nr_hw_segments == 1)
374 req->biotail->bi_hw_back_size = len;
375 }
376 return mergeable;
377 }
378 323
379 return ll_new_hw_segment(q, req, bio); 324 return ll_new_hw_segment(q, req, bio);
380} 325}
@@ -406,18 +351,6 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
406 return 0; 351 return 0;
407 352
408 total_hw_segments = req->nr_hw_segments + next->nr_hw_segments; 353 total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
409 if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
410 int len = req->biotail->bi_hw_back_size +
411 next->bio->bi_hw_front_size;
412 /*
413 * propagate the combined length to the end of the requests
414 */
415 if (req->nr_hw_segments == 1)
416 req->bio->bi_hw_front_size = len;
417 if (next->nr_hw_segments == 1)
418 next->biotail->bi_hw_back_size = len;
419 total_hw_segments--;
420 }
421 354
422 if (total_hw_segments > q->max_hw_segments) 355 if (total_hw_segments > q->max_hw_segments)
423 return 0; 356 return 0;
diff --git a/fs/bio.c b/fs/bio.c
index 3cba7ae34d75..4ac7c59d1c6d 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -350,8 +350,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
350 */ 350 */
351 351
352 while (bio->bi_phys_segments >= q->max_phys_segments 352 while (bio->bi_phys_segments >= q->max_phys_segments
353 || bio->bi_hw_segments >= q->max_hw_segments 353 || bio->bi_hw_segments >= q->max_hw_segments) {
354 || BIOVEC_VIRT_OVERSIZE(bio->bi_size)) {
355 354
356 if (retried_segments) 355 if (retried_segments)
357 return 0; 356 return 0;
@@ -395,8 +394,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
395 } 394 }
396 395
397 /* If we may be able to merge these biovecs, force a recount */ 396 /* If we may be able to merge these biovecs, force a recount */
398 if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec) || 397 if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
399 BIOVEC_VIRT_MERGEABLE(bvec-1, bvec)))
400 bio->bi_flags &= ~(1 << BIO_SEG_VALID); 398 bio->bi_flags &= ~(1 << BIO_SEG_VALID);
401 399
402 bio->bi_vcnt++; 400 bio->bi_vcnt++;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 33c3947d61e9..894d16ce0020 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -26,21 +26,8 @@
26 26
27#ifdef CONFIG_BLOCK 27#ifdef CONFIG_BLOCK
28 28
29/* Platforms may set this to teach the BIO layer about IOMMU hardware. */
30#include <asm/io.h> 29#include <asm/io.h>
31 30
32#if defined(BIO_VMERGE_MAX_SIZE) && defined(BIO_VMERGE_BOUNDARY)
33#define BIOVEC_VIRT_START_SIZE(x) (bvec_to_phys(x) & (BIO_VMERGE_BOUNDARY - 1))
34#define BIOVEC_VIRT_OVERSIZE(x) ((x) > BIO_VMERGE_MAX_SIZE)
35#else
36#define BIOVEC_VIRT_START_SIZE(x) 0
37#define BIOVEC_VIRT_OVERSIZE(x) 0
38#endif
39
40#ifndef BIO_VMERGE_BOUNDARY
41#define BIO_VMERGE_BOUNDARY 0
42#endif
43
44#define BIO_DEBUG 31#define BIO_DEBUG
45 32
46#ifdef BIO_DEBUG 33#ifdef BIO_DEBUG
@@ -240,8 +227,6 @@ static inline void *bio_data(struct bio *bio)
240 ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) 227 ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
241#endif 228#endif
242 229
243#define BIOVEC_VIRT_MERGEABLE(vec1, vec2) \
244 ((((bvec_to_phys((vec1)) + (vec1)->bv_len) | bvec_to_phys((vec2))) & (BIO_VMERGE_BOUNDARY - 1)) == 0)
245#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \ 230#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
246 (((addr1) | (mask)) == (((addr2) - 1) | (mask))) 231 (((addr1) | (mask)) == (((addr2) - 1) | (mask)))
247#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \ 232#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \