about summary refs log tree commit diff stats
path: root/block
diff options
context:
space:
mode:
author	Mikulas Patocka <mpatocka@redhat.com>	2008-08-15 04:20:02 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2008-10-09 02:56:03 -0400
commit	5df97b91b5d7ed426034fcc84cb6e7cf682b8838 (patch)
tree	727b9fb778f72d2b1ff44c007fb5209bacf67f4a /block
parent	b8b3e16cfe6435d961f6aaebcfd52a1ff2a988c5 (diff)
drop vmerge accounting
Remove hw_segments field from struct bio and struct request. Without virtual merge accounting they have no purpose.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block')
-rw-r--r--block/blk-core.c1
-rw-r--r--block/blk-merge.c31
-rw-r--r--block/elevator.c2
3 files changed, 4 insertions, 30 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 1261516dd42..2616cdd049a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2026,7 +2026,6 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 
 	if (bio_has_data(bio)) {
 		rq->nr_phys_segments = bio_phys_segments(q, bio);
-		rq->nr_hw_segments = bio_hw_segments(q, bio);
 		rq->buffer = bio_data(bio);
 	}
 	rq->current_nr_sectors = bio_cur_sectors(bio);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 2c2a2ee716e..d81d91419ff 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -41,12 +41,9 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
 void blk_recalc_rq_segments(struct request *rq)
 {
 	int nr_phys_segs;
-	int nr_hw_segs;
 	unsigned int phys_size;
-	unsigned int hw_size;
 	struct bio_vec *bv, *bvprv = NULL;
 	int seg_size;
-	int hw_seg_size;
 	int cluster;
 	struct req_iterator iter;
 	int high, highprv = 1;
@@ -56,8 +53,8 @@ void blk_recalc_rq_segments(struct request *rq)
 		return;
 
 	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
-	hw_seg_size = seg_size = 0;
-	phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
+	seg_size = 0;
+	phys_size = nr_phys_segs = 0;
 	rq_for_each_segment(bv, rq, iter) {
 		/*
 		 * the trick here is making sure that a high page is never
@@ -76,30 +73,17 @@ void blk_recalc_rq_segments(struct request *rq)
 				goto new_segment;
 
 			seg_size += bv->bv_len;
-			hw_seg_size += bv->bv_len;
 			bvprv = bv;
 			continue;
 		}
 new_segment:
-		if (nr_hw_segs == 1 &&
-		    hw_seg_size > rq->bio->bi_hw_front_size)
-			rq->bio->bi_hw_front_size = hw_seg_size;
-		hw_seg_size = bv->bv_len;
-		nr_hw_segs++;
-
 		nr_phys_segs++;
 		bvprv = bv;
 		seg_size = bv->bv_len;
 		highprv = high;
 	}
 
-	if (nr_hw_segs == 1 &&
-	    hw_seg_size > rq->bio->bi_hw_front_size)
-		rq->bio->bi_hw_front_size = hw_seg_size;
-	if (hw_seg_size > rq->biotail->bi_hw_back_size)
-		rq->biotail->bi_hw_back_size = hw_seg_size;
 	rq->nr_phys_segments = nr_phys_segs;
-	rq->nr_hw_segments = nr_hw_segs;
 }
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
@@ -112,7 +96,6 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio)
 	blk_recalc_rq_segments(&rq);
 	bio->bi_next = nxt;
 	bio->bi_phys_segments = rq.nr_phys_segments;
-	bio->bi_hw_segments = rq.nr_hw_segments;
 	bio->bi_flags |= (1 << BIO_SEG_VALID);
 }
 EXPORT_SYMBOL(blk_recount_segments);
@@ -255,10 +238,9 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 				    struct request *req,
 				    struct bio *bio)
 {
-	int nr_hw_segs = bio_hw_segments(q, bio);
 	int nr_phys_segs = bio_phys_segments(q, bio);
 
-	if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
+	if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments
 	    || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
 		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
@@ -270,7 +252,6 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 	 * This will form the start of a new hw segment. Bump both
 	 * counters.
 	 */
-	req->nr_hw_segments += nr_hw_segs;
 	req->nr_phys_segments += nr_phys_segs;
 	return 1;
 }
@@ -328,7 +309,6 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 				struct request *next)
 {
 	int total_phys_segments;
-	int total_hw_segments;
 
 	/*
 	 * First check if the either of the requests are re-queued
@@ -350,14 +330,11 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	if (total_phys_segments > q->max_phys_segments)
 		return 0;
 
-	total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
-
-	if (total_hw_segments > q->max_hw_segments)
+	if (total_phys_segments > q->max_hw_segments)
 		return 0;
 
 	/* Merge is OK... */
 	req->nr_phys_segments = total_phys_segments;
-	req->nr_hw_segments = total_hw_segments;
 	return 1;
 }
 
diff --git a/block/elevator.c b/block/elevator.c
index 4f5127054e3..269615e6dbf 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -790,7 +790,6 @@ struct request *elv_next_request(struct request_queue *q)
 			 * device can handle
 			 */
 			rq->nr_phys_segments++;
-			rq->nr_hw_segments++;
 		}
 
 		if (!q->prep_rq_fn)
@@ -813,7 +812,6 @@ struct request *elv_next_request(struct request_queue *q)
 				 * so that we don't add it again
 				 */
 				--rq->nr_phys_segments;
-				--rq->nr_hw_segments;
 			}
 
 			rq = NULL;