commit     ae03bf639a5027d27270123f5f6e3ee6a412781d
tree       d705f41a188ad656b1f47f7952626a9f992e3b8f /block/blk-merge.c
parent     e1defc4ff0cf57aca6c5e3ff99fa503f5943c1f1
author     Martin K. Petersen <martin.petersen@oracle.com>  2009-05-22 17:17:50 -0400
committer  Jens Axboe <jens.axboe@oracle.com>               2009-05-22 17:22:54 -0400
block: Use accessor functions for queue limits
Convert all external users of queue limits to use wrapper functions
instead of poking at the request queue members directly.
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
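The accessor wrappers named above (queue_bounce_pfn(), queue_max_segment_size(), and friends) are introduced in include/linux/blkdev.h as part of this series, which falls outside the diffstat below. As a sketch of the pattern, assuming the usual form for such helpers (trivial static inline getters over the existing struct request_queue fields, with return types matching the fields as of 2.6.30):

/*
 * Sketch of the queue-limit accessors used by blk-merge.c below.
 * Each wrapper simply returns the corresponding request_queue field,
 * so callers no longer depend on where the limits are stored.
 */
static inline unsigned long queue_bounce_pfn(struct request_queue *q)
{
	return q->bounce_pfn;
}

static inline unsigned int queue_max_sectors(struct request_queue *q)
{
	return q->max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
	return q->max_hw_sectors;
}

static inline unsigned short queue_max_phys_segments(struct request_queue *q)
{
	return q->max_phys_segments;
}

static inline unsigned short queue_max_hw_segments(struct request_queue *q)
{
	return q->max_hw_segments;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
	return q->max_segment_size;
}

Funneling every reader through one helper per limit lets the underlying storage move later (for instance, into a dedicated limits structure) without another tree-wide sweep of the callers.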
Diffstat (limited to 'block/blk-merge.c')

-rw-r--r--  block/blk-merge.c | 27 ++++++++++++++-------------
1 file changed, 14 insertions(+), 13 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 4974dd5767e5..39ce64432ba6 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -32,11 +32,12 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 		 * never considered part of another segment, since that
 		 * might change with the bounce page.
 		 */
-		high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
+		high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
 		if (high || highprv)
 			goto new_segment;
 		if (cluster) {
-			if (seg_size + bv->bv_len > q->max_segment_size)
+			if (seg_size + bv->bv_len
+			    > queue_max_segment_size(q))
 				goto new_segment;
 			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
 				goto new_segment;
@@ -91,7 +92,7 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 		return 0;
 
 	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
-	    q->max_segment_size)
+	    queue_max_segment_size(q))
 		return 0;
 
 	if (!bio_has_data(bio))
@@ -134,7 +135,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 			int nbytes = bvec->bv_len;
 
 			if (bvprv && cluster) {
-				if (sg->length + nbytes > q->max_segment_size)
+				if (sg->length + nbytes > queue_max_segment_size(q))
 					goto new_segment;
 
 				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
@@ -205,8 +206,8 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 {
 	int nr_phys_segs = bio_phys_segments(q, bio);
 
-	if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments
-	    || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
+	if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) ||
+	    req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) {
 		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
@@ -227,9 +228,9 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 	unsigned short max_sectors;
 
 	if (unlikely(blk_pc_request(req)))
-		max_sectors = q->max_hw_sectors;
+		max_sectors = queue_max_hw_sectors(q);
 	else
-		max_sectors = q->max_sectors;
+		max_sectors = queue_max_sectors(q);
 
 	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
 		req->cmd_flags |= REQ_NOMERGE;
@@ -251,9 +252,9 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 	unsigned short max_sectors;
 
 	if (unlikely(blk_pc_request(req)))
-		max_sectors = q->max_hw_sectors;
+		max_sectors = queue_max_hw_sectors(q);
 	else
-		max_sectors = q->max_sectors;
+		max_sectors = queue_max_sectors(q);
 
 
 	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
@@ -287,7 +288,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	/*
 	 * Will it become too large?
 	 */
-	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > q->max_sectors)
+	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
 		return 0;
 
 	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
@@ -299,10 +300,10 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 		total_phys_segments--;
 	}
 
-	if (total_phys_segments > q->max_phys_segments)
+	if (total_phys_segments > queue_max_phys_segments(q))
 		return 0;
 
-	if (total_phys_segments > q->max_hw_segments)
+	if (total_phys_segments > queue_max_hw_segments(q))
 		return 0;
 
 	/* Merge is OK... */
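The same mechanical substitution applies to any other code that reads queue limits. A hypothetical out-of-tree caller (my_cap_sectors() is illustrative only, not part of this patch) would now be written as:

/*
 * Hypothetical example: clamp a transfer length to the queue's
 * max_sectors limit via the accessor instead of dereferencing
 * q->max_sectors directly.
 */
static unsigned int my_cap_sectors(struct request_queue *q, unsigned int nr_sectors)
{
	return min(nr_sectors, queue_max_sectors(q));
}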