author		Mike Christie <michaelc@cs.wisc.edu>	2005-12-05 03:37:06 -0500
committer	James Bottomley <jejb@mulgrave.(none)>	2005-12-15 18:11:40 -0500
commit		defd94b75409b983f94548ea2f52ff5787ddb848 (patch)
tree		0138b2dae748de88edaee4da23431f1a9dd347a1 /block/ll_rw_blk.c
parent		8b05b773b6030de5b1bab1cbb0bf1ff8c34cdbe0 (diff)
[SCSI] separate max_sectors from max_hw_sectors
- export __blk_put_request and blk_execute_rq_nowait, which are needed
  for async REQ_BLOCK_PC requests (see the sketch after this list)
- separate max_hw_sectors and max_sectors for the block/scsi_ioctl.c and
  SG_IO bio.c helpers, per Jens's last comments. Since block/scsi_ioctl.c
  SG_IO was already testing against max_sectors, and SCSI-ml was setting
  max_sectors and max_hw_sectors to the same value, this does not change
  any SCSI SG_IO behavior. It only prepares ll_rw_blk.c, scsi_ioctl.c and
  bio.c for when SCSI-ml begins to set a valid max_hw_sectors for all
  LLDs. Today, if an LLD does not set it, SCSI-ml sets it to a safe
  default, and some LLDs set it to an artificially low value to work
  around memory and feedback issues.
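
As a rough illustration, an async REQ_BLOCK_PC submission built on these
exports could look like the sketch below. This is a sketch only, assuming
the 2.6.15-era block-layer interfaces (blk_get_request, blk_rq_map_kern,
blk_execute_rq_nowait, __blk_put_request from <linux/blkdev.h>); the my_*
names are hypothetical, and timeout/sense setup is omitted:

	/* Hypothetical end_io callback for an async REQ_BLOCK_PC request.
	 * end_io runs with the queue lock held, which is why the unlocked
	 * __blk_put_request() variant had to be exported. */
	static void my_pc_end_io(struct request *rq)
	{
		__blk_put_request(rq->q, rq);
	}

	static int my_send_pc_command(request_queue_t *q, unsigned char *cmd,
				      unsigned int cmd_len, void *buf,
				      unsigned int len)
	{
		struct request *rq;

		rq = blk_get_request(q, WRITE, GFP_KERNEL);
		if (!rq)
			return -ENOMEM;

		rq->flags |= REQ_BLOCK_PC;
		memcpy(rq->cmd, cmd, cmd_len);
		rq->cmd_len = cmd_len;

		/* map the data buffer, if any, into the request */
		if (len && blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL)) {
			blk_put_request(rq);
			return -EINVAL;
		}

		/* queue it and return; my_pc_end_io() runs on completion */
		blk_execute_rq_nowait(q, NULL, rq, 0, my_pc_end_io);
		return 0;
	}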
Note: since we now cap max_sectors to BLK_DEF_MAX_SECTORS (1024),
drivers that used to call blk_queue_max_sectors with a larger value of
max_sectors will now see fs requests capped to BLK_DEF_MAX_SECTORS.
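
For example, assuming BLK_DEF_MAX_SECTORS is 1024 (512 KiB in 512-byte
sectors): a driver that calls blk_queue_max_sectors(q, 2048) now ends up
with q->max_hw_sectors = 2048 but q->max_sectors = 1024, so fs requests
are built to at most 512 KiB while REQ_BLOCK_PC/SG_IO requests may still
use the full 1 MiB hardware limit.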
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--	block/ll_rw_blk.c	34
1 file changed, 26 insertions(+), 8 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index c525b5a2b598..d4beb9a89ee0 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -239,7 +239,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
 	q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 	q->backing_dev_info.state = 0;
 	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
-	blk_queue_max_sectors(q, MAX_SECTORS);
+	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
 	blk_queue_hardsect_size(q, 512);
 	blk_queue_dma_alignment(q, 511);
 	blk_queue_congestion_threshold(q);
@@ -555,7 +555,12 @@ void blk_queue_max_sectors(request_queue_t *q, unsigned short max_sectors)
 		printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
 	}
 
-	q->max_sectors = q->max_hw_sectors = max_sectors;
+	if (BLK_DEF_MAX_SECTORS > max_sectors)
+		q->max_hw_sectors = q->max_sectors = max_sectors;
+	else {
+		q->max_sectors = BLK_DEF_MAX_SECTORS;
+		q->max_hw_sectors = max_sectors;
+	}
 }
 
 EXPORT_SYMBOL(blk_queue_max_sectors);
@@ -657,8 +662,8 @@ EXPORT_SYMBOL(blk_queue_hardsect_size);
 void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
 {
 	/* zero is "infinity" */
-	t->max_sectors = t->max_hw_sectors =
-		min_not_zero(t->max_sectors,b->max_sectors);
+	t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
+	t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);
 
 	t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
 	t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
@@ -1293,9 +1298,15 @@ static inline int ll_new_hw_segment(request_queue_t *q,
 static int ll_back_merge_fn(request_queue_t *q, struct request *req,
 			    struct bio *bio)
 {
+	unsigned short max_sectors;
 	int len;
 
-	if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
+	if (unlikely(blk_pc_request(req)))
+		max_sectors = q->max_hw_sectors;
+	else
+		max_sectors = q->max_sectors;
+
+	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
 		req->flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
@@ -1325,9 +1336,16 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
 static int ll_front_merge_fn(request_queue_t *q, struct request *req,
 			     struct bio *bio)
 {
+	unsigned short max_sectors;
 	int len;
 
-	if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
+	if (unlikely(blk_pc_request(req)))
+		max_sectors = q->max_hw_sectors;
+	else
+		max_sectors = q->max_sectors;
+
+
+	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
 		req->flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
@@ -2144,7 +2162,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 	struct bio *bio;
 	int reading;
 
-	if (len > (q->max_sectors << 9))
+	if (len > (q->max_hw_sectors << 9))
 		return -EINVAL;
 	if (!len || !ubuf)
 		return -EINVAL;
@@ -2259,7 +2277,7 @@ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
 {
 	struct bio *bio;
 
-	if (len > (q->max_sectors << 9))
+	if (len > (q->max_hw_sectors << 9))
 		return -EINVAL;
 	if (!len || !kbuf)
 		return -EINVAL;
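
Taken together, the blk_rq_map_user()/blk_rq_map_kern() hunks mean the
per-request byte limit for passthrough commands is now derived from the
hardware limit rather than the (possibly lower) fs-request limit. As a
small sketch of that bound (the helper name is hypothetical; sectors are
512 bytes, hence the shift by 9):

	/* largest single REQ_BLOCK_PC transfer the queue will accept */
	static inline unsigned int my_max_pc_bytes(request_queue_t *q)
	{
		return q->max_hw_sectors << 9;
	}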