diff options
author | Mike Christie <michaelc@cs.wisc.edu> | 2005-12-05 03:37:06 -0500 |
---|---|---|
committer | James Bottomley <jejb@mulgrave.(none)> | 2005-12-15 18:11:40 -0500 |
commit | defd94b75409b983f94548ea2f52ff5787ddb848 (patch) | |
tree | 0138b2dae748de88edaee4da23431f1a9dd347a1 /fs | |
parent | 8b05b773b6030de5b1bab1cbb0bf1ff8c34cdbe0 (diff) |
[SCSI] separate max_sectors from max_hw_sectors
- export __blk_put_request and blk_execute_rq_nowait
needed for async REQ_BLOCK_PC requests
- separate max_hw_sectors and max_sectors for block/scsi_ioctl.c and
SG_IO bio.c helpers per Jens's last comments. Since block/scsi_ioctl.c SG_IO was
already testing against max_sectors and SCSI-ml was setting max_sectors and
max_hw_sectors to the same value this does not change any scsi SG_IO behavior. It only
prepares ll_rw_blk.c, scsi_ioctl.c and bio.c for when SCSI-ml begins to set
a valid max_hw_sectors for all LLDs. Today if a LLD does not set it,
SCSI-ml sets it to a safe default, and some LLDs set it to an artificially low
value to overcome memory and feedback issues.
Note: Since we now cap max_sectors to BLK_DEF_MAX_SECTORS, which is 1024,
drivers that used to call blk_queue_max_sectors with a large value of
max_sectors will now see the fs requests capped to BLK_DEF_MAX_SECTORS.
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Diffstat (limited to 'fs')
-rw-r--r-- | fs/bio.c | 20 |
1 files changed, 11 insertions, 9 deletions
@@ -313,7 +313,8 @@ int bio_get_nr_vecs(struct block_device *bdev) | |||
313 | } | 313 | } |
314 | 314 | ||
315 | static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page | 315 | static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page |
316 | *page, unsigned int len, unsigned int offset) | 316 | *page, unsigned int len, unsigned int offset, |
317 | unsigned short max_sectors) | ||
317 | { | 318 | { |
318 | int retried_segments = 0; | 319 | int retried_segments = 0; |
319 | struct bio_vec *bvec; | 320 | struct bio_vec *bvec; |
@@ -327,7 +328,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page | |||
327 | if (bio->bi_vcnt >= bio->bi_max_vecs) | 328 | if (bio->bi_vcnt >= bio->bi_max_vecs) |
328 | return 0; | 329 | return 0; |
329 | 330 | ||
330 | if (((bio->bi_size + len) >> 9) > q->max_sectors) | 331 | if (((bio->bi_size + len) >> 9) > max_sectors) |
331 | return 0; | 332 | return 0; |
332 | 333 | ||
333 | /* | 334 | /* |
@@ -401,7 +402,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page | |||
401 | int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page, | 402 | int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page, |
402 | unsigned int len, unsigned int offset) | 403 | unsigned int len, unsigned int offset) |
403 | { | 404 | { |
404 | return __bio_add_page(q, bio, page, len, offset); | 405 | return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors); |
405 | } | 406 | } |
406 | 407 | ||
407 | /** | 408 | /** |
@@ -420,8 +421,8 @@ int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page, | |||
420 | int bio_add_page(struct bio *bio, struct page *page, unsigned int len, | 421 | int bio_add_page(struct bio *bio, struct page *page, unsigned int len, |
421 | unsigned int offset) | 422 | unsigned int offset) |
422 | { | 423 | { |
423 | return __bio_add_page(bdev_get_queue(bio->bi_bdev), bio, page, | 424 | struct request_queue *q = bdev_get_queue(bio->bi_bdev); |
424 | len, offset); | 425 | return __bio_add_page(q, bio, page, len, offset, q->max_sectors); |
425 | } | 426 | } |
426 | 427 | ||
427 | struct bio_map_data { | 428 | struct bio_map_data { |
@@ -533,7 +534,7 @@ struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr, | |||
533 | break; | 534 | break; |
534 | } | 535 | } |
535 | 536 | ||
536 | if (__bio_add_page(q, bio, page, bytes, 0) < bytes) { | 537 | if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) { |
537 | ret = -EINVAL; | 538 | ret = -EINVAL; |
538 | break; | 539 | break; |
539 | } | 540 | } |
@@ -647,7 +648,8 @@ static struct bio *__bio_map_user_iov(request_queue_t *q, | |||
647 | /* | 648 | /* |
648 | * sorry... | 649 | * sorry... |
649 | */ | 650 | */ |
650 | if (__bio_add_page(q, bio, pages[j], bytes, offset) < bytes) | 651 | if (bio_add_pc_page(q, bio, pages[j], bytes, offset) < |
652 | bytes) | ||
651 | break; | 653 | break; |
652 | 654 | ||
653 | len -= bytes; | 655 | len -= bytes; |
@@ -820,8 +822,8 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data, | |||
820 | if (bytes > len) | 822 | if (bytes > len) |
821 | bytes = len; | 823 | bytes = len; |
822 | 824 | ||
823 | if (__bio_add_page(q, bio, virt_to_page(data), bytes, | 825 | if (bio_add_pc_page(q, bio, virt_to_page(data), bytes, |
824 | offset) < bytes) | 826 | offset) < bytes) |
825 | break; | 827 | break; |
826 | 828 | ||
827 | data += bytes; | 829 | data += bytes; |