author     Martin K. Petersen <martin.petersen@oracle.com>	2009-05-22 17:17:50 -0400
committer  Jens Axboe <jens.axboe@oracle.com>	2009-05-22 17:22:54 -0400
commit     ae03bf639a5027d27270123f5f6e3ee6a412781d (patch)
tree       d705f41a188ad656b1f47f7952626a9f992e3b8f /fs/bio.c
parent     e1defc4ff0cf57aca6c5e3ff99fa503f5943c1f1 (diff)
block: Use accessor functions for queue limits
Convert all external users of queue limits to using wrapper functions
instead of poking the request queue variables directly.
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
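
For readers following the series, the wrapper style the callers switch to looks roughly like the sketch below. This is an illustrative sketch only, assuming the limits still live as plain fields on struct request_queue at this point in the series; the helper names come from the diff itself, but the bodies and return types shown here are not copied from blkdev.h.

/*
 * Illustrative accessor sketch (not the exact blkdev.h definitions):
 * callers read a queue limit through a helper instead of dereferencing
 * the request_queue field directly, so the field can later be moved or
 * renamed without touching every call site.
 */
static inline unsigned int queue_max_sectors(struct request_queue *q)
{
	return q->max_sectors;		/* assumed field location */
}

static inline unsigned int queue_max_phys_segments(struct request_queue *q)
{
	return q->max_phys_segments;	/* assumed field location */
}

/* Call site, as in bio_get_nr_vecs() in the first hunk below: */
nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;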
Diffstat (limited to 'fs/bio.c')
-rw-r--r--	fs/bio.c	19
1 file changed, 10 insertions(+), 9 deletions(-)
@@ -499,11 +499,11 @@ int bio_get_nr_vecs(struct block_device *bdev)
 	struct request_queue *q = bdev_get_queue(bdev);
 	int nr_pages;
 
-	nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	if (nr_pages > q->max_phys_segments)
-		nr_pages = q->max_phys_segments;
-	if (nr_pages > q->max_hw_segments)
-		nr_pages = q->max_hw_segments;
+	nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	if (nr_pages > queue_max_phys_segments(q))
+		nr_pages = queue_max_phys_segments(q);
+	if (nr_pages > queue_max_hw_segments(q))
+		nr_pages = queue_max_hw_segments(q);
 
 	return nr_pages;
 }
@@ -562,8 +562,8 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 	 * make this too complex.
 	 */
 
-	while (bio->bi_phys_segments >= q->max_phys_segments
-	       || bio->bi_phys_segments >= q->max_hw_segments) {
+	while (bio->bi_phys_segments >= queue_max_phys_segments(q)
+	       || bio->bi_phys_segments >= queue_max_hw_segments(q)) {
 
 		if (retried_segments)
 			return 0;
@@ -634,7 +634,8 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
 		    unsigned int len, unsigned int offset)
 {
-	return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
+	return __bio_add_page(q, bio, page, len, offset,
+			      queue_max_hw_sectors(q));
 }
 
 /**
@@ -654,7 +655,7 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 		 unsigned int offset)
 {
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-	return __bio_add_page(q, bio, page, len, offset, q->max_sectors);
+	return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q));
 }
 
 struct bio_map_data {