author     Martin K. Petersen <martin.petersen@oracle.com>  2009-05-22 17:17:50 -0400
committer  Jens Axboe <jens.axboe@oracle.com>               2009-05-22 17:22:54 -0400
commit     ae03bf639a5027d27270123f5f6e3ee6a412781d (patch)
tree       d705f41a188ad656b1f47f7952626a9f992e3b8f /include
parent     e1defc4ff0cf57aca6c5e3ff99fa503f5943c1f1 (diff)
block: Use accessor functions for queue limits
Convert all external users of queue limits to using wrapper functions
instead of poking the request queue variables directly.
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
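As a sketch of the conversion this patch performs (the surrounding caller
snippet is hypothetical, not taken from the tree):

	/* Before: an external user pokes the queue field directly. */
	unsigned int max_old = q->max_sectors;

	/* After: the same limit is read through the new accessor,
	 * keeping struct request_queue's layout private to the
	 * block layer. */
	unsigned int max_new = queue_max_sectors(q);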
Diffstat (limited to 'include')
-rw-r--r--  include/linux/bio.h    |  2
-rw-r--r--  include/linux/blkdev.h | 36
2 files changed, 37 insertions(+), 1 deletion(-)
diff --git a/include/linux/bio.h b/include/linux/bio.h
index d30ec6f30dd7..12737be58601 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -279,7 +279,7 @@ static inline int bio_has_allocated_vec(struct bio *bio)
 #define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
 	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
 #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
-	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask)
+	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
 #define BIO_SEG_BOUNDARY(q, b1, b2) \
 	BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 872b78b7a101..29b48f7b4ba8 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -898,6 +898,7 @@ extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
 extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
@@ -988,6 +989,41 @@ extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
 
 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
 
+static inline unsigned long queue_bounce_pfn(struct request_queue *q)
+{
+	return q->bounce_pfn;
+}
+
+static inline unsigned long queue_segment_boundary(struct request_queue *q)
+{
+	return q->seg_boundary_mask;
+}
+
+static inline unsigned int queue_max_sectors(struct request_queue *q)
+{
+	return q->max_sectors;
+}
+
+static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
+{
+	return q->max_hw_sectors;
+}
+
+static inline unsigned short queue_max_hw_segments(struct request_queue *q)
+{
+	return q->max_hw_segments;
+}
+
+static inline unsigned short queue_max_phys_segments(struct request_queue *q)
+{
+	return q->max_phys_segments;
+}
+
+static inline unsigned int queue_max_segment_size(struct request_queue *q)
+{
+	return q->max_segment_size;
+}
+
 static inline unsigned short queue_logical_block_size(struct request_queue *q)
 {
 	int retval = 512;
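A minimal sketch of how a caller might use the new accessors once this
patch is applied (the function name and the min-of-both policy are
hypothetical, not part of the commit):

	#include <linux/blkdev.h>

	/* Cap a transfer at both the soft and hardware per-request
	 * limits exported by the queue. */
	static unsigned int example_max_io_sectors(struct request_queue *q)
	{
		return min(queue_max_sectors(q), queue_max_hw_sectors(q));
	}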