diff options
author | Martin K. Petersen <martin.petersen@oracle.com> | 2010-02-26 00:20:38 -0500 |
---|---|---|
committer | Jens Axboe <jens.axboe@oracle.com> | 2010-02-26 07:58:08 -0500 |
commit | 086fa5ff0854c676ec333760f4c0154b3b242616 (patch) | |
tree | ee63fb3c7c7d964bd799355b7cde18ba95f91f07 /block | |
parent | eb28d31bc97e6374d81f404da309401ffaed467b (diff) |
block: Rename blk_queue_max_sectors to blk_queue_max_hw_sectors
The block layer calling convention is blk_queue_<limit name>.
blk_queue_max_sectors predates this practice, leading to some confusion.
Rename the function to appropriately reflect that its intended use is to
set max_hw_sectors.
Also introduce a temporary wrapper for backwards compatibility. This can
be removed after the merge window is closed.
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-settings.c | 8 |
1 file changed, 4 insertions, 4 deletions
diff --git a/block/blk-settings.c b/block/blk-settings.c index 3c53b0beb8dd..61afae9dbc6d 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c | |||
@@ -154,7 +154,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn) | |||
154 | q->unplug_timer.data = (unsigned long)q; | 154 | q->unplug_timer.data = (unsigned long)q; |
155 | 155 | ||
156 | blk_set_default_limits(&q->limits); | 156 | blk_set_default_limits(&q->limits); |
157 | blk_queue_max_sectors(q, BLK_SAFE_MAX_SECTORS); | 157 | blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS); |
158 | 158 | ||
159 | /* | 159 | /* |
160 | * If the caller didn't supply a lock, fall back to our embedded | 160 | * If the caller didn't supply a lock, fall back to our embedded |
@@ -210,7 +210,7 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask) | |||
210 | EXPORT_SYMBOL(blk_queue_bounce_limit); | 210 | EXPORT_SYMBOL(blk_queue_bounce_limit); |
211 | 211 | ||
212 | /** | 212 | /** |
213 | * blk_queue_max_sectors - set max sectors for a request for this queue | 213 | * blk_queue_max_hw_sectors - set max sectors for a request for this queue |
214 | * @q: the request queue for the device | 214 | * @q: the request queue for the device |
215 | * @max_hw_sectors: max hardware sectors in the usual 512b unit | 215 | * @max_hw_sectors: max hardware sectors in the usual 512b unit |
216 | * | 216 | * |
@@ -225,7 +225,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit); | |||
225 | * per-device basis in /sys/block/<device>/queue/max_sectors_kb. | 225 | * per-device basis in /sys/block/<device>/queue/max_sectors_kb. |
226 | * The soft limit can not exceed max_hw_sectors. | 226 | * The soft limit can not exceed max_hw_sectors. |
227 | **/ | 227 | **/ |
228 | void blk_queue_max_sectors(struct request_queue *q, unsigned int max_hw_sectors) | 228 | void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors) |
229 | { | 229 | { |
230 | if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) { | 230 | if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) { |
231 | max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9); | 231 | max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9); |
@@ -237,7 +237,7 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_hw_sectors) | |||
237 | q->limits.max_sectors = min_t(unsigned int, max_hw_sectors, | 237 | q->limits.max_sectors = min_t(unsigned int, max_hw_sectors, |
238 | BLK_DEF_MAX_SECTORS); | 238 | BLK_DEF_MAX_SECTORS); |
239 | } | 239 | } |
240 | EXPORT_SYMBOL(blk_queue_max_sectors); | 240 | EXPORT_SYMBOL(blk_queue_max_hw_sectors); |
241 | 241 | ||
242 | /** | 242 | /** |
243 | * blk_queue_max_discard_sectors - set max sectors for a single discard | 243 | * blk_queue_max_discard_sectors - set max sectors for a single discard |