author	Martin K. Petersen <martin.petersen@oracle.com>	2010-02-26 00:20:38 -0500
committer	Jens Axboe <jens.axboe@oracle.com>	2010-02-26 07:58:08 -0500
commit	086fa5ff0854c676ec333760f4c0154b3b242616 (patch)
tree	ee63fb3c7c7d964bd799355b7cde18ba95f91f07 /drivers/mmc/card
parent	eb28d31bc97e6374d81f404da309401ffaed467b (diff)
block: Rename blk_queue_max_sectors to blk_queue_max_hw_sectors
The block layer calling convention is blk_queue_<limit name>. blk_queue_max_sectors predates this practice, leading to some confusion. Rename the function to reflect that its intended use is to set max_hw_sectors. Also introduce a temporary wrapper for backward compatibility; it can be removed after the merge window closes.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
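The backward-compatibility wrapper mentioned above is outside the drivers/mmc/card portion of the diff shown on this page, so its exact form is not visible here. A minimal sketch of how such a shim could look, assuming a static inline in include/linux/blkdev.h that simply forwards the old name to the renamed helper:

/*
 * Assumed transitional shim (not part of the diff below): keeps the old
 * blk_queue_max_sectors() name compiling while callers are converted to
 * blk_queue_max_hw_sectors(). Intended to be deleted after the merge
 * window closes.
 */
static inline void blk_queue_max_sectors(struct request_queue *q,
					 unsigned int max_sectors)
{
	blk_queue_max_hw_sectors(q, max_sectors);
}

Callers such as the MMC queue code in the diff below are converted directly to the new name, so they do not rely on the shim.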
Diffstat (limited to 'drivers/mmc/card')
-rw-r--r--	drivers/mmc/card/queue.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index c5a7a855f4b1..09b633d5657b 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -154,7 +154,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 
 	if (mq->bounce_buf) {
 		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
-		blk_queue_max_sectors(mq->queue, bouncesz / 512);
+		blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
 		blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
 		blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
 		blk_queue_max_segment_size(mq->queue, bouncesz);
@@ -180,7 +180,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 
 	if (!mq->bounce_buf) {
 		blk_queue_bounce_limit(mq->queue, limit);
-		blk_queue_max_sectors(mq->queue,
+		blk_queue_max_hw_sectors(mq->queue,
 			min(host->max_blk_count, host->max_req_size / 512));
 		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
 		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);