author	Martin K. Petersen <martin.petersen@oracle.com>	2010-02-26 00:20:39 -0500
committer	Jens Axboe <jens.axboe@oracle.com>	2010-02-26 07:58:08 -0500
commit	8a78362c4eefc1deddbefe2c7f38aabbc2429d6b (patch)
tree	c095d95af1aec0f9cee5975b1dcdc6bc1d17d401 /drivers/mmc/card/queue.c
parent	086fa5ff0854c676ec333760f4c0154b3b242616 (diff)
block: Consolidate phys_segment and hw_segment limits
Except for SCSI no device drivers distinguish between physical and
hardware segment limits. Consolidate the two into a single segment
limit.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
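For context, a minimal sketch of the call pattern this series converts non-SCSI drivers to. The wrapper function, queue pointer and segment count below are illustrative assumptions, not part of this patch; only the blk_queue_* helpers are real block-layer API, with blk_queue_max_segments() being the consolidated helper this commit switches callers to.

#include <linux/blkdev.h>

/*
 * Illustrative only: a made-up driver helper showing the limit setup
 * before and after this patch.  "q" and "nr_segs" are hypothetical.
 */
static void example_setup_queue_limits(struct request_queue *q,
				       unsigned short nr_segs)
{
	/*
	 * Before this series a driver set two (in practice equal) limits:
	 *   blk_queue_max_phys_segments(q, nr_segs);
	 *   blk_queue_max_hw_segments(q, nr_segs);
	 *
	 * After it, a single segment limit covers both:
	 */
	blk_queue_max_segments(q, nr_segs);
}

In the MMC queue code below, the single limit is taken from host->max_hw_segs, which previously fed blk_queue_max_hw_segments().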
Diffstat (limited to 'drivers/mmc/card/queue.c')
-rw-r--r--	drivers/mmc/card/queue.c	6
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 09b633d5657b..381fe032caa1 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -155,8 +155,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 	if (mq->bounce_buf) {
 		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
 		blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
-		blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
-		blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
+		blk_queue_max_segments(mq->queue, bouncesz / 512);
 		blk_queue_max_segment_size(mq->queue, bouncesz);
 
 		mq->sg = kmalloc(sizeof(struct scatterlist),
@@ -182,8 +181,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 		blk_queue_bounce_limit(mq->queue, limit);
 		blk_queue_max_hw_sectors(mq->queue,
 			min(host->max_blk_count, host->max_req_size / 512));
-		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
-		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
+		blk_queue_max_segments(mq->queue, host->max_hw_segs);
 		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
 
 		mq->sg = kmalloc(sizeof(struct scatterlist) *