author		Pierre Ossman <drzeus@drzeus.cx>	2008-08-16 15:34:02 -0400
committer	Pierre Ossman <drzeus@drzeus.cx>	2008-10-12 05:04:30 -0400
commit		f3eb0aaa0211fd804057070bee1fd067cd65cb13 (patch)
tree		bf97d54c14f3b63c2785cb19affc2ee1731159ee /drivers/mmc/card/queue.c
parent		6501ff604a5ae18697c9b4cd40a7738d3a68e7fe (diff)
mmc_block: inform block layer about sector count restriction
Make sure we consider the maximum block count when we tell the block
layer about the maximum sector count. That way we don't have to chop
up the request ourselves.
Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
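As an illustration of the non-bounce path changed below, the sector count handed to the block layer becomes the smaller of the host's byte-based request limit (expressed in 512-byte sectors) and its block-count limit. A minimal standalone C sketch of that arithmetic, with made-up host values (only the field names mirror struct mmc_host):

#include <stdio.h>

/* Hypothetical host limits, loosely mirroring struct mmc_host fields. */
struct host_caps {
	unsigned int max_req_size;   /* bytes per request */
	unsigned int max_seg_size;   /* bytes per segment */
	unsigned int max_blk_count;  /* blocks (512-byte sectors) per request */
};

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Example values only; real limits come from the host driver. */
	struct host_caps host = {
		.max_req_size  = 65536,
		.max_seg_size  = 65536,
		.max_blk_count = 64,	/* 64 * 512 = 32768 bytes */
	};

	/* With this patch, the sectors advertised to the block layer are
	 * capped by max_blk_count as well as by max_req_size. */
	unsigned int max_sectors =
		min_u(host.max_blk_count, host.max_req_size / 512);

	printf("max_sectors = %u\n", max_sectors);	/* prints 64, not 128 */
	return 0;
}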
Diffstat (limited to 'drivers/mmc/card/queue.c')
-rw-r--r--	drivers/mmc/card/queue.c	20
1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 3dee97e7d165..5c8f037dca6b 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -142,12 +142,19 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 			bouncesz = host->max_req_size;
 		if (bouncesz > host->max_seg_size)
 			bouncesz = host->max_seg_size;
+		if (bouncesz > (host->max_blk_count * 512))
+			bouncesz = host->max_blk_count * 512;
+
+		if (bouncesz > 512) {
+			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+			if (!mq->bounce_buf) {
+				printk(KERN_WARNING "%s: unable to "
+					"allocate bounce buffer\n",
+					mmc_card_name(card));
+			}
+		}
 
-		mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-		if (!mq->bounce_buf) {
-			printk(KERN_WARNING "%s: unable to allocate "
-				"bounce buffer\n", mmc_card_name(card));
-		} else {
+		if (mq->bounce_buf) {
 			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
 			blk_queue_max_sectors(mq->queue, bouncesz / 512);
 			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
@@ -175,7 +182,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 
 	if (!mq->bounce_buf) {
 		blk_queue_bounce_limit(mq->queue, limit);
-		blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
+		blk_queue_max_sectors(mq->queue,
+			min(host->max_blk_count, host->max_req_size / 512));
 		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
 		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
 		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
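The bounce-buffer branch in the first hunk applies the same cap when sizing the buffer itself, before deciding whether allocating one is worthwhile. A rough standalone sketch of that clamping order, assuming the driver's 64 KiB default bounce size and made-up host limits:

#include <stdio.h>

#define MMC_QUEUE_BOUNCESZ 65536	/* assumed driver default bounce size */

int main(void)
{
	/* Hypothetical host limits for illustration only. */
	unsigned int max_req_size  = 65536;
	unsigned int max_seg_size  = 65536;
	unsigned int max_blk_count = 8;		/* deliberately small */

	unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;

	if (bouncesz > max_req_size)
		bouncesz = max_req_size;
	if (bouncesz > max_seg_size)
		bouncesz = max_seg_size;
	if (bouncesz > max_blk_count * 512)	/* the clamp added by this patch */
		bouncesz = max_blk_count * 512;

	/* The buffer is only allocated if it can hold more than one sector. */
	if (bouncesz > 512)
		printf("would allocate %u-byte bounce buffer\n", bouncesz);
	else
		printf("bounce buffer not worthwhile (%u bytes)\n", bouncesz);

	return 0;
}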