author    Pierre Ossman <drzeus@drzeus.cx>    2008-08-16 15:34:02 -0400
committer Pierre Ossman <drzeus@drzeus.cx>    2008-10-12 05:04:30 -0400
commit    f3eb0aaa0211fd804057070bee1fd067cd65cb13 (patch)
tree      bf97d54c14f3b63c2785cb19affc2ee1731159ee /drivers/mmc/card
parent    6501ff604a5ae18697c9b4cd40a7738d3a68e7fe (diff)
mmc_block: inform block layer about sector count restriction
Make sure we consider the maximum block count when we tell the block
layer about the maximum sector count. That way we don't have to chop
up the request ourselves.

Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
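As a rough standalone illustration of the arithmetic (the helper name and the
values in main() are invented for this sketch; it is not the kernel code), the
sector count advertised to the block layer must honour both host restrictions
at once:

/*
 * Sketch: the queue's maximum request size, expressed in 512-byte
 * sectors, is the smaller of the host's byte limit (max_req_size)
 * and its per-request block count limit (max_blk_count). With this
 * cap in place the block layer never issues a request the driver
 * would have to split.
 */
#include <stdio.h>

static unsigned int queue_max_sectors(unsigned int max_blk_count,
                                      unsigned int max_req_size)
{
        unsigned int by_bytes = max_req_size / 512; /* byte limit in sectors */

        return max_blk_count < by_bytes ? max_blk_count : by_bytes;
}

int main(void)
{
        /* e.g. a host limited to 127 blocks but 128 KiB per request */
        printf("%u\n", queue_max_sectors(127, 128 * 1024)); /* prints 127 */
        return 0;
}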
Diffstat (limited to 'drivers/mmc/card')
-rw-r--r--    drivers/mmc/card/block.c    23
-rw-r--r--    drivers/mmc/card/queue.c    20
2 files changed, 15 insertions(+), 28 deletions(-)
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index ebc8b9d77613..d73cac84d9f2 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -215,8 +215,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
 	struct mmc_blk_request brq;
-	int ret = 1, data_size, i;
-	struct scatterlist *sg;
+	int ret = 1;
 
 	mmc_claim_host(card->host);
 
@@ -237,8 +236,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	brq.stop.arg = 0;
 	brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
 	brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
-	if (brq.data.blocks > card->host->max_blk_count)
-		brq.data.blocks = card->host->max_blk_count;
 
 	if (brq.data.blocks > 1) {
 		/* SPI multiblock writes terminate using a special
@@ -270,24 +267,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 
 	mmc_queue_bounce_pre(mq);
 
-	/*
-	 * Adjust the sg list so it is the same size as the
-	 * request.
-	 */
-	if (brq.data.blocks !=
-		(req->nr_sectors >> (md->block_bits - 9))) {
-		data_size = brq.data.blocks * brq.data.blksz;
-		for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
-			data_size -= sg->length;
-			if (data_size <= 0) {
-				sg->length += data_size;
-				i++;
-				break;
-			}
-		}
-		brq.data.sg_len = i;
-	}
-
 	mmc_wait_for_req(card->host, &brq.mrq);
 
 	mmc_queue_bounce_post(mq);
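The long removal above is the payoff: the driver previously clamped
brq.data.blocks to the host's max_blk_count itself and then trimmed the
scatterlist to match, leaving the remainder of the request for later
iterations. A userspace sketch of that now-unnecessary truncation
(hypothetical struct and helper; the real code walks struct scatterlist
with for_each_sg()):

/*
 * Sketch of the deleted scatterlist trimming: walk the segment list,
 * stop once the clamped transfer size is covered, and shorten the
 * final segment so the list describes exactly data_size bytes.
 */
#include <stdio.h>

struct seg { unsigned int length; };

static unsigned int trim_segments(struct seg *sg, unsigned int nsegs,
                                  int data_size)
{
        unsigned int i;

        for (i = 0; i < nsegs; i++) {
                data_size -= sg[i].length;
                if (data_size <= 0) {
                        sg[i].length += data_size; /* shrink last segment */
                        return i + 1;              /* new segment count */
                }
        }
        return nsegs;
}

int main(void)
{
        struct seg sg[3] = { { 4096 }, { 4096 }, { 4096 } };
        unsigned int n = trim_segments(sg, 3, 6144); /* clamp 12 KiB to 6 KiB */

        printf("%u segments, last %u bytes\n", n, sg[n - 1].length);
        /* prints: 2 segments, last 2048 bytes */
        return 0;
}

With blk_queue_max_sectors() capped in queue.c below, requests always fit in
a single transfer and none of this bookkeeping is needed.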
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 3dee97e7d165..5c8f037dca6b 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -142,12 +142,19 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 		bouncesz = host->max_req_size;
 		if (bouncesz > host->max_seg_size)
 			bouncesz = host->max_seg_size;
+		if (bouncesz > (host->max_blk_count * 512))
+			bouncesz = host->max_blk_count * 512;
+
+		if (bouncesz > 512) {
+			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+			if (!mq->bounce_buf) {
+				printk(KERN_WARNING "%s: unable to "
+					"allocate bounce buffer\n",
+					mmc_card_name(card));
+			}
+		}
 
-		mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-		if (!mq->bounce_buf) {
-			printk(KERN_WARNING "%s: unable to allocate "
-				"bounce buffer\n", mmc_card_name(card));
-		} else {
+		if (mq->bounce_buf) {
 			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
 			blk_queue_max_sectors(mq->queue, bouncesz / 512);
 			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
@@ -175,7 +182,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 
 	if (!mq->bounce_buf) {
 		blk_queue_bounce_limit(mq->queue, limit);
-		blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
+		blk_queue_max_sectors(mq->queue,
+			min(host->max_blk_count, host->max_req_size / 512));
 		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
 		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
 		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
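The first queue.c hunk applies the same restriction when sizing the bounce
buffer; a standalone sketch of that arithmetic (hypothetical helper and
example values):

/*
 * Sketch of the bounce buffer sizing: start from the host's request
 * byte limit, clamp to the largest single segment and to
 * max_blk_count worth of 512-byte blocks; one block or less means
 * bouncing buys nothing, so return 0 to skip the buffer.
 */
#include <stdio.h>

static unsigned int bounce_size(unsigned int max_req_size,
                                unsigned int max_seg_size,
                                unsigned int max_blk_count)
{
        unsigned int bouncesz = max_req_size;

        if (bouncesz > max_seg_size)
                bouncesz = max_seg_size;
        if (bouncesz > max_blk_count * 512)
                bouncesz = max_blk_count * 512;

        return bouncesz > 512 ? bouncesz : 0;
}

int main(void)
{
        /* e.g. 64 KiB requests and segments, but only 8 blocks per command */
        printf("%u\n", bounce_size(65536, 65536, 8)); /* prints 4096 */
        return 0;
}

When the bounce buffer is skipped (or its allocation fails), the second hunk's
non-bounce path takes over, capping the queue at
min(host->max_blk_count, host->max_req_size / 512) sectors, which is the core
of the fix.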