author		Linus Torvalds <torvalds@linux-foundation.org>	2008-10-12 14:51:57 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-10-12 14:51:57 -0400
commit		46b5e34029fef7a042f3ff16e319e737257e5c7b
tree		c2e90b7a6d7c39c3a35eed1dfd0fd19077467c93 /drivers/mmc/card
parent		94a9f8ad337aec011da2ca901ef89ae7e885f24c
parent		6ee6c6adf1cfebbf432b8d1f204c7f96e395933e
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/drzeus/mmc
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/drzeus/mmc: (24 commits)
  MMC: Use timeout values from CSR
  MMC: CSD and CID timeout values
  sdhci: 'scratch' may be used uninitialized
  mmc: explicitly mention SDIO support in Kconfig
  mmc: remove redundant "depends on"
  Fix comment in include/linux/mmc/host.h
  sdio: high-speed support
  mmc_block: hard code 512 byte block size
  sdhci: force high speed capability on some controllers
  mmc_block: filter out PC requests
  mmc_block: indicate strict ordering
  mmc_block: inform block layer about sector count restriction
  sdio: give sdio irq thread a host specific name
  sdio: make sleep on error interruptable
  sdhci: reduce card detection delay
  sdhci: let the controller wait for busy state to end
  atmel-mci: Add missing flush_dcache_page() in PIO transfer code
  atmel-mci: Don't overwrite error bits when NOTBUSY is set
  atmel-mci: Add experimental DMA support
  atmel-mci: support multiple mmc slots
  ...
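The mmc_block changes in this pull all follow from one decision: the driver now assumes the 512-byte block size that both the SD and MMC specifications make mandatory, instead of tracking a per-card block_bits value. A minimal sketch of the resulting request setup, using the 2.6.27-era mmc_blk_request fields that appear in the diff below (the read/write split and error handling are omitted):

	/* Sketch only: both specs guarantee 512-byte block support,
	 * so the transfer geometry can be fixed at request setup.
	 */
	brq.data.blksz  = 512;             /* one block per 512-byte sector */
	brq.data.blocks = req->nr_sectors; /* block layer has already clamped this */

The clamp against card->host->max_blk_count that used to sit in the request path moves into queue setup, as the queue.c hunks at the end of this diff show.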
Diffstat (limited to 'drivers/mmc/card')
-rw-r--r--	drivers/mmc/card/Kconfig	 3
-rw-r--r--	drivers/mmc/card/block.c	46
-rw-r--r--	drivers/mmc/card/queue.c	23
3 files changed, 23 insertions(+), 49 deletions(-)
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index dd0f398ee2f5..3f2a912659af 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -2,7 +2,7 @@
 # MMC/SD card drivers
 #
 
-comment "MMC/SD Card Drivers"
+comment "MMC/SD/SDIO Card Drivers"
 
 config MMC_BLOCK
 	tristate "MMC block device driver"
@@ -34,7 +34,6 @@ config MMC_BLOCK_BOUNCE
 
 config SDIO_UART
 	tristate "SDIO UART/GPS class support"
-	depends on MMC
 	help
 	  SDIO function driver for SDIO cards that implements the UART
 	  class, as well as the GPS class which appears like a UART.
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index efacee0404a0..24c97d3d16bb 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -58,7 +58,6 @@ struct mmc_blk_data {
 	struct mmc_queue queue;
 
 	unsigned int	usage;
-	unsigned int	block_bits;
 	unsigned int	read_only;
 };
 
@@ -216,8 +215,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
 	struct mmc_blk_request brq;
-	int ret = 1, data_size, i;
-	struct scatterlist *sg;
+	int ret = 1;
 
 	mmc_claim_host(card->host);
 
@@ -233,13 +231,11 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		if (!mmc_card_blockaddr(card))
 			brq.cmd.arg <<= 9;
 		brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
-		brq.data.blksz = 1 << md->block_bits;
+		brq.data.blksz = 512;
 		brq.stop.opcode = MMC_STOP_TRANSMISSION;
 		brq.stop.arg = 0;
 		brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
-		brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
-		if (brq.data.blocks > card->host->max_blk_count)
-			brq.data.blocks = card->host->max_blk_count;
+		brq.data.blocks = req->nr_sectors;
 
 		if (brq.data.blocks > 1) {
 			/* SPI multiblock writes terminate using a special
@@ -271,24 +267,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 
 		mmc_queue_bounce_pre(mq);
 
-		/*
-		 * Adjust the sg list so it is the same size as the
-		 * request.
-		 */
-		if (brq.data.blocks !=
-		    (req->nr_sectors >> (md->block_bits - 9))) {
-			data_size = brq.data.blocks * brq.data.blksz;
-			for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
-				data_size -= sg->length;
-				if (data_size <= 0) {
-					sg->length += data_size;
-					i++;
-					break;
-				}
-			}
-			brq.data.sg_len = i;
-		}
-
 		mmc_wait_for_req(card->host, &brq.mrq);
 
 		mmc_queue_bounce_post(mq);
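With brq.data.blocks taken directly from req->nr_sectors, a request can no longer outgrow its scatterlist, so the trimming loop removed above has no work left to do. Its job is done once, at queue-init time, by advertising the host's limit to the block layer; a sketch of that constraint as the queue.c hunk below expresses it:

	/* Advertise the per-request limit up front so requests arrive
	 * no larger than max_blk_count 512-byte sectors and never need
	 * trimming in the issue path.
	 */
	blk_queue_max_sectors(mq->queue,
		min(host->max_blk_count, host->max_req_size / 512));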
@@ -373,16 +351,11 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	if (rq_data_dir(req) != READ) {
 		if (mmc_card_sd(card)) {
 			u32 blocks;
-			unsigned int bytes;
 
 			blocks = mmc_sd_num_wr_blocks(card);
 			if (blocks != (u32)-1) {
-				if (card->csd.write_partial)
-					bytes = blocks << md->block_bits;
-				else
-					bytes = blocks << 9;
 				spin_lock_irq(&md->lock);
-				ret = __blk_end_request(req, 0, bytes);
+				ret = __blk_end_request(req, 0, blocks << 9);
 				spin_unlock_irq(&md->lock);
 			}
 		} else {
@@ -432,13 +405,6 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
 	 */
 	md->read_only = mmc_blk_readonly(card);
 
-	/*
-	 * Both SD and MMC specifications state (although a bit
-	 * unclearly in the MMC case) that a block size of 512
-	 * bytes must always be supported by the card.
-	 */
-	md->block_bits = 9;
-
 	md->disk = alloc_disk(1 << MMC_SHIFT);
 	if (md->disk == NULL) {
 		ret = -ENOMEM;
@@ -476,7 +442,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
 
 	sprintf(md->disk->disk_name, "mmcblk%d", devidx);
 
-	blk_queue_hardsect_size(md->queue.queue, 1 << md->block_bits);
+	blk_queue_hardsect_size(md->queue.queue, 512);
 
 	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
 		/*
@@ -514,7 +480,7 @@ mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
 
 	mmc_claim_host(card->host);
 	cmd.opcode = MMC_SET_BLOCKLEN;
-	cmd.arg = 1 << md->block_bits;
+	cmd.arg = 512;
 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
 	err = mmc_wait_for_cmd(card->host, &cmd, 5);
 	mmc_release_host(card->host);
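After this change the interesting part of mmc_blk_set_blksize() reduces to a single fixed-size SET_BLOCKLEN. A sketch mirroring the hunk above, with declarations added and error paths trimmed:

	struct mmc_command cmd;
	int err;

	mmc_claim_host(card->host);
	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = 512;	/* mandatory block size per the SD/MMC specs */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 5);	/* retry up to 5 times */
	mmc_release_host(card->host);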
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 3dee97e7d165..406989e992ba 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -31,7 +31,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
 	/*
 	 * We only like normal block requests.
 	 */
-	if (!blk_fs_request(req) && !blk_pc_request(req)) {
+	if (!blk_fs_request(req)) {
 		blk_dump_rq_flags(req, "MMC bad request");
 		return BLKPREP_KILL;
 	}
@@ -131,6 +131,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 	mq->req = NULL;
 
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
+	blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);
 
 #ifdef CONFIG_MMC_BLOCK_BOUNCE
 	if (host->max_hw_segs == 1) {
@@ -142,12 +143,19 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 			bouncesz = host->max_req_size;
 		if (bouncesz > host->max_seg_size)
 			bouncesz = host->max_seg_size;
+		if (bouncesz > (host->max_blk_count * 512))
+			bouncesz = host->max_blk_count * 512;
+
+		if (bouncesz > 512) {
+			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+			if (!mq->bounce_buf) {
+				printk(KERN_WARNING "%s: unable to "
+					"allocate bounce buffer\n",
+					mmc_card_name(card));
+			}
+		}
 
-		mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-		if (!mq->bounce_buf) {
-			printk(KERN_WARNING "%s: unable to allocate "
-				"bounce buffer\n", mmc_card_name(card));
-		} else {
+		if (mq->bounce_buf) {
 			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
 			blk_queue_max_sectors(mq->queue, bouncesz / 512);
 			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
@@ -175,7 +183,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 
 	if (!mq->bounce_buf) {
 		blk_queue_bounce_limit(mq->queue, limit);
-		blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
+		blk_queue_max_sectors(mq->queue,
+			min(host->max_blk_count, host->max_req_size / 512));
 		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
 		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
 		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
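Taken together, the queue.c hunks make the bounce-buffer path respect max_blk_count and skip the allocation when it would be pointlessly small. A condensed sketch of the resulting sizing logic; the MMC_QUEUE_BOUNCESZ starting value (64 KiB in this era's queue.c) is context not shown in the hunks above:

	unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;	/* 64 KiB default */

	/* Clamp to every per-request limit the host advertises. */
	if (bouncesz > host->max_req_size)
		bouncesz = host->max_req_size;
	if (bouncesz > host->max_seg_size)
		bouncesz = host->max_seg_size;
	if (bouncesz > host->max_blk_count * 512)
		bouncesz = host->max_blk_count * 512;

	/* A single-sector bounce buffer buys nothing; skip it. */
	if (bouncesz > 512)
		mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);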