author     Linus Torvalds <torvalds@linux-foundation.org>    2008-10-12 14:51:57 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2008-10-12 14:51:57 -0400
commit     46b5e34029fef7a042f3ff16e319e737257e5c7b (patch)
tree       c2e90b7a6d7c39c3a35eed1dfd0fd19077467c93 /drivers
parent     94a9f8ad337aec011da2ca901ef89ae7e885f24c (diff)
parent     6ee6c6adf1cfebbf432b8d1f204c7f96e395933e (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/drzeus/mmc
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/drzeus/mmc: (24 commits)
  MMC: Use timeout values from CSR
  MMC: CSD and CID timeout values
  sdhci: 'scratch' may be used uninitialized
  mmc: explicitly mention SDIO support in Kconfig
  mmc: remove redundant "depends on"
  Fix comment in include/linux/mmc/host.h
  sdio: high-speed support
  mmc_block: hard code 512 byte block size
  sdhci: force high speed capability on some controllers
  mmc_block: filter out PC requests
  mmc_block: indicate strict ordering
  mmc_block: inform block layer about sector count restriction
  sdio: give sdio irq thread a host specific name
  sdio: make sleep on error interruptable
  sdhci: reduce card detection delay
  sdhci: let the controller wait for busy state to end
  atmel-mci: Add missing flush_dcache_page() in PIO transfer code
  atmel-mci: Don't overwrite error bits when NOTBUSY is set
  atmel-mci: Add experimental DMA support
  atmel-mci: support multiple mmc slots
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/mmc/Kconfig               |    9
-rw-r--r--  drivers/mmc/card/Kconfig          |    3
-rw-r--r--  drivers/mmc/card/block.c          |   46
-rw-r--r--  drivers/mmc/card/queue.c          |   23
-rw-r--r--  drivers/mmc/core/mmc_ops.c        |    8
-rw-r--r--  drivers/mmc/core/sdio.c           |   52
-rw-r--r--  drivers/mmc/core/sdio_irq.c       |   16
-rw-r--r--  drivers/mmc/host/Kconfig          |   30
-rw-r--r--  drivers/mmc/host/atmel-mci-regs.h |    6
-rw-r--r--  drivers/mmc/host/atmel-mci.c      | 1352
-rw-r--r--  drivers/mmc/host/mmc_spi.c        |   32
-rw-r--r--  drivers/mmc/host/sdhci-pci.c      |    3
-rw-r--r--  drivers/mmc/host/sdhci.c          |   46
-rw-r--r--  drivers/mmc/host/sdhci.h          |    2
14 files changed, 1153 insertions, 475 deletions
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index c0b41e8bcd9d..f2eeb38efa65 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -3,13 +3,14 @@
3# 3#
4 4
5menuconfig MMC 5menuconfig MMC
6 tristate "MMC/SD card support" 6 tristate "MMC/SD/SDIO card support"
7 depends on HAS_IOMEM 7 depends on HAS_IOMEM
8 help 8 help
9 MMC is the "multi-media card" bus protocol. 9 This selects MultiMediaCard, Secure Digital and Secure
10 Digital I/O support.
10 11
11 If you want MMC support, you should say Y here and also 12 If you want MMC/SD/SDIO support, you should say Y here and
12 to the specific driver for your MMC interface. 13 also to your specific host controller driver.
13 14
14config MMC_DEBUG 15config MMC_DEBUG
15 bool "MMC debugging" 16 bool "MMC debugging"
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index dd0f398ee2f5..3f2a912659af 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -2,7 +2,7 @@
2# MMC/SD card drivers 2# MMC/SD card drivers
3# 3#
4 4
5comment "MMC/SD Card Drivers" 5comment "MMC/SD/SDIO Card Drivers"
6 6
7config MMC_BLOCK 7config MMC_BLOCK
8 tristate "MMC block device driver" 8 tristate "MMC block device driver"
@@ -34,7 +34,6 @@ config MMC_BLOCK_BOUNCE
34 34
35config SDIO_UART 35config SDIO_UART
36 tristate "SDIO UART/GPS class support" 36 tristate "SDIO UART/GPS class support"
37 depends on MMC
38 help 37 help
39 SDIO function driver for SDIO cards that implements the UART 38 SDIO function driver for SDIO cards that implements the UART
40 class, as well as the GPS class which appears like a UART. 39 class, as well as the GPS class which appears like a UART.
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index efacee0404a0..24c97d3d16bb 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -58,7 +58,6 @@ struct mmc_blk_data {
58 struct mmc_queue queue; 58 struct mmc_queue queue;
59 59
60 unsigned int usage; 60 unsigned int usage;
61 unsigned int block_bits;
62 unsigned int read_only; 61 unsigned int read_only;
63}; 62};
64 63
@@ -216,8 +215,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
216 struct mmc_blk_data *md = mq->data; 215 struct mmc_blk_data *md = mq->data;
217 struct mmc_card *card = md->queue.card; 216 struct mmc_card *card = md->queue.card;
218 struct mmc_blk_request brq; 217 struct mmc_blk_request brq;
219 int ret = 1, data_size, i; 218 int ret = 1;
220 struct scatterlist *sg;
221 219
222 mmc_claim_host(card->host); 220 mmc_claim_host(card->host);
223 221
@@ -233,13 +231,11 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
233 if (!mmc_card_blockaddr(card)) 231 if (!mmc_card_blockaddr(card))
234 brq.cmd.arg <<= 9; 232 brq.cmd.arg <<= 9;
235 brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; 233 brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
236 brq.data.blksz = 1 << md->block_bits; 234 brq.data.blksz = 512;
237 brq.stop.opcode = MMC_STOP_TRANSMISSION; 235 brq.stop.opcode = MMC_STOP_TRANSMISSION;
238 brq.stop.arg = 0; 236 brq.stop.arg = 0;
239 brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; 237 brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
240 brq.data.blocks = req->nr_sectors >> (md->block_bits - 9); 238 brq.data.blocks = req->nr_sectors;
241 if (brq.data.blocks > card->host->max_blk_count)
242 brq.data.blocks = card->host->max_blk_count;
243 239
244 if (brq.data.blocks > 1) { 240 if (brq.data.blocks > 1) {
245 /* SPI multiblock writes terminate using a special 241 /* SPI multiblock writes terminate using a special
@@ -271,24 +267,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
271 267
272 mmc_queue_bounce_pre(mq); 268 mmc_queue_bounce_pre(mq);
273 269
274 /*
275 * Adjust the sg list so it is the same size as the
276 * request.
277 */
278 if (brq.data.blocks !=
279 (req->nr_sectors >> (md->block_bits - 9))) {
280 data_size = brq.data.blocks * brq.data.blksz;
281 for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
282 data_size -= sg->length;
283 if (data_size <= 0) {
284 sg->length += data_size;
285 i++;
286 break;
287 }
288 }
289 brq.data.sg_len = i;
290 }
291
292 mmc_wait_for_req(card->host, &brq.mrq); 270 mmc_wait_for_req(card->host, &brq.mrq);
293 271
294 mmc_queue_bounce_post(mq); 272 mmc_queue_bounce_post(mq);
@@ -373,16 +351,11 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
373 if (rq_data_dir(req) != READ) { 351 if (rq_data_dir(req) != READ) {
374 if (mmc_card_sd(card)) { 352 if (mmc_card_sd(card)) {
375 u32 blocks; 353 u32 blocks;
376 unsigned int bytes;
377 354
378 blocks = mmc_sd_num_wr_blocks(card); 355 blocks = mmc_sd_num_wr_blocks(card);
379 if (blocks != (u32)-1) { 356 if (blocks != (u32)-1) {
380 if (card->csd.write_partial)
381 bytes = blocks << md->block_bits;
382 else
383 bytes = blocks << 9;
384 spin_lock_irq(&md->lock); 357 spin_lock_irq(&md->lock);
385 ret = __blk_end_request(req, 0, bytes); 358 ret = __blk_end_request(req, 0, blocks << 9);
386 spin_unlock_irq(&md->lock); 359 spin_unlock_irq(&md->lock);
387 } 360 }
388 } else { 361 } else {
@@ -432,13 +405,6 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
432 */ 405 */
433 md->read_only = mmc_blk_readonly(card); 406 md->read_only = mmc_blk_readonly(card);
434 407
435 /*
436 * Both SD and MMC specifications state (although a bit
437 * unclearly in the MMC case) that a block size of 512
438 * bytes must always be supported by the card.
439 */
440 md->block_bits = 9;
441
442 md->disk = alloc_disk(1 << MMC_SHIFT); 408 md->disk = alloc_disk(1 << MMC_SHIFT);
443 if (md->disk == NULL) { 409 if (md->disk == NULL) {
444 ret = -ENOMEM; 410 ret = -ENOMEM;
@@ -476,7 +442,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
476 442
477 sprintf(md->disk->disk_name, "mmcblk%d", devidx); 443 sprintf(md->disk->disk_name, "mmcblk%d", devidx);
478 444
479 blk_queue_hardsect_size(md->queue.queue, 1 << md->block_bits); 445 blk_queue_hardsect_size(md->queue.queue, 512);
480 446
481 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) { 447 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
482 /* 448 /*
@@ -514,7 +480,7 @@ mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
514 480
515 mmc_claim_host(card->host); 481 mmc_claim_host(card->host);
516 cmd.opcode = MMC_SET_BLOCKLEN; 482 cmd.opcode = MMC_SET_BLOCKLEN;
517 cmd.arg = 1 << md->block_bits; 483 cmd.arg = 512;
518 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 484 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
519 err = mmc_wait_for_cmd(card->host, &cmd, 5); 485 err = mmc_wait_for_cmd(card->host, &cmd, 5);
520 mmc_release_host(card->host); 486 mmc_release_host(card->host);
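The block.c changes above hard-code a 512-byte transfer block: the request's sector count becomes the block count directly, completed bytes are blocks << 9, and byte-addressed cards still get the start sector shifted into a byte offset. A minimal standalone sketch of that arithmetic, using made-up stand-in types rather than the kernel's struct request and struct mmc_blk_request:

#include <stdio.h>
#include <stdint.h>

/* Made-up stand-ins for the kernel's request/mmc_blk_request fields. */
struct fake_request {
	uint32_t sector;      /* start of the request, in 512-byte sectors */
	uint32_t nr_sectors;  /* length of the request, in 512-byte sectors */
};

/* Byte-addressed (non-high-capacity) cards take a byte offset as the
 * command argument, so the sector number is shifted left by 9. */
static uint32_t mmc_cmd_arg(const struct fake_request *req, int blockaddr)
{
	return blockaddr ? req->sector : req->sector << 9;
}

int main(void)
{
	struct fake_request req = { .sector = 2048, .nr_sectors = 8 };

	uint32_t blksz  = 512;             /* always 512, as hard-coded above */
	uint32_t blocks = req.nr_sectors;  /* one block per 512-byte sector */
	uint32_t bytes  = blocks << 9;     /* what gets completed on success */

	printf("arg=0x%08x blksz=%u blocks=%u bytes=%u\n",
	       mmc_cmd_arg(&req, 0), blksz, blocks, bytes);
	return 0;
}

Dropping the old block_bits bookkeeping is what lets the scatterlist-trimming code and the write_partial special case above disappear.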
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 3dee97e7d165..406989e992ba 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -31,7 +31,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
31 /* 31 /*
32 * We only like normal block requests. 32 * We only like normal block requests.
33 */ 33 */
34 if (!blk_fs_request(req) && !blk_pc_request(req)) { 34 if (!blk_fs_request(req)) {
35 blk_dump_rq_flags(req, "MMC bad request"); 35 blk_dump_rq_flags(req, "MMC bad request");
36 return BLKPREP_KILL; 36 return BLKPREP_KILL;
37 } 37 }
@@ -131,6 +131,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
131 mq->req = NULL; 131 mq->req = NULL;
132 132
133 blk_queue_prep_rq(mq->queue, mmc_prep_request); 133 blk_queue_prep_rq(mq->queue, mmc_prep_request);
134 blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);
134 135
135#ifdef CONFIG_MMC_BLOCK_BOUNCE 136#ifdef CONFIG_MMC_BLOCK_BOUNCE
136 if (host->max_hw_segs == 1) { 137 if (host->max_hw_segs == 1) {
@@ -142,12 +143,19 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
142 bouncesz = host->max_req_size; 143 bouncesz = host->max_req_size;
143 if (bouncesz > host->max_seg_size) 144 if (bouncesz > host->max_seg_size)
144 bouncesz = host->max_seg_size; 145 bouncesz = host->max_seg_size;
146 if (bouncesz > (host->max_blk_count * 512))
147 bouncesz = host->max_blk_count * 512;
148
149 if (bouncesz > 512) {
150 mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
151 if (!mq->bounce_buf) {
152 printk(KERN_WARNING "%s: unable to "
153 "allocate bounce buffer\n",
154 mmc_card_name(card));
155 }
156 }
145 157
146 mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL); 158 if (mq->bounce_buf) {
147 if (!mq->bounce_buf) {
148 printk(KERN_WARNING "%s: unable to allocate "
149 "bounce buffer\n", mmc_card_name(card));
150 } else {
151 blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY); 159 blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
152 blk_queue_max_sectors(mq->queue, bouncesz / 512); 160 blk_queue_max_sectors(mq->queue, bouncesz / 512);
153 blk_queue_max_phys_segments(mq->queue, bouncesz / 512); 161 blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
@@ -175,7 +183,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
175 183
176 if (!mq->bounce_buf) { 184 if (!mq->bounce_buf) {
177 blk_queue_bounce_limit(mq->queue, limit); 185 blk_queue_bounce_limit(mq->queue, limit);
178 blk_queue_max_sectors(mq->queue, host->max_req_size / 512); 186 blk_queue_max_sectors(mq->queue,
187 min(host->max_blk_count, host->max_req_size / 512));
179 blk_queue_max_phys_segments(mq->queue, host->max_phys_segs); 188 blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
180 blk_queue_max_hw_segments(mq->queue, host->max_hw_segs); 189 blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
181 blk_queue_max_segment_size(mq->queue, host->max_seg_size); 190 blk_queue_max_segment_size(mq->queue, host->max_seg_size);
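The queue.c hunk above now clamps the bounce buffer against max_blk_count * 512 as well as the request and segment limits, and only allocates it when the result is larger than one sector. A standalone sketch of that sizing decision, with hypothetical host limits standing in for the struct mmc_host fields (MMC_QUEUE_BOUNCESZ is assumed to be the usual 64 KiB default):

#include <stdio.h>

#define MMC_QUEUE_BOUNCESZ 65536U /* assumed default bounce buffer size */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Hypothetical host limits standing in for struct mmc_host fields. */
	unsigned int max_req_size  = 524288;
	unsigned int max_seg_size  = 65536;
	unsigned int max_blk_count = 65535;

	unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;

	bouncesz = min_u(bouncesz, max_req_size);
	bouncesz = min_u(bouncesz, max_seg_size);
	bouncesz = min_u(bouncesz, max_blk_count * 512);

	if (bouncesz > 512)
		printf("allocate a %u byte bounce buffer, up to %u sectors per request\n",
		       bouncesz, bouncesz / 512);
	else
		printf("skip bouncing, limit requests to %u sectors\n",
		       min_u(max_blk_count, max_req_size / 512));
	return 0;
}

The non-bounce path in the hunk applies the same min(max_blk_count, max_req_size / 512) limit when telling the block layer how many sectors fit in one request.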
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 64b05c6270f2..9c50e6f1c236 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -248,8 +248,12 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
248 248
249 sg_init_one(&sg, data_buf, len); 249 sg_init_one(&sg, data_buf, len);
250 250
251 if (card) 251 /*
252 mmc_set_data_timeout(&data, card); 252 * The spec states that CSR and CID accesses have a timeout
253 * of 64 clock cycles.
254 */
255 data.timeout_ns = 0;
256 data.timeout_clks = 64;
253 257
254 mmc_wait_for_req(host, &mrq); 258 mmc_wait_for_req(host, &mrq);
255 259
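For a sense of scale, a quick standalone calculation of what the fixed 64-clock timeout above amounts to at two assumed bus rates (400 kHz during card identification, 25 MHz at full speed):

#include <stdio.h>

int main(void)
{
	unsigned int timeout_clks = 64;   /* fixed CSD/CID timeout from the hunk */
	unsigned long rates_hz[] = { 400000, 25000000 }; /* assumed example rates */

	for (int i = 0; i < 2; i++) {
		double us = timeout_clks * 1e6 / rates_hz[i];
		printf("%8lu Hz: 64 clocks = %.1f us\n", rates_hz[i], us);
	}
	return 0;
}

The old code only computed a timeout when a card pointer was available; the fixed clock count covers the early reads as well.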
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 4eab79e09ccc..fb99ccff9080 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -165,6 +165,36 @@ static int sdio_enable_wide(struct mmc_card *card)
165} 165}
166 166
167/* 167/*
168 * Test if the card supports high-speed mode and, if so, switch to it.
169 */
170static int sdio_enable_hs(struct mmc_card *card)
171{
172 int ret;
173 u8 speed;
174
175 if (!(card->host->caps & MMC_CAP_SD_HIGHSPEED))
176 return 0;
177
178 if (!card->cccr.high_speed)
179 return 0;
180
181 ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
182 if (ret)
183 return ret;
184
185 speed |= SDIO_SPEED_EHS;
186
187 ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_SPEED, speed, NULL);
188 if (ret)
189 return ret;
190
191 mmc_card_set_highspeed(card);
192 mmc_set_timing(card->host, MMC_TIMING_SD_HS);
193
194 return 0;
195}
196
197/*
168 * Host is being removed. Free up the current card. 198 * Host is being removed. Free up the current card.
169 */ 199 */
170static void mmc_sdio_remove(struct mmc_host *host) 200static void mmc_sdio_remove(struct mmc_host *host)
@@ -333,10 +363,26 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
333 goto remove; 363 goto remove;
334 364
335 /* 365 /*
336 * No support for high-speed yet, so just set 366 * Switch to high-speed (if supported).
337 * the card's maximum speed.
338 */ 367 */
339 mmc_set_clock(host, card->cis.max_dtr); 368 err = sdio_enable_hs(card);
369 if (err)
370 goto remove;
371
372 /*
373 * Change to the card's maximum speed.
374 */
375 if (mmc_card_highspeed(card)) {
376 /*
377 * The SDIO specification doesn't mention how
378 * the CIS transfer speed register relates to
379 * high-speed, but it seems that 50 MHz is
380 * mandatory.
381 */
382 mmc_set_clock(host, 50000000);
383 } else {
384 mmc_set_clock(host, card->cis.max_dtr);
385 }
340 386
341 /* 387 /*
342 * Switch to wider bus (if supported). 388 * Switch to wider bus (if supported).
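sdio_enable_hs() above is a read-modify-write of the CCCR speed register followed by a jump to a 50 MHz clock. A standalone sketch of just that register arithmetic, assuming the usual kernel encodings of SDIO_SPEED_SHS/SDIO_SPEED_EHS and a fabricated register snapshot:

#include <stdio.h>
#include <stdint.h>

/* CCCR speed register bits as defined in the kernel's sdio.h (assumed). */
#define SDIO_SPEED_SHS 0x01 /* card supports high-speed */
#define SDIO_SPEED_EHS 0x02 /* enable high-speed        */

int main(void)
{
	uint8_t speed = SDIO_SPEED_SHS;  /* fabricated SDIO_CCCR_SPEED snapshot */
	unsigned long clock_hz = 25000000;

	if (speed & SDIO_SPEED_SHS) {
		speed |= SDIO_SPEED_EHS; /* what sdio_enable_hs() writes back */
		clock_hz = 50000000;     /* 50 MHz once high-speed is enabled */
	}

	printf("CCCR speed: 0x%02x, bus clock: %lu Hz\n", speed, clock_hz);
	return 0;
}

In the real driver the support check comes from the parsed card->cccr.high_speed bit and the host's MMC_CAP_SD_HIGHSPEED capability rather than re-reading SHS, as the hunk shows.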
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index c292e124107a..bb192f90e8e9 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -5,6 +5,8 @@
5 * Created: June 18, 2007 5 * Created: June 18, 2007
6 * Copyright: MontaVista Software Inc. 6 * Copyright: MontaVista Software Inc.
7 * 7 *
8 * Copyright 2008 Pierre Ossman
9 *
8 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or (at 12 * the Free Software Foundation; either version 2 of the License, or (at
@@ -107,11 +109,14 @@ static int sdio_irq_thread(void *_host)
107 109
108 /* 110 /*
109 * Give other threads a chance to run in the presence of 111 * Give other threads a chance to run in the presence of
110 * errors. FIXME: determine if due to card removal and 112 * errors.
111 * possibly exit this thread if so.
112 */ 113 */
113 if (ret < 0) 114 if (ret < 0) {
114 ssleep(1); 115 set_current_state(TASK_INTERRUPTIBLE);
116 if (!kthread_should_stop())
117 schedule_timeout(HZ);
118 set_current_state(TASK_RUNNING);
119 }
115 120
116 /* 121 /*
117 * Adaptive polling frequency based on the assumption 122 * Adaptive polling frequency based on the assumption
@@ -154,7 +159,8 @@ static int sdio_card_irq_get(struct mmc_card *card)
154 if (!host->sdio_irqs++) { 159 if (!host->sdio_irqs++) {
155 atomic_set(&host->sdio_irq_thread_abort, 0); 160 atomic_set(&host->sdio_irq_thread_abort, 0);
156 host->sdio_irq_thread = 161 host->sdio_irq_thread =
157 kthread_run(sdio_irq_thread, host, "ksdiorqd"); 162 kthread_run(sdio_irq_thread, host, "ksdioirqd/%s",
163 mmc_hostname(host));
158 if (IS_ERR(host->sdio_irq_thread)) { 164 if (IS_ERR(host->sdio_irq_thread)) {
159 int err = PTR_ERR(host->sdio_irq_thread); 165 int err = PTR_ERR(host->sdio_irq_thread);
160 host->sdio_irqs--; 166 host->sdio_irqs--;
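The sdio_irq.c change replaces an uninterruptible ssleep(1) with an interruptible one-second wait so that stopping the thread does not have to wait out the back-off delay. A user-space analogue of that pattern (not the kernel mechanism itself) using a pthread condition variable with a timed wait; all names here are made up. Compile with -pthread:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Rough analogue of host->sdio_irq_thread_abort plus a wakeup source. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
static bool should_stop;

static void *irq_poll_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!should_stop) {
		struct timespec until;

		/* ... poll the card here; on error, back off for a second ... */

		clock_gettime(CLOCK_REALTIME, &until);
		until.tv_sec += 1;
		/* Sleeps up to one second, but returns at once when signalled. */
		pthread_cond_timedwait(&wake, &lock, &until);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t thr;

	pthread_create(&thr, NULL, irq_poll_thread, NULL);
	usleep(100 * 1000);           /* let the thread reach its sleep */

	pthread_mutex_lock(&lock);    /* analogue of kthread_stop()     */
	should_stop = true;
	pthread_cond_signal(&wake);
	pthread_mutex_unlock(&lock);

	pthread_join(thr, NULL);
	puts("poll thread exited without waiting out the full second");
	return 0;
}

The host-specific "ksdioirqd/%s" thread name in the same hunk serves the same debugging purpose as naming this thread per device would.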
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index ea8d7a3490d9..dfa585f7feaf 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -2,7 +2,7 @@
2# MMC/SD host controller drivers 2# MMC/SD host controller drivers
3# 3#
4 4
5comment "MMC/SD Host Controller Drivers" 5comment "MMC/SD/SDIO Host Controller Drivers"
6 6
7config MMC_ARMMMCI 7config MMC_ARMMMCI
8 tristate "ARM AMBA Multimedia Card Interface support" 8 tristate "ARM AMBA Multimedia Card Interface support"
@@ -114,6 +114,17 @@ config MMC_ATMELMCI
114 114
115 If unsure, say N. 115 If unsure, say N.
116 116
117config MMC_ATMELMCI_DMA
118 bool "Atmel MCI DMA support (EXPERIMENTAL)"
119 depends on MMC_ATMELMCI && DMA_ENGINE && EXPERIMENTAL
120 help
121 Say Y here to have the Atmel MCI driver use a DMA engine to
122 do data transfers and thus increase the throughput and
123 reduce the CPU utilization. Note that this is highly
124 experimental and may cause the driver to lock up.
125
126 If unsure, say N.
127
117config MMC_IMX 128config MMC_IMX
118 tristate "Motorola i.MX Multimedia Card Interface support" 129 tristate "Motorola i.MX Multimedia Card Interface support"
119 depends on ARCH_IMX 130 depends on ARCH_IMX
@@ -141,21 +152,22 @@ config MMC_TIFM_SD
141 module will be called tifm_sd. 152 module will be called tifm_sd.
142 153
143config MMC_SPI 154config MMC_SPI
144 tristate "MMC/SD over SPI" 155 tristate "MMC/SD/SDIO over SPI"
145 depends on MMC && SPI_MASTER && !HIGHMEM && HAS_DMA 156 depends on SPI_MASTER && !HIGHMEM && HAS_DMA
146 select CRC7 157 select CRC7
147 select CRC_ITU_T 158 select CRC_ITU_T
148 help 159 help
149 Some systems accss MMC/SD cards using a SPI controller instead of 160 Some systems accss MMC/SD/SDIO cards using a SPI controller
150 using a "native" MMC/SD controller. This has a disadvantage of 161 instead of using a "native" MMC/SD/SDIO controller. This has a
151 being relatively high overhead, but a compensating advantage of 162 disadvantage of being relatively high overhead, but a compensating
152 working on many systems without dedicated MMC/SD controllers. 163 advantage of working on many systems without dedicated MMC/SD/SDIO
164 controllers.
153 165
154 If unsure, or if your system has no SPI master driver, say N. 166 If unsure, or if your system has no SPI master driver, say N.
155 167
156config MMC_S3C 168config MMC_S3C
157 tristate "Samsung S3C SD/MMC Card Interface support" 169 tristate "Samsung S3C SD/MMC Card Interface support"
158 depends on ARCH_S3C2410 && MMC 170 depends on ARCH_S3C2410
159 help 171 help
160 This selects a driver for the MCI interface found in 172 This selects a driver for the MCI interface found in
161 Samsung's S3C2410, S3C2412, S3C2440, S3C2442 CPUs. 173 Samsung's S3C2410, S3C2412, S3C2440, S3C2442 CPUs.
@@ -166,7 +178,7 @@ config MMC_S3C
166 178
167config MMC_SDRICOH_CS 179config MMC_SDRICOH_CS
168 tristate "MMC/SD driver for Ricoh Bay1Controllers (EXPERIMENTAL)" 180 tristate "MMC/SD driver for Ricoh Bay1Controllers (EXPERIMENTAL)"
169 depends on EXPERIMENTAL && MMC && PCI && PCMCIA 181 depends on EXPERIMENTAL && PCI && PCMCIA
170 help 182 help
171 Say Y here if your Notebook reports a Ricoh Bay1Controller PCMCIA 183 Say Y here if your Notebook reports a Ricoh Bay1Controller PCMCIA
172 card whenever you insert a MMC or SD card into the card slot. 184 card whenever you insert a MMC or SD card into the card slot.
diff --git a/drivers/mmc/host/atmel-mci-regs.h b/drivers/mmc/host/atmel-mci-regs.h
index 26bd80e65031..b58364ed6bba 100644
--- a/drivers/mmc/host/atmel-mci-regs.h
+++ b/drivers/mmc/host/atmel-mci-regs.h
@@ -25,8 +25,10 @@
25#define MCI_SDCR 0x000c /* SD Card / SDIO */ 25#define MCI_SDCR 0x000c /* SD Card / SDIO */
26# define MCI_SDCSEL_SLOT_A ( 0 << 0) /* Select SD slot A */ 26# define MCI_SDCSEL_SLOT_A ( 0 << 0) /* Select SD slot A */
27# define MCI_SDCSEL_SLOT_B ( 1 << 0) /* Select SD slot A */ 27# define MCI_SDCSEL_SLOT_B ( 1 << 0) /* Select SD slot A */
28# define MCI_SDCBUS_1BIT ( 0 << 7) /* 1-bit data bus */ 28# define MCI_SDCSEL_MASK ( 3 << 0)
29# define MCI_SDCBUS_4BIT ( 1 << 7) /* 4-bit data bus */ 29# define MCI_SDCBUS_1BIT ( 0 << 6) /* 1-bit data bus */
30# define MCI_SDCBUS_4BIT ( 2 << 6) /* 4-bit data bus */
31# define MCI_SDCBUS_MASK ( 3 << 6)
30#define MCI_ARGR 0x0010 /* Command Argument */ 32#define MCI_ARGR 0x0010 /* Command Argument */
31#define MCI_CMDR 0x0014 /* Command */ 33#define MCI_CMDR 0x0014 /* Command */
32# define MCI_CMDR_CMDNB(x) ((x) << 0) /* Command Opcode */ 34# define MCI_CMDR_CMDNB(x) ((x) << 0) /* Command Opcode */
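The register-header fix above turns SDCBUS into a two-bit field at bit 6 (with the value 2, not a bit at position 7, encoding a 4-bit bus) and adds masks for both fields. A standalone sketch of updating such a field with the new mask; the helper name is made up, the bit values are copied from the hunk:

#include <stdio.h>
#include <stdint.h>

/* Field definitions as corrected in the hunk above. */
#define MCI_SDCSEL_SLOT_A (0u << 0)
#define MCI_SDCSEL_SLOT_B (1u << 0)
#define MCI_SDCSEL_MASK   (3u << 0)
#define MCI_SDCBUS_1BIT   (0u << 6)
#define MCI_SDCBUS_4BIT   (2u << 6)
#define MCI_SDCBUS_MASK   (3u << 6)

/* Hypothetical helper: clear the bus-width field, then set the new width. */
static uint32_t sdcr_set_bus_width(uint32_t sdc_reg, int four_bit)
{
	sdc_reg &= ~MCI_SDCBUS_MASK;
	sdc_reg |= four_bit ? MCI_SDCBUS_4BIT : MCI_SDCBUS_1BIT;
	return sdc_reg;
}

int main(void)
{
	uint32_t sdcr = MCI_SDCSEL_SLOT_B | MCI_SDCBUS_1BIT;

	sdcr = sdcr_set_bus_width(sdcr, 1);
	printf("SDCR = 0x%08x\n", sdcr); /* slot B, 4-bit bus: 0x00000081 */
	return 0;
}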
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 00008967ef7a..7a3f2436b011 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -11,6 +11,8 @@
11#include <linux/clk.h> 11#include <linux/clk.h>
12#include <linux/debugfs.h> 12#include <linux/debugfs.h>
13#include <linux/device.h> 13#include <linux/device.h>
14#include <linux/dmaengine.h>
15#include <linux/dma-mapping.h>
14#include <linux/err.h> 16#include <linux/err.h>
15#include <linux/gpio.h> 17#include <linux/gpio.h>
16#include <linux/init.h> 18#include <linux/init.h>
@@ -33,64 +35,178 @@
33#include "atmel-mci-regs.h" 35#include "atmel-mci-regs.h"
34 36
35#define ATMCI_DATA_ERROR_FLAGS (MCI_DCRCE | MCI_DTOE | MCI_OVRE | MCI_UNRE) 37#define ATMCI_DATA_ERROR_FLAGS (MCI_DCRCE | MCI_DTOE | MCI_OVRE | MCI_UNRE)
38#define ATMCI_DMA_THRESHOLD 16
36 39
37enum { 40enum {
38 EVENT_CMD_COMPLETE = 0, 41 EVENT_CMD_COMPLETE = 0,
39 EVENT_DATA_ERROR,
40 EVENT_DATA_COMPLETE,
41 EVENT_STOP_SENT,
42 EVENT_STOP_COMPLETE,
43 EVENT_XFER_COMPLETE, 42 EVENT_XFER_COMPLETE,
43 EVENT_DATA_COMPLETE,
44 EVENT_DATA_ERROR,
45};
46
47enum atmel_mci_state {
48 STATE_IDLE = 0,
49 STATE_SENDING_CMD,
50 STATE_SENDING_DATA,
51 STATE_DATA_BUSY,
52 STATE_SENDING_STOP,
53 STATE_DATA_ERROR,
54};
55
56struct atmel_mci_dma {
57#ifdef CONFIG_MMC_ATMELMCI_DMA
58 struct dma_client client;
59 struct dma_chan *chan;
60 struct dma_async_tx_descriptor *data_desc;
61#endif
44}; 62};
45 63
64/**
65 * struct atmel_mci - MMC controller state shared between all slots
66 * @lock: Spinlock protecting the queue and associated data.
67 * @regs: Pointer to MMIO registers.
68 * @sg: Scatterlist entry currently being processed by PIO code, if any.
69 * @pio_offset: Offset into the current scatterlist entry.
70 * @cur_slot: The slot which is currently using the controller.
71 * @mrq: The request currently being processed on @cur_slot,
72 * or NULL if the controller is idle.
73 * @cmd: The command currently being sent to the card, or NULL.
74 * @data: The data currently being transferred, or NULL if no data
75 * transfer is in progress.
76 * @dma: DMA client state.
77 * @data_chan: DMA channel being used for the current data transfer.
78 * @cmd_status: Snapshot of SR taken upon completion of the current
79 * command. Only valid when EVENT_CMD_COMPLETE is pending.
80 * @data_status: Snapshot of SR taken upon completion of the current
81 * data transfer. Only valid when EVENT_DATA_COMPLETE or
82 * EVENT_DATA_ERROR is pending.
83 * @stop_cmdr: Value to be loaded into CMDR when the stop command is
84 * to be sent.
85 * @tasklet: Tasklet running the request state machine.
86 * @pending_events: Bitmask of events flagged by the interrupt handler
87 * to be processed by the tasklet.
88 * @completed_events: Bitmask of events which the state machine has
89 * processed.
90 * @state: Tasklet state.
91 * @queue: List of slots waiting for access to the controller.
92 * @need_clock_update: Update the clock rate before the next request.
93 * @need_reset: Reset controller before next request.
94 * @mode_reg: Value of the MR register.
95 * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
96 * rate and timeout calculations.
97 * @mapbase: Physical address of the MMIO registers.
98 * @mck: The peripheral bus clock hooked up to the MMC controller.
99 * @pdev: Platform device associated with the MMC controller.
100 * @slot: Slots sharing this MMC controller.
101 *
102 * Locking
103 * =======
104 *
105 * @lock is a softirq-safe spinlock protecting @queue as well as
106 * @cur_slot, @mrq and @state. These must always be updated
107 * at the same time while holding @lock.
108 *
109 * @lock also protects mode_reg and need_clock_update since these are
110 * used to synchronize mode register updates with the queue
111 * processing.
112 *
113 * The @mrq field of struct atmel_mci_slot is also protected by @lock,
114 * and must always be written at the same time as the slot is added to
115 * @queue.
116 *
117 * @pending_events and @completed_events are accessed using atomic bit
118 * operations, so they don't need any locking.
119 *
120 * None of the fields touched by the interrupt handler need any
121 * locking. However, ordering is important: Before EVENT_DATA_ERROR or
122 * EVENT_DATA_COMPLETE is set in @pending_events, all data-related
123 * interrupts must be disabled and @data_status updated with a
124 * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the
125 * CMDRDY interupt must be disabled and @cmd_status updated with a
126 * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
127 * bytes_xfered field of @data must be written. This is ensured by
128 * using barriers.
129 */
46struct atmel_mci { 130struct atmel_mci {
47 struct mmc_host *mmc; 131 spinlock_t lock;
48 void __iomem *regs; 132 void __iomem *regs;
49 133
50 struct scatterlist *sg; 134 struct scatterlist *sg;
51 unsigned int pio_offset; 135 unsigned int pio_offset;
52 136
137 struct atmel_mci_slot *cur_slot;
53 struct mmc_request *mrq; 138 struct mmc_request *mrq;
54 struct mmc_command *cmd; 139 struct mmc_command *cmd;
55 struct mmc_data *data; 140 struct mmc_data *data;
56 141
142 struct atmel_mci_dma dma;
143 struct dma_chan *data_chan;
144
57 u32 cmd_status; 145 u32 cmd_status;
58 u32 data_status; 146 u32 data_status;
59 u32 stop_status;
60 u32 stop_cmdr; 147 u32 stop_cmdr;
61 148
62 u32 mode_reg;
63 u32 sdc_reg;
64
65 struct tasklet_struct tasklet; 149 struct tasklet_struct tasklet;
66 unsigned long pending_events; 150 unsigned long pending_events;
67 unsigned long completed_events; 151 unsigned long completed_events;
152 enum atmel_mci_state state;
153 struct list_head queue;
68 154
69 int present; 155 bool need_clock_update;
70 int detect_pin; 156 bool need_reset;
71 int wp_pin; 157 u32 mode_reg;
72
73 /* For detect pin debouncing */
74 struct timer_list detect_timer;
75
76 unsigned long bus_hz; 158 unsigned long bus_hz;
77 unsigned long mapbase; 159 unsigned long mapbase;
78 struct clk *mck; 160 struct clk *mck;
79 struct platform_device *pdev; 161 struct platform_device *pdev;
162
163 struct atmel_mci_slot *slot[ATMEL_MCI_MAX_NR_SLOTS];
164};
165
166/**
167 * struct atmel_mci_slot - MMC slot state
168 * @mmc: The mmc_host representing this slot.
169 * @host: The MMC controller this slot is using.
170 * @sdc_reg: Value of SDCR to be written before using this slot.
171 * @mrq: mmc_request currently being processed or waiting to be
172 * processed, or NULL when the slot is idle.
173 * @queue_node: List node for placing this node in the @queue list of
174 * &struct atmel_mci.
175 * @clock: Clock rate configured by set_ios(). Protected by host->lock.
176 * @flags: Random state bits associated with the slot.
177 * @detect_pin: GPIO pin used for card detection, or negative if not
178 * available.
179 * @wp_pin: GPIO pin used for card write protect sending, or negative
180 * if not available.
181 * @detect_timer: Timer used for debouncing @detect_pin interrupts.
182 */
183struct atmel_mci_slot {
184 struct mmc_host *mmc;
185 struct atmel_mci *host;
186
187 u32 sdc_reg;
188
189 struct mmc_request *mrq;
190 struct list_head queue_node;
191
192 unsigned int clock;
193 unsigned long flags;
194#define ATMCI_CARD_PRESENT 0
195#define ATMCI_CARD_NEED_INIT 1
196#define ATMCI_SHUTDOWN 2
197
198 int detect_pin;
199 int wp_pin;
200
201 struct timer_list detect_timer;
80}; 202};
81 203
82#define atmci_is_completed(host, event) \
83 test_bit(event, &host->completed_events)
84#define atmci_test_and_clear_pending(host, event) \ 204#define atmci_test_and_clear_pending(host, event) \
85 test_and_clear_bit(event, &host->pending_events) 205 test_and_clear_bit(event, &host->pending_events)
86#define atmci_test_and_set_completed(host, event) \
87 test_and_set_bit(event, &host->completed_events)
88#define atmci_set_completed(host, event) \ 206#define atmci_set_completed(host, event) \
89 set_bit(event, &host->completed_events) 207 set_bit(event, &host->completed_events)
90#define atmci_set_pending(host, event) \ 208#define atmci_set_pending(host, event) \
91 set_bit(event, &host->pending_events) 209 set_bit(event, &host->pending_events)
92#define atmci_clear_pending(host, event) \
93 clear_bit(event, &host->pending_events)
94 210
95/* 211/*
96 * The debugfs stuff below is mostly optimized away when 212 * The debugfs stuff below is mostly optimized away when
@@ -98,14 +214,15 @@ struct atmel_mci {
98 */ 214 */
99static int atmci_req_show(struct seq_file *s, void *v) 215static int atmci_req_show(struct seq_file *s, void *v)
100{ 216{
101 struct atmel_mci *host = s->private; 217 struct atmel_mci_slot *slot = s->private;
102 struct mmc_request *mrq = host->mrq; 218 struct mmc_request *mrq;
103 struct mmc_command *cmd; 219 struct mmc_command *cmd;
104 struct mmc_command *stop; 220 struct mmc_command *stop;
105 struct mmc_data *data; 221 struct mmc_data *data;
106 222
107 /* Make sure we get a consistent snapshot */ 223 /* Make sure we get a consistent snapshot */
108 spin_lock_irq(&host->mmc->lock); 224 spin_lock_bh(&slot->host->lock);
225 mrq = slot->mrq;
109 226
110 if (mrq) { 227 if (mrq) {
111 cmd = mrq->cmd; 228 cmd = mrq->cmd;
@@ -130,7 +247,7 @@ static int atmci_req_show(struct seq_file *s, void *v)
130 stop->resp[2], stop->error); 247 stop->resp[2], stop->error);
131 } 248 }
132 249
133 spin_unlock_irq(&host->mmc->lock); 250 spin_unlock_bh(&slot->host->lock);
134 251
135 return 0; 252 return 0;
136} 253}
@@ -193,12 +310,16 @@ static int atmci_regs_show(struct seq_file *s, void *v)
193 if (!buf) 310 if (!buf)
194 return -ENOMEM; 311 return -ENOMEM;
195 312
196 /* Grab a more or less consistent snapshot */ 313 /*
197 spin_lock_irq(&host->mmc->lock); 314 * Grab a more or less consistent snapshot. Note that we're
315 * not disabling interrupts, so IMR and SR may not be
316 * consistent.
317 */
318 spin_lock_bh(&host->lock);
198 clk_enable(host->mck); 319 clk_enable(host->mck);
199 memcpy_fromio(buf, host->regs, MCI_REGS_SIZE); 320 memcpy_fromio(buf, host->regs, MCI_REGS_SIZE);
200 clk_disable(host->mck); 321 clk_disable(host->mck);
201 spin_unlock_irq(&host->mmc->lock); 322 spin_unlock_bh(&host->lock);
202 323
203 seq_printf(s, "MR:\t0x%08x%s%s CLKDIV=%u\n", 324 seq_printf(s, "MR:\t0x%08x%s%s CLKDIV=%u\n",
204 buf[MCI_MR / 4], 325 buf[MCI_MR / 4],
@@ -236,13 +357,13 @@ static const struct file_operations atmci_regs_fops = {
236 .release = single_release, 357 .release = single_release,
237}; 358};
238 359
239static void atmci_init_debugfs(struct atmel_mci *host) 360static void atmci_init_debugfs(struct atmel_mci_slot *slot)
240{ 361{
241 struct mmc_host *mmc; 362 struct mmc_host *mmc = slot->mmc;
242 struct dentry *root; 363 struct atmel_mci *host = slot->host;
243 struct dentry *node; 364 struct dentry *root;
365 struct dentry *node;
244 366
245 mmc = host->mmc;
246 root = mmc->debugfs_root; 367 root = mmc->debugfs_root;
247 if (!root) 368 if (!root)
248 return; 369 return;
@@ -254,7 +375,11 @@ static void atmci_init_debugfs(struct atmel_mci *host)
254 if (!node) 375 if (!node)
255 goto err; 376 goto err;
256 377
257 node = debugfs_create_file("req", S_IRUSR, root, host, &atmci_req_fops); 378 node = debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops);
379 if (!node)
380 goto err;
381
382 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
258 if (!node) 383 if (!node)
259 goto err; 384 goto err;
260 385
@@ -271,25 +396,7 @@ static void atmci_init_debugfs(struct atmel_mci *host)
271 return; 396 return;
272 397
273err: 398err:
274 dev_err(&host->pdev->dev, 399 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
275 "failed to initialize debugfs for controller\n");
276}
277
278static void atmci_enable(struct atmel_mci *host)
279{
280 clk_enable(host->mck);
281 mci_writel(host, CR, MCI_CR_MCIEN);
282 mci_writel(host, MR, host->mode_reg);
283 mci_writel(host, SDCR, host->sdc_reg);
284}
285
286static void atmci_disable(struct atmel_mci *host)
287{
288 mci_writel(host, CR, MCI_CR_SWRST);
289
290 /* Stall until write is complete, then disable the bus clock */
291 mci_readl(host, SR);
292 clk_disable(host->mck);
293} 400}
294 401
295static inline unsigned int ns_to_clocks(struct atmel_mci *host, 402static inline unsigned int ns_to_clocks(struct atmel_mci *host,
@@ -299,7 +406,7 @@ static inline unsigned int ns_to_clocks(struct atmel_mci *host,
299} 406}
300 407
301static void atmci_set_timeout(struct atmel_mci *host, 408static void atmci_set_timeout(struct atmel_mci *host,
302 struct mmc_data *data) 409 struct atmel_mci_slot *slot, struct mmc_data *data)
303{ 410{
304 static unsigned dtomul_to_shift[] = { 411 static unsigned dtomul_to_shift[] = {
305 0, 4, 7, 8, 10, 12, 16, 20 412 0, 4, 7, 8, 10, 12, 16, 20
@@ -322,7 +429,7 @@ static void atmci_set_timeout(struct atmel_mci *host,
322 dtocyc = 15; 429 dtocyc = 15;
323 } 430 }
324 431
325 dev_vdbg(&host->mmc->class_dev, "setting timeout to %u cycles\n", 432 dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n",
326 dtocyc << dtomul_to_shift[dtomul]); 433 dtocyc << dtomul_to_shift[dtomul]);
327 mci_writel(host, DTOR, (MCI_DTOMUL(dtomul) | MCI_DTOCYC(dtocyc))); 434 mci_writel(host, DTOR, (MCI_DTOMUL(dtomul) | MCI_DTOCYC(dtocyc)));
328} 435}
@@ -375,15 +482,12 @@ static u32 atmci_prepare_command(struct mmc_host *mmc,
375} 482}
376 483
377static void atmci_start_command(struct atmel_mci *host, 484static void atmci_start_command(struct atmel_mci *host,
378 struct mmc_command *cmd, 485 struct mmc_command *cmd, u32 cmd_flags)
379 u32 cmd_flags)
380{ 486{
381 /* Must read host->cmd after testing event flags */
382 smp_rmb();
383 WARN_ON(host->cmd); 487 WARN_ON(host->cmd);
384 host->cmd = cmd; 488 host->cmd = cmd;
385 489
386 dev_vdbg(&host->mmc->class_dev, 490 dev_vdbg(&host->pdev->dev,
387 "start command: ARGR=0x%08x CMDR=0x%08x\n", 491 "start command: ARGR=0x%08x CMDR=0x%08x\n",
388 cmd->arg, cmd_flags); 492 cmd->arg, cmd_flags);
389 493
@@ -391,34 +495,157 @@ static void atmci_start_command(struct atmel_mci *host,
391 mci_writel(host, CMDR, cmd_flags); 495 mci_writel(host, CMDR, cmd_flags);
392} 496}
393 497
394static void send_stop_cmd(struct mmc_host *mmc, struct mmc_data *data) 498static void send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
395{ 499{
396 struct atmel_mci *host = mmc_priv(mmc);
397
398 atmci_start_command(host, data->stop, host->stop_cmdr); 500 atmci_start_command(host, data->stop, host->stop_cmdr);
399 mci_writel(host, IER, MCI_CMDRDY); 501 mci_writel(host, IER, MCI_CMDRDY);
400} 502}
401 503
402static void atmci_request_end(struct mmc_host *mmc, struct mmc_request *mrq) 504#ifdef CONFIG_MMC_ATMELMCI_DMA
505static void atmci_dma_cleanup(struct atmel_mci *host)
403{ 506{
404 struct atmel_mci *host = mmc_priv(mmc); 507 struct mmc_data *data = host->data;
405 508
406 WARN_ON(host->cmd || host->data); 509 dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
407 host->mrq = NULL; 510 ((data->flags & MMC_DATA_WRITE)
511 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
512}
513
514static void atmci_stop_dma(struct atmel_mci *host)
515{
516 struct dma_chan *chan = host->data_chan;
517
518 if (chan) {
519 chan->device->device_terminate_all(chan);
520 atmci_dma_cleanup(host);
521 } else {
522 /* Data transfer was stopped by the interrupt handler */
523 atmci_set_pending(host, EVENT_XFER_COMPLETE);
524 mci_writel(host, IER, MCI_NOTBUSY);
525 }
526}
527
528/* This function is called by the DMA driver from tasklet context. */
529static void atmci_dma_complete(void *arg)
530{
531 struct atmel_mci *host = arg;
532 struct mmc_data *data = host->data;
533
534 dev_vdbg(&host->pdev->dev, "DMA complete\n");
535
536 atmci_dma_cleanup(host);
537
538 /*
539 * If the card was removed, data will be NULL. No point trying
540 * to send the stop command or waiting for NBUSY in this case.
541 */
542 if (data) {
543 atmci_set_pending(host, EVENT_XFER_COMPLETE);
544 tasklet_schedule(&host->tasklet);
545
546 /*
547 * Regardless of what the documentation says, we have
548 * to wait for NOTBUSY even after block read
549 * operations.
550 *
551 * When the DMA transfer is complete, the controller
552 * may still be reading the CRC from the card, i.e.
553 * the data transfer is still in progress and we
554 * haven't seen all the potential error bits yet.
555 *
556 * The interrupt handler will schedule a different
557 * tasklet to finish things up when the data transfer
558 * is completely done.
559 *
560 * We may not complete the mmc request here anyway
561 * because the mmc layer may call back and cause us to
562 * violate the "don't submit new operations from the
563 * completion callback" rule of the dma engine
564 * framework.
565 */
566 mci_writel(host, IER, MCI_NOTBUSY);
567 }
568}
569
570static int
571atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
572{
573 struct dma_chan *chan;
574 struct dma_async_tx_descriptor *desc;
575 struct scatterlist *sg;
576 unsigned int i;
577 enum dma_data_direction direction;
578
579 /*
580 * We don't do DMA on "complex" transfers, i.e. with
581 * non-word-aligned buffers or lengths. Also, we don't bother
582 * with all the DMA setup overhead for short transfers.
583 */
584 if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD)
585 return -EINVAL;
586 if (data->blksz & 3)
587 return -EINVAL;
588
589 for_each_sg(data->sg, sg, data->sg_len, i) {
590 if (sg->offset & 3 || sg->length & 3)
591 return -EINVAL;
592 }
593
594 /* If we don't have a channel, we can't do DMA */
595 chan = host->dma.chan;
596 if (chan) {
597 dma_chan_get(chan);
598 host->data_chan = chan;
599 }
600
601 if (!chan)
602 return -ENODEV;
603
604 if (data->flags & MMC_DATA_READ)
605 direction = DMA_FROM_DEVICE;
606 else
607 direction = DMA_TO_DEVICE;
608
609 desc = chan->device->device_prep_slave_sg(chan,
610 data->sg, data->sg_len, direction,
611 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
612 if (!desc)
613 return -ENOMEM;
408 614
409 atmci_disable(host); 615 host->dma.data_desc = desc;
616 desc->callback = atmci_dma_complete;
617 desc->callback_param = host;
618 desc->tx_submit(desc);
410 619
411 mmc_request_done(mmc, mrq); 620 /* Go! */
621 chan->device->device_issue_pending(chan);
622
623 return 0;
624}
625
626#else /* CONFIG_MMC_ATMELMCI_DMA */
627
628static int atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
629{
630 return -ENOSYS;
412} 631}
413 632
633static void atmci_stop_dma(struct atmel_mci *host)
634{
635 /* Data transfer was stopped by the interrupt handler */
636 atmci_set_pending(host, EVENT_XFER_COMPLETE);
637 mci_writel(host, IER, MCI_NOTBUSY);
638}
639
640#endif /* CONFIG_MMC_ATMELMCI_DMA */
641
414/* 642/*
415 * Returns a mask of interrupt flags to be enabled after the whole 643 * Returns a mask of interrupt flags to be enabled after the whole
416 * request has been prepared. 644 * request has been prepared.
417 */ 645 */
418static u32 atmci_submit_data(struct mmc_host *mmc, struct mmc_data *data) 646static u32 atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
419{ 647{
420 struct atmel_mci *host = mmc_priv(mmc); 648 u32 iflags;
421 u32 iflags;
422 649
423 data->error = -EINPROGRESS; 650 data->error = -EINPROGRESS;
424 651
@@ -426,77 +653,89 @@ static u32 atmci_submit_data(struct mmc_host *mmc, struct mmc_data *data)
426 host->sg = NULL; 653 host->sg = NULL;
427 host->data = data; 654 host->data = data;
428 655
429 dev_vdbg(&mmc->class_dev, "BLKR=0x%08x\n",
430 MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz));
431
432 iflags = ATMCI_DATA_ERROR_FLAGS; 656 iflags = ATMCI_DATA_ERROR_FLAGS;
433 host->sg = data->sg; 657 if (atmci_submit_data_dma(host, data)) {
434 host->pio_offset = 0; 658 host->data_chan = NULL;
435 if (data->flags & MMC_DATA_READ) 659
436 iflags |= MCI_RXRDY; 660 /*
437 else 661 * Errata: MMC data write operation with less than 12
438 iflags |= MCI_TXRDY; 662 * bytes is impossible.
663 *
664 * Errata: MCI Transmit Data Register (TDR) FIFO
665 * corruption when length is not multiple of 4.
666 */
667 if (data->blocks * data->blksz < 12
668 || (data->blocks * data->blksz) & 3)
669 host->need_reset = true;
670
671 host->sg = data->sg;
672 host->pio_offset = 0;
673 if (data->flags & MMC_DATA_READ)
674 iflags |= MCI_RXRDY;
675 else
676 iflags |= MCI_TXRDY;
677 }
439 678
440 return iflags; 679 return iflags;
441} 680}
442 681
443static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq) 682static void atmci_start_request(struct atmel_mci *host,
683 struct atmel_mci_slot *slot)
444{ 684{
445 struct atmel_mci *host = mmc_priv(mmc); 685 struct mmc_request *mrq;
446 struct mmc_data *data;
447 struct mmc_command *cmd; 686 struct mmc_command *cmd;
687 struct mmc_data *data;
448 u32 iflags; 688 u32 iflags;
449 u32 cmdflags = 0; 689 u32 cmdflags;
450
451 iflags = mci_readl(host, IMR);
452 if (iflags)
453 dev_warn(&mmc->class_dev, "WARNING: IMR=0x%08x\n",
454 mci_readl(host, IMR));
455
456 WARN_ON(host->mrq != NULL);
457
458 /*
459 * We may "know" the card is gone even though there's still an
460 * electrical connection. If so, we really need to communicate
461 * this to the MMC core since there won't be any more
462 * interrupts as the card is completely removed. Otherwise,
463 * the MMC core might believe the card is still there even
464 * though the card was just removed very slowly.
465 */
466 if (!host->present) {
467 mrq->cmd->error = -ENOMEDIUM;
468 mmc_request_done(mmc, mrq);
469 return;
470 }
471 690
691 mrq = slot->mrq;
692 host->cur_slot = slot;
472 host->mrq = mrq; 693 host->mrq = mrq;
694
473 host->pending_events = 0; 695 host->pending_events = 0;
474 host->completed_events = 0; 696 host->completed_events = 0;
697 host->data_status = 0;
475 698
476 atmci_enable(host); 699 if (host->need_reset) {
700 mci_writel(host, CR, MCI_CR_SWRST);
701 mci_writel(host, CR, MCI_CR_MCIEN);
702 mci_writel(host, MR, host->mode_reg);
703 host->need_reset = false;
704 }
705 mci_writel(host, SDCR, slot->sdc_reg);
477 706
478 /* We don't support multiple blocks of weird lengths. */ 707 iflags = mci_readl(host, IMR);
708 if (iflags)
709 dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
710 iflags);
711
712 if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {
713 /* Send init sequence (74 clock cycles) */
714 mci_writel(host, CMDR, MCI_CMDR_SPCMD_INIT);
715 while (!(mci_readl(host, SR) & MCI_CMDRDY))
716 cpu_relax();
717 }
479 data = mrq->data; 718 data = mrq->data;
480 if (data) { 719 if (data) {
481 if (data->blocks > 1 && data->blksz & 3) 720 atmci_set_timeout(host, slot, data);
482 goto fail;
483 atmci_set_timeout(host, data);
484 721
485 /* Must set block count/size before sending command */ 722 /* Must set block count/size before sending command */
486 mci_writel(host, BLKR, MCI_BCNT(data->blocks) 723 mci_writel(host, BLKR, MCI_BCNT(data->blocks)
487 | MCI_BLKLEN(data->blksz)); 724 | MCI_BLKLEN(data->blksz));
725 dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n",
726 MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz));
488 } 727 }
489 728
490 iflags = MCI_CMDRDY; 729 iflags = MCI_CMDRDY;
491 cmd = mrq->cmd; 730 cmd = mrq->cmd;
492 cmdflags = atmci_prepare_command(mmc, cmd); 731 cmdflags = atmci_prepare_command(slot->mmc, cmd);
493 atmci_start_command(host, cmd, cmdflags); 732 atmci_start_command(host, cmd, cmdflags);
494 733
495 if (data) 734 if (data)
496 iflags |= atmci_submit_data(mmc, data); 735 iflags |= atmci_submit_data(host, data);
497 736
498 if (mrq->stop) { 737 if (mrq->stop) {
499 host->stop_cmdr = atmci_prepare_command(mmc, mrq->stop); 738 host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
500 host->stop_cmdr |= MCI_CMDR_STOP_XFER; 739 host->stop_cmdr |= MCI_CMDR_STOP_XFER;
501 if (!(data->flags & MMC_DATA_WRITE)) 740 if (!(data->flags & MMC_DATA_WRITE))
502 host->stop_cmdr |= MCI_CMDR_TRDIR_READ; 741 host->stop_cmdr |= MCI_CMDR_TRDIR_READ;
@@ -513,59 +752,156 @@ static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
513 * prepared yet.) 752 * prepared yet.)
514 */ 753 */
515 mci_writel(host, IER, iflags); 754 mci_writel(host, IER, iflags);
755}
516 756
517 return; 757static void atmci_queue_request(struct atmel_mci *host,
758 struct atmel_mci_slot *slot, struct mmc_request *mrq)
759{
760 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
761 host->state);
762
763 spin_lock_bh(&host->lock);
764 slot->mrq = mrq;
765 if (host->state == STATE_IDLE) {
766 host->state = STATE_SENDING_CMD;
767 atmci_start_request(host, slot);
768 } else {
769 list_add_tail(&slot->queue_node, &host->queue);
770 }
771 spin_unlock_bh(&host->lock);
772}
518 773
519fail: 774static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
520 atmci_disable(host); 775{
521 host->mrq = NULL; 776 struct atmel_mci_slot *slot = mmc_priv(mmc);
522 mrq->cmd->error = -EINVAL; 777 struct atmel_mci *host = slot->host;
523 mmc_request_done(mmc, mrq); 778 struct mmc_data *data;
779
780 WARN_ON(slot->mrq);
781
782 /*
783 * We may "know" the card is gone even though there's still an
784 * electrical connection. If so, we really need to communicate
785 * this to the MMC core since there won't be any more
786 * interrupts as the card is completely removed. Otherwise,
787 * the MMC core might believe the card is still there even
788 * though the card was just removed very slowly.
789 */
790 if (!test_bit(ATMCI_CARD_PRESENT, &slot->flags)) {
791 mrq->cmd->error = -ENOMEDIUM;
792 mmc_request_done(mmc, mrq);
793 return;
794 }
795
796 /* We don't support multiple blocks of weird lengths. */
797 data = mrq->data;
798 if (data && data->blocks > 1 && data->blksz & 3) {
799 mrq->cmd->error = -EINVAL;
800 mmc_request_done(mmc, mrq);
801 }
802
803 atmci_queue_request(host, slot, mrq);
524} 804}
525 805
526static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 806static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
527{ 807{
528 struct atmel_mci *host = mmc_priv(mmc); 808 struct atmel_mci_slot *slot = mmc_priv(mmc);
809 struct atmel_mci *host = slot->host;
810 unsigned int i;
811
812 slot->sdc_reg &= ~MCI_SDCBUS_MASK;
813 switch (ios->bus_width) {
814 case MMC_BUS_WIDTH_1:
815 slot->sdc_reg |= MCI_SDCBUS_1BIT;
816 break;
817 case MMC_BUS_WIDTH_4:
818 slot->sdc_reg = MCI_SDCBUS_4BIT;
819 break;
820 }
529 821
530 if (ios->clock) { 822 if (ios->clock) {
823 unsigned int clock_min = ~0U;
531 u32 clkdiv; 824 u32 clkdiv;
532 825
533 /* Set clock rate */ 826 spin_lock_bh(&host->lock);
534 clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * ios->clock) - 1; 827 if (!host->mode_reg) {
828 clk_enable(host->mck);
829 mci_writel(host, CR, MCI_CR_SWRST);
830 mci_writel(host, CR, MCI_CR_MCIEN);
831 }
832
833 /*
834 * Use mirror of ios->clock to prevent race with mmc
835 * core ios update when finding the minimum.
836 */
837 slot->clock = ios->clock;
838 for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
839 if (host->slot[i] && host->slot[i]->clock
840 && host->slot[i]->clock < clock_min)
841 clock_min = host->slot[i]->clock;
842 }
843
844 /* Calculate clock divider */
845 clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1;
535 if (clkdiv > 255) { 846 if (clkdiv > 255) {
536 dev_warn(&mmc->class_dev, 847 dev_warn(&mmc->class_dev,
537 "clock %u too slow; using %lu\n", 848 "clock %u too slow; using %lu\n",
538 ios->clock, host->bus_hz / (2 * 256)); 849 clock_min, host->bus_hz / (2 * 256));
539 clkdiv = 255; 850 clkdiv = 255;
540 } 851 }
541 852
853 /*
854 * WRPROOF and RDPROOF prevent overruns/underruns by
855 * stopping the clock when the FIFO is full/empty.
856 * This state is not expected to last for long.
857 */
542 host->mode_reg = MCI_MR_CLKDIV(clkdiv) | MCI_MR_WRPROOF 858 host->mode_reg = MCI_MR_CLKDIV(clkdiv) | MCI_MR_WRPROOF
543 | MCI_MR_RDPROOF; 859 | MCI_MR_RDPROOF;
544 }
545 860
546 switch (ios->bus_width) { 861 if (list_empty(&host->queue))
547 case MMC_BUS_WIDTH_1: 862 mci_writel(host, MR, host->mode_reg);
548 host->sdc_reg = 0; 863 else
549 break; 864 host->need_clock_update = true;
550 case MMC_BUS_WIDTH_4: 865
551 host->sdc_reg = MCI_SDCBUS_4BIT; 866 spin_unlock_bh(&host->lock);
552 break; 867 } else {
868 bool any_slot_active = false;
869
870 spin_lock_bh(&host->lock);
871 slot->clock = 0;
872 for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
873 if (host->slot[i] && host->slot[i]->clock) {
874 any_slot_active = true;
875 break;
876 }
877 }
878 if (!any_slot_active) {
879 mci_writel(host, CR, MCI_CR_MCIDIS);
880 if (host->mode_reg) {
881 mci_readl(host, MR);
882 clk_disable(host->mck);
883 }
884 host->mode_reg = 0;
885 }
886 spin_unlock_bh(&host->lock);
553 } 887 }
554 888
555 switch (ios->power_mode) { 889 switch (ios->power_mode) {
556 case MMC_POWER_ON: 890 case MMC_POWER_UP:
557 /* Send init sequence (74 clock cycles) */ 891 set_bit(ATMCI_CARD_NEED_INIT, &slot->flags);
558 atmci_enable(host);
559 mci_writel(host, CMDR, MCI_CMDR_SPCMD_INIT);
560 while (!(mci_readl(host, SR) & MCI_CMDRDY))
561 cpu_relax();
562 atmci_disable(host);
563 break; 892 break;
564 default: 893 default:
565 /* 894 /*
566 * TODO: None of the currently available AVR32-based 895 * TODO: None of the currently available AVR32-based
567 * boards allow MMC power to be turned off. Implement 896 * boards allow MMC power to be turned off. Implement
568 * power control when this can be tested properly. 897 * power control when this can be tested properly.
898 *
899 * We also need to hook this into the clock management
900 * somehow so that newly inserted cards aren't
901 * subjected to a fast clock before we have a chance
902 * to figure out what the maximum rate is. Currently,
903 * there's no way to avoid this, and there never will
904 * be for boards that don't support power control.
569 */ 905 */
570 break; 906 break;
571 } 907 }
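The set_ios() hunk above picks the slowest clock requested by any active slot and converts it into the CLKDIV field, clamping at 255. A standalone sketch of that divider arithmetic with an assumed 66 MHz peripheral (MCK) clock and an assumed 25 MHz request:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long bus_hz    = 66000000; /* assumed peripheral clock (MCK) rate */
	unsigned int  clock_min = 25000000; /* slowest rate requested by any slot  */
	unsigned long clkdiv;

	/* f_mci = bus_hz / (2 * (clkdiv + 1)), so round the divider up to
	 * stay at or below the requested rate. */
	clkdiv = DIV_ROUND_UP(bus_hz, 2UL * clock_min) - 1;
	if (clkdiv > 255) {
		printf("clock %u too slow; using %lu\n",
		       clock_min, bus_hz / (2 * 256));
		clkdiv = 255;
	}

	printf("CLKDIV=%lu -> actual MCI clock %lu Hz\n",
	       clkdiv, bus_hz / (2 * (clkdiv + 1)));
	return 0;
}

Writing the new MR value is deferred (need_clock_update) when other requests are queued, which is why the divider is derived from all slots' mirrored clock settings rather than the single ios->clock being set.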
@@ -573,31 +909,82 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
573 909
574static int atmci_get_ro(struct mmc_host *mmc) 910static int atmci_get_ro(struct mmc_host *mmc)
575{ 911{
576 int read_only = 0; 912 int read_only = -ENOSYS;
577 struct atmel_mci *host = mmc_priv(mmc); 913 struct atmel_mci_slot *slot = mmc_priv(mmc);
578 914
579 if (gpio_is_valid(host->wp_pin)) { 915 if (gpio_is_valid(slot->wp_pin)) {
580 read_only = gpio_get_value(host->wp_pin); 916 read_only = gpio_get_value(slot->wp_pin);
581 dev_dbg(&mmc->class_dev, "card is %s\n", 917 dev_dbg(&mmc->class_dev, "card is %s\n",
582 read_only ? "read-only" : "read-write"); 918 read_only ? "read-only" : "read-write");
583 } else {
584 dev_dbg(&mmc->class_dev,
585 "no pin for checking read-only switch."
586 " Assuming write-enable.\n");
587 } 919 }
588 920
589 return read_only; 921 return read_only;
590} 922}
591 923
592static struct mmc_host_ops atmci_ops = { 924static int atmci_get_cd(struct mmc_host *mmc)
925{
926 int present = -ENOSYS;
927 struct atmel_mci_slot *slot = mmc_priv(mmc);
928
929 if (gpio_is_valid(slot->detect_pin)) {
930 present = !gpio_get_value(slot->detect_pin);
931 dev_dbg(&mmc->class_dev, "card is %spresent\n",
932 present ? "" : "not ");
933 }
934
935 return present;
936}
937
938static const struct mmc_host_ops atmci_ops = {
593 .request = atmci_request, 939 .request = atmci_request,
594 .set_ios = atmci_set_ios, 940 .set_ios = atmci_set_ios,
595 .get_ro = atmci_get_ro, 941 .get_ro = atmci_get_ro,
942 .get_cd = atmci_get_cd,
596}; 943};
597 944
945/* Called with host->lock held */
946static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
947 __releases(&host->lock)
948 __acquires(&host->lock)
949{
950 struct atmel_mci_slot *slot = NULL;
951 struct mmc_host *prev_mmc = host->cur_slot->mmc;
952
953 WARN_ON(host->cmd || host->data);
954
955 /*
956 * Update the MMC clock rate if necessary. This may be
957 * necessary if set_ios() is called when a different slot is
958 * busy transfering data.
959 */
960 if (host->need_clock_update)
961 mci_writel(host, MR, host->mode_reg);
962
963 host->cur_slot->mrq = NULL;
964 host->mrq = NULL;
965 if (!list_empty(&host->queue)) {
966 slot = list_entry(host->queue.next,
967 struct atmel_mci_slot, queue_node);
968 list_del(&slot->queue_node);
969 dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
970 mmc_hostname(slot->mmc));
971 host->state = STATE_SENDING_CMD;
972 atmci_start_request(host, slot);
973 } else {
974 dev_vdbg(&host->pdev->dev, "list empty\n");
975 host->state = STATE_IDLE;
976 }
977
978 spin_unlock(&host->lock);
979 mmc_request_done(prev_mmc, mrq);
980 spin_lock(&host->lock);
981}
982
598static void atmci_command_complete(struct atmel_mci *host, 983static void atmci_command_complete(struct atmel_mci *host,
599 struct mmc_command *cmd, u32 status) 984 struct mmc_command *cmd)
600{ 985{
986 u32 status = host->cmd_status;
987
601 /* Read the response from the card (up to 16 bytes) */ 988 /* Read the response from the card (up to 16 bytes) */
602 cmd->resp[0] = mci_readl(host, RSPR); 989 cmd->resp[0] = mci_readl(host, RSPR);
603 cmd->resp[1] = mci_readl(host, RSPR); 990 cmd->resp[1] = mci_readl(host, RSPR);
@@ -614,11 +1001,12 @@ static void atmci_command_complete(struct atmel_mci *host,
614 cmd->error = 0; 1001 cmd->error = 0;
615 1002
616 if (cmd->error) { 1003 if (cmd->error) {
617 dev_dbg(&host->mmc->class_dev, 1004 dev_dbg(&host->pdev->dev,
618 "command error: status=0x%08x\n", status); 1005 "command error: status=0x%08x\n", status);
619 1006
620 if (cmd->data) { 1007 if (cmd->data) {
621 host->data = NULL; 1008 host->data = NULL;
1009 atmci_stop_dma(host);
622 mci_writel(host, IDR, MCI_NOTBUSY 1010 mci_writel(host, IDR, MCI_NOTBUSY
623 | MCI_TXRDY | MCI_RXRDY 1011 | MCI_TXRDY | MCI_RXRDY
624 | ATMCI_DATA_ERROR_FLAGS); 1012 | ATMCI_DATA_ERROR_FLAGS);
@@ -628,146 +1016,222 @@ static void atmci_command_complete(struct atmel_mci *host,
628 1016
629static void atmci_detect_change(unsigned long data) 1017static void atmci_detect_change(unsigned long data)
630{ 1018{
631 struct atmel_mci *host = (struct atmel_mci *)data; 1019 struct atmel_mci_slot *slot = (struct atmel_mci_slot *)data;
632 struct mmc_request *mrq = host->mrq; 1020 bool present;
633 int present; 1021 bool present_old;
634 1022
635 /* 1023 /*
636 * atmci_remove() sets detect_pin to -1 before freeing the 1024 * atmci_cleanup_slot() sets the ATMCI_SHUTDOWN flag before
637 * interrupt. We must not re-enable the interrupt if it has 1025 * freeing the interrupt. We must not re-enable the interrupt
638 * been freed. 1026 * if it has been freed, and if we're shutting down, it
1027 * doesn't really matter whether the card is present or not.
639 */ 1028 */
640 smp_rmb(); 1029 smp_rmb();
641 if (!gpio_is_valid(host->detect_pin)) 1030 if (test_bit(ATMCI_SHUTDOWN, &slot->flags))
642 return; 1031 return;
643 1032
644 enable_irq(gpio_to_irq(host->detect_pin)); 1033 enable_irq(gpio_to_irq(slot->detect_pin));
645 present = !gpio_get_value(host->detect_pin); 1034 present = !gpio_get_value(slot->detect_pin);
1035 present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags);
1036
1037 dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n",
1038 present, present_old);
646 1039
647 dev_vdbg(&host->pdev->dev, "detect change: %d (was %d)\n", 1040 if (present != present_old) {
648 present, host->present); 1041 struct atmel_mci *host = slot->host;
1042 struct mmc_request *mrq;
649 1043
650 if (present != host->present) { 1044 dev_dbg(&slot->mmc->class_dev, "card %s\n",
651 dev_dbg(&host->mmc->class_dev, "card %s\n",
652 present ? "inserted" : "removed"); 1045 present ? "inserted" : "removed");
653 host->present = present;
654 1046
655 /* Reset controller if card is gone */ 1047 spin_lock(&host->lock);
656 if (!present) { 1048
657 mci_writel(host, CR, MCI_CR_SWRST); 1049 if (!present)
658 mci_writel(host, IDR, ~0UL); 1050 clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
659 mci_writel(host, CR, MCI_CR_MCIEN); 1051 else
660 } 1052 set_bit(ATMCI_CARD_PRESENT, &slot->flags);
661 1053
662 /* Clean up queue if present */ 1054 /* Clean up queue if present */
1055 mrq = slot->mrq;
663 if (mrq) { 1056 if (mrq) {
664 /* 1057 if (mrq == host->mrq) {
665 * Reset controller to terminate any ongoing 1058 /*
666 * commands or data transfers. 1059 * Reset controller to terminate any ongoing
667 */ 1060 * commands or data transfers.
668 mci_writel(host, CR, MCI_CR_SWRST); 1061 */
1062 mci_writel(host, CR, MCI_CR_SWRST);
1063 mci_writel(host, CR, MCI_CR_MCIEN);
1064 mci_writel(host, MR, host->mode_reg);
669 1065
670 if (!atmci_is_completed(host, EVENT_CMD_COMPLETE))
671 mrq->cmd->error = -ENOMEDIUM;
672
673 if (mrq->data && !atmci_is_completed(host,
674 EVENT_DATA_COMPLETE)) {
675 host->data = NULL; 1066 host->data = NULL;
676 mrq->data->error = -ENOMEDIUM; 1067 host->cmd = NULL;
1068
1069 switch (host->state) {
1070 case STATE_IDLE:
1071 break;
1072 case STATE_SENDING_CMD:
1073 mrq->cmd->error = -ENOMEDIUM;
1074 if (!mrq->data)
1075 break;
1076 /* fall through */
1077 case STATE_SENDING_DATA:
1078 mrq->data->error = -ENOMEDIUM;
1079 atmci_stop_dma(host);
1080 break;
1081 case STATE_DATA_BUSY:
1082 case STATE_DATA_ERROR:
1083 if (mrq->data->error == -EINPROGRESS)
1084 mrq->data->error = -ENOMEDIUM;
1085 if (!mrq->stop)
1086 break;
1087 /* fall through */
1088 case STATE_SENDING_STOP:
1089 mrq->stop->error = -ENOMEDIUM;
1090 break;
1091 }
1092
1093 atmci_request_end(host, mrq);
1094 } else {
1095 list_del(&slot->queue_node);
1096 mrq->cmd->error = -ENOMEDIUM;
1097 if (mrq->data)
1098 mrq->data->error = -ENOMEDIUM;
1099 if (mrq->stop)
1100 mrq->stop->error = -ENOMEDIUM;
1101
1102 spin_unlock(&host->lock);
1103 mmc_request_done(slot->mmc, mrq);
1104 spin_lock(&host->lock);
677 } 1105 }
678 if (mrq->stop && !atmci_is_completed(host,
679 EVENT_STOP_COMPLETE))
680 mrq->stop->error = -ENOMEDIUM;
681
682 host->cmd = NULL;
683 atmci_request_end(host->mmc, mrq);
684 } 1106 }
1107 spin_unlock(&host->lock);
685 1108
686 mmc_detect_change(host->mmc, 0); 1109 mmc_detect_change(slot->mmc, 0);
687 } 1110 }
688} 1111}
689 1112
690static void atmci_tasklet_func(unsigned long priv) 1113static void atmci_tasklet_func(unsigned long priv)
691{ 1114{
692 struct mmc_host *mmc = (struct mmc_host *)priv; 1115 struct atmel_mci *host = (struct atmel_mci *)priv;
693 struct atmel_mci *host = mmc_priv(mmc);
694 struct mmc_request *mrq = host->mrq; 1116 struct mmc_request *mrq = host->mrq;
695 struct mmc_data *data = host->data; 1117 struct mmc_data *data = host->data;
1118 struct mmc_command *cmd = host->cmd;
1119 enum atmel_mci_state state = host->state;
1120 enum atmel_mci_state prev_state;
1121 u32 status;
1122
1123 spin_lock(&host->lock);
696 1124
697 dev_vdbg(&mmc->class_dev, 1125 state = host->state;
698 "tasklet: pending/completed/mask %lx/%lx/%x\n", 1126
699 host->pending_events, host->completed_events, 1127 dev_vdbg(&host->pdev->dev,
1128 "tasklet: state %u pending/completed/mask %lx/%lx/%x\n",
1129 state, host->pending_events, host->completed_events,
700 mci_readl(host, IMR)); 1130 mci_readl(host, IMR));
701 1131
702 if (atmci_test_and_clear_pending(host, EVENT_CMD_COMPLETE)) { 1132 do {
703 /* 1133 prev_state = state;
704 * host->cmd must be set to NULL before the interrupt
705 * handler sees EVENT_CMD_COMPLETE
706 */
707 host->cmd = NULL;
708 smp_wmb();
709 atmci_set_completed(host, EVENT_CMD_COMPLETE);
710 atmci_command_complete(host, mrq->cmd, host->cmd_status);
711
712 if (!mrq->cmd->error && mrq->stop
713 && atmci_is_completed(host, EVENT_XFER_COMPLETE)
714 && !atmci_test_and_set_completed(host,
715 EVENT_STOP_SENT))
716 send_stop_cmd(host->mmc, mrq->data);
717 }
718 if (atmci_test_and_clear_pending(host, EVENT_STOP_COMPLETE)) {
719 /*
720 * host->cmd must be set to NULL before the interrupt
721 * handler sees EVENT_STOP_COMPLETE
722 */
723 host->cmd = NULL;
724 smp_wmb();
725 atmci_set_completed(host, EVENT_STOP_COMPLETE);
726 atmci_command_complete(host, mrq->stop, host->stop_status);
727 }
728 if (atmci_test_and_clear_pending(host, EVENT_DATA_ERROR)) {
729 u32 status = host->data_status;
730 1134
731 dev_vdbg(&mmc->class_dev, "data error: status=%08x\n", status); 1135 switch (state) {
1136 case STATE_IDLE:
1137 break;
732 1138
733 atmci_set_completed(host, EVENT_DATA_ERROR); 1139 case STATE_SENDING_CMD:
734 atmci_set_completed(host, EVENT_DATA_COMPLETE); 1140 if (!atmci_test_and_clear_pending(host,
1141 EVENT_CMD_COMPLETE))
1142 break;
735 1143
736 if (status & MCI_DTOE) { 1144 host->cmd = NULL;
737 dev_dbg(&mmc->class_dev, 1145 atmci_set_completed(host, EVENT_CMD_COMPLETE);
738 "data timeout error\n"); 1146 atmci_command_complete(host, mrq->cmd);
739 data->error = -ETIMEDOUT; 1147 if (!mrq->data || cmd->error) {
740 } else if (status & MCI_DCRCE) { 1148 atmci_request_end(host, host->mrq);
741 dev_dbg(&mmc->class_dev, "data CRC error\n"); 1149 goto unlock;
742 data->error = -EILSEQ; 1150 }
743 } else { 1151
744 dev_dbg(&mmc->class_dev, 1152 prev_state = state = STATE_SENDING_DATA;
745 "data FIFO error (status=%08x)\n", 1153 /* fall through */
746 status); 1154
747 data->error = -EIO; 1155 case STATE_SENDING_DATA:
748 } 1156 if (atmci_test_and_clear_pending(host,
1157 EVENT_DATA_ERROR)) {
1158 atmci_stop_dma(host);
1159 if (data->stop)
1160 send_stop_cmd(host, data);
1161 state = STATE_DATA_ERROR;
1162 break;
1163 }
749 1164
750 if (host->present && data->stop 1165 if (!atmci_test_and_clear_pending(host,
751 && atmci_is_completed(host, EVENT_CMD_COMPLETE) 1166 EVENT_XFER_COMPLETE))
752 && !atmci_test_and_set_completed( 1167 break;
753 host, EVENT_STOP_SENT))
754 send_stop_cmd(host->mmc, data);
755 1168
756 host->data = NULL; 1169 atmci_set_completed(host, EVENT_XFER_COMPLETE);
757 } 1170 prev_state = state = STATE_DATA_BUSY;
758 if (atmci_test_and_clear_pending(host, EVENT_DATA_COMPLETE)) { 1171 /* fall through */
759 atmci_set_completed(host, EVENT_DATA_COMPLETE); 1172
1173 case STATE_DATA_BUSY:
1174 if (!atmci_test_and_clear_pending(host,
1175 EVENT_DATA_COMPLETE))
1176 break;
760 1177
761 if (!atmci_is_completed(host, EVENT_DATA_ERROR)) { 1178 host->data = NULL;
762 data->bytes_xfered = data->blocks * data->blksz; 1179 atmci_set_completed(host, EVENT_DATA_COMPLETE);
763 data->error = 0; 1180 status = host->data_status;
1181 if (unlikely(status & ATMCI_DATA_ERROR_FLAGS)) {
1182 if (status & MCI_DTOE) {
1183 dev_dbg(&host->pdev->dev,
1184 "data timeout error\n");
1185 data->error = -ETIMEDOUT;
1186 } else if (status & MCI_DCRCE) {
1187 dev_dbg(&host->pdev->dev,
1188 "data CRC error\n");
1189 data->error = -EILSEQ;
1190 } else {
1191 dev_dbg(&host->pdev->dev,
1192 "data FIFO error (status=%08x)\n",
1193 status);
1194 data->error = -EIO;
1195 }
1196 } else {
1197 data->bytes_xfered = data->blocks * data->blksz;
1198 data->error = 0;
1199 }
1200
1201 if (!data->stop) {
1202 atmci_request_end(host, host->mrq);
1203 goto unlock;
1204 }
1205
1206 prev_state = state = STATE_SENDING_STOP;
1207 if (!data->error)
1208 send_stop_cmd(host, data);
1209 /* fall through */
1210
1211 case STATE_SENDING_STOP:
1212 if (!atmci_test_and_clear_pending(host,
1213 EVENT_CMD_COMPLETE))
1214 break;
1215
1216 host->cmd = NULL;
1217 atmci_command_complete(host, mrq->stop);
1218 atmci_request_end(host, host->mrq);
1219 goto unlock;
1220
1221 case STATE_DATA_ERROR:
1222 if (!atmci_test_and_clear_pending(host,
1223 EVENT_XFER_COMPLETE))
1224 break;
1225
1226 state = STATE_DATA_BUSY;
1227 break;
764 } 1228 }
1229 } while (state != prev_state);
765 1230
766 host->data = NULL; 1231 host->state = state;
767 }
768 1232
769 if (host->mrq && !host->cmd && !host->data) 1233unlock:
770 atmci_request_end(mmc, host->mrq); 1234 spin_unlock(&host->lock);
771} 1235}
772 1236
773static void atmci_read_data_pio(struct atmel_mci *host) 1237static void atmci_read_data_pio(struct atmel_mci *host)
@@ -789,6 +1253,7 @@ static void atmci_read_data_pio(struct atmel_mci *host)
789 nbytes += 4; 1253 nbytes += 4;
790 1254
791 if (offset == sg->length) { 1255 if (offset == sg->length) {
1256 flush_dcache_page(sg_page(sg));
792 host->sg = sg = sg_next(sg); 1257 host->sg = sg = sg_next(sg);
793 if (!sg) 1258 if (!sg)
794 goto done; 1259 goto done;
@@ -817,9 +1282,11 @@ static void atmci_read_data_pio(struct atmel_mci *host)
817 mci_writel(host, IDR, (MCI_NOTBUSY | MCI_RXRDY 1282 mci_writel(host, IDR, (MCI_NOTBUSY | MCI_RXRDY
818 | ATMCI_DATA_ERROR_FLAGS)); 1283 | ATMCI_DATA_ERROR_FLAGS));
819 host->data_status = status; 1284 host->data_status = status;
1285 data->bytes_xfered += nbytes;
1286 smp_wmb();
820 atmci_set_pending(host, EVENT_DATA_ERROR); 1287 atmci_set_pending(host, EVENT_DATA_ERROR);
821 tasklet_schedule(&host->tasklet); 1288 tasklet_schedule(&host->tasklet);
822 break; 1289 return;
823 } 1290 }
824 } while (status & MCI_RXRDY); 1291 } while (status & MCI_RXRDY);
825 1292
@@ -832,10 +1299,8 @@ done:
832 mci_writel(host, IDR, MCI_RXRDY); 1299 mci_writel(host, IDR, MCI_RXRDY);
833 mci_writel(host, IER, MCI_NOTBUSY); 1300 mci_writel(host, IER, MCI_NOTBUSY);
834 data->bytes_xfered += nbytes; 1301 data->bytes_xfered += nbytes;
835 atmci_set_completed(host, EVENT_XFER_COMPLETE); 1302 smp_wmb();
836 if (data->stop && atmci_is_completed(host, EVENT_CMD_COMPLETE) 1303 atmci_set_pending(host, EVENT_XFER_COMPLETE);
837 && !atmci_test_and_set_completed(host, EVENT_STOP_SENT))
838 send_stop_cmd(host->mmc, data);
839} 1304}
840 1305
841static void atmci_write_data_pio(struct atmel_mci *host) 1306static void atmci_write_data_pio(struct atmel_mci *host)
@@ -888,9 +1353,11 @@ static void atmci_write_data_pio(struct atmel_mci *host)
888 mci_writel(host, IDR, (MCI_NOTBUSY | MCI_TXRDY 1353 mci_writel(host, IDR, (MCI_NOTBUSY | MCI_TXRDY
889 | ATMCI_DATA_ERROR_FLAGS)); 1354 | ATMCI_DATA_ERROR_FLAGS));
890 host->data_status = status; 1355 host->data_status = status;
1356 data->bytes_xfered += nbytes;
1357 smp_wmb();
891 atmci_set_pending(host, EVENT_DATA_ERROR); 1358 atmci_set_pending(host, EVENT_DATA_ERROR);
892 tasklet_schedule(&host->tasklet); 1359 tasklet_schedule(&host->tasklet);
893 break; 1360 return;
894 } 1361 }
895 } while (status & MCI_TXRDY); 1362 } while (status & MCI_TXRDY);
896 1363
@@ -903,38 +1370,26 @@ done:
903 mci_writel(host, IDR, MCI_TXRDY); 1370 mci_writel(host, IDR, MCI_TXRDY);
904 mci_writel(host, IER, MCI_NOTBUSY); 1371 mci_writel(host, IER, MCI_NOTBUSY);
905 data->bytes_xfered += nbytes; 1372 data->bytes_xfered += nbytes;
906 atmci_set_completed(host, EVENT_XFER_COMPLETE); 1373 smp_wmb();
907 if (data->stop && atmci_is_completed(host, EVENT_CMD_COMPLETE) 1374 atmci_set_pending(host, EVENT_XFER_COMPLETE);
908 && !atmci_test_and_set_completed(host, EVENT_STOP_SENT))
909 send_stop_cmd(host->mmc, data);
910} 1375}
911 1376
912static void atmci_cmd_interrupt(struct mmc_host *mmc, u32 status) 1377static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status)
913{ 1378{
914 struct atmel_mci *host = mmc_priv(mmc);
915
916 mci_writel(host, IDR, MCI_CMDRDY); 1379 mci_writel(host, IDR, MCI_CMDRDY);
917 1380
918 if (atmci_is_completed(host, EVENT_STOP_SENT)) { 1381 host->cmd_status = status;
919 host->stop_status = status; 1382 smp_wmb();
920 atmci_set_pending(host, EVENT_STOP_COMPLETE); 1383 atmci_set_pending(host, EVENT_CMD_COMPLETE);
921 } else {
922 host->cmd_status = status;
923 atmci_set_pending(host, EVENT_CMD_COMPLETE);
924 }
925
926 tasklet_schedule(&host->tasklet); 1384 tasklet_schedule(&host->tasklet);
927} 1385}
928 1386
929static irqreturn_t atmci_interrupt(int irq, void *dev_id) 1387static irqreturn_t atmci_interrupt(int irq, void *dev_id)
930{ 1388{
931 struct mmc_host *mmc = dev_id; 1389 struct atmel_mci *host = dev_id;
932 struct atmel_mci *host = mmc_priv(mmc);
933 u32 status, mask, pending; 1390 u32 status, mask, pending;
934 unsigned int pass_count = 0; 1391 unsigned int pass_count = 0;
935 1392
936 spin_lock(&mmc->lock);
937
938 do { 1393 do {
939 status = mci_readl(host, SR); 1394 status = mci_readl(host, SR);
940 mask = mci_readl(host, IMR); 1395 mask = mci_readl(host, IMR);
@@ -946,13 +1401,18 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
946 mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS 1401 mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS
947 | MCI_RXRDY | MCI_TXRDY); 1402 | MCI_RXRDY | MCI_TXRDY);
948 pending &= mci_readl(host, IMR); 1403 pending &= mci_readl(host, IMR);
1404
949 host->data_status = status; 1405 host->data_status = status;
1406 smp_wmb();
950 atmci_set_pending(host, EVENT_DATA_ERROR); 1407 atmci_set_pending(host, EVENT_DATA_ERROR);
951 tasklet_schedule(&host->tasklet); 1408 tasklet_schedule(&host->tasklet);
952 } 1409 }
953 if (pending & MCI_NOTBUSY) { 1410 if (pending & MCI_NOTBUSY) {
954 mci_writel(host, IDR, (MCI_NOTBUSY 1411 mci_writel(host, IDR,
955 | ATMCI_DATA_ERROR_FLAGS)); 1412 ATMCI_DATA_ERROR_FLAGS | MCI_NOTBUSY);
1413 if (!host->data_status)
1414 host->data_status = status;
1415 smp_wmb();
956 atmci_set_pending(host, EVENT_DATA_COMPLETE); 1416 atmci_set_pending(host, EVENT_DATA_COMPLETE);
957 tasklet_schedule(&host->tasklet); 1417 tasklet_schedule(&host->tasklet);
958 } 1418 }
@@ -962,18 +1422,15 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
962 atmci_write_data_pio(host); 1422 atmci_write_data_pio(host);
963 1423
964 if (pending & MCI_CMDRDY) 1424 if (pending & MCI_CMDRDY)
965 atmci_cmd_interrupt(mmc, status); 1425 atmci_cmd_interrupt(host, status);
966 } while (pass_count++ < 5); 1426 } while (pass_count++ < 5);
967 1427
968 spin_unlock(&mmc->lock);
969
970 return pass_count ? IRQ_HANDLED : IRQ_NONE; 1428 return pass_count ? IRQ_HANDLED : IRQ_NONE;
971} 1429}
972 1430
973static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id) 1431static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
974{ 1432{
975 struct mmc_host *mmc = dev_id; 1433 struct atmel_mci_slot *slot = dev_id;
976 struct atmel_mci *host = mmc_priv(mmc);
977 1434
978 /* 1435 /*
979 * Disable interrupts until the pin has stabilized and check 1436 * Disable interrupts until the pin has stabilized and check
@@ -981,19 +1438,176 @@ static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
981 * middle of the timer routine when this interrupt triggers. 1438 * middle of the timer routine when this interrupt triggers.
982 */ 1439 */
983 disable_irq_nosync(irq); 1440 disable_irq_nosync(irq);
984 mod_timer(&host->detect_timer, jiffies + msecs_to_jiffies(20)); 1441 mod_timer(&slot->detect_timer, jiffies + msecs_to_jiffies(20));
985 1442
986 return IRQ_HANDLED; 1443 return IRQ_HANDLED;
987} 1444}
988 1445
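
The detect interrupt above only masks its own IRQ line and arms a 20 ms timer; atmci_detect_change() (the timer callback earlier in this file) re-enables the IRQ, samples the pin and compares it with the cached ATMCI_CARD_PRESENT bit, which debounces card-slot contact bounce. A userspace model of that round trip; the names below are stand-ins for the kernel calls used in the patch, not the driver itself:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool irq_enabled = true;
static bool card_present_old;

static bool read_detect_pin(void) { return true; }	/* stand-in for gpio_get_value() */

static void detect_irq(void)
{
	irq_enabled = false;		/* disable_irq_nosync() in the driver */
	/* arm a 20 ms one-shot timer; modelled with usleep() in main() */
}

static void detect_timer(void)
{
	bool present;

	irq_enabled = true;		/* enable_irq() in the driver */
	present = read_detect_pin();
	if (present != card_present_old) {
		printf("card %s\n", present ? "inserted" : "removed");
		card_present_old = present;
	}
}

int main(void)
{
	detect_irq();
	usleep(20 * 1000);		/* let the pin settle before re-reading it */
	detect_timer();
	return 0;
}
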
1446#ifdef CONFIG_MMC_ATMELMCI_DMA
1447
1448static inline struct atmel_mci *
1449dma_client_to_atmel_mci(struct dma_client *client)
1450{
1451 return container_of(client, struct atmel_mci, dma.client);
1452}
1453
1454static enum dma_state_client atmci_dma_event(struct dma_client *client,
1455 struct dma_chan *chan, enum dma_state state)
1456{
1457 struct atmel_mci *host;
1458 enum dma_state_client ret = DMA_NAK;
1459
1460 host = dma_client_to_atmel_mci(client);
1461
1462 switch (state) {
1463 case DMA_RESOURCE_AVAILABLE:
1464 spin_lock_bh(&host->lock);
1465 if (!host->dma.chan) {
1466 host->dma.chan = chan;
1467 ret = DMA_ACK;
1468 }
1469 spin_unlock_bh(&host->lock);
1470
1471 if (ret == DMA_ACK)
1472 dev_info(&host->pdev->dev,
1473 "Using %s for DMA transfers\n",
1474 chan->dev.bus_id);
1475 break;
1476
1477 case DMA_RESOURCE_REMOVED:
1478 spin_lock_bh(&host->lock);
1479 if (host->dma.chan == chan) {
1480 host->dma.chan = NULL;
1481 ret = DMA_ACK;
1482 }
1483 spin_unlock_bh(&host->lock);
1484
1485 if (ret == DMA_ACK)
1486 dev_info(&host->pdev->dev,
1487 "Lost %s, falling back to PIO\n",
1488 chan->dev.bus_id);
1489 break;
1490
1491 default:
1492 break;
1493 }
1494
1495
1496 return ret;
1497}
1498#endif /* CONFIG_MMC_ATMELMCI_DMA */
1499
1500static int __init atmci_init_slot(struct atmel_mci *host,
1501 struct mci_slot_pdata *slot_data, unsigned int id,
1502 u32 sdc_reg)
1503{
1504 struct mmc_host *mmc;
1505 struct atmel_mci_slot *slot;
1506
1507 mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev);
1508 if (!mmc)
1509 return -ENOMEM;
1510
1511 slot = mmc_priv(mmc);
1512 slot->mmc = mmc;
1513 slot->host = host;
1514 slot->detect_pin = slot_data->detect_pin;
1515 slot->wp_pin = slot_data->wp_pin;
1516 slot->sdc_reg = sdc_reg;
1517
1518 mmc->ops = &atmci_ops;
1519 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
1520 mmc->f_max = host->bus_hz / 2;
1521 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1522 if (slot_data->bus_width >= 4)
1523 mmc->caps |= MMC_CAP_4_BIT_DATA;
1524
1525 mmc->max_hw_segs = 64;
1526 mmc->max_phys_segs = 64;
1527 mmc->max_req_size = 32768 * 512;
1528 mmc->max_blk_size = 32768;
1529 mmc->max_blk_count = 512;
1530
1531 /* Assume card is present initially */
1532 set_bit(ATMCI_CARD_PRESENT, &slot->flags);
1533 if (gpio_is_valid(slot->detect_pin)) {
1534 if (gpio_request(slot->detect_pin, "mmc_detect")) {
1535 dev_dbg(&mmc->class_dev, "no detect pin available\n");
1536 slot->detect_pin = -EBUSY;
1537 } else if (gpio_get_value(slot->detect_pin)) {
1538 clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
1539 }
1540 }
1541
1542 if (!gpio_is_valid(slot->detect_pin))
1543 mmc->caps |= MMC_CAP_NEEDS_POLL;
1544
1545 if (gpio_is_valid(slot->wp_pin)) {
1546 if (gpio_request(slot->wp_pin, "mmc_wp")) {
1547 dev_dbg(&mmc->class_dev, "no WP pin available\n");
1548 slot->wp_pin = -EBUSY;
1549 }
1550 }
1551
1552 host->slot[id] = slot;
1553 mmc_add_host(mmc);
1554
1555 if (gpio_is_valid(slot->detect_pin)) {
1556 int ret;
1557
1558 setup_timer(&slot->detect_timer, atmci_detect_change,
1559 (unsigned long)slot);
1560
1561 ret = request_irq(gpio_to_irq(slot->detect_pin),
1562 atmci_detect_interrupt,
1563 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
1564 "mmc-detect", slot);
1565 if (ret) {
1566 dev_dbg(&mmc->class_dev,
1567 "could not request IRQ %d for detect pin\n",
1568 gpio_to_irq(slot->detect_pin));
1569 gpio_free(slot->detect_pin);
1570 slot->detect_pin = -EBUSY;
1571 }
1572 }
1573
1574 atmci_init_debugfs(slot);
1575
1576 return 0;
1577}
1578
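
atmci_init_slot() allocates one mmc_host per slot and keeps the per-slot state (struct atmel_mci_slot) in the private area that mmc_alloc_host() reserves behind the mmc_host, while the controller-wide struct atmel_mci is now allocated separately in atmci_probe(). A plain C model of that "private area behind the host structure" layout; the fake_* names are invented for the sketch and only mimic mmc_alloc_host()/mmc_priv():

#include <stdio.h>
#include <stdlib.h>

struct fake_mmc_host {
	int index;
	unsigned long private[];	/* per-driver area, like mmc_host's */
};

struct fake_slot {
	int sdc_reg;
	int detect_pin;
};

static struct fake_mmc_host *fake_alloc_host(size_t extra)
{
	/* one allocation holds the host plus the driver's slot state */
	return calloc(1, sizeof(struct fake_mmc_host) + extra);
}

static void *fake_priv(struct fake_mmc_host *host)
{
	return host->private;
}

int main(void)
{
	struct fake_mmc_host *mmc = fake_alloc_host(sizeof(struct fake_slot));
	struct fake_slot *slot = fake_priv(mmc);

	slot->sdc_reg = 0;		/* e.g. MCI_SDCSEL_SLOT_A in the driver */
	printf("slot at %p inside host at %p\n", (void *)slot, (void *)mmc);
	free(mmc);
	return 0;
}
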
1579static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot,
1580 unsigned int id)
1581{
1582 /* Debugfs stuff is cleaned up by mmc core */
1583
1584 set_bit(ATMCI_SHUTDOWN, &slot->flags);
1585 smp_wmb();
1586
1587 mmc_remove_host(slot->mmc);
1588
1589 if (gpio_is_valid(slot->detect_pin)) {
1590 int pin = slot->detect_pin;
1591
1592 free_irq(gpio_to_irq(pin), slot);
1593 del_timer_sync(&slot->detect_timer);
1594 gpio_free(pin);
1595 }
1596 if (gpio_is_valid(slot->wp_pin))
1597 gpio_free(slot->wp_pin);
1598
1599 slot->host->slot[id] = NULL;
1600 mmc_free_host(slot->mmc);
1601}
1602
989static int __init atmci_probe(struct platform_device *pdev) 1603static int __init atmci_probe(struct platform_device *pdev)
990{ 1604{
991 struct mci_platform_data *pdata; 1605 struct mci_platform_data *pdata;
992 struct atmel_mci *host; 1606 struct atmel_mci *host;
993 struct mmc_host *mmc; 1607 struct resource *regs;
994 struct resource *regs; 1608 unsigned int nr_slots;
995 int irq; 1609 int irq;
996 int ret; 1610 int ret;
997 1611
998 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1612 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
999 if (!regs) 1613 if (!regs)
@@ -1005,15 +1619,13 @@ static int __init atmci_probe(struct platform_device *pdev)
1005 if (irq < 0) 1619 if (irq < 0)
1006 return irq; 1620 return irq;
1007 1621
1008 mmc = mmc_alloc_host(sizeof(struct atmel_mci), &pdev->dev); 1622 host = kzalloc(sizeof(struct atmel_mci), GFP_KERNEL);
1009 if (!mmc) 1623 if (!host)
1010 return -ENOMEM; 1624 return -ENOMEM;
1011 1625
1012 host = mmc_priv(mmc);
1013 host->pdev = pdev; 1626 host->pdev = pdev;
1014 host->mmc = mmc; 1627 spin_lock_init(&host->lock);
1015 host->detect_pin = pdata->detect_pin; 1628 INIT_LIST_HEAD(&host->queue);
1016 host->wp_pin = pdata->wp_pin;
1017 1629
1018 host->mck = clk_get(&pdev->dev, "mci_clk"); 1630 host->mck = clk_get(&pdev->dev, "mci_clk");
1019 if (IS_ERR(host->mck)) { 1631 if (IS_ERR(host->mck)) {
@@ -1033,122 +1645,102 @@ static int __init atmci_probe(struct platform_device *pdev)
1033 1645
1034 host->mapbase = regs->start; 1646 host->mapbase = regs->start;
1035 1647
1036 mmc->ops = &atmci_ops; 1648 tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)host);
1037 mmc->f_min = (host->bus_hz + 511) / 512;
1038 mmc->f_max = host->bus_hz / 2;
1039 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1040 mmc->caps |= MMC_CAP_4_BIT_DATA;
1041
1042 mmc->max_hw_segs = 64;
1043 mmc->max_phys_segs = 64;
1044 mmc->max_req_size = 32768 * 512;
1045 mmc->max_blk_size = 32768;
1046 mmc->max_blk_count = 512;
1047
1048 tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)mmc);
1049 1649
1050 ret = request_irq(irq, atmci_interrupt, 0, pdev->dev.bus_id, mmc); 1650 ret = request_irq(irq, atmci_interrupt, 0, pdev->dev.bus_id, host);
1051 if (ret) 1651 if (ret)
1052 goto err_request_irq; 1652 goto err_request_irq;
1053 1653
1054 /* Assume card is present if we don't have a detect pin */ 1654#ifdef CONFIG_MMC_ATMELMCI_DMA
1055 host->present = 1; 1655 if (pdata->dma_slave) {
1056 if (gpio_is_valid(host->detect_pin)) { 1656 struct dma_slave *slave = pdata->dma_slave;
1057 if (gpio_request(host->detect_pin, "mmc_detect")) {
1058 dev_dbg(&mmc->class_dev, "no detect pin available\n");
1059 host->detect_pin = -1;
1060 } else {
1061 host->present = !gpio_get_value(host->detect_pin);
1062 }
1063 }
1064 1657
1065 if (!gpio_is_valid(host->detect_pin)) 1658 slave->tx_reg = regs->start + MCI_TDR;
1066 mmc->caps |= MMC_CAP_NEEDS_POLL; 1659 slave->rx_reg = regs->start + MCI_RDR;
1067 1660
1068 if (gpio_is_valid(host->wp_pin)) { 1661 /* Try to grab a DMA channel */
1069 if (gpio_request(host->wp_pin, "mmc_wp")) { 1662 host->dma.client.event_callback = atmci_dma_event;
1070 dev_dbg(&mmc->class_dev, "no WP pin available\n"); 1663 dma_cap_set(DMA_SLAVE, host->dma.client.cap_mask);
1071 host->wp_pin = -1; 1664 host->dma.client.slave = slave;
1072 } 1665
1666 dma_async_client_register(&host->dma.client);
1667 dma_async_client_chan_request(&host->dma.client);
1668 } else {
1669 dev_notice(&pdev->dev, "DMA not available, using PIO\n");
1073 } 1670 }
1671#endif /* CONFIG_MMC_ATMELMCI_DMA */
1074 1672
1075 platform_set_drvdata(pdev, host); 1673 platform_set_drvdata(pdev, host);
1076 1674
1077 mmc_add_host(mmc); 1675 /* We need at least one slot to succeed */
1078 1676 nr_slots = 0;
1079 if (gpio_is_valid(host->detect_pin)) { 1677 ret = -ENODEV;
1080 setup_timer(&host->detect_timer, atmci_detect_change, 1678 if (pdata->slot[0].bus_width) {
1081 (unsigned long)host); 1679 ret = atmci_init_slot(host, &pdata->slot[0],
1082 1680 MCI_SDCSEL_SLOT_A, 0);
1083 ret = request_irq(gpio_to_irq(host->detect_pin), 1681 if (!ret)
1084 atmci_detect_interrupt, 1682 nr_slots++;
1085 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, 1683 }
1086 "mmc-detect", mmc); 1684 if (pdata->slot[1].bus_width) {
1087 if (ret) { 1685 ret = atmci_init_slot(host, &pdata->slot[1],
1088 dev_dbg(&mmc->class_dev, 1686 MCI_SDCSEL_SLOT_B, 1);
1089 "could not request IRQ %d for detect pin\n", 1687 if (!ret)
1090 gpio_to_irq(host->detect_pin)); 1688 nr_slots++;
1091 gpio_free(host->detect_pin);
1092 host->detect_pin = -1;
1093 }
1094 } 1689 }
1095 1690
1096 dev_info(&mmc->class_dev, 1691 if (!nr_slots)
1097 "Atmel MCI controller at 0x%08lx irq %d\n", 1692 goto err_init_slot;
1098 host->mapbase, irq);
1099 1693
1100 atmci_init_debugfs(host); 1694 dev_info(&pdev->dev,
1695 "Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
1696 host->mapbase, irq, nr_slots);
1101 1697
1102 return 0; 1698 return 0;
1103 1699
1700err_init_slot:
1701#ifdef CONFIG_MMC_ATMELMCI_DMA
1702 if (pdata->dma_slave)
1703 dma_async_client_unregister(&host->dma.client);
1704#endif
1705 free_irq(irq, host);
1104err_request_irq: 1706err_request_irq:
1105 iounmap(host->regs); 1707 iounmap(host->regs);
1106err_ioremap: 1708err_ioremap:
1107 clk_put(host->mck); 1709 clk_put(host->mck);
1108err_clk_get: 1710err_clk_get:
1109 mmc_free_host(mmc); 1711 kfree(host);
1110 return ret; 1712 return ret;
1111} 1713}
1112 1714
1113static int __exit atmci_remove(struct platform_device *pdev) 1715static int __exit atmci_remove(struct platform_device *pdev)
1114{ 1716{
1115 struct atmel_mci *host = platform_get_drvdata(pdev); 1717 struct atmel_mci *host = platform_get_drvdata(pdev);
1718 unsigned int i;
1116 1719
1117 platform_set_drvdata(pdev, NULL); 1720 platform_set_drvdata(pdev, NULL);
1118 1721
1119 if (host) { 1722 for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
1120 /* Debugfs stuff is cleaned up by mmc core */ 1723 if (host->slot[i])
1121 1724 atmci_cleanup_slot(host->slot[i], i);
1122 if (gpio_is_valid(host->detect_pin)) { 1725 }
1123 int pin = host->detect_pin;
1124
1125 /* Make sure the timer doesn't enable the interrupt */
1126 host->detect_pin = -1;
1127 smp_wmb();
1128
1129 free_irq(gpio_to_irq(pin), host->mmc);
1130 del_timer_sync(&host->detect_timer);
1131 gpio_free(pin);
1132 }
1133
1134 mmc_remove_host(host->mmc);
1135 1726
1136 clk_enable(host->mck); 1727 clk_enable(host->mck);
1137 mci_writel(host, IDR, ~0UL); 1728 mci_writel(host, IDR, ~0UL);
1138 mci_writel(host, CR, MCI_CR_MCIDIS); 1729 mci_writel(host, CR, MCI_CR_MCIDIS);
1139 mci_readl(host, SR); 1730 mci_readl(host, SR);
1140 clk_disable(host->mck); 1731 clk_disable(host->mck);
1141 1732
1142 if (gpio_is_valid(host->wp_pin)) 1733#ifdef CONFIG_MMC_ATMELMCI_DMA
1143 gpio_free(host->wp_pin); 1734 if (host->dma.client.slave)
1735 dma_async_client_unregister(&host->dma.client);
1736#endif
1144 1737
1145 free_irq(platform_get_irq(pdev, 0), host->mmc); 1738 free_irq(platform_get_irq(pdev, 0), host);
1146 iounmap(host->regs); 1739 iounmap(host->regs);
1147 1740
1148 clk_put(host->mck); 1741 clk_put(host->mck);
1742 kfree(host);
1149 1743
1150 mmc_free_host(host->mmc);
1151 }
1152 return 0; 1744 return 0;
1153} 1745}
1154 1746
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 7503b81374e0..07faf5412a1f 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -95,8 +95,6 @@
95 * reads which takes nowhere near that long. Older cards may be able to use 95 * reads which takes nowhere near that long. Older cards may be able to use
96 * shorter timeouts ... but why bother? 96 * shorter timeouts ... but why bother?
97 */ 97 */
98#define readblock_timeout ktime_set(0, 100 * 1000 * 1000)
99#define writeblock_timeout ktime_set(0, 250 * 1000 * 1000)
100#define r1b_timeout ktime_set(3, 0) 98#define r1b_timeout ktime_set(3, 0)
101 99
102 100
@@ -220,9 +218,9 @@ mmc_spi_wait_unbusy(struct mmc_spi_host *host, ktime_t timeout)
220 return mmc_spi_skip(host, timeout, sizeof(host->data->status), 0); 218 return mmc_spi_skip(host, timeout, sizeof(host->data->status), 0);
221} 219}
222 220
223static int mmc_spi_readtoken(struct mmc_spi_host *host) 221static int mmc_spi_readtoken(struct mmc_spi_host *host, ktime_t timeout)
224{ 222{
225 return mmc_spi_skip(host, readblock_timeout, 1, 0xff); 223 return mmc_spi_skip(host, timeout, 1, 0xff);
226} 224}
227 225
228 226
@@ -605,7 +603,8 @@ mmc_spi_setup_data_message(
605 * Return negative errno, else success. 603 * Return negative errno, else success.
606 */ 604 */
607static int 605static int
608mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t) 606mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
607 ktime_t timeout)
609{ 608{
610 struct spi_device *spi = host->spi; 609 struct spi_device *spi = host->spi;
611 int status, i; 610 int status, i;
@@ -673,7 +672,7 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t)
673 if (scratch->status[i] != 0) 672 if (scratch->status[i] != 0)
674 return 0; 673 return 0;
675 } 674 }
676 return mmc_spi_wait_unbusy(host, writeblock_timeout); 675 return mmc_spi_wait_unbusy(host, timeout);
677} 676}
678 677
679/* 678/*
@@ -693,7 +692,8 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t)
693 * STOP_TRANSMISSION command. 692 * STOP_TRANSMISSION command.
694 */ 693 */
695static int 694static int
696mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t) 695mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
696 ktime_t timeout)
697{ 697{
698 struct spi_device *spi = host->spi; 698 struct spi_device *spi = host->spi;
699 int status; 699 int status;
@@ -707,7 +707,7 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t)
707 return status; 707 return status;
708 status = scratch->status[0]; 708 status = scratch->status[0];
709 if (status == 0xff || status == 0) 709 if (status == 0xff || status == 0)
710 status = mmc_spi_readtoken(host); 710 status = mmc_spi_readtoken(host, timeout);
711 711
712 if (status == SPI_TOKEN_SINGLE) { 712 if (status == SPI_TOKEN_SINGLE) {
713 if (host->dma_dev) { 713 if (host->dma_dev) {
@@ -778,6 +778,8 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
778 struct scatterlist *sg; 778 struct scatterlist *sg;
779 unsigned n_sg; 779 unsigned n_sg;
780 int multiple = (data->blocks > 1); 780 int multiple = (data->blocks > 1);
781 u32 clock_rate;
782 ktime_t timeout;
781 783
782 if (data->flags & MMC_DATA_READ) 784 if (data->flags & MMC_DATA_READ)
783 direction = DMA_FROM_DEVICE; 785 direction = DMA_FROM_DEVICE;
@@ -786,6 +788,14 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
786 mmc_spi_setup_data_message(host, multiple, direction); 788 mmc_spi_setup_data_message(host, multiple, direction);
787 t = &host->t; 789 t = &host->t;
788 790
791 if (t->speed_hz)
792 clock_rate = t->speed_hz;
793 else
794 clock_rate = spi->max_speed_hz;
795
796 timeout = ktime_add_ns(ktime_set(0, 0), data->timeout_ns +
797 data->timeout_clks * 1000000 / clock_rate);
798
789 /* Handle scatterlist segments one at a time, with synch for 799 /* Handle scatterlist segments one at a time, with synch for
790 * each 512-byte block 800 * each 512-byte block
791 */ 801 */
@@ -832,9 +842,9 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
832 t->len); 842 t->len);
833 843
834 if (direction == DMA_TO_DEVICE) 844 if (direction == DMA_TO_DEVICE)
835 status = mmc_spi_writeblock(host, t); 845 status = mmc_spi_writeblock(host, t, timeout);
836 else 846 else
837 status = mmc_spi_readblock(host, t); 847 status = mmc_spi_readblock(host, t, timeout);
838 if (status < 0) 848 if (status < 0)
839 break; 849 break;
840 850
@@ -917,7 +927,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
917 if (scratch->status[tmp] != 0) 927 if (scratch->status[tmp] != 0)
918 return; 928 return;
919 } 929 }
920 tmp = mmc_spi_wait_unbusy(host, writeblock_timeout); 930 tmp = mmc_spi_wait_unbusy(host, timeout);
921 if (tmp < 0 && !data->error) 931 if (tmp < 0 && !data->error)
922 data->error = tmp; 932 data->error = tmp;
923 } 933 }
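
With the fixed readblock/writeblock timeouts gone, mmc_spi derives a per-request timeout from the values the MMC core supplies for the card: data->timeout_ns plus data->timeout_clks cycles at the effective SPI clock (t->speed_hz if set, otherwise spi->max_speed_hz). A worked example of that expression with invented numbers; it only shows which inputs feed the calculation, not mmc_spi's ktime handling:

#include <stdio.h>

int main(void)
{
	unsigned int timeout_ns   = 100 * 1000 * 1000;	/* supplied by the MMC core */
	unsigned int timeout_clks = 0;			/* supplied by the MMC core */
	unsigned int clock_rate   = 25 * 1000 * 1000;	/* t->speed_hz, else the    */
							/* SPI device's max speed   */
	unsigned long long total_ns = timeout_ns +
		(unsigned long long)timeout_clks * 1000000 / clock_rate;

	printf("per-block timeout: %llu ns\n", total_ns);
	return 0;
}
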
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index fcb14c2346cc..0a84f10d719c 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -144,7 +144,8 @@ static int jmicron_probe(struct sdhci_pci_chip *chip)
144 SDHCI_QUIRK_32BIT_DMA_SIZE | 144 SDHCI_QUIRK_32BIT_DMA_SIZE |
145 SDHCI_QUIRK_32BIT_ADMA_SIZE | 145 SDHCI_QUIRK_32BIT_ADMA_SIZE |
146 SDHCI_QUIRK_RESET_AFTER_REQUEST | 146 SDHCI_QUIRK_RESET_AFTER_REQUEST |
147 SDHCI_QUIRK_BROKEN_SMALL_PIO; 147 SDHCI_QUIRK_BROKEN_SMALL_PIO |
148 SDHCI_QUIRK_FORCE_HIGHSPEED;
148 } 149 }
149 150
150 /* 151 /*
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index e3a8133560a2..30f64b1f2354 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -177,7 +177,7 @@ static void sdhci_read_block_pio(struct sdhci_host *host)
177{ 177{
178 unsigned long flags; 178 unsigned long flags;
179 size_t blksize, len, chunk; 179 size_t blksize, len, chunk;
180 u32 scratch; 180 u32 uninitialized_var(scratch);
181 u8 *buf; 181 u8 *buf;
182 182
183 DBG("PIO reading\n"); 183 DBG("PIO reading\n");
@@ -1154,7 +1154,7 @@ static void sdhci_tasklet_card(unsigned long param)
1154 1154
1155 spin_unlock_irqrestore(&host->lock, flags); 1155 spin_unlock_irqrestore(&host->lock, flags);
1156 1156
1157 mmc_detect_change(host->mmc, msecs_to_jiffies(500)); 1157 mmc_detect_change(host->mmc, msecs_to_jiffies(200));
1158} 1158}
1159 1159
1160static void sdhci_tasklet_finish(unsigned long param) 1160static void sdhci_tasklet_finish(unsigned long param)
@@ -1266,9 +1266,31 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
1266 SDHCI_INT_INDEX)) 1266 SDHCI_INT_INDEX))
1267 host->cmd->error = -EILSEQ; 1267 host->cmd->error = -EILSEQ;
1268 1268
1269 if (host->cmd->error) 1269 if (host->cmd->error) {
1270 tasklet_schedule(&host->finish_tasklet); 1270 tasklet_schedule(&host->finish_tasklet);
1271 else if (intmask & SDHCI_INT_RESPONSE) 1271 return;
1272 }
1273
1274 /*
 1275 * The host can send an interrupt when the busy state has
1276 * ended, allowing us to wait without wasting CPU cycles.
1277 * Unfortunately this is overloaded on the "data complete"
1278 * interrupt, so we need to take some care when handling
1279 * it.
1280 *
1281 * Note: The 1.0 specification is a bit ambiguous about this
1282 * feature so there might be some problems with older
1283 * controllers.
1284 */
1285 if (host->cmd->flags & MMC_RSP_BUSY) {
1286 if (host->cmd->data)
1287 DBG("Cannot wait for busy signal when also "
1288 "doing a data transfer");
1289 else
1290 return;
1291 }
1292
1293 if (intmask & SDHCI_INT_RESPONSE)
1272 sdhci_finish_command(host); 1294 sdhci_finish_command(host);
1273} 1295}
1274 1296
@@ -1278,11 +1300,16 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
1278 1300
1279 if (!host->data) { 1301 if (!host->data) {
1280 /* 1302 /*
1281 * A data end interrupt is sent together with the response 1303 * The "data complete" interrupt is also used to
1282 * for the stop command. 1304 * indicate that a busy state has ended. See comment
1305 * above in sdhci_cmd_irq().
1283 */ 1306 */
1284 if (intmask & SDHCI_INT_DATA_END) 1307 if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
1285 return; 1308 if (intmask & SDHCI_INT_DATA_END) {
1309 sdhci_finish_command(host);
1310 return;
1311 }
1312 }
1286 1313
1287 printk(KERN_ERR "%s: Got data interrupt 0x%08x even " 1314 printk(KERN_ERR "%s: Got data interrupt 0x%08x even "
1288 "though no data operation was in progress.\n", 1315 "though no data operation was in progress.\n",
@@ -1604,7 +1631,8 @@ int sdhci_add_host(struct sdhci_host *host)
1604 mmc->f_max = host->max_clk; 1631 mmc->f_max = host->max_clk;
1605 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; 1632 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
1606 1633
1607 if (caps & SDHCI_CAN_DO_HISPD) 1634 if ((caps & SDHCI_CAN_DO_HISPD) ||
1635 (host->quirks & SDHCI_QUIRK_FORCE_HIGHSPEED))
1608 mmc->caps |= MMC_CAP_SD_HIGHSPEED; 1636 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
1609 1637
1610 mmc->ocr_avail = 0; 1638 mmc->ocr_avail = 0;
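
The sdhci changes above let the controller signal the end of an R1b busy phase instead of the driver polling for it: sdhci_cmd_irq() leaves a command with MMC_RSP_BUSY pending, and sdhci_data_irq() completes it when SDHCI_INT_DATA_END arrives with no data transfer in progress. A self-contained model of that hand-off; the flags and structures are simplified stand-ins, not the sdhci ones:

#include <stdbool.h>
#include <stdio.h>

#define INT_RESPONSE  (1u << 0)
#define INT_DATA_END  (1u << 1)

struct cmd { bool rsp_busy; bool done; };

static struct cmd *pending_cmd;

static void finish_command(void)
{
	pending_cmd->done = true;
	pending_cmd = NULL;
	printf("command finished\n");
}

static void cmd_irq(unsigned int intmask)
{
	if (pending_cmd->rsp_busy)	/* busy ends later, reported via the */
		return;			/* "data complete" interrupt         */
	if (intmask & INT_RESPONSE)
		finish_command();
}

static void data_irq(unsigned int intmask)
{
	if (pending_cmd && pending_cmd->rsp_busy && (intmask & INT_DATA_END))
		finish_command();
}

int main(void)
{
	struct cmd c = { .rsp_busy = true };

	pending_cmd = &c;
	cmd_irq(INT_RESPONSE);		/* response arrived, card still busy */
	data_irq(INT_DATA_END);		/* busy released -> complete command */
	return 0;
}
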
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 197d4a05f4ae..31f4b1528e76 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -208,6 +208,8 @@ struct sdhci_host {
208#define SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1<<12) 208#define SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1<<12)
209/* Controller has an issue with buffer bits for small transfers */ 209/* Controller has an issue with buffer bits for small transfers */
210#define SDHCI_QUIRK_BROKEN_SMALL_PIO (1<<13) 210#define SDHCI_QUIRK_BROKEN_SMALL_PIO (1<<13)
211/* Controller supports high speed but doesn't have the caps bit set */
212#define SDHCI_QUIRK_FORCE_HIGHSPEED (1<<14)
211 213
212 int irq; /* Device IRQ */ 214 int irq; /* Device IRQ */
213 void __iomem * ioaddr; /* Mapped address */ 215 void __iomem * ioaddr; /* Mapped address */
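
SDHCI_QUIRK_FORCE_HIGHSPEED lets sdhci-pci advertise high-speed support on JMicron controllers whose capability register omits SDHCI_CAN_DO_HISPD; sdhci_add_host() then sets MMC_CAP_SD_HIGHSPEED as shown in the sdhci.c hunk above. A tiny sketch of that capability decision, with stand-in bit names rather than the real SDHCI definitions:

#include <stdio.h>

#define CAN_DO_HISPD      (1u << 0)	/* stand-in for SDHCI_CAN_DO_HISPD          */
#define QUIRK_FORCE_HISPD (1u << 1)	/* stand-in for SDHCI_QUIRK_FORCE_HIGHSPEED */
#define CAP_SD_HIGHSPEED  (1u << 2)	/* stand-in for MMC_CAP_SD_HIGHSPEED        */

int main(void)
{
	unsigned int caps = 0;			/* controller caps: high-speed bit missing */
	unsigned int quirks = QUIRK_FORCE_HISPD;/* set by the PCI probe for such chips     */
	unsigned int mmc_caps = 0;

	if ((caps & CAN_DO_HISPD) || (quirks & QUIRK_FORCE_HISPD))
		mmc_caps |= CAP_SD_HIGHSPEED;

	printf("high speed %s\n", (mmc_caps & CAP_SD_HIGHSPEED) ? "enabled" : "disabled");
	return 0;
}
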