aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/mmc
diff options
context:
space:
mode:
authorDavid Woodhouse <David.Woodhouse@intel.com>2008-10-13 12:13:56 -0400
committerDavid Woodhouse <David.Woodhouse@intel.com>2008-10-13 12:13:56 -0400
commite758936e02700ff88a0b08b722a3847b95283ef2 (patch)
tree50c919bef1b459a778b85159d5929de95b6c4a01 /drivers/mmc
parent239cfbde1f5843c4a24199f117d5f67f637d72d5 (diff)
parent4480f15b3306f43bbb0310d461142b4e897ca45b (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts: include/asm-x86/statfs.h
Diffstat (limited to 'drivers/mmc')
-rw-r--r--drivers/mmc/Kconfig9
-rw-r--r--drivers/mmc/card/Kconfig3
-rw-r--r--drivers/mmc/card/block.c71
-rw-r--r--drivers/mmc/card/mmc_test.c4
-rw-r--r--drivers/mmc/card/queue.c23
-rw-r--r--drivers/mmc/core/mmc_ops.c8
-rw-r--r--drivers/mmc/core/sdio.c52
-rw-r--r--drivers/mmc/core/sdio_irq.c16
-rw-r--r--drivers/mmc/host/Kconfig30
-rw-r--r--drivers/mmc/host/at91_mci.c20
-rw-r--r--drivers/mmc/host/atmel-mci-regs.h6
-rw-r--r--drivers/mmc/host/atmel-mci.c1364
-rw-r--r--drivers/mmc/host/mmc_spi.c32
-rw-r--r--drivers/mmc/host/pxamci.c4
-rw-r--r--drivers/mmc/host/sdhci-pci.c3
-rw-r--r--drivers/mmc/host/sdhci.c46
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mmc/host/tmio_mmc.h4
18 files changed, 1201 insertions, 496 deletions
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index c0b41e8bcd9d..f2eeb38efa65 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -3,13 +3,14 @@
3# 3#
4 4
5menuconfig MMC 5menuconfig MMC
6 tristate "MMC/SD card support" 6 tristate "MMC/SD/SDIO card support"
7 depends on HAS_IOMEM 7 depends on HAS_IOMEM
8 help 8 help
9 MMC is the "multi-media card" bus protocol. 9 This selects MultiMediaCard, Secure Digital and Secure
10 Digital I/O support.
10 11
11 If you want MMC support, you should say Y here and also 12 If you want MMC/SD/SDIO support, you should say Y here and
12 to the specific driver for your MMC interface. 13 also to your specific host controller driver.
13 14
14config MMC_DEBUG 15config MMC_DEBUG
15 bool "MMC debugging" 16 bool "MMC debugging"
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index dd0f398ee2f5..3f2a912659af 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -2,7 +2,7 @@
2# MMC/SD card drivers 2# MMC/SD card drivers
3# 3#
4 4
5comment "MMC/SD Card Drivers" 5comment "MMC/SD/SDIO Card Drivers"
6 6
7config MMC_BLOCK 7config MMC_BLOCK
8 tristate "MMC block device driver" 8 tristate "MMC block device driver"
@@ -34,7 +34,6 @@ config MMC_BLOCK_BOUNCE
34 34
35config SDIO_UART 35config SDIO_UART
36 tristate "SDIO UART/GPS class support" 36 tristate "SDIO UART/GPS class support"
37 depends on MMC
38 help 37 help
39 SDIO function driver for SDIO cards that implements the UART 38 SDIO function driver for SDIO cards that implements the UART
40 class, as well as the GPS class which appears like a UART. 39 class, as well as the GPS class which appears like a UART.
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 86dbb366415a..24c97d3d16bb 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -29,6 +29,7 @@
29#include <linux/blkdev.h> 29#include <linux/blkdev.h>
30#include <linux/mutex.h> 30#include <linux/mutex.h>
31#include <linux/scatterlist.h> 31#include <linux/scatterlist.h>
32#include <linux/string_helpers.h>
32 33
33#include <linux/mmc/card.h> 34#include <linux/mmc/card.h>
34#include <linux/mmc/host.h> 35#include <linux/mmc/host.h>
@@ -57,7 +58,6 @@ struct mmc_blk_data {
57 struct mmc_queue queue; 58 struct mmc_queue queue;
58 59
59 unsigned int usage; 60 unsigned int usage;
60 unsigned int block_bits;
61 unsigned int read_only; 61 unsigned int read_only;
62}; 62};
63 63
@@ -83,7 +83,7 @@ static void mmc_blk_put(struct mmc_blk_data *md)
83 mutex_lock(&open_lock); 83 mutex_lock(&open_lock);
84 md->usage--; 84 md->usage--;
85 if (md->usage == 0) { 85 if (md->usage == 0) {
86 int devidx = md->disk->first_minor >> MMC_SHIFT; 86 int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT;
87 __clear_bit(devidx, dev_use); 87 __clear_bit(devidx, dev_use);
88 88
89 put_disk(md->disk); 89 put_disk(md->disk);
@@ -103,8 +103,10 @@ static int mmc_blk_open(struct inode *inode, struct file *filp)
103 check_disk_change(inode->i_bdev); 103 check_disk_change(inode->i_bdev);
104 ret = 0; 104 ret = 0;
105 105
106 if ((filp->f_mode & FMODE_WRITE) && md->read_only) 106 if ((filp->f_mode & FMODE_WRITE) && md->read_only) {
107 mmc_blk_put(md);
107 ret = -EROFS; 108 ret = -EROFS;
109 }
108 } 110 }
109 111
110 return ret; 112 return ret;
@@ -213,8 +215,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
213 struct mmc_blk_data *md = mq->data; 215 struct mmc_blk_data *md = mq->data;
214 struct mmc_card *card = md->queue.card; 216 struct mmc_card *card = md->queue.card;
215 struct mmc_blk_request brq; 217 struct mmc_blk_request brq;
216 int ret = 1, data_size, i; 218 int ret = 1;
217 struct scatterlist *sg;
218 219
219 mmc_claim_host(card->host); 220 mmc_claim_host(card->host);
220 221
@@ -230,13 +231,11 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
230 if (!mmc_card_blockaddr(card)) 231 if (!mmc_card_blockaddr(card))
231 brq.cmd.arg <<= 9; 232 brq.cmd.arg <<= 9;
232 brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; 233 brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
233 brq.data.blksz = 1 << md->block_bits; 234 brq.data.blksz = 512;
234 brq.stop.opcode = MMC_STOP_TRANSMISSION; 235 brq.stop.opcode = MMC_STOP_TRANSMISSION;
235 brq.stop.arg = 0; 236 brq.stop.arg = 0;
236 brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; 237 brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
237 brq.data.blocks = req->nr_sectors >> (md->block_bits - 9); 238 brq.data.blocks = req->nr_sectors;
238 if (brq.data.blocks > card->host->max_blk_count)
239 brq.data.blocks = card->host->max_blk_count;
240 239
241 if (brq.data.blocks > 1) { 240 if (brq.data.blocks > 1) {
242 /* SPI multiblock writes terminate using a special 241 /* SPI multiblock writes terminate using a special
@@ -268,24 +267,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
268 267
269 mmc_queue_bounce_pre(mq); 268 mmc_queue_bounce_pre(mq);
270 269
271 /*
272 * Adjust the sg list so it is the same size as the
273 * request.
274 */
275 if (brq.data.blocks !=
276 (req->nr_sectors >> (md->block_bits - 9))) {
277 data_size = brq.data.blocks * brq.data.blksz;
278 for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
279 data_size -= sg->length;
280 if (data_size <= 0) {
281 sg->length += data_size;
282 i++;
283 break;
284 }
285 }
286 brq.data.sg_len = i;
287 }
288
289 mmc_wait_for_req(card->host, &brq.mrq); 270 mmc_wait_for_req(card->host, &brq.mrq);
290 271
291 mmc_queue_bounce_post(mq); 272 mmc_queue_bounce_post(mq);
@@ -370,16 +351,11 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
370 if (rq_data_dir(req) != READ) { 351 if (rq_data_dir(req) != READ) {
371 if (mmc_card_sd(card)) { 352 if (mmc_card_sd(card)) {
372 u32 blocks; 353 u32 blocks;
373 unsigned int bytes;
374 354
375 blocks = mmc_sd_num_wr_blocks(card); 355 blocks = mmc_sd_num_wr_blocks(card);
376 if (blocks != (u32)-1) { 356 if (blocks != (u32)-1) {
377 if (card->csd.write_partial)
378 bytes = blocks << md->block_bits;
379 else
380 bytes = blocks << 9;
381 spin_lock_irq(&md->lock); 357 spin_lock_irq(&md->lock);
382 ret = __blk_end_request(req, 0, bytes); 358 ret = __blk_end_request(req, 0, blocks << 9);
383 spin_unlock_irq(&md->lock); 359 spin_unlock_irq(&md->lock);
384 } 360 }
385 } else { 361 } else {
@@ -429,13 +405,6 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
429 */ 405 */
430 md->read_only = mmc_blk_readonly(card); 406 md->read_only = mmc_blk_readonly(card);
431 407
432 /*
433 * Both SD and MMC specifications state (although a bit
434 * unclearly in the MMC case) that a block size of 512
435 * bytes must always be supported by the card.
436 */
437 md->block_bits = 9;
438
439 md->disk = alloc_disk(1 << MMC_SHIFT); 408 md->disk = alloc_disk(1 << MMC_SHIFT);
440 if (md->disk == NULL) { 409 if (md->disk == NULL) {
441 ret = -ENOMEM; 410 ret = -ENOMEM;
@@ -473,7 +442,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
473 442
474 sprintf(md->disk->disk_name, "mmcblk%d", devidx); 443 sprintf(md->disk->disk_name, "mmcblk%d", devidx);
475 444
476 blk_queue_hardsect_size(md->queue.queue, 1 << md->block_bits); 445 blk_queue_hardsect_size(md->queue.queue, 512);
477 446
478 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) { 447 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
479 /* 448 /*
@@ -511,7 +480,7 @@ mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
511 480
512 mmc_claim_host(card->host); 481 mmc_claim_host(card->host);
513 cmd.opcode = MMC_SET_BLOCKLEN; 482 cmd.opcode = MMC_SET_BLOCKLEN;
514 cmd.arg = 1 << md->block_bits; 483 cmd.arg = 512;
515 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 484 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
516 err = mmc_wait_for_cmd(card->host, &cmd, 5); 485 err = mmc_wait_for_cmd(card->host, &cmd, 5);
517 mmc_release_host(card->host); 486 mmc_release_host(card->host);
@@ -530,6 +499,8 @@ static int mmc_blk_probe(struct mmc_card *card)
530 struct mmc_blk_data *md; 499 struct mmc_blk_data *md;
531 int err; 500 int err;
532 501
502 char cap_str[10];
503
533 /* 504 /*
534 * Check that the card supports the command class(es) we need. 505 * Check that the card supports the command class(es) we need.
535 */ 506 */
@@ -544,10 +515,11 @@ static int mmc_blk_probe(struct mmc_card *card)
544 if (err) 515 if (err)
545 goto out; 516 goto out;
546 517
547 printk(KERN_INFO "%s: %s %s %lluKiB %s\n", 518 string_get_size(get_capacity(md->disk) << 9, STRING_UNITS_2,
519 cap_str, sizeof(cap_str));
520 printk(KERN_INFO "%s: %s %s %s %s\n",
548 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), 521 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
549 (unsigned long long)(get_capacity(md->disk) >> 1), 522 cap_str, md->read_only ? "(ro)" : "");
550 md->read_only ? "(ro)" : "");
551 523
552 mmc_set_drvdata(card, md); 524 mmc_set_drvdata(card, md);
553 add_disk(md->disk); 525 add_disk(md->disk);
@@ -613,14 +585,19 @@ static struct mmc_driver mmc_driver = {
613 585
614static int __init mmc_blk_init(void) 586static int __init mmc_blk_init(void)
615{ 587{
616 int res = -ENOMEM; 588 int res;
617 589
618 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc"); 590 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
619 if (res) 591 if (res)
620 goto out; 592 goto out;
621 593
622 return mmc_register_driver(&mmc_driver); 594 res = mmc_register_driver(&mmc_driver);
595 if (res)
596 goto out2;
623 597
598 return 0;
599 out2:
600 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
624 out: 601 out:
625 return res; 602 return res;
626} 603}
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index f26b01d811ae..b92b172074ee 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -1040,7 +1040,7 @@ static const struct mmc_test_case mmc_test_cases[] = {
1040 1040
1041}; 1041};
1042 1042
1043static struct mutex mmc_test_lock; 1043static DEFINE_MUTEX(mmc_test_lock);
1044 1044
1045static void mmc_test_run(struct mmc_test_card *test, int testcase) 1045static void mmc_test_run(struct mmc_test_card *test, int testcase)
1046{ 1046{
@@ -1171,8 +1171,6 @@ static int mmc_test_probe(struct mmc_card *card)
1171 if ((card->type != MMC_TYPE_MMC) && (card->type != MMC_TYPE_SD)) 1171 if ((card->type != MMC_TYPE_MMC) && (card->type != MMC_TYPE_SD))
1172 return -ENODEV; 1172 return -ENODEV;
1173 1173
1174 mutex_init(&mmc_test_lock);
1175
1176 ret = device_create_file(&card->dev, &dev_attr_test); 1174 ret = device_create_file(&card->dev, &dev_attr_test);
1177 if (ret) 1175 if (ret)
1178 return ret; 1176 return ret;
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 3dee97e7d165..406989e992ba 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -31,7 +31,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
31 /* 31 /*
32 * We only like normal block requests. 32 * We only like normal block requests.
33 */ 33 */
34 if (!blk_fs_request(req) && !blk_pc_request(req)) { 34 if (!blk_fs_request(req)) {
35 blk_dump_rq_flags(req, "MMC bad request"); 35 blk_dump_rq_flags(req, "MMC bad request");
36 return BLKPREP_KILL; 36 return BLKPREP_KILL;
37 } 37 }
@@ -131,6 +131,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
131 mq->req = NULL; 131 mq->req = NULL;
132 132
133 blk_queue_prep_rq(mq->queue, mmc_prep_request); 133 blk_queue_prep_rq(mq->queue, mmc_prep_request);
134 blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);
134 135
135#ifdef CONFIG_MMC_BLOCK_BOUNCE 136#ifdef CONFIG_MMC_BLOCK_BOUNCE
136 if (host->max_hw_segs == 1) { 137 if (host->max_hw_segs == 1) {
@@ -142,12 +143,19 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
142 bouncesz = host->max_req_size; 143 bouncesz = host->max_req_size;
143 if (bouncesz > host->max_seg_size) 144 if (bouncesz > host->max_seg_size)
144 bouncesz = host->max_seg_size; 145 bouncesz = host->max_seg_size;
146 if (bouncesz > (host->max_blk_count * 512))
147 bouncesz = host->max_blk_count * 512;
148
149 if (bouncesz > 512) {
150 mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
151 if (!mq->bounce_buf) {
152 printk(KERN_WARNING "%s: unable to "
153 "allocate bounce buffer\n",
154 mmc_card_name(card));
155 }
156 }
145 157
146 mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL); 158 if (mq->bounce_buf) {
147 if (!mq->bounce_buf) {
148 printk(KERN_WARNING "%s: unable to allocate "
149 "bounce buffer\n", mmc_card_name(card));
150 } else {
151 blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY); 159 blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
152 blk_queue_max_sectors(mq->queue, bouncesz / 512); 160 blk_queue_max_sectors(mq->queue, bouncesz / 512);
153 blk_queue_max_phys_segments(mq->queue, bouncesz / 512); 161 blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
@@ -175,7 +183,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
175 183
176 if (!mq->bounce_buf) { 184 if (!mq->bounce_buf) {
177 blk_queue_bounce_limit(mq->queue, limit); 185 blk_queue_bounce_limit(mq->queue, limit);
178 blk_queue_max_sectors(mq->queue, host->max_req_size / 512); 186 blk_queue_max_sectors(mq->queue,
187 min(host->max_blk_count, host->max_req_size / 512));
179 blk_queue_max_phys_segments(mq->queue, host->max_phys_segs); 188 blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
180 blk_queue_max_hw_segments(mq->queue, host->max_hw_segs); 189 blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
181 blk_queue_max_segment_size(mq->queue, host->max_seg_size); 190 blk_queue_max_segment_size(mq->queue, host->max_seg_size);
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 64b05c6270f2..9c50e6f1c236 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -248,8 +248,12 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
248 248
249 sg_init_one(&sg, data_buf, len); 249 sg_init_one(&sg, data_buf, len);
250 250
251 if (card) 251 /*
252 mmc_set_data_timeout(&data, card); 252 * The spec states that CSR and CID accesses have a timeout
253 * of 64 clock cycles.
254 */
255 data.timeout_ns = 0;
256 data.timeout_clks = 64;
253 257
254 mmc_wait_for_req(host, &mrq); 258 mmc_wait_for_req(host, &mrq);
255 259
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 4eab79e09ccc..fb99ccff9080 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -165,6 +165,36 @@ static int sdio_enable_wide(struct mmc_card *card)
165} 165}
166 166
167/* 167/*
168 * Test if the card supports high-speed mode and, if so, switch to it.
169 */
170static int sdio_enable_hs(struct mmc_card *card)
171{
172 int ret;
173 u8 speed;
174
175 if (!(card->host->caps & MMC_CAP_SD_HIGHSPEED))
176 return 0;
177
178 if (!card->cccr.high_speed)
179 return 0;
180
181 ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
182 if (ret)
183 return ret;
184
185 speed |= SDIO_SPEED_EHS;
186
187 ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_SPEED, speed, NULL);
188 if (ret)
189 return ret;
190
191 mmc_card_set_highspeed(card);
192 mmc_set_timing(card->host, MMC_TIMING_SD_HS);
193
194 return 0;
195}
196
197/*
168 * Host is being removed. Free up the current card. 198 * Host is being removed. Free up the current card.
169 */ 199 */
170static void mmc_sdio_remove(struct mmc_host *host) 200static void mmc_sdio_remove(struct mmc_host *host)
@@ -333,10 +363,26 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
333 goto remove; 363 goto remove;
334 364
335 /* 365 /*
336 * No support for high-speed yet, so just set 366 * Switch to high-speed (if supported).
337 * the card's maximum speed.
338 */ 367 */
339 mmc_set_clock(host, card->cis.max_dtr); 368 err = sdio_enable_hs(card);
369 if (err)
370 goto remove;
371
372 /*
373 * Change to the card's maximum speed.
374 */
375 if (mmc_card_highspeed(card)) {
376 /*
377 * The SDIO specification doesn't mention how
378 * the CIS transfer speed register relates to
379 * high-speed, but it seems that 50 MHz is
380 * mandatory.
381 */
382 mmc_set_clock(host, 50000000);
383 } else {
384 mmc_set_clock(host, card->cis.max_dtr);
385 }
340 386
341 /* 387 /*
342 * Switch to wider bus (if supported). 388 * Switch to wider bus (if supported).
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index c292e124107a..bb192f90e8e9 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -5,6 +5,8 @@
5 * Created: June 18, 2007 5 * Created: June 18, 2007
6 * Copyright: MontaVista Software Inc. 6 * Copyright: MontaVista Software Inc.
7 * 7 *
8 * Copyright 2008 Pierre Ossman
9 *
8 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or (at 12 * the Free Software Foundation; either version 2 of the License, or (at
@@ -107,11 +109,14 @@ static int sdio_irq_thread(void *_host)
107 109
108 /* 110 /*
109 * Give other threads a chance to run in the presence of 111 * Give other threads a chance to run in the presence of
110 * errors. FIXME: determine if due to card removal and 112 * errors.
111 * possibly exit this thread if so.
112 */ 113 */
113 if (ret < 0) 114 if (ret < 0) {
114 ssleep(1); 115 set_current_state(TASK_INTERRUPTIBLE);
116 if (!kthread_should_stop())
117 schedule_timeout(HZ);
118 set_current_state(TASK_RUNNING);
119 }
115 120
116 /* 121 /*
117 * Adaptive polling frequency based on the assumption 122 * Adaptive polling frequency based on the assumption
@@ -154,7 +159,8 @@ static int sdio_card_irq_get(struct mmc_card *card)
154 if (!host->sdio_irqs++) { 159 if (!host->sdio_irqs++) {
155 atomic_set(&host->sdio_irq_thread_abort, 0); 160 atomic_set(&host->sdio_irq_thread_abort, 0);
156 host->sdio_irq_thread = 161 host->sdio_irq_thread =
157 kthread_run(sdio_irq_thread, host, "ksdiorqd"); 162 kthread_run(sdio_irq_thread, host, "ksdioirqd/%s",
163 mmc_hostname(host));
158 if (IS_ERR(host->sdio_irq_thread)) { 164 if (IS_ERR(host->sdio_irq_thread)) {
159 int err = PTR_ERR(host->sdio_irq_thread); 165 int err = PTR_ERR(host->sdio_irq_thread);
160 host->sdio_irqs--; 166 host->sdio_irqs--;
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index ea8d7a3490d9..dfa585f7feaf 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -2,7 +2,7 @@
2# MMC/SD host controller drivers 2# MMC/SD host controller drivers
3# 3#
4 4
5comment "MMC/SD Host Controller Drivers" 5comment "MMC/SD/SDIO Host Controller Drivers"
6 6
7config MMC_ARMMMCI 7config MMC_ARMMMCI
8 tristate "ARM AMBA Multimedia Card Interface support" 8 tristate "ARM AMBA Multimedia Card Interface support"
@@ -114,6 +114,17 @@ config MMC_ATMELMCI
114 114
115 If unsure, say N. 115 If unsure, say N.
116 116
117config MMC_ATMELMCI_DMA
118 bool "Atmel MCI DMA support (EXPERIMENTAL)"
119 depends on MMC_ATMELMCI && DMA_ENGINE && EXPERIMENTAL
120 help
121 Say Y here to have the Atmel MCI driver use a DMA engine to
122 do data transfers and thus increase the throughput and
123 reduce the CPU utilization. Note that this is highly
124 experimental and may cause the driver to lock up.
125
126 If unsure, say N.
127
117config MMC_IMX 128config MMC_IMX
118 tristate "Motorola i.MX Multimedia Card Interface support" 129 tristate "Motorola i.MX Multimedia Card Interface support"
119 depends on ARCH_IMX 130 depends on ARCH_IMX
@@ -141,21 +152,22 @@ config MMC_TIFM_SD
141 module will be called tifm_sd. 152 module will be called tifm_sd.
142 153
143config MMC_SPI 154config MMC_SPI
144 tristate "MMC/SD over SPI" 155 tristate "MMC/SD/SDIO over SPI"
145 depends on MMC && SPI_MASTER && !HIGHMEM && HAS_DMA 156 depends on SPI_MASTER && !HIGHMEM && HAS_DMA
146 select CRC7 157 select CRC7
147 select CRC_ITU_T 158 select CRC_ITU_T
148 help 159 help
149 Some systems accss MMC/SD cards using a SPI controller instead of 160 Some systems accss MMC/SD/SDIO cards using a SPI controller
150 using a "native" MMC/SD controller. This has a disadvantage of 161 instead of using a "native" MMC/SD/SDIO controller. This has a
151 being relatively high overhead, but a compensating advantage of 162 disadvantage of being relatively high overhead, but a compensating
152 working on many systems without dedicated MMC/SD controllers. 163 advantage of working on many systems without dedicated MMC/SD/SDIO
164 controllers.
153 165
154 If unsure, or if your system has no SPI master driver, say N. 166 If unsure, or if your system has no SPI master driver, say N.
155 167
156config MMC_S3C 168config MMC_S3C
157 tristate "Samsung S3C SD/MMC Card Interface support" 169 tristate "Samsung S3C SD/MMC Card Interface support"
158 depends on ARCH_S3C2410 && MMC 170 depends on ARCH_S3C2410
159 help 171 help
160 This selects a driver for the MCI interface found in 172 This selects a driver for the MCI interface found in
161 Samsung's S3C2410, S3C2412, S3C2440, S3C2442 CPUs. 173 Samsung's S3C2410, S3C2412, S3C2440, S3C2442 CPUs.
@@ -166,7 +178,7 @@ config MMC_S3C
166 178
167config MMC_SDRICOH_CS 179config MMC_SDRICOH_CS
168 tristate "MMC/SD driver for Ricoh Bay1Controllers (EXPERIMENTAL)" 180 tristate "MMC/SD driver for Ricoh Bay1Controllers (EXPERIMENTAL)"
169 depends on EXPERIMENTAL && MMC && PCI && PCMCIA 181 depends on EXPERIMENTAL && PCI && PCMCIA
170 help 182 help
171 Say Y here if your Notebook reports a Ricoh Bay1Controller PCMCIA 183 Say Y here if your Notebook reports a Ricoh Bay1Controller PCMCIA
172 card whenever you insert a MMC or SD card into the card slot. 184 card whenever you insert a MMC or SD card into the card slot.
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 6915f40ac8ab..1f8b5b36222c 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -621,12 +621,21 @@ static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command
621 if (cpu_is_at91sam9260 () || cpu_is_at91sam9263()) 621 if (cpu_is_at91sam9260 () || cpu_is_at91sam9263())
622 if (host->total_length < 12) 622 if (host->total_length < 12)
623 host->total_length = 12; 623 host->total_length = 12;
624 host->buffer = dma_alloc_coherent(NULL, 624
625 host->total_length, 625 host->buffer = kmalloc(host->total_length, GFP_KERNEL);
626 &host->physical_address, GFP_KERNEL); 626 if (!host->buffer) {
627 pr_debug("Can't alloc tx buffer\n");
628 cmd->error = -ENOMEM;
629 mmc_request_done(host->mmc, host->request);
630 return;
631 }
627 632
628 at91_mci_sg_to_dma(host, data); 633 at91_mci_sg_to_dma(host, data);
629 634
635 host->physical_address = dma_map_single(NULL,
636 host->buffer, host->total_length,
637 DMA_TO_DEVICE);
638
630 pr_debug("Transmitting %d bytes\n", host->total_length); 639 pr_debug("Transmitting %d bytes\n", host->total_length);
631 640
632 at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address); 641 at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address);
@@ -694,7 +703,10 @@ static void at91_mci_completed_command(struct at91mci_host *host, unsigned int s
694 cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3)); 703 cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));
695 704
696 if (host->buffer) { 705 if (host->buffer) {
697 dma_free_coherent(NULL, host->total_length, host->buffer, host->physical_address); 706 dma_unmap_single(NULL,
707 host->physical_address, host->total_length,
708 DMA_TO_DEVICE);
709 kfree(host->buffer);
698 host->buffer = NULL; 710 host->buffer = NULL;
699 } 711 }
700 712
diff --git a/drivers/mmc/host/atmel-mci-regs.h b/drivers/mmc/host/atmel-mci-regs.h
index 26bd80e65031..b58364ed6bba 100644
--- a/drivers/mmc/host/atmel-mci-regs.h
+++ b/drivers/mmc/host/atmel-mci-regs.h
@@ -25,8 +25,10 @@
25#define MCI_SDCR 0x000c /* SD Card / SDIO */ 25#define MCI_SDCR 0x000c /* SD Card / SDIO */
26# define MCI_SDCSEL_SLOT_A ( 0 << 0) /* Select SD slot A */ 26# define MCI_SDCSEL_SLOT_A ( 0 << 0) /* Select SD slot A */
27# define MCI_SDCSEL_SLOT_B ( 1 << 0) /* Select SD slot A */ 27# define MCI_SDCSEL_SLOT_B ( 1 << 0) /* Select SD slot A */
28# define MCI_SDCBUS_1BIT ( 0 << 7) /* 1-bit data bus */ 28# define MCI_SDCSEL_MASK ( 3 << 0)
29# define MCI_SDCBUS_4BIT ( 1 << 7) /* 4-bit data bus */ 29# define MCI_SDCBUS_1BIT ( 0 << 6) /* 1-bit data bus */
30# define MCI_SDCBUS_4BIT ( 2 << 6) /* 4-bit data bus */
31# define MCI_SDCBUS_MASK ( 3 << 6)
30#define MCI_ARGR 0x0010 /* Command Argument */ 32#define MCI_ARGR 0x0010 /* Command Argument */
31#define MCI_CMDR 0x0014 /* Command */ 33#define MCI_CMDR 0x0014 /* Command */
32# define MCI_CMDR_CMDNB(x) ((x) << 0) /* Command Opcode */ 34# define MCI_CMDR_CMDNB(x) ((x) << 0) /* Command Opcode */
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 0bd06f5bd62f..7a3f2436b011 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -11,6 +11,8 @@
11#include <linux/clk.h> 11#include <linux/clk.h>
12#include <linux/debugfs.h> 12#include <linux/debugfs.h>
13#include <linux/device.h> 13#include <linux/device.h>
14#include <linux/dmaengine.h>
15#include <linux/dma-mapping.h>
14#include <linux/err.h> 16#include <linux/err.h>
15#include <linux/gpio.h> 17#include <linux/gpio.h>
16#include <linux/init.h> 18#include <linux/init.h>
@@ -33,64 +35,178 @@
33#include "atmel-mci-regs.h" 35#include "atmel-mci-regs.h"
34 36
35#define ATMCI_DATA_ERROR_FLAGS (MCI_DCRCE | MCI_DTOE | MCI_OVRE | MCI_UNRE) 37#define ATMCI_DATA_ERROR_FLAGS (MCI_DCRCE | MCI_DTOE | MCI_OVRE | MCI_UNRE)
38#define ATMCI_DMA_THRESHOLD 16
36 39
37enum { 40enum {
38 EVENT_CMD_COMPLETE = 0, 41 EVENT_CMD_COMPLETE = 0,
39 EVENT_DATA_ERROR,
40 EVENT_DATA_COMPLETE,
41 EVENT_STOP_SENT,
42 EVENT_STOP_COMPLETE,
43 EVENT_XFER_COMPLETE, 42 EVENT_XFER_COMPLETE,
43 EVENT_DATA_COMPLETE,
44 EVENT_DATA_ERROR,
45};
46
47enum atmel_mci_state {
48 STATE_IDLE = 0,
49 STATE_SENDING_CMD,
50 STATE_SENDING_DATA,
51 STATE_DATA_BUSY,
52 STATE_SENDING_STOP,
53 STATE_DATA_ERROR,
54};
55
56struct atmel_mci_dma {
57#ifdef CONFIG_MMC_ATMELMCI_DMA
58 struct dma_client client;
59 struct dma_chan *chan;
60 struct dma_async_tx_descriptor *data_desc;
61#endif
44}; 62};
45 63
64/**
65 * struct atmel_mci - MMC controller state shared between all slots
66 * @lock: Spinlock protecting the queue and associated data.
67 * @regs: Pointer to MMIO registers.
68 * @sg: Scatterlist entry currently being processed by PIO code, if any.
69 * @pio_offset: Offset into the current scatterlist entry.
70 * @cur_slot: The slot which is currently using the controller.
71 * @mrq: The request currently being processed on @cur_slot,
72 * or NULL if the controller is idle.
73 * @cmd: The command currently being sent to the card, or NULL.
74 * @data: The data currently being transferred, or NULL if no data
75 * transfer is in progress.
76 * @dma: DMA client state.
77 * @data_chan: DMA channel being used for the current data transfer.
78 * @cmd_status: Snapshot of SR taken upon completion of the current
79 * command. Only valid when EVENT_CMD_COMPLETE is pending.
80 * @data_status: Snapshot of SR taken upon completion of the current
81 * data transfer. Only valid when EVENT_DATA_COMPLETE or
82 * EVENT_DATA_ERROR is pending.
83 * @stop_cmdr: Value to be loaded into CMDR when the stop command is
84 * to be sent.
85 * @tasklet: Tasklet running the request state machine.
86 * @pending_events: Bitmask of events flagged by the interrupt handler
87 * to be processed by the tasklet.
88 * @completed_events: Bitmask of events which the state machine has
89 * processed.
90 * @state: Tasklet state.
91 * @queue: List of slots waiting for access to the controller.
92 * @need_clock_update: Update the clock rate before the next request.
93 * @need_reset: Reset controller before next request.
94 * @mode_reg: Value of the MR register.
95 * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
96 * rate and timeout calculations.
97 * @mapbase: Physical address of the MMIO registers.
98 * @mck: The peripheral bus clock hooked up to the MMC controller.
99 * @pdev: Platform device associated with the MMC controller.
100 * @slot: Slots sharing this MMC controller.
101 *
102 * Locking
103 * =======
104 *
105 * @lock is a softirq-safe spinlock protecting @queue as well as
106 * @cur_slot, @mrq and @state. These must always be updated
107 * at the same time while holding @lock.
108 *
109 * @lock also protects mode_reg and need_clock_update since these are
110 * used to synchronize mode register updates with the queue
111 * processing.
112 *
113 * The @mrq field of struct atmel_mci_slot is also protected by @lock,
114 * and must always be written at the same time as the slot is added to
115 * @queue.
116 *
117 * @pending_events and @completed_events are accessed using atomic bit
118 * operations, so they don't need any locking.
119 *
120 * None of the fields touched by the interrupt handler need any
121 * locking. However, ordering is important: Before EVENT_DATA_ERROR or
122 * EVENT_DATA_COMPLETE is set in @pending_events, all data-related
123 * interrupts must be disabled and @data_status updated with a
124 * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the
125 * CMDRDY interupt must be disabled and @cmd_status updated with a
126 * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
127 * bytes_xfered field of @data must be written. This is ensured by
128 * using barriers.
129 */
46struct atmel_mci { 130struct atmel_mci {
47 struct mmc_host *mmc; 131 spinlock_t lock;
48 void __iomem *regs; 132 void __iomem *regs;
49 133
50 struct scatterlist *sg; 134 struct scatterlist *sg;
51 unsigned int pio_offset; 135 unsigned int pio_offset;
52 136
137 struct atmel_mci_slot *cur_slot;
53 struct mmc_request *mrq; 138 struct mmc_request *mrq;
54 struct mmc_command *cmd; 139 struct mmc_command *cmd;
55 struct mmc_data *data; 140 struct mmc_data *data;
56 141
142 struct atmel_mci_dma dma;
143 struct dma_chan *data_chan;
144
57 u32 cmd_status; 145 u32 cmd_status;
58 u32 data_status; 146 u32 data_status;
59 u32 stop_status;
60 u32 stop_cmdr; 147 u32 stop_cmdr;
61 148
62 u32 mode_reg;
63 u32 sdc_reg;
64
65 struct tasklet_struct tasklet; 149 struct tasklet_struct tasklet;
66 unsigned long pending_events; 150 unsigned long pending_events;
67 unsigned long completed_events; 151 unsigned long completed_events;
152 enum atmel_mci_state state;
153 struct list_head queue;
68 154
69 int present; 155 bool need_clock_update;
70 int detect_pin; 156 bool need_reset;
71 int wp_pin; 157 u32 mode_reg;
72
73 /* For detect pin debouncing */
74 struct timer_list detect_timer;
75
76 unsigned long bus_hz; 158 unsigned long bus_hz;
77 unsigned long mapbase; 159 unsigned long mapbase;
78 struct clk *mck; 160 struct clk *mck;
79 struct platform_device *pdev; 161 struct platform_device *pdev;
162
163 struct atmel_mci_slot *slot[ATMEL_MCI_MAX_NR_SLOTS];
164};
165
166/**
167 * struct atmel_mci_slot - MMC slot state
168 * @mmc: The mmc_host representing this slot.
169 * @host: The MMC controller this slot is using.
170 * @sdc_reg: Value of SDCR to be written before using this slot.
171 * @mrq: mmc_request currently being processed or waiting to be
172 * processed, or NULL when the slot is idle.
173 * @queue_node: List node for placing this node in the @queue list of
174 * &struct atmel_mci.
175 * @clock: Clock rate configured by set_ios(). Protected by host->lock.
176 * @flags: Random state bits associated with the slot.
177 * @detect_pin: GPIO pin used for card detection, or negative if not
178 * available.
179 * @wp_pin: GPIO pin used for card write protect sending, or negative
180 * if not available.
181 * @detect_timer: Timer used for debouncing @detect_pin interrupts.
182 */
183struct atmel_mci_slot {
184 struct mmc_host *mmc;
185 struct atmel_mci *host;
186
187 u32 sdc_reg;
188
189 struct mmc_request *mrq;
190 struct list_head queue_node;
191
192 unsigned int clock;
193 unsigned long flags;
194#define ATMCI_CARD_PRESENT 0
195#define ATMCI_CARD_NEED_INIT 1
196#define ATMCI_SHUTDOWN 2
197
198 int detect_pin;
199 int wp_pin;
200
201 struct timer_list detect_timer;
80}; 202};
81 203
82#define atmci_is_completed(host, event) \
83 test_bit(event, &host->completed_events)
84#define atmci_test_and_clear_pending(host, event) \ 204#define atmci_test_and_clear_pending(host, event) \
85 test_and_clear_bit(event, &host->pending_events) 205 test_and_clear_bit(event, &host->pending_events)
86#define atmci_test_and_set_completed(host, event) \
87 test_and_set_bit(event, &host->completed_events)
88#define atmci_set_completed(host, event) \ 206#define atmci_set_completed(host, event) \
89 set_bit(event, &host->completed_events) 207 set_bit(event, &host->completed_events)
90#define atmci_set_pending(host, event) \ 208#define atmci_set_pending(host, event) \
91 set_bit(event, &host->pending_events) 209 set_bit(event, &host->pending_events)
92#define atmci_clear_pending(host, event) \
93 clear_bit(event, &host->pending_events)
94 210
95/* 211/*
96 * The debugfs stuff below is mostly optimized away when 212 * The debugfs stuff below is mostly optimized away when
@@ -98,14 +214,15 @@ struct atmel_mci {
98 */ 214 */
99static int atmci_req_show(struct seq_file *s, void *v) 215static int atmci_req_show(struct seq_file *s, void *v)
100{ 216{
101 struct atmel_mci *host = s->private; 217 struct atmel_mci_slot *slot = s->private;
102 struct mmc_request *mrq = host->mrq; 218 struct mmc_request *mrq;
103 struct mmc_command *cmd; 219 struct mmc_command *cmd;
104 struct mmc_command *stop; 220 struct mmc_command *stop;
105 struct mmc_data *data; 221 struct mmc_data *data;
106 222
107 /* Make sure we get a consistent snapshot */ 223 /* Make sure we get a consistent snapshot */
108 spin_lock_irq(&host->mmc->lock); 224 spin_lock_bh(&slot->host->lock);
225 mrq = slot->mrq;
109 226
110 if (mrq) { 227 if (mrq) {
111 cmd = mrq->cmd; 228 cmd = mrq->cmd;
@@ -130,7 +247,7 @@ static int atmci_req_show(struct seq_file *s, void *v)
130 stop->resp[2], stop->error); 247 stop->resp[2], stop->error);
131 } 248 }
132 249
133 spin_unlock_irq(&host->mmc->lock); 250 spin_unlock_bh(&slot->host->lock);
134 251
135 return 0; 252 return 0;
136} 253}
@@ -193,10 +310,16 @@ static int atmci_regs_show(struct seq_file *s, void *v)
193 if (!buf) 310 if (!buf)
194 return -ENOMEM; 311 return -ENOMEM;
195 312
196 /* Grab a more or less consistent snapshot */ 313 /*
197 spin_lock_irq(&host->mmc->lock); 314 * Grab a more or less consistent snapshot. Note that we're
315 * not disabling interrupts, so IMR and SR may not be
316 * consistent.
317 */
318 spin_lock_bh(&host->lock);
319 clk_enable(host->mck);
198 memcpy_fromio(buf, host->regs, MCI_REGS_SIZE); 320 memcpy_fromio(buf, host->regs, MCI_REGS_SIZE);
199 spin_unlock_irq(&host->mmc->lock); 321 clk_disable(host->mck);
322 spin_unlock_bh(&host->lock);
200 323
201 seq_printf(s, "MR:\t0x%08x%s%s CLKDIV=%u\n", 324 seq_printf(s, "MR:\t0x%08x%s%s CLKDIV=%u\n",
202 buf[MCI_MR / 4], 325 buf[MCI_MR / 4],
@@ -216,6 +339,8 @@ static int atmci_regs_show(struct seq_file *s, void *v)
216 atmci_show_status_reg(s, "SR", buf[MCI_SR / 4]); 339 atmci_show_status_reg(s, "SR", buf[MCI_SR / 4]);
217 atmci_show_status_reg(s, "IMR", buf[MCI_IMR / 4]); 340 atmci_show_status_reg(s, "IMR", buf[MCI_IMR / 4]);
218 341
342 kfree(buf);
343
219 return 0; 344 return 0;
220} 345}
221 346
@@ -232,14 +357,13 @@ static const struct file_operations atmci_regs_fops = {
232 .release = single_release, 357 .release = single_release,
233}; 358};
234 359
235static void atmci_init_debugfs(struct atmel_mci *host) 360static void atmci_init_debugfs(struct atmel_mci_slot *slot)
236{ 361{
237 struct mmc_host *mmc; 362 struct mmc_host *mmc = slot->mmc;
238 struct dentry *root; 363 struct atmel_mci *host = slot->host;
239 struct dentry *node; 364 struct dentry *root;
240 struct resource *res; 365 struct dentry *node;
241 366
242 mmc = host->mmc;
243 root = mmc->debugfs_root; 367 root = mmc->debugfs_root;
244 if (!root) 368 if (!root)
245 return; 369 return;
@@ -251,10 +375,11 @@ static void atmci_init_debugfs(struct atmel_mci *host)
251 if (!node) 375 if (!node)
252 goto err; 376 goto err;
253 377
254 res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0); 378 node = debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops);
255 node->d_inode->i_size = res->end - res->start + 1; 379 if (!node)
380 goto err;
256 381
257 node = debugfs_create_file("req", S_IRUSR, root, host, &atmci_req_fops); 382 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
258 if (!node) 383 if (!node)
259 goto err; 384 goto err;
260 385
@@ -271,25 +396,7 @@ static void atmci_init_debugfs(struct atmel_mci *host)
271 return; 396 return;
272 397
273err: 398err:
274 dev_err(&host->pdev->dev, 399 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
275 "failed to initialize debugfs for controller\n");
276}
277
278static void atmci_enable(struct atmel_mci *host)
279{
280 clk_enable(host->mck);
281 mci_writel(host, CR, MCI_CR_MCIEN);
282 mci_writel(host, MR, host->mode_reg);
283 mci_writel(host, SDCR, host->sdc_reg);
284}
285
286static void atmci_disable(struct atmel_mci *host)
287{
288 mci_writel(host, CR, MCI_CR_SWRST);
289
290 /* Stall until write is complete, then disable the bus clock */
291 mci_readl(host, SR);
292 clk_disable(host->mck);
293} 400}
294 401
295static inline unsigned int ns_to_clocks(struct atmel_mci *host, 402static inline unsigned int ns_to_clocks(struct atmel_mci *host,
@@ -299,7 +406,7 @@ static inline unsigned int ns_to_clocks(struct atmel_mci *host,
299} 406}
300 407
301static void atmci_set_timeout(struct atmel_mci *host, 408static void atmci_set_timeout(struct atmel_mci *host,
302 struct mmc_data *data) 409 struct atmel_mci_slot *slot, struct mmc_data *data)
303{ 410{
304 static unsigned dtomul_to_shift[] = { 411 static unsigned dtomul_to_shift[] = {
305 0, 4, 7, 8, 10, 12, 16, 20 412 0, 4, 7, 8, 10, 12, 16, 20
@@ -322,7 +429,7 @@ static void atmci_set_timeout(struct atmel_mci *host,
322 dtocyc = 15; 429 dtocyc = 15;
323 } 430 }
324 431
325 dev_vdbg(&host->mmc->class_dev, "setting timeout to %u cycles\n", 432 dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n",
326 dtocyc << dtomul_to_shift[dtomul]); 433 dtocyc << dtomul_to_shift[dtomul]);
327 mci_writel(host, DTOR, (MCI_DTOMUL(dtomul) | MCI_DTOCYC(dtocyc))); 434 mci_writel(host, DTOR, (MCI_DTOMUL(dtomul) | MCI_DTOCYC(dtocyc)));
328} 435}
@@ -375,15 +482,12 @@ static u32 atmci_prepare_command(struct mmc_host *mmc,
375} 482}
376 483
377static void atmci_start_command(struct atmel_mci *host, 484static void atmci_start_command(struct atmel_mci *host,
378 struct mmc_command *cmd, 485 struct mmc_command *cmd, u32 cmd_flags)
379 u32 cmd_flags)
380{ 486{
381 /* Must read host->cmd after testing event flags */
382 smp_rmb();
383 WARN_ON(host->cmd); 487 WARN_ON(host->cmd);
384 host->cmd = cmd; 488 host->cmd = cmd;
385 489
386 dev_vdbg(&host->mmc->class_dev, 490 dev_vdbg(&host->pdev->dev,
387 "start command: ARGR=0x%08x CMDR=0x%08x\n", 491 "start command: ARGR=0x%08x CMDR=0x%08x\n",
388 cmd->arg, cmd_flags); 492 cmd->arg, cmd_flags);
389 493
@@ -391,34 +495,157 @@ static void atmci_start_command(struct atmel_mci *host,
391 mci_writel(host, CMDR, cmd_flags); 495 mci_writel(host, CMDR, cmd_flags);
392} 496}
393 497
394static void send_stop_cmd(struct mmc_host *mmc, struct mmc_data *data) 498static void send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
395{ 499{
396 struct atmel_mci *host = mmc_priv(mmc);
397
398 atmci_start_command(host, data->stop, host->stop_cmdr); 500 atmci_start_command(host, data->stop, host->stop_cmdr);
399 mci_writel(host, IER, MCI_CMDRDY); 501 mci_writel(host, IER, MCI_CMDRDY);
400} 502}
401 503
402static void atmci_request_end(struct mmc_host *mmc, struct mmc_request *mrq) 504#ifdef CONFIG_MMC_ATMELMCI_DMA
505static void atmci_dma_cleanup(struct atmel_mci *host)
403{ 506{
404 struct atmel_mci *host = mmc_priv(mmc); 507 struct mmc_data *data = host->data;
405 508
406 WARN_ON(host->cmd || host->data); 509 dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
407 host->mrq = NULL; 510 ((data->flags & MMC_DATA_WRITE)
511 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
512}
513
514static void atmci_stop_dma(struct atmel_mci *host)
515{
516 struct dma_chan *chan = host->data_chan;
517
518 if (chan) {
519 chan->device->device_terminate_all(chan);
520 atmci_dma_cleanup(host);
521 } else {
522 /* Data transfer was stopped by the interrupt handler */
523 atmci_set_pending(host, EVENT_XFER_COMPLETE);
524 mci_writel(host, IER, MCI_NOTBUSY);
525 }
526}
527
528/* This function is called by the DMA driver from tasklet context. */
529static void atmci_dma_complete(void *arg)
530{
531 struct atmel_mci *host = arg;
532 struct mmc_data *data = host->data;
533
534 dev_vdbg(&host->pdev->dev, "DMA complete\n");
535
536 atmci_dma_cleanup(host);
537
538 /*
539 * If the card was removed, data will be NULL. No point trying
540 * to send the stop command or waiting for NBUSY in this case.
541 */
542 if (data) {
543 atmci_set_pending(host, EVENT_XFER_COMPLETE);
544 tasklet_schedule(&host->tasklet);
545
546 /*
547 * Regardless of what the documentation says, we have
548 * to wait for NOTBUSY even after block read
549 * operations.
550 *
551 * When the DMA transfer is complete, the controller
552 * may still be reading the CRC from the card, i.e.
553 * the data transfer is still in progress and we
554 * haven't seen all the potential error bits yet.
555 *
556 * The interrupt handler will schedule a different
557 * tasklet to finish things up when the data transfer
558 * is completely done.
559 *
560 * We may not complete the mmc request here anyway
561 * because the mmc layer may call back and cause us to
562 * violate the "don't submit new operations from the
563 * completion callback" rule of the dma engine
564 * framework.
565 */
566 mci_writel(host, IER, MCI_NOTBUSY);
567 }
568}
569
570static int
571atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
572{
573 struct dma_chan *chan;
574 struct dma_async_tx_descriptor *desc;
575 struct scatterlist *sg;
576 unsigned int i;
577 enum dma_data_direction direction;
578
579 /*
580 * We don't do DMA on "complex" transfers, i.e. with
581 * non-word-aligned buffers or lengths. Also, we don't bother
582 * with all the DMA setup overhead for short transfers.
583 */
584 if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD)
585 return -EINVAL;
586 if (data->blksz & 3)
587 return -EINVAL;
588
589 for_each_sg(data->sg, sg, data->sg_len, i) {
590 if (sg->offset & 3 || sg->length & 3)
591 return -EINVAL;
592 }
593
594 /* If we don't have a channel, we can't do DMA */
595 chan = host->dma.chan;
596 if (chan) {
597 dma_chan_get(chan);
598 host->data_chan = chan;
599 }
600
601 if (!chan)
602 return -ENODEV;
603
604 if (data->flags & MMC_DATA_READ)
605 direction = DMA_FROM_DEVICE;
606 else
607 direction = DMA_TO_DEVICE;
608
609 desc = chan->device->device_prep_slave_sg(chan,
610 data->sg, data->sg_len, direction,
611 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
612 if (!desc)
613 return -ENOMEM;
614
615 host->dma.data_desc = desc;
616 desc->callback = atmci_dma_complete;
617 desc->callback_param = host;
618 desc->tx_submit(desc);
619
620 /* Go! */
621 chan->device->device_issue_pending(chan);
622
623 return 0;
624}
408 625
409 atmci_disable(host); 626#else /* CONFIG_MMC_ATMELMCI_DMA */
410 627
411 mmc_request_done(mmc, mrq); 628static int atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
629{
630 return -ENOSYS;
412} 631}
413 632
633static void atmci_stop_dma(struct atmel_mci *host)
634{
635 /* Data transfer was stopped by the interrupt handler */
636 atmci_set_pending(host, EVENT_XFER_COMPLETE);
637 mci_writel(host, IER, MCI_NOTBUSY);
638}
639
640#endif /* CONFIG_MMC_ATMELMCI_DMA */
641
414/* 642/*
415 * Returns a mask of interrupt flags to be enabled after the whole 643 * Returns a mask of interrupt flags to be enabled after the whole
416 * request has been prepared. 644 * request has been prepared.
417 */ 645 */
418static u32 atmci_submit_data(struct mmc_host *mmc, struct mmc_data *data) 646static u32 atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
419{ 647{
420 struct atmel_mci *host = mmc_priv(mmc); 648 u32 iflags;
421 u32 iflags;
422 649
423 data->error = -EINPROGRESS; 650 data->error = -EINPROGRESS;
424 651
@@ -426,75 +653,89 @@ static u32 atmci_submit_data(struct mmc_host *mmc, struct mmc_data *data)
426 host->sg = NULL; 653 host->sg = NULL;
427 host->data = data; 654 host->data = data;
428 655
429 mci_writel(host, BLKR, MCI_BCNT(data->blocks)
430 | MCI_BLKLEN(data->blksz));
431 dev_vdbg(&mmc->class_dev, "BLKR=0x%08x\n",
432 MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz));
433
434 iflags = ATMCI_DATA_ERROR_FLAGS; 656 iflags = ATMCI_DATA_ERROR_FLAGS;
435 host->sg = data->sg; 657 if (atmci_submit_data_dma(host, data)) {
436 host->pio_offset = 0; 658 host->data_chan = NULL;
437 if (data->flags & MMC_DATA_READ) 659
438 iflags |= MCI_RXRDY; 660 /*
439 else 661 * Errata: MMC data write operation with less than 12
440 iflags |= MCI_TXRDY; 662 * bytes is impossible.
663 *
664 * Errata: MCI Transmit Data Register (TDR) FIFO
665 * corruption when length is not multiple of 4.
666 */
667 if (data->blocks * data->blksz < 12
668 || (data->blocks * data->blksz) & 3)
669 host->need_reset = true;
670
671 host->sg = data->sg;
672 host->pio_offset = 0;
673 if (data->flags & MMC_DATA_READ)
674 iflags |= MCI_RXRDY;
675 else
676 iflags |= MCI_TXRDY;
677 }
441 678
442 return iflags; 679 return iflags;
443} 680}
444 681
445static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq) 682static void atmci_start_request(struct atmel_mci *host,
683 struct atmel_mci_slot *slot)
446{ 684{
447 struct atmel_mci *host = mmc_priv(mmc); 685 struct mmc_request *mrq;
448 struct mmc_data *data;
449 struct mmc_command *cmd; 686 struct mmc_command *cmd;
687 struct mmc_data *data;
450 u32 iflags; 688 u32 iflags;
451 u32 cmdflags = 0; 689 u32 cmdflags;
452
453 iflags = mci_readl(host, IMR);
454 if (iflags)
455 dev_warn(&mmc->class_dev, "WARNING: IMR=0x%08x\n",
456 mci_readl(host, IMR));
457
458 WARN_ON(host->mrq != NULL);
459
460 /*
461 * We may "know" the card is gone even though there's still an
462 * electrical connection. If so, we really need to communicate
463 * this to the MMC core since there won't be any more
464 * interrupts as the card is completely removed. Otherwise,
465 * the MMC core might believe the card is still there even
466 * though the card was just removed very slowly.
467 */
468 if (!host->present) {
469 mrq->cmd->error = -ENOMEDIUM;
470 mmc_request_done(mmc, mrq);
471 return;
472 }
473 690
691 mrq = slot->mrq;
692 host->cur_slot = slot;
474 host->mrq = mrq; 693 host->mrq = mrq;
694
475 host->pending_events = 0; 695 host->pending_events = 0;
476 host->completed_events = 0; 696 host->completed_events = 0;
697 host->data_status = 0;
477 698
478 atmci_enable(host); 699 if (host->need_reset) {
700 mci_writel(host, CR, MCI_CR_SWRST);
701 mci_writel(host, CR, MCI_CR_MCIEN);
702 mci_writel(host, MR, host->mode_reg);
703 host->need_reset = false;
704 }
705 mci_writel(host, SDCR, slot->sdc_reg);
479 706
480 /* We don't support multiple blocks of weird lengths. */ 707 iflags = mci_readl(host, IMR);
708 if (iflags)
709 dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
710 iflags);
711
712 if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {
713 /* Send init sequence (74 clock cycles) */
714 mci_writel(host, CMDR, MCI_CMDR_SPCMD_INIT);
715 while (!(mci_readl(host, SR) & MCI_CMDRDY))
716 cpu_relax();
717 }
481 data = mrq->data; 718 data = mrq->data;
482 if (data) { 719 if (data) {
483 if (data->blocks > 1 && data->blksz & 3) 720 atmci_set_timeout(host, slot, data);
484 goto fail; 721
485 atmci_set_timeout(host, data); 722 /* Must set block count/size before sending command */
723 mci_writel(host, BLKR, MCI_BCNT(data->blocks)
724 | MCI_BLKLEN(data->blksz));
725 dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n",
726 MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz));
486 } 727 }
487 728
488 iflags = MCI_CMDRDY; 729 iflags = MCI_CMDRDY;
489 cmd = mrq->cmd; 730 cmd = mrq->cmd;
490 cmdflags = atmci_prepare_command(mmc, cmd); 731 cmdflags = atmci_prepare_command(slot->mmc, cmd);
491 atmci_start_command(host, cmd, cmdflags); 732 atmci_start_command(host, cmd, cmdflags);
492 733
493 if (data) 734 if (data)
494 iflags |= atmci_submit_data(mmc, data); 735 iflags |= atmci_submit_data(host, data);
495 736
496 if (mrq->stop) { 737 if (mrq->stop) {
497 host->stop_cmdr = atmci_prepare_command(mmc, mrq->stop); 738 host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
498 host->stop_cmdr |= MCI_CMDR_STOP_XFER; 739 host->stop_cmdr |= MCI_CMDR_STOP_XFER;
499 if (!(data->flags & MMC_DATA_WRITE)) 740 if (!(data->flags & MMC_DATA_WRITE))
500 host->stop_cmdr |= MCI_CMDR_TRDIR_READ; 741 host->stop_cmdr |= MCI_CMDR_TRDIR_READ;
@@ -511,59 +752,156 @@ static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
511 * prepared yet.) 752 * prepared yet.)
512 */ 753 */
513 mci_writel(host, IER, iflags); 754 mci_writel(host, IER, iflags);
755}
514 756
515 return; 757static void atmci_queue_request(struct atmel_mci *host,
758 struct atmel_mci_slot *slot, struct mmc_request *mrq)
759{
760 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
761 host->state);
762
763 spin_lock_bh(&host->lock);
764 slot->mrq = mrq;
765 if (host->state == STATE_IDLE) {
766 host->state = STATE_SENDING_CMD;
767 atmci_start_request(host, slot);
768 } else {
769 list_add_tail(&slot->queue_node, &host->queue);
770 }
771 spin_unlock_bh(&host->lock);
772}
516 773
517fail: 774static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
518 atmci_disable(host); 775{
519 host->mrq = NULL; 776 struct atmel_mci_slot *slot = mmc_priv(mmc);
520 mrq->cmd->error = -EINVAL; 777 struct atmel_mci *host = slot->host;
521 mmc_request_done(mmc, mrq); 778 struct mmc_data *data;
779
780 WARN_ON(slot->mrq);
781
782 /*
783 * We may "know" the card is gone even though there's still an
784 * electrical connection. If so, we really need to communicate
785 * this to the MMC core since there won't be any more
786 * interrupts as the card is completely removed. Otherwise,
787 * the MMC core might believe the card is still there even
788 * though the card was just removed very slowly.
789 */
790 if (!test_bit(ATMCI_CARD_PRESENT, &slot->flags)) {
791 mrq->cmd->error = -ENOMEDIUM;
792 mmc_request_done(mmc, mrq);
793 return;
794 }
795
796 /* We don't support multiple blocks of weird lengths. */
797 data = mrq->data;
798 if (data && data->blocks > 1 && data->blksz & 3) {
799 mrq->cmd->error = -EINVAL;
800 mmc_request_done(mmc, mrq);
801 }
802
803 atmci_queue_request(host, slot, mrq);
522} 804}
523 805
524static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 806static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
525{ 807{
526 struct atmel_mci *host = mmc_priv(mmc); 808 struct atmel_mci_slot *slot = mmc_priv(mmc);
809 struct atmel_mci *host = slot->host;
810 unsigned int i;
811
812 slot->sdc_reg &= ~MCI_SDCBUS_MASK;
813 switch (ios->bus_width) {
814 case MMC_BUS_WIDTH_1:
815 slot->sdc_reg |= MCI_SDCBUS_1BIT;
816 break;
817 case MMC_BUS_WIDTH_4:
818 slot->sdc_reg = MCI_SDCBUS_4BIT;
819 break;
820 }
527 821
528 if (ios->clock) { 822 if (ios->clock) {
823 unsigned int clock_min = ~0U;
529 u32 clkdiv; 824 u32 clkdiv;
530 825
531 /* Set clock rate */ 826 spin_lock_bh(&host->lock);
532 clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * ios->clock) - 1; 827 if (!host->mode_reg) {
828 clk_enable(host->mck);
829 mci_writel(host, CR, MCI_CR_SWRST);
830 mci_writel(host, CR, MCI_CR_MCIEN);
831 }
832
833 /*
834 * Use mirror of ios->clock to prevent race with mmc
835 * core ios update when finding the minimum.
836 */
837 slot->clock = ios->clock;
838 for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
839 if (host->slot[i] && host->slot[i]->clock
840 && host->slot[i]->clock < clock_min)
841 clock_min = host->slot[i]->clock;
842 }
843
844 /* Calculate clock divider */
845 clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1;
533 if (clkdiv > 255) { 846 if (clkdiv > 255) {
534 dev_warn(&mmc->class_dev, 847 dev_warn(&mmc->class_dev,
535 "clock %u too slow; using %lu\n", 848 "clock %u too slow; using %lu\n",
536 ios->clock, host->bus_hz / (2 * 256)); 849 clock_min, host->bus_hz / (2 * 256));
537 clkdiv = 255; 850 clkdiv = 255;
538 } 851 }
539 852
853 /*
854 * WRPROOF and RDPROOF prevent overruns/underruns by
855 * stopping the clock when the FIFO is full/empty.
856 * This state is not expected to last for long.
857 */
540 host->mode_reg = MCI_MR_CLKDIV(clkdiv) | MCI_MR_WRPROOF 858 host->mode_reg = MCI_MR_CLKDIV(clkdiv) | MCI_MR_WRPROOF
541 | MCI_MR_RDPROOF; 859 | MCI_MR_RDPROOF;
542 }
543 860
544 switch (ios->bus_width) { 861 if (list_empty(&host->queue))
545 case MMC_BUS_WIDTH_1: 862 mci_writel(host, MR, host->mode_reg);
546 host->sdc_reg = 0; 863 else
547 break; 864 host->need_clock_update = true;
548 case MMC_BUS_WIDTH_4: 865
549 host->sdc_reg = MCI_SDCBUS_4BIT; 866 spin_unlock_bh(&host->lock);
550 break; 867 } else {
868 bool any_slot_active = false;
869
870 spin_lock_bh(&host->lock);
871 slot->clock = 0;
872 for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
873 if (host->slot[i] && host->slot[i]->clock) {
874 any_slot_active = true;
875 break;
876 }
877 }
878 if (!any_slot_active) {
879 mci_writel(host, CR, MCI_CR_MCIDIS);
880 if (host->mode_reg) {
881 mci_readl(host, MR);
882 clk_disable(host->mck);
883 }
884 host->mode_reg = 0;
885 }
886 spin_unlock_bh(&host->lock);
551 } 887 }
552 888
553 switch (ios->power_mode) { 889 switch (ios->power_mode) {
554 case MMC_POWER_ON: 890 case MMC_POWER_UP:
555 /* Send init sequence (74 clock cycles) */ 891 set_bit(ATMCI_CARD_NEED_INIT, &slot->flags);
556 atmci_enable(host);
557 mci_writel(host, CMDR, MCI_CMDR_SPCMD_INIT);
558 while (!(mci_readl(host, SR) & MCI_CMDRDY))
559 cpu_relax();
560 atmci_disable(host);
561 break; 892 break;
562 default: 893 default:
563 /* 894 /*
564 * TODO: None of the currently available AVR32-based 895 * TODO: None of the currently available AVR32-based
565 * boards allow MMC power to be turned off. Implement 896 * boards allow MMC power to be turned off. Implement
566 * power control when this can be tested properly. 897 * power control when this can be tested properly.
898 *
899 * We also need to hook this into the clock management
900 * somehow so that newly inserted cards aren't
901 * subjected to a fast clock before we have a chance
902 * to figure out what the maximum rate is. Currently,
903 * there's no way to avoid this, and there never will
904 * be for boards that don't support power control.
567 */ 905 */
568 break; 906 break;
569 } 907 }
@@ -571,31 +909,82 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
571 909
572static int atmci_get_ro(struct mmc_host *mmc) 910static int atmci_get_ro(struct mmc_host *mmc)
573{ 911{
574 int read_only = 0; 912 int read_only = -ENOSYS;
575 struct atmel_mci *host = mmc_priv(mmc); 913 struct atmel_mci_slot *slot = mmc_priv(mmc);
576 914
577 if (gpio_is_valid(host->wp_pin)) { 915 if (gpio_is_valid(slot->wp_pin)) {
578 read_only = gpio_get_value(host->wp_pin); 916 read_only = gpio_get_value(slot->wp_pin);
579 dev_dbg(&mmc->class_dev, "card is %s\n", 917 dev_dbg(&mmc->class_dev, "card is %s\n",
580 read_only ? "read-only" : "read-write"); 918 read_only ? "read-only" : "read-write");
581 } else {
582 dev_dbg(&mmc->class_dev,
583 "no pin for checking read-only switch."
584 " Assuming write-enable.\n");
585 } 919 }
586 920
587 return read_only; 921 return read_only;
588} 922}
589 923
590static struct mmc_host_ops atmci_ops = { 924static int atmci_get_cd(struct mmc_host *mmc)
925{
926 int present = -ENOSYS;
927 struct atmel_mci_slot *slot = mmc_priv(mmc);
928
929 if (gpio_is_valid(slot->detect_pin)) {
930 present = !gpio_get_value(slot->detect_pin);
931 dev_dbg(&mmc->class_dev, "card is %spresent\n",
932 present ? "" : "not ");
933 }
934
935 return present;
936}
937
938static const struct mmc_host_ops atmci_ops = {
591 .request = atmci_request, 939 .request = atmci_request,
592 .set_ios = atmci_set_ios, 940 .set_ios = atmci_set_ios,
593 .get_ro = atmci_get_ro, 941 .get_ro = atmci_get_ro,
942 .get_cd = atmci_get_cd,
594}; 943};
595 944
945/* Called with host->lock held */
946static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
947 __releases(&host->lock)
948 __acquires(&host->lock)
949{
950 struct atmel_mci_slot *slot = NULL;
951 struct mmc_host *prev_mmc = host->cur_slot->mmc;
952
953 WARN_ON(host->cmd || host->data);
954
955 /*
956 * Update the MMC clock rate if necessary. This may be
 957	 * needed if set_ios() is called while a different slot is
 958	 * busy transferring data.
959 */
960 if (host->need_clock_update)
961 mci_writel(host, MR, host->mode_reg);
962
963 host->cur_slot->mrq = NULL;
964 host->mrq = NULL;
965 if (!list_empty(&host->queue)) {
966 slot = list_entry(host->queue.next,
967 struct atmel_mci_slot, queue_node);
968 list_del(&slot->queue_node);
969 dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
970 mmc_hostname(slot->mmc));
971 host->state = STATE_SENDING_CMD;
972 atmci_start_request(host, slot);
973 } else {
974 dev_vdbg(&host->pdev->dev, "list empty\n");
975 host->state = STATE_IDLE;
976 }
977
978 spin_unlock(&host->lock);
979 mmc_request_done(prev_mmc, mrq);
980 spin_lock(&host->lock);
981}
982
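Condensed, the helper above does the following (the enqueue side is assumed to live in atmci_request(), earlier in this diff):

	/*
	 * atmci_request_end(host, mrq):
	 *   - apply the deferred MR (clock divider) update if set_ios() ran
	 *     while this request was in flight (need_clock_update)
	 *   - dequeue and start the next slot waiting on host->queue
	 *     (STATE_SENDING_CMD), or go back to STATE_IDLE
	 *   - call mmc_request_done() with host->lock dropped, presumably so
	 *     the core can queue a new request from its completion path
	 *     without deadlocking on the same lock
	 */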
596static void atmci_command_complete(struct atmel_mci *host, 983static void atmci_command_complete(struct atmel_mci *host,
597 struct mmc_command *cmd, u32 status) 984 struct mmc_command *cmd)
598{ 985{
986 u32 status = host->cmd_status;
987
599 /* Read the response from the card (up to 16 bytes) */ 988 /* Read the response from the card (up to 16 bytes) */
600 cmd->resp[0] = mci_readl(host, RSPR); 989 cmd->resp[0] = mci_readl(host, RSPR);
601 cmd->resp[1] = mci_readl(host, RSPR); 990 cmd->resp[1] = mci_readl(host, RSPR);
@@ -612,11 +1001,12 @@ static void atmci_command_complete(struct atmel_mci *host,
612 cmd->error = 0; 1001 cmd->error = 0;
613 1002
614 if (cmd->error) { 1003 if (cmd->error) {
615 dev_dbg(&host->mmc->class_dev, 1004 dev_dbg(&host->pdev->dev,
616 "command error: status=0x%08x\n", status); 1005 "command error: status=0x%08x\n", status);
617 1006
618 if (cmd->data) { 1007 if (cmd->data) {
619 host->data = NULL; 1008 host->data = NULL;
1009 atmci_stop_dma(host);
620 mci_writel(host, IDR, MCI_NOTBUSY 1010 mci_writel(host, IDR, MCI_NOTBUSY
621 | MCI_TXRDY | MCI_RXRDY 1011 | MCI_TXRDY | MCI_RXRDY
622 | ATMCI_DATA_ERROR_FLAGS); 1012 | ATMCI_DATA_ERROR_FLAGS);
@@ -626,146 +1016,222 @@ static void atmci_command_complete(struct atmel_mci *host,
626 1016
627static void atmci_detect_change(unsigned long data) 1017static void atmci_detect_change(unsigned long data)
628{ 1018{
629 struct atmel_mci *host = (struct atmel_mci *)data; 1019 struct atmel_mci_slot *slot = (struct atmel_mci_slot *)data;
630 struct mmc_request *mrq = host->mrq; 1020 bool present;
631 int present; 1021 bool present_old;
632 1022
633 /* 1023 /*
634 * atmci_remove() sets detect_pin to -1 before freeing the 1024 * atmci_cleanup_slot() sets the ATMCI_SHUTDOWN flag before
635 * interrupt. We must not re-enable the interrupt if it has 1025 * freeing the interrupt. We must not re-enable the interrupt
636 * been freed. 1026 * if it has been freed, and if we're shutting down, it
1027 * doesn't really matter whether the card is present or not.
637 */ 1028 */
638 smp_rmb(); 1029 smp_rmb();
639 if (!gpio_is_valid(host->detect_pin)) 1030 if (test_bit(ATMCI_SHUTDOWN, &slot->flags))
640 return; 1031 return;
641 1032
642 enable_irq(gpio_to_irq(host->detect_pin)); 1033 enable_irq(gpio_to_irq(slot->detect_pin));
643 present = !gpio_get_value(host->detect_pin); 1034 present = !gpio_get_value(slot->detect_pin);
1035 present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags);
644 1036
645 dev_vdbg(&host->pdev->dev, "detect change: %d (was %d)\n", 1037 dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n",
646 present, host->present); 1038 present, present_old);
647 1039
648 if (present != host->present) { 1040 if (present != present_old) {
649 dev_dbg(&host->mmc->class_dev, "card %s\n", 1041 struct atmel_mci *host = slot->host;
1042 struct mmc_request *mrq;
1043
1044 dev_dbg(&slot->mmc->class_dev, "card %s\n",
650 present ? "inserted" : "removed"); 1045 present ? "inserted" : "removed");
651 host->present = present;
652 1046
653 /* Reset controller if card is gone */ 1047 spin_lock(&host->lock);
654 if (!present) { 1048
655 mci_writel(host, CR, MCI_CR_SWRST); 1049 if (!present)
656 mci_writel(host, IDR, ~0UL); 1050 clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
657 mci_writel(host, CR, MCI_CR_MCIEN); 1051 else
658 } 1052 set_bit(ATMCI_CARD_PRESENT, &slot->flags);
659 1053
660 /* Clean up queue if present */ 1054 /* Clean up queue if present */
1055 mrq = slot->mrq;
661 if (mrq) { 1056 if (mrq) {
662 /* 1057 if (mrq == host->mrq) {
663 * Reset controller to terminate any ongoing 1058 /*
664 * commands or data transfers. 1059 * Reset controller to terminate any ongoing
665 */ 1060 * commands or data transfers.
666 mci_writel(host, CR, MCI_CR_SWRST); 1061 */
667 1062 mci_writel(host, CR, MCI_CR_SWRST);
668 if (!atmci_is_completed(host, EVENT_CMD_COMPLETE)) 1063 mci_writel(host, CR, MCI_CR_MCIEN);
669 mrq->cmd->error = -ENOMEDIUM; 1064 mci_writel(host, MR, host->mode_reg);
670 1065
671 if (mrq->data && !atmci_is_completed(host,
672 EVENT_DATA_COMPLETE)) {
673 host->data = NULL; 1066 host->data = NULL;
674 mrq->data->error = -ENOMEDIUM; 1067 host->cmd = NULL;
1068
1069 switch (host->state) {
1070 case STATE_IDLE:
1071 break;
1072 case STATE_SENDING_CMD:
1073 mrq->cmd->error = -ENOMEDIUM;
1074 if (!mrq->data)
1075 break;
1076 /* fall through */
1077 case STATE_SENDING_DATA:
1078 mrq->data->error = -ENOMEDIUM;
1079 atmci_stop_dma(host);
1080 break;
1081 case STATE_DATA_BUSY:
1082 case STATE_DATA_ERROR:
1083 if (mrq->data->error == -EINPROGRESS)
1084 mrq->data->error = -ENOMEDIUM;
1085 if (!mrq->stop)
1086 break;
1087 /* fall through */
1088 case STATE_SENDING_STOP:
1089 mrq->stop->error = -ENOMEDIUM;
1090 break;
1091 }
1092
1093 atmci_request_end(host, mrq);
1094 } else {
1095 list_del(&slot->queue_node);
1096 mrq->cmd->error = -ENOMEDIUM;
1097 if (mrq->data)
1098 mrq->data->error = -ENOMEDIUM;
1099 if (mrq->stop)
1100 mrq->stop->error = -ENOMEDIUM;
1101
1102 spin_unlock(&host->lock);
1103 mmc_request_done(slot->mmc, mrq);
1104 spin_lock(&host->lock);
675 } 1105 }
676 if (mrq->stop && !atmci_is_completed(host,
677 EVENT_STOP_COMPLETE))
678 mrq->stop->error = -ENOMEDIUM;
679
680 host->cmd = NULL;
681 atmci_request_end(host->mmc, mrq);
682 } 1106 }
1107 spin_unlock(&host->lock);
683 1108
684 mmc_detect_change(host->mmc, 0); 1109 mmc_detect_change(slot->mmc, 0);
685 } 1110 }
686} 1111}
687 1112
688static void atmci_tasklet_func(unsigned long priv) 1113static void atmci_tasklet_func(unsigned long priv)
689{ 1114{
690 struct mmc_host *mmc = (struct mmc_host *)priv; 1115 struct atmel_mci *host = (struct atmel_mci *)priv;
691 struct atmel_mci *host = mmc_priv(mmc);
692 struct mmc_request *mrq = host->mrq; 1116 struct mmc_request *mrq = host->mrq;
693 struct mmc_data *data = host->data; 1117 struct mmc_data *data = host->data;
1118 struct mmc_command *cmd = host->cmd;
1119 enum atmel_mci_state state = host->state;
1120 enum atmel_mci_state prev_state;
1121 u32 status;
694 1122
695 dev_vdbg(&mmc->class_dev, 1123 spin_lock(&host->lock);
696 "tasklet: pending/completed/mask %lx/%lx/%x\n", 1124
697 host->pending_events, host->completed_events, 1125 state = host->state;
1126
1127 dev_vdbg(&host->pdev->dev,
1128 "tasklet: state %u pending/completed/mask %lx/%lx/%x\n",
1129 state, host->pending_events, host->completed_events,
698 mci_readl(host, IMR)); 1130 mci_readl(host, IMR));
699 1131
700 if (atmci_test_and_clear_pending(host, EVENT_CMD_COMPLETE)) { 1132 do {
701 /* 1133 prev_state = state;
702 * host->cmd must be set to NULL before the interrupt
703 * handler sees EVENT_CMD_COMPLETE
704 */
705 host->cmd = NULL;
706 smp_wmb();
707 atmci_set_completed(host, EVENT_CMD_COMPLETE);
708 atmci_command_complete(host, mrq->cmd, host->cmd_status);
709
710 if (!mrq->cmd->error && mrq->stop
711 && atmci_is_completed(host, EVENT_XFER_COMPLETE)
712 && !atmci_test_and_set_completed(host,
713 EVENT_STOP_SENT))
714 send_stop_cmd(host->mmc, mrq->data);
715 }
716 if (atmci_test_and_clear_pending(host, EVENT_STOP_COMPLETE)) {
717 /*
718 * host->cmd must be set to NULL before the interrupt
719 * handler sees EVENT_STOP_COMPLETE
720 */
721 host->cmd = NULL;
722 smp_wmb();
723 atmci_set_completed(host, EVENT_STOP_COMPLETE);
724 atmci_command_complete(host, mrq->stop, host->stop_status);
725 }
726 if (atmci_test_and_clear_pending(host, EVENT_DATA_ERROR)) {
727 u32 status = host->data_status;
728 1134
729 dev_vdbg(&mmc->class_dev, "data error: status=%08x\n", status); 1135 switch (state) {
1136 case STATE_IDLE:
1137 break;
730 1138
731 atmci_set_completed(host, EVENT_DATA_ERROR); 1139 case STATE_SENDING_CMD:
732 atmci_set_completed(host, EVENT_DATA_COMPLETE); 1140 if (!atmci_test_and_clear_pending(host,
1141 EVENT_CMD_COMPLETE))
1142 break;
733 1143
734 if (status & MCI_DTOE) { 1144 host->cmd = NULL;
735 dev_dbg(&mmc->class_dev, 1145 atmci_set_completed(host, EVENT_CMD_COMPLETE);
736 "data timeout error\n"); 1146 atmci_command_complete(host, mrq->cmd);
737 data->error = -ETIMEDOUT; 1147 if (!mrq->data || cmd->error) {
738 } else if (status & MCI_DCRCE) { 1148 atmci_request_end(host, host->mrq);
739 dev_dbg(&mmc->class_dev, "data CRC error\n"); 1149 goto unlock;
740 data->error = -EILSEQ; 1150 }
741 } else {
742 dev_dbg(&mmc->class_dev,
743 "data FIFO error (status=%08x)\n",
744 status);
745 data->error = -EIO;
746 }
747 1151
748 if (host->present && data->stop 1152 prev_state = state = STATE_SENDING_DATA;
749 && atmci_is_completed(host, EVENT_CMD_COMPLETE) 1153 /* fall through */
750 && !atmci_test_and_set_completed( 1154
751 host, EVENT_STOP_SENT)) 1155 case STATE_SENDING_DATA:
752 send_stop_cmd(host->mmc, data); 1156 if (atmci_test_and_clear_pending(host,
1157 EVENT_DATA_ERROR)) {
1158 atmci_stop_dma(host);
1159 if (data->stop)
1160 send_stop_cmd(host, data);
1161 state = STATE_DATA_ERROR;
1162 break;
1163 }
753 1164
754 host->data = NULL; 1165 if (!atmci_test_and_clear_pending(host,
755 } 1166 EVENT_XFER_COMPLETE))
756 if (atmci_test_and_clear_pending(host, EVENT_DATA_COMPLETE)) { 1167 break;
757 atmci_set_completed(host, EVENT_DATA_COMPLETE); 1168
1169 atmci_set_completed(host, EVENT_XFER_COMPLETE);
1170 prev_state = state = STATE_DATA_BUSY;
1171 /* fall through */
758 1172
759 if (!atmci_is_completed(host, EVENT_DATA_ERROR)) { 1173 case STATE_DATA_BUSY:
760 data->bytes_xfered = data->blocks * data->blksz; 1174 if (!atmci_test_and_clear_pending(host,
761 data->error = 0; 1175 EVENT_DATA_COMPLETE))
1176 break;
1177
1178 host->data = NULL;
1179 atmci_set_completed(host, EVENT_DATA_COMPLETE);
1180 status = host->data_status;
1181 if (unlikely(status & ATMCI_DATA_ERROR_FLAGS)) {
1182 if (status & MCI_DTOE) {
1183 dev_dbg(&host->pdev->dev,
1184 "data timeout error\n");
1185 data->error = -ETIMEDOUT;
1186 } else if (status & MCI_DCRCE) {
1187 dev_dbg(&host->pdev->dev,
1188 "data CRC error\n");
1189 data->error = -EILSEQ;
1190 } else {
1191 dev_dbg(&host->pdev->dev,
1192 "data FIFO error (status=%08x)\n",
1193 status);
1194 data->error = -EIO;
1195 }
1196 } else {
1197 data->bytes_xfered = data->blocks * data->blksz;
1198 data->error = 0;
1199 }
1200
1201 if (!data->stop) {
1202 atmci_request_end(host, host->mrq);
1203 goto unlock;
1204 }
1205
1206 prev_state = state = STATE_SENDING_STOP;
1207 if (!data->error)
1208 send_stop_cmd(host, data);
1209 /* fall through */
1210
1211 case STATE_SENDING_STOP:
1212 if (!atmci_test_and_clear_pending(host,
1213 EVENT_CMD_COMPLETE))
1214 break;
1215
1216 host->cmd = NULL;
1217 atmci_command_complete(host, mrq->stop);
1218 atmci_request_end(host, host->mrq);
1219 goto unlock;
1220
1221 case STATE_DATA_ERROR:
1222 if (!atmci_test_and_clear_pending(host,
1223 EVENT_XFER_COMPLETE))
1224 break;
1225
1226 state = STATE_DATA_BUSY;
1227 break;
762 } 1228 }
1229 } while (state != prev_state);
763 1230
764 host->data = NULL; 1231 host->state = state;
765 }
766 1232
767 if (host->mrq && !host->cmd && !host->data) 1233unlock:
768 atmci_request_end(mmc, host->mrq); 1234 spin_unlock(&host->lock);
769} 1235}
770 1236
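A condensed view of the state machine walked by the do/while loop above; this only restates the transitions visible in the switch statement:

	/*
	 *   STATE_SENDING_CMD  --CMD_COMPLETE--->  STATE_SENDING_DATA
	 *                        (request ends here if there is no data
	 *                         or the command failed)
	 *   STATE_SENDING_DATA --DATA_ERROR----->  STATE_DATA_ERROR
	 *                      --XFER_COMPLETE-->  STATE_DATA_BUSY
	 *   STATE_DATA_BUSY    --DATA_COMPLETE-->  STATE_SENDING_STOP
	 *                        (request ends here if there is no stop
	 *                         command)
	 *   STATE_SENDING_STOP --CMD_COMPLETE--->  request ends;
	 *                                          atmci_request_end() picks
	 *                                          the next state
	 *   STATE_DATA_ERROR   --XFER_COMPLETE-->  STATE_DATA_BUSY
	 */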
771static void atmci_read_data_pio(struct atmel_mci *host) 1237static void atmci_read_data_pio(struct atmel_mci *host)
@@ -787,6 +1253,7 @@ static void atmci_read_data_pio(struct atmel_mci *host)
787 nbytes += 4; 1253 nbytes += 4;
788 1254
789 if (offset == sg->length) { 1255 if (offset == sg->length) {
1256 flush_dcache_page(sg_page(sg));
790 host->sg = sg = sg_next(sg); 1257 host->sg = sg = sg_next(sg);
791 if (!sg) 1258 if (!sg)
792 goto done; 1259 goto done;
@@ -815,9 +1282,11 @@ static void atmci_read_data_pio(struct atmel_mci *host)
815 mci_writel(host, IDR, (MCI_NOTBUSY | MCI_RXRDY 1282 mci_writel(host, IDR, (MCI_NOTBUSY | MCI_RXRDY
816 | ATMCI_DATA_ERROR_FLAGS)); 1283 | ATMCI_DATA_ERROR_FLAGS));
817 host->data_status = status; 1284 host->data_status = status;
1285 data->bytes_xfered += nbytes;
1286 smp_wmb();
818 atmci_set_pending(host, EVENT_DATA_ERROR); 1287 atmci_set_pending(host, EVENT_DATA_ERROR);
819 tasklet_schedule(&host->tasklet); 1288 tasklet_schedule(&host->tasklet);
820 break; 1289 return;
821 } 1290 }
822 } while (status & MCI_RXRDY); 1291 } while (status & MCI_RXRDY);
823 1292
@@ -830,10 +1299,8 @@ done:
830 mci_writel(host, IDR, MCI_RXRDY); 1299 mci_writel(host, IDR, MCI_RXRDY);
831 mci_writel(host, IER, MCI_NOTBUSY); 1300 mci_writel(host, IER, MCI_NOTBUSY);
832 data->bytes_xfered += nbytes; 1301 data->bytes_xfered += nbytes;
833 atmci_set_completed(host, EVENT_XFER_COMPLETE); 1302 smp_wmb();
834 if (data->stop && atmci_is_completed(host, EVENT_CMD_COMPLETE) 1303 atmci_set_pending(host, EVENT_XFER_COMPLETE);
835 && !atmci_test_and_set_completed(host, EVENT_STOP_SENT))
836 send_stop_cmd(host->mmc, data);
837} 1304}
838 1305
839static void atmci_write_data_pio(struct atmel_mci *host) 1306static void atmci_write_data_pio(struct atmel_mci *host)
@@ -886,9 +1353,11 @@ static void atmci_write_data_pio(struct atmel_mci *host)
886 mci_writel(host, IDR, (MCI_NOTBUSY | MCI_TXRDY 1353 mci_writel(host, IDR, (MCI_NOTBUSY | MCI_TXRDY
887 | ATMCI_DATA_ERROR_FLAGS)); 1354 | ATMCI_DATA_ERROR_FLAGS));
888 host->data_status = status; 1355 host->data_status = status;
1356 data->bytes_xfered += nbytes;
1357 smp_wmb();
889 atmci_set_pending(host, EVENT_DATA_ERROR); 1358 atmci_set_pending(host, EVENT_DATA_ERROR);
890 tasklet_schedule(&host->tasklet); 1359 tasklet_schedule(&host->tasklet);
891 break; 1360 return;
892 } 1361 }
893 } while (status & MCI_TXRDY); 1362 } while (status & MCI_TXRDY);
894 1363
@@ -901,38 +1370,26 @@ done:
901 mci_writel(host, IDR, MCI_TXRDY); 1370 mci_writel(host, IDR, MCI_TXRDY);
902 mci_writel(host, IER, MCI_NOTBUSY); 1371 mci_writel(host, IER, MCI_NOTBUSY);
903 data->bytes_xfered += nbytes; 1372 data->bytes_xfered += nbytes;
904 atmci_set_completed(host, EVENT_XFER_COMPLETE); 1373 smp_wmb();
905 if (data->stop && atmci_is_completed(host, EVENT_CMD_COMPLETE) 1374 atmci_set_pending(host, EVENT_XFER_COMPLETE);
906 && !atmci_test_and_set_completed(host, EVENT_STOP_SENT))
907 send_stop_cmd(host->mmc, data);
908} 1375}
909 1376
910static void atmci_cmd_interrupt(struct mmc_host *mmc, u32 status) 1377static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status)
911{ 1378{
912 struct atmel_mci *host = mmc_priv(mmc);
913
914 mci_writel(host, IDR, MCI_CMDRDY); 1379 mci_writel(host, IDR, MCI_CMDRDY);
915 1380
916 if (atmci_is_completed(host, EVENT_STOP_SENT)) { 1381 host->cmd_status = status;
917 host->stop_status = status; 1382 smp_wmb();
918 atmci_set_pending(host, EVENT_STOP_COMPLETE); 1383 atmci_set_pending(host, EVENT_CMD_COMPLETE);
919 } else {
920 host->cmd_status = status;
921 atmci_set_pending(host, EVENT_CMD_COMPLETE);
922 }
923
924 tasklet_schedule(&host->tasklet); 1384 tasklet_schedule(&host->tasklet);
925} 1385}
926 1386
927static irqreturn_t atmci_interrupt(int irq, void *dev_id) 1387static irqreturn_t atmci_interrupt(int irq, void *dev_id)
928{ 1388{
929 struct mmc_host *mmc = dev_id; 1389 struct atmel_mci *host = dev_id;
930 struct atmel_mci *host = mmc_priv(mmc);
931 u32 status, mask, pending; 1390 u32 status, mask, pending;
932 unsigned int pass_count = 0; 1391 unsigned int pass_count = 0;
933 1392
934 spin_lock(&mmc->lock);
935
936 do { 1393 do {
937 status = mci_readl(host, SR); 1394 status = mci_readl(host, SR);
938 mask = mci_readl(host, IMR); 1395 mask = mci_readl(host, IMR);
@@ -944,13 +1401,18 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
944 mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS 1401 mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS
945 | MCI_RXRDY | MCI_TXRDY); 1402 | MCI_RXRDY | MCI_TXRDY);
946 pending &= mci_readl(host, IMR); 1403 pending &= mci_readl(host, IMR);
1404
947 host->data_status = status; 1405 host->data_status = status;
1406 smp_wmb();
948 atmci_set_pending(host, EVENT_DATA_ERROR); 1407 atmci_set_pending(host, EVENT_DATA_ERROR);
949 tasklet_schedule(&host->tasklet); 1408 tasklet_schedule(&host->tasklet);
950 } 1409 }
951 if (pending & MCI_NOTBUSY) { 1410 if (pending & MCI_NOTBUSY) {
952 mci_writel(host, IDR, (MCI_NOTBUSY 1411 mci_writel(host, IDR,
953 | ATMCI_DATA_ERROR_FLAGS)); 1412 ATMCI_DATA_ERROR_FLAGS | MCI_NOTBUSY);
1413 if (!host->data_status)
1414 host->data_status = status;
1415 smp_wmb();
954 atmci_set_pending(host, EVENT_DATA_COMPLETE); 1416 atmci_set_pending(host, EVENT_DATA_COMPLETE);
955 tasklet_schedule(&host->tasklet); 1417 tasklet_schedule(&host->tasklet);
956 } 1418 }
@@ -960,18 +1422,15 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
960 atmci_write_data_pio(host); 1422 atmci_write_data_pio(host);
961 1423
962 if (pending & MCI_CMDRDY) 1424 if (pending & MCI_CMDRDY)
963 atmci_cmd_interrupt(mmc, status); 1425 atmci_cmd_interrupt(host, status);
964 } while (pass_count++ < 5); 1426 } while (pass_count++ < 5);
965 1427
966 spin_unlock(&mmc->lock);
967
968 return pass_count ? IRQ_HANDLED : IRQ_NONE; 1428 return pass_count ? IRQ_HANDLED : IRQ_NONE;
969} 1429}
970 1430
971static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id) 1431static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
972{ 1432{
973 struct mmc_host *mmc = dev_id; 1433 struct atmel_mci_slot *slot = dev_id;
974 struct atmel_mci *host = mmc_priv(mmc);
975 1434
976 /* 1435 /*
977 * Disable interrupts until the pin has stabilized and check 1436 * Disable interrupts until the pin has stabilized and check
@@ -979,19 +1438,176 @@ static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
979 * middle of the timer routine when this interrupt triggers. 1438 * middle of the timer routine when this interrupt triggers.
980 */ 1439 */
981 disable_irq_nosync(irq); 1440 disable_irq_nosync(irq);
982 mod_timer(&host->detect_timer, jiffies + msecs_to_jiffies(20)); 1441 mod_timer(&slot->detect_timer, jiffies + msecs_to_jiffies(20));
983 1442
984 return IRQ_HANDLED; 1443 return IRQ_HANDLED;
985} 1444}
986 1445
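Together with atmci_detect_change() above, the per-slot card-detect path now looks roughly like this (the 20 ms debounce is the value used above):

	/*
	 *   GPIO edge -> atmci_detect_interrupt()
	 *                  disable_irq_nosync()            no further edges
	 *                  mod_timer(detect_timer, +20 ms) debounce
	 *   timer     -> atmci_detect_change()
	 *                  enable_irq(), re-sample the pin
	 *                  on a real change: update ATMCI_CARD_PRESENT, fail
	 *                  any in-flight or queued request with -ENOMEDIUM,
	 *                  then call mmc_detect_change()
	 */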
1446#ifdef CONFIG_MMC_ATMELMCI_DMA
1447
1448static inline struct atmel_mci *
1449dma_client_to_atmel_mci(struct dma_client *client)
1450{
1451 return container_of(client, struct atmel_mci, dma.client);
1452}
1453
1454static enum dma_state_client atmci_dma_event(struct dma_client *client,
1455 struct dma_chan *chan, enum dma_state state)
1456{
1457 struct atmel_mci *host;
1458 enum dma_state_client ret = DMA_NAK;
1459
1460 host = dma_client_to_atmel_mci(client);
1461
1462 switch (state) {
1463 case DMA_RESOURCE_AVAILABLE:
1464 spin_lock_bh(&host->lock);
1465 if (!host->dma.chan) {
1466 host->dma.chan = chan;
1467 ret = DMA_ACK;
1468 }
1469 spin_unlock_bh(&host->lock);
1470
1471 if (ret == DMA_ACK)
1472 dev_info(&host->pdev->dev,
1473 "Using %s for DMA transfers\n",
1474 chan->dev.bus_id);
1475 break;
1476
1477 case DMA_RESOURCE_REMOVED:
1478 spin_lock_bh(&host->lock);
1479 if (host->dma.chan == chan) {
1480 host->dma.chan = NULL;
1481 ret = DMA_ACK;
1482 }
1483 spin_unlock_bh(&host->lock);
1484
1485 if (ret == DMA_ACK)
1486 dev_info(&host->pdev->dev,
1487 "Lost %s, falling back to PIO\n",
1488 chan->dev.bus_id);
1489 break;
1490
1491 default:
1492 break;
1493 }
1494
1495
1496 return ret;
1497}
1498#endif /* CONFIG_MMC_ATMELMCI_DMA */
1499
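With CONFIG_MMC_ATMELMCI_DMA enabled, the client above is registered from atmci_probe() further down; the channel handshake, as far as it is visible in this patch, is:

	/*
	 *   dma_async_client_register(&host->dma.client);
	 *   dma_async_client_chan_request(&host->dma.client);
	 *     -> dmaengine offers a channel:
	 *          atmci_dma_event(..., DMA_RESOURCE_AVAILABLE)
	 *          the first offer is ACKed and stored in host->dma.chan
	 *     -> the channel goes away:
	 *          atmci_dma_event(..., DMA_RESOURCE_REMOVED)
	 *          host->dma.chan is cleared and the driver falls back to PIO
	 */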
1500static int __init atmci_init_slot(struct atmel_mci *host,
1501 struct mci_slot_pdata *slot_data, unsigned int id,
1502 u32 sdc_reg)
1503{
1504 struct mmc_host *mmc;
1505 struct atmel_mci_slot *slot;
1506
1507 mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev);
1508 if (!mmc)
1509 return -ENOMEM;
1510
1511 slot = mmc_priv(mmc);
1512 slot->mmc = mmc;
1513 slot->host = host;
1514 slot->detect_pin = slot_data->detect_pin;
1515 slot->wp_pin = slot_data->wp_pin;
1516 slot->sdc_reg = sdc_reg;
1517
1518 mmc->ops = &atmci_ops;
1519 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
1520 mmc->f_max = host->bus_hz / 2;
1521 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1522 if (slot_data->bus_width >= 4)
1523 mmc->caps |= MMC_CAP_4_BIT_DATA;
1524
1525 mmc->max_hw_segs = 64;
1526 mmc->max_phys_segs = 64;
1527 mmc->max_req_size = 32768 * 512;
1528 mmc->max_blk_size = 32768;
1529 mmc->max_blk_count = 512;
1530
1531 /* Assume card is present initially */
1532 set_bit(ATMCI_CARD_PRESENT, &slot->flags);
1533 if (gpio_is_valid(slot->detect_pin)) {
1534 if (gpio_request(slot->detect_pin, "mmc_detect")) {
1535 dev_dbg(&mmc->class_dev, "no detect pin available\n");
1536 slot->detect_pin = -EBUSY;
1537 } else if (gpio_get_value(slot->detect_pin)) {
1538 clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
1539 }
1540 }
1541
1542 if (!gpio_is_valid(slot->detect_pin))
1543 mmc->caps |= MMC_CAP_NEEDS_POLL;
1544
1545 if (gpio_is_valid(slot->wp_pin)) {
1546 if (gpio_request(slot->wp_pin, "mmc_wp")) {
1547 dev_dbg(&mmc->class_dev, "no WP pin available\n");
1548 slot->wp_pin = -EBUSY;
1549 }
1550 }
1551
1552 host->slot[id] = slot;
1553 mmc_add_host(mmc);
1554
1555 if (gpio_is_valid(slot->detect_pin)) {
1556 int ret;
1557
1558 setup_timer(&slot->detect_timer, atmci_detect_change,
1559 (unsigned long)slot);
1560
1561 ret = request_irq(gpio_to_irq(slot->detect_pin),
1562 atmci_detect_interrupt,
1563 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
1564 "mmc-detect", slot);
1565 if (ret) {
1566 dev_dbg(&mmc->class_dev,
1567 "could not request IRQ %d for detect pin\n",
1568 gpio_to_irq(slot->detect_pin));
1569 gpio_free(slot->detect_pin);
1570 slot->detect_pin = -EBUSY;
1571 }
1572 }
1573
1574 atmci_init_debugfs(slot);
1575
1576 return 0;
1577}
1578
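The f_min/f_max values advertised in atmci_init_slot() above follow directly from the divider range handled in atmci_set_ios():

	/*
	 *   card clock = bus_hz / (2 * (CLKDIV + 1)), CLKDIV in 0..255
	 *   => fastest  = bus_hz / 2    (CLKDIV = 0)    -> mmc->f_max
	 *      slowest  = bus_hz / 512  (CLKDIV = 255)  -> mmc->f_min
	 */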
1579static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot,
1580 unsigned int id)
1581{
1582 /* Debugfs stuff is cleaned up by mmc core */
1583
1584 set_bit(ATMCI_SHUTDOWN, &slot->flags);
1585 smp_wmb();
1586
1587 mmc_remove_host(slot->mmc);
1588
1589 if (gpio_is_valid(slot->detect_pin)) {
1590 int pin = slot->detect_pin;
1591
1592 free_irq(gpio_to_irq(pin), slot);
1593 del_timer_sync(&slot->detect_timer);
1594 gpio_free(pin);
1595 }
1596 if (gpio_is_valid(slot->wp_pin))
1597 gpio_free(slot->wp_pin);
1598
1599 slot->host->slot[id] = NULL;
1600 mmc_free_host(slot->mmc);
1601}
1602
987static int __init atmci_probe(struct platform_device *pdev) 1603static int __init atmci_probe(struct platform_device *pdev)
988{ 1604{
989 struct mci_platform_data *pdata; 1605 struct mci_platform_data *pdata;
990 struct atmel_mci *host; 1606 struct atmel_mci *host;
991 struct mmc_host *mmc; 1607 struct resource *regs;
992 struct resource *regs; 1608 unsigned int nr_slots;
993 int irq; 1609 int irq;
994 int ret; 1610 int ret;
995 1611
996 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1612 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
997 if (!regs) 1613 if (!regs)
@@ -1003,15 +1619,13 @@ static int __init atmci_probe(struct platform_device *pdev)
1003 if (irq < 0) 1619 if (irq < 0)
1004 return irq; 1620 return irq;
1005 1621
1006 mmc = mmc_alloc_host(sizeof(struct atmel_mci), &pdev->dev); 1622 host = kzalloc(sizeof(struct atmel_mci), GFP_KERNEL);
1007 if (!mmc) 1623 if (!host)
1008 return -ENOMEM; 1624 return -ENOMEM;
1009 1625
1010 host = mmc_priv(mmc);
1011 host->pdev = pdev; 1626 host->pdev = pdev;
1012 host->mmc = mmc; 1627 spin_lock_init(&host->lock);
1013 host->detect_pin = pdata->detect_pin; 1628 INIT_LIST_HEAD(&host->queue);
1014 host->wp_pin = pdata->wp_pin;
1015 1629
1016 host->mck = clk_get(&pdev->dev, "mci_clk"); 1630 host->mck = clk_get(&pdev->dev, "mci_clk");
1017 if (IS_ERR(host->mck)) { 1631 if (IS_ERR(host->mck)) {
@@ -1031,118 +1645,102 @@ static int __init atmci_probe(struct platform_device *pdev)
1031 1645
1032 host->mapbase = regs->start; 1646 host->mapbase = regs->start;
1033 1647
1034 mmc->ops = &atmci_ops; 1648 tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)host);
1035 mmc->f_min = (host->bus_hz + 511) / 512;
1036 mmc->f_max = host->bus_hz / 2;
1037 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1038 mmc->caps |= MMC_CAP_4_BIT_DATA;
1039
1040 mmc->max_hw_segs = 64;
1041 mmc->max_phys_segs = 64;
1042 mmc->max_req_size = 32768 * 512;
1043 mmc->max_blk_size = 32768;
1044 mmc->max_blk_count = 512;
1045
1046 tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)mmc);
1047 1649
1048 ret = request_irq(irq, atmci_interrupt, 0, pdev->dev.bus_id, mmc); 1650 ret = request_irq(irq, atmci_interrupt, 0, pdev->dev.bus_id, host);
1049 if (ret) 1651 if (ret)
1050 goto err_request_irq; 1652 goto err_request_irq;
1051 1653
1052 /* Assume card is present if we don't have a detect pin */ 1654#ifdef CONFIG_MMC_ATMELMCI_DMA
1053 host->present = 1; 1655 if (pdata->dma_slave) {
1054 if (gpio_is_valid(host->detect_pin)) { 1656 struct dma_slave *slave = pdata->dma_slave;
1055 if (gpio_request(host->detect_pin, "mmc_detect")) {
1056 dev_dbg(&mmc->class_dev, "no detect pin available\n");
1057 host->detect_pin = -1;
1058 } else {
1059 host->present = !gpio_get_value(host->detect_pin);
1060 }
1061 }
1062 if (gpio_is_valid(host->wp_pin)) {
1063 if (gpio_request(host->wp_pin, "mmc_wp")) {
1064 dev_dbg(&mmc->class_dev, "no WP pin available\n");
1065 host->wp_pin = -1;
1066 }
1067 }
1068 1657
1069 platform_set_drvdata(pdev, host); 1658 slave->tx_reg = regs->start + MCI_TDR;
1659 slave->rx_reg = regs->start + MCI_RDR;
1070 1660
1071 mmc_add_host(mmc); 1661 /* Try to grab a DMA channel */
1662 host->dma.client.event_callback = atmci_dma_event;
1663 dma_cap_set(DMA_SLAVE, host->dma.client.cap_mask);
1664 host->dma.client.slave = slave;
1072 1665
1073 if (gpio_is_valid(host->detect_pin)) { 1666 dma_async_client_register(&host->dma.client);
1074 setup_timer(&host->detect_timer, atmci_detect_change, 1667 dma_async_client_chan_request(&host->dma.client);
1075 (unsigned long)host); 1668 } else {
1669 dev_notice(&pdev->dev, "DMA not available, using PIO\n");
1670 }
1671#endif /* CONFIG_MMC_ATMELMCI_DMA */
1076 1672
1077 ret = request_irq(gpio_to_irq(host->detect_pin), 1673 platform_set_drvdata(pdev, host);
1078 atmci_detect_interrupt, 1674
1079 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, 1675 /* We need at least one slot to succeed */
1080 "mmc-detect", mmc); 1676 nr_slots = 0;
1081 if (ret) { 1677 ret = -ENODEV;
1082 dev_dbg(&mmc->class_dev, 1678 if (pdata->slot[0].bus_width) {
1083 "could not request IRQ %d for detect pin\n", 1679 ret = atmci_init_slot(host, &pdata->slot[0],
1084 gpio_to_irq(host->detect_pin)); 1680 MCI_SDCSEL_SLOT_A, 0);
1085 gpio_free(host->detect_pin); 1681 if (!ret)
1086 host->detect_pin = -1; 1682 nr_slots++;
1087 } 1683 }
1684 if (pdata->slot[1].bus_width) {
1685 ret = atmci_init_slot(host, &pdata->slot[1],
1686 MCI_SDCSEL_SLOT_B, 1);
1687 if (!ret)
1688 nr_slots++;
1088 } 1689 }
1089 1690
1090 dev_info(&mmc->class_dev, 1691 if (!nr_slots)
1091 "Atmel MCI controller at 0x%08lx irq %d\n", 1692 goto err_init_slot;
1092 host->mapbase, irq);
1093 1693
1094 atmci_init_debugfs(host); 1694 dev_info(&pdev->dev,
1695 "Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
1696 host->mapbase, irq, nr_slots);
1095 1697
1096 return 0; 1698 return 0;
1097 1699
1700err_init_slot:
1701#ifdef CONFIG_MMC_ATMELMCI_DMA
1702 if (pdata->dma_slave)
1703 dma_async_client_unregister(&host->dma.client);
1704#endif
1705 free_irq(irq, host);
1098err_request_irq: 1706err_request_irq:
1099 iounmap(host->regs); 1707 iounmap(host->regs);
1100err_ioremap: 1708err_ioremap:
1101 clk_put(host->mck); 1709 clk_put(host->mck);
1102err_clk_get: 1710err_clk_get:
1103 mmc_free_host(mmc); 1711 kfree(host);
1104 return ret; 1712 return ret;
1105} 1713}
1106 1714
1107static int __exit atmci_remove(struct platform_device *pdev) 1715static int __exit atmci_remove(struct platform_device *pdev)
1108{ 1716{
1109 struct atmel_mci *host = platform_get_drvdata(pdev); 1717 struct atmel_mci *host = platform_get_drvdata(pdev);
1718 unsigned int i;
1110 1719
1111 platform_set_drvdata(pdev, NULL); 1720 platform_set_drvdata(pdev, NULL);
1112 1721
1113 if (host) { 1722 for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
1114 /* Debugfs stuff is cleaned up by mmc core */ 1723 if (host->slot[i])
1115 1724 atmci_cleanup_slot(host->slot[i], i);
1116 if (gpio_is_valid(host->detect_pin)) { 1725 }
1117 int pin = host->detect_pin;
1118
1119 /* Make sure the timer doesn't enable the interrupt */
1120 host->detect_pin = -1;
1121 smp_wmb();
1122
1123 free_irq(gpio_to_irq(pin), host->mmc);
1124 del_timer_sync(&host->detect_timer);
1125 gpio_free(pin);
1126 }
1127
1128 mmc_remove_host(host->mmc);
1129 1726
1130 clk_enable(host->mck); 1727 clk_enable(host->mck);
1131 mci_writel(host, IDR, ~0UL); 1728 mci_writel(host, IDR, ~0UL);
1132 mci_writel(host, CR, MCI_CR_MCIDIS); 1729 mci_writel(host, CR, MCI_CR_MCIDIS);
1133 mci_readl(host, SR); 1730 mci_readl(host, SR);
1134 clk_disable(host->mck); 1731 clk_disable(host->mck);
1135 1732
1136 if (gpio_is_valid(host->wp_pin)) 1733#ifdef CONFIG_MMC_ATMELMCI_DMA
1137 gpio_free(host->wp_pin); 1734 if (host->dma.client.slave)
1735 dma_async_client_unregister(&host->dma.client);
1736#endif
1138 1737
1139 free_irq(platform_get_irq(pdev, 0), host->mmc); 1738 free_irq(platform_get_irq(pdev, 0), host);
1140 iounmap(host->regs); 1739 iounmap(host->regs);
1141 1740
1142 clk_put(host->mck); 1741 clk_put(host->mck);
1742 kfree(host);
1143 1743
1144 mmc_free_host(host->mmc);
1145 }
1146 return 0; 1744 return 0;
1147} 1745}
1148 1746
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 7503b81374e0..07faf5412a1f 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -95,8 +95,6 @@
95 * reads which takes nowhere near that long. Older cards may be able to use 95 * reads which takes nowhere near that long. Older cards may be able to use
96 * shorter timeouts ... but why bother? 96 * shorter timeouts ... but why bother?
97 */ 97 */
98#define readblock_timeout ktime_set(0, 100 * 1000 * 1000)
99#define writeblock_timeout ktime_set(0, 250 * 1000 * 1000)
100#define r1b_timeout ktime_set(3, 0) 98#define r1b_timeout ktime_set(3, 0)
101 99
102 100
@@ -220,9 +218,9 @@ mmc_spi_wait_unbusy(struct mmc_spi_host *host, ktime_t timeout)
220 return mmc_spi_skip(host, timeout, sizeof(host->data->status), 0); 218 return mmc_spi_skip(host, timeout, sizeof(host->data->status), 0);
221} 219}
222 220
223static int mmc_spi_readtoken(struct mmc_spi_host *host) 221static int mmc_spi_readtoken(struct mmc_spi_host *host, ktime_t timeout)
224{ 222{
225 return mmc_spi_skip(host, readblock_timeout, 1, 0xff); 223 return mmc_spi_skip(host, timeout, 1, 0xff);
226} 224}
227 225
228 226
@@ -605,7 +603,8 @@ mmc_spi_setup_data_message(
605 * Return negative errno, else success. 603 * Return negative errno, else success.
606 */ 604 */
607static int 605static int
608mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t) 606mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
607 ktime_t timeout)
609{ 608{
610 struct spi_device *spi = host->spi; 609 struct spi_device *spi = host->spi;
611 int status, i; 610 int status, i;
@@ -673,7 +672,7 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t)
673 if (scratch->status[i] != 0) 672 if (scratch->status[i] != 0)
674 return 0; 673 return 0;
675 } 674 }
676 return mmc_spi_wait_unbusy(host, writeblock_timeout); 675 return mmc_spi_wait_unbusy(host, timeout);
677} 676}
678 677
679/* 678/*
@@ -693,7 +692,8 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t)
693 * STOP_TRANSMISSION command. 692 * STOP_TRANSMISSION command.
694 */ 693 */
695static int 694static int
696mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t) 695mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
696 ktime_t timeout)
697{ 697{
698 struct spi_device *spi = host->spi; 698 struct spi_device *spi = host->spi;
699 int status; 699 int status;
@@ -707,7 +707,7 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t)
707 return status; 707 return status;
708 status = scratch->status[0]; 708 status = scratch->status[0];
709 if (status == 0xff || status == 0) 709 if (status == 0xff || status == 0)
710 status = mmc_spi_readtoken(host); 710 status = mmc_spi_readtoken(host, timeout);
711 711
712 if (status == SPI_TOKEN_SINGLE) { 712 if (status == SPI_TOKEN_SINGLE) {
713 if (host->dma_dev) { 713 if (host->dma_dev) {
@@ -778,6 +778,8 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
778 struct scatterlist *sg; 778 struct scatterlist *sg;
779 unsigned n_sg; 779 unsigned n_sg;
780 int multiple = (data->blocks > 1); 780 int multiple = (data->blocks > 1);
781 u32 clock_rate;
782 ktime_t timeout;
781 783
782 if (data->flags & MMC_DATA_READ) 784 if (data->flags & MMC_DATA_READ)
783 direction = DMA_FROM_DEVICE; 785 direction = DMA_FROM_DEVICE;
@@ -786,6 +788,14 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
786 mmc_spi_setup_data_message(host, multiple, direction); 788 mmc_spi_setup_data_message(host, multiple, direction);
787 t = &host->t; 789 t = &host->t;
788 790
791 if (t->speed_hz)
792 clock_rate = t->speed_hz;
793 else
794 clock_rate = spi->max_speed_hz;
795
796 timeout = ktime_add_ns(ktime_set(0, 0), data->timeout_ns +
797 data->timeout_clks * 1000000 / clock_rate);
798
789 /* Handle scatterlist segments one at a time, with synch for 799 /* Handle scatterlist segments one at a time, with synch for
790 * each 512-byte block 800 * each 512-byte block
791 */ 801 */
@@ -832,9 +842,9 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
832 t->len); 842 t->len);
833 843
834 if (direction == DMA_TO_DEVICE) 844 if (direction == DMA_TO_DEVICE)
835 status = mmc_spi_writeblock(host, t); 845 status = mmc_spi_writeblock(host, t, timeout);
836 else 846 else
837 status = mmc_spi_readblock(host, t); 847 status = mmc_spi_readblock(host, t, timeout);
838 if (status < 0) 848 if (status < 0)
839 break; 849 break;
840 850
@@ -917,7 +927,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
917 if (scratch->status[tmp] != 0) 927 if (scratch->status[tmp] != 0)
918 return; 928 return;
919 } 929 }
920 tmp = mmc_spi_wait_unbusy(host, writeblock_timeout); 930 tmp = mmc_spi_wait_unbusy(host, timeout);
921 if (tmp < 0 && !data->error) 931 if (tmp < 0 && !data->error)
922 data->error = tmp; 932 data->error = tmp;
923 } 933 }
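The net effect of the mmc_spi changes above is that block-I/O timeouts now come from the request itself rather than from fixed constants; the plumbing, as visible in the hunks, is roughly:

	/*
	 *   mmc_spi_data_do()
	 *     clock_rate = t->speed_hz ? t->speed_hz : spi->max_speed_hz
	 *     timeout    = data->timeout_ns plus data->timeout_clks scaled
	 *                  by clock_rate
	 *       -> mmc_spi_writeblock(host, t, timeout)
	 *            -> mmc_spi_wait_unbusy(host, timeout)
	 *       -> mmc_spi_readblock(host, t, timeout)
	 *            -> mmc_spi_readtoken(host, timeout)
	 *
	 * replacing the fixed readblock_timeout/writeblock_timeout constants
	 * removed at the top of this file's diff.
	 */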
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 55093ad132ca..ebfaa9960939 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -520,7 +520,7 @@ static int pxamci_probe(struct platform_device *pdev)
520 /* 520 /*
521 * Block length register is only 10 bits before PXA27x. 521 * Block length register is only 10 bits before PXA27x.
522 */ 522 */
523 mmc->max_blk_size = (cpu_is_pxa21x() || cpu_is_pxa25x()) ? 1023 : 2048; 523 mmc->max_blk_size = cpu_is_pxa25x() ? 1023 : 2048;
524 524
525 /* 525 /*
526 * Block count register is 16 bits. 526 * Block count register is 16 bits.
@@ -554,7 +554,7 @@ static int pxamci_probe(struct platform_device *pdev)
554 MMC_VDD_32_33|MMC_VDD_33_34; 554 MMC_VDD_32_33|MMC_VDD_33_34;
555 mmc->caps = 0; 555 mmc->caps = 0;
556 host->cmdat = 0; 556 host->cmdat = 0;
557 if (!cpu_is_pxa21x() && !cpu_is_pxa25x()) { 557 if (!cpu_is_pxa25x()) {
558 mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; 558 mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
559 host->cmdat |= CMDAT_SDIO_INT_EN; 559 host->cmdat |= CMDAT_SDIO_INT_EN;
560 if (cpu_is_pxa300() || cpu_is_pxa310()) 560 if (cpu_is_pxa300() || cpu_is_pxa310())
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 0341cfbd6fc4..9bd7026b0021 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -144,7 +144,8 @@ static int jmicron_probe(struct sdhci_pci_chip *chip)
144 SDHCI_QUIRK_32BIT_DMA_SIZE | 144 SDHCI_QUIRK_32BIT_DMA_SIZE |
145 SDHCI_QUIRK_32BIT_ADMA_SIZE | 145 SDHCI_QUIRK_32BIT_ADMA_SIZE |
146 SDHCI_QUIRK_RESET_AFTER_REQUEST | 146 SDHCI_QUIRK_RESET_AFTER_REQUEST |
147 SDHCI_QUIRK_BROKEN_SMALL_PIO; 147 SDHCI_QUIRK_BROKEN_SMALL_PIO |
148 SDHCI_QUIRK_FORCE_HIGHSPEED;
148 } 149 }
149 150
150 /* 151 /*
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index e3a8133560a2..30f64b1f2354 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -177,7 +177,7 @@ static void sdhci_read_block_pio(struct sdhci_host *host)
177{ 177{
178 unsigned long flags; 178 unsigned long flags;
179 size_t blksize, len, chunk; 179 size_t blksize, len, chunk;
180 u32 scratch; 180 u32 uninitialized_var(scratch);
181 u8 *buf; 181 u8 *buf;
182 182
183 DBG("PIO reading\n"); 183 DBG("PIO reading\n");
@@ -1154,7 +1154,7 @@ static void sdhci_tasklet_card(unsigned long param)
1154 1154
1155 spin_unlock_irqrestore(&host->lock, flags); 1155 spin_unlock_irqrestore(&host->lock, flags);
1156 1156
1157 mmc_detect_change(host->mmc, msecs_to_jiffies(500)); 1157 mmc_detect_change(host->mmc, msecs_to_jiffies(200));
1158} 1158}
1159 1159
1160static void sdhci_tasklet_finish(unsigned long param) 1160static void sdhci_tasklet_finish(unsigned long param)
@@ -1266,9 +1266,31 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
1266 SDHCI_INT_INDEX)) 1266 SDHCI_INT_INDEX))
1267 host->cmd->error = -EILSEQ; 1267 host->cmd->error = -EILSEQ;
1268 1268
1269 if (host->cmd->error) 1269 if (host->cmd->error) {
1270 tasklet_schedule(&host->finish_tasklet); 1270 tasklet_schedule(&host->finish_tasklet);
1271 else if (intmask & SDHCI_INT_RESPONSE) 1271 return;
1272 }
1273
1274 /*
 1275	 * The host can send an interrupt when the busy state has
1276 * ended, allowing us to wait without wasting CPU cycles.
1277 * Unfortunately this is overloaded on the "data complete"
1278 * interrupt, so we need to take some care when handling
1279 * it.
1280 *
1281 * Note: The 1.0 specification is a bit ambiguous about this
1282 * feature so there might be some problems with older
1283 * controllers.
1284 */
1285 if (host->cmd->flags & MMC_RSP_BUSY) {
1286 if (host->cmd->data)
1287 DBG("Cannot wait for busy signal when also "
1288 "doing a data transfer");
1289 else
1290 return;
1291 }
1292
1293 if (intmask & SDHCI_INT_RESPONSE)
1272 sdhci_finish_command(host); 1294 sdhci_finish_command(host);
1273} 1295}
1274 1296
@@ -1278,11 +1300,16 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
1278 1300
1279 if (!host->data) { 1301 if (!host->data) {
1280 /* 1302 /*
1281 * A data end interrupt is sent together with the response 1303 * The "data complete" interrupt is also used to
1282 * for the stop command. 1304 * indicate that a busy state has ended. See comment
1305 * above in sdhci_cmd_irq().
1283 */ 1306 */
1284 if (intmask & SDHCI_INT_DATA_END) 1307 if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
1285 return; 1308 if (intmask & SDHCI_INT_DATA_END) {
1309 sdhci_finish_command(host);
1310 return;
1311 }
1312 }
1286 1313
1287 printk(KERN_ERR "%s: Got data interrupt 0x%08x even " 1314 printk(KERN_ERR "%s: Got data interrupt 0x%08x even "
1288 "though no data operation was in progress.\n", 1315 "though no data operation was in progress.\n",
@@ -1604,7 +1631,8 @@ int sdhci_add_host(struct sdhci_host *host)
1604 mmc->f_max = host->max_clk; 1631 mmc->f_max = host->max_clk;
1605 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; 1632 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
1606 1633
1607 if (caps & SDHCI_CAN_DO_HISPD) 1634 if ((caps & SDHCI_CAN_DO_HISPD) ||
1635 (host->quirks & SDHCI_QUIRK_FORCE_HIGHSPEED))
1608 mmc->caps |= MMC_CAP_SD_HIGHSPEED; 1636 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
1609 1637
1610 mmc->ocr_avail = 0; 1638 mmc->ocr_avail = 0;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 197d4a05f4ae..31f4b1528e76 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -208,6 +208,8 @@ struct sdhci_host {
208#define SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1<<12) 208#define SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1<<12)
209/* Controller has an issue with buffer bits for small transfers */ 209/* Controller has an issue with buffer bits for small transfers */
210#define SDHCI_QUIRK_BROKEN_SMALL_PIO (1<<13) 210#define SDHCI_QUIRK_BROKEN_SMALL_PIO (1<<13)
211/* Controller supports high speed but doesn't have the caps bit set */
212#define SDHCI_QUIRK_FORCE_HIGHSPEED (1<<14)
211 213
212 int irq; /* Device IRQ */ 214 int irq; /* Device IRQ */
213 void __iomem * ioaddr; /* Mapped address */ 215 void __iomem * ioaddr; /* Mapped address */
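A minimal usage sketch of the new quirk, mirroring the jmicron_probe() change earlier in this diff (assuming some sdhci-pci probe/fixup with a chip pointer in scope):

	/* Controller does high speed but never sets SDHCI_CAN_DO_HISPD. */
	chip->quirks |= SDHCI_QUIRK_FORCE_HIGHSPEED;
	/* sdhci_add_host() will then set MMC_CAP_SD_HIGHSPEED for its hosts. */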
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 9e647a06054f..ba2b4240a86a 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -159,10 +159,10 @@ static inline void tmio_mmc_kunmap_atomic(struct tmio_mmc_host *host,
159#define STATUS_TO_TEXT(a) \ 159#define STATUS_TO_TEXT(a) \
160 do { \ 160 do { \
161 if (status & TMIO_STAT_##a) \ 161 if (status & TMIO_STAT_##a) \
162 printf(#a); \ 162 printk(#a); \
163 } while (0) 163 } while (0)
164 164
165void debug_status(u32 status) 165void pr_debug_status(u32 status)
166{ 166{
167 printk(KERN_DEBUG "status: %08x = ", status); 167 printk(KERN_DEBUG "status: %08x = ", status);
168 STATUS_TO_TEXT(CARD_REMOVE); 168 STATUS_TO_TEXT(CARD_REMOVE);