Diffstat (limited to 'drivers/mmc')
-rw-r--r--  drivers/mmc/card/block.c        |   22
-rw-r--r--  drivers/mmc/card/queue.c        |    6
-rw-r--r--  drivers/mmc/core/bus.c          |    2
-rw-r--r--  drivers/mmc/core/cd-gpio.c      |    3
-rw-r--r--  drivers/mmc/core/core.c         |   18
-rw-r--r--  drivers/mmc/core/mmc.c          |  119
-rw-r--r--  drivers/mmc/core/sdio.c         |    2
-rw-r--r--  drivers/mmc/core/sdio_irq.c     |   11
-rw-r--r--  drivers/mmc/host/Kconfig        |   17
-rw-r--r--  drivers/mmc/host/Makefile       |    1
-rw-r--r--  drivers/mmc/host/atmel-mci.c    |  469
-rw-r--r--  drivers/mmc/host/davinci_mmc.c  |    1
-rw-r--r--  drivers/mmc/host/dw_mmc.c       |   18
-rw-r--r--  drivers/mmc/host/imxmmc.c       | 1169
-rw-r--r--  drivers/mmc/host/imxmmc.h       |   64
-rw-r--r--  drivers/mmc/host/mmci.c         |   83
-rw-r--r--  drivers/mmc/host/mvsdio.c       |   14
-rw-r--r--  drivers/mmc/host/omap.c         |   48
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c   |   84
-rw-r--r--  drivers/mmc/host/sdhci-spear.c  |   82
-rw-r--r--  drivers/mmc/host/sdhci-tegra.c  |   26
-rw-r--r--  drivers/mmc/host/sdhci.c        |    4
22 files changed, 661 insertions(+), 1602 deletions(-)
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index dabec556ebb8..dd2d374dcc7a 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -384,7 +384,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
384 md = mmc_blk_get(bdev->bd_disk); 384 md = mmc_blk_get(bdev->bd_disk);
385 if (!md) { 385 if (!md) {
386 err = -EINVAL; 386 err = -EINVAL;
387 goto cmd_done; 387 goto cmd_err;
388 } 388 }
389 389
390 card = md->queue.card; 390 card = md->queue.card;
@@ -483,6 +483,7 @@ cmd_rel_host:
483 483
484cmd_done: 484cmd_done:
485 mmc_blk_put(md); 485 mmc_blk_put(md);
486cmd_err:
486 kfree(idata->buf); 487 kfree(idata->buf);
487 kfree(idata); 488 kfree(idata);
488 return err; 489 return err;
@@ -1283,7 +1284,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
1283 int ret = 1, disable_multi = 0, retry = 0, type; 1284 int ret = 1, disable_multi = 0, retry = 0, type;
1284 enum mmc_blk_status status; 1285 enum mmc_blk_status status;
1285 struct mmc_queue_req *mq_rq; 1286 struct mmc_queue_req *mq_rq;
1286 struct request *req; 1287 struct request *req = rqc;
1287 struct mmc_async_req *areq; 1288 struct mmc_async_req *areq;
1288 1289
1289 if (!rqc && !mq->mqrq_prev->req) 1290 if (!rqc && !mq->mqrq_prev->req)
@@ -1291,6 +1292,16 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
1291 1292
1292 do { 1293 do {
1293 if (rqc) { 1294 if (rqc) {
1295 /*
1296 * When 4KB native sector is enabled, only 8 blocks
1297 * multiple read or write is allowed
1298 */
1299 if ((brq->data.blocks & 0x07) &&
1300 (card->ext_csd.data_sector_size == 4096)) {
1301 pr_err("%s: Transfer size is not 4KB sector size aligned\n",
1302 req->rq_disk->disk_name);
1303 goto cmd_abort;
1304 }
1294 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); 1305 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
1295 areq = &mq->mqrq_cur->mmc_active; 1306 areq = &mq->mqrq_cur->mmc_active;
1296 } else 1307 } else
@@ -1538,7 +1549,12 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
1538 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), 1549 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
1539 "mmcblk%d%s", md->name_idx, subname ? subname : ""); 1550 "mmcblk%d%s", md->name_idx, subname ? subname : "");
1540 1551
1541 blk_queue_logical_block_size(md->queue.queue, 512); 1552 if (mmc_card_mmc(card))
1553 blk_queue_logical_block_size(md->queue.queue,
1554 card->ext_csd.data_sector_size);
1555 else
1556 blk_queue_logical_block_size(md->queue.queue, 512);
1557
1542 set_capacity(md->disk, size); 1558 set_capacity(md->disk, size);
1543 1559
1544 if (mmc_host_cmd23(card->host)) { 1560 if (mmc_host_cmd23(card->host)) {
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 996f8e36e23d..e360a979857d 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -96,7 +96,7 @@ static int mmc_queue_thread(void *d)
96 * on any queue on this host, and attempt to issue it. This may 96 * on any queue on this host, and attempt to issue it. This may
97 * not be the queue we were asked to process. 97 * not be the queue we were asked to process.
98 */ 98 */
99static void mmc_request(struct request_queue *q) 99static void mmc_request_fn(struct request_queue *q)
100{ 100{
101 struct mmc_queue *mq = q->queuedata; 101 struct mmc_queue *mq = q->queuedata;
102 struct request *req; 102 struct request *req;
@@ -171,12 +171,10 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
171 limit = *mmc_dev(host)->dma_mask; 171 limit = *mmc_dev(host)->dma_mask;
172 172
173 mq->card = card; 173 mq->card = card;
174 mq->queue = blk_init_queue(mmc_request, lock); 174 mq->queue = blk_init_queue(mmc_request_fn, lock);
175 if (!mq->queue) 175 if (!mq->queue)
176 return -ENOMEM; 176 return -ENOMEM;
177 177
178 memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
179 memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
180 mq->mqrq_cur = mqrq_cur; 178 mq->mqrq_cur = mqrq_cur;
181 mq->mqrq_prev = mqrq_prev; 179 mq->mqrq_prev = mqrq_prev;
182 mq->queue->queuedata = mq; 180 mq->queue->queuedata = mq;
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index c60cee92a2b2..9b68933f27e7 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -122,6 +122,7 @@ static int mmc_bus_remove(struct device *dev)
122 return 0; 122 return 0;
123} 123}
124 124
125#ifdef CONFIG_PM_SLEEP
125static int mmc_bus_suspend(struct device *dev) 126static int mmc_bus_suspend(struct device *dev)
126{ 127{
127 struct mmc_driver *drv = to_mmc_driver(dev->driver); 128 struct mmc_driver *drv = to_mmc_driver(dev->driver);
@@ -143,6 +144,7 @@ static int mmc_bus_resume(struct device *dev)
143 ret = drv->resume(card); 144 ret = drv->resume(card);
144 return ret; 145 return ret;
145} 146}
147#endif
146 148
147#ifdef CONFIG_PM_RUNTIME 149#ifdef CONFIG_PM_RUNTIME
148 150
diff --git a/drivers/mmc/core/cd-gpio.c b/drivers/mmc/core/cd-gpio.c
index 2c14be73254c..f13e38deceac 100644
--- a/drivers/mmc/core/cd-gpio.c
+++ b/drivers/mmc/core/cd-gpio.c
@@ -73,6 +73,9 @@ void mmc_cd_gpio_free(struct mmc_host *host)
73{ 73{
74 struct mmc_cd_gpio *cd = host->hotplug.handler_priv; 74 struct mmc_cd_gpio *cd = host->hotplug.handler_priv;
75 75
76 if (!cd)
77 return;
78
76 free_irq(host->hotplug.irq, host); 79 free_irq(host->hotplug.irq, host);
77 gpio_free(cd->gpio); 80 gpio_free(cd->gpio);
78 kfree(cd); 81 kfree(cd);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index ba821fe70bca..0b6141d29dbd 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -42,6 +42,7 @@
42#include "sdio_ops.h" 42#include "sdio_ops.h"
43 43
44static struct workqueue_struct *workqueue; 44static struct workqueue_struct *workqueue;
45static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
45 46
46/* 47/*
47 * Enabling software CRCs on the data blocks can be a significant (30%) 48 * Enabling software CRCs on the data blocks can be a significant (30%)
@@ -1157,6 +1158,9 @@ static void mmc_power_up(struct mmc_host *host)
1157{ 1158{
1158 int bit; 1159 int bit;
1159 1160
1161 if (host->ios.power_mode == MMC_POWER_ON)
1162 return;
1163
1160 mmc_host_clk_hold(host); 1164 mmc_host_clk_hold(host);
1161 1165
1162 /* If ocr is set, we use it */ 1166 /* If ocr is set, we use it */
@@ -1199,6 +1203,10 @@ static void mmc_power_up(struct mmc_host *host)
1199void mmc_power_off(struct mmc_host *host) 1203void mmc_power_off(struct mmc_host *host)
1200{ 1204{
1201 int err = 0; 1205 int err = 0;
1206
1207 if (host->ios.power_mode == MMC_POWER_OFF)
1208 return;
1209
1202 mmc_host_clk_hold(host); 1210 mmc_host_clk_hold(host);
1203 1211
1204 host->ios.clock = 0; 1212 host->ios.clock = 0;
@@ -2005,7 +2013,6 @@ EXPORT_SYMBOL(mmc_detect_card_removed);
2005 2013
2006void mmc_rescan(struct work_struct *work) 2014void mmc_rescan(struct work_struct *work)
2007{ 2015{
2008 static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
2009 struct mmc_host *host = 2016 struct mmc_host *host =
2010 container_of(work, struct mmc_host, detect.work); 2017 container_of(work, struct mmc_host, detect.work);
2011 int i; 2018 int i;
@@ -2044,8 +2051,12 @@ void mmc_rescan(struct work_struct *work)
2044 */ 2051 */
2045 mmc_bus_put(host); 2052 mmc_bus_put(host);
2046 2053
2047 if (host->ops->get_cd && host->ops->get_cd(host) == 0) 2054 if (host->ops->get_cd && host->ops->get_cd(host) == 0) {
2055 mmc_claim_host(host);
2056 mmc_power_off(host);
2057 mmc_release_host(host);
2048 goto out; 2058 goto out;
2059 }
2049 2060
2050 mmc_claim_host(host); 2061 mmc_claim_host(host);
2051 for (i = 0; i < ARRAY_SIZE(freqs); i++) { 2062 for (i = 0; i < ARRAY_SIZE(freqs); i++) {
@@ -2063,7 +2074,8 @@ void mmc_rescan(struct work_struct *work)
2063 2074
2064void mmc_start_host(struct mmc_host *host) 2075void mmc_start_host(struct mmc_host *host)
2065{ 2076{
2066 mmc_power_off(host); 2077 host->f_init = max(freqs[0], host->f_min);
2078 mmc_power_up(host);
2067 mmc_detect_change(host, 0); 2079 mmc_detect_change(host, 0);
2068} 2080}
2069 2081
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 54df5adc0413..2d4a4b746750 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -235,6 +235,36 @@ static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
235 return err; 235 return err;
236} 236}
237 237
238static void mmc_select_card_type(struct mmc_card *card)
239{
240 struct mmc_host *host = card->host;
241 u8 card_type = card->ext_csd.raw_card_type & EXT_CSD_CARD_TYPE_MASK;
242 unsigned int caps = host->caps, caps2 = host->caps2;
243 unsigned int hs_max_dtr = 0;
244
245 if (card_type & EXT_CSD_CARD_TYPE_26)
246 hs_max_dtr = MMC_HIGH_26_MAX_DTR;
247
248 if (caps & MMC_CAP_MMC_HIGHSPEED &&
249 card_type & EXT_CSD_CARD_TYPE_52)
250 hs_max_dtr = MMC_HIGH_52_MAX_DTR;
251
252 if ((caps & MMC_CAP_1_8V_DDR &&
253 card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) ||
254 (caps & MMC_CAP_1_2V_DDR &&
255 card_type & EXT_CSD_CARD_TYPE_DDR_1_2V))
256 hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
257
258 if ((caps2 & MMC_CAP2_HS200_1_8V_SDR &&
259 card_type & EXT_CSD_CARD_TYPE_SDR_1_8V) ||
260 (caps2 & MMC_CAP2_HS200_1_2V_SDR &&
261 card_type & EXT_CSD_CARD_TYPE_SDR_1_2V))
262 hs_max_dtr = MMC_HS200_MAX_DTR;
263
264 card->ext_csd.hs_max_dtr = hs_max_dtr;
265 card->ext_csd.card_type = card_type;
266}
267
238/* 268/*
239 * Decode extended CSD. 269 * Decode extended CSD.
240 */ 270 */
@@ -284,56 +314,9 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
284 if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512) 314 if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
285 mmc_card_set_blockaddr(card); 315 mmc_card_set_blockaddr(card);
286 } 316 }
317
287 card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE]; 318 card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
288 switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) { 319 mmc_select_card_type(card);
289 case EXT_CSD_CARD_TYPE_SDR_ALL:
290 case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_8V:
291 case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_2V:
292 case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_52:
293 card->ext_csd.hs_max_dtr = 200000000;
294 card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_200;
295 break;
296 case EXT_CSD_CARD_TYPE_SDR_1_2V_ALL:
297 case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_8V:
298 case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_2V:
299 case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_52:
300 card->ext_csd.hs_max_dtr = 200000000;
301 card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_2V;
302 break;
303 case EXT_CSD_CARD_TYPE_SDR_1_8V_ALL:
304 case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_8V:
305 case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_2V:
306 case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_52:
307 card->ext_csd.hs_max_dtr = 200000000;
308 card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_8V;
309 break;
310 case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
311 EXT_CSD_CARD_TYPE_26:
312 card->ext_csd.hs_max_dtr = 52000000;
313 card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_52;
314 break;
315 case EXT_CSD_CARD_TYPE_DDR_1_2V | EXT_CSD_CARD_TYPE_52 |
316 EXT_CSD_CARD_TYPE_26:
317 card->ext_csd.hs_max_dtr = 52000000;
318 card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_2V;
319 break;
320 case EXT_CSD_CARD_TYPE_DDR_1_8V | EXT_CSD_CARD_TYPE_52 |
321 EXT_CSD_CARD_TYPE_26:
322 card->ext_csd.hs_max_dtr = 52000000;
323 card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_8V;
324 break;
325 case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
326 card->ext_csd.hs_max_dtr = 52000000;
327 break;
328 case EXT_CSD_CARD_TYPE_26:
329 card->ext_csd.hs_max_dtr = 26000000;
330 break;
331 default:
332 /* MMC v4 spec says this cannot happen */
333 pr_warning("%s: card is mmc v4 but doesn't "
334 "support any high-speed modes.\n",
335 mmc_hostname(card->host));
336 }
337 320
338 card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT]; 321 card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
339 card->ext_csd.raw_erase_timeout_mult = 322 card->ext_csd.raw_erase_timeout_mult =
@@ -533,6 +516,8 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
533 } else { 516 } else {
534 card->ext_csd.data_tag_unit_size = 0; 517 card->ext_csd.data_tag_unit_size = 0;
535 } 518 }
519 } else {
520 card->ext_csd.data_sector_size = 512;
536 } 521 }
537 522
538out: 523out:
@@ -556,14 +541,10 @@ static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
556 err = mmc_get_ext_csd(card, &bw_ext_csd); 541 err = mmc_get_ext_csd(card, &bw_ext_csd);
557 542
558 if (err || bw_ext_csd == NULL) { 543 if (err || bw_ext_csd == NULL) {
559 if (bus_width != MMC_BUS_WIDTH_1) 544 err = -EINVAL;
560 err = -EINVAL;
561 goto out; 545 goto out;
562 } 546 }
563 547
564 if (bus_width == MMC_BUS_WIDTH_1)
565 goto out;
566
567 /* only compare read only fields */ 548 /* only compare read only fields */
568 err = !((card->ext_csd.raw_partition_support == 549 err = !((card->ext_csd.raw_partition_support ==
569 bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) && 550 bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
@@ -736,6 +717,10 @@ static int mmc_select_powerclass(struct mmc_card *card,
736 card->ext_csd.generic_cmd6_time); 717 card->ext_csd.generic_cmd6_time);
737 } 718 }
738 719
720 if (err)
721 pr_err("%s: power class selection for ext_csd_bus_width %d"
722 " failed\n", mmc_hostname(card->host), bus_width);
723
739 return err; 724 return err;
740} 725}
741 726
@@ -745,7 +730,7 @@ static int mmc_select_powerclass(struct mmc_card *card,
745 */ 730 */
746static int mmc_select_hs200(struct mmc_card *card) 731static int mmc_select_hs200(struct mmc_card *card)
747{ 732{
748 int idx, err = 0; 733 int idx, err = -EINVAL;
749 struct mmc_host *host; 734 struct mmc_host *host;
750 static unsigned ext_csd_bits[] = { 735 static unsigned ext_csd_bits[] = {
751 EXT_CSD_BUS_WIDTH_4, 736 EXT_CSD_BUS_WIDTH_4,
@@ -761,10 +746,12 @@ static int mmc_select_hs200(struct mmc_card *card)
761 host = card->host; 746 host = card->host;
762 747
763 if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V && 748 if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V &&
764 host->caps2 & MMC_CAP2_HS200_1_2V_SDR) 749 host->caps2 & MMC_CAP2_HS200_1_2V_SDR)
765 if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0)) 750 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0);
766 err = mmc_set_signal_voltage(host, 751
767 MMC_SIGNAL_VOLTAGE_180, 0); 752 if (err && card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_8V &&
753 host->caps2 & MMC_CAP2_HS200_1_8V_SDR)
754 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, 0);
768 755
769 /* If fails try again during next card power cycle */ 756 /* If fails try again during next card power cycle */
770 if (err) 757 if (err)
@@ -1117,9 +1104,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
1117 EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4; 1104 EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
1118 err = mmc_select_powerclass(card, ext_csd_bits, ext_csd); 1105 err = mmc_select_powerclass(card, ext_csd_bits, ext_csd);
1119 if (err) 1106 if (err)
1120 pr_warning("%s: power class selection to bus width %d" 1107 goto err;
1121 " failed\n", mmc_hostname(card->host),
1122 1 << bus_width);
1123 } 1108 }
1124 1109
1125 /* 1110 /*
@@ -1151,10 +1136,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
1151 err = mmc_select_powerclass(card, ext_csd_bits[idx][0], 1136 err = mmc_select_powerclass(card, ext_csd_bits[idx][0],
1152 ext_csd); 1137 ext_csd);
1153 if (err) 1138 if (err)
1154 pr_warning("%s: power class selection to " 1139 goto err;
1155 "bus width %d failed\n",
1156 mmc_hostname(card->host),
1157 1 << bus_width);
1158 1140
1159 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1141 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1160 EXT_CSD_BUS_WIDTH, 1142 EXT_CSD_BUS_WIDTH,
@@ -1182,10 +1164,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
1182 err = mmc_select_powerclass(card, ext_csd_bits[idx][1], 1164 err = mmc_select_powerclass(card, ext_csd_bits[idx][1],
1183 ext_csd); 1165 ext_csd);
1184 if (err) 1166 if (err)
1185 pr_warning("%s: power class selection to " 1167 goto err;
1186 "bus width %d ddr %d failed\n",
1187 mmc_hostname(card->host),
1188 1 << bus_width, ddr);
1189 1168
1190 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1169 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1191 EXT_CSD_BUS_WIDTH, 1170 EXT_CSD_BUS_WIDTH,
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 2c7c83f832d2..13d0e95380ab 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -947,7 +947,7 @@ static int mmc_sdio_resume(struct mmc_host *host)
947 } 947 }
948 948
949 if (!err && host->sdio_irqs) 949 if (!err && host->sdio_irqs)
950 mmc_signal_sdio_irq(host); 950 wake_up_process(host->sdio_irq_thread);
951 mmc_release_host(host); 951 mmc_release_host(host);
952 952
953 /* 953 /*
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index f573e7f9f740..3d8ceb4084de 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -28,18 +28,20 @@
28 28
29#include "sdio_ops.h" 29#include "sdio_ops.h"
30 30
31static int process_sdio_pending_irqs(struct mmc_card *card) 31static int process_sdio_pending_irqs(struct mmc_host *host)
32{ 32{
33 struct mmc_card *card = host->card;
33 int i, ret, count; 34 int i, ret, count;
34 unsigned char pending; 35 unsigned char pending;
35 struct sdio_func *func; 36 struct sdio_func *func;
36 37
37 /* 38 /*
38 * Optimization, if there is only 1 function interrupt registered 39 * Optimization, if there is only 1 function interrupt registered
39 * call irq handler directly 40 * and we know an IRQ was signaled then call irq handler directly.
41 * Otherwise do the full probe.
40 */ 42 */
41 func = card->sdio_single_irq; 43 func = card->sdio_single_irq;
42 if (func) { 44 if (func && host->sdio_irq_pending) {
43 func->irq_handler(func); 45 func->irq_handler(func);
44 return 1; 46 return 1;
45 } 47 }
@@ -116,7 +118,8 @@ static int sdio_irq_thread(void *_host)
116 ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort); 118 ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
117 if (ret) 119 if (ret)
118 break; 120 break;
119 ret = process_sdio_pending_irqs(host->card); 121 ret = process_sdio_pending_irqs(host);
122 host->sdio_irq_pending = false;
120 mmc_release_host(host); 123 mmc_release_host(host);
121 124
122 /* 125 /*
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 2bc06e7344db..aa131b32e3b2 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -278,10 +278,13 @@ choice
278 Choose which driver to use for the Atmel MCI Silicon 278 Choose which driver to use for the Atmel MCI Silicon
279 279
280config MMC_AT91 280config MMC_AT91
281 tristate "AT91 SD/MMC Card Interface support" 281 tristate "AT91 SD/MMC Card Interface support (DEPRECATED)"
282 depends on ARCH_AT91 282 depends on ARCH_AT91
283 help 283 help
284 This selects the AT91 MCI controller. 284 This selects the AT91 MCI controller. This driver will
285 be removed soon (for more information have a look to
286 Documentation/feature-removal-schedule.txt). Please use
287 MMC_ATMEL_MCI.
285 288
286 If unsure, say N. 289 If unsure, say N.
287 290
@@ -307,16 +310,6 @@ config MMC_ATMELMCI_DMA
307 310
308 If unsure, say N. 311 If unsure, say N.
309 312
310config MMC_IMX
311 tristate "Motorola i.MX Multimedia Card Interface support"
312 depends on ARCH_MX1
313 help
314 This selects the Motorola i.MX Multimedia card Interface.
315 If you have a i.MX platform with a Multimedia Card slot,
316 say Y or M here.
317
318 If unsure, say N.
319
320config MMC_MSM 313config MMC_MSM
321 tristate "Qualcomm SDCC Controller Support" 314 tristate "Qualcomm SDCC Controller Support"
322 depends on MMC && ARCH_MSM 315 depends on MMC && ARCH_MSM
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 3e7e26d08073..8922b06be925 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -4,7 +4,6 @@
4 4
5obj-$(CONFIG_MMC_ARMMMCI) += mmci.o 5obj-$(CONFIG_MMC_ARMMMCI) += mmci.o
6obj-$(CONFIG_MMC_PXA) += pxamci.o 6obj-$(CONFIG_MMC_PXA) += pxamci.o
7obj-$(CONFIG_MMC_IMX) += imxmmc.o
8obj-$(CONFIG_MMC_MXC) += mxcmmc.o 7obj-$(CONFIG_MMC_MXC) += mxcmmc.o
9obj-$(CONFIG_MMC_MXS) += mxs-mmc.o 8obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
10obj-$(CONFIG_MMC_SDHCI) += sdhci.o 9obj-$(CONFIG_MMC_SDHCI) += sdhci.o
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index e94476beca18..420aca642b14 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -45,19 +45,19 @@
45#define ATMCI_DMA_THRESHOLD 16 45#define ATMCI_DMA_THRESHOLD 16
46 46
47enum { 47enum {
48 EVENT_CMD_COMPLETE = 0, 48 EVENT_CMD_RDY = 0,
49 EVENT_XFER_COMPLETE, 49 EVENT_XFER_COMPLETE,
50 EVENT_DATA_COMPLETE, 50 EVENT_NOTBUSY,
51 EVENT_DATA_ERROR, 51 EVENT_DATA_ERROR,
52}; 52};
53 53
54enum atmel_mci_state { 54enum atmel_mci_state {
55 STATE_IDLE = 0, 55 STATE_IDLE = 0,
56 STATE_SENDING_CMD, 56 STATE_SENDING_CMD,
57 STATE_SENDING_DATA, 57 STATE_DATA_XFER,
58 STATE_DATA_BUSY, 58 STATE_WAITING_NOTBUSY,
59 STATE_SENDING_STOP, 59 STATE_SENDING_STOP,
60 STATE_DATA_ERROR, 60 STATE_END_REQUEST,
61}; 61};
62 62
63enum atmci_xfer_dir { 63enum atmci_xfer_dir {
@@ -78,6 +78,9 @@ struct atmel_mci_caps {
78 bool has_highspeed; 78 bool has_highspeed;
79 bool has_rwproof; 79 bool has_rwproof;
80 bool has_odd_clk_div; 80 bool has_odd_clk_div;
81 bool has_bad_data_ordering;
82 bool need_reset_after_xfer;
83 bool need_blksz_mul_4;
81}; 84};
82 85
83struct atmel_mci_dma { 86struct atmel_mci_dma {
@@ -91,6 +94,11 @@ struct atmel_mci_dma {
91 * @regs: Pointer to MMIO registers. 94 * @regs: Pointer to MMIO registers.
92 * @sg: Scatterlist entry currently being processed by PIO or PDC code. 95 * @sg: Scatterlist entry currently being processed by PIO or PDC code.
93 * @pio_offset: Offset into the current scatterlist entry. 96 * @pio_offset: Offset into the current scatterlist entry.
97 * @buffer: Buffer used if we don't have the r/w proof capability. We
98 * don't have the time to switch pdc buffers so we have to use only
99 * one buffer for the full transaction.
100 * @buf_size: size of the buffer.
101 * @phys_buf_addr: buffer address needed for pdc.
94 * @cur_slot: The slot which is currently using the controller. 102 * @cur_slot: The slot which is currently using the controller.
95 * @mrq: The request currently being processed on @cur_slot, 103 * @mrq: The request currently being processed on @cur_slot,
96 * or NULL if the controller is idle. 104 * or NULL if the controller is idle.
@@ -116,6 +124,7 @@ struct atmel_mci_dma {
116 * @queue: List of slots waiting for access to the controller. 124 * @queue: List of slots waiting for access to the controller.
117 * @need_clock_update: Update the clock rate before the next request. 125 * @need_clock_update: Update the clock rate before the next request.
118 * @need_reset: Reset controller before next request. 126 * @need_reset: Reset controller before next request.
127 * @timer: Timer to balance the data timeout error flag which cannot rise.
119 * @mode_reg: Value of the MR register. 128 * @mode_reg: Value of the MR register.
120 * @cfg_reg: Value of the CFG register. 129 * @cfg_reg: Value of the CFG register.
121 * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus 130 * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
@@ -166,6 +175,9 @@ struct atmel_mci {
166 175
167 struct scatterlist *sg; 176 struct scatterlist *sg;
168 unsigned int pio_offset; 177 unsigned int pio_offset;
178 unsigned int *buffer;
179 unsigned int buf_size;
180 dma_addr_t buf_phys_addr;
169 181
170 struct atmel_mci_slot *cur_slot; 182 struct atmel_mci_slot *cur_slot;
171 struct mmc_request *mrq; 183 struct mmc_request *mrq;
@@ -189,6 +201,7 @@ struct atmel_mci {
189 201
190 bool need_clock_update; 202 bool need_clock_update;
191 bool need_reset; 203 bool need_reset;
204 struct timer_list timer;
192 u32 mode_reg; 205 u32 mode_reg;
193 u32 cfg_reg; 206 u32 cfg_reg;
194 unsigned long bus_hz; 207 unsigned long bus_hz;
@@ -480,6 +493,32 @@ err:
480 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n"); 493 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
481} 494}
482 495
496static inline unsigned int atmci_get_version(struct atmel_mci *host)
497{
498 return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
499}
500
501static void atmci_timeout_timer(unsigned long data)
502{
503 struct atmel_mci *host;
504
505 host = (struct atmel_mci *)data;
506
507 dev_dbg(&host->pdev->dev, "software timeout\n");
508
509 if (host->mrq->cmd->data) {
510 host->mrq->cmd->data->error = -ETIMEDOUT;
511 host->data = NULL;
512 } else {
513 host->mrq->cmd->error = -ETIMEDOUT;
514 host->cmd = NULL;
515 }
516 host->need_reset = 1;
517 host->state = STATE_END_REQUEST;
518 smp_wmb();
519 tasklet_schedule(&host->tasklet);
520}
521
483static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host, 522static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,
484 unsigned int ns) 523 unsigned int ns)
485{ 524{
@@ -591,6 +630,7 @@ static void atmci_send_command(struct atmel_mci *host,
591 630
592static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data) 631static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
593{ 632{
633 dev_dbg(&host->pdev->dev, "send stop command\n");
594 atmci_send_command(host, data->stop, host->stop_cmdr); 634 atmci_send_command(host, data->stop, host->stop_cmdr);
595 atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY); 635 atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
596} 636}
@@ -603,6 +643,7 @@ static void atmci_pdc_set_single_buf(struct atmel_mci *host,
603 enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb) 643 enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb)
604{ 644{
605 u32 pointer_reg, counter_reg; 645 u32 pointer_reg, counter_reg;
646 unsigned int buf_size;
606 647
607 if (dir == XFER_RECEIVE) { 648 if (dir == XFER_RECEIVE) {
608 pointer_reg = ATMEL_PDC_RPR; 649 pointer_reg = ATMEL_PDC_RPR;
@@ -617,8 +658,15 @@ static void atmci_pdc_set_single_buf(struct atmel_mci *host,
617 counter_reg += ATMEL_PDC_SCND_BUF_OFF; 658 counter_reg += ATMEL_PDC_SCND_BUF_OFF;
618 } 659 }
619 660
620 atmci_writel(host, pointer_reg, sg_dma_address(host->sg)); 661 if (!host->caps.has_rwproof) {
621 if (host->data_size <= sg_dma_len(host->sg)) { 662 buf_size = host->buf_size;
663 atmci_writel(host, pointer_reg, host->buf_phys_addr);
664 } else {
665 buf_size = sg_dma_len(host->sg);
666 atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
667 }
668
669 if (host->data_size <= buf_size) {
622 if (host->data_size & 0x3) { 670 if (host->data_size & 0x3) {
623 /* If size is different from modulo 4, transfer bytes */ 671 /* If size is different from modulo 4, transfer bytes */
624 atmci_writel(host, counter_reg, host->data_size); 672 atmci_writel(host, counter_reg, host->data_size);
@@ -670,7 +718,20 @@ static void atmci_pdc_cleanup(struct atmel_mci *host)
670 */ 718 */
671static void atmci_pdc_complete(struct atmel_mci *host) 719static void atmci_pdc_complete(struct atmel_mci *host)
672{ 720{
721 int transfer_size = host->data->blocks * host->data->blksz;
722 int i;
723
673 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS); 724 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
725
726 if ((!host->caps.has_rwproof)
727 && (host->data->flags & MMC_DATA_READ)) {
728 if (host->caps.has_bad_data_ordering)
729 for (i = 0; i < transfer_size; i++)
730 host->buffer[i] = swab32(host->buffer[i]);
731 sg_copy_from_buffer(host->data->sg, host->data->sg_len,
732 host->buffer, transfer_size);
733 }
734
674 atmci_pdc_cleanup(host); 735 atmci_pdc_cleanup(host);
675 736
676 /* 737 /*
@@ -678,9 +739,10 @@ static void atmci_pdc_complete(struct atmel_mci *host)
678 * to send the stop command or waiting for NBUSY in this case. 739 * to send the stop command or waiting for NBUSY in this case.
679 */ 740 */
680 if (host->data) { 741 if (host->data) {
742 dev_dbg(&host->pdev->dev,
743 "(%s) set pending xfer complete\n", __func__);
681 atmci_set_pending(host, EVENT_XFER_COMPLETE); 744 atmci_set_pending(host, EVENT_XFER_COMPLETE);
682 tasklet_schedule(&host->tasklet); 745 tasklet_schedule(&host->tasklet);
683 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
684 } 746 }
685} 747}
686 748
@@ -716,6 +778,8 @@ static void atmci_dma_complete(void *arg)
716 * to send the stop command or waiting for NBUSY in this case. 778 * to send the stop command or waiting for NBUSY in this case.
717 */ 779 */
718 if (data) { 780 if (data) {
781 dev_dbg(&host->pdev->dev,
782 "(%s) set pending xfer complete\n", __func__);
719 atmci_set_pending(host, EVENT_XFER_COMPLETE); 783 atmci_set_pending(host, EVENT_XFER_COMPLETE);
720 tasklet_schedule(&host->tasklet); 784 tasklet_schedule(&host->tasklet);
721 785
@@ -791,6 +855,7 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
791 u32 iflags, tmp; 855 u32 iflags, tmp;
792 unsigned int sg_len; 856 unsigned int sg_len;
793 enum dma_data_direction dir; 857 enum dma_data_direction dir;
858 int i;
794 859
795 data->error = -EINPROGRESS; 860 data->error = -EINPROGRESS;
796 861
@@ -806,7 +871,7 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
806 iflags |= ATMCI_ENDRX | ATMCI_RXBUFF; 871 iflags |= ATMCI_ENDRX | ATMCI_RXBUFF;
807 } else { 872 } else {
808 dir = DMA_TO_DEVICE; 873 dir = DMA_TO_DEVICE;
809 iflags |= ATMCI_ENDTX | ATMCI_TXBUFE; 874 iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE;
810 } 875 }
811 876
812 /* Set BLKLEN */ 877 /* Set BLKLEN */
@@ -818,6 +883,16 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
818 /* Configure PDC */ 883 /* Configure PDC */
819 host->data_size = data->blocks * data->blksz; 884 host->data_size = data->blocks * data->blksz;
820 sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir); 885 sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir);
886
887 if ((!host->caps.has_rwproof)
888 && (host->data->flags & MMC_DATA_WRITE)) {
889 sg_copy_to_buffer(host->data->sg, host->data->sg_len,
890 host->buffer, host->data_size);
891 if (host->caps.has_bad_data_ordering)
892 for (i = 0; i < host->data_size; i++)
893 host->buffer[i] = swab32(host->buffer[i]);
894 }
895
821 if (host->data_size) 896 if (host->data_size)
822 atmci_pdc_set_both_buf(host, 897 atmci_pdc_set_both_buf(host,
823 ((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT)); 898 ((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT));
@@ -931,6 +1006,8 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
931 1006
932static void atmci_stop_transfer(struct atmel_mci *host) 1007static void atmci_stop_transfer(struct atmel_mci *host)
933{ 1008{
1009 dev_dbg(&host->pdev->dev,
1010 "(%s) set pending xfer complete\n", __func__);
934 atmci_set_pending(host, EVENT_XFER_COMPLETE); 1011 atmci_set_pending(host, EVENT_XFER_COMPLETE);
935 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); 1012 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
936} 1013}
@@ -940,8 +1017,7 @@ static void atmci_stop_transfer(struct atmel_mci *host)
940 */ 1017 */
941static void atmci_stop_transfer_pdc(struct atmel_mci *host) 1018static void atmci_stop_transfer_pdc(struct atmel_mci *host)
942{ 1019{
943 atmci_set_pending(host, EVENT_XFER_COMPLETE); 1020 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
944 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
945} 1021}
946 1022
947static void atmci_stop_transfer_dma(struct atmel_mci *host) 1023static void atmci_stop_transfer_dma(struct atmel_mci *host)
@@ -953,6 +1029,8 @@ static void atmci_stop_transfer_dma(struct atmel_mci *host)
953 atmci_dma_cleanup(host); 1029 atmci_dma_cleanup(host);
954 } else { 1030 } else {
955 /* Data transfer was stopped by the interrupt handler */ 1031 /* Data transfer was stopped by the interrupt handler */
1032 dev_dbg(&host->pdev->dev,
1033 "(%s) set pending xfer complete\n", __func__);
956 atmci_set_pending(host, EVENT_XFER_COMPLETE); 1034 atmci_set_pending(host, EVENT_XFER_COMPLETE);
957 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); 1035 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
958 } 1036 }
@@ -977,9 +1055,12 @@ static void atmci_start_request(struct atmel_mci *host,
977 1055
978 host->pending_events = 0; 1056 host->pending_events = 0;
979 host->completed_events = 0; 1057 host->completed_events = 0;
1058 host->cmd_status = 0;
980 host->data_status = 0; 1059 host->data_status = 0;
981 1060
982 if (host->need_reset) { 1061 dev_dbg(&host->pdev->dev, "start request: cmd %u\n", mrq->cmd->opcode);
1062
1063 if (host->need_reset || host->caps.need_reset_after_xfer) {
983 iflags = atmci_readl(host, ATMCI_IMR); 1064 iflags = atmci_readl(host, ATMCI_IMR);
984 iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB); 1065 iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB);
985 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); 1066 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
@@ -994,7 +1075,7 @@ static void atmci_start_request(struct atmel_mci *host,
994 1075
995 iflags = atmci_readl(host, ATMCI_IMR); 1076 iflags = atmci_readl(host, ATMCI_IMR);
996 if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB)) 1077 if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
997 dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n", 1078 dev_dbg(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
998 iflags); 1079 iflags);
999 1080
1000 if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) { 1081 if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {
@@ -1043,6 +1124,8 @@ static void atmci_start_request(struct atmel_mci *host,
1043 * prepared yet.) 1124 * prepared yet.)
1044 */ 1125 */
1045 atmci_writel(host, ATMCI_IER, iflags); 1126 atmci_writel(host, ATMCI_IER, iflags);
1127
1128 mod_timer(&host->timer, jiffies + msecs_to_jiffies(2000));
1046} 1129}
1047 1130
1048static void atmci_queue_request(struct atmel_mci *host, 1131static void atmci_queue_request(struct atmel_mci *host,
@@ -1057,6 +1140,7 @@ static void atmci_queue_request(struct atmel_mci *host,
1057 host->state = STATE_SENDING_CMD; 1140 host->state = STATE_SENDING_CMD;
1058 atmci_start_request(host, slot); 1141 atmci_start_request(host, slot);
1059 } else { 1142 } else {
1143 dev_dbg(&host->pdev->dev, "queue request\n");
1060 list_add_tail(&slot->queue_node, &host->queue); 1144 list_add_tail(&slot->queue_node, &host->queue);
1061 } 1145 }
1062 spin_unlock_bh(&host->lock); 1146 spin_unlock_bh(&host->lock);
@@ -1069,6 +1153,7 @@ static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1069 struct mmc_data *data; 1153 struct mmc_data *data;
1070 1154
1071 WARN_ON(slot->mrq); 1155 WARN_ON(slot->mrq);
1156 dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode);
1072 1157
1073 /* 1158 /*
1074 * We may "know" the card is gone even though there's still an 1159 * We may "know" the card is gone even though there's still an
@@ -1308,6 +1393,8 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
1308 host->state = STATE_IDLE; 1393 host->state = STATE_IDLE;
1309 } 1394 }
1310 1395
1396 del_timer(&host->timer);
1397
1311 spin_unlock(&host->lock); 1398 spin_unlock(&host->lock);
1312 mmc_request_done(prev_mmc, mrq); 1399 mmc_request_done(prev_mmc, mrq);
1313 spin_lock(&host->lock); 1400 spin_lock(&host->lock);
@@ -1330,21 +1417,13 @@ static void atmci_command_complete(struct atmel_mci *host,
1330 cmd->error = -EILSEQ; 1417 cmd->error = -EILSEQ;
1331 else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE)) 1418 else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE))
1332 cmd->error = -EIO; 1419 cmd->error = -EIO;
1333 else 1420 else if (host->mrq->data && (host->mrq->data->blksz & 3)) {
1334 cmd->error = 0; 1421 if (host->caps.need_blksz_mul_4) {
1335 1422 cmd->error = -EINVAL;
1336 if (cmd->error) { 1423 host->need_reset = 1;
1337 dev_dbg(&host->pdev->dev,
1338 "command error: status=0x%08x\n", status);
1339
1340 if (cmd->data) {
1341 host->stop_transfer(host);
1342 host->data = NULL;
1343 atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY
1344 | ATMCI_TXRDY | ATMCI_RXRDY
1345 | ATMCI_DATA_ERROR_FLAGS);
1346 } 1424 }
1347 } 1425 } else
1426 cmd->error = 0;
1348} 1427}
1349 1428
1350static void atmci_detect_change(unsigned long data) 1429static void atmci_detect_change(unsigned long data)
@@ -1407,23 +1486,21 @@ static void atmci_detect_change(unsigned long data)
1407 break; 1486 break;
1408 case STATE_SENDING_CMD: 1487 case STATE_SENDING_CMD:
1409 mrq->cmd->error = -ENOMEDIUM; 1488 mrq->cmd->error = -ENOMEDIUM;
1410 if (!mrq->data) 1489 if (mrq->data)
1411 break; 1490 host->stop_transfer(host);
1412 /* fall through */ 1491 break;
1413 case STATE_SENDING_DATA: 1492 case STATE_DATA_XFER:
1414 mrq->data->error = -ENOMEDIUM; 1493 mrq->data->error = -ENOMEDIUM;
1415 host->stop_transfer(host); 1494 host->stop_transfer(host);
1416 break; 1495 break;
1417 case STATE_DATA_BUSY: 1496 case STATE_WAITING_NOTBUSY:
1418 case STATE_DATA_ERROR: 1497 mrq->data->error = -ENOMEDIUM;
1419 if (mrq->data->error == -EINPROGRESS) 1498 break;
1420 mrq->data->error = -ENOMEDIUM;
1421 if (!mrq->stop)
1422 break;
1423 /* fall through */
1424 case STATE_SENDING_STOP: 1499 case STATE_SENDING_STOP:
1425 mrq->stop->error = -ENOMEDIUM; 1500 mrq->stop->error = -ENOMEDIUM;
1426 break; 1501 break;
1502 case STATE_END_REQUEST:
1503 break;
1427 } 1504 }
1428 1505
1429 atmci_request_end(host, mrq); 1506 atmci_request_end(host, mrq);
@@ -1451,7 +1528,6 @@ static void atmci_tasklet_func(unsigned long priv)
1451 struct atmel_mci *host = (struct atmel_mci *)priv; 1528 struct atmel_mci *host = (struct atmel_mci *)priv;
1452 struct mmc_request *mrq = host->mrq; 1529 struct mmc_request *mrq = host->mrq;
1453 struct mmc_data *data = host->data; 1530 struct mmc_data *data = host->data;
1454 struct mmc_command *cmd = host->cmd;
1455 enum atmel_mci_state state = host->state; 1531 enum atmel_mci_state state = host->state;
1456 enum atmel_mci_state prev_state; 1532 enum atmel_mci_state prev_state;
1457 u32 status; 1533 u32 status;
@@ -1467,107 +1543,186 @@ static void atmci_tasklet_func(unsigned long priv)
1467 1543
1468 do { 1544 do {
1469 prev_state = state; 1545 prev_state = state;
1546 dev_dbg(&host->pdev->dev, "FSM: state=%d\n", state);
1470 1547
1471 switch (state) { 1548 switch (state) {
1472 case STATE_IDLE: 1549 case STATE_IDLE:
1473 break; 1550 break;
1474 1551
1475 case STATE_SENDING_CMD: 1552 case STATE_SENDING_CMD:
1553 /*
1554 * Command has been sent, we are waiting for command
1555 * ready. Then we have three next states possible:
1556 * END_REQUEST by default, WAITING_NOTBUSY if it's a
1557 * command needing it or DATA_XFER if there is data.
1558 */
1559 dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
1476 if (!atmci_test_and_clear_pending(host, 1560 if (!atmci_test_and_clear_pending(host,
1477 EVENT_CMD_COMPLETE)) 1561 EVENT_CMD_RDY))
1478 break; 1562 break;
1479 1563
1564 dev_dbg(&host->pdev->dev, "set completed cmd ready\n");
1480 host->cmd = NULL; 1565 host->cmd = NULL;
1481 atmci_set_completed(host, EVENT_CMD_COMPLETE); 1566 atmci_set_completed(host, EVENT_CMD_RDY);
1482 atmci_command_complete(host, mrq->cmd); 1567 atmci_command_complete(host, mrq->cmd);
1483 if (!mrq->data || cmd->error) { 1568 if (mrq->data) {
1484 atmci_request_end(host, host->mrq); 1569 dev_dbg(&host->pdev->dev,
1485 goto unlock; 1570 "command with data transfer");
1486 } 1571 /*
1572 * If there is a command error don't start
1573 * data transfer.
1574 */
1575 if (mrq->cmd->error) {
1576 host->stop_transfer(host);
1577 host->data = NULL;
1578 atmci_writel(host, ATMCI_IDR,
1579 ATMCI_TXRDY | ATMCI_RXRDY
1580 | ATMCI_DATA_ERROR_FLAGS);
1581 state = STATE_END_REQUEST;
1582 } else
1583 state = STATE_DATA_XFER;
1584 } else if ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) {
1585 dev_dbg(&host->pdev->dev,
1586 "command response need waiting notbusy");
1587 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1588 state = STATE_WAITING_NOTBUSY;
1589 } else
1590 state = STATE_END_REQUEST;
1487 1591
1488 prev_state = state = STATE_SENDING_DATA; 1592 break;
1489 /* fall through */
1490 1593
1491 case STATE_SENDING_DATA: 1594 case STATE_DATA_XFER:
1492 if (atmci_test_and_clear_pending(host, 1595 if (atmci_test_and_clear_pending(host,
1493 EVENT_DATA_ERROR)) { 1596 EVENT_DATA_ERROR)) {
1494 host->stop_transfer(host); 1597 dev_dbg(&host->pdev->dev, "set completed data error\n");
1495 if (data->stop) 1598 atmci_set_completed(host, EVENT_DATA_ERROR);
1496 atmci_send_stop_cmd(host, data); 1599 state = STATE_END_REQUEST;
1497 state = STATE_DATA_ERROR;
1498 break; 1600 break;
1499 } 1601 }
1500 1602
1603 /*
1604 * A data transfer is in progress. The event expected
1605 * to move to the next state depends of data transfer
1606 * type (PDC or DMA). Once transfer done we can move
1607 * to the next step which is WAITING_NOTBUSY in write
1608 * case and directly SENDING_STOP in read case.
1609 */
1610 dev_dbg(&host->pdev->dev, "FSM: xfer complete?\n");
1501 if (!atmci_test_and_clear_pending(host, 1611 if (!atmci_test_and_clear_pending(host,
1502 EVENT_XFER_COMPLETE)) 1612 EVENT_XFER_COMPLETE))
1503 break; 1613 break;
1504 1614
1615 dev_dbg(&host->pdev->dev,
1616 "(%s) set completed xfer complete\n",
1617 __func__);
1505 atmci_set_completed(host, EVENT_XFER_COMPLETE); 1618 atmci_set_completed(host, EVENT_XFER_COMPLETE);
1506 prev_state = state = STATE_DATA_BUSY;
1507 /* fall through */
1508 1619
1509 case STATE_DATA_BUSY: 1620 if (host->data->flags & MMC_DATA_WRITE) {
1510 if (!atmci_test_and_clear_pending(host, 1621 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1511 EVENT_DATA_COMPLETE)) 1622 state = STATE_WAITING_NOTBUSY;
1512 break; 1623 } else if (host->mrq->stop) {
1513 1624 atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
1514 host->data = NULL; 1625 atmci_send_stop_cmd(host, data);
1515 atmci_set_completed(host, EVENT_DATA_COMPLETE); 1626 state = STATE_SENDING_STOP;
1516 status = host->data_status;
1517 if (unlikely(status & ATMCI_DATA_ERROR_FLAGS)) {
1518 if (status & ATMCI_DTOE) {
1519 dev_dbg(&host->pdev->dev,
1520 "data timeout error\n");
1521 data->error = -ETIMEDOUT;
1522 } else if (status & ATMCI_DCRCE) {
1523 dev_dbg(&host->pdev->dev,
1524 "data CRC error\n");
1525 data->error = -EILSEQ;
1526 } else {
1527 dev_dbg(&host->pdev->dev,
1528 "data FIFO error (status=%08x)\n",
1529 status);
1530 data->error = -EIO;
1531 }
1532 } else { 1627 } else {
1628 host->data = NULL;
1533 data->bytes_xfered = data->blocks * data->blksz; 1629 data->bytes_xfered = data->blocks * data->blksz;
1534 data->error = 0; 1630 data->error = 0;
1535 atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS); 1631 state = STATE_END_REQUEST;
1536 } 1632 }
1633 break;
1537 1634
1538 if (!data->stop) { 1635 case STATE_WAITING_NOTBUSY:
1539 atmci_request_end(host, host->mrq); 1636 /*
1540 goto unlock; 1637 * We can be in the state for two reasons: a command
1541 } 1638 * requiring waiting not busy signal (stop command
1639 * included) or a write operation. In the latest case,
1640 * we need to send a stop command.
1641 */
1642 dev_dbg(&host->pdev->dev, "FSM: not busy?\n");
1643 if (!atmci_test_and_clear_pending(host,
1644 EVENT_NOTBUSY))
1645 break;
1542 1646
1543 prev_state = state = STATE_SENDING_STOP; 1647 dev_dbg(&host->pdev->dev, "set completed not busy\n");
1544 if (!data->error) 1648 atmci_set_completed(host, EVENT_NOTBUSY);
1545 atmci_send_stop_cmd(host, data); 1649
1546 /* fall through */ 1650 if (host->data) {
1651 /*
1652 * For some commands such as CMD53, even if
1653 * there is data transfer, there is no stop
1654 * command to send.
1655 */
1656 if (host->mrq->stop) {
1657 atmci_writel(host, ATMCI_IER,
1658 ATMCI_CMDRDY);
1659 atmci_send_stop_cmd(host, data);
1660 state = STATE_SENDING_STOP;
1661 } else {
1662 host->data = NULL;
1663 data->bytes_xfered = data->blocks
1664 * data->blksz;
1665 data->error = 0;
1666 state = STATE_END_REQUEST;
1667 }
1668 } else
1669 state = STATE_END_REQUEST;
1670 break;
1547 1671
1548 case STATE_SENDING_STOP: 1672 case STATE_SENDING_STOP:
1673 /*
1674 * In this state, it is important to set host->data to
1675 * NULL (which is tested in the waiting notbusy state)
1676 * in order to go to the end request state instead of
1677 * sending stop again.
1678 */
1679 dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
1549 if (!atmci_test_and_clear_pending(host, 1680 if (!atmci_test_and_clear_pending(host,
1550 EVENT_CMD_COMPLETE)) 1681 EVENT_CMD_RDY))
1551 break; 1682 break;
1552 1683
1684 dev_dbg(&host->pdev->dev, "FSM: cmd ready\n");
1553 host->cmd = NULL; 1685 host->cmd = NULL;
1686 host->data = NULL;
1687 data->bytes_xfered = data->blocks * data->blksz;
1688 data->error = 0;
1554 atmci_command_complete(host, mrq->stop); 1689 atmci_command_complete(host, mrq->stop);
1555 atmci_request_end(host, host->mrq); 1690 if (mrq->stop->error) {
1556 goto unlock; 1691 host->stop_transfer(host);
1692 atmci_writel(host, ATMCI_IDR,
1693 ATMCI_TXRDY | ATMCI_RXRDY
1694 | ATMCI_DATA_ERROR_FLAGS);
1695 state = STATE_END_REQUEST;
1696 } else {
1697 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1698 state = STATE_WAITING_NOTBUSY;
1699 }
1700 break;
1557 1701
1558 case STATE_DATA_ERROR: 1702 case STATE_END_REQUEST:
1559 if (!atmci_test_and_clear_pending(host, 1703 atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY | ATMCI_RXRDY
1560 EVENT_XFER_COMPLETE)) 1704 | ATMCI_DATA_ERROR_FLAGS);
1561 break; 1705 status = host->data_status;
1706 if (unlikely(status)) {
1707 host->stop_transfer(host);
1708 host->data = NULL;
1709 if (status & ATMCI_DTOE) {
1710 data->error = -ETIMEDOUT;
1711 } else if (status & ATMCI_DCRCE) {
1712 data->error = -EILSEQ;
1713 } else {
1714 data->error = -EIO;
1715 }
1716 }
1562 1717
1563 state = STATE_DATA_BUSY; 1718 atmci_request_end(host, host->mrq);
1719 state = STATE_IDLE;
1564 break; 1720 break;
1565 } 1721 }
1566 } while (state != prev_state); 1722 } while (state != prev_state);
1567 1723
1568 host->state = state; 1724 host->state = state;
1569 1725
1570unlock:
1571 spin_unlock(&host->lock); 1726 spin_unlock(&host->lock);
1572} 1727}
1573 1728
@@ -1620,9 +1775,6 @@ static void atmci_read_data_pio(struct atmel_mci *host)
1620 | ATMCI_DATA_ERROR_FLAGS)); 1775 | ATMCI_DATA_ERROR_FLAGS));
1621 host->data_status = status; 1776 host->data_status = status;
1622 data->bytes_xfered += nbytes; 1777 data->bytes_xfered += nbytes;
1623 smp_wmb();
1624 atmci_set_pending(host, EVENT_DATA_ERROR);
1625 tasklet_schedule(&host->tasklet);
1626 return; 1778 return;
1627 } 1779 }
1628 } while (status & ATMCI_RXRDY); 1780 } while (status & ATMCI_RXRDY);
@@ -1691,9 +1843,6 @@ static void atmci_write_data_pio(struct atmel_mci *host)
1691 | ATMCI_DATA_ERROR_FLAGS)); 1843 | ATMCI_DATA_ERROR_FLAGS));
1692 host->data_status = status; 1844 host->data_status = status;
1693 data->bytes_xfered += nbytes; 1845 data->bytes_xfered += nbytes;
1694 smp_wmb();
1695 atmci_set_pending(host, EVENT_DATA_ERROR);
1696 tasklet_schedule(&host->tasklet);
1697 return; 1846 return;
1698 } 1847 }
1699 } while (status & ATMCI_TXRDY); 1848 } while (status & ATMCI_TXRDY);
@@ -1711,16 +1860,6 @@ done:
1711 atmci_set_pending(host, EVENT_XFER_COMPLETE); 1860 atmci_set_pending(host, EVENT_XFER_COMPLETE);
1712} 1861}
1713 1862
1714static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status)
1715{
1716 atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
1717
1718 host->cmd_status = status;
1719 smp_wmb();
1720 atmci_set_pending(host, EVENT_CMD_COMPLETE);
1721 tasklet_schedule(&host->tasklet);
1722}
1723
1724static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status) 1863static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
1725{ 1864{
1726 int i; 1865 int i;
@@ -1748,17 +1887,21 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
1748 break; 1887 break;
1749 1888
1750 if (pending & ATMCI_DATA_ERROR_FLAGS) { 1889 if (pending & ATMCI_DATA_ERROR_FLAGS) {
1890 dev_dbg(&host->pdev->dev, "IRQ: data error\n");
1751 atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS 1891 atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS
1752 | ATMCI_RXRDY | ATMCI_TXRDY); 1892 | ATMCI_RXRDY | ATMCI_TXRDY
1753 pending &= atmci_readl(host, ATMCI_IMR); 1893 | ATMCI_ENDRX | ATMCI_ENDTX
1894 | ATMCI_RXBUFF | ATMCI_TXBUFE);
1754 1895
1755 host->data_status = status; 1896 host->data_status = status;
1897 dev_dbg(&host->pdev->dev, "set pending data error\n");
1756 smp_wmb(); 1898 smp_wmb();
1757 atmci_set_pending(host, EVENT_DATA_ERROR); 1899 atmci_set_pending(host, EVENT_DATA_ERROR);
1758 tasklet_schedule(&host->tasklet); 1900 tasklet_schedule(&host->tasklet);
1759 } 1901 }
1760 1902
1761 if (pending & ATMCI_TXBUFE) { 1903 if (pending & ATMCI_TXBUFE) {
1904 dev_dbg(&host->pdev->dev, "IRQ: tx buffer empty\n");
1762 atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE); 1905 atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE);
1763 atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX); 1906 atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
1764 /* 1907 /*
@@ -1774,6 +1917,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
1774 atmci_pdc_complete(host); 1917 atmci_pdc_complete(host);
1775 } 1918 }
1776 } else if (pending & ATMCI_ENDTX) { 1919 } else if (pending & ATMCI_ENDTX) {
1920 dev_dbg(&host->pdev->dev, "IRQ: end of tx buffer\n");
1777 atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX); 1921 atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
1778 1922
1779 if (host->data_size) { 1923 if (host->data_size) {
@@ -1784,6 +1928,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
1784 } 1928 }
1785 1929
1786 if (pending & ATMCI_RXBUFF) { 1930 if (pending & ATMCI_RXBUFF) {
1931 dev_dbg(&host->pdev->dev, "IRQ: rx buffer full\n");
1787 atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF); 1932 atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF);
1788 atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX); 1933 atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
1789 /* 1934 /*
@@ -1799,6 +1944,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
1799 atmci_pdc_complete(host); 1944 atmci_pdc_complete(host);
1800 } 1945 }
1801 } else if (pending & ATMCI_ENDRX) { 1946 } else if (pending & ATMCI_ENDRX) {
1947 dev_dbg(&host->pdev->dev, "IRQ: end of rx buffer\n");
1802 atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX); 1948 atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
1803 1949
1804 if (host->data_size) { 1950 if (host->data_size) {
@@ -1808,23 +1954,44 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
1808 } 1954 }
1809 } 1955 }
1810 1956
1957 /*
1958 * First mci IPs, so mainly the ones having pdc, have some
1959 * issues with the notbusy signal. You can't get it after
1960 * data transmission if you have not sent a stop command.
1961 * The appropriate workaround is to use the BLKE signal.
1962 */
1963 if (pending & ATMCI_BLKE) {
1964 dev_dbg(&host->pdev->dev, "IRQ: blke\n");
1965 atmci_writel(host, ATMCI_IDR, ATMCI_BLKE);
1966 smp_wmb();
1967 dev_dbg(&host->pdev->dev, "set pending notbusy\n");
1968 atmci_set_pending(host, EVENT_NOTBUSY);
1969 tasklet_schedule(&host->tasklet);
1970 }
1811 1971
1812 if (pending & ATMCI_NOTBUSY) { 1972 if (pending & ATMCI_NOTBUSY) {
1813 atmci_writel(host, ATMCI_IDR, 1973 dev_dbg(&host->pdev->dev, "IRQ: not_busy\n");
1814 ATMCI_DATA_ERROR_FLAGS | ATMCI_NOTBUSY); 1974 atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY);
1815 if (!host->data_status)
1816 host->data_status = status;
1817 smp_wmb(); 1975 smp_wmb();
1818 atmci_set_pending(host, EVENT_DATA_COMPLETE); 1976 dev_dbg(&host->pdev->dev, "set pending notbusy\n");
1977 atmci_set_pending(host, EVENT_NOTBUSY);
1819 tasklet_schedule(&host->tasklet); 1978 tasklet_schedule(&host->tasklet);
1820 } 1979 }
1980
1821 if (pending & ATMCI_RXRDY) 1981 if (pending & ATMCI_RXRDY)
1822 atmci_read_data_pio(host); 1982 atmci_read_data_pio(host);
1823 if (pending & ATMCI_TXRDY) 1983 if (pending & ATMCI_TXRDY)
1824 atmci_write_data_pio(host); 1984 atmci_write_data_pio(host);
1825 1985
1826 if (pending & ATMCI_CMDRDY) 1986 if (pending & ATMCI_CMDRDY) {
1827 atmci_cmd_interrupt(host, status); 1987 dev_dbg(&host->pdev->dev, "IRQ: cmd ready\n");
1988 atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
1989 host->cmd_status = status;
1990 smp_wmb();
1991 dev_dbg(&host->pdev->dev, "set pending cmd rdy\n");
1992 atmci_set_pending(host, EVENT_CMD_RDY);
1993 tasklet_schedule(&host->tasklet);
1994 }
1828 1995
1829 if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB)) 1996 if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
1830 atmci_sdio_interrupt(host, status); 1997 atmci_sdio_interrupt(host, status);
@@ -1877,13 +2044,26 @@ static int __init atmci_init_slot(struct atmel_mci *host,
1877 mmc->caps |= MMC_CAP_SDIO_IRQ; 2044 mmc->caps |= MMC_CAP_SDIO_IRQ;
1878 if (host->caps.has_highspeed) 2045 if (host->caps.has_highspeed)
1879 mmc->caps |= MMC_CAP_SD_HIGHSPEED; 2046 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
1880 if (slot_data->bus_width >= 4) 2047 /*
2048 * Without the read/write proof capability, it is strongly suggested to
2049 * use only one bit for data to prevent fifo underruns and overruns
2050 * which will corrupt data.
2051 */
2052 if ((slot_data->bus_width >= 4) && host->caps.has_rwproof)
1881 mmc->caps |= MMC_CAP_4_BIT_DATA; 2053 mmc->caps |= MMC_CAP_4_BIT_DATA;
1882 2054
1883 mmc->max_segs = 64; 2055 if (atmci_get_version(host) < 0x200) {
1884 mmc->max_req_size = 32768 * 512; 2056 mmc->max_segs = 256;
1885 mmc->max_blk_size = 32768; 2057 mmc->max_blk_size = 4095;
1886 mmc->max_blk_count = 512; 2058 mmc->max_blk_count = 256;
2059 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2060 mmc->max_seg_size = mmc->max_blk_size * mmc->max_segs;
2061 } else {
2062 mmc->max_segs = 64;
2063 mmc->max_req_size = 32768 * 512;
2064 mmc->max_blk_size = 32768;
2065 mmc->max_blk_count = 512;
2066 }
1887 2067
1888 /* Assume card is present initially */ 2068 /* Assume card is present initially */
1889 set_bit(ATMCI_CARD_PRESENT, &slot->flags); 2069 set_bit(ATMCI_CARD_PRESENT, &slot->flags);
@@ -2007,11 +2187,6 @@ static bool atmci_configure_dma(struct atmel_mci *host)
2007 } 2187 }
2008} 2188}
2009 2189
2010static inline unsigned int atmci_get_version(struct atmel_mci *host)
2011{
2012 return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
2013}
2014
2015/* 2190/*
2016 * HSMCI (High Speed MCI) module is not fully compatible with MCI module. 2191 * HSMCI (High Speed MCI) module is not fully compatible with MCI module.
2017 * HSMCI provides DMA support and a new config register but no more supports 2192 * HSMCI provides DMA support and a new config register but no more supports
@@ -2032,6 +2207,9 @@ static void __init atmci_get_cap(struct atmel_mci *host)
2032 host->caps.has_highspeed = 0; 2207 host->caps.has_highspeed = 0;
2033 host->caps.has_rwproof = 0; 2208 host->caps.has_rwproof = 0;
2034 host->caps.has_odd_clk_div = 0; 2209 host->caps.has_odd_clk_div = 0;
2210 host->caps.has_bad_data_ordering = 1;
2211 host->caps.need_reset_after_xfer = 1;
2212 host->caps.need_blksz_mul_4 = 1;
2035 2213
2036 /* keep only major version number */ 2214 /* keep only major version number */
2037 switch (version & 0xf00) { 2215 switch (version & 0xf00) {
@@ -2051,7 +2229,11 @@ static void __init atmci_get_cap(struct atmel_mci *host)
2051 host->caps.has_highspeed = 1; 2229 host->caps.has_highspeed = 1;
2052 case 0x200: 2230 case 0x200:
2053 host->caps.has_rwproof = 1; 2231 host->caps.has_rwproof = 1;
2232 host->caps.need_blksz_mul_4 = 0;
2054 case 0x100: 2233 case 0x100:
2234 host->caps.has_bad_data_ordering = 0;
2235 host->caps.need_reset_after_xfer = 0;
2236 case 0x0:
2055 break; 2237 break;
2056 default: 2238 default:
2057 host->caps.has_pdc = 0; 2239 host->caps.has_pdc = 0;
@@ -2138,14 +2320,20 @@ static int __init atmci_probe(struct platform_device *pdev)
2138 if (pdata->slot[0].bus_width) { 2320 if (pdata->slot[0].bus_width) {
2139 ret = atmci_init_slot(host, &pdata->slot[0], 2321 ret = atmci_init_slot(host, &pdata->slot[0],
2140 0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA); 2322 0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA);
2141 if (!ret) 2323 if (!ret) {
2142 nr_slots++; 2324 nr_slots++;
2325 host->buf_size = host->slot[0]->mmc->max_req_size;
2326 }
2143 } 2327 }
2144 if (pdata->slot[1].bus_width) { 2328 if (pdata->slot[1].bus_width) {
2145 ret = atmci_init_slot(host, &pdata->slot[1], 2329 ret = atmci_init_slot(host, &pdata->slot[1],
2146 1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB); 2330 1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB);
2147 if (!ret) 2331 if (!ret) {
2148 nr_slots++; 2332 nr_slots++;
2333 if (host->slot[1]->mmc->max_req_size > host->buf_size)
2334 host->buf_size =
2335 host->slot[1]->mmc->max_req_size;
2336 }
2149 } 2337 }
2150 2338
2151 if (!nr_slots) { 2339 if (!nr_slots) {
@@ -2153,6 +2341,19 @@ static int __init atmci_probe(struct platform_device *pdev)
2153 goto err_init_slot; 2341 goto err_init_slot;
2154 } 2342 }
2155 2343
2344 if (!host->caps.has_rwproof) {
2345 host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size,
2346 &host->buf_phys_addr,
2347 GFP_KERNEL);
2348 if (!host->buffer) {
2349 ret = -ENOMEM;
2350 dev_err(&pdev->dev, "buffer allocation failed\n");
2351 goto err_init_slot;
2352 }
2353 }
2354
2355 setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
2356
2156 dev_info(&pdev->dev, 2357 dev_info(&pdev->dev,
2157 "Atmel MCI controller at 0x%08lx irq %d, %u slots\n", 2358 "Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
2158 host->mapbase, irq, nr_slots); 2359 host->mapbase, irq, nr_slots);
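A hedged userspace sketch of the bounce-buffer sizing added to probe above: the buffer has to cover the largest request either slot can issue and is only needed when the controller lacks read/write proof. The driver uses dma_alloc_coherent(); malloc() stands in here, and all names are illustrative.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct slot { unsigned int max_req_size; };

int main(void)
{
	struct slot slots[2] = { { .max_req_size = 32768 * 512 },
				 { .max_req_size = 4095 * 256 } };
	bool has_rwproof = false;
	unsigned int buf_size = 0;
	void *buffer = NULL;
	int i;

	/* Track the largest per-slot request size, as the probe hunk does */
	for (i = 0; i < 2; i++)
		if (slots[i].max_req_size > buf_size)
			buf_size = slots[i].max_req_size;

	/* Only controllers without rwproof need the PIO bounce buffer */
	if (!has_rwproof) {
		buffer = malloc(buf_size);	/* dma_alloc_coherent() in the driver */
		if (!buffer)
			return 1;
	}

	printf("bounce buffer: %u bytes (%s)\n", buf_size,
	       buffer ? "allocated" : "not needed");
	free(buffer);
	return 0;
}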
@@ -2179,6 +2380,10 @@ static int __exit atmci_remove(struct platform_device *pdev)
2179 2380
2180 platform_set_drvdata(pdev, NULL); 2381 platform_set_drvdata(pdev, NULL);
2181 2382
2383 if (host->buffer)
2384 dma_free_coherent(&pdev->dev, host->buf_size,
2385 host->buffer, host->buf_phys_addr);
2386
2182 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { 2387 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
2183 if (host->slot[i]) 2388 if (host->slot[i])
2184 atmci_cleanup_slot(host->slot[i], i); 2389 atmci_cleanup_slot(host->slot[i], i);
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index c1f3673ae1ef..7cf6c624bf73 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -1533,4 +1533,5 @@ module_exit(davinci_mmcsd_exit);
1533MODULE_AUTHOR("Texas Instruments India"); 1533MODULE_AUTHOR("Texas Instruments India");
1534MODULE_LICENSE("GPL"); 1534MODULE_LICENSE("GPL");
1535MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller"); 1535MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller");
1536MODULE_ALIAS("platform:davinci_mmc");
1536 1537
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index ab3fc4617107..9bbf45f8c538 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -100,8 +100,6 @@ struct dw_mci_slot {
100 int last_detect_state; 100 int last_detect_state;
101}; 101};
102 102
103static struct workqueue_struct *dw_mci_card_workqueue;
104
105#if defined(CONFIG_DEBUG_FS) 103#if defined(CONFIG_DEBUG_FS)
106static int dw_mci_req_show(struct seq_file *s, void *v) 104static int dw_mci_req_show(struct seq_file *s, void *v)
107{ 105{
@@ -859,10 +857,10 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
859 int_mask = mci_readl(host, INTMASK); 857 int_mask = mci_readl(host, INTMASK);
860 if (enb) { 858 if (enb) {
861 mci_writel(host, INTMASK, 859 mci_writel(host, INTMASK,
862 (int_mask | (1 << SDMMC_INT_SDIO(slot->id)))); 860 (int_mask | SDMMC_INT_SDIO(slot->id)));
863 } else { 861 } else {
864 mci_writel(host, INTMASK, 862 mci_writel(host, INTMASK,
865 (int_mask & ~(1 << SDMMC_INT_SDIO(slot->id)))); 863 (int_mask & ~SDMMC_INT_SDIO(slot->id)));
866 } 864 }
867} 865}
868 866
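A brief sketch of what the dropped "1 <<" fixes, assuming SDMMC_INT_SDIO() already expands to a ready-made mask (the macro body below is an assumption, not copied from dw_mmc.h):

#include <stdio.h>

/* Assumed macro shape: the per-slot SDIO interrupt as a ready-made mask */
#define SDMMC_INT_SDIO(n)	(1u << (16 + (n)))

int main(void)
{
	int id = 0;

	/*
	 * The hunk above drops the caller-side "1 <<": since the macro is
	 * already a mask, shifting it again would move the bit far past the
	 * intended position and enable the wrong interrupt.
	 */
	printf("slot %d SDIO irq mask: 0x%08x\n", id, SDMMC_INT_SDIO(id));
	return 0;
}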
@@ -1605,7 +1603,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1605 1603
1606 if (pending & SDMMC_INT_CD) { 1604 if (pending & SDMMC_INT_CD) {
1607 mci_writel(host, RINTSTS, SDMMC_INT_CD); 1605 mci_writel(host, RINTSTS, SDMMC_INT_CD);
1608 queue_work(dw_mci_card_workqueue, &host->card_work); 1606 queue_work(host->card_workqueue, &host->card_work);
1609 } 1607 }
1610 1608
1611 /* Handle SDIO Interrupts */ 1609 /* Handle SDIO Interrupts */
@@ -1844,7 +1842,7 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
1844 * Card may have been plugged in prior to boot so we 1842 * Card may have been plugged in prior to boot so we
1845 * need to run the detect tasklet 1843 * need to run the detect tasklet
1846 */ 1844 */
1847 queue_work(dw_mci_card_workqueue, &host->card_work); 1845 queue_work(host->card_workqueue, &host->card_work);
1848 1846
1849 return 0; 1847 return 0;
1850} 1848}
@@ -2021,9 +2019,9 @@ int dw_mci_probe(struct dw_mci *host)
2021 mci_writel(host, CLKSRC, 0); 2019 mci_writel(host, CLKSRC, 0);
2022 2020
2023 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host); 2021 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
2024 dw_mci_card_workqueue = alloc_workqueue("dw-mci-card", 2022 host->card_workqueue = alloc_workqueue("dw-mci-card",
2025 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1); 2023 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
2026 if (!dw_mci_card_workqueue) 2024 if (!host->card_workqueue)
2027 goto err_dmaunmap; 2025 goto err_dmaunmap;
2028 INIT_WORK(&host->card_work, dw_mci_work_routine_card); 2026 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
2029 ret = request_irq(host->irq, dw_mci_interrupt, host->irq_flags, "dw-mci", host); 2027 ret = request_irq(host->irq, dw_mci_interrupt, host->irq_flags, "dw-mci", host);
@@ -2085,7 +2083,7 @@ err_init_slot:
2085 free_irq(host->irq, host); 2083 free_irq(host->irq, host);
2086 2084
2087err_workqueue: 2085err_workqueue:
2088 destroy_workqueue(dw_mci_card_workqueue); 2086 destroy_workqueue(host->card_workqueue);
2089 2087
2090err_dmaunmap: 2088err_dmaunmap:
2091 if (host->use_dma && host->dma_ops->exit) 2089 if (host->use_dma && host->dma_ops->exit)
@@ -2119,7 +2117,7 @@ void dw_mci_remove(struct dw_mci *host)
2119 mci_writel(host, CLKSRC, 0); 2117 mci_writel(host, CLKSRC, 0);
2120 2118
2121 free_irq(host->irq, host); 2119 free_irq(host->irq, host);
2122 destroy_workqueue(dw_mci_card_workqueue); 2120 destroy_workqueue(host->card_workqueue);
2123 dma_free_coherent(&host->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); 2121 dma_free_coherent(&host->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
2124 2122
2125 if (host->use_dma && host->dma_ops->exit) 2123 if (host->use_dma && host->dma_ops->exit)
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
deleted file mode 100644
index ea0f3cedef21..000000000000
--- a/drivers/mmc/host/imxmmc.c
+++ /dev/null
@@ -1,1169 +0,0 @@
1/*
2 * linux/drivers/mmc/host/imxmmc.c - Motorola i.MX MMCI driver
3 *
4 * Copyright (C) 2004 Sascha Hauer, Pengutronix <sascha@saschahauer.de>
5 * Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com>
6 *
7 * derived from pxamci.c by Russell King
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 */
14
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/ioport.h>
18#include <linux/platform_device.h>
19#include <linux/interrupt.h>
20#include <linux/blkdev.h>
21#include <linux/dma-mapping.h>
22#include <linux/mmc/host.h>
23#include <linux/mmc/card.h>
24#include <linux/delay.h>
25#include <linux/clk.h>
26#include <linux/io.h>
27
28#include <asm/dma.h>
29#include <asm/irq.h>
30#include <asm/sizes.h>
31#include <mach/mmc.h>
32#include <mach/imx-dma.h>
33
34#include "imxmmc.h"
35
36#define DRIVER_NAME "imx-mmc"
37
38#define IMXMCI_INT_MASK_DEFAULT (INT_MASK_BUF_READY | INT_MASK_DATA_TRAN | \
39 INT_MASK_WRITE_OP_DONE | INT_MASK_END_CMD_RES | \
40 INT_MASK_AUTO_CARD_DETECT | INT_MASK_DAT0_EN | INT_MASK_SDIO)
41
42struct imxmci_host {
43 struct mmc_host *mmc;
44 spinlock_t lock;
45 struct resource *res;
46 void __iomem *base;
47 int irq;
48 imx_dmach_t dma;
49 volatile unsigned int imask;
50 unsigned int power_mode;
51 unsigned int present;
52 struct imxmmc_platform_data *pdata;
53
54 struct mmc_request *req;
55 struct mmc_command *cmd;
56 struct mmc_data *data;
57
58 struct timer_list timer;
59 struct tasklet_struct tasklet;
60 unsigned int status_reg;
61 unsigned long pending_events;
 62 /* The next two fields are used by CPU-driven transfers to work around SDHC deficiencies */
63 u16 *data_ptr;
64 unsigned int data_cnt;
65 atomic_t stuck_timeout;
66
67 unsigned int dma_nents;
68 unsigned int dma_size;
69 unsigned int dma_dir;
70 int dma_allocated;
71
72 unsigned char actual_bus_width;
73
74 int prev_cmd_code;
75
76 struct clk *clk;
77};
78
79#define IMXMCI_PEND_IRQ_b 0
80#define IMXMCI_PEND_DMA_END_b 1
81#define IMXMCI_PEND_DMA_ERR_b 2
82#define IMXMCI_PEND_WAIT_RESP_b 3
83#define IMXMCI_PEND_DMA_DATA_b 4
84#define IMXMCI_PEND_CPU_DATA_b 5
85#define IMXMCI_PEND_CARD_XCHG_b 6
86#define IMXMCI_PEND_SET_INIT_b 7
87#define IMXMCI_PEND_STARTED_b 8
88
89#define IMXMCI_PEND_IRQ_m (1 << IMXMCI_PEND_IRQ_b)
90#define IMXMCI_PEND_DMA_END_m (1 << IMXMCI_PEND_DMA_END_b)
91#define IMXMCI_PEND_DMA_ERR_m (1 << IMXMCI_PEND_DMA_ERR_b)
92#define IMXMCI_PEND_WAIT_RESP_m (1 << IMXMCI_PEND_WAIT_RESP_b)
93#define IMXMCI_PEND_DMA_DATA_m (1 << IMXMCI_PEND_DMA_DATA_b)
94#define IMXMCI_PEND_CPU_DATA_m (1 << IMXMCI_PEND_CPU_DATA_b)
95#define IMXMCI_PEND_CARD_XCHG_m (1 << IMXMCI_PEND_CARD_XCHG_b)
96#define IMXMCI_PEND_SET_INIT_m (1 << IMXMCI_PEND_SET_INIT_b)
97#define IMXMCI_PEND_STARTED_m (1 << IMXMCI_PEND_STARTED_b)
98
99static void imxmci_stop_clock(struct imxmci_host *host)
100{
101 int i = 0;
102 u16 reg;
103
104 reg = readw(host->base + MMC_REG_STR_STP_CLK);
105 writew(reg & ~STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);
106 while (i < 0x1000) {
107 if (!(i & 0x7f)) {
108 reg = readw(host->base + MMC_REG_STR_STP_CLK);
109 writew(reg | STR_STP_CLK_STOP_CLK,
110 host->base + MMC_REG_STR_STP_CLK);
111 }
112
113 reg = readw(host->base + MMC_REG_STATUS);
114 if (!(reg & STATUS_CARD_BUS_CLK_RUN)) {
115 /* Check twice before cut */
116 reg = readw(host->base + MMC_REG_STATUS);
117 if (!(reg & STATUS_CARD_BUS_CLK_RUN))
118 return;
119 }
120
121 i++;
122 }
123 dev_dbg(mmc_dev(host->mmc), "imxmci_stop_clock blocked, no luck\n");
124}
125
126static int imxmci_start_clock(struct imxmci_host *host)
127{
128 unsigned int trials = 0;
129 unsigned int delay_limit = 128;
130 unsigned long flags;
131 u16 reg;
132
133 reg = readw(host->base + MMC_REG_STR_STP_CLK);
134 writew(reg & ~STR_STP_CLK_STOP_CLK, host->base + MMC_REG_STR_STP_CLK);
135
136 clear_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);
137
138 /*
 139 * Command the clock to start; this usually succeeds in fewer
 140 * than 6 delay loops, but during card detection (low clock rate)
 141 * it can take up to 5000 delay loops and sometimes fails on the first attempt
142 */
143 reg = readw(host->base + MMC_REG_STR_STP_CLK);
144 writew(reg | STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);
145
146 do {
147 unsigned int delay = delay_limit;
148
149 while (delay--) {
150 reg = readw(host->base + MMC_REG_STATUS);
151 if (reg & STATUS_CARD_BUS_CLK_RUN) {
152 /* Check twice before cut */
153 reg = readw(host->base + MMC_REG_STATUS);
154 if (reg & STATUS_CARD_BUS_CLK_RUN)
155 return 0;
156 }
157
158 if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
159 return 0;
160 }
161
162 local_irq_save(flags);
163 /*
 164 * Ensure that the request is not doubled under any circumstances.
 165 * It is possible that the clock-running state is missed, because some other
 166 * IRQ or scheduling delays this function's execution and the clock has
 167 * already been stopped by other means (response processing, SDHC HW)
168 */
169 if (!test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events)) {
170 reg = readw(host->base + MMC_REG_STR_STP_CLK);
171 writew(reg | STR_STP_CLK_START_CLK,
172 host->base + MMC_REG_STR_STP_CLK);
173 }
174 local_irq_restore(flags);
175
176 } while (++trials < 256);
177
178 dev_err(mmc_dev(host->mmc), "imxmci_start_clock blocked, no luck\n");
179
180 return -1;
181}
182
183static void imxmci_softreset(struct imxmci_host *host)
184{
185 int i;
186
187 /* reset sequence */
188 writew(0x08, host->base + MMC_REG_STR_STP_CLK);
189 writew(0x0D, host->base + MMC_REG_STR_STP_CLK);
190
191 for (i = 0; i < 8; i++)
192 writew(0x05, host->base + MMC_REG_STR_STP_CLK);
193
194 writew(0xff, host->base + MMC_REG_RES_TO);
195 writew(512, host->base + MMC_REG_BLK_LEN);
196 writew(1, host->base + MMC_REG_NOB);
197}
198
199static int imxmci_busy_wait_for_status(struct imxmci_host *host,
200 unsigned int *pstat, unsigned int stat_mask,
201 int timeout, const char *where)
202{
203 int loops = 0;
204
205 while (!(*pstat & stat_mask)) {
206 loops += 2;
207 if (loops >= timeout) {
208 dev_dbg(mmc_dev(host->mmc), "busy wait timeout in %s, STATUS = 0x%x (0x%x)\n",
209 where, *pstat, stat_mask);
210 return -1;
211 }
212 udelay(2);
213 *pstat |= readw(host->base + MMC_REG_STATUS);
214 }
215 if (!loops)
216 return 0;
217
 218 /* The busy-wait is expected here for clocks < 8 MHz due to SDHC hardware flaws */
219 if (!(stat_mask & STATUS_END_CMD_RESP) || (host->mmc->ios.clock >= 8000000))
220 dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n",
221 loops, where, *pstat, stat_mask);
222 return loops;
223}
224
225static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
226{
227 unsigned int nob = data->blocks;
228 unsigned int blksz = data->blksz;
229 unsigned int datasz = nob * blksz;
230 int i;
231
232 if (data->flags & MMC_DATA_STREAM)
233 nob = 0xffff;
234
235 host->data = data;
236 data->bytes_xfered = 0;
237
238 writew(nob, host->base + MMC_REG_NOB);
239 writew(blksz, host->base + MMC_REG_BLK_LEN);
240
241 /*
 242 * DMA cannot be used for small block sizes; we have to fall back to CPU-driven transfers.
 243 * Non-512-byte transfers are problematic according to the note in paragraph
 244 * 20.6.7 of the User Manual anyway, but we need to be able to transfer the SCR at least.
 245 * The situation is even more complex in reality: the SDHC is not able to handle
 246 * partial FIFO fills and reads well. The length has to be rounded up to a burst-size multiple.
 247 * This is required for the SCR read at least.
248 */
249 if (datasz < 512) {
250 host->dma_size = datasz;
251 if (data->flags & MMC_DATA_READ) {
252 host->dma_dir = DMA_FROM_DEVICE;
253
254 /* Hack to enable read SCR */
255 writew(1, host->base + MMC_REG_NOB);
256 writew(512, host->base + MMC_REG_BLK_LEN);
257 } else {
258 host->dma_dir = DMA_TO_DEVICE;
259 }
260
261 /* Convert back to virtual address */
262 host->data_ptr = (u16 *)sg_virt(data->sg);
263 host->data_cnt = 0;
264
265 clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
266 set_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);
267
268 return;
269 }
270
271 if (data->flags & MMC_DATA_READ) {
272 host->dma_dir = DMA_FROM_DEVICE;
273 host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
274 data->sg_len, host->dma_dir);
275
276 imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
277 host->res->start + MMC_REG_BUFFER_ACCESS,
278 DMA_MODE_READ);
279
280 /*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_READ, IMX_DMA_WIDTH_16, CCR_REN);*/
281 CCR(host->dma) = CCR_DMOD_LINEAR | CCR_DSIZ_32 | CCR_SMOD_FIFO | CCR_SSIZ_16 | CCR_REN;
282 } else {
283 host->dma_dir = DMA_TO_DEVICE;
284
285 host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
286 data->sg_len, host->dma_dir);
287
288 imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
289 host->res->start + MMC_REG_BUFFER_ACCESS,
290 DMA_MODE_WRITE);
291
292 /*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_WRITE, IMX_DMA_WIDTH_16, CCR_REN);*/
293 CCR(host->dma) = CCR_SMOD_LINEAR | CCR_SSIZ_32 | CCR_DMOD_FIFO | CCR_DSIZ_16 | CCR_REN;
294 }
295
296#if 1 /* This code is there only for consistency checking and can be disabled in future */
297 host->dma_size = 0;
298 for (i = 0; i < host->dma_nents; i++)
299 host->dma_size += data->sg[i].length;
300
301 if (datasz > host->dma_size) {
302 dev_err(mmc_dev(host->mmc), "imxmci_setup_data datasz 0x%x > 0x%x dm_size\n",
303 datasz, host->dma_size);
304 }
305#endif
306
307 host->dma_size = datasz;
308
309 wmb();
310
311 set_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
312 clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);
313
314 /* start DMA engine for read, write is delayed after initial response */
315 if (host->dma_dir == DMA_FROM_DEVICE)
316 imx_dma_enable(host->dma);
317}
318
319static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd, unsigned int cmdat)
320{
321 unsigned long flags;
322 u32 imask;
323
324 WARN_ON(host->cmd != NULL);
325 host->cmd = cmd;
326
 327 /* Ensure that the clock is stopped, else command programming and start fail */
328 imxmci_stop_clock(host);
329
330 if (cmd->flags & MMC_RSP_BUSY)
331 cmdat |= CMD_DAT_CONT_BUSY;
332
333 switch (mmc_resp_type(cmd)) {
334 case MMC_RSP_R1: /* short CRC, OPCODE */
335 case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */
336 cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R1;
337 break;
338 case MMC_RSP_R2: /* long 136 bit + CRC */
339 cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R2;
340 break;
341 case MMC_RSP_R3: /* short */
342 cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R3;
343 break;
344 default:
345 break;
346 }
347
348 if (test_and_clear_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events))
349 cmdat |= CMD_DAT_CONT_INIT; /* This command needs init */
350
351 if (host->actual_bus_width == MMC_BUS_WIDTH_4)
352 cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
353
354 writew(cmd->opcode, host->base + MMC_REG_CMD);
355 writew(cmd->arg >> 16, host->base + MMC_REG_ARGH);
356 writew(cmd->arg & 0xffff, host->base + MMC_REG_ARGL);
357 writew(cmdat, host->base + MMC_REG_CMD_DAT_CONT);
358
359 atomic_set(&host->stuck_timeout, 0);
360 set_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events);
361
362
363 imask = IMXMCI_INT_MASK_DEFAULT;
364 imask &= ~INT_MASK_END_CMD_RES;
365 if (cmdat & CMD_DAT_CONT_DATA_ENABLE) {
366 /* imask &= ~INT_MASK_BUF_READY; */
367 imask &= ~INT_MASK_DATA_TRAN;
368 if (cmdat & CMD_DAT_CONT_WRITE)
369 imask &= ~INT_MASK_WRITE_OP_DONE;
370 if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events))
371 imask &= ~INT_MASK_BUF_READY;
372 }
373
374 spin_lock_irqsave(&host->lock, flags);
375 host->imask = imask;
376 writew(host->imask, host->base + MMC_REG_INT_MASK);
377 spin_unlock_irqrestore(&host->lock, flags);
378
379 dev_dbg(mmc_dev(host->mmc), "CMD%02d (0x%02x) mask set to 0x%04x\n",
380 cmd->opcode, cmd->opcode, imask);
381
382 imxmci_start_clock(host);
383}
384
385static void imxmci_finish_request(struct imxmci_host *host, struct mmc_request *req)
386{
387 unsigned long flags;
388
389 spin_lock_irqsave(&host->lock, flags);
390
391 host->pending_events &= ~(IMXMCI_PEND_WAIT_RESP_m | IMXMCI_PEND_DMA_END_m |
392 IMXMCI_PEND_DMA_DATA_m | IMXMCI_PEND_CPU_DATA_m);
393
394 host->imask = IMXMCI_INT_MASK_DEFAULT;
395 writew(host->imask, host->base + MMC_REG_INT_MASK);
396
397 spin_unlock_irqrestore(&host->lock, flags);
398
399 if (req && req->cmd)
400 host->prev_cmd_code = req->cmd->opcode;
401
402 host->req = NULL;
403 host->cmd = NULL;
404 host->data = NULL;
405 mmc_request_done(host->mmc, req);
406}
407
408static int imxmci_finish_data(struct imxmci_host *host, unsigned int stat)
409{
410 struct mmc_data *data = host->data;
411 int data_error;
412
413 if (test_and_clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
414 imx_dma_disable(host->dma);
415 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents,
416 host->dma_dir);
417 }
418
419 if (stat & STATUS_ERR_MASK) {
420 dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n", stat);
421 if (stat & (STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR))
422 data->error = -EILSEQ;
423 else if (stat & STATUS_TIME_OUT_READ)
424 data->error = -ETIMEDOUT;
425 else
426 data->error = -EIO;
427 } else {
428 data->bytes_xfered = host->dma_size;
429 }
430
431 data_error = data->error;
432
433 host->data = NULL;
434
435 return data_error;
436}
437
438static int imxmci_cmd_done(struct imxmci_host *host, unsigned int stat)
439{
440 struct mmc_command *cmd = host->cmd;
441 int i;
442 u32 a, b, c;
443 struct mmc_data *data = host->data;
444
445 if (!cmd)
446 return 0;
447
448 host->cmd = NULL;
449
450 if (stat & STATUS_TIME_OUT_RESP) {
451 dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
452 cmd->error = -ETIMEDOUT;
453 } else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
454 dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
455 cmd->error = -EILSEQ;
456 }
457
458 if (cmd->flags & MMC_RSP_PRESENT) {
459 if (cmd->flags & MMC_RSP_136) {
460 for (i = 0; i < 4; i++) {
461 a = readw(host->base + MMC_REG_RES_FIFO);
462 b = readw(host->base + MMC_REG_RES_FIFO);
463 cmd->resp[i] = a << 16 | b;
464 }
465 } else {
466 a = readw(host->base + MMC_REG_RES_FIFO);
467 b = readw(host->base + MMC_REG_RES_FIFO);
468 c = readw(host->base + MMC_REG_RES_FIFO);
469 cmd->resp[0] = a << 24 | b << 8 | c >> 8;
470 }
471 }
472
473 dev_dbg(mmc_dev(host->mmc), "RESP 0x%08x, 0x%08x, 0x%08x, 0x%08x, error %d\n",
474 cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3], cmd->error);
475
476 if (data && !cmd->error && !(stat & STATUS_ERR_MASK)) {
477 if (host->req->data->flags & MMC_DATA_WRITE) {
478
479 /* Wait for FIFO to be empty before starting DMA write */
480
481 stat = readw(host->base + MMC_REG_STATUS);
482 if (imxmci_busy_wait_for_status(host, &stat,
483 STATUS_APPL_BUFF_FE,
484 40, "imxmci_cmd_done DMA WR") < 0) {
485 cmd->error = -EIO;
486 imxmci_finish_data(host, stat);
487 if (host->req)
488 imxmci_finish_request(host, host->req);
489 dev_warn(mmc_dev(host->mmc), "STATUS = 0x%04x\n",
490 stat);
491 return 0;
492 }
493
494 if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
495 imx_dma_enable(host->dma);
496 }
497 } else {
498 struct mmc_request *req;
499 imxmci_stop_clock(host);
500 req = host->req;
501
502 if (data)
503 imxmci_finish_data(host, stat);
504
505 if (req)
506 imxmci_finish_request(host, req);
507 else
508 dev_warn(mmc_dev(host->mmc), "imxmci_cmd_done: no request to finish\n");
509 }
510
511 return 1;
512}
513
514static int imxmci_data_done(struct imxmci_host *host, unsigned int stat)
515{
516 struct mmc_data *data = host->data;
517 int data_error;
518
519 if (!data)
520 return 0;
521
522 data_error = imxmci_finish_data(host, stat);
523
524 if (host->req->stop) {
525 imxmci_stop_clock(host);
526 imxmci_start_cmd(host, host->req->stop, 0);
527 } else {
528 struct mmc_request *req;
529 req = host->req;
530 if (req)
531 imxmci_finish_request(host, req);
532 else
533 dev_warn(mmc_dev(host->mmc), "imxmci_data_done: no request to finish\n");
534 }
535
536 return 1;
537}
538
539static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
540{
541 int i;
542 int burst_len;
543 int trans_done = 0;
544 unsigned int stat = *pstat;
545
546 if (host->actual_bus_width != MMC_BUS_WIDTH_4)
547 burst_len = 16;
548 else
549 burst_len = 64;
550
551 /* This is unfortunately required */
552 dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data running STATUS = 0x%x\n",
553 stat);
554
555 udelay(20); /* required for clocks < 8MHz*/
556
557 if (host->dma_dir == DMA_FROM_DEVICE) {
558 imxmci_busy_wait_for_status(host, &stat,
559 STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE |
560 STATUS_TIME_OUT_READ,
561 50, "imxmci_cpu_driven_data read");
562
563 while ((stat & (STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE)) &&
564 !(stat & STATUS_TIME_OUT_READ) &&
565 (host->data_cnt < 512)) {
566
567 udelay(20); /* required for clocks < 8MHz*/
568
569 for (i = burst_len; i >= 2 ; i -= 2) {
570 u16 data;
571 data = readw(host->base + MMC_REG_BUFFER_ACCESS);
572 udelay(10); /* required for clocks < 8MHz*/
573 if (host->data_cnt+2 <= host->dma_size) {
574 *(host->data_ptr++) = data;
575 } else {
576 if (host->data_cnt < host->dma_size)
577 *(u8 *)(host->data_ptr) = data;
578 }
579 host->data_cnt += 2;
580 }
581
582 stat = readw(host->base + MMC_REG_STATUS);
583
584 dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read %d burst %d STATUS = 0x%x\n",
585 host->data_cnt, burst_len, stat);
586 }
587
588 if ((stat & STATUS_DATA_TRANS_DONE) && (host->data_cnt >= 512))
589 trans_done = 1;
590
591 if (host->dma_size & 0x1ff)
592 stat &= ~STATUS_CRC_READ_ERR;
593
594 if (stat & STATUS_TIME_OUT_READ) {
595 dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read timeout STATUS = 0x%x\n",
596 stat);
597 trans_done = -1;
598 }
599
600 } else {
601 imxmci_busy_wait_for_status(host, &stat,
602 STATUS_APPL_BUFF_FE,
603 20, "imxmci_cpu_driven_data write");
604
605 while ((stat & STATUS_APPL_BUFF_FE) &&
606 (host->data_cnt < host->dma_size)) {
607 if (burst_len >= host->dma_size - host->data_cnt) {
608 burst_len = host->dma_size - host->data_cnt;
609 host->data_cnt = host->dma_size;
610 trans_done = 1;
611 } else {
612 host->data_cnt += burst_len;
613 }
614
615 for (i = burst_len; i > 0 ; i -= 2)
616 writew(*(host->data_ptr++), host->base + MMC_REG_BUFFER_ACCESS);
617
618 stat = readw(host->base + MMC_REG_STATUS);
619
620 dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data write burst %d STATUS = 0x%x\n",
621 burst_len, stat);
622 }
623 }
624
625 *pstat = stat;
626
627 return trans_done;
628}
629
630static void imxmci_dma_irq(int dma, void *devid)
631{
632 struct imxmci_host *host = devid;
633 u32 stat = readw(host->base + MMC_REG_STATUS);
634
635 atomic_set(&host->stuck_timeout, 0);
636 host->status_reg = stat;
637 set_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
638 tasklet_schedule(&host->tasklet);
639}
640
641static irqreturn_t imxmci_irq(int irq, void *devid)
642{
643 struct imxmci_host *host = devid;
644 u32 stat = readw(host->base + MMC_REG_STATUS);
645 int handled = 1;
646
647 writew(host->imask | INT_MASK_SDIO | INT_MASK_AUTO_CARD_DETECT,
648 host->base + MMC_REG_INT_MASK);
649
650 atomic_set(&host->stuck_timeout, 0);
651 host->status_reg = stat;
652 set_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);
653 set_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);
654 tasklet_schedule(&host->tasklet);
655
656 return IRQ_RETVAL(handled);
657}
658
659static void imxmci_tasklet_fnc(unsigned long data)
660{
661 struct imxmci_host *host = (struct imxmci_host *)data;
662 u32 stat;
663 unsigned int data_dir_mask = 0; /* STATUS_WR_CRC_ERROR_CODE_MASK */
664 int timeout = 0;
665
666 if (atomic_read(&host->stuck_timeout) > 4) {
667 char *what;
668 timeout = 1;
669 stat = readw(host->base + MMC_REG_STATUS);
670 host->status_reg = stat;
671 if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
672 if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
673 what = "RESP+DMA";
674 else
675 what = "RESP";
676 else
677 if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
678 if (test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events))
679 what = "DATA";
680 else
681 what = "DMA";
682 else
683 what = "???";
684
685 dev_err(mmc_dev(host->mmc),
686 "%s TIMEOUT, hardware stucked STATUS = 0x%04x IMASK = 0x%04x\n",
687 what, stat,
688 readw(host->base + MMC_REG_INT_MASK));
689 dev_err(mmc_dev(host->mmc),
690 "CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n",
691 readw(host->base + MMC_REG_CMD_DAT_CONT),
692 readw(host->base + MMC_REG_BLK_LEN),
693 readw(host->base + MMC_REG_NOB),
694 CCR(host->dma));
695 dev_err(mmc_dev(host->mmc), "CMD%d, prevCMD%d, bus %d-bit, dma_size = 0x%x\n",
696 host->cmd ? host->cmd->opcode : 0,
697 host->prev_cmd_code,
698 1 << host->actual_bus_width, host->dma_size);
699 }
700
701 if (!host->present || timeout)
702 host->status_reg = STATUS_TIME_OUT_RESP | STATUS_TIME_OUT_READ |
703 STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR;
704
705 if (test_bit(IMXMCI_PEND_IRQ_b, &host->pending_events) || timeout) {
706 clear_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);
707
708 stat = readw(host->base + MMC_REG_STATUS);
709 /*
 710 * This is not required in theory, but there is a chance of missing some flag
 711 * which is cleared automatically by the mask write; the original Freescale code keeps
 712 * stat from IRQ time, so do we
713 */
714 stat |= host->status_reg;
715
716 if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events))
717 stat &= ~STATUS_CRC_READ_ERR;
718
719 if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {
720 imxmci_busy_wait_for_status(host, &stat,
721 STATUS_END_CMD_RESP | STATUS_ERR_MASK,
722 20, "imxmci_tasklet_fnc resp (ERRATUM #4)");
723 }
724
725 if (stat & (STATUS_END_CMD_RESP | STATUS_ERR_MASK)) {
726 if (test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
727 imxmci_cmd_done(host, stat);
728 if (host->data && (stat & STATUS_ERR_MASK))
729 imxmci_data_done(host, stat);
730 }
731
732 if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) {
733 stat |= readw(host->base + MMC_REG_STATUS);
734 if (imxmci_cpu_driven_data(host, &stat)) {
735 if (test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
736 imxmci_cmd_done(host, stat);
737 atomic_clear_mask(IMXMCI_PEND_IRQ_m|IMXMCI_PEND_CPU_DATA_m,
738 &host->pending_events);
739 imxmci_data_done(host, stat);
740 }
741 }
742 }
743
744 if (test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events) &&
745 !test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {
746
747 stat = readw(host->base + MMC_REG_STATUS);
748 /* Same as above */
749 stat |= host->status_reg;
750
751 if (host->dma_dir == DMA_TO_DEVICE)
752 data_dir_mask = STATUS_WRITE_OP_DONE;
753 else
754 data_dir_mask = STATUS_DATA_TRANS_DONE;
755
756 if (stat & data_dir_mask) {
757 clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
758 imxmci_data_done(host, stat);
759 }
760 }
761
762 if (test_and_clear_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events)) {
763
764 if (host->cmd)
765 imxmci_cmd_done(host, STATUS_TIME_OUT_RESP);
766
767 if (host->data)
768 imxmci_data_done(host, STATUS_TIME_OUT_READ |
769 STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR);
770
771 if (host->req)
772 imxmci_finish_request(host, host->req);
773
774 mmc_detect_change(host->mmc, msecs_to_jiffies(100));
775
776 }
777}
778
779static void imxmci_request(struct mmc_host *mmc, struct mmc_request *req)
780{
781 struct imxmci_host *host = mmc_priv(mmc);
782 unsigned int cmdat;
783
784 WARN_ON(host->req != NULL);
785
786 host->req = req;
787
788 cmdat = 0;
789
790 if (req->data) {
791 imxmci_setup_data(host, req->data);
792
793 cmdat |= CMD_DAT_CONT_DATA_ENABLE;
794
795 if (req->data->flags & MMC_DATA_WRITE)
796 cmdat |= CMD_DAT_CONT_WRITE;
797
798 if (req->data->flags & MMC_DATA_STREAM)
799 cmdat |= CMD_DAT_CONT_STREAM_BLOCK;
800 }
801
802 imxmci_start_cmd(host, req->cmd, cmdat);
803}
804
805#define CLK_RATE 19200000
806
807static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
808{
809 struct imxmci_host *host = mmc_priv(mmc);
810 int prescaler;
811
812 if (ios->bus_width == MMC_BUS_WIDTH_4) {
813 host->actual_bus_width = MMC_BUS_WIDTH_4;
814 imx_gpio_mode(PB11_PF_SD_DAT3);
815 BLR(host->dma) = 0; /* burst 64 byte read/write */
816 } else {
817 host->actual_bus_width = MMC_BUS_WIDTH_1;
818 imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
819 BLR(host->dma) = 16; /* burst 16 byte read/write */
820 }
821
822 if (host->power_mode != ios->power_mode) {
823 switch (ios->power_mode) {
824 case MMC_POWER_OFF:
825 break;
826 case MMC_POWER_UP:
827 set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
828 break;
829 case MMC_POWER_ON:
830 break;
831 }
832 host->power_mode = ios->power_mode;
833 }
834
835 if (ios->clock) {
836 unsigned int clk;
837 u16 reg;
838
 839 /* The prescaler is 5 for PERCLK2 equal to 96 MHz,
 840 * so 96 MHz / 5 = 19.2 MHz
841 */
842 clk = clk_get_rate(host->clk);
843 prescaler = (clk + (CLK_RATE * 7) / 8) / CLK_RATE;
844 switch (prescaler) {
845 case 0:
846 case 1: prescaler = 0;
847 break;
848 case 2: prescaler = 1;
849 break;
850 case 3: prescaler = 2;
851 break;
852 case 4: prescaler = 4;
853 break;
854 default:
855 case 5: prescaler = 5;
856 break;
857 }
858
859 dev_dbg(mmc_dev(host->mmc), "PERCLK2 %d MHz -> prescaler %d\n",
860 clk, prescaler);
861
862 for (clk = 0; clk < 8; clk++) {
863 int x;
864 x = CLK_RATE / (1 << clk);
865 if (x <= ios->clock)
866 break;
867 }
868
869 /* enable controller */
870 reg = readw(host->base + MMC_REG_STR_STP_CLK);
871 writew(reg | STR_STP_CLK_ENABLE,
872 host->base + MMC_REG_STR_STP_CLK);
873
874 imxmci_stop_clock(host);
875 writew((prescaler << 3) | clk, host->base + MMC_REG_CLK_RATE);
876 /*
 877 * As far as we understand, the clock should not be started here, because it would
 878 * trigger the SDHC sequencer and send the last or a random command to the card
879 */
880 /* imxmci_start_clock(host); */
881
882 dev_dbg(mmc_dev(host->mmc),
883 "MMC_CLK_RATE: 0x%08x\n",
884 readw(host->base + MMC_REG_CLK_RATE));
885 } else {
886 imxmci_stop_clock(host);
887 }
888}
889
890static int imxmci_get_ro(struct mmc_host *mmc)
891{
892 struct imxmci_host *host = mmc_priv(mmc);
893
894 if (host->pdata && host->pdata->get_ro)
895 return !!host->pdata->get_ro(mmc_dev(mmc));
896 /*
897 * Board doesn't support read only detection; let the mmc core
898 * decide what to do.
899 */
900 return -ENOSYS;
901}
902
903
904static const struct mmc_host_ops imxmci_ops = {
905 .request = imxmci_request,
906 .set_ios = imxmci_set_ios,
907 .get_ro = imxmci_get_ro,
908};
909
910static void imxmci_check_status(unsigned long data)
911{
912 struct imxmci_host *host = (struct imxmci_host *)data;
913
914 if (host->pdata && host->pdata->card_present &&
915 host->pdata->card_present(mmc_dev(host->mmc)) != host->present) {
916 host->present ^= 1;
917 dev_info(mmc_dev(host->mmc), "card %s\n",
918 host->present ? "inserted" : "removed");
919
920 set_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events);
921 tasklet_schedule(&host->tasklet);
922 }
923
924 if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events) ||
925 test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
926 atomic_inc(&host->stuck_timeout);
927 if (atomic_read(&host->stuck_timeout) > 4)
928 tasklet_schedule(&host->tasklet);
929 } else {
930 atomic_set(&host->stuck_timeout, 0);
931
932 }
933
934 mod_timer(&host->timer, jiffies + (HZ>>1));
935}
936
937static int __init imxmci_probe(struct platform_device *pdev)
938{
939 struct mmc_host *mmc;
940 struct imxmci_host *host = NULL;
941 struct resource *r;
942 int ret = 0, irq;
943 u16 rev_no;
944
945 pr_info("i.MX mmc driver\n");
946
947 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
948 irq = platform_get_irq(pdev, 0);
949 if (!r || irq < 0)
950 return -ENXIO;
951
952 r = request_mem_region(r->start, resource_size(r), pdev->name);
953 if (!r)
954 return -EBUSY;
955
956 mmc = mmc_alloc_host(sizeof(struct imxmci_host), &pdev->dev);
957 if (!mmc) {
958 ret = -ENOMEM;
959 goto out;
960 }
961
962 mmc->ops = &imxmci_ops;
963 mmc->f_min = 150000;
964 mmc->f_max = CLK_RATE/2;
965 mmc->ocr_avail = MMC_VDD_32_33;
966 mmc->caps = MMC_CAP_4_BIT_DATA;
967
968 /* MMC core transfer sizes tunable parameters */
969 mmc->max_segs = 64;
970 mmc->max_seg_size = 64*512; /* default PAGE_CACHE_SIZE */
971 mmc->max_req_size = 64*512; /* default PAGE_CACHE_SIZE */
972 mmc->max_blk_size = 2048;
973 mmc->max_blk_count = 65535;
974
975 host = mmc_priv(mmc);
976 host->base = ioremap(r->start, resource_size(r));
977 if (!host->base) {
978 ret = -ENOMEM;
979 goto out;
980 }
981
982 host->mmc = mmc;
983 host->dma_allocated = 0;
984 host->pdata = pdev->dev.platform_data;
985 if (!host->pdata)
986 dev_warn(&pdev->dev, "No platform data provided!\n");
987
988 spin_lock_init(&host->lock);
989 host->res = r;
990 host->irq = irq;
991
992 host->clk = clk_get(&pdev->dev, "perclk2");
993 if (IS_ERR(host->clk)) {
994 ret = PTR_ERR(host->clk);
995 goto out;
996 }
997 clk_enable(host->clk);
998
999 imx_gpio_mode(PB8_PF_SD_DAT0);
1000 imx_gpio_mode(PB9_PF_SD_DAT1);
1001 imx_gpio_mode(PB10_PF_SD_DAT2);
 1002 /* Configured as a GPIO with pull-up to ensure the right MMC card mode */
1003 /* Switched to PB11_PF_SD_DAT3 if 4 bit bus is configured */
1004 imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
1005 /* imx_gpio_mode(PB11_PF_SD_DAT3); */
1006 imx_gpio_mode(PB12_PF_SD_CLK);
1007 imx_gpio_mode(PB13_PF_SD_CMD);
1008
1009 imxmci_softreset(host);
1010
1011 rev_no = readw(host->base + MMC_REG_REV_NO);
1012 if (rev_no != 0x390) {
1013 dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
1014 readw(host->base + MMC_REG_REV_NO));
1015 goto out;
1016 }
1017
1018 /* recommended in data sheet */
1019 writew(0x2db4, host->base + MMC_REG_READ_TO);
1020
1021 host->imask = IMXMCI_INT_MASK_DEFAULT;
1022 writew(host->imask, host->base + MMC_REG_INT_MASK);
1023
1024 host->dma = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_LOW);
1025 if(host->dma < 0) {
1026 dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n");
1027 ret = -EBUSY;
1028 goto out;
1029 }
1030 host->dma_allocated = 1;
1031 imx_dma_setup_handlers(host->dma, imxmci_dma_irq, NULL, host);
1032 RSSR(host->dma) = DMA_REQ_SDHC;
1033
1034 tasklet_init(&host->tasklet, imxmci_tasklet_fnc, (unsigned long)host);
1035 host->status_reg=0;
1036 host->pending_events=0;
1037
1038 ret = request_irq(host->irq, imxmci_irq, 0, DRIVER_NAME, host);
1039 if (ret)
1040 goto out;
1041
1042 if (host->pdata && host->pdata->card_present)
1043 host->present = host->pdata->card_present(mmc_dev(mmc));
1044 else /* if there is no way to detect assume that card is present */
1045 host->present = 1;
1046
1047 init_timer(&host->timer);
1048 host->timer.data = (unsigned long)host;
1049 host->timer.function = imxmci_check_status;
1050 add_timer(&host->timer);
1051 mod_timer(&host->timer, jiffies + (HZ >> 1));
1052
1053 platform_set_drvdata(pdev, mmc);
1054
1055 mmc_add_host(mmc);
1056
1057 return 0;
1058
1059out:
1060 if (host) {
1061 if (host->dma_allocated) {
1062 imx_dma_free(host->dma);
1063 host->dma_allocated = 0;
1064 }
1065 if (host->clk) {
1066 clk_disable(host->clk);
1067 clk_put(host->clk);
1068 }
1069 if (host->base)
1070 iounmap(host->base);
1071 }
1072 if (mmc)
1073 mmc_free_host(mmc);
1074 release_mem_region(r->start, resource_size(r));
1075 return ret;
1076}
1077
1078static int __exit imxmci_remove(struct platform_device *pdev)
1079{
1080 struct mmc_host *mmc = platform_get_drvdata(pdev);
1081
1082 platform_set_drvdata(pdev, NULL);
1083
1084 if (mmc) {
1085 struct imxmci_host *host = mmc_priv(mmc);
1086
1087 tasklet_disable(&host->tasklet);
1088
1089 del_timer_sync(&host->timer);
1090 mmc_remove_host(mmc);
1091
1092 free_irq(host->irq, host);
1093 iounmap(host->base);
1094 if (host->dma_allocated) {
1095 imx_dma_free(host->dma);
1096 host->dma_allocated = 0;
1097 }
1098
1099 tasklet_kill(&host->tasklet);
1100
1101 clk_disable(host->clk);
1102 clk_put(host->clk);
1103
1104 release_mem_region(host->res->start, resource_size(host->res));
1105
1106 mmc_free_host(mmc);
1107 }
1108 return 0;
1109}
1110
1111#ifdef CONFIG_PM
1112static int imxmci_suspend(struct platform_device *dev, pm_message_t state)
1113{
1114 struct mmc_host *mmc = platform_get_drvdata(dev);
1115 int ret = 0;
1116
1117 if (mmc)
1118 ret = mmc_suspend_host(mmc);
1119
1120 return ret;
1121}
1122
1123static int imxmci_resume(struct platform_device *dev)
1124{
1125 struct mmc_host *mmc = platform_get_drvdata(dev);
1126 struct imxmci_host *host;
1127 int ret = 0;
1128
1129 if (mmc) {
1130 host = mmc_priv(mmc);
1131 if (host)
1132 set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
1133 ret = mmc_resume_host(mmc);
1134 }
1135
1136 return ret;
1137}
1138#else
1139#define imxmci_suspend NULL
1140#define imxmci_resume NULL
1141#endif /* CONFIG_PM */
1142
1143static struct platform_driver imxmci_driver = {
1144 .remove = __exit_p(imxmci_remove),
1145 .suspend = imxmci_suspend,
1146 .resume = imxmci_resume,
1147 .driver = {
1148 .name = DRIVER_NAME,
1149 .owner = THIS_MODULE,
1150 }
1151};
1152
1153static int __init imxmci_init(void)
1154{
1155 return platform_driver_probe(&imxmci_driver, imxmci_probe);
1156}
1157
1158static void __exit imxmci_exit(void)
1159{
1160 platform_driver_unregister(&imxmci_driver);
1161}
1162
1163module_init(imxmci_init);
1164module_exit(imxmci_exit);
1165
1166MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
1167MODULE_AUTHOR("Sascha Hauer, Pengutronix");
1168MODULE_LICENSE("GPL");
1169MODULE_ALIAS("platform:imx-mmc");
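As a side note on the deleted driver, a worked example of the clock math in imxmci_set_ios() above (plain userspace C; the 96 MHz and 400 kHz figures are only for illustration): the prescaler formula yields 5 for a 96 MHz PERCLK2, and the divider loop then picks the smallest power-of-two divisor whose output does not exceed the requested rate.

#include <stdio.h>

#define CLK_RATE 19200000

int main(void)
{
	unsigned int perclk2 = 96000000;
	unsigned int target = 400000;		/* e.g. card identification clock */
	int prescaler = (perclk2 + (CLK_RATE * 7) / 8) / CLK_RATE;
	unsigned int clk;

	for (clk = 0; clk < 8; clk++)
		if (CLK_RATE / (1 << clk) <= target)
			break;

	printf("prescaler=%d divider=2^%u -> %u Hz\n",
	       prescaler, clk, CLK_RATE / (1 << clk));
	return 0;
}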
diff --git a/drivers/mmc/host/imxmmc.h b/drivers/mmc/host/imxmmc.h
deleted file mode 100644
index 09d5d4ee3a77..000000000000
--- a/drivers/mmc/host/imxmmc.h
+++ /dev/null
@@ -1,64 +0,0 @@
1#define MMC_REG_STR_STP_CLK 0x00
2#define MMC_REG_STATUS 0x04
3#define MMC_REG_CLK_RATE 0x08
4#define MMC_REG_CMD_DAT_CONT 0x0C
5#define MMC_REG_RES_TO 0x10
6#define MMC_REG_READ_TO 0x14
7#define MMC_REG_BLK_LEN 0x18
8#define MMC_REG_NOB 0x1C
9#define MMC_REG_REV_NO 0x20
10#define MMC_REG_INT_MASK 0x24
11#define MMC_REG_CMD 0x28
12#define MMC_REG_ARGH 0x2C
13#define MMC_REG_ARGL 0x30
14#define MMC_REG_RES_FIFO 0x34
15#define MMC_REG_BUFFER_ACCESS 0x38
16
17#define STR_STP_CLK_IPG_CLK_GATE_DIS (1<<15)
18#define STR_STP_CLK_IPG_PERCLK_GATE_DIS (1<<14)
19#define STR_STP_CLK_ENDIAN (1<<5)
20#define STR_STP_CLK_RESET (1<<3)
21#define STR_STP_CLK_ENABLE (1<<2)
22#define STR_STP_CLK_START_CLK (1<<1)
23#define STR_STP_CLK_STOP_CLK (1<<0)
24#define STATUS_CARD_PRESENCE (1<<15)
25#define STATUS_SDIO_INT_ACTIVE (1<<14)
26#define STATUS_END_CMD_RESP (1<<13)
27#define STATUS_WRITE_OP_DONE (1<<12)
28#define STATUS_DATA_TRANS_DONE (1<<11)
29#define STATUS_WR_CRC_ERROR_CODE_MASK (3<<10)
30#define STATUS_CARD_BUS_CLK_RUN (1<<8)
31#define STATUS_APPL_BUFF_FF (1<<7)
32#define STATUS_APPL_BUFF_FE (1<<6)
33#define STATUS_RESP_CRC_ERR (1<<5)
34#define STATUS_CRC_READ_ERR (1<<3)
35#define STATUS_CRC_WRITE_ERR (1<<2)
36#define STATUS_TIME_OUT_RESP (1<<1)
37#define STATUS_TIME_OUT_READ (1<<0)
38#define STATUS_ERR_MASK 0x2f
39#define CLK_RATE_PRESCALER(x) ((x) & 0x7)
40#define CLK_RATE_CLK_RATE(x) (((x) & 0x7) << 3)
41#define CMD_DAT_CONT_CMD_RESP_LONG_OFF (1<<12)
42#define CMD_DAT_CONT_STOP_READWAIT (1<<11)
43#define CMD_DAT_CONT_START_READWAIT (1<<10)
44#define CMD_DAT_CONT_BUS_WIDTH_1 (0<<8)
45#define CMD_DAT_CONT_BUS_WIDTH_4 (2<<8)
46#define CMD_DAT_CONT_INIT (1<<7)
47#define CMD_DAT_CONT_BUSY (1<<6)
48#define CMD_DAT_CONT_STREAM_BLOCK (1<<5)
49#define CMD_DAT_CONT_WRITE (1<<4)
50#define CMD_DAT_CONT_DATA_ENABLE (1<<3)
51#define CMD_DAT_CONT_RESPONSE_FORMAT_R1 (1)
52#define CMD_DAT_CONT_RESPONSE_FORMAT_R2 (2)
53#define CMD_DAT_CONT_RESPONSE_FORMAT_R3 (3)
54#define CMD_DAT_CONT_RESPONSE_FORMAT_R4 (4)
55#define CMD_DAT_CONT_RESPONSE_FORMAT_R5 (5)
56#define CMD_DAT_CONT_RESPONSE_FORMAT_R6 (6)
57#define INT_MASK_AUTO_CARD_DETECT (1<<6)
58#define INT_MASK_DAT0_EN (1<<5)
59#define INT_MASK_SDIO (1<<4)
60#define INT_MASK_BUF_READY (1<<3)
61#define INT_MASK_END_CMD_RES (1<<2)
62#define INT_MASK_WRITE_OP_DONE (1<<1)
63#define INT_MASK_DATA_TRAN (1<<0)
64#define INT_ALL (0x7f)
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 032b84791a16..f0fcce40cd8d 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -15,6 +15,7 @@
15#include <linux/device.h> 15#include <linux/device.h>
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/slab.h>
18#include <linux/delay.h> 19#include <linux/delay.h>
19#include <linux/err.h> 20#include <linux/err.h>
20#include <linux/highmem.h> 21#include <linux/highmem.h>
@@ -25,6 +26,7 @@
25#include <linux/clk.h> 26#include <linux/clk.h>
26#include <linux/scatterlist.h> 27#include <linux/scatterlist.h>
27#include <linux/gpio.h> 28#include <linux/gpio.h>
29#include <linux/of_gpio.h>
28#include <linux/regulator/consumer.h> 30#include <linux/regulator/consumer.h>
29#include <linux/dmaengine.h> 31#include <linux/dmaengine.h>
30#include <linux/dma-mapping.h> 32#include <linux/dma-mapping.h>
@@ -94,6 +96,17 @@ static struct variant_data variant_u300 = {
94 .signal_direction = true, 96 .signal_direction = true,
95}; 97};
96 98
99static struct variant_data variant_nomadik = {
100 .fifosize = 16 * 4,
101 .fifohalfsize = 8 * 4,
102 .clkreg = MCI_CLK_ENABLE,
103 .datalength_bits = 24,
104 .sdio = true,
105 .st_clkdiv = true,
106 .pwrreg_powerup = MCI_PWR_ON,
107 .signal_direction = true,
108};
109
97static struct variant_data variant_ux500 = { 110static struct variant_data variant_ux500 = {
98 .fifosize = 30 * 4, 111 .fifosize = 30 * 4,
99 .fifohalfsize = 8 * 4, 112 .fifohalfsize = 8 * 4,
@@ -1196,21 +1209,76 @@ static const struct mmc_host_ops mmci_ops = {
1196 .get_cd = mmci_get_cd, 1209 .get_cd = mmci_get_cd,
1197}; 1210};
1198 1211
1212#ifdef CONFIG_OF
1213static void mmci_dt_populate_generic_pdata(struct device_node *np,
1214 struct mmci_platform_data *pdata)
1215{
1216 int bus_width = 0;
1217
1218 pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0);
1219 if (!pdata->gpio_wp)
1220 pdata->gpio_wp = -1;
1221
1222 pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0);
1223 if (!pdata->gpio_cd)
1224 pdata->gpio_cd = -1;
1225
1226 if (of_get_property(np, "cd-inverted", NULL))
1227 pdata->cd_invert = true;
1228 else
1229 pdata->cd_invert = false;
1230
1231 of_property_read_u32(np, "max-frequency", &pdata->f_max);
1232 if (!pdata->f_max)
1233 pr_warn("%s has no 'max-frequency' property\n", np->full_name);
1234
1235 if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
1236 pdata->capabilities |= MMC_CAP_MMC_HIGHSPEED;
1237 if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
1238 pdata->capabilities |= MMC_CAP_SD_HIGHSPEED;
1239
1240 of_property_read_u32(np, "bus-width", &bus_width);
1241 switch (bus_width) {
1242 case 0 :
1243 /* No bus-width supplied. */
1244 break;
1245 case 4 :
1246 pdata->capabilities |= MMC_CAP_4_BIT_DATA;
1247 break;
1248 case 8 :
1249 pdata->capabilities |= MMC_CAP_8_BIT_DATA;
1250 break;
1251 default :
1252 pr_warn("%s: Unsupported bus width\n", np->full_name);
1253 }
1254}
1255#else
1256static void mmci_dt_populate_generic_pdata(struct device_node *np,
1257 struct mmci_platform_data *pdata)
1258{
1259 return;
1260}
1261#endif
1262
1199static int __devinit mmci_probe(struct amba_device *dev, 1263static int __devinit mmci_probe(struct amba_device *dev,
1200 const struct amba_id *id) 1264 const struct amba_id *id)
1201{ 1265{
1202 struct mmci_platform_data *plat = dev->dev.platform_data; 1266 struct mmci_platform_data *plat = dev->dev.platform_data;
1267 struct device_node *np = dev->dev.of_node;
1203 struct variant_data *variant = id->data; 1268 struct variant_data *variant = id->data;
1204 struct mmci_host *host; 1269 struct mmci_host *host;
1205 struct mmc_host *mmc; 1270 struct mmc_host *mmc;
1206 int ret; 1271 int ret;
1207 1272
1208 /* must have platform data */ 1273 /* Must have platform data or Device Tree. */
1209 if (!plat) { 1274 if (!plat && !np) {
1210 ret = -EINVAL; 1275 dev_err(&dev->dev, "No plat data or DT found\n");
1211 goto out; 1276 return -EINVAL;
1212 } 1277 }
1213 1278
1279 if (np)
1280 mmci_dt_populate_generic_pdata(np, plat);
1281
1214 ret = amba_request_regions(dev, DRIVER_NAME); 1282 ret = amba_request_regions(dev, DRIVER_NAME);
1215 if (ret) 1283 if (ret)
1216 goto out; 1284 goto out;
@@ -1397,7 +1465,7 @@ static int __devinit mmci_probe(struct amba_device *dev,
1397 if (ret) 1465 if (ret)
1398 goto unmap; 1466 goto unmap;
1399 1467
1400 if (dev->irq[1] == NO_IRQ || !dev->irq[1]) 1468 if (!dev->irq[1])
1401 host->singleirq = true; 1469 host->singleirq = true;
1402 else { 1470 else {
1403 ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, 1471 ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
@@ -1569,6 +1637,11 @@ static struct amba_id mmci_ids[] = {
1569 .data = &variant_u300, 1637 .data = &variant_u300,
1570 }, 1638 },
1571 { 1639 {
1640 .id = 0x10180180,
1641 .mask = 0xf0ffffff,
1642 .data = &variant_nomadik,
1643 },
1644 {
1572 .id = 0x00280180, 1645 .id = 0x00280180,
1573 .mask = 0x00ffffff, 1646 .mask = 0x00ffffff,
1574 .data = &variant_u300, 1647 .data = &variant_u300,
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index eeb8cd125b0c..3b9136c1a475 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -19,6 +19,7 @@
19#include <linux/dma-mapping.h> 19#include <linux/dma-mapping.h>
20#include <linux/scatterlist.h> 20#include <linux/scatterlist.h>
21#include <linux/irq.h> 21#include <linux/irq.h>
22#include <linux/clk.h>
22#include <linux/gpio.h> 23#include <linux/gpio.h>
23#include <linux/mmc/host.h> 24#include <linux/mmc/host.h>
24 25
@@ -51,6 +52,7 @@ struct mvsd_host {
51 struct device *dev; 52 struct device *dev;
52 struct resource *res; 53 struct resource *res;
53 int irq; 54 int irq;
55 struct clk *clk;
54 int gpio_card_detect; 56 int gpio_card_detect;
55 int gpio_write_protect; 57 int gpio_write_protect;
56}; 58};
@@ -770,6 +772,13 @@ static int __init mvsd_probe(struct platform_device *pdev)
770 } else 772 } else
771 host->irq = irq; 773 host->irq = irq;
772 774
 775 /* Not all platforms can gate the clock, so it is not
 776 an error if the clock does not exist. */
777 host->clk = clk_get(&pdev->dev, NULL);
778 if (!IS_ERR(host->clk)) {
779 clk_prepare_enable(host->clk);
780 }
781
773 if (mvsd_data->gpio_card_detect) { 782 if (mvsd_data->gpio_card_detect) {
774 ret = gpio_request(mvsd_data->gpio_card_detect, 783 ret = gpio_request(mvsd_data->gpio_card_detect,
775 DRIVER_NAME " cd"); 784 DRIVER_NAME " cd");
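A minimal userspace sketch of the optional-clock pattern above: clk_get() may return an error pointer on platforms without a gateable clock, and that is treated as non-fatal since every later use is guarded by the same IS_ERR() check. The IS_ERR()/ERR_PTR() definitions and fake_clk_get() below are simplified stand-ins.

#include <stdio.h>

#define MAX_ERRNO	4095
#define IS_ERR(p)	((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define ERR_PTR(e)	((void *)(long)(e))

struct clk;	/* opaque, as in the kernel */

static struct clk *fake_clk_get(int platform_has_clk)
{
	/* -2 stands in for -ENOENT when the clock is absent */
	return platform_has_clk ? (struct clk *)0x1000 : ERR_PTR(-2);
}

int main(void)
{
	struct clk *clk = fake_clk_get(0);

	if (!IS_ERR(clk))
		printf("clock found, enabling it\n");
	else
		printf("no clock, continuing anyway\n");
	return 0;
}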
@@ -854,6 +863,11 @@ static int __exit mvsd_remove(struct platform_device *pdev)
854 mvsd_power_down(host); 863 mvsd_power_down(host);
855 iounmap(host->base); 864 iounmap(host->base);
856 release_resource(host->res); 865 release_resource(host->res);
866
867 if (!IS_ERR(host->clk)) {
868 clk_disable_unprepare(host->clk);
869 clk_put(host->clk);
870 }
857 mmc_free_host(mmc); 871 mmc_free_host(mmc);
858 } 872 }
859 platform_set_drvdata(pdev, NULL); 873 platform_set_drvdata(pdev, NULL);
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 887c0e598cf3..552196c764d4 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -169,11 +169,11 @@ struct mmc_omap_host {
169 struct timer_list clk_timer; 169 struct timer_list clk_timer;
170 spinlock_t clk_lock; /* for changing enabled state */ 170 spinlock_t clk_lock; /* for changing enabled state */
171 unsigned int fclk_enabled:1; 171 unsigned int fclk_enabled:1;
172 struct workqueue_struct *mmc_omap_wq;
172 173
173 struct omap_mmc_platform_data *pdata; 174 struct omap_mmc_platform_data *pdata;
174}; 175};
175 176
176static struct workqueue_struct *mmc_omap_wq;
177 177
178static void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot) 178static void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot)
179{ 179{
@@ -291,7 +291,7 @@ static void mmc_omap_release_slot(struct mmc_omap_slot *slot, int clk_enabled)
291 host->next_slot = new_slot; 291 host->next_slot = new_slot;
292 host->mmc = new_slot->mmc; 292 host->mmc = new_slot->mmc;
293 spin_unlock_irqrestore(&host->slot_lock, flags); 293 spin_unlock_irqrestore(&host->slot_lock, flags);
294 queue_work(mmc_omap_wq, &host->slot_release_work); 294 queue_work(host->mmc_omap_wq, &host->slot_release_work);
295 return; 295 return;
296 } 296 }
297 297
@@ -459,7 +459,7 @@ mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
459 } 459 }
460 460
461 host->stop_data = data; 461 host->stop_data = data;
462 queue_work(mmc_omap_wq, &host->send_stop_work); 462 queue_work(host->mmc_omap_wq, &host->send_stop_work);
463} 463}
464 464
465static void 465static void
@@ -639,7 +639,7 @@ mmc_omap_cmd_timer(unsigned long data)
639 OMAP_MMC_WRITE(host, IE, 0); 639 OMAP_MMC_WRITE(host, IE, 0);
640 disable_irq(host->irq); 640 disable_irq(host->irq);
641 host->abort = 1; 641 host->abort = 1;
642 queue_work(mmc_omap_wq, &host->cmd_abort_work); 642 queue_work(host->mmc_omap_wq, &host->cmd_abort_work);
643 } 643 }
644 spin_unlock_irqrestore(&host->slot_lock, flags); 644 spin_unlock_irqrestore(&host->slot_lock, flags);
645} 645}
@@ -828,7 +828,7 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
828 host->abort = 1; 828 host->abort = 1;
829 OMAP_MMC_WRITE(host, IE, 0); 829 OMAP_MMC_WRITE(host, IE, 0);
830 disable_irq_nosync(host->irq); 830 disable_irq_nosync(host->irq);
831 queue_work(mmc_omap_wq, &host->cmd_abort_work); 831 queue_work(host->mmc_omap_wq, &host->cmd_abort_work);
832 return IRQ_HANDLED; 832 return IRQ_HANDLED;
833 } 833 }
834 834
@@ -1389,13 +1389,13 @@ static void mmc_omap_remove_slot(struct mmc_omap_slot *slot)
1389 1389
1390 tasklet_kill(&slot->cover_tasklet); 1390 tasklet_kill(&slot->cover_tasklet);
1391 del_timer_sync(&slot->cover_timer); 1391 del_timer_sync(&slot->cover_timer);
1392 flush_workqueue(mmc_omap_wq); 1392 flush_workqueue(slot->host->mmc_omap_wq);
1393 1393
1394 mmc_remove_host(mmc); 1394 mmc_remove_host(mmc);
1395 mmc_free_host(mmc); 1395 mmc_free_host(mmc);
1396} 1396}
1397 1397
1398static int __init mmc_omap_probe(struct platform_device *pdev) 1398static int __devinit mmc_omap_probe(struct platform_device *pdev)
1399{ 1399{
1400 struct omap_mmc_platform_data *pdata = pdev->dev.platform_data; 1400 struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
1401 struct mmc_omap_host *host = NULL; 1401 struct mmc_omap_host *host = NULL;
@@ -1497,6 +1497,10 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
1497 1497
1498 host->reg_shift = (cpu_is_omap7xx() ? 1 : 2); 1498 host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
1499 1499
1500 host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
1501 if (!host->mmc_omap_wq)
1502 goto err_plat_cleanup;
1503
1500 return 0; 1504 return 0;
1501 1505
1502err_plat_cleanup: 1506err_plat_cleanup:
@@ -1518,7 +1522,7 @@ err_free_mem_region:
1518 return ret; 1522 return ret;
1519} 1523}
1520 1524
1521static int mmc_omap_remove(struct platform_device *pdev) 1525static int __devexit mmc_omap_remove(struct platform_device *pdev)
1522{ 1526{
1523 struct mmc_omap_host *host = platform_get_drvdata(pdev); 1527 struct mmc_omap_host *host = platform_get_drvdata(pdev);
1524 int i; 1528 int i;
@@ -1542,6 +1546,7 @@ static int mmc_omap_remove(struct platform_device *pdev)
1542 iounmap(host->virt_base); 1546 iounmap(host->virt_base);
1543 release_mem_region(pdev->resource[0].start, 1547 release_mem_region(pdev->resource[0].start,
1544 pdev->resource[0].end - pdev->resource[0].start + 1); 1548 pdev->resource[0].end - pdev->resource[0].start + 1);
1549 destroy_workqueue(host->mmc_omap_wq);
1545 1550
1546 kfree(host); 1551 kfree(host);
1547 1552
@@ -1599,7 +1604,8 @@ static int mmc_omap_resume(struct platform_device *pdev)
1599#endif 1604#endif
1600 1605
1601static struct platform_driver mmc_omap_driver = { 1606static struct platform_driver mmc_omap_driver = {
1602 .remove = mmc_omap_remove, 1607 .probe = mmc_omap_probe,
1608 .remove = __devexit_p(mmc_omap_remove),
1603 .suspend = mmc_omap_suspend, 1609 .suspend = mmc_omap_suspend,
1604 .resume = mmc_omap_resume, 1610 .resume = mmc_omap_resume,
1605 .driver = { 1611 .driver = {
@@ -1608,29 +1614,7 @@ static struct platform_driver mmc_omap_driver = {
1608 }, 1614 },
1609}; 1615};
1610 1616
1611static int __init mmc_omap_init(void) 1617module_platform_driver(mmc_omap_driver);
1612{
1613 int ret;
1614
1615 mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
1616 if (!mmc_omap_wq)
1617 return -ENOMEM;
1618
1619 ret = platform_driver_probe(&mmc_omap_driver, mmc_omap_probe);
1620 if (ret)
1621 destroy_workqueue(mmc_omap_wq);
1622 return ret;
1623}
1624
1625static void __exit mmc_omap_exit(void)
1626{
1627 platform_driver_unregister(&mmc_omap_driver);
1628 destroy_workqueue(mmc_omap_wq);
1629}
1630
1631module_init(mmc_omap_init);
1632module_exit(mmc_omap_exit);
1633
1634MODULE_DESCRIPTION("OMAP Multimedia Card driver"); 1618MODULE_DESCRIPTION("OMAP Multimedia Card driver");
1635MODULE_LICENSE("GPL"); 1619MODULE_LICENSE("GPL");
1636MODULE_ALIAS("platform:" DRIVER_NAME); 1620MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 33e81c24e140..9a7a60aeb19e 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -85,12 +85,14 @@
85#define BRR_ENABLE (1 << 5) 85#define BRR_ENABLE (1 << 5)
86#define DTO_ENABLE (1 << 20) 86#define DTO_ENABLE (1 << 20)
87#define INIT_STREAM (1 << 1) 87#define INIT_STREAM (1 << 1)
88#define ACEN_ACMD12 (1 << 2)
88#define DP_SELECT (1 << 21) 89#define DP_SELECT (1 << 21)
89#define DDIR (1 << 4) 90#define DDIR (1 << 4)
90#define DMA_EN 0x1 91#define DMA_EN 0x1
91#define MSBS (1 << 5) 92#define MSBS (1 << 5)
92#define BCE (1 << 1) 93#define BCE (1 << 1)
93#define FOUR_BIT (1 << 1) 94#define FOUR_BIT (1 << 1)
95#define DDR (1 << 19)
94#define DW8 (1 << 5) 96#define DW8 (1 << 5)
95#define CC 0x1 97#define CC 0x1
96#define TC 0x02 98#define TC 0x02
@@ -115,6 +117,7 @@
115#define OMAP_MMC_MAX_CLOCK 52000000 117#define OMAP_MMC_MAX_CLOCK 52000000
116#define DRIVER_NAME "omap_hsmmc" 118#define DRIVER_NAME "omap_hsmmc"
117 119
120#define AUTO_CMD12 (1 << 0) /* Auto CMD12 support */
118/* 121/*
119 * One controller can have multiple slots, like on some omap boards using 122 * One controller can have multiple slots, like on some omap boards using
120 * omap.c controller driver. Luckily this is not currently done on any known 123 * omap.c controller driver. Luckily this is not currently done on any known
@@ -167,7 +170,6 @@ struct omap_hsmmc_host {
167 int use_dma, dma_ch; 170 int use_dma, dma_ch;
168 int dma_line_tx, dma_line_rx; 171 int dma_line_tx, dma_line_rx;
169 int slot_id; 172 int slot_id;
170 int got_dbclk;
171 int response_busy; 173 int response_busy;
172 int context_loss; 174 int context_loss;
173 int vdd; 175 int vdd;
@@ -175,6 +177,7 @@ struct omap_hsmmc_host {
175 int reqs_blocked; 177 int reqs_blocked;
176 int use_reg; 178 int use_reg;
177 int req_in_progress; 179 int req_in_progress;
180 unsigned int flags;
178 struct omap_hsmmc_next next_data; 181 struct omap_hsmmc_next next_data;
179 182
180 struct omap_mmc_platform_data *pdata; 183 struct omap_mmc_platform_data *pdata;
@@ -520,6 +523,10 @@ static void omap_hsmmc_set_bus_width(struct omap_hsmmc_host *host)
520 u32 con; 523 u32 con;
521 524
522 con = OMAP_HSMMC_READ(host->base, CON); 525 con = OMAP_HSMMC_READ(host->base, CON);
526 if (ios->timing == MMC_TIMING_UHS_DDR50)
527 con |= DDR; /* configure in DDR mode */
528 else
529 con &= ~DDR;
523 switch (ios->bus_width) { 530 switch (ios->bus_width) {
524 case MMC_BUS_WIDTH_8: 531 case MMC_BUS_WIDTH_8:
525 OMAP_HSMMC_WRITE(host->base, CON, con | DW8); 532 OMAP_HSMMC_WRITE(host->base, CON, con | DW8);
@@ -766,6 +773,8 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
766 cmdtype = 0x3; 773 cmdtype = 0x3;
767 774
768 cmdreg = (cmd->opcode << 24) | (resptype << 16) | (cmdtype << 22); 775 cmdreg = (cmd->opcode << 24) | (resptype << 16) | (cmdtype << 22);
776 if ((host->flags & AUTO_CMD12) && mmc_op_multi(cmd->opcode))
777 cmdreg |= ACEN_ACMD12;
769 778
770 if (data) { 779 if (data) {
771 cmdreg |= DP_SELECT | MSBS | BCE; 780 cmdreg |= DP_SELECT | MSBS | BCE;
@@ -796,11 +805,12 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
796static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq) 805static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
797{ 806{
798 int dma_ch; 807 int dma_ch;
808 unsigned long flags;
799 809
800 spin_lock(&host->irq_lock); 810 spin_lock_irqsave(&host->irq_lock, flags);
801 host->req_in_progress = 0; 811 host->req_in_progress = 0;
802 dma_ch = host->dma_ch; 812 dma_ch = host->dma_ch;
803 spin_unlock(&host->irq_lock); 813 spin_unlock_irqrestore(&host->irq_lock, flags);
804 814
805 omap_hsmmc_disable_irq(host); 815 omap_hsmmc_disable_irq(host);
806 /* Do not complete the request if DMA is still in progress */ 816 /* Do not complete the request if DMA is still in progress */
@@ -837,11 +847,14 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
837 else 847 else
838 data->bytes_xfered = 0; 848 data->bytes_xfered = 0;
839 849
840 if (!data->stop) { 850 if (data->stop && ((!(host->flags & AUTO_CMD12)) || data->error)) {
851 omap_hsmmc_start_command(host, data->stop, NULL);
852 } else {
853 if (data->stop)
854 data->stop->resp[0] = OMAP_HSMMC_READ(host->base,
855 RSP76);
841 omap_hsmmc_request_done(host, data->mrq); 856 omap_hsmmc_request_done(host, data->mrq);
842 return;
843 } 857 }
844 omap_hsmmc_start_command(host, data->stop, NULL);
845} 858}
846 859
847/* 860/*
@@ -874,13 +887,14 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
874static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno) 887static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
875{ 888{
876 int dma_ch; 889 int dma_ch;
890 unsigned long flags;
877 891
878 host->data->error = errno; 892 host->data->error = errno;
879 893
880 spin_lock(&host->irq_lock); 894 spin_lock_irqsave(&host->irq_lock, flags);
881 dma_ch = host->dma_ch; 895 dma_ch = host->dma_ch;
882 host->dma_ch = -1; 896 host->dma_ch = -1;
883 spin_unlock(&host->irq_lock); 897 spin_unlock_irqrestore(&host->irq_lock, flags);
884 898
885 if (host->use_dma && dma_ch != -1) { 899 if (host->use_dma && dma_ch != -1) {
886 dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, 900 dma_unmap_sg(mmc_dev(host->mmc), host->data->sg,
@@ -1082,7 +1096,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
1082 1096
1083 /* Disable the clocks */ 1097 /* Disable the clocks */
1084 pm_runtime_put_sync(host->dev); 1098 pm_runtime_put_sync(host->dev);
1085 if (host->got_dbclk) 1099 if (host->dbclk)
1086 clk_disable(host->dbclk); 1100 clk_disable(host->dbclk);
1087 1101
1088 /* Turn the power off */ 1102 /* Turn the power off */
@@ -1093,7 +1107,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
1093 ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1, 1107 ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1,
1094 vdd); 1108 vdd);
1095 pm_runtime_get_sync(host->dev); 1109 pm_runtime_get_sync(host->dev);
1096 if (host->got_dbclk) 1110 if (host->dbclk)
1097 clk_enable(host->dbclk); 1111 clk_enable(host->dbclk);
1098 1112
1099 if (ret != 0) 1113 if (ret != 0)
@@ -1234,6 +1248,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
1234 struct omap_hsmmc_host *host = cb_data; 1248 struct omap_hsmmc_host *host = cb_data;
1235 struct mmc_data *data; 1249 struct mmc_data *data;
1236 int dma_ch, req_in_progress; 1250 int dma_ch, req_in_progress;
1251 unsigned long flags;
1237 1252
1238 if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) { 1253 if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
1239 dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n", 1254 dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n",
@@ -1241,9 +1256,9 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
1241 return; 1256 return;
1242 } 1257 }
1243 1258
1244 spin_lock(&host->irq_lock); 1259 spin_lock_irqsave(&host->irq_lock, flags);
1245 if (host->dma_ch < 0) { 1260 if (host->dma_ch < 0) {
1246 spin_unlock(&host->irq_lock); 1261 spin_unlock_irqrestore(&host->irq_lock, flags);
1247 return; 1262 return;
1248 } 1263 }
1249 1264
@@ -1253,7 +1268,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
1253 /* Fire up the next transfer. */ 1268 /* Fire up the next transfer. */
1254 omap_hsmmc_config_dma_params(host, data, 1269 omap_hsmmc_config_dma_params(host, data,
1255 data->sg + host->dma_sg_idx); 1270 data->sg + host->dma_sg_idx);
1256 spin_unlock(&host->irq_lock); 1271 spin_unlock_irqrestore(&host->irq_lock, flags);
1257 return; 1272 return;
1258 } 1273 }
1259 1274
@@ -1264,7 +1279,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
1264 req_in_progress = host->req_in_progress; 1279 req_in_progress = host->req_in_progress;
1265 dma_ch = host->dma_ch; 1280 dma_ch = host->dma_ch;
1266 host->dma_ch = -1; 1281 host->dma_ch = -1;
1267 spin_unlock(&host->irq_lock); 1282 spin_unlock_irqrestore(&host->irq_lock, flags);
1268 1283
1269 omap_free_dma(dma_ch); 1284 omap_free_dma(dma_ch);
1270 1285
@@ -1844,6 +1859,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
1844 host->mapbase = res->start + pdata->reg_offset; 1859 host->mapbase = res->start + pdata->reg_offset;
1845 host->base = ioremap(host->mapbase, SZ_4K); 1860 host->base = ioremap(host->mapbase, SZ_4K);
1846 host->power_mode = MMC_POWER_OFF; 1861 host->power_mode = MMC_POWER_OFF;
1862 host->flags = AUTO_CMD12;
1847 host->next_data.cookie = 1; 1863 host->next_data.cookie = 1;
1848 1864
1849 platform_set_drvdata(pdev, host); 1865 platform_set_drvdata(pdev, host);
@@ -1885,21 +1901,17 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
1885 1901
1886 omap_hsmmc_context_save(host); 1902 omap_hsmmc_context_save(host);
1887 1903
1888 if (cpu_is_omap2430()) { 1904 host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
1889 host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck"); 1905 /*
1890 /* 1906 * MMC can still work without debounce clock.
1891 * MMC can still work without debounce clock. 1907 */
1892 */ 1908 if (IS_ERR(host->dbclk)) {
1893 if (IS_ERR(host->dbclk)) 1909 dev_warn(mmc_dev(host->mmc), "Failed to get debounce clk\n");
1894 dev_warn(mmc_dev(host->mmc), 1910 host->dbclk = NULL;
1895 "Failed to get debounce clock\n"); 1911 } else if (clk_enable(host->dbclk) != 0) {
1896 else 1912 dev_warn(mmc_dev(host->mmc), "Failed to enable debounce clk\n");
1897 host->got_dbclk = 1; 1913 clk_put(host->dbclk);
1898 1914 host->dbclk = NULL;
1899 if (host->got_dbclk)
1900 if (clk_enable(host->dbclk) != 0)
1901 dev_dbg(mmc_dev(host->mmc), "Enabling debounce"
1902 " clk failed\n");
1903 } 1915 }
1904 1916
1905 /* Since we do only SG emulation, we can have as many segs 1917 /* Since we do only SG emulation, we can have as many segs
@@ -1969,7 +1981,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
1969 ret = request_threaded_irq(mmc_slot(host).card_detect_irq, 1981 ret = request_threaded_irq(mmc_slot(host).card_detect_irq,
1970 NULL, 1982 NULL,
1971 omap_hsmmc_detect, 1983 omap_hsmmc_detect,
1972 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, 1984 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
1973 mmc_hostname(mmc), host); 1985 mmc_hostname(mmc), host);
1974 if (ret) { 1986 if (ret) {
1975 dev_dbg(mmc_dev(host->mmc), 1987 dev_dbg(mmc_dev(host->mmc),
@@ -2019,7 +2031,7 @@ err_irq:
2019 pm_runtime_put_sync(host->dev); 2031 pm_runtime_put_sync(host->dev);
2020 pm_runtime_disable(host->dev); 2032 pm_runtime_disable(host->dev);
2021 clk_put(host->fclk); 2033 clk_put(host->fclk);
2022 if (host->got_dbclk) { 2034 if (host->dbclk) {
2023 clk_disable(host->dbclk); 2035 clk_disable(host->dbclk);
2024 clk_put(host->dbclk); 2036 clk_put(host->dbclk);
2025 } 2037 }
@@ -2030,7 +2042,9 @@ err1:
2030err_alloc: 2042err_alloc:
2031 omap_hsmmc_gpio_free(pdata); 2043 omap_hsmmc_gpio_free(pdata);
2032err: 2044err:
2033 release_mem_region(res->start, resource_size(res)); 2045 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2046 if (res)
2047 release_mem_region(res->start, resource_size(res));
2034 return ret; 2048 return ret;
2035} 2049}
2036 2050
@@ -2052,7 +2066,7 @@ static int __devexit omap_hsmmc_remove(struct platform_device *pdev)
2052 pm_runtime_put_sync(host->dev); 2066 pm_runtime_put_sync(host->dev);
2053 pm_runtime_disable(host->dev); 2067 pm_runtime_disable(host->dev);
2054 clk_put(host->fclk); 2068 clk_put(host->fclk);
2055 if (host->got_dbclk) { 2069 if (host->dbclk) {
2056 clk_disable(host->dbclk); 2070 clk_disable(host->dbclk);
2057 clk_put(host->dbclk); 2071 clk_put(host->dbclk);
2058 } 2072 }
@@ -2110,7 +2124,7 @@ static int omap_hsmmc_suspend(struct device *dev)
2110 OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP); 2124 OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
2111 } 2125 }
2112 2126
2113 if (host->got_dbclk) 2127 if (host->dbclk)
2114 clk_disable(host->dbclk); 2128 clk_disable(host->dbclk);
2115err: 2129err:
2116 pm_runtime_put_sync(host->dev); 2130 pm_runtime_put_sync(host->dev);
@@ -2131,7 +2145,7 @@ static int omap_hsmmc_resume(struct device *dev)
2131 2145
2132 pm_runtime_get_sync(host->dev); 2146 pm_runtime_get_sync(host->dev);
2133 2147
2134 if (host->got_dbclk) 2148 if (host->dbclk)
2135 clk_enable(host->dbclk); 2149 clk_enable(host->dbclk);
2136 2150
2137 if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) 2151 if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
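
Note on the omap_hsmmc.c hunks above: they add DDR timing and Auto CMD12 support (a stop command is only issued by software when Auto CMD12 is off or the data transfer failed), drop the got_dbclk flag in favour of checking host->dbclk itself (set to NULL when the debounce clock is unavailable), and convert the irq_lock users from spin_lock() to spin_lock_irqsave(). The irqsave form matters because these helpers can be reached from contexts where interrupts may or may not already be disabled; saving and restoring the previous interrupt state keeps every caller correct. A minimal, self-contained sketch of the pattern; struct foo is a stand-in, not the driver's real host type:

#include <linux/spinlock.h>

struct foo {
        spinlock_t irq_lock;
        int dma_ch;
};

/*
 * Safe from process, softirq, or hard-IRQ context: irqsave records the
 * current interrupt state in 'flags' and irqrestore puts exactly that
 * state back, instead of unconditionally re-enabling interrupts on unlock.
 */
static int foo_claim_dma(struct foo *host)
{
        unsigned long flags;
        int dma_ch;

        spin_lock_irqsave(&host->irq_lock, flags);
        dma_ch = host->dma_ch;
        host->dma_ch = -1;
        spin_unlock_irqrestore(&host->irq_lock, flags);

        return dma_ch;
}
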
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index 6dfa82e03c7e..1fe32dfa7cd4 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -75,8 +75,6 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
75 struct spear_sdhci *sdhci; 75 struct spear_sdhci *sdhci;
76 int ret; 76 int ret;
77 77
78 BUG_ON(pdev == NULL);
79
80 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 78 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
81 if (!iomem) { 79 if (!iomem) {
82 ret = -ENOMEM; 80 ret = -ENOMEM;
@@ -84,18 +82,18 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
84 goto err; 82 goto err;
85 } 83 }
86 84
87 if (!request_mem_region(iomem->start, resource_size(iomem), 85 if (!devm_request_mem_region(&pdev->dev, iomem->start,
88 "spear-sdhci")) { 86 resource_size(iomem), "spear-sdhci")) {
89 ret = -EBUSY; 87 ret = -EBUSY;
90 dev_dbg(&pdev->dev, "cannot request region\n"); 88 dev_dbg(&pdev->dev, "cannot request region\n");
91 goto err; 89 goto err;
92 } 90 }
93 91
94 sdhci = kzalloc(sizeof(*sdhci), GFP_KERNEL); 92 sdhci = devm_kzalloc(&pdev->dev, sizeof(*sdhci), GFP_KERNEL);
95 if (!sdhci) { 93 if (!sdhci) {
96 ret = -ENOMEM; 94 ret = -ENOMEM;
97 dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n"); 95 dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n");
98 goto err_kzalloc; 96 goto err;
99 } 97 }
100 98
101 /* clk enable */ 99 /* clk enable */
@@ -103,13 +101,13 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
103 if (IS_ERR(sdhci->clk)) { 101 if (IS_ERR(sdhci->clk)) {
104 ret = PTR_ERR(sdhci->clk); 102 ret = PTR_ERR(sdhci->clk);
105 dev_dbg(&pdev->dev, "Error getting clock\n"); 103 dev_dbg(&pdev->dev, "Error getting clock\n");
106 goto err_clk_get; 104 goto err;
107 } 105 }
108 106
109 ret = clk_enable(sdhci->clk); 107 ret = clk_enable(sdhci->clk);
110 if (ret) { 108 if (ret) {
111 dev_dbg(&pdev->dev, "Error enabling clock\n"); 109 dev_dbg(&pdev->dev, "Error enabling clock\n");
112 goto err_clk_enb; 110 goto put_clk;
113 } 111 }
114 112
115 /* overwrite platform_data */ 113 /* overwrite platform_data */
@@ -124,7 +122,7 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
124 if (IS_ERR(host)) { 122 if (IS_ERR(host)) {
125 ret = PTR_ERR(host); 123 ret = PTR_ERR(host);
126 dev_dbg(&pdev->dev, "error allocating host\n"); 124 dev_dbg(&pdev->dev, "error allocating host\n");
127 goto err_alloc_host; 125 goto disable_clk;
128 } 126 }
129 127
130 host->hw_name = "sdhci"; 128 host->hw_name = "sdhci";
@@ -132,17 +130,18 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
132 host->irq = platform_get_irq(pdev, 0); 130 host->irq = platform_get_irq(pdev, 0);
133 host->quirks = SDHCI_QUIRK_BROKEN_ADMA; 131 host->quirks = SDHCI_QUIRK_BROKEN_ADMA;
134 132
135 host->ioaddr = ioremap(iomem->start, resource_size(iomem)); 133 host->ioaddr = devm_ioremap(&pdev->dev, iomem->start,
134 resource_size(iomem));
136 if (!host->ioaddr) { 135 if (!host->ioaddr) {
137 ret = -ENOMEM; 136 ret = -ENOMEM;
138 dev_dbg(&pdev->dev, "failed to remap registers\n"); 137 dev_dbg(&pdev->dev, "failed to remap registers\n");
139 goto err_ioremap; 138 goto free_host;
140 } 139 }
141 140
142 ret = sdhci_add_host(host); 141 ret = sdhci_add_host(host);
143 if (ret) { 142 if (ret) {
144 dev_dbg(&pdev->dev, "error adding host\n"); 143 dev_dbg(&pdev->dev, "error adding host\n");
145 goto err_add_host; 144 goto free_host;
146 } 145 }
147 146
148 platform_set_drvdata(pdev, host); 147 platform_set_drvdata(pdev, host);
@@ -161,11 +160,12 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
161 if (sdhci->data->card_power_gpio >= 0) { 160 if (sdhci->data->card_power_gpio >= 0) {
162 int val = 0; 161 int val = 0;
163 162
164 ret = gpio_request(sdhci->data->card_power_gpio, "sdhci"); 163 ret = devm_gpio_request(&pdev->dev,
164 sdhci->data->card_power_gpio, "sdhci");
165 if (ret < 0) { 165 if (ret < 0) {
166 dev_dbg(&pdev->dev, "gpio request fail: %d\n", 166 dev_dbg(&pdev->dev, "gpio request fail: %d\n",
167 sdhci->data->card_power_gpio); 167 sdhci->data->card_power_gpio);
168 goto err_pgpio_request; 168 goto set_drvdata;
169 } 169 }
170 170
171 if (sdhci->data->power_always_enb) 171 if (sdhci->data->power_always_enb)
@@ -177,60 +177,48 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
177 if (ret) { 177 if (ret) {
178 dev_dbg(&pdev->dev, "gpio set direction fail: %d\n", 178 dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
179 sdhci->data->card_power_gpio); 179 sdhci->data->card_power_gpio);
180 goto err_pgpio_direction; 180 goto set_drvdata;
181 } 181 }
182 } 182 }
183 183
184 if (sdhci->data->card_int_gpio >= 0) { 184 if (sdhci->data->card_int_gpio >= 0) {
185 ret = gpio_request(sdhci->data->card_int_gpio, "sdhci"); 185 ret = devm_gpio_request(&pdev->dev, sdhci->data->card_int_gpio,
186 "sdhci");
186 if (ret < 0) { 187 if (ret < 0) {
187 dev_dbg(&pdev->dev, "gpio request fail: %d\n", 188 dev_dbg(&pdev->dev, "gpio request fail: %d\n",
188 sdhci->data->card_int_gpio); 189 sdhci->data->card_int_gpio);
189 goto err_igpio_request; 190 goto set_drvdata;
190 } 191 }
191 192
192 ret = gpio_direction_input(sdhci->data->card_int_gpio); 193 ret = gpio_direction_input(sdhci->data->card_int_gpio);
193 if (ret) { 194 if (ret) {
194 dev_dbg(&pdev->dev, "gpio set direction fail: %d\n", 195 dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
195 sdhci->data->card_int_gpio); 196 sdhci->data->card_int_gpio);
196 goto err_igpio_direction; 197 goto set_drvdata;
197 } 198 }
198 ret = request_irq(gpio_to_irq(sdhci->data->card_int_gpio), 199 ret = devm_request_irq(&pdev->dev,
200 gpio_to_irq(sdhci->data->card_int_gpio),
199 sdhci_gpio_irq, IRQF_TRIGGER_LOW, 201 sdhci_gpio_irq, IRQF_TRIGGER_LOW,
200 mmc_hostname(host->mmc), pdev); 202 mmc_hostname(host->mmc), pdev);
201 if (ret) { 203 if (ret) {
202 dev_dbg(&pdev->dev, "gpio request irq fail: %d\n", 204 dev_dbg(&pdev->dev, "gpio request irq fail: %d\n",
203 sdhci->data->card_int_gpio); 205 sdhci->data->card_int_gpio);
204 goto err_igpio_request_irq; 206 goto set_drvdata;
205 } 207 }
206 208
207 } 209 }
208 210
209 return 0; 211 return 0;
210 212
211err_igpio_request_irq: 213set_drvdata:
212err_igpio_direction:
213 if (sdhci->data->card_int_gpio >= 0)
214 gpio_free(sdhci->data->card_int_gpio);
215err_igpio_request:
216err_pgpio_direction:
217 if (sdhci->data->card_power_gpio >= 0)
218 gpio_free(sdhci->data->card_power_gpio);
219err_pgpio_request:
220 platform_set_drvdata(pdev, NULL); 214 platform_set_drvdata(pdev, NULL);
221 sdhci_remove_host(host, 1); 215 sdhci_remove_host(host, 1);
222err_add_host: 216free_host:
223 iounmap(host->ioaddr);
224err_ioremap:
225 sdhci_free_host(host); 217 sdhci_free_host(host);
226err_alloc_host: 218disable_clk:
227 clk_disable(sdhci->clk); 219 clk_disable(sdhci->clk);
228err_clk_enb: 220put_clk:
229 clk_put(sdhci->clk); 221 clk_put(sdhci->clk);
230err_clk_get:
231 kfree(sdhci);
232err_kzalloc:
233 release_mem_region(iomem->start, resource_size(iomem));
234err: 222err:
235 dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret); 223 dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret);
236 return ret; 224 return ret;
@@ -239,35 +227,19 @@ err:
239static int __devexit sdhci_remove(struct platform_device *pdev) 227static int __devexit sdhci_remove(struct platform_device *pdev)
240{ 228{
241 struct sdhci_host *host = platform_get_drvdata(pdev); 229 struct sdhci_host *host = platform_get_drvdata(pdev);
242 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
243 struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev); 230 struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev);
244 int dead; 231 int dead = 0;
245 u32 scratch; 232 u32 scratch;
246 233
247 if (sdhci->data) {
248 if (sdhci->data->card_int_gpio >= 0) {
249 free_irq(gpio_to_irq(sdhci->data->card_int_gpio), pdev);
250 gpio_free(sdhci->data->card_int_gpio);
251 }
252
253 if (sdhci->data->card_power_gpio >= 0)
254 gpio_free(sdhci->data->card_power_gpio);
255 }
256
257 platform_set_drvdata(pdev, NULL); 234 platform_set_drvdata(pdev, NULL);
258 dead = 0;
259 scratch = readl(host->ioaddr + SDHCI_INT_STATUS); 235 scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
260 if (scratch == (u32)-1) 236 if (scratch == (u32)-1)
261 dead = 1; 237 dead = 1;
262 238
263 sdhci_remove_host(host, dead); 239 sdhci_remove_host(host, dead);
264 iounmap(host->ioaddr);
265 sdhci_free_host(host); 240 sdhci_free_host(host);
266 clk_disable(sdhci->clk); 241 clk_disable(sdhci->clk);
267 clk_put(sdhci->clk); 242 clk_put(sdhci->clk);
268 kfree(sdhci);
269 if (iomem)
270 release_mem_region(iomem->start, resource_size(iomem));
271 243
272 return 0; 244 return 0;
273} 245}
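
Note on the sdhci-spear.c hunks above: the probe path switches to device-managed (devm_*) allocation for the memory region, the private structure, the ioremap, both GPIOs, and the card-interrupt IRQ. Those resources are released automatically when probe fails or the device is unbound, which is why the long err_* unwind ladder collapses to just the clock and sdhci-host cleanup (set_drvdata/free_host/disable_clk/put_clk) and why remove() loses its manual frees. A minimal sketch of the pattern, with placeholder names (foo_probe, struct foo_priv) rather than the driver's own:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_priv {
        void __iomem *base;
};

static int foo_probe(struct platform_device *pdev)
{
        struct resource *iomem;
        struct foo_priv *priv;

        iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!iomem)
                return -ENODEV;

        /* Released automatically on probe failure or device unbind. */
        if (!devm_request_mem_region(&pdev->dev, iomem->start,
                                     resource_size(iomem), "foo"))
                return -EBUSY;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->base = devm_ioremap(&pdev->dev, iomem->start,
                                  resource_size(iomem));
        if (!priv->base)
                return -ENOMEM;

        platform_set_drvdata(pdev, priv);
        return 0;
        /* No kfree()/iounmap()/release_mem_region() needed anywhere. */
}

The clock and the sdhci host are still acquired unmanaged in this driver, so they keep explicit cleanup in both the error path and remove().
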
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index ff5a16991939..b38d8a78f6a0 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -32,8 +32,13 @@
32 32
33#include "sdhci-pltfm.h" 33#include "sdhci-pltfm.h"
34 34
35/* Tegra SDHOST controller vendor register definitions */
36#define SDHCI_TEGRA_VENDOR_MISC_CTRL 0x120
37#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 0x20
38
35#define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0) 39#define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0)
36#define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1) 40#define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1)
41#define NVQUIRK_ENABLE_SDHCI_SPEC_300 BIT(2)
37 42
38struct sdhci_tegra_soc_data { 43struct sdhci_tegra_soc_data {
39 struct sdhci_pltfm_data *pdata; 44 struct sdhci_pltfm_data *pdata;
@@ -120,6 +125,25 @@ static irqreturn_t carddetect_irq(int irq, void *data)
120 return IRQ_HANDLED; 125 return IRQ_HANDLED;
121}; 126};
122 127
128static void tegra_sdhci_reset_exit(struct sdhci_host *host, u8 mask)
129{
130 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
131 struct sdhci_tegra *tegra_host = pltfm_host->priv;
132 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
133
134 if (!(mask & SDHCI_RESET_ALL))
135 return;
136
137 /* Erratum: Enable SDHCI spec v3.00 support */
138 if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300) {
139 u32 misc_ctrl;
140
141 misc_ctrl = sdhci_readb(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
142 misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
143 sdhci_writeb(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
144 }
145}
146
123static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width) 147static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width)
124{ 148{
125 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 149 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -148,6 +172,7 @@ static struct sdhci_ops tegra_sdhci_ops = {
148 .read_w = tegra_sdhci_readw, 172 .read_w = tegra_sdhci_readw,
149 .write_l = tegra_sdhci_writel, 173 .write_l = tegra_sdhci_writel,
150 .platform_8bit_width = tegra_sdhci_8bit, 174 .platform_8bit_width = tegra_sdhci_8bit,
175 .platform_reset_exit = tegra_sdhci_reset_exit,
151}; 176};
152 177
153#ifdef CONFIG_ARCH_TEGRA_2x_SOC 178#ifdef CONFIG_ARCH_TEGRA_2x_SOC
@@ -178,6 +203,7 @@ static struct sdhci_pltfm_data sdhci_tegra30_pdata = {
178 203
179static struct sdhci_tegra_soc_data soc_data_tegra30 = { 204static struct sdhci_tegra_soc_data soc_data_tegra30 = {
180 .pdata = &sdhci_tegra30_pdata, 205 .pdata = &sdhci_tegra30_pdata,
206 .nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300,
181}; 207};
182#endif 208#endif
183 209
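
Note on the sdhci-tegra.c hunks above: a platform_reset_exit hook is added so that after a full controller reset (SDHCI_RESET_ALL), which clears the vendor registers, the SPEC_300 enable bit is re-applied on SoCs whose soc_data carries the new NVQUIRK_ENABLE_SDHCI_SPEC_300 flag (set only for Tegra30 here). The general "re-apply vendor setup after reset" shape, sketched with placeholder register and bit names rather than Tegra's, and assuming the driver's usual "sdhci.h"/"sdhci-pltfm.h" includes:

#define FOO_VENDOR_CTRL         0x120   /* placeholder offset */
#define FOO_VENDOR_FEATURE_EN   0x20    /* placeholder bit */

static void foo_sdhci_reset_exit(struct sdhci_host *host, u8 mask)
{
        u32 ctrl;

        /* Partial resets (CMD/DATA only) leave vendor registers alone. */
        if (!(mask & SDHCI_RESET_ALL))
                return;

        ctrl = sdhci_readb(host, FOO_VENDOR_CTRL);
        ctrl |= FOO_VENDOR_FEATURE_EN;
        sdhci_writeb(host, ctrl, FOO_VENDOR_CTRL);
}
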
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index ccefdebeff14..e626732aff77 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -680,8 +680,8 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
680 } 680 }
681 681
682 if (count >= 0xF) { 682 if (count >= 0xF) {
683 pr_warning("%s: Too large timeout requested for CMD%d!\n", 683 pr_warning("%s: Too large timeout 0x%x requested for CMD%d!\n",
684 mmc_hostname(host->mmc), cmd->opcode); 684 mmc_hostname(host->mmc), count, cmd->opcode);
685 count = 0xE; 685 count = 0xE;
686 } 686 }
687 687