Diffstat (limited to 'drivers/mmc')
-rw-r--r--  drivers/mmc/card/block.c      |    2
-rw-r--r--  drivers/mmc/core/core.c       |  318
-rw-r--r--  drivers/mmc/core/core.h       |    8
-rw-r--r--  drivers/mmc/core/host.c       |    1
-rw-r--r--  drivers/mmc/core/host.h       |    2
-rw-r--r--  drivers/mmc/core/mmc.c        |  149
-rw-r--r--  drivers/mmc/core/mmc_ops.c    |   59
-rw-r--r--  drivers/mmc/core/mmc_ops.h    |    1
-rw-r--r--  drivers/mmc/core/sd.c         |   75
-rw-r--r--  drivers/mmc/core/sdio.c       |  316
-rw-r--r--  drivers/mmc/core/sdio_bus.c   |    3
-rw-r--r--  drivers/mmc/core/sdio_cis.c   |    2
-rw-r--r--  drivers/mmc/core/sdio_io.c    |    2
-rw-r--r--  drivers/mmc/host/Kconfig      |   29
-rw-r--r--  drivers/mmc/host/Makefile     |    1
-rw-r--r--  drivers/mmc/host/atmel-mci.c  |   42
-rw-r--r--  drivers/mmc/host/mmc_spi.c    |    1
-rw-r--r--  drivers/mmc/host/msm_sdcc.c   | 1287
-rw-r--r--  drivers/mmc/host/msm_sdcc.h   |  238
-rw-r--r--  drivers/mmc/host/mxcmmc.c     |    2
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 1083
-rw-r--r--  drivers/mmc/host/sdhci-of.c   |   49
-rw-r--r--  drivers/mmc/host/sdhci-pci.c  |    5
-rw-r--r--  drivers/mmc/host/sdhci.c      |   54
-rw-r--r--  drivers/mmc/host/sdhci.h      |    6
25 files changed, 3312 insertions(+), 423 deletions(-)
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index adc205c49fbf..85f0e8cd875b 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -130,7 +130,7 @@ mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
130 return 0; 130 return 0;
131} 131}
132 132
133static struct block_device_operations mmc_bdops = { 133static const struct block_device_operations mmc_bdops = {
134 .open = mmc_blk_open, 134 .open = mmc_blk_open,
135 .release = mmc_blk_release, 135 .release = mmc_blk_release,
136 .getgeo = mmc_blk_getgeo, 136 .getgeo = mmc_blk_getgeo,
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index d84c880fac84..7dab2e5f4bc9 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -344,6 +344,101 @@ unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
344EXPORT_SYMBOL(mmc_align_data_size); 344EXPORT_SYMBOL(mmc_align_data_size);
345 345
346/** 346/**
347 * mmc_host_enable - enable a host.
348 * @host: mmc host to enable
349 *
350 * Hosts that support power saving can use the 'enable' and 'disable'
351 * methods to exit and enter power saving states. For more information
352 * see comments for struct mmc_host_ops.
353 */
354int mmc_host_enable(struct mmc_host *host)
355{
356 if (!(host->caps & MMC_CAP_DISABLE))
357 return 0;
358
359 if (host->en_dis_recurs)
360 return 0;
361
362 if (host->nesting_cnt++)
363 return 0;
364
365 cancel_delayed_work_sync(&host->disable);
366
367 if (host->enabled)
368 return 0;
369
370 if (host->ops->enable) {
371 int err;
372
373 host->en_dis_recurs = 1;
374 err = host->ops->enable(host);
375 host->en_dis_recurs = 0;
376
377 if (err) {
378 pr_debug("%s: enable error %d\n",
379 mmc_hostname(host), err);
380 return err;
381 }
382 }
383 host->enabled = 1;
384 return 0;
385}
386EXPORT_SYMBOL(mmc_host_enable);
387
388static int mmc_host_do_disable(struct mmc_host *host, int lazy)
389{
390 if (host->ops->disable) {
391 int err;
392
393 host->en_dis_recurs = 1;
394 err = host->ops->disable(host, lazy);
395 host->en_dis_recurs = 0;
396
397 if (err < 0) {
398 pr_debug("%s: disable error %d\n",
399 mmc_hostname(host), err);
400 return err;
401 }
402 if (err > 0) {
403 unsigned long delay = msecs_to_jiffies(err);
404
405 mmc_schedule_delayed_work(&host->disable, delay);
406 }
407 }
408 host->enabled = 0;
409 return 0;
410}
411
412/**
413 * mmc_host_disable - disable a host.
414 * @host: mmc host to disable
415 *
416 * Hosts that support power saving can use the 'enable' and 'disable'
417 * methods to exit and enter power saving states. For more information
418 * see comments for struct mmc_host_ops.
419 */
420int mmc_host_disable(struct mmc_host *host)
421{
422 int err;
423
424 if (!(host->caps & MMC_CAP_DISABLE))
425 return 0;
426
427 if (host->en_dis_recurs)
428 return 0;
429
430 if (--host->nesting_cnt)
431 return 0;
432
433 if (!host->enabled)
434 return 0;
435
436 err = mmc_host_do_disable(host, 0);
437 return err;
438}
439EXPORT_SYMBOL(mmc_host_disable);
440
441/**
347 * __mmc_claim_host - exclusively claim a host 442 * __mmc_claim_host - exclusively claim a host
348 * @host: mmc host to claim 443 * @host: mmc host to claim
349 * @abort: whether or not the operation should be aborted 444 * @abort: whether or not the operation should be aborted
@@ -366,25 +461,111 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
366 while (1) { 461 while (1) {
367 set_current_state(TASK_UNINTERRUPTIBLE); 462 set_current_state(TASK_UNINTERRUPTIBLE);
368 stop = abort ? atomic_read(abort) : 0; 463 stop = abort ? atomic_read(abort) : 0;
369 if (stop || !host->claimed) 464 if (stop || !host->claimed || host->claimer == current)
370 break; 465 break;
371 spin_unlock_irqrestore(&host->lock, flags); 466 spin_unlock_irqrestore(&host->lock, flags);
372 schedule(); 467 schedule();
373 spin_lock_irqsave(&host->lock, flags); 468 spin_lock_irqsave(&host->lock, flags);
374 } 469 }
375 set_current_state(TASK_RUNNING); 470 set_current_state(TASK_RUNNING);
376 if (!stop) 471 if (!stop) {
377 host->claimed = 1; 472 host->claimed = 1;
378 else 473 host->claimer = current;
474 host->claim_cnt += 1;
475 } else
379 wake_up(&host->wq); 476 wake_up(&host->wq);
380 spin_unlock_irqrestore(&host->lock, flags); 477 spin_unlock_irqrestore(&host->lock, flags);
381 remove_wait_queue(&host->wq, &wait); 478 remove_wait_queue(&host->wq, &wait);
479 if (!stop)
480 mmc_host_enable(host);
382 return stop; 481 return stop;
383} 482}
384 483
385EXPORT_SYMBOL(__mmc_claim_host); 484EXPORT_SYMBOL(__mmc_claim_host);
386 485
387/** 486/**
487 * mmc_try_claim_host - try exclusively to claim a host
488 * @host: mmc host to claim
489 *
490 * Returns %1 if the host is claimed, %0 otherwise.
491 */
492int mmc_try_claim_host(struct mmc_host *host)
493{
494 int claimed_host = 0;
495 unsigned long flags;
496
497 spin_lock_irqsave(&host->lock, flags);
498 if (!host->claimed || host->claimer == current) {
499 host->claimed = 1;
500 host->claimer = current;
501 host->claim_cnt += 1;
502 claimed_host = 1;
503 }
504 spin_unlock_irqrestore(&host->lock, flags);
505 return claimed_host;
506}
507EXPORT_SYMBOL(mmc_try_claim_host);
508
509static void mmc_do_release_host(struct mmc_host *host)
510{
511 unsigned long flags;
512
513 spin_lock_irqsave(&host->lock, flags);
514 if (--host->claim_cnt) {
515 /* Release for nested claim */
516 spin_unlock_irqrestore(&host->lock, flags);
517 } else {
518 host->claimed = 0;
519 host->claimer = NULL;
520 spin_unlock_irqrestore(&host->lock, flags);
521 wake_up(&host->wq);
522 }
523}
524
525void mmc_host_deeper_disable(struct work_struct *work)
526{
527 struct mmc_host *host =
528 container_of(work, struct mmc_host, disable.work);
529
530 /* If the host is claimed then we do not want to disable it anymore */
531 if (!mmc_try_claim_host(host))
532 return;
533 mmc_host_do_disable(host, 1);
534 mmc_do_release_host(host);
535}
536
537/**
538 * mmc_host_lazy_disable - lazily disable a host.
539 * @host: mmc host to disable
540 *
541 * Hosts that support power saving can use the 'enable' and 'disable'
542 * methods to exit and enter power saving states. For more information
543 * see comments for struct mmc_host_ops.
544 */
545int mmc_host_lazy_disable(struct mmc_host *host)
546{
547 if (!(host->caps & MMC_CAP_DISABLE))
548 return 0;
549
550 if (host->en_dis_recurs)
551 return 0;
552
553 if (--host->nesting_cnt)
554 return 0;
555
556 if (!host->enabled)
557 return 0;
558
559 if (host->disable_delay) {
560 mmc_schedule_delayed_work(&host->disable,
561 msecs_to_jiffies(host->disable_delay));
562 return 0;
563 } else
564 return mmc_host_do_disable(host, 1);
565}
566EXPORT_SYMBOL(mmc_host_lazy_disable);
567
568/**
388 * mmc_release_host - release a host 569 * mmc_release_host - release a host
389 * @host: mmc host to release 570 * @host: mmc host to release
390 * 571 *
@@ -393,15 +574,11 @@ EXPORT_SYMBOL(__mmc_claim_host);
393 */ 574 */
394void mmc_release_host(struct mmc_host *host) 575void mmc_release_host(struct mmc_host *host)
395{ 576{
396 unsigned long flags;
397
398 WARN_ON(!host->claimed); 577 WARN_ON(!host->claimed);
399 578
400 spin_lock_irqsave(&host->lock, flags); 579 mmc_host_lazy_disable(host);
401 host->claimed = 0;
402 spin_unlock_irqrestore(&host->lock, flags);
403 580
404 wake_up(&host->wq); 581 mmc_do_release_host(host);
405} 582}
406 583
407EXPORT_SYMBOL(mmc_release_host); 584EXPORT_SYMBOL(mmc_release_host);
@@ -687,7 +864,13 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing)
687 */ 864 */
688static void mmc_power_up(struct mmc_host *host) 865static void mmc_power_up(struct mmc_host *host)
689{ 866{
690 int bit = fls(host->ocr_avail) - 1; 867 int bit;
868
869 /* If ocr is set, we use it */
870 if (host->ocr)
871 bit = ffs(host->ocr) - 1;
872 else
873 bit = fls(host->ocr_avail) - 1;
691 874
692 host->ios.vdd = bit; 875 host->ios.vdd = bit;
693 if (mmc_host_is_spi(host)) { 876 if (mmc_host_is_spi(host)) {
@@ -947,6 +1130,8 @@ void mmc_stop_host(struct mmc_host *host)
947 spin_unlock_irqrestore(&host->lock, flags); 1130 spin_unlock_irqrestore(&host->lock, flags);
948#endif 1131#endif
949 1132
1133 if (host->caps & MMC_CAP_DISABLE)
1134 cancel_delayed_work(&host->disable);
950 cancel_delayed_work(&host->detect); 1135 cancel_delayed_work(&host->detect);
951 mmc_flush_scheduled_work(); 1136 mmc_flush_scheduled_work();
952 1137
@@ -958,6 +1143,8 @@ void mmc_stop_host(struct mmc_host *host)
958 mmc_claim_host(host); 1143 mmc_claim_host(host);
959 mmc_detach_bus(host); 1144 mmc_detach_bus(host);
960 mmc_release_host(host); 1145 mmc_release_host(host);
1146 mmc_bus_put(host);
1147 return;
961 } 1148 }
962 mmc_bus_put(host); 1149 mmc_bus_put(host);
963 1150
@@ -966,6 +1153,80 @@ void mmc_stop_host(struct mmc_host *host)
966 mmc_power_off(host); 1153 mmc_power_off(host);
967} 1154}
968 1155
1156void mmc_power_save_host(struct mmc_host *host)
1157{
1158 mmc_bus_get(host);
1159
1160 if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
1161 mmc_bus_put(host);
1162 return;
1163 }
1164
1165 if (host->bus_ops->power_save)
1166 host->bus_ops->power_save(host);
1167
1168 mmc_bus_put(host);
1169
1170 mmc_power_off(host);
1171}
1172EXPORT_SYMBOL(mmc_power_save_host);
1173
1174void mmc_power_restore_host(struct mmc_host *host)
1175{
1176 mmc_bus_get(host);
1177
1178 if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
1179 mmc_bus_put(host);
1180 return;
1181 }
1182
1183 mmc_power_up(host);
1184 host->bus_ops->power_restore(host);
1185
1186 mmc_bus_put(host);
1187}
1188EXPORT_SYMBOL(mmc_power_restore_host);
1189
1190int mmc_card_awake(struct mmc_host *host)
1191{
1192 int err = -ENOSYS;
1193
1194 mmc_bus_get(host);
1195
1196 if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
1197 err = host->bus_ops->awake(host);
1198
1199 mmc_bus_put(host);
1200
1201 return err;
1202}
1203EXPORT_SYMBOL(mmc_card_awake);
1204
1205int mmc_card_sleep(struct mmc_host *host)
1206{
1207 int err = -ENOSYS;
1208
1209 mmc_bus_get(host);
1210
 1211	if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
1212 err = host->bus_ops->sleep(host);
1213
1214 mmc_bus_put(host);
1215
1216 return err;
1217}
1218EXPORT_SYMBOL(mmc_card_sleep);
1219
1220int mmc_card_can_sleep(struct mmc_host *host)
1221{
1222 struct mmc_card *card = host->card;
1223
1224 if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
1225 return 1;
1226 return 0;
1227}
1228EXPORT_SYMBOL(mmc_card_can_sleep);
1229
969#ifdef CONFIG_PM 1230#ifdef CONFIG_PM
970 1231
971/** 1232/**
@@ -975,27 +1236,36 @@ void mmc_stop_host(struct mmc_host *host)
975 */ 1236 */
976int mmc_suspend_host(struct mmc_host *host, pm_message_t state) 1237int mmc_suspend_host(struct mmc_host *host, pm_message_t state)
977{ 1238{
1239 int err = 0;
1240
1241 if (host->caps & MMC_CAP_DISABLE)
1242 cancel_delayed_work(&host->disable);
978 cancel_delayed_work(&host->detect); 1243 cancel_delayed_work(&host->detect);
979 mmc_flush_scheduled_work(); 1244 mmc_flush_scheduled_work();
980 1245
981 mmc_bus_get(host); 1246 mmc_bus_get(host);
982 if (host->bus_ops && !host->bus_dead) { 1247 if (host->bus_ops && !host->bus_dead) {
983 if (host->bus_ops->suspend) 1248 if (host->bus_ops->suspend)
984 host->bus_ops->suspend(host); 1249 err = host->bus_ops->suspend(host);
985 if (!host->bus_ops->resume) { 1250 if (err == -ENOSYS || !host->bus_ops->resume) {
1251 /*
1252 * We simply "remove" the card in this case.
1253 * It will be redetected on resume.
1254 */
986 if (host->bus_ops->remove) 1255 if (host->bus_ops->remove)
987 host->bus_ops->remove(host); 1256 host->bus_ops->remove(host);
988
989 mmc_claim_host(host); 1257 mmc_claim_host(host);
990 mmc_detach_bus(host); 1258 mmc_detach_bus(host);
991 mmc_release_host(host); 1259 mmc_release_host(host);
1260 err = 0;
992 } 1261 }
993 } 1262 }
994 mmc_bus_put(host); 1263 mmc_bus_put(host);
995 1264
996 mmc_power_off(host); 1265 if (!err)
1266 mmc_power_off(host);
997 1267
998 return 0; 1268 return err;
999} 1269}
1000 1270
1001EXPORT_SYMBOL(mmc_suspend_host); 1271EXPORT_SYMBOL(mmc_suspend_host);
@@ -1006,12 +1276,26 @@ EXPORT_SYMBOL(mmc_suspend_host);
1006 */ 1276 */
1007int mmc_resume_host(struct mmc_host *host) 1277int mmc_resume_host(struct mmc_host *host)
1008{ 1278{
1279 int err = 0;
1280
1009 mmc_bus_get(host); 1281 mmc_bus_get(host);
1010 if (host->bus_ops && !host->bus_dead) { 1282 if (host->bus_ops && !host->bus_dead) {
1011 mmc_power_up(host); 1283 mmc_power_up(host);
1012 mmc_select_voltage(host, host->ocr); 1284 mmc_select_voltage(host, host->ocr);
1013 BUG_ON(!host->bus_ops->resume); 1285 BUG_ON(!host->bus_ops->resume);
1014 host->bus_ops->resume(host); 1286 err = host->bus_ops->resume(host);
1287 if (err) {
1288 printk(KERN_WARNING "%s: error %d during resume "
1289 "(card was removed?)\n",
1290 mmc_hostname(host), err);
1291 if (host->bus_ops->remove)
1292 host->bus_ops->remove(host);
1293 mmc_claim_host(host);
1294 mmc_detach_bus(host);
1295 mmc_release_host(host);
1296 /* no need to bother upper layers */
1297 err = 0;
1298 }
1015 } 1299 }
1016 mmc_bus_put(host); 1300 mmc_bus_put(host);
1017 1301
@@ -1021,7 +1305,7 @@ int mmc_resume_host(struct mmc_host *host)
1021 */ 1305 */
1022 mmc_detect_change(host, 1); 1306 mmc_detect_change(host, 1);
1023 1307
1024 return 0; 1308 return err;
1025} 1309}
1026 1310
1027EXPORT_SYMBOL(mmc_resume_host); 1311EXPORT_SYMBOL(mmc_resume_host);
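
The core.c changes above add an opt-in host power-saving framework: a controller sets MMC_CAP_DISABLE, and the core then brackets every claim/release of the host with mmc_host_enable() and mmc_host_lazy_disable(). As a rough sketch of how a controller driver would plug into this (not part of this patch; the foo_* names are invented, and the .enable/.disable callbacks and the disable_delay field are assumed to come from the companion include/linux/mmc/host.h change, which lies outside this diffstat):

	static int foo_enable(struct mmc_host *mmc)
	{
		/* leave the power-saving state, e.g. ungate functional clocks */
		return 0;
	}

	static int foo_disable(struct mmc_host *mmc, int lazy)
	{
		/*
		 * Enter a power-saving state.  A positive return value asks the
		 * core to retry the disable after that many milliseconds.
		 */
		return 0;
	}

	static const struct mmc_host_ops foo_ops = {
		.request = foo_request,	/* the driver's usual handlers */
		.set_ios = foo_set_ios,
		.enable  = foo_enable,
		.disable = foo_disable,
	};

	/* in the probe routine */
	mmc->ops = &foo_ops;
	mmc->caps |= MMC_CAP_DISABLE;
	mmc->disable_delay = 100;	/* lazy-disable 100 ms after release */

With that in place, __mmc_claim_host() calls mmc_host_enable() before a request runs and mmc_release_host() ends with mmc_host_lazy_disable(), so the controller is powered down automatically once it has been idle for disable_delay milliseconds.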
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index c819effa1032..67ae6abc4230 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -16,10 +16,14 @@
16#define MMC_CMD_RETRIES 3 16#define MMC_CMD_RETRIES 3
17 17
18struct mmc_bus_ops { 18struct mmc_bus_ops {
19 int (*awake)(struct mmc_host *);
20 int (*sleep)(struct mmc_host *);
19 void (*remove)(struct mmc_host *); 21 void (*remove)(struct mmc_host *);
20 void (*detect)(struct mmc_host *); 22 void (*detect)(struct mmc_host *);
21 void (*suspend)(struct mmc_host *); 23 int (*suspend)(struct mmc_host *);
22 void (*resume)(struct mmc_host *); 24 int (*resume)(struct mmc_host *);
25 void (*power_save)(struct mmc_host *);
26 void (*power_restore)(struct mmc_host *);
23}; 27};
24 28
25void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops); 29void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 5e945e64ead7..a268d12f1af0 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -83,6 +83,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
83 spin_lock_init(&host->lock); 83 spin_lock_init(&host->lock);
84 init_waitqueue_head(&host->wq); 84 init_waitqueue_head(&host->wq);
85 INIT_DELAYED_WORK(&host->detect, mmc_rescan); 85 INIT_DELAYED_WORK(&host->detect, mmc_rescan);
86 INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable);
86 87
87 /* 88 /*
88 * By default, hosts do not support SGIO or large requests. 89 * By default, hosts do not support SGIO or large requests.
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index c2dc3d2d9f9a..8c87e1109a34 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -14,5 +14,7 @@
14int mmc_register_host_class(void); 14int mmc_register_host_class(void);
15void mmc_unregister_host_class(void); 15void mmc_unregister_host_class(void);
16 16
17void mmc_host_deeper_disable(struct work_struct *work);
18
17#endif 19#endif
18 20
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 2fb9d5f271ea..bfefce365ae7 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -160,7 +160,6 @@ static int mmc_read_ext_csd(struct mmc_card *card)
160{ 160{
161 int err; 161 int err;
162 u8 *ext_csd; 162 u8 *ext_csd;
163 unsigned int ext_csd_struct;
164 163
165 BUG_ON(!card); 164 BUG_ON(!card);
166 165
@@ -180,11 +179,11 @@ static int mmc_read_ext_csd(struct mmc_card *card)
180 179
181 err = mmc_send_ext_csd(card, ext_csd); 180 err = mmc_send_ext_csd(card, ext_csd);
182 if (err) { 181 if (err) {
183 /* 182 /* If the host or the card can't do the switch,
184 * We all hosts that cannot perform the command 183 * fail more gracefully. */
185 * to fail more gracefully 184 if ((err != -EINVAL)
186 */ 185 && (err != -ENOSYS)
187 if (err != -EINVAL) 186 && (err != -EFAULT))
188 goto out; 187 goto out;
189 188
190 /* 189 /*
@@ -207,16 +206,16 @@ static int mmc_read_ext_csd(struct mmc_card *card)
207 goto out; 206 goto out;
208 } 207 }
209 208
210 ext_csd_struct = ext_csd[EXT_CSD_REV]; 209 card->ext_csd.rev = ext_csd[EXT_CSD_REV];
211 if (ext_csd_struct > 3) { 210 if (card->ext_csd.rev > 3) {
212 printk(KERN_ERR "%s: unrecognised EXT_CSD structure " 211 printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
213 "version %d\n", mmc_hostname(card->host), 212 "version %d\n", mmc_hostname(card->host),
214 ext_csd_struct); 213 card->ext_csd.rev);
215 err = -EINVAL; 214 err = -EINVAL;
216 goto out; 215 goto out;
217 } 216 }
218 217
219 if (ext_csd_struct >= 2) { 218 if (card->ext_csd.rev >= 2) {
220 card->ext_csd.sectors = 219 card->ext_csd.sectors =
221 ext_csd[EXT_CSD_SEC_CNT + 0] << 0 | 220 ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
222 ext_csd[EXT_CSD_SEC_CNT + 1] << 8 | 221 ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
@@ -241,6 +240,15 @@ static int mmc_read_ext_csd(struct mmc_card *card)
241 goto out; 240 goto out;
242 } 241 }
243 242
243 if (card->ext_csd.rev >= 3) {
244 u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
245
246 /* Sleep / awake timeout in 100ns units */
247 if (sa_shift > 0 && sa_shift <= 0x17)
248 card->ext_csd.sa_timeout =
249 1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
250 }
251
244out: 252out:
245 kfree(ext_csd); 253 kfree(ext_csd);
246 254
@@ -408,12 +416,17 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
408 (host->caps & MMC_CAP_MMC_HIGHSPEED)) { 416 (host->caps & MMC_CAP_MMC_HIGHSPEED)) {
409 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 417 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
410 EXT_CSD_HS_TIMING, 1); 418 EXT_CSD_HS_TIMING, 1);
411 if (err) 419 if (err && err != -EBADMSG)
412 goto free_card; 420 goto free_card;
413 421
414 mmc_card_set_highspeed(card); 422 if (err) {
415 423 printk(KERN_WARNING "%s: switch to highspeed failed\n",
416 mmc_set_timing(card->host, MMC_TIMING_MMC_HS); 424 mmc_hostname(card->host));
425 err = 0;
426 } else {
427 mmc_card_set_highspeed(card);
428 mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
429 }
417 } 430 }
418 431
419 /* 432 /*
@@ -448,10 +461,17 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
448 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 461 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
449 EXT_CSD_BUS_WIDTH, ext_csd_bit); 462 EXT_CSD_BUS_WIDTH, ext_csd_bit);
450 463
451 if (err) 464 if (err && err != -EBADMSG)
452 goto free_card; 465 goto free_card;
453 466
454 mmc_set_bus_width(card->host, bus_width); 467 if (err) {
468 printk(KERN_WARNING "%s: switch to bus width %d "
469 "failed\n", mmc_hostname(card->host),
470 1 << bus_width);
471 err = 0;
472 } else {
473 mmc_set_bus_width(card->host, bus_width);
474 }
455 } 475 }
456 476
457 if (!oldcard) 477 if (!oldcard)
@@ -507,12 +527,10 @@ static void mmc_detect(struct mmc_host *host)
507 } 527 }
508} 528}
509 529
510#ifdef CONFIG_MMC_UNSAFE_RESUME
511
512/* 530/*
513 * Suspend callback from host. 531 * Suspend callback from host.
514 */ 532 */
515static void mmc_suspend(struct mmc_host *host) 533static int mmc_suspend(struct mmc_host *host)
516{ 534{
517 BUG_ON(!host); 535 BUG_ON(!host);
518 BUG_ON(!host->card); 536 BUG_ON(!host->card);
@@ -522,6 +540,8 @@ static void mmc_suspend(struct mmc_host *host)
522 mmc_deselect_cards(host); 540 mmc_deselect_cards(host);
523 host->card->state &= ~MMC_STATE_HIGHSPEED; 541 host->card->state &= ~MMC_STATE_HIGHSPEED;
524 mmc_release_host(host); 542 mmc_release_host(host);
543
544 return 0;
525} 545}
526 546
527/* 547/*
@@ -530,7 +550,7 @@ static void mmc_suspend(struct mmc_host *host)
530 * This function tries to determine if the same card is still present 550 * This function tries to determine if the same card is still present
531 * and, if so, restore all state to it. 551 * and, if so, restore all state to it.
532 */ 552 */
533static void mmc_resume(struct mmc_host *host) 553static int mmc_resume(struct mmc_host *host)
534{ 554{
535 int err; 555 int err;
536 556
@@ -541,30 +561,99 @@ static void mmc_resume(struct mmc_host *host)
541 err = mmc_init_card(host, host->ocr, host->card); 561 err = mmc_init_card(host, host->ocr, host->card);
542 mmc_release_host(host); 562 mmc_release_host(host);
543 563
544 if (err) { 564 return err;
545 mmc_remove(host); 565}
546 566
547 mmc_claim_host(host); 567static void mmc_power_restore(struct mmc_host *host)
548 mmc_detach_bus(host); 568{
549 mmc_release_host(host); 569 host->card->state &= ~MMC_STATE_HIGHSPEED;
570 mmc_claim_host(host);
571 mmc_init_card(host, host->ocr, host->card);
572 mmc_release_host(host);
573}
574
575static int mmc_sleep(struct mmc_host *host)
576{
577 struct mmc_card *card = host->card;
578 int err = -ENOSYS;
579
580 if (card && card->ext_csd.rev >= 3) {
581 err = mmc_card_sleepawake(host, 1);
582 if (err < 0)
 583			pr_debug("%s: Error %d while putting card into sleep\n",
584 mmc_hostname(host), err);
550 } 585 }
551 586
587 return err;
552} 588}
553 589
554#else 590static int mmc_awake(struct mmc_host *host)
591{
592 struct mmc_card *card = host->card;
593 int err = -ENOSYS;
594
595 if (card && card->ext_csd.rev >= 3) {
596 err = mmc_card_sleepawake(host, 0);
597 if (err < 0)
 598			pr_debug("%s: Error %d while awaking sleeping card\n",
599 mmc_hostname(host), err);
600 }
601
602 return err;
603}
555 604
556#define mmc_suspend NULL 605#ifdef CONFIG_MMC_UNSAFE_RESUME
557#define mmc_resume NULL
558 606
559#endif 607static const struct mmc_bus_ops mmc_ops = {
608 .awake = mmc_awake,
609 .sleep = mmc_sleep,
610 .remove = mmc_remove,
611 .detect = mmc_detect,
612 .suspend = mmc_suspend,
613 .resume = mmc_resume,
614 .power_restore = mmc_power_restore,
615};
616
617static void mmc_attach_bus_ops(struct mmc_host *host)
618{
619 mmc_attach_bus(host, &mmc_ops);
620}
621
622#else
560 623
561static const struct mmc_bus_ops mmc_ops = { 624static const struct mmc_bus_ops mmc_ops = {
625 .awake = mmc_awake,
626 .sleep = mmc_sleep,
627 .remove = mmc_remove,
628 .detect = mmc_detect,
629 .suspend = NULL,
630 .resume = NULL,
631 .power_restore = mmc_power_restore,
632};
633
634static const struct mmc_bus_ops mmc_ops_unsafe = {
635 .awake = mmc_awake,
636 .sleep = mmc_sleep,
562 .remove = mmc_remove, 637 .remove = mmc_remove,
563 .detect = mmc_detect, 638 .detect = mmc_detect,
564 .suspend = mmc_suspend, 639 .suspend = mmc_suspend,
565 .resume = mmc_resume, 640 .resume = mmc_resume,
641 .power_restore = mmc_power_restore,
566}; 642};
567 643
644static void mmc_attach_bus_ops(struct mmc_host *host)
645{
646 const struct mmc_bus_ops *bus_ops;
647
648 if (host->caps & MMC_CAP_NONREMOVABLE)
649 bus_ops = &mmc_ops_unsafe;
650 else
651 bus_ops = &mmc_ops;
652 mmc_attach_bus(host, bus_ops);
653}
654
655#endif
656
568/* 657/*
569 * Starting point for MMC card init. 658 * Starting point for MMC card init.
570 */ 659 */
@@ -575,7 +664,7 @@ int mmc_attach_mmc(struct mmc_host *host, u32 ocr)
575 BUG_ON(!host); 664 BUG_ON(!host);
576 WARN_ON(!host->claimed); 665 WARN_ON(!host->claimed);
577 666
578 mmc_attach_bus(host, &mmc_ops); 667 mmc_attach_bus_ops(host);
579 668
580 /* 669 /*
581 * We need to get OCR a different way for SPI. 670 * We need to get OCR a different way for SPI.
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 34ce2703d29a..d2cb5c634392 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -57,6 +57,42 @@ int mmc_deselect_cards(struct mmc_host *host)
57 return _mmc_select_card(host, NULL); 57 return _mmc_select_card(host, NULL);
58} 58}
59 59
60int mmc_card_sleepawake(struct mmc_host *host, int sleep)
61{
62 struct mmc_command cmd;
63 struct mmc_card *card = host->card;
64 int err;
65
66 if (sleep)
67 mmc_deselect_cards(host);
68
69 memset(&cmd, 0, sizeof(struct mmc_command));
70
71 cmd.opcode = MMC_SLEEP_AWAKE;
72 cmd.arg = card->rca << 16;
73 if (sleep)
74 cmd.arg |= 1 << 15;
75
76 cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
77 err = mmc_wait_for_cmd(host, &cmd, 0);
78 if (err)
79 return err;
80
81 /*
82 * If the host does not wait while the card signals busy, then we will
 83	 * have to wait the sleep/awake timeout. Note, we cannot use the
84 * SEND_STATUS command to poll the status because that command (and most
85 * others) is invalid while the card sleeps.
86 */
87 if (!(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
88 mmc_delay(DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000));
89
90 if (!sleep)
91 err = mmc_select_card(card);
92
93 return err;
94}
95
60int mmc_go_idle(struct mmc_host *host) 96int mmc_go_idle(struct mmc_host *host)
61{ 97{
62 int err; 98 int err;
@@ -354,6 +390,7 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value)
354{ 390{
355 int err; 391 int err;
356 struct mmc_command cmd; 392 struct mmc_command cmd;
393 u32 status;
357 394
358 BUG_ON(!card); 395 BUG_ON(!card);
359 BUG_ON(!card->host); 396 BUG_ON(!card->host);
@@ -371,6 +408,28 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value)
371 if (err) 408 if (err)
372 return err; 409 return err;
373 410
411 /* Must check status to be sure of no errors */
412 do {
413 err = mmc_send_status(card, &status);
414 if (err)
415 return err;
416 if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
417 break;
418 if (mmc_host_is_spi(card->host))
419 break;
420 } while (R1_CURRENT_STATE(status) == 7);
421
422 if (mmc_host_is_spi(card->host)) {
423 if (status & R1_SPI_ILLEGAL_COMMAND)
424 return -EBADMSG;
425 } else {
426 if (status & 0xFDFFA000)
427 printk(KERN_WARNING "%s: unexpected status %#x after "
 428				"switch\n", mmc_hostname(card->host), status);
429 if (status & R1_SWITCH_ERROR)
430 return -EBADMSG;
431 }
432
374 return 0; 433 return 0;
375} 434}
376 435
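
As a worked example of the sleep/awake timeout handling introduced above: the EXT_CSD parsing in mmc.c stores sa_timeout = 1 << EXT_CSD_S_A_TIMEOUT in units of 100 ns, and a host without MMC_CAP_WAIT_WHILE_BUSY then waits DIV_ROUND_UP(sa_timeout, 10000) milliseconds.

	/* e.g. EXT_CSD_S_A_TIMEOUT = 0x11 */
	sa_timeout = 1 << 0x11;			/* 131072 x 100 ns, ~13.1 ms */
	mmc_delay(DIV_ROUND_UP(131072, 10000));	/* mmc_delay(14) */

The largest value the parser accepts, 0x17, works out to 2^23 x 100 ns, roughly 840 ms of busy-waiting.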
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 17854bf7cf0d..653eb8e84178 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -25,6 +25,7 @@ int mmc_send_status(struct mmc_card *card, u32 *status);
25int mmc_send_cid(struct mmc_host *host, u32 *cid); 25int mmc_send_cid(struct mmc_host *host, u32 *cid);
26int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp); 26int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
27int mmc_spi_set_crc(struct mmc_host *host, int use_crc); 27int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
28int mmc_card_sleepawake(struct mmc_host *host, int sleep);
28 29
29#endif 30#endif
30 31
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 7ad646fe077e..10b2a4d20f5a 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -210,11 +210,11 @@ static int mmc_read_switch(struct mmc_card *card)
210 210
211 err = mmc_sd_switch(card, 0, 0, 1, status); 211 err = mmc_sd_switch(card, 0, 0, 1, status);
212 if (err) { 212 if (err) {
213 /* 213 /* If the host or the card can't do the switch,
214 * We all hosts that cannot perform the command 214 * fail more gracefully. */
215 * to fail more gracefully 215 if ((err != -EINVAL)
216 */ 216 && (err != -ENOSYS)
217 if (err != -EINVAL) 217 && (err != -EFAULT))
218 goto out; 218 goto out;
219 219
220 printk(KERN_WARNING "%s: problem reading switch " 220 printk(KERN_WARNING "%s: problem reading switch "
@@ -561,12 +561,10 @@ static void mmc_sd_detect(struct mmc_host *host)
561 } 561 }
562} 562}
563 563
564#ifdef CONFIG_MMC_UNSAFE_RESUME
565
566/* 564/*
567 * Suspend callback from host. 565 * Suspend callback from host.
568 */ 566 */
569static void mmc_sd_suspend(struct mmc_host *host) 567static int mmc_sd_suspend(struct mmc_host *host)
570{ 568{
571 BUG_ON(!host); 569 BUG_ON(!host);
572 BUG_ON(!host->card); 570 BUG_ON(!host->card);
@@ -576,6 +574,8 @@ static void mmc_sd_suspend(struct mmc_host *host)
576 mmc_deselect_cards(host); 574 mmc_deselect_cards(host);
577 host->card->state &= ~MMC_STATE_HIGHSPEED; 575 host->card->state &= ~MMC_STATE_HIGHSPEED;
578 mmc_release_host(host); 576 mmc_release_host(host);
577
578 return 0;
579} 579}
580 580
581/* 581/*
@@ -584,7 +584,7 @@ static void mmc_sd_suspend(struct mmc_host *host)
584 * This function tries to determine if the same card is still present 584 * This function tries to determine if the same card is still present
585 * and, if so, restore all state to it. 585 * and, if so, restore all state to it.
586 */ 586 */
587static void mmc_sd_resume(struct mmc_host *host) 587static int mmc_sd_resume(struct mmc_host *host)
588{ 588{
589 int err; 589 int err;
590 590
@@ -595,30 +595,63 @@ static void mmc_sd_resume(struct mmc_host *host)
595 err = mmc_sd_init_card(host, host->ocr, host->card); 595 err = mmc_sd_init_card(host, host->ocr, host->card);
596 mmc_release_host(host); 596 mmc_release_host(host);
597 597
598 if (err) { 598 return err;
599 mmc_sd_remove(host); 599}
600
601 mmc_claim_host(host);
602 mmc_detach_bus(host);
603 mmc_release_host(host);
604 }
605 600
601static void mmc_sd_power_restore(struct mmc_host *host)
602{
603 host->card->state &= ~MMC_STATE_HIGHSPEED;
604 mmc_claim_host(host);
605 mmc_sd_init_card(host, host->ocr, host->card);
606 mmc_release_host(host);
606} 607}
607 608
608#else 609#ifdef CONFIG_MMC_UNSAFE_RESUME
609 610
610#define mmc_sd_suspend NULL 611static const struct mmc_bus_ops mmc_sd_ops = {
611#define mmc_sd_resume NULL 612 .remove = mmc_sd_remove,
613 .detect = mmc_sd_detect,
614 .suspend = mmc_sd_suspend,
615 .resume = mmc_sd_resume,
616 .power_restore = mmc_sd_power_restore,
617};
612 618
613#endif 619static void mmc_sd_attach_bus_ops(struct mmc_host *host)
620{
621 mmc_attach_bus(host, &mmc_sd_ops);
622}
623
624#else
614 625
615static const struct mmc_bus_ops mmc_sd_ops = { 626static const struct mmc_bus_ops mmc_sd_ops = {
616 .remove = mmc_sd_remove, 627 .remove = mmc_sd_remove,
617 .detect = mmc_sd_detect, 628 .detect = mmc_sd_detect,
629 .suspend = NULL,
630 .resume = NULL,
631 .power_restore = mmc_sd_power_restore,
632};
633
634static const struct mmc_bus_ops mmc_sd_ops_unsafe = {
635 .remove = mmc_sd_remove,
636 .detect = mmc_sd_detect,
618 .suspend = mmc_sd_suspend, 637 .suspend = mmc_sd_suspend,
619 .resume = mmc_sd_resume, 638 .resume = mmc_sd_resume,
639 .power_restore = mmc_sd_power_restore,
620}; 640};
621 641
642static void mmc_sd_attach_bus_ops(struct mmc_host *host)
643{
644 const struct mmc_bus_ops *bus_ops;
645
646 if (host->caps & MMC_CAP_NONREMOVABLE)
647 bus_ops = &mmc_sd_ops_unsafe;
648 else
649 bus_ops = &mmc_sd_ops;
650 mmc_attach_bus(host, bus_ops);
651}
652
653#endif
654
622/* 655/*
623 * Starting point for SD card init. 656 * Starting point for SD card init.
624 */ 657 */
@@ -629,7 +662,7 @@ int mmc_attach_sd(struct mmc_host *host, u32 ocr)
629 BUG_ON(!host); 662 BUG_ON(!host);
630 WARN_ON(!host->claimed); 663 WARN_ON(!host->claimed);
631 664
632 mmc_attach_bus(host, &mmc_sd_ops); 665 mmc_sd_attach_bus_ops(host);
633 666
634 /* 667 /*
635 * We need to get OCR a different way for SPI. 668 * We need to get OCR a different way for SPI.
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index fb99ccff9080..cdb845b68ab5 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -165,6 +165,29 @@ static int sdio_enable_wide(struct mmc_card *card)
165} 165}
166 166
167/* 167/*
168 * If desired, disconnect the pull-up resistor on CD/DAT[3] (pin 1)
169 * of the card. This may be required on certain setups of boards,
 170 * controllers and embedded SDIO devices that do not need the card's
171 * pull-up. As a result, card detection is disabled and power is saved.
172 */
173static int sdio_disable_cd(struct mmc_card *card)
174{
175 int ret;
176 u8 ctrl;
177
178 if (!card->cccr.disable_cd)
179 return 0;
180
181 ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_IF, 0, &ctrl);
182 if (ret)
183 return ret;
184
185 ctrl |= SDIO_BUS_CD_DISABLE;
186
187 return mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
188}
189
190/*
168 * Test if the card supports high-speed mode and, if so, switch to it. 191 * Test if the card supports high-speed mode and, if so, switch to it.
169 */ 192 */
170static int sdio_enable_hs(struct mmc_card *card) 193static int sdio_enable_hs(struct mmc_card *card)
@@ -195,6 +218,135 @@ static int sdio_enable_hs(struct mmc_card *card)
195} 218}
196 219
197/* 220/*
221 * Handle the detection and initialisation of a card.
222 *
223 * In the case of a resume, "oldcard" will contain the card
224 * we're trying to reinitialise.
225 */
226static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
227 struct mmc_card *oldcard)
228{
229 struct mmc_card *card;
230 int err;
231
232 BUG_ON(!host);
233 WARN_ON(!host->claimed);
234
235 /*
236 * Inform the card of the voltage
237 */
238 err = mmc_send_io_op_cond(host, host->ocr, &ocr);
239 if (err)
240 goto err;
241
242 /*
243 * For SPI, enable CRC as appropriate.
244 */
245 if (mmc_host_is_spi(host)) {
246 err = mmc_spi_set_crc(host, use_spi_crc);
247 if (err)
248 goto err;
249 }
250
251 /*
252 * Allocate card structure.
253 */
254 card = mmc_alloc_card(host, NULL);
255 if (IS_ERR(card)) {
256 err = PTR_ERR(card);
257 goto err;
258 }
259
260 card->type = MMC_TYPE_SDIO;
261
262 /*
263 * For native busses: set card RCA and quit open drain mode.
264 */
265 if (!mmc_host_is_spi(host)) {
266 err = mmc_send_relative_addr(host, &card->rca);
267 if (err)
268 goto remove;
269
270 mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
271 }
272
273 /*
274 * Select card, as all following commands rely on that.
275 */
276 if (!mmc_host_is_spi(host)) {
277 err = mmc_select_card(card);
278 if (err)
279 goto remove;
280 }
281
282 /*
283 * Read the common registers.
284 */
285 err = sdio_read_cccr(card);
286 if (err)
287 goto remove;
288
289 /*
290 * Read the common CIS tuples.
291 */
292 err = sdio_read_common_cis(card);
293 if (err)
294 goto remove;
295
296 if (oldcard) {
297 int same = (card->cis.vendor == oldcard->cis.vendor &&
298 card->cis.device == oldcard->cis.device);
299 mmc_remove_card(card);
300 if (!same) {
301 err = -ENOENT;
302 goto err;
303 }
304 card = oldcard;
305 return 0;
306 }
307
308 /*
309 * Switch to high-speed (if supported).
310 */
311 err = sdio_enable_hs(card);
312 if (err)
313 goto remove;
314
315 /*
316 * Change to the card's maximum speed.
317 */
318 if (mmc_card_highspeed(card)) {
319 /*
320 * The SDIO specification doesn't mention how
321 * the CIS transfer speed register relates to
322 * high-speed, but it seems that 50 MHz is
323 * mandatory.
324 */
325 mmc_set_clock(host, 50000000);
326 } else {
327 mmc_set_clock(host, card->cis.max_dtr);
328 }
329
330 /*
331 * Switch to wider bus (if supported).
332 */
333 err = sdio_enable_wide(card);
334 if (err)
335 goto remove;
336
337 if (!oldcard)
338 host->card = card;
339 return 0;
340
341remove:
342 if (!oldcard)
343 mmc_remove_card(card);
344
345err:
346 return err;
347}
348
349/*
198 * Host is being removed. Free up the current card. 350 * Host is being removed. Free up the current card.
199 */ 351 */
200static void mmc_sdio_remove(struct mmc_host *host) 352static void mmc_sdio_remove(struct mmc_host *host)
@@ -243,10 +395,77 @@ static void mmc_sdio_detect(struct mmc_host *host)
243 } 395 }
244} 396}
245 397
398/*
399 * SDIO suspend. We need to suspend all functions separately.
400 * Therefore all registered functions must have drivers with suspend
401 * and resume methods. Failing that we simply remove the whole card.
402 */
403static int mmc_sdio_suspend(struct mmc_host *host)
404{
405 int i, err = 0;
406
407 for (i = 0; i < host->card->sdio_funcs; i++) {
408 struct sdio_func *func = host->card->sdio_func[i];
409 if (func && sdio_func_present(func) && func->dev.driver) {
410 const struct dev_pm_ops *pmops = func->dev.driver->pm;
411 if (!pmops || !pmops->suspend || !pmops->resume) {
412 /* force removal of entire card in that case */
413 err = -ENOSYS;
414 } else
415 err = pmops->suspend(&func->dev);
416 if (err)
417 break;
418 }
419 }
420 while (err && --i >= 0) {
421 struct sdio_func *func = host->card->sdio_func[i];
422 if (func && sdio_func_present(func) && func->dev.driver) {
423 const struct dev_pm_ops *pmops = func->dev.driver->pm;
424 pmops->resume(&func->dev);
425 }
426 }
427
428 return err;
429}
430
431static int mmc_sdio_resume(struct mmc_host *host)
432{
433 int i, err;
434
435 BUG_ON(!host);
436 BUG_ON(!host->card);
437
438 /* Basic card reinitialization. */
439 mmc_claim_host(host);
440 err = mmc_sdio_init_card(host, host->ocr, host->card);
441 mmc_release_host(host);
442
443 /*
444 * If the card looked to be the same as before suspending, then
445 * we proceed to resume all card functions. If one of them returns
446 * an error then we simply return that error to the core and the
447 * card will be redetected as new. It is the responsibility of
448 * the function driver to perform further tests with the extra
449 * knowledge it has of the card to confirm the card is indeed the
450 * same as before suspending (same MAC address for network cards,
451 * etc.) and return an error otherwise.
452 */
453 for (i = 0; !err && i < host->card->sdio_funcs; i++) {
454 struct sdio_func *func = host->card->sdio_func[i];
455 if (func && sdio_func_present(func) && func->dev.driver) {
456 const struct dev_pm_ops *pmops = func->dev.driver->pm;
457 err = pmops->resume(&func->dev);
458 }
459 }
460
461 return err;
462}
246 463
247static const struct mmc_bus_ops mmc_sdio_ops = { 464static const struct mmc_bus_ops mmc_sdio_ops = {
248 .remove = mmc_sdio_remove, 465 .remove = mmc_sdio_remove,
249 .detect = mmc_sdio_detect, 466 .detect = mmc_sdio_detect,
467 .suspend = mmc_sdio_suspend,
468 .resume = mmc_sdio_resume,
250}; 469};
251 470
252 471
@@ -275,13 +494,6 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
275 ocr &= ~0x7F; 494 ocr &= ~0x7F;
276 } 495 }
277 496
278 if (ocr & MMC_VDD_165_195) {
279 printk(KERN_WARNING "%s: SDIO card claims to support the "
280 "incompletely defined 'low voltage range'. This "
281 "will be ignored.\n", mmc_hostname(host));
282 ocr &= ~MMC_VDD_165_195;
283 }
284
285 host->ocr = mmc_select_voltage(host, ocr); 497 host->ocr = mmc_select_voltage(host, ocr);
286 498
287 /* 499 /*
@@ -293,101 +505,23 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
293 } 505 }
294 506
295 /* 507 /*
296 * Inform the card of the voltage 508 * Detect and init the card.
297 */ 509 */
298 err = mmc_send_io_op_cond(host, host->ocr, &ocr); 510 err = mmc_sdio_init_card(host, host->ocr, NULL);
299 if (err) 511 if (err)
300 goto err; 512 goto err;
301 513 card = host->card;
302 /*
303 * For SPI, enable CRC as appropriate.
304 */
305 if (mmc_host_is_spi(host)) {
306 err = mmc_spi_set_crc(host, use_spi_crc);
307 if (err)
308 goto err;
309 }
310 514
311 /* 515 /*
312 * The number of functions on the card is encoded inside 516 * The number of functions on the card is encoded inside
313 * the ocr. 517 * the ocr.
314 */ 518 */
315 funcs = (ocr & 0x70000000) >> 28; 519 card->sdio_funcs = funcs = (ocr & 0x70000000) >> 28;
316
317 /*
318 * Allocate card structure.
319 */
320 card = mmc_alloc_card(host, NULL);
321 if (IS_ERR(card)) {
322 err = PTR_ERR(card);
323 goto err;
324 }
325
326 card->type = MMC_TYPE_SDIO;
327 card->sdio_funcs = funcs;
328
329 host->card = card;
330
331 /*
332 * For native busses: set card RCA and quit open drain mode.
333 */
334 if (!mmc_host_is_spi(host)) {
335 err = mmc_send_relative_addr(host, &card->rca);
336 if (err)
337 goto remove;
338
339 mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
340 }
341
342 /*
343 * Select card, as all following commands rely on that.
344 */
345 if (!mmc_host_is_spi(host)) {
346 err = mmc_select_card(card);
347 if (err)
348 goto remove;
349 }
350 520
351 /* 521 /*
352 * Read the common registers. 522 * If needed, disconnect card detection pull-up resistor.
353 */ 523 */
354 err = sdio_read_cccr(card); 524 err = sdio_disable_cd(card);
355 if (err)
356 goto remove;
357
358 /*
359 * Read the common CIS tuples.
360 */
361 err = sdio_read_common_cis(card);
362 if (err)
363 goto remove;
364
365 /*
366 * Switch to high-speed (if supported).
367 */
368 err = sdio_enable_hs(card);
369 if (err)
370 goto remove;
371
372 /*
373 * Change to the card's maximum speed.
374 */
375 if (mmc_card_highspeed(card)) {
376 /*
377 * The SDIO specification doesn't mention how
378 * the CIS transfer speed register relates to
379 * high-speed, but it seems that 50 MHz is
380 * mandatory.
381 */
382 mmc_set_clock(host, 50000000);
383 } else {
384 mmc_set_clock(host, card->cis.max_dtr);
385 }
386
387 /*
388 * Switch to wider bus (if supported).
389 */
390 err = sdio_enable_wide(card);
391 if (err) 525 if (err)
392 goto remove; 526 goto remove;
393 527
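
mmc_sdio_suspend() above only keeps the card across a suspend if every bound function driver exposes both suspend and resume through its dev_pm_ops; otherwise it returns -ENOSYS and the core removes the card, redetecting it on resume. A minimal sketch of what a function driver has to provide (hypothetical foo_* names, not taken from this patch):

	static int foo_sdio_suspend(struct device *dev)
	{
		/*
		 * dev_to_sdio_func(dev) yields the struct sdio_func if needed.
		 * Quiesce the function and remember whatever is required to
		 * revalidate it on resume.
		 */
		return 0;
	}

	static int foo_sdio_resume(struct device *dev)
	{
		/*
		 * Per the comment in mmc_sdio_resume(), confirm this really is
		 * the same device (e.g. compare a MAC address) and restore its
		 * state; returning an error makes the core redetect the card.
		 */
		return 0;
	}

	static const struct dev_pm_ops foo_sdio_pm_ops = {
		.suspend = foo_sdio_suspend,
		.resume	 = foo_sdio_resume,
	};

	static struct sdio_driver foo_sdio_driver = {
		.name		= "foo_sdio",
		.id_table	= foo_sdio_ids,
		.probe		= foo_sdio_probe,
		.remove		= foo_sdio_remove,
		.drv		= {
			.pm = &foo_sdio_pm_ops,
		},
	};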
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 46284b527397..d37464e296a5 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -20,9 +20,6 @@
20#include "sdio_cis.h" 20#include "sdio_cis.h"
21#include "sdio_bus.h" 21#include "sdio_bus.h"
22 22
23#define dev_to_sdio_func(d) container_of(d, struct sdio_func, dev)
24#define to_sdio_driver(d) container_of(d, struct sdio_driver, drv)
25
26/* show configuration fields */ 23/* show configuration fields */
27#define sdio_config_attr(field, format_string) \ 24#define sdio_config_attr(field, format_string) \
28static ssize_t \ 25static ssize_t \
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index 963f2937c5e3..6636354b48ce 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -40,7 +40,7 @@ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
40 nr_strings++; 40 nr_strings++;
41 } 41 }
42 42
43 if (buf[i-1] != '\0') { 43 if (nr_strings < 4) {
44 printk(KERN_WARNING "SDIO: ignoring broken CISTPL_VERS_1\n"); 44 printk(KERN_WARNING "SDIO: ignoring broken CISTPL_VERS_1\n");
45 return 0; 45 return 0;
46 } 46 }
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index f61fc2d4cd0a..f9aa8a7deffa 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -624,7 +624,7 @@ void sdio_f0_writeb(struct sdio_func *func, unsigned char b, unsigned int addr,
624 624
625 BUG_ON(!func); 625 BUG_ON(!func);
626 626
627 if (addr < 0xF0 || addr > 0xFF) { 627 if ((addr < 0xF0 || addr > 0xFF) && (!mmc_card_lenient_fn0(func->card))) {
628 if (err_ret) 628 if (err_ret)
629 *err_ret = -EINVAL; 629 *err_ret = -EINVAL;
630 return; 630 return;
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 891ef18bd77b..7cb057f3f883 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -132,11 +132,11 @@ config MMC_OMAP
132 132
133config MMC_OMAP_HS 133config MMC_OMAP_HS
134 tristate "TI OMAP High Speed Multimedia Card Interface support" 134 tristate "TI OMAP High Speed Multimedia Card Interface support"
135 depends on ARCH_OMAP2430 || ARCH_OMAP3 135 depends on ARCH_OMAP2430 || ARCH_OMAP3 || ARCH_OMAP4
136 help 136 help
137 This selects the TI OMAP High Speed Multimedia card Interface. 137 This selects the TI OMAP High Speed Multimedia card Interface.
138 If you have an OMAP2430 or OMAP3 board with a Multimedia Card slot, 138 If you have an OMAP2430 or OMAP3 board or OMAP4 board with a
139 say Y or M here. 139 Multimedia Card slot, say Y or M here.
140 140
141 If unsure, say N. 141 If unsure, say N.
142 142
@@ -160,6 +160,12 @@ config MMC_AU1X
160 160
161 If unsure, say N. 161 If unsure, say N.
162 162
163choice
164 prompt "Atmel SD/MMC Driver"
165 default MMC_ATMELMCI if AVR32
166 help
167 Choose which driver to use for the Atmel MCI Silicon
168
163config MMC_AT91 169config MMC_AT91
164 tristate "AT91 SD/MMC Card Interface support" 170 tristate "AT91 SD/MMC Card Interface support"
165 depends on ARCH_AT91 171 depends on ARCH_AT91
@@ -170,17 +176,19 @@ config MMC_AT91
170 176
171config MMC_ATMELMCI 177config MMC_ATMELMCI
172 tristate "Atmel Multimedia Card Interface support" 178 tristate "Atmel Multimedia Card Interface support"
173 depends on AVR32 179 depends on AVR32 || ARCH_AT91
174 help 180 help
175 This selects the Atmel Multimedia Card Interface driver. If 181 This selects the Atmel Multimedia Card Interface driver. If
176 you have an AT32 (AVR32) platform with a Multimedia Card 182 you have an AT32 (AVR32) or AT91 platform with a Multimedia
177 slot, say Y or M here. 183 Card slot, say Y or M here.
178 184
179 If unsure, say N. 185 If unsure, say N.
180 186
187endchoice
188
181config MMC_ATMELMCI_DMA 189config MMC_ATMELMCI_DMA
182 bool "Atmel MCI DMA support (EXPERIMENTAL)" 190 bool "Atmel MCI DMA support (EXPERIMENTAL)"
183 depends on MMC_ATMELMCI && DMA_ENGINE && EXPERIMENTAL 191 depends on MMC_ATMELMCI && AVR32 && DMA_ENGINE && EXPERIMENTAL
184 help 192 help
185 Say Y here to have the Atmel MCI driver use a DMA engine to 193 Say Y here to have the Atmel MCI driver use a DMA engine to
186 do data transfers and thus increase the throughput and 194 do data transfers and thus increase the throughput and
@@ -199,6 +207,13 @@ config MMC_IMX
199 207
200 If unsure, say N. 208 If unsure, say N.
201 209
210config MMC_MSM7X00A
211 tristate "Qualcomm MSM 7X00A SDCC Controller Support"
212 depends on MMC && ARCH_MSM
213 help
214 This provides support for the SD/MMC cell found in the
215 MSM 7X00A controllers from Qualcomm.
216
202config MMC_MXC 217config MMC_MXC
203 tristate "Freescale i.MX2/3 Multimedia Card Interface support" 218 tristate "Freescale i.MX2/3 Multimedia Card Interface support"
204 depends on ARCH_MXC 219 depends on ARCH_MXC
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index cf153f628457..abcb0400e06d 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_MMC_OMAP_HS) += omap_hsmmc.o
23obj-$(CONFIG_MMC_AT91) += at91_mci.o 23obj-$(CONFIG_MMC_AT91) += at91_mci.o
24obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o 24obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o
25obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o 25obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o
26obj-$(CONFIG_MMC_MSM7X00A) += msm_sdcc.o
26obj-$(CONFIG_MMC_MVSDIO) += mvsdio.o 27obj-$(CONFIG_MMC_MVSDIO) += mvsdio.o
27obj-$(CONFIG_MMC_SPI) += mmc_spi.o 28obj-$(CONFIG_MMC_SPI) += mmc_spi.o
28ifeq ($(CONFIG_OF),y) 29ifeq ($(CONFIG_OF),y)
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 7b603e4b41db..fc25586b7ee1 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -30,6 +30,7 @@
30#include <asm/io.h> 30#include <asm/io.h>
31#include <asm/unaligned.h> 31#include <asm/unaligned.h>
32 32
33#include <mach/cpu.h>
33#include <mach/board.h> 34#include <mach/board.h>
34 35
35#include "atmel-mci-regs.h" 36#include "atmel-mci-regs.h"
@@ -210,6 +211,18 @@ struct atmel_mci_slot {
210 set_bit(event, &host->pending_events) 211 set_bit(event, &host->pending_events)
211 212
212/* 213/*
214 * Enable or disable features/registers based on
215 * whether the processor supports them
216 */
217static bool mci_has_rwproof(void)
218{
219 if (cpu_is_at91sam9261() || cpu_is_at91rm9200())
220 return false;
221 else
222 return true;
223}
224
225/*
213 * The debugfs stuff below is mostly optimized away when 226 * The debugfs stuff below is mostly optimized away when
214 * CONFIG_DEBUG_FS is not set. 227 * CONFIG_DEBUG_FS is not set.
215 */ 228 */
@@ -276,8 +289,13 @@ static void atmci_show_status_reg(struct seq_file *s,
276 [3] = "BLKE", 289 [3] = "BLKE",
277 [4] = "DTIP", 290 [4] = "DTIP",
278 [5] = "NOTBUSY", 291 [5] = "NOTBUSY",
292 [6] = "ENDRX",
293 [7] = "ENDTX",
279 [8] = "SDIOIRQA", 294 [8] = "SDIOIRQA",
280 [9] = "SDIOIRQB", 295 [9] = "SDIOIRQB",
296 [12] = "SDIOWAIT",
297 [14] = "RXBUFF",
298 [15] = "TXBUFE",
281 [16] = "RINDE", 299 [16] = "RINDE",
282 [17] = "RDIRE", 300 [17] = "RDIRE",
283 [18] = "RCRCE", 301 [18] = "RCRCE",
@@ -285,6 +303,11 @@ static void atmci_show_status_reg(struct seq_file *s,
285 [20] = "RTOE", 303 [20] = "RTOE",
286 [21] = "DCRCE", 304 [21] = "DCRCE",
287 [22] = "DTOE", 305 [22] = "DTOE",
306 [23] = "CSTOE",
307 [24] = "BLKOVRE",
308 [25] = "DMADONE",
309 [26] = "FIFOEMPTY",
310 [27] = "XFRDONE",
288 [30] = "OVRE", 311 [30] = "OVRE",
289 [31] = "UNRE", 312 [31] = "UNRE",
290 }; 313 };
@@ -576,6 +599,7 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
576 struct scatterlist *sg; 599 struct scatterlist *sg;
577 unsigned int i; 600 unsigned int i;
578 enum dma_data_direction direction; 601 enum dma_data_direction direction;
602 unsigned int sglen;
579 603
580 /* 604 /*
581 * We don't do DMA on "complex" transfers, i.e. with 605 * We don't do DMA on "complex" transfers, i.e. with
@@ -605,11 +629,14 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
605 else 629 else
606 direction = DMA_TO_DEVICE; 630 direction = DMA_TO_DEVICE;
607 631
632 sglen = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, direction);
633 if (sglen != data->sg_len)
634 goto unmap_exit;
608 desc = chan->device->device_prep_slave_sg(chan, 635 desc = chan->device->device_prep_slave_sg(chan,
609 data->sg, data->sg_len, direction, 636 data->sg, data->sg_len, direction,
610 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 637 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
611 if (!desc) 638 if (!desc)
612 return -ENOMEM; 639 goto unmap_exit;
613 640
614 host->dma.data_desc = desc; 641 host->dma.data_desc = desc;
615 desc->callback = atmci_dma_complete; 642 desc->callback = atmci_dma_complete;
@@ -620,6 +647,9 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
620 chan->device->device_issue_pending(chan); 647 chan->device->device_issue_pending(chan);
621 648
622 return 0; 649 return 0;
650unmap_exit:
651 dma_unmap_sg(&host->pdev->dev, data->sg, sglen, direction);
652 return -ENOMEM;
623} 653}
624 654
625#else /* CONFIG_MMC_ATMELMCI_DMA */ 655#else /* CONFIG_MMC_ATMELMCI_DMA */
@@ -849,13 +879,15 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
849 clkdiv = 255; 879 clkdiv = 255;
850 } 880 }
851 881
882 host->mode_reg = MCI_MR_CLKDIV(clkdiv);
883
852 /* 884 /*
853 * WRPROOF and RDPROOF prevent overruns/underruns by 885 * WRPROOF and RDPROOF prevent overruns/underruns by
854 * stopping the clock when the FIFO is full/empty. 886 * stopping the clock when the FIFO is full/empty.
855 * This state is not expected to last for long. 887 * This state is not expected to last for long.
856 */ 888 */
857 host->mode_reg = MCI_MR_CLKDIV(clkdiv) | MCI_MR_WRPROOF 889 if (mci_has_rwproof())
858 | MCI_MR_RDPROOF; 890 host->mode_reg |= (MCI_MR_WRPROOF | MCI_MR_RDPROOF);
859 891
860 if (list_empty(&host->queue)) 892 if (list_empty(&host->queue))
861 mci_writel(host, MR, host->mode_reg); 893 mci_writel(host, MR, host->mode_reg);
@@ -1648,8 +1680,10 @@ static int __init atmci_probe(struct platform_device *pdev)
1648 nr_slots++; 1680 nr_slots++;
1649 } 1681 }
1650 1682
1651 if (!nr_slots) 1683 if (!nr_slots) {
1684 dev_err(&pdev->dev, "init failed: no slot defined\n");
1652 goto err_init_slot; 1685 goto err_init_slot;
1686 }
1653 1687
1654 dev_info(&pdev->dev, 1688 dev_info(&pdev->dev,
1655 "Atmel MCI controller at 0x%08lx irq %d, %u slots\n", 1689 "Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index a461017ce5ce..d55fe4fb7935 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1562,3 +1562,4 @@ MODULE_AUTHOR("Mike Lavender, David Brownell, "
1562 "Hans-Peter Nilsson, Jan Nikitenko"); 1562 "Hans-Peter Nilsson, Jan Nikitenko");
1563MODULE_DESCRIPTION("SPI SD/MMC host driver"); 1563MODULE_DESCRIPTION("SPI SD/MMC host driver");
1564MODULE_LICENSE("GPL"); 1564MODULE_LICENSE("GPL");
1565MODULE_ALIAS("spi:mmc_spi");
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
new file mode 100644
index 000000000000..dba4600bcdb4
--- /dev/null
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -0,0 +1,1287 @@
1/*
2 * linux/drivers/mmc/host/msm_sdcc.c - Qualcomm MSM 7X00A SDCC Driver
3 *
4 * Copyright (C) 2007 Google Inc,
5 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * Based on mmci.c
12 *
13 * Author: San Mehat (san@android.com)
14 *
15 */
16
17#include <linux/module.h>
18#include <linux/moduleparam.h>
19#include <linux/init.h>
20#include <linux/ioport.h>
21#include <linux/device.h>
22#include <linux/interrupt.h>
23#include <linux/delay.h>
24#include <linux/err.h>
25#include <linux/highmem.h>
26#include <linux/log2.h>
27#include <linux/mmc/host.h>
28#include <linux/mmc/card.h>
29#include <linux/clk.h>
30#include <linux/scatterlist.h>
31#include <linux/platform_device.h>
32#include <linux/dma-mapping.h>
33#include <linux/debugfs.h>
34#include <linux/io.h>
35#include <linux/memory.h>
36
37#include <asm/cacheflush.h>
38#include <asm/div64.h>
39#include <asm/sizes.h>
40
41#include <asm/mach/mmc.h>
42#include <mach/msm_iomap.h>
43#include <mach/dma.h>
44#include <mach/htc_pwrsink.h>
45
46#include "msm_sdcc.h"
47
48#define DRIVER_NAME "msm-sdcc"
49
50static unsigned int msmsdcc_fmin = 144000;
51static unsigned int msmsdcc_fmax = 50000000;
52static unsigned int msmsdcc_4bit = 1;
53static unsigned int msmsdcc_pwrsave = 1;
54static unsigned int msmsdcc_piopoll = 1;
55static unsigned int msmsdcc_sdioirq;
56
57#define PIO_SPINMAX 30
58#define CMD_SPINMAX 20
59
60static void
61msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd,
62 u32 c);
63
64static void
65msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)
66{
67 writel(0, host->base + MMCICOMMAND);
68
69 BUG_ON(host->curr.data);
70
71 host->curr.mrq = NULL;
72 host->curr.cmd = NULL;
73
74 if (mrq->data)
75 mrq->data->bytes_xfered = host->curr.data_xfered;
76 if (mrq->cmd->error == -ETIMEDOUT)
77 mdelay(5);
78
79 /*
80 * Need to drop the host lock here; mmc_request_done may call
81 * back into the driver...
82 */
83 spin_unlock(&host->lock);
84 mmc_request_done(host->mmc, mrq);
85 spin_lock(&host->lock);
86}
87
88static void
89msmsdcc_stop_data(struct msmsdcc_host *host)
90{
91 writel(0, host->base + MMCIDATACTRL);
92 host->curr.data = NULL;
93 host->curr.got_dataend = host->curr.got_datablkend = 0;
94}
95
96uint32_t msmsdcc_fifo_addr(struct msmsdcc_host *host)
97{
98 switch (host->pdev_id) {
99 case 1:
100 return MSM_SDC1_PHYS + MMCIFIFO;
101 case 2:
102 return MSM_SDC2_PHYS + MMCIFIFO;
103 case 3:
104 return MSM_SDC3_PHYS + MMCIFIFO;
105 case 4:
106 return MSM_SDC4_PHYS + MMCIFIFO;
107 }
108 BUG();
109 return 0;
110}
111
112static void
113msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
114 unsigned int result,
115 struct msm_dmov_errdata *err)
116{
117 struct msmsdcc_dma_data *dma_data =
118 container_of(cmd, struct msmsdcc_dma_data, hdr);
119 struct msmsdcc_host *host = dma_data->host;
120 unsigned long flags;
121 struct mmc_request *mrq;
122
123 spin_lock_irqsave(&host->lock, flags);
124 mrq = host->curr.mrq;
125 BUG_ON(!mrq);
126
127 if (!(result & DMOV_RSLT_VALID)) {
128 pr_err("msmsdcc: Invalid DataMover result\n");
129 goto out;
130 }
131
132 if (result & DMOV_RSLT_DONE) {
133 host->curr.data_xfered = host->curr.xfer_size;
134 } else {
135 /* Error or flush */
136 if (result & DMOV_RSLT_ERROR)
137 pr_err("%s: DMA error (0x%.8x)\n",
138 mmc_hostname(host->mmc), result);
139 if (result & DMOV_RSLT_FLUSH)
140 pr_err("%s: DMA channel flushed (0x%.8x)\n",
141 mmc_hostname(host->mmc), result);
142 if (err)
143 pr_err("Flush data: %.8x %.8x %.8x %.8x %.8x %.8x\n",
144 err->flush[0], err->flush[1], err->flush[2],
145 err->flush[3], err->flush[4], err->flush[5]);
146 if (!mrq->data->error)
147 mrq->data->error = -EIO;
148 }
149 host->dma.busy = 0;
150 dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
151 host->dma.dir);
152
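	/*
	 * For user-space pages, flush the D-cache so CPU mappings stay
	 * coherent with the DMA'd buffer.
	 */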
153 if (host->curr.user_pages) {
154 struct scatterlist *sg = host->dma.sg;
155 int i;
156
157 for (i = 0; i < host->dma.num_ents; i++)
158 flush_dcache_page(sg_page(sg++));
159 }
160
161 host->dma.sg = NULL;
162
163 if ((host->curr.got_dataend && host->curr.got_datablkend)
164 || mrq->data->error) {
165
166 /*
167 * If we've already gotten our DATAEND / DATABLKEND
168 * for this request, then complete it through here.
169 */
170 msmsdcc_stop_data(host);
171
172 if (!mrq->data->error)
173 host->curr.data_xfered = host->curr.xfer_size;
174 if (!mrq->data->stop || mrq->cmd->error) {
175 writel(0, host->base + MMCICOMMAND);
176 host->curr.mrq = NULL;
177 host->curr.cmd = NULL;
178 mrq->data->bytes_xfered = host->curr.data_xfered;
179
180 spin_unlock_irqrestore(&host->lock, flags);
181 mmc_request_done(host->mmc, mrq);
182 return;
183 } else
184 msmsdcc_start_command(host, mrq->data->stop, 0);
185 }
186
187out:
188 spin_unlock_irqrestore(&host->lock, flags);
189 return;
190}
191
192static int validate_dma(struct msmsdcc_host *host, struct mmc_data *data)
193{
194 if (host->dma.channel == -1)
195 return -ENOENT;
196
197 if ((data->blksz * data->blocks) < MCI_FIFOSIZE)
198 return -EINVAL;
199 if ((data->blksz * data->blocks) % MCI_FIFOSIZE)
200 return -EINVAL;
201 return 0;
202}
203
204static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
205{
206 struct msmsdcc_nc_dmadata *nc;
207 dmov_box *box;
208 uint32_t rows;
209 uint32_t crci;
210 unsigned int n;
211 int i, rc;
212 struct scatterlist *sg = data->sg;
213
214 rc = validate_dma(host, data);
215 if (rc)
216 return rc;
217
218 host->dma.sg = data->sg;
219 host->dma.num_ents = data->sg_len;
220
221 nc = host->dma.nc;
222
223 switch (host->pdev_id) {
224 case 1:
225 crci = MSMSDCC_CRCI_SDC1;
226 break;
227 case 2:
228 crci = MSMSDCC_CRCI_SDC2;
229 break;
230 case 3:
231 crci = MSMSDCC_CRCI_SDC3;
232 break;
233 case 4:
234 crci = MSMSDCC_CRCI_SDC4;
235 break;
236 default:
237 host->dma.sg = NULL;
238 host->dma.num_ents = 0;
239 return -ENOENT;
240 }
241
242 if (data->flags & MMC_DATA_READ)
243 host->dma.dir = DMA_FROM_DEVICE;
244 else
245 host->dma.dir = DMA_TO_DEVICE;
246
247 host->curr.user_pages = 0;
248
249 n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
250 host->dma.num_ents, host->dma.dir);
251
252 if (n != host->dma.num_ents) {
253 pr_err("%s: Unable to map in all sg elements\n",
254 mmc_hostname(host->mmc));
255 host->dma.sg = NULL;
256 host->dma.num_ents = 0;
257 return -ENOMEM;
258 }
259
260 box = &nc->cmd[0];
261 for (i = 0; i < host->dma.num_ents; i++) {
262 box->cmd = CMD_MODE_BOX;
263
264 if (i == (host->dma.num_ents - 1))
265 box->cmd |= CMD_LC;
266 rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ?
267 (sg_dma_len(sg) / MCI_FIFOSIZE) + 1 :
268 (sg_dma_len(sg) / MCI_FIFOSIZE) ;
269
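		/*
		 * Each box moves FIFO-sized rows; the same row count is packed
		 * into both 16-bit halves of num_rows, hence ((1 << 16) + 1).
		 */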
270 if (data->flags & MMC_DATA_READ) {
271 box->src_row_addr = msmsdcc_fifo_addr(host);
272 box->dst_row_addr = sg_dma_address(sg);
273
274 box->src_dst_len = (MCI_FIFOSIZE << 16) |
275 (MCI_FIFOSIZE);
276 box->row_offset = MCI_FIFOSIZE;
277
278 box->num_rows = rows * ((1 << 16) + 1);
279 box->cmd |= CMD_SRC_CRCI(crci);
280 } else {
281 box->src_row_addr = sg_dma_address(sg);
282 box->dst_row_addr = msmsdcc_fifo_addr(host);
283
284 box->src_dst_len = (MCI_FIFOSIZE << 16) |
285 (MCI_FIFOSIZE);
286 box->row_offset = (MCI_FIFOSIZE << 16);
287
288 box->num_rows = rows * ((1 << 16) + 1);
289 box->cmd |= CMD_DST_CRCI(crci);
290 }
291 box++;
292 sg++;
293 }
294
 295	/* The location of the command block must be 64-bit aligned */
296 BUG_ON(host->dma.cmd_busaddr & 0x07);
297
298 nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
299 host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
300 DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
301 host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
302
303 return 0;
304}
305
306static void
307msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data)
308{
309 unsigned int datactrl, timeout;
310 unsigned long long clks;
311 void __iomem *base = host->base;
312 unsigned int pio_irqmask = 0;
313
314 host->curr.data = data;
315 host->curr.xfer_size = data->blksz * data->blocks;
316 host->curr.xfer_remain = host->curr.xfer_size;
317 host->curr.data_xfered = 0;
318 host->curr.got_dataend = 0;
319 host->curr.got_datablkend = 0;
320
321 memset(&host->pio, 0, sizeof(host->pio));
322
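	/*
	 * Convert the card's timeout_ns into bus clock ticks and add the
	 * card-supplied timeout_clks; e.g. 100 ms at a 25 MHz bus clock
	 * contributes 2,500,000 ticks.
	 */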
323 clks = (unsigned long long)data->timeout_ns * host->clk_rate;
324 do_div(clks, NSEC_PER_SEC);
325 timeout = data->timeout_clks + (unsigned int)clks;
326 writel(timeout, base + MMCIDATATIMER);
327
328 writel(host->curr.xfer_size, base + MMCIDATALENGTH);
329
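	/* The block length in bytes occupies DATACTRL bits starting at bit 4 */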
330 datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);
331
332 if (!msmsdcc_config_dma(host, data))
333 datactrl |= MCI_DPSM_DMAENABLE;
334 else {
335 host->pio.sg = data->sg;
336 host->pio.sg_len = data->sg_len;
337 host->pio.sg_off = 0;
338
339 if (data->flags & MMC_DATA_READ) {
340 pio_irqmask = MCI_RXFIFOHALFFULLMASK;
341 if (host->curr.xfer_remain < MCI_FIFOSIZE)
342 pio_irqmask |= MCI_RXDATAAVLBLMASK;
343 } else
344 pio_irqmask = MCI_TXFIFOHALFEMPTYMASK;
345 }
346
347 if (data->flags & MMC_DATA_READ)
348 datactrl |= MCI_DPSM_DIRECTION;
349
350 writel(pio_irqmask, base + MMCIMASK1);
351 writel(datactrl, base + MMCIDATACTRL);
352
353 if (datactrl & MCI_DPSM_DMAENABLE) {
354 host->dma.busy = 1;
355 msm_dmov_enqueue_cmd(host->dma.channel, &host->dma.hdr);
356 }
357}
358
359static void
360msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd, u32 c)
361{
362 void __iomem *base = host->base;
363
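	/*
	 * If the command path state machine is still enabled, disable it and
	 * wait roughly five bus-clock periods (plus 2 us of slack) before
	 * programming the new command.
	 */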
364 if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
365 writel(0, base + MMCICOMMAND);
366 udelay(2 + ((5 * 1000000) / host->clk_rate));
367 }
368
369 c |= cmd->opcode | MCI_CPSM_ENABLE;
370
371 if (cmd->flags & MMC_RSP_PRESENT) {
372 if (cmd->flags & MMC_RSP_136)
373 c |= MCI_CPSM_LONGRSP;
374 c |= MCI_CPSM_RESPONSE;
375 }
376
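	/*
	 * Data commands: READ_SINGLE_BLOCK (17), READ_MULTIPLE_BLOCK (18),
	 * WRITE_BLOCK (24), WRITE_MULTIPLE_BLOCK (25), IO_RW_EXTENDED (53).
	 */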
377 if (cmd->opcode == 17 || cmd->opcode == 18 ||
378 cmd->opcode == 24 || cmd->opcode == 25 ||
379 cmd->opcode == 53)
380 c |= MCI_CSPM_DATCMD;
381
382 if (cmd == cmd->mrq->stop)
383 c |= MCI_CSPM_MCIABORT;
384
385 host->curr.cmd = cmd;
386
387 host->stats.cmds++;
388
389 writel(cmd->arg, base + MMCIARGUMENT);
390 writel(c, base + MMCICOMMAND);
391}
392
393static void
394msmsdcc_data_err(struct msmsdcc_host *host, struct mmc_data *data,
395 unsigned int status)
396{
397 if (status & MCI_DATACRCFAIL) {
398 pr_err("%s: Data CRC error\n", mmc_hostname(host->mmc));
399 pr_err("%s: opcode 0x%.8x\n", __func__,
400 data->mrq->cmd->opcode);
401 pr_err("%s: blksz %d, blocks %d\n", __func__,
402 data->blksz, data->blocks);
403 data->error = -EILSEQ;
404 } else if (status & MCI_DATATIMEOUT) {
405 pr_err("%s: Data timeout\n", mmc_hostname(host->mmc));
406 data->error = -ETIMEDOUT;
407 } else if (status & MCI_RXOVERRUN) {
408 pr_err("%s: RX overrun\n", mmc_hostname(host->mmc));
409 data->error = -EIO;
410 } else if (status & MCI_TXUNDERRUN) {
411 pr_err("%s: TX underrun\n", mmc_hostname(host->mmc));
412 data->error = -EIO;
413 } else {
414 pr_err("%s: Unknown error (0x%.8x)\n",
415 mmc_hostname(host->mmc), status);
416 data->error = -EIO;
417 }
418}
419
420
421static int
422msmsdcc_pio_read(struct msmsdcc_host *host, char *buffer, unsigned int remain)
423{
424 void __iomem *base = host->base;
425 uint32_t *ptr = (uint32_t *) buffer;
426 int count = 0;
427
428 while (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL) {
429
430 *ptr = readl(base + MMCIFIFO + (count % MCI_FIFOSIZE));
431 ptr++;
432 count += sizeof(uint32_t);
433
434 remain -= sizeof(uint32_t);
435 if (remain == 0)
436 break;
437 }
438 return count;
439}
440
441static int
442msmsdcc_pio_write(struct msmsdcc_host *host, char *buffer,
443 unsigned int remain, u32 status)
444{
445 void __iomem *base = host->base;
446 char *ptr = buffer;
447
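	/*
	 * Write a full FIFO when it is empty, otherwise only the free half,
	 * pushing 32-bit words at a time.
	 */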
448 do {
449 unsigned int count, maxcnt;
450
451 maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE :
452 MCI_FIFOHALFSIZE;
453 count = min(remain, maxcnt);
454
455 writesl(base + MMCIFIFO, ptr, count >> 2);
456 ptr += count;
457 remain -= count;
458
459 if (remain == 0)
460 break;
461
462 status = readl(base + MMCISTATUS);
463 } while (status & MCI_TXFIFOHALFEMPTY);
464
465 return ptr - buffer;
466}
467
468static int
469msmsdcc_spin_on_status(struct msmsdcc_host *host, uint32_t mask, int maxspin)
470{
471 while (maxspin) {
472 if ((readl(host->base + MMCISTATUS) & mask))
473 return 0;
474 udelay(1);
475 --maxspin;
476 }
477 return -ETIMEDOUT;
478}
479
 480static irqreturn_t
481msmsdcc_pio_irq(int irq, void *dev_id)
482{
483 struct msmsdcc_host *host = dev_id;
484 void __iomem *base = host->base;
485 uint32_t status;
486
487 status = readl(base + MMCISTATUS);
488
489 do {
490 unsigned long flags;
491 unsigned int remain, len;
492 char *buffer;
493
494 if (!(status & (MCI_TXFIFOHALFEMPTY | MCI_RXDATAAVLBL))) {
495 if (host->curr.xfer_remain == 0 || !msmsdcc_piopoll)
496 break;
497
498 if (msmsdcc_spin_on_status(host,
499 (MCI_TXFIFOHALFEMPTY |
500 MCI_RXDATAAVLBL),
501 PIO_SPINMAX)) {
502 break;
503 }
504 }
505
506 /* Map the current scatter buffer */
507 local_irq_save(flags);
508 buffer = kmap_atomic(sg_page(host->pio.sg),
509 KM_BIO_SRC_IRQ) + host->pio.sg->offset;
510 buffer += host->pio.sg_off;
511 remain = host->pio.sg->length - host->pio.sg_off;
512 len = 0;
513 if (status & MCI_RXACTIVE)
514 len = msmsdcc_pio_read(host, buffer, remain);
515 if (status & MCI_TXACTIVE)
516 len = msmsdcc_pio_write(host, buffer, remain, status);
517
518 /* Unmap the buffer */
519 kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
520 local_irq_restore(flags);
521
522 host->pio.sg_off += len;
523 host->curr.xfer_remain -= len;
524 host->curr.data_xfered += len;
525 remain -= len;
526
527 if (remain == 0) {
528 /* This sg page is full - do some housekeeping */
529 if (status & MCI_RXACTIVE && host->curr.user_pages)
530 flush_dcache_page(sg_page(host->pio.sg));
531
532 if (!--host->pio.sg_len) {
533 memset(&host->pio, 0, sizeof(host->pio));
534 break;
535 }
536
537 /* Advance to next sg */
538 host->pio.sg++;
539 host->pio.sg_off = 0;
540 }
541
542 status = readl(base + MMCISTATUS);
543 } while (1);
544
545 if (status & MCI_RXACTIVE && host->curr.xfer_remain < MCI_FIFOSIZE)
546 writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);
547
548 if (!host->curr.xfer_remain)
549 writel(0, base + MMCIMASK1);
550
551 return IRQ_HANDLED;
552}
553
554static void msmsdcc_do_cmdirq(struct msmsdcc_host *host, uint32_t status)
555{
556 struct mmc_command *cmd = host->curr.cmd;
557 void __iomem *base = host->base;
558
559 host->curr.cmd = NULL;
560 cmd->resp[0] = readl(base + MMCIRESPONSE0);
561 cmd->resp[1] = readl(base + MMCIRESPONSE1);
562 cmd->resp[2] = readl(base + MMCIRESPONSE2);
563 cmd->resp[3] = readl(base + MMCIRESPONSE3);
564
565 del_timer(&host->command_timer);
566 if (status & MCI_CMDTIMEOUT) {
567 cmd->error = -ETIMEDOUT;
568 } else if (status & MCI_CMDCRCFAIL &&
569 cmd->flags & MMC_RSP_CRC) {
570 pr_err("%s: Command CRC error\n", mmc_hostname(host->mmc));
571 cmd->error = -EILSEQ;
572 }
573
574 if (!cmd->data || cmd->error) {
575 if (host->curr.data && host->dma.sg)
576 msm_dmov_stop_cmd(host->dma.channel,
577 &host->dma.hdr, 0);
578 else if (host->curr.data) { /* Non DMA */
579 msmsdcc_stop_data(host);
580 msmsdcc_request_end(host, cmd->mrq);
581 } else /* host->data == NULL */
582 msmsdcc_request_end(host, cmd->mrq);
583 } else if (!(cmd->data->flags & MMC_DATA_READ))
584 msmsdcc_start_data(host, cmd->data);
585}
586
587static void
588msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,
589 void __iomem *base)
590{
591 struct mmc_data *data = host->curr.data;
592
593 if (!data)
594 return;
595
596 /* Check for data errors */
597 if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT |
598 MCI_TXUNDERRUN | MCI_RXOVERRUN)) {
599 msmsdcc_data_err(host, data, status);
600 host->curr.data_xfered = 0;
601 if (host->dma.sg)
602 msm_dmov_stop_cmd(host->dma.channel,
603 &host->dma.hdr, 0);
604 else {
605 msmsdcc_stop_data(host);
606 if (!data->stop)
607 msmsdcc_request_end(host, data->mrq);
608 else
609 msmsdcc_start_command(host, data->stop, 0);
610 }
611 }
612
613 /* Check for data done */
614 if (!host->curr.got_dataend && (status & MCI_DATAEND))
615 host->curr.got_dataend = 1;
616
617 if (!host->curr.got_datablkend && (status & MCI_DATABLOCKEND))
618 host->curr.got_datablkend = 1;
619
620 /*
621 * If DMA is still in progress, we complete via the completion handler
622 */
623 if (host->curr.got_dataend && host->curr.got_datablkend &&
624 !host->dma.busy) {
625 /*
626 * There appears to be an issue in the controller where
627 * if you request a small block transfer (< fifo size),
628 * you may get your DATAEND/DATABLKEND irq without the
629 * PIO data irq.
630 *
631 * Check to see if there is still data to be read,
632 * and simulate a PIO irq.
633 */
634 if (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL)
635 msmsdcc_pio_irq(1, host);
636
637 msmsdcc_stop_data(host);
638 if (!data->error)
639 host->curr.data_xfered = host->curr.xfer_size;
640
641 if (!data->stop)
642 msmsdcc_request_end(host, data->mrq);
643 else
644 msmsdcc_start_command(host, data->stop, 0);
645 }
646}
647
648static irqreturn_t
649msmsdcc_irq(int irq, void *dev_id)
650{
651 struct msmsdcc_host *host = dev_id;
652 void __iomem *base = host->base;
653 u32 status;
654 int ret = 0;
655 int cardint = 0;
656
657 spin_lock(&host->lock);
658
659 do {
660 status = readl(base + MMCISTATUS);
661
662 status &= (readl(base + MMCIMASK0) | MCI_DATABLOCKENDMASK);
663 writel(status, base + MMCICLEAR);
664
665 msmsdcc_handle_irq_data(host, status, base);
666
667 if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
668 MCI_CMDTIMEOUT) && host->curr.cmd) {
669 msmsdcc_do_cmdirq(host, status);
670 }
671
672 if (status & MCI_SDIOINTOPER) {
673 cardint = 1;
674 status &= ~MCI_SDIOINTOPER;
675 }
676 ret = 1;
677 } while (status);
678
679 spin_unlock(&host->lock);
680
681 /*
682 * We have to delay handling the card interrupt as it calls
683 * back into the driver.
684 */
685 if (cardint)
686 mmc_signal_sdio_irq(host->mmc);
687
688 return IRQ_RETVAL(ret);
689}
690
691static void
692msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq)
693{
694 struct msmsdcc_host *host = mmc_priv(mmc);
695 unsigned long flags;
696
697 WARN_ON(host->curr.mrq != NULL);
698 WARN_ON(host->pwr == 0);
699
700 spin_lock_irqsave(&host->lock, flags);
701
702 host->stats.reqs++;
703
704 if (host->eject) {
705 if (mrq->data && !(mrq->data->flags & MMC_DATA_READ)) {
706 mrq->cmd->error = 0;
707 mrq->data->bytes_xfered = mrq->data->blksz *
708 mrq->data->blocks;
709 } else
710 mrq->cmd->error = -ENOMEDIUM;
711
712 spin_unlock_irqrestore(&host->lock, flags);
713 mmc_request_done(mmc, mrq);
714 return;
715 }
716
717 host->curr.mrq = mrq;
718
719 if (mrq->data && mrq->data->flags & MMC_DATA_READ)
720 msmsdcc_start_data(host, mrq->data);
721
722 msmsdcc_start_command(host, mrq->cmd, 0);
723
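	/*
	 * Briefly poll for command completion to avoid taking an interrupt
	 * for fast commands; if the poll window expires, arm the command
	 * timer and let the CMD irq finish the request.
	 */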
724 if (host->cmdpoll && !msmsdcc_spin_on_status(host,
725 MCI_CMDRESPEND|MCI_CMDCRCFAIL|MCI_CMDTIMEOUT,
726 CMD_SPINMAX)) {
727 uint32_t status = readl(host->base + MMCISTATUS);
728 msmsdcc_do_cmdirq(host, status);
729 writel(MCI_CMDRESPEND | MCI_CMDCRCFAIL | MCI_CMDTIMEOUT,
730 host->base + MMCICLEAR);
731 host->stats.cmdpoll_hits++;
732 } else {
733 host->stats.cmdpoll_misses++;
734 mod_timer(&host->command_timer, jiffies + HZ);
735 }
736 spin_unlock_irqrestore(&host->lock, flags);
737}
738
739static void
740msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
741{
742 struct msmsdcc_host *host = mmc_priv(mmc);
743 u32 clk = 0, pwr = 0;
744 int rc;
745
746 if (ios->clock) {
747
748 if (!host->clks_on) {
749 clk_enable(host->pclk);
750 clk_enable(host->clk);
751 host->clks_on = 1;
752 }
753 if (ios->clock != host->clk_rate) {
754 rc = clk_set_rate(host->clk, ios->clock);
755 if (rc < 0)
756 pr_err("%s: Error setting clock rate (%d)\n",
757 mmc_hostname(host->mmc), rc);
758 else
759 host->clk_rate = ios->clock;
760 }
761 clk |= MCI_CLK_ENABLE;
762 }
763
764 if (ios->bus_width == MMC_BUS_WIDTH_4)
765 clk |= (2 << 10); /* Set WIDEBUS */
766
767 if (ios->clock > 400000 && msmsdcc_pwrsave)
768 clk |= (1 << 9); /* PWRSAVE */
769
770 clk |= (1 << 12); /* FLOW_ENA */
771 clk |= (1 << 15); /* feedback clock */
772
773 if (host->plat->translate_vdd)
774 pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);
775
776 switch (ios->power_mode) {
777 case MMC_POWER_OFF:
778 htc_pwrsink_set(PWRSINK_SDCARD, 0);
779 break;
780 case MMC_POWER_UP:
781 pwr |= MCI_PWR_UP;
782 break;
783 case MMC_POWER_ON:
784 htc_pwrsink_set(PWRSINK_SDCARD, 100);
785 pwr |= MCI_PWR_ON;
786 break;
787 }
788
789 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
790 pwr |= MCI_OD;
791
792 writel(clk, host->base + MMCICLOCK);
793
794 if (host->pwr != pwr) {
795 host->pwr = pwr;
796 writel(pwr, host->base + MMCIPOWER);
797 }
798
799 if (!(clk & MCI_CLK_ENABLE) && host->clks_on) {
800 clk_disable(host->clk);
801 clk_disable(host->pclk);
802 host->clks_on = 0;
803 }
804}
805
806static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable)
807{
808 struct msmsdcc_host *host = mmc_priv(mmc);
809 unsigned long flags;
810 u32 status;
811
812 spin_lock_irqsave(&host->lock, flags);
813 if (msmsdcc_sdioirq == 1) {
814 status = readl(host->base + MMCIMASK0);
815 if (enable)
816 status |= MCI_SDIOINTOPERMASK;
817 else
818 status &= ~MCI_SDIOINTOPERMASK;
819 host->saved_irq0mask = status;
820 writel(status, host->base + MMCIMASK0);
821 }
822 spin_unlock_irqrestore(&host->lock, flags);
823}
824
825static const struct mmc_host_ops msmsdcc_ops = {
826 .request = msmsdcc_request,
827 .set_ios = msmsdcc_set_ios,
828 .enable_sdio_irq = msmsdcc_enable_sdio_irq,
829};
830
831static void
832msmsdcc_check_status(unsigned long data)
833{
834 struct msmsdcc_host *host = (struct msmsdcc_host *)data;
835 unsigned int status;
836
837 if (!host->plat->status) {
838 mmc_detect_change(host->mmc, 0);
839 goto out;
840 }
841
842 status = host->plat->status(mmc_dev(host->mmc));
843 host->eject = !status;
844 if (status ^ host->oldstat) {
845 pr_info("%s: Slot status change detected (%d -> %d)\n",
846 mmc_hostname(host->mmc), host->oldstat, status);
847 if (status)
848 mmc_detect_change(host->mmc, (5 * HZ) / 2);
849 else
850 mmc_detect_change(host->mmc, 0);
851 }
852
853 host->oldstat = status;
854
855out:
856 if (host->timer.function)
857 mod_timer(&host->timer, jiffies + HZ);
858}
859
860static irqreturn_t
861msmsdcc_platform_status_irq(int irq, void *dev_id)
862{
863 struct msmsdcc_host *host = dev_id;
864
865 printk(KERN_DEBUG "%s: %d\n", __func__, irq);
866 msmsdcc_check_status((unsigned long) host);
867 return IRQ_HANDLED;
868}
869
870static void
871msmsdcc_status_notify_cb(int card_present, void *dev_id)
872{
873 struct msmsdcc_host *host = dev_id;
874
875 printk(KERN_DEBUG "%s: card_present %d\n", mmc_hostname(host->mmc),
876 card_present);
877 msmsdcc_check_status((unsigned long) host);
878}
879
880/*
 881 * Called when a command expires. Dump some debugging info,
 882 * then error out the transaction.
884 */
885static void
886msmsdcc_command_expired(unsigned long _data)
887{
888 struct msmsdcc_host *host = (struct msmsdcc_host *) _data;
889 struct mmc_request *mrq;
890 unsigned long flags;
891
892 spin_lock_irqsave(&host->lock, flags);
893 mrq = host->curr.mrq;
894
895 if (!mrq) {
896 pr_info("%s: Command expiry misfire\n",
897 mmc_hostname(host->mmc));
898 spin_unlock_irqrestore(&host->lock, flags);
899 return;
900 }
901
902 pr_err("%s: Command timeout (%p %p %p %p)\n",
903 mmc_hostname(host->mmc), mrq, mrq->cmd,
904 mrq->data, host->dma.sg);
905
906 mrq->cmd->error = -ETIMEDOUT;
907 msmsdcc_stop_data(host);
908
909 writel(0, host->base + MMCICOMMAND);
910
911 host->curr.mrq = NULL;
912 host->curr.cmd = NULL;
913
914 spin_unlock_irqrestore(&host->lock, flags);
915 mmc_request_done(host->mmc, mrq);
916}
917
918static int
919msmsdcc_init_dma(struct msmsdcc_host *host)
920{
921 memset(&host->dma, 0, sizeof(struct msmsdcc_dma_data));
922 host->dma.host = host;
923 host->dma.channel = -1;
924
925 if (!host->dmares)
926 return -ENODEV;
927
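	/* Allocate an uncached buffer for the data mover command boxes */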
928 host->dma.nc = dma_alloc_coherent(NULL,
929 sizeof(struct msmsdcc_nc_dmadata),
930 &host->dma.nc_busaddr,
931 GFP_KERNEL);
932 if (host->dma.nc == NULL) {
933 pr_err("Unable to allocate DMA buffer\n");
934 return -ENOMEM;
935 }
936 memset(host->dma.nc, 0x00, sizeof(struct msmsdcc_nc_dmadata));
937 host->dma.cmd_busaddr = host->dma.nc_busaddr;
938 host->dma.cmdptr_busaddr = host->dma.nc_busaddr +
939 offsetof(struct msmsdcc_nc_dmadata, cmdptr);
940 host->dma.channel = host->dmares->start;
941
942 return 0;
943}
944
945#ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ
946static void
947do_resume_work(struct work_struct *work)
948{
949 struct msmsdcc_host *host =
950 container_of(work, struct msmsdcc_host, resume_task);
951 struct mmc_host *mmc = host->mmc;
952
953 if (mmc) {
954 mmc_resume_host(mmc);
955 if (host->stat_irq)
956 enable_irq(host->stat_irq);
957 }
958}
959#endif
960
961static int
962msmsdcc_probe(struct platform_device *pdev)
963{
964 struct mmc_platform_data *plat = pdev->dev.platform_data;
965 struct msmsdcc_host *host;
966 struct mmc_host *mmc;
967 struct resource *cmd_irqres = NULL;
968 struct resource *pio_irqres = NULL;
969 struct resource *stat_irqres = NULL;
970 struct resource *memres = NULL;
971 struct resource *dmares = NULL;
972 int ret;
973
974 /* must have platform data */
975 if (!plat) {
976 pr_err("%s: Platform data not available\n", __func__);
977 ret = -EINVAL;
978 goto out;
979 }
980
981 if (pdev->id < 1 || pdev->id > 4)
982 return -EINVAL;
983
984 if (pdev->resource == NULL || pdev->num_resources < 2) {
985 pr_err("%s: Invalid resource\n", __func__);
986 return -ENXIO;
987 }
988
989 memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
990 dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
991 cmd_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
992 "cmd_irq");
993 pio_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
994 "pio_irq");
995 stat_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
996 "status_irq");
997
998 if (!cmd_irqres || !pio_irqres || !memres) {
999 pr_err("%s: Invalid resource\n", __func__);
1000 return -ENXIO;
1001 }
1002
1003 /*
1004 * Setup our host structure
1005 */
1006
1007 mmc = mmc_alloc_host(sizeof(struct msmsdcc_host), &pdev->dev);
1008 if (!mmc) {
1009 ret = -ENOMEM;
1010 goto out;
1011 }
1012
1013 host = mmc_priv(mmc);
1014 host->pdev_id = pdev->id;
1015 host->plat = plat;
1016 host->mmc = mmc;
1017
1018 host->cmdpoll = 1;
1019
1020 host->base = ioremap(memres->start, PAGE_SIZE);
1021 if (!host->base) {
1022 ret = -ENOMEM;
1023 goto out;
1024 }
1025
1026 host->cmd_irqres = cmd_irqres;
1027 host->pio_irqres = pio_irqres;
1028 host->memres = memres;
1029 host->dmares = dmares;
1030 spin_lock_init(&host->lock);
1031
1032 /*
1033 * Setup DMA
1034 */
1035 msmsdcc_init_dma(host);
1036
1037 /*
1038 * Setup main peripheral bus clock
1039 */
1040 host->pclk = clk_get(&pdev->dev, "sdc_pclk");
1041 if (IS_ERR(host->pclk)) {
1042 ret = PTR_ERR(host->pclk);
1043 goto host_free;
1044 }
1045
1046 ret = clk_enable(host->pclk);
1047 if (ret)
1048 goto pclk_put;
1049
1050 host->pclk_rate = clk_get_rate(host->pclk);
1051
1052 /*
1053 * Setup SDC MMC clock
1054 */
1055 host->clk = clk_get(&pdev->dev, "sdc_clk");
1056 if (IS_ERR(host->clk)) {
1057 ret = PTR_ERR(host->clk);
1058 goto pclk_disable;
1059 }
1060
1061 ret = clk_enable(host->clk);
1062 if (ret)
1063 goto clk_put;
1064
1065 ret = clk_set_rate(host->clk, msmsdcc_fmin);
1066 if (ret) {
1067 pr_err("%s: Clock rate set failed (%d)\n", __func__, ret);
1068 goto clk_disable;
1069 }
1070
1071 host->clk_rate = clk_get_rate(host->clk);
1072
1073 host->clks_on = 1;
1074
1075 /*
1076 * Setup MMC host structure
1077 */
1078 mmc->ops = &msmsdcc_ops;
1079 mmc->f_min = msmsdcc_fmin;
1080 mmc->f_max = msmsdcc_fmax;
1081 mmc->ocr_avail = plat->ocr_mask;
1082
1083 if (msmsdcc_4bit)
1084 mmc->caps |= MMC_CAP_4_BIT_DATA;
1085 if (msmsdcc_sdioirq)
1086 mmc->caps |= MMC_CAP_SDIO_IRQ;
1087 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
1088
1089 mmc->max_phys_segs = NR_SG;
1090 mmc->max_hw_segs = NR_SG;
1091 mmc->max_blk_size = 4096; /* MCI_DATA_CTL BLOCKSIZE up to 4096 */
1092 mmc->max_blk_count = 65536;
1093
1094 mmc->max_req_size = 33554432; /* MCI_DATA_LENGTH is 25 bits */
1095 mmc->max_seg_size = mmc->max_req_size;
1096
1097 writel(0, host->base + MMCIMASK0);
1098 writel(0x5e007ff, host->base + MMCICLEAR); /* Add: 1 << 25 */
1099
1100 writel(MCI_IRQENABLE, host->base + MMCIMASK0);
1101 host->saved_irq0mask = MCI_IRQENABLE;
1102
1103 /*
1104 * Setup card detect change
1105 */
1106
1107 memset(&host->timer, 0, sizeof(host->timer));
1108
1109 if (stat_irqres && !(stat_irqres->flags & IORESOURCE_DISABLED)) {
1110 unsigned long irqflags = IRQF_SHARED |
1111 (stat_irqres->flags & IRQF_TRIGGER_MASK);
1112
1113 host->stat_irq = stat_irqres->start;
1114 ret = request_irq(host->stat_irq,
1115 msmsdcc_platform_status_irq,
1116 irqflags,
1117 DRIVER_NAME " (slot)",
1118 host);
1119 if (ret) {
1120 pr_err("%s: Unable to get slot IRQ %d (%d)\n",
1121 mmc_hostname(mmc), host->stat_irq, ret);
1122 goto clk_disable;
1123 }
1124 } else if (plat->register_status_notify) {
1125 plat->register_status_notify(msmsdcc_status_notify_cb, host);
1126 } else if (!plat->status)
1127 pr_err("%s: No card detect facilities available\n",
1128 mmc_hostname(mmc));
1129 else {
1130 init_timer(&host->timer);
1131 host->timer.data = (unsigned long)host;
1132 host->timer.function = msmsdcc_check_status;
1133 host->timer.expires = jiffies + HZ;
1134 add_timer(&host->timer);
1135 }
1136
1137 if (plat->status) {
1138 host->oldstat = host->plat->status(mmc_dev(host->mmc));
1139 host->eject = !host->oldstat;
1140 }
1141
1142 /*
1143 * Setup a command timer. We currently need this due to
1144 * some 'strange' timeout / error handling situations.
1145 */
1146 init_timer(&host->command_timer);
1147 host->command_timer.data = (unsigned long) host;
1148 host->command_timer.function = msmsdcc_command_expired;
1149
1150 ret = request_irq(cmd_irqres->start, msmsdcc_irq, IRQF_SHARED,
1151 DRIVER_NAME " (cmd)", host);
1152 if (ret)
1153 goto stat_irq_free;
1154
1155 ret = request_irq(pio_irqres->start, msmsdcc_pio_irq, IRQF_SHARED,
1156 DRIVER_NAME " (pio)", host);
1157 if (ret)
1158 goto cmd_irq_free;
1159
1160 mmc_set_drvdata(pdev, mmc);
1161 mmc_add_host(mmc);
1162
1163 pr_info("%s: Qualcomm MSM SDCC at 0x%016llx irq %d,%d dma %d\n",
1164 mmc_hostname(mmc), (unsigned long long)memres->start,
1165 (unsigned int) cmd_irqres->start,
1166 (unsigned int) host->stat_irq, host->dma.channel);
1167 pr_info("%s: 4 bit data mode %s\n", mmc_hostname(mmc),
1168 (mmc->caps & MMC_CAP_4_BIT_DATA ? "enabled" : "disabled"));
1169 pr_info("%s: MMC clock %u -> %u Hz, PCLK %u Hz\n",
1170 mmc_hostname(mmc), msmsdcc_fmin, msmsdcc_fmax, host->pclk_rate);
1171 pr_info("%s: Slot eject status = %d\n", mmc_hostname(mmc), host->eject);
1172 pr_info("%s: Power save feature enable = %d\n",
1173 mmc_hostname(mmc), msmsdcc_pwrsave);
1174
1175 if (host->dma.channel != -1) {
1176 pr_info("%s: DM non-cached buffer at %p, dma_addr 0x%.8x\n",
1177 mmc_hostname(mmc), host->dma.nc, host->dma.nc_busaddr);
1178 pr_info("%s: DM cmd busaddr 0x%.8x, cmdptr busaddr 0x%.8x\n",
1179 mmc_hostname(mmc), host->dma.cmd_busaddr,
1180 host->dma.cmdptr_busaddr);
1181 } else
1182 pr_info("%s: PIO transfer enabled\n", mmc_hostname(mmc));
1183 if (host->timer.function)
1184 pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc));
1185
1186 return 0;
1187 cmd_irq_free:
1188 free_irq(cmd_irqres->start, host);
1189 stat_irq_free:
1190 if (host->stat_irq)
1191 free_irq(host->stat_irq, host);
1192 clk_disable:
1193 clk_disable(host->clk);
1194 clk_put:
1195 clk_put(host->clk);
1196 pclk_disable:
1197 clk_disable(host->pclk);
1198 pclk_put:
1199 clk_put(host->pclk);
1200 host_free:
1201 mmc_free_host(mmc);
1202 out:
1203 return ret;
1204}
1205
1206static int
1207msmsdcc_suspend(struct platform_device *dev, pm_message_t state)
1208{
1209 struct mmc_host *mmc = mmc_get_drvdata(dev);
1210 int rc = 0;
1211
1212 if (mmc) {
1213 struct msmsdcc_host *host = mmc_priv(mmc);
1214
1215 if (host->stat_irq)
1216 disable_irq(host->stat_irq);
1217
1218 if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
1219 rc = mmc_suspend_host(mmc, state);
1220 if (!rc) {
1221 writel(0, host->base + MMCIMASK0);
1222
1223 if (host->clks_on) {
1224 clk_disable(host->clk);
1225 clk_disable(host->pclk);
1226 host->clks_on = 0;
1227 }
1228 }
1229 }
1230 return rc;
1231}
1232
1233static int
1234msmsdcc_resume(struct platform_device *dev)
1235{
1236 struct mmc_host *mmc = mmc_get_drvdata(dev);
1237 unsigned long flags;
1238
1239 if (mmc) {
1240 struct msmsdcc_host *host = mmc_priv(mmc);
1241
1242 spin_lock_irqsave(&host->lock, flags);
1243
1244 if (!host->clks_on) {
1245 clk_enable(host->pclk);
1246 clk_enable(host->clk);
1247 host->clks_on = 1;
1248 }
1249
1250 writel(host->saved_irq0mask, host->base + MMCIMASK0);
1251
1252 spin_unlock_irqrestore(&host->lock, flags);
1253
1254 if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
1255 mmc_resume_host(mmc);
1256		if (host->stat_irq)
1257			enable_irq(host->stat_irq);
1260 }
1261 return 0;
1262}
1263
1264static struct platform_driver msmsdcc_driver = {
1265 .probe = msmsdcc_probe,
1266 .suspend = msmsdcc_suspend,
1267 .resume = msmsdcc_resume,
1268 .driver = {
1269 .name = "msm_sdcc",
1270 },
1271};
1272
1273static int __init msmsdcc_init(void)
1274{
1275 return platform_driver_register(&msmsdcc_driver);
1276}
1277
1278static void __exit msmsdcc_exit(void)
1279{
1280 platform_driver_unregister(&msmsdcc_driver);
1281}
1282
1283module_init(msmsdcc_init);
1284module_exit(msmsdcc_exit);
1285
1286MODULE_DESCRIPTION("Qualcomm MSM 7X00A Multimedia Card Interface driver");
1287MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h
new file mode 100644
index 000000000000..8c8448469811
--- /dev/null
+++ b/drivers/mmc/host/msm_sdcc.h
@@ -0,0 +1,238 @@
1/*
2 * linux/drivers/mmc/host/msmsdcc.h - QCT MSM7K SDC Controller
3 *
4 * Copyright (C) 2008 Google, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * - Based on mmci.h
11 */
12
13#ifndef _MSM_SDCC_H
14#define _MSM_SDCC_H
15
16#define MSMSDCC_CRCI_SDC1 6
17#define MSMSDCC_CRCI_SDC2 7
18#define MSMSDCC_CRCI_SDC3 12
19#define MSMSDCC_CRCI_SDC4 13
20
21#define MMCIPOWER 0x000
22#define MCI_PWR_OFF 0x00
23#define MCI_PWR_UP 0x02
24#define MCI_PWR_ON 0x03
25#define MCI_OD (1 << 6)
26
27#define MMCICLOCK 0x004
28#define MCI_CLK_ENABLE (1 << 8)
29#define MCI_CLK_PWRSAVE (1 << 9)
30#define MCI_CLK_WIDEBUS (1 << 10)
31#define MCI_CLK_FLOWENA (1 << 12)
32#define MCI_CLK_INVERTOUT (1 << 13)
33#define MCI_CLK_SELECTIN (1 << 14)
34
35#define MMCIARGUMENT 0x008
36#define MMCICOMMAND 0x00c
37#define MCI_CPSM_RESPONSE (1 << 6)
38#define MCI_CPSM_LONGRSP (1 << 7)
39#define MCI_CPSM_INTERRUPT (1 << 8)
40#define MCI_CPSM_PENDING (1 << 9)
41#define MCI_CPSM_ENABLE (1 << 10)
42#define MCI_CPSM_PROGENA (1 << 11)
43#define MCI_CSPM_DATCMD (1 << 12)
44#define MCI_CSPM_MCIABORT (1 << 13)
45#define MCI_CSPM_CCSENABLE (1 << 14)
46#define MCI_CSPM_CCSDISABLE (1 << 15)
47
48
49#define MMCIRESPCMD 0x010
50#define MMCIRESPONSE0 0x014
51#define MMCIRESPONSE1 0x018
52#define MMCIRESPONSE2 0x01c
53#define MMCIRESPONSE3 0x020
54#define MMCIDATATIMER 0x024
55#define MMCIDATALENGTH 0x028
56
57#define MMCIDATACTRL 0x02c
58#define MCI_DPSM_ENABLE (1 << 0)
59#define MCI_DPSM_DIRECTION (1 << 1)
60#define MCI_DPSM_MODE (1 << 2)
61#define MCI_DPSM_DMAENABLE (1 << 3)
62
63#define MMCIDATACNT 0x030
64#define MMCISTATUS 0x034
65#define MCI_CMDCRCFAIL (1 << 0)
66#define MCI_DATACRCFAIL (1 << 1)
67#define MCI_CMDTIMEOUT (1 << 2)
68#define MCI_DATATIMEOUT (1 << 3)
69#define MCI_TXUNDERRUN (1 << 4)
70#define MCI_RXOVERRUN (1 << 5)
71#define MCI_CMDRESPEND (1 << 6)
72#define MCI_CMDSENT (1 << 7)
73#define MCI_DATAEND (1 << 8)
74#define MCI_DATABLOCKEND (1 << 10)
75#define MCI_CMDACTIVE (1 << 11)
76#define MCI_TXACTIVE (1 << 12)
77#define MCI_RXACTIVE (1 << 13)
78#define MCI_TXFIFOHALFEMPTY (1 << 14)
79#define MCI_RXFIFOHALFFULL (1 << 15)
80#define MCI_TXFIFOFULL (1 << 16)
81#define MCI_RXFIFOFULL (1 << 17)
82#define MCI_TXFIFOEMPTY (1 << 18)
83#define MCI_RXFIFOEMPTY (1 << 19)
84#define MCI_TXDATAAVLBL (1 << 20)
85#define MCI_RXDATAAVLBL (1 << 21)
86#define MCI_SDIOINTR (1 << 22)
87#define MCI_PROGDONE (1 << 23)
88#define MCI_ATACMDCOMPL (1 << 24)
89#define MCI_SDIOINTOPER (1 << 25)
90#define MCI_CCSTIMEOUT (1 << 26)
91
92#define MMCICLEAR 0x038
93#define MCI_CMDCRCFAILCLR (1 << 0)
94#define MCI_DATACRCFAILCLR (1 << 1)
95#define MCI_CMDTIMEOUTCLR (1 << 2)
96#define MCI_DATATIMEOUTCLR (1 << 3)
97#define MCI_TXUNDERRUNCLR (1 << 4)
98#define MCI_RXOVERRUNCLR (1 << 5)
99#define MCI_CMDRESPENDCLR (1 << 6)
100#define MCI_CMDSENTCLR (1 << 7)
101#define MCI_DATAENDCLR (1 << 8)
102#define MCI_DATABLOCKENDCLR (1 << 10)
103
104#define MMCIMASK0 0x03c
105#define MCI_CMDCRCFAILMASK (1 << 0)
106#define MCI_DATACRCFAILMASK (1 << 1)
107#define MCI_CMDTIMEOUTMASK (1 << 2)
108#define MCI_DATATIMEOUTMASK (1 << 3)
109#define MCI_TXUNDERRUNMASK (1 << 4)
110#define MCI_RXOVERRUNMASK (1 << 5)
111#define MCI_CMDRESPENDMASK (1 << 6)
112#define MCI_CMDSENTMASK (1 << 7)
113#define MCI_DATAENDMASK (1 << 8)
114#define MCI_DATABLOCKENDMASK (1 << 10)
115#define MCI_CMDACTIVEMASK (1 << 11)
116#define MCI_TXACTIVEMASK (1 << 12)
117#define MCI_RXACTIVEMASK (1 << 13)
118#define MCI_TXFIFOHALFEMPTYMASK (1 << 14)
119#define MCI_RXFIFOHALFFULLMASK (1 << 15)
120#define MCI_TXFIFOFULLMASK (1 << 16)
121#define MCI_RXFIFOFULLMASK (1 << 17)
122#define MCI_TXFIFOEMPTYMASK (1 << 18)
123#define MCI_RXFIFOEMPTYMASK (1 << 19)
124#define MCI_TXDATAAVLBLMASK (1 << 20)
125#define MCI_RXDATAAVLBLMASK (1 << 21)
126#define MCI_SDIOINTMASK (1 << 22)
127#define MCI_PROGDONEMASK (1 << 23)
128#define MCI_ATACMDCOMPLMASK (1 << 24)
129#define MCI_SDIOINTOPERMASK (1 << 25)
130#define MCI_CCSTIMEOUTMASK (1 << 26)
131
132#define MMCIMASK1 0x040
133#define MMCIFIFOCNT 0x044
134#define MCICCSTIMER 0x058
135
136#define MMCIFIFO 0x080 /* to 0x0bc */
137
138#define MCI_IRQENABLE \
139 (MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK| \
140 MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \
141 MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATAENDMASK)
142
143/*
144 * The size of the FIFO in bytes.
145 */
146#define MCI_FIFOSIZE (16*4)
147
148#define MCI_FIFOHALFSIZE (MCI_FIFOSIZE / 2)
149
150#define NR_SG 32
151
152struct clk;
153
154struct msmsdcc_nc_dmadata {
155 dmov_box cmd[NR_SG];
156 uint32_t cmdptr;
157};
158
159struct msmsdcc_dma_data {
160 struct msmsdcc_nc_dmadata *nc;
161 dma_addr_t nc_busaddr;
162 dma_addr_t cmd_busaddr;
163 dma_addr_t cmdptr_busaddr;
164
165 struct msm_dmov_cmd hdr;
166 enum dma_data_direction dir;
167
168 struct scatterlist *sg;
169 int num_ents;
170
171 int channel;
172 struct msmsdcc_host *host;
173 int busy; /* Set if DM is busy */
174};
175
176struct msmsdcc_pio_data {
177 struct scatterlist *sg;
178 unsigned int sg_len;
179 unsigned int sg_off;
180};
181
182struct msmsdcc_curr_req {
183 struct mmc_request *mrq;
184 struct mmc_command *cmd;
185 struct mmc_data *data;
186 unsigned int xfer_size; /* Total data size */
187 unsigned int xfer_remain; /* Bytes remaining to send */
188 unsigned int data_xfered; /* Bytes acked by BLKEND irq */
189 int got_dataend;
190 int got_datablkend;
191 int user_pages;
192};
193
194struct msmsdcc_stats {
195 unsigned int reqs;
196 unsigned int cmds;
197 unsigned int cmdpoll_hits;
198 unsigned int cmdpoll_misses;
199};
200
201struct msmsdcc_host {
202 struct resource *cmd_irqres;
203 struct resource *pio_irqres;
204 struct resource *memres;
205 struct resource *dmares;
206 void __iomem *base;
207 int pdev_id;
208 unsigned int stat_irq;
209
210 struct msmsdcc_curr_req curr;
211
212 struct mmc_host *mmc;
213 struct clk *clk; /* main MMC bus clock */
214 struct clk *pclk; /* SDCC peripheral bus clock */
215 unsigned int clks_on; /* set if clocks are enabled */
216 struct timer_list command_timer;
217
218 unsigned int eject; /* eject state */
219
220 spinlock_t lock;
221
222 unsigned int clk_rate; /* Current clock rate */
223 unsigned int pclk_rate;
224
225 u32 pwr;
226 u32 saved_irq0mask; /* MMCIMASK0 reg value */
227 struct mmc_platform_data *plat;
228
229 struct timer_list timer;
230 unsigned int oldstat;
231
232 struct msmsdcc_dma_data dma;
233 struct msmsdcc_pio_data pio;
234 int cmdpoll;
235 struct msmsdcc_stats stats;
236};
237
238#endif
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index bc14bb1b0579..88671529c45d 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -512,7 +512,7 @@ static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)
512 } 512 }
513 513
514 /* For the DMA case the DMA engine handles the data transfer 514 /* For the DMA case the DMA engine handles the data transfer
515 * automatically. For non DMA we have to to it ourselves. 515 * automatically. For non DMA we have to do it ourselves.
516 * Don't do it in interrupt context though. 516 * Don't do it in interrupt context though.
517 */ 517 */
518 if (!mxcmci_use_dma(host) && host->data) 518 if (!mxcmci_use_dma(host) && host->data)
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 1cf9cfb3b64f..4487cc097911 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -17,6 +17,8 @@
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/debugfs.h>
21#include <linux/seq_file.h>
20#include <linux/interrupt.h> 22#include <linux/interrupt.h>
21#include <linux/delay.h> 23#include <linux/delay.h>
22#include <linux/dma-mapping.h> 24#include <linux/dma-mapping.h>
@@ -25,6 +27,7 @@
25#include <linux/timer.h> 27#include <linux/timer.h>
26#include <linux/clk.h> 28#include <linux/clk.h>
27#include <linux/mmc/host.h> 29#include <linux/mmc/host.h>
30#include <linux/mmc/core.h>
28#include <linux/io.h> 31#include <linux/io.h>
29#include <linux/semaphore.h> 32#include <linux/semaphore.h>
30#include <mach/dma.h> 33#include <mach/dma.h>
@@ -35,6 +38,7 @@
35 38
36/* OMAP HSMMC Host Controller Registers */ 39/* OMAP HSMMC Host Controller Registers */
37#define OMAP_HSMMC_SYSCONFIG 0x0010 40#define OMAP_HSMMC_SYSCONFIG 0x0010
41#define OMAP_HSMMC_SYSSTATUS 0x0014
38#define OMAP_HSMMC_CON 0x002C 42#define OMAP_HSMMC_CON 0x002C
39#define OMAP_HSMMC_BLK 0x0104 43#define OMAP_HSMMC_BLK 0x0104
40#define OMAP_HSMMC_ARG 0x0108 44#define OMAP_HSMMC_ARG 0x0108
@@ -70,6 +74,8 @@
70#define DTO_MASK 0x000F0000 74#define DTO_MASK 0x000F0000
71#define DTO_SHIFT 16 75#define DTO_SHIFT 16
72#define INT_EN_MASK 0x307F0033 76#define INT_EN_MASK 0x307F0033
77#define BWR_ENABLE (1 << 4)
78#define BRR_ENABLE (1 << 5)
73#define INIT_STREAM (1 << 1) 79#define INIT_STREAM (1 << 1)
74#define DP_SELECT (1 << 21) 80#define DP_SELECT (1 << 21)
75#define DDIR (1 << 4) 81#define DDIR (1 << 4)
@@ -92,6 +98,8 @@
92#define DUAL_VOLT_OCR_BIT 7 98#define DUAL_VOLT_OCR_BIT 7
93#define SRC (1 << 25) 99#define SRC (1 << 25)
94#define SRD (1 << 26) 100#define SRD (1 << 26)
101#define SOFTRESET (1 << 1)
102#define RESETDONE (1 << 0)
95 103
96/* 104/*
97 * FIXME: Most likely all the data using these _DEVID defines should come 105 * FIXME: Most likely all the data using these _DEVID defines should come
@@ -101,11 +109,18 @@
101#define OMAP_MMC1_DEVID 0 109#define OMAP_MMC1_DEVID 0
102#define OMAP_MMC2_DEVID 1 110#define OMAP_MMC2_DEVID 1
103#define OMAP_MMC3_DEVID 2 111#define OMAP_MMC3_DEVID 2
112#define OMAP_MMC4_DEVID 3
113#define OMAP_MMC5_DEVID 4
104 114
105#define MMC_TIMEOUT_MS 20 115#define MMC_TIMEOUT_MS 20
106#define OMAP_MMC_MASTER_CLOCK 96000000 116#define OMAP_MMC_MASTER_CLOCK 96000000
107#define DRIVER_NAME "mmci-omap-hs" 117#define DRIVER_NAME "mmci-omap-hs"
108 118
119/* Timeouts for entering power saving states on inactivity, msec */
120#define OMAP_MMC_DISABLED_TIMEOUT 100
121#define OMAP_MMC_SLEEP_TIMEOUT 1000
122#define OMAP_MMC_OFF_TIMEOUT 8000
123
109/* 124/*
110 * One controller can have multiple slots, like on some omap boards using 125 * One controller can have multiple slots, like on some omap boards using
111 * omap.c controller driver. Luckily this is not currently done on any known 126 * omap.c controller driver. Luckily this is not currently done on any known
@@ -122,7 +137,7 @@
122#define OMAP_HSMMC_WRITE(base, reg, val) \ 137#define OMAP_HSMMC_WRITE(base, reg, val) \
123 __raw_writel((val), (base) + OMAP_HSMMC_##reg) 138 __raw_writel((val), (base) + OMAP_HSMMC_##reg)
124 139
125struct mmc_omap_host { 140struct omap_hsmmc_host {
126 struct device *dev; 141 struct device *dev;
127 struct mmc_host *mmc; 142 struct mmc_host *mmc;
128 struct mmc_request *mrq; 143 struct mmc_request *mrq;
@@ -135,27 +150,35 @@ struct mmc_omap_host {
135 struct work_struct mmc_carddetect_work; 150 struct work_struct mmc_carddetect_work;
136 void __iomem *base; 151 void __iomem *base;
137 resource_size_t mapbase; 152 resource_size_t mapbase;
153 spinlock_t irq_lock; /* Prevent races with irq handler */
154 unsigned long flags;
138 unsigned int id; 155 unsigned int id;
139 unsigned int dma_len; 156 unsigned int dma_len;
140 unsigned int dma_sg_idx; 157 unsigned int dma_sg_idx;
141 unsigned char bus_mode; 158 unsigned char bus_mode;
159 unsigned char power_mode;
142 u32 *buffer; 160 u32 *buffer;
143 u32 bytesleft; 161 u32 bytesleft;
144 int suspended; 162 int suspended;
145 int irq; 163 int irq;
146 int carddetect;
147 int use_dma, dma_ch; 164 int use_dma, dma_ch;
148 int dma_line_tx, dma_line_rx; 165 int dma_line_tx, dma_line_rx;
149 int slot_id; 166 int slot_id;
150 int dbclk_enabled; 167 int got_dbclk;
151 int response_busy; 168 int response_busy;
169 int context_loss;
170 int dpm_state;
171 int vdd;
172 int protect_card;
173 int reqs_blocked;
174
152 struct omap_mmc_platform_data *pdata; 175 struct omap_mmc_platform_data *pdata;
153}; 176};
154 177
155/* 178/*
156 * Stop clock to the card 179 * Stop clock to the card
157 */ 180 */
158static void omap_mmc_stop_clock(struct mmc_omap_host *host) 181static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host)
159{ 182{
160 OMAP_HSMMC_WRITE(host->base, SYSCTL, 183 OMAP_HSMMC_WRITE(host->base, SYSCTL,
161 OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN); 184 OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN);
@@ -163,15 +186,178 @@ static void omap_mmc_stop_clock(struct mmc_omap_host *host)
 163 dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stopped\n"); 186 dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stopped\n");
164} 187}
165 188
189#ifdef CONFIG_PM
190
191/*
192 * Restore the MMC host context, if it was lost as result of a
193 * power state change.
194 */
195static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
196{
197 struct mmc_ios *ios = &host->mmc->ios;
198 struct omap_mmc_platform_data *pdata = host->pdata;
199 int context_loss = 0;
200 u32 hctl, capa, con;
201 u16 dsor = 0;
202 unsigned long timeout;
203
204 if (pdata->get_context_loss_count) {
205 context_loss = pdata->get_context_loss_count(host->dev);
206 if (context_loss < 0)
207 return 1;
208 }
209
210 dev_dbg(mmc_dev(host->mmc), "context was %slost\n",
211 context_loss == host->context_loss ? "not " : "");
212 if (host->context_loss == context_loss)
213 return 1;
214
215 /* Wait for hardware reset */
216 timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
217 while ((OMAP_HSMMC_READ(host->base, SYSSTATUS) & RESETDONE) != RESETDONE
218 && time_before(jiffies, timeout))
219 ;
220
221 /* Do software reset */
222 OMAP_HSMMC_WRITE(host->base, SYSCONFIG, SOFTRESET);
223 timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
224 while ((OMAP_HSMMC_READ(host->base, SYSSTATUS) & RESETDONE) != RESETDONE
225 && time_before(jiffies, timeout))
226 ;
227
228 OMAP_HSMMC_WRITE(host->base, SYSCONFIG,
229 OMAP_HSMMC_READ(host->base, SYSCONFIG) | AUTOIDLE);
230
231 if (host->id == OMAP_MMC1_DEVID) {
232 if (host->power_mode != MMC_POWER_OFF &&
233 (1 << ios->vdd) <= MMC_VDD_23_24)
234 hctl = SDVS18;
235 else
236 hctl = SDVS30;
237 capa = VS30 | VS18;
238 } else {
239 hctl = SDVS18;
240 capa = VS18;
241 }
242
243 OMAP_HSMMC_WRITE(host->base, HCTL,
244 OMAP_HSMMC_READ(host->base, HCTL) | hctl);
245
246 OMAP_HSMMC_WRITE(host->base, CAPA,
247 OMAP_HSMMC_READ(host->base, CAPA) | capa);
248
249 OMAP_HSMMC_WRITE(host->base, HCTL,
250 OMAP_HSMMC_READ(host->base, HCTL) | SDBP);
251
252 timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
253 while ((OMAP_HSMMC_READ(host->base, HCTL) & SDBP) != SDBP
254 && time_before(jiffies, timeout))
255 ;
256
257 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
258 OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
259 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
260
261 /* Do not initialize card-specific things if the power is off */
262 if (host->power_mode == MMC_POWER_OFF)
263 goto out;
264
265 con = OMAP_HSMMC_READ(host->base, CON);
266 switch (ios->bus_width) {
267 case MMC_BUS_WIDTH_8:
268 OMAP_HSMMC_WRITE(host->base, CON, con | DW8);
269 break;
270 case MMC_BUS_WIDTH_4:
271 OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
272 OMAP_HSMMC_WRITE(host->base, HCTL,
273 OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT);
274 break;
275 case MMC_BUS_WIDTH_1:
276 OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
277 OMAP_HSMMC_WRITE(host->base, HCTL,
278 OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT);
279 break;
280 }
281
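	/*
	 * Derive the clock divisor from the 96 MHz master clock, rounding up
	 * so the card is never overclocked and capping at 250; e.g. a
	 * 400 kHz request yields dsor = 240.
	 */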
282 if (ios->clock) {
283 dsor = OMAP_MMC_MASTER_CLOCK / ios->clock;
284 if (dsor < 1)
285 dsor = 1;
286
287 if (OMAP_MMC_MASTER_CLOCK / dsor > ios->clock)
288 dsor++;
289
290 if (dsor > 250)
291 dsor = 250;
292 }
293
294 OMAP_HSMMC_WRITE(host->base, SYSCTL,
295 OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN);
296 OMAP_HSMMC_WRITE(host->base, SYSCTL, (dsor << 6) | (DTO << 16));
297 OMAP_HSMMC_WRITE(host->base, SYSCTL,
298 OMAP_HSMMC_READ(host->base, SYSCTL) | ICE);
299
300 timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
301 while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS
302 && time_before(jiffies, timeout))
303 ;
304
305 OMAP_HSMMC_WRITE(host->base, SYSCTL,
306 OMAP_HSMMC_READ(host->base, SYSCTL) | CEN);
307
308 con = OMAP_HSMMC_READ(host->base, CON);
309 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
310 OMAP_HSMMC_WRITE(host->base, CON, con | OD);
311 else
312 OMAP_HSMMC_WRITE(host->base, CON, con & ~OD);
313out:
314 host->context_loss = context_loss;
315
316 dev_dbg(mmc_dev(host->mmc), "context is restored\n");
317 return 0;
318}
319
320/*
321 * Save the MMC host context (store the number of power state changes so far).
322 */
323static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
324{
325 struct omap_mmc_platform_data *pdata = host->pdata;
326 int context_loss;
327
328 if (pdata->get_context_loss_count) {
329 context_loss = pdata->get_context_loss_count(host->dev);
330 if (context_loss < 0)
331 return;
332 host->context_loss = context_loss;
333 }
334}
335
336#else
337
338static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
339{
340 return 0;
341}
342
343static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
344{
345}
346
347#endif
348
166/* 349/*
167 * Send init stream sequence to card 350 * Send init stream sequence to card
168 * before sending IDLE command 351 * before sending IDLE command
169 */ 352 */
170static void send_init_stream(struct mmc_omap_host *host) 353static void send_init_stream(struct omap_hsmmc_host *host)
171{ 354{
172 int reg = 0; 355 int reg = 0;
173 unsigned long timeout; 356 unsigned long timeout;
174 357
358 if (host->protect_card)
359 return;
360
175 disable_irq(host->irq); 361 disable_irq(host->irq);
176 OMAP_HSMMC_WRITE(host->base, CON, 362 OMAP_HSMMC_WRITE(host->base, CON,
177 OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM); 363 OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM);
@@ -183,51 +369,53 @@ static void send_init_stream(struct mmc_omap_host *host)
183 369
184 OMAP_HSMMC_WRITE(host->base, CON, 370 OMAP_HSMMC_WRITE(host->base, CON,
185 OMAP_HSMMC_READ(host->base, CON) & ~INIT_STREAM); 371 OMAP_HSMMC_READ(host->base, CON) & ~INIT_STREAM);
372
373 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
374 OMAP_HSMMC_READ(host->base, STAT);
375
186 enable_irq(host->irq); 376 enable_irq(host->irq);
187} 377}
188 378
189static inline 379static inline
190int mmc_omap_cover_is_closed(struct mmc_omap_host *host) 380int omap_hsmmc_cover_is_closed(struct omap_hsmmc_host *host)
191{ 381{
192 int r = 1; 382 int r = 1;
193 383
194 if (host->pdata->slots[host->slot_id].get_cover_state) 384 if (mmc_slot(host).get_cover_state)
195 r = host->pdata->slots[host->slot_id].get_cover_state(host->dev, 385 r = mmc_slot(host).get_cover_state(host->dev, host->slot_id);
196 host->slot_id);
197 return r; 386 return r;
198} 387}
199 388
200static ssize_t 389static ssize_t
201mmc_omap_show_cover_switch(struct device *dev, struct device_attribute *attr, 390omap_hsmmc_show_cover_switch(struct device *dev, struct device_attribute *attr,
202 char *buf) 391 char *buf)
203{ 392{
204 struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev); 393 struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
205 struct mmc_omap_host *host = mmc_priv(mmc); 394 struct omap_hsmmc_host *host = mmc_priv(mmc);
206 395
207 return sprintf(buf, "%s\n", mmc_omap_cover_is_closed(host) ? "closed" : 396 return sprintf(buf, "%s\n",
208 "open"); 397 omap_hsmmc_cover_is_closed(host) ? "closed" : "open");
209} 398}
210 399
211static DEVICE_ATTR(cover_switch, S_IRUGO, mmc_omap_show_cover_switch, NULL); 400static DEVICE_ATTR(cover_switch, S_IRUGO, omap_hsmmc_show_cover_switch, NULL);
212 401
213static ssize_t 402static ssize_t
214mmc_omap_show_slot_name(struct device *dev, struct device_attribute *attr, 403omap_hsmmc_show_slot_name(struct device *dev, struct device_attribute *attr,
215 char *buf) 404 char *buf)
216{ 405{
217 struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev); 406 struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
218 struct mmc_omap_host *host = mmc_priv(mmc); 407 struct omap_hsmmc_host *host = mmc_priv(mmc);
219 struct omap_mmc_slot_data slot = host->pdata->slots[host->slot_id];
220 408
221 return sprintf(buf, "%s\n", slot.name); 409 return sprintf(buf, "%s\n", mmc_slot(host).name);
222} 410}
223 411
224static DEVICE_ATTR(slot_name, S_IRUGO, mmc_omap_show_slot_name, NULL); 412static DEVICE_ATTR(slot_name, S_IRUGO, omap_hsmmc_show_slot_name, NULL);
225 413
226/* 414/*
227 * Configure the response type and send the cmd. 415 * Configure the response type and send the cmd.
228 */ 416 */
229static void 417static void
230mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd, 418omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
231 struct mmc_data *data) 419 struct mmc_data *data)
232{ 420{
233 int cmdreg = 0, resptype = 0, cmdtype = 0; 421 int cmdreg = 0, resptype = 0, cmdtype = 0;
@@ -241,7 +429,12 @@ mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd,
241 */ 429 */
242 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); 430 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
243 OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK); 431 OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
244 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK); 432
433 if (host->use_dma)
434 OMAP_HSMMC_WRITE(host->base, IE,
435 INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE));
436 else
437 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
245 438
246 host->response_busy = 0; 439 host->response_busy = 0;
247 if (cmd->flags & MMC_RSP_PRESENT) { 440 if (cmd->flags & MMC_RSP_PRESENT) {
@@ -275,12 +468,20 @@ mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd,
275 if (host->use_dma) 468 if (host->use_dma)
276 cmdreg |= DMA_EN; 469 cmdreg |= DMA_EN;
277 470
471 /*
472 * In an interrupt context (i.e. STOP command), the spinlock is unlocked
473 * by the interrupt handler, otherwise (i.e. for a new request) it is
474 * unlocked here.
475 */
476 if (!in_interrupt())
477 spin_unlock_irqrestore(&host->irq_lock, host->flags);
478
278 OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg); 479 OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg);
279 OMAP_HSMMC_WRITE(host->base, CMD, cmdreg); 480 OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
280} 481}
281 482
282static int 483static int
283mmc_omap_get_dma_dir(struct mmc_omap_host *host, struct mmc_data *data) 484omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
284{ 485{
285 if (data->flags & MMC_DATA_WRITE) 486 if (data->flags & MMC_DATA_WRITE)
286 return DMA_TO_DEVICE; 487 return DMA_TO_DEVICE;
@@ -292,11 +493,18 @@ mmc_omap_get_dma_dir(struct mmc_omap_host *host, struct mmc_data *data)
292 * Notify the transfer complete to MMC core 493 * Notify the transfer complete to MMC core
293 */ 494 */
294static void 495static void
295mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data) 496omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
296{ 497{
297 if (!data) { 498 if (!data) {
298 struct mmc_request *mrq = host->mrq; 499 struct mmc_request *mrq = host->mrq;
299 500
501 /* TC before CC from CMD6 - don't know why, but it happens */
502 if (host->cmd && host->cmd->opcode == 6 &&
503 host->response_busy) {
504 host->response_busy = 0;
505 return;
506 }
507
300 host->mrq = NULL; 508 host->mrq = NULL;
301 mmc_request_done(host->mmc, mrq); 509 mmc_request_done(host->mmc, mrq);
302 return; 510 return;
@@ -306,7 +514,7 @@ mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
306 514
307 if (host->use_dma && host->dma_ch != -1) 515 if (host->use_dma && host->dma_ch != -1)
308 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len, 516 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
309 mmc_omap_get_dma_dir(host, data)); 517 omap_hsmmc_get_dma_dir(host, data));
310 518
311 if (!data->error) 519 if (!data->error)
312 data->bytes_xfered += data->blocks * (data->blksz); 520 data->bytes_xfered += data->blocks * (data->blksz);
@@ -318,14 +526,14 @@ mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
318 mmc_request_done(host->mmc, data->mrq); 526 mmc_request_done(host->mmc, data->mrq);
319 return; 527 return;
320 } 528 }
321 mmc_omap_start_command(host, data->stop, NULL); 529 omap_hsmmc_start_command(host, data->stop, NULL);
322} 530}
323 531
324/* 532/*
325 * Notify the core about command completion 533 * Notify the core about command completion
326 */ 534 */
327static void 535static void
328mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd) 536omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
329{ 537{
330 host->cmd = NULL; 538 host->cmd = NULL;
331 539
@@ -350,13 +558,13 @@ mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd)
350/* 558/*
351 * DMA clean up for command errors 559 * DMA clean up for command errors
352 */ 560 */
353static void mmc_dma_cleanup(struct mmc_omap_host *host, int errno) 561static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
354{ 562{
355 host->data->error = errno; 563 host->data->error = errno;
356 564
357 if (host->use_dma && host->dma_ch != -1) { 565 if (host->use_dma && host->dma_ch != -1) {
358 dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len, 566 dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len,
359 mmc_omap_get_dma_dir(host, host->data)); 567 omap_hsmmc_get_dma_dir(host, host->data));
360 omap_free_dma(host->dma_ch); 568 omap_free_dma(host->dma_ch);
361 host->dma_ch = -1; 569 host->dma_ch = -1;
362 up(&host->sem); 570 up(&host->sem);
@@ -368,10 +576,10 @@ static void mmc_dma_cleanup(struct mmc_omap_host *host, int errno)
368 * Readable error output 576 * Readable error output
369 */ 577 */
370#ifdef CONFIG_MMC_DEBUG 578#ifdef CONFIG_MMC_DEBUG
371static void mmc_omap_report_irq(struct mmc_omap_host *host, u32 status) 579static void omap_hsmmc_report_irq(struct omap_hsmmc_host *host, u32 status)
372{ 580{
373 /* --- means reserved bit without definition at documentation */ 581 /* --- means reserved bit without definition at documentation */
374 static const char *mmc_omap_status_bits[] = { 582 static const char *omap_hsmmc_status_bits[] = {
375 "CC", "TC", "BGE", "---", "BWR", "BRR", "---", "---", "CIRQ", 583 "CC", "TC", "BGE", "---", "BWR", "BRR", "---", "---", "CIRQ",
376 "OBI", "---", "---", "---", "---", "---", "ERRI", "CTO", "CCRC", 584 "OBI", "---", "---", "---", "---", "---", "ERRI", "CTO", "CCRC",
377 "CEB", "CIE", "DTO", "DCRC", "DEB", "---", "ACE", "---", 585 "CEB", "CIE", "DTO", "DCRC", "DEB", "---", "ACE", "---",
@@ -384,9 +592,9 @@ static void mmc_omap_report_irq(struct mmc_omap_host *host, u32 status)
384 len = sprintf(buf, "MMC IRQ 0x%x :", status); 592 len = sprintf(buf, "MMC IRQ 0x%x :", status);
385 buf += len; 593 buf += len;
386 594
387 for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++) 595 for (i = 0; i < ARRAY_SIZE(omap_hsmmc_status_bits); i++)
388 if (status & (1 << i)) { 596 if (status & (1 << i)) {
389 len = sprintf(buf, " %s", mmc_omap_status_bits[i]); 597 len = sprintf(buf, " %s", omap_hsmmc_status_bits[i]);
390 buf += len; 598 buf += len;
391 } 599 }
392 600
@@ -401,8 +609,8 @@ static void mmc_omap_report_irq(struct mmc_omap_host *host, u32 status)
401 * SRC or SRD bit of SYSCTL register 609 * SRC or SRD bit of SYSCTL register
402 * Can be called from interrupt context 610 * Can be called from interrupt context
403 */ 611 */
404static inline void mmc_omap_reset_controller_fsm(struct mmc_omap_host *host, 612static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
405 unsigned long bit) 613 unsigned long bit)
406{ 614{
407 unsigned long i = 0; 615 unsigned long i = 0;
408 unsigned long limit = (loops_per_jiffy * 616 unsigned long limit = (loops_per_jiffy *
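Only the head of the reset helper is visible in this hunk; the rest sets the self-clearing SRC or SRD bit in SYSCTL and polls it with a bound derived from loops_per_jiffy, since the function may run in interrupt context and must not sleep. A minimal sketch of that idiom (the helper name example_reset_fsm is made up for illustration, and the MMC_TIMEOUT_MS bound is assumed from the local variables above):

static void example_reset_fsm(struct omap_hsmmc_host *host, unsigned long bit)
{
	unsigned long i = 0;
	unsigned long limit = (loops_per_jiffy *
			       msecs_to_jiffies(MMC_TIMEOUT_MS));

	/* Request the reset; the controller clears the bit when finished. */
	OMAP_HSMMC_WRITE(host->base, SYSCTL,
			 OMAP_HSMMC_READ(host->base, SYSCTL) | bit);

	/* Bounded busy-wait: no sleeping, this may run in interrupt context. */
	while ((OMAP_HSMMC_READ(host->base, SYSCTL) & bit) && i++ < limit)
		cpu_relax();

	if (OMAP_HSMMC_READ(host->base, SYSCTL) & bit)
		dev_dbg(mmc_dev(host->mmc),
			"FSM reset (%#lx) timed out\n", bit);
}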
@@ -424,17 +632,20 @@ static inline void mmc_omap_reset_controller_fsm(struct mmc_omap_host *host,
424/* 632/*
425 * MMC controller IRQ handler 633 * MMC controller IRQ handler
426 */ 634 */
427static irqreturn_t mmc_omap_irq(int irq, void *dev_id) 635static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
428{ 636{
429 struct mmc_omap_host *host = dev_id; 637 struct omap_hsmmc_host *host = dev_id;
430 struct mmc_data *data; 638 struct mmc_data *data;
431 int end_cmd = 0, end_trans = 0, status; 639 int end_cmd = 0, end_trans = 0, status;
432 640
641 spin_lock(&host->irq_lock);
642
433 if (host->mrq == NULL) { 643 if (host->mrq == NULL) {
434 OMAP_HSMMC_WRITE(host->base, STAT, 644 OMAP_HSMMC_WRITE(host->base, STAT,
435 OMAP_HSMMC_READ(host->base, STAT)); 645 OMAP_HSMMC_READ(host->base, STAT));
436 /* Flush posted write */ 646 /* Flush posted write */
437 OMAP_HSMMC_READ(host->base, STAT); 647 OMAP_HSMMC_READ(host->base, STAT);
648 spin_unlock(&host->irq_lock);
438 return IRQ_HANDLED; 649 return IRQ_HANDLED;
439 } 650 }
440 651
@@ -444,13 +655,14 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
444 655
445 if (status & ERR) { 656 if (status & ERR) {
446#ifdef CONFIG_MMC_DEBUG 657#ifdef CONFIG_MMC_DEBUG
447 mmc_omap_report_irq(host, status); 658 omap_hsmmc_report_irq(host, status);
448#endif 659#endif
449 if ((status & CMD_TIMEOUT) || 660 if ((status & CMD_TIMEOUT) ||
450 (status & CMD_CRC)) { 661 (status & CMD_CRC)) {
451 if (host->cmd) { 662 if (host->cmd) {
452 if (status & CMD_TIMEOUT) { 663 if (status & CMD_TIMEOUT) {
453 mmc_omap_reset_controller_fsm(host, SRC); 664 omap_hsmmc_reset_controller_fsm(host,
665 SRC);
454 host->cmd->error = -ETIMEDOUT; 666 host->cmd->error = -ETIMEDOUT;
455 } else { 667 } else {
456 host->cmd->error = -EILSEQ; 668 host->cmd->error = -EILSEQ;
@@ -459,9 +671,10 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
459 } 671 }
460 if (host->data || host->response_busy) { 672 if (host->data || host->response_busy) {
461 if (host->data) 673 if (host->data)
462 mmc_dma_cleanup(host, -ETIMEDOUT); 674 omap_hsmmc_dma_cleanup(host,
675 -ETIMEDOUT);
463 host->response_busy = 0; 676 host->response_busy = 0;
464 mmc_omap_reset_controller_fsm(host, SRD); 677 omap_hsmmc_reset_controller_fsm(host, SRD);
465 } 678 }
466 } 679 }
467 if ((status & DATA_TIMEOUT) || 680 if ((status & DATA_TIMEOUT) ||
@@ -471,11 +684,11 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
471 -ETIMEDOUT : -EILSEQ; 684 -ETIMEDOUT : -EILSEQ;
472 685
473 if (host->data) 686 if (host->data)
474 mmc_dma_cleanup(host, err); 687 omap_hsmmc_dma_cleanup(host, err);
475 else 688 else
476 host->mrq->cmd->error = err; 689 host->mrq->cmd->error = err;
477 host->response_busy = 0; 690 host->response_busy = 0;
478 mmc_omap_reset_controller_fsm(host, SRD); 691 omap_hsmmc_reset_controller_fsm(host, SRD);
479 end_trans = 1; 692 end_trans = 1;
480 } 693 }
481 } 694 }
@@ -494,14 +707,16 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
494 OMAP_HSMMC_READ(host->base, STAT); 707 OMAP_HSMMC_READ(host->base, STAT);
495 708
496 if (end_cmd || ((status & CC) && host->cmd)) 709 if (end_cmd || ((status & CC) && host->cmd))
497 mmc_omap_cmd_done(host, host->cmd); 710 omap_hsmmc_cmd_done(host, host->cmd);
498 if (end_trans || (status & TC)) 711 if ((end_trans || (status & TC)) && host->mrq)
499 mmc_omap_xfer_done(host, data); 712 omap_hsmmc_xfer_done(host, data);
713
714 spin_unlock(&host->irq_lock);
500 715
501 return IRQ_HANDLED; 716 return IRQ_HANDLED;
502} 717}
503 718
504static void set_sd_bus_power(struct mmc_omap_host *host) 719static void set_sd_bus_power(struct omap_hsmmc_host *host)
505{ 720{
506 unsigned long i; 721 unsigned long i;
507 722
@@ -521,7 +736,7 @@ static void set_sd_bus_power(struct mmc_omap_host *host)
521 * The MMC2 transceiver controls are used instead of DAT4..DAT7. 736 * The MMC2 transceiver controls are used instead of DAT4..DAT7.
522 * Some chips, like eMMC ones, use internal transceivers. 737 * Some chips, like eMMC ones, use internal transceivers.
523 */ 738 */
524static int omap_mmc_switch_opcond(struct mmc_omap_host *host, int vdd) 739static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
525{ 740{
526 u32 reg_val = 0; 741 u32 reg_val = 0;
527 int ret; 742 int ret;
@@ -529,22 +744,24 @@ static int omap_mmc_switch_opcond(struct mmc_omap_host *host, int vdd)
529 /* Disable the clocks */ 744 /* Disable the clocks */
530 clk_disable(host->fclk); 745 clk_disable(host->fclk);
531 clk_disable(host->iclk); 746 clk_disable(host->iclk);
532 clk_disable(host->dbclk); 747 if (host->got_dbclk)
748 clk_disable(host->dbclk);
533 749
534 /* Turn the power off */ 750 /* Turn the power off */
535 ret = mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0); 751 ret = mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0);
536 if (ret != 0)
537 goto err;
538 752
539 /* Turn the power ON with given VDD 1.8 or 3.0v */ 753 /* Turn the power ON with given VDD 1.8 or 3.0v */
540 ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1, vdd); 754 if (!ret)
755 ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1,
756 vdd);
757 clk_enable(host->iclk);
758 clk_enable(host->fclk);
759 if (host->got_dbclk)
760 clk_enable(host->dbclk);
761
541 if (ret != 0) 762 if (ret != 0)
542 goto err; 763 goto err;
543 764
544 clk_enable(host->fclk);
545 clk_enable(host->iclk);
546 clk_enable(host->dbclk);
547
548 OMAP_HSMMC_WRITE(host->base, HCTL, 765 OMAP_HSMMC_WRITE(host->base, HCTL,
549 OMAP_HSMMC_READ(host->base, HCTL) & SDVSCLR); 766 OMAP_HSMMC_READ(host->base, HCTL) & SDVSCLR);
550 reg_val = OMAP_HSMMC_READ(host->base, HCTL); 767 reg_val = OMAP_HSMMC_READ(host->base, HCTL);
@@ -552,7 +769,7 @@ static int omap_mmc_switch_opcond(struct mmc_omap_host *host, int vdd)
552 /* 769 /*
553 * If a MMC dual voltage card is detected, the set_ios fn calls 770 * If a MMC dual voltage card is detected, the set_ios fn calls
554 * this fn with VDD bit set for 1.8V. Upon card removal from the 771 * this fn with VDD bit set for 1.8V. Upon card removal from the
555 * slot, omap_mmc_set_ios sets the VDD back to 3V on MMC_POWER_OFF. 772 * slot, omap_hsmmc_set_ios sets the VDD back to 3V on MMC_POWER_OFF.
556 * 773 *
557 * Cope with a bit of slop in the range ... per data sheets: 774 * Cope with a bit of slop in the range ... per data sheets:
558 * - "1.8V" for vdds_mmc1/vdds_mmc1a can be up to 2.45V max, 775 * - "1.8V" for vdds_mmc1/vdds_mmc1a can be up to 2.45V max,
@@ -578,25 +795,59 @@ err:
578 return ret; 795 return ret;
579} 796}
580 797
798/* Protect the card while the cover is open */
799static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host)
800{
801 if (!mmc_slot(host).get_cover_state)
802 return;
803
804 host->reqs_blocked = 0;
805 if (mmc_slot(host).get_cover_state(host->dev, host->slot_id)) {
806 if (host->protect_card) {
807 printk(KERN_INFO "%s: cover is closed, "
808 "card is now accessible\n",
809 mmc_hostname(host->mmc));
810 host->protect_card = 0;
811 }
812 } else {
813 if (!host->protect_card) {
814 printk(KERN_INFO "%s: cover is open, "
815 "card is now inaccessible\n",
816 mmc_hostname(host->mmc));
817 host->protect_card = 1;
818 }
819 }
820}
821
581/* 822/*
582 * Work Item to notify the core about card insertion/removal 823 * Work Item to notify the core about card insertion/removal
583 */ 824 */
584static void mmc_omap_detect(struct work_struct *work) 825static void omap_hsmmc_detect(struct work_struct *work)
585{ 826{
586 struct mmc_omap_host *host = container_of(work, struct mmc_omap_host, 827 struct omap_hsmmc_host *host =
587 mmc_carddetect_work); 828 container_of(work, struct omap_hsmmc_host, mmc_carddetect_work);
588 struct omap_mmc_slot_data *slot = &mmc_slot(host); 829 struct omap_mmc_slot_data *slot = &mmc_slot(host);
830 int carddetect;
589 831
590 if (mmc_slot(host).card_detect) 832 if (host->suspended)
591 host->carddetect = slot->card_detect(slot->card_detect_irq); 833 return;
592 else
593 host->carddetect = -ENOSYS;
594 834
595 sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch"); 835 sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch");
596 if (host->carddetect) { 836
837 if (slot->card_detect)
838 carddetect = slot->card_detect(slot->card_detect_irq);
839 else {
840 omap_hsmmc_protect_card(host);
841 carddetect = -ENOSYS;
842 }
843
844 if (carddetect) {
597 mmc_detect_change(host->mmc, (HZ * 200) / 1000); 845 mmc_detect_change(host->mmc, (HZ * 200) / 1000);
598 } else { 846 } else {
599 mmc_omap_reset_controller_fsm(host, SRD); 847 mmc_host_enable(host->mmc);
848 omap_hsmmc_reset_controller_fsm(host, SRD);
849 mmc_host_lazy_disable(host->mmc);
850
600 mmc_detect_change(host->mmc, (HZ * 50) / 1000); 851 mmc_detect_change(host->mmc, (HZ * 50) / 1000);
601 } 852 }
602} 853}
@@ -604,16 +855,18 @@ static void mmc_omap_detect(struct work_struct *work)
604/* 855/*
605 * ISR for handling card insertion and removal 856 * ISR for handling card insertion and removal
606 */ 857 */
607static irqreturn_t omap_mmc_cd_handler(int irq, void *dev_id) 858static irqreturn_t omap_hsmmc_cd_handler(int irq, void *dev_id)
608{ 859{
609 struct mmc_omap_host *host = (struct mmc_omap_host *)dev_id; 860 struct omap_hsmmc_host *host = (struct omap_hsmmc_host *)dev_id;
610 861
862 if (host->suspended)
863 return IRQ_HANDLED;
611 schedule_work(&host->mmc_carddetect_work); 864 schedule_work(&host->mmc_carddetect_work);
612 865
613 return IRQ_HANDLED; 866 return IRQ_HANDLED;
614} 867}
615 868
616static int mmc_omap_get_dma_sync_dev(struct mmc_omap_host *host, 869static int omap_hsmmc_get_dma_sync_dev(struct omap_hsmmc_host *host,
617 struct mmc_data *data) 870 struct mmc_data *data)
618{ 871{
619 int sync_dev; 872 int sync_dev;
@@ -625,7 +878,7 @@ static int mmc_omap_get_dma_sync_dev(struct mmc_omap_host *host,
625 return sync_dev; 878 return sync_dev;
626} 879}
627 880
628static void mmc_omap_config_dma_params(struct mmc_omap_host *host, 881static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host,
629 struct mmc_data *data, 882 struct mmc_data *data,
630 struct scatterlist *sgl) 883 struct scatterlist *sgl)
631{ 884{
@@ -639,7 +892,7 @@ static void mmc_omap_config_dma_params(struct mmc_omap_host *host,
639 sg_dma_address(sgl), 0, 0); 892 sg_dma_address(sgl), 0, 0);
640 } else { 893 } else {
641 omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, 894 omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
642 (host->mapbase + OMAP_HSMMC_DATA), 0, 0); 895 (host->mapbase + OMAP_HSMMC_DATA), 0, 0);
643 omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC, 896 omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
644 sg_dma_address(sgl), 0, 0); 897 sg_dma_address(sgl), 0, 0);
645 } 898 }
@@ -649,7 +902,7 @@ static void mmc_omap_config_dma_params(struct mmc_omap_host *host,
649 902
650 omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32, 903 omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32,
651 blksz / 4, nblk, OMAP_DMA_SYNC_FRAME, 904 blksz / 4, nblk, OMAP_DMA_SYNC_FRAME,
652 mmc_omap_get_dma_sync_dev(host, data), 905 omap_hsmmc_get_dma_sync_dev(host, data),
653 !(data->flags & MMC_DATA_WRITE)); 906 !(data->flags & MMC_DATA_WRITE));
654 907
655 omap_start_dma(dma_ch); 908 omap_start_dma(dma_ch);
@@ -658,9 +911,9 @@ static void mmc_omap_config_dma_params(struct mmc_omap_host *host,
658/* 911/*
659 * DMA call back function 912 * DMA call back function
660 */ 913 */
661static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data) 914static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *data)
662{ 915{
663 struct mmc_omap_host *host = data; 916 struct omap_hsmmc_host *host = data;
664 917
665 if (ch_status & OMAP2_DMA_MISALIGNED_ERR_IRQ) 918 if (ch_status & OMAP2_DMA_MISALIGNED_ERR_IRQ)
666 dev_dbg(mmc_dev(host->mmc), "MISALIGNED_ADRS_ERR\n"); 919 dev_dbg(mmc_dev(host->mmc), "MISALIGNED_ADRS_ERR\n");
@@ -671,7 +924,7 @@ static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
671 host->dma_sg_idx++; 924 host->dma_sg_idx++;
672 if (host->dma_sg_idx < host->dma_len) { 925 if (host->dma_sg_idx < host->dma_len) {
673 /* Fire up the next transfer. */ 926 /* Fire up the next transfer. */
674 mmc_omap_config_dma_params(host, host->data, 927 omap_hsmmc_config_dma_params(host, host->data,
675 host->data->sg + host->dma_sg_idx); 928 host->data->sg + host->dma_sg_idx);
676 return; 929 return;
677 } 930 }
@@ -688,14 +941,14 @@ static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
688/* 941/*
689 * Routine to configure and start DMA for the MMC card 942 * Routine to configure and start DMA for the MMC card
690 */ 943 */
691static int 944static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
692mmc_omap_start_dma_transfer(struct mmc_omap_host *host, struct mmc_request *req) 945 struct mmc_request *req)
693{ 946{
694 int dma_ch = 0, ret = 0, err = 1, i; 947 int dma_ch = 0, ret = 0, err = 1, i;
695 struct mmc_data *data = req->data; 948 struct mmc_data *data = req->data;
696 949
697 /* Sanity check: all the SG entries must be aligned by block size. */ 950 /* Sanity check: all the SG entries must be aligned by block size. */
698 for (i = 0; i < host->dma_len; i++) { 951 for (i = 0; i < data->sg_len; i++) {
699 struct scatterlist *sgl; 952 struct scatterlist *sgl;
700 953
701 sgl = data->sg + i; 954 sgl = data->sg + i;
@@ -726,8 +979,8 @@ mmc_omap_start_dma_transfer(struct mmc_omap_host *host, struct mmc_request *req)
726 return err; 979 return err;
727 } 980 }
728 981
729 ret = omap_request_dma(mmc_omap_get_dma_sync_dev(host, data), "MMC/SD", 982 ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
730 mmc_omap_dma_cb,host, &dma_ch); 983 "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
731 if (ret != 0) { 984 if (ret != 0) {
732 dev_err(mmc_dev(host->mmc), 985 dev_err(mmc_dev(host->mmc),
733 "%s: omap_request_dma() failed with %d\n", 986 "%s: omap_request_dma() failed with %d\n",
@@ -736,17 +989,18 @@ mmc_omap_start_dma_transfer(struct mmc_omap_host *host, struct mmc_request *req)
736 } 989 }
737 990
738 host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, 991 host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
739 data->sg_len, mmc_omap_get_dma_dir(host, data)); 992 data->sg_len, omap_hsmmc_get_dma_dir(host, data));
740 host->dma_ch = dma_ch; 993 host->dma_ch = dma_ch;
741 host->dma_sg_idx = 0; 994 host->dma_sg_idx = 0;
742 995
743 mmc_omap_config_dma_params(host, data, data->sg); 996 omap_hsmmc_config_dma_params(host, data, data->sg);
744 997
745 return 0; 998 return 0;
746} 999}
747 1000
748static void set_data_timeout(struct mmc_omap_host *host, 1001static void set_data_timeout(struct omap_hsmmc_host *host,
749 struct mmc_request *req) 1002 unsigned int timeout_ns,
1003 unsigned int timeout_clks)
750{ 1004{
751 unsigned int timeout, cycle_ns; 1005 unsigned int timeout, cycle_ns;
752 uint32_t reg, clkd, dto = 0; 1006 uint32_t reg, clkd, dto = 0;
@@ -757,8 +1011,8 @@ static void set_data_timeout(struct mmc_omap_host *host,
757 clkd = 1; 1011 clkd = 1;
758 1012
759 cycle_ns = 1000000000 / (clk_get_rate(host->fclk) / clkd); 1013 cycle_ns = 1000000000 / (clk_get_rate(host->fclk) / clkd);
760 timeout = req->data->timeout_ns / cycle_ns; 1014 timeout = timeout_ns / cycle_ns;
761 timeout += req->data->timeout_clks; 1015 timeout += timeout_clks;
762 if (timeout) { 1016 if (timeout) {
763 while ((timeout & 0x80000000) == 0) { 1017 while ((timeout & 0x80000000) == 0) {
764 dto += 1; 1018 dto += 1;
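The loop above locates the most significant bit of the required cycle count so it can be mapped onto the controller's coarse timeout field. The same idea as a standalone sketch, assuming the usual HSMMC encoding of 2^(13 + DTO) functional-clock cycles (the helper name and the 96 MHz clock in the example are illustrative, not taken from the patch):

static unsigned int example_pick_dto(unsigned int timeout_ns,
				     unsigned int timeout_clks,
				     unsigned long fclk_rate,
				     unsigned int clkd)
{
	/* clkd is assumed to be at least 1, as the caller above ensures. */
	unsigned int cycle_ns = 1000000000 / (fclk_rate / clkd);
	unsigned int cycles = timeout_ns / cycle_ns + timeout_clks;
	unsigned int dto = 0;

	/* Round up to the smallest 2^(13 + dto) that covers 'cycles'. */
	while (dto < 14 && cycles > (1U << (13 + dto)))
		dto++;

	return dto;	/* candidate value for the SYSCTL.DTO field */
}

For the 100 ms busy-signal timeout added below, at an assumed 96 MHz fclk with clkd = 1 this yields dto = 11, i.e. 2^24 cycles (roughly 175 ms), comfortably covering the request.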
@@ -785,22 +1039,28 @@ static void set_data_timeout(struct mmc_omap_host *host,
785 * Configure block length for MMC/SD cards and initiate the transfer. 1039 * Configure block length for MMC/SD cards and initiate the transfer.
786 */ 1040 */
787static int 1041static int
788mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req) 1042omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req)
789{ 1043{
790 int ret; 1044 int ret;
791 host->data = req->data; 1045 host->data = req->data;
792 1046
793 if (req->data == NULL) { 1047 if (req->data == NULL) {
794 OMAP_HSMMC_WRITE(host->base, BLK, 0); 1048 OMAP_HSMMC_WRITE(host->base, BLK, 0);
1049 /*
1050 * Set an arbitrary 100ms data timeout for commands with
1051 * busy signal.
1052 */
1053 if (req->cmd->flags & MMC_RSP_BUSY)
1054 set_data_timeout(host, 100000000U, 0);
795 return 0; 1055 return 0;
796 } 1056 }
797 1057
798 OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz) 1058 OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz)
799 | (req->data->blocks << 16)); 1059 | (req->data->blocks << 16));
800 set_data_timeout(host, req); 1060 set_data_timeout(host, req->data->timeout_ns, req->data->timeout_clks);
801 1061
802 if (host->use_dma) { 1062 if (host->use_dma) {
803 ret = mmc_omap_start_dma_transfer(host, req); 1063 ret = omap_hsmmc_start_dma_transfer(host, req);
804 if (ret != 0) { 1064 if (ret != 0) {
805 dev_dbg(mmc_dev(host->mmc), "MMC start dma failure\n"); 1065 dev_dbg(mmc_dev(host->mmc), "MMC start dma failure\n");
806 return ret; 1066 return ret;
@@ -812,35 +1072,92 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
812/* 1072/*
 813 * Request function for read/write operations 1073
814 */ 1074 */
815static void omap_mmc_request(struct mmc_host *mmc, struct mmc_request *req) 1075static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
816{ 1076{
817 struct mmc_omap_host *host = mmc_priv(mmc); 1077 struct omap_hsmmc_host *host = mmc_priv(mmc);
1078 int err;
818 1079
1080 /*
1081 * Prevent races with the interrupt handler because of unexpected
1082 * interrupts, but not if we are already in interrupt context i.e.
1083 * retries.
1084 */
1085 if (!in_interrupt()) {
1086 spin_lock_irqsave(&host->irq_lock, host->flags);
1087 /*
1088 * Protect the card from I/O if there is a possibility
1089 * it can be removed.
1090 */
1091 if (host->protect_card) {
1092 if (host->reqs_blocked < 3) {
1093 /*
1094 * Ensure the controller is left in a consistent
1095 * state by resetting the command and data state
1096 * machines.
1097 */
1098 omap_hsmmc_reset_controller_fsm(host, SRD);
1099 omap_hsmmc_reset_controller_fsm(host, SRC);
1100 host->reqs_blocked += 1;
1101 }
1102 req->cmd->error = -EBADF;
1103 if (req->data)
1104 req->data->error = -EBADF;
1105 spin_unlock_irqrestore(&host->irq_lock, host->flags);
1106 mmc_request_done(mmc, req);
1107 return;
1108 } else if (host->reqs_blocked)
1109 host->reqs_blocked = 0;
1110 }
819 WARN_ON(host->mrq != NULL); 1111 WARN_ON(host->mrq != NULL);
820 host->mrq = req; 1112 host->mrq = req;
821 mmc_omap_prepare_data(host, req); 1113 err = omap_hsmmc_prepare_data(host, req);
822 mmc_omap_start_command(host, req->cmd, req->data); 1114 if (err) {
823} 1115 req->cmd->error = err;
1116 if (req->data)
1117 req->data->error = err;
1118 host->mrq = NULL;
1119 if (!in_interrupt())
1120 spin_unlock_irqrestore(&host->irq_lock, host->flags);
1121 mmc_request_done(mmc, req);
1122 return;
1123 }
824 1124
1125 omap_hsmmc_start_command(host, req->cmd, req->data);
1126}
825 1127
826/* Routine to configure clock values. Exposed API to core */ 1128/* Routine to configure clock values. Exposed API to core */
827static void omap_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 1129static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
828{ 1130{
829 struct mmc_omap_host *host = mmc_priv(mmc); 1131 struct omap_hsmmc_host *host = mmc_priv(mmc);
830 u16 dsor = 0; 1132 u16 dsor = 0;
831 unsigned long regval; 1133 unsigned long regval;
832 unsigned long timeout; 1134 unsigned long timeout;
833 u32 con; 1135 u32 con;
1136 int do_send_init_stream = 0;
834 1137
835 switch (ios->power_mode) { 1138 mmc_host_enable(host->mmc);
836 case MMC_POWER_OFF: 1139
837 mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0); 1140 if (ios->power_mode != host->power_mode) {
838 break; 1141 switch (ios->power_mode) {
839 case MMC_POWER_UP: 1142 case MMC_POWER_OFF:
840 mmc_slot(host).set_power(host->dev, host->slot_id, 1, ios->vdd); 1143 mmc_slot(host).set_power(host->dev, host->slot_id,
841 break; 1144 0, 0);
1145 host->vdd = 0;
1146 break;
1147 case MMC_POWER_UP:
1148 mmc_slot(host).set_power(host->dev, host->slot_id,
1149 1, ios->vdd);
1150 host->vdd = ios->vdd;
1151 break;
1152 case MMC_POWER_ON:
1153 do_send_init_stream = 1;
1154 break;
1155 }
1156 host->power_mode = ios->power_mode;
842 } 1157 }
843 1158
1159 /* FIXME: set registers based only on changes to ios */
1160
844 con = OMAP_HSMMC_READ(host->base, CON); 1161 con = OMAP_HSMMC_READ(host->base, CON);
845 switch (mmc->ios.bus_width) { 1162 switch (mmc->ios.bus_width) {
846 case MMC_BUS_WIDTH_8: 1163 case MMC_BUS_WIDTH_8:
@@ -870,8 +1187,8 @@ static void omap_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
870 * MMC_POWER_UP upon recalculating the voltage. 1187 * MMC_POWER_UP upon recalculating the voltage.
871 * vdd 1.8v. 1188 * vdd 1.8v.
872 */ 1189 */
873 if (omap_mmc_switch_opcond(host, ios->vdd) != 0) 1190 if (omap_hsmmc_switch_opcond(host, ios->vdd) != 0)
874 dev_dbg(mmc_dev(host->mmc), 1191 dev_dbg(mmc_dev(host->mmc),
875 "Switch operation failed\n"); 1192 "Switch operation failed\n");
876 } 1193 }
877 } 1194 }
@@ -887,7 +1204,7 @@ static void omap_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
887 if (dsor > 250) 1204 if (dsor > 250)
888 dsor = 250; 1205 dsor = 250;
889 } 1206 }
890 omap_mmc_stop_clock(host); 1207 omap_hsmmc_stop_clock(host);
891 regval = OMAP_HSMMC_READ(host->base, SYSCTL); 1208 regval = OMAP_HSMMC_READ(host->base, SYSCTL);
892 regval = regval & ~(CLKD_MASK); 1209 regval = regval & ~(CLKD_MASK);
893 regval = regval | (dsor << 6) | (DTO << 16); 1210 regval = regval | (dsor << 6) | (DTO << 16);
@@ -897,42 +1214,47 @@ static void omap_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
897 1214
898 /* Wait till the ICS bit is set */ 1215 /* Wait till the ICS bit is set */
899 timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); 1216 timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
900 while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != 0x2 1217 while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS
901 && time_before(jiffies, timeout)) 1218 && time_before(jiffies, timeout))
902 msleep(1); 1219 msleep(1);
903 1220
904 OMAP_HSMMC_WRITE(host->base, SYSCTL, 1221 OMAP_HSMMC_WRITE(host->base, SYSCTL,
905 OMAP_HSMMC_READ(host->base, SYSCTL) | CEN); 1222 OMAP_HSMMC_READ(host->base, SYSCTL) | CEN);
906 1223
907 if (ios->power_mode == MMC_POWER_ON) 1224 if (do_send_init_stream)
908 send_init_stream(host); 1225 send_init_stream(host);
909 1226
1227 con = OMAP_HSMMC_READ(host->base, CON);
910 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) 1228 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
911 OMAP_HSMMC_WRITE(host->base, CON, 1229 OMAP_HSMMC_WRITE(host->base, CON, con | OD);
912 OMAP_HSMMC_READ(host->base, CON) | OD); 1230 else
1231 OMAP_HSMMC_WRITE(host->base, CON, con & ~OD);
1232
1233 if (host->power_mode == MMC_POWER_OFF)
1234 mmc_host_disable(host->mmc);
1235 else
1236 mmc_host_lazy_disable(host->mmc);
913} 1237}
914 1238
915static int omap_hsmmc_get_cd(struct mmc_host *mmc) 1239static int omap_hsmmc_get_cd(struct mmc_host *mmc)
916{ 1240{
917 struct mmc_omap_host *host = mmc_priv(mmc); 1241 struct omap_hsmmc_host *host = mmc_priv(mmc);
918 struct omap_mmc_platform_data *pdata = host->pdata;
919 1242
920 if (!pdata->slots[0].card_detect) 1243 if (!mmc_slot(host).card_detect)
921 return -ENOSYS; 1244 return -ENOSYS;
922 return pdata->slots[0].card_detect(pdata->slots[0].card_detect_irq); 1245 return mmc_slot(host).card_detect(mmc_slot(host).card_detect_irq);
923} 1246}
924 1247
925static int omap_hsmmc_get_ro(struct mmc_host *mmc) 1248static int omap_hsmmc_get_ro(struct mmc_host *mmc)
926{ 1249{
927 struct mmc_omap_host *host = mmc_priv(mmc); 1250 struct omap_hsmmc_host *host = mmc_priv(mmc);
928 struct omap_mmc_platform_data *pdata = host->pdata;
929 1251
930 if (!pdata->slots[0].get_ro) 1252 if (!mmc_slot(host).get_ro)
931 return -ENOSYS; 1253 return -ENOSYS;
932 return pdata->slots[0].get_ro(host->dev, 0); 1254 return mmc_slot(host).get_ro(host->dev, 0);
933} 1255}
934 1256
935static void omap_hsmmc_init(struct mmc_omap_host *host) 1257static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host)
936{ 1258{
937 u32 hctl, capa, value; 1259 u32 hctl, capa, value;
938 1260
@@ -959,19 +1281,340 @@ static void omap_hsmmc_init(struct mmc_omap_host *host)
959 set_sd_bus_power(host); 1281 set_sd_bus_power(host);
960} 1282}
961 1283
962static struct mmc_host_ops mmc_omap_ops = { 1284/*
963 .request = omap_mmc_request, 1285 * Dynamic power saving handling, FSM:
964 .set_ios = omap_mmc_set_ios, 1286 * ENABLED -> DISABLED -> CARDSLEEP / REGSLEEP -> OFF
1287 * ^___________| | |
1288 * |______________________|______________________|
1289 *
1290 * ENABLED: mmc host is fully functional
1291 * DISABLED: fclk is off
1292 * CARDSLEEP: fclk is off, card is asleep, voltage regulator is asleep
1293 * REGSLEEP: fclk is off, voltage regulator is asleep
1294 * OFF: fclk is off, voltage regulator is off
1295 *
1296 * Transition handlers return the timeout for the next state transition
1297 * or negative error.
1298 */
1299
1300enum {ENABLED = 0, DISABLED, CARDSLEEP, REGSLEEP, OFF};
1301
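Reading the transition handlers below together with the probe-time mmc_set_disable_delay() call, an idle powered-up slot steps one state deeper at each expiry, so the total idle time before the regulator is cut is roughly the sum of the three timeout constants. A trivial sketch of that arithmetic, assuming all three constants are in milliseconds and that the card-detect/cover conditions permit each step (the helper name is illustrative only):

static unsigned int example_idle_ms_until_off(void)
{
	return OMAP_MMC_DISABLED_TIMEOUT	/* ENABLED  -> DISABLED */
	     + OMAP_MMC_SLEEP_TIMEOUT		/* DISABLED -> *SLEEP   */
	     + OMAP_MMC_OFF_TIMEOUT;		/* *SLEEP   -> OFF      */
}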
1302/* Handler for [ENABLED -> DISABLED] transition */
1303static int omap_hsmmc_enabled_to_disabled(struct omap_hsmmc_host *host)
1304{
1305 omap_hsmmc_context_save(host);
1306 clk_disable(host->fclk);
1307 host->dpm_state = DISABLED;
1308
1309 dev_dbg(mmc_dev(host->mmc), "ENABLED -> DISABLED\n");
1310
1311 if (host->power_mode == MMC_POWER_OFF)
1312 return 0;
1313
1314 return msecs_to_jiffies(OMAP_MMC_SLEEP_TIMEOUT);
1315}
1316
1317/* Handler for [DISABLED -> REGSLEEP / CARDSLEEP] transition */
1318static int omap_hsmmc_disabled_to_sleep(struct omap_hsmmc_host *host)
1319{
1320 int err, new_state;
1321
1322 if (!mmc_try_claim_host(host->mmc))
1323 return 0;
1324
1325 clk_enable(host->fclk);
1326 omap_hsmmc_context_restore(host);
1327 if (mmc_card_can_sleep(host->mmc)) {
1328 err = mmc_card_sleep(host->mmc);
1329 if (err < 0) {
1330 clk_disable(host->fclk);
1331 mmc_release_host(host->mmc);
1332 return err;
1333 }
1334 new_state = CARDSLEEP;
1335 } else {
1336 new_state = REGSLEEP;
1337 }
1338 if (mmc_slot(host).set_sleep)
1339 mmc_slot(host).set_sleep(host->dev, host->slot_id, 1, 0,
1340 new_state == CARDSLEEP);
1341 /* FIXME: turn off bus power and perhaps interrupts too */
1342 clk_disable(host->fclk);
1343 host->dpm_state = new_state;
1344
1345 mmc_release_host(host->mmc);
1346
1347 dev_dbg(mmc_dev(host->mmc), "DISABLED -> %s\n",
1348 host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP");
1349
1350 if ((host->mmc->caps & MMC_CAP_NONREMOVABLE) ||
1351 mmc_slot(host).card_detect ||
1352 (mmc_slot(host).get_cover_state &&
1353 mmc_slot(host).get_cover_state(host->dev, host->slot_id)))
1354 return msecs_to_jiffies(OMAP_MMC_OFF_TIMEOUT);
1355
1356 return 0;
1357}
1358
1359/* Handler for [REGSLEEP / CARDSLEEP -> OFF] transition */
1360static int omap_hsmmc_sleep_to_off(struct omap_hsmmc_host *host)
1361{
1362 if (!mmc_try_claim_host(host->mmc))
1363 return 0;
1364
1365 if (!((host->mmc->caps & MMC_CAP_NONREMOVABLE) ||
1366 mmc_slot(host).card_detect ||
1367 (mmc_slot(host).get_cover_state &&
1368 mmc_slot(host).get_cover_state(host->dev, host->slot_id)))) {
1369 mmc_release_host(host->mmc);
1370 return 0;
1371 }
1372
1373 mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0);
1374 host->vdd = 0;
1375 host->power_mode = MMC_POWER_OFF;
1376
1377 dev_dbg(mmc_dev(host->mmc), "%s -> OFF\n",
1378 host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP");
1379
1380 host->dpm_state = OFF;
1381
1382 mmc_release_host(host->mmc);
1383
1384 return 0;
1385}
1386
1387/* Handler for [DISABLED -> ENABLED] transition */
1388static int omap_hsmmc_disabled_to_enabled(struct omap_hsmmc_host *host)
1389{
1390 int err;
1391
1392 err = clk_enable(host->fclk);
1393 if (err < 0)
1394 return err;
1395
1396 omap_hsmmc_context_restore(host);
1397 host->dpm_state = ENABLED;
1398
1399 dev_dbg(mmc_dev(host->mmc), "DISABLED -> ENABLED\n");
1400
1401 return 0;
1402}
1403
1404/* Handler for [SLEEP -> ENABLED] transition */
1405static int omap_hsmmc_sleep_to_enabled(struct omap_hsmmc_host *host)
1406{
1407 if (!mmc_try_claim_host(host->mmc))
1408 return 0;
1409
1410 clk_enable(host->fclk);
1411 omap_hsmmc_context_restore(host);
1412 if (mmc_slot(host).set_sleep)
1413 mmc_slot(host).set_sleep(host->dev, host->slot_id, 0,
1414 host->vdd, host->dpm_state == CARDSLEEP);
1415 if (mmc_card_can_sleep(host->mmc))
1416 mmc_card_awake(host->mmc);
1417
1418 dev_dbg(mmc_dev(host->mmc), "%s -> ENABLED\n",
1419 host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP");
1420
1421 host->dpm_state = ENABLED;
1422
1423 mmc_release_host(host->mmc);
1424
1425 return 0;
1426}
1427
1428/* Handler for [OFF -> ENABLED] transition */
1429static int omap_hsmmc_off_to_enabled(struct omap_hsmmc_host *host)
1430{
1431 clk_enable(host->fclk);
1432
1433 omap_hsmmc_context_restore(host);
1434 omap_hsmmc_conf_bus_power(host);
1435 mmc_power_restore_host(host->mmc);
1436
1437 host->dpm_state = ENABLED;
1438
1439 dev_dbg(mmc_dev(host->mmc), "OFF -> ENABLED\n");
1440
1441 return 0;
1442}
1443
1444/*
1445 * Bring MMC host to ENABLED from any other PM state.
1446 */
1447static int omap_hsmmc_enable(struct mmc_host *mmc)
1448{
1449 struct omap_hsmmc_host *host = mmc_priv(mmc);
1450
1451 switch (host->dpm_state) {
1452 case DISABLED:
1453 return omap_hsmmc_disabled_to_enabled(host);
1454 case CARDSLEEP:
1455 case REGSLEEP:
1456 return omap_hsmmc_sleep_to_enabled(host);
1457 case OFF:
1458 return omap_hsmmc_off_to_enabled(host);
1459 default:
1460 dev_dbg(mmc_dev(host->mmc), "UNKNOWN state\n");
1461 return -EINVAL;
1462 }
1463}
1464
1465/*
1466 * Bring MMC host in PM state (one level deeper).
1467 */
1468static int omap_hsmmc_disable(struct mmc_host *mmc, int lazy)
1469{
1470 struct omap_hsmmc_host *host = mmc_priv(mmc);
1471
1472 switch (host->dpm_state) {
1473 case ENABLED: {
1474 int delay;
1475
1476 delay = omap_hsmmc_enabled_to_disabled(host);
1477 if (lazy || delay < 0)
1478 return delay;
1479 return 0;
1480 }
1481 case DISABLED:
1482 return omap_hsmmc_disabled_to_sleep(host);
1483 case CARDSLEEP:
1484 case REGSLEEP:
1485 return omap_hsmmc_sleep_to_off(host);
1486 default:
1487 dev_dbg(mmc_dev(host->mmc), "UNKNOWN state\n");
1488 return -EINVAL;
1489 }
1490}
1491
1492static int omap_hsmmc_enable_fclk(struct mmc_host *mmc)
1493{
1494 struct omap_hsmmc_host *host = mmc_priv(mmc);
1495 int err;
1496
1497 err = clk_enable(host->fclk);
1498 if (err)
1499 return err;
1500 dev_dbg(mmc_dev(host->mmc), "mmc_fclk: enabled\n");
1501 omap_hsmmc_context_restore(host);
1502 return 0;
1503}
1504
1505static int omap_hsmmc_disable_fclk(struct mmc_host *mmc, int lazy)
1506{
1507 struct omap_hsmmc_host *host = mmc_priv(mmc);
1508
1509 omap_hsmmc_context_save(host);
1510 clk_disable(host->fclk);
1511 dev_dbg(mmc_dev(host->mmc), "mmc_fclk: disabled\n");
1512 return 0;
1513}
1514
1515static const struct mmc_host_ops omap_hsmmc_ops = {
1516 .enable = omap_hsmmc_enable_fclk,
1517 .disable = omap_hsmmc_disable_fclk,
1518 .request = omap_hsmmc_request,
1519 .set_ios = omap_hsmmc_set_ios,
965 .get_cd = omap_hsmmc_get_cd, 1520 .get_cd = omap_hsmmc_get_cd,
966 .get_ro = omap_hsmmc_get_ro, 1521 .get_ro = omap_hsmmc_get_ro,
967 /* NYET -- enable_sdio_irq */ 1522 /* NYET -- enable_sdio_irq */
968}; 1523};
969 1524
970static int __init omap_mmc_probe(struct platform_device *pdev) 1525static const struct mmc_host_ops omap_hsmmc_ps_ops = {
1526 .enable = omap_hsmmc_enable,
1527 .disable = omap_hsmmc_disable,
1528 .request = omap_hsmmc_request,
1529 .set_ios = omap_hsmmc_set_ios,
1530 .get_cd = omap_hsmmc_get_cd,
1531 .get_ro = omap_hsmmc_get_ro,
1532 /* NYET -- enable_sdio_irq */
1533};
1534
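Which of the two ops tables a slot ends up with is chosen at probe time from the power_saving flag in the board's platform data (see the probe changes further down). A minimal board-file sketch of opting in; only the slot fields visible in this patch are shown, the values are illustrative, and required fields such as the slot's set_power callback are omitted:

static struct omap_mmc_platform_data example_mmc_pdata = {
	.slots[0] = {
		.wires		= 4,	/* 4-bit data bus */
		.power_saving	= 1,	/* select omap_hsmmc_ps_ops */
		.nonremovable	= 0,	/* keep card-detect handling */
	},
};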
1535#ifdef CONFIG_DEBUG_FS
1536
1537static int omap_hsmmc_regs_show(struct seq_file *s, void *data)
1538{
1539 struct mmc_host *mmc = s->private;
1540 struct omap_hsmmc_host *host = mmc_priv(mmc);
1541 int context_loss = 0;
1542
1543 if (host->pdata->get_context_loss_count)
1544 context_loss = host->pdata->get_context_loss_count(host->dev);
1545
1546 seq_printf(s, "mmc%d:\n"
1547 " enabled:\t%d\n"
1548 " dpm_state:\t%d\n"
1549 " nesting_cnt:\t%d\n"
1550 " ctx_loss:\t%d:%d\n"
1551 "\nregs:\n",
1552 mmc->index, mmc->enabled ? 1 : 0,
1553 host->dpm_state, mmc->nesting_cnt,
1554 host->context_loss, context_loss);
1555
1556 if (host->suspended || host->dpm_state == OFF) {
1557 seq_printf(s, "host suspended, can't read registers\n");
1558 return 0;
1559 }
1560
1561 if (clk_enable(host->fclk) != 0) {
1562 seq_printf(s, "can't read the regs\n");
1563 return 0;
1564 }
1565
1566 seq_printf(s, "SYSCONFIG:\t0x%08x\n",
1567 OMAP_HSMMC_READ(host->base, SYSCONFIG));
1568 seq_printf(s, "CON:\t\t0x%08x\n",
1569 OMAP_HSMMC_READ(host->base, CON));
1570 seq_printf(s, "HCTL:\t\t0x%08x\n",
1571 OMAP_HSMMC_READ(host->base, HCTL));
1572 seq_printf(s, "SYSCTL:\t\t0x%08x\n",
1573 OMAP_HSMMC_READ(host->base, SYSCTL));
1574 seq_printf(s, "IE:\t\t0x%08x\n",
1575 OMAP_HSMMC_READ(host->base, IE));
1576 seq_printf(s, "ISE:\t\t0x%08x\n",
1577 OMAP_HSMMC_READ(host->base, ISE));
1578 seq_printf(s, "CAPA:\t\t0x%08x\n",
1579 OMAP_HSMMC_READ(host->base, CAPA));
1580
1581 clk_disable(host->fclk);
1582
1583 return 0;
1584}
1585
1586static int omap_hsmmc_regs_open(struct inode *inode, struct file *file)
1587{
1588 return single_open(file, omap_hsmmc_regs_show, inode->i_private);
1589}
1590
1591static const struct file_operations mmc_regs_fops = {
1592 .open = omap_hsmmc_regs_open,
1593 .read = seq_read,
1594 .llseek = seq_lseek,
1595 .release = single_release,
1596};
1597
1598static void omap_hsmmc_debugfs(struct mmc_host *mmc)
1599{
1600 if (mmc->debugfs_root)
1601 debugfs_create_file("regs", S_IRUSR, mmc->debugfs_root,
1602 mmc, &mmc_regs_fops);
1603}
1604
1605#else
1606
1607static void omap_hsmmc_debugfs(struct mmc_host *mmc)
1608{
1609}
1610
1611#endif
1612
1613static int __init omap_hsmmc_probe(struct platform_device *pdev)
971{ 1614{
972 struct omap_mmc_platform_data *pdata = pdev->dev.platform_data; 1615 struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
973 struct mmc_host *mmc; 1616 struct mmc_host *mmc;
974 struct mmc_omap_host *host = NULL; 1617 struct omap_hsmmc_host *host = NULL;
975 struct resource *res; 1618 struct resource *res;
976 int ret = 0, irq; 1619 int ret = 0, irq;
977 1620
@@ -995,7 +1638,7 @@ static int __init omap_mmc_probe(struct platform_device *pdev)
995 if (res == NULL) 1638 if (res == NULL)
996 return -EBUSY; 1639 return -EBUSY;
997 1640
998 mmc = mmc_alloc_host(sizeof(struct mmc_omap_host), &pdev->dev); 1641 mmc = mmc_alloc_host(sizeof(struct omap_hsmmc_host), &pdev->dev);
999 if (!mmc) { 1642 if (!mmc) {
1000 ret = -ENOMEM; 1643 ret = -ENOMEM;
1001 goto err; 1644 goto err;
@@ -1013,15 +1656,21 @@ static int __init omap_mmc_probe(struct platform_device *pdev)
1013 host->slot_id = 0; 1656 host->slot_id = 0;
1014 host->mapbase = res->start; 1657 host->mapbase = res->start;
1015 host->base = ioremap(host->mapbase, SZ_4K); 1658 host->base = ioremap(host->mapbase, SZ_4K);
1659 host->power_mode = -1;
1016 1660
1017 platform_set_drvdata(pdev, host); 1661 platform_set_drvdata(pdev, host);
1018 INIT_WORK(&host->mmc_carddetect_work, mmc_omap_detect); 1662 INIT_WORK(&host->mmc_carddetect_work, omap_hsmmc_detect);
1663
1664 if (mmc_slot(host).power_saving)
1665 mmc->ops = &omap_hsmmc_ps_ops;
1666 else
1667 mmc->ops = &omap_hsmmc_ops;
1019 1668
1020 mmc->ops = &mmc_omap_ops;
1021 mmc->f_min = 400000; 1669 mmc->f_min = 400000;
1022 mmc->f_max = 52000000; 1670 mmc->f_max = 52000000;
1023 1671
1024 sema_init(&host->sem, 1); 1672 sema_init(&host->sem, 1);
1673 spin_lock_init(&host->irq_lock);
1025 1674
1026 host->iclk = clk_get(&pdev->dev, "ick"); 1675 host->iclk = clk_get(&pdev->dev, "ick");
1027 if (IS_ERR(host->iclk)) { 1676 if (IS_ERR(host->iclk)) {
@@ -1037,31 +1686,42 @@ static int __init omap_mmc_probe(struct platform_device *pdev)
1037 goto err1; 1686 goto err1;
1038 } 1687 }
1039 1688
1040 if (clk_enable(host->fclk) != 0) { 1689 omap_hsmmc_context_save(host);
1690
1691 mmc->caps |= MMC_CAP_DISABLE;
1692 mmc_set_disable_delay(mmc, OMAP_MMC_DISABLED_TIMEOUT);
1693 /* we start off in DISABLED state */
1694 host->dpm_state = DISABLED;
1695
1696 if (mmc_host_enable(host->mmc) != 0) {
1041 clk_put(host->iclk); 1697 clk_put(host->iclk);
1042 clk_put(host->fclk); 1698 clk_put(host->fclk);
1043 goto err1; 1699 goto err1;
1044 } 1700 }
1045 1701
1046 if (clk_enable(host->iclk) != 0) { 1702 if (clk_enable(host->iclk) != 0) {
1047 clk_disable(host->fclk); 1703 mmc_host_disable(host->mmc);
1048 clk_put(host->iclk); 1704 clk_put(host->iclk);
1049 clk_put(host->fclk); 1705 clk_put(host->fclk);
1050 goto err1; 1706 goto err1;
1051 } 1707 }
1052 1708
1053 host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck"); 1709 if (cpu_is_omap2430()) {
1054 /* 1710 host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
1055 * MMC can still work without debounce clock. 1711 /*
1056 */ 1712 * MMC can still work without debounce clock.
1057 if (IS_ERR(host->dbclk)) 1713 */
1058 dev_warn(mmc_dev(host->mmc), "Failed to get debounce clock\n"); 1714 if (IS_ERR(host->dbclk))
1059 else 1715 dev_warn(mmc_dev(host->mmc),
1060 if (clk_enable(host->dbclk) != 0) 1716 "Failed to get debounce clock\n");
1061 dev_dbg(mmc_dev(host->mmc), "Enabling debounce"
1062 " clk failed\n");
1063 else 1717 else
1064 host->dbclk_enabled = 1; 1718 host->got_dbclk = 1;
1719
1720 if (host->got_dbclk)
1721 if (clk_enable(host->dbclk) != 0)
1722 dev_dbg(mmc_dev(host->mmc), "Enabling debounce"
1723 " clk failed\n");
1724 }
1065 1725
1066 /* Since we do only SG emulation, we can have as many segs 1726 /* Since we do only SG emulation, we can have as many segs
1067 * as we want. */ 1727 * as we want. */
@@ -1073,14 +1733,18 @@ static int __init omap_mmc_probe(struct platform_device *pdev)
1073 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 1733 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1074 mmc->max_seg_size = mmc->max_req_size; 1734 mmc->max_seg_size = mmc->max_req_size;
1075 1735
1076 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED; 1736 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
1737 MMC_CAP_WAIT_WHILE_BUSY;
1077 1738
1078 if (pdata->slots[host->slot_id].wires >= 8) 1739 if (mmc_slot(host).wires >= 8)
1079 mmc->caps |= MMC_CAP_8_BIT_DATA; 1740 mmc->caps |= MMC_CAP_8_BIT_DATA;
1080 else if (pdata->slots[host->slot_id].wires >= 4) 1741 else if (mmc_slot(host).wires >= 4)
1081 mmc->caps |= MMC_CAP_4_BIT_DATA; 1742 mmc->caps |= MMC_CAP_4_BIT_DATA;
1082 1743
1083 omap_hsmmc_init(host); 1744 if (mmc_slot(host).nonremovable)
1745 mmc->caps |= MMC_CAP_NONREMOVABLE;
1746
1747 omap_hsmmc_conf_bus_power(host);
1084 1748
1085 /* Select DMA lines */ 1749 /* Select DMA lines */
1086 switch (host->id) { 1750 switch (host->id) {
@@ -1096,13 +1760,21 @@ static int __init omap_mmc_probe(struct platform_device *pdev)
1096 host->dma_line_tx = OMAP34XX_DMA_MMC3_TX; 1760 host->dma_line_tx = OMAP34XX_DMA_MMC3_TX;
1097 host->dma_line_rx = OMAP34XX_DMA_MMC3_RX; 1761 host->dma_line_rx = OMAP34XX_DMA_MMC3_RX;
1098 break; 1762 break;
1763 case OMAP_MMC4_DEVID:
1764 host->dma_line_tx = OMAP44XX_DMA_MMC4_TX;
1765 host->dma_line_rx = OMAP44XX_DMA_MMC4_RX;
1766 break;
1767 case OMAP_MMC5_DEVID:
1768 host->dma_line_tx = OMAP44XX_DMA_MMC5_TX;
1769 host->dma_line_rx = OMAP44XX_DMA_MMC5_RX;
1770 break;
1099 default: 1771 default:
1100 dev_err(mmc_dev(host->mmc), "Invalid MMC id\n"); 1772 dev_err(mmc_dev(host->mmc), "Invalid MMC id\n");
1101 goto err_irq; 1773 goto err_irq;
1102 } 1774 }
1103 1775
1104 /* Request IRQ for MMC operations */ 1776 /* Request IRQ for MMC operations */
1105 ret = request_irq(host->irq, mmc_omap_irq, IRQF_DISABLED, 1777 ret = request_irq(host->irq, omap_hsmmc_irq, IRQF_DISABLED,
1106 mmc_hostname(mmc), host); 1778 mmc_hostname(mmc), host);
1107 if (ret) { 1779 if (ret) {
1108 dev_dbg(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n"); 1780 dev_dbg(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n");
@@ -1112,7 +1784,8 @@ static int __init omap_mmc_probe(struct platform_device *pdev)
1112 /* initialize power supplies, gpios, etc */ 1784 /* initialize power supplies, gpios, etc */
1113 if (pdata->init != NULL) { 1785 if (pdata->init != NULL) {
1114 if (pdata->init(&pdev->dev) != 0) { 1786 if (pdata->init(&pdev->dev) != 0) {
1115 dev_dbg(mmc_dev(host->mmc), "late init error\n"); 1787 dev_dbg(mmc_dev(host->mmc),
1788 "Unable to configure MMC IRQs\n");
1116 goto err_irq_cd_init; 1789 goto err_irq_cd_init;
1117 } 1790 }
1118 } 1791 }
@@ -1121,7 +1794,7 @@ static int __init omap_mmc_probe(struct platform_device *pdev)
1121 /* Request IRQ for card detect */ 1794 /* Request IRQ for card detect */
1122 if ((mmc_slot(host).card_detect_irq)) { 1795 if ((mmc_slot(host).card_detect_irq)) {
1123 ret = request_irq(mmc_slot(host).card_detect_irq, 1796 ret = request_irq(mmc_slot(host).card_detect_irq,
1124 omap_mmc_cd_handler, 1797 omap_hsmmc_cd_handler,
1125 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING 1798 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
1126 | IRQF_DISABLED, 1799 | IRQF_DISABLED,
1127 mmc_hostname(mmc), host); 1800 mmc_hostname(mmc), host);
@@ -1135,21 +1808,26 @@ static int __init omap_mmc_probe(struct platform_device *pdev)
1135 OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK); 1808 OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
1136 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK); 1809 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
1137 1810
1811 mmc_host_lazy_disable(host->mmc);
1812
1813 omap_hsmmc_protect_card(host);
1814
1138 mmc_add_host(mmc); 1815 mmc_add_host(mmc);
1139 1816
1140 if (host->pdata->slots[host->slot_id].name != NULL) { 1817 if (mmc_slot(host).name != NULL) {
1141 ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name); 1818 ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name);
1142 if (ret < 0) 1819 if (ret < 0)
1143 goto err_slot_name; 1820 goto err_slot_name;
1144 } 1821 }
1145 if (mmc_slot(host).card_detect_irq && 1822 if (mmc_slot(host).card_detect_irq && mmc_slot(host).get_cover_state) {
1146 host->pdata->slots[host->slot_id].get_cover_state) {
1147 ret = device_create_file(&mmc->class_dev, 1823 ret = device_create_file(&mmc->class_dev,
1148 &dev_attr_cover_switch); 1824 &dev_attr_cover_switch);
1149 if (ret < 0) 1825 if (ret < 0)
1150 goto err_cover_switch; 1826 goto err_cover_switch;
1151 } 1827 }
1152 1828
1829 omap_hsmmc_debugfs(mmc);
1830
1153 return 0; 1831 return 0;
1154 1832
1155err_cover_switch: 1833err_cover_switch:
@@ -1161,11 +1839,11 @@ err_irq_cd:
1161err_irq_cd_init: 1839err_irq_cd_init:
1162 free_irq(host->irq, host); 1840 free_irq(host->irq, host);
1163err_irq: 1841err_irq:
1164 clk_disable(host->fclk); 1842 mmc_host_disable(host->mmc);
1165 clk_disable(host->iclk); 1843 clk_disable(host->iclk);
1166 clk_put(host->fclk); 1844 clk_put(host->fclk);
1167 clk_put(host->iclk); 1845 clk_put(host->iclk);
1168 if (host->dbclk_enabled) { 1846 if (host->got_dbclk) {
1169 clk_disable(host->dbclk); 1847 clk_disable(host->dbclk);
1170 clk_put(host->dbclk); 1848 clk_put(host->dbclk);
1171 } 1849 }
@@ -1180,12 +1858,13 @@ err:
1180 return ret; 1858 return ret;
1181} 1859}
1182 1860
1183static int omap_mmc_remove(struct platform_device *pdev) 1861static int omap_hsmmc_remove(struct platform_device *pdev)
1184{ 1862{
1185 struct mmc_omap_host *host = platform_get_drvdata(pdev); 1863 struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
1186 struct resource *res; 1864 struct resource *res;
1187 1865
1188 if (host) { 1866 if (host) {
1867 mmc_host_enable(host->mmc);
1189 mmc_remove_host(host->mmc); 1868 mmc_remove_host(host->mmc);
1190 if (host->pdata->cleanup) 1869 if (host->pdata->cleanup)
1191 host->pdata->cleanup(&pdev->dev); 1870 host->pdata->cleanup(&pdev->dev);
@@ -1194,11 +1873,11 @@ static int omap_mmc_remove(struct platform_device *pdev)
1194 free_irq(mmc_slot(host).card_detect_irq, host); 1873 free_irq(mmc_slot(host).card_detect_irq, host);
1195 flush_scheduled_work(); 1874 flush_scheduled_work();
1196 1875
1197 clk_disable(host->fclk); 1876 mmc_host_disable(host->mmc);
1198 clk_disable(host->iclk); 1877 clk_disable(host->iclk);
1199 clk_put(host->fclk); 1878 clk_put(host->fclk);
1200 clk_put(host->iclk); 1879 clk_put(host->iclk);
1201 if (host->dbclk_enabled) { 1880 if (host->got_dbclk) {
1202 clk_disable(host->dbclk); 1881 clk_disable(host->dbclk);
1203 clk_put(host->dbclk); 1882 clk_put(host->dbclk);
1204 } 1883 }
@@ -1216,36 +1895,51 @@ static int omap_mmc_remove(struct platform_device *pdev)
1216} 1895}
1217 1896
1218#ifdef CONFIG_PM 1897#ifdef CONFIG_PM
1219static int omap_mmc_suspend(struct platform_device *pdev, pm_message_t state) 1898static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state)
1220{ 1899{
1221 int ret = 0; 1900 int ret = 0;
1222 struct mmc_omap_host *host = platform_get_drvdata(pdev); 1901 struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
1223 1902
1224 if (host && host->suspended) 1903 if (host && host->suspended)
1225 return 0; 1904 return 0;
1226 1905
1227 if (host) { 1906 if (host) {
1907 host->suspended = 1;
1908 if (host->pdata->suspend) {
1909 ret = host->pdata->suspend(&pdev->dev,
1910 host->slot_id);
1911 if (ret) {
1912 dev_dbg(mmc_dev(host->mmc),
1913 "Unable to handle MMC board"
1914 " level suspend\n");
1915 host->suspended = 0;
1916 return ret;
1917 }
1918 }
1919 cancel_work_sync(&host->mmc_carddetect_work);
1920 mmc_host_enable(host->mmc);
1228 ret = mmc_suspend_host(host->mmc, state); 1921 ret = mmc_suspend_host(host->mmc, state);
1229 if (ret == 0) { 1922 if (ret == 0) {
1230 host->suspended = 1;
1231
1232 OMAP_HSMMC_WRITE(host->base, ISE, 0); 1923 OMAP_HSMMC_WRITE(host->base, ISE, 0);
1233 OMAP_HSMMC_WRITE(host->base, IE, 0); 1924 OMAP_HSMMC_WRITE(host->base, IE, 0);
1234 1925
1235 if (host->pdata->suspend) {
1236 ret = host->pdata->suspend(&pdev->dev,
1237 host->slot_id);
1238 if (ret)
1239 dev_dbg(mmc_dev(host->mmc),
1240 "Unable to handle MMC board"
1241 " level suspend\n");
1242 }
1243 1926
1244 OMAP_HSMMC_WRITE(host->base, HCTL, 1927 OMAP_HSMMC_WRITE(host->base, HCTL,
1245 OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP); 1928 OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
1246 clk_disable(host->fclk); 1929 mmc_host_disable(host->mmc);
1247 clk_disable(host->iclk); 1930 clk_disable(host->iclk);
1248 clk_disable(host->dbclk); 1931 if (host->got_dbclk)
1932 clk_disable(host->dbclk);
1933 } else {
1934 host->suspended = 0;
1935 if (host->pdata->resume) {
1936 ret = host->pdata->resume(&pdev->dev,
1937 host->slot_id);
1938 if (ret)
1939 dev_dbg(mmc_dev(host->mmc),
1940 "Unmask interrupt failed\n");
1941 }
1942 mmc_host_disable(host->mmc);
1249 } 1943 }
1250 1944
1251 } 1945 }
@@ -1253,32 +1947,28 @@ static int omap_mmc_suspend(struct platform_device *pdev, pm_message_t state)
1253} 1947}
1254 1948
1255/* Routine to resume the MMC device */ 1949/* Routine to resume the MMC device */
1256static int omap_mmc_resume(struct platform_device *pdev) 1950static int omap_hsmmc_resume(struct platform_device *pdev)
1257{ 1951{
1258 int ret = 0; 1952 int ret = 0;
1259 struct mmc_omap_host *host = platform_get_drvdata(pdev); 1953 struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
1260 1954
1261 if (host && !host->suspended) 1955 if (host && !host->suspended)
1262 return 0; 1956 return 0;
1263 1957
1264 if (host) { 1958 if (host) {
1265 1959 ret = clk_enable(host->iclk);
1266 ret = clk_enable(host->fclk);
1267 if (ret) 1960 if (ret)
1268 goto clk_en_err; 1961 goto clk_en_err;
1269 1962
1270 ret = clk_enable(host->iclk); 1963 if (mmc_host_enable(host->mmc) != 0) {
1271 if (ret) { 1964 clk_disable(host->iclk);
1272 clk_disable(host->fclk);
1273 clk_put(host->fclk);
1274 goto clk_en_err; 1965 goto clk_en_err;
1275 } 1966 }
1276 1967
1277 if (clk_enable(host->dbclk) != 0) 1968 if (host->got_dbclk)
1278 dev_dbg(mmc_dev(host->mmc), 1969 clk_enable(host->dbclk);
1279 "Enabling debounce clk failed\n");
1280 1970
1281 omap_hsmmc_init(host); 1971 omap_hsmmc_conf_bus_power(host);
1282 1972
1283 if (host->pdata->resume) { 1973 if (host->pdata->resume) {
1284 ret = host->pdata->resume(&pdev->dev, host->slot_id); 1974 ret = host->pdata->resume(&pdev->dev, host->slot_id);
@@ -1287,10 +1977,14 @@ static int omap_mmc_resume(struct platform_device *pdev)
1287 "Unmask interrupt failed\n"); 1977 "Unmask interrupt failed\n");
1288 } 1978 }
1289 1979
1980 omap_hsmmc_protect_card(host);
1981
1290 /* Notify the core to resume the host */ 1982 /* Notify the core to resume the host */
1291 ret = mmc_resume_host(host->mmc); 1983 ret = mmc_resume_host(host->mmc);
1292 if (ret == 0) 1984 if (ret == 0)
1293 host->suspended = 0; 1985 host->suspended = 0;
1986
1987 mmc_host_lazy_disable(host->mmc);
1294 } 1988 }
1295 1989
1296 return ret; 1990 return ret;
@@ -1302,35 +1996,34 @@ clk_en_err:
1302} 1996}
1303 1997
1304#else 1998#else
1305#define omap_mmc_suspend NULL 1999#define omap_hsmmc_suspend NULL
1306#define omap_mmc_resume NULL 2000#define omap_hsmmc_resume NULL
1307#endif 2001#endif
1308 2002
1309static struct platform_driver omap_mmc_driver = { 2003static struct platform_driver omap_hsmmc_driver = {
1310 .probe = omap_mmc_probe, 2004 .remove = omap_hsmmc_remove,
1311 .remove = omap_mmc_remove, 2005 .suspend = omap_hsmmc_suspend,
1312 .suspend = omap_mmc_suspend, 2006 .resume = omap_hsmmc_resume,
1313 .resume = omap_mmc_resume,
1314 .driver = { 2007 .driver = {
1315 .name = DRIVER_NAME, 2008 .name = DRIVER_NAME,
1316 .owner = THIS_MODULE, 2009 .owner = THIS_MODULE,
1317 }, 2010 },
1318}; 2011};
1319 2012
1320static int __init omap_mmc_init(void) 2013static int __init omap_hsmmc_init(void)
1321{ 2014{
1322 /* Register the MMC driver */ 2015 /* Register the MMC driver */
1323 return platform_driver_register(&omap_mmc_driver); 2016 return platform_driver_register(&omap_hsmmc_driver);
1324} 2017}
1325 2018
1326static void __exit omap_mmc_cleanup(void) 2019static void __exit omap_hsmmc_cleanup(void)
1327{ 2020{
1328 /* Unregister MMC driver */ 2021 /* Unregister MMC driver */
1329 platform_driver_unregister(&omap_mmc_driver); 2022 platform_driver_unregister(&omap_hsmmc_driver);
1330} 2023}
1331 2024
1332module_init(omap_mmc_init); 2025module_init(omap_hsmmc_init);
1333module_exit(omap_mmc_cleanup); 2026module_exit(omap_hsmmc_cleanup);
1334 2027
1335MODULE_DESCRIPTION("OMAP High Speed Multimedia Card driver"); 2028MODULE_DESCRIPTION("OMAP High Speed Multimedia Card driver");
1336MODULE_LICENSE("GPL"); 2029MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/sdhci-of.c b/drivers/mmc/host/sdhci-of.c
index 1e8aa590bb39..01ab916c2802 100644
--- a/drivers/mmc/host/sdhci-of.c
+++ b/drivers/mmc/host/sdhci-of.c
@@ -21,6 +21,7 @@
21#include <linux/of.h> 21#include <linux/of.h>
22#include <linux/of_platform.h> 22#include <linux/of_platform.h>
23#include <linux/mmc/host.h> 23#include <linux/mmc/host.h>
24#include <asm/machdep.h>
24#include "sdhci.h" 25#include "sdhci.h"
25 26
26struct sdhci_of_data { 27struct sdhci_of_data {
@@ -48,6 +49,8 @@ struct sdhci_of_host {
48#define ESDHC_CLOCK_HCKEN 0x00000002 49#define ESDHC_CLOCK_HCKEN 0x00000002
49#define ESDHC_CLOCK_IPGEN 0x00000001 50#define ESDHC_CLOCK_IPGEN 0x00000001
50 51
52#define ESDHC_HOST_CONTROL_RES 0x05
53
51static u32 esdhc_readl(struct sdhci_host *host, int reg) 54static u32 esdhc_readl(struct sdhci_host *host, int reg)
52{ 55{
53 return in_be32(host->ioaddr + reg); 56 return in_be32(host->ioaddr + reg);
@@ -109,13 +112,17 @@ static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg)
109 int base = reg & ~0x3; 112 int base = reg & ~0x3;
110 int shift = (reg & 0x3) * 8; 113 int shift = (reg & 0x3) * 8;
111 114
115 /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */
116 if (reg == SDHCI_HOST_CONTROL)
117 val &= ~ESDHC_HOST_CONTROL_RES;
118
112 clrsetbits_be32(host->ioaddr + base , 0xff << shift, val << shift); 119 clrsetbits_be32(host->ioaddr + base , 0xff << shift, val << shift);
113} 120}
114 121
115static void esdhc_set_clock(struct sdhci_host *host, unsigned int clock) 122static void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
116{ 123{
117 int div;
118 int pre_div = 2; 124 int pre_div = 2;
125 int div = 1;
119 126
120 clrbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN | 127 clrbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
121 ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK); 128 ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);
@@ -123,19 +130,17 @@ static void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
123 if (clock == 0) 130 if (clock == 0)
124 goto out; 131 goto out;
125 132
126 if (host->max_clk / 16 > clock) { 133 while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
127 for (; pre_div < 256; pre_div *= 2) { 134 pre_div *= 2;
128 if (host->max_clk / pre_div < clock * 16)
129 break;
130 }
131 }
132 135
133 for (div = 1; div <= 16; div++) { 136 while (host->max_clk / pre_div / div > clock && div < 16)
134 if (host->max_clk / (div * pre_div) <= clock) 137 div++;
135 break; 138
136 } 139 dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
140 clock, host->max_clk / pre_div / div);
137 141
138 pre_div >>= 1; 142 pre_div >>= 1;
143 div--;
139 144
140 setbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN | 145 setbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
141 ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | 146 ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN |
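The rewritten divisor search walks pre_div through powers of two and div through 1..16 until the resulting rate no longer exceeds the request, then programs pre_div >> 1 and div - 1 into the register. The same search as a standalone sketch with a worked example (the 133 MHz base clock is an assumed figure and the helper name is illustrative):

static void example_esdhc_divisors(unsigned int max_clk, unsigned int clock,
				   int *pre_div, int *div)
{
	*pre_div = 2;
	*div = 1;

	/* Same two searches as esdhc_set_clock() above. */
	while (max_clk / *pre_div / 16 > clock && *pre_div < 256)
		*pre_div *= 2;
	while (max_clk / *pre_div / *div > clock && *div < 16)
		(*div)++;
}

/*
 * e.g. max_clk = 133000000, clock = 25000000 gives pre_div = 2, div = 3,
 * for an actual SD clock of about 22.2 MHz.
 */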
@@ -165,19 +170,12 @@ static unsigned int esdhc_get_min_clock(struct sdhci_host *host)
165 return of_host->clock / 256 / 16; 170 return of_host->clock / 256 / 16;
166} 171}
167 172
168static unsigned int esdhc_get_timeout_clock(struct sdhci_host *host)
169{
170 struct sdhci_of_host *of_host = sdhci_priv(host);
171
172 return of_host->clock / 1000;
173}
174
175static struct sdhci_of_data sdhci_esdhc = { 173static struct sdhci_of_data sdhci_esdhc = {
176 .quirks = SDHCI_QUIRK_FORCE_BLK_SZ_2048 | 174 .quirks = SDHCI_QUIRK_FORCE_BLK_SZ_2048 |
177 SDHCI_QUIRK_BROKEN_CARD_DETECTION | 175 SDHCI_QUIRK_BROKEN_CARD_DETECTION |
178 SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
179 SDHCI_QUIRK_NO_BUSY_IRQ | 176 SDHCI_QUIRK_NO_BUSY_IRQ |
180 SDHCI_QUIRK_NONSTANDARD_CLOCK | 177 SDHCI_QUIRK_NONSTANDARD_CLOCK |
178 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
181 SDHCI_QUIRK_PIO_NEEDS_DELAY | 179 SDHCI_QUIRK_PIO_NEEDS_DELAY |
182 SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET | 180 SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET |
183 SDHCI_QUIRK_NO_CARD_NO_RESET, 181 SDHCI_QUIRK_NO_CARD_NO_RESET,
@@ -192,7 +190,6 @@ static struct sdhci_of_data sdhci_esdhc = {
192 .enable_dma = esdhc_enable_dma, 190 .enable_dma = esdhc_enable_dma,
193 .get_max_clock = esdhc_get_max_clock, 191 .get_max_clock = esdhc_get_max_clock,
194 .get_min_clock = esdhc_get_min_clock, 192 .get_min_clock = esdhc_get_min_clock,
195 .get_timeout_clock = esdhc_get_timeout_clock,
196 }, 193 },
197}; 194};
198 195
@@ -219,6 +216,15 @@ static int sdhci_of_resume(struct of_device *ofdev)
219 216
220#endif 217#endif
221 218
219static bool __devinit sdhci_of_wp_inverted(struct device_node *np)
220{
221 if (of_get_property(np, "sdhci,wp-inverted", NULL))
222 return true;
223
224 /* Old device trees don't have the wp-inverted property. */
225 return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds);
226}
227
222static int __devinit sdhci_of_probe(struct of_device *ofdev, 228static int __devinit sdhci_of_probe(struct of_device *ofdev,
223 const struct of_device_id *match) 229 const struct of_device_id *match)
224{ 230{
@@ -261,6 +267,9 @@ static int __devinit sdhci_of_probe(struct of_device *ofdev,
261 if (of_get_property(np, "sdhci,1-bit-only", NULL)) 267 if (of_get_property(np, "sdhci,1-bit-only", NULL))
262 host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; 268 host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
263 269
270 if (sdhci_of_wp_inverted(np))
271 host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT;
272
264 clk = of_get_property(np, "clock-frequency", &size); 273 clk = of_get_property(np, "clock-frequency", &size);
265 if (clk && size == sizeof(*clk) && *clk) 274 if (clk && size == sizeof(*clk) && *clk)
266 of_host->clock = *clk; 275 of_host->clock = *clk;
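
With the probe change above, the inverted write-protect quirk now comes from the device tree (with a fallback for the two MPC837x boards whose older trees lack the property) instead of being hard-wired into sdhci_esdhc. A hedged example of a node carrying the properties sdhci_of_probe() reads; the node name, unit address, compatible string and values are illustrative only:

sdhci@2e000 {
	compatible = "fsl,esdhc";		/* illustrative */
	reg = <0x2e000 0x1000>;
	clock-frequency = <133333333>;		/* read into of_host->clock */
	sdhci,wp-inverted;			/* SDHCI_QUIRK_INVERTED_WRITE_PROTECT */
	sdhci,1-bit-only;			/* SDHCI_QUIRK_FORCE_1_BIT_DATA, if so wired */
};
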
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 2f15cc17d887..e0356644d1aa 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -83,7 +83,8 @@ static int ricoh_probe(struct sdhci_pci_chip *chip)
83 if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) 83 if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
84 chip->quirks |= SDHCI_QUIRK_CLOCK_BEFORE_RESET; 84 chip->quirks |= SDHCI_QUIRK_CLOCK_BEFORE_RESET;
85 85
86 if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG) 86 if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG ||
87 chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SONY)
87 chip->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET; 88 chip->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET;
88 89
89 return 0; 90 return 0;
@@ -395,7 +396,7 @@ static int sdhci_pci_enable_dma(struct sdhci_host *host)
395 396
396 if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) && 397 if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) &&
397 ((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) && 398 ((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) &&
398 (host->flags & SDHCI_USE_DMA)) { 399 (host->flags & SDHCI_USE_SDMA)) {
399 dev_warn(&pdev->dev, "Will use DMA mode even though HW " 400 dev_warn(&pdev->dev, "Will use DMA mode even though HW "
400 "doesn't fully claim to support it.\n"); 401 "doesn't fully claim to support it.\n");
401 } 402 }
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index fc96f8cb9c0b..c279fbc4c2e5 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -591,6 +591,9 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
591 target_timeout = data->timeout_ns / 1000 + 591 target_timeout = data->timeout_ns / 1000 +
592 data->timeout_clks / host->clock; 592 data->timeout_clks / host->clock;
593 593
594 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
595 host->timeout_clk = host->clock / 1000;
596
594 /* 597 /*
595 * Figure out needed cycles. 598 * Figure out needed cycles.
596 * We do this in steps in order to fit inside a 32 bit int. 599 * We do this in steps in order to fit inside a 32 bit int.
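
With SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK the timeout clock is no longer a fixed controller property; it is re-derived from the current SD clock (Hz to kHz) each time a data timeout is computed, as in the hunk above. A worked example with made-up numbers only:

#include <stdio.h>

/*
 * Illustrative arithmetic only; the real code takes timeout_ns and
 * timeout_clks from the mmc_data and the SD clock from host->clock.
 */
int main(void)
{
	unsigned int sd_clock = 25000000;	/* current SD clock, Hz */
	unsigned int timeout_ns = 100000000;	/* card data timeout: 100 ms */
	unsigned int timeout_clks = 0;

	/* quirk set: timeout clock follows the SD clock */
	unsigned int timeout_clk = sd_clock / 1000;		/* -> 25000 kHz */
	unsigned int target_timeout = timeout_ns / 1000 +	/* -> us */
				      timeout_clks / sd_clock;

	printf("timeout_clk = %u kHz, target_timeout = %u us\n",
	       timeout_clk, target_timeout);
	return 0;
}
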
@@ -652,7 +655,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
652 count = sdhci_calc_timeout(host, data); 655 count = sdhci_calc_timeout(host, data);
653 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); 656 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
654 657
655 if (host->flags & SDHCI_USE_DMA) 658 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
656 host->flags |= SDHCI_REQ_USE_DMA; 659 host->flags |= SDHCI_REQ_USE_DMA;
657 660
658 /* 661 /*
@@ -991,8 +994,8 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
991 clk |= SDHCI_CLOCK_INT_EN; 994 clk |= SDHCI_CLOCK_INT_EN;
992 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 995 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
993 996
994 /* Wait max 10 ms */ 997 /* Wait max 20 ms */
995 timeout = 10; 998 timeout = 20;
996 while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL)) 999 while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
997 & SDHCI_CLOCK_INT_STABLE)) { 1000 & SDHCI_CLOCK_INT_STABLE)) {
998 if (timeout == 0) { 1001 if (timeout == 0) {
@@ -1597,7 +1600,7 @@ int sdhci_resume_host(struct sdhci_host *host)
1597{ 1600{
1598 int ret; 1601 int ret;
1599 1602
1600 if (host->flags & SDHCI_USE_DMA) { 1603 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
1601 if (host->ops->enable_dma) 1604 if (host->ops->enable_dma)
1602 host->ops->enable_dma(host); 1605 host->ops->enable_dma(host);
1603 } 1606 }
@@ -1678,23 +1681,20 @@ int sdhci_add_host(struct sdhci_host *host)
1678 caps = sdhci_readl(host, SDHCI_CAPABILITIES); 1681 caps = sdhci_readl(host, SDHCI_CAPABILITIES);
1679 1682
1680 if (host->quirks & SDHCI_QUIRK_FORCE_DMA) 1683 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
1681 host->flags |= SDHCI_USE_DMA; 1684 host->flags |= SDHCI_USE_SDMA;
1682 else if (!(caps & SDHCI_CAN_DO_DMA)) 1685 else if (!(caps & SDHCI_CAN_DO_SDMA))
1683 DBG("Controller doesn't have DMA capability\n"); 1686 DBG("Controller doesn't have SDMA capability\n");
1684 else 1687 else
1685 host->flags |= SDHCI_USE_DMA; 1688 host->flags |= SDHCI_USE_SDMA;
1686 1689
1687 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) && 1690 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
1688 (host->flags & SDHCI_USE_DMA)) { 1691 (host->flags & SDHCI_USE_SDMA)) {
1689 DBG("Disabling DMA as it is marked broken\n"); 1692 DBG("Disabling DMA as it is marked broken\n");
1690 host->flags &= ~SDHCI_USE_DMA; 1693 host->flags &= ~SDHCI_USE_SDMA;
1691 } 1694 }
1692 1695
1693 if (host->flags & SDHCI_USE_DMA) { 1696 if ((host->version >= SDHCI_SPEC_200) && (caps & SDHCI_CAN_DO_ADMA2))
1694 if ((host->version >= SDHCI_SPEC_200) && 1697 host->flags |= SDHCI_USE_ADMA;
1695 (caps & SDHCI_CAN_DO_ADMA2))
1696 host->flags |= SDHCI_USE_ADMA;
1697 }
1698 1698
1699 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && 1699 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
1700 (host->flags & SDHCI_USE_ADMA)) { 1700 (host->flags & SDHCI_USE_ADMA)) {
@@ -1702,13 +1702,14 @@ int sdhci_add_host(struct sdhci_host *host)
1702 host->flags &= ~SDHCI_USE_ADMA; 1702 host->flags &= ~SDHCI_USE_ADMA;
1703 } 1703 }
1704 1704
1705 if (host->flags & SDHCI_USE_DMA) { 1705 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
1706 if (host->ops->enable_dma) { 1706 if (host->ops->enable_dma) {
1707 if (host->ops->enable_dma(host)) { 1707 if (host->ops->enable_dma(host)) {
1708 printk(KERN_WARNING "%s: No suitable DMA " 1708 printk(KERN_WARNING "%s: No suitable DMA "
1709 "available. Falling back to PIO.\n", 1709 "available. Falling back to PIO.\n",
1710 mmc_hostname(mmc)); 1710 mmc_hostname(mmc));
1711 host->flags &= ~(SDHCI_USE_DMA | SDHCI_USE_ADMA); 1711 host->flags &=
1712 ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
1712 } 1713 }
1713 } 1714 }
1714 } 1715 }
@@ -1736,7 +1737,7 @@ int sdhci_add_host(struct sdhci_host *host)
1736 * mask, but PIO does not need the hw shim so we set a new 1737 * mask, but PIO does not need the hw shim so we set a new
1737 * mask here in that case. 1738 * mask here in that case.
1738 */ 1739 */
1739 if (!(host->flags & SDHCI_USE_DMA)) { 1740 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
1740 host->dma_mask = DMA_BIT_MASK(64); 1741 host->dma_mask = DMA_BIT_MASK(64);
1741 mmc_dev(host->mmc)->dma_mask = &host->dma_mask; 1742 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
1742 } 1743 }
@@ -1757,13 +1758,15 @@ int sdhci_add_host(struct sdhci_host *host)
1757 host->timeout_clk = 1758 host->timeout_clk =
1758 (caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT; 1759 (caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
1759 if (host->timeout_clk == 0) { 1760 if (host->timeout_clk == 0) {
1760 if (!host->ops->get_timeout_clock) { 1761 if (host->ops->get_timeout_clock) {
1762 host->timeout_clk = host->ops->get_timeout_clock(host);
1763 } else if (!(host->quirks &
1764 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
1761 printk(KERN_ERR 1765 printk(KERN_ERR
1762 "%s: Hardware doesn't specify timeout clock " 1766 "%s: Hardware doesn't specify timeout clock "
1763 "frequency.\n", mmc_hostname(mmc)); 1767 "frequency.\n", mmc_hostname(mmc));
1764 return -ENODEV; 1768 return -ENODEV;
1765 } 1769 }
1766 host->timeout_clk = host->ops->get_timeout_clock(host);
1767 } 1770 }
1768 if (caps & SDHCI_TIMEOUT_CLK_UNIT) 1771 if (caps & SDHCI_TIMEOUT_CLK_UNIT)
1769 host->timeout_clk *= 1000; 1772 host->timeout_clk *= 1000;
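
The probe-time selection of the timeout clock changes to match: the capabilities-register value still wins, then the host driver's get_timeout_clock() callback, and a host that provides neither must set the new SDCLK quirk or probing still fails with -ENODEV. A compact sketch of that precedence, with invented struct and helper names:

#include <stdio.h>

struct fake_sdhci {
	unsigned int caps_timeout_clk;		/* from SDHCI_CAPABILITIES */
	unsigned int (*get_timeout_clock)(void);
	int data_timeout_uses_sdclk;		/* the new quirk */
};

/* Returns 0 on success, -1 for the -ENODEV path in sdhci_add_host(). */
static int pick_timeout_clk(struct fake_sdhci *host, unsigned int *timeout_clk)
{
	if (host->caps_timeout_clk) {
		*timeout_clk = host->caps_timeout_clk;
		return 0;
	}
	if (host->get_timeout_clock) {
		*timeout_clk = host->get_timeout_clock();
		return 0;
	}
	if (host->data_timeout_uses_sdclk) {
		*timeout_clk = 0;	/* derived per request from the SD clock */
		return 0;
	}
	return -1;
}

int main(void)
{
	struct fake_sdhci host = { .data_timeout_uses_sdclk = 1 };
	unsigned int tclk;

	printf("result: %d\n", pick_timeout_clk(&host, &tclk));
	return 0;
}
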
@@ -1772,7 +1775,8 @@ int sdhci_add_host(struct sdhci_host *host)
1772 * Set host parameters. 1775 * Set host parameters.
1773 */ 1776 */
1774 mmc->ops = &sdhci_ops; 1777 mmc->ops = &sdhci_ops;
1775 if (host->ops->get_min_clock) 1778 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK &&
1779 host->ops->set_clock && host->ops->get_min_clock)
1776 mmc->f_min = host->ops->get_min_clock(host); 1780 mmc->f_min = host->ops->get_min_clock(host);
1777 else 1781 else
1778 mmc->f_min = host->max_clk / 256; 1782 mmc->f_min = host->max_clk / 256;
@@ -1810,7 +1814,7 @@ int sdhci_add_host(struct sdhci_host *host)
1810 */ 1814 */
1811 if (host->flags & SDHCI_USE_ADMA) 1815 if (host->flags & SDHCI_USE_ADMA)
1812 mmc->max_hw_segs = 128; 1816 mmc->max_hw_segs = 128;
1813 else if (host->flags & SDHCI_USE_DMA) 1817 else if (host->flags & SDHCI_USE_SDMA)
1814 mmc->max_hw_segs = 1; 1818 mmc->max_hw_segs = 1;
1815 else /* PIO */ 1819 else /* PIO */
1816 mmc->max_hw_segs = 128; 1820 mmc->max_hw_segs = 128;
@@ -1893,10 +1897,10 @@ int sdhci_add_host(struct sdhci_host *host)
1893 1897
1894 mmc_add_host(mmc); 1898 mmc_add_host(mmc);
1895 1899
1896 printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s%s\n", 1900 printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s\n",
1897 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)), 1901 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
1898 (host->flags & SDHCI_USE_ADMA)?"A":"", 1902 (host->flags & SDHCI_USE_ADMA) ? "ADMA" :
1899 (host->flags & SDHCI_USE_DMA)?"DMA":"PIO"); 1903 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
1900 1904
1901 sdhci_enable_card_detection(host); 1905 sdhci_enable_card_detection(host);
1902 1906
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index c77e9ff30223..ce5f1d73dc04 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -143,7 +143,7 @@
143#define SDHCI_CAN_DO_ADMA2 0x00080000 143#define SDHCI_CAN_DO_ADMA2 0x00080000
144#define SDHCI_CAN_DO_ADMA1 0x00100000 144#define SDHCI_CAN_DO_ADMA1 0x00100000
145#define SDHCI_CAN_DO_HISPD 0x00200000 145#define SDHCI_CAN_DO_HISPD 0x00200000
146#define SDHCI_CAN_DO_DMA 0x00400000 146#define SDHCI_CAN_DO_SDMA 0x00400000
147#define SDHCI_CAN_VDD_330 0x01000000 147#define SDHCI_CAN_VDD_330 0x01000000
148#define SDHCI_CAN_VDD_300 0x02000000 148#define SDHCI_CAN_VDD_300 0x02000000
149#define SDHCI_CAN_VDD_180 0x04000000 149#define SDHCI_CAN_VDD_180 0x04000000
@@ -232,6 +232,8 @@ struct sdhci_host {
232#define SDHCI_QUIRK_FORCE_1_BIT_DATA (1<<22) 232#define SDHCI_QUIRK_FORCE_1_BIT_DATA (1<<22)
233/* Controller needs 10ms delay between applying power and clock */ 233/* Controller needs 10ms delay between applying power and clock */
234#define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23) 234#define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23)
235/* Controller uses SDCLK instead of TMCLK for data timeouts */
236#define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1<<24)
235 237
236 int irq; /* Device IRQ */ 238 int irq; /* Device IRQ */
237 void __iomem * ioaddr; /* Mapped address */ 239 void __iomem * ioaddr; /* Mapped address */
@@ -250,7 +252,7 @@ struct sdhci_host {
250 spinlock_t lock; /* Mutex */ 252 spinlock_t lock; /* Mutex */
251 253
252 int flags; /* Host attributes */ 254 int flags; /* Host attributes */
253#define SDHCI_USE_DMA (1<<0) /* Host is DMA capable */ 255#define SDHCI_USE_SDMA (1<<0) /* Host is SDMA capable */
254#define SDHCI_USE_ADMA (1<<1) /* Host is ADMA capable */ 256#define SDHCI_USE_ADMA (1<<1) /* Host is ADMA capable */
255#define SDHCI_REQ_USE_DMA (1<<2) /* Use DMA for this req. */ 257#define SDHCI_REQ_USE_DMA (1<<2) /* Use DMA for this req. */
256#define SDHCI_DEVICE_DEAD (1<<3) /* Device unresponsive */ 258#define SDHCI_DEVICE_DEAD (1<<3) /* Device unresponsive */