aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/mmc
diff options
context:
space:
mode:
authorChristopher Kenna <cjk@cs.unc.edu>2012-09-28 13:46:28 -0400
committerChristopher Kenna <cjk@cs.unc.edu>2012-09-28 14:50:15 -0400
commitdaa22703f14c007e93b464c45fa60019a36f546d (patch)
treea1a130b6e128dc9d57c35c026977e1b4953105e1 /drivers/mmc
parent5aa287dcf1b5879aa0150b0511833c52885f5b4c (diff)
Apply k4412 kernel from HardKernel for ODROID-X.
Diffstat (limited to 'drivers/mmc')
-rw-r--r--drivers/mmc/card/Kconfig14
-rw-r--r--drivers/mmc/card/Makefile8
-rw-r--r--drivers/mmc/card/block.c1586
-rw-r--r--drivers/mmc/card/cprmdrv_samsung.c450
-rw-r--r--drivers/mmc/card/cprmdrv_samsung.h75
-rw-r--r--drivers/mmc/card/queue.c264
-rw-r--r--drivers/mmc/card/queue.h46
-rw-r--r--drivers/mmc/core/Kconfig30
-rw-r--r--drivers/mmc/core/bus.c29
-rw-r--r--drivers/mmc/core/core.c824
-rw-r--r--drivers/mmc/core/core.h3
-rw-r--r--drivers/mmc/core/debugfs.c12
-rw-r--r--drivers/mmc/core/host.c71
-rw-r--r--drivers/mmc/core/host.h21
-rw-r--r--drivers/mmc/core/mmc.c559
-rw-r--r--drivers/mmc/core/mmc_ops.c42
-rw-r--r--drivers/mmc/core/mmc_ops.h1
-rw-r--r--drivers/mmc/core/quirks.c25
-rw-r--r--drivers/mmc/core/sd.c208
-rw-r--r--drivers/mmc/core/sdio.c476
-rw-r--r--drivers/mmc/core/sdio_bus.c13
-rw-r--r--drivers/mmc/core/sdio_io.c36
-rw-r--r--drivers/mmc/core/sdio_irq.c10
-rw-r--r--drivers/mmc/host/Kconfig30
-rw-r--r--drivers/mmc/host/Makefile5
-rw-r--r--drivers/mmc/host/dw_mmc.c803
-rw-r--r--drivers/mmc/host/dw_mmc.h20
-rw-r--r--drivers/mmc/host/mshci-s3c-dma.c220
-rw-r--r--drivers/mmc/host/mshci-s3c.c631
-rw-r--r--drivers/mmc/host/mshci.c2248
-rw-r--r--drivers/mmc/host/mshci.h463
-rw-r--r--drivers/mmc/host/sdhci-s3c.c246
-rw-r--r--drivers/mmc/host/sdhci.c233
-rw-r--r--drivers/mmc/host/sdhci.h3
34 files changed, 8993 insertions, 712 deletions
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 3b1f783bf92..4283bc2eb46 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -50,6 +50,15 @@ config MMC_BLOCK_BOUNCE
50 50
51 If unsure, say Y here. 51 If unsure, say Y here.
52 52
53config MMC_BLOCK_DEFERRED_RESUME
54 bool "Deferr MMC layer resume until I/O is requested"
55 depends on MMC_BLOCK
56 default n
57 help
58 Say Y here to enable deferred MMC resume until I/O
59 is requested. This will reduce overall resume latency and
60 save power when theres an SD card inserted but not being used.
61
53config SDIO_UART 62config SDIO_UART
54 tristate "SDIO UART/GPS class support" 63 tristate "SDIO UART/GPS class support"
55 help 64 help
@@ -67,3 +76,8 @@ config MMC_TEST
67 76
68 This driver is only of interest to those developing or 77 This driver is only of interest to those developing or
69 testing a host driver. Most people should say N here. 78 testing a host driver. Most people should say N here.
79
80config MMC_SELECTIVE_PACKED_CMD_POLICY
81 tristate "Change the condition of Pakced command"
82 help
83 Say Y here to change packed_cmd policy
diff --git a/drivers/mmc/card/Makefile b/drivers/mmc/card/Makefile
index c73b406a06c..a6efd4d85e7 100644
--- a/drivers/mmc/card/Makefile
+++ b/drivers/mmc/card/Makefile
@@ -1,10 +1,16 @@
1# 1#
2# Makefile for MMC/SD card drivers 2# Makefile for MMC/SD card drivers
3# 3#
4ifeq ($(CONFIG_MMC_CPRM),y)
5EXTRA_CFLAGS += -I$(src)/cprm/softcprm
6EXTRA_CFLAGS += -I$(src)/cprm/include
7endif
4 8
5obj-$(CONFIG_MMC_BLOCK) += mmc_block.o 9obj-$(CONFIG_MMC_BLOCK) += mmc_block.o
6mmc_block-objs := block.o queue.o 10mmc_block-objs := block.o queue.o
11ifeq ($(CONFIG_MMC_CPRM),y)
12mmc_block-objs += cprmdrv_samsung.o
13endif
7obj-$(CONFIG_MMC_TEST) += mmc_test.o 14obj-$(CONFIG_MMC_TEST) += mmc_test.o
8 15
9obj-$(CONFIG_SDIO_UART) += sdio_uart.o 16obj-$(CONFIG_SDIO_UART) += sdio_uart.o
10
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c0839d48f6c..279d7d826ee 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -45,8 +45,35 @@
45#include <asm/uaccess.h> 45#include <asm/uaccess.h>
46 46
47#include "queue.h" 47#include "queue.h"
48#include "../core/core.h"
48 49
49MODULE_ALIAS("mmc:block"); 50MODULE_ALIAS("mmc:block");
51
52#if defined(CONFIG_MMC_CPRM)
53#define MMC_ENABLE_CPRM
54#endif
55
56#ifdef MMC_ENABLE_CPRM
57#include "cprmdrv_samsung.h"
58#include <linux/ioctl.h>
59#define MMC_IOCTL_BASE 0xB3 /* Same as MMC block device major number */
60#define MMC_IOCTL_GET_SECTOR_COUNT _IOR(MMC_IOCTL_BASE, 100, int)
61#define MMC_IOCTL_GET_SECTOR_SIZE _IOR(MMC_IOCTL_BASE, 101, int)
62#define MMC_IOCTL_GET_BLOCK_SIZE _IOR(MMC_IOCTL_BASE, 102, int)
63#endif
64
65#ifdef MOVI_DEBUG
66struct CMD_LOG {
67 u32 cmd;
68 u32 arg;
69 u32 cnt;
70 u32 rsp;
71 u32 stoprsp;
72};
73struct CMD_LOG gaCmdLog[5];
74int gnCmdLogIdx;
75#endif
76
50#ifdef MODULE_PARAM_PREFIX 77#ifdef MODULE_PARAM_PREFIX
51#undef MODULE_PARAM_PREFIX 78#undef MODULE_PARAM_PREFIX
52#endif 79#endif
@@ -59,6 +86,13 @@ MODULE_ALIAS("mmc:block");
59#define INAND_CMD38_ARG_SECTRIM1 0x81 86#define INAND_CMD38_ARG_SECTRIM1 0x81
60#define INAND_CMD38_ARG_SECTRIM2 0x88 87#define INAND_CMD38_ARG_SECTRIM2 0x88
61 88
89#define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \
90 (req->cmd_flags & REQ_META)) && \
91 (rq_data_dir(req) == WRITE))
92#define PACKED_CMD_VER 0x01
93#define PACKED_CMD_RD 0x01
94#define PACKED_CMD_WR 0x02
95
62static DEFINE_MUTEX(block_mutex); 96static DEFINE_MUTEX(block_mutex);
63 97
64/* 98/*
@@ -94,6 +128,12 @@ struct mmc_blk_data {
94 unsigned int read_only; 128 unsigned int read_only;
95 unsigned int part_type; 129 unsigned int part_type;
96 unsigned int name_idx; 130 unsigned int name_idx;
131 unsigned int reset_done;
132#define MMC_BLK_READ BIT(0)
133#define MMC_BLK_WRITE BIT(1)
134#define MMC_BLK_DISCARD BIT(2)
135#define MMC_BLK_SECDISCARD BIT(3)
136#define MMC_BLK_WR_HDR BIT(4)
97 137
98 /* 138 /*
99 * Only set in main mmc_blk_data associated 139 * Only set in main mmc_blk_data associated
@@ -106,6 +146,23 @@ struct mmc_blk_data {
106 146
107static DEFINE_MUTEX(open_lock); 147static DEFINE_MUTEX(open_lock);
108 148
149enum mmc_blk_status {
150 MMC_BLK_SUCCESS = 0,
151 MMC_BLK_PARTIAL,
152 MMC_BLK_CMD_ERR,
153 MMC_BLK_RETRY,
154 MMC_BLK_ABORT,
155 MMC_BLK_DATA_ERR,
156 MMC_BLK_ECC_ERR,
157 MMC_BLK_NOMEDIUM,
158};
159
160enum {
161 MMC_PACKED_N_IDX = -1,
162 MMC_PACKED_N_ZERO,
163 MMC_PACKED_N_SINGLE,
164};
165
109module_param(perdev_minors, int, 0444); 166module_param(perdev_minors, int, 0444);
110MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device"); 167MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
111 168
@@ -126,11 +183,7 @@ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
126 183
127static inline int mmc_get_devidx(struct gendisk *disk) 184static inline int mmc_get_devidx(struct gendisk *disk)
128{ 185{
129 int devmaj = MAJOR(disk_devt(disk)); 186 int devidx = disk->first_minor / perdev_minors;
130 int devidx = MINOR(disk_devt(disk)) / perdev_minors;
131
132 if (!devmaj)
133 devidx = disk->first_minor / perdev_minors;
134 return devidx; 187 return devidx;
135} 188}
136 189
@@ -286,7 +339,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
286 struct mmc_data data = {0}; 339 struct mmc_data data = {0};
287 struct mmc_request mrq = {0}; 340 struct mmc_request mrq = {0};
288 struct scatterlist sg; 341 struct scatterlist sg;
289 int err; 342 int err = 0;
290 343
291 /* 344 /*
292 * The caller must have CAP_SYS_RAWIO, and must be calling this on the 345 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
@@ -401,7 +454,8 @@ cmd_rel_host:
401 mmc_release_host(card->host); 454 mmc_release_host(card->host);
402 455
403cmd_done: 456cmd_done:
404 mmc_blk_put(md); 457 if (md)
458 mmc_blk_put(md);
405 kfree(idata->buf); 459 kfree(idata->buf);
406 kfree(idata); 460 kfree(idata);
407 return err; 461 return err;
@@ -410,9 +464,45 @@ cmd_done:
410static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode, 464static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
411 unsigned int cmd, unsigned long arg) 465 unsigned int cmd, unsigned long arg)
412{ 466{
467#ifdef MMC_ENABLE_CPRM
468 struct mmc_blk_data *md = bdev->bd_disk->private_data;
469 struct mmc_card *card = md->queue.card;
470#endif
413 int ret = -EINVAL; 471 int ret = -EINVAL;
414 if (cmd == MMC_IOC_CMD) 472 if (cmd == MMC_IOC_CMD)
415 ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg); 473 ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
474
475#ifdef MMC_ENABLE_CPRM
476 printk(KERN_DEBUG " %s ], %x ", __func__, cmd);
477
478 switch (cmd) {
479 case MMC_IOCTL_GET_SECTOR_COUNT: {
480 int size = 0;
481
482 size = (int)get_capacity(md->disk) << 9;
483 printk(KERN_DEBUG "[%s]:MMC_IOCTL_GET_SECTOR_COUNT size = %d\n",
484 __func__, size);
485
486 return copy_to_user((void *)arg, &size, sizeof(u64));
487 }
488 break;
489 case ACMD13:
490 case ACMD18:
491 case ACMD25:
492 case ACMD43:
493 case ACMD44:
494 case ACMD45:
495 case ACMD46:
496 case ACMD47:
497 case ACMD48: {
498 struct cprm_request *req = (struct cprm_request *)arg;
499
500 printk(KERN_DEBUG "[%s]: cmd [%x]\n", __func__, cmd);
501 return stub_sendcmd(card, req->cmd, req->arg, \
502 req->len, req->buff);
503 }
504 }
505#endif
416 return ret; 506 return ret;
417} 507}
418 508
@@ -435,14 +525,6 @@ static const struct block_device_operations mmc_bdops = {
435#endif 525#endif
436}; 526};
437 527
438struct mmc_blk_request {
439 struct mmc_request mrq;
440 struct mmc_command sbc;
441 struct mmc_command cmd;
442 struct mmc_command stop;
443 struct mmc_data data;
444};
445
446static inline int mmc_blk_part_switch(struct mmc_card *card, 528static inline int mmc_blk_part_switch(struct mmc_card *card,
447 struct mmc_blk_data *md) 529 struct mmc_blk_data *md)
448{ 530{
@@ -460,7 +542,7 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
460 card->ext_csd.part_time); 542 card->ext_csd.part_time);
461 if (ret) 543 if (ret)
462 return ret; 544 return ret;
463} 545 }
464 546
465 main_md->part_curr = md->part_type; 547 main_md->part_curr = md->part_type;
466 return 0; 548 return 0;
@@ -499,8 +581,15 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
499 data.timeout_clks = card->csd.tacc_clks * 100; 581 data.timeout_clks = card->csd.tacc_clks * 100;
500 582
501 timeout_us = data.timeout_ns / 1000; 583 timeout_us = data.timeout_ns / 1000;
502 timeout_us += data.timeout_clks * 1000 / 584 if (card->host->ios.clock) {
503 (card->host->ios.clock / 1000); 585 /* original */
586 timeout_us += data.timeout_clks * 1000 /
587 (card->host->ios.clock / 1000);
588 } else {
589 /* if clock is 0, assume ios.clock is 50000000(working clock) */
590 timeout_us += data.timeout_clks * 1000 /
591 (50000000 / 1000);
592 }
504 593
505 if (timeout_us > 100000) { 594 if (timeout_us > 100000) {
506 data.timeout_ns = 100000000; 595 data.timeout_ns = 100000000;
@@ -533,7 +622,20 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
533 return result; 622 return result;
534} 623}
535 624
536static u32 get_card_status(struct mmc_card *card, struct request *req) 625static int send_stop(struct mmc_card *card, u32 *status)
626{
627 struct mmc_command cmd = {0};
628 int err;
629
630 cmd.opcode = MMC_STOP_TRANSMISSION;
631 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
632 err = mmc_wait_for_cmd(card->host, &cmd, 5);
633 if (err == 0)
634 *status = cmd.resp[0];
635 return err;
636}
637
638static int get_card_status(struct mmc_card *card, u32 *status, int retries)
537{ 639{
538 struct mmc_command cmd = {0}; 640 struct mmc_command cmd = {0};
539 int err; 641 int err;
@@ -542,11 +644,198 @@ static u32 get_card_status(struct mmc_card *card, struct request *req)
542 if (!mmc_host_is_spi(card->host)) 644 if (!mmc_host_is_spi(card->host))
543 cmd.arg = card->rca << 16; 645 cmd.arg = card->rca << 16;
544 cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; 646 cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
545 err = mmc_wait_for_cmd(card->host, &cmd, 0); 647 err = mmc_wait_for_cmd(card->host, &cmd, retries);
546 if (err) 648 if (err == 0)
547 printk(KERN_ERR "%s: error %d sending status command", 649 *status = cmd.resp[0];
548 req->rq_disk->disk_name, err); 650 return err;
549 return cmd.resp[0]; 651}
652
653#define ERR_NOMEDIUM 3
654#define ERR_RETRY 2
655#define ERR_ABORT 1
656#define ERR_CONTINUE 0
657
658static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
659 bool status_valid, u32 status)
660{
661 switch (error) {
662 case -EILSEQ:
663 /* response crc error, retry the r/w cmd */
664 pr_err("%s: %s sending %s command, card status %#x\n",
665 req->rq_disk->disk_name, "response CRC error",
666 name, status);
667 return ERR_RETRY;
668
669 case -ETIMEDOUT:
670 pr_err("%s: %s sending %s command, card status %#x\n",
671 req->rq_disk->disk_name, "timed out", name, status);
672
673 /* If the status cmd initially failed, retry the r/w cmd */
674 if (!status_valid) {
675 pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name);
676 return ERR_RETRY;
677 }
678 /*
679 * If it was a r/w cmd crc error, or illegal command
680 * (eg, issued in wrong state) then retry - we should
681 * have corrected the state problem above.
682 */
683 if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
684 pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name);
685 return ERR_RETRY;
686 }
687
688 /* Otherwise abort the command */
689 pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name);
690 return ERR_ABORT;
691
692 default:
693 /* We don't understand the error code the driver gave us */
694 pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
695 req->rq_disk->disk_name, error, status);
696 return ERR_ABORT;
697 }
698}
699
700/*
701 * Initial r/w and stop cmd error recovery.
702 * We don't know whether the card received the r/w cmd or not, so try to
703 * restore things back to a sane state. Essentially, we do this as follows:
704 * - Obtain card status. If the first attempt to obtain card status fails,
705 * the status word will reflect the failed status cmd, not the failed
706 * r/w cmd. If we fail to obtain card status, it suggests we can no
707 * longer communicate with the card.
708 * - Check the card state. If the card received the cmd but there was a
709 * transient problem with the response, it might still be in a data transfer
710 * mode. Try to send it a stop command. If this fails, we can't recover.
711 * - If the r/w cmd failed due to a response CRC error, it was probably
712 * transient, so retry the cmd.
713 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
714 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
715 * illegal cmd, retry.
716 * Otherwise we don't understand what happened, so abort.
717 */
718static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
719 struct mmc_blk_request *brq, int *ecc_err)
720{
721 bool prev_cmd_status_valid = true;
722 u32 status, stop_status = 0;
723 int err, retry;
724
725 if (mmc_card_removed(card))
726 return ERR_NOMEDIUM;
727
728 /*
729 * Try to get card status which indicates both the card state
730 * and why there was no response. If the first attempt fails,
731 * we can't be sure the returned status is for the r/w command.
732 */
733 for (retry = 2; retry >= 0; retry--) {
734 err = get_card_status(card, &status, 0);
735 if (!err)
736 break;
737
738 prev_cmd_status_valid = false;
739 pr_err("%s: error %d sending status command, %sing\n",
740 req->rq_disk->disk_name, err, retry ? "retry" : "abort");
741 }
742
743 /* We couldn't get a response from the card. Give up. */
744 if (err) {
745 /* Check if the card is removed */
746 if (mmc_detect_card_removed(card->host))
747 return ERR_NOMEDIUM;
748 return ERR_ABORT;
749 }
750
751 /* Flag ECC errors */
752 if ((status & R1_CARD_ECC_FAILED) ||
753 (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
754 (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
755 *ecc_err = 1;
756
757 /*
758 * Check the current card state. If it is in some data transfer
759 * mode, tell it to stop (and hopefully transition back to TRAN.)
760 */
761 if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
762 R1_CURRENT_STATE(status) == R1_STATE_RCV) {
763 err = send_stop(card, &stop_status);
764 if (err)
765 pr_err("%s: error %d sending stop command\n",
766 req->rq_disk->disk_name, err);
767
768 /*
769 * If the stop cmd also timed out, the card is probably
770 * not present, so abort. Other errors are bad news too.
771 */
772 if (err)
773 return ERR_ABORT;
774 if (stop_status & R1_CARD_ECC_FAILED)
775 *ecc_err = 1;
776 }
777
778 /* Check for set block count errors */
779 if (brq->sbc.error)
780 return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
781 prev_cmd_status_valid, status);
782
783 /* Check for r/w command errors */
784 if (brq->cmd.error)
785 return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
786 prev_cmd_status_valid, status);
787
788 /* Data errors */
789 if (!brq->stop.error)
790 return ERR_CONTINUE;
791
792 /* Now for stop errors. These aren't fatal to the transfer. */
793 pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
794 req->rq_disk->disk_name, brq->stop.error,
795 brq->cmd.resp[0], status);
796
797 /*
798 * Subsitute in our own stop status as this will give the error
799 * state which happened during the execution of the r/w command.
800 */
801 if (stop_status) {
802 brq->stop.resp[0] = stop_status;
803 brq->stop.error = 0;
804 }
805 return ERR_CONTINUE;
806}
807
808static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
809 int type)
810{
811 int err;
812
813 if (md->reset_done & type)
814 return -EEXIST;
815
816 md->reset_done |= type;
817 err = mmc_hw_reset(host);
818 /* Ensure we switch back to the correct partition */
819 if (err != -EOPNOTSUPP) {
820 struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
821 int part_err;
822
823 main_md->part_curr = main_md->part_type;
824 part_err = mmc_blk_part_switch(host->card, md);
825 if (part_err) {
826 /*
827 * We have failed to get back into the correct
828 * partition, so we need to abort the whole request.
829 */
830 return -ENODEV;
831 }
832 }
833 return err;
834}
835
836static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
837{
838 md->reset_done &= ~type;
550} 839}
551 840
552static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) 841static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
@@ -554,7 +843,7 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
554 struct mmc_blk_data *md = mq->data; 843 struct mmc_blk_data *md = mq->data;
555 struct mmc_card *card = md->queue.card; 844 struct mmc_card *card = md->queue.card;
556 unsigned int from, nr, arg; 845 unsigned int from, nr, arg;
557 int err = 0; 846 int err = 0, type = MMC_BLK_DISCARD;
558 847
559 if (!mmc_can_erase(card)) { 848 if (!mmc_can_erase(card)) {
560 err = -EOPNOTSUPP; 849 err = -EOPNOTSUPP;
@@ -564,11 +853,13 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
564 from = blk_rq_pos(req); 853 from = blk_rq_pos(req);
565 nr = blk_rq_sectors(req); 854 nr = blk_rq_sectors(req);
566 855
567 if (mmc_can_trim(card)) 856 if (mmc_can_discard(card))
857 arg = MMC_DISCARD_ARG;
858 else if (mmc_can_trim(card))
568 arg = MMC_TRIM_ARG; 859 arg = MMC_TRIM_ARG;
569 else 860 else
570 arg = MMC_ERASE_ARG; 861 arg = MMC_ERASE_ARG;
571 862retry:
572 if (card->quirks & MMC_QUIRK_INAND_CMD38) { 863 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
573 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 864 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
574 INAND_CMD38_ARG_EXT_CSD, 865 INAND_CMD38_ARG_EXT_CSD,
@@ -581,6 +872,10 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
581 } 872 }
582 err = mmc_erase(card, from, nr, arg); 873 err = mmc_erase(card, from, nr, arg);
583out: 874out:
875 if (err == -EIO && !mmc_blk_reset(md, card->host, type))
876 goto retry;
877 if (!err)
878 mmc_blk_reset_success(md, type);
584 spin_lock_irq(&md->lock); 879 spin_lock_irq(&md->lock);
585 __blk_end_request(req, err, blk_rq_bytes(req)); 880 __blk_end_request(req, err, blk_rq_bytes(req));
586 spin_unlock_irq(&md->lock); 881 spin_unlock_irq(&md->lock);
@@ -594,13 +889,20 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
594 struct mmc_blk_data *md = mq->data; 889 struct mmc_blk_data *md = mq->data;
595 struct mmc_card *card = md->queue.card; 890 struct mmc_card *card = md->queue.card;
596 unsigned int from, nr, arg; 891 unsigned int from, nr, arg;
597 int err = 0; 892 int err = 0, type = MMC_BLK_SECDISCARD;
598 893
599 if (!mmc_can_secure_erase_trim(card)) { 894 if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
600 err = -EOPNOTSUPP; 895 err = -EOPNOTSUPP;
601 goto out; 896 goto out;
602 } 897 }
603 898
899 /* The sanitize operation is supported at v4.5 only */
900 if (mmc_can_sanitize(card)) {
901 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
902 EXT_CSD_SANITIZE_START, 1, 0);
903 goto out;
904 }
905
604 from = blk_rq_pos(req); 906 from = blk_rq_pos(req);
605 nr = blk_rq_sectors(req); 907 nr = blk_rq_sectors(req);
606 908
@@ -608,7 +910,7 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
608 arg = MMC_SECURE_TRIM1_ARG; 910 arg = MMC_SECURE_TRIM1_ARG;
609 else 911 else
610 arg = MMC_SECURE_ERASE_ARG; 912 arg = MMC_SECURE_ERASE_ARG;
611 913retry:
612 if (card->quirks & MMC_QUIRK_INAND_CMD38) { 914 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
613 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 915 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
614 INAND_CMD38_ARG_EXT_CSD, 916 INAND_CMD38_ARG_EXT_CSD,
@@ -632,6 +934,10 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
632 err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); 934 err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
633 } 935 }
634out: 936out:
937 if (err == -EIO && !mmc_blk_reset(md, card->host, type))
938 goto retry;
939 if (!err)
940 mmc_blk_reset_success(md, type);
635 spin_lock_irq(&md->lock); 941 spin_lock_irq(&md->lock);
636 __blk_end_request(req, err, blk_rq_bytes(req)); 942 __blk_end_request(req, err, blk_rq_bytes(req));
637 spin_unlock_irq(&md->lock); 943 spin_unlock_irq(&md->lock);
@@ -642,16 +948,18 @@ out:
642static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) 948static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
643{ 949{
644 struct mmc_blk_data *md = mq->data; 950 struct mmc_blk_data *md = mq->data;
951 struct mmc_card *card = md->queue.card;
952 int ret = 0;
953
954 ret = mmc_flush_cache(card);
955 if (ret)
956 ret = -EIO;
645 957
646 /*
647 * No-op, only service this because we need REQ_FUA for reliable
648 * writes.
649 */
650 spin_lock_irq(&md->lock); 958 spin_lock_irq(&md->lock);
651 __blk_end_request_all(req, 0); 959 __blk_end_request_all(req, ret);
652 spin_unlock_irq(&md->lock); 960 spin_unlock_irq(&md->lock);
653 961
654 return 1; 962 return ret ? 0 : 1;
655} 963}
656 964
657/* 965/*
@@ -677,12 +985,197 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
677 } 985 }
678} 986}
679 987
680static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req) 988#define CMD_ERRORS \
989 (R1_OUT_OF_RANGE | /* Command argument out of range */ \
990 R1_ADDRESS_ERROR | /* Misaligned address */ \
991 R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\
992 R1_WP_VIOLATION | /* Tried to write to protected block */ \
993 R1_CC_ERROR | /* Card controller error */ \
994 R1_ERROR) /* General/unknown error */
995
996static int mmc_blk_err_check(struct mmc_card *card,
997 struct mmc_async_req *areq)
681{ 998{
999 struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
1000 mmc_active);
1001 struct mmc_blk_request *brq = &mq_mrq->brq;
1002 struct request *req = mq_mrq->req;
1003 int ecc_err = 0;
1004
1005 /*
1006 * sbc.error indicates a problem with the set block count
1007 * command. No data will have been transferred.
1008 *
1009 * cmd.error indicates a problem with the r/w command. No
1010 * data will have been transferred.
1011 *
1012 * stop.error indicates a problem with the stop command. Data
1013 * may have been transferred, or may still be transferring.
1014 */
1015 if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
1016 brq->data.error) {
1017#if defined(CONFIG_MACH_M0) || defined(CONFIG_MACH_P4NOTE) || \
1018 defined(CONFIG_MACH_C1_USA_ATT)
1019 /* dh0421.hwang */
1020 if (mmc_card_mmc(card)) {
1021 printk(KERN_ERR "[TEST] brq->sbc.opcode=%d,"
1022 "brq->cmd.opcode=%d.\n",
1023 brq->sbc.opcode, brq->cmd.opcode);
1024 printk(KERN_ERR "[TEST] brq->sbc.error=%d,"
1025 "brq->cmd.error=%d, brq->stop.error=%d,"
1026 "brq->data.error=%d.\n", brq->sbc.error,
1027 brq->cmd.error, brq->stop.error,
1028 brq->data.error);
1029 }
1030#endif
1031 switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
1032 case ERR_RETRY:
1033 return MMC_BLK_RETRY;
1034 case ERR_ABORT:
1035 return MMC_BLK_ABORT;
1036 case ERR_NOMEDIUM:
1037 return MMC_BLK_NOMEDIUM;
1038 case ERR_CONTINUE:
1039 break;
1040 }
1041 }
1042
1043 /*
1044 * Check for errors relating to the execution of the
1045 * initial command - such as address errors. No data
1046 * has been transferred.
1047 */
1048 if (brq->cmd.resp[0] & CMD_ERRORS) {
1049 pr_err("%s: r/w command failed, status = %#x\n",
1050 req->rq_disk->disk_name, brq->cmd.resp[0]);
1051 return MMC_BLK_ABORT;
1052 }
1053
1054 /*
1055 * Everything else is either success, or a data error of some
1056 * kind. If it was a write, we may have transitioned to
1057 * program mode, which we have to wait for it to complete.
1058 */
1059 if ((!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) ||
1060 (mq_mrq->packed_cmd == MMC_PACKED_WR_HDR)) {
1061 u32 status;
1062 /* timeout value set 0x30000 : It works just SDcard case.
1063 * It means send CMD sequencially about 7.8sec.
1064 * If SDcard's data line stays low, timeout is about 4sec.
1065 * max timeout is up to 300ms
1066 */
1067 u32 timeout = 0x30000;
1068 do {
1069 int err = get_card_status(card, &status, 5);
1070 if (err) {
1071 printk(KERN_ERR "%s: error %d requesting status\n",
1072 req->rq_disk->disk_name, err);
1073 return MMC_BLK_CMD_ERR;
1074 }
1075 /*
1076 * Some cards mishandle the status bits,
1077 * so make sure to check both the busy
1078 * indication and the card state.
1079 */
1080 /* Just SDcard case, decrease timeout */
1081 if (mmc_card_sd(card))
1082 timeout--;
1083 } while ((!(status & R1_READY_FOR_DATA) ||
1084 (R1_CURRENT_STATE(status) == R1_STATE_PRG)) &&
1085 timeout);
1086
1087 /* If SDcard stays busy status, timeout is to be zero */
1088 if (!timeout) {
1089 pr_err("%s: card state has been never changed "
1090 "to trans.!\n",
1091 req->rq_disk->disk_name);
1092 return MMC_BLK_DATA_ERR;
1093 }
1094 }
1095
1096 if (brq->data.error) {
1097 pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
1098 req->rq_disk->disk_name, brq->data.error,
1099 (unsigned)blk_rq_pos(req),
1100 (unsigned)blk_rq_sectors(req),
1101 brq->cmd.resp[0], brq->stop.resp[0]);
1102
1103 if (rq_data_dir(req) == READ &&
1104 mq_mrq->packed_cmd != MMC_PACKED_WR_HDR) {
1105 if (ecc_err)
1106 return MMC_BLK_ECC_ERR;
1107 return MMC_BLK_DATA_ERR;
1108 } else {
1109 return MMC_BLK_CMD_ERR;
1110 }
1111 }
1112
1113 if (!brq->data.bytes_xfered)
1114 return MMC_BLK_RETRY;
1115
1116 if (mq_mrq->packed_cmd != MMC_PACKED_NONE) {
1117 if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
1118 return MMC_BLK_PARTIAL;
1119 else
1120 return MMC_BLK_SUCCESS;
1121 }
1122
1123 if (blk_rq_bytes(req) != brq->data.bytes_xfered)
1124 return MMC_BLK_PARTIAL;
1125
1126 return MMC_BLK_SUCCESS;
1127}
1128
1129static int mmc_blk_packed_err_check(struct mmc_card *card,
1130 struct mmc_async_req *areq)
1131{
1132 struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
1133 mmc_active);
1134 struct request *req = mq_rq->req;
1135 int err, check, status;
1136 u8 ext_csd[512];
1137
1138 check = mmc_blk_err_check(card, areq);
1139 err = get_card_status(card, &status, 0);
1140 if (err) {
1141 pr_err("%s: error %d sending status command\n",
1142 req->rq_disk->disk_name, err);
1143 return MMC_BLK_ABORT;
1144 }
1145
1146 if (status & R1_EXP_EVENT) {
1147 err = mmc_send_ext_csd(card, ext_csd);
1148 if (err) {
1149 pr_err("%s: error %d sending ext_csd\n",
1150 req->rq_disk->disk_name, err);
1151 return MMC_BLK_ABORT;
1152 }
1153
1154 if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
1155 EXT_CSD_PACKED_FAILURE) &&
1156 (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
1157 EXT_CSD_PACKED_GENERIC_ERROR)) {
1158 if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
1159 EXT_CSD_PACKED_INDEXED_ERROR) {
1160 mq_rq->packed_fail_idx =
1161 ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
1162 return MMC_BLK_PARTIAL;
1163 }
1164 }
1165 }
1166
1167 return check;
1168}
1169
1170static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
1171 struct mmc_card *card,
1172 int disable_multi,
1173 struct mmc_queue *mq)
1174{
1175 u32 readcmd, writecmd;
1176 struct mmc_blk_request *brq = &mqrq->brq;
1177 struct request *req = mqrq->req;
682 struct mmc_blk_data *md = mq->data; 1178 struct mmc_blk_data *md = mq->data;
683 struct mmc_card *card = md->queue.card;
684 struct mmc_blk_request brq;
685 int ret = 1, disable_multi = 0;
686 1179
687 /* 1180 /*
688 * Reliable writes are used to implement Forced Unit Access and 1181 * Reliable writes are used to implement Forced Unit Access and
@@ -693,233 +1186,370 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
693 (rq_data_dir(req) == WRITE) && 1186 (rq_data_dir(req) == WRITE) &&
694 (md->flags & MMC_BLK_REL_WR); 1187 (md->flags & MMC_BLK_REL_WR);
695 1188
696 do { 1189 memset(brq, 0, sizeof(struct mmc_blk_request));
697 struct mmc_command cmd = {0}; 1190 brq->mrq.cmd = &brq->cmd;
698 u32 readcmd, writecmd, status = 0; 1191 brq->mrq.data = &brq->data;
699
700 memset(&brq, 0, sizeof(struct mmc_blk_request));
701 brq.mrq.cmd = &brq.cmd;
702 brq.mrq.data = &brq.data;
703
704 brq.cmd.arg = blk_rq_pos(req);
705 if (!mmc_card_blockaddr(card))
706 brq.cmd.arg <<= 9;
707 brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
708 brq.data.blksz = 512;
709 brq.stop.opcode = MMC_STOP_TRANSMISSION;
710 brq.stop.arg = 0;
711 brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
712 brq.data.blocks = blk_rq_sectors(req);
713
714 /*
715 * The block layer doesn't support all sector count
716 * restrictions, so we need to be prepared for too big
717 * requests.
718 */
719 if (brq.data.blocks > card->host->max_blk_count)
720 brq.data.blocks = card->host->max_blk_count;
721 1192
722 /* 1193 brq->cmd.arg = blk_rq_pos(req);
723 * After a read error, we redo the request one sector at a time 1194 if (!mmc_card_blockaddr(card))
724 * in order to accurately determine which sectors can be read 1195 brq->cmd.arg <<= 9;
725 * successfully. 1196 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
726 */ 1197 brq->data.blksz = 512;
727 if (disable_multi && brq.data.blocks > 1) 1198 brq->stop.opcode = MMC_STOP_TRANSMISSION;
728 brq.data.blocks = 1; 1199 brq->stop.arg = 0;
1200 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1201 brq->data.blocks = blk_rq_sectors(req);
729 1202
730 if (brq.data.blocks > 1 || do_rel_wr) { 1203 /*
731 /* SPI multiblock writes terminate using a special 1204 * The block layer doesn't support all sector count
732 * token, not a STOP_TRANSMISSION request. 1205 * restrictions, so we need to be prepared for too big
733 */ 1206 * requests.
734 if (!mmc_host_is_spi(card->host) || 1207 */
735 rq_data_dir(req) == READ) 1208 if (brq->data.blocks > card->host->max_blk_count)
736 brq.mrq.stop = &brq.stop; 1209 brq->data.blocks = card->host->max_blk_count;
737 readcmd = MMC_READ_MULTIPLE_BLOCK;
738 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
739 } else {
740 brq.mrq.stop = NULL;
741 readcmd = MMC_READ_SINGLE_BLOCK;
742 writecmd = MMC_WRITE_BLOCK;
743 }
744 if (rq_data_dir(req) == READ) {
745 brq.cmd.opcode = readcmd;
746 brq.data.flags |= MMC_DATA_READ;
747 } else {
748 brq.cmd.opcode = writecmd;
749 brq.data.flags |= MMC_DATA_WRITE;
750 }
751 1210
752 if (do_rel_wr) 1211 /*
753 mmc_apply_rel_rw(&brq, card, req); 1212 * After a read error, we redo the request one sector at a time
1213 * in order to accurately determine which sectors can be read
1214 * successfully.
1215 */
1216 if (disable_multi && brq->data.blocks > 1)
1217 brq->data.blocks = 1;
754 1218
755 /* 1219 if (brq->data.blocks > 1 || do_rel_wr) {
756 * Pre-defined multi-block transfers are preferable to 1220 /* SPI multiblock writes terminate using a special
757 * open ended-ones (and necessary for reliable writes). 1221 * token, not a STOP_TRANSMISSION request.
758 * However, it is not sufficient to just send CMD23,
759 * and avoid the final CMD12, as on an error condition
760 * CMD12 (stop) needs to be sent anyway. This, coupled
761 * with Auto-CMD23 enhancements provided by some
762 * hosts, means that the complexity of dealing
763 * with this is best left to the host. If CMD23 is
764 * supported by card and host, we'll fill sbc in and let
765 * the host deal with handling it correctly. This means
766 * that for hosts that don't expose MMC_CAP_CMD23, no
767 * change of behavior will be observed.
768 *
769 * N.B: Some MMC cards experience perf degradation.
770 * We'll avoid using CMD23-bounded multiblock writes for
771 * these, while retaining features like reliable writes.
772 */ 1222 */
1223 if (!mmc_host_is_spi(card->host) ||
1224 rq_data_dir(req) == READ)
1225 brq->mrq.stop = &brq->stop;
1226 readcmd = MMC_READ_MULTIPLE_BLOCK;
1227 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
1228 } else {
1229 brq->mrq.stop = NULL;
1230 readcmd = MMC_READ_SINGLE_BLOCK;
1231 writecmd = MMC_WRITE_BLOCK;
1232 }
1233 if (rq_data_dir(req) == READ) {
1234 brq->cmd.opcode = readcmd;
1235 brq->data.flags |= MMC_DATA_READ;
1236 } else {
1237 brq->cmd.opcode = writecmd;
1238 brq->data.flags |= MMC_DATA_WRITE;
1239 }
773 1240
774 if ((md->flags & MMC_BLK_CMD23) && 1241 if (do_rel_wr)
775 mmc_op_multi(brq.cmd.opcode) && 1242 mmc_apply_rel_rw(brq, card, req);
776 (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
777 brq.sbc.opcode = MMC_SET_BLOCK_COUNT;
778 brq.sbc.arg = brq.data.blocks |
779 (do_rel_wr ? (1 << 31) : 0);
780 brq.sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
781 brq.mrq.sbc = &brq.sbc;
782 }
783 1243
784 mmc_set_data_timeout(&brq.data, card); 1244 /*
1245 * Pre-defined multi-block transfers are preferable to
1246 * open ended-ones (and necessary for reliable writes).
1247 * However, it is not sufficient to just send CMD23,
1248 * and avoid the final CMD12, as on an error condition
1249 * CMD12 (stop) needs to be sent anyway. This, coupled
1250 * with Auto-CMD23 enhancements provided by some
1251 * hosts, means that the complexity of dealing
1252 * with this is best left to the host. If CMD23 is
1253 * supported by card and host, we'll fill sbc in and let
1254 * the host deal with handling it correctly. This means
1255 * that for hosts that don't expose MMC_CAP_CMD23, no
1256 * change of behavior will be observed.
1257 *
1258 * N.B: Some MMC cards experience perf degradation.
1259 * We'll avoid using CMD23-bounded multiblock writes for
1260 * these, while retaining features like reliable writes.
1261 */
785 1262
786 brq.data.sg = mq->sg; 1263 if ((md->flags & MMC_BLK_CMD23) &&
787 brq.data.sg_len = mmc_queue_map_sg(mq); 1264 mmc_op_multi(brq->cmd.opcode) &&
1265 (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
1266 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1267 brq->sbc.arg = brq->data.blocks |
1268 (do_rel_wr ? (1 << 31) : 0);
1269 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1270 brq->mrq.sbc = &brq->sbc;
1271 }
788 1272
789 /* 1273 mmc_set_data_timeout(&brq->data, card);
790 * Adjust the sg list so it is the same size as the 1274
791 * request. 1275 brq->data.sg = mqrq->sg;
792 */ 1276 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
793 if (brq.data.blocks != blk_rq_sectors(req)) { 1277
794 int i, data_size = brq.data.blocks << 9; 1278 /*
795 struct scatterlist *sg; 1279 * Adjust the sg list so it is the same size as the
796 1280 * request.
797 for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) { 1281 */
798 data_size -= sg->length; 1282 if (brq->data.blocks != blk_rq_sectors(req)) {
799 if (data_size <= 0) { 1283 int i, data_size = brq->data.blocks << 9;
800 sg->length += data_size; 1284 struct scatterlist *sg;
801 i++; 1285
802 break; 1286 for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
803 } 1287 data_size -= sg->length;
1288 if (data_size <= 0) {
1289 sg->length += data_size;
1290 i++;
1291 break;
804 } 1292 }
805 brq.data.sg_len = i;
806 } 1293 }
1294 brq->data.sg_len = i;
1295 }
807 1296
808 mmc_queue_bounce_pre(mq); 1297 mqrq->mmc_active.mrq = &brq->mrq;
1298 mqrq->mmc_active.err_check = mmc_blk_err_check;
809 1299
810 mmc_wait_for_req(card->host, &brq.mrq); 1300 mmc_queue_bounce_pre(mqrq);
1301}
811 1302
812 mmc_queue_bounce_post(mq); 1303static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
1304{
1305 struct request_queue *q = mq->queue;
1306 struct mmc_card *card = mq->card;
1307 struct request *cur = req, *next = NULL;
1308 struct mmc_blk_data *md = mq->data;
1309 bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
1310 unsigned int req_sectors = 0, phys_segments = 0;
1311 unsigned int max_blk_count, max_phys_segs;
1312 u8 put_back = 0;
1313 u8 max_packed_rw = 0;
1314 u8 reqs = 0;
1315
1316 mq->mqrq_cur->packed_num = MMC_PACKED_N_ZERO;
1317
1318 if (!(md->flags & MMC_BLK_CMD23) ||
1319 !card->ext_csd.packed_event_en)
1320 goto no_packed;
1321
1322 if (rq_data_dir(cur) == READ &&
1323 (card->host->caps2 & MMC_CAP2_PACKED_RD))
1324 max_packed_rw = card->ext_csd.max_packed_reads;
1325 else if ((rq_data_dir(cur) == WRITE) &&
1326 (card->host->caps2 & MMC_CAP2_PACKED_WR))
1327 max_packed_rw = card->ext_csd.max_packed_writes;
1328
1329 if (max_packed_rw == 0)
1330 goto no_packed;
1331
1332#ifdef CONFIG_MMC_SELECTIVE_PACKED_CMD_POLICY
1333 if (rq_data_dir(cur) == READ)
1334 goto no_packed;
1335#endif
813 1336
814 /* 1337 if (mmc_req_rel_wr(cur) &&
815 * Check for errors here, but don't jump to cmd_err 1338 (md->flags & MMC_BLK_REL_WR) &&
816 * until later as we need to wait for the card to leave 1339 !en_rel_wr) {
817 * programming mode even when things go wrong. 1340 goto no_packed;
818 */ 1341 }
819 if (brq.sbc.error || brq.cmd.error || 1342
820 brq.data.error || brq.stop.error) { 1343 max_blk_count = min(card->host->max_blk_count,
821 if (brq.data.blocks > 1 && rq_data_dir(req) == READ) { 1344 card->host->max_req_size >> 9);
822 /* Redo read one sector at a time */ 1345 if (unlikely(max_blk_count > 0xffff))
823 printk(KERN_WARNING "%s: retrying using single " 1346 max_blk_count = 0xffff;
824 "block read\n", req->rq_disk->disk_name); 1347
825 disable_multi = 1; 1348 max_phys_segs = queue_max_segments(q);
826 continue; 1349 req_sectors += blk_rq_sectors(cur);
827 } 1350 phys_segments += req->nr_phys_segments;
828 status = get_card_status(card, req); 1351
829 } 1352 if (rq_data_dir(cur) == WRITE) {
1353 req_sectors++;
1354 phys_segments++;
1355 }
830 1356
831 if (brq.sbc.error) { 1357 while (reqs < max_packed_rw - 1) {
832 printk(KERN_ERR "%s: error %d sending SET_BLOCK_COUNT " 1358 spin_lock_irq(q->queue_lock);
833 "command, response %#x, card status %#x\n", 1359 next = blk_fetch_request(q);
834 req->rq_disk->disk_name, brq.sbc.error, 1360 spin_unlock_irq(q->queue_lock);
835 brq.sbc.resp[0], status); 1361 if (!next)
1362 break;
1363
1364 if (next->cmd_flags & REQ_DISCARD ||
1365 next->cmd_flags & REQ_FLUSH) {
1366 put_back = 1;
1367 break;
1368 }
1369#ifdef CONFIG_MMC_SELECTIVE_PACKED_CMD_POLICY
1370 if ((blk_rq_pos(cur) + blk_rq_sectors(cur)) != \
1371 blk_rq_pos(next)) {
1372 /* if next request dose not start at end block of
1373 previous request */
1374 put_back = 1;
1375 break;
1376 }
1377#endif
1378 if (rq_data_dir(cur) != rq_data_dir(next)) {
1379 put_back = 1;
1380 break;
836 } 1381 }
837 1382
838 if (brq.cmd.error) { 1383 if (mmc_req_rel_wr(next) &&
839 printk(KERN_ERR "%s: error %d sending read/write " 1384 (md->flags & MMC_BLK_REL_WR) &&
840 "command, response %#x, card status %#x\n", 1385 !en_rel_wr) {
841 req->rq_disk->disk_name, brq.cmd.error, 1386 put_back = 1;
842 brq.cmd.resp[0], status); 1387 break;
843 } 1388 }
844 1389
845 if (brq.data.error) { 1390 req_sectors += blk_rq_sectors(next);
846 if (brq.data.error == -ETIMEDOUT && brq.mrq.stop) 1391 if (req_sectors > max_blk_count) {
847 /* 'Stop' response contains card status */ 1392 put_back = 1;
848 status = brq.mrq.stop->resp[0]; 1393 break;
849 printk(KERN_ERR "%s: error %d transferring data,"
850 " sector %u, nr %u, card status %#x\n",
851 req->rq_disk->disk_name, brq.data.error,
852 (unsigned)blk_rq_pos(req),
853 (unsigned)blk_rq_sectors(req), status);
854 } 1394 }
855 1395
856 if (brq.stop.error) { 1396 phys_segments += next->nr_phys_segments;
857 printk(KERN_ERR "%s: error %d sending stop command, " 1397 if (phys_segments > max_phys_segs) {
858 "response %#x, card status %#x\n", 1398 put_back = 1;
859 req->rq_disk->disk_name, brq.stop.error, 1399 break;
860 brq.stop.resp[0], status);
861 } 1400 }
862 1401
863 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) { 1402 list_add_tail(&next->queuelist, &mq->mqrq_cur->packed_list);
864 do { 1403 cur = next;
865 int err; 1404 reqs++;
1405 }
866 1406
867 cmd.opcode = MMC_SEND_STATUS; 1407 if (put_back) {
868 cmd.arg = card->rca << 16; 1408 spin_lock_irq(q->queue_lock);
869 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 1409 blk_requeue_request(q, next);
870 err = mmc_wait_for_cmd(card->host, &cmd, 5); 1410 spin_unlock_irq(q->queue_lock);
871 if (err) { 1411 }
872 printk(KERN_ERR "%s: error %d requesting status\n",
873 req->rq_disk->disk_name, err);
874 goto cmd_err;
875 }
876 /*
877 * Some cards mishandle the status bits,
878 * so make sure to check both the busy
879 * indication and the card state.
880 */
881 } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
882 (R1_CURRENT_STATE(cmd.resp[0]) == 7));
883
884#if 0
885 if (cmd.resp[0] & ~0x00000900)
886 printk(KERN_ERR "%s: status = %08x\n",
887 req->rq_disk->disk_name, cmd.resp[0]);
888 if (mmc_decode_status(cmd.resp))
889 goto cmd_err;
890#endif
891 }
892 1412
893 if (brq.cmd.error || brq.stop.error || brq.data.error) { 1413 if (reqs > 0) {
894 if (rq_data_dir(req) == READ) { 1414 list_add(&req->queuelist, &mq->mqrq_cur->packed_list);
895 /* 1415 mq->mqrq_cur->packed_num = ++reqs;
896 * After an error, we redo I/O one sector at a 1416 return reqs;
897 * time, so we only reach here after trying to 1417 }
898 * read a single sector.
899 */
900 spin_lock_irq(&md->lock);
901 ret = __blk_end_request(req, -EIO, brq.data.blksz);
902 spin_unlock_irq(&md->lock);
903 continue;
904 }
905 goto cmd_err;
906 }
907 1418
908 /* 1419no_packed:
909 * A block was successfully transferred. 1420 mq->mqrq_cur->packed_cmd = MMC_PACKED_NONE;
910 */ 1421 mq->mqrq_cur->packed_num = MMC_PACKED_N_ZERO;
911 spin_lock_irq(&md->lock); 1422 return 0;
912 ret = __blk_end_request(req, 0, brq.data.bytes_xfered); 1423}
913 spin_unlock_irq(&md->lock);
914 } while (ret);
915 1424
916 return 1; 1425static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
1426 struct mmc_card *card,
1427 struct mmc_queue *mq)
1428{
1429 struct mmc_blk_request *brq = &mqrq->brq;
1430 struct request *req = mqrq->req;
1431 struct request *prq;
1432 struct mmc_blk_data *md = mq->data;
1433 bool do_rel_wr;
1434 u32 *packed_cmd_hdr = mqrq->packed_cmd_hdr;
1435 u8 i = 1;
1436
1437 mqrq->packed_cmd = (rq_data_dir(req) == READ) ?
1438 MMC_PACKED_WR_HDR : MMC_PACKED_WRITE;
1439 mqrq->packed_blocks = 0;
1440 mqrq->packed_fail_idx = MMC_PACKED_N_IDX;
917 1441
918 cmd_err: 1442 memset(packed_cmd_hdr, 0, sizeof(mqrq->packed_cmd_hdr));
919 /* 1443 packed_cmd_hdr[0] = (mqrq->packed_num << 16) |
920 * If this is an SD card and we're writing, we can first 1444 (((rq_data_dir(req) == READ) ?
921 * mark the known good sectors as ok. 1445 PACKED_CMD_RD : PACKED_CMD_WR) << 8) |
922 * 1446 PACKED_CMD_VER;
1447
1448 /*
1449 * Argument for each entry of packed group
1450 */
1451 list_for_each_entry(prq, &mqrq->packed_list, queuelist) {
1452 do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
1453 /* Argument of CMD23*/
1454 packed_cmd_hdr[(i * 2)] = (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
1455 blk_rq_sectors(prq);
1456 /* Argument of CMD18 or CMD25 */
1457 packed_cmd_hdr[((i * 2)) + 1] = mmc_card_blockaddr(card) ?
1458 blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
1459 mqrq->packed_blocks += blk_rq_sectors(prq);
1460 i++;
1461 }
1462
1463 memset(brq, 0, sizeof(struct mmc_blk_request));
1464 brq->mrq.cmd = &brq->cmd;
1465 brq->mrq.data = &brq->data;
1466 brq->mrq.sbc = &brq->sbc;
1467 brq->mrq.stop = &brq->stop;
1468
1469 brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1470 brq->sbc.arg = MMC_CMD23_ARG_PACKED |
1471 ((rq_data_dir(req) == READ) ? 1 : mqrq->packed_blocks + 1);
1472 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1473
1474 brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
1475 brq->cmd.arg = blk_rq_pos(req);
1476 if (!mmc_card_blockaddr(card))
1477 brq->cmd.arg <<= 9;
1478 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1479
1480 brq->data.blksz = 512;
1481 /*
1482 * Write separately the packd command header only for packed read.
1483 * In case of packed write, header is sent with blocks of data.
1484 */
1485 brq->data.blocks = (rq_data_dir(req) == READ) ?
1486 1 : mqrq->packed_blocks + 1;
1487 brq->data.flags |= MMC_DATA_WRITE;
1488
1489 brq->stop.opcode = MMC_STOP_TRANSMISSION;
1490 brq->stop.arg = 0;
1491 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1492
1493 mmc_set_data_timeout(&brq->data, card);
1494
1495 brq->data.sg = mqrq->sg;
1496 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1497
1498 mqrq->mmc_active.mrq = &brq->mrq;
1499 mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
1500
1501 mmc_queue_bounce_pre(mqrq);
1502}
1503
1504static void mmc_blk_packed_rrq_prep(struct mmc_queue_req *mqrq,
1505 struct mmc_card *card,
1506 struct mmc_queue *mq)
1507{
1508 struct mmc_blk_request *brq = &mqrq->brq;
1509 struct request *req = mqrq->req;
1510
1511 mqrq->packed_cmd = MMC_PACKED_READ;
1512
1513 memset(brq, 0, sizeof(struct mmc_blk_request));
1514 brq->mrq.cmd = &brq->cmd;
1515 brq->mrq.data = &brq->data;
1516 brq->mrq.stop = &brq->stop;
1517
1518 brq->cmd.opcode = MMC_READ_MULTIPLE_BLOCK;
1519 brq->cmd.arg = blk_rq_pos(req);
1520 if (!mmc_card_blockaddr(card))
1521 brq->cmd.arg <<= 9;
1522 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1523 brq->data.blksz = 512;
1524 brq->data.blocks = mqrq->packed_blocks;
1525 brq->data.flags |= MMC_DATA_READ;
1526
1527 brq->stop.opcode = MMC_STOP_TRANSMISSION;
1528 brq->stop.arg = 0;
1529 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1530
1531 mmc_set_data_timeout(&brq->data, card);
1532
1533 brq->data.sg = mqrq->sg;
1534 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1535
1536 mqrq->mmc_active.mrq = &brq->mrq;
1537 mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
1538
1539 mmc_queue_bounce_pre(mqrq);
1540}
1541
1542static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
1543 struct mmc_blk_request *brq, struct request *req,
1544 int ret)
1545{
1546 struct mmc_queue_req *mq_rq;
1547 mq_rq = container_of(brq, struct mmc_queue_req, brq);
1548
1549 /*
1550 * If this is an SD card and we're writing, we can first
1551 * mark the known good sectors as ok.
1552 *
923 * If the card is not SD, we can still ok written sectors 1553 * If the card is not SD, we can still ok written sectors
924 * as reported by the controller (which might be less than 1554 * as reported by the controller (which might be less than
925 * the real number of written sectors, but never more). 1555 * the real number of written sectors, but never more).
@@ -934,45 +1564,490 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
934 spin_unlock_irq(&md->lock); 1564 spin_unlock_irq(&md->lock);
935 } 1565 }
936 } else { 1566 } else {
937 spin_lock_irq(&md->lock); 1567 if (mq_rq->packed_cmd == MMC_PACKED_NONE) {
938 ret = __blk_end_request(req, 0, brq.data.bytes_xfered); 1568 spin_lock_irq(&md->lock);
939 spin_unlock_irq(&md->lock); 1569 ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
1570 spin_unlock_irq(&md->lock);
1571 }
1572 }
1573 return ret;
1574}
1575
1576static int mmc_blk_chk_hdr_err(struct mmc_queue *mq, int status)
1577{
1578 struct mmc_blk_data *md = mq->data;
1579 struct mmc_card *card = md->queue.card;
1580 int type = MMC_BLK_WR_HDR, err = 0;
1581
1582 switch (status) {
1583 case MMC_BLK_PARTIAL:
1584 case MMC_BLK_RETRY:
1585 err = 0;
1586 break;
1587 case MMC_BLK_CMD_ERR:
1588 case MMC_BLK_ABORT:
1589 case MMC_BLK_DATA_ERR:
1590 case MMC_BLK_ECC_ERR:
1591 err = mmc_blk_reset(md, card->host, type);
1592 if (!err)
1593 mmc_blk_reset_success(md, type);
1594 break;
1595 }
1596
1597 return err;
1598}
1599
1600static int mmc_blk_issue_packed_rd(struct mmc_queue *mq,
1601 struct mmc_queue_req *mq_rq)
1602{
1603 struct mmc_blk_data *md = mq->data;
1604 struct mmc_card *card = md->queue.card;
1605 int status, ret = -EIO, retry = 2;
1606
1607 do {
1608 mmc_start_req(card->host, NULL, (int *) &status);
1609 if (status) {
1610 ret = mmc_blk_chk_hdr_err(mq, status);
1611 if (ret)
1612 break;
1613 mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
1614 mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
1615 } else {
1616 mmc_blk_packed_rrq_prep(mq_rq, card, mq);
1617 mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
1618 ret = 0;
1619 break;
1620 }
1621 } while (retry-- > 0);
1622
1623 return ret;
1624}
1625
1626static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
1627{
1628 struct mmc_blk_data *md = mq->data;
1629 struct mmc_card *card = md->queue.card;
1630 struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
1631 int ret = 1, disable_multi = 0, retry = 0, type;
1632 enum mmc_blk_status status;
1633 struct mmc_queue_req *mq_rq;
1634 struct request *req, *prq;
1635 struct mmc_async_req *areq;
1636 const u8 packed_num = 2;
1637 u8 reqs = 0;
1638#ifdef MOVI_DEBUG
1639 gnCmdLogIdx = 0;
1640#endif
1641
1642 if (!rqc && !mq->mqrq_prev->req)
1643 return 0;
1644
1645 if (rqc)
1646 reqs = mmc_blk_prep_packed_list(mq, rqc);
1647
1648 do {
1649#ifdef MOVI_DEBUG
1650 struct mmc_command cmd;
1651#endif
1652 if (rqc) {
1653 if (reqs >= packed_num) {
1654 mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur, card, mq);
1655 }
1656 else
1657 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
1658 areq = &mq->mqrq_cur->mmc_active;
1659 } else
1660 areq = NULL;
1661 areq = mmc_start_req(card->host, areq, (int *) &status);
1662 if (!areq) {
1663 if (mq->mqrq_cur->packed_cmd == MMC_PACKED_WR_HDR)
1664 goto snd_packed_rd;
1665 else
1666 return 0;
1667 }
1668
1669 mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
1670 brq = &mq_rq->brq;
1671 req = mq_rq->req;
1672 type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
1673 mmc_queue_bounce_post(mq_rq);
1674
1675#ifdef MOVI_DEBUG
1676 if (card->type == MMC_TYPE_MMC) {
1677 gaCmdLog[gnCmdLogIdx].cmd = brq->cmd.opcode;
1678 gaCmdLog[gnCmdLogIdx].arg = brq->cmd.arg;
1679 gaCmdLog[gnCmdLogIdx].cnt = brq->data.blocks;
1680 gaCmdLog[gnCmdLogIdx].rsp = brq->cmd.resp[0];
1681 gaCmdLog[gnCmdLogIdx].stoprsp = brq->stop.resp[0];
1682 gnCmdLogIdx++;
1683
1684 if (gnCmdLogIdx >= 5)
1685 gnCmdLogIdx = 0;
1686 }
1687
1688 if (brq->cmd.error) {
1689 if (card->type == MMC_TYPE_MMC) {
1690 get_card_status(card, &status, 0);
1691 printk(KERN_ERR "[MOVI_DEBUG] card status is 0x%x\n",
1692 status);
1693 if (!status) {
1694 int err, i, j;
1695 for (i = 0; i < 5; i++) {
1696 printk(KERN_ERR "[CMD LOG] CMD:%d, ARG:0x%x, CNT:%d, RSP:0x%x, STRSP:0x%x\n",
1697 gaCmdLog[gnCmdLogIdx].cmd,
1698 gaCmdLog[gnCmdLogIdx].arg,
1699 gaCmdLog[gnCmdLogIdx].cnt,
1700 gaCmdLog[gnCmdLogIdx].rsp,
1701 gaCmdLog[gnCmdLogIdx].stoprsp);
1702 gnCmdLogIdx++;
1703 if (gnCmdLogIdx >= 5)
1704 gnCmdLogIdx = 0;
1705 }
1706
1707 get_card_status(card, &status, 0);
1708 printk(KERN_ERR "COMMAND13 response = 0x%x\n",
1709 status);
1710
1711 cmd.opcode = 12;
1712 cmd.arg = 0;
1713 cmd.flags = MMC_RSP_R1;
1714 err = mmc_wait_for_cmd
1715 (card->host, &cmd, 0);
1716 if (err) {
1717 printk(KERN_ERR "KERN_ERR %s: error %d CMD12\n",
1718 req->rq_disk->disk_name, err);
1719 }
1720 printk(KERN_ERR "COMD12 RESP = 0x%x\n",
1721 cmd.resp[0]);
1722 msleep(100);
1723
1724 get_card_status(card, &status, 0);
1725 printk(KERN_ERR "COMMAND13 response = 0x%x\n",
1726 status);
1727
1728 mmc_set_clock(card->host, 400000);
1729
1730 for (i = 0; i < 3; i++) {
1731 cmd.opcode = 1;
1732 cmd.arg = 0x40ff8080;
1733 cmd.flags = MMC_RSP_R3 |
1734 MMC_CMD_BCR;
1735 err = mmc_wait_for_cmd
1736 (card->host, &cmd, 0);
1737 if (err) {
1738 printk(KERN_ERR "%s: error %d CMD1\n",
1739 req->rq_disk->disk_name,
1740 err);
1741 }
1742 printk(KERN_ERR "COMD1 RESP = 0x%x\n",
1743 cmd.resp[0]);
1744 msleep(50);
1745 }
1746
1747 for (i = 0; i < 3; i++) {
1748 cmd.opcode = 0;
1749 cmd.arg = 0x20110210;
1750 cmd.flags = MMC_RSP_NONE |
1751 MMC_CMD_BC;
1752 err = mmc_wait_for_cmd
1753 (card->host, &cmd, 0);
1754 if (err) {
1755 printk(KERN_ERR "%s: error %d CMD0\n",
1756 req->rq_disk->disk_name,
1757 err);
1758 }
1759 msleep(50);
1760 cmd.opcode = 0;
1761 cmd.arg = 0x60FACC06;
1762 cmd.flags = MMC_RSP_NONE |
1763 MMC_CMD_BC;
1764 err = mmc_wait_for_cmd
1765 (card->host, &cmd, 0);
1766 if (err) {
1767 printk(KERN_ERR "%s: error %d CMD0\n",
1768 req->rq_disk->disk_name,
1769 err);
1770 }
1771 for (j = 0; j < 3; j++) {
1772 msleep(50);
1773 cmd.opcode = 1;
1774 cmd.arg = 0x0;
1775 cmd.flags = MMC_RSP_R3 |
1776 MMC_CMD_BCR;
1777 err = mmc_wait_for_cmd
1778 (card->host, &cmd, 0);
1779 if (err) {
1780 printk(KERN_ERR "%s: error %d CMD1\n",
1781 req->rq_disk->disk_name,
1782 err);
1783 }
1784
1785 printk(KERN_ERR "COMD1 RESP = 0x%x\n",
1786 cmd.resp[0]);
1787 }
1788 }
1789 panic("MOVINAND DEBUG PANIC\n");
1790 }
1791 }
1792 }
1793#endif
1794
1795 switch (status) {
1796 case MMC_BLK_SUCCESS:
1797 case MMC_BLK_PARTIAL:
1798 /*
1799 * A block was successfully transferred.
1800 */
1801 mmc_blk_reset_success(md, type);
1802
1803 if (mq_rq->packed_cmd != MMC_PACKED_NONE) {
1804 int idx = mq_rq->packed_fail_idx, i = 0;
1805 ret = 0;
1806 while (!list_empty(&mq_rq->packed_list)) {
1807 prq = list_entry_rq(mq_rq->packed_list.next);
1808 if (idx == i) {
1809 /* retry from error index */
1810 mq_rq->packed_num -= idx;
1811 mq_rq->req = prq;
1812 ret = 1;
1813 break;
1814 }
1815 list_del_init(&prq->queuelist);
1816 spin_lock_irq(&md->lock);
1817 __blk_end_request(prq, 0, blk_rq_bytes(prq));
1818 spin_unlock_irq(&md->lock);
1819 i++;
1820 }
1821 if (mq_rq->packed_num == MMC_PACKED_N_SINGLE) {
1822 prq = list_entry_rq(mq_rq->packed_list.next);
1823 list_del_init(&prq->queuelist);
1824 mq_rq->packed_cmd = MMC_PACKED_NONE;
1825 mq_rq->packed_num = MMC_PACKED_N_ZERO;
1826 }
1827 break;
1828 } else {
1829 spin_lock_irq(&md->lock);
1830 ret = __blk_end_request(req, 0,
1831 brq->data.bytes_xfered);
1832 spin_unlock_irq(&md->lock);
1833 }
1834
1835 /*
1836 * If the blk_end_request function returns non-zero even
1837 * though all data has been transferred and no errors
1838 * were returned by the host controller, it's a bug.
1839 */
1840 if (status == MMC_BLK_SUCCESS && ret) {
1841 printk(KERN_ERR "%s BUG rq_tot %d d_xfer %d\n",
1842 __func__, blk_rq_bytes(req),
1843 brq->data.bytes_xfered);
1844 rqc = NULL;
1845 goto cmd_abort;
1846 }
1847 break;
1848 case MMC_BLK_CMD_ERR:
1849 ret = mmc_blk_cmd_err(md, card, brq, req, ret);
1850 if (!mmc_blk_reset(md, card->host, type))
1851 break;
1852 goto cmd_abort;
1853 case MMC_BLK_RETRY:
1854 if (retry++ < 5)
1855 break;
1856 /* Fall through */
1857 case MMC_BLK_ABORT:
1858 if (!mmc_blk_reset(md, card->host, type))
1859 break;
1860 goto cmd_abort;
1861 case MMC_BLK_DATA_ERR: {
1862 int err;
1863
1864 err = mmc_blk_reset(md, card->host, type);
1865 if (!err)
1866 break;
1867 if (err == -ENODEV)
1868 goto cmd_abort;
1869 if (mq_rq->packed_cmd != MMC_PACKED_NONE)
1870 break;
1871 /* Fall through */
1872 }
1873 case MMC_BLK_ECC_ERR:
1874 if (brq->data.blocks > 1) {
1875 /* Redo read one sector at a time */
1876 pr_warning("%s: retrying using single block read\n",
1877 req->rq_disk->disk_name);
1878 disable_multi = 1;
1879 break;
1880 }
1881 /*
1882 * After an error, we redo I/O one sector at a
1883 * time, so we only reach here after trying to
1884 * read a single sector.
1885 */
1886 spin_lock_irq(&md->lock);
1887 ret = __blk_end_request(req, -EIO,
1888 brq->data.blksz);
1889 spin_unlock_irq(&md->lock);
1890 if (!ret)
1891 goto start_new_req;
1892 break;
1893 case MMC_BLK_NOMEDIUM:
1894 goto cmd_abort;
1895 }
1896
1897 if (ret) {
1898 if (mq_rq->packed_cmd == MMC_PACKED_NONE) {
1899 /*
1900 * In case of a incomplete request
1901 * prepare it again and resend.
1902 */
1903 mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
1904 mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
1905 } else {
1906 mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
1907 mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
1908 if (mq_rq->packed_cmd == MMC_PACKED_WR_HDR) {
1909 if (mmc_blk_issue_packed_rd(mq, mq_rq))
1910 goto cmd_abort;
1911 }
1912 }
1913 }
1914 } while (ret);
1915
1916snd_packed_rd:
1917 if (mq->mqrq_cur->packed_cmd == MMC_PACKED_WR_HDR) {
1918 if (mmc_blk_issue_packed_rd(mq, mq->mqrq_cur))
1919 goto start_new_req;
940 } 1920 }
1921 return 1;
941 1922
1923 cmd_abort:
942 spin_lock_irq(&md->lock); 1924 spin_lock_irq(&md->lock);
943 while (ret) 1925 if (mmc_card_removed(card))
944 ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); 1926 req->cmd_flags |= REQ_QUIET;
945 spin_unlock_irq(&md->lock); 1927 spin_unlock_irq(&md->lock);
1928 if (mq_rq->packed_cmd == MMC_PACKED_NONE) {
1929 spin_lock_irq(&md->lock);
1930 while (ret)
1931 ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
1932 spin_unlock_irq(&md->lock);
1933 } else {
1934 while (!list_empty(&mq_rq->packed_list)) {
1935 prq = list_entry_rq(mq_rq->packed_list.next);
1936 list_del_init(&prq->queuelist);
1937 spin_lock_irq(&md->lock);
1938 __blk_end_request(prq, -EIO, blk_rq_bytes(prq));
1939 spin_unlock_irq(&md->lock);
1940 }
1941 }
1942#if defined(CONFIG_MACH_M0) || defined(CONFIG_MACH_P4NOTE) || \
1943 defined(CONFIG_MACH_C1_USA_ATT)
1944 /*
1945 * dh0421.hwang
1946 * It's for Engineering DEBUGGING only
1947 * This has to be removed before PVR(guessing)
1948 * Please refer mshci reg dumps
1949 */
1950 if (mmc_card_mmc(card) && status != 3) {
1951 printk(KERN_ERR "[TEST] CMD aborting case in"
1952 "MMC's block layer ret %d.\n", ret);
1953 printk(KERN_ERR "%s: CMD%d, ARG=0x%x.\n",
1954 req->rq_disk->disk_name,
1955 brq->cmd.opcode,
1956 brq->cmd.arg);
1957 printk(KERN_ERR "[TEST] If PACKED_NONE,"
1958 "confirm end_request done\n");
1959 printk(KERN_ERR "packed CMD type = %d.\n",
1960 mq_rq ? mq_rq->packed_cmd : -1);
1961 printk(KERN_ERR "[TEST] mmc%d, request returns %d.\n",
1962 card->host->index, status);
1963 printk(KERN_ERR "[TEST] err means...\n");
1964 printk(KERN_ERR "\t1: MMC_BLK_PARTIAL.\n");
1965 printk(KERN_ERR "\t2: MMC_BLK_CMD_ERR.\n");
1966 printk(KERN_ERR "\t3: MMC_BLK_RETRY.\n");
1967 printk(KERN_ERR "\t4: MMC_BLK_ABORT.\n");
1968 printk(KERN_ERR "\t5: MMC_BLK_DATA_ERR.\n");
1969 printk(KERN_ERR "\t6: MMC_BLK_ECC_ERR.\n");
1970 if (!rqc) {
1971 panic("[TEST] mmc%d, returns %d.\n",
1972 card->host->index, status);
1973 }
1974 }
1975#endif
1976
1977 start_new_req:
1978 if (rqc) {
1979 /*
1980 * If current request is packed, it needs to put back.
1981 */
1982 if (mq->mqrq_cur->packed_cmd != MMC_PACKED_NONE) {
1983 while (!list_empty(&mq->mqrq_cur->packed_list)) {
1984 prq = list_entry_rq(mq->mqrq_cur->packed_list.prev);
1985 if (prq->queuelist.prev != &mq->mqrq_cur->packed_list) {
1986 list_del_init(&prq->queuelist);
1987 spin_lock_irq(mq->queue->queue_lock);
1988 blk_requeue_request(mq->queue, prq);
1989 spin_unlock_irq(mq->queue->queue_lock);
1990 } else {
1991 list_del_init(&prq->queuelist);
1992 }
1993 }
1994 mq->mqrq_cur->packed_cmd = MMC_PACKED_NONE;
1995 mq->mqrq_cur->packed_num = MMC_PACKED_N_ZERO;
1996 }
1997 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
1998 mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
1999 }
946 2000
947 return 0; 2001 return 0;
948} 2002}
949 2003
2004static int
2005mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card);
2006
950static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) 2007static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
951{ 2008{
952 int ret; 2009 int ret;
953 struct mmc_blk_data *md = mq->data; 2010 struct mmc_blk_data *md = mq->data;
954 struct mmc_card *card = md->queue.card; 2011 struct mmc_card *card = md->queue.card;
955 2012
956 mmc_claim_host(card->host); 2013#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
2014 if (mmc_bus_needs_resume(card->host)) {
2015 mmc_resume_bus(card->host);
2016 mmc_blk_set_blksize(md, card);
2017 }
2018#endif
2019
2020 if (req && !mq->mqrq_prev->req)
2021 /* claim host only for the first request */
2022 mmc_claim_host(card->host);
2023
957 ret = mmc_blk_part_switch(card, md); 2024 ret = mmc_blk_part_switch(card, md);
958 if (ret) { 2025 if (ret) {
959 ret = 0; 2026 ret = 0;
960 goto out; 2027 goto out;
961 } 2028 }
962 2029
963 if (req->cmd_flags & REQ_DISCARD) { 2030 if (req && req->cmd_flags & REQ_DISCARD) {
2031 /* complete ongoing async transfer before issuing discard */
2032 if (card->host->areq)
2033 mmc_blk_issue_rw_rq(mq, NULL);
964 if (req->cmd_flags & REQ_SECURE) 2034 if (req->cmd_flags & REQ_SECURE)
965 ret = mmc_blk_issue_secdiscard_rq(mq, req); 2035 ret = mmc_blk_issue_secdiscard_rq(mq, req);
966 else 2036 else
967 ret = mmc_blk_issue_discard_rq(mq, req); 2037 ret = mmc_blk_issue_discard_rq(mq, req);
968 } else if (req->cmd_flags & REQ_FLUSH) { 2038 } else if (req && req->cmd_flags & REQ_FLUSH) {
2039 /* complete ongoing async transfer before issuing flush */
2040 if (card->host->areq)
2041 mmc_blk_issue_rw_rq(mq, NULL);
969 ret = mmc_blk_issue_flush(mq, req); 2042 ret = mmc_blk_issue_flush(mq, req);
970 } else { 2043 } else {
971 ret = mmc_blk_issue_rw_rq(mq, req); 2044 ret = mmc_blk_issue_rw_rq(mq, req);
972 } 2045 }
973 2046
974out: 2047out:
975 mmc_release_host(card->host); 2048 if (!req)
2049 /* release host only when there are no more requests */
2050 mmc_release_host(card->host);
976 return ret; 2051 return ret;
977} 2052}
978 2053
@@ -1046,6 +2121,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
1046 md->disk->queue = md->queue.queue; 2121 md->disk->queue = md->queue.queue;
1047 md->disk->driverfs_dev = parent; 2122 md->disk->driverfs_dev = parent;
1048 set_disk_ro(md->disk, md->read_only || default_ro); 2123 set_disk_ro(md->disk, md->read_only || default_ro);
2124 md->disk->flags = GENHD_FL_EXT_DEVT;
1049 2125
1050 /* 2126 /*
1051 * As discussed on lkml, GENHD_FL_REMOVABLE should: 2127 * As discussed on lkml, GENHD_FL_REMOVABLE should:
@@ -1278,13 +2354,17 @@ static int mmc_blk_probe(struct mmc_card *card)
1278 printk(KERN_INFO "%s: %s %s %s %s\n", 2354 printk(KERN_INFO "%s: %s %s %s %s\n",
1279 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), 2355 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
1280 cap_str, md->read_only ? "(ro)" : ""); 2356 cap_str, md->read_only ? "(ro)" : "");
1281 2357
1282 if (mmc_blk_alloc_parts(card, md)) 2358 // expose boot partitions.
1283 goto out; 2359 if (mmc_blk_alloc_parts(card, md))
2360 goto out;
1284 2361
1285 mmc_set_drvdata(card, md); 2362 mmc_set_drvdata(card, md);
1286 mmc_fixup_device(card, blk_fixups); 2363 mmc_fixup_device(card, blk_fixups);
1287 2364
2365#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
2366 mmc_set_bus_resume_policy(card->host, 1);
2367#endif
1288 if (mmc_add_disk(md)) 2368 if (mmc_add_disk(md))
1289 goto out; 2369 goto out;
1290 2370
@@ -1310,6 +2390,9 @@ static void mmc_blk_remove(struct mmc_card *card)
1310 mmc_release_host(card->host); 2390 mmc_release_host(card->host);
1311 mmc_blk_remove_req(md); 2391 mmc_blk_remove_req(md);
1312 mmc_set_drvdata(card, NULL); 2392 mmc_set_drvdata(card, NULL);
2393#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
2394 mmc_set_bus_resume_policy(card->host, 0);
2395#endif
1313} 2396}
1314 2397
1315#ifdef CONFIG_PM 2398#ifdef CONFIG_PM
@@ -1333,7 +2416,9 @@ static int mmc_blk_resume(struct mmc_card *card)
1333 struct mmc_blk_data *md = mmc_get_drvdata(card); 2416 struct mmc_blk_data *md = mmc_get_drvdata(card);
1334 2417
1335 if (md) { 2418 if (md) {
2419#ifndef CONFIG_MMC_BLOCK_DEFERRED_RESUME
1336 mmc_blk_set_blksize(md, card); 2420 mmc_blk_set_blksize(md, card);
2421#endif
1337 2422
1338 /* 2423 /*
1339 * Resume involves the card going into idle state, 2424 * Resume involves the card going into idle state,
@@ -1397,4 +2482,3 @@ module_exit(mmc_blk_exit);
1397 2482
1398MODULE_LICENSE("GPL"); 2483MODULE_LICENSE("GPL");
1399MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver"); 2484MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
1400
diff --git a/drivers/mmc/card/cprmdrv_samsung.c b/drivers/mmc/card/cprmdrv_samsung.c
new file mode 100644
index 00000000000..6f64a7fa2d3
--- /dev/null
+++ b/drivers/mmc/card/cprmdrv_samsung.c
@@ -0,0 +1,450 @@
1
2#include <linux/mmc/core.h>
3#include <linux/mmc/card.h>
4#include <linux/mmc/host.h>
5#include <linux/mmc/mmc.h>
6#include <linux/mmc/sd.h>
7
8
9#include <linux/scatterlist.h>
10#include <linux/uaccess.h>
11
12#include "cprmdrv_samsung.h"
13#include <linux/slab.h>
14
15
16static int mmc_wait_busy(struct mmc_card *card)
17{
18 int ret, busy;
19 struct mmc_command cmd;
20
21 busy = 0;
22 do {
23 memset(&cmd, 0, sizeof(struct mmc_command));
24
25 cmd.opcode = MMC_SEND_STATUS;
26 cmd.arg = card->rca << 16;
27 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
28
29 ret = mmc_wait_for_cmd(card->host, &cmd, 0);
30 if (ret)
31 break;
32
33 if (!busy && !(cmd.resp[0] & R1_READY_FOR_DATA)) {
34 busy = 1;
35 printk(KERN_INFO "%s: Warning: Host did not "
36 "wait for busy state to end.\n",
37 mmc_hostname(card->host));
38 }
39 } while (!(cmd.resp[0] & R1_READY_FOR_DATA));
40
41 return ret;
42}
43
44static int CPRM_CMD_SecureRW(struct mmc_card *card,
45 unsigned int command,
46 unsigned int dir,
47 unsigned long arg,
48 unsigned char *buff,
49 unsigned int length) {
50
51 int err;
52 int i = 0;
53 struct mmc_request mrq;
54 struct mmc_command cmd;
55 struct mmc_command stop;
56 struct mmc_data data;
57 unsigned int timeout_us;
58
59 struct scatterlist sg;
60
61 if (command == SD_ACMD25_SECURE_WRITE_MULTI_BLOCK ||
62 command == SD_ACMD18_SECURE_READ_MULTI_BLOCK) {
63 return -EINVAL;
64 }
65
66 memset(&cmd, 0, sizeof(struct mmc_command));
67
68 cmd.opcode = MMC_APP_CMD;
69 cmd.arg = card->rca << 16;
70 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
71
72 err = mmc_wait_for_cmd(card->host, &cmd, 0);
73 if (err)
74 return (u32)-1;
75
76 if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
77 return (u32)-1;
78
79 printk("CPRM_CMD_SecureRW: 1, command : %d\n", command);
80
81 memset(&cmd, 0, sizeof(struct mmc_command));
82
83 cmd.opcode = command;
84
85 if (command == SD_ACMD43_GET_MKB)
86 cmd.arg = arg;
87 else
88 cmd.arg = 0;
89
90 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
91
92 memset(&data, 0, sizeof(struct mmc_data));
93
94 data.timeout_ns = card->csd.tacc_ns * 100;
95 data.timeout_clks = card->csd.tacc_clks * 100;
96
97 timeout_us = data.timeout_ns / 1000;
98 timeout_us += data.timeout_clks * 1000 /
99 (card->host->ios.clock / 1000);
100
101 if (timeout_us > 100000) {
102 data.timeout_ns = 100000000;
103 data.timeout_clks = 0;
104 }
105
106#if defined(CONFIG_TARGET_LOCALE_NTT)
107 data.timeout_ns = 100000000;
108 data.timeout_clks = 0;
109#endif
110
111 data.blksz = length;
112 data.blocks = 1;
113 data.flags = dir;
114 data.sg = &sg;
115 data.sg_len = 1;
116
117 stop.opcode = MMC_STOP_TRANSMISSION;
118 stop.arg = 0;
119 stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
120
121 memset(&mrq, 0, sizeof(struct mmc_request));
122
123 mrq.cmd = &cmd;
124 mrq.data = &data;
125
126 if (data.blocks == 1)
127 mrq.stop = NULL;
128 else
129 mrq.stop = &stop;
130
131 printk(KERN_DEBUG "CPRM_CMD_SecureRW: 2\n");
132
133 sg_init_one(&sg, buff, length);
134
135 printk(KERN_DEBUG "CPRM_CMD_SecureRW: 3\n");
136
137 mmc_wait_for_req(card->host, &mrq);
138
139 printk(KERN_DEBUG "CPRM_CMD_SecureRW: 4\n");
140
141 i = 0;
142 do {
143 printk(KERN_DEBUG "%x", buff[i++]);
144 if (i > 10)
145 break;
146 } while (i < length);
147 printk(KERN_DEBUG "\n");
148
149 if (cmd.error) {
150 printk(KERN_DEBUG "%s]cmd.error=%d\n ", __func__, cmd.error);
151 return cmd.error;
152 }
153
154 if (data.error) {
155 printk(KERN_DEBUG "%s]data.error=%d\n ", __func__, data.error);
156 return data.error;
157 }
158
159 err = mmc_wait_busy(card);
160 printk(KERN_DEBUG "CPRM_CMD_SecureRW: 5\n");
161
162 if (err)
163 return err;
164
165 return 0;
166}
167
/*
 * CPRM_CMD_SecureMultiRW - issue a multi-block secure CPRM transfer
 * (ACMD18 secure read / ACMD25 secure write) as an application command.
 *
 * @card:    target SD card (caller must already hold the host)
 * @command: SD_ACMD18_SECURE_READ_MULTI_BLOCK or
 *           SD_ACMD25_SECURE_WRITE_MULTI_BLOCK (also accepts other
 *           opcodes; @arg is only forwarded for SD_ACMD43_GET_MKB)
 * @dir:     MMC_DATA_READ or MMC_DATA_WRITE
 * @buff:    kernel buffer of @length bytes
 * @length:  transfer size in bytes
 *
 * Returns 0 on success, a cmd/data error code from the request, or
 * (u32)-1 cast to int when the leading APP_CMD fails.
 *
 * NOTE(review): data.blksz is fixed at 512 and data.blocks rounds
 * @length up to the next 512-byte boundary, yet the scatterlist maps
 * only @length bytes -- presumably callers always pass a multiple of
 * 512; confirm before relying on other sizes.
 */
static int CPRM_CMD_SecureMultiRW(struct mmc_card *card,
	unsigned int command,
	unsigned int dir,
	unsigned long arg,
	unsigned char *buff,
	unsigned int length) {

	int err;

	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_command stop;
	struct mmc_data data;
	unsigned int timeout_us;
	unsigned long flags;

	struct scatterlist sg;

	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&stop, 0, sizeof(struct mmc_command));

	/* Every ACMD must be preceded by CMD55 (APP_CMD). */
	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;

	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	printk(KERN_DEBUG "CPRM_CMD_SecureRW: 1\n");

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = command;

	/* Only ACMD43 (GET_MKB) carries a meaningful argument. */
	if (command == SD_ACMD43_GET_MKB)
		cmd.arg = arg;
	else
		cmd.arg = 0;

	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	memset(&data, 0, sizeof(struct mmc_data));

	/* Scale the CSD access time by 100 for margin, then clamp to 100ms. */
	data.timeout_ns = card->csd.tacc_ns * 100;
	data.timeout_clks = card->csd.tacc_clks * 100;

	timeout_us = data.timeout_ns / 1000;
	/* NOTE(review): divides by ios.clock/1000 with no zero guard. */
	timeout_us += data.timeout_clks * 1000 /
		(card->host->ios.clock / 1000);

	if (timeout_us > 100000) {
		data.timeout_ns = 100000000;
		data.timeout_clks = 0;
	}

#if defined(CONFIG_TARGET_LOCALE_NTT)
	data.timeout_ns = 100000000;
	data.timeout_clks = 0;
#endif

	/* 512-byte blocks, rounding the byte count up to whole blocks. */
	data.blksz = 512;
	data.blocks = (length + 511) / 512;

	data.flags = dir;
	data.sg = &sg;
	data.sg_len = 1;

	stop.opcode = MMC_STOP_TRANSMISSION;
	stop.arg = 0;
	stop.flags = MMC_RSP_R1B | MMC_CMD_AC;

	memset(&mrq, 0, sizeof(struct mmc_request));

	mrq.cmd = &cmd;
	mrq.data = &data;
	/* Multi-block transfers always terminate with STOP_TRANSMISSION. */
	mrq.stop = &stop;


	printk(KERN_DEBUG "CPRM_CMD_SecureRW: 2\n");

	sg_init_one(&sg, buff, length);

	/*
	 * NOTE(review): sg already maps @buff itself, so this copies the
	 * buffer onto itself; it looks redundant but is kept as-is.
	 */
	if (dir == MMC_DATA_WRITE) {
		local_irq_save(flags);
		sg_copy_from_buffer(&sg, data.sg_len, buff, length);
		local_irq_restore(flags);
	}
	printk(KERN_DEBUG "CPRM_CMD_SecureRW: 3\n");

	mmc_wait_for_req(card->host, &mrq);

	printk(KERN_DEBUG "CPRM_CMD_SecureRW: 4\n");

	if (cmd.error) {
		printk(KERN_DEBUG "%s]cmd.error=%d\n", __func__, cmd.error);
		return cmd.error;
	}

	if (data.error) {
		printk(KERN_DEBUG "%s]data.error=%d\n", __func__, data.error);
		return data.error;
	}

	/* Wait out any programming/busy phase before touching the data. */
	err = mmc_wait_busy(card);
	printk(KERN_DEBUG "CPRM_CMD_SecureRW: 5\n");

	/* See the write-side note above: this is likewise a self-copy. */
	if (dir == MMC_DATA_READ) {
		local_irq_save(flags);
		sg_copy_to_buffer(&sg, data.sg_len, buff, length);
		local_irq_restore(flags);
	}

	if (err)
		return err;

	return 0;
}
289
290
291int stub_sendcmd(struct mmc_card *card,
292 unsigned int cmd,
293 unsigned long arg,
294 unsigned int len,
295 unsigned char *buff) {
296
297 int returnVal = -1;
298 unsigned char *kbuffer = NULL;
299 int direction = 0;
300 int result = 0;
301
302 if (card == NULL) {
303 printk(KERN_DEBUG "stub_sendcmd: card is null error\n");
304 return -ENXIO;
305 }
306
307 kbuffer = kmalloc(len, GFP_KERNEL);
308 if (kbuffer == NULL) {
309 printk(KERN_DEBUG "malloc failed\n");
310 return -ENOMEM;
311 }
312
313 memset(kbuffer, 0x00, len);
314
315 printk(KERN_DEBUG "%s]cmd=0x%x,len=%d\n ", __func__, cmd, len);
316
317 mmc_claim_host(card->host);
318
319 switch (cmd) {
320
321 case ACMD43:
322 direction = MMC_DATA_READ;
323 returnVal = CPRM_CMD_SecureRW(card,
324 SD_ACMD43_GET_MKB,
325 direction,
326 arg,
327 kbuffer,
328 len);
329
330 printk(KERN_DEBUG "SD_ACMD43_GET_MKB:0x%x\n", returnVal);
331 break;
332
333 case ACMD44:
334 direction = MMC_DATA_READ;
335 returnVal = CPRM_CMD_SecureRW(card,
336 SD_ACMD44_GET_MID,
337 direction,
338 0,
339 kbuffer,
340 len);
341
342 printk(KERN_DEBUG "SD_ACMD44_GET_MID:0x%x\n", returnVal);
343 break;
344
345 case ACMD45:
346 direction = MMC_DATA_WRITE;
347 result = copy_from_user((void *)kbuffer, (void *)buff, len);
348 returnVal = CPRM_CMD_SecureRW(card,
349 SD_ACMD45_SET_CER_RN1,
350 direction,
351 0,
352 kbuffer,
353 len);
354
355 printk(KERN_DEBUG "SD_ACMD45_SET_CER_RN1:0x%x\n",
356 returnVal);
357 break;
358
359 case ACMD46:
360 direction = MMC_DATA_READ;
361 returnVal = CPRM_CMD_SecureRW(card,
362 SD_ACMD46_GET_CER_RN2,
363 direction,
364 0,
365 kbuffer,
366 len);
367
368 printk(KERN_DEBUG "SD_ACMD46_GET_CER_RN2:0x%x\n",
369 returnVal);
370 break;
371
372 case ACMD47:
373 direction = MMC_DATA_WRITE;
374 result = copy_from_user((void *)kbuffer, (void *)buff, len);
375 returnVal = CPRM_CMD_SecureRW(card,
376 SD_ACMD47_SET_CER_RES2,
377 direction,
378 0,
379 kbuffer,
380 len);
381
382 printk(KERN_DEBUG "SD_ACMD47_SET_CER_RES2:0x%x\n",
383 returnVal);
384 break;
385
386 case ACMD48:
387 direction = MMC_DATA_READ;
388 returnVal = CPRM_CMD_SecureRW(card,
389 SD_ACMD48_GET_CER_RES1,
390 direction,
391 0,
392 kbuffer,
393 len);
394
395 printk(KERN_DEBUG "SD_ACMD48_GET_CER_RES1:0x%x\n",
396 returnVal);
397 break;
398
399 case ACMD25:
400 direction = MMC_DATA_WRITE;
401 result = copy_from_user((void *)kbuffer, (void *)buff, len);
402 returnVal = CPRM_CMD_SecureMultiRW(card,
403 SD_ACMD25_SECURE_WRITE_MULTI_BLOCK,
404 direction,
405 0,
406 kbuffer,
407 len);
408
409 printk(KERN_DEBUG "SD_ACMD25_SECURE_WRITE_MULTI_BLOCK[%d]=%d\n",
410 len, returnVal);
411 break;
412
413 case ACMD18:
414 direction = MMC_DATA_READ;
415 returnVal = CPRM_CMD_SecureMultiRW(card,
416 SD_ACMD18_SECURE_READ_MULTI_BLOCK,
417 direction,
418 0,
419 kbuffer,
420 len);
421
422 printk(KERN_DEBUG "SD_ACMD18_SECURE_READ_MULTI_BLOCK [%d]=%d\n",
423 len, returnVal);
424 break;
425
426 case ACMD13:
427 break;
428
429 default:
430 printk(KERN_DEBUG " %s ] : CMD [ %x ] ERROR", __func__, cmd);
431 break;
432 }
433
434 if (returnVal == 0) {
435 if (direction == MMC_DATA_READ)
436 result = copy_to_user((void *)buff,
437 (void *)kbuffer,
438 len);
439
440 result = returnVal;
441 printk(KERN_DEBUG "stub_sendcmd SDAS_E_SUCCESS\n");
442 } else {
443 printk(KERN_DEBUG "stub_sendcmd SDAS_E_FAIL\n");
444 result = -EIO;
445 }
446
447 mmc_release_host(card->host);
448 kfree(kbuffer);
449 return result;
450}
diff --git a/drivers/mmc/card/cprmdrv_samsung.h b/drivers/mmc/card/cprmdrv_samsung.h
new file mode 100644
index 00000000000..b07dd6e7f6f
--- /dev/null
+++ b/drivers/mmc/card/cprmdrv_samsung.h
@@ -0,0 +1,75 @@
1
/*
 * cprmdrv_samsung.h - command encodings and request payload for the
 * Samsung CPRM (Content Protection for Recordable Media) SD driver.
 */
#ifndef __CPRM_API_SAMSUNG
#define __CPRM_API_SAMSUNG

/* Pack/unpack the 3-bit response-type field at bits 13:11 of a command code. */
#define SETRESP(x) (x << 11)
#define GETRESP(x) ((x >> 11) & 0x0007)

#define NORESP SETRESP(0) /* No response command */
#define R1RESP SETRESP(1) /* r1 response command */
#define R1BRESP SETRESP(2) /* r1b response command */
#define R2RESP SETRESP(3) /* r2 response command */
#define R3RESP SETRESP(4) /* r3 response command */
#define R6RESP SETRESP(5) /* r6 response command */
#define R7RESP SETRESP(6) /* r7 response command */

/* Attribute bits OR-ed into the command index below. */
#define DT 0x8000 /* With data */
#define DIR_IN 0x0000 /* Data Transfer read */
#define DIR_OUT 0x4000 /* Data Transfer write */
#define ACMD 0x0400 /* Is ACMD */

/* Encoded selectors: command index + response type + data attributes. */
#define ACMD6 (6+R1RESP+ACMD) /* Set Bus Width(SD) */
#define ACMD13 (13+R1RESP+ACMD+DT+DIR_IN) /* SD Status */
#define ACMD18 (18+R1RESP+ACMD+DT+DIR_IN) /* Secure Read Multi Block */
#define ACMD22 (22+R1RESP+ACMD+DT+DIR_IN) /* Send Number Write block */
#define ACMD23 (23+R1RESP+ACMD) /* Set Write block Erase Count */
#define ACMD25 (25+R1RESP+ACMD+DT+DIR_OUT) /* Secure Write Multiple Block */
#define ACMD26 (26+R1RESP+ACMD+DT+DIR_OUT) /* Secure Write MKB */
#define ACMD38 (38+R1BRESP+ACMD) /* Secure Erase */
#define ACMD41 (41+R3RESP+ACMD) /* Send App Operating Condition */
#define ACMD42 (42+R1RESP+ACMD) /* Set Clear Card Detect */
#define ACMD43 (43+R1RESP+ACMD+DT+DIR_IN) /* Get MKB */
#define ACMD44 (44+R1RESP+ACMD+DT+DIR_IN) /* Get MID */
#define ACMD45 (45+R1RESP+ACMD+DT+DIR_OUT) /* Set CER RN1 */
#define ACMD46 (46+R1RESP+ACMD+DT+DIR_IN) /* Get CER RN2 */
#define ACMD47 (47+R1RESP+ACMD+DT+DIR_OUT) /* Set CER RES2 */
#define ACMD48 (48+R1RESP+ACMD+DT+DIR_IN) /* Get CER RES1 */
#define ACMD49 (49+R1BRESP+ACMD) /* Change Erase Area */
#define ACMD51 (51+R1RESP+ACMD+DT+DIR_IN) /* Send SCR */

/* Application-specific commands supported by all SD cards */
enum SD_ACMD {
SD_ACMD6_SET_BUS_WIDTH = 6,
SD_ACMD13_SD_STATUS = 13,
SD_ACMD18_SECURE_READ_MULTI_BLOCK = 18,
SD_ACMD22_SEND_NUM_WR_BLOCKS = 22,
SD_ACMD23_SET_WR_BLK_ERASE_COUNT = 23,
SD_ACMD25_SECURE_WRITE_MULTI_BLOCK = 25,
SD_ACMD26_SECURE_WRITE_MKB = 26,
SD_ACMD38_SECURE_ERASE = 38,
SD_ACMD41_SD_APP_OP_COND = 41,
SD_ACMD42_SET_CLR_CARD_DETECT = 42,
SD_ACMD43_GET_MKB = 43,
SD_ACMD44_GET_MID = 44,
SD_ACMD45_SET_CER_RN1 = 45,
SD_ACMD46_GET_CER_RN2 = 46,
SD_ACMD47_SET_CER_RES2 = 47,
SD_ACMD48_GET_CER_RES1 = 48,
SD_ACMD49_CHANGE_SECURE_AREA = 49,
SD_ACMD51_SEND_SCR = 51
};

/* One CPRM request as handed to stub_sendcmd(). */
struct cprm_request {
	unsigned int cmd;	/* encoded ACMDxx selector (macros above) */
	unsigned long arg;	/* raw command argument */
	unsigned char *buff;	/* user-space data buffer */
	unsigned int len;	/* length of buff in bytes */
};

/*
 * Copy data between @buff (user space) and the card, issuing the
 * secure command selected by @cmd.  Returns 0 or a negative errno.
 */
int stub_sendcmd(struct mmc_card *card,
	unsigned int cmd,
	unsigned long arg,
	unsigned int len,
	unsigned char *buff);

#endif /* __CPRM_API_SAMSUNG */
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 6413afa318d..bf5d1837d57 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -29,6 +29,8 @@
29 */ 29 */
30static int mmc_prep_request(struct request_queue *q, struct request *req) 30static int mmc_prep_request(struct request_queue *q, struct request *req)
31{ 31{
32 struct mmc_queue *mq = q->queuedata;
33
32 /* 34 /*
33 * We only like normal block requests and discards. 35 * We only like normal block requests and discards.
34 */ 36 */
@@ -37,6 +39,9 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
37 return BLKPREP_KILL; 39 return BLKPREP_KILL;
38 } 40 }
39 41
42 if (mq && mmc_card_removed(mq->card))
43 return BLKPREP_KILL;
44
40 req->cmd_flags |= REQ_DONTPREP; 45 req->cmd_flags |= REQ_DONTPREP;
41 46
42 return BLKPREP_OK; 47 return BLKPREP_OK;
@@ -52,14 +57,18 @@ static int mmc_queue_thread(void *d)
52 down(&mq->thread_sem); 57 down(&mq->thread_sem);
53 do { 58 do {
54 struct request *req = NULL; 59 struct request *req = NULL;
60 struct mmc_queue_req *tmp;
55 61
56 spin_lock_irq(q->queue_lock); 62 spin_lock_irq(q->queue_lock);
57 set_current_state(TASK_INTERRUPTIBLE); 63 set_current_state(TASK_INTERRUPTIBLE);
58 req = blk_fetch_request(q); 64 req = blk_fetch_request(q);
59 mq->req = req; 65 mq->mqrq_cur->req = req;
60 spin_unlock_irq(q->queue_lock); 66 spin_unlock_irq(q->queue_lock);
61 67
62 if (!req) { 68 if (req || mq->mqrq_prev->req) {
69 set_current_state(TASK_RUNNING);
70 mq->issue_fn(mq, req);
71 } else {
63 if (kthread_should_stop()) { 72 if (kthread_should_stop()) {
64 set_current_state(TASK_RUNNING); 73 set_current_state(TASK_RUNNING);
65 break; 74 break;
@@ -67,11 +76,14 @@ static int mmc_queue_thread(void *d)
67 up(&mq->thread_sem); 76 up(&mq->thread_sem);
68 schedule(); 77 schedule();
69 down(&mq->thread_sem); 78 down(&mq->thread_sem);
70 continue;
71 } 79 }
72 set_current_state(TASK_RUNNING);
73 80
74 mq->issue_fn(mq, req); 81 /* Current request becomes previous request and vice versa. */
82 mq->mqrq_prev->brq.mrq.data = NULL;
83 mq->mqrq_prev->req = NULL;
84 tmp = mq->mqrq_prev;
85 mq->mqrq_prev = mq->mqrq_cur;
86 mq->mqrq_cur = tmp;
75 } while (1); 87 } while (1);
76 up(&mq->thread_sem); 88 up(&mq->thread_sem);
77 89
@@ -97,10 +109,46 @@ static void mmc_request(struct request_queue *q)
97 return; 109 return;
98 } 110 }
99 111
100 if (!mq->req) 112 if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
101 wake_up_process(mq->thread); 113 wake_up_process(mq->thread);
102} 114}
103 115
116static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
117{
118 struct scatterlist *sg;
119
120 sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
121 if (!sg)
122 *err = -ENOMEM;
123 else {
124 *err = 0;
125 sg_init_table(sg, sg_len);
126 }
127
128 return sg;
129}
130
131static void mmc_queue_setup_discard(struct request_queue *q,
132 struct mmc_card *card)
133{
134 unsigned max_discard;
135
136 max_discard = mmc_calc_max_discard(card);
137 if (!max_discard)
138 return;
139
140 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
141 q->limits.max_discard_sectors = max_discard;
142 if (card->erased_byte == 0)
143 q->limits.discard_zeroes_data = 1;
144 q->limits.discard_granularity = card->pref_erase << 9;
145 /* granularity must not be greater than max. discard */
146 if (card->pref_erase > max_discard)
147 q->limits.discard_granularity = 0;
148 if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))
149 queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
150}
151
104/** 152/**
105 * mmc_init_queue - initialise a queue structure. 153 * mmc_init_queue - initialise a queue structure.
106 * @mq: mmc queue 154 * @mq: mmc queue
@@ -116,6 +164,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
116 struct mmc_host *host = card->host; 164 struct mmc_host *host = card->host;
117 u64 limit = BLK_BOUNCE_HIGH; 165 u64 limit = BLK_BOUNCE_HIGH;
118 int ret; 166 int ret;
167 struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
168 struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
119 169
120 if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) 170 if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
121 limit = *mmc_dev(host)->dma_mask; 171 limit = *mmc_dev(host)->dma_mask;
@@ -125,21 +175,18 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
125 if (!mq->queue) 175 if (!mq->queue)
126 return -ENOMEM; 176 return -ENOMEM;
127 177
178 memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
179 memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
180 INIT_LIST_HEAD(&mqrq_cur->packed_list);
181 INIT_LIST_HEAD(&mqrq_prev->packed_list);
182 mq->mqrq_cur = mqrq_cur;
183 mq->mqrq_prev = mqrq_prev;
128 mq->queue->queuedata = mq; 184 mq->queue->queuedata = mq;
129 mq->req = NULL;
130 185
131 blk_queue_prep_rq(mq->queue, mmc_prep_request); 186 blk_queue_prep_rq(mq->queue, mmc_prep_request);
132 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); 187 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
133 if (mmc_can_erase(card)) { 188 if (mmc_can_erase(card))
134 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue); 189 mmc_queue_setup_discard(mq->queue, card);
135 mq->queue->limits.max_discard_sectors = UINT_MAX;
136 if (card->erased_byte == 0)
137 mq->queue->limits.discard_zeroes_data = 1;
138 mq->queue->limits.discard_granularity = card->pref_erase << 9;
139 if (mmc_can_secure_erase_trim(card))
140 queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
141 mq->queue);
142 }
143 190
144#ifdef CONFIG_MMC_BLOCK_BOUNCE 191#ifdef CONFIG_MMC_BLOCK_BOUNCE
145 if (host->max_segs == 1) { 192 if (host->max_segs == 1) {
@@ -155,53 +202,64 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
155 bouncesz = host->max_blk_count * 512; 202 bouncesz = host->max_blk_count * 512;
156 203
157 if (bouncesz > 512) { 204 if (bouncesz > 512) {
158 mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL); 205 mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
159 if (!mq->bounce_buf) { 206 if (!mqrq_cur->bounce_buf) {
160 printk(KERN_WARNING "%s: unable to " 207 printk(KERN_WARNING "%s: unable to "
161 "allocate bounce buffer\n", 208 "allocate bounce cur buffer\n",
162 mmc_card_name(card)); 209 mmc_card_name(card));
163 } 210 }
211 mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
212 if (!mqrq_prev->bounce_buf) {
213 printk(KERN_WARNING "%s: unable to "
214 "allocate bounce prev buffer\n",
215 mmc_card_name(card));
216 kfree(mqrq_cur->bounce_buf);
217 mqrq_cur->bounce_buf = NULL;
218 }
164 } 219 }
165 220
166 if (mq->bounce_buf) { 221 if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
167 blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY); 222 blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
168 blk_queue_max_hw_sectors(mq->queue, bouncesz / 512); 223 blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
169 blk_queue_max_segments(mq->queue, bouncesz / 512); 224 blk_queue_max_segments(mq->queue, bouncesz / 512);
170 blk_queue_max_segment_size(mq->queue, bouncesz); 225 blk_queue_max_segment_size(mq->queue, bouncesz);
171 226
172 mq->sg = kmalloc(sizeof(struct scatterlist), 227 mqrq_cur->sg = mmc_alloc_sg(1, &ret);
173 GFP_KERNEL); 228 if (ret)
174 if (!mq->sg) {
175 ret = -ENOMEM;
176 goto cleanup_queue; 229 goto cleanup_queue;
177 }
178 sg_init_table(mq->sg, 1);
179 230
180 mq->bounce_sg = kmalloc(sizeof(struct scatterlist) * 231 mqrq_cur->bounce_sg =
181 bouncesz / 512, GFP_KERNEL); 232 mmc_alloc_sg(bouncesz / 512, &ret);
182 if (!mq->bounce_sg) { 233 if (ret)
183 ret = -ENOMEM; 234 goto cleanup_queue;
235
236 mqrq_prev->sg = mmc_alloc_sg(1, &ret);
237 if (ret)
238 goto cleanup_queue;
239
240 mqrq_prev->bounce_sg =
241 mmc_alloc_sg(bouncesz / 512, &ret);
242 if (ret)
184 goto cleanup_queue; 243 goto cleanup_queue;
185 }
186 sg_init_table(mq->bounce_sg, bouncesz / 512);
187 } 244 }
188 } 245 }
189#endif 246#endif
190 247
191 if (!mq->bounce_buf) { 248 if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
192 blk_queue_bounce_limit(mq->queue, limit); 249 blk_queue_bounce_limit(mq->queue, limit);
193 blk_queue_max_hw_sectors(mq->queue, 250 blk_queue_max_hw_sectors(mq->queue,
194 min(host->max_blk_count, host->max_req_size / 512)); 251 min(host->max_blk_count, host->max_req_size / 512));
195 blk_queue_max_segments(mq->queue, host->max_segs); 252 blk_queue_max_segments(mq->queue, host->max_segs);
196 blk_queue_max_segment_size(mq->queue, host->max_seg_size); 253 blk_queue_max_segment_size(mq->queue, host->max_seg_size);
197 254
198 mq->sg = kmalloc(sizeof(struct scatterlist) * 255 mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
199 host->max_segs, GFP_KERNEL); 256 if (ret)
200 if (!mq->sg) { 257 goto cleanup_queue;
201 ret = -ENOMEM; 258
259
260 mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
261 if (ret)
202 goto cleanup_queue; 262 goto cleanup_queue;
203 }
204 sg_init_table(mq->sg, host->max_segs);
205 } 263 }
206 264
207 sema_init(&mq->thread_sem, 1); 265 sema_init(&mq->thread_sem, 1);
@@ -216,16 +274,22 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
216 274
217 return 0; 275 return 0;
218 free_bounce_sg: 276 free_bounce_sg:
219 if (mq->bounce_sg) 277 kfree(mqrq_cur->bounce_sg);
220 kfree(mq->bounce_sg); 278 mqrq_cur->bounce_sg = NULL;
221 mq->bounce_sg = NULL; 279 kfree(mqrq_prev->bounce_sg);
280 mqrq_prev->bounce_sg = NULL;
281
222 cleanup_queue: 282 cleanup_queue:
223 if (mq->sg) 283 kfree(mqrq_cur->sg);
224 kfree(mq->sg); 284 mqrq_cur->sg = NULL;
225 mq->sg = NULL; 285 kfree(mqrq_cur->bounce_buf);
226 if (mq->bounce_buf) 286 mqrq_cur->bounce_buf = NULL;
227 kfree(mq->bounce_buf); 287
228 mq->bounce_buf = NULL; 288 kfree(mqrq_prev->sg);
289 mqrq_prev->sg = NULL;
290 kfree(mqrq_prev->bounce_buf);
291 mqrq_prev->bounce_buf = NULL;
292
229 blk_cleanup_queue(mq->queue); 293 blk_cleanup_queue(mq->queue);
230 return ret; 294 return ret;
231} 295}
@@ -234,6 +298,8 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
234{ 298{
235 struct request_queue *q = mq->queue; 299 struct request_queue *q = mq->queue;
236 unsigned long flags; 300 unsigned long flags;
301 struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
302 struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
237 303
238 /* Make sure the queue isn't suspended, as that will deadlock */ 304 /* Make sure the queue isn't suspended, as that will deadlock */
239 mmc_queue_resume(mq); 305 mmc_queue_resume(mq);
@@ -247,16 +313,23 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
247 blk_start_queue(q); 313 blk_start_queue(q);
248 spin_unlock_irqrestore(q->queue_lock, flags); 314 spin_unlock_irqrestore(q->queue_lock, flags);
249 315
250 if (mq->bounce_sg) 316 kfree(mqrq_cur->bounce_sg);
251 kfree(mq->bounce_sg); 317 mqrq_cur->bounce_sg = NULL;
252 mq->bounce_sg = NULL; 318
319 kfree(mqrq_cur->sg);
320 mqrq_cur->sg = NULL;
321
322 kfree(mqrq_cur->bounce_buf);
323 mqrq_cur->bounce_buf = NULL;
253 324
254 kfree(mq->sg); 325 kfree(mqrq_prev->bounce_sg);
255 mq->sg = NULL; 326 mqrq_prev->bounce_sg = NULL;
256 327
257 if (mq->bounce_buf) 328 kfree(mqrq_prev->sg);
258 kfree(mq->bounce_buf); 329 mqrq_prev->sg = NULL;
259 mq->bounce_buf = NULL; 330
331 kfree(mqrq_prev->bounce_buf);
332 mqrq_prev->bounce_buf = NULL;
260 333
261 mq->card = NULL; 334 mq->card = NULL;
262} 335}
@@ -306,30 +379,70 @@ void mmc_queue_resume(struct mmc_queue *mq)
306 } 379 }
307} 380}
308 381
382static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
383 struct mmc_queue_req *mqrq,
384 struct scatterlist *sg)
385{
386 struct scatterlist *__sg;
387 unsigned int sg_len = 0;
388 struct request *req;
389 enum mmc_packed_cmd cmd;
390
391 cmd = mqrq->packed_cmd;
392
393 if (cmd == MMC_PACKED_WR_HDR || cmd == MMC_PACKED_WRITE) {
394 __sg = sg;
395 sg_set_buf(__sg, mqrq->packed_cmd_hdr,
396 sizeof(mqrq->packed_cmd_hdr));
397 sg_len++;
398 if (cmd == MMC_PACKED_WR_HDR) {
399 sg_mark_end(__sg);
400 return sg_len;
401 }
402 __sg->page_link &= ~0x02;
403 }
404
405 __sg = sg + sg_len;
406 list_for_each_entry(req, &mqrq->packed_list, queuelist) {
407 sg_len += blk_rq_map_sg(mq->queue, req, __sg);
408 __sg = sg + (sg_len - 1);
409 (__sg++)->page_link &= ~0x02;
410 }
411 sg_mark_end(sg + (sg_len - 1));
412 return sg_len;
413}
414
309/* 415/*
310 * Prepare the sg list(s) to be handed of to the host driver 416 * Prepare the sg list(s) to be handed of to the host driver
311 */ 417 */
312unsigned int mmc_queue_map_sg(struct mmc_queue *mq) 418unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
313{ 419{
314 unsigned int sg_len; 420 unsigned int sg_len;
315 size_t buflen; 421 size_t buflen;
316 struct scatterlist *sg; 422 struct scatterlist *sg;
317 int i; 423 int i;
318 424
319 if (!mq->bounce_buf) 425 if (!mqrq->bounce_buf) {
320 return blk_rq_map_sg(mq->queue, mq->req, mq->sg); 426 if (!list_empty(&mqrq->packed_list))
427 return mmc_queue_packed_map_sg(mq, mqrq, mqrq->sg);
428 else
429 return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
430 }
321 431
322 BUG_ON(!mq->bounce_sg); 432 BUG_ON(!mqrq->bounce_sg);
323 433
324 sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg); 434 if (!list_empty(&mqrq->packed_list))
435 sg_len = mmc_queue_packed_map_sg(mq, mqrq, mqrq->bounce_sg);
436 else
437 sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
325 438
326 mq->bounce_sg_len = sg_len; 439 mqrq->bounce_sg_len = sg_len;
327 440
328 buflen = 0; 441 buflen = 0;
329 for_each_sg(mq->bounce_sg, sg, sg_len, i) 442 for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
330 buflen += sg->length; 443 buflen += sg->length;
331 444
332 sg_init_one(mq->sg, mq->bounce_buf, buflen); 445 sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
333 446
334 return 1; 447 return 1;
335} 448}
@@ -338,31 +451,30 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
338 * If writing, bounce the data to the buffer before the request 451 * If writing, bounce the data to the buffer before the request
339 * is sent to the host driver 452 * is sent to the host driver
340 */ 453 */
341void mmc_queue_bounce_pre(struct mmc_queue *mq) 454void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
342{ 455{
343 if (!mq->bounce_buf) 456 if (!mqrq->bounce_buf)
344 return; 457 return;
345 458
346 if (rq_data_dir(mq->req) != WRITE) 459 if (rq_data_dir(mqrq->req) != WRITE)
347 return; 460 return;
348 461
349 sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len, 462 sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
350 mq->bounce_buf, mq->sg[0].length); 463 mqrq->bounce_buf, mqrq->sg[0].length);
351} 464}
352 465
353/* 466/*
354 * If reading, bounce the data from the buffer after the request 467 * If reading, bounce the data from the buffer after the request
355 * has been handled by the host driver 468 * has been handled by the host driver
356 */ 469 */
357void mmc_queue_bounce_post(struct mmc_queue *mq) 470void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
358{ 471{
359 if (!mq->bounce_buf) 472 if (!mqrq->bounce_buf)
360 return; 473 return;
361 474
362 if (rq_data_dir(mq->req) != READ) 475 if (rq_data_dir(mqrq->req) != READ)
363 return; 476 return;
364 477
365 sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len, 478 sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
366 mq->bounce_buf, mq->sg[0].length); 479 mqrq->bounce_buf, mqrq->sg[0].length);
367} 480}
368
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 6223ef8dc9c..be58b3c77ab 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -4,19 +4,48 @@
4struct request; 4struct request;
5struct task_struct; 5struct task_struct;
6 6
7struct mmc_blk_request {
8 struct mmc_request mrq;
9 struct mmc_command sbc;
10 struct mmc_command cmd;
11 struct mmc_command stop;
12 struct mmc_data data;
13};
14
15enum mmc_packed_cmd {
16 MMC_PACKED_NONE = 0,
17 MMC_PACKED_WR_HDR,
18 MMC_PACKED_WRITE,
19 MMC_PACKED_READ,
20};
21
22struct mmc_queue_req {
23 struct request *req;
24 struct mmc_blk_request brq;
25 struct scatterlist *sg;
26 char *bounce_buf;
27 struct scatterlist *bounce_sg;
28 unsigned int bounce_sg_len;
29 struct mmc_async_req mmc_active;
30 struct list_head packed_list;
31 u32 packed_cmd_hdr[128];
32 unsigned int packed_blocks;
33 enum mmc_packed_cmd packed_cmd;
34 int packed_fail_idx;
35 u8 packed_num;
36};
37
7struct mmc_queue { 38struct mmc_queue {
8 struct mmc_card *card; 39 struct mmc_card *card;
9 struct task_struct *thread; 40 struct task_struct *thread;
10 struct semaphore thread_sem; 41 struct semaphore thread_sem;
11 unsigned int flags; 42 unsigned int flags;
12 struct request *req;
13 int (*issue_fn)(struct mmc_queue *, struct request *); 43 int (*issue_fn)(struct mmc_queue *, struct request *);
14 void *data; 44 void *data;
15 struct request_queue *queue; 45 struct request_queue *queue;
16 struct scatterlist *sg; 46 struct mmc_queue_req mqrq[2];
17 char *bounce_buf; 47 struct mmc_queue_req *mqrq_cur;
18 struct scatterlist *bounce_sg; 48 struct mmc_queue_req *mqrq_prev;
19 unsigned int bounce_sg_len;
20}; 49};
21 50
22extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *, 51extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
@@ -25,8 +54,9 @@ extern void mmc_cleanup_queue(struct mmc_queue *);
25extern void mmc_queue_suspend(struct mmc_queue *); 54extern void mmc_queue_suspend(struct mmc_queue *);
26extern void mmc_queue_resume(struct mmc_queue *); 55extern void mmc_queue_resume(struct mmc_queue *);
27 56
28extern unsigned int mmc_queue_map_sg(struct mmc_queue *); 57extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
29extern void mmc_queue_bounce_pre(struct mmc_queue *); 58 struct mmc_queue_req *);
30extern void mmc_queue_bounce_post(struct mmc_queue *); 59extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
60extern void mmc_queue_bounce_post(struct mmc_queue_req *);
31 61
32#endif 62#endif
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index ef103871517..cc83b0a8d30 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -27,3 +27,33 @@ config MMC_CLKGATE
27 support handling this in order for it to be of any use. 27 support handling this in order for it to be of any use.
28 28
29 If unsure, say N. 29 If unsure, say N.
30
31config MMC_EMBEDDED_SDIO
32 boolean "MMC embedded SDIO device support (EXPERIMENTAL)"
33 depends on EXPERIMENTAL
34 help
35 If you say Y here, support will be added for embedded SDIO
36 devices which do not contain the necessary enumeration
37 support in hardware to be properly detected.
38
39config MMC_PARANOID_SD_INIT
40 bool "Enable paranoid SD card initialization (EXPERIMENTAL)"
41 depends on EXPERIMENTAL
42 help
43 If you say Y here, the MMC layer will be extra paranoid
44 about re-trying SD init requests. This can be a useful
45 work-around for buggy controllers and hardware. Enable
46 if you are experiencing issues with SD detection.
47
48config MMC_NOT_USE_SANITIZE
49 bool "Disable SANITIZE emmc4.5 feature (EXPERIMENTAL)"
50 depends on EXPERIMENTAL
51 help
52 If you say Y here, sanitize feature will be disable.
53
54config MMC_POLLING_WAIT_CMD23
55 bool "Wait for cmd23's done interrupt by polling check (EXPERIMENTAL)"
56 depends on EXPERIMENTAL
57 help
58 If you say Y here, cmd23 dose not use interrupt.
59
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 393d817ed04..eb7a4c8086b 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -120,18 +120,19 @@ static int mmc_bus_remove(struct device *dev)
120 return 0; 120 return 0;
121} 121}
122 122
123static int mmc_bus_suspend(struct device *dev, pm_message_t state) 123static int mmc_bus_pm_suspend(struct device *dev)
124{ 124{
125 struct mmc_driver *drv = to_mmc_driver(dev->driver); 125 struct mmc_driver *drv = to_mmc_driver(dev->driver);
126 struct mmc_card *card = mmc_dev_to_card(dev); 126 struct mmc_card *card = mmc_dev_to_card(dev);
127 int ret = 0; 127 int ret = 0;
128 pm_message_t state = { PM_EVENT_SUSPEND };
128 129
129 if (dev->driver && drv->suspend) 130 if (dev->driver && drv->suspend)
130 ret = drv->suspend(card, state); 131 ret = drv->suspend(card, state);
131 return ret; 132 return ret;
132} 133}
133 134
134static int mmc_bus_resume(struct device *dev) 135static int mmc_bus_pm_resume(struct device *dev)
135{ 136{
136 struct mmc_driver *drv = to_mmc_driver(dev->driver); 137 struct mmc_driver *drv = to_mmc_driver(dev->driver);
137 struct mmc_card *card = mmc_dev_to_card(dev); 138 struct mmc_card *card = mmc_dev_to_card(dev);
@@ -143,7 +144,6 @@ static int mmc_bus_resume(struct device *dev)
143} 144}
144 145
145#ifdef CONFIG_PM_RUNTIME 146#ifdef CONFIG_PM_RUNTIME
146
147static int mmc_runtime_suspend(struct device *dev) 147static int mmc_runtime_suspend(struct device *dev)
148{ 148{
149 struct mmc_card *card = mmc_dev_to_card(dev); 149 struct mmc_card *card = mmc_dev_to_card(dev);
@@ -162,21 +162,13 @@ static int mmc_runtime_idle(struct device *dev)
162{ 162{
163 return pm_runtime_suspend(dev); 163 return pm_runtime_suspend(dev);
164} 164}
165#endif /* CONFIG_PM_RUNTIME */
165 166
166static const struct dev_pm_ops mmc_bus_pm_ops = { 167static const struct dev_pm_ops mmc_bus_pm_ops = {
167 .runtime_suspend = mmc_runtime_suspend, 168 SET_SYSTEM_SLEEP_PM_OPS(mmc_bus_pm_suspend, mmc_bus_pm_resume)
168 .runtime_resume = mmc_runtime_resume, 169 SET_RUNTIME_PM_OPS(mmc_runtime_suspend, mmc_runtime_resume, mmc_runtime_idle)
169 .runtime_idle = mmc_runtime_idle,
170}; 170};
171 171
172#define MMC_PM_OPS_PTR (&mmc_bus_pm_ops)
173
174#else /* !CONFIG_PM_RUNTIME */
175
176#define MMC_PM_OPS_PTR NULL
177
178#endif /* !CONFIG_PM_RUNTIME */
179
180static struct bus_type mmc_bus_type = { 172static struct bus_type mmc_bus_type = {
181 .name = "mmc", 173 .name = "mmc",
182 .dev_attrs = mmc_dev_attrs, 174 .dev_attrs = mmc_dev_attrs,
@@ -184,9 +176,7 @@ static struct bus_type mmc_bus_type = {
184 .uevent = mmc_bus_uevent, 176 .uevent = mmc_bus_uevent,
185 .probe = mmc_bus_probe, 177 .probe = mmc_bus_probe,
186 .remove = mmc_bus_remove, 178 .remove = mmc_bus_remove,
187 .suspend = mmc_bus_suspend, 179 .pm = &mmc_bus_pm_ops,
188 .resume = mmc_bus_resume,
189 .pm = MMC_PM_OPS_PTR,
190}; 180};
191 181
192int mmc_register_bus(void) 182int mmc_register_bus(void)
@@ -301,10 +291,11 @@ int mmc_add_card(struct mmc_card *card)
301 mmc_card_ddr_mode(card) ? "DDR " : "", 291 mmc_card_ddr_mode(card) ? "DDR " : "",
302 type); 292 type);
303 } else { 293 } else {
304 printk(KERN_INFO "%s: new %s%s%s card at address %04x\n", 294 pr_info("%s: new %s%s%s%s card at address %04x\n",
305 mmc_hostname(card->host), 295 mmc_hostname(card->host),
306 mmc_sd_card_uhs(card) ? "ultra high speed " : 296 mmc_card_uhs(card) ? "ultra high speed " :
307 (mmc_card_highspeed(card) ? "high speed " : ""), 297 (mmc_card_highspeed(card) ? "high speed " : ""),
298 (mmc_card_hs200(card) ? "HS200 " : ""),
308 mmc_card_ddr_mode(card) ? "DDR " : "", 299 mmc_card_ddr_mode(card) ? "DDR " : "",
309 type, card->rca); 300 type, card->rca);
310 } 301 }
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 75db30e6dcf..805183514b9 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -23,6 +23,7 @@
23#include <linux/log2.h> 23#include <linux/log2.h>
24#include <linux/regulator/consumer.h> 24#include <linux/regulator/consumer.h>
25#include <linux/pm_runtime.h> 25#include <linux/pm_runtime.h>
26#include <linux/wakelock.h>
26 27
27#include <linux/mmc/card.h> 28#include <linux/mmc/card.h>
28#include <linux/mmc/host.h> 29#include <linux/mmc/host.h>
@@ -100,7 +101,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
100 cmd->retries = 0; 101 cmd->retries = 0;
101 } 102 }
102 103
103 if (err && cmd->retries) { 104 if (err && cmd->retries && !mmc_card_removed(host->card)) {
104 pr_debug("%s: req failed (CMD%u): %d, retrying...\n", 105 pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
105 mmc_hostname(host), cmd->opcode, err); 106 mmc_hostname(host), cmd->opcode, err);
106 107
@@ -198,9 +199,219 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
198 199
199static void mmc_wait_done(struct mmc_request *mrq) 200static void mmc_wait_done(struct mmc_request *mrq)
200{ 201{
201 complete(mrq->done_data); 202 complete(&mrq->completion);
202} 203}
203 204
205static void __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
206{
207 init_completion(&mrq->completion);
208 mrq->done = mmc_wait_done;
209
210 if (mmc_card_removed(host->card)) {
211 mrq->cmd->error = -ENOMEDIUM;
212 complete(&mrq->completion);
213 return;
214 }
215
216#if (defined(CONFIG_MIDAS_COMMON) && !defined(CONFIG_EXYNOS4_DEV_DWMCI)) || \
217 defined(CONFIG_MACH_U1) || defined(CONFIG_MACH_SLP_NAPLES)
218#ifndef CONFIG_MMC_POLLING_WAIT_CMD23
219
220 if(mrq->sbc) {
221 struct mmc_request tmp_mrq;
222
223 memcpy(&tmp_mrq, mrq, sizeof(struct mmc_request));
224
225 /* send cmd 23 first */
226 mrq->cmd = mrq->sbc;
227 mrq->data = 0;
228 mmc_start_request(host, mrq);
229
230 /* wait for cmd 23 complete */
231 wait_for_completion(&mrq->completion);
232
233 /* check that cmd23 is done well */
234 if(mrq->cmd->error) {
235 /* there were an error while cmd23 was doing */
236 mrq->sbc = mrq->cmd;
237 mrq->cmd = tmp_mrq.cmd;
238 mrq->data = tmp_mrq.data;
239 return;
240 }
241 /* send R/W command */
242 init_completion(&mrq->completion);
243 mrq->sbc = mrq->cmd;
244 mrq->cmd = tmp_mrq.cmd;
245 mrq->data = tmp_mrq.data;
246 mmc_start_request(host, mrq);
247 } else
248#endif
249#endif
250 mmc_start_request(host, mrq);
251}
252
253static inline void mmc_set_ios(struct mmc_host *host);
254static void mmc_power_up(struct mmc_host *host);
255static void mmc_wait_for_req_done(struct mmc_host *host,
256 struct mmc_request *mrq)
257{
258 struct mmc_command *cmd;
259#if (defined(CONFIG_MIDAS_COMMON) && !defined(CONFIG_EXYNOS4_DEV_DWMCI))
260#ifndef CONFIG_MMC_POLLING_WAIT_CMD23
261 if(mrq->sbc && mrq->sbc->error) {
262 /* if an sbc error exists, do not wait completion.
263 completion is already called.
264 nothing to do at this condition. */
265 } else
266#endif
267#endif
268 wait_for_completion(&mrq->completion);
269
270 cmd = mrq->cmd;
271 if (!cmd->error || !cmd->retries ||
272 mmc_card_removed(host->card))
273 return;
274
275 /* if card is mmc type and nonremovable, and there are erros after
276 issuing r/w command, then init eMMC and mshc */
277 if (((host->card) && mmc_card_mmc(host->card) && \
278 (host->caps & MMC_CAP_NONREMOVABLE)) && \
279 (mrq->cmd->error == -ENOTRECOVERABLE || \
280 ((mrq->cmd->opcode == 17 || mrq->cmd->opcode == 18) && \
281 ((mrq->data->error) || mrq->cmd->error || \
282 (mrq->sbc && mrq->sbc->error))))) {
283 int rt_err = -1,count = 3;
284
285 printk(KERN_ERR "%s: it occurs a critical error on eMMC "
286 "it'll try to recover eMMC to normal state\n",
287 mmc_hostname(host));
288 do {
289 /* these errors mean eMMC gets abnormal state.
290 to recover eMMC to be normal, it has to reset eMMC.
291 first of all, it stops to power to eMMC over 10ms.*/
292 if (host->ops->init_card) {
293 host->ops->init_card(host, host->card);
294 }
295 /* re-init eMMC card */
296 if (host->bus_ops && !host->bus_dead) {
297 /* to init mshc */
298 host->ios.power_mode = MMC_POWER_OFF;
299 host->ios.bus_width = MMC_BUS_WIDTH_1;
300 host->ios.timing = MMC_TIMING_LEGACY;
301 mmc_set_ios(host);
302 mmc_power_up(host);
303
304 /* to init eMMC */
305 if( host->bus_ops->resume )
306 rt_err = host->bus_ops->resume(host);
307 }
308 count--;
309 } while(count && rt_err);
310
311 if (rt_err) {
312 printk(KERN_ERR "%s: it has failed to recover eMMC\n",
313 mmc_hostname(host));
314 } else {
315 printk(KERN_INFO "%s: recovering eMMC has been done\n",
316 mmc_hostname(host));
317 }
318
319 }
320}
321
322/**
323 * mmc_pre_req - Prepare for a new request
324 * @host: MMC host to prepare command
325 * @mrq: MMC request to prepare for
326 * @is_first_req: true if there is no previous started request
327 * that may run in parellel to this call, otherwise false
328 *
329 * mmc_pre_req() is called in prior to mmc_start_req() to let
330 * host prepare for the new request. Preparation of a request may be
331 * performed while another request is running on the host.
332 */
333static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
334 bool is_first_req)
335{
336 if (host->ops->pre_req) {
337 mmc_host_clk_hold(host);
338 host->ops->pre_req(host, mrq, is_first_req);
339 mmc_host_clk_release(host);
340 }
341}
342
343/**
344 * mmc_post_req - Post process a completed request
345 * @host: MMC host to post process command
346 * @mrq: MMC request to post process for
347 * @err: Error, if non zero, clean up any resources made in pre_req
348 *
349 * Let the host post process a completed request. Post processing of
350 * a request may be performed while another reuqest is running.
351 */
352static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
353 int err)
354{
355 if (host->ops->post_req) {
356 mmc_host_clk_hold(host);
357 host->ops->post_req(host, mrq, err);
358 mmc_host_clk_release(host);
359 }
360}
361
362/**
363 * mmc_start_req - start a non-blocking request
364 * @host: MMC host to start command
365 * @areq: async request to start
366 * @error: out parameter returns 0 for success, otherwise non zero
367 *
368 * Start a new MMC custom command request for a host.
369 * If there is on ongoing async request wait for completion
370 * of that request and start the new one and return.
371 * Does not wait for the new request to complete.
372 *
373 * Returns the completed request, NULL in case of none completed.
374 * Wait for the an ongoing request (previoulsy started) to complete and
375 * return the completed request. If there is no ongoing request, NULL
376 * is returned without waiting. NULL is not an error condition.
377 */
378struct mmc_async_req *mmc_start_req(struct mmc_host *host,
379 struct mmc_async_req *areq, int *error)
380{
381 int err = 0;
382 struct mmc_async_req *data = host->areq;
383
384 /* Prepare a new request */
385 if (areq)
386 mmc_pre_req(host, areq->mrq, !host->areq);
387
388 if (host->areq) {
389 mmc_wait_for_req_done(host, host->areq->mrq);
390 err = host->areq->err_check(host->card, host->areq);
391 if (err) {
392 mmc_post_req(host, host->areq->mrq, 0);
393 if (areq)
394 mmc_post_req(host, areq->mrq, -EINVAL);
395
396 host->areq = NULL;
397 goto out;
398 }
399 }
400
401 if (areq)
402 __mmc_start_req(host, areq->mrq);
403
404 if (host->areq)
405 mmc_post_req(host, host->areq->mrq, 0);
406
407 host->areq = areq;
408 out:
409 if (error)
410 *error = err;
411 return data;
412}
413EXPORT_SYMBOL(mmc_start_req);
414
204/** 415/**
205 * mmc_wait_for_req - start a request and wait for completion 416 * mmc_wait_for_req - start a request and wait for completion
206 * @host: MMC host to start command 417 * @host: MMC host to start command
@@ -212,17 +423,67 @@ static void mmc_wait_done(struct mmc_request *mrq)
212 */ 423 */
213void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq) 424void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
214{ 425{
215 DECLARE_COMPLETION_ONSTACK(complete); 426 __mmc_start_req(host, mrq);
427 mmc_wait_for_req_done(host, mrq);
428}
429EXPORT_SYMBOL(mmc_wait_for_req);
216 430
217 mrq->done_data = &complete; 431/**
218 mrq->done = mmc_wait_done; 432 * mmc_interrupt_hpi - Issue for High priority Interrupt
433 * @card: the MMC card associated with the HPI transfer
434 *
435 * Issued High Priority Interrupt, and check for card status
436 * util out-of prg-state.
437 */
438int mmc_interrupt_hpi(struct mmc_card *card)
439{
440 int err;
441 u32 status;
219 442
220 mmc_start_request(host, mrq); 443 BUG_ON(!card);
221 444
222 wait_for_completion(&complete); 445 if (!card->ext_csd.hpi_en) {
223} 446 pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
447 return 1;
448 }
224 449
225EXPORT_SYMBOL(mmc_wait_for_req); 450 mmc_claim_host(card->host);
451 err = mmc_send_status(card, &status);
452 if (err) {
453 pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
454 goto out;
455 }
456
457 /*
458 * If the card status is in PRG-state, we can send the HPI command.
459 */
460 if (R1_CURRENT_STATE(status) == R1_STATE_PRG) {
461 do {
462 /*
463 * We don't know when the HPI command will finish
464 * processing, so we need to resend HPI until out
465 * of prg-state, and keep checking the card status
466 * with SEND_STATUS. If a timeout error occurs when
467 * sending the HPI command, we are already out of
468 * prg-state.
469 */
470 err = mmc_send_hpi_cmd(card, &status);
471 if (err)
472 pr_debug("%s: abort HPI (%d error)\n",
473 mmc_hostname(card->host), err);
474
475 err = mmc_send_status(card, &status);
476 if (err)
477 break;
478 } while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
479 } else
480 pr_debug("%s: Left prg-state\n", mmc_hostname(card->host));
481
482out:
483 mmc_release_host(card->host);
484 return err;
485}
486EXPORT_SYMBOL(mmc_interrupt_hpi);
226 487
227/** 488/**
228 * mmc_wait_for_cmd - start a command and wait for completion 489 * mmc_wait_for_cmd - start a command and wait for completion
@@ -390,7 +651,9 @@ int mmc_host_enable(struct mmc_host *host)
390 int err; 651 int err;
391 652
392 host->en_dis_recurs = 1; 653 host->en_dis_recurs = 1;
654 mmc_host_clk_hold(host);
393 err = host->ops->enable(host); 655 err = host->ops->enable(host);
656 mmc_host_clk_release(host);
394 host->en_dis_recurs = 0; 657 host->en_dis_recurs = 0;
395 658
396 if (err) { 659 if (err) {
@@ -410,7 +673,9 @@ static int mmc_host_do_disable(struct mmc_host *host, int lazy)
410 int err; 673 int err;
411 674
412 host->en_dis_recurs = 1; 675 host->en_dis_recurs = 1;
676 mmc_host_clk_hold(host);
413 err = host->ops->disable(host, lazy); 677 err = host->ops->disable(host, lazy);
678 mmc_host_clk_release(host);
414 host->en_dis_recurs = 0; 679 host->en_dis_recurs = 0;
415 680
416 if (err < 0) { 681 if (err < 0) {
@@ -973,8 +1238,11 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11
973 1238
974 host->ios.signal_voltage = signal_voltage; 1239 host->ios.signal_voltage = signal_voltage;
975 1240
976 if (host->ops->start_signal_voltage_switch) 1241 if (host->ops->start_signal_voltage_switch) {
1242 mmc_host_clk_hold(host);
977 err = host->ops->start_signal_voltage_switch(host, &host->ios); 1243 err = host->ops->start_signal_voltage_switch(host, &host->ios);
1244 mmc_host_clk_release(host);
1245 }
978 1246
979 return err; 1247 return err;
980} 1248}
@@ -1001,6 +1269,46 @@ void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1001 mmc_host_clk_release(host); 1269 mmc_host_clk_release(host);
1002} 1270}
1003 1271
1272static void mmc_poweroff_notify(struct mmc_host *host)
1273{
1274 struct mmc_card *card;
1275 unsigned int timeout;
1276 unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
1277 int err = 0;
1278
1279 card = host->card;
1280
1281 /*
1282 * Send power notify command only if card
1283 * is mmc and notify state is powered ON
1284 */
1285 if (card && mmc_card_mmc(card) &&
1286 (card->poweroff_notify_state == MMC_POWERED_ON)) {
1287
1288 if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
1289 notify_type = EXT_CSD_POWER_OFF_SHORT;
1290 timeout = card->ext_csd.generic_cmd6_time;
1291 card->poweroff_notify_state = MMC_POWEROFF_SHORT;
1292 } else {
1293 notify_type = EXT_CSD_POWER_OFF_LONG;
1294 timeout = card->ext_csd.power_off_longtime;
1295 card->poweroff_notify_state = MMC_POWEROFF_LONG;
1296 }
1297
1298 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1299 EXT_CSD_POWER_OFF_NOTIFICATION,
1300 notify_type, timeout);
1301
1302 if (err && err != -EBADMSG)
1303 pr_err("Device failed to respond within %d poweroff "
1304 "time. Forcefully powering down the device\n",
1305 timeout);
1306
1307 /* Set the card state to no notification after the poweroff */
1308 card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
1309 }
1310}
1311
1004/* 1312/*
1005 * Apply power to the MMC stack. This is a two-stage process. 1313 * Apply power to the MMC stack. This is a two-stage process.
1006 * First, we enable power to the card without the clock running. 1314 * First, we enable power to the card without the clock running.
@@ -1064,6 +1372,8 @@ void mmc_power_off(struct mmc_host *host)
1064 host->ios.clock = 0; 1372 host->ios.clock = 0;
1065 host->ios.vdd = 0; 1373 host->ios.vdd = 0;
1066 1374
1375 mmc_poweroff_notify(host);
1376
1067 /* 1377 /*
1068 * Reset ocr mask to be the highest possible voltage supported for 1378 * Reset ocr mask to be the highest possible voltage supported for
1069 * this mmc host. This value will be used at next power up. 1379 * this mmc host. This value will be used at next power up.
@@ -1121,6 +1431,36 @@ static inline void mmc_bus_put(struct mmc_host *host)
1121 spin_unlock_irqrestore(&host->lock, flags); 1431 spin_unlock_irqrestore(&host->lock, flags);
1122} 1432}
1123 1433
1434int mmc_resume_bus(struct mmc_host *host)
1435{
1436 unsigned long flags;
1437
1438 if (!mmc_bus_needs_resume(host))
1439 return -EINVAL;
1440
1441 printk("%s: Starting deferred resume\n", mmc_hostname(host));
1442 spin_lock_irqsave(&host->lock, flags);
1443 host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME;
1444 host->rescan_disable = 0;
1445 spin_unlock_irqrestore(&host->lock, flags);
1446
1447 mmc_bus_get(host);
1448 if (host->bus_ops && !host->bus_dead) {
1449 mmc_power_up(host);
1450 BUG_ON(!host->bus_ops->resume);
1451 host->bus_ops->resume(host);
1452 }
1453
1454 if (host->bus_ops->detect && !host->bus_dead)
1455 host->bus_ops->detect(host);
1456
1457 mmc_bus_put(host);
1458 printk("%s: Deferred resume completed\n", mmc_hostname(host));
1459 return 0;
1460}
1461
1462EXPORT_SYMBOL(mmc_resume_bus);
1463
1124/* 1464/*
1125 * Assign a mmc bus handler to a host. Only one bus handler may control a 1465 * Assign a mmc bus handler to a host. Only one bus handler may control a
1126 * host at any given time. 1466 * host at any given time.
@@ -1186,6 +1526,8 @@ void mmc_detect_change(struct mmc_host *host, unsigned long delay)
1186 spin_unlock_irqrestore(&host->lock, flags); 1526 spin_unlock_irqrestore(&host->lock, flags);
1187#endif 1527#endif
1188 1528
1529 host->detect_change = 1;
1530 wake_lock(&host->detect_wake_lock);
1189 mmc_schedule_delayed_work(&host->detect, delay); 1531 mmc_schedule_delayed_work(&host->detect, delay);
1190} 1532}
1191 1533
@@ -1383,7 +1725,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1383 if (err) { 1725 if (err) {
1384 printk(KERN_ERR "mmc_erase: group start error %d, " 1726 printk(KERN_ERR "mmc_erase: group start error %d, "
1385 "status %#x\n", err, cmd.resp[0]); 1727 "status %#x\n", err, cmd.resp[0]);
1386 err = -EINVAL; 1728 err = -EIO;
1387 goto out; 1729 goto out;
1388 } 1730 }
1389 1731
@@ -1398,7 +1740,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1398 if (err) { 1740 if (err) {
1399 printk(KERN_ERR "mmc_erase: group end error %d, status %#x\n", 1741 printk(KERN_ERR "mmc_erase: group end error %d, status %#x\n",
1400 err, cmd.resp[0]); 1742 err, cmd.resp[0]);
1401 err = -EINVAL; 1743 err = -EIO;
1402 goto out; 1744 goto out;
1403 } 1745 }
1404 1746
@@ -1517,10 +1859,34 @@ int mmc_can_trim(struct mmc_card *card)
1517{ 1859{
1518 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) 1860 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
1519 return 1; 1861 return 1;
1862 if (mmc_can_discard(card))
1863 return 1;
1520 return 0; 1864 return 0;
1521} 1865}
1522EXPORT_SYMBOL(mmc_can_trim); 1866EXPORT_SYMBOL(mmc_can_trim);
1523 1867
1868int mmc_can_discard(struct mmc_card *card)
1869{
1870 /*
1871 * As there's no way to detect the discard support bit at v4.5
1872 * use the s/w feature support filed.
1873 */
1874 if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
1875 return 1;
1876 return 0;
1877}
1878EXPORT_SYMBOL(mmc_can_discard);
1879
1880int mmc_can_sanitize(struct mmc_card *card)
1881{
1882#ifndef CONFIG_MMC_NOT_USE_SANITIZE
1883 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
1884 return 1;
1885#endif
1886 return 0;
1887}
1888EXPORT_SYMBOL(mmc_can_sanitize);
1889
1524int mmc_can_secure_erase_trim(struct mmc_card *card) 1890int mmc_can_secure_erase_trim(struct mmc_card *card)
1525{ 1891{
1526 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) 1892 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
@@ -1540,6 +1906,82 @@ int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
1540} 1906}
1541EXPORT_SYMBOL(mmc_erase_group_aligned); 1907EXPORT_SYMBOL(mmc_erase_group_aligned);
1542 1908
1909static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
1910 unsigned int arg)
1911{
1912 struct mmc_host *host = card->host;
1913 unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
1914 unsigned int last_timeout = 0;
1915
1916 if (card->erase_shift)
1917 max_qty = UINT_MAX >> card->erase_shift;
1918 else if (mmc_card_sd(card))
1919 max_qty = UINT_MAX;
1920 else
1921 max_qty = UINT_MAX / card->erase_size;
1922
1923 /* Find the largest qty with an OK timeout */
1924 do {
1925 y = 0;
1926 for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
1927 timeout = mmc_erase_timeout(card, arg, qty + x);
1928 if (timeout > host->max_discard_to)
1929 break;
1930 if (timeout < last_timeout)
1931 break;
1932 last_timeout = timeout;
1933 y = x;
1934 }
1935 qty += y;
1936 } while (y);
1937
1938 if (!qty)
1939 return 0;
1940
1941 if (qty == 1)
1942 return 1;
1943
1944 /* Convert qty to sectors */
1945 if (card->erase_shift)
1946 max_discard = --qty << card->erase_shift;
1947 else if (mmc_card_sd(card))
1948 max_discard = qty;
1949 else
1950 max_discard = --qty * card->erase_size;
1951
1952 return max_discard;
1953}
1954
1955unsigned int mmc_calc_max_discard(struct mmc_card *card)
1956{
1957 struct mmc_host *host = card->host;
1958 unsigned int max_discard, max_trim;
1959
1960 if (!host->max_discard_to)
1961 return UINT_MAX;
1962
1963 /*
1964 * Without erase_group_def set, MMC erase timeout depends on clock
1965 * frequence which can change. In that case, the best choice is
1966 * just the preferred erase size.
1967 */
1968 if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
1969 return card->pref_erase;
1970
1971 max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
1972 if (mmc_can_trim(card)) {
1973 max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
1974 if (max_trim < max_discard)
1975 max_discard = max_trim;
1976 } else if (max_discard < card->erase_size) {
1977 max_discard = 0;
1978 }
1979 pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
1980 mmc_hostname(host), max_discard, host->max_discard_to);
1981 return max_discard;
1982}
1983EXPORT_SYMBOL(mmc_calc_max_discard);
1984
1543int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen) 1985int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
1544{ 1986{
1545 struct mmc_command cmd = {0}; 1987 struct mmc_command cmd = {0};
@@ -1554,6 +1996,94 @@ int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
1554} 1996}
1555EXPORT_SYMBOL(mmc_set_blocklen); 1997EXPORT_SYMBOL(mmc_set_blocklen);
1556 1998
1999static void mmc_hw_reset_for_init(struct mmc_host *host)
2000{
2001 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
2002 return;
2003 mmc_host_clk_hold(host);
2004 host->ops->hw_reset(host);
2005 mmc_host_clk_release(host);
2006}
2007
2008int mmc_can_reset(struct mmc_card *card)
2009{
2010 u8 rst_n_function;
2011
2012 if (!mmc_card_mmc(card))
2013 return 0;
2014 rst_n_function = card->ext_csd.rst_n_function;
2015 if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
2016 return 0;
2017 return 1;
2018}
2019EXPORT_SYMBOL(mmc_can_reset);
2020
2021static int mmc_do_hw_reset(struct mmc_host *host, int check)
2022{
2023 struct mmc_card *card = host->card;
2024
2025 if (!host->bus_ops->power_restore)
2026 return -EOPNOTSUPP;
2027
2028 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
2029 return -EOPNOTSUPP;
2030
2031 if (!card)
2032 return -EINVAL;
2033
2034 if (!mmc_can_reset(card))
2035 return -EOPNOTSUPP;
2036
2037 mmc_host_clk_hold(host);
2038 mmc_set_clock(host, host->f_init);
2039
2040 host->ops->hw_reset(host);
2041
2042 /* If the reset has happened, then a status command will fail */
2043 if (check) {
2044 struct mmc_command cmd = {0};
2045 int err;
2046
2047 cmd.opcode = MMC_SEND_STATUS;
2048 if (!mmc_host_is_spi(card->host))
2049 cmd.arg = card->rca << 16;
2050 cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
2051 err = mmc_wait_for_cmd(card->host, &cmd, 0);
2052 if (!err) {
2053 mmc_host_clk_release(host);
2054 return -ENOSYS;
2055 }
2056 }
2057
2058 host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR);
2059 if (mmc_host_is_spi(host)) {
2060 host->ios.chip_select = MMC_CS_HIGH;
2061 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
2062 } else {
2063 host->ios.chip_select = MMC_CS_DONTCARE;
2064 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
2065 }
2066 host->ios.bus_width = MMC_BUS_WIDTH_1;
2067 host->ios.timing = MMC_TIMING_LEGACY;
2068 mmc_set_ios(host);
2069
2070 mmc_host_clk_release(host);
2071
2072 return host->bus_ops->power_restore(host);
2073}
2074
2075int mmc_hw_reset(struct mmc_host *host)
2076{
2077 return mmc_do_hw_reset(host, 0);
2078}
2079EXPORT_SYMBOL(mmc_hw_reset);
2080
2081int mmc_hw_reset_check(struct mmc_host *host)
2082{
2083 return mmc_do_hw_reset(host, 1);
2084}
2085EXPORT_SYMBOL(mmc_hw_reset_check);
2086
1557static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq) 2087static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
1558{ 2088{
1559 host->f_init = freq; 2089 host->f_init = freq;
@@ -1565,6 +2095,12 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
1565 mmc_power_up(host); 2095 mmc_power_up(host);
1566 2096
1567 /* 2097 /*
2098 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
2099 * do a hardware reset if possible.
2100 */
2101 mmc_hw_reset_for_init(host);
2102
2103 /*
1568 * sdio_reset sends CMD52 to reset card. Since we do not know 2104 * sdio_reset sends CMD52 to reset card. Since we do not know
1569 * if the card is being re-initialized, just send it. CMD52 2105 * if the card is being re-initialized, just send it. CMD52
1570 * should be ignored by SD/eMMC cards. 2106 * should be ignored by SD/eMMC cards.
@@ -1586,12 +2122,50 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
1586 return -EIO; 2122 return -EIO;
1587} 2123}
1588 2124
2125int _mmc_detect_card_removed(struct mmc_host *host)
2126{
2127 int ret;
2128
2129 if ((host->caps & MMC_CAP_NONREMOVABLE) || !host->bus_ops->alive)
2130 return 0;
2131
2132 if (!host->card || mmc_card_removed(host->card))
2133 return 1;
2134
2135 ret = host->bus_ops->alive(host);
2136 if (ret) {
2137 mmc_card_set_removed(host->card);
2138 pr_debug("%s: card remove detected\n", mmc_hostname(host));
2139 }
2140
2141 return ret;
2142}
2143
2144int mmc_detect_card_removed(struct mmc_host *host)
2145{
2146 struct mmc_card *card = host->card;
2147
2148 WARN_ON(!host->claimed);
2149 /*
2150 * The card will be considered unchanged unless we have been asked to
2151 * detect a change or host requires polling to provide card detection.
2152 */
2153 if (card && !host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
2154 return mmc_card_removed(card);
2155
2156 host->detect_change = 0;
2157
2158 return _mmc_detect_card_removed(host);
2159}
2160EXPORT_SYMBOL(mmc_detect_card_removed);
2161
1589void mmc_rescan(struct work_struct *work) 2162void mmc_rescan(struct work_struct *work)
1590{ 2163{
1591 static const unsigned freqs[] = { 400000, 300000, 200000, 100000 }; 2164 static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
1592 struct mmc_host *host = 2165 struct mmc_host *host =
1593 container_of(work, struct mmc_host, detect.work); 2166 container_of(work, struct mmc_host, detect.work);
1594 int i; 2167 int i;
2168 bool extend_wakelock = false;
1595 2169
1596 if (host->rescan_disable) 2170 if (host->rescan_disable)
1597 return; 2171 return;
@@ -1606,6 +2180,14 @@ void mmc_rescan(struct work_struct *work)
1606 && !(host->caps & MMC_CAP_NONREMOVABLE)) 2180 && !(host->caps & MMC_CAP_NONREMOVABLE))
1607 host->bus_ops->detect(host); 2181 host->bus_ops->detect(host);
1608 2182
2183 /* If the card was removed the bus will be marked
2184 * as dead - extend the wakelock so userspace
2185 * can respond */
2186 if (host->bus_dead)
2187 extend_wakelock = 1;
2188
2189 host->detect_change = 0;
2190
1609 /* 2191 /*
1610 * Let mmc_bus_put() free the bus/bus_ops if we've found that 2192 * Let mmc_bus_put() free the bus/bus_ops if we've found that
1611 * the card is no longer present. 2193 * the card is no longer present.
@@ -1630,16 +2212,24 @@ void mmc_rescan(struct work_struct *work)
1630 2212
1631 mmc_claim_host(host); 2213 mmc_claim_host(host);
1632 for (i = 0; i < ARRAY_SIZE(freqs); i++) { 2214 for (i = 0; i < ARRAY_SIZE(freqs); i++) {
1633 if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) 2215 if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) {
2216 extend_wakelock = true;
1634 break; 2217 break;
2218 }
1635 if (freqs[i] <= host->f_min) 2219 if (freqs[i] <= host->f_min)
1636 break; 2220 break;
1637 } 2221 }
1638 mmc_release_host(host); 2222 mmc_release_host(host);
1639 2223
1640 out: 2224 out:
1641 if (host->caps & MMC_CAP_NEEDS_POLL) 2225 if (extend_wakelock)
2226 wake_lock_timeout(&host->detect_wake_lock, HZ / 2);
2227 else
2228 wake_unlock(&host->detect_wake_lock);
2229 if (host->caps & MMC_CAP_NEEDS_POLL) {
2230 wake_lock(&host->detect_wake_lock);
1642 mmc_schedule_delayed_work(&host->detect, HZ); 2231 mmc_schedule_delayed_work(&host->detect, HZ);
2232 }
1643} 2233}
1644 2234
1645void mmc_start_host(struct mmc_host *host) 2235void mmc_start_host(struct mmc_host *host)
@@ -1659,7 +2249,8 @@ void mmc_stop_host(struct mmc_host *host)
1659 2249
1660 if (host->caps & MMC_CAP_DISABLE) 2250 if (host->caps & MMC_CAP_DISABLE)
1661 cancel_delayed_work(&host->disable); 2251 cancel_delayed_work(&host->disable);
1662 cancel_delayed_work_sync(&host->detect); 2252 if (cancel_delayed_work_sync(&host->detect))
2253 wake_unlock(&host->detect_wake_lock);
1663 mmc_flush_scheduled_work(); 2254 mmc_flush_scheduled_work();
1664 2255
1665 /* clear pm flags now and let card drivers set them as needed */ 2256 /* clear pm flags now and let card drivers set them as needed */
@@ -1766,6 +2357,72 @@ int mmc_card_can_sleep(struct mmc_host *host)
1766} 2357}
1767EXPORT_SYMBOL(mmc_card_can_sleep); 2358EXPORT_SYMBOL(mmc_card_can_sleep);
1768 2359
2360/*
2361 * Flush the cache to the non-volatile storage.
2362 */
2363int mmc_flush_cache(struct mmc_card *card)
2364{
2365 struct mmc_host *host = card->host;
2366 int err = 0;
2367
2368 if (!(host->caps2 & MMC_CAP2_CACHE_CTRL))
2369 return err;
2370
2371 if (mmc_card_mmc(card) &&
2372 (card->ext_csd.cache_size > 0) &&
2373 (card->ext_csd.cache_ctrl & 1)) {
2374 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2375 EXT_CSD_FLUSH_CACHE, 1, 0);
2376 if (err) {
2377 pr_err("%s: cache flush error %d\n",
2378 mmc_hostname(card->host), err);
2379 panic("[TEST] mmc%d, %s returns %d.\n",
2380 host->index, __func__, err);
2381 }
2382 }
2383
2384 return err;
2385}
2386EXPORT_SYMBOL(mmc_flush_cache);
2387
2388/*
2389 * Turn the cache ON/OFF.
2390 * Turning the cache OFF shall trigger flushing of the data
2391 * to the non-volatile storage.
2392 */
2393int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
2394{
2395 struct mmc_card *card = host->card;
2396 unsigned int timeout;
2397 int err = 0;
2398
2399 if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) ||
2400 mmc_card_is_removable(host))
2401 return err;
2402
2403 if (card && mmc_card_mmc(card) &&
2404 (card->ext_csd.cache_size > 0)) {
2405 enable = !!enable;
2406
2407 if (card->ext_csd.cache_ctrl ^ enable) {
2408 timeout = enable ? card->ext_csd.generic_cmd6_time : 0;
2409 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2410 EXT_CSD_CACHE_CTRL, enable, timeout);
2411
2412 if (err)
2413 pr_err("%s: cache %s error %d\n",
2414 mmc_hostname(card->host),
2415 enable ? "on" : "off",
2416 err);
2417 else
2418 card->ext_csd.cache_ctrl = enable;
2419 }
2420 }
2421
2422 return err;
2423}
2424EXPORT_SYMBOL(mmc_cache_ctrl);
2425
1769#ifdef CONFIG_PM 2426#ifdef CONFIG_PM
1770 2427
1771/** 2428/**
@@ -1776,28 +2433,89 @@ int mmc_suspend_host(struct mmc_host *host)
1776{ 2433{
1777 int err = 0; 2434 int err = 0;
1778 2435
2436 if (mmc_bus_needs_resume(host))
2437 return 0;
2438
1779 if (host->caps & MMC_CAP_DISABLE) 2439 if (host->caps & MMC_CAP_DISABLE)
1780 cancel_delayed_work(&host->disable); 2440 cancel_delayed_work(&host->disable);
1781 cancel_delayed_work(&host->detect); 2441 if (cancel_delayed_work(&host->detect))
2442 wake_unlock(&host->detect_wake_lock);
1782 mmc_flush_scheduled_work(); 2443 mmc_flush_scheduled_work();
2444 if (mmc_try_claim_host(host)) {
2445 u32 status;
2446 u32 count=300000; /* up to 300ms */
2447
2448 /* if a sdmmc card exists and the card is mmc */
2449 if (((host->card) && mmc_card_mmc(host->card))) {
2450 int ret;
2451 /* flush emmc's cache before getting suspend */
2452 ret = mmc_flush_cache(host->card);
2453 if (ret)
2454 pr_err("%s: there is error %d while "
2455 "flushing emmc's cache\n",
2456 mmc_hostname(host),ret);
2457 }
2458 err = mmc_cache_ctrl(host, 0);
2459
2460 /* to make sure that emmc is not working. should check
2461 emmc's state */
2462 if (((host->card) && mmc_card_mmc(host->card))) {
2463 do {
2464 err = mmc_send_status(host->card, &status);
2465 if (err)
2466 break;
2467
2468 /* if it is not the first time */
2469 if (count != 300000)
2470 udelay(1);
2471 count--;
2472 } while (count && R1_CURRENT_STATE(status) == 7);
2473 }
2474 mmc_do_release_host(host);
2475 } else {
2476 err = -EBUSY;
2477 }
2478
2479 if (err)
2480 goto out;
1783 2481
1784 mmc_bus_get(host); 2482 mmc_bus_get(host);
1785 if (host->bus_ops && !host->bus_dead) { 2483 if (host->bus_ops && !host->bus_dead) {
1786 if (host->bus_ops->suspend) 2484
1787 err = host->bus_ops->suspend(host); 2485 /*
1788 if (err == -ENOSYS || !host->bus_ops->resume) { 2486 * A long response time is not acceptable for device drivers
1789 /* 2487 * when doing suspend. Prevent mmc_claim_host in the suspend
1790 * We simply "remove" the card in this case. 2488 * sequence, to potentially wait "forever" by trying to
1791 * It will be redetected on resume. 2489 * pre-claim the host.
1792 */ 2490 */
1793 if (host->bus_ops->remove) 2491 if (mmc_try_claim_host(host)) {
1794 host->bus_ops->remove(host); 2492 if (host->bus_ops->suspend) {
1795 mmc_claim_host(host); 2493 /*
1796 mmc_detach_bus(host); 2494 * For eMMC 4.5 device send notify command
1797 mmc_power_off(host); 2495 * before sleep, because in sleep state eMMC 4.5
1798 mmc_release_host(host); 2496 * devices respond to only RESET and AWAKE cmd
1799 host->pm_flags = 0; 2497 */
1800 err = 0; 2498 mmc_poweroff_notify(host);
2499 err = host->bus_ops->suspend(host);
2500 }
2501 if (err == -ENOSYS || !host->bus_ops->resume) {
2502 /*
2503 * We simply "remove" the card in this case.
2504 * It will be redetected on resume.
2505 */
2506 if (host->bus_ops->remove)
2507 host->bus_ops->remove(host);
2508 mmc_claim_host(host);
2509 mmc_detach_bus(host);
2510 mmc_power_off(host);
2511 mmc_release_host(host);
2512 host->pm_flags = 0;
2513 err = 0;
2514 }
2515
2516 mmc_do_release_host(host);
2517 } else {
2518 err = -EBUSY;
1801 } 2519 }
1802 } 2520 }
1803 mmc_bus_put(host); 2521 mmc_bus_put(host);
@@ -1805,6 +2523,7 @@ int mmc_suspend_host(struct mmc_host *host)
1805 if (!err && !mmc_card_keep_power(host)) 2523 if (!err && !mmc_card_keep_power(host))
1806 mmc_power_off(host); 2524 mmc_power_off(host);
1807 2525
2526out:
1808 return err; 2527 return err;
1809} 2528}
1810 2529
@@ -1819,6 +2538,12 @@ int mmc_resume_host(struct mmc_host *host)
1819 int err = 0; 2538 int err = 0;
1820 2539
1821 mmc_bus_get(host); 2540 mmc_bus_get(host);
2541 if (mmc_bus_manual_resume(host)) {
2542 host->bus_resume_flags |= MMC_BUSRESUME_NEEDS_RESUME;
2543 mmc_bus_put(host);
2544 return 0;
2545 }
2546
1822 if (host->bus_ops && !host->bus_dead) { 2547 if (host->bus_ops && !host->bus_dead) {
1823 if (!mmc_card_keep_power(host)) { 2548 if (!mmc_card_keep_power(host)) {
1824 mmc_power_up(host); 2549 mmc_power_up(host);
@@ -1869,9 +2594,15 @@ int mmc_pm_notify(struct notifier_block *notify_block,
1869 case PM_SUSPEND_PREPARE: 2594 case PM_SUSPEND_PREPARE:
1870 2595
1871 spin_lock_irqsave(&host->lock, flags); 2596 spin_lock_irqsave(&host->lock, flags);
2597 if (mmc_bus_needs_resume(host)) {
2598 spin_unlock_irqrestore(&host->lock, flags);
2599 break;
2600 }
1872 host->rescan_disable = 1; 2601 host->rescan_disable = 1;
2602 host->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT;
1873 spin_unlock_irqrestore(&host->lock, flags); 2603 spin_unlock_irqrestore(&host->lock, flags);
1874 cancel_delayed_work_sync(&host->detect); 2604 if (cancel_delayed_work_sync(&host->detect))
2605 wake_unlock(&host->detect_wake_lock);
1875 2606
1876 if (!host->bus_ops || host->bus_ops->suspend) 2607 if (!host->bus_ops || host->bus_ops->suspend)
1877 break; 2608 break;
@@ -1882,7 +2613,9 @@ int mmc_pm_notify(struct notifier_block *notify_block,
1882 host->bus_ops->remove(host); 2613 host->bus_ops->remove(host);
1883 2614
1884 mmc_detach_bus(host); 2615 mmc_detach_bus(host);
1885 mmc_power_off(host); 2616 /* for BCM WIFI */
2617 if (!(host->pm_flags & MMC_PM_IGNORE_SUSPEND_RESUME))
2618 mmc_power_off(host);
1886 mmc_release_host(host); 2619 mmc_release_host(host);
1887 host->pm_flags = 0; 2620 host->pm_flags = 0;
1888 break; 2621 break;
@@ -1892,9 +2625,16 @@ int mmc_pm_notify(struct notifier_block *notify_block,
1892 case PM_POST_RESTORE: 2625 case PM_POST_RESTORE:
1893 2626
1894 spin_lock_irqsave(&host->lock, flags); 2627 spin_lock_irqsave(&host->lock, flags);
2628 if (mmc_bus_manual_resume(host)) {
2629 spin_unlock_irqrestore(&host->lock, flags);
2630 break;
2631 }
1895 host->rescan_disable = 0; 2632 host->rescan_disable = 0;
2633 host->power_notify_type = MMC_HOST_PW_NOTIFY_LONG;
1896 spin_unlock_irqrestore(&host->lock, flags); 2634 spin_unlock_irqrestore(&host->lock, flags);
1897 mmc_detect_change(host, 0); 2635 /* for BCM WIFI */
2636 if (!(host->pm_flags & MMC_PM_IGNORE_SUSPEND_RESUME))
2637 mmc_detect_change(host, 0);
1898 2638
1899 } 2639 }
1900 2640
@@ -1902,6 +2642,22 @@ int mmc_pm_notify(struct notifier_block *notify_block,
1902} 2642}
1903#endif 2643#endif
1904 2644
2645#ifdef CONFIG_MMC_EMBEDDED_SDIO
2646void mmc_set_embedded_sdio_data(struct mmc_host *host,
2647 struct sdio_cis *cis,
2648 struct sdio_cccr *cccr,
2649 struct sdio_embedded_func *funcs,
2650 int num_funcs)
2651{
2652 host->embedded_sdio_data.cis = cis;
2653 host->embedded_sdio_data.cccr = cccr;
2654 host->embedded_sdio_data.funcs = funcs;
2655 host->embedded_sdio_data.num_funcs = num_funcs;
2656}
2657
2658EXPORT_SYMBOL(mmc_set_embedded_sdio_data);
2659#endif
2660
1905static int __init mmc_init(void) 2661static int __init mmc_init(void)
1906{ 2662{
1907 int ret; 2663 int ret;
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 14664f1fb16..34009241213 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -24,6 +24,7 @@ struct mmc_bus_ops {
24 int (*resume)(struct mmc_host *); 24 int (*resume)(struct mmc_host *);
25 int (*power_save)(struct mmc_host *); 25 int (*power_save)(struct mmc_host *);
26 int (*power_restore)(struct mmc_host *); 26 int (*power_restore)(struct mmc_host *);
27 int (*alive)(struct mmc_host *);
27}; 28};
28 29
29void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops); 30void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
@@ -59,6 +60,8 @@ void mmc_rescan(struct work_struct *work);
59void mmc_start_host(struct mmc_host *host); 60void mmc_start_host(struct mmc_host *host);
60void mmc_stop_host(struct mmc_host *host); 61void mmc_stop_host(struct mmc_host *host);
61 62
63int _mmc_detect_card_removed(struct mmc_host *host);
64
62int mmc_attach_mmc(struct mmc_host *host); 65int mmc_attach_mmc(struct mmc_host *host);
63int mmc_attach_sd(struct mmc_host *host); 66int mmc_attach_sd(struct mmc_host *host);
64int mmc_attach_sdio(struct mmc_host *host); 67int mmc_attach_sdio(struct mmc_host *host);
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 998797ed67a..b4155824b4a 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -113,6 +113,18 @@ static int mmc_ios_show(struct seq_file *s, void *data)
113 case MMC_TIMING_SD_HS: 113 case MMC_TIMING_SD_HS:
114 str = "sd high-speed"; 114 str = "sd high-speed";
115 break; 115 break;
116 case MMC_TIMING_UHS_SDR50:
117 str = "sd uhs SDR50";
118 break;
119 case MMC_TIMING_UHS_SDR104:
120 str = "sd uhs SDR104";
121 break;
122 case MMC_TIMING_UHS_DDR50:
123 str = "sd uhs DDR50";
124 break;
125 case MMC_TIMING_MMC_HS200:
126 str = "mmc high-speed SDR200";
127 break;
116 default: 128 default:
117 str = "invalid"; 129 str = "invalid";
118 break; 130 break;
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 793d0a0dad8..3664c49253a 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -53,7 +53,27 @@ static DEFINE_IDR(mmc_host_idr);
53static DEFINE_SPINLOCK(mmc_host_lock); 53static DEFINE_SPINLOCK(mmc_host_lock);
54 54
55#ifdef CONFIG_MMC_CLKGATE 55#ifdef CONFIG_MMC_CLKGATE
56static ssize_t clkgate_delay_show(struct device *dev,
57 struct device_attribute *attr, char *buf)
58{
59 struct mmc_host *host = cls_dev_to_mmc_host(dev);
60 return snprintf(buf, PAGE_SIZE, "%lu\n", host->clkgate_delay);
61}
62
63static ssize_t clkgate_delay_store(struct device *dev,
64 struct device_attribute *attr, const char *buf, size_t count)
65{
66 struct mmc_host *host = cls_dev_to_mmc_host(dev);
67 unsigned long flags, value;
68
69 if (kstrtoul(buf, 0, &value))
70 return -EINVAL;
56 71
72 spin_lock_irqsave(&host->clk_lock, flags);
73 host->clkgate_delay = value;
74 spin_unlock_irqrestore(&host->clk_lock, flags);
75 return count;
76}
57/* 77/*
58 * Enabling clock gating will make the core call out to the host 78 * Enabling clock gating will make the core call out to the host
59 * once up and once down when it performs a request or card operation 79 * once up and once down when it performs a request or card operation
@@ -87,8 +107,11 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host)
87 */ 107 */
88 if (!host->clk_requests) { 108 if (!host->clk_requests) {
89 spin_unlock_irqrestore(&host->clk_lock, flags); 109 spin_unlock_irqrestore(&host->clk_lock, flags);
90 tick_ns = DIV_ROUND_UP(1000000000, freq); 110 /* wait only when clk_gate_delay is 0 */
91 ndelay(host->clk_delay * tick_ns); 111 if (!host->clkgate_delay) {
112 tick_ns = DIV_ROUND_UP(1000000000, freq);
113 ndelay(host->clk_delay * tick_ns);
114 }
92 } else { 115 } else {
93 /* New users appeared while waiting for this work */ 116 /* New users appeared while waiting for this work */
94 spin_unlock_irqrestore(&host->clk_lock, flags); 117 spin_unlock_irqrestore(&host->clk_lock, flags);
@@ -113,7 +136,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host)
113static void mmc_host_clk_gate_work(struct work_struct *work) 136static void mmc_host_clk_gate_work(struct work_struct *work)
114{ 137{
115 struct mmc_host *host = container_of(work, struct mmc_host, 138 struct mmc_host *host = container_of(work, struct mmc_host,
116 clk_gate_work); 139 clk_gate_work.work);
117 140
118 mmc_host_clk_gate_delayed(host); 141 mmc_host_clk_gate_delayed(host);
119} 142}
@@ -130,6 +153,8 @@ void mmc_host_clk_hold(struct mmc_host *host)
130{ 153{
131 unsigned long flags; 154 unsigned long flags;
132 155
156 /* cancel any clock gating work scheduled by mmc_host_clk_release() */
157 cancel_delayed_work_sync(&host->clk_gate_work);
133 mutex_lock(&host->clk_gate_mutex); 158 mutex_lock(&host->clk_gate_mutex);
134 spin_lock_irqsave(&host->clk_lock, flags); 159 spin_lock_irqsave(&host->clk_lock, flags);
135 if (host->clk_gated) { 160 if (host->clk_gated) {
@@ -179,7 +204,9 @@ void mmc_host_clk_release(struct mmc_host *host)
179 host->clk_requests--; 204 host->clk_requests--;
180 if (mmc_host_may_gate_card(host->card) && 205 if (mmc_host_may_gate_card(host->card) &&
181 !host->clk_requests) 206 !host->clk_requests)
182 queue_work(system_nrt_wq, &host->clk_gate_work); 207 queue_delayed_work(system_nrt_wq, &host->clk_gate_work,
208 msecs_to_jiffies(host->clkgate_delay));
209
183 spin_unlock_irqrestore(&host->clk_lock, flags); 210 spin_unlock_irqrestore(&host->clk_lock, flags);
184} 211}
185 212
@@ -212,8 +239,13 @@ static inline void mmc_host_clk_init(struct mmc_host *host)
212 host->clk_requests = 0; 239 host->clk_requests = 0;
213 /* Hold MCI clock for 8 cycles by default */ 240 /* Hold MCI clock for 8 cycles by default */
214 host->clk_delay = 8; 241 host->clk_delay = 8;
242 /*
243 * Default clock gating delay is 0ms to avoid wasting power.
244 * This value can be tuned by writing into sysfs entry.
245 */
246 host->clkgate_delay = 3;
215 host->clk_gated = false; 247 host->clk_gated = false;
216 INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work); 248 INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
217 spin_lock_init(&host->clk_lock); 249 spin_lock_init(&host->clk_lock);
218 mutex_init(&host->clk_gate_mutex); 250 mutex_init(&host->clk_gate_mutex);
219} 251}
@@ -228,7 +260,7 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
228 * Wait for any outstanding gate and then make sure we're 260 * Wait for any outstanding gate and then make sure we're
229 * ungated before exiting. 261 * ungated before exiting.
230 */ 262 */
231 if (cancel_work_sync(&host->clk_gate_work)) 263 if (cancel_delayed_work_sync(&host->clk_gate_work))
232 mmc_host_clk_gate_delayed(host); 264 mmc_host_clk_gate_delayed(host);
233 if (host->clk_gated) 265 if (host->clk_gated)
234 mmc_host_clk_hold(host); 266 mmc_host_clk_hold(host);
@@ -236,6 +268,18 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
236 WARN_ON(host->clk_requests > 1); 268 WARN_ON(host->clk_requests > 1);
237} 269}
238 270
271static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
272{
273 host->clkgate_delay_attr.show = clkgate_delay_show;
274 host->clkgate_delay_attr.store = clkgate_delay_store;
275 sysfs_attr_init(&host->clkgate_delay_attr.attr);
276 host->clkgate_delay_attr.attr.name = "clkgate_delay";
277 host->clkgate_delay_attr.attr.mode = S_IRUGO | S_IWUSR;
278 if (device_create_file(&host->class_dev, &host->clkgate_delay_attr))
279 pr_err("%s: Failed to create clkgate_delay sysfs entry\n",
280 mmc_hostname(host));
281}
282
239#else 283#else
240 284
241static inline void mmc_host_clk_init(struct mmc_host *host) 285static inline void mmc_host_clk_init(struct mmc_host *host)
@@ -246,6 +290,9 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
246{ 290{
247} 291}
248 292
293static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
294{
295}
249#endif 296#endif
250 297
251/** 298/**
@@ -284,6 +331,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
284 331
285 spin_lock_init(&host->lock); 332 spin_lock_init(&host->lock);
286 init_waitqueue_head(&host->wq); 333 init_waitqueue_head(&host->wq);
334 wake_lock_init(&host->detect_wake_lock, WAKE_LOCK_SUSPEND,
335 kasprintf(GFP_KERNEL, "%s_detect", mmc_hostname(host)));
287 INIT_DELAYED_WORK(&host->detect, mmc_rescan); 336 INIT_DELAYED_WORK(&host->detect, mmc_rescan);
288 INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable); 337 INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable);
289#ifdef CONFIG_PM 338#ifdef CONFIG_PM
@@ -335,8 +384,11 @@ int mmc_add_host(struct mmc_host *host)
335 mmc_add_host_debugfs(host); 384 mmc_add_host_debugfs(host);
336#endif 385#endif
337 386
387 mmc_host_clk_sysfs_init(host);
388
338 mmc_start_host(host); 389 mmc_start_host(host);
339 register_pm_notifier(&host->pm_notify); 390 if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
391 register_pm_notifier(&host->pm_notify);
340 392
341 return 0; 393 return 0;
342} 394}
@@ -353,7 +405,9 @@ EXPORT_SYMBOL(mmc_add_host);
353 */ 405 */
354void mmc_remove_host(struct mmc_host *host) 406void mmc_remove_host(struct mmc_host *host)
355{ 407{
356 unregister_pm_notifier(&host->pm_notify); 408 if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
409 unregister_pm_notifier(&host->pm_notify);
410
357 mmc_stop_host(host); 411 mmc_stop_host(host);
358 412
359#ifdef CONFIG_DEBUG_FS 413#ifdef CONFIG_DEBUG_FS
@@ -380,6 +434,7 @@ void mmc_free_host(struct mmc_host *host)
380 spin_lock(&mmc_host_lock); 434 spin_lock(&mmc_host_lock);
381 idr_remove(&mmc_host_idr, host->index); 435 idr_remove(&mmc_host_idr, host->index);
382 spin_unlock(&mmc_host_lock); 436 spin_unlock(&mmc_host_lock);
437 wake_lock_destroy(&host->detect_wake_lock);
383 438
384 put_device(&host->class_dev); 439 put_device(&host->class_dev);
385} 440}
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index fb8a5cd2e4a..08a7852ade4 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -14,27 +14,6 @@
14 14
15int mmc_register_host_class(void); 15int mmc_register_host_class(void);
16void mmc_unregister_host_class(void); 16void mmc_unregister_host_class(void);
17
18#ifdef CONFIG_MMC_CLKGATE
19void mmc_host_clk_hold(struct mmc_host *host);
20void mmc_host_clk_release(struct mmc_host *host);
21unsigned int mmc_host_clk_rate(struct mmc_host *host);
22
23#else
24static inline void mmc_host_clk_hold(struct mmc_host *host)
25{
26}
27
28static inline void mmc_host_clk_release(struct mmc_host *host)
29{
30}
31
32static inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
33{
34 return host->ios.clock;
35}
36#endif
37
38void mmc_host_deeper_disable(struct work_struct *work); 17void mmc_host_deeper_disable(struct work_struct *work);
39 18
40#endif 19#endif
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index f6011802745..f1b510f3258 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -22,6 +22,16 @@
22#include "mmc_ops.h" 22#include "mmc_ops.h"
23#include "sd_ops.h" 23#include "sd_ops.h"
24 24
25#if defined(CONFIG_MIDAS_COMMON)
26#if defined(CONFIG_TARGET_LOCALE_KOR)
27/* For the check ext_csd register in KOR model */
28#define MMC_RETRY_READ_EXT_CSD
29#else
30/* For debugging about ext_csd register value */
31#define MMC_CHECK_EXT_CSD
32#endif
33#endif
34
25static const unsigned int tran_exp[] = { 35static const unsigned int tran_exp[] = {
26 10000, 100000, 1000000, 10000000, 36 10000, 100000, 1000000, 10000000,
27 0, 0, 0, 0 37 0, 0, 0, 0
@@ -41,7 +51,7 @@ static const unsigned int tacc_mant[] = {
41 35, 40, 45, 50, 55, 60, 70, 80, 51 35, 40, 45, 50, 55, 60, 70, 80,
42}; 52};
43 53
44#define UNSTUFF_BITS(resp,start,size) \ 54#define UNSTUFF_BITS(resp, start, size) \
45 ({ \ 55 ({ \
46 const int __size = size; \ 56 const int __size = size; \
47 const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \ 57 const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \
@@ -170,9 +180,88 @@ static int mmc_decode_csd(struct mmc_card *card)
170 csd->erase_size <<= csd->write_blkbits - 9; 180 csd->erase_size <<= csd->write_blkbits - 9;
171 } 181 }
172 182
183 if (UNSTUFF_BITS(resp, 13, 1))
184 printk(KERN_ERR "%s: PERM_WRITE_PROTECT was set.\n",
185 mmc_hostname(card->host));
186
173 return 0; 187 return 0;
174} 188}
175 189
190#if defined(MMC_CHECK_EXT_CSD)
191/* For debugging about ext_csd register value */
192static u8 *ext_csd_backup;
193static void mmc_error_ext_csd(struct mmc_card *card, u8 *ext_csd,
194 int backup, unsigned int slice)
195{
196 int i = 0;
197 int err = 0;
198 unsigned int available_new = 0;
199 u8 *ext_csd_new;
200
201 if (backup) {
202 kfree(ext_csd_backup);
203
204 ext_csd_backup = kmalloc(512, GFP_KERNEL);
205 if (!ext_csd_backup) {
206 pr_err("%s: kmalloc is failed(512B).\n", __func__);
207 return;
208 }
209
210 memcpy(ext_csd_backup, ext_csd, 512);
211#if 0 /* Just checking */
212#define EXT_CSD_REV 192 /* RO */
213#define EXT_CSD_STRUCTURE 194 /* RO */
214#define EXT_CSD_CARD_TYPE 196 /* RO */
215#endif
216 pr_err("[TEST] eMMC check : %d, %d, %d.\n",
217 ext_csd_backup[EXT_CSD_REV],
218 ext_csd_backup[EXT_CSD_STRUCTURE],
219 ext_csd_backup[EXT_CSD_CARD_TYPE]);
220 } else {
221 ext_csd_new = kmalloc(512, GFP_KERNEL);
222 if (!ext_csd_new) {
223 pr_err("%s: ext_csd_new kmalloc is failed(512B).\n",
224 __func__);
225 } else {
226 err = mmc_send_ext_csd(card, ext_csd_new);
227 if (err)
228 pr_err("[TEST] Fail to get new EXT_CSD.\n");
229 else
230 available_new = 1;
231 }
232 pr_err("[TEST] %s: starting diff ext_csd.\n", __func__);
233 pr_err("[TEST] %s: error on slice %d: backup=%d, now=%d,"
234 "new=%d.\n",
235 __func__, slice,
236 ext_csd_backup[slice], ext_csd[slice],
237 available_new ? ext_csd_new[slice] : 0);
238 for (i = 0 ; i < 512 ; i++) {
239 if (available_new) {
240 if (ext_csd_backup[i] != ext_csd[i] ||
241 ext_csd_new[i] != ext_csd[i])
242 pr_err("%d : ext_csd_backup=%d,"
243 "ext_csd=%d,"
244 "ext_csd_new=%d.\n",
245 i,
246 ext_csd_backup[i],
247 ext_csd[i],
248 ext_csd_new[i]);
249 } else {
250 if (ext_csd_backup[i] != ext_csd[i])
251 pr_err("%d : ext_csd_backup=%d,"
252 "ext_csd=%d.\n",
253 i,
254 ext_csd_backup[i],
255 ext_csd[i]);
256 }
257 }
258 panic("eMMC's EXT_CSD error.\n");
259 }
260 return;
261
262}
263#endif
264
176/* 265/*
177 * Read extended CSD. 266 * Read extended CSD.
178 */ 267 */
@@ -231,6 +320,11 @@ static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
231 } else 320 } else
232 *new_ext_csd = ext_csd; 321 *new_ext_csd = ext_csd;
233 322
323#if defined(MMC_CHECK_EXT_CSD)
324/* For debugging about ext_csd register value */
325 mmc_error_ext_csd(card, ext_csd, 1, 0);
326#endif
327
234 return err; 328 return err;
235} 329}
236 330
@@ -254,15 +348,23 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
254 "version %d\n", mmc_hostname(card->host), 348 "version %d\n", mmc_hostname(card->host),
255 card->ext_csd.raw_ext_csd_structure); 349 card->ext_csd.raw_ext_csd_structure);
256 err = -EINVAL; 350 err = -EINVAL;
351#if defined(MMC_CHECK_EXT_CSD)
352 /* For debugging about ext_csd register value */
353 mmc_error_ext_csd(card, ext_csd, 0, EXT_CSD_STRUCTURE);
354#endif
257 goto out; 355 goto out;
258 } 356 }
259 } 357 }
260 358
261 card->ext_csd.rev = ext_csd[EXT_CSD_REV]; 359 card->ext_csd.rev = ext_csd[EXT_CSD_REV];
262 if (card->ext_csd.rev > 5) { 360 if (card->ext_csd.rev > 6) {
263 printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n", 361 printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n",
264 mmc_hostname(card->host), card->ext_csd.rev); 362 mmc_hostname(card->host), card->ext_csd.rev);
265 err = -EINVAL; 363 err = -EINVAL;
364#if defined(MMC_CHECK_EXT_CSD)
365 /* For debugging about ext_csd register value */
366 mmc_error_ext_csd(card, ext_csd, 0, EXT_CSD_REV);
367#endif
266 goto out; 368 goto out;
267 } 369 }
268 370
@@ -283,6 +385,27 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
283 } 385 }
284 card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE]; 386 card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
285 switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) { 387 switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
388 case EXT_CSD_CARD_TYPE_SDR_ALL:
389 case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_8V:
390 case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_2V:
391 case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_52:
392 card->ext_csd.hs_max_dtr = 200000000;
393 card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_200;
394 break;
395 case EXT_CSD_CARD_TYPE_SDR_1_2V_ALL:
396 case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_8V:
397 case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_2V:
398 case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_52:
399 card->ext_csd.hs_max_dtr = 200000000;
400 card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_2V;
401 break;
402 case EXT_CSD_CARD_TYPE_SDR_1_8V_ALL:
403 case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_8V:
404 case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_2V:
405 case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_52:
406 card->ext_csd.hs_max_dtr = 200000000;
407 card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_8V;
408 break;
286 case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 | 409 case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
287 EXT_CSD_CARD_TYPE_26: 410 EXT_CSD_CARD_TYPE_26:
288 card->ext_csd.hs_max_dtr = 52000000; 411 card->ext_csd.hs_max_dtr = 52000000;
@@ -305,10 +428,18 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
305 card->ext_csd.hs_max_dtr = 26000000; 428 card->ext_csd.hs_max_dtr = 26000000;
306 break; 429 break;
307 default: 430 default:
431#if defined(MMC_CHECK_EXT_CSD)
432 /* For debugging about ext_csd register value */
433 mmc_error_ext_csd(card, ext_csd, 0, EXT_CSD_CARD_TYPE);
434#endif
308 /* MMC v4 spec says this cannot happen */ 435 /* MMC v4 spec says this cannot happen */
309 printk(KERN_WARNING "%s: card is mmc v4 but doesn't " 436 printk(KERN_WARNING "%s: card is mmc v4 but doesn't "
310 "support any high-speed modes.\n", 437 "support any high-speed modes.\n",
311 mmc_hostname(card->host)); 438 mmc_hostname(card->host));
439#if defined(MMC_RETRY_READ_EXT_CSD)
440 err = -EINVAL;
441 goto out;
442#endif
312 } 443 }
313 444
314 card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT]; 445 card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
@@ -319,6 +450,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
319 if (card->ext_csd.rev >= 3) { 450 if (card->ext_csd.rev >= 3) {
320 u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT]; 451 u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
321 card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG]; 452 card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
453 card->ext_csd.boot_part_prot = ext_csd[EXT_CSD_BOOT_CONFIG_PROT];
322 454
323 /* EXT_CSD value is in units of 10ms, but we store in ms */ 455 /* EXT_CSD value is in units of 10ms, but we store in ms */
324 card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME]; 456 card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
@@ -403,8 +535,28 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
403 ext_csd[EXT_CSD_TRIM_MULT]; 535 ext_csd[EXT_CSD_TRIM_MULT];
404 } 536 }
405 537
406 if (card->ext_csd.rev >= 5) 538 if (card->ext_csd.rev >= 5) {
539 /* enable discard feature if emmc is 4.41 */
540 card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;
541
542 /* check whether the eMMC card supports HPI */
543 if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) {
544 card->ext_csd.hpi = 1;
545 if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
546 card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
547 else
548 card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
549 /*
550 * Indicate the maximum timeout to close
551 * a command interrupted by HPI
552 */
553 card->ext_csd.out_of_int_time =
554 ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
555 }
556
407 card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM]; 557 card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
558 card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];
559 }
408 560
409 card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT]; 561 card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
410 if (ext_csd[EXT_CSD_ERASED_MEM_CONT]) 562 if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
@@ -412,6 +564,27 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
412 else 564 else
413 card->erased_byte = 0x0; 565 card->erased_byte = 0x0;
414 566
567 /* eMMC v4.5 or later */
568 if (card->ext_csd.rev >= 6) {
569 card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;
570
571 card->ext_csd.generic_cmd6_time = 10 *
572 ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
573 card->ext_csd.power_off_longtime = 10 *
574 ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];
575
576 card->ext_csd.cache_size =
577 ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
578 ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
579 ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
580 ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;
581
582 card->ext_csd.max_packed_writes =
583 ext_csd[EXT_CSD_MAX_PACKED_WRITES];
584 card->ext_csd.max_packed_reads =
585 ext_csd[EXT_CSD_MAX_PACKED_READS];
586 }
587
415out: 588out:
416 return err; 589 return err;
417} 590}
@@ -438,9 +611,6 @@ static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
438 goto out; 611 goto out;
439 } 612 }
440 613
441 if (bus_width == MMC_BUS_WIDTH_1)
442 goto out;
443
444 /* only compare read only fields */ 614 /* only compare read only fields */
445 err = (!(card->ext_csd.raw_partition_support == 615 err = (!(card->ext_csd.raw_partition_support ==
446 bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) && 616 bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
@@ -532,6 +702,159 @@ static struct device_type mmc_type = {
532}; 702};
533 703
534/* 704/*
705 * Select the PowerClass for the current bus width
706 * If power class is defined for 4/8 bit bus in the
707 * extended CSD register, select it by executing the
708 * mmc_switch command.
709 */
710static int mmc_select_powerclass(struct mmc_card *card,
711 unsigned int bus_width, u8 *ext_csd)
712{
713 int err = 0;
714 unsigned int pwrclass_val;
715 unsigned int index = 0;
716 struct mmc_host *host;
717
718 BUG_ON(!card);
719
720 host = card->host;
721 BUG_ON(!host);
722
723 if (ext_csd == NULL)
724 return 0;
725
726 /* Power class selection is supported for versions >= 4.0 */
727 if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
728 return 0;
729
730 /* Power class values are defined only for 4/8 bit bus */
731 if (bus_width == EXT_CSD_BUS_WIDTH_1)
732 return 0;
733
734 switch (1 << host->ios.vdd) {
735 case MMC_VDD_165_195:
736 if (host->ios.clock <= 26000000)
737 index = EXT_CSD_PWR_CL_26_195;
738 else if (host->ios.clock <= 52000000)
739 index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
740 EXT_CSD_PWR_CL_52_195 :
741 EXT_CSD_PWR_CL_DDR_52_195;
742 else if (host->ios.clock <= 200000000)
743 index = EXT_CSD_PWR_CL_200_195;
744 break;
745 case MMC_VDD_32_33:
746 case MMC_VDD_33_34:
747 case MMC_VDD_34_35:
748 case MMC_VDD_35_36:
749 if (host->ios.clock <= 26000000)
750 index = EXT_CSD_PWR_CL_26_360;
751 else if (host->ios.clock <= 52000000)
752 index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
753 EXT_CSD_PWR_CL_52_360 :
754 EXT_CSD_PWR_CL_DDR_52_360;
755 else if (host->ios.clock <= 200000000)
756 index = EXT_CSD_PWR_CL_200_360;
757 break;
758 default:
759 pr_warning("%s: Voltage range not supported "
760 "for power class.\n", mmc_hostname(host));
761 return -EINVAL;
762 }
763
764 pwrclass_val = ext_csd[index];
765
766 if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8))
767 pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >>
768 EXT_CSD_PWR_CL_8BIT_SHIFT;
769 else
770 pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >>
771 EXT_CSD_PWR_CL_4BIT_SHIFT;
772
773 /* If the power class is different from the default value */
774 if (pwrclass_val > 0) {
775 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
776 EXT_CSD_POWER_CLASS,
777 pwrclass_val,
778 card->ext_csd.generic_cmd6_time);
779 }
780
781 return err;
782}
783
784/*
785 * Selects the desired buswidth and switch to the HS200 mode
786 * if bus width set without error
787 */
788static int mmc_select_hs200(struct mmc_card *card)
789{
790 int idx, err = 0;
791 struct mmc_host *host;
792 static unsigned ext_csd_bits[] = {
793 EXT_CSD_BUS_WIDTH_4,
794 EXT_CSD_BUS_WIDTH_8,
795 };
796 static unsigned bus_widths[] = {
797 MMC_BUS_WIDTH_4,
798 MMC_BUS_WIDTH_8,
799 };
800
801 BUG_ON(!card);
802
803 host = card->host;
804
805 if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V &&
806 host->caps2 & MMC_CAP2_HS200_1_2V_SDR)
807 if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0))
808 err = mmc_set_signal_voltage(host,
809 MMC_SIGNAL_VOLTAGE_180, 0);
810
811 /* If fails try again during next card power cycle */
812 if (err)
813 goto err;
814
815 idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 1 : 0;
816
817 /*
818 * Unlike SD, MMC cards dont have a configuration register to notify
819 * supported bus width. So bus test command should be run to identify
820 * the supported bus width or compare the ext csd values of current
821 * bus width and ext csd values of 1 bit mode read earlier.
822 */
823 for (; idx >= 0; idx--) {
824
825 /*
826 * Host is capable of 8bit transfer, then switch
827 * the device to work in 8bit transfer mode. If the
828 * mmc switch command returns error then switch to
829 * 4bit transfer mode. On success set the corresponding
830 * bus width on the host.
831 */
832 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
833 EXT_CSD_BUS_WIDTH,
834 ext_csd_bits[idx],
835 card->ext_csd.generic_cmd6_time);
836 if (err)
837 continue;
838
839 mmc_set_bus_width(card->host, bus_widths[idx]);
840
841 if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
842 err = mmc_compare_ext_csds(card, bus_widths[idx]);
843 else
844 err = mmc_bus_test(card, bus_widths[idx]);
845 if (!err)
846 break;
847 }
848
849 /* switch to HS200 mode if bus width set successfully */
850 if (!err)
851 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
852 EXT_CSD_HS_TIMING, 2, 0);
853err:
854 return err;
855}
856
857/*
535 * Handle the detection and initialisation of a card. 858 * Handle the detection and initialisation of a card.
536 * 859 *
537 * In the case of a resume, "oldcard" will contain the card 860 * In the case of a resume, "oldcard" will contain the card
@@ -644,14 +967,28 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
644 /* 967 /*
645 * Fetch and process extended CSD. 968 * Fetch and process extended CSD.
646 */ 969 */
647 970#if defined(MMC_RETRY_READ_EXT_CSD)
971 {
972 int i = 0;
973 for (i = 0 ; i < 3 ; i++) {
974 err = mmc_get_ext_csd(card, &ext_csd);
975 if (err)
976 continue;
977 err = mmc_read_ext_csd(card, ext_csd);
978 if (!err)
979 break;
980 }
981 if (err)
982 goto free_card;
983 }
984#else
648 err = mmc_get_ext_csd(card, &ext_csd); 985 err = mmc_get_ext_csd(card, &ext_csd);
649 if (err) 986 if (err)
650 goto free_card; 987 goto free_card;
651 err = mmc_read_ext_csd(card, ext_csd); 988 err = mmc_read_ext_csd(card, ext_csd);
652 if (err) 989 if (err)
653 goto free_card; 990 goto free_card;
654 991#endif
655 /* If doing byte addressing, check if required to do sector 992 /* If doing byte addressing, check if required to do sector
656 * addressing. Handle the case of <2GB cards needing sector 993 * addressing. Handle the case of <2GB cards needing sector
657 * addressing. See section 8.1 JEDEC Standard JED84-A441; 994 * addressing. See section 8.1 JEDEC Standard JED84-A441;
@@ -670,7 +1007,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
670 */ 1007 */
671 if (card->ext_csd.enhanced_area_en) { 1008 if (card->ext_csd.enhanced_area_en) {
672 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1009 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
673 EXT_CSD_ERASE_GROUP_DEF, 1, 0); 1010 EXT_CSD_ERASE_GROUP_DEF, 1,
1011 card->ext_csd.generic_cmd6_time);
674 1012
675 if (err && err != -EBADMSG) 1013 if (err && err != -EBADMSG)
676 goto free_card; 1014 goto free_card;
@@ -708,12 +1046,53 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
708 } 1046 }
709 1047
710 /* 1048 /*
711 * Activate high speed (if supported) 1049 * Ensure eMMC boot config is protected.
712 */ 1050 */
713 if ((card->ext_csd.hs_max_dtr != 0) && 1051 if (!(card->ext_csd.boot_part_prot & (0x1<<4)) &&
714 (host->caps & MMC_CAP_MMC_HIGHSPEED)) { 1052 !(card->ext_csd.boot_part_prot & (0x1<<0))) {
1053 card->ext_csd.boot_part_prot |= (0x1<<0);
715 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1054 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
716 EXT_CSD_HS_TIMING, 1, 0); 1055 EXT_CSD_BOOT_CONFIG_PROT,
1056 card->ext_csd.boot_part_prot,
1057 card->ext_csd.part_time);
1058 if (err && err != -EBADMSG)
1059 goto free_card;
1060 }
1061
1062 /*
1063 * If the host supports the power_off_notify capability then
1064 * set the notification byte in the ext_csd register of device
1065 */
1066 if ((host->caps2 & MMC_CAP2_POWEROFF_NOTIFY) &&
1067 (card->ext_csd.rev >= 6)) {
1068 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1069 EXT_CSD_POWER_OFF_NOTIFICATION,
1070 EXT_CSD_POWER_ON,
1071 card->ext_csd.generic_cmd6_time);
1072 if (err && err != -EBADMSG)
1073 goto free_card;
1074 }
1075
1076 if (!err && (host->caps2 & MMC_CAP2_POWEROFF_NOTIFY))
1077 /*
1078 * The err can be -EBADMSG or 0,
1079 * so check for success and update the flag
1080 */
1081 if (!err)
1082 card->poweroff_notify_state = MMC_POWERED_ON;
1083
1084 /*
1085 * Activate high speed (if supported)
1086 */
1087 if (card->ext_csd.hs_max_dtr != 0) {
1088 err = 0;
1089 if (card->ext_csd.hs_max_dtr > 52000000 &&
1090 host->caps2 & MMC_CAP2_HS200)
1091 err = mmc_select_hs200(card);
1092 else if (host->caps & MMC_CAP_MMC_HIGHSPEED)
1093 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1094 EXT_CSD_HS_TIMING, 1, 0);
1095
717 if (err && err != -EBADMSG) 1096 if (err && err != -EBADMSG)
718 goto free_card; 1097 goto free_card;
719 1098
@@ -722,8 +1101,15 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
722 mmc_hostname(card->host)); 1101 mmc_hostname(card->host));
723 err = 0; 1102 err = 0;
724 } else { 1103 } else {
725 mmc_card_set_highspeed(card); 1104 if (card->ext_csd.hs_max_dtr > 52000000 &&
726 mmc_set_timing(card->host, MMC_TIMING_MMC_HS); 1105 host->caps2 & MMC_CAP2_HS200) {
1106 mmc_card_set_hs200(card);
1107 mmc_set_timing(card->host,
1108 MMC_TIMING_MMC_HS200);
1109 } else {
1110 mmc_card_set_highspeed(card);
1111 mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
1112 }
727 } 1113 }
728 } 1114 }
729 1115
@@ -732,7 +1118,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
732 */ 1118 */
733 max_dtr = (unsigned int)-1; 1119 max_dtr = (unsigned int)-1;
734 1120
735 if (mmc_card_highspeed(card)) { 1121 if (mmc_card_highspeed(card) || mmc_card_hs200(card)) {
736 if (max_dtr > card->ext_csd.hs_max_dtr) 1122 if (max_dtr > card->ext_csd.hs_max_dtr)
737 max_dtr = card->ext_csd.hs_max_dtr; 1123 max_dtr = card->ext_csd.hs_max_dtr;
738 } else if (max_dtr > card->csd.max_dtr) { 1124 } else if (max_dtr > card->csd.max_dtr) {
@@ -758,9 +1144,51 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
758 } 1144 }
759 1145
760 /* 1146 /*
1147 * Indicate HS200 SDR mode (if supported).
1148 */
1149 if (mmc_card_hs200(card)) {
1150 u32 ext_csd_bits;
1151 u32 bus_width = card->host->ios.bus_width;
1152
1153 /*
1154 * For devices supporting HS200 mode, the bus width has
1155 * to be set before executing the tuning function. If
1156 * set before tuning, then device will respond with CRC
1157 * errors for responses on CMD line. So for HS200 the
1158 * sequence will be
1159 * 1. set bus width 4bit / 8 bit (1 bit not supported)
1160 * 2. switch to HS200 mode
1161 * 3. set the clock to > 52Mhz <=200MHz and
1162 * 4. execute tuning for HS200
1163 */
1164 if ((host->caps2 & MMC_CAP2_HS200) &&
1165 card->host->ops->execute_tuning) {
1166 mmc_host_clk_hold(card->host);
1167 err = card->host->ops->execute_tuning(card->host,
1168 MMC_SEND_TUNING_BLOCK_HS200);
1169 mmc_host_clk_release(card->host);
1170 }
1171 if (err) {
1172 pr_warning("%s: tuning execution failed\n",
1173 mmc_hostname(card->host));
1174 goto err;
1175 }
1176
1177 ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
1178 EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
1179 err = mmc_select_powerclass(card, ext_csd_bits, ext_csd);
1180 if (err) {
1181 pr_err("%s: power class selection to bus width %d failed\n",
1182 mmc_hostname(card->host), 1 << bus_width);
1183 goto err;
1184 }
1185 }
1186
1187 /*
761 * Activate wide bus and DDR (if supported). 1188 * Activate wide bus and DDR (if supported).
762 */ 1189 */
763 if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) && 1190 if (!mmc_card_hs200(card) &&
1191 (card->csd.mmca_vsn >= CSD_SPEC_VER_3) &&
764 (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) { 1192 (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
765 static unsigned ext_csd_bits[][2] = { 1193 static unsigned ext_csd_bits[][2] = {
766 { EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 }, 1194 { EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
@@ -782,10 +1210,18 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
782 bus_width = bus_widths[idx]; 1210 bus_width = bus_widths[idx];
783 if (bus_width == MMC_BUS_WIDTH_1) 1211 if (bus_width == MMC_BUS_WIDTH_1)
784 ddr = 0; /* no DDR for 1-bit width */ 1212 ddr = 0; /* no DDR for 1-bit width */
1213 err = mmc_select_powerclass(card, ext_csd_bits[idx][0],
1214 ext_csd);
1215 if (err)
1216 pr_err("%s: power class selection to "
1217 "bus width %d failed\n",
1218 mmc_hostname(card->host),
1219 1 << bus_width);
1220
785 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1221 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
786 EXT_CSD_BUS_WIDTH, 1222 EXT_CSD_BUS_WIDTH,
787 ext_csd_bits[idx][0], 1223 ext_csd_bits[idx][0],
788 0); 1224 card->ext_csd.generic_cmd6_time);
789 if (!err) { 1225 if (!err) {
790 mmc_set_bus_width(card->host, bus_width); 1226 mmc_set_bus_width(card->host, bus_width);
791 1227
@@ -805,10 +1241,21 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
805 } 1241 }
806 1242
807 if (!err && ddr) { 1243 if (!err && ddr) {
1244 /* to inform to mshci driver
1245 that it is working as DDR mode */
1246 (host->ios).ddr = (unsigned char)ddr;
1247 err = mmc_select_powerclass(card, ext_csd_bits[idx][1],
1248 ext_csd);
1249 if (err)
1250 pr_err("%s: power class selection to "
1251 "bus width %d ddr %d failed\n",
1252 mmc_hostname(card->host),
1253 1 << bus_width, ddr);
1254
808 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1255 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
809 EXT_CSD_BUS_WIDTH, 1256 EXT_CSD_BUS_WIDTH,
810 ext_csd_bits[idx][1], 1257 ext_csd_bits[idx][1],
811 0); 1258 card->ext_csd.generic_cmd6_time);
812 } 1259 }
813 if (err) { 1260 if (err) {
814 printk(KERN_WARNING "%s: switch to bus width %d ddr %d " 1261 printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
@@ -842,6 +1289,68 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
842 } 1289 }
843 } 1290 }
844 1291
1292 /*
1293 * Enable HPI feature (if supported)
1294 */
1295 if (card->ext_csd.hpi) {
1296 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1297 EXT_CSD_HPI_MGMT, 1,
1298 card->ext_csd.generic_cmd6_time);
1299 if (err && err != -EBADMSG)
1300 goto free_card;
1301 if (err) {
1302 pr_warning("%s: Enabling HPI failed\n",
1303 mmc_hostname(card->host));
1304 err = 0;
1305 } else
1306 card->ext_csd.hpi_en = 1;
1307 }
1308
1309 /*
1310 * If cache size is higher than 0, this indicates
1311 * the existence of cache and it can be turned on.
1312 */
1313 if ((host->caps2 & MMC_CAP2_CACHE_CTRL) &&
1314 card->ext_csd.cache_size > 0) {
1315 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1316 EXT_CSD_CACHE_CTRL, 1,
1317 card->ext_csd.generic_cmd6_time);
1318 if (err && err != -EBADMSG)
1319 goto free_card;
1320
1321 /*
1322 * Only if no error, cache is turned on successfully.
1323 */
1324 if (err) {
1325 pr_warning("%s: Cache is supported, "
1326 "but failed to turn on (%d)\n",
1327 mmc_hostname(card->host), err);
1328 card->ext_csd.cache_ctrl = 0;
1329 err = 0;
1330 } else {
1331 card->ext_csd.cache_ctrl = 1;
1332 }
1333 }
1334
1335 if ((host->caps2 & MMC_CAP2_PACKED_CMD) &&
1336 (card->ext_csd.max_packed_writes > 0) &&
1337 (card->ext_csd.max_packed_reads > 0)) {
1338 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1339 EXT_CSD_EXP_EVENTS_CTRL,
1340 EXT_CSD_PACKED_EVENT_EN,
1341 card->ext_csd.generic_cmd6_time);
1342 if (err && err != -EBADMSG)
1343 goto free_card;
1344 if (err) {
1345 pr_warning("%s: Enabling packed event failed\n",
1346 mmc_hostname(card->host));
1347 card->ext_csd.packed_event_en = 0;
1348 err = 0;
1349 } else {
1350 card->ext_csd.packed_event_en = 1;
1351 }
1352 }
1353
845 if (!oldcard) 1354 if (!oldcard)
846 host->card = card; 1355 host->card = card;
847 1356
@@ -870,6 +1379,14 @@ static void mmc_remove(struct mmc_host *host)
870} 1379}
871 1380
872/* 1381/*
1382 * Card detection - card is alive.
1383 */
1384static int mmc_alive(struct mmc_host *host)
1385{
1386 return mmc_send_status(host->card, NULL);
1387}
1388
1389/*
873 * Card detection callback from host. 1390 * Card detection callback from host.
874 */ 1391 */
875static void mmc_detect(struct mmc_host *host) 1392static void mmc_detect(struct mmc_host *host)
@@ -884,7 +1401,7 @@ static void mmc_detect(struct mmc_host *host)
884 /* 1401 /*
885 * Just check if our card has been removed. 1402 * Just check if our card has been removed.
886 */ 1403 */
887 err = mmc_send_status(host->card, NULL); 1404 err = _mmc_detect_card_removed(host);
888 1405
889 mmc_release_host(host); 1406 mmc_release_host(host);
890 1407
@@ -985,6 +1502,7 @@ static const struct mmc_bus_ops mmc_ops = {
985 .suspend = NULL, 1502 .suspend = NULL,
986 .resume = NULL, 1503 .resume = NULL,
987 .power_restore = mmc_power_restore, 1504 .power_restore = mmc_power_restore,
1505 .alive = mmc_alive,
988}; 1506};
989 1507
990static const struct mmc_bus_ops mmc_ops_unsafe = { 1508static const struct mmc_bus_ops mmc_ops_unsafe = {
@@ -995,6 +1513,7 @@ static const struct mmc_bus_ops mmc_ops_unsafe = {
995 .suspend = mmc_suspend, 1513 .suspend = mmc_suspend,
996 .resume = mmc_resume, 1514 .resume = mmc_resume,
997 .power_restore = mmc_power_restore, 1515 .power_restore = mmc_power_restore,
1516 .alive = mmc_alive,
998}; 1517};
999 1518
1000static void mmc_attach_bus_ops(struct mmc_host *host) 1519static void mmc_attach_bus_ops(struct mmc_host *host)
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 845ce7c533b..15b64318f2d 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -17,6 +17,8 @@
17#include <linux/mmc/card.h> 17#include <linux/mmc/card.h>
18#include <linux/mmc/mmc.h> 18#include <linux/mmc/mmc.h>
19 19
20#include <plat/cpu.h>
21
20#include "core.h" 22#include "core.h"
21#include "mmc_ops.h" 23#include "mmc_ops.h"
22 24
@@ -334,6 +336,7 @@ int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
334 return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, 336 return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
335 ext_csd, 512); 337 ext_csd, 512);
336} 338}
339EXPORT_SYMBOL_GPL(mmc_send_ext_csd);
337 340
338int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp) 341int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
339{ 342{
@@ -400,6 +403,10 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
400 403
401 /* Must check status to be sure of no errors */ 404 /* Must check status to be sure of no errors */
402 do { 405 do {
406#if defined(CONFIG_MACH_SMDKC210) || defined(CONFIG_MACH_SMDKV310)
407 /* HACK: in case of smdkc210, smdkv310 has problem at inand */
408 mmc_delay(3);
409#endif
403 err = mmc_send_status(card, &status); 410 err = mmc_send_status(card, &status);
404 if (err) 411 if (err)
405 return err; 412 return err;
@@ -547,3 +554,38 @@ int mmc_bus_test(struct mmc_card *card, u8 bus_width)
547 err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width); 554 err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
548 return err; 555 return err;
549} 556}
557
558int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
559{
560 struct mmc_command cmd = {0};
561 unsigned int opcode;
562 int err;
563
564 if (!card->ext_csd.hpi) {
565 pr_warning("%s: Card didn't support HPI command\n",
566 mmc_hostname(card->host));
567 return -EINVAL;
568 }
569
570 opcode = card->ext_csd.hpi_cmd;
571 if (opcode == MMC_STOP_TRANSMISSION)
572 cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
573 else if (opcode == MMC_SEND_STATUS)
574 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
575
576 cmd.opcode = opcode;
577 cmd.arg = card->rca << 16 | 1;
578 cmd.cmd_timeout_ms = card->ext_csd.out_of_int_time;
579
580 err = mmc_wait_for_cmd(card->host, &cmd, 0);
581 if (err) {
582 pr_warn("%s: error %d interrupting operation. "
583 "HPI command response %#x\n", mmc_hostname(card->host),
584 err, cmd.resp[0]);
585 return err;
586 }
587 if (status)
588 *status = cmd.resp[0];
589
590 return 0;
591}
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 9276946fa5b..3dd8941c298 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -26,6 +26,7 @@ int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
26int mmc_spi_set_crc(struct mmc_host *host, int use_crc); 26int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
27int mmc_card_sleepawake(struct mmc_host *host, int sleep); 27int mmc_card_sleepawake(struct mmc_host *host, int sleep);
28int mmc_bus_test(struct mmc_card *card, u8 bus_width); 28int mmc_bus_test(struct mmc_card *card, u8 bus_width);
29int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status);
29 30
30#endif 31#endif
31 32
diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c
index 3a596217029..aca1c3c6d60 100644
--- a/drivers/mmc/core/quirks.c
+++ b/drivers/mmc/core/quirks.c
@@ -21,6 +21,22 @@
21#define SDIO_DEVICE_ID_TI_WL1271 0x4076 21#define SDIO_DEVICE_ID_TI_WL1271 0x4076
22#endif 22#endif
23 23
24#ifndef SDIO_VENDOR_ID_BRCM
25#define SDIO_VENDOR_ID_BRCM 0x02D0
26#endif
27
28#ifndef SDIO_DEVICE_ID_BRCM_BCM4330
29#define SDIO_DEVICE_ID_BRCM_BCM4330 0x4330
30#endif
31
32#ifndef SDIO_DEVICE_ID_BRCM_BCM4334
33#define SDIO_DEVICE_ID_BRCM_BCM4334 0x4334
34#endif
35
36#ifndef SDIO_DEVICE_ID_BRCM_BCM43241
37#define SDIO_DEVICE_ID_BRCM_BCM43241 0x4324
38#endif
39
24/* 40/*
25 * This hook just adds a quirk for all sdio devices 41 * This hook just adds a quirk for all sdio devices
26 */ 42 */
@@ -46,6 +62,15 @@ static const struct mmc_fixup mmc_fixup_methods[] = {
46 SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271, 62 SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
47 add_quirk, MMC_QUIRK_DISABLE_CD), 63 add_quirk, MMC_QUIRK_DISABLE_CD),
48 64
65 SDIO_FIXUP(SDIO_VENDOR_ID_BRCM, SDIO_DEVICE_ID_BRCM_BCM4330,
66 remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
67
68 SDIO_FIXUP(SDIO_VENDOR_ID_BRCM, SDIO_DEVICE_ID_BRCM_BCM4334,
69 remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
70
71 SDIO_FIXUP(SDIO_VENDOR_ID_BRCM, SDIO_DEVICE_ID_BRCM_BCM43241,
72 remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
73
49 END_FIXUP 74 END_FIXUP
50}; 75};
51 76
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index bd8805c9e8a..4586eaa762c 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -306,6 +306,9 @@ static int mmc_read_switch(struct mmc_card *card)
306 goto out; 306 goto out;
307 } 307 }
308 308
309 if (status[13] & 0x02)
310 card->sw_caps.hs_max_dtr = 50000000;
311
309 if (card->scr.sda_spec3) { 312 if (card->scr.sda_spec3) {
310 card->sw_caps.sd3_bus_mode = status[13]; 313 card->sw_caps.sd3_bus_mode = status[13];
311 314
@@ -348,9 +351,6 @@ static int mmc_read_switch(struct mmc_card *card)
348 } 351 }
349 352
350 card->sw_caps.sd3_curr_limit = status[7]; 353 card->sw_caps.sd3_curr_limit = status[7];
351 } else {
352 if (status[13] & 0x02)
353 card->sw_caps.hs_max_dtr = 50000000;
354 } 354 }
355 355
356out: 356out:
@@ -409,52 +409,64 @@ out:
409 409
410static int sd_select_driver_type(struct mmc_card *card, u8 *status) 410static int sd_select_driver_type(struct mmc_card *card, u8 *status)
411{ 411{
412 int host_drv_type = 0, card_drv_type = 0; 412 int host_drv_type = SD_DRIVER_TYPE_B;
413 int card_drv_type = SD_DRIVER_TYPE_B;
414 int drive_strength;
413 int err; 415 int err;
414 416
415 /* 417 /*
416 * If the host doesn't support any of the Driver Types A,C or D, 418 * If the host doesn't support any of the Driver Types A,C or D,
417 * default Driver Type B is used. 419 * or there is no board specific handler then default Driver
420 * Type B is used.
418 */ 421 */
419 if (!(card->host->caps & (MMC_CAP_DRIVER_TYPE_A | MMC_CAP_DRIVER_TYPE_C 422 if (!(card->host->caps & (MMC_CAP_DRIVER_TYPE_A | MMC_CAP_DRIVER_TYPE_C
420 | MMC_CAP_DRIVER_TYPE_D))) 423 | MMC_CAP_DRIVER_TYPE_D)))
421 return 0; 424 return 0;
422 425
423 if (card->host->caps & MMC_CAP_DRIVER_TYPE_A) { 426 if (!card->host->ops->select_drive_strength)
424 host_drv_type = MMC_SET_DRIVER_TYPE_A; 427 return 0;
425 if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A) 428
426 card_drv_type = MMC_SET_DRIVER_TYPE_A; 429 if (card->host->caps & MMC_CAP_DRIVER_TYPE_A)
427 else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_B) 430 host_drv_type |= SD_DRIVER_TYPE_A;
428 card_drv_type = MMC_SET_DRIVER_TYPE_B; 431
429 else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C) 432 if (card->host->caps & MMC_CAP_DRIVER_TYPE_C)
430 card_drv_type = MMC_SET_DRIVER_TYPE_C; 433 host_drv_type |= SD_DRIVER_TYPE_C;
431 } else if (card->host->caps & MMC_CAP_DRIVER_TYPE_C) { 434
432 host_drv_type = MMC_SET_DRIVER_TYPE_C; 435 if (card->host->caps & MMC_CAP_DRIVER_TYPE_D)
433 if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C) 436 host_drv_type |= SD_DRIVER_TYPE_D;
434 card_drv_type = MMC_SET_DRIVER_TYPE_C; 437
435 } else if (!(card->host->caps & MMC_CAP_DRIVER_TYPE_D)) { 438 if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A)
436 /* 439 card_drv_type |= SD_DRIVER_TYPE_A;
437 * If we are here, that means only the default driver type 440
438 * B is supported by the host. 441 if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
439 */ 442 card_drv_type |= SD_DRIVER_TYPE_C;
440 host_drv_type = MMC_SET_DRIVER_TYPE_B; 443
441 if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_B) 444 if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_D)
442 card_drv_type = MMC_SET_DRIVER_TYPE_B; 445 card_drv_type |= SD_DRIVER_TYPE_D;
443 else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
444 card_drv_type = MMC_SET_DRIVER_TYPE_C;
445 }
446 446
447 err = mmc_sd_switch(card, 1, 2, card_drv_type, status); 447 /*
448 * The drive strength that the hardware can support
449 * depends on the board design. Pass the appropriate
450 * information and let the hardware specific code
451 * return what is possible given the options
452 */
453 mmc_host_clk_hold(card->host);
454 drive_strength = card->host->ops->select_drive_strength(
455 card->sw_caps.uhs_max_dtr,
456 host_drv_type, card_drv_type);
457 mmc_host_clk_release(card->host);
458
459 err = mmc_sd_switch(card, 1, 2, drive_strength, status);
448 if (err) 460 if (err)
449 return err; 461 return err;
450 462
451 if ((status[15] & 0xF) != card_drv_type) { 463 if ((status[15] & 0xF) != drive_strength) {
452 printk(KERN_WARNING "%s: Problem setting driver strength!\n", 464 printk(KERN_WARNING "%s: Problem setting drive strength!\n",
453 mmc_hostname(card->host)); 465 mmc_hostname(card->host));
454 return 0; 466 return 0;
455 } 467 }
456 468
457 mmc_set_driver_type(card->host, host_drv_type); 469 mmc_set_driver_type(card->host, drive_strength);
458 470
459 return 0; 471 return 0;
460} 472}
@@ -624,8 +636,12 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
624 goto out; 636 goto out;
625 637
626 /* SPI mode doesn't define CMD19 */ 638 /* SPI mode doesn't define CMD19 */
627 if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning) 639 if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning) {
628 err = card->host->ops->execute_tuning(card->host); 640 mmc_host_clk_hold(card->host);
641 err = card->host->ops->execute_tuning(card->host,
642 MMC_SEND_TUNING_BLOCK);
643 mmc_host_clk_release(card->host);
644 }
629 645
630out: 646out:
631 kfree(status); 647 kfree(status);
@@ -764,6 +780,9 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
764 bool reinit) 780 bool reinit)
765{ 781{
766 int err; 782 int err;
783#ifdef CONFIG_MMC_PARANOID_SD_INIT
784 int retries;
785#endif
767 786
768 if (!reinit) { 787 if (!reinit) {
769 /* 788 /*
@@ -790,7 +809,26 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
790 /* 809 /*
791 * Fetch switch information from card. 810 * Fetch switch information from card.
792 */ 811 */
812#ifdef CONFIG_MMC_PARANOID_SD_INIT
813 for (retries = 1; retries <= 3; retries++) {
814 err = mmc_read_switch(card);
815 if (!err) {
816 if (retries > 1) {
817 printk(KERN_WARNING
818 "%s: recovered\n",
819 mmc_hostname(host));
820 }
821 break;
822 } else {
823 printk(KERN_WARNING
824 "%s: read switch failed (attempt %d)\n",
825 mmc_hostname(host), retries);
826 }
827 }
828#else
793 err = mmc_read_switch(card); 829 err = mmc_read_switch(card);
830#endif
831
794 if (err) 832 if (err)
795 return err; 833 return err;
796 } 834 }
@@ -813,8 +851,11 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
813 if (!reinit) { 851 if (!reinit) {
814 int ro = -1; 852 int ro = -1;
815 853
816 if (host->ops->get_ro) 854 if (host->ops->get_ro) {
855 mmc_host_clk_hold(card->host);
817 ro = host->ops->get_ro(host); 856 ro = host->ops->get_ro(host);
857 mmc_host_clk_release(card->host);
858 }
818 859
819 if (ro < 0) { 860 if (ro < 0) {
820 printk(KERN_WARNING "%s: host does not " 861 printk(KERN_WARNING "%s: host does not "
@@ -926,14 +967,17 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
926 goto free_card; 967 goto free_card;
927 968
928 /* Card is an ultra-high-speed card */ 969 /* Card is an ultra-high-speed card */
929 mmc_sd_card_set_uhs(card); 970 mmc_card_set_uhs(card);
930 971
931 /* 972 /*
932 * Since initialization is now complete, enable preset 973 * Since initialization is now complete, enable preset
933 * value registers for UHS-I cards. 974 * value registers for UHS-I cards.
934 */ 975 */
935 if (host->ops->enable_preset_value) 976 if (host->ops->enable_preset_value) {
977 mmc_host_clk_hold(card->host);
936 host->ops->enable_preset_value(host, true); 978 host->ops->enable_preset_value(host, true);
979 mmc_host_clk_release(card->host);
980 }
937 } else { 981 } else {
938 /* 982 /*
939 * Attempt to change to high-speed (if supported) 983 * Attempt to change to high-speed (if supported)
@@ -985,22 +1029,48 @@ static void mmc_sd_remove(struct mmc_host *host)
985} 1029}
986 1030
987/* 1031/*
1032 * Card detection - card is alive.
1033 */
1034static int mmc_sd_alive(struct mmc_host *host)
1035{
1036 return mmc_send_status(host->card, NULL);
1037}
1038
1039/*
988 * Card detection callback from host. 1040 * Card detection callback from host.
989 */ 1041 */
990static void mmc_sd_detect(struct mmc_host *host) 1042static void mmc_sd_detect(struct mmc_host *host)
991{ 1043{
992 int err; 1044 int err = 0;
1045#ifdef CONFIG_MMC_PARANOID_SD_INIT
1046 int retries = 5;
1047#endif
993 1048
994 BUG_ON(!host); 1049 BUG_ON(!host);
995 BUG_ON(!host->card); 1050 BUG_ON(!host->card);
996 1051
997 mmc_claim_host(host); 1052 mmc_claim_host(host);
998 1053
999 /* 1054 /*
1000 * Just check if our card has been removed. 1055 * Just check if our card has been removed.
1001 */ 1056 */
1002 err = mmc_send_status(host->card, NULL); 1057#ifdef CONFIG_MMC_PARANOID_SD_INIT
1003 1058 while(retries) {
1059 err = _mmc_detect_card_removed(host);
1060 if (err) {
1061 retries--;
1062 udelay(5);
1063 continue;
1064 }
1065 break;
1066 }
1067 if (!retries) {
1068 printk(KERN_ERR "%s(%s): Unable to re-detect card (%d)\n",
1069 __func__, mmc_hostname(host), err);
1070 }
1071#else
1072 err = _mmc_detect_card_removed(host);
1073#endif
1004 mmc_release_host(host); 1074 mmc_release_host(host);
1005 1075
1006 if (err) { 1076 if (err) {
@@ -1039,12 +1109,31 @@ static int mmc_sd_suspend(struct mmc_host *host)
1039static int mmc_sd_resume(struct mmc_host *host) 1109static int mmc_sd_resume(struct mmc_host *host)
1040{ 1110{
1041 int err; 1111 int err;
1112#ifdef CONFIG_MMC_PARANOID_SD_INIT
1113 int retries;
1114#endif
1042 1115
1043 BUG_ON(!host); 1116 BUG_ON(!host);
1044 BUG_ON(!host->card); 1117 BUG_ON(!host->card);
1045 1118
1046 mmc_claim_host(host); 1119 mmc_claim_host(host);
1120#ifdef CONFIG_MMC_PARANOID_SD_INIT
1121 retries = 5;
1122 while (retries) {
1123 err = mmc_sd_init_card(host, host->ocr, host->card);
1124
1125 if (err) {
1126 printk(KERN_ERR "%s: Re-init card rc = %d (retries = %d)\n",
1127 mmc_hostname(host), err, retries);
1128 mdelay(5);
1129 retries--;
1130 continue;
1131 }
1132 break;
1133 }
1134#else
1047 err = mmc_sd_init_card(host, host->ocr, host->card); 1135 err = mmc_sd_init_card(host, host->ocr, host->card);
1136#endif
1048 mmc_release_host(host); 1137 mmc_release_host(host);
1049 1138
1050 return err; 1139 return err;
@@ -1068,6 +1157,7 @@ static const struct mmc_bus_ops mmc_sd_ops = {
1068 .suspend = NULL, 1157 .suspend = NULL,
1069 .resume = NULL, 1158 .resume = NULL,
1070 .power_restore = mmc_sd_power_restore, 1159 .power_restore = mmc_sd_power_restore,
1160 .alive = mmc_sd_alive,
1071}; 1161};
1072 1162
1073static const struct mmc_bus_ops mmc_sd_ops_unsafe = { 1163static const struct mmc_bus_ops mmc_sd_ops_unsafe = {
@@ -1076,6 +1166,7 @@ static const struct mmc_bus_ops mmc_sd_ops_unsafe = {
1076 .suspend = mmc_sd_suspend, 1166 .suspend = mmc_sd_suspend,
1077 .resume = mmc_sd_resume, 1167 .resume = mmc_sd_resume,
1078 .power_restore = mmc_sd_power_restore, 1168 .power_restore = mmc_sd_power_restore,
1169 .alive = mmc_sd_alive,
1079}; 1170};
1080 1171
1081static void mmc_sd_attach_bus_ops(struct mmc_host *host) 1172static void mmc_sd_attach_bus_ops(struct mmc_host *host)
@@ -1096,6 +1187,9 @@ int mmc_attach_sd(struct mmc_host *host)
1096{ 1187{
1097 int err; 1188 int err;
1098 u32 ocr; 1189 u32 ocr;
1190#ifdef CONFIG_MMC_PARANOID_SD_INIT
1191 int retries;
1192#endif
1099 1193
1100 BUG_ON(!host); 1194 BUG_ON(!host);
1101 WARN_ON(!host->claimed); 1195 WARN_ON(!host->claimed);
@@ -1103,15 +1197,18 @@ int mmc_attach_sd(struct mmc_host *host)
1103 /* Make sure we are at 3.3V signalling voltage */ 1197 /* Make sure we are at 3.3V signalling voltage */
1104 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, false); 1198 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, false);
1105 if (err) 1199 if (err)
1106 return err; 1200 goto ret_err;
1107 1201
1108 /* Disable preset value enable if already set since last time */ 1202 /* Disable preset value enable if already set since last time */
1109 if (host->ops->enable_preset_value) 1203 if (host->ops->enable_preset_value) {
1204 mmc_host_clk_hold(host);
1110 host->ops->enable_preset_value(host, false); 1205 host->ops->enable_preset_value(host, false);
1206 mmc_host_clk_release(host);
1207 }
1111 1208
1112 err = mmc_send_app_op_cond(host, 0, &ocr); 1209 err = mmc_send_app_op_cond(host, 0, &ocr);
1113 if (err) 1210 if (err)
1114 return err; 1211 goto ret_err;
1115 1212
1116 mmc_sd_attach_bus_ops(host); 1213 mmc_sd_attach_bus_ops(host);
1117 if (host->ocr_avail_sd) 1214 if (host->ocr_avail_sd)
@@ -1160,9 +1257,27 @@ int mmc_attach_sd(struct mmc_host *host)
1160 /* 1257 /*
1161 * Detect and init the card. 1258 * Detect and init the card.
1162 */ 1259 */
1260#ifdef CONFIG_MMC_PARANOID_SD_INIT
1261 retries = 5;
1262 while (retries) {
1263 err = mmc_sd_init_card(host, host->ocr, NULL);
1264 if (err) {
1265 retries--;
1266 continue;
1267 }
1268 break;
1269 }
1270
1271 if (!retries) {
1272 printk(KERN_ERR "%s: mmc_sd_init_card() failure (err = %d)\n",
1273 mmc_hostname(host), err);
1274 goto err;
1275 }
1276#else
1163 err = mmc_sd_init_card(host, host->ocr, NULL); 1277 err = mmc_sd_init_card(host, host->ocr, NULL);
1164 if (err) 1278 if (err)
1165 goto err; 1279 goto err;
1280#endif
1166 1281
1167 mmc_release_host(host); 1282 mmc_release_host(host);
1168 err = mmc_add_card(host->card); 1283 err = mmc_add_card(host->card);
@@ -1170,6 +1285,8 @@ int mmc_attach_sd(struct mmc_host *host)
1170 if (err) 1285 if (err)
1171 goto remove_card; 1286 goto remove_card;
1172 1287
1288 mmc_host_sd_set_init_stat(host);
1289 mmc_host_sd_clear_prev_stat(host);
1173 return 0; 1290 return 0;
1174 1291
1175remove_card: 1292remove_card:
@@ -1182,6 +1299,9 @@ err:
1182 1299
1183 printk(KERN_ERR "%s: error %d whilst initialising SD card\n", 1300 printk(KERN_ERR "%s: error %d whilst initialising SD card\n",
1184 mmc_hostname(host), err); 1301 mmc_hostname(host), err);
1302ret_err:
1303 mmc_host_sd_clear_init_stat(host);
1304 mmc_host_sd_set_prev_stat(host);
1185 1305
1186 return err; 1306 return err;
1187} 1307}
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 9b18b541659..ff1850eeacf 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -14,6 +14,7 @@
14 14
15#include <linux/mmc/host.h> 15#include <linux/mmc/host.h>
16#include <linux/mmc/card.h> 16#include <linux/mmc/card.h>
17#include <linux/mmc/mmc.h>
17#include <linux/mmc/sdio.h> 18#include <linux/mmc/sdio.h>
18#include <linux/mmc/sdio_func.h> 19#include <linux/mmc/sdio_func.h>
19#include <linux/mmc/sdio_ids.h> 20#include <linux/mmc/sdio_ids.h>
@@ -27,6 +28,10 @@
27#include "sdio_ops.h" 28#include "sdio_ops.h"
28#include "sdio_cis.h" 29#include "sdio_cis.h"
29 30
31#ifdef CONFIG_MMC_EMBEDDED_SDIO
32#include <linux/mmc/sdio_ids.h>
33#endif
34
30static int sdio_read_fbr(struct sdio_func *func) 35static int sdio_read_fbr(struct sdio_func *func)
31{ 36{
32 int ret; 37 int ret;
@@ -97,11 +102,13 @@ fail:
97 return ret; 102 return ret;
98} 103}
99 104
100static int sdio_read_cccr(struct mmc_card *card) 105static int sdio_read_cccr(struct mmc_card *card, u32 ocr)
101{ 106{
102 int ret; 107 int ret;
103 int cccr_vsn; 108 int cccr_vsn;
109 int uhs = ocr & R4_18V_PRESENT;
104 unsigned char data; 110 unsigned char data;
111 unsigned char speed;
105 112
106 memset(&card->cccr, 0, sizeof(struct sdio_cccr)); 113 memset(&card->cccr, 0, sizeof(struct sdio_cccr));
107 114
@@ -111,7 +118,7 @@ static int sdio_read_cccr(struct mmc_card *card)
111 118
112 cccr_vsn = data & 0x0f; 119 cccr_vsn = data & 0x0f;
113 120
114 if (cccr_vsn > SDIO_CCCR_REV_1_20) { 121 if (cccr_vsn > SDIO_CCCR_REV_3_00) {
115 printk(KERN_ERR "%s: unrecognised CCCR structure version %d\n", 122 printk(KERN_ERR "%s: unrecognised CCCR structure version %d\n",
116 mmc_hostname(card->host), cccr_vsn); 123 mmc_hostname(card->host), cccr_vsn);
117 return -EINVAL; 124 return -EINVAL;
@@ -140,12 +147,60 @@ static int sdio_read_cccr(struct mmc_card *card)
140 } 147 }
141 148
142 if (cccr_vsn >= SDIO_CCCR_REV_1_20) { 149 if (cccr_vsn >= SDIO_CCCR_REV_1_20) {
143 ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &data); 150 ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
144 if (ret) 151 if (ret)
145 goto out; 152 goto out;
146 153
147 if (data & SDIO_SPEED_SHS) 154 card->scr.sda_spec3 = 0;
155 card->sw_caps.sd3_bus_mode = 0;
156 card->sw_caps.sd3_drv_type = 0;
157 if (cccr_vsn >= SDIO_CCCR_REV_3_00 && uhs) {
158 card->scr.sda_spec3 = 1;
159 ret = mmc_io_rw_direct(card, 0, 0,
160 SDIO_CCCR_UHS, 0, &data);
161 if (ret)
162 goto out;
163
164 if (card->host->caps &
165 (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
166 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
167 MMC_CAP_UHS_DDR50)) {
168 if (data & SDIO_UHS_DDR50)
169 card->sw_caps.sd3_bus_mode
170 |= SD_MODE_UHS_DDR50;
171
172 if (data & SDIO_UHS_SDR50)
173 card->sw_caps.sd3_bus_mode
174 |= SD_MODE_UHS_SDR50;
175
176 if (data & SDIO_UHS_SDR104)
177 card->sw_caps.sd3_bus_mode
178 |= SD_MODE_UHS_SDR104;
179 }
180
181 ret = mmc_io_rw_direct(card, 0, 0,
182 SDIO_CCCR_DRIVE_STRENGTH, 0, &data);
183 if (ret)
184 goto out;
185
186 if (data & SDIO_DRIVE_SDTA)
187 card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_A;
188 if (data & SDIO_DRIVE_SDTC)
189 card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_C;
190 if (data & SDIO_DRIVE_SDTD)
191 card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_D;
192 }
193
194 /* if no uhs mode ensure we check for high speed */
195 if (!card->sw_caps.sd3_bus_mode) {
196 if (speed & SDIO_SPEED_SHS) {
148 card->cccr.high_speed = 1; 197 card->cccr.high_speed = 1;
198 card->sw_caps.hs_max_dtr = 50000000;
199 } else {
200 card->cccr.high_speed = 0;
201 card->sw_caps.hs_max_dtr = 25000000;
202 }
203 }
149 } 204 }
150 205
151out: 206out:
@@ -327,6 +382,194 @@ static unsigned mmc_sdio_get_max_clock(struct mmc_card *card)
327 return max_dtr; 382 return max_dtr;
328} 383}
329 384
385static unsigned char host_drive_to_sdio_drive(int host_strength)
386{
387 switch (host_strength) {
388 case MMC_SET_DRIVER_TYPE_A:
389 return SDIO_DTSx_SET_TYPE_A;
390 case MMC_SET_DRIVER_TYPE_B:
391 return SDIO_DTSx_SET_TYPE_B;
392 case MMC_SET_DRIVER_TYPE_C:
393 return SDIO_DTSx_SET_TYPE_C;
394 case MMC_SET_DRIVER_TYPE_D:
395 return SDIO_DTSx_SET_TYPE_D;
396 default:
397 return SDIO_DTSx_SET_TYPE_B;
398 }
399}
400
401static void sdio_select_driver_type(struct mmc_card *card)
402{
403 int host_drv_type = SD_DRIVER_TYPE_B;
404 int card_drv_type = SD_DRIVER_TYPE_B;
405 int drive_strength;
406 unsigned char card_strength;
407 int err;
408
409 /*
410 * If the host doesn't support any of the Driver Types A,C or D,
411 * or there is no board specific handler then default Driver
412 * Type B is used.
413 */
414 if (!(card->host->caps &
415 (MMC_CAP_DRIVER_TYPE_A |
416 MMC_CAP_DRIVER_TYPE_C |
417 MMC_CAP_DRIVER_TYPE_D)))
418 return;
419
420 if (!card->host->ops->select_drive_strength)
421 return;
422
423 if (card->host->caps & MMC_CAP_DRIVER_TYPE_A)
424 host_drv_type |= SD_DRIVER_TYPE_A;
425
426 if (card->host->caps & MMC_CAP_DRIVER_TYPE_C)
427 host_drv_type |= SD_DRIVER_TYPE_C;
428
429 if (card->host->caps & MMC_CAP_DRIVER_TYPE_D)
430 host_drv_type |= SD_DRIVER_TYPE_D;
431
432 if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A)
433 card_drv_type |= SD_DRIVER_TYPE_A;
434
435 if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
436 card_drv_type |= SD_DRIVER_TYPE_C;
437
438 if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_D)
439 card_drv_type |= SD_DRIVER_TYPE_D;
440
441 /*
442 * The drive strength that the hardware can support
443 * depends on the board design. Pass the appropriate
444 * information and let the hardware specific code
445 * return what is possible given the options
446 */
447 drive_strength = card->host->ops->select_drive_strength(
448 card->sw_caps.uhs_max_dtr,
449 host_drv_type, card_drv_type);
450
451 /* if error just use default for drive strength B */
452 err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_DRIVE_STRENGTH, 0,
453 &card_strength);
454 if (err)
455 return;
456
457 card_strength &= ~(SDIO_DRIVE_DTSx_MASK<<SDIO_DRIVE_DTSx_SHIFT);
458 card_strength |= host_drive_to_sdio_drive(drive_strength);
459
460 err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_DRIVE_STRENGTH,
461 card_strength, NULL);
462
463 /* if error default to drive strength B */
464 if (!err)
465 mmc_set_driver_type(card->host, drive_strength);
466}
467
468
469static int sdio_set_bus_speed_mode(struct mmc_card *card)
470{
471 unsigned int bus_speed, timing;
472 int err;
473 unsigned char speed;
474
475 /*
476 * If the host doesn't support any of the UHS-I modes, fallback on
477 * default speed.
478 */
479 if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
480 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)))
481 return 0;
482
483 bus_speed = SDIO_SPEED_SDR12;
484 timing = MMC_TIMING_UHS_SDR12;
485 if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
486 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
487 bus_speed = SDIO_SPEED_SDR104;
488 timing = MMC_TIMING_UHS_SDR104;
489 card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
490 } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
491 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
492 bus_speed = SDIO_SPEED_DDR50;
493 timing = MMC_TIMING_UHS_DDR50;
494 card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
495 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
496 MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
497 SD_MODE_UHS_SDR50)) {
498 bus_speed = SDIO_SPEED_SDR50;
499 timing = MMC_TIMING_UHS_SDR50;
500 card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
501 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
502 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
503 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
504 bus_speed = SDIO_SPEED_SDR25;
505 timing = MMC_TIMING_UHS_SDR25;
506 card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
507 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
508 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
509 MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
510 SD_MODE_UHS_SDR12)) {
511 bus_speed = SDIO_SPEED_SDR12;
512 timing = MMC_TIMING_UHS_SDR12;
513 card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
514 }
515
516 err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
517 if (err)
518 return err;
519
520 speed &= ~SDIO_SPEED_BSS_MASK;
521 speed |= bus_speed;
522 err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_SPEED, speed, NULL);
523 if (err)
524 return err;
525
526 if (bus_speed) {
527 mmc_set_timing(card->host, timing);
528 mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr);
529 }
530
531 return 0;
532}
533
534/*
535 * UHS-I specific initialization procedure
536 */
537static int mmc_sdio_init_uhs_card(struct mmc_card *card)
538{
539 int err;
540
541 if (!card->scr.sda_spec3)
542 return 0;
543
544 /*
545 * Switch to wider bus (if supported).
546 */
547 if (card->host->caps & MMC_CAP_4_BIT_DATA) {
548 err = sdio_enable_4bit_bus(card);
549 if (err > 0) {
550 mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
551 err = 0;
552 }
553 }
554
555 /* Set the driver strength for the card */
556 sdio_select_driver_type(card);
557
558 /* Set bus speed mode of the card */
559 err = sdio_set_bus_speed_mode(card);
560 if (err)
561 goto out;
562
563 /* Initialize and start re-tuning timer */
564 if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning)
565 err = card->host->ops->execute_tuning(card->host,
566 MMC_SEND_TUNING_BLOCK);
567
568out:
569
570 return err;
571}
572
330/* 573/*
331 * Handle the detection and initialisation of a card. 574 * Handle the detection and initialisation of a card.
332 * 575 *
@@ -394,6 +637,30 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
394 host->ops->init_card(host, card); 637 host->ops->init_card(host, card);
395 638
396 /* 639 /*
640 * If the host and card support UHS-I mode request the card
641 * to switch to 1.8V signaling level. No 1.8v signalling if
642 * UHS mode is not enabled to maintain compatibilty and some
643 * systems that claim 1.8v signalling in fact do not support
644 * it.
645 */
646 if ((ocr & R4_18V_PRESENT) &&
647 (host->caps &
648 (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
649 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
650 MMC_CAP_UHS_DDR50))) {
651 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
652 true);
653 if (err) {
654 ocr &= ~R4_18V_PRESENT;
655 host->ocr &= ~R4_18V_PRESENT;
656 }
657 err = 0;
658 } else {
659 ocr &= ~R4_18V_PRESENT;
660 host->ocr &= ~R4_18V_PRESENT;
661 }
662
663 /*
397 * For native busses: set card RCA and quit open drain mode. 664 * For native busses: set card RCA and quit open drain mode.
398 */ 665 */
399 if (!powered_resume && !mmc_host_is_spi(host)) { 666 if (!powered_resume && !mmc_host_is_spi(host)) {
@@ -449,19 +716,35 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
449 goto finish; 716 goto finish;
450 } 717 }
451 718
452 /* 719#ifdef CONFIG_MMC_EMBEDDED_SDIO
453 * Read the common registers. 720 if (host->embedded_sdio_data.cccr)
454 */ 721 memcpy(&card->cccr, host->embedded_sdio_data.cccr, sizeof(struct sdio_cccr));
455 err = sdio_read_cccr(card); 722 else {
456 if (err) 723#endif
457 goto remove; 724 /*
725 * Read the common registers.
726 */
727 err = sdio_read_cccr(card, ocr);
728 if (err)
729 goto remove;
730#ifdef CONFIG_MMC_EMBEDDED_SDIO
731 }
732#endif
458 733
459 /* 734#ifdef CONFIG_MMC_EMBEDDED_SDIO
460 * Read the common CIS tuples. 735 if (host->embedded_sdio_data.cis)
461 */ 736 memcpy(&card->cis, host->embedded_sdio_data.cis, sizeof(struct sdio_cis));
462 err = sdio_read_common_cis(card); 737 else {
463 if (err) 738#endif
464 goto remove; 739 /*
740 * Read the common CIS tuples.
741 */
742 err = sdio_read_common_cis(card);
743 if (err)
744 goto remove;
745#ifdef CONFIG_MMC_EMBEDDED_SDIO
746 }
747#endif
465 748
466 if (oldcard) { 749 if (oldcard) {
467 int same = (card->cis.vendor == oldcard->cis.vendor && 750 int same = (card->cis.vendor == oldcard->cis.vendor &&
@@ -494,6 +777,16 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
494 if (err) 777 if (err)
495 goto remove; 778 goto remove;
496 779
780 /* Initialization sequence for UHS-I cards */
781 /* Only if card supports 1.8v and UHS signaling */
782 if ((ocr & R4_18V_PRESENT) && card->sw_caps.sd3_bus_mode) {
783 err = mmc_sdio_init_uhs_card(card);
784 if (err)
785 goto remove;
786
787 /* Card is an ultra-high-speed card */
788 mmc_card_set_uhs(card);
789 } else {
497 /* 790 /*
498 * Switch to high-speed (if supported). 791 * Switch to high-speed (if supported).
499 */ 792 */
@@ -516,7 +809,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
516 mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4); 809 mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
517 else if (err) 810 else if (err)
518 goto remove; 811 goto remove;
519 812 }
520finish: 813finish:
521 if (!oldcard) 814 if (!oldcard)
522 host->card = card; 815 host->card = card;
@@ -552,6 +845,14 @@ static void mmc_sdio_remove(struct mmc_host *host)
552} 845}
553 846
554/* 847/*
848 * Card detection - card is alive.
849 */
850static int mmc_sdio_alive(struct mmc_host *host)
851{
852 return mmc_select_card(host->card);
853}
854
855/*
555 * Card detection callback from host. 856 * Card detection callback from host.
556 */ 857 */
557static void mmc_sdio_detect(struct mmc_host *host) 858static void mmc_sdio_detect(struct mmc_host *host)
@@ -573,7 +874,7 @@ static void mmc_sdio_detect(struct mmc_host *host)
573 /* 874 /*
574 * Just check if our card has been removed. 875 * Just check if our card has been removed.
575 */ 876 */
576 err = mmc_select_card(host->card); 877 err = _mmc_detect_card_removed(host);
577 878
578 mmc_release_host(host); 879 mmc_release_host(host);
579 880
@@ -632,11 +933,14 @@ static int mmc_sdio_suspend(struct mmc_host *host)
632 } 933 }
633 } 934 }
634 935
936#ifdef CONFIG_MACH_PX
937#else
635 if (!err && mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) { 938 if (!err && mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
636 mmc_claim_host(host); 939 mmc_claim_host(host);
637 sdio_disable_wide(host->card); 940 sdio_disable_wide(host->card);
638 mmc_release_host(host); 941 mmc_release_host(host);
639 } 942 }
943#endif
640 944
641 return err; 945 return err;
642} 946}
@@ -728,7 +1032,7 @@ static int mmc_sdio_power_restore(struct mmc_host *host)
728 if (host->ocr_avail_sdio) 1032 if (host->ocr_avail_sdio)
729 host->ocr_avail = host->ocr_avail_sdio; 1033 host->ocr_avail = host->ocr_avail_sdio;
730 1034
731 host->ocr = mmc_select_voltage(host, ocr & ~0x7F); 1035 host->ocr = mmc_select_voltage(host, ocr & ~0xFF);
732 if (!host->ocr) { 1036 if (!host->ocr) {
733 ret = -EINVAL; 1037 ret = -EINVAL;
734 goto out; 1038 goto out;
@@ -751,9 +1055,12 @@ static const struct mmc_bus_ops mmc_sdio_ops = {
751 .suspend = mmc_sdio_suspend, 1055 .suspend = mmc_sdio_suspend,
752 .resume = mmc_sdio_resume, 1056 .resume = mmc_sdio_resume,
753 .power_restore = mmc_sdio_power_restore, 1057 .power_restore = mmc_sdio_power_restore,
1058 .alive = mmc_sdio_alive,
754}; 1059};
755 1060
756 1061#if defined(CONFIG_MACH_M0) && defined(CONFIG_TARGET_LOCALE_EUR)
1062extern void print_epll_con0(void);
1063#endif
757/* 1064/*
758 * Starting point for SDIO card init. 1065 * Starting point for SDIO card init.
759 */ 1066 */
@@ -770,6 +1077,11 @@ int mmc_attach_sdio(struct mmc_host *host)
770 if (err) 1077 if (err)
771 return err; 1078 return err;
772 1079
1080#if defined(CONFIG_MACH_M0) && defined(CONFIG_TARGET_LOCALE_EUR)
1081 /* a sdio module is detected. print EPLL */
1082 print_epll_con0();
1083#endif
1084
773 mmc_attach_bus(host, &mmc_sdio_ops); 1085 mmc_attach_bus(host, &mmc_sdio_ops);
774 if (host->ocr_avail_sdio) 1086 if (host->ocr_avail_sdio)
775 host->ocr_avail = host->ocr_avail_sdio; 1087 host->ocr_avail = host->ocr_avail_sdio;
@@ -778,11 +1090,11 @@ int mmc_attach_sdio(struct mmc_host *host)
778 * Sanity check the voltages that the card claims to 1090 * Sanity check the voltages that the card claims to
779 * support. 1091 * support.
780 */ 1092 */
781 if (ocr & 0x7F) { 1093 if (ocr & 0xFF) {
782 printk(KERN_WARNING "%s: card claims to support voltages " 1094 printk(KERN_WARNING "%s: card claims to support voltages "
783 "below the defined range. These will be ignored.\n", 1095 "below the defined range. These will be ignored.\n",
784 mmc_hostname(host)); 1096 mmc_hostname(host));
785 ocr &= ~0x7F; 1097 ocr &= ~0xFF;
786 } 1098 }
787 1099
788 host->ocr = mmc_select_voltage(host, ocr); 1100 host->ocr = mmc_select_voltage(host, ocr);
@@ -799,8 +1111,17 @@ int mmc_attach_sdio(struct mmc_host *host)
799 * Detect and init the card. 1111 * Detect and init the card.
800 */ 1112 */
801 err = mmc_sdio_init_card(host, host->ocr, NULL, 0); 1113 err = mmc_sdio_init_card(host, host->ocr, NULL, 0);
1114 if (err) {
1115 if (err == -EAGAIN) {
1116 /*
1117 * Retry initialization with S18R set to 0.
1118 */
1119 host->ocr &= ~R4_18V_PRESENT;
1120 err = mmc_sdio_init_card(host, host->ocr, NULL, 0);
1121 }
802 if (err) 1122 if (err)
803 goto err; 1123 goto err;
1124 }
804 card = host->card; 1125 card = host->card;
805 1126
806 /* 1127 /*
@@ -827,14 +1148,36 @@ int mmc_attach_sdio(struct mmc_host *host)
827 funcs = (ocr & 0x70000000) >> 28; 1148 funcs = (ocr & 0x70000000) >> 28;
828 card->sdio_funcs = 0; 1149 card->sdio_funcs = 0;
829 1150
1151#ifdef CONFIG_MMC_EMBEDDED_SDIO
1152 if (host->embedded_sdio_data.funcs)
1153 card->sdio_funcs = funcs = host->embedded_sdio_data.num_funcs;
1154#endif
1155
830 /* 1156 /*
831 * Initialize (but don't add) all present functions. 1157 * Initialize (but don't add) all present functions.
832 */ 1158 */
833 for (i = 0; i < funcs; i++, card->sdio_funcs++) { 1159 for (i = 0; i < funcs; i++, card->sdio_funcs++) {
834 err = sdio_init_func(host->card, i + 1); 1160#ifdef CONFIG_MMC_EMBEDDED_SDIO
835 if (err) 1161 if (host->embedded_sdio_data.funcs) {
836 goto remove; 1162 struct sdio_func *tmp;
837 1163
1164 tmp = sdio_alloc_func(host->card);
1165 if (IS_ERR(tmp))
1166 goto remove;
1167 tmp->num = (i + 1);
1168 card->sdio_func[i] = tmp;
1169 tmp->class = host->embedded_sdio_data.funcs[i].f_class;
1170 tmp->max_blksize = host->embedded_sdio_data.funcs[i].f_maxblksize;
1171 tmp->vendor = card->cis.vendor;
1172 tmp->device = card->cis.device;
1173 } else {
1174#endif
1175 err = sdio_init_func(host->card, i + 1);
1176 if (err)
1177 goto remove;
1178#ifdef CONFIG_MMC_EMBEDDED_SDIO
1179 }
1180#endif
838 /* 1181 /*
839 * Enable Runtime PM for this func (if supported) 1182 * Enable Runtime PM for this func (if supported)
840 */ 1183 */
@@ -882,3 +1225,84 @@ err:
882 return err; 1225 return err;
883} 1226}
884 1227
1228int sdio_reset_comm(struct mmc_card *card)
1229{
1230 struct mmc_host *host = card->host;
1231 u32 ocr;
1232 int err;
1233
1234 printk("%s():\n", __func__);
1235 mmc_claim_host(host);
1236
1237 mmc_go_idle(host);
1238
1239 mmc_set_clock(host, host->f_min);
1240
1241 err = mmc_send_io_op_cond(host, 0, &ocr);
1242 if (err)
1243 goto err;
1244
1245 if (ocr & 0xFF) {
1246 printk(KERN_WARNING "%s: card claims to support voltages "
1247 "below the defined range. These will be ignored.\n",
1248 mmc_hostname(host));
1249 ocr &= ~0xFF;
1250 }
1251
1252 host->ocr = mmc_select_voltage(host, ocr);
1253 if (!host->ocr) {
1254 err = -EINVAL;
1255 goto err;
1256 }
1257
1258 err = mmc_send_io_op_cond(host, host->ocr, &ocr);
1259 if (err)
1260 goto err;
1261
1262 if (mmc_host_is_spi(host)) {
1263 err = mmc_spi_set_crc(host, use_spi_crc);
1264 if (err)
1265 goto err;
1266 }
1267
1268 if (!mmc_host_is_spi(host)) {
1269 err = mmc_send_relative_addr(host, &card->rca);
1270 if (err)
1271 goto err;
1272 mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
1273 }
1274 if (!mmc_host_is_spi(host)) {
1275 err = mmc_select_card(card);
1276 if (err)
1277 goto err;
1278 }
1279
1280 /*
1281 * Switch to high-speed (if supported).
1282 */
1283 err = sdio_enable_hs(card);
1284 if (err > 0)
1285 mmc_sd_go_highspeed(card);
1286 else if (err)
1287 goto err;
1288
1289 /*
1290 * Change to the card's maximum speed.
1291 */
1292 mmc_set_clock(host, mmc_sdio_get_max_clock(card));
1293
1294 err = sdio_enable_4bit_bus(card);
1295 if (err > 0)
1296 mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
1297 else if (err)
1298 goto err;
1299
1300 mmc_release_host(host);
1301 return 0;
1302err:
1303 printk("%s: Error resetting SDIO communications (%d)\n",
1304 mmc_hostname(host), err);
1305 mmc_release_host(host);
1306 return err;
1307}
1308EXPORT_SYMBOL(sdio_reset_comm);
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index d2565df8a7f..52429a98201 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -23,6 +23,10 @@
23#include "sdio_cis.h" 23#include "sdio_cis.h"
24#include "sdio_bus.h" 24#include "sdio_bus.h"
25 25
26#ifdef CONFIG_MMC_EMBEDDED_SDIO
27#include <linux/mmc/host.h>
28#endif
29
26/* show configuration fields */ 30/* show configuration fields */
27#define sdio_config_attr(field, format_string) \ 31#define sdio_config_attr(field, format_string) \
28static ssize_t \ 32static ssize_t \
@@ -260,7 +264,14 @@ static void sdio_release_func(struct device *dev)
260{ 264{
261 struct sdio_func *func = dev_to_sdio_func(dev); 265 struct sdio_func *func = dev_to_sdio_func(dev);
262 266
263 sdio_free_func_cis(func); 267#ifdef CONFIG_MMC_EMBEDDED_SDIO
268 /*
269 * If this device is embedded then we never allocated
270 * cis tables for this func
271 */
272 if (!func->card->host->embedded_sdio_data.funcs)
273#endif
274 sdio_free_func_cis(func);
264 275
265 if (func->info) 276 if (func->info)
266 kfree(func->info); 277 kfree(func->info);
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index 0f687cdeb06..3169452e031 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -383,6 +383,39 @@ u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret)
383EXPORT_SYMBOL_GPL(sdio_readb); 383EXPORT_SYMBOL_GPL(sdio_readb);
384 384
385/** 385/**
386 * sdio_readb_ext - read a single byte from a SDIO function
387 * @func: SDIO function to access
388 * @addr: address to read
389 * @err_ret: optional status value from transfer
390 * @in: value to add to argument
391 *
392 * Reads a single byte from the address space of a given SDIO
393 * function. If there is a problem reading the address, 0xff
394 * is returned and @err_ret will contain the error code.
395 */
396unsigned char sdio_readb_ext(struct sdio_func *func, unsigned int addr,
397 int *err_ret, unsigned in)
398{
399 int ret;
400 unsigned char val;
401
402 BUG_ON(!func);
403
404 if (err_ret)
405 *err_ret = 0;
406
407 ret = mmc_io_rw_direct(func->card, 0, func->num, addr, (u8)in, &val);
408 if (ret) {
409 if (err_ret)
410 *err_ret = ret;
411 return 0xFF;
412 }
413
414 return val;
415}
416EXPORT_SYMBOL_GPL(sdio_readb_ext);
417
418/**
386 * sdio_writeb - write a single byte to a SDIO function 419 * sdio_writeb - write a single byte to a SDIO function
387 * @func: SDIO function to access 420 * @func: SDIO function to access
388 * @b: byte to write 421 * @b: byte to write
@@ -711,8 +744,11 @@ int sdio_set_host_pm_flags(struct sdio_func *func, mmc_pm_flag_t flags)
711 744
712 host = func->card->host; 745 host = func->card->host;
713 746
747#ifdef CONFIG_MACH_PX
748#else
714 if (flags & ~host->pm_caps) 749 if (flags & ~host->pm_caps)
715 return -EINVAL; 750 return -EINVAL;
751#endif
716 752
717 /* function suspend methods are serialized, hence no lock needed */ 753 /* function suspend methods are serialized, hence no lock needed */
718 host->pm_flags |= flags; 754 host->pm_flags |= flags;
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index d58ae915337..7fe09d47c22 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -148,15 +148,21 @@ static int sdio_irq_thread(void *_host)
148 } 148 }
149 149
150 set_current_state(TASK_INTERRUPTIBLE); 150 set_current_state(TASK_INTERRUPTIBLE);
151 if (host->caps & MMC_CAP_SDIO_IRQ) 151 if (host->caps & MMC_CAP_SDIO_IRQ) {
152 mmc_host_clk_hold(host);
152 host->ops->enable_sdio_irq(host, 1); 153 host->ops->enable_sdio_irq(host, 1);
154 mmc_host_clk_release(host);
155 }
153 if (!kthread_should_stop()) 156 if (!kthread_should_stop())
154 schedule_timeout(period); 157 schedule_timeout(period);
155 set_current_state(TASK_RUNNING); 158 set_current_state(TASK_RUNNING);
156 } while (!kthread_should_stop()); 159 } while (!kthread_should_stop());
157 160
158 if (host->caps & MMC_CAP_SDIO_IRQ) 161 if (host->caps & MMC_CAP_SDIO_IRQ) {
162 mmc_host_clk_hold(host);
159 host->ops->enable_sdio_irq(host, 0); 163 host->ops->enable_sdio_irq(host, 0);
164 mmc_host_clk_release(host);
165 }
160 166
161 pr_debug("%s: IRQ thread exiting with code %d\n", 167 pr_debug("%s: IRQ thread exiting with code %d\n",
162 mmc_hostname(host), ret); 168 mmc_hostname(host), ret);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 56dbf3f6ad0..e231dcc1059 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -24,6 +24,36 @@ config MMC_PXA
24 24
25 If unsure, say N. 25 If unsure, say N.
26 26
27config MMC_MSHCI
28 tristate "Mobile Storage Host Controller Interface support"
29 depends on HAS_DMA
30 help
31 This selects the Mobile Storage Host Controller Interface.
32 It is made by synopsys. It supports SD/MMC card.
33
34 If you have a controller with this interface, say Y or M here. You
35 also need to enable an appropriate bus interface.
36
37 If unsure, say N.
38
39config MMC_MSHCI_S3C_DMA_MAP
40 tristate "Use own S3C_DMA_MAP function for mshci"
41 depends on MMC_MSHCI
42 help
43 This selects using the s3c_dma_map_sg, s3c_unmap_sg functions.
44 Those functions are optimized for flushing cache.
45
46 If unsure, say N.
47
48config MMC_MSHCI_ASYNC_OPS
49 tristate "Use Asyn ops like pre_req, post_req"
50 depends on MMC_MSHCI
51 help
52 This selects using the pre_req and post_req functions.
53 These functions might make the performance of MMC better.
54
55 If unsure, say N.
56
27config MMC_SDHCI 57config MMC_SDHCI
28 tristate "Secure Digital Host Controller Interface support" 58 tristate "Secure Digital Host Controller Interface support"
29 depends on HAS_DMA 59 depends on HAS_DMA
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 58a5cf73d6e..7e8ec52f09c 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -7,6 +7,10 @@ obj-$(CONFIG_MMC_PXA) += pxamci.o
7obj-$(CONFIG_MMC_IMX) += imxmmc.o 7obj-$(CONFIG_MMC_IMX) += imxmmc.o
8obj-$(CONFIG_MMC_MXC) += mxcmmc.o 8obj-$(CONFIG_MMC_MXC) += mxcmmc.o
9obj-$(CONFIG_MMC_MXS) += mxs-mmc.o 9obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
10obj-$(CONFIG_MMC_DW) += dw_mmc.o
11obj-$(CONFIG_MMC_MSHCI) += mshci.o
12obj-$(CONFIG_MMC_MSHCI) += mshci-s3c.o
13obj-$(CONFIG_MMC_MSHCI_S3C_DMA_MAP) += mshci-s3c-dma.o
10obj-$(CONFIG_MMC_SDHCI) += sdhci.o 14obj-$(CONFIG_MMC_SDHCI) += sdhci.o
11obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o 15obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
12obj-$(CONFIG_MMC_SDHCI_PXA) += sdhci-pxa.o 16obj-$(CONFIG_MMC_SDHCI_PXA) += sdhci-pxa.o
@@ -38,7 +42,6 @@ obj-$(CONFIG_MMC_SDHI) += sh_mobile_sdhi.o
38obj-$(CONFIG_MMC_CB710) += cb710-mmc.o 42obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
39obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o 43obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
40obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o 44obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
41obj-$(CONFIG_MMC_DW) += dw_mmc.o
42obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o 45obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
43obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o 46obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
44obj-$(CONFIG_MMC_VUB300) += vub300.o 47obj-$(CONFIG_MMC_VUB300) += vub300.o
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 66dcddb9c20..88ee9928909 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -34,6 +34,10 @@
34#include <linux/bitops.h> 34#include <linux/bitops.h>
35#include <linux/regulator/consumer.h> 35#include <linux/regulator/consumer.h>
36 36
37#include <plat/cpu.h>
38
39#include <mach/board_rev.h>
40
37#include "dw_mmc.h" 41#include "dw_mmc.h"
38 42
39/* Common flag combinations */ 43/* Common flag combinations */
@@ -46,7 +50,15 @@
46 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE) 50 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
47#define DW_MCI_SEND_STATUS 1 51#define DW_MCI_SEND_STATUS 1
48#define DW_MCI_RECV_STATUS 2 52#define DW_MCI_RECV_STATUS 2
49#define DW_MCI_DMA_THRESHOLD 16 53#define DW_MCI_DMA_THRESHOLD 4
54
55/* Incresing sg_list size for eMMC 4.5 performance by incresing
56 max DMA Transfer size from 1MB to 4MB */
57//#if defined(CONFIG_MACH_P10)
58#define SG_LIST_ALLOC_SIZE (PAGE_SIZE * 4)
59//#else
60//#define SG_LIST_ALLOC_SIZE PAGE_SIZE
61//#endif
50 62
51#ifdef CONFIG_MMC_DW_IDMAC 63#ifdef CONFIG_MMC_DW_IDMAC
52struct idmac_desc { 64struct idmac_desc {
@@ -61,7 +73,7 @@ struct idmac_desc {
61 73
62 u32 des1; /* Buffer sizes */ 74 u32 des1; /* Buffer sizes */
63#define IDMAC_SET_BUFFER1_SIZE(d, s) \ 75#define IDMAC_SET_BUFFER1_SIZE(d, s) \
64 ((d)->des1 = ((d)->des1 & 0x03ffc000) | ((s) & 0x3fff)) 76 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
65 77
66 u32 des2; /* buffer 1 physical address */ 78 u32 des2; /* buffer 1 physical address */
67 79
@@ -100,6 +112,27 @@ struct dw_mci_slot {
100 int last_detect_state; 112 int last_detect_state;
101}; 113};
102 114
115#define MAX_TUING_LOOP 40
116
117static const u8 tuning_blk_pattern[] = {
118 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
119 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
120 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
121 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
122 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
123 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
124 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
125 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
126 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
127 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
128 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
129 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
130 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
131 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
132 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
133 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
134};
135
103#if defined(CONFIG_DEBUG_FS) 136#if defined(CONFIG_DEBUG_FS)
104static int dw_mci_req_show(struct seq_file *s, void *v) 137static int dw_mci_req_show(struct seq_file *s, void *v)
105{ 138{
@@ -221,6 +254,54 @@ err:
221} 254}
222#endif /* defined(CONFIG_DEBUG_FS) */ 255#endif /* defined(CONFIG_DEBUG_FS) */
223 256
257static void dw_mci_clear_set_irqs(struct dw_mci *host, u32 clear, u32 set)
258{
259 u32 ier;
260
261 /* clear interrupt */
262 mci_writel(host, RINTSTS, clear);
263
264 ier = mci_readl(host, INTMASK);
265
266 ier &= ~clear;
267 ier |= set;
268
269 mci_writel(host, INTMASK, ier);
270}
271
272static void dw_mci_unmask_irqs(struct dw_mci *host, u32 irqs)
273{
274 dw_mci_clear_set_irqs(host, 0, irqs);
275}
276
277static void dw_mci_mask_irqs(struct dw_mci *host, u32 irqs)
278{
279 dw_mci_clear_set_irqs(host, irqs, 0);
280}
281
282static void dw_mci_set_card_detection(struct dw_mci *host, bool enable)
283{
284 u32 irqs = SDMMC_INT_CD;
285
286 if (host->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
287 return;
288
289 if (enable)
290 dw_mci_unmask_irqs(host, irqs);
291 else
292 dw_mci_mask_irqs(host, irqs);
293}
294
295static void dw_mci_enable_card_detection(struct dw_mci *host)
296{
297 dw_mci_set_card_detection(host, true);
298}
299
300static void dw_mci_disable_card_detection(struct dw_mci *host)
301{
302 dw_mci_set_card_detection(host, false);
303}
304
224static void dw_mci_set_timeout(struct dw_mci *host) 305static void dw_mci_set_timeout(struct dw_mci *host)
225{ 306{
226 /* timeout (maximum) */ 307 /* timeout (maximum) */
@@ -230,6 +311,7 @@ static void dw_mci_set_timeout(struct dw_mci *host)
230static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) 311static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
231{ 312{
232 struct mmc_data *data; 313 struct mmc_data *data;
314 struct dw_mci_slot *slot = mmc_priv(mmc);
233 u32 cmdr; 315 u32 cmdr;
234 cmd->error = -EINPROGRESS; 316 cmd->error = -EINPROGRESS;
235 317
@@ -259,6 +341,10 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
259 cmdr |= SDMMC_CMD_DAT_WR; 341 cmdr |= SDMMC_CMD_DAT_WR;
260 } 342 }
261 343
344 /* Use hold bit register */
345 if (slot->host->pdata->set_io_timing)
346 cmdr |= SDMMC_USE_HOLD_REG;
347
262 return cmdr; 348 return cmdr;
263} 349}
264 350
@@ -284,7 +370,7 @@ static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
284/* DMA interface functions */ 370/* DMA interface functions */
285static void dw_mci_stop_dma(struct dw_mci *host) 371static void dw_mci_stop_dma(struct dw_mci *host)
286{ 372{
287 if (host->use_dma) { 373 if (host->using_dma) {
288 host->dma_ops->stop(host); 374 host->dma_ops->stop(host);
289 host->dma_ops->cleanup(host); 375 host->dma_ops->cleanup(host);
290 } else { 376 } else {
@@ -299,9 +385,10 @@ static void dw_mci_dma_cleanup(struct dw_mci *host)
299 struct mmc_data *data = host->data; 385 struct mmc_data *data = host->data;
300 386
301 if (data) 387 if (data)
302 dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len, 388 if (!data->host_cookie)
303 ((data->flags & MMC_DATA_WRITE) 389 dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
304 ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); 390 ((data->flags & MMC_DATA_WRITE)
391 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
305} 392}
306 393
307static void dw_mci_idmac_stop_dma(struct dw_mci *host) 394static void dw_mci_idmac_stop_dma(struct dw_mci *host)
@@ -398,7 +485,7 @@ static int dw_mci_idmac_init(struct dw_mci *host)
398 int i; 485 int i;
399 486
400 /* Number of descriptors in the ring buffer */ 487 /* Number of descriptors in the ring buffer */
401 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc); 488 host->ring_size = host->buf_size / sizeof(struct idmac_desc);
402 489
403 /* Forward link the descriptor list */ 490 /* Forward link the descriptor list */
404 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) 491 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
@@ -417,24 +504,15 @@ static int dw_mci_idmac_init(struct dw_mci *host)
417 return 0; 504 return 0;
418} 505}
419 506
420static struct dw_mci_dma_ops dw_mci_idmac_ops = { 507static int dw_mci_pre_dma_transfer(struct dw_mci *host,
421 .init = dw_mci_idmac_init, 508 struct mmc_data *data,
422 .start = dw_mci_idmac_start_dma, 509 int next)
423 .stop = dw_mci_idmac_stop_dma,
424 .complete = dw_mci_idmac_complete_dma,
425 .cleanup = dw_mci_dma_cleanup,
426};
427#endif /* CONFIG_MMC_DW_IDMAC */
428
429static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
430{ 510{
431 struct scatterlist *sg; 511 struct scatterlist *sg;
432 unsigned int i, direction, sg_len; 512 int i, sg_len;
433 u32 temp;
434 513
435 /* If we don't have a channel, we can't do DMA */ 514 if (!next && data->host_cookie)
436 if (!host->use_dma) 515 return data->host_cookie;
437 return -ENODEV;
438 516
439 /* 517 /*
440 * We don't do DMA on "complex" transfers, i.e. with 518 * We don't do DMA on "complex" transfers, i.e. with
@@ -443,6 +521,7 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
443 */ 521 */
444 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD) 522 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
445 return -EINVAL; 523 return -EINVAL;
524
446 if (data->blksz & 3) 525 if (data->blksz & 3)
447 return -EINVAL; 526 return -EINVAL;
448 527
@@ -451,13 +530,95 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
451 return -EINVAL; 530 return -EINVAL;
452 } 531 }
453 532
454 if (data->flags & MMC_DATA_READ) 533 sg_len = dma_map_sg(&host->pdev->dev, data->sg,
455 direction = DMA_FROM_DEVICE; 534 data->sg_len, ((data->flags & MMC_DATA_WRITE)
456 else 535 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
457 direction = DMA_TO_DEVICE; 536 if (sg_len == 0)
537 return -EINVAL;
538
539 if (next)
540 data->host_cookie = sg_len;
541
542 return sg_len;
543}
544
545static void dw_mci_pre_req(struct mmc_host *mmc,
546 struct mmc_request *mrq,
547 bool is_first_req)
548{
549 struct dw_mci_slot *slot = mmc_priv(mmc);
550 struct mmc_data *data = mrq->data;
551
552 if (!data)
553 return;
554
555 if (data->host_cookie) {
556 data->host_cookie = 0;
557 return;
558 }
559
560 if (slot->host->use_dma) {
561 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
562 data->host_cookie = 0;
563 }
564}
565
566static void dw_mci_post_req(struct mmc_host *mmc,
567 struct mmc_request *mrq,
568 int err)
569{
570 struct dw_mci_slot *slot = mmc_priv(mmc);
571 struct mmc_data *data = mrq->data;
572
573 if (!data)
574 return;
458 575
459 sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, 576 if (slot->host->use_dma) {
460 direction); 577 if (data->host_cookie)
578 dma_unmap_sg(&slot->host->pdev->dev, data->sg,
579 data->sg_len,
580 ((data->flags & MMC_DATA_WRITE)
581 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
582 data->host_cookie = 0;
583 }
584}
585
586static struct dw_mci_dma_ops dw_mci_idmac_ops = {
587 .init = dw_mci_idmac_init,
588 .start = dw_mci_idmac_start_dma,
589 .stop = dw_mci_idmac_stop_dma,
590 .complete = dw_mci_idmac_complete_dma,
591 .cleanup = dw_mci_dma_cleanup,
592};
593#else
594static int dw_mci_pre_dma_transfer(struct dw_mci *host,
595 struct mmc_data *data,
596 bool next)
597{
598 return -ENOSYS;
599}
600#define dw_mci_pre_req NULL
601#define dw_mci_post_req NULL
602#endif /* CONFIG_MMC_DW_IDMAC */
603
604static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
605{
606 int sg_len;
607 u32 temp;
608
609 host->using_dma = 0;
610
611 /* If we don't have a channel, we can't do DMA */
612 if (!host->use_dma)
613 return -ENODEV;
614
615 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
616 if (sg_len < 0) {
617 host->dma_ops->stop(host);
618 return sg_len;
619 }
620
621 host->using_dma = 1;
461 622
462 dev_vdbg(&host->pdev->dev, 623 dev_vdbg(&host->pdev->dev,
463 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n", 624 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
@@ -470,6 +631,7 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
470 mci_writel(host, CTRL, temp); 631 mci_writel(host, CTRL, temp);
471 632
472 /* Disable RX/TX IRQs, let DMA handle it */ 633 /* Disable RX/TX IRQs, let DMA handle it */
634 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
473 temp = mci_readl(host, INTMASK); 635 temp = mci_readl(host, INTMASK);
474 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR); 636 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
475 mci_writel(host, INTMASK, temp); 637 mci_writel(host, INTMASK, temp);
@@ -490,13 +652,20 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
490 host->data = data; 652 host->data = data;
491 653
492 if (dw_mci_submit_data_dma(host, data)) { 654 if (dw_mci_submit_data_dma(host, data)) {
655 int flags = SG_MITER_ATOMIC;
656 if (host->data->flags & MMC_DATA_READ)
657 flags |= SG_MITER_TO_SG;
658 else
659 flags |= SG_MITER_FROM_SG;
660
661 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
493 host->sg = data->sg; 662 host->sg = data->sg;
494 host->pio_offset = 0;
495 if (data->flags & MMC_DATA_READ) 663 if (data->flags & MMC_DATA_READ)
496 host->dir_status = DW_MCI_RECV_STATUS; 664 host->dir_status = DW_MCI_RECV_STATUS;
497 else 665 else
498 host->dir_status = DW_MCI_SEND_STATUS; 666 host->dir_status = DW_MCI_SEND_STATUS;
499 667
668 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
500 temp = mci_readl(host, INTMASK); 669 temp = mci_readl(host, INTMASK);
501 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR; 670 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
502 mci_writel(host, INTMASK, temp); 671 mci_writel(host, INTMASK, temp);
@@ -574,17 +743,17 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot)
574 } 743 }
575 744
576 /* Set the current slot bus width */ 745 /* Set the current slot bus width */
577 mci_writel(host, CTYPE, slot->ctype); 746 mci_writel(host, CTYPE, (slot->ctype << slot->id));
578} 747}
579 748
580static void dw_mci_start_request(struct dw_mci *host, 749static void __dw_mci_start_request(struct dw_mci *host,
581 struct dw_mci_slot *slot) 750 struct dw_mci_slot *slot, struct mmc_command *cmd)
582{ 751{
583 struct mmc_request *mrq; 752 struct mmc_request *mrq;
584 struct mmc_command *cmd;
585 struct mmc_data *data; 753 struct mmc_data *data;
586 u32 cmdflags; 754 u32 cmdflags;
587 755
756 host->prv_err = 0;
588 mrq = slot->mrq; 757 mrq = slot->mrq;
589 if (host->pdata->select_slot) 758 if (host->pdata->select_slot)
590 host->pdata->select_slot(slot->id); 759 host->pdata->select_slot(slot->id);
@@ -599,14 +768,13 @@ static void dw_mci_start_request(struct dw_mci *host,
599 host->completed_events = 0; 768 host->completed_events = 0;
600 host->data_status = 0; 769 host->data_status = 0;
601 770
602 data = mrq->data; 771 data = cmd->data;
603 if (data) { 772 if (data) {
604 dw_mci_set_timeout(host); 773 dw_mci_set_timeout(host);
605 mci_writel(host, BYTCNT, data->blksz*data->blocks); 774 mci_writel(host, BYTCNT, data->blksz*data->blocks);
606 mci_writel(host, BLKSIZ, data->blksz); 775 mci_writel(host, BLKSIZ, data->blksz);
607 } 776 }
608 777
609 cmd = mrq->cmd;
610 cmdflags = dw_mci_prepare_command(slot->mmc, cmd); 778 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
611 779
612 /* this is the first command, send the initialization clock */ 780 /* this is the first command, send the initialization clock */
@@ -624,6 +792,17 @@ static void dw_mci_start_request(struct dw_mci *host,
624 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop); 792 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
625} 793}
626 794
795static void dw_mci_start_request(struct dw_mci *host,
796 struct dw_mci_slot *slot)
797{
798 struct mmc_request *mrq = slot->mrq;
799 struct mmc_command *cmd;
800
801 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
802 __dw_mci_start_request(host, slot, cmd);
803}
804
805/* must be called with host->lock held */
627static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot, 806static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
628 struct mmc_request *mrq) 807 struct mmc_request *mrq)
629{ 808{
@@ -647,15 +826,44 @@ static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
647{ 826{
648 struct dw_mci_slot *slot = mmc_priv(mmc); 827 struct dw_mci_slot *slot = mmc_priv(mmc);
649 struct dw_mci *host = slot->host; 828 struct dw_mci *host = slot->host;
829 ktime_t expr;
830 u64 add_time = 50000; /* 50us */
831 int timeout = 100000;
650 832
651 WARN_ON(slot->mrq); 833 WARN_ON(slot->mrq);
652 834
653 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) { 835 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
654 mrq->cmd->error = -ENOMEDIUM; 836 mrq->cmd->error = -ENOMEDIUM;
837 host->prv_err = 1;
655 mmc_request_done(mmc, mrq); 838 mmc_request_done(mmc, mrq);
656 return; 839 return;
657 } 840 }
658 841
842 do {
843 if (mrq->cmd->opcode == MMC_STOP_TRANSMISSION)
844 break;
845
846 if (mci_readl(host, STATUS) & (1 << 9)) {
847 if (!timeout) {
848 printk(KERN_ERR "%s: Data0: Never released\n",
849 mmc_hostname(mmc));
850 mrq->cmd->error = -ENOTRECOVERABLE;
851 host->prv_err = 1;
852 mmc_request_done(mmc, mrq);
853 return;
854 }
855 if (host->prv_err) {
856 udelay(10);
857 } else {
858 expr = ktime_add_ns(ktime_get(), add_time);
859 set_current_state(TASK_UNINTERRUPTIBLE);
860 schedule_hrtimeout(&expr, HRTIMER_MODE_ABS);
861 }
862 timeout--;
863 } else
864 break;
865 } while(1);
866
659 /* We don't support multiple blocks of weird lengths. */ 867 /* We don't support multiple blocks of weird lengths. */
660 dw_mci_queue_request(host, slot, mrq); 868 dw_mci_queue_request(host, slot, mrq);
661} 869}
@@ -680,12 +888,19 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
680 break; 888 break;
681 } 889 }
682 890
891 regs = mci_readl(slot->host, UHS_REG);
892
683 /* DDR mode set */ 893 /* DDR mode set */
684 if (ios->ddr) { 894 if (ios->timing == MMC_TIMING_UHS_DDR50)
685 regs = mci_readl(slot->host, UHS_REG);
686 regs |= (0x1 << slot->id) << 16; 895 regs |= (0x1 << slot->id) << 16;
687 mci_writel(slot->host, UHS_REG, regs); 896 else
688 } 897 /* 1, 4, 8 Bit SDR */
898 regs &= ~(0x1 << slot->id) << 16;
899
900 mci_writel(slot->host, UHS_REG, regs);
901
902 if (slot->host->pdata->set_io_timing)
903 slot->host->pdata->set_io_timing(slot->host, ios->timing);
689 904
690 if (ios->clock) { 905 if (ios->clock) {
691 /* 906 /*
@@ -702,6 +917,9 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
702 default: 917 default:
703 break; 918 break;
704 } 919 }
920
921 if (slot->host->pdata->cfg_gpio)
922 slot->host->pdata->cfg_gpio(mmc->ios.bus_width);
705} 923}
706 924
707static int dw_mci_get_ro(struct mmc_host *mmc) 925static int dw_mci_get_ro(struct mmc_host *mmc)
@@ -746,11 +964,186 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
746 return present; 964 return present;
747} 965}
748 966
967static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
968{
969 struct dw_mci_slot *slot = mmc_priv(mmc);
970 struct dw_mci *host = slot->host;
971 u32 int_mask;
972
973 /* Enable/disable Slot Specific SDIO interrupt */
974 int_mask = mci_readl(host, INTMASK);
975 if (enb) {
976 mci_writel(host, INTMASK,
977 (int_mask | (1 << SDMMC_INT_SDIO(slot->id))));
978 } else {
979 mci_writel(host, INTMASK,
980 (int_mask & ~(1 << SDMMC_INT_SDIO(slot->id))));
981 }
982}
983
984static u8 dw_mci_tuning_sampling(struct dw_mci * host)
985{
986 u32 clksel;
987 u8 sample;
988
989 clksel = mci_readl(host, CLKSEL);
990 sample = clksel & 0x7;
991 sample = (++sample == 8) ? 0 : sample;
992 clksel = (clksel & 0xfffffff8) | (sample & 0x7);
993 mci_writel(host, CLKSEL, clksel);
994
995 return sample;
996}
997
998static void dw_mci_set_sampling(struct dw_mci * host, u8 sample)
999{
1000 u32 clksel;
1001
1002 clksel = mci_readl(host, CLKSEL);
1003 clksel = (clksel & 0xfffffff8) | (sample & 0x7);
1004 mci_writel(host, CLKSEL, clksel);
1005}
1006
1007static u8 dw_mci_get_sampling(struct dw_mci * host)
1008{
1009 u32 clksel;
1010 u8 sample;
1011
1012 clksel = mci_readl(host, CLKSEL);
1013 sample = clksel & 0x7;
1014
1015 return sample;
1016}
1017
1018static u8 get_median_sample(u8 map)
1019{
1020 u8 min = 0, max = 0;
1021 u8 pos;
1022 u8 i;
1023
1024 for (i = 0; i < 4; i++) {
1025 if ((map >> (4 + i)) & 0x1)
1026 max = 4 + i;
1027 if ((map >> (3 - i)) & 0x1)
1028 min = 3 - i;
1029 }
1030
1031 pos = max;
1032 do {
1033 max = pos;
1034 pos = DIV_ROUND_CLOSEST(min + max, 2);
1035 if ((map >> pos) & 0x1)
1036 break;
1037
1038 } while(pos != max);
1039
1040 return pos;
1041}
1042
1043static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1044{
1045 struct dw_mci_slot *slot = mmc_priv(mmc);
1046 struct dw_mci *host = slot->host;
1047 unsigned int tuning_loop = MAX_TUING_LOOP;
1048 u8 *tuning_blk;
1049 u8 blksz;
1050 u8 tune, start_tune;
1051 u8 map = 0, mid;
1052
1053 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1054 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
1055 blksz = 128;
1056 else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
1057 blksz = 64;
1058 else
1059 return -EINVAL;
1060 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1061 blksz = 64;
1062 } else {
1063 dev_err(&mmc->class_dev,
1064 "Undefined command(%d) for tuning\n",
1065 opcode);
1066 return -EINVAL;
1067 }
1068
1069 tuning_blk = kmalloc(blksz, GFP_KERNEL);
1070 if (!tuning_blk)
1071 return -ENOMEM;
1072
1073 start_tune = dw_mci_get_sampling(host);
1074
1075 do {
1076 struct mmc_request mrq = {NULL};
1077 struct mmc_command cmd = {0};
1078 struct mmc_command stop = {0};
1079 struct mmc_data data = {0};
1080 struct scatterlist sg;
1081
1082 cmd.opcode = opcode;
1083 cmd.arg = 0;
1084 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
1085
1086 stop.opcode = MMC_STOP_TRANSMISSION;
1087 stop.arg = 0;
1088 stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
1089
1090 data.blksz = blksz;
1091 data.blocks = 1;
1092 data.flags = MMC_DATA_READ;
1093 data.sg = &sg;
1094 data.sg_len = 1;
1095
1096 sg_init_one(&sg, tuning_blk, blksz);
1097 dw_mci_set_timeout(host);
1098
1099 mrq.cmd = &cmd;
1100 mrq.stop = &stop;
1101 mrq.data = &data;
1102 host->mrq = &mrq;
1103
1104 tune = dw_mci_tuning_sampling(host);
1105
1106 mmc_wait_for_req(mmc, &mrq);
1107
1108 if (!cmd.error && !data.error) {
1109 if (!memcmp(tuning_blk_pattern, tuning_blk, blksz))
1110 map |= (1 << tune);
1111 } else {
1112 dev_dbg(&mmc->class_dev,
1113 "Tuning error: cmd.error:%d, data.error:%d\n",
1114 cmd.error, data.error);
1115 }
1116
1117 if (start_tune == tune) {
1118 if (!map) {
1119 tuning_loop = 0;
1120 break;
1121 }
1122
1123 mid = get_median_sample(map);
1124 dw_mci_set_sampling(host, mid);
1125 break;
1126 }
1127
1128 } while(--tuning_loop);
1129
1130 kfree(tuning_blk);
1131
1132 if (!tuning_loop)
1133 return -EIO;
1134
1135 return 0;
1136}
1137
749static const struct mmc_host_ops dw_mci_ops = { 1138static const struct mmc_host_ops dw_mci_ops = {
750 .request = dw_mci_request, 1139 .request = dw_mci_request,
751 .set_ios = dw_mci_set_ios, 1140 .pre_req = dw_mci_pre_req,
752 .get_ro = dw_mci_get_ro, 1141 .post_req = dw_mci_post_req,
753 .get_cd = dw_mci_get_cd, 1142 .set_ios = dw_mci_set_ios,
1143 .get_ro = dw_mci_get_ro,
1144 .get_cd = dw_mci_get_cd,
1145 .enable_sdio_irq = dw_mci_enable_sdio_irq,
1146 .execute_tuning = dw_mci_execute_tuning,
754}; 1147};
755 1148
756static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq) 1149static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
@@ -821,6 +1214,7 @@ static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd
821 host->data = NULL; 1214 host->data = NULL;
822 dw_mci_stop_dma(host); 1215 dw_mci_stop_dma(host);
823 } 1216 }
1217 host->prv_err = 1;
824 } 1218 }
825} 1219}
826 1220
@@ -853,7 +1247,13 @@ static void dw_mci_tasklet_func(unsigned long priv)
853 cmd = host->cmd; 1247 cmd = host->cmd;
854 host->cmd = NULL; 1248 host->cmd = NULL;
855 set_bit(EVENT_CMD_COMPLETE, &host->completed_events); 1249 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
856 dw_mci_command_complete(host, host->mrq->cmd); 1250 dw_mci_command_complete(host, cmd);
1251 if ((cmd == host->mrq->sbc) && !cmd->error) {
1252 prev_state = state = STATE_SENDING_CMD;
1253 __dw_mci_start_request(host, host->cur_slot, host->mrq->cmd);
1254 goto unlock;
1255 }
1256
857 if (!host->mrq->data || cmd->error) { 1257 if (!host->mrq->data || cmd->error) {
858 dw_mci_request_end(host, host->mrq); 1258 dw_mci_request_end(host, host->mrq);
859 goto unlock; 1259 goto unlock;
@@ -905,6 +1305,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
905 status); 1305 status);
906 data->error = -EIO; 1306 data->error = -EIO;
907 } 1307 }
1308 host->prv_err = 1;
908 } else { 1309 } else {
909 data->bytes_xfered = data->blocks * data->blksz; 1310 data->bytes_xfered = data->blocks * data->blksz;
910 data->error = 0; 1311 data->error = 0;
@@ -915,6 +1316,12 @@ static void dw_mci_tasklet_func(unsigned long priv)
915 goto unlock; 1316 goto unlock;
916 } 1317 }
917 1318
1319 if (host->mrq->sbc && !data->error) {
1320 data->stop->error = 0;
1321 dw_mci_request_end(host, host->mrq);
1322 goto unlock;
1323 }
1324
918 prev_state = state = STATE_SENDING_STOP; 1325 prev_state = state = STATE_SENDING_STOP;
919 if (!data->error) 1326 if (!data->error)
920 send_stop_cmd(host, data); 1327 send_stop_cmd(host, data);
@@ -954,7 +1361,7 @@ static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
954 1361
955 cnt = cnt >> 1; 1362 cnt = cnt >> 1;
956 while (cnt > 0) { 1363 while (cnt > 0) {
957 mci_writew(host, DATA, *pdata++); 1364 mci_writew(host, DATA(host->data_offset), *pdata++);
958 cnt--; 1365 cnt--;
959 } 1366 }
960} 1367}
@@ -967,7 +1374,7 @@ static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
967 1374
968 cnt = cnt >> 1; 1375 cnt = cnt >> 1;
969 while (cnt > 0) { 1376 while (cnt > 0) {
970 *pdata++ = mci_readw(host, DATA); 1377 *pdata++ = mci_readw(host, DATA(host->data_offset));
971 cnt--; 1378 cnt--;
972 } 1379 }
973} 1380}
@@ -981,7 +1388,7 @@ static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
981 1388
982 cnt = cnt >> 2; 1389 cnt = cnt >> 2;
983 while (cnt > 0) { 1390 while (cnt > 0) {
984 mci_writel(host, DATA, *pdata++); 1391 mci_writel(host, DATA(host->data_offset), *pdata++);
985 cnt--; 1392 cnt--;
986 } 1393 }
987} 1394}
@@ -995,7 +1402,7 @@ static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
995 1402
996 cnt = cnt >> 2; 1403 cnt = cnt >> 2;
997 while (cnt > 0) { 1404 while (cnt > 0) {
998 *pdata++ = mci_readl(host, DATA); 1405 *pdata++ = mci_readl(host, DATA(host->data_offset));
999 cnt--; 1406 cnt--;
1000 } 1407 }
1001} 1408}
@@ -1008,7 +1415,7 @@ static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
1008 1415
1009 cnt = cnt >> 3; 1416 cnt = cnt >> 3;
1010 while (cnt > 0) { 1417 while (cnt > 0) {
1011 mci_writeq(host, DATA, *pdata++); 1418 mci_writeq(host, DATA(host->data_offset), *pdata++);
1012 cnt--; 1419 cnt--;
1013 } 1420 }
1014} 1421}
@@ -1021,60 +1428,49 @@ static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1021 1428
1022 cnt = cnt >> 3; 1429 cnt = cnt >> 3;
1023 while (cnt > 0) { 1430 while (cnt > 0) {
1024 *pdata++ = mci_readq(host, DATA); 1431 *pdata++ = mci_readq(host, DATA(host->data_offset));
1025 cnt--; 1432 cnt--;
1026 } 1433 }
1027} 1434}
1028 1435
1029static void dw_mci_read_data_pio(struct dw_mci *host) 1436static void dw_mci_read_data_pio(struct dw_mci *host)
1030{ 1437{
1031 struct scatterlist *sg = host->sg; 1438 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1032 void *buf = sg_virt(sg); 1439 void *buf;
1033 unsigned int offset = host->pio_offset; 1440 unsigned int offset;
1034 struct mmc_data *data = host->data; 1441 struct mmc_data *data = host->data;
1035 int shift = host->data_shift; 1442 int shift = host->data_shift;
1036 u32 status; 1443 u32 status;
1037 unsigned int nbytes = 0, len; 1444 unsigned int nbytes = 0, len;
1445 unsigned int remain, fcnt;
1038 1446
1039 do { 1447 do {
1040 len = SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift; 1448 if (!sg_miter_next(sg_miter))
1041 if (offset + len <= sg->length) { 1449 goto done;
1042 host->pull_data(host, (void *)(buf + offset), len);
1043 1450
1044 offset += len; 1451 buf = sg_miter->addr;
1045 nbytes += len; 1452 remain = sg_miter->length;
1046 1453 offset = 0;
1047 if (offset == sg->length) {
1048 flush_dcache_page(sg_page(sg));
1049 host->sg = sg = sg_next(sg);
1050 if (!sg)
1051 goto done;
1052 1454
1053 offset = 0; 1455 do {
1054 buf = sg_virt(sg); 1456 fcnt = SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift;
1055 } 1457 len = min(remain, fcnt);
1056 } else { 1458 if (!len)
1057 unsigned int remaining = sg->length - offset; 1459 break;
1058 host->pull_data(host, (void *)(buf + offset), 1460 host->pull_data(host, (void *)(buf + offset), len);
1059 remaining); 1461 nbytes += len;
1060 nbytes += remaining; 1462 offset += len;
1061 1463 remain -= len;
1062 flush_dcache_page(sg_page(sg)); 1464 } while (remain);
1063 host->sg = sg = sg_next(sg); 1465 sg_miter->consumed = offset;
1064 if (!sg)
1065 goto done;
1066
1067 offset = len - remaining;
1068 buf = sg_virt(sg);
1069 host->pull_data(host, buf, offset);
1070 nbytes += offset;
1071 }
1072 1466
1073 status = mci_readl(host, MINTSTS); 1467 status = mci_readl(host, MINTSTS);
1074 mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 1468 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1075 if (status & DW_MCI_DATA_ERROR_FLAGS) { 1469 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1076 host->data_status = status; 1470 host->data_status = status;
1077 data->bytes_xfered += nbytes; 1471 data->bytes_xfered += nbytes;
1472 sg_miter_stop(sg_miter);
1473 host->sg = NULL;
1078 smp_wmb(); 1474 smp_wmb();
1079 1475
1080 set_bit(EVENT_DATA_ERROR, &host->pending_events); 1476 set_bit(EVENT_DATA_ERROR, &host->pending_events);
@@ -1083,65 +1479,64 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
1083 return; 1479 return;
1084 } 1480 }
1085 } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/ 1481 } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/
1086 len = SDMMC_GET_FCNT(mci_readl(host, STATUS));
1087 host->pio_offset = offset;
1088 data->bytes_xfered += nbytes; 1482 data->bytes_xfered += nbytes;
1483
1484 if (!remain) {
1485 if (!sg_miter_next(sg_miter))
1486 goto done;
1487 sg_miter->consumed = 0;
1488 }
1489 sg_miter_stop(sg_miter);
1089 return; 1490 return;
1090 1491
1091done: 1492done:
1092 data->bytes_xfered += nbytes; 1493 data->bytes_xfered += nbytes;
1494 sg_miter_stop(sg_miter);
1495 host->sg = NULL;
1093 smp_wmb(); 1496 smp_wmb();
1094 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 1497 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1095} 1498}
1096 1499
1097static void dw_mci_write_data_pio(struct dw_mci *host) 1500static void dw_mci_write_data_pio(struct dw_mci *host)
1098{ 1501{
1099 struct scatterlist *sg = host->sg; 1502 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1100 void *buf = sg_virt(sg); 1503 void *buf;
1101 unsigned int offset = host->pio_offset; 1504 unsigned int offset;
1102 struct mmc_data *data = host->data; 1505 struct mmc_data *data = host->data;
1103 int shift = host->data_shift; 1506 int shift = host->data_shift;
1104 u32 status; 1507 u32 status;
1105 unsigned int nbytes = 0, len; 1508 unsigned int nbytes = 0, len;
1509 unsigned int fifo_depth = host->fifo_depth;
1510 unsigned int remain, fcnt;
1106 1511
1107 do { 1512 do {
1108 len = SDMMC_FIFO_SZ - 1513 if (!sg_miter_next(sg_miter))
1109 (SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift); 1514 goto done;
1110 if (offset + len <= sg->length) { 1515
1516 buf = sg_miter->addr;
1517 remain = sg_miter->length;
1518 offset = 0;
1519
1520 do {
1521 fcnt = SDMMC_GET_FCNT(mci_readl(host, STATUS));
1522 fcnt = (fifo_depth - fcnt) << shift;
1523 len = min(remain, fcnt);
1524 if (!len)
1525 break;
1111 host->push_data(host, (void *)(buf + offset), len); 1526 host->push_data(host, (void *)(buf + offset), len);
1112
1113 offset += len;
1114 nbytes += len; 1527 nbytes += len;
1115 if (offset == sg->length) { 1528 offset += len;
1116 host->sg = sg = sg_next(sg); 1529 remain -= len;
1117 if (!sg) 1530 } while (remain);
1118 goto done; 1531 sg_miter->consumed = offset;
1119
1120 offset = 0;
1121 buf = sg_virt(sg);
1122 }
1123 } else {
1124 unsigned int remaining = sg->length - offset;
1125
1126 host->push_data(host, (void *)(buf + offset),
1127 remaining);
1128 nbytes += remaining;
1129
1130 host->sg = sg = sg_next(sg);
1131 if (!sg)
1132 goto done;
1133
1134 offset = len - remaining;
1135 buf = sg_virt(sg);
1136 host->push_data(host, (void *)buf, offset);
1137 nbytes += offset;
1138 }
1139 1532
1140 status = mci_readl(host, MINTSTS); 1533 status = mci_readl(host, MINTSTS);
1141 mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 1534 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1142 if (status & DW_MCI_DATA_ERROR_FLAGS) { 1535 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1143 host->data_status = status; 1536 host->data_status = status;
1144 data->bytes_xfered += nbytes; 1537 data->bytes_xfered += nbytes;
1538 sg_miter_stop(sg_miter);
1539 host->sg = NULL;
1145 1540
1146 smp_wmb(); 1541 smp_wmb();
1147 1542
@@ -1151,14 +1546,20 @@ static void dw_mci_write_data_pio(struct dw_mci *host)
1151 return; 1546 return;
1152 } 1547 }
1153 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ 1548 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
1154
1155 host->pio_offset = offset;
1156 data->bytes_xfered += nbytes; 1549 data->bytes_xfered += nbytes;
1157 1550
1551 if (!remain) {
1552 if (!sg_miter_next(sg_miter))
1553 goto done;
1554 sg_miter->consumed = 0;
1555 }
1556 sg_miter_stop(sg_miter);
1158 return; 1557 return;
1159 1558
1160done: 1559done:
1161 data->bytes_xfered += nbytes; 1560 data->bytes_xfered += nbytes;
1561 sg_miter_stop(sg_miter);
1562 host->sg = NULL;
1162 smp_wmb(); 1563 smp_wmb();
1163 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 1564 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1164} 1565}
@@ -1179,6 +1580,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1179 struct dw_mci *host = dev_id; 1580 struct dw_mci *host = dev_id;
1180 u32 status, pending; 1581 u32 status, pending;
1181 unsigned int pass_count = 0; 1582 unsigned int pass_count = 0;
1583 int i;
1182 1584
1183 do { 1585 do {
1184 status = mci_readl(host, RINTSTS); 1586 status = mci_readl(host, RINTSTS);
@@ -1202,7 +1604,8 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1202 host->cmd_status = status; 1604 host->cmd_status = status;
1203 smp_wmb(); 1605 smp_wmb();
1204 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 1606 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1205 tasklet_schedule(&host->tasklet); 1607 if (!(pending & SDMMC_INT_RTO))
1608 tasklet_schedule(&host->tasklet);
1206 } 1609 }
1207 1610
1208 if (pending & DW_MCI_DATA_ERROR_FLAGS) { 1611 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
@@ -1211,7 +1614,8 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1211 host->data_status = status; 1614 host->data_status = status;
1212 smp_wmb(); 1615 smp_wmb();
1213 set_bit(EVENT_DATA_ERROR, &host->pending_events); 1616 set_bit(EVENT_DATA_ERROR, &host->pending_events);
1214 tasklet_schedule(&host->tasklet); 1617 if (!(pending & SDMMC_INT_DTO))
1618 tasklet_schedule(&host->tasklet);
1215 } 1619 }
1216 1620
1217 if (pending & SDMMC_INT_DATA_OVER) { 1621 if (pending & SDMMC_INT_DATA_OVER) {
@@ -1223,6 +1627,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1223 if (host->sg != NULL) 1627 if (host->sg != NULL)
1224 dw_mci_read_data_pio(host); 1628 dw_mci_read_data_pio(host);
1225 } 1629 }
1630
1226 set_bit(EVENT_DATA_COMPLETE, &host->pending_events); 1631 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1227 tasklet_schedule(&host->tasklet); 1632 tasklet_schedule(&host->tasklet);
1228 } 1633 }
@@ -1249,6 +1654,15 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1249 tasklet_schedule(&host->card_tasklet); 1654 tasklet_schedule(&host->card_tasklet);
1250 } 1655 }
1251 1656
1657 /* Handle SDIO Interrupts */
1658 for (i = 0; i < host->num_slots; i++) {
1659 struct dw_mci_slot *slot = host->slot[i];
1660 if (pending & SDMMC_INT_SDIO(i)) {
1661 mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
1662 mmc_signal_sdio_irq(slot->mmc);
1663 }
1664 }
1665
1252 } while (pass_count++ < 5); 1666 } while (pass_count++ < 5);
1253 1667
1254#ifdef CONFIG_MMC_DW_IDMAC 1668#ifdef CONFIG_MMC_DW_IDMAC
@@ -1308,6 +1722,7 @@ static void dw_mci_tasklet_card(unsigned long data)
1308 break; 1722 break;
1309 case STATE_SENDING_CMD: 1723 case STATE_SENDING_CMD:
1310 mrq->cmd->error = -ENOMEDIUM; 1724 mrq->cmd->error = -ENOMEDIUM;
1725 host->prv_err = 1;
1311 if (!mrq->data) 1726 if (!mrq->data)
1312 break; 1727 break;
1313 /* fall through */ 1728 /* fall through */
@@ -1331,6 +1746,7 @@ static void dw_mci_tasklet_card(unsigned long data)
1331 } else { 1746 } else {
1332 list_del(&slot->queue_node); 1747 list_del(&slot->queue_node);
1333 mrq->cmd->error = -ENOMEDIUM; 1748 mrq->cmd->error = -ENOMEDIUM;
1749 host->prv_err = 1;
1334 if (mrq->data) 1750 if (mrq->data)
1335 mrq->data->error = -ENOMEDIUM; 1751 mrq->data->error = -ENOMEDIUM;
1336 if (mrq->stop) 1752 if (mrq->stop)
@@ -1353,6 +1769,7 @@ static void dw_mci_tasklet_card(unsigned long data)
1353 * block interrupt, hence setting the 1769 * block interrupt, hence setting the
1354 * scatter-gather pointer to NULL. 1770 * scatter-gather pointer to NULL.
1355 */ 1771 */
1772 sg_miter_stop(&host->sg_miter);
1356 host->sg = NULL; 1773 host->sg = NULL;
1357 1774
1358 ctrl = mci_readl(host, CTRL); 1775 ctrl = mci_readl(host, CTRL);
@@ -1376,6 +1793,34 @@ static void dw_mci_tasklet_card(unsigned long data)
1376 } 1793 }
1377} 1794}
1378 1795
1796static void dw_mci_notify_change(struct platform_device *dev, int state)
1797{
1798 struct dw_mci *host = platform_get_drvdata(dev);
1799 unsigned long flags;
1800
1801 if (host) {
1802 spin_lock_irqsave(&host->lock, flags);
1803 if (state) {
1804 dev_dbg(&dev->dev, "card inserted.\n");
1805 host->quirks |= DW_MCI_QUIRK_BROKEN_CARD_DETECTION;
1806 } else {
1807 dev_dbg(&dev->dev, "card removed.\n");
1808 host->quirks &= ~DW_MCI_QUIRK_BROKEN_CARD_DETECTION;
1809 }
1810 tasklet_schedule(&host->card_tasklet);
1811 spin_unlock_irqrestore(&host->lock, flags);
1812 }
1813}
1814
1815static irqreturn_t dw_mci_detect_interrupt(int irq, void *dev_id)
1816{
1817 struct dw_mci_slot *slot = dev_id;
1818
1819 tasklet_schedule(&slot->host->card_tasklet);
1820
1821 return IRQ_HANDLED;
1822}
1823
1379static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id) 1824static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
1380{ 1825{
1381 struct mmc_host *mmc; 1826 struct mmc_host *mmc;
@@ -1411,20 +1856,18 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
1411 else 1856 else
1412 mmc->caps = 0; 1857 mmc->caps = 0;
1413 1858
1859 if (host->pdata->caps2)
1860 mmc->caps2 = host->pdata->caps2;
1861 else
1862 mmc->caps2 = 0;
1863
1414 if (host->pdata->get_bus_wd) 1864 if (host->pdata->get_bus_wd)
1415 if (host->pdata->get_bus_wd(slot->id) >= 4) 1865 if (host->pdata->get_bus_wd(slot->id) >= 4)
1416 mmc->caps |= MMC_CAP_4_BIT_DATA; 1866 mmc->caps |= MMC_CAP_4_BIT_DATA;
1417 1867
1418 if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED) 1868 if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
1419 mmc->caps |= MMC_CAP_SD_HIGHSPEED; 1869 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
1420 1870
1421#ifdef CONFIG_MMC_DW_IDMAC
1422 mmc->max_segs = host->ring_size;
1423 mmc->max_blk_size = 65536;
1424 mmc->max_blk_count = host->ring_size;
1425 mmc->max_seg_size = 0x1000;
1426 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
1427#else
1428 if (host->pdata->blk_settings) { 1871 if (host->pdata->blk_settings) {
1429 mmc->max_segs = host->pdata->blk_settings->max_segs; 1872 mmc->max_segs = host->pdata->blk_settings->max_segs;
1430 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size; 1873 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
@@ -1432,14 +1875,21 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
1432 mmc->max_req_size = host->pdata->blk_settings->max_req_size; 1875 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
1433 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size; 1876 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
1434 } else { 1877 } else {
1878#ifdef CONFIG_MMC_DW_IDMAC
1879 mmc->max_segs = host->ring_size;
1880 mmc->max_blk_size = 65536;
1881 mmc->max_seg_size = 0x1000;
1882 mmc->max_req_size = mmc->max_seg_size * host->ring_size;
1883 mmc->max_blk_count = mmc->max_req_size / 512;
1884#else
1435 /* Useful defaults if platform data is unset. */ 1885 /* Useful defaults if platform data is unset. */
1436 mmc->max_segs = 64; 1886 mmc->max_segs = 64;
1437 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */ 1887 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
1438 mmc->max_blk_count = 512; 1888 mmc->max_blk_count = 512;
1439 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 1889 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1440 mmc->max_seg_size = mmc->max_req_size; 1890 mmc->max_seg_size = mmc->max_req_size;
1441 }
1442#endif /* CONFIG_MMC_DW_IDMAC */ 1891#endif /* CONFIG_MMC_DW_IDMAC */
1892 }
1443 1893
1444 host->vmmc = regulator_get(mmc_dev(mmc), "vmmc"); 1894 host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
1445 if (IS_ERR(host->vmmc)) { 1895 if (IS_ERR(host->vmmc)) {
@@ -1448,6 +1898,8 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
1448 } else 1898 } else
1449 regulator_enable(host->vmmc); 1899 regulator_enable(host->vmmc);
1450 1900
1901 host->pdata->init(id, dw_mci_detect_interrupt, host);
1902
1451 if (dw_mci_get_cd(mmc)) 1903 if (dw_mci_get_cd(mmc))
1452 set_bit(DW_MMC_CARD_PRESENT, &slot->flags); 1904 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1453 else 1905 else
@@ -1486,8 +1938,13 @@ static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
1486 1938
1487static void dw_mci_init_dma(struct dw_mci *host) 1939static void dw_mci_init_dma(struct dw_mci *host)
1488{ 1940{
1941 if (host->pdata->buf_size)
1942 host->buf_size = host->pdata->buf_size;
1943 else
1944 host->buf_size = PAGE_SIZE;
1945
1489 /* Alloc memory for sg translation */ 1946 /* Alloc memory for sg translation */
1490 host->sg_cpu = dma_alloc_coherent(&host->pdev->dev, PAGE_SIZE, 1947 host->sg_cpu = dma_alloc_coherent(&host->pdev->dev, host->buf_size,
1491 &host->sg_dma, GFP_KERNEL); 1948 &host->sg_dma, GFP_KERNEL);
1492 if (!host->sg_cpu) { 1949 if (!host->sg_cpu) {
1493 dev_err(&host->pdev->dev, "%s: could not alloc DMA memory\n", 1950 dev_err(&host->pdev->dev, "%s: could not alloc DMA memory\n",
@@ -1588,6 +2045,28 @@ static int dw_mci_probe(struct platform_device *pdev)
1588 goto err_freehost; 2045 goto err_freehost;
1589 } 2046 }
1590 2047
2048 host->hclk = clk_get(&pdev->dev, pdata->hclk_name);
2049 if (IS_ERR(host->hclk)) {
2050 dev_err(&pdev->dev,
2051 "failed to get hclk\n");
2052 ret = PTR_ERR(host->hclk);
2053 goto err_freehost;
2054 }
2055 clk_enable(host->hclk);
2056
2057 host->cclk = clk_get(&pdev->dev, pdata->cclk_name);
2058 if (IS_ERR(host->cclk)) {
2059 dev_err(&pdev->dev,
2060 "failed to get cclk\n");
2061 ret = PTR_ERR(host->cclk);
2062 goto err_free_hclk;
2063 }
2064 clk_enable(host->cclk);
2065
2066 if ((soc_is_exynos4412() || soc_is_exynos4212())
2067 && (samsung_rev() < EXYNOS4412_REV_1_0))
2068 pdata->bus_hz = 66 * 1000 * 1000;
2069
1591 host->bus_hz = pdata->bus_hz; 2070 host->bus_hz = pdata->bus_hz;
1592 host->quirks = pdata->quirks; 2071 host->quirks = pdata->quirks;
1593 2072
@@ -1597,7 +2076,7 @@ static int dw_mci_probe(struct platform_device *pdev)
1597 ret = -ENOMEM; 2076 ret = -ENOMEM;
1598 host->regs = ioremap(regs->start, regs->end - regs->start + 1); 2077 host->regs = ioremap(regs->start, regs->end - regs->start + 1);
1599 if (!host->regs) 2078 if (!host->regs)
1600 goto err_freehost; 2079 goto err_free_cclk;
1601 2080
1602 host->dma_ops = pdata->dma_ops; 2081 host->dma_ops = pdata->dma_ops;
1603 dw_mci_init_dma(host); 2082 dw_mci_init_dma(host);
@@ -1645,8 +2124,19 @@ static int dw_mci_probe(struct platform_device *pdev)
1645 * FIFO threshold settings RxMark = fifo_size / 2 - 1, 2124 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
1646 * Tx Mark = fifo_size / 2 DMA Size = 8 2125 * Tx Mark = fifo_size / 2 DMA Size = 8
1647 */ 2126 */
1648 fifo_size = mci_readl(host, FIFOTH); 2127 if (!host->pdata->fifo_depth) {
1649 fifo_size = (fifo_size >> 16) & 0x7ff; 2128 /*
2129 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2130 * have been overwritten by the bootloader, just like we're
2131 * about to do, so if you know the value for your hardware, you
2132 * should put it in the platform data.
2133 */
2134 fifo_size = mci_readl(host, FIFOTH);
2135 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
2136 } else {
2137 fifo_size = host->pdata->fifo_depth;
2138 }
2139 host->fifo_depth = fifo_size;
1650 host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) | 2140 host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
1651 ((fifo_size/2) << 0)); 2141 ((fifo_size/2) << 0));
1652 mci_writel(host, FIFOTH, host->fifoth_val); 2142 mci_writel(host, FIFOTH, host->fifoth_val);
@@ -1680,6 +2170,24 @@ static int dw_mci_probe(struct platform_device *pdev)
1680 } 2170 }
1681 2171
1682 /* 2172 /*
2173 * In 2.40a spec, Data offset is changed.
2174 * Need to check the version-id and set data-offset for DATA register.
2175 */
2176 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2177 dev_info(&pdev->dev, "Version ID is %04x\n", host->verid);
2178
2179 if (host->verid < DW_MMC_240A)
2180 host->data_offset = DATA_OFFSET;
2181 else
2182 host->data_offset = DATA_240A_OFFSET;
2183
2184 if (host->pdata->cd_type == DW_MCI_CD_EXTERNAL) {
2185 host->pdata->ext_cd_init(&dw_mci_notify_change);
2186// if (host->pdata->caps == MMC_CAP_UHS_SDR50 && samsung_rev() >= EXYNOS5250_REV_1_0)
2187// clk_set_rate(host->cclk, 200 * 100 * 100);
2188 }
2189
2190 /*
1683 * Enable interrupts for command done, data over, data empty, card det, 2191 * Enable interrupts for command done, data over, data empty, card det,
1684 * receive ready and error such as transmit, receive timeout, crc error 2192 * receive ready and error such as transmit, receive timeout, crc error
1685 */ 2193 */
@@ -1690,7 +2198,9 @@ static int dw_mci_probe(struct platform_device *pdev)
1690 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */ 2198 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
1691 2199
1692 dev_info(&pdev->dev, "DW MMC controller at irq %d, " 2200 dev_info(&pdev->dev, "DW MMC controller at irq %d, "
1693 "%d bit host data width\n", irq, width); 2201 "%d bit host data width, "
2202 "%u deep fifo\n",
2203 irq, width, fifo_size);
1694 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) 2204 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
1695 dev_info(&pdev->dev, "Internal DMAC interrupt fix enabled.\n"); 2205 dev_info(&pdev->dev, "Internal DMAC interrupt fix enabled.\n");
1696 2206
@@ -1708,7 +2218,7 @@ err_init_slot:
1708err_dmaunmap: 2218err_dmaunmap:
1709 if (host->use_dma && host->dma_ops->exit) 2219 if (host->use_dma && host->dma_ops->exit)
1710 host->dma_ops->exit(host); 2220 host->dma_ops->exit(host);
1711 dma_free_coherent(&host->pdev->dev, PAGE_SIZE, 2221 dma_free_coherent(&host->pdev->dev, host->buf_size,
1712 host->sg_cpu, host->sg_dma); 2222 host->sg_cpu, host->sg_dma);
1713 iounmap(host->regs); 2223 iounmap(host->regs);
1714 2224
@@ -1717,6 +2227,13 @@ err_dmaunmap:
1717 regulator_put(host->vmmc); 2227 regulator_put(host->vmmc);
1718 } 2228 }
1719 2229
2230err_free_cclk:
2231 clk_disable(host->cclk);
2232 clk_put(host->cclk);
2233
2234err_free_hclk:
2235 clk_disable(host->hclk);
2236 clk_put(host->hclk);
1720 2237
1721err_freehost: 2238err_freehost:
1722 kfree(host); 2239 kfree(host);
@@ -1731,6 +2248,10 @@ static int __exit dw_mci_remove(struct platform_device *pdev)
1731 mci_writel(host, RINTSTS, 0xFFFFFFFF); 2248 mci_writel(host, RINTSTS, 0xFFFFFFFF);
1732 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ 2249 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
1733 2250
2251 if (host->pdata->cd_type == DW_MCI_CD_EXTERNAL) {
2252 host->pdata->ext_cd_cleanup(&dw_mci_notify_change);
2253 }
2254
1734 platform_set_drvdata(pdev, NULL); 2255 platform_set_drvdata(pdev, NULL);
1735 2256
1736 for (i = 0; i < host->num_slots; i++) { 2257 for (i = 0; i < host->num_slots; i++) {
@@ -1744,7 +2265,8 @@ static int __exit dw_mci_remove(struct platform_device *pdev)
1744 mci_writel(host, CLKSRC, 0); 2265 mci_writel(host, CLKSRC, 0);
1745 2266
1746 free_irq(platform_get_irq(pdev, 0), host); 2267 free_irq(platform_get_irq(pdev, 0), host);
1747 dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); 2268 dma_free_coherent(&pdev->dev, host->buf_size, host->sg_cpu,
2269 host->sg_dma);
1748 2270
1749 if (host->use_dma && host->dma_ops->exit) 2271 if (host->use_dma && host->dma_ops->exit)
1750 host->dma_ops->exit(host); 2272 host->dma_ops->exit(host);
@@ -1754,6 +2276,11 @@ static int __exit dw_mci_remove(struct platform_device *pdev)
1754 regulator_put(host->vmmc); 2276 regulator_put(host->vmmc);
1755 } 2277 }
1756 2278
2279 clk_disable(host->cclk);
2280 clk_put(host->cclk);
2281 clk_disable(host->hclk);
2282 clk_put(host->hclk);
2283
1757 iounmap(host->regs); 2284 iounmap(host->regs);
1758 2285
1759 kfree(host); 2286 kfree(host);
@@ -1769,6 +2296,8 @@ static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg)
1769 int i, ret; 2296 int i, ret;
1770 struct dw_mci *host = platform_get_drvdata(pdev); 2297 struct dw_mci *host = platform_get_drvdata(pdev);
1771 2298
2299 dw_mci_disable_card_detection(host);
2300
1772 for (i = 0; i < host->num_slots; i++) { 2301 for (i = 0; i < host->num_slots; i++) {
1773 struct dw_mci_slot *slot = host->slot[i]; 2302 struct dw_mci_slot *slot = host->slot[i];
1774 if (!slot) 2303 if (!slot)
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 23c662af561..345e2d7b91f 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -14,6 +14,8 @@
14#ifndef _DW_MMC_H_ 14#ifndef _DW_MMC_H_
15#define _DW_MMC_H_ 15#define _DW_MMC_H_
16 16
17#define DW_MMC_240A 0x240a
18
17#define SDMMC_CTRL 0x000 19#define SDMMC_CTRL 0x000
18#define SDMMC_PWREN 0x004 20#define SDMMC_PWREN 0x004
19#define SDMMC_CLKDIV 0x008 21#define SDMMC_CLKDIV 0x008
@@ -51,7 +53,15 @@
51#define SDMMC_IDINTEN 0x090 53#define SDMMC_IDINTEN 0x090
52#define SDMMC_DSCADDR 0x094 54#define SDMMC_DSCADDR 0x094
53#define SDMMC_BUFADDR 0x098 55#define SDMMC_BUFADDR 0x098
54#define SDMMC_DATA 0x100 56#define SDMMC_CLKSEL 0x09c
57#define SDMMC_DATA(x) (x)
58
59/*
60 * Data offset is difference according to Version
61 * Lower than 2.40a : data register offest is 0x100
62 */
63#define DATA_OFFSET 0x100
64#define DATA_240A_OFFSET 0x200
55 65
56/* shift bit field */ 66/* shift bit field */
57#define _SBF(f, v) ((v) << (f)) 67#define _SBF(f, v) ((v) << (f))
@@ -82,7 +92,7 @@
82#define SDMMC_CTYPE_4BIT BIT(0) 92#define SDMMC_CTYPE_4BIT BIT(0)
83#define SDMMC_CTYPE_1BIT 0 93#define SDMMC_CTYPE_1BIT 0
84/* Interrupt status & mask register defines */ 94/* Interrupt status & mask register defines */
85#define SDMMC_INT_SDIO BIT(16) 95#define SDMMC_INT_SDIO(n) BIT(16 + (n))
86#define SDMMC_INT_EBE BIT(15) 96#define SDMMC_INT_EBE BIT(15)
87#define SDMMC_INT_ACD BIT(14) 97#define SDMMC_INT_ACD BIT(14)
88#define SDMMC_INT_SBE BIT(13) 98#define SDMMC_INT_SBE BIT(13)
@@ -102,6 +112,7 @@
102#define SDMMC_INT_ERROR 0xbfc2 112#define SDMMC_INT_ERROR 0xbfc2
103/* Command register defines */ 113/* Command register defines */
104#define SDMMC_CMD_START BIT(31) 114#define SDMMC_CMD_START BIT(31)
115#define SDMMC_USE_HOLD_REG BIT(29)
105#define SDMMC_CMD_CCS_EXP BIT(23) 116#define SDMMC_CMD_CCS_EXP BIT(23)
106#define SDMMC_CMD_CEATA_RD BIT(22) 117#define SDMMC_CMD_CEATA_RD BIT(22)
107#define SDMMC_CMD_UPD_CLK BIT(21) 118#define SDMMC_CMD_UPD_CLK BIT(21)
@@ -117,8 +128,7 @@
117#define SDMMC_CMD_RESP_EXP BIT(6) 128#define SDMMC_CMD_RESP_EXP BIT(6)
118#define SDMMC_CMD_INDX(n) ((n) & 0x1F) 129#define SDMMC_CMD_INDX(n) ((n) & 0x1F)
119/* Status register defines */ 130/* Status register defines */
120#define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FF) 131#define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FFF)
121#define SDMMC_FIFO_SZ 32
122/* Internal DMAC interrupt defines */ 132/* Internal DMAC interrupt defines */
123#define SDMMC_IDMAC_INT_AI BIT(9) 133#define SDMMC_IDMAC_INT_AI BIT(9)
124#define SDMMC_IDMAC_INT_NI BIT(8) 134#define SDMMC_IDMAC_INT_NI BIT(8)
@@ -131,6 +141,8 @@
131#define SDMMC_IDMAC_ENABLE BIT(7) 141#define SDMMC_IDMAC_ENABLE BIT(7)
132#define SDMMC_IDMAC_FB BIT(1) 142#define SDMMC_IDMAC_FB BIT(1)
133#define SDMMC_IDMAC_SWRESET BIT(0) 143#define SDMMC_IDMAC_SWRESET BIT(0)
144/* Version ID register define */
145#define SDMMC_GET_VERID(x) ((x) & 0xFFFF)
134 146
135/* Register access macros */ 147/* Register access macros */
136#define mci_readl(dev, reg) \ 148#define mci_readl(dev, reg) \
diff --git a/drivers/mmc/host/mshci-s3c-dma.c b/drivers/mmc/host/mshci-s3c-dma.c
new file mode 100644
index 00000000000..d62f544a3fb
--- /dev/null
+++ b/drivers/mmc/host/mshci-s3c-dma.c
@@ -0,0 +1,220 @@
1/*
2* linux/drivers/mmc/host/mshci-s3c-dma.c
3* Mobile Storage Host Controller Interface driver
4*
5* Copyright (c) 2011 Samsung Electronics Co., Ltd.
6* http://www.samsung.com
7*
8
9* This program is free software; you can redistribute it and/or modify
10* it under the terms of the GNU General Public License as published by
11* the Free Software Foundation; either version 2 of the License, or (at
12* your option) any later version.
13*
14*/
15#include <linux/module.h>
16#include <linux/mm.h>
17#include <linux/gfp.h>
18#include <linux/errno.h>
19#include <linux/list.h>
20#include <linux/init.h>
21#include <linux/device.h>
22#include <linux/dma-mapping.h>
23#include <linux/highmem.h>
24
25#include <asm/memory.h>
26#include <asm/highmem.h>
27#include <asm/cacheflush.h>
28#include <asm/tlbflush.h>
29#include <asm/sizes.h>
30
31#include <linux/mmc/host.h>
32
33#include "mshci.h"
34
35
36static void mshci_s3c_dma_cache_maint_page(struct page *page,
37 unsigned long offset, size_t size, enum dma_data_direction dir,
38 void (*op)(const void *, size_t, int), int flush_type, int enable)
39{
40 /*
41 * A single sg entry may refer to multiple physically contiguous
42 * pages. But we still need to process highmem pages individually.
43 * If highmem is not configured then the bulk of this loop gets
44 * optimized out.
45 */
46 size_t left = size;
47 do {
48 size_t len = left;
49 void *vaddr;
50
51 if (PageHighMem(page)) {
52 if (len + offset > PAGE_SIZE) {
53 if (offset >= PAGE_SIZE) {
54 page += offset / PAGE_SIZE;
55 offset %= PAGE_SIZE;
56 }
57 len = PAGE_SIZE - offset;
58 }
59 vaddr = kmap_high_get(page);
60 if (vaddr) {
61 vaddr += offset;
62 if (flush_type == 0 && enable)
63 op(vaddr, len, dir);
64 kunmap_high(page);
65 } else if (cache_is_vipt()) {
66 /* unmapped pages might still be cached */
67 vaddr = kmap_atomic(page);
68 op(vaddr + offset, len, dir);
69 kunmap_atomic(vaddr);
70 }
71 } else {
72 vaddr = page_address(page) + offset;
73 if (flush_type == 0 && enable)
74 op(vaddr, len, dir);
75 }
76 offset = 0;
77 page++;
78 left -= len;
79
80 } while (left);
81}
82
83
84void mshci_s3c_dma_page_cpu_to_dev(struct page *page, unsigned long off,
85 size_t size, enum dma_data_direction dir, int flush_type)
86{
87 unsigned long paddr;
88
89 if (dir != DMA_FROM_DEVICE) {
90 mshci_s3c_dma_cache_maint_page(page, off, size, dir,
91 dmac_map_area,
92 flush_type, 1);
93
94 paddr = page_to_phys(page) + off;
95 if (flush_type != 2) {
96 outer_clean_range(paddr, paddr + size);
97 }
98 /* FIXME: non-speculating: flush on bidirectional mappings? */
99 } else {
100 paddr = page_to_phys(page) + off;
101
102 /* if flush all L1 cache,
103 L2 cache dose not neet to be clean.
104 because, all buffer dose not have split space */
105 if (flush_type != 2) {
106 outer_clean_range(paddr, paddr + size);
107 outer_inv_range(paddr, paddr + size);
108 }
109 /* FIXME: non-speculating: flush on bidirectional mappings? */
110
111 mshci_s3c_dma_cache_maint_page(page, off, size, dir,
112 dmac_unmap_area,
113 flush_type, 1);
114 }
115}
116
117
118static inline dma_addr_t mshci_s3c_dma_map_page(struct device *dev,
119 struct page *page, unsigned long offset, size_t size,
120 enum dma_data_direction dir, int flush_type)
121{
122 BUG_ON(!valid_dma_direction(dir));
123
124 mshci_s3c_dma_page_cpu_to_dev(page, offset, size, dir, flush_type);
125
126 return pfn_to_dma(dev, page_to_pfn(page)) + offset;
127}
128
129int mshci_s3c_dma_map_sg(struct mshci_host *host, struct device *dev,
130 struct scatterlist *sg, int nents, enum dma_data_direction dir,
131 int flush_type)
132{
133 struct scatterlist *s;
134 int i, j;
135
136 BUG_ON(!valid_dma_direction(dir));
137
138 if (flush_type == 2) {
139 spin_unlock_irqrestore(&host->lock, host->sl_flags);
140 flush_all_cpu_caches();
141 outer_flush_all();
142 spin_lock_irqsave(&host->lock, host->sl_flags);
143 } else if(flush_type == 1) {
144 spin_unlock_irqrestore(&host->lock, host->sl_flags);
145 flush_all_cpu_caches();
146 spin_lock_irqsave(&host->lock, host->sl_flags);
147 }
148
149 for_each_sg(sg, s, nents, i) {
150 s->dma_address = mshci_s3c_dma_map_page(dev, sg_page(s),
151 s->offset, s->length, dir, flush_type);
152 if (dma_mapping_error(dev, s->dma_address)) {
153 goto bad_mapping;
154 }
155 }
156
157 debug_dma_map_sg(dev, sg, nents, nents, dir);
158
159 /* in case of invaldating cache, invaldating L2 cache
160 must be done prior to invaldating L1 cache */
161#if 0
162 if (dir == DMA_FROM_DEVICE) {
163 if (flush_type == 1) {
164 spin_unlock_irqrestore(&host->lock, host->sl_flags);
165 flush_all_cpu_caches();
166 spin_lock_irqsave(&host->lock, host->sl_flags);
167 }
168 }
169#endif
170 return nents;
171
172 bad_mapping:
173 for_each_sg(sg, s, i, j)
174 dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
175 return 0;
176}
177
178void mshci_s3c_dma_page_dev_to_cpu(struct page *page, unsigned long off,
179 size_t size, enum dma_data_direction dir, int flush_type)
180{
181
182 unsigned long paddr = page_to_phys(page) + off;
183
184 /* FIXME: non-speculating: not required */
185 /* don't bother invalidating if DMA to device */
186
187 mshci_s3c_dma_cache_maint_page(page, off, size, dir, dmac_unmap_area,
188 flush_type, 0);
189}
190
191
192static inline void mshci_s3c_dma_unmap_page(struct device *dev,
193 dma_addr_t handle, size_t size,
194 enum dma_data_direction dir, int flush_type)
195{
196 mshci_s3c_dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)), \
197 handle & ~PAGE_MASK, size, dir, flush_type);
198}
199
200
201void mshci_s3c_dma_unmap_sg(struct mshci_host *host,
202 struct device *dev, struct scatterlist *sg,
203 int nents, enum dma_data_direction dir, int flush_type)
204{
205#if 1
206 struct scatterlist *s;
207 int i;
208
209 if (dir == DMA_TO_DEVICE)
210 for_each_sg(sg, s, nents, i)
211 mshci_s3c_dma_unmap_page(dev, sg_dma_address(s),
212 sg_dma_len(s),dir, flush_type);
213#endif
214}
215
216MODULE_DESCRIPTION("Samsung MSHCI (HSMMC) own dma map functions");
217MODULE_AUTHOR("Hyunsung Jang, <hs79.jang@samsung.com>");
218MODULE_LICENSE("GPL v2");
219MODULE_ALIAS("platform:s3c-mshci");
220
diff --git a/drivers/mmc/host/mshci-s3c.c b/drivers/mmc/host/mshci-s3c.c
new file mode 100644
index 00000000000..323f11595fa
--- /dev/null
+++ b/drivers/mmc/host/mshci-s3c.c
@@ -0,0 +1,631 @@
1/*
2* linux/drivers/mmc/host/mshci-s3c.c
3* Mobile Storage Host Controller Interface driver
4*
5* Copyright (c) 2011 Samsung Electronics Co., Ltd.
6* http://www.samsung.com
7*
8* Based on linux/drivers/mmc/host/sdhci-s3c.c
9*
10* This program is free software; you can redistribute it and/or modify
11* it under the terms of the GNU General Public License as published by
12* the Free Software Foundation; either version 2 of the License, or (at
13* your option) any later version.
14*
15*/
16
17#include <linux/delay.h>
18#include <linux/dma-mapping.h>
19#include <linux/platform_device.h>
20#include <linux/slab.h>
21#include <linux/clk.h>
22#include <linux/io.h>
23#include <linux/gpio.h>
24
25#include <linux/mmc/host.h>
26
27#include <plat/gpio-cfg.h>
28#include <plat/mshci.h>
29#include <plat/clock.h>
30#include <plat/cpu.h>
31
32#include "mshci.h"
33
34#ifdef CONFIG_MMC_MSHCI_S3C_DMA_MAP
35int mshci_s3c_dma_map_sg(struct mshci_host *host, struct device *dev,
36 struct scatterlist *sg, int nents,
37 enum dma_data_direction dir, int flush_type);
38
39void mshci_s3c_dma_unmap_sg(struct mshci_host *host, struct device *dev,
40 struct scatterlist *sg, int nents,
41 enum dma_data_direction dir, int flush_type);
42#endif
43
44#define MAX_BUS_CLK (1)
45
46/**
47 * struct mshci_s3c - S3C MSHCI instance
48 * @host: The MSHCI host created
49 * @pdev: The platform device we where created from.
50 * @ioarea: The resource created when we claimed the IO area.
51 * @pdata: The platform data for this controller.
52 * @cur_clk: The index of the current bus clock.
53 * @clk_io: The clock for the internal bus interface.
54 * @clk_bus: The clocks that are available for the SD/MMC bus clock.
55 */
56struct mshci_s3c {
57 struct mshci_host *host;
58 struct platform_device *pdev;
59 struct resource *ioarea;
60 struct s3c_mshci_platdata *pdata;
61 unsigned int cur_clk;
62 int ext_cd_irq;
63 int ext_cd_gpio;
64
65 struct clk *clk_io;
66 struct clk *clk_bus[MAX_BUS_CLK];
67};
68
69static inline struct mshci_s3c *to_s3c(struct mshci_host *host)
70{
71 return mshci_priv(host);
72}
73
74/**
75 * mshci_s3c_get_max_clk - callback to get maximum clock frequency.
76 * @host: The MSHCI host instance.
77 *
78 * Callback to return the maximum clock rate acheivable by the controller.
79*/
80static unsigned int mshci_s3c_get_max_clk(struct mshci_host *host)
81{
82 struct mshci_s3c *ourhost = to_s3c(host);
83 struct clk *busclk;
84 unsigned int rate, max;
85 int clk;
86
87 for (max = 0, clk = 0; clk < MAX_BUS_CLK; clk++) {
88 busclk = ourhost->clk_bus[clk];
89 if (!busclk)
90 continue;
91
92 rate = clk_get_rate(busclk);
93 /* It should be checked later ############# */
94 if (rate > max) {
95 if ((soc_is_exynos4412() || soc_is_exynos4212()) &&
96 (samsung_rev() >= EXYNOS4412_REV_1_0))
97 max = rate >> 2;
98 else
99 max = rate >> 1;
100 }
101 }
102
103 /* max clock can be change after changing clock source. */
104 host->mmc->f_max = max;
105 return max;
106}
107
108/**
109 * mshci_s3c_consider_clock - consider one the bus clocks for current setting
110 * @ourhost: Our MSHCI instance.
111 * @src: The source clock index.
112 * @wanted: The clock frequency wanted.
113 */
114static unsigned int mshci_s3c_consider_clock(struct mshci_s3c *ourhost,
115 unsigned int src,
116 unsigned int wanted)
117{
118 unsigned long rate;
119 struct clk *clksrc = ourhost->clk_bus[src];
120 int div;
121
122 if (!clksrc)
123 return UINT_MAX;
124
125 rate = clk_get_rate(clksrc);
126
127 for (div = 1; div < 256; div *= 2) {
128 if ((rate / div) <= wanted)
129 break;
130 }
131
132 dev_dbg(&ourhost->pdev->dev, "clk %d: rate %ld, want %d, got %ld\n",
133 src, rate, wanted, rate / div);
134
135 return wanted - (rate / div);
136}
137
138/**
139 * mshci_s3c_set_clock - callback on clock change
140 * @host: The MSHCI host being changed
141 * @clock: The clock rate being requested.
142 *
143 * When the card's clock is going to be changed, look at the new frequency
144 * and find the best clock source to go with it.
145*/
146static void mshci_s3c_set_clock(struct mshci_host *host, unsigned int clock)
147{
148 struct mshci_s3c *ourhost = to_s3c(host);
149 unsigned int best = UINT_MAX;
150 unsigned int delta;
151 int best_src = 0;
152 int src;
153
154 /* don't bother if the clock is going off. */
155 if (clock == 0)
156 return;
157
158 for (src = 0; src < MAX_BUS_CLK; src++) {
159 delta = mshci_s3c_consider_clock(ourhost, src, clock);
160 if (delta < best) {
161 best = delta;
162 best_src = src;
163 }
164 }
165
166 dev_dbg(&ourhost->pdev->dev,
167 "selected source %d, clock %d, delta %d\n",
168 best_src, clock, best);
169
170 /* select the new clock source */
171
172 if (ourhost->cur_clk != best_src) {
173 struct clk *clk = ourhost->clk_bus[best_src];
174
175 ourhost->cur_clk = best_src;
176 host->max_clk = clk_get_rate(clk);
177 }
178
179 /* reconfigure the hardware for new clock rate */
180
181 {
182 struct mmc_ios ios;
183
184 ios.clock = clock;
185
186 if (ourhost->pdata->cfg_card)
187 (ourhost->pdata->cfg_card)(ourhost->pdev, host->ioaddr,
188 &ios, NULL);
189 }
190}
191
192/**
193 * mshci_s3c_get_ro - callback for get_ro
194 * @host: The MSHCI host being changed
195 *
196 * If the WP pin is connected with GPIO, can get the value which indicates
197 * the card is locked or not.
198*/
199static int mshci_s3c_get_ro(struct mmc_host *mmc)
200{
201 struct mshci_s3c *ourhost = to_s3c(mmc_priv(mmc));
202
203 return gpio_get_value(ourhost->pdata->wp_gpio);
204}
205
206/**
207 * mshci_s3c_cfg_wp - configure GPIO for WP pin
208 * @gpio_num: GPIO number which connected with WP line from SD/MMC slot
209 *
210 * Configure GPIO for using WP line
211 */
212static void mshci_s3c_cfg_wp(unsigned int gpio_num)
213{
214 s3c_gpio_cfgpin(gpio_num, S3C_GPIO_INPUT);
215 s3c_gpio_setpull(gpio_num, S3C_GPIO_PULL_UP);
216}
217
218static void mshci_s3c_set_ios(struct mshci_host *host,
219 struct mmc_ios *ios)
220{
221 struct mshci_s3c *ourhost = to_s3c(host);
222 struct s3c_mshci_platdata *pdata = ourhost->pdata;
223 int width;
224
225 if (ios->power_mode != MMC_POWER_OFF) {
226 switch (ios->bus_width) {
227 case MMC_BUS_WIDTH_8:
228 width = 8;
229 break;
230 case MMC_BUS_WIDTH_4:
231 width = 4;
232 break;
233 case MMC_BUS_WIDTH_1:
234 width = 1;
235 break;
236 default:
237 BUG();
238 }
239
240 if (pdata->cfg_gpio)
241 pdata->cfg_gpio(ourhost->pdev, width);
242 }
243
244 if (pdata->cfg_card)
245 pdata->cfg_card(ourhost->pdev, host->ioaddr,
246 ios, host->mmc->card);
247
248 if (pdata->cfg_ddr) {
249 if (ios->timing == MMC_TIMING_UHS_DDR50)
250 pdata->cfg_ddr(ourhost->pdev, 1);
251 else
252 pdata->cfg_ddr(ourhost->pdev, 0);
253 }
254 /* after change DDR/SDR, max_clk has been changed.
255 You should re-calc the max_clk */
256 host->max_clk = mshci_s3c_get_max_clk(host);
257}
258
259/**
260 * mshci_s3c_init_card - Reset eMMC device
261 *
262 * init eMMC_card.
263 */
264
265static void mshci_s3c_init_card(struct mshci_host *host)
266{
267 struct mshci_s3c *ourhost = to_s3c(host);
268 struct s3c_mshci_platdata *pdata = ourhost->pdata;
269
270 if (pdata->init_card)
271 pdata->init_card(ourhost->pdev);
272}
273
274static int mshci_s3c_get_fifo_depth(struct mshci_host *host)
275{
276 struct mshci_s3c *ourhost = to_s3c(host);
277 struct s3c_mshci_platdata *pdata = ourhost->pdata;
278
279 return pdata->fifo_depth;
280}
281
282
/* Host-controller callbacks wired into the mshci core.  get_ro is
 * added at probe time only when a WP GPIO is configured. */
static struct mshci_ops mshci_s3c_ops = {
	.get_max_clock		= mshci_s3c_get_max_clk,
	.set_clock		= mshci_s3c_set_clock,
	.set_ios		= mshci_s3c_set_ios,
	.init_card		= mshci_s3c_init_card,
#ifdef CONFIG_MMC_MSHCI_S3C_DMA_MAP
	.dma_map_sg		= mshci_s3c_dma_map_sg,
	.dma_unmap_sg	= mshci_s3c_dma_unmap_sg,
#endif
	.get_fifo_depth		= mshci_s3c_get_fifo_depth,
};
294
295static void mshci_s3c_notify_change(struct platform_device *dev, int state)
296{
297 struct mshci_host *host;
298 unsigned long flags;
299
300 local_irq_save(flags);
301 host = platform_get_drvdata(dev);
302 if (host) {
303 if (state) {
304 dev_dbg(&dev->dev, "card inserted.\n");
305 host->flags &= ~MSHCI_DEVICE_DEAD;
306 tasklet_schedule(&host->card_tasklet);
307 } else {
308 dev_dbg(&dev->dev, "card removed.\n");
309 host->flags |= MSHCI_DEVICE_DEAD;
310 tasklet_schedule(&host->card_tasklet);
311 }
312 }
313 local_irq_restore(flags);
314}
315
316static irqreturn_t mshci_s3c_gpio_card_detect_isr(int irq, void *dev_id)
317{
318 struct mshci_s3c *sc = dev_id;
319 int status = gpio_get_value(sc->ext_cd_gpio);
320 if (sc->pdata->ext_cd_gpio_invert)
321 status = !status;
322 mshci_s3c_notify_change(sc->pdev, status);
323 return IRQ_HANDLED;
324}
325
326
327static int __devinit mshci_s3c_probe(struct platform_device *pdev)
328{
329 struct s3c_mshci_platdata *pdata = pdev->dev.platform_data;
330 struct device *dev = &pdev->dev;
331 struct mshci_host *host;
332 struct mshci_s3c *sc;
333 struct resource *res;
334 int ret, irq, ptr, clks;
335
336 if (!pdata) {
337 dev_err(dev, "no device data specified\n");
338 return -ENOENT;
339 }
340
341 irq = platform_get_irq(pdev, 0);
342 if (irq < 0) {
343 dev_err(dev, "no irq specified\n");
344 return irq;
345 }
346 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
347 if (!res) {
348 dev_err(dev, "no memory specified\n");
349 return -ENOENT;
350 }
351 host = mshci_alloc_host(dev, sizeof(struct mshci_s3c));
352 if (IS_ERR(host)) {
353 dev_err(dev, "mshci_alloc_host() failed\n");
354 return PTR_ERR(host);
355 }
356 sc = mshci_priv(host);
357
358 if (soc_is_exynos4210()) {
359 host->data_addr = 0x0;
360 host->hold_bit = 0;
361 } else {
362 host->data_addr = 0x100;
363 host->hold_bit = CMD_USE_HOLD_REG;
364 }
365
366 sc->host = host;
367 sc->pdev = pdev;
368 sc->pdata = pdata;
369 sc->ext_cd_gpio = -1;
370
371 platform_set_drvdata(pdev, host);
372
373 sc->clk_io = clk_get(dev, "dwmci");
374 if (IS_ERR(sc->clk_io)) {
375 dev_err(dev, "failed to get io clock\n");
376 ret = PTR_ERR(sc->clk_io);
377 goto err_io_clk;
378 }
379
380 /* enable the local io clock and keep it running for the moment. */
381 clk_enable(sc->clk_io);
382
383 for (clks = 0, ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
384 struct clk *clk;
385 char *name = pdata->clocks[ptr];
386
387 if (name == NULL)
388 continue;
389 clk = clk_get(dev, name);
390 if (IS_ERR(clk)) {
391 dev_err(dev, "failed to get clock %s\n", name);
392 continue;
393 }
394
395#if defined(CONFIG_EXYNOS4_MSHC_VPLL_46MHZ) || \
396 defined(CONFIG_EXYNOS4_MSHC_EPLL_45MHZ)
397 if (!strcmp("sclk_dwmci", name)) {
398 struct clk *parent_clk;
399
400 parent_clk = clk_get_parent(clk);
401
402 if (!parent_clk) {
403 dev_err(dev, "failed to get parent clock %s\n"
404 , (char *)(clk->name));
405 } else {
406 for ( ; ; ) {
407 parent_clk = clk_get_parent(parent_clk);
408 if (parent_clk) {
409#ifdef CONFIG_EXYNOS4_MSHC_EPLL_45MHZ
410 if (!strcmp("fout_epll", \
411 parent_clk->name)) {
412 clk_set_rate \
413 (parent_clk, 180633600);
414 pdata->cfg_ddr(pdev, 0);
415#elif defined(CONFIG_EXYNOS4_MSHC_VPLL_46MHZ)
416 if (!strcmp("fout_vpll", \
417 parent_clk->name)) {
418 clk_set_rate \
419 (parent_clk, 370882812);
420 pdata->cfg_ddr(pdev, 0);
421#endif
422 clk_enable(parent_clk);
423 break;
424 } else
425 continue;
426 } else {
427 dev_err(dev, "failed to"
428 "get parent"
429 "clock %s\n"
430 , clk->name);
431 break;
432 }
433 }
434 }
435 }
436#endif
437 clks++;
438 sc->clk_bus[ptr] = clk;
439 clk_enable(clk);
440
441 dev_info(dev, "clock source %d: %s (%ld Hz)\n",
442 ptr, name, clk_get_rate(clk));
443 }
444
445 if (clks == 0) {
446 dev_err(dev, "failed to find any bus clocks\n");
447 ret = -ENOENT;
448 goto err_no_busclks;
449 }
450
451 sc->ioarea = request_mem_region(res->start, resource_size(res),
452 mmc_hostname(host->mmc));
453 if (!sc->ioarea) {
454 dev_err(dev, "failed to reserve register area\n");
455 ret = -ENXIO;
456 goto err_req_regs;
457 }
458
459 host->ioaddr = ioremap_nocache(res->start, resource_size(res));
460 if (!host->ioaddr) {
461 dev_err(dev, "failed to map registers\n");
462 ret = -ENXIO;
463 goto err_req_regs;
464 }
465
466 /* Ensure we have minimal gpio selected CMD/CLK/Detect */
467 if (pdata->cfg_gpio)
468 pdata->cfg_gpio(pdev, pdata->max_width);
469 else
470 dev_err(dev, "cfg_gpio dose not exist.!\n");
471
472 host->hw_name = "samsung-mshci";
473 host->ops = &mshci_s3c_ops;
474 host->quirks = 0;
475 host->irq = irq;
476
477 if (pdata->host_caps)
478 host->mmc->caps = pdata->host_caps;
479 else
480 host->mmc->caps = 0;
481
482 if (pdata->host_caps2)
483 host->mmc->caps2 = pdata->host_caps2;
484 else
485 host->mmc->caps2 = 0;
486
487 if (pdata->cd_type == S3C_MSHCI_CD_PERMANENT) {
488 host->quirks |= MSHCI_QUIRK_BROKEN_PRESENT_BIT;
489 host->mmc->caps |= MMC_CAP_NONREMOVABLE;
490 }
491
492 /* IF SD controller's WP pin donsn't connected with SD card and there
493 * is an allocated GPIO for getting WP data form SD card,
494 * use this quirk and send the GPIO number in pdata->wp_gpio. */
495 if (pdata->has_wp_gpio && gpio_is_valid(pdata->wp_gpio)) {
496 mshci_s3c_ops.get_ro = mshci_s3c_get_ro;
497 host->quirks |= MSHCI_QUIRK_NO_WP_BIT;
498 mshci_s3c_cfg_wp(pdata->wp_gpio);
499 }
500
501 ret = mshci_add_host(host);
502
503 if (pdata->cd_type == S3C_MSHCI_CD_GPIO &&
504 gpio_is_valid(pdata->ext_cd_gpio)) {
505
506 ret = gpio_request(pdata->ext_cd_gpio, "MSHCI EXT CD");
507 if (ret) {
508 dev_err(&pdev->dev, "cannot request gpio for card detect\n");
509 goto err_add_host;
510 }
511
512 sc->ext_cd_gpio = pdata->ext_cd_gpio;
513
514 sc->ext_cd_irq = gpio_to_irq(pdata->ext_cd_gpio);
515 if (sc->ext_cd_irq &&
516 request_irq(sc->ext_cd_irq,
517 mshci_s3c_gpio_card_detect_isr,
518 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
519 dev_name(&pdev->dev), sc)) {
520 dev_err(&pdev->dev, "cannot request irq for card detect\n");
521 sc->ext_cd_irq = 0;
522 }
523 dev_dbg(&pdev->dev, "mshci detects a card insertion/removal"
524 "by EINT\n");
525 }
526
527 if (ret) {
528 dev_err(dev, "mshci_add_host() failed\n");
529 goto err_add_host;
530 }
531
532 device_enable_async_suspend(dev);
533
534 return 0;
535
536 err_add_host:
537 release_resource(sc->ioarea);
538 kfree(sc->ioarea);
539
540 err_req_regs:
541 for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
542 clk_disable(sc->clk_bus[ptr]);
543 clk_put(sc->clk_bus[ptr]);
544 }
545
546 err_no_busclks:
547 clk_disable(sc->clk_io);
548 clk_put(sc->clk_io);
549
550 err_io_clk:
551 mshci_free_host(host);
552 return ret;
553}
554
/* NOTE(review): stub — nothing acquired in probe is released here
 * (host, irq, clocks, ioremap, mem region, CD gpio all leak on
 * unbind).  TODO: implement a proper teardown mirroring probe. */
static int __devexit mshci_s3c_remove(struct platform_device *pdev)
{
	return 0;
}
559
560#ifdef CONFIG_PM
561
562static int mshci_s3c_suspend(struct platform_device *dev, pm_message_t pm)
563{
564 struct mshci_host *host = platform_get_drvdata(dev);
565 struct s3c_mshci_platdata *pdata = dev->dev.platform_data;
566
567 mshci_suspend_host(host, pm);
568
569 if (pdata->set_power)
570 pdata->set_power(dev, 0);
571
572 return 0;
573}
574
575static int mshci_s3c_resume(struct platform_device *dev)
576{
577 struct mshci_host *host = platform_get_drvdata(dev);
578 struct s3c_mshci_platdata *pdata = dev->dev.platform_data;
579
580 if (pdata->set_power)
581 pdata->set_power(dev, 1);
582
583 mshci_resume_host(host);
584 return 0;
585}
586
/* Shutdown helper: suspend the host and run the board shutdown hook.
 * NOTE(review): this is only compiled under CONFIG_PM and is not wired
 * into mshci_s3c_driver (its pm_message_t parameter does not match the
 * platform .shutdown signature) — it appears to be dead code; confirm
 * whether a caller exists elsewhere before relying on it. */
static void mshci_s3c_shutdown(struct platform_device *dev, pm_message_t pm)
{
	struct mshci_host *host = platform_get_drvdata(dev);
	struct s3c_mshci_platdata *pdata = dev->dev.platform_data;

	mshci_suspend_host(host, pm);

	if (pdata->shutdown)
		pdata->shutdown();
}
597
598
599#else
600#define mshci_s3c_suspend NULL
601#define mshci_s3c_resume NULL
602#endif
603
/* Platform driver glue.  NOTE(review): the name "dw_mmc" matches the
 * platform-device name used by the board files for this controller;
 * it coincides with the mainline dw_mmc driver's name — confirm only
 * one of the two is built into a given kernel. */
static struct platform_driver mshci_s3c_driver = {
	.probe		= mshci_s3c_probe,
	.remove		= __devexit_p(mshci_s3c_remove),
	.suspend	= mshci_s3c_suspend,
	.resume		= mshci_s3c_resume,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "dw_mmc",
	},
};
614
/* Module entry point: register the platform driver. */
static int __init mshci_s3c_init(void)
{
	return platform_driver_register(&mshci_s3c_driver);
}

/* Module exit point: unregister the platform driver. */
static void __exit mshci_s3c_exit(void)
{
	platform_driver_unregister(&mshci_s3c_driver);
}

module_init(mshci_s3c_init);
module_exit(mshci_s3c_exit);
627
628MODULE_DESCRIPTION("Samsung MSHCI (HSMMC) glue");
629MODULE_AUTHOR("Hyunsung Jang, <hs79.jang@samsung.com>");
630MODULE_LICENSE("GPL v2");
631MODULE_ALIAS("platform:dw_mmc");
diff --git a/drivers/mmc/host/mshci.c b/drivers/mmc/host/mshci.c
new file mode 100644
index 00000000000..a6a1fbcc139
--- /dev/null
+++ b/drivers/mmc/host/mshci.c
@@ -0,0 +1,2248 @@
1/*
2* linux/drivers/mmc/host/mshci.c
3* Mobile Storage Host Controller Interface driver
4*
5* Copyright (c) 2011 Samsung Electronics Co., Ltd.
6* http://www.samsung.com
7*
8* Based on linux/drivers/mmc/host/sdhci.c
9*
10* This program is free software; you can redistribute it and/or modify
11* it under the terms of the GNU General Public License as published by
12* the Free Software Foundation; either version 2 of the License, or (at
13* your option) any later version.
14*
15*/
16
17#include <linux/delay.h>
18#include <linux/highmem.h>
19#include <linux/io.h>
20#include <linux/dma-mapping.h>
21#include <linux/slab.h>
22#include <linux/scatterlist.h>
23
24#include <linux/leds.h>
25
26#include <linux/mmc/host.h>
27
28#include <plat/cpu.h>
29
30#include "mshci.h"
31
32#define DRIVER_NAME "mshci"
33
34#define DBG(f, x...) \
35 pr_debug(DRIVER_NAME " [%s()]: " f, __func__, ## x)
36
37#define SDHC_CLK_ON 1
38#define SDHC_CLK_OFF 0
39
40static unsigned int debug_quirks;
41
42static void mshci_prepare_data(struct mshci_host *, struct mmc_data *);
43static void mshci_finish_data(struct mshci_host *);
44
45static void mshci_send_command(struct mshci_host *, struct mmc_command *);
46static void mshci_finish_command(struct mshci_host *);
47static void mshci_fifo_init(struct mshci_host *host);
48
49static void mshci_set_clock(struct mshci_host *host,
50 unsigned int clock, u32 bus_width);
51
52#define MSHCI_MAX_DMA_SINGLE_TRANS_SIZE (0x1000)
53#define MSHCI_MAX_DMA_TRANS_SIZE (0x400000)
54#define MSHCI_MAX_DMA_LIST (MSHCI_MAX_DMA_TRANS_SIZE / \
55 MSHCI_MAX_DMA_SINGLE_TRANS_SIZE)
56
/* Dump every MSHC register at KERN_DEBUG — diagnostic aid called from
 * the timeout/reset error paths.  FIFODAT is offset by host->data_addr
 * because its location differs between Exynos4210 and later SoCs. */
static void mshci_dumpregs(struct mshci_host *host)
{
	printk(KERN_DEBUG DRIVER_NAME ": ============== REGISTER DUMP ==============\n");
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_CTRL:      0x%08x\n",
		mshci_readl(host, MSHCI_CTRL));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_PWREN:     0x%08x\n",
		mshci_readl(host, MSHCI_PWREN));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_CLKDIV:    0x%08x\n",
		mshci_readl(host, MSHCI_CLKDIV));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_CLKSRC:    0x%08x\n",
		mshci_readl(host, MSHCI_CLKSRC));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_CLKENA:    0x%08x\n",
		mshci_readl(host, MSHCI_CLKENA));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_TMOUT:     0x%08x\n",
		mshci_readl(host, MSHCI_TMOUT));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_CTYPE:     0x%08x\n",
		mshci_readl(host, MSHCI_CTYPE));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_BLKSIZ:    0x%08x\n",
		mshci_readl(host, MSHCI_BLKSIZ));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_BYTCNT:    0x%08x\n",
		mshci_readl(host, MSHCI_BYTCNT));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_INTMSK:    0x%08x\n",
		mshci_readl(host, MSHCI_INTMSK));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_CMDARG:    0x%08x\n",
		mshci_readl(host, MSHCI_CMDARG));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_CMD:       0x%08x\n",
		mshci_readl(host, MSHCI_CMD));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_MINTSTS:   0x%08x\n",
		mshci_readl(host, MSHCI_MINTSTS));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_RINTSTS:   0x%08x\n",
		mshci_readl(host, MSHCI_RINTSTS));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_STATUS:    0x%08x\n",
		mshci_readl(host, MSHCI_STATUS));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_FIFOTH:    0x%08x\n",
		mshci_readl(host, MSHCI_FIFOTH));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_CDETECT:   0x%08x\n",
		mshci_readl(host, MSHCI_CDETECT));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_WRTPRT:    0x%08x\n",
		mshci_readl(host, MSHCI_WRTPRT));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_GPIO:      0x%08x\n",
		mshci_readl(host, MSHCI_GPIO));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_TCBCNT:    0x%08x\n",
		mshci_readl(host, MSHCI_TCBCNT));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_TBBCNT:    0x%08x\n",
		mshci_readl(host, MSHCI_TBBCNT));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_DEBNCE:    0x%08x\n",
		mshci_readl(host, MSHCI_DEBNCE));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_USRID:     0x%08x\n",
		mshci_readl(host, MSHCI_USRID));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_VERID:     0x%08x\n",
		mshci_readl(host, MSHCI_VERID));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_HCON:      0x%08x\n",
		mshci_readl(host, MSHCI_HCON));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_UHS_REG:   0x%08x\n",
		mshci_readl(host, MSHCI_UHS_REG));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_BMOD:      0x%08x\n",
		mshci_readl(host, MSHCI_BMOD));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_PLDMND:    0x%08x\n",
		mshci_readl(host, MSHCI_PLDMND));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_DBADDR:    0x%08x\n",
		mshci_readl(host, MSHCI_DBADDR));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_IDSTS:     0x%08x\n",
		mshci_readl(host, MSHCI_IDSTS));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_IDINTEN:   0x%08x\n",
		mshci_readl(host, MSHCI_IDINTEN));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_DSCADDR:   0x%08x\n",
		mshci_readl(host, MSHCI_DSCADDR));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_BUFADDR:   0x%08x\n",
		mshci_readl(host, MSHCI_BUFADDR));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_WAKEUPCON: 0x%08x\n",
		mshci_readl(host, MSHCI_WAKEUPCON));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_CLOCKCON:  0x%08x\n",
		mshci_readl(host, MSHCI_CLOCKCON));
	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_FIFODAT:   0x%08x\n",
		mshci_readl(host, MSHCI_FIFODAT + host->data_addr));
	printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n");
}
134
135
136/*****************************************************************************\
137 * *
138 * Low level functions *
139 * *
140\*****************************************************************************/
141
142static void mshci_clear_set_irqs(struct mshci_host *host, u32 clear, u32 set)
143{
144 u32 ier;
145
146 ier = mshci_readl(host, MSHCI_INTMSK);
147 ier &= ~clear;
148 ier |= set;
149 mshci_writel(host, ier, MSHCI_INTMSK);
150}
151
/* Enable the interrupt sources given in @irqs. */
static void mshci_unmask_irqs(struct mshci_host *host, u32 irqs)
{
	mshci_clear_set_irqs(host, 0, irqs);
}
156
/* Disable the interrupt sources given in @irqs. */
static void mshci_mask_irqs(struct mshci_host *host, u32 irqs)
{
	mshci_clear_set_irqs(host, irqs, 0);
}
161
162static void mshci_set_card_detection(struct mshci_host *host, bool enable)
163{
164 u32 irqs = INTMSK_CDETECT;
165
166 /* it can makes a problme if enable CD_DETECT interrupt,
167 * when CD pin dose not exist. */
168 if (host->quirks & MSHCI_QUIRK_BROKEN_CARD_DETECTION ||
169 host->quirks & MSHCI_QUIRK_BROKEN_PRESENT_BIT) {
170 mshci_mask_irqs(host, irqs);
171 } else if (enable) {
172 mshci_unmask_irqs(host, irqs);
173 } else {
174 mshci_mask_irqs(host, irqs);
175 }
176}
177
/* Convenience wrapper: turn the CD interrupt on (quirks permitting). */
static void mshci_enable_card_detection(struct mshci_host *host)
{
	mshci_set_card_detection(host, true);
}
182
/* Convenience wrapper: turn the CD interrupt off. */
static void mshci_disable_card_detection(struct mshci_host *host)
{
	mshci_set_card_detection(host, false);
}
187
188static void mshci_reset_ciu(struct mshci_host *host)
189{
190 u32 timeout = 100;
191 u32 ier;
192
193 ier = mshci_readl(host, MSHCI_CTRL);
194 ier |= CTRL_RESET;
195
196 mshci_writel(host, ier, MSHCI_CTRL);
197 while (mshci_readl(host, MSHCI_CTRL) & CTRL_RESET) {
198 if (timeout == 0) {
199 printk(KERN_ERR "%s: Reset CTRL never completed.\n",
200 mmc_hostname(host->mmc));
201 mshci_dumpregs(host);
202 return;
203 }
204 timeout--;
205 mdelay(1);
206 }
207}
208
209static void mshci_reset_fifo(struct mshci_host *host)
210{
211 u32 timeout = 100;
212 u32 ier;
213
214 ier = mshci_readl(host, MSHCI_CTRL);
215 ier |= FIFO_RESET;
216
217 mshci_writel(host, ier, MSHCI_CTRL);
218 while (mshci_readl(host, MSHCI_CTRL) & FIFO_RESET) {
219 if (timeout == 0) {
220 printk(KERN_ERR "%s: Reset FIFO never completed.\n",
221 mmc_hostname(host->mmc));
222 mshci_dumpregs(host);
223 return;
224 }
225 timeout--;
226 mdelay(1);
227 }
228}
229
230static void mshci_reset_dma(struct mshci_host *host)
231{
232 u32 timeout = 100;
233 u32 ier;
234
235 ier = mshci_readl(host, MSHCI_CTRL);
236 ier |= DMA_RESET;
237
238 mshci_writel(host, ier, MSHCI_CTRL);
239 while (mshci_readl(host, MSHCI_CTRL) & DMA_RESET) {
240 if (timeout == 0) {
241 printk(KERN_ERR "%s: Reset DMA never completed.\n",
242 mmc_hostname(host->mmc));
243 mshci_dumpregs(host);
244 return;
245 }
246 timeout--;
247 mdelay(1);
248 }
249}
250
/*
 * Full controller reset: wait for the card to release the data line,
 * optionally hard-reset the eMMC via the board hook if it never does,
 * then reset CIU, FIFO and DMA in turn.
 */
static void mshci_reset_all(struct mshci_host *host)
{
	int count, err = 0;

	/* Wait max 100 ms */
	count = 10000;

	/* before reset ciu, it should check DATA0. if when DATA0 is low and
	   it resets ciu, it might make a problem */
	do {
		/* STATUS bit 9 is sampled three times with small delays to
		 * debounce — presumably the data-busy flag; TODO confirm
		 * against the MSHC databook. */
		if (!(mshci_readl(host, MSHCI_STATUS) & (1<<9))) {
			udelay(100);
			if (!(mshci_readl(host, MSHCI_STATUS) & (1<<9))) {
				udelay(100);
				if (!(mshci_readl(host, MSHCI_STATUS) & (1<<9)))
					break;
			}
		}
		if (count == 0) {
			printk(KERN_ERR "%s: Controller never released "
				"data0 before reset ciu.\n",
				mmc_hostname(host->mmc));
			mshci_dumpregs(host);
			err = 1;
			break;
		}
		count--;
		udelay(10);
	} while (1);

	/* if the card wedged the bus, power-cycle/reset it via the glue
	 * driver's init_card hook before resetting the controller */
	if (err && host->ops->init_card) {
		printk(KERN_ERR "%s: eMMC's data lines get low.\n"
			"Reset eMMC.\n", mmc_hostname(host->mmc));
		host->ops->init_card(host);
	}

	mshci_reset_ciu(host);
	udelay(1);
	mshci_reset_fifo(host);
	udelay(1);
	mshci_reset_dma(host);
	udelay(1);
}
294
/* Bring the controller to a known state: full reset, clear any latched
 * raw interrupt status, then enable the standard interrupt set. */
static void mshci_init(struct mshci_host *host)
{
	mshci_reset_all(host);

	/* clear interrupt status */
	mshci_writel(host, INTMSK_ALL, MSHCI_RINTSTS);

	mshci_clear_set_irqs(host, INTMSK_ALL,
		INTMSK_CDETECT | INTMSK_RE |
		INTMSK_CDONE | INTMSK_DTO | INTMSK_TXDR | INTMSK_RXDR |
		INTMSK_RCRC | INTMSK_DCRC | INTMSK_RTO | INTMSK_DRTO |
		INTMSK_HTO | INTMSK_FRUN | INTMSK_HLE | INTMSK_SBE |
		INTMSK_EBE);
}
309
/* Re-initialise the controller and re-arm card-detect interrupts. */
static void mshci_reinit(struct mshci_host *host)
{
	mshci_init(host);
	mshci_enable_card_detection(host);
}
315
316/*****************************************************************************\
317 * *
318 * Core functions *
319 * *
320\*****************************************************************************/
321
/*
 * PIO read path: drain the RX FIFO into the request's scatterlist.
 *
 * The FIFO fill level is read from STATUS (FIFO_COUNT field, bits
 * starting at 17) and converted to bytes via FIFO_WIDTH; 32-bit FIFO
 * words are then unpacked byte-by-byte (little-endian) into the
 * sg_miter buffer.  Runs with local IRQs disabled because sg_miter
 * was started with SG_MITER_ATOMIC.
 */
static void mshci_read_block_pio(struct mshci_host *host)
{
	unsigned long flags;
	size_t fifo_cnt, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	/* bytes currently available in the FIFO */
	fifo_cnt = (mshci_readl(host, MSHCI_STATUS)&FIFO_COUNT)>>17;
	fifo_cnt *= FIFO_WIDTH;
	chunk = 0;

	local_irq_save(flags);

	while (fifo_cnt) {
		if (!sg_miter_next(&host->sg_miter))
			BUG();

		len = min(host->sg_miter.length, fifo_cnt);

		fifo_cnt -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			/* refill the 4-byte scratch word when exhausted */
			if (chunk == 0) {
				scratch = mshci_readl(host,
					MSHCI_FIFODAT + host->data_addr);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
368
/*
 * PIO write path: pack scatterlist bytes into 32-bit words and push
 * them to the TX FIFO, up to 8 FIFO entries (8 * FIFO_WIDTH bytes)
 * per invocation.  host->data_transfered tracks total bytes written
 * so a spurious TXDR after the last byte can be told apart from a
 * genuinely missing sg entry.  Runs with local IRQs disabled because
 * sg_miter was started with SG_MITER_ATOMIC.
 */
static void mshci_write_block_pio(struct mshci_host *host)
{
	unsigned long flags;
	size_t fifo_cnt, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	/* write at most 8 FIFO entries per TXDR interrupt */
	fifo_cnt = 8;

	fifo_cnt *= FIFO_WIDTH;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (fifo_cnt) {
		if (!sg_miter_next(&host->sg_miter)) {

			/* Even though transfer is complete,
			 * TXDR interrupt occurs again.
			 * So, it has to check that it has really
			 * no next sg buffer or just DTO interrupt
			 * has not occured yet.
			 */

			if ((host->data->blocks * host->data->blksz) ==
					host->data_transfered)
				break; /* transfer done but DTO not yet */
			BUG();
		}
		len = min(host->sg_miter.length, fifo_cnt);

		fifo_cnt -= len;
		host->sg_miter.consumed = len;
		host->data_transfered += len;

		buf = (host->sg_miter.addr);

		while (len) {
			/* accumulate bytes little-endian into a 32-bit word */
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			/* flush on a full word, or on the final partial word */
			if ((chunk == 4) || ((len == 0) && (fifo_cnt == 0))) {
				mshci_writel(host, scratch,
					MSHCI_FIFODAT + host->data_addr);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
429
430static void mshci_transfer_pio(struct mshci_host *host)
431{
432 BUG_ON(!host->data);
433
434 if (host->blocks == 0)
435 return;
436
437 if (host->data->flags & MMC_DATA_READ)
438 mshci_read_block_pio(host);
439 else
440 mshci_write_block_pio(host);
441
442 DBG("PIO transfer complete.\n");
443}
444
445static void mshci_set_mdma_desc(u8 *desc_vir, u8 *desc_phy,
446 u32 des0, u32 des1, u32 des2)
447{
448 ((struct mshci_idmac *)(desc_vir))->des0 = des0;
449 ((struct mshci_idmac *)(desc_vir))->des1 = des1;
450 ((struct mshci_idmac *)(desc_vir))->des2 = des2;
451 ((struct mshci_idmac *)(desc_vir))->des3 = (u32)desc_phy +
452 sizeof(struct mshci_idmac);
453}
454
/*
 * Build the IDMAC descriptor chain for @data and map it for the device.
 *
 * The scatterlist is mapped through the glue driver's dma_map_sg hook
 * (with a size hint of 2 for >=1 MiB and 1 for >=64 KiB transfers) or
 * plain dma_map_sg otherwise, unless the pre_req path already mapped
 * it (data->host_cookie holds the sg_count then).  One descriptor is
 * written per mapped segment; the last gets MSHCI_IDMAC_LD.
 *
 * Returns 0 on success, -EINVAL on mapping failure.
 *
 * NOTE(review): host->idma_desc is dma_map_single()'d twice (before
 * and after writing the descriptors) without an intervening
 * dma_unmap_single() — the first mapping is used only to learn the bus
 * address for the des3 chain links, but re-mapping an already-mapped
 * buffer violates the DMA API; verify on a DMA-API-debug kernel.
 */
static int mshci_mdma_table_pre(struct mshci_host *host,
	struct mmc_data *data)
{
	int direction;

	u8 *desc_vir, *desc_phy;
	dma_addr_t addr;
	int len;

	struct scatterlist *sg;
	int i;
	u32 des_flag;
	u32 size_idmac = sizeof(struct mshci_idmac);

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	if (!data->host_cookie) {
		if (host->ops->dma_map_sg && data->blocks >= 2048) {
			/* if transfer size is bigger than 1MiB */
			host->sg_count = host->ops->dma_map_sg(host,
					mmc_dev(host->mmc),
					data->sg, data->sg_len, direction, 2);
		} else if (host->ops->dma_map_sg && data->blocks >= 128) {
			/* if transfer size is bigger than 64KiB */
			host->sg_count = host->ops->dma_map_sg(host,
					mmc_dev(host->mmc),
					data->sg, data->sg_len, direction, 1);
		} else {
			host->sg_count = dma_map_sg(mmc_dev(host->mmc),
					data->sg, data->sg_len, direction);
		}

		if (host->sg_count == 0)
			goto fail;
	} else
		host->sg_count = data->host_cookie;

	desc_vir = host->idma_desc;

	/* to know phy address */
	host->idma_addr = dma_map_single(mmc_dev(host->mmc),
				host->idma_desc,
				/* cache flush for only transfer size */
				(host->sg_count+1) * 16,
				DMA_TO_DEVICE);
	if (dma_mapping_error(mmc_dev(host->mmc), host->idma_addr))
		goto unmap_entries;
	/* descriptors must be 4-byte aligned for the IDMAC */
	BUG_ON(host->idma_addr & 0x3);

	desc_phy = (u8 *)host->idma_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/* tran, valid */
		des_flag = (MSHCI_IDMAC_OWN|MSHCI_IDMAC_CH);
		des_flag |= (i == 0) ? MSHCI_IDMAC_FS : 0;

		mshci_set_mdma_desc(desc_vir, desc_phy, des_flag, len, addr);
		desc_vir += size_idmac;
		desc_phy += size_idmac;

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc_vir - host->idma_desc) > MSHCI_MAX_DMA_LIST * \
			size_idmac);
	}

	/*
	 * Add a terminating flag.
	 */
	((struct mshci_idmac *)(desc_vir-size_idmac))->des0 |= MSHCI_IDMAC_LD;

	/* it has to dma map again to resync vir data to phy data */
	host->idma_addr = dma_map_single(mmc_dev(host->mmc),
				host->idma_desc,
				/* cache flush for only transfer size */
				(host->sg_count+1) * 16,
				DMA_TO_DEVICE);
	if (dma_mapping_error(mmc_dev(host->mmc), host->idma_addr))
		goto unmap_entries;
	BUG_ON(host->idma_addr & 0x3);

	return 0;

unmap_entries:
	if (host->ops->dma_unmap_sg && data->blocks >= 2048) {
		/* if transfer size is bigger than 1MiB */
		host->ops->dma_unmap_sg(host, mmc_dev(host->mmc),
			data->sg, data->sg_len, direction, 2);
	} else if (host->ops->dma_unmap_sg && data->blocks >= 128) {
		/* if transfer size is bigger than 64KiB */
		host->ops->dma_unmap_sg(host, mmc_dev(host->mmc),
			data->sg, data->sg_len, direction, 1);
	} else {
		dma_unmap_sg(mmc_dev(host->mmc),
			data->sg, data->sg_len, direction);
	}
fail:
	return -EINVAL;
}
562
563static void mshci_idma_table_post(struct mshci_host *host,
564 struct mmc_data *data)
565{
566 int direction;
567
568 if (data->flags & MMC_DATA_READ)
569 direction = DMA_FROM_DEVICE;
570 else
571 direction = DMA_TO_DEVICE;
572
573 dma_unmap_single(mmc_dev(host->mmc), host->idma_addr,
574 /* cache flush for only transfer size */
575 (host->sg_count+1) * 16,
576 DMA_TO_DEVICE);
577
578 if (!host->mmc->ops->post_req || !data->host_cookie) {
579 if (host->ops->dma_unmap_sg && data->blocks >= 2048) {
580 /* if transfer size is bigger than 1MiB */
581 host->ops->dma_unmap_sg(host, mmc_dev(host->mmc),
582 data->sg, data->sg_len, direction, 2);
583 } else if (host->ops->dma_unmap_sg && data->blocks >= 128) {
584 /* if transfer size is bigger than 64KiB */
585 host->ops->dma_unmap_sg(host, mmc_dev(host->mmc),
586 data->sg, data->sg_len, direction, 1);
587 } else {
588 dma_unmap_sg(mmc_dev(host->mmc),
589 data->sg, data->sg_len, direction);
590 }
591 }
592}
593
/* Data timeout value written to TMOUT; currently the maximum.
 * The original author left a note that this SHOULD be optimized. */
static u32 mshci_calc_timeout(struct mshci_host *host, struct mmc_data *data)
{
	return 0xffffffff; /* this value SHOULD be optimized */
}
598
599static void mshci_set_transfer_irqs(struct mshci_host *host)
600{
601 u32 dma_irqs = INTMSK_DMA;
602 u32 pio_irqs = INTMSK_TXDR | INTMSK_RXDR;
603
604 if (host->flags & MSHCI_REQ_USE_DMA)
605 mshci_clear_set_irqs(host, dma_irqs, 0);
606 else
607 mshci_clear_set_irqs(host, 0, pio_irqs);
608}
609
/*
 * Set up the controller for a data transfer: program the timeout,
 * reset the FIFO, decide DMA vs PIO, build and install the IDMAC
 * descriptor list (DMA) or start the sg_miter (PIO), then program
 * block size and total byte count.
 *
 * Falls back to PIO when any sg entry's length or offset is not
 * 4-byte aligned, since the IDMAC cannot handle that.
 */
static void mshci_prepare_data(struct mshci_host *host, struct mmc_data *data)
{
	u32 count;
	u32 ret;

	WARN_ON(host->data);

	if (data == NULL)
		return;

	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > host->mmc->max_blk_count);

	host->data = data;
	host->data_early = 0;

	count = mshci_calc_timeout(host, data);
	mshci_writel(host, count, MSHCI_TMOUT);

	mshci_reset_fifo(host);

	if (host->flags & (MSHCI_USE_IDMA))
		host->flags |= MSHCI_REQ_USE_DMA;

	/* pre_req already validated/mapped the sg list; skip the checks */
	if (data->host_cookie)
		goto check_done;
	/*
	 * FIXME: This doesn't account for merging when mapping the
	 * scatterlist.
	 */
	if (host->flags & MSHCI_REQ_USE_DMA) {
		/* mshc's IDMAC can't transfer data that is not aligned
		 * or has length not divided by 4 byte. */
		int i;
		struct scatterlist *sg;

		for_each_sg(data->sg, sg, data->sg_len, i) {
			if (sg->length & 0x3) {
				DBG("Reverting to PIO because of "
					"transfer size (%d)\n",
					sg->length);
				host->flags &= ~MSHCI_REQ_USE_DMA;
				break;
			} else if (sg->offset & 0x3) {
				DBG("Reverting to PIO because of "
					"bad alignment\n");
				host->flags &= ~MSHCI_REQ_USE_DMA;
				break;
			}
		}
	}
check_done:

	if (host->flags & MSHCI_REQ_USE_DMA) {
		ret = mshci_mdma_table_pre(host, data);
		if (ret) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~MSHCI_REQ_USE_DMA;
		} else {
			/* point the IDMAC at the descriptor chain */
			mshci_writel(host, host->idma_addr,
				MSHCI_DBADDR);
		}
	}

	if (host->flags & MSHCI_REQ_USE_DMA) {
		/* enable DMA, IDMA interrupts and IDMAC */
		mshci_writel(host, (mshci_readl(host, MSHCI_CTRL) |
					ENABLE_IDMAC|DMA_ENABLE), MSHCI_CTRL);
		mshci_writel(host, (mshci_readl(host, MSHCI_BMOD) |
					(BMOD_IDMAC_ENABLE|BMOD_IDMAC_FB)),
					MSHCI_BMOD);
		mshci_writel(host, INTMSK_IDMAC_ERROR, MSHCI_IDINTEN);
	}

	if (!(host->flags & MSHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;

		/* NOTE(review): informational message logged at KERN_ERR —
		 * probably should be KERN_INFO/DBG. */
		printk(KERN_ERR "it starts transfer on PIO\n");
	}

	/* set transfered data as 0. this value only uses for PIO write */
	host->data_transfered = 0;
	mshci_set_transfer_irqs(host);

	mshci_writel(host, data->blksz, MSHCI_BLKSIZ);
	mshci_writel(host, (data->blocks * data->blksz), MSHCI_BYTCNT);
}
710
711static u32 mshci_set_transfer_mode(struct mshci_host *host,
712 struct mmc_data *data)
713{
714 u32 ret = 0;
715
716 if (data == NULL)
717 return ret;
718
719 WARN_ON(!host->data);
720
721 /* this cmd has data to transmit */
722 ret |= CMD_DATA_EXP_BIT;
723
724 if (data->flags & MMC_DATA_WRITE)
725 ret |= CMD_RW_BIT;
726 if (data->flags & MMC_DATA_STREAM)
727 ret |= CMD_TRANSMODE_BIT;
728
729 return ret;
730}
731
/*
 * Complete the current data transfer: tear down DMA state, account
 * bytes transferred (zero on error, after resetting CIU/FIFO/DMA to
 * recover), then either issue the stop command or schedule the finish
 * tasklet.
 */
static void mshci_finish_data(struct mshci_host *host)
{
	struct mmc_data *data;

	BUG_ON(!host->data);

	data = host->data;
	host->data = NULL;

	if (host->flags & MSHCI_REQ_USE_DMA) {
		mshci_idma_table_post(host, data);
		/* disable IDMAC and DMA interrupt */
		mshci_writel(host, (mshci_readl(host, MSHCI_CTRL) &
				~(DMA_ENABLE|ENABLE_IDMAC)), MSHCI_CTRL);
		/* mask all interrupt source of IDMAC */
		mshci_writel(host, 0x0, MSHCI_IDINTEN);
	}

	if (data->error) {
		/* to go to idle state */
		mshci_reset_ciu(host);
		/* to clear fifo */
		mshci_reset_fifo(host);
		/* to reset dma */
		mshci_reset_dma(host);
		data->bytes_xfered = 0;
	} else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop && ((data->error) ||
		!(host->mmc->caps & MMC_CAP_CMD23) ||
		((host->mmc->caps & MMC_CAP_CMD23) &&
		!host->mrq->sbc))) /* packed cmd case */
		mshci_send_command(host, data->stop);
	else
		tasklet_schedule(&host->finish_tasklet);
}
774
/*
 * Wait until the card releases data-busy (STATUS bit 9) and the
 * controller clears CMD_STRT_BIT, before touching the clock.
 *
 * The first loop sleeps ~100us per iteration with the host lock
 * dropped (so it must be entered with host->lock held and
 * host->sl_flags valid); the second busy-waits 1us per iteration.
 *
 * NOTE(review): the "11sec"/"1sec" figures in the error messages do
 * not obviously match the loop bounds (1e6 * 100us ≈ 100s nominal for
 * the first loop, 1e6 * 1us ≈ 1s for the second) — confirm intended
 * timeouts.
 */
static void mshci_wait_release_start_bit(struct mshci_host *host)
{
	u32 loop_count = 1000000;

	ktime_t expires;
	u64 add_time = 100000; /* 100us */

	/* before off clock, make sure data busy is released. */
	while (mshci_readl(host, MSHCI_STATUS) & (1<<9) && --loop_count) {
		spin_unlock_irqrestore(&host->lock, host->sl_flags);
		expires = ktime_add_ns(ktime_get(), add_time);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
		spin_lock_irqsave(&host->lock, host->sl_flags);
	}
	if (loop_count == 0)
		printk(KERN_ERR "%s: cmd_strt_bit not released for 11sec\n",
				mmc_hostname(host->mmc));

	loop_count = 1000000;
	do {
		if (!(mshci_readl(host, MSHCI_CMD) & CMD_STRT_BIT))
			break;
		loop_count--;
		udelay(1);
	} while (loop_count);
	if (loop_count == 0)
		printk(KERN_ERR "%s: cmd_strt_bit not released for 1sec\n",
				mmc_hostname(host->mmc));
}
805
806static void mshci_clock_onoff(struct mshci_host *host, bool val)
807{
808 mshci_wait_release_start_bit(host);
809
810 if (val) {
811 mshci_writel(host, (0x1<<0), MSHCI_CLKENA);
812 mshci_writel(host, 0, MSHCI_CMD);
813 mshci_writel(host, CMD_ONLY_CLK, MSHCI_CMD);
814 } else {
815 mshci_writel(host, (0x0<<0), MSHCI_CLKENA);
816 mshci_writel(host, 0, MSHCI_CMD);
817 mshci_writel(host, CMD_ONLY_CLK, MSHCI_CMD);
818 }
819}
820
/*
 * Program and start one command on the CIU.
 *
 * Prepares the data phase (if any), arms a 10-second software watchdog
 * on host->timer, builds the CMD register value from cmd->flags, and
 * writes it with the start bit set.  Global interrupt output (CTRL
 * INT_ENABLE) is disabled during register setup and re-enabled after
 * the command is issued.  Called with host->lock held.
 */
static void mshci_send_command(struct mshci_host *host, struct mmc_command *cmd)
{
	int flags, ret;

	WARN_ON(host->cmd);

	/* clear error_state */
	/* opcode 12 (MMC_STOP_TRANSMISSION) may be part of error
	 * recovery, so the error flag is kept across it */
	if (cmd->opcode != 12)
		host->error_state = 0;

	/* disable interrupt before issuing cmd to the card. */
	mshci_writel(host, (mshci_readl(host, MSHCI_CTRL) & ~INT_ENABLE),
					MSHCI_CTRL);

	mod_timer(&host->timer, jiffies + 10 * HZ);

	host->cmd = cmd;

	mshci_prepare_data(host, cmd->data);

	mshci_writel(host, cmd->arg, MSHCI_CMDARG);

	flags = mshci_set_transfer_mode(host, cmd->data);

	/* a long (136-bit) response combined with busy signalling cannot
	 * be expressed in the CMD register encoding */
	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		printk(KERN_ERR "%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		flags |= CMD_RESP_EXP_BIT;
		if (cmd->flags & MMC_RSP_136)
			flags |= CMD_RESP_LENGTH_BIT;
	}
	if (cmd->flags & MMC_RSP_CRC)
		flags |= CMD_CHECK_CRC_BIT;

	flags |= (cmd->opcode | CMD_STRT_BIT | host->hold_bit |
			CMD_WAIT_PRV_DAT_BIT);

	/* diagnostic only: the previous command should have completed */
	ret = mshci_readl(host, MSHCI_CMD);
	if (ret & CMD_STRT_BIT)
		printk(KERN_ERR "CMD busy. current cmd %d. last cmd reg 0x%x\n",
			cmd->opcode, ret);

	mshci_writel(host, flags, MSHCI_CMD);

	/* enable interrupt upon it sends a command to the card. */
	mshci_writel(host, (mshci_readl(host, MSHCI_CTRL) | INT_ENABLE),
					MSHCI_CTRL);
}
875
876static void mshci_finish_command(struct mshci_host *host)
877{
878 int i;
879
880 BUG_ON(host->cmd == NULL);
881
882 if (host->cmd->flags & MMC_RSP_PRESENT) {
883 if (host->cmd->flags & MMC_RSP_136) {
884 /*
885 * response data are overturned.
886 */
887 for (i = 0; i < 4; i++) {
888 host->cmd->resp[0] = mshci_readl(host,
889 MSHCI_RESP3);
890 host->cmd->resp[1] = mshci_readl(host,
891 MSHCI_RESP2);
892 host->cmd->resp[2] = mshci_readl(host,
893 MSHCI_RESP1);
894 host->cmd->resp[3] = mshci_readl(host,
895 MSHCI_RESP0);
896 }
897 } else {
898 host->cmd->resp[0] = mshci_readl(host, MSHCI_RESP0);
899 }
900 }
901
902 host->cmd->error = 0;
903
904 if (host->data && host->data_early)
905 mshci_finish_data(host);
906
907 if (!host->cmd->data)
908 tasklet_schedule(&host->finish_tasklet);
909
910 host->cmd = NULL;
911}
912
/*
 * Set the card clock to the requested rate.
 *
 * The clock is gated before reprogramming.  The divider is the
 * smallest value for which max_clk / (2 * div) <= clock (div == 0
 * means full controller rate).  The new divider is latched with a
 * "clock only" command before the clock is re-enabled.  A request for
 * clock == 0 simply leaves the clock gated.
 */
static void mshci_set_clock(struct mshci_host *host,
		unsigned int clock, u32 ddr)
{
	int div;

	/* befor changing clock. clock needs to be off. */
	mshci_clock_onoff(host, CLK_DISABLE);

	if (clock == 0)
		goto out;

	if (clock >= host->max_clk) {
		div = 0;
	} else {
		for (div = 1; div <= 0xff; div++) {
			/* div value should not be greater than 0xff */
			if ((host->max_clk / (div<<1)) <= clock)
				break;
		}
	}

	mshci_wait_release_start_bit(host);

	mshci_writel(host, div, MSHCI_CLKDIV);

	/* latch the new divider with a clock-only command, then clear
	 * the clock-only flag again */
	mshci_writel(host, 0, MSHCI_CMD);
	mshci_writel(host, CMD_ONLY_CLK, MSHCI_CMD);
	mshci_writel(host, mshci_readl(host, MSHCI_CMD)&(~CMD_SEND_CLK_ONLY),
					MSHCI_CMD);

	mshci_clock_onoff(host, CLK_ENABLE);

out:
	host->clock = clock;
}
948
949static void mshci_set_power(struct mshci_host *host, unsigned short power)
950{
951 u8 pwr = power;
952
953 if (power == (unsigned short)-1)
954 pwr = 0;
955
956 if (host->pwr == pwr)
957 return;
958
959 host->pwr = pwr;
960
961 if (pwr == 0)
962 mshci_writel(host, 0, MSHCI_PWREN);
963 else
964 mshci_writel(host, 0x1, MSHCI_PWREN);
965}
966
967#ifdef CONFIG_MMC_POLLING_WAIT_CMD23
968static void mshci_check_sbc_status(struct mshci_host *host, int intmask)
969{
970 int timeout, int_status;;
971
972 /* wait for command done or error by polling */
973 timeout = 0x100000; /* it is bigger than 1ms */
974 do {
975 int_status = mshci_readl(host, MSHCI_RINTSTS);
976 if (int_status & CMD_STATUS)
977 break;
978 timeout--;
979 } while (timeout);
980
981 /* clear pending interupt bit */
982 mshci_writel(host, int_status, MSHCI_RINTSTS);
983
984 /* check whether command error has been occured or not. */
985 if (int_status & INTMSK_HTO) {
986 printk(KERN_ERR "%s: %s Host timeout error\n",
987 mmc_hostname(host->mmc),
988 __func__);
989 host->mrq->sbc->error = -ETIMEDOUT;
990 } else if (int_status & INTMSK_DRTO) {
991 printk(KERN_ERR "%s: %s Data read timeout error\n",
992 mmc_hostname(host->mmc),
993 __func__);
994 host->mrq->sbc->error = -ETIMEDOUT;
995 } else if (int_status & INTMSK_SBE) {
996 printk(KERN_ERR "%s: %s FIFO Start bit error\n",
997 mmc_hostname(host->mmc),
998 __func__);
999 host->mrq->sbc->error = -EIO;
1000 } else if (int_status & INTMSK_EBE) {
1001 printk(KERN_ERR "%s: %s FIFO Endbit/Write no CRC error\n",
1002 mmc_hostname(host->mmc),
1003 __func__);
1004 host->mrq->sbc->error = -EIO;
1005 } else if (int_status & INTMSK_DCRC) {
1006 printk(KERN_ERR "%s: %s Data CRC error\n",
1007 mmc_hostname(host->mmc),
1008 __func__);
1009 host->mrq->sbc->error = -EIO;
1010 } else if (int_status & INTMSK_FRUN) {
1011 printk(KERN_ERR "%s: %s FIFO underrun/overrun error\n",
1012 mmc_hostname(host->mmc),
1013 __func__);
1014 host->mrq->sbc->error = -EIO;
1015 } else if (int_status & CMD_ERROR) {
1016 printk(KERN_ERR "%s: %s cmd %s error\n",
1017 mmc_hostname(host->mmc),
1018 __func__, (intmask & INTMSK_RCRC) ?
1019 "response crc" :
1020 (intmask & INTMSK_RE) ? "response" :
1021 "response timeout");
1022 host->mrq->sbc->error = -ETIMEDOUT;
1023 }
1024
1025 if (host->mrq->sbc->error) {
1026 /* restore interrupt mask bit */
1027 mshci_writel(host, intmask, MSHCI_INTMSK);
1028 return;
1029 }
1030
1031 if (!timeout) {
1032 printk(KERN_ERR "%s: %s no interrupt occured\n",
1033 mmc_hostname(host->mmc), __func__);
1034 host->mrq->sbc->error = -ETIMEDOUT;
1035 /* restore interrupt mask bit */
1036 mshci_writel(host, intmask, MSHCI_INTMSK);
1037 return;
1038 }
1039
1040 /* command done interrupt has been occured with no errors.
1041 nothing to do. just return to the previous function */
1042 if ((int_status & INTMSK_CDONE) && !(int_status & CMD_ERROR)) {
1043 /* restore interrupt mask bit */
1044 mshci_writel(host, intmask, MSHCI_INTMSK);
1045 return;
1046 }
1047
1048 /* should not be here */
1049 printk(KERN_ERR "%s: an error that has not to be occured was"
1050 " occured 0x%x\n",mmc_hostname(host->mmc),int_status);
1051}
1052
/*
 * Issue the CMD23 (set block count) command and wait for it by
 * polling instead of taking an interrupt.
 *
 * The interrupt mask is saved, all sources are masked, the command is
 * written, and mshci_check_sbc_status() then polls RINTSTS for the
 * outcome and restores the mask.  Called with host->lock held.
 */
static void mshci_send_sbc(struct mshci_host *host, struct mmc_command *cmd)
{
	int flags = 0, ret, intmask;

	WARN_ON(host->cmd);

	/* disable interrupt before issuing cmd to the card. */
	mshci_writel(host, (mshci_readl(host, MSHCI_CTRL) & ~INT_ENABLE),
					MSHCI_CTRL);

	host->cmd = cmd;

	/* arm the 10s software watchdog for this command */
	mod_timer(&host->timer, jiffies + 10 * HZ);

	mshci_writel(host, cmd->arg, MSHCI_CMDARG);

	/* long response plus busy cannot be encoded in CMD */
	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		printk(KERN_ERR "%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		flags |= CMD_RESP_EXP_BIT;
		if (cmd->flags & MMC_RSP_136)
			flags |= CMD_RESP_LENGTH_BIT;
	}
	if (cmd->flags & MMC_RSP_CRC)
		flags |= CMD_CHECK_CRC_BIT;

	flags |= (cmd->opcode | CMD_STRT_BIT | host->hold_bit |
			CMD_WAIT_PRV_DAT_BIT);

	/* diagnostic only: the previous command should be done */
	ret = mshci_readl(host, MSHCI_CMD);
	if (ret & CMD_STRT_BIT)
		printk(KERN_ERR "CMD busy. current cmd %d. last cmd reg 0x%x\n",
			cmd->opcode, ret);

	/* backup interrupt mask bit */
	intmask = mshci_readl(host, MSHCI_INTMSK);

	/* disable interrupts for sbc command. it will wait for command done
	   by polling. it expects a faster repsonse */
	mshci_clear_set_irqs(host, INTMSK_ALL, 0);

	/* send command */
	mshci_writel(host, flags, MSHCI_CMD);

	/* enable interrupt upon it sends a command to the card. */
	mshci_writel(host, (mshci_readl(host, MSHCI_CTRL) | INT_ENABLE),
					MSHCI_CTRL);

	/* check the interrupt by polling */
	mshci_check_sbc_status(host,intmask);
}
1110#endif
1111
1112/*****************************************************************************\
1113 * *
1114 * MMC callbacks *
1115 * *
1116\*****************************************************************************/
1117
/*
 * mmc_host_ops.request: start one MMC request.
 *
 * Waits (up to ~1s in 50us/10us steps) for the card to release the
 * busy line before issuing anything except a stop command, checks card
 * presence, and then sends either the CMD23 prelude (polled, when
 * CONFIG_MMC_POLLING_WAIT_CMD23 is set and mrq->sbc is present) or the
 * main command.  The busy-wait drops host->lock around each sleep.
 */
static void mshci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mshci_host *host;
	bool present;
	int timeout;
	ktime_t expires;
	u64 add_time = 50000; /* 50us */

	host = mmc_priv(mmc);

	WARN_ON(host->mrq != NULL);

	host->mrq = mrq;

	/* Wait max 1 sec */
	timeout = 100000;

	/* We shouldn't wait for data inihibit for stop commands, even
	   though they might use busy signaling */
	/* opcode 12 == MMC_STOP_TRANSMISSION */
	if (mrq->cmd->opcode == 12) {
		/* nothing to do */
	} else {
		for (;;) {
			spin_lock_irqsave(&host->lock, host->sl_flags);
			/* STATUS bit 9 == data_busy */
			if (mshci_readl(host, MSHCI_STATUS) & (1<<9)) {
				if (timeout == 0) {
					printk(KERN_ERR "%s: Controller never"
						" released data0.\n",
						mmc_hostname(host->mmc));
					mshci_dumpregs(host);

					mrq->cmd->error = -ENOTRECOVERABLE;
					host->error_state = 1;

					tasklet_schedule \
						(&host->finish_tasklet);
					spin_unlock_irqrestore \
						(&host->lock, host->sl_flags);
					return;
				}
				timeout--;

				/* if previous command made an error,
				 * this function might be called by tasklet.
				 * So, it SHOULD NOT use schedule_hrtimeout */
				if (host->error_state == 1) {
					spin_unlock_irqrestore
						(&host->lock, host->sl_flags);
					udelay(10);
				} else {
					spin_unlock_irqrestore
						(&host->lock, host->sl_flags);
					expires = ktime_add_ns
						(ktime_get(), add_time);
					set_current_state
						(TASK_UNINTERRUPTIBLE);
					schedule_hrtimeout
						(&expires, HRTIMER_MODE_ABS);
				}
			} else {
				spin_unlock_irqrestore(&host->lock,
						host->sl_flags);
				break;
			}
		}
	}
	spin_lock_irqsave(&host->lock, host->sl_flags);
	/* If polling, assume that the card is always present. */
	if (host->quirks & MSHCI_QUIRK_BROKEN_CARD_DETECTION ||
		host->quirks & MSHCI_QUIRK_BROKEN_PRESENT_BIT)
		present = true;
	else
		present = !(mshci_readl(host, MSHCI_CDETECT) & CARD_PRESENT);

	if (!present || host->flags & MSHCI_DEVICE_DEAD) {
		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	} else {
#ifdef CONFIG_MMC_POLLING_WAIT_CMD23
		if (mrq->sbc) {
			mshci_send_sbc(host, mrq->sbc);
			if (mrq->sbc->error) {
				tasklet_schedule(&host->finish_tasklet);
			} else {
				/* sbc left host->cmd set; clear it before
				 * issuing the main command */
				if (host->cmd)
					host->cmd = NULL;
				mshci_send_command(host, mrq->cmd);
			}
		} else
#endif
			mshci_send_command(host, mrq->cmd);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, host->sl_flags);
}
1214
/*
 * mmc_host_ops.set_ios: apply bus settings (clock, power, bus width,
 * timing) to the controller.
 *
 * Programs CTYPE for the bus width, the UHS register DDR bit for
 * MMC_TIMING_UHS_DDR50, and the CLKSEL sample/drive phase.  The
 * CLKSEL values (0x00010001 / 0x00020002) and the 46.3MHz / 40MHz
 * thresholds are SoC-revision-specific tuning constants; assumed to
 * come from Samsung board characterization — confirm against the
 * platform data before changing.
 */
static void mshci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mshci_host *host;
	u32 regs;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, host->sl_flags);

	if (host->flags & MSHCI_DEVICE_DEAD)
		goto out;

	if (ios->power_mode == MMC_POWER_OFF)
		mshci_reinit(host);

#ifdef CONFIG_MMC_CLKGATE
	/* gating the clock and out */
	if (mmc->clk_gated) {
		WARN_ON(ios->clock != 0);
		if (host->clock != 0)
			mshci_set_clock(host, ios->clock, ios->ddr);
		goto out;
	}
#endif

	if (host->ops->set_ios)
		host->ops->set_ios(host, ios);

	mshci_set_clock(host, ios->clock, ios->ddr);

	/* -1 is the "power off" sentinel for mshci_set_power() */
	if (ios->power_mode == MMC_POWER_OFF)
		mshci_set_power(host, -1);
	else
		mshci_set_power(host, ios->vdd);

	regs = mshci_readl(host, MSHCI_UHS_REG);

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		mshci_writel(host, (0x1<<16), MSHCI_CTYPE);
		if (ios->timing == MMC_TIMING_UHS_DDR50) {
			/* UHS_REG bit 16 enables DDR mode */
			regs |= (0x1 << 16);
			mshci_writel(host, regs, MSHCI_UHS_REG);
			/* if exynos4412 EVT1 or the latest one */
			if (soc_is_exynos4412() &&
				samsung_rev() >= EXYNOS4412_REV_1_0) {
				if ((host->max_clk/2) < 46300000) {
					mshci_writel(host, (0x00010001),
							MSHCI_CLKSEL);
				} else {
					mshci_writel(host, (0x00020002),
							MSHCI_CLKSEL);
				}
			} else {
				if ((host->max_clk/2) < 40000000)
					mshci_writel(host, (0x00010001),
							MSHCI_CLKSEL);
				else
					mshci_writel(host, (0x00020002),
							MSHCI_CLKSEL);
			}
		} else {
			regs &= ~(0x1 << 16);
			mshci_writel(host, regs|(0x0<<0), MSHCI_UHS_REG);
			mshci_writel(host, (0x00010001), MSHCI_CLKSEL);
		}
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		mshci_writel(host, (0x1<<0), MSHCI_CTYPE);
		if (ios->timing == MMC_TIMING_UHS_DDR50) {
			regs |= (0x1 << 16);
			mshci_writel(host, regs, MSHCI_UHS_REG);
			mshci_writel(host, (0x00010001), MSHCI_CLKSEL);
		} else {
			regs &= ~(0x1 << 16);
			mshci_writel(host, regs|(0x0<<0), MSHCI_UHS_REG);
			mshci_writel(host, (0x00010001), MSHCI_CLKSEL);
		}
	} else {
		/* default: 1-bit bus, SDR */
		regs &= ~(0x1 << 16);
		mshci_writel(host, regs|0, MSHCI_UHS_REG);
		mshci_writel(host, (0x0<<0), MSHCI_CTYPE);
		mshci_writel(host, (0x00010001), MSHCI_CLKSEL);
	}
out:
	mmiowb();
	spin_unlock_irqrestore(&host->lock, host->sl_flags);
}
1301
1302static int mshci_get_ro(struct mmc_host *mmc)
1303{
1304 struct mshci_host *host;
1305 int wrtprt;
1306
1307 host = mmc_priv(mmc);
1308
1309 spin_lock_irqsave(&host->lock, host->sl_flags);
1310
1311 if (host->quirks & MSHCI_QUIRK_NO_WP_BIT)
1312 wrtprt = host->ops->get_ro(mmc) ? 0 : WRTPRT_ON;
1313 else if (host->flags & MSHCI_DEVICE_DEAD)
1314 wrtprt = 0;
1315 else
1316 wrtprt = mshci_readl(host, MSHCI_WRTPRT);
1317
1318 spin_unlock_irqrestore(&host->lock, host->sl_flags);
1319
1320 return wrtprt & WRTPRT_ON;
1321}
1322
1323static void mshci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1324{
1325 struct mshci_host *host;
1326
1327 host = mmc_priv(mmc);
1328
1329 spin_lock_irqsave(&host->lock, host->sl_flags);
1330
1331 if (host->flags & MSHCI_DEVICE_DEAD)
1332 goto out;
1333
1334 if (enable)
1335 mshci_unmask_irqs(host, SDIO_INT_ENABLE);
1336 else
1337 mshci_mask_irqs(host, SDIO_INT_ENABLE);
1338out:
1339 mmiowb();
1340
1341 spin_unlock_irqrestore(&host->lock, host->sl_flags);
1342}
1343
1344static void mshci_init_card(struct mmc_host *mmc, struct mmc_card *card)
1345{
1346 struct mshci_host *host;
1347
1348 host = mmc_priv(mmc);
1349
1350 spin_lock_irqsave(&host->lock, host->sl_flags);
1351
1352 if (host->flags & MSHCI_DEVICE_DEAD)
1353 goto out;
1354
1355 if (host->ops->init_card)
1356 host->ops->init_card(host);
1357out:
1358 mmiowb();
1359
1360 spin_unlock_irqrestore(&host->lock, host->sl_flags);
1361}
1362
/*
 * mmc_host_ops.pre_req: map the request's scatterlist for DMA ahead
 * of issue, recording the mapped entry count in data->host_cookie
 * (0 means "not pre-mapped", so mshci_post_req() knows what to undo).
 *
 * Large transfers go through the platform dma_map_sg hook with a size
 * class (2 for >=1MiB, 1 for >=64KiB); everything else uses plain
 * dma_map_sg().
 */
static void mshci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
	bool is_first_req)
{
	struct mshci_host *host;
	struct mmc_data *data = mrq->data;
	int sg_count, direction;

	host = mmc_priv(mmc);
	spin_lock_irqsave(&host->lock, host->sl_flags);

	if (!data)
		goto out;

	/* already carries a cookie: reset it and skip pre-mapping */
	if (data->host_cookie) {
		data->host_cookie = 0;
		goto out;
	}

	if (host->flags & MSHCI_USE_IDMA) {
		/* mshc's IDMAC can't transfer data that is not aligned
		 * or has length not divided by 4 byte. */
		int i;
		struct scatterlist *sg;

		for_each_sg(data->sg, sg, data->sg_len, i) {
			if (sg->length & 0x3) {
				DBG("Reverting to PIO because of "
					"transfer size (%d)\n",
					sg->length);
				/* NOTE(review): unlike the misaligned-
				 * offset branch below, this path does
				 * NOT clear MSHCI_REQ_USE_DMA — confirm
				 * the asymmetry is intentional */
				data->host_cookie = 0;
				goto out;
			} else if (sg->offset & 0x3) {
				DBG("Reverting to PIO because of "
					"bad alignment\n");
				host->flags &= ~MSHCI_REQ_USE_DMA;
				data->host_cookie = 0;
				goto out;
			}
		}
	}

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	if (host->ops->dma_map_sg && data->blocks >= 2048) {
		/* if transfer size is bigger than 1MiB */
		sg_count = host->ops->dma_map_sg(host,
				mmc_dev(host->mmc),
				data->sg, data->sg_len, direction, 2);
	} else if (host->ops->dma_map_sg && data->blocks >= 128) {
		/* if transfer size is bigger than 64KiB */
		sg_count = host->ops->dma_map_sg(host,
				mmc_dev(host->mmc),
				data->sg, data->sg_len, direction, 1);
	} else {
		sg_count = dma_map_sg(mmc_dev(host->mmc),
				data->sg, data->sg_len, direction);
	}

	/* cookie == 0 signals "mapping failed / not pre-mapped" */
	if (sg_count == 0)
		data->host_cookie = 0;
	else
		data->host_cookie = sg_count;
out:
	spin_unlock_irqrestore(&host->lock, host->sl_flags);
	return;
}
1432
1433static void mshci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
1434 int err)
1435{
1436 struct mshci_host *host;
1437 struct mmc_data *data = mrq->data;
1438 int direction;
1439
1440 host = mmc_priv(mmc);
1441 spin_lock_irqsave(&host->lock, host->sl_flags);
1442
1443 if (!data)
1444 goto out;
1445
1446 if (data->flags & MMC_DATA_READ)
1447 direction = DMA_FROM_DEVICE;
1448 else
1449 direction = DMA_TO_DEVICE;
1450
1451 if ((host->ops->dma_unmap_sg && data->blocks >= 2048 &&
1452 data->host_cookie)) {
1453 /* if transfer size is bigger than 1MiB */
1454 host->ops->dma_unmap_sg(host, mmc_dev(host->mmc),
1455 data->sg, data->sg_len, direction, 2);
1456 } else if ((host->ops->dma_unmap_sg && data->blocks >= 128 &&
1457 data->host_cookie)) {
1458 /* if transfer size is bigger than 64KiB */
1459 host->ops->dma_unmap_sg(host, mmc_dev(host->mmc),
1460 data->sg, data->sg_len, direction, 1);
1461 } else if (data->host_cookie) {
1462 dma_unmap_sg(mmc_dev(host->mmc),
1463 data->sg, data->sg_len, direction);
1464 }
1465out:
1466 spin_unlock_irqrestore(&host->lock, host->sl_flags);
1467 return;
1468}
1469
/* mmc_host_ops glue table; the async pre_req/post_req hooks are only
 * wired up when CONFIG_MMC_MSHCI_ASYNC_OPS is enabled. */
static struct mmc_host_ops mshci_ops = {
	.request = mshci_request,
	.set_ios = mshci_set_ios,
	.get_ro = mshci_get_ro,
	.enable_sdio_irq = mshci_enable_sdio_irq,
	.init_card = mshci_init_card,
#ifdef CONFIG_MMC_MSHCI_ASYNC_OPS
	.pre_req = mshci_pre_req,
	.post_req = mshci_post_req,
#endif
};
1481
1482/*****************************************************************************\
1483 * *
1484 * Tasklets *
1485 * *
1486\*****************************************************************************/
1487
/*
 * Card-detect tasklet: handle (possible) card insertion/removal.
 *
 * If a request was in flight when the card went away (CDETECT bit set
 * means "no card" on this controller — see the inverted test in
 * mshci_request()), fail it with -ENOMEDIUM, then let the MMC core
 * rescan the slot after a 200ms debounce.
 */
static void mshci_tasklet_card(unsigned long param)
{
	struct mshci_host *host;

	host = (struct mshci_host *)param;

	spin_lock_irqsave(&host->lock, host->sl_flags);

	if ((host->quirks & MSHCI_QUIRK_BROKEN_CARD_DETECTION) ||
		(host->quirks & MSHCI_QUIRK_BROKEN_PRESENT_BIT) ||
		(mshci_readl(host, MSHCI_CDETECT) & CARD_PRESENT)) {
		if (host->mrq) {
			printk(KERN_ERR "%s: Card removed during transfer!\n",
				mmc_hostname(host->mmc));
			printk(KERN_ERR "%s: Resetting controller.\n",
				mmc_hostname(host->mmc));

			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}
	}

	spin_unlock_irqrestore(&host->lock, host->sl_flags);

	mmc_detect_change(host->mmc, msecs_to_jiffies(200));
}
1514
/*
 * Finish tasklet: complete the current request.
 *
 * Cancels the software watchdog, resets the FIFO if any part of the
 * request failed, clears the host's request/command/data pointers, and
 * hands the request back to the MMC core outside the lock.
 */
static void mshci_tasklet_finish(unsigned long param)
{
	struct mshci_host *host;
	struct mmc_request *mrq;

	host = (struct mshci_host *)param;

	if (host == NULL)
		return;

	spin_lock_irqsave(&host->lock, host->sl_flags);

	del_timer(&host->timer);

	mrq = host->mrq;

	/* tasklet may fire with nothing pending; just clear state */
	if (mrq == NULL || mrq->cmd == NULL)
		goto out;

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (!(host->flags & MSHCI_DEVICE_DEAD) &&
		(mrq->cmd->error ||
#ifdef CONFIG_MMC_POLLING_WAIT_CMD23
		(mrq->sbc && mrq->sbc->error) ||
#endif
		(mrq->data && (mrq->data->error ||
		(mrq->data->stop && mrq->data->stop->error))))) {
		mshci_reset_fifo(host);
	}

out:
	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;

	mmiowb();
	spin_unlock_irqrestore(&host->lock, host->sl_flags);

	/* notify the core outside the spinlock */
	if (mrq)
		mmc_request_done(host->mmc, mrq);
}
1559
/*
 * Software watchdog (armed for 10s by mshci_send_command()): fires
 * when the hardware never raised a completion interrupt.  Marks the
 * in-flight data/command with -ETIMEDOUT and pushes completion
 * through the normal finish path.
 */
static void mshci_timeout_timer(unsigned long data)
{
	struct mshci_host *host;

	host = (struct mshci_host *)data;

	spin_lock_irqsave(&host->lock, host->sl_flags);

	if (host->mrq) {
		printk(KERN_ERR "%s: Timeout waiting for hardware "
			"interrupt.\n", mmc_hostname(host->mmc));
		mshci_dumpregs(host);

		if (host->data) {
			host->data->error = -ETIMEDOUT;
			/* finish_data issues CMD12 or schedules finish */
			mshci_finish_data(host);
		} else {
			if (host->cmd)
				host->cmd->error = -ETIMEDOUT;
			else
				host->mrq->cmd->error = -ETIMEDOUT;

			tasklet_schedule(&host->finish_tasklet);
		}
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, host->sl_flags);
}
1589
1590/*****************************************************************************\
1591 * *
1592 * Interrupt handling *
1593 * *
1594\*****************************************************************************/
1595
/*
 * Handle command-phase interrupt bits (already ack'ed by the caller).
 *
 * Translates response-timeout (RTO) to -ETIMEDOUT and response/CRC
 * errors (RE/RCRC) to -EILSEQ; on any error the finish tasklet is
 * scheduled, otherwise CDONE completes the command.  Called under
 * host->lock from mshci_irq().
 */
static void mshci_cmd_irq(struct mshci_host *host, u32 intmask)
{
	BUG_ON(intmask == 0);

	/* spurious: no command outstanding */
	if (!host->cmd) {
		printk(KERN_ERR "%s: Got command interrupt 0x%08x even "
			"though no command operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		mshci_dumpregs(host);
		return;
	}

	if (intmask & INTMSK_RTO) {
		host->cmd->error = -ETIMEDOUT;
		printk(KERN_ERR "%s: cmd %d response timeout error\n",
				mmc_hostname(host->mmc), host->cmd->opcode);
	} else if (intmask & (INTMSK_RCRC | INTMSK_RE)) {
		host->cmd->error = -EILSEQ;
		printk(KERN_ERR "%s: cmd %d repsonse %s error\n",
				mmc_hostname(host->mmc), host->cmd->opcode,
				(intmask & INTMSK_RCRC) ? "crc" : "RE");
	}
	if (host->cmd->error) {
		/* to notify an error happend */
		host->error_state = 1;
#if defined(CONFIG_MACH_M0) || defined(CONFIG_MACH_P4NOTE) || \
	defined(CONFIG_MACH_C1_USA_ATT)
		/* dh0421.hwang */
		if (host->mmc && host->mmc->card)
			mshci_dumpregs(host);
#endif
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	if (intmask & INTMSK_CDONE)
		mshci_finish_command(host);
}
1634
/*
 * Handle data-phase interrupt bits from either the main interrupt
 * status (intr_src == INT_SRC_MINT) or the internal DMA controller
 * (INT_SRC_IDMAC).  Records the first matching error in
 * host->data->error, drives PIO transfers, and completes the data
 * phase on DTO (deferring to mshci_finish_command() when the command
 * has not completed yet).  Called under host->lock.
 */
static void mshci_data_irq(struct mshci_host *host, u32 intmask, u8 intr_src)
{
	BUG_ON(intmask == 0);

	if (!host->data) {
		/*
		 * The "data complete" interrupt is also used to
		 * indicate that a busy state has ended. See comment
		 * above in mshci_cmd_irq().
		 */
		if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
			if (intmask & INTMSK_DTO) {
				mshci_finish_command(host);
				return;
			}
		}

		printk(KERN_ERR "%s: Got data interrupt 0x%08x from %s "
			"even though no data operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask,
			intr_src ? "MINT" : "IDMAC");
		mshci_dumpregs(host);

		return;
	}
	if (intr_src == INT_SRC_MINT) {
		if (intmask & INTMSK_HTO) {
			printk(KERN_ERR "%s: Host timeout error\n",
					mmc_hostname(host->mmc));
			host->data->error = -ETIMEDOUT;
#if 1 /* debugging for Host timeout error */
			/* NOTE(review): debug leftover — panics the whole
			 * system on an HTO error; should not ship enabled */
			mshci_dumpregs(host);
			panic("[TEST] %s: HTO error interrupt occured\n",
					mmc_hostname(host->mmc));
#endif
		} else if (intmask & INTMSK_DRTO) {
			printk(KERN_ERR "%s: Data read timeout error\n",
					mmc_hostname(host->mmc));
			host->data->error = -ETIMEDOUT;
		} else if (intmask & INTMSK_SBE) {
			printk(KERN_ERR "%s: FIFO Start bit error\n",
					mmc_hostname(host->mmc));
			host->data->error = -EIO;
		} else if (intmask & INTMSK_EBE) {
			printk(KERN_ERR "%s: FIFO Endbit/Write no CRC error\n",
					mmc_hostname(host->mmc));
			host->data->error = -EIO;
		} else if (intmask & INTMSK_DCRC) {
			printk(KERN_ERR "%s: Data CRC error\n",
					mmc_hostname(host->mmc));
			host->data->error = -EIO;
		} else if (intmask & INTMSK_FRUN) {
			printk(KERN_ERR "%s: FIFO underrun/overrun error\n",
					mmc_hostname(host->mmc));
			host->data->error = -EIO;
		}
	} else {
		if (intmask & IDSTS_FBE) {
			printk(KERN_ERR "%s: Fatal Bus error on DMA\n",
					mmc_hostname(host->mmc));
			host->data->error = -EIO;
		} else if (intmask & IDSTS_CES) {
			printk(KERN_ERR "%s: Card error on DMA\n",
					mmc_hostname(host->mmc));
			host->data->error = -EIO;
		} else if (intmask & IDSTS_DU) {
			printk(KERN_ERR "%s: Description error on DMA\n",
					mmc_hostname(host->mmc));
			host->data->error = -EIO;
		}
	}

	if (host->data->error) {
		/* to notify an error happend */
		host->error_state = 1;
#if defined(CONFIG_MACH_M0) || defined(CONFIG_MACH_P4NOTE) || \
	defined(CONFIG_MACH_C1_USA_ATT)
		/* dh0421.hwang */
		if (host->mmc && host->mmc->card)
			mshci_dumpregs(host);
#endif
		mshci_finish_data(host);
	} else {
		/* PIO mode: service FIFO read/write watermarks */
		if (!(host->flags & MSHCI_REQ_USE_DMA) &&
				(((host->data->flags & MMC_DATA_READ) &&
				(intmask & (INTMSK_RXDR | INTMSK_DTO))) ||
				((host->data->flags & MMC_DATA_WRITE) &&
				(intmask & (INTMSK_TXDR)))))
			mshci_transfer_pio(host);

		if (intmask & INTMSK_DTO) {
			if (host->cmd) {
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
				host->data_early = 1;
			} else {
				mshci_finish_data(host);
			}
		}
	}
}
1739
/*
 * Top-level interrupt handler.
 *
 * Reads MINTSTS; if empty, falls back to the IDMAC status (IDSTS).
 * Pending bits are acknowledged up front, then dispatched in order:
 * card-detect -> command status -> data status -> SDIO.  For timeout
 * errors the corresponding "done" interrupt arrives slightly later
 * than the error bit, so the handler spin-waits for it to keep the
 * two from being processed as separate events.
 */
static irqreturn_t mshci_irq(int irq, void *dev_id)
{
	irqreturn_t result;
	struct mshci_host *host = dev_id;
	u32 intmask;
	int cardint = 0;
	int timeout = 0x10000;

	spin_lock(&host->lock);

	intmask = mshci_readl(host, MSHCI_MINTSTS);

	if (!intmask || intmask == 0xffffffff) {
		/* check if there is a interrupt for IDMAC */
		intmask = mshci_readl(host, MSHCI_IDSTS);
		if (intmask) {
			mshci_writel(host, intmask, MSHCI_IDSTS);
			mshci_data_irq(host, intmask, INT_SRC_IDMAC);
			result = IRQ_HANDLED;
			goto out;
		}
		/* 0xffffffff suggests the device vanished; not ours */
		result = IRQ_NONE;
		goto out;
	}
	DBG("*** %s got interrupt: 0x%08x\n",
		mmc_hostname(host->mmc), intmask);

	/* ack everything we are about to handle */
	mshci_writel(host, intmask, MSHCI_RINTSTS);

	if (intmask & (INTMSK_CDETECT)) {
		if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE))
			tasklet_schedule(&host->card_tasklet);
	}
	intmask &= ~INTMSK_CDETECT;

	if (intmask & CMD_STATUS) {
		if (!(intmask & INTMSK_CDONE) && (intmask & INTMSK_RTO)) {
			/*
			 * when a error about command timeout occurs,
			 * cmd done intr comes together.
			 * cmd done intr comes later than error intr.
			 * so, it has to wait for cmd done intr.
			 */
			while (--timeout && !(mshci_readl(host, MSHCI_MINTSTS)
						& INTMSK_CDONE))
				; /* Nothing to do */
			if (!timeout)
				printk(KERN_ERR"*** %s time out for CDONE intr\n",
					mmc_hostname(host->mmc));
			else
				mshci_writel(host, INTMSK_CDONE,
						MSHCI_RINTSTS);
			mshci_cmd_irq(host, intmask & CMD_STATUS);
		} else {
			mshci_cmd_irq(host, intmask & CMD_STATUS);
		}
	}

	if (intmask & DATA_STATUS) {
		if (!(intmask & INTMSK_DTO) && (intmask & INTMSK_DRTO)) {
			/*
			 * when a error about data timout occurs,
			 * DTO intr comes together.
			 * DTO intr comes later than error intr.
			 * so, it has to wait for DTO intr.
			 */
			while (--timeout && !(mshci_readl(host, MSHCI_MINTSTS)
						& INTMSK_DTO))
				; /* Nothing to do */
			if (!timeout)
				printk(KERN_ERR"*** %s time out for DTO intr\n",
					mmc_hostname(host->mmc));
			else
				mshci_writel(host, INTMSK_DTO,
						MSHCI_RINTSTS);
			mshci_data_irq(host, intmask & DATA_STATUS,
						INT_SRC_MINT);
		} else {
			mshci_data_irq(host, intmask & DATA_STATUS,
						INT_SRC_MINT);
		}
	}

	intmask &= ~(CMD_STATUS | DATA_STATUS);

	if (intmask & SDIO_INT_ENABLE)
		cardint = 1;

	intmask &= ~SDIO_INT_ENABLE;

	if (intmask) {
		printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n",
			mmc_hostname(host->mmc), intmask);
		mshci_dumpregs(host);
	}

	result = IRQ_HANDLED;

	mmiowb();
out:
	spin_unlock(&host->lock);

	/*
	 * We have to delay this as it calls back into the driver.
	 */
	if (cardint)
		mmc_signal_sdio_irq(host->mmc);

	return result;
}
1850
1851/*****************************************************************************\
1852 * *
1853 * Suspend/resume *
1854 * *
1855\*****************************************************************************/
1856
1857#ifdef CONFIG_PM
1858
1859int mshci_suspend_host(struct mshci_host *host, pm_message_t state)
1860{
1861 int ret;
1862
1863 mshci_disable_card_detection(host);
1864
1865 ret = mmc_suspend_host(host->mmc);
1866 if (ret)
1867 return ret;
1868
1869 free_irq(host->irq, host);
1870
1871 return 0;
1872}
1873EXPORT_SYMBOL_GPL(mshci_suspend_host);
1874
/*
 * Resume the host: re-enable DMA via the platform hook, reinitialise
 * the controller (registers are assumed lost across suspend),
 * re-request the IRQ, reprogram FIFO/debounce/bus-width/IDMAC state,
 * and finally let the MMC core resume the card.  Returns 0 or a
 * negative errno from request_irq()/mmc_resume_host().
 */
int mshci_resume_host(struct mshci_host *host)
{
	int ret;
	int count;

	if (host->flags & (MSHCI_USE_IDMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	mshci_init(host);

	ret = request_irq(host->irq, mshci_irq, IRQF_SHARED,
			  mmc_hostname(host->mmc), host);
	if (ret)
		return ret;

	mmiowb();

	mshci_fifo_init(host);

	/* set debounce filter value*/
	mshci_writel(host, 0xfffff, MSHCI_DEBNCE);

	/* clear card type. set 1bit mode */
	mshci_writel(host, 0x0, MSHCI_CTYPE);

	/* set bus mode register for IDMAC */
	if (host->flags & MSHCI_USE_IDMA) {
		mshci_writel(host, BMOD_IDMAC_RESET, MSHCI_BMOD);
		/* bounded wait for the IDMAC soft reset to self-clear */
		count = 100;
		while ((mshci_readl(host, MSHCI_BMOD) & BMOD_IDMAC_RESET)
				&& --count)
			; /* nothing to do */

		mshci_writel(host, (mshci_readl(host, MSHCI_BMOD) |
			(BMOD_IDMAC_ENABLE|BMOD_IDMAC_FB)), MSHCI_BMOD);
	}

	ret = mmc_resume_host(host->mmc);
	if (ret)
		return ret;

	mshci_enable_card_detection(host);

	return 0;
}
1922EXPORT_SYMBOL_GPL(mshci_resume_host);
1923
1924#endif /* CONFIG_PM */
1925
1926/*****************************************************************************\
1927 * *
1928 * Device allocation/registration *
1929 * *
1930\*****************************************************************************/
1931
1932struct mshci_host *mshci_alloc_host(struct device *dev,
1933 size_t priv_size)
1934{
1935 struct mmc_host *mmc;
1936 struct mshci_host *host;
1937
1938 WARN_ON(dev == NULL);
1939
1940 mmc = mmc_alloc_host(sizeof(struct mshci_host) + priv_size, dev);
1941 if (!mmc)
1942 return ERR_PTR(-ENOMEM);
1943
1944 host = mmc_priv(mmc);
1945 host->mmc = mmc;
1946
1947 return host;
1948}
1949
1950static void mshci_fifo_init(struct mshci_host *host)
1951{
1952 int fifo_val, fifo_depth, fifo_threshold;
1953
1954 fifo_val = mshci_readl(host, MSHCI_FIFOTH);
1955 fifo_depth = host->ops->get_fifo_depth(host);
1956 fifo_threshold = fifo_depth/2;
1957 host->fifo_threshold = fifo_threshold;
1958 host->fifo_depth = fifo_threshold*2;
1959
1960 printk(KERN_INFO "%s: FIFO WMARK FOR RX 0x%x WX 0x%x. ###########\n",
1961 mmc_hostname(host->mmc), fifo_depth,
1962 fifo_threshold);
1963
1964 fifo_val &= ~(RX_WMARK | TX_WMARK | MSIZE_MASK);
1965
1966 fifo_val |= (fifo_threshold | ((fifo_threshold-1)<<16));
1967 if (fifo_threshold >= 0x40)
1968 fifo_val |= MSIZE_64;
1969 else if (fifo_threshold >= 0x20)
1970 fifo_val |= MSIZE_32;
1971 else if (fifo_threshold >= 0x10)
1972 fifo_val |= MSIZE_16;
1973 else if (fifo_threshold >= 0x8)
1974 fifo_val |= MSIZE_8;
1975 else
1976 fifo_val |= MSIZE_1;
1977
1978 mshci_writel(host, fifo_val, MSHCI_FIFOTH);
1979}
1980EXPORT_SYMBOL_GPL(mshci_alloc_host);
1981
1982int mshci_add_host(struct mshci_host *host)
1983{
1984 struct mmc_host *mmc;
1985 int ret, count;
1986
1987 WARN_ON(host == NULL);
1988 if (host == NULL)
1989 return -EINVAL;
1990
1991 mmc = host->mmc;
1992
1993 if (debug_quirks)
1994 host->quirks = debug_quirks;
1995
1996 mshci_reset_all(host);
1997
1998 host->version = mshci_readl(host, MSHCI_VERID);
1999
2000 /* there are no reasons not to use DMA */
2001 host->flags |= MSHCI_USE_IDMA;
2002
2003 if (host->flags & MSHCI_USE_IDMA) {
2004 /* We need to allocate descriptors for all sg entries
2005 * MSHCI_MAX_DMA_LIST transfer for each of those entries. */
2006 host->idma_desc = kmalloc(MSHCI_MAX_DMA_LIST * \
2007 sizeof(struct mshci_idmac), GFP_KERNEL);
2008 if (!host->idma_desc) {
2009 kfree(host->idma_desc);
2010 printk(KERN_WARNING "%s: Unable to allocate IDMA "
2011 "buffers. Falling back to standard DMA.\n",
2012 mmc_hostname(mmc));
2013 host->flags &= ~MSHCI_USE_IDMA;
2014 }
2015 }
2016
2017 /*
2018 * If we use DMA, then it's up to the caller to set the DMA
2019 * mask, but PIO does not need the hw shim so we set a new
2020 * mask here in that case.
2021 */
2022 if (!(host->flags & (MSHCI_USE_IDMA))) {
2023 host->dma_mask = DMA_BIT_MASK(64);
2024 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
2025 }
2026
2027 printk(KERN_ERR "%s: Version ID 0x%x.\n",
2028 mmc_hostname(host->mmc), host->version);
2029
2030 host->max_clk = 0;
2031
2032 if (host->max_clk == 0) {
2033 if (!host->ops->get_max_clock) {
2034 printk(KERN_ERR
2035 "%s: Hardware doesn't specify base clock "
2036 "frequency.\n", mmc_hostname(mmc));
2037 return -ENODEV;
2038 }
2039 host->max_clk = host->ops->get_max_clock(host);
2040 }
2041
2042 /*
2043 * Set host parameters.
2044 */
2045 if (host->ops->get_ro)
2046 mshci_ops.get_ro = host->ops->get_ro;
2047
2048 mmc->ops = &mshci_ops;
2049 mmc->f_min = 400000;
2050 mmc->f_max = host->max_clk;
2051 mmc->caps |= MMC_CAP_SDIO_IRQ;
2052
2053 mmc->caps |= MMC_CAP_4_BIT_DATA;
2054
2055 mmc->ocr_avail = 0;
2056 mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
2057 mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31;
2058
2059
2060 if (mmc->ocr_avail == 0) {
2061 printk(KERN_ERR "%s: Hardware doesn't report any "
2062 "support voltages.\n", mmc_hostname(mmc));
2063 return -ENODEV;
2064 }
2065
2066 spin_lock_init(&host->lock);
2067
2068 /*
2069 * Maximum number of segments. Depends on if the hardware
2070 * can do scatter/gather or not.
2071 */
2072 if (host->flags & MSHCI_USE_IDMA)
2073 mmc->max_segs = MSHCI_MAX_DMA_LIST;
2074 else /* PIO */
2075 mmc->max_segs = MSHCI_MAX_DMA_LIST;
2076
2077 mmc->max_segs = MSHCI_MAX_DMA_LIST;
2078
2079 /*
2080 * Maximum number of sectors in one transfer. Limited by DMA boundary
2081 * size (4KiB).
2082 * Limited by CPU I/O boundry size (0xfffff000 KiB)
2083 */
2084
2085 /* to prevent starvation of a process that want to access SD device
2086 * it should limit size that transfer at one time. */
2087 mmc->max_req_size = MSHCI_MAX_DMA_TRANS_SIZE;
2088
2089 /*
2090 * Maximum segment size. Could be one segment with the maximum number
2091 * of bytes. When doing hardware scatter/gather, each entry cannot
2092 * be larger than 4 KiB though.
2093 */
2094 if (host->flags & MSHCI_USE_IDMA)
2095 mmc->max_seg_size = 0x1000;
2096 else
2097 mmc->max_seg_size = mmc->max_req_size;
2098
2099 /* from SD spec 2.0 and MMC spec 4.2, block size has been
2100 * fixed to 512 byte */
2101 mmc->max_blk_size = 0;
2102
2103 mmc->max_blk_size = 512 << mmc->max_blk_size;
2104
2105 /*
2106 * Maximum block count.
2107 */
2108 mmc->max_blk_count = MSHCI_MAX_DMA_TRANS_SIZE / mmc->max_blk_size ;
2109
2110 /*
2111 * Init tasklets.
2112 */
2113 tasklet_init(&host->card_tasklet,
2114 mshci_tasklet_card, (unsigned long)host);
2115 tasklet_init(&host->finish_tasklet,
2116 mshci_tasklet_finish, (unsigned long)host);
2117
2118 setup_timer(&host->timer, mshci_timeout_timer, (unsigned long)host);
2119
2120 ret = request_irq(host->irq, mshci_irq, IRQF_SHARED,
2121 mmc_hostname(mmc), host);
2122 if (ret)
2123 goto untasklet;
2124
2125 mshci_init(host);
2126
2127 mshci_writel(host, (mshci_readl(host, MSHCI_CTRL) | INT_ENABLE),
2128 MSHCI_CTRL);
2129
2130 mshci_fifo_init(host);
2131
2132 /* set debounce filter value*/
2133 mshci_writel(host, 0xfffff, MSHCI_DEBNCE);
2134
2135 /* clear card type. set 1bit mode */
2136 mshci_writel(host, 0x0, MSHCI_CTYPE);
2137
2138 /* set bus mode register for IDMAC */
2139 if (host->flags & MSHCI_USE_IDMA) {
2140 mshci_writel(host, BMOD_IDMAC_RESET, MSHCI_BMOD);
2141 count = 100;
2142 while ((mshci_readl(host, MSHCI_BMOD) & BMOD_IDMAC_RESET)
2143 && --count)
2144 ; /* nothing to do */
2145
2146 mshci_writel(host, (mshci_readl(host, MSHCI_BMOD) |
2147 (BMOD_IDMAC_ENABLE|BMOD_IDMAC_FB)), MSHCI_BMOD);
2148 }
2149#ifdef CONFIG_MMC_DEBUG
2150 mshci_dumpregs(host);
2151#endif
2152
2153 mmiowb();
2154
2155 mmc_add_host(mmc);
2156
2157 printk(KERN_INFO "%s: MSHCI controller on %s [%s] using %s\n",
2158 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
2159 (host->flags & MSHCI_USE_IDMA) ? "IDMA" : "PIO");
2160
2161 mshci_enable_card_detection(host);
2162
2163 return 0;
2164
2165untasklet:
2166 tasklet_kill(&host->card_tasklet);
2167 tasklet_kill(&host->finish_tasklet);
2168
2169 return ret;
2170}
2171EXPORT_SYMBOL_GPL(mshci_add_host);
2172
/*
 * mshci_remove_host - unregister and tear down the host.
 * @host: host to remove
 * @dead: nonzero when the controller is unreachable (e.g. hot-unplugged);
 *        register access is then skipped and any in-flight request is
 *        failed with -ENOMEDIUM.
 *
 * Teardown order matters: card detection is disabled and the MMC core
 * host removed before the IRQ, timer and tasklets are released.
 *
 * NOTE(review): spin_lock_irqsave() stores its flags in host->sl_flags
 * rather than a local variable — unusual; confirm no nested users.
 */
void mshci_remove_host(struct mshci_host *host, int dead)
{
	if (dead) {
		spin_lock_irqsave(&host->lock, host->sl_flags);

		host->flags |= MSHCI_DEVICE_DEAD;

		/* Fail whatever request was in flight when the HW vanished. */
		if (host->mrq) {
			printk(KERN_ERR "%s: Controller removed during "
				" transfer!\n", mmc_hostname(host->mmc));

			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}

		spin_unlock_irqrestore(&host->lock, host->sl_flags);
	}

	mshci_disable_card_detection(host);

	mmc_remove_host(host->mmc);

	/* Only touch the registers if the controller is still alive. */
	if (!dead)
		mshci_reset_all(host);

	free_irq(host->irq, host);

	del_timer_sync(&host->timer);

	tasklet_kill(&host->card_tasklet);
	tasklet_kill(&host->finish_tasklet);

	kfree(host->idma_desc);

	host->idma_desc = NULL;
	host->align_buffer = NULL;
}
2211
/*
 * mshci_free_host - release a host allocated with mshci_alloc_host().
 *
 * Frees the enclosing mmc_host, which also frees the embedded mshci_host
 * and the caller's private area; the host pointer is invalid afterwards.
 */
void mshci_free_host(struct mshci_host *host)
{
	mmc_free_host(host->mmc);
}
2216EXPORT_SYMBOL_GPL(mshci_free_host);
2217
2218/*****************************************************************************\
2219 * *
2220 * Driver init/exit *
2221 * *
2222\*****************************************************************************/
2223
2224static int __init mshci_drv_init(void)
2225{
2226 int ret = 0;
2227 printk(KERN_INFO DRIVER_NAME
2228 ": Mobile Storage Host Controller Interface driver\n");
2229 printk(KERN_INFO DRIVER_NAME
2230 ": Copyright (c) 2011 Samsung Electronics Co., Ltd\n");
2231
2232 return ret;
2233}
2234
/*
 * mshci_drv_exit - module exit; nothing to undo, init only logs banners.
 */
static void __exit mshci_drv_exit(void)
{
}
2238
module_init(mshci_drv_init);
module_exit(mshci_drv_exit);

/* Read-only (0444) module parameter; consumed once in mshci_add_host(). */
module_param(debug_quirks, uint, 0444);

MODULE_AUTHOR("Hyunsung Jang, <hs79.jang@samsung.com>");
MODULE_DESCRIPTION("Mobile Storage Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
diff --git a/drivers/mmc/host/mshci.h b/drivers/mmc/host/mshci.h
new file mode 100644
index 00000000000..40212ae9cc3
--- /dev/null
+++ b/drivers/mmc/host/mshci.h
@@ -0,0 +1,463 @@
1/*
2* linux/drivers/mmc/host/mshci.h
3* Mobile Storage Host Controller Interface driver
4*
5* Copyright (c) 2011 Samsung Electronics Co., Ltd.
6* http://www.samsung.com
7*
8* Based on linux/drivers/mmc/host/sdhci.h
9*
10* This program is free software; you can redistribute it and/or modify
11* it under the terms of the GNU General Public License as published by
12* the Free Software Foundation; either version 2 of the License, or (at
13* your option) any later version.
14*
15*/
16
17#include <linux/scatterlist.h>
18#include <linux/compiler.h>
19#include <linux/types.h>
20#include <linux/io.h>
21
22/*
23 * Controller registers
24 */
25/*****************************************************/
26/* MSHC Internal Registers */
27/*****************************************************/
28
29#define MSHCI_CTRL 0x00 /* Control */
30#define MSHCI_PWREN 0x04 /* Power-enable */
31#define MSHCI_CLKDIV 0x08 /* Clock divider */
32#define MSHCI_CLKSRC 0x0C /* Clock source */
33#define MSHCI_CLKENA 0x10 /* Clock enable */
34#define MSHCI_TMOUT 0x14 /* Timeout */
35#define MSHCI_CTYPE 0x18 /* Card type */
36#define MSHCI_BLKSIZ 0x1C /* Block Size */
37#define MSHCI_BYTCNT 0x20 /* Byte count */
38#define MSHCI_INTMSK 0x24 /* Interrupt Mask */
39#define MSHCI_CMDARG 0x28 /* Command Argument */
40#define MSHCI_CMD 0x2C /* Command */
41#define MSHCI_RESP0 0x30 /* Response 0 */
42#define MSHCI_RESP1 0x34 /* Response 1 */
43#define MSHCI_RESP2 0x38 /* Response 2 */
44#define MSHCI_RESP3 0x3C /* Response 3 */
45#define MSHCI_MINTSTS 0x40 /* Masked interrupt status */
46#define MSHCI_RINTSTS 0x44 /* Raw interrupt status */
47#define MSHCI_STATUS 0x48 /* Status */
48#define MSHCI_FIFOTH 0x4C /* FIFO threshold */
49#define MSHCI_CDETECT 0x50 /* Card detect */
50#define MSHCI_WRTPRT 0x54 /* Write protect */
51#define MSHCI_GPIO 0x58 /* General Purpose IO */
52#define MSHCI_TCBCNT 0x5C /* Transferred CIU byte count */
53#define MSHCI_TBBCNT 0x60 /* Transferred host/DMA to/from byte count */
54#define MSHCI_DEBNCE 0x64 /* Card detect debounce */
55#define MSHCI_USRID 0x68 /* User ID */
56#define MSHCI_VERID 0x6C /* Version ID */
57#define MSHCI_HCON 0x70 /* Hardware Configuration */
58#define MSHCI_UHS_REG 0x74 /* UHS and DDR setting */
59#define MSHCI_BMOD 0x80 /* Bus mode register */
60#define MSHCI_PLDMND 0x84 /* Poll demand */
61#define MSHCI_DBADDR 0x88 /* Descriptor list base address */
62#define MSHCI_IDSTS 0x8C /* Internal DMAC status */
63#define MSHCI_IDINTEN 0x90 /* Internal DMAC interrupt enable */
64#define MSHCI_DSCADDR 0x94 /* Current host descriptor address */
65#define MSHCI_BUFADDR 0x98 /* Current host buffer address */
66#define MSHCI_CLKSEL 0x9C /* Clock Selection Register */
67#define MSHCI_WAKEUPCON 0xA0 /* Wakeup control register */
68#define MSHCI_CLOCKCON 0xA4 /* Clock (delay) control register */
69#define MSHCI_FIFODAT 0x100 /* FIFO data read write */
70
71/*****************************************************
 * Control Register
73 * MSHCI_CTRL - offset 0x00
74 *****************************************************/
75
76#define CTRL_RESET (0x1<<0) /* Reset DWC_mobile_storage controller */
77#define FIFO_RESET (0x1<<1) /* Reset FIFO */
78#define DMA_RESET (0x1<<2) /* Reset DMA interface */
79#define INT_ENABLE (0x1<<4) /* Global interrupt enable/disable bit */
80#define DMA_ENABLE (0x1<<5) /* DMA transfer mode enable/disable bit */
81#define READ_WAIT (0x1<<6) /* For sending read-wait to SDIO cards */
82#define SEND_IRQ_RESP (0x1<<7) /* Send auto IRQ response */
83#define ABRT_READ_DATA (0x1<<8)
84#define SEND_CCSD (0x1<<9)
85#define SEND_AS_CCSD (0x1<<10)
86#define CEATA_INTSTAT (0x1<<11)
87#define CARD_VOLA (0xF<<16)
88#define CARD_VOLB (0xF<<20)
89#define ENABLE_OD_PULLUP (0x1<<24)
90#define ENABLE_IDMAC (0x1<<25)
91
92#define MSHCI_RESET_ALL (0x1)
93
94/*****************************************************
95 * Power Enable Register
96 * MSHCI_PWREN - offset 0x04
97 *****************************************************/
98#define POWER_ENABLE (0x1<<0)
99
100/*****************************************************
101 * Clock Divider Register
102 * MSHCI_CLKDIV - offset 0x08
103 *****************************************************/
104#define CLK_DIVIDER0 (0xFF<<0)
105#define CLK_DIVIDER1 (0xFF<<8)
106#define CLK_DIVIDER2 (0xFF<<16)
107#define CLK_DIVIDER3 (0xFF<<24)
108
109/*****************************************************
110 * Clock Enable Register
111 * MSHCI_CLKENA - offset 0x10
112 *****************************************************/
113#define CLK_SDMMC_MAX (48000000) /* 96Mhz. it SHOULDBE optimized */
114#define CLK_ENABLE (0x1<<0)
115#define CLK_DISABLE (0x0<<0)
116
117/*****************************************************
118 * Timeout Register
119 * MSHCI_TMOUT - offset 0x14
120 *****************************************************/
121#define RSP_TIMEOUT (0xFF<<0)
122#define DATA_TIMEOUT (0xFFFFFF<<8)
123
124/*****************************************************
125 * Card Type Register
126 * MSHCI_CTYPE - offset 0x18
127 *****************************************************/
128#define CARD_WIDTH4 (0xFFFF<<0)
129#define CARD_WIDTH8 (0xFFFF<<16)
130
131/*****************************************************
132 * Block Size Register
133 * MSHCI_BLKSIZ - offset 0x1C
134 *****************************************************/
135#define BLK_SIZ (0xFFFF<<0)
136
137/*****************************************************
138 * Interrupt Mask Register
139 * MSHCI_INTMSK - offset 0x24
140 *****************************************************/
141#define INT_MASK (0xFFFF<<0)
142#define SDIO_INT_MASK (0xFFFF<<16)
143#define SDIO_INT_ENABLE (0x1<<16)
144
145/* interrupt bits */
146#define INTMSK_ALL 0xFFFFFFFF
147#define INTMSK_CDETECT (0x1<<0)
148#define INTMSK_RE (0x1<<1)
149#define INTMSK_CDONE (0x1<<2)
150#define INTMSK_DTO (0x1<<3)
151#define INTMSK_TXDR (0x1<<4)
152#define INTMSK_RXDR (0x1<<5)
153#define INTMSK_RCRC (0x1<<6)
154#define INTMSK_DCRC (0x1<<7)
155#define INTMSK_RTO (0x1<<8)
156#define INTMSK_DRTO (0x1<<9)
157#define INTMSK_HTO (0x1<<10)
158#define INTMSK_FRUN (0x1<<11)
159#define INTMSK_HLE (0x1<<12)
160#define INTMSK_SBE (0x1<<13)
161#define INTMSK_ACD (0x1<<14)
162#define INTMSK_EBE (0x1<<15)
163#define INTMSK_DMA (INTMSK_ACD|INTMSK_RXDR|INTMSK_TXDR)
164
165#define INT_SRC_IDMAC (0x0)
166#define INT_SRC_MINT (0x1)
167
168
169/*****************************************************
170 * Command Register
171 * MSHCI_CMD - offset 0x2C
172 *****************************************************/
173
174#define CMD_RESP_EXP_BIT (0x1<<6)
175#define CMD_RESP_LENGTH_BIT (0x1<<7)
176#define CMD_CHECK_CRC_BIT (0x1<<8)
177#define CMD_DATA_EXP_BIT (0x1<<9)
178#define CMD_RW_BIT (0x1<<10)
179#define CMD_TRANSMODE_BIT (0x1<<11)
180#define CMD_SENT_AUTO_STOP_BIT (0x1<<12)
181#define CMD_WAIT_PRV_DAT_BIT (0x1<<13)
182#define CMD_ABRT_CMD_BIT (0x1<<14)
183#define CMD_SEND_INIT_BIT (0x1<<15)
184#define CMD_CARD_NUM_BITS (0x1F<<16)
185#define CMD_SEND_CLK_ONLY (0x1<<21)
186#define CMD_READ_CEATA (0x1<<22)
187#define CMD_CCS_EXPECTED (0x1<<23)
188#define CMD_USE_HOLD_REG (0x1<<29)
189#define CMD_STRT_BIT (0x1<<31)
190#define CMD_ONLY_CLK (CMD_STRT_BIT | CMD_SEND_CLK_ONLY | \
191 CMD_WAIT_PRV_DAT_BIT)
192
193/*****************************************************
194 * Masked Interrupt Status Register
195 * MSHCI_MINTSTS - offset 0x40
196 *****************************************************/
197/*****************************************************
198 * Raw Interrupt Register
199 * MSHCI_RINTSTS - offset 0x44
200 *****************************************************/
#define INT_STATUS	(0xFFFF<<0)
#define SDIO_INTR	(0xFFFF<<16)
/* Data-path error interrupts (INTMSK_EBE was listed twice originally). */
#define DATA_ERR	(INTMSK_EBE|INTMSK_SBE|INTMSK_HLE|INTMSK_FRUN|\
			INTMSK_DCRC)
#define DATA_TOUT	(INTMSK_HTO|INTMSK_DRTO)
#define DATA_STATUS	(DATA_ERR|DATA_TOUT|INTMSK_RXDR|INTMSK_TXDR|INTMSK_DTO)
#define CMD_STATUS	(INTMSK_RTO|INTMSK_RCRC|INTMSK_CDONE|INTMSK_RE)
#define CMD_ERROR	(INTMSK_RCRC|INTMSK_RTO|INTMSK_RE)
209
210/*****************************************************
211 * Status Register
212 * MSHCI_STATUS - offset 0x48
213 *****************************************************/
214#define FIFO_RXWTRMARK (0x1<<0)
215#define FIFO_TXWTRMARK (0x1<<1)
216#define FIFO_EMPTY (0x1<<2)
217#define FIFO_FULL (0x1<<3)
218#define CMD_FSMSTAT (0xF<<4)
219#define DATA_3STATUS (0x1<<8)
220#define DATA_BUSY (0x1<<9)
221#define DATA_MCBUSY (0x1<<10)
222#define RSP_INDEX (0x3F<<11)
223#define FIFO_COUNT (0x1FFF<<17)
224#define DMA_ACK (0x1<<30)
225#define DMA_REQ (0x1<<31)
226#define FIFO_WIDTH (0x4)
227#define FIFO_DEPTH (0x20)
228
229/*Command FSM status */
230#define FSM_IDLE (0<<4)
231#define FSM_SEND_INIT_SEQ (1<<4)
232#define FSM_TX_CMD_STARTBIT (2<<4)
233#define FSM_TX_CMD_TXBIT (3<<4)
234#define FSM_TX_CMD_INDEX_ARG (4<<4)
235#define FSM_TX_CMD_CRC7 (5<<4)
236#define FSM_TX_CMD_ENDBIT (6<<4)
237#define FSM_RX_RESP_STARTBIT (7<<4)
238#define FSM_RX_RESP_IRQRESP (8<<4)
239#define FSM_RX_RESP_TXBIT (9<<4)
240#define FSM_RX_RESP_CMDIDX (10<<4)
241#define FSM_RX_RESP_DATA (11<<4)
242#define FSM_RX_RESP_CRC7 (12<<4)
243#define FSM_RX_RESP_ENDBIT (13<<4)
244#define FSM_CMD_PATHWAITNCC (14<<4)
245#define FSM_WAIT (15<<4)
246
247/*****************************************************
248 * FIFO Threshold Watermark Register
249 * MSHCI_FIFOTH - offset 0x4C
250 *****************************************************/
251#define TX_WMARK (0xFFF<<0)
252#define RX_WMARK (0xFFF<<16)
253#define MSIZE_MASK (0x7<<28)
254
255/* DW DMA Mutiple Transaction Size */
256#define MSIZE_1 (0<<28)
257#define MSIZE_4 (1<<28)
258#define MSIZE_8 (2<<28)
259#define MSIZE_16 (3<<28)
260#define MSIZE_32 (4<<28)
261#define MSIZE_64 (5<<28)
262#define MSIZE_128 (6<<28)
263#define MSIZE_256 (7<<28)
264
265/*****************************************************
 * General Purpose IO Register
 * MSHCI_GPIO - offset 0x58
268 *****************************************************/
269#define GPI (0xFF<<0)
270#define GPO (0xFFFF<<8)
271
272
273/*****************************************************
274 * Card Detect Register
275 * MSHCI_CDETECT - offset 0x50
276 * It assumes there is only one SD slot
277 *****************************************************/
278#define CARD_PRESENT (0x1<<0)
279
280/*****************************************************
281 * Write Protect Register
282 * MSHCI_WRTPRT - offset 0x54
283 * It assumes there is only one SD slot
284 *****************************************************/
285#define WRTPRT_ON (0x1<<0)
286
287/*****************************************************
288 * Bus Mode Register
289 * MSHCI_BMOD - offset 0x80
290 *****************************************************/
291#define BMOD_IDMAC_RESET (0x1<<0)
292#define BMOD_IDMAC_FB (0x1<<1)
293#define BMOD_IDMAC_ENABLE (0x1<<7)
294
295/*****************************************************
296 * Hardware Configuration Register
297 * MSHCI_HCON - offset 0x70
298 *****************************************************/
299#define CARD_TYPE (0x1<<0)
300#define NUM_CARDS (0x1F<<1)
301#define H_BUS_TYPE (0x1<<6)
302#define H_DATA_WIDTH (0x7<<7)
303#define H_ADDR_WIDTH (0x3F<<10)
304#define DMA_INTERFACE (0x3<<16)
305#define GE_DMA_DATA_WIDTH (0x7<<18)
306#define FIFO_RAM_INSIDE (0x1<<21)
307#define UMPLEMENT_HOLD_REG (0x1<<22)
308#define SET_CLK_FALSE_PATH (0x1<<23)
309#define NUM_CLK_DIVIDER (0x3<<24)
310
311/*****************************************************
312 * Hardware Configuration Register
313 * MSHCI_IDSTS - offset 0x8c
314 *****************************************************/
315#define IDSTS_FSM (0xf<<13)
316#define IDSTS_EB (0x7<<10)
317#define IDSTS_AIS (0x1<<9)
318#define IDSTS_NIS (0x1<<8)
319#define IDSTS_CES (0x1<<5)
320#define IDSTS_DU (0x1<<4)
321#define IDSTS_FBE (0x1<<2)
322#define IDSTS_RI (0x1<<1)
323#define IDSTS_TI (0x1<<0)
324
325struct mshci_ops;
326
/*
 * Internal DMAC (IDMAC) descriptor: four 32-bit words read by the
 * controller's DMA engine. Bit definitions below apply to the descriptor
 * words (presumably des0 for the OWN/flag bits — TODO confirm against the
 * DWC_mobile_storage databook).
 */
struct mshci_idmac {
	u32 des0;
	u32 des1;
	u32 des2;
	u32 des3;
#define MSHCI_IDMAC_OWN		(1<<31)	/* descriptor owned by the DMAC */
#define MSHCI_IDMAC_ER		(1<<5)
#define MSHCI_IDMAC_CH		(1<<4)
#define MSHCI_IDMAC_FS		(1<<3)	/* first segment */
#define MSHCI_IDMAC_LD		(1<<2)	/* last descriptor */
#define MSHCI_IDMAC_DIC		(1<<1)
#define INTMSK_IDMAC_ALL	(0x337)
#define INTMSK_IDMAC_ERROR	(0x214)
};
341
/*
 * Per-controller state, embedded in the mmc_host allocated by
 * mshci_alloc_host(). Fields under "Data set by hardware interface
 * driver" are filled by the platform glue before mshci_add_host().
 */
struct mshci_host {
	/* Data set by hardware interface driver */
	const char *hw_name; /* Hardware bus name */

	unsigned int quirks; /* Deviations from spec. */
/* Controller has no write-protect pin connected with SD card */
#define MSHCI_QUIRK_NO_WP_BIT			(1<<0)
#define MSHCI_QUIRK_BROKEN_CARD_DETECTION	(1<<1)
#define MSHCI_QUIRK_BROKEN_PRESENT_BIT		(1<<2)

	int irq; /* Device IRQ */
	void __iomem *ioaddr; /* Mapped address */

	const struct mshci_ops *ops; /* Low level hw interface */

	/* Internal data */
	struct mmc_host *mmc; /* MMC structure */
	u64 dma_mask; /* custom DMA mask (PIO-only hosts) */

	spinlock_t lock; /* Mutex */

	int flags; /* Host attributes */
#define MSHCI_USE_IDMA		(1<<1) /* Host is ADMA capable */
#define MSHCI_REQ_USE_DMA	(1<<2) /* Use DMA for this req. */
#define MSHCI_DEVICE_DEAD	(1<<3) /* Device unresponsive */

	unsigned int version; /* MSHCI_VERID register value */

	unsigned int max_clk; /* Max possible freq (MHz) */
	unsigned int timeout_clk; /* Timeout freq (KHz) */

	unsigned int clock; /* Current clock (MHz) */
	unsigned int clock_to_restore; /* Saved clock for dynamic clock gating (MHz) */
	u8 pwr; /* Current voltage */

	struct mmc_request *mrq; /* Current request */
	struct mmc_command *cmd; /* Current command */
	struct mmc_data *data; /* Current data request */
	unsigned int data_early:1; /* Data finished before cmd */

	struct sg_mapping_iter sg_miter; /* SG state for PIO */
	unsigned int blocks; /* remaining PIO blocks */

	int sg_count; /* Mapped sg entries */

	u8 *idma_desc; /* IDMAC descriptor table (kmalloc'd in add_host) */
	u8 *align_buffer; /* Bounce buffer */

	dma_addr_t idma_addr; /* Mapped IDMAC descr. table */
	dma_addr_t align_addr; /* Mapped bounce buffer */

	struct tasklet_struct card_tasklet; /* Tasklet structures */
	struct tasklet_struct finish_tasklet;

	struct timer_list timer; /* Timer for timeouts */

	u32 fifo_depth;     /* even-rounded depth, set in mshci_fifo_init() */
	u32 fifo_threshold; /* TX watermark (depth/2) */
	u32 data_transfered;

	/* IP version control */
	u32 data_addr;
	u32 hold_bit;

	u32 error_state;

	/* saved IRQ flags for host->lock (see mshci_remove_host) */
	unsigned long sl_flags;
	/* glue-layer private area; sized by priv_size in mshci_alloc_host() */
	unsigned long private[0] ____cacheline_aligned;
};
411
/*
 * Low-level callbacks supplied by the platform glue. Optional members
 * (checked before use in mshci.c) include enable_dma, get_ro and
 * get_max_clock; get_fifo_depth is required by mshci_fifo_init().
 */
struct mshci_ops {
	void	(*set_clock)(struct mshci_host *host, unsigned int clock);

	int	(*enable_dma)(struct mshci_host *host);
	/* base clock in Hz — used as mmc->f_max in mshci_add_host() */
	unsigned int	(*get_max_clock)(struct mshci_host *host);
	unsigned int	(*get_min_clock)(struct mshci_host *host);
	unsigned int	(*get_timeout_clock)(struct mshci_host *host);
	void (*set_ios)(struct mshci_host *host,
			struct mmc_ios *ios);
	/* overrides mshci_ops.get_ro when provided (write-protect query) */
	int (*get_ro) (struct mmc_host *mmc);
	void (*init_issue_cmd)(struct mshci_host *host);
	void (*init_card)(struct mshci_host *host);

	int (*dma_map_sg)(struct mshci_host *host,
			struct device *dev,
			struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			int flush_type);
	void (*dma_unmap_sg)(struct mshci_host *host,
			struct device *dev,
			struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			int flush_type);
	/* FIFO depth in entries — required; feeds mshci_fifo_init() */
	int (*get_fifo_depth)(struct mshci_host *host);
};
437
/*
 * Write a 32-bit controller register.
 * NOTE(review): uses __raw_writel (no barrier/byte-swap) while
 * mshci_readl() uses plain readl — asymmetric; confirm intentional.
 */
static inline void mshci_writel(struct mshci_host *host, u32 val, int reg)
{
	__raw_writel(val, host->ioaddr + reg);
}
442
/*
 * Read a 32-bit controller register (plain readl, unlike the raw write
 * in mshci_writel — see the note there).
 */
static inline u32 mshci_readl(struct mshci_host *host, int reg)
{
	return readl(host->ioaddr + reg);
}
447
448extern struct mshci_host *mshci_alloc_host(struct device *dev,
449 size_t priv_size);
450extern void mshci_free_host(struct mshci_host *host);
451
/*
 * Return the glue layer's private area, sized by the priv_size argument
 * passed to mshci_alloc_host().
 */
static inline void *mshci_priv(struct mshci_host *host)
{
	return (void *)host->private;
}
456
457extern int mshci_add_host(struct mshci_host *host);
458extern void mshci_remove_host(struct mshci_host *host, int dead);
459
460#ifdef CONFIG_PM
461extern int mshci_suspend_host(struct mshci_host *host, pm_message_t state);
462extern int mshci_resume_host(struct mshci_host *host);
463#endif
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 8cd999f4af5..a5e11cd2ab7 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -24,6 +24,7 @@
24 24
25#include <plat/sdhci.h> 25#include <plat/sdhci.h>
26#include <plat/regs-sdhci.h> 26#include <plat/regs-sdhci.h>
27#include <plat/gpio-cfg.h>
27 28
28#include "sdhci.h" 29#include "sdhci.h"
29 30
@@ -47,6 +48,7 @@ struct sdhci_s3c {
47 unsigned int cur_clk; 48 unsigned int cur_clk;
48 int ext_cd_irq; 49 int ext_cd_irq;
49 int ext_cd_gpio; 50 int ext_cd_gpio;
51 int ext_cd_gpio_invert;
50 52
51 struct clk *clk_io; 53 struct clk *clk_io;
52 struct clk *clk_bus[MAX_BUS_CLK]; 54 struct clk *clk_bus[MAX_BUS_CLK];
@@ -212,6 +214,12 @@ static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock)
212 if (ourhost->pdata->cfg_card) 214 if (ourhost->pdata->cfg_card)
213 (ourhost->pdata->cfg_card)(ourhost->pdev, host->ioaddr, 215 (ourhost->pdata->cfg_card)(ourhost->pdev, host->ioaddr,
214 &ios, NULL); 216 &ios, NULL);
217#ifdef CONFIG_MACH_MIDAS
218 /* call cfg_gpio with 4bit data bus */
219 if (ourhost->pdata->cfg_gpio)
220 ourhost->pdata->cfg_gpio(ourhost->pdev, 4);
221
222#endif
215 } 223 }
216} 224}
217 225
@@ -288,6 +296,7 @@ static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
288static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width) 296static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
289{ 297{
290 u8 ctrl; 298 u8 ctrl;
299 struct sdhci_s3c *ourhost = to_s3c(host);
291 300
292 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 301 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
293 302
@@ -295,14 +304,23 @@ static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
295 case MMC_BUS_WIDTH_8: 304 case MMC_BUS_WIDTH_8:
296 ctrl |= SDHCI_CTRL_8BITBUS; 305 ctrl |= SDHCI_CTRL_8BITBUS;
297 ctrl &= ~SDHCI_CTRL_4BITBUS; 306 ctrl &= ~SDHCI_CTRL_4BITBUS;
307 /* call cfg_gpio with 8bit data bus */
308 if (ourhost->pdata->cfg_gpio)
309 ourhost->pdata->cfg_gpio(ourhost->pdev, 8);
298 break; 310 break;
299 case MMC_BUS_WIDTH_4: 311 case MMC_BUS_WIDTH_4:
300 ctrl |= SDHCI_CTRL_4BITBUS; 312 ctrl |= SDHCI_CTRL_4BITBUS;
301 ctrl &= ~SDHCI_CTRL_8BITBUS; 313 ctrl &= ~SDHCI_CTRL_8BITBUS;
314 /* call cfg_gpio with 4bit data bus */
315 if (ourhost->pdata->cfg_gpio)
316 ourhost->pdata->cfg_gpio(ourhost->pdev, 4);
302 break; 317 break;
303 default: 318 default:
304 ctrl &= ~SDHCI_CTRL_4BITBUS;
305 ctrl &= ~SDHCI_CTRL_8BITBUS; 319 ctrl &= ~SDHCI_CTRL_8BITBUS;
320 ctrl &= ~SDHCI_CTRL_4BITBUS;
321 /* call cfg_gpio with 1bit data bus */
322 if (ourhost->pdata->cfg_gpio)
323 ourhost->pdata->cfg_gpio(ourhost->pdev, 1);
306 break; 324 break;
307 } 325 }
308 326
@@ -311,13 +329,69 @@ static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
311 return 0; 329 return 0;
312} 330}
313 331
332#ifdef CONFIG_MIDAS_COMMON
333/* midas board control the vdd for tflash by gpio,
334 not regulator directly.
335 so, code related vdd control should be added */
/*
 * sdhci_s3c_vtf_on_off - switch T-Flash VDD via its enable GPIO.
 * @on_off: nonzero to power on, zero to power off.
 *
 * NOTE: the enclosing region is already guarded by CONFIG_MIDAS_COMMON,
 * so the #else arm below is dead code; kept for reference only.
 */
static void sdhci_s3c_vtf_on_off(int on_off)
{
#ifdef CONFIG_MIDAS_COMMON
	int gpio = GPIO_TF_EN;
#else
	int gpio = EXYNOS4212_GPJ0(7);
#endif

	/* Collapse the redundant if/else into a single normalized write. */
	gpio_set_value(gpio, on_off ? 1 : 0);
}
350
351
352static int sdhci_s3c_get_card_exist(struct sdhci_host *host)
353{
354 struct sdhci_s3c *sc;
355 int status;
356
357 sc = sdhci_priv(host);
358
359 status = gpio_get_value(sc->ext_cd_gpio);
360 if (sc->pdata->ext_cd_gpio_invert)
361 status = !status;
362
363 return status;
364}
365#endif
366
314static struct sdhci_ops sdhci_s3c_ops = { 367static struct sdhci_ops sdhci_s3c_ops = {
315 .get_max_clock = sdhci_s3c_get_max_clk, 368 .get_max_clock = sdhci_s3c_get_max_clk,
316 .set_clock = sdhci_s3c_set_clock, 369 .set_clock = sdhci_s3c_set_clock,
317 .get_min_clock = sdhci_s3c_get_min_clock, 370 .get_min_clock = sdhci_s3c_get_min_clock,
318 .platform_8bit_width = sdhci_s3c_platform_8bit_width, 371 .platform_8bit_width = sdhci_s3c_platform_8bit_width,
372#ifdef CONFIG_MIDAS_COMMON
373 .set_power = sdhci_s3c_vtf_on_off,
374#endif
319}; 375};
320 376
377//----------------------------------------------------------------------------------------------------
378//
379// Hardkernel / ODROID
380//
381//----------------------------------------------------------------------------------------------------
382/*
383* call this when you need sd stack to recognize insertion or removal of card
384* that can't be told by SDHCI regs
385*/
386static void sdhci_s3c_notify_change(struct platform_device *dev, int state);
387
/*
 * Exported wrapper around sdhci_s3c_notify_change() so external code can
 * force the SD stack to re-evaluate card presence when insertion/removal
 * cannot be seen in the SDHCI registers.
 * @state: nonzero = card inserted, zero = card removed.
 */
void sdhci_s3c_force_presence_change(struct platform_device *pdev, int state)
{
	sdhci_s3c_notify_change(pdev, state);
}
392
393EXPORT_SYMBOL_GPL(sdhci_s3c_force_presence_change);
394
321static void sdhci_s3c_notify_change(struct platform_device *dev, int state) 395static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
322{ 396{
323 struct sdhci_host *host = platform_get_drvdata(dev); 397 struct sdhci_host *host = platform_get_drvdata(dev);
@@ -327,12 +401,20 @@ static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
327 spin_lock_irqsave(&host->lock, flags); 401 spin_lock_irqsave(&host->lock, flags);
328 if (state) { 402 if (state) {
329 dev_dbg(&dev->dev, "card inserted.\n"); 403 dev_dbg(&dev->dev, "card inserted.\n");
330 host->flags &= ~SDHCI_DEVICE_DEAD; 404 pr_info("%s: card inserted.\n",
405 mmc_hostname(host->mmc));
331 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; 406 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
407#ifdef CONFIG_MACH_MIDAS_01_BD
408 sdhci_s3c_vtf_on_off(1);
409#endif
332 } else { 410 } else {
333 dev_dbg(&dev->dev, "card removed.\n"); 411 dev_dbg(&dev->dev, "card removed.\n");
334 host->flags |= SDHCI_DEVICE_DEAD; 412 pr_info("%s: card removed.\n",
413 mmc_hostname(host->mmc));
335 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; 414 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
415#ifdef CONFIG_MACH_MIDAS_01_BD
416 sdhci_s3c_vtf_on_off(0);
417#endif
336 } 418 }
337 tasklet_schedule(&host->card_tasklet); 419 tasklet_schedule(&host->card_tasklet);
338 spin_unlock_irqrestore(&host->lock, flags); 420 spin_unlock_irqrestore(&host->lock, flags);
@@ -345,6 +427,17 @@ static irqreturn_t sdhci_s3c_gpio_card_detect_thread(int irq, void *dev_id)
345 int status = gpio_get_value(sc->ext_cd_gpio); 427 int status = gpio_get_value(sc->ext_cd_gpio);
346 if (sc->pdata->ext_cd_gpio_invert) 428 if (sc->pdata->ext_cd_gpio_invert)
347 status = !status; 429 status = !status;
430
431 if (sc->host->mmc) {
432 if (status)
433 mmc_host_sd_set_present(sc->host->mmc);
434 else
435 mmc_host_sd_clear_present(sc->host->mmc);
436
437 pr_debug("SDcard present state=%d.\n",
438 mmc_host_sd_present(sc->host->mmc));
439 }
440
348 sdhci_s3c_notify_change(sc->pdev, status); 441 sdhci_s3c_notify_change(sc->pdev, status);
349 return IRQ_HANDLED; 442 return IRQ_HANDLED;
350} 443}
@@ -354,8 +447,7 @@ static void sdhci_s3c_setup_card_detect_gpio(struct sdhci_s3c *sc)
354 struct s3c_sdhci_platdata *pdata = sc->pdata; 447 struct s3c_sdhci_platdata *pdata = sc->pdata;
355 struct device *dev = &sc->pdev->dev; 448 struct device *dev = &sc->pdev->dev;
356 449
357 if (gpio_request(pdata->ext_cd_gpio, "SDHCI EXT CD") == 0) { 450 if (sc->ext_cd_gpio > 0) {
358 sc->ext_cd_gpio = pdata->ext_cd_gpio;
359 sc->ext_cd_irq = gpio_to_irq(pdata->ext_cd_gpio); 451 sc->ext_cd_irq = gpio_to_irq(pdata->ext_cd_gpio);
360 if (sc->ext_cd_irq && 452 if (sc->ext_cd_irq &&
361 request_threaded_irq(sc->ext_cd_irq, NULL, 453 request_threaded_irq(sc->ext_cd_irq, NULL,
@@ -365,16 +457,56 @@ static void sdhci_s3c_setup_card_detect_gpio(struct sdhci_s3c *sc)
365 int status = gpio_get_value(sc->ext_cd_gpio); 457 int status = gpio_get_value(sc->ext_cd_gpio);
366 if (pdata->ext_cd_gpio_invert) 458 if (pdata->ext_cd_gpio_invert)
367 status = !status; 459 status = !status;
460
461 if (status)
462 mmc_host_sd_set_present(sc->host->mmc);
463 else
464 mmc_host_sd_clear_present(sc->host->mmc);
465
466 /* T-Flash EINT for CD SHOULD be wakeup source */
467 irq_set_irq_wake(sc->ext_cd_irq, 1);
468
368 sdhci_s3c_notify_change(sc->pdev, status); 469 sdhci_s3c_notify_change(sc->pdev, status);
369 } else { 470 } else {
370 dev_warn(dev, "cannot request irq for card detect\n"); 471 dev_warn(dev, "cannot request irq for card detect\n");
371 sc->ext_cd_irq = 0; 472 sc->ext_cd_irq = 0;
372 } 473 }
474 }
475}
476
477//extern struct class *sec_class;
478//static struct device *sd_detection_cmd_dev;
479
480static ssize_t sd_detection_cmd_show(struct device *dev,
481 struct device_attribute *attr, char *buf)
482{
483 struct sdhci_s3c *sc = dev_get_drvdata(dev);
484 unsigned int detect;
485
486 if (sc && sc->ext_cd_gpio)
487 detect = gpio_get_value(sc->ext_cd_gpio);
488 else {
489 pr_info("%s : External SD detect pin Error\n", __func__);
490 return sprintf(buf, "Error\n");
491 }
492
493 if (sc->pdata->ext_cd_gpio_invert) {
494 pr_info("%s : Invert External SD detect pin\n", __func__);
495 detect = !detect;
496 }
497
498 pr_info("%s : detect = %d.\n", __func__, detect);
499 if (detect) {
500 pr_debug("sdhci: card inserted.\n");
501 return sprintf(buf, "Insert\n");
373 } else { 502 } else {
374 dev_err(dev, "cannot request gpio for card detect\n"); 503 pr_debug("sdhci: card removed.\n");
504 return sprintf(buf, "Remove\n");
375 } 505 }
376} 506}
377 507
508static DEVICE_ATTR(status, 0444, sd_detection_cmd_show, NULL);
509
378static int __devinit sdhci_s3c_probe(struct platform_device *pdev) 510static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
379{ 511{
380 struct s3c_sdhci_platdata *pdata = pdev->dev.platform_data; 512 struct s3c_sdhci_platdata *pdata = pdev->dev.platform_data;
@@ -501,11 +633,7 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
501 * SDHCI block, or a missing configuration that needs to be set. */ 633 * SDHCI block, or a missing configuration that needs to be set. */
502 host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ; 634 host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ;
503 635
504 /* This host supports the Auto CMD12 */ 636 if (pdata->cd_type == S3C_SDHCI_CD_NONE)
505 host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
506
507 if (pdata->cd_type == S3C_SDHCI_CD_NONE ||
508 pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
509 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; 637 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
510 638
511 if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT) 639 if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
@@ -514,6 +642,10 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
514 if (pdata->host_caps) 642 if (pdata->host_caps)
515 host->mmc->caps |= pdata->host_caps; 643 host->mmc->caps |= pdata->host_caps;
516 644
645 /* if vmmc_name is in pdata */
646 if (pdata->vmmc_name)
647 host->vmmc_name = pdata->vmmc_name;
648
517 host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR | 649 host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR |
518 SDHCI_QUIRK_32BIT_DMA_SIZE); 650 SDHCI_QUIRK_32BIT_DMA_SIZE);
519 651
@@ -534,21 +666,88 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
534 if (pdata->host_caps) 666 if (pdata->host_caps)
535 host->mmc->caps |= pdata->host_caps; 667 host->mmc->caps |= pdata->host_caps;
536 668
669 /* for BCM WIFI */
670 if (pdata->pm_flags)
671 host->mmc->pm_flags |= pdata->pm_flags;
672
673#ifdef CONFIG_MACH_MIDAS_01_BD
674 /* before calling shhci_add_host, you should turn vdd_tflash on */
675 sdhci_s3c_vtf_on_off(1);
676#endif
677
678 /* To turn on vmmc regulator only if sd card exists,
679 GPIO pin for card detection should be initialized.
680 Moved from sdhci_s3c_setup_card_detect_gpio() function */
681 if (pdata->cd_type == S3C_SDHCI_CD_GPIO &&
682 gpio_is_valid(pdata->ext_cd_gpio)) {
683 if (gpio_request(pdata->ext_cd_gpio, "SDHCI EXT CD") == 0) {
684 sc->ext_cd_gpio = pdata->ext_cd_gpio;
685 sc->ext_cd_gpio_invert = pdata->ext_cd_gpio_invert;
686
687 mmc_host_sd_set_present(host->mmc);
688// if (sd_detection_cmd_dev == NULL &&
689// sc->ext_cd_gpio) {
690// sd_detection_cmd_dev =
691// device_create(sec_class, NULL, 0,
692// NULL, "sdcard");
693// if (IS_ERR(sd_detection_cmd_dev))
694// pr_err("Fail to create sysfs dev\n");
695//
696// if (device_create_file(sd_detection_cmd_dev,
697// &dev_attr_status) < 0)
698// pr_err("Fail to create sysfs file\n");
699//
700// dev_set_drvdata(sd_detection_cmd_dev, sc);
701// }
702#ifdef CONFIG_MIDAS_COMMON
703 /* set TF_EN gpio as OUTPUT */
704 gpio_request(GPIO_TF_EN, "TF_EN");
705 gpio_direction_output(GPIO_TF_EN, 1);
706 s3c_gpio_cfgpin(GPIO_TF_EN, S3C_GPIO_SFN(1));
707 s3c_gpio_setpull(GPIO_TF_EN, S3C_GPIO_PULL_NONE);
708#endif
709 } else {
710 dev_err(dev, "cannot request gpio for card detect\n");
711 }
712 }
713
537 ret = sdhci_add_host(host); 714 ret = sdhci_add_host(host);
538 if (ret) { 715 if (ret) {
539 dev_err(dev, "sdhci_add_host() failed\n"); 716 dev_err(dev, "sdhci_add_host() failed\n");
540 goto err_add_host; 717 goto err_add_host;
541 } 718 }
542 719
720 /* if it is set SDHCI_QUIRK_BROKEN_CARD_DETECTION before calling
721 sdhci_add_host, in sdhci_add_host, MMC_CAP_NEEDS_POLL flag will
722 be set. The flag S3C_SDHCI_CD_PERMANENT dose not need to
723 detect a card by polling. */
724 if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT || \
725 pdata->cd_type == S3C_SDHCI_CD_GPIO)
726 host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
727
543 /* The following two methods of card detection might call 728 /* The following two methods of card detection might call
544 sdhci_s3c_notify_change() immediately, so they can be called 729 sdhci_s3c_notify_change() immediately, so they can be called
545 only after sdhci_add_host(). Setup errors are ignored. */ 730 only after sdhci_add_host(). Setup errors are ignored. */
546 if (pdata->cd_type == S3C_SDHCI_CD_EXTERNAL && pdata->ext_cd_init) 731 if (pdata->cd_type == S3C_SDHCI_CD_EXTERNAL && pdata->ext_cd_init) {
547 pdata->ext_cd_init(&sdhci_s3c_notify_change); 732 pdata->ext_cd_init(&sdhci_s3c_notify_change);
733#ifdef CONFIG_MACH_PX
734 if (pdata->ext_pdev)
735 pdata->ext_pdev(pdev);
736#endif
737 }
548 if (pdata->cd_type == S3C_SDHCI_CD_GPIO && 738 if (pdata->cd_type == S3C_SDHCI_CD_GPIO &&
549 gpio_is_valid(pdata->ext_cd_gpio)) 739 gpio_is_valid(pdata->ext_cd_gpio))
550 sdhci_s3c_setup_card_detect_gpio(sc); 740 sdhci_s3c_setup_card_detect_gpio(sc);
551 741
742#ifdef CONFIG_MACH_MIDAS_01_BD
743 /* if card dose not exist, it should turn vtf off */
744 if (pdata->cd_type == S3C_SDHCI_CD_GPIO &&
745 sdhci_s3c_get_card_exist(host))
746 sdhci_s3c_vtf_on_off(1);
747 else
748 sdhci_s3c_vtf_on_off(0);
749#endif
750
552 return 0; 751 return 0;
553 752
554 err_add_host: 753 err_add_host:
@@ -613,17 +812,32 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
613static int sdhci_s3c_suspend(struct platform_device *dev, pm_message_t pm) 812static int sdhci_s3c_suspend(struct platform_device *dev, pm_message_t pm)
614{ 813{
615 struct sdhci_host *host = platform_get_drvdata(dev); 814 struct sdhci_host *host = platform_get_drvdata(dev);
815 int ret = 0;
616 816
617 sdhci_suspend_host(host, pm); 817 ret = sdhci_suspend_host(host, pm);
618 return 0; 818
819#ifdef CONFIG_MACH_MIDAS_01_BD
820 /* turn vdd_tflash off */
821 sdhci_s3c_vtf_on_off(0);
822#endif
823 return ret;
619} 824}
620 825
621static int sdhci_s3c_resume(struct platform_device *dev) 826static int sdhci_s3c_resume(struct platform_device *dev)
622{ 827{
623 struct sdhci_host *host = platform_get_drvdata(dev); 828 struct sdhci_host *host = platform_get_drvdata(dev);
829 int ret = 0;
624 830
625 sdhci_resume_host(host); 831#ifdef CONFIG_MACH_MIDAS_01_BD
626 return 0; 832 /* turn vdd_tflash off if a card exists*/
833 if (sdhci_s3c_get_card_exist(host))
834 sdhci_s3c_vtf_on_off(1);
835 else
836 sdhci_s3c_vtf_on_off(0);
837
838#endif
839 ret = sdhci_resume_host(host);
840 return ret;
627} 841}
628 842
629#else 843#else
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 153008fff54..57dcdbe9bc7 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -25,13 +25,16 @@
25 25
26#include <linux/mmc/mmc.h> 26#include <linux/mmc/mmc.h>
27#include <linux/mmc/host.h> 27#include <linux/mmc/host.h>
28#include <linux/mmc/card.h>
28 29
29#include "sdhci.h" 30#include "sdhci.h"
30 31
32#include <linux/gpio.h>
33
31#define DRIVER_NAME "sdhci" 34#define DRIVER_NAME "sdhci"
32 35
33#define DBG(f, x...) \ 36#define DBG(f, x...) \
34 pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x) 37 pr_debug(DRIVER_NAME " [%s()]: " f, __func__, ## x)
35 38
36#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \ 39#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
37 defined(CONFIG_MMC_SDHCI_MODULE)) 40 defined(CONFIG_MMC_SDHCI_MODULE))
@@ -46,9 +49,25 @@ static void sdhci_finish_data(struct sdhci_host *);
46 49
47static void sdhci_send_command(struct sdhci_host *, struct mmc_command *); 50static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
48static void sdhci_finish_command(struct sdhci_host *); 51static void sdhci_finish_command(struct sdhci_host *);
49static int sdhci_execute_tuning(struct mmc_host *mmc); 52static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
50static void sdhci_tuning_timer(unsigned long data); 53static void sdhci_tuning_timer(unsigned long data);
51 54
55#define MAX_BUS_CLK (4)
56
57struct sdhci_s3c {
58 struct sdhci_host *host;
59 struct platform_device *pdev;
60 struct resource *ioarea;
61 struct s3c_sdhci_platdata *pdata;
62 unsigned int cur_clk;
63 int ext_cd_irq;
64 int ext_cd_gpio;
65 int ext_cd_gpio_invert;
66
67 struct clk *clk_io;
68 struct clk *clk_bus[MAX_BUS_CLK];
69};
70
52static void sdhci_dumpregs(struct sdhci_host *host) 71static void sdhci_dumpregs(struct sdhci_host *host)
53{ 72{
54 printk(KERN_DEBUG DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n", 73 printk(KERN_DEBUG DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
@@ -651,6 +670,17 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
651 break; 670 break;
652 } 671 }
653 672
673 /* card's type is SD, set timeout */
674 if (host->mmc->card && mmc_card_sd(host->mmc->card)) {
675 count += 2;
676 /*
677 * It's to prevent warning error log,
678 * If count value is more than 0xD before add 2.
679 */
680 if (count >= 0xF)
681 count = 0xE;
682 }
683
654 if (count >= 0xF) { 684 if (count >= 0xF) {
655 printk(KERN_WARNING "%s: Too large timeout requested for CMD%d!\n", 685 printk(KERN_WARNING "%s: Too large timeout requested for CMD%d!\n",
656 mmc_hostname(host->mmc), cmd->opcode); 686 mmc_hostname(host->mmc), cmd->opcode);
@@ -1006,7 +1036,7 @@ static void sdhci_finish_command(struct sdhci_host *host)
1006 if (host->cmd->flags & MMC_RSP_PRESENT) { 1036 if (host->cmd->flags & MMC_RSP_PRESENT) {
1007 if (host->cmd->flags & MMC_RSP_136) { 1037 if (host->cmd->flags & MMC_RSP_136) {
1008 /* CRC is stripped so we need to do some shifting. */ 1038 /* CRC is stripped so we need to do some shifting. */
1009 for (i = 0;i < 4;i++) { 1039 for (i = 0 ; i < 4 ; i++) {
1010 host->cmd->resp[i] = sdhci_readl(host, 1040 host->cmd->resp[i] = sdhci_readl(host,
1011 SDHCI_RESPONSE + (3-i)*4) << 8; 1041 SDHCI_RESPONSE + (3-i)*4) << 8;
1012 if (i != 3) 1042 if (i != 3)
@@ -1044,7 +1074,7 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1044 u16 clk = 0; 1074 u16 clk = 0;
1045 unsigned long timeout; 1075 unsigned long timeout;
1046 1076
1047 if (clock == host->clock) 1077 if (clock && clock == host->clock)
1048 return; 1078 return;
1049 1079
1050 if (host->ops->set_clock) { 1080 if (host->ops->set_clock) {
@@ -1250,13 +1280,12 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1250 if ((host->flags & SDHCI_NEEDS_RETUNING) && 1280 if ((host->flags & SDHCI_NEEDS_RETUNING) &&
1251 !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) { 1281 !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) {
1252 spin_unlock_irqrestore(&host->lock, flags); 1282 spin_unlock_irqrestore(&host->lock, flags);
1253 sdhci_execute_tuning(mmc); 1283 sdhci_execute_tuning(mmc, mrq->cmd->opcode);
1254 spin_lock_irqsave(&host->lock, flags); 1284 spin_lock_irqsave(&host->lock, flags);
1255 1285
1256 /* Restore original mmc_request structure */ 1286 /* Restore original mmc_request structure */
1257 host->mrq = mrq; 1287 host->mrq = mrq;
1258 } 1288 }
1259
1260 if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23)) 1289 if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
1261 sdhci_send_command(host, mrq->sbc); 1290 sdhci_send_command(host, mrq->sbc);
1262 else 1291 else
@@ -1275,10 +1304,41 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1275 1304
1276 host = mmc_priv(mmc); 1305 host = mmc_priv(mmc);
1277 1306
1307 if ((!mmc_host_sd_present(mmc) ||
1308 (mmc_host_sd_present(mmc) &&
1309 !mmc_host_sd_init_stat(mmc) &&
1310 mmc_host_sd_prev_stat(mmc))) &&
1311 ios->power_mode == MMC_POWER_OFF) {
1312 mmc_host_sd_clear_prev_stat(mmc);
1313 if (host->vmmc && regulator_is_enabled(host->vmmc)) {
1314#ifdef CONFIG_MIDAS_COMMON
1315 if (host->ops->set_power)
1316 host->ops->set_power(0);
1317#endif
1318 regulator_disable(host->vmmc);
1319 pr_info("%s : MMC Card OFF %s\n", __func__,
1320 host->hw_name);
1321 }
1322 } else if (mmc_host_sd_present(mmc) &&
1323 !mmc_host_sd_prev_stat(mmc)) {
1324 mmc_host_sd_set_prev_stat(mmc);
1325 if (host->vmmc && !regulator_is_enabled(host->vmmc)) {
1326#ifdef CONFIG_MIDAS_COMMON
1327 if (host->ops->set_power)
1328 host->ops->set_power(1);
1329#endif
1330 regulator_enable(host->vmmc);
1331 pr_info("%s : MMC Card ON %s\n", __func__,
1332 host->hw_name);
1333 }
1334 }
1335
1278 spin_lock_irqsave(&host->lock, flags); 1336 spin_lock_irqsave(&host->lock, flags);
1279 1337
1280 if (host->flags & SDHCI_DEVICE_DEAD) 1338 if (host->flags & SDHCI_DEVICE_DEAD) {
1339 sdhci_set_clock(host, 0);
1281 goto out; 1340 goto out;
1341 }
1282 1342
1283 /* 1343 /*
1284 * Reset the chip on each power off. 1344 * Reset the chip on each power off.
@@ -1415,7 +1475,7 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1415 * signalling timeout and CRC errors even on CMD0. Resetting 1475 * signalling timeout and CRC errors even on CMD0. Resetting
1416 * it on each ios seems to solve the problem. 1476 * it on each ios seems to solve the problem.
1417 */ 1477 */
1418 if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS) 1478 if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
1419 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 1479 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1420 1480
1421out: 1481out:
@@ -1468,6 +1528,14 @@ static int sdhci_get_ro(struct mmc_host *mmc)
1468 return 0; 1528 return 0;
1469} 1529}
1470 1530
1531static void sdhci_hw_reset(struct mmc_host *mmc)
1532{
1533 struct sdhci_host *host = mmc_priv(mmc);
1534
1535 if (host->ops && host->ops->hw_reset)
1536 host->ops->hw_reset(host);
1537}
1538
1471static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) 1539static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1472{ 1540{
1473 struct sdhci_host *host; 1541 struct sdhci_host *host;
@@ -1592,7 +1660,7 @@ static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
1592 return 0; 1660 return 0;
1593} 1661}
1594 1662
1595static int sdhci_execute_tuning(struct mmc_host *mmc) 1663static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1596{ 1664{
1597 struct sdhci_host *host; 1665 struct sdhci_host *host;
1598 u16 ctrl; 1666 u16 ctrl;
@@ -1650,7 +1718,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc)
1650 if (!tuning_loop_counter && !timeout) 1718 if (!tuning_loop_counter && !timeout)
1651 break; 1719 break;
1652 1720
1653 cmd.opcode = MMC_SEND_TUNING_BLOCK; 1721 cmd.opcode = opcode;
1654 cmd.arg = 0; 1722 cmd.arg = 0;
1655 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; 1723 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
1656 cmd.retries = 0; 1724 cmd.retries = 0;
@@ -1802,6 +1870,7 @@ static const struct mmc_host_ops sdhci_ops = {
1802 .request = sdhci_request, 1870 .request = sdhci_request,
1803 .set_ios = sdhci_set_ios, 1871 .set_ios = sdhci_set_ios,
1804 .get_ro = sdhci_get_ro, 1872 .get_ro = sdhci_get_ro,
1873 .hw_reset = sdhci_hw_reset,
1805 .enable_sdio_irq = sdhci_enable_sdio_irq, 1874 .enable_sdio_irq = sdhci_enable_sdio_irq,
1806 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, 1875 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
1807 .execute_tuning = sdhci_execute_tuning, 1876 .execute_tuning = sdhci_execute_tuning,
@@ -1819,16 +1888,16 @@ static void sdhci_tasklet_card(unsigned long param)
1819 struct sdhci_host *host; 1888 struct sdhci_host *host;
1820 unsigned long flags; 1889 unsigned long flags;
1821 1890
1822 host = (struct sdhci_host*)param; 1891 host = (struct sdhci_host *)param;
1823 1892
1824 spin_lock_irqsave(&host->lock, flags); 1893 spin_lock_irqsave(&host->lock, flags);
1825 1894
1826 if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) { 1895 if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
1827 if (host->mrq) { 1896 if (host->mrq) {
1828 printk(KERN_ERR "%s: Card removed during transfer!\n", 1897 printk(KERN_ERR "%s: Card removed during transfer!\n",
1829 mmc_hostname(host->mmc)); 1898 mmc_hostname(host->mmc));
1830 printk(KERN_ERR "%s: Resetting controller.\n", 1899 printk(KERN_ERR "%s: Resetting controller.\n",
1831 mmc_hostname(host->mmc)); 1900 mmc_hostname(host->mmc));
1832 1901
1833 sdhci_reset(host, SDHCI_RESET_CMD); 1902 sdhci_reset(host, SDHCI_RESET_CMD);
1834 sdhci_reset(host, SDHCI_RESET_DATA); 1903 sdhci_reset(host, SDHCI_RESET_DATA);
@@ -1840,7 +1909,11 @@ static void sdhci_tasklet_card(unsigned long param)
1840 1909
1841 spin_unlock_irqrestore(&host->lock, flags); 1910 spin_unlock_irqrestore(&host->lock, flags);
1842 1911
1843 mmc_detect_change(host->mmc, msecs_to_jiffies(200)); 1912 if (host->vmmc &&
1913 !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION))
1914 mmc_detect_change(host->mmc, msecs_to_jiffies(0));
1915 else
1916 mmc_detect_change(host->mmc, msecs_to_jiffies(200));
1844} 1917}
1845 1918
1846static void sdhci_tasklet_finish(unsigned long param) 1919static void sdhci_tasklet_finish(unsigned long param)
@@ -1849,12 +1922,12 @@ static void sdhci_tasklet_finish(unsigned long param)
1849 unsigned long flags; 1922 unsigned long flags;
1850 struct mmc_request *mrq; 1923 struct mmc_request *mrq;
1851 1924
1852 host = (struct sdhci_host*)param; 1925 host = (struct sdhci_host *)param;
1853 1926
1854 /* 1927 /*
1855 * If this tasklet gets rescheduled while running, it will 1928 * If this tasklet gets rescheduled while running, it will
1856 * be run again afterwards but without any active request. 1929 * be run again afterwards but without any active request.
1857 */ 1930 */
1858 if (!host->mrq) 1931 if (!host->mrq)
1859 return; 1932 return;
1860 1933
@@ -1909,7 +1982,7 @@ static void sdhci_timeout_timer(unsigned long data)
1909 struct sdhci_host *host; 1982 struct sdhci_host *host;
1910 unsigned long flags; 1983 unsigned long flags;
1911 1984
1912 host = (struct sdhci_host*)data; 1985 host = (struct sdhci_host *)data;
1913 1986
1914 spin_lock_irqsave(&host->lock, flags); 1987 spin_lock_irqsave(&host->lock, flags);
1915 1988
@@ -1967,11 +2040,20 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
1967 return; 2040 return;
1968 } 2041 }
1969 2042
1970 if (intmask & SDHCI_INT_TIMEOUT) 2043 if (intmask & SDHCI_INT_TIMEOUT) {
2044 printk(KERN_INFO "%s: cmd %d command timeout error\n",
2045 mmc_hostname(host->mmc), host->cmd->opcode);
1971 host->cmd->error = -ETIMEDOUT; 2046 host->cmd->error = -ETIMEDOUT;
1972 else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT | 2047 } else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
1973 SDHCI_INT_INDEX)) 2048 SDHCI_INT_INDEX)) {
2049 printk(KERN_ERR "%s: cmd %d %s error\n",
2050 mmc_hostname(host->mmc), host->cmd->opcode,
2051 (intmask & SDHCI_INT_CRC) ? "command crc" :
2052 (intmask & SDHCI_INT_END_BIT) ? "command end bit" :
2053 "command index error");
1974 host->cmd->error = -EILSEQ; 2054 host->cmd->error = -EILSEQ;
2055 }
2056
1975 2057
1976 if (host->cmd->error) { 2058 if (host->cmd->error) {
1977 tasklet_schedule(&host->finish_tasklet); 2059 tasklet_schedule(&host->finish_tasklet);
@@ -2068,15 +2150,17 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2068 return; 2150 return;
2069 } 2151 }
2070 2152
2071 if (intmask & SDHCI_INT_DATA_TIMEOUT) 2153 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
2072 host->data->error = -ETIMEDOUT; 2154 printk(KERN_ERR "%s: cmd %d data timeout error\n",
2073 else if (intmask & SDHCI_INT_DATA_END_BIT) 2155 mmc_hostname(host->mmc), host->mrq->cmd->opcode);
2074 host->data->error = -EILSEQ; 2156 host->data->error = -ETIMEDOUT;
2075 else if ((intmask & SDHCI_INT_DATA_CRC) && 2157 } else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT)) {
2076 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) 2158 printk(KERN_ERR "%s: cmd %d %s error\n",
2077 != MMC_BUS_TEST_R) 2159 mmc_hostname(host->mmc), host->mrq->cmd->opcode,
2160 (intmask & SDHCI_INT_DATA_CRC) ? "data crc" :
2161 "command end bit");
2078 host->data->error = -EILSEQ; 2162 host->data->error = -EILSEQ;
2079 else if (intmask & SDHCI_INT_ADMA_ERROR) { 2163 } else if (intmask & SDHCI_INT_ADMA_ERROR) {
2080 printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc)); 2164 printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc));
2081 sdhci_show_adma_error(host); 2165 sdhci_show_adma_error(host);
2082 host->data->error = -EIO; 2166 host->data->error = -EIO;
@@ -2133,7 +2217,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2133static irqreturn_t sdhci_irq(int irq, void *dev_id) 2217static irqreturn_t sdhci_irq(int irq, void *dev_id)
2134{ 2218{
2135 irqreturn_t result; 2219 irqreturn_t result;
2136 struct sdhci_host* host = dev_id; 2220 struct sdhci_host *host = dev_id;
2137 u32 intmask; 2221 u32 intmask;
2138 int cardint = 0; 2222 int cardint = 0;
2139 2223
@@ -2230,14 +2314,27 @@ int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
2230 host->flags &= ~SDHCI_NEEDS_RETUNING; 2314 host->flags &= ~SDHCI_NEEDS_RETUNING;
2231 } 2315 }
2232 2316
2317 if (host->mmc->pm_flags & MMC_PM_IGNORE_SUSPEND_RESUME) {
2318 host->mmc->pm_flags |= MMC_PM_KEEP_POWER;
2319 pr_info("%s : Enter WIFI suspend\n", __func__);
2320 }
2321
2233 ret = mmc_suspend_host(host->mmc); 2322 ret = mmc_suspend_host(host->mmc);
2234 if (ret) 2323 if (ret)
2235 return ret; 2324 return ret;
2236 2325
2237 free_irq(host->irq, host); 2326 free_irq(host->irq, host);
2238 2327
2239 if (host->vmmc) 2328 if (host->vmmc) {
2240 ret = regulator_disable(host->vmmc); 2329 if (regulator_is_enabled(host->vmmc)) {
2330#ifdef CONFIG_MIDAS_COMMON
2331 if (host->ops->set_power)
2332 host->ops->set_power(0);
2333#endif
2334 ret = regulator_disable(host->vmmc);
2335 pr_info("%s : MMC Card OFF\n", __func__);
2336 }
2337 }
2241 2338
2242 return ret; 2339 return ret;
2243} 2340}
@@ -2248,10 +2345,15 @@ int sdhci_resume_host(struct sdhci_host *host)
2248{ 2345{
2249 int ret; 2346 int ret;
2250 2347
2251 if (host->vmmc) { 2348 if (host->vmmc && !regulator_is_enabled(host->vmmc)) {
2252 int ret = regulator_enable(host->vmmc); 2349#ifdef CONFIG_MIDAS_COMMON
2350 if (host->ops->set_power)
2351 host->ops->set_power(1);
2352#endif
2353 ret = regulator_enable(host->vmmc);
2253 if (ret) 2354 if (ret)
2254 return ret; 2355 return ret;
2356 pr_info("%s : MMC Card ON\n", __func__);
2255 } 2357 }
2256 2358
2257 2359
@@ -2276,6 +2378,13 @@ int sdhci_resume_host(struct sdhci_host *host)
2276 (host->tuning_mode == SDHCI_TUNING_MODE_1)) 2378 (host->tuning_mode == SDHCI_TUNING_MODE_1))
2277 host->flags |= SDHCI_NEEDS_RETUNING; 2379 host->flags |= SDHCI_NEEDS_RETUNING;
2278 2380
2381#ifdef CONFIG_MACH_PX
2382 /* host has a card and the card is SDIO type */
2383 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
2384 /* enable sdio interrupt */
2385 sdhci_enable_sdio_irq(host->mmc, 1);
2386 }
2387#endif
2279 return ret; 2388 return ret;
2280} 2389}
2281 2390
@@ -2326,6 +2435,7 @@ int sdhci_add_host(struct sdhci_host *host)
2326 u32 max_current_caps; 2435 u32 max_current_caps;
2327 unsigned int ocr_avail; 2436 unsigned int ocr_avail;
2328 int ret; 2437 int ret;
2438 struct sdhci_s3c *sc;
2329 2439
2330 WARN_ON(host == NULL); 2440 WARN_ON(host == NULL);
2331 if (host == NULL) 2441 if (host == NULL)
@@ -2483,7 +2593,12 @@ int sdhci_add_host(struct sdhci_host *host)
2483 } else 2593 } else
2484 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; 2594 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
2485 2595
2486 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23; 2596 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
2597 mmc->max_discard_to = (1 << 27) / (mmc->f_max / 1000);
2598 else
2599 mmc->max_discard_to = (1 << 27) / host->timeout_clk;
2600
2601 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE;
2487 2602
2488 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) 2603 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
2489 host->flags |= SDHCI_AUTO_CMD12; 2604 host->flags |= SDHCI_AUTO_CMD12;
@@ -2540,6 +2655,15 @@ int sdhci_add_host(struct sdhci_host *host)
2540 if (caps[1] & SDHCI_DRIVER_TYPE_D) 2655 if (caps[1] & SDHCI_DRIVER_TYPE_D)
2541 mmc->caps |= MMC_CAP_DRIVER_TYPE_D; 2656 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
2542 2657
2658 /*
2659 * If Power Off Notify capability is enabled by the host,
2660 * set notify to short power off notify timeout value.
2661 */
2662 if (mmc->caps2 & MMC_CAP2_POWEROFF_NOTIFY)
2663 mmc->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT;
2664 else
2665 mmc->power_notify_type = MMC_HOST_PW_NOTIFY_NONE;
2666
2543 /* Initial value for re-tuning timer count */ 2667 /* Initial value for re-tuning timer count */
2544 host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >> 2668 host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
2545 SDHCI_RETUNING_TIMER_COUNT_SHIFT; 2669 SDHCI_RETUNING_TIMER_COUNT_SHIFT;
@@ -2714,12 +2838,32 @@ int sdhci_add_host(struct sdhci_host *host)
2714 if (ret) 2838 if (ret)
2715 goto untasklet; 2839 goto untasklet;
2716 2840
2717 host->vmmc = regulator_get(mmc_dev(mmc), "vmmc"); 2841 sc = sdhci_priv(host);
2842
2843 if (host->vmmc_name)
2844 host->vmmc = regulator_get(mmc_dev(mmc), host->vmmc_name);
2845 else
2846 host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
2847
2718 if (IS_ERR(host->vmmc)) { 2848 if (IS_ERR(host->vmmc)) {
2719 printk(KERN_INFO "%s: no vmmc regulator found\n", mmc_hostname(mmc)); 2849 printk(KERN_ERR "%s: no %s regulator found\n",
2850 mmc_hostname(mmc),
2851 host->vmmc_name ? host->vmmc_name : "vmmc");
2720 host->vmmc = NULL; 2852 host->vmmc = NULL;
2721 } else { 2853 } else {
2722 regulator_enable(host->vmmc); 2854 printk(KERN_INFO "%s: %s regulator found\n",
2855 mmc_hostname(mmc),
2856 host->vmmc_name ? host->vmmc_name : "vmmc");
2857 if (sc->ext_cd_gpio) {
2858 if (gpio_get_value(sc->ext_cd_gpio) != (sc->ext_cd_gpio_invert)) {
2859#ifdef CONFIG_MIDAS_COMMON
2860 if (host->ops->set_power)
2861 host->ops->set_power(1);
2862#endif
2863 regulator_enable(host->vmmc);
2864 mdelay(100);
2865 }
2866 }
2723 } 2867 }
2724 2868
2725 sdhci_init(host, 0); 2869 sdhci_init(host, 0);
@@ -2808,7 +2952,11 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
2808 tasklet_kill(&host->card_tasklet); 2952 tasklet_kill(&host->card_tasklet);
2809 tasklet_kill(&host->finish_tasklet); 2953 tasklet_kill(&host->finish_tasklet);
2810 2954
2811 if (host->vmmc) { 2955 if (host->vmmc && regulator_is_enabled(host->vmmc)) {
2956#ifdef CONFIG_MIDAS_COMMON
2957 if (host->ops->set_power)
2958 host->ops->set_power(0);
2959#endif
2812 regulator_disable(host->vmmc); 2960 regulator_disable(host->vmmc);
2813 regulator_put(host->vmmc); 2961 regulator_put(host->vmmc);
2814 } 2962 }
@@ -2837,11 +2985,12 @@ EXPORT_SYMBOL_GPL(sdhci_free_host);
2837 2985
2838static int __init sdhci_drv_init(void) 2986static int __init sdhci_drv_init(void)
2839{ 2987{
2988 int ret = 0;
2840 printk(KERN_INFO DRIVER_NAME 2989 printk(KERN_INFO DRIVER_NAME
2841 ": Secure Digital Host Controller Interface driver\n"); 2990 ": Secure Digital Host Controller Interface driver\n");
2842 printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); 2991 printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
2843 2992
2844 return 0; 2993 return ret;
2845} 2994}
2846 2995
2847static void __exit sdhci_drv_exit(void) 2996static void __exit sdhci_drv_exit(void)
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 745c42fa41e..b04e361c084 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -273,7 +273,8 @@ struct sdhci_ops {
273 void (*platform_reset_enter)(struct sdhci_host *host, u8 mask); 273 void (*platform_reset_enter)(struct sdhci_host *host, u8 mask);
274 void (*platform_reset_exit)(struct sdhci_host *host, u8 mask); 274 void (*platform_reset_exit)(struct sdhci_host *host, u8 mask);
275 int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs); 275 int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
276 276 void (*hw_reset)(struct sdhci_host *host);
277 void (*set_power)(int on_off);
277}; 278};
278 279
279#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS 280#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS